author    | Peter Maydell <peter.maydell@linaro.org> | 2015-10-01 15:29:50 +0100
committer | Paolo Bonzini <pbonzini@redhat.com>      | 2015-10-12 18:29:26 +0200
commit    | 32857f4d5e165329c03d66000d666975d85f882a
tree      | 8a4a67da85dc98ba2ae67fde892161c56f1e49d2
parent    | 53f8a5e9e2633a4a3b6918c36aec725aa80f2887
exec.c: Collect AddressSpace related fields into a CPUAddressSpace struct
Gather up all the fields currently in CPUState which deal with the CPU's
AddressSpace into a separate CPUAddressSpace struct. This paves the way
for allowing the CPU to know about more than one AddressSpace.
The rearrangement also allows us to make the MemoryListener a directly
embedded object in the CPUAddressSpace (it could not be embedded in
CPUState because 'struct MemoryListener' isn't defined for the user-only
builds). This allows us to resolve the FIXME in tcg_commit() by going
directly from the MemoryListener to the CPUAddressSpace.
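As an illustration (not part of the patch), the pattern this enables can be shown in a minimal standalone sketch. The types below are simplified stand-ins for QEMU's real MemoryListener and CPUAddressSpace, and container_of is written out explicitly rather than taken from QEMU's headers:

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the QEMU types, just to show the pattern. */
typedef struct MemoryListener MemoryListener;
struct MemoryListener {
    void (*commit)(MemoryListener *listener);
};

typedef struct {
    int cpu_index;                  /* stands in for the owning CPUState */
    MemoryListener tcg_as_listener; /* listener embedded by value */
} CPUAddressSpace;

/* container_of: recover the enclosing struct from a pointer to one of its
 * members (QEMU has its own macro; this is the usual definition). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void tcg_commit(MemoryListener *listener)
{
    /* No CPU_FOREACH scan needed: the listener is embedded, so a single
     * pointer subtraction yields the CPUAddressSpace it belongs to. */
    CPUAddressSpace *cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    printf("commit for CPU %d\n", cpuas->cpu_index);
}

int main(void)
{
    CPUAddressSpace cpuas = { .cpu_index = 0 };
    cpuas.tcg_as_listener.commit = tcg_commit;
    cpuas.tcg_as_listener.commit(&cpuas.tcg_as_listener);
    return 0;
}
```

Because the listener is a by-value member, the enclosing structure is recovered with pointer arithmetic alone, which is what lets the real tcg_commit() drop its CPU_FOREACH() scan.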
This patch extracts the actual update of the cached dispatch pointer
from cpu_reload_memory_map() (which is renamed accordingly to
cpu_reloading_memory_map() as it is only responsible for breaking
cpu-exec.c's RCU critical section now). This lets us keep the definition
of the CPUAddressSpace struct private to exec.c.
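To make the resulting ordering concrete (again an illustrative sketch, not the patch's code): the caller must re-read the RCU-protected dispatch pointer only after cpu_reloading_memory_map() has possibly split the read-side critical section. The sketch uses C11 atomics as a stand-in for QEMU's atomic_rcu_read(), and the names ending in _stub are hypothetical:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-ins: a "dispatch" structure published through an
 * RCU-style pointer, and a per-CPU cached copy of that pointer. */
typedef struct {
    int generation;
} AddressSpaceDispatch;

static _Atomic(AddressSpaceDispatch *) as_dispatch;  /* published pointer */
static AddressSpaceDispatch *cached_dispatch;        /* per-CPU cache */

/* Stand-in for cpu_reloading_memory_map(): it may break (unlock/relock) the
 * RCU read-side critical section, after which any previously read snapshot
 * of the published pointer must be treated as stale. */
static void cpu_reloading_memory_map_stub(void)
{
    /* rcu_read_unlock(); rcu_read_lock(); */
}

static void tcg_commit_stub(void)
{
    cpu_reloading_memory_map_stub();
    /* Reload the dispatch pointer only *after* the possible split, so the
     * cache always points into the current read-side critical section.
     * A C11 acquire load stands in for QEMU's atomic_rcu_read(). */
    cached_dispatch = atomic_load_explicit(&as_dispatch, memory_order_acquire);
    printf("dispatch generation %d\n", cached_dispatch->generation);
}

int main(void)
{
    AddressSpaceDispatch d = { .generation = 1 };
    atomic_store_explicit(&as_dispatch, &d, memory_order_release);
    tcg_commit_stub();
    return 0;
}
```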
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <1443709790-25180-4-git-send-email-peter.maydell@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- | cpu-exec-common.c       | 13
-rw-r--r-- | exec.c                  | 56
-rw-r--r-- | include/exec/exec-all.h |  2
-rw-r--r-- | include/qemu/typedefs.h |  1
-rw-r--r-- | include/qom/cpu.h       |  7
5 files changed, 48 insertions, 31 deletions
diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index b95b09a77d..43edf36777 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -37,10 +37,8 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
     siglongjmp(cpu->jmp_env, 1);
 }
 
-void cpu_reload_memory_map(CPUState *cpu)
+void cpu_reloading_memory_map(void)
 {
-    AddressSpaceDispatch *d;
-
     if (qemu_in_vcpu_thread()) {
         /* The guest can in theory prolong the RCU critical section as long
          * as it feels like.  The major problem with this is that because it
@@ -59,17 +57,12 @@ void cpu_reload_memory_map(CPUState *cpu)
          * part of this callback might become unnecessary.)
          *
          * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we reload it below, we can
-         * split the critical section.
+         * only protects cpu->as->dispatch. Since we know our caller is about
+         * to reload it, it's safe to split the critical section.
          */
         rcu_read_unlock();
         rcu_read_lock();
     }
-
-    /* The CPU and TLB are protected by the iothread lock.  */
-    d = atomic_rcu_read(&cpu->as->dispatch);
-    cpu->memory_dispatch = d;
-    tlb_flush(cpu, 1);
 }
 
 #endif
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -161,6 +161,21 @@ static void memory_map_init(void);
 static void tcg_commit(MemoryListener *listener);
 
 static MemoryRegion io_mem_watch;
+
+/**
+ * CPUAddressSpace: all the information a CPU needs about an AddressSpace
+ * @cpu: the CPU whose AddressSpace this is
+ * @as: the AddressSpace itself
+ * @memory_dispatch: its dispatch pointer (cached, RCU protected)
+ * @tcg_as_listener: listener for tracking changes to the AddressSpace
+ */
+struct CPUAddressSpace {
+    CPUState *cpu;
+    AddressSpace *as;
+    struct AddressSpaceDispatch *memory_dispatch;
+    MemoryListener tcg_as_listener;
+};
+
 #endif
 
 #if !defined(CONFIG_USER_ONLY)
@@ -431,7 +446,7 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
                                   hwaddr *plen)
 {
     MemoryRegionSection *section;
-    section = address_space_translate_internal(cpu->memory_dispatch,
+    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                                 addr, xlat, plen, false);
 
     assert(!section->mr->iommu_ops);
@@ -537,13 +552,16 @@ void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
     /* We only support one address space per cpu at the moment.
      */
     assert(cpu->as == as);
 
-    if (cpu->tcg_as_listener) {
-        memory_listener_unregister(cpu->tcg_as_listener);
-    } else {
-        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
+    if (cpu->cpu_ases) {
+        /* We've already registered the listener for our only AS */
+        return;
     }
-    cpu->tcg_as_listener->commit = tcg_commit;
-    memory_listener_register(cpu->tcg_as_listener, as);
+
+    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
+    cpu->cpu_ases[0].cpu = cpu;
+    cpu->cpu_ases[0].as = as;
+    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
+    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
 }
 #endif
@@ -2218,7 +2236,8 @@ static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
 
 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
 {
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
+    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
+    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
     MemoryRegionSection *sections = d->map.sections;
 
     return sections[index & ~TARGET_PAGE_MASK].mr;
@@ -2277,19 +2296,20 @@ static void mem_commit(MemoryListener *listener)
 
 static void tcg_commit(MemoryListener *listener)
 {
-    CPUState *cpu;
+    CPUAddressSpace *cpuas;
+    AddressSpaceDispatch *d;
 
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
-    /* XXX: slow ! */
-    CPU_FOREACH(cpu) {
-        /* FIXME: Disentangle the cpu.h circular files deps so we can
-           directly get the right CPU from listener.  */
-        if (cpu->tcg_as_listener != listener) {
-            continue;
-        }
-        cpu_reload_memory_map(cpu);
-    }
+    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+    cpu_reloading_memory_map();
+    /* The CPU and TLB are protected by the iothread lock.
+     * We reload the dispatch pointer now because cpu_reloading_memory_map()
+     * may have split the RCU critical section.
+     */
+    d = atomic_rcu_read(&cpuas->as->dispatch);
+    cpuas->memory_dispatch = d;
+    tlb_flush(cpuas->cpu, 1);
 }
 
 void address_space_init_dispatch(AddressSpace *as)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a63fd6015e..4e8afbfd47 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -85,7 +85,7 @@ void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
 
 #if !defined(CONFIG_USER_ONLY)
 bool qemu_in_vcpu_thread(void);
-void cpu_reload_memory_map(CPUState *cpu);
+void cpu_reloading_memory_map(void);
 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
 /* cputlb.c */
 /**
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index ee1ce1d44d..d4a8f7a6d5 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -16,6 +16,7 @@ typedef struct BusClass BusClass;
 typedef struct BusState BusState;
 typedef struct CharDriverState CharDriverState;
 typedef struct CompatProperty CompatProperty;
+typedef struct CPUAddressSpace CPUAddressSpace;
 typedef struct DeviceState DeviceState;
 typedef struct DeviceListener DeviceListener;
 typedef struct DisplayChangeListener DisplayChangeListener;
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index b613ff0329..51a1323ead 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -234,6 +234,10 @@ struct kvm_run;
 * @can_do_io: Nonzero if memory-mapped IO is safe.  Deterministic execution
 *     requires that IO only be performed on the last instruction of a TB
 *     so that interrupts take effect immediately.
+ * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
+ *            AddressSpaces this CPU has)
+ * @as: Pointer to the first AddressSpace, for the convenience of targets which
+ *      only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
@@ -280,9 +284,8 @@ struct CPUState {
     QemuMutex work_mutex;
     struct qemu_work_item *queued_work_first, *queued_work_last;
 
+    CPUAddressSpace *cpu_ases;
     AddressSpace *as;
-    struct AddressSpaceDispatch *memory_dispatch;
-    MemoryListener *tcg_as_listener;
 
     void *env_ptr; /* CPUArchState */
     struct TranslationBlock *current_tb;