author     Peter Maydell <peter.maydell@linaro.org>  2017-11-20 18:08:28 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2017-11-21 12:09:25 +0000
commit     34d49937e480edfa173d71e8c17972ad866b56c6 (patch)
tree       d2080bc4d17a444d90a3b95a82ce0324e2898122 /accel/tcg/cputlb.c
parent     27266271977c5a30f2f7d493e042be1897827bdd (diff)
accel/tcg: Handle atomic accesses to notdirty memory correctly
To do a write to memory that is marked as notdirty, we need to
invalidate any TBs we have cached for that memory, and update the cpu
physical memory dirty flags for VGA and migration. The slowpath code
in notdirty_mem_write() does all this correctly, but the new atomic
handling code in atomic_mmu_lookup() doesn't do anything at all: it
just clears the dirty bit in the TLB.

The effect of this bug is that if the first write to a notdirty page
for which we have cached TBs is by a guest atomic access, we fail to
invalidate the TBs and subsequently will execute incorrect code. This
can be seen by trying to run 'javac' on AArch64.

Use the new memory_notdirty_write_prepare() and
memory_notdirty_write_complete() functions to correctly handle the
update to notdirty memory in the atomic codepath.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1511201308-23580-3-git-send-email-peter.maydell@linaro.org
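[Editor's note: the pattern the patch introduces is a prepare/complete
bracket around the store itself. The sketch below is a simplified
illustration of that bracketing, not code from the patch: NotDirtyInfo
and the two memory_notdirty_write_*() functions are the interfaces the
diff actually uses (introduced by the parent commit), while the
enclosing function, its parameters, and do_atomic_store() are
hypothetical stand-ins for the caller's context.]

    /*
     * Illustrative sketch only, not part of the patch: how a write to
     * notdirty RAM is bracketed. do_atomic_store() is a hypothetical
     * placeholder for the guest's actual atomic read-modify-write.
     */
    static void write_to_notdirty_page(CPUState *cpu, target_ulong vaddr,
                                       void *hostaddr, unsigned size)
    {
        NotDirtyInfo ndi;

        /* Before the write: invalidate any TBs cached for this page. */
        memory_notdirty_write_prepare(&ndi, cpu, vaddr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      size);

        do_atomic_store(hostaddr, size);  /* hypothetical atomic RMW */

        /* After the write: update dirty flags for VGA and migration. */
        memory_notdirty_write_complete(&ndi);
    }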
Diffstat (limited to 'accel/tcg/cputlb.c')
 accel/tcg/cputlb.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d071ca4d14..8fd84209df 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -946,7 +946,8 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
  * operations, or io operations to proceed.  Return the host address.  */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr)
+                               TCGMemOpIdx oi, uintptr_t retaddr,
+                               NotDirtyInfo *ndi)
 {
     size_t mmu_idx = get_mmuidx(oi);
     size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -955,6 +956,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     TCGMemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     int s_bits = mop & MO_SIZE;
+    void *hostaddr;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -984,21 +986,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
     }
 
-    /* Check notdirty */
-    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        tlb_set_dirty(ENV_GET_CPU(env), addr);
-        tlb_addr = tlb_addr & ~TLB_NOTDIRTY;
-    }
-
     /* Notice an IO access  */
-    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+    if (unlikely(tlb_addr & TLB_MMIO)) {
         /* There's really nothing that can be done to
            support this apart from stop-the-world.  */
         goto stop_the_world;
     }
 
     /* Let the guest notice RMW on a write-only page.  */
-    if (unlikely(tlbe->addr_read != tlb_addr)) {
+    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
         tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
         /* Since we don't support reads and writes to different addresses,
            and we do have the proper page loaded for write, this shouldn't
@@ -1006,7 +1002,17 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         goto stop_the_world;
     }
 
-    return (void *)((uintptr_t)addr + tlbe->addend);
+    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+
+    ndi->active = false;
+    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
+        ndi->active = true;
+        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
+                                      qemu_ram_addr_from_host_nofail(hostaddr),
+                                      1 << s_bits);
+    }
+
+    return hostaddr;
 
  stop_the_world:
     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
@@ -1040,8 +1046,14 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)
-#define ATOMIC_MMU_CLEANUP do { } while (0)
+#define ATOMIC_MMU_DECLS   NotDirtyInfo ndi
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
+#define ATOMIC_MMU_CLEANUP                              \
+    do {                                                \
+        if (unlikely(ndi.active)) {                     \
+            memory_notdirty_write_complete(&ndi);       \
+        }                                               \
+    } while (0)
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
@@ -1069,7 +1081,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #undef ATOMIC_MMU_LOOKUP
 #define EXTRA_ARGS , TCGMemOpIdx oi
 #define ATOMIC_NAME(X)  HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
 
 #define DATA_SIZE 1
 #include "atomic_template.h"