author     Richard Henderson <richard.henderson@linaro.org>  2021-06-12 17:21:06 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2021-06-19 11:09:10 -0700
commit     08dff435e27dc2bc3582e3f25e7cf01956dddafc
tree       8baa5b3e6f75fc60c67a313b8a61e2da874553a0  /accel/tcg/cputlb.c
parent     e5b4654907e9d96e1b215fa943e2f62e61676ed6
accel/tcg: Probe the proper permissions for atomic ops
We had a single ATOMIC_MMU_LOOKUP macro that probed for
read+write on all atomic ops.  This is incorrect for
plain atomic load and atomic store.

For user-only, we rely on the host page permissions.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/390
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
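For context, a minimal sketch (in the style of QEMU's atomic_template.h, whose companion change is not part of this diffstat) of how a plain atomic load helper can now request read-only permission; the helper body and the qatomic_read__nocheck call are assumptions based on QEMU conventions, not code from this commit:

/* Hypothetical sketch: a plain atomic load probes PAGE_READ only,
 * so a write-only guest page faults as a load instead of being
 * granted read+write access as under the old ATOMIC_MMU_LOOKUP. */
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
                         TCGMemOpIdx oi, uintptr_t retaddr)
{
    ATOMIC_MMU_DECLS;
    /* Read-only probe; a read-modify-write op would use ATOMIC_MMU_LOOKUP_RW. */
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_R;
    DATA_TYPE val = qatomic_read__nocheck(haddr);
    ATOMIC_MMU_CLEANUP;
    return val;
}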
Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--  accel/tcg/cputlb.c  95
1 file changed, 66 insertions(+), 29 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f24348e979..b6d5fc6326 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
#endif
-/* Probe for a read-modify-write atomic operation. Do not allow unaligned
- * operations, or io operations to proceed. Return the host address. */
+/*
+ * Probe for an atomic operation. Do not allow unaligned operations,
+ * or io operations to proceed. Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ TCGMemOpIdx oi, int size, int prot,
+ uintptr_t retaddr)
{
size_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_addr_write(tlbe);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
- int s_bits = mop & MO_SIZE;
+ uintptr_t index;
+ CPUTLBEntry *tlbe;
+ target_ulong tlb_addr;
void *hostaddr;
/* Adjust the given return address. */
@@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
/* Enforce qemu required alignment. */
- if (unlikely(addr & ((1 << s_bits) - 1))) {
+ if (unlikely(addr & (size - 1))) {
/* We get here if guest alignment was not requested,
or was not enforced by cpu_unaligned_access above.
We might widen the access and emulate, but for now
@@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world;
}
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+
/* Check TLB entry and enforce page permissions. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ if (prot & PAGE_WRITE) {
+ tlb_addr = tlb_addr_write(tlbe);
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_STORE, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+ }
+
+ /* Let the guest notice RMW on a write-only page. */
+ if ((prot & PAGE_READ) &&
+ unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ /*
+ * Since we don't support reads and writes to different addresses,
+ * and we do have the proper page loaded for write, this shouldn't
+ * ever return. But just in case, handle via stop-the-world.
+ */
+ goto stop_the_world;
+ }
+ } else /* if (prot & PAGE_READ) */ {
+ tlb_addr = tlbe->addr_read;
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!VICTIM_TLB_HIT(addr_read, addr)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
}
- tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
/* Notice an IO access or a needs-MMU-lookup access */
@@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world;
}
- /* Let the guest notice RMW on a write-only page. */
- if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
- tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
- mmu_idx, retaddr);
- /* Since we don't support reads and writes to different addresses,
- and we do have the proper page loaded for write, this shouldn't
- ever return. But just in case, handle via stop-the-world. */
- goto stop_the_world;
- }
-
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1 << s_bits,
+ notdirty_write(env_cpu(env), addr, size,
&env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
}
@@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_LOOKUP_RW \
+ atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
+#define ATOMIC_MMU_LOOKUP_R \
+ atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
+#define ATOMIC_MMU_LOOKUP_W \
+ atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)
@@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
#undef EXTRA_ARGS
#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
+#undef ATOMIC_MMU_LOOKUP_RW
+#undef ATOMIC_MMU_LOOKUP_R
+#undef ATOMIC_MMU_LOOKUP_W
+
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
+#define ATOMIC_MMU_LOOKUP_RW \
+ atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
+#define ATOMIC_MMU_LOOKUP_R \
+ atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
+#define ATOMIC_MMU_LOOKUP_W \
+ atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
#define DATA_SIZE 1
#include "atomic_template.h"