author     Richard Henderson <richard.henderson@linaro.org>  2024-01-02 12:57:52 +1100
committer  Richard Henderson <richard.henderson@linaro.org>  2024-02-29 11:35:36 -1000
commit     e8cec51be073bb1abac3850a4c1a90f96a416af2 (patch)
tree       a25f07d3a94ad44786f716f66fa4cb96f76f7117 /linux-user
parent     d558c395a9bc3f15e010dc0fb786af9a400bccac (diff)
linux-user: Move some mmap checks outside the lock
Basic validation of operands does not require the lock. Hoist these
checks from target_mmap__locked back into target_mmap.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Acked-by: Helge Deller <deller@gmx.de>
Message-Id: <20240102015808.132373-18-richard.henderson@linaro.org>
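The pattern, reduced to a standalone C sketch (the names do_map, do_map_locked,
and map_lock are illustrative stand-ins, not QEMU's API): checks that read only
the function arguments can run before the lock is taken, so contending threads
no longer serialize on requests that were going to fail anyway.

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the work that genuinely needs the lock held. */
static long do_map_locked(size_t len)
{
    return (long)len;
}

long do_map(size_t len, size_t align)
{
    long ret;

    /*
     * Basic validation touches no shared state, so it is hoisted out
     * of the critical section; only real failures reach the lock.
     */
    if (len == 0 || align == 0 || (align & (align - 1)) != 0) {
        errno = EINVAL;
        return -1;
    }

    pthread_mutex_lock(&map_lock);
    ret = do_map_locked(len);
    pthread_mutex_unlock(&map_lock);
    return ret;
}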
Diffstat (limited to 'linux-user')
-rw-r--r--  linux-user/mmap.c  107
1 file changed, 53 insertions(+), 54 deletions(-)
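Two behavioral details in the diff below are worth noting before reading it:
the tb_flush for shared mappings now runs after mmap_unlock(), and only when
the mapping succeeded (ret != -1); and the shared-memory test changed from
"flags & MAP_SHARED" to "(flags & MAP_TYPE) != MAP_PRIVATE", which also
matches MAP_SHARED_VALIDATE. A standalone sketch of the mask test, assuming
Linux mmap headers (glibc exposes MAP_TYPE under its default feature macros;
mapping_is_shared is a made-up helper name):

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>   /* MAP_TYPE, MAP_SHARED, MAP_PRIVATE on Linux */

/*
 * MAP_TYPE masks the sharing-type bits of the mmap flags.  Comparing
 * the masked value against MAP_PRIVATE catches MAP_SHARED as well as
 * MAP_SHARED_VALIDATE (Linux 4.15+), which a plain MAP_SHARED bit
 * test would miss.
 */
static bool mapping_is_shared(int flags)
{
    return (flags & MAP_TYPE) != MAP_PRIVATE;
}

int main(void)
{
    printf("%d\n", mapping_is_shared(MAP_SHARED));          /* 1 */
    printf("%d\n", mapping_is_shared(MAP_PRIVATE));         /* 0 */
#ifdef MAP_SHARED_VALIDATE
    printf("%d\n", mapping_is_shared(MAP_SHARED_VALIDATE)); /* 1 */
#endif
    return 0;
}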
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index b4c3cc65aa..fbaea832c5 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -491,52 +491,14 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
 }
 
 static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
-                                    int target_prot, int flags,
+                                    int target_prot, int flags, int page_flags,
                                     int fd, off_t offset)
 {
     int host_page_size = qemu_real_host_page_size();
     abi_ulong ret, last, real_start, real_last, retaddr, host_len;
     abi_ulong passthrough_start = -1, passthrough_last = 0;
-    int page_flags;
     off_t host_offset;
 
-    if (!len) {
-        errno = EINVAL;
-        return -1;
-    }
-
-    page_flags = validate_prot_to_pageflags(target_prot);
-    if (!page_flags) {
-        errno = EINVAL;
-        return -1;
-    }
-
-    /* Also check for overflows... */
-    len = TARGET_PAGE_ALIGN(len);
-    if (!len) {
-        errno = ENOMEM;
-        return -1;
-    }
-
-    if (offset & ~TARGET_PAGE_MASK) {
-        errno = EINVAL;
-        return -1;
-    }
-
-    /*
-     * If we're mapping shared memory, ensure we generate code for parallel
-     * execution and flush old translations. This will work up to the level
-     * supported by the host -- anything that requires EXCP_ATOMIC will not
-     * be atomic with respect to an external process.
-     */
-    if (flags & MAP_SHARED) {
-        CPUState *cpu = thread_cpu;
-        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
-            cpu->tcg_cflags |= CF_PARALLEL;
-            tb_flush(cpu);
-        }
-    }
-
     real_start = start & -host_page_size;
     host_offset = offset & -host_page_size;
 
@@ -616,23 +578,9 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
         passthrough_start = start;
         passthrough_last = last;
     } else {
-        if (start & ~TARGET_PAGE_MASK) {
-            errno = EINVAL;
-            return -1;
-        }
         last = start + len - 1;
         real_last = ROUND_UP(last, host_page_size) - 1;
 
-        /*
-         * Test if requested memory area fits target address space
-         * It can fail only on 64-bit host with 32-bit target.
-         * On any other target/host host mmap() handles this error correctly.
-         */
-        if (last < start || !guest_range_valid_untagged(start, len)) {
-            errno = ENOMEM;
-            return -1;
-        }
-
         if (flags & MAP_FIXED_NOREPLACE) {
             /* Validate that the chosen range is empty. */
             if (!page_check_range_empty(start, last)) {
@@ -778,13 +726,64 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                      int flags, int fd, off_t offset)
 {
     abi_long ret;
+    int page_flags;
 
     trace_target_mmap(start, len, target_prot, flags, fd, offset);
+
+    if (!len) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    page_flags = validate_prot_to_pageflags(target_prot);
+    if (!page_flags) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    /* Also check for overflows... */
+    len = TARGET_PAGE_ALIGN(len);
+    if (!len || len != (size_t)len) {
+        errno = ENOMEM;
+        return -1;
+    }
+
+    if (offset & ~TARGET_PAGE_MASK) {
+        errno = EINVAL;
+        return -1;
+    }
+    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
+        if (start & ~TARGET_PAGE_MASK) {
+            errno = EINVAL;
+            return -1;
+        }
+        if (!guest_range_valid_untagged(start, len)) {
+            errno = ENOMEM;
+            return -1;
+        }
+    }
+
     mmap_lock();
-    ret = target_mmap__locked(start, len, target_prot, flags, fd, offset);
+    ret = target_mmap__locked(start, len, target_prot, flags,
+                              page_flags, fd, offset);
     mmap_unlock();
+
+    /*
+     * If we're mapping shared memory, ensure we generate code for parallel
+     * execution and flush old translations. This will work up to the level
+     * supported by the host -- anything that requires EXCP_ATOMIC will not
+     * be atomic with respect to an external process.
+     */
+    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
+        CPUState *cpu = thread_cpu;
+        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+            cpu->tcg_cflags |= CF_PARALLEL;
+            tb_flush(cpu);
+        }
+    }
+
     return ret;
 }
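The hoisted overflow test also gains a second clause compared to the old code
in target_mmap__locked: besides catching TARGET_PAGE_ALIGN wrapping to zero,
"len != (size_t)len" rejects a guest length that would truncate when handed to
the host mmap (a 64-bit abi_ulong on a 32-bit host). A self-contained
illustration, assuming a hypothetical 4 KiB page and fixed-width stand-ins for
abi_ulong and the host's size_t:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                       /* hypothetical page size */
#define PAGE_MASK (~(uint32_t)(PAGE_SIZE - 1))

/*
 * Align up by adding PAGE_SIZE-1 and masking.  If len lies within one
 * page of the top of the (here 32-bit) address space, the addition
 * wraps and the result is 0 -- which the "!len" test catches.
 */
static uint32_t page_align(uint32_t len)
{
    return (len + PAGE_SIZE - 1) & PAGE_MASK;
}

int main(void)
{
    uint64_t guest_len;  /* stand-in for a 64-bit abi_ulong */
    uint32_t host_size;  /* stand-in for a 32-bit host size_t */

    printf("%#" PRIx32 "\n", page_align(1));            /* 0x1000 */
    printf("%#" PRIx32 "\n", page_align(0xffffffffu));  /* 0: wrapped */

    /* The second clause: a guest length that truncates on the host. */
    guest_len = UINT64_C(0x100000000);
    host_size = (uint32_t)guest_len;
    printf("truncates: %d\n", guest_len != host_size);  /* 1 -> ENOMEM */
    return 0;
}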