path: root/linux-user/main.c
author     Richard Henderson <richard.henderson@linaro.org>  2023-03-06 01:26:29 +0300
committer  Richard Henderson <richard.henderson@linaro.org>  2023-03-28 15:23:10 -0700
commit     95059f9c313a7fbd7f22e4cdc1977c0393addc7b (patch)
tree       5cbda761db3d946f2dafca04afaab139077596dd /linux-user/main.c
parent     a3a67f54f0b4ec98ff2380a792e5bfeebc47d554 (diff)
include/exec: Change reserved_va semantics to last byte
Change the semantics to be the last byte of the guest va, rather than the following byte. This avoids some overflow conditions.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
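For context (not part of the patch): a minimal sketch of the overflow that the last-byte representation avoids, using an illustrative 32-bit stand-in for abi_ulong.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t abi_ulong;   /* illustrative stand-in for a 32-bit guest type */

int main(void)
{
    /* Old semantics: store the byte *following* the reserved region.
     * For a full 4 GiB guest address space that is 2^32, which cannot
     * be represented in 32 bits and wraps to 0. */
    abi_ulong end_exclusive = (abi_ulong)0x100000000ull;

    /* New semantics: store the *last* byte of the region.  0xffffffff
     * fits, so the last page no longer has to be avoided. */
    abi_ulong last_byte = UINT32_MAX;

    printf("exclusive end wraps to 0x%" PRIx32 ", last byte is 0x%" PRIx32 "\n",
           end_exclusive, last_byte);
    return 0;
}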
Diffstat (limited to 'linux-user/main.c')
-rw-r--r--  linux-user/main.c  27
1 file changed, 13 insertions(+), 14 deletions(-)
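As the handle_arg_reserved_va hunk below shows, the value parsed from -R is now stored as size - 1, with 0 still meaning "use the default". A standalone sketch of that convention (the helper name and page size here are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative helper mirroring the size-minus-one convention. */
static unsigned long size_to_last_byte(unsigned long size)
{
    return size ? size - 1 : 0;   /* 0 keeps meaning "default" */
}

int main(void)
{
    unsigned long host_page_size = 4096;                        /* assumed value */
    unsigned long reserved_va = size_to_last_byte(1ul << 30);   /* e.g. -R 1G */

    /* The alignment check correspondingly becomes (reserved_va + 1) % page size. */
    if ((reserved_va + 1) % host_page_size != 0) {
        fprintf(stderr, "Reserved virtual address not aligned\n");
        exit(EXIT_FAILURE);
    }
    printf("reserved_va = 0x%lx (last byte of a %lu-byte reservation)\n",
           reserved_va, reserved_va + 1);
    return 0;
}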
diff --git a/linux-user/main.c b/linux-user/main.c
index 39d9bd4d7a..fe03293516 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -109,11 +109,9 @@ static const char *last_log_filename;
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
- of type abi_ulong and expect it to fit. Avoid the last page. */
-# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
+# define MAX_RESERVED_VA(CPU) 0xfffffffful
# else
-# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
# endif
# else
# define MAX_RESERVED_VA(CPU) 0
@@ -379,7 +377,9 @@ static void handle_arg_reserved_va(const char *arg)
{
char *p;
int shift = 0;
- reserved_va = strtoul(arg, &p, 0);
+ unsigned long val;
+
+ val = strtoul(arg, &p, 0);
switch (*p) {
case 'k':
case 'K':
@@ -393,10 +393,10 @@ static void handle_arg_reserved_va(const char *arg)
break;
}
if (shift) {
- unsigned long unshifted = reserved_va;
+ unsigned long unshifted = val;
p++;
- reserved_va <<= shift;
- if (reserved_va >> shift != unshifted) {
+ val <<= shift;
+ if (val >> shift != unshifted) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
@@ -405,6 +405,8 @@ static void handle_arg_reserved_va(const char *arg)
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
exit(EXIT_FAILURE);
}
+ /* The representation is size - 1, with 0 remaining "default". */
+ reserved_va = val ? val - 1 : 0;
}
static void handle_arg_singlestep(const char *arg)
@@ -793,7 +795,7 @@ int main(int argc, char **argv, char **envp)
*/
max_reserved_va = MAX_RESERVED_VA(cpu);
if (reserved_va != 0) {
- if (reserved_va % qemu_host_page_size) {
+ if ((reserved_va + 1) % qemu_host_page_size) {
char *s = size_to_str(qemu_host_page_size);
fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
g_free(s);
@@ -804,11 +806,8 @@ int main(int argc, char **argv, char **envp)
exit(EXIT_FAILURE);
}
} else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
- /*
- * reserved_va must be aligned with the host page size
- * as it is used with mmap()
- */
- reserved_va = max_reserved_va & qemu_host_page_mask;
+ /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+ reserved_va = max_reserved_va;
}
{