Diffstat (limited to 'system/xen/xsa/xsa212.patch')
-rw-r--r--  system/xen/xsa/xsa212.patch  87
1 file changed, 0 insertions, 87 deletions
diff --git a/system/xen/xsa/xsa212.patch b/system/xen/xsa/xsa212.patch
deleted file mode 100644
index 2c435c413644..000000000000
--- a/system/xen/xsa/xsa212.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-memory: properly check guest memory ranges in XENMEM_exchange handling
-
-The use of guest_handle_okay() here (as introduced by the XSA-29 fix)
-is insufficient; guest_handle_subrange_okay() needs to be used
-instead.
-
-Note that the uses are okay in
-- XENMEM_add_to_physmap_batch handling due to the size field being only
- 16 bits wide,
-- livepatch_list() due to the limit of 1024 enforced on the
- number-of-entries input (leaving aside the fact that this can be
- called by a privileged domain only anyway),
-- compat mode handling due to counts there being limited to 32 bits,
-- everywhere else due to guest arrays being accessed sequentially from
- index zero.
-
-This is XSA-212.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
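The reasoning above is easiest to see with a toy model. The sketch below is
not the Xen macros: it assumes a simplified 48-bit guest address limit
standing in for __addr_ok(), and the helper names (whole_array_okay,
subrange_okay) are made up for illustration. It shows why a check anchored
only at the start of the array is not enough once accesses resume at a
caller-supplied index, while a check of the subrange actually touched is.

/* Toy model, not Xen code: simplified stand-ins for the guest-handle checks. */
#include <stdio.h>

#define GUEST_LIMIT (1UL << 47)   /* assumed top of guest-visible addresses */

static int addr_ok(unsigned long addr)
{
    return addr < GUEST_LIMIT;
}

/* whole-array check: in this model it only proves the array starts in guest
 * space, relying on accesses walking sequentially from index zero */
static int whole_array_okay(unsigned long base, unsigned long nr,
                            unsigned long elt)
{
    (void)nr; (void)elt;
    return addr_ok(base);
}

/* subrange check: proves the elements [first, last] themselves are in range */
static int subrange_okay(unsigned long base, unsigned long first,
                         unsigned long last, unsigned long elt)
{
    return addr_ok(base + first * elt) && addr_ok(base + last * elt);
}

int main(void)
{
    unsigned long base = GUEST_LIMIT - 0x1000; /* array starts just below the limit */
    unsigned long idx  = 0x100000;             /* resume index picked by the caller */

    /* passes: only the array start is examined */
    printf("whole:    %d\n", whole_array_okay(base, idx + 1, 8));
    /* fails: the element actually accessed lies beyond the guest limit */
    printf("subrange: %d\n", subrange_okay(base, idx, idx, 8));
    return 0;
}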
---- a/xen/common/memory.c
-+++ b/xen/common/memory.c
-@@ -436,8 +436,8 @@ static long memory_exchange(XEN_GUEST_HA
- goto fail_early;
- }
-
-- if ( !guest_handle_okay(exch.in.extent_start, exch.in.nr_extents) ||
-- !guest_handle_okay(exch.out.extent_start, exch.out.nr_extents) )
-+ if ( !guest_handle_subrange_okay(exch.in.extent_start, exch.nr_exchanged,
-+ exch.in.nr_extents - 1) )
- {
- rc = -EFAULT;
- goto fail_early;
-@@ -447,11 +447,27 @@ static long memory_exchange(XEN_GUEST_HA
- {
- in_chunk_order = exch.out.extent_order - exch.in.extent_order;
- out_chunk_order = 0;
-+
-+ if ( !guest_handle_subrange_okay(exch.out.extent_start,
-+ exch.nr_exchanged >> in_chunk_order,
-+ exch.out.nr_extents - 1) )
-+ {
-+ rc = -EFAULT;
-+ goto fail_early;
-+ }
- }
- else
- {
- in_chunk_order = 0;
- out_chunk_order = exch.in.extent_order - exch.out.extent_order;
-+
-+ if ( !guest_handle_subrange_okay(exch.out.extent_start,
-+ exch.nr_exchanged << out_chunk_order,
-+ exch.out.nr_extents - 1) )
-+ {
-+ rc = -EFAULT;
-+ goto fail_early;
-+ }
- }
-
- d = rcu_lock_domain_by_any_id(exch.in.domid);
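The first argument handed to guest_handle_subrange_okay() in the two branches
above is the index in the output array at which the continuation resumes:
nr_exchanged counts input extents, so it has to be rescaled by the difference
of the two extent orders. A minimal sketch of that scaling, using a
hypothetical helper name that is not part of the patch:

#include <stdio.h>

/* Illustration only: map the count of already-exchanged input extents to the
 * corresponding index in the output array. */
static unsigned long out_resume_index(unsigned long nr_exchanged,
                                      unsigned int in_order,
                                      unsigned int out_order)
{
    if (out_order >= in_order)
        /* each output extent corresponds to 2^(out-in) input extents */
        return nr_exchanged >> (out_order - in_order);
    /* each input extent corresponds to 2^(in-out) output extents */
    return nr_exchanged << (in_order - out_order);
}

int main(void)
{
    /* e.g. exchanging 4KiB pages (order 0) for 2MiB pages (order 9):
     * 1024 input pages already handled resumes at output index 2 */
    printf("%lu\n", out_resume_index(1024, 0, 9));
    return 0;
}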
---- a/xen/include/asm-x86/x86_64/uaccess.h
-+++ b/xen/include/asm-x86/x86_64/uaccess.h
-@@ -29,8 +29,9 @@ extern void *xlat_malloc(unsigned long *
- /*
- * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
- * This is also valid for range checks (addr, addr+size). As long as the
-- * start address is outside the Xen-reserved area then we will access a
-- * non-canonical address (and thus fault) before ever reaching VIRT_START.
-+ * start address is outside the Xen-reserved area, sequential accesses
-+ * (starting at addr) will hit a non-canonical address (and thus fault)
-+ * before ever reaching VIRT_START.
- */
- #define __addr_ok(addr) \
- (((unsigned long)(addr) < (1UL<<47)) || \
-@@ -40,7 +41,8 @@ extern void *xlat_malloc(unsigned long *
- (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
-
- #define array_access_ok(addr, count, size) \
-- (access_ok(addr, (count)*(size)))
-+ (likely(((count) ?: 0UL) < (~0UL / (size))) && \
-+ access_ok(addr, (count) * (size)))
-
- #define __compat_addr_ok(d, addr) \
- ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d))
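The extra bound added to array_access_ok() guards the multiplication itself:
with a sufficiently large count, (count) * (size) wraps around and the range
check would then be applied to a far-too-small length. Below is a standalone
illustration, assuming a simplified range_ok() in place of access_ok();
array_ok_old and array_ok_new are made-up names for the unguarded and guarded
variants.

/* Standalone illustration, not Xen code. */
#include <stdio.h>

#define LIMIT (1UL << 47)

/* stand-in for access_ok(): accept ranges entirely below LIMIT */
static int range_ok(unsigned long addr, unsigned long len)
{
    return addr < LIMIT && len <= LIMIT - addr;
}

/* unguarded: the multiplication can wrap around */
static int array_ok_old(unsigned long addr, unsigned long count,
                        unsigned long size)
{
    return range_ok(addr, count * size);
}

/* guarded, a simplified equivalent of the patched macro: reject counts whose
 * product with size would overflow before doing the range check */
static int array_ok_new(unsigned long addr, unsigned long count,
                        unsigned long size)
{
    return count < (~0UL / size) && range_ok(addr, count * size);
}

int main(void)
{
    unsigned long addr = 0x1000, size = 16;
    unsigned long count = (~0UL / size) + 1;   /* count * size wraps to 0 */

    printf("old: %d\n", array_ok_old(addr, count, size)); /* 1: bogus pass */
    printf("new: %d\n", array_ok_new(addr, count, size)); /* 0: rejected */
    return 0;
}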