author     Mario Preksavec <mario@slackware.hr>              2023-09-20 21:37:21 +0200
committer  Willy Sudiarto Raharjo <willysr@slackbuilds.org>  2023-09-23 11:33:48 +0700
commit     ccfef38374428a2084e70626615d4c8a0e6398fa (patch)
tree       c2f51af4d6c733ea8d10b690e4eeaf02490983cd
parent     d91281e469fd626c5ed9dbdf1c15951c46d60691 (diff)
system/xen: Updated for version 4.17.2.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Signed-off-by: Willy Sudiarto Raharjo <willysr@slackbuilds.org>
-rw-r--r--  system/xen/xen.SlackBuild   |   2
-rw-r--r--  system/xen/xen.info         |   6
-rw-r--r--  system/xen/xsa/xsa437.patch | 110
3 files changed, 114 insertions, 4 deletions
diff --git a/system/xen/xen.SlackBuild b/system/xen/xen.SlackBuild
index c02761dccc818..4bee50e1e26b8 100644
--- a/system/xen/xen.SlackBuild
+++ b/system/xen/xen.SlackBuild
@@ -25,7 +25,7 @@
cd $(dirname $0) ; CWD=$(pwd)

PRGNAM=xen
-VERSION=${VERSION:-4.17.1}
+VERSION=${VERSION:-4.17.2}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
PKGTYPE=${PKGTYPE:-tgz}
diff --git a/system/xen/xen.info b/system/xen/xen.info
index 0b80662c90862..e1a3760f26096 100644
--- a/system/xen/xen.info
+++ b/system/xen/xen.info
@@ -1,9 +1,9 @@
PRGNAM="xen"
-VERSION="4.17.1"
+VERSION="4.17.2"
HOMEPAGE="http://www.xenproject.org/"
DOWNLOAD="UNSUPPORTED"
MD5SUM=""
-DOWNLOAD_x86_64="http://mirror.slackware.hr/sources/xen/xen-4.17.1.tar.gz \
+DOWNLOAD_x86_64="http://mirror.slackware.hr/sources/xen/xen-4.17.2.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/ipxe-git-3c040ad387099483102708bb1839110bc788cefb.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/lwip-1.3.0.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/zlib-1.2.3.tar.gz \
@@ -15,7 +15,7 @@ DOWNLOAD_x86_64="http://mirror.slackware.hr/sources/xen/xen-4.17.1.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/tpm_emulator-0.7.4.tar.gz \
http://mirror.slackware.hr/sources/xen-seabios/seabios-1.16.0.tar.gz \
http://mirror.slackware.hr/sources/xen-ovmf/xen-ovmf-20210824_7b4a99be8a.tar.bz2"
-MD5SUM_x86_64="5276f3c78c58a538ca266d203a482349 \
+MD5SUM_x86_64="f344056c4566ac1627db46ea92588c3a \
23ba00d5e2c5b4343d12665af73e1cb5 \
36cc57650cffda9a0269493be2a169bb \
debc62758716a169df9f62e6ab2bc634 \
diff --git a/system/xen/xsa/xsa437.patch b/system/xen/xsa/xsa437.patch
new file mode 100644
index 0000000000000..18c9f8fc103c9
--- /dev/null
+++ b/system/xen/xsa/xsa437.patch
@@ -0,0 +1,110 @@
+From 7fac5971340a13ca9458195305bcfe14df2e52d2 Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini@amd.com>
+Date: Thu, 17 Aug 2023 13:41:35 +0100
+Subject: [PATCH] xen/arm: page: Handle cache flush of an element at the top of
+ the address space
+
+The region that needs to be cleaned/invalidated may be at the top
+of the address space. This means that 'end' (i.e. 'p + size') will
+be 0 and therefore nothing will be cleaned/invalidated as the check
+in the loop will always be false.
+
+On Arm64, we only support up to 48-bit Virtual
+address space. So this is not a concern there. However, for 32-bit,
+the mapcache is using the last 2GB of the address space. Therefore
+we may not clean/invalidate properly some pages. This could lead
+to memory corruption or data leakage (the scrubbed value may
+still sit in the cache when the guest could read directly the memory
+and therefore read the old content).
+
+Rework invalidate_dcache_va_range(), clean_dcache_va_range(),
+clean_and_invalidate_dcache_va_range() to handle a cache flush
+with an element at the top of the address space.
+
+This is CVE-2023-34321 / XSA-437.
+
+Reported-by: Julien Grall <jgrall@amazon.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini@amd.com>
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+Acked-by: Bertrand Marquis <bertrand.marquis@arm.com>
+
+---
+ xen/arch/arm/include/asm/page.h | 33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
+index e7cd62190c7f..d7fe770a5e49 100644
+--- a/xen/arch/arm/include/asm/page.h
++++ b/xen/arch/arm/include/asm/page.h
+@@ -160,26 +160,25 @@ static inline size_t read_dcache_line_bytes(void)
+
+ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+ {
+- const void *end = p + size;
+ size_t cacheline_mask = dcache_line_bytes - 1;
+
+ dsb(sy); /* So the CPU issues all writes to the range */
+
+ if ( (uintptr_t)p & cacheline_mask )
+ {
++ size -= dcache_line_bytes - ((uintptr_t)p & cacheline_mask);
+ p = (void *)((uintptr_t)p & ~cacheline_mask);
+ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+ p += dcache_line_bytes;
+ }
+- if ( (uintptr_t)end & cacheline_mask )
+- {
+- end = (void *)((uintptr_t)end & ~cacheline_mask);
+- asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
+- }
+
+- for ( ; p < end; p += dcache_line_bytes )
++ for ( ; size >= dcache_line_bytes;
++ p += dcache_line_bytes, size -= dcache_line_bytes )
+ asm volatile (__invalidate_dcache_one(0) : : "r" (p));
+
++ if ( size > 0 )
++ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
++
+ dsb(sy); /* So we know the flushes happen before continuing */
+
+ return 0;
+@@ -187,10 +186,14 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+
+ static inline int clean_dcache_va_range(const void *p, unsigned long size)
+ {
+- const void *end = p + size;
++ size_t cacheline_mask = dcache_line_bytes - 1;
++
+ dsb(sy); /* So the CPU issues all writes to the range */
+- p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
+- for ( ; p < end; p += dcache_line_bytes )
++ size += (uintptr_t)p & cacheline_mask;
++ size = (size + cacheline_mask) & ~cacheline_mask;
++ p = (void *)((uintptr_t)p & ~cacheline_mask);
++ for ( ; size >= dcache_line_bytes;
++ p += dcache_line_bytes, size -= dcache_line_bytes )
+ asm volatile (__clean_dcache_one(0) : : "r" (p));
+ dsb(sy); /* So we know the flushes happen before continuing */
+ /* ARM callers assume that dcache_* functions cannot fail. */
+@@ -200,10 +203,14 @@ static inline int clean_dcache_va_range(const void *p, unsigned long size)
+ static inline int clean_and_invalidate_dcache_va_range
+ (const void *p, unsigned long size)
+ {
+- const void *end = p + size;
++ size_t cacheline_mask = dcache_line_bytes - 1;
++
+ dsb(sy); /* So the CPU issues all writes to the range */
+- p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
+- for ( ; p < end; p += dcache_line_bytes )
++ size += (uintptr_t)p & cacheline_mask;
++ size = (size + cacheline_mask) & ~cacheline_mask;
++ p = (void *)((uintptr_t)p & ~cacheline_mask);
++ for ( ; size >= dcache_line_bytes;
++ p += dcache_line_bytes, size -= dcache_line_bytes )
+ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+ dsb(sy); /* So we know the flushes happen before continuing */
+ /* ARM callers assume that dcache_* functions cannot fail. */
+--
+2.40.1
+
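A minimal standalone sketch (not part of the commit above) of the arithmetic the
XSA-437 patch fixes: when the range to flush ends exactly at the top of the
address space, "end = p + size" wraps to 0, so a "p < end" loop never executes,
while the reworked loop counts down the remaining size and still covers every
line. The 64-byte line size and the lines_old()/lines_new() helpers are invented
for the illustration; addresses are treated purely as numbers, nothing is
dereferenced, and no cache maintenance instructions are issued -- the loops only
count the lines each variant would touch.

/*
 * Standalone illustration of the XSA-437 wraparound (not from the Xen tree).
 */
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64u   /* assumed line size, standing in for dcache_line_bytes */

/* Pre-patch style: iterate while p < end; end wraps to 0 at the top of the VA space. */
static unsigned long lines_old(uintptr_t p, unsigned long size)
{
    uintptr_t end = p + size;   /* wraps to 0 when the range ends at the top */
    unsigned long n = 0;

    for ( ; p < end; p += CACHE_LINE )
        n++;                    /* stands in for __invalidate_dcache_one(p) */
    return n;
}

/* Post-patch style: key the loop off the remaining size instead of an end pointer. */
static unsigned long lines_new(uintptr_t p, unsigned long size)
{
    unsigned long n = 0;

    for ( ; size >= CACHE_LINE; p += CACHE_LINE, size -= CACHE_LINE )
        n++;                    /* stands in for __invalidate_dcache_one(p) */
    return n;
}

int main(void)
{
    /* A cache-line-aligned 4 KiB range occupying the last bytes of the
     * address space, as the arm32 mapcache can hand out. */
    unsigned long size = 0x1000;
    uintptr_t p = (uintptr_t)0 - size;

    printf("old loop would flush %lu lines\n", lines_old(p, size)); /* prints 0  */
    printf("new loop would flush %lu lines\n", lines_new(p, size)); /* prints 64 */
    return 0;
}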