author     Mario Preksavec <mario@slackware.hr>               2017-08-10 19:10:29 +0200
committer  Willy Sudiarto Raharjo <willysr@slackbuilds.org>   2017-08-12 06:58:50 +0700
commit     1e89475715cf98d9dbce566054ae14599b761766 (patch)
tree       494c610f190ae71f7277b0db2f4e88bc17a2a514 /system/xen
parent     099d6f0c1bb5835f89458dd0ed47704b18514245 (diff)
system/xen: Updated for version 4.9.0.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen')
-rw-r--r--  system/xen/dom0/README.dom0 | 2
-rw-r--r--  system/xen/dom0/config-4.4.75-xen.i686 (renamed from system/xen/dom0/config-4.4.38-xen.i686) | 2
-rw-r--r--  system/xen/dom0/config-4.4.75-xen.x86_64 (renamed from system/xen/dom0/config-4.4.38-xen.x86_64) | 2
-rw-r--r--  system/xen/dom0/kernel-xen.sh | 4
-rw-r--r--  system/xen/domU/domU.sh | 2
-rw-r--r--  system/xen/patches/gcc7-fix-incorrect-comparison.patch | 40
-rw-r--r--  system/xen/patches/gcc7-minios-implement-udivmoddi4.patch | 44
-rw-r--r--  system/xen/patches/gcc7-vtpm-implicit-fallthrough.patch | 46
-rw-r--r--  system/xen/patches/gcc7-vtpmmgr-make-inline-static.patch | 1161
-rw-r--r--  system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-ath5k.patch | 28
-rw-r--r--  system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-curses.patch | 24
-rw-r--r--  system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough.patch | 163
-rw-r--r--  system/xen/patches/patch-ipxe-patches-series.patch | 18
-rw-r--r--  system/xen/patches/stubdom_zlib_disable_man_install.diff | 32
-rw-r--r--  system/xen/xen.SlackBuild | 28
-rw-r--r--  system/xen/xen.info | 10
-rw-r--r--  system/xen/xsa/xsa213-4.8.patch | 177
-rw-r--r--  system/xen/xsa/xsa214.patch | 41
-rw-r--r--  system/xen/xsa/xsa216-qemuu.patch | 113
-rw-r--r--  system/xen/xsa/xsa217.patch | 41
-rw-r--r--  system/xen/xsa/xsa218-0001-gnttab-fix-unmap-pin-accounting-race.patch | 102
-rw-r--r--  system/xen/xsa/xsa218-0002-gnttab-Avoid-potential-double-put-of-maptrack-entry.patch | 231
-rw-r--r--  system/xen/xsa/xsa218-0003-gnttab-correct-maptrack-table-accesses.patch | 84
-rw-r--r--  system/xen/xsa/xsa219-4.8.patch | 151
-rw-r--r--  system/xen/xsa/xsa220-4.8.patch | 94
-rw-r--r--  system/xen/xsa/xsa221.patch | 194
-rw-r--r--  system/xen/xsa/xsa222-1.patch | 124
-rw-r--r--  system/xen/xsa/xsa222-2-4.8.patch | 405
-rw-r--r--  system/xen/xsa/xsa223.patch | 54
-rw-r--r--  system/xen/xsa/xsa224-0001-gnttab-Fix-handling-of-dev_bus_addr-during-unmap.patch | 111
-rw-r--r--  system/xen/xsa/xsa224-0002-gnttab-never-create-host-mapping-unless-asked-to.patch | 42
-rw-r--r--  system/xen/xsa/xsa224-0003-gnttab-correct-logic-to-get-page-references-during-m.patch | 186
-rw-r--r--  system/xen/xsa/xsa224-0004-gnttab-__gnttab_unmap_common_complete-is-all-or-noth.patch | 319
-rw-r--r--  system/xen/xsa/xsa225.patch | 35
34 files changed, 1592 insertions, 2518 deletions
diff --git a/system/xen/dom0/README.dom0 b/system/xen/dom0/README.dom0
index f23bf14c04641..2521cf48bce62 100644
--- a/system/xen/dom0/README.dom0
+++ b/system/xen/dom0/README.dom0
@@ -46,7 +46,7 @@ Xen EFI binary.
To make things a bit easier, a copy of Xen EFI binary can be found here:
- http://slackware.hr/~mario/xen/xen-4.8.1.efi.gz
+ http://slackware.hr/~mario/xen/xen-4.9.0.efi.gz
If an automatic boot to Xen kernel is desired, the binary should be renamed and
copied to the following location: /boot/efi/EFI/BOOT/bootx64.efi
diff --git a/system/xen/dom0/config-4.4.38-xen.i686 b/system/xen/dom0/config-4.4.75-xen.i686
index cfbfe0d20259d..b29da0f7b087e 100644
--- a/system/xen/dom0/config-4.4.38-xen.i686
+++ b/system/xen/dom0/config-4.4.75-xen.i686
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.4.38 Kernel Configuration
+# Linux/x86 4.4.75 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
diff --git a/system/xen/dom0/config-4.4.38-xen.x86_64 b/system/xen/dom0/config-4.4.75-xen.x86_64
index db5c9e8c51159..9ed52dccfb117 100644
--- a/system/xen/dom0/config-4.4.38-xen.x86_64
+++ b/system/xen/dom0/config-4.4.75-xen.x86_64
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.4.38 Kernel Configuration
+# Linux/x86 4.4.75 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
diff --git a/system/xen/dom0/kernel-xen.sh b/system/xen/dom0/kernel-xen.sh
index 27ad9bc836eb0..d13851c97fdca 100644
--- a/system/xen/dom0/kernel-xen.sh
+++ b/system/xen/dom0/kernel-xen.sh
@@ -5,8 +5,8 @@
# Written by Chris Abela <chris.abela@maltats.com>, 20100515
# Modified by Mario Preksavec <mario@slackware.hr>
-KERNEL=${KERNEL:-4.4.38}
-XEN=${XEN:-4.8.1}
+KERNEL=${KERNEL:-4.4.75}
+XEN=${XEN:-4.9.0}
BOOTLOADER=${BOOTLOADER:-lilo}
ROOTMOD=${ROOTMOD:-ext4}
diff --git a/system/xen/domU/domU.sh b/system/xen/domU/domU.sh
index 6d61485b323f6..584132724e314 100644
--- a/system/xen/domU/domU.sh
+++ b/system/xen/domU/domU.sh
@@ -7,7 +7,7 @@
set -e
-KERNEL=${KERNEL:-4.4.38}
+KERNEL=${KERNEL:-4.4.75}
# Build an image for the root file system and another for the swap
# Default values : 8GB and 500MB resepectively.
diff --git a/system/xen/patches/gcc7-fix-incorrect-comparison.patch b/system/xen/patches/gcc7-fix-incorrect-comparison.patch
new file mode 100644
index 0000000000000..91dc6c08a6e8e
--- /dev/null
+++ b/system/xen/patches/gcc7-fix-incorrect-comparison.patch
@@ -0,0 +1,40 @@
+From fe4a28ccbfd33cae9e1f56b174d46b4eb2329efd Mon Sep 17 00:00:00 2001
+From: Dandan Bi <dandan.bi@intel.com>
+Date: Sat, 1 Apr 2017 10:31:14 +0800
+Subject: [PATCH] MdeModulePkg/UefiHiiLib:Fix incorrect comparison expression
+
+Fix the incorrect comparison between pointer and constant zero character.
+
+https://bugzilla.tianocore.org/show_bug.cgi?id=416
+
+V2: The pointer StringPtr points to a string returned
+by ExtractConfig/ExportConfig, if it is NULL, function
+InternalHiiIfrValueAction will return FALSE. So in
+current usage model, the StringPtr can not be NULL before
+using it, so we can add ASSERT here.
+
+Cc: Eric Dong <eric.dong@intel.com>
+Cc: Liming Gao <liming.gao@intel.com>
+Contributed-under: TianoCore Contribution Agreement 1.0
+Signed-off-by: Dandan Bi <dandan.bi@intel.com>
+Reviewed-by: Eric Dong <eric.dong@intel.com>
+---
+ MdeModulePkg/Library/UefiHiiLib/HiiLib.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/MdeModulePkg/Library/UefiHiiLib/HiiLib.c b/MdeModulePkg/Library/UefiHiiLib/HiiLib.c
+index a2abf26980b..cd0cd35a0f3 100644
+--- a/MdeModulePkg/Library/UefiHiiLib/HiiLib.c
++++ b/MdeModulePkg/Library/UefiHiiLib/HiiLib.c
+@@ -2201,8 +2201,9 @@ InternalHiiIfrValueAction (
+ }
+
+ StringPtr = ConfigAltResp;
+-
+- while (StringPtr != L'\0') {
++ ASSERT (StringPtr != NULL);
++
++ while (*StringPtr != L'\0') {
+ //
+ // 1. Find <ConfigHdr> GUID=...&NAME=...&PATH=...
+ //
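Note: the sketch below is illustrative only and not part of the commit; the function name and logic are hypothetical. It shows the bug class the hunk above fixes: "StringPtr != L'\0'" compares the pointer itself against the wide NUL character (numerically zero), so it is effectively a NULL test and the loop never inspects the string contents; the fix dereferences the pointer.

    #include <wchar.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Count '&' separators in a wide string, stopping at the terminator. */
    static size_t count_separators(const wchar_t *StringPtr)
    {
        size_t n = 0;

        /* Wrong:  while (StringPtr != L'\0')  -- compares the pointer to 0
         * (a NULL check), so the loop runs past the end of the string.
         * Right:  dereference and test the character itself.              */
        while (*StringPtr != L'\0') {
            if (*StringPtr == L'&')
                n++;
            StringPtr++;
        }
        return n;
    }

    int main(void)
    {
        printf("%zu\n", count_separators(L"GUID=1&NAME=2&PATH=3"));
        return 0;
    }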
diff --git a/system/xen/patches/gcc7-minios-implement-udivmoddi4.patch b/system/xen/patches/gcc7-minios-implement-udivmoddi4.patch
new file mode 100644
index 0000000000000..7d6c510944d62
--- /dev/null
+++ b/system/xen/patches/gcc7-minios-implement-udivmoddi4.patch
@@ -0,0 +1,44 @@
+From d991bdbc062248221511ecb795617c36b37e1d2e Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Wed, 9 Aug 2017 13:15:48 +0100
+Subject: [PATCH] lib/math.c: implement __udivmoddi4
+
+Some code compiled by gcc 7 requires this.
+
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+---
+ lib/math.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/lib/math.c b/lib/math.c
+index 561393e..b98cc1d 100644
+--- a/lib/math.c
++++ b/lib/math.c
+@@ -6,6 +6,7 @@
+ * File: math.c
+ * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
+ * Changes:
++ * Implement __udivmoddi4 (Wei Liu <wei.liu2@citrix.com>)
+ *
+ * Date: Aug 2003
+ *
+@@ -397,6 +398,15 @@ __umoddi3(u_quad_t a, u_quad_t b)
+ }
+
+ /*
++ * Returns the quotient and places remainder in r
++ */
++u_quad_t
++__udivmoddi4(u_quad_t a, u_quad_t b, u_quad_t *r)
++{
++ return __qdivrem(a, b, r);
++}
++
++/*
+ * From
+ * moddi3.c
+ */
+--
+2.1.4
+
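For context: __udivmoddi4 is the libgcc helper that gcc 7 may emit when 64-bit unsigned division and remainder are both needed on 32-bit targets. It returns the quotient and stores the remainder through the third argument, which is why mini-OS can implement it directly on top of its existing __qdivrem(). The sketch below only illustrates the expected contract; it is not the mini-OS code.

    #include <stdint.h>
    #include <stdio.h>

    /* Same contract as libgcc's __udivmoddi4: return a/b, store a%b in *r. */
    static uint64_t udivmoddi4_like(uint64_t a, uint64_t b, uint64_t *r)
    {
        if (r)
            *r = a % b;   /* remainder goes through the out-parameter */
        return a / b;     /* quotient is the return value */
    }

    int main(void)
    {
        uint64_t rem;
        uint64_t quot = udivmoddi4_like(1000000000001ULL, 7ULL, &rem);
        printf("%llu %llu\n", (unsigned long long)quot, (unsigned long long)rem);
        return 0;
    }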
diff --git a/system/xen/patches/gcc7-vtpm-implicit-fallthrough.patch b/system/xen/patches/gcc7-vtpm-implicit-fallthrough.patch
new file mode 100644
index 0000000000000..068752d2d1a04
--- /dev/null
+++ b/system/xen/patches/gcc7-vtpm-implicit-fallthrough.patch
@@ -0,0 +1,46 @@
+GCC-7 have -Wimplicit-fallthrough enabled with -Wextra. Add appropriate
+comment which both mute the warning and improve readibility.
+
+Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+---
+ stubdom/Makefile | 1 +
+ stubdom/vtpm-implicit-fallthrough.patch | 10 ++++++++++
+ 2 files changed, 11 insertions(+)
+ create mode 100644 stubdom/vtpm-implicit-fallthrough.patch
+
+diff --git a/stubdom/Makefile b/stubdom/Makefile
+index db01827..5055e31 100644
+--- a/stubdom/Makefile
++++ b/stubdom/Makefile
+@@ -228,6 +228,7 @@ tpm_emulator-$(XEN_TARGET_ARCH): tpm_emulator-$(TPMEMU_VERSION).tar.gz
+ patch -d $@ -p1 < vtpm-deepquote.patch
+ patch -d $@ -p1 < vtpm-deepquote-anyloc.patch
+ patch -d $@ -p1 < vtpm-cmake-Wextra.patch
++ patch -d $@ -p1 < vtpm-implicit-fallthrough.patch
+ mkdir $@/build
+ cd $@/build; CC=${CC} $(CMAKE) .. -DCMAKE_C_FLAGS:STRING="-std=c99 -DTPM_NO_EXTERN $(TARGET_CPPFLAGS) $(TARGET_CFLAGS) -Wno-declaration-after-statement"
+ touch $@
+diff --git a/stubdom/vtpm-implicit-fallthrough.patch b/stubdom/vtpm-implicit-fallthrough.patch
+new file mode 100644
+index 0000000..db97be5
+--- /dev/null
++++ b/stubdom/vtpm-implicit-fallthrough.patch
+@@ -0,0 +1,10 @@
++--- tpm_emulator-x86_64/tpm/tpm_cmd_handler.c.orig 2017-04-27 13:37:14.408000000 +0200
+++++ tpm_emulator-x86_64/tpm/tpm_cmd_handler.c 2017-04-27 13:39:53.585000000 +0200
++@@ -3397,6 +3397,7 @@
++ sizeof(rsp->auth2->nonceOdd.nonce));
++ tpm_hmac_update(&hmac, (BYTE*)&rsp->auth2->continueAuthSession, 1);
++ tpm_hmac_final(&hmac, rsp->auth2->auth);
+++ /* fall-thru */
++ case TPM_TAG_RSP_AUTH1_COMMAND:
++ tpm_hmac_init(&hmac, rsp->auth1->secret, sizeof(rsp->auth1->secret));
++ tpm_hmac_update(&hmac, rsp->auth1->digest, sizeof(rsp->auth1->digest));
+--
+2.7.4
+
+
+_______________________________________________
+Xen-devel mailing list
+Xen-devel@lists.xen.org
+https://lists.xen.org/xen-devel
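As background for the patch above: with -Wextra, gcc 7 enables -Wimplicit-fallthrough, which warns whenever control falls from one case label into the next. A comment matching its "fall through" patterns (or the fallthrough attribute) marks the cascade as intentional and silences the warning. A minimal, hypothetical example of the idiom:

    #include <stdio.h>

    static void report(int level)
    {
        switch (level) {
        case 2:
            puts("auth2");
            /* fall through */   /* intentional: level 2 also does level 1 work */
        case 1:
            puts("auth1");
            break;
        default:
            break;
        }
    }

    int main(void) { report(2); return 0; }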
diff --git a/system/xen/patches/gcc7-vtpmmgr-make-inline-static.patch b/system/xen/patches/gcc7-vtpmmgr-make-inline-static.patch
new file mode 100644
index 0000000000000..a2c96691a95f0
--- /dev/null
+++ b/system/xen/patches/gcc7-vtpmmgr-make-inline-static.patch
@@ -0,0 +1,1161 @@
+gcc7 is more strict with functions marked as inline. They are not
+automatically inlined. Instead a function call is generated, but the
+actual code is not visible by the linker.
+
+Do a mechanical change and mark every 'inline' as 'static inline'. For
+simpler review the static goes into an extra line.
+
+Signed-off-by: Olaf Hering <olaf@aepfle.de>
+---
+ stubdom/vtpmmgr/marshal.h | 76 ++++++++++++++++++++++++++++++++++++++++++
+ stubdom/vtpmmgr/tcg.h | 14 ++++++++
+ stubdom/vtpmmgr/tpm2_marshal.h | 58 ++++++++++++++++++++++++++++++++
+ stubdom/vtpmmgr/tpmrsa.h | 1 +
+ 4 files changed, 149 insertions(+)
+
+diff --git a/stubdom/vtpmmgr/marshal.h b/stubdom/vtpmmgr/marshal.h
+index d826f19d89..dce19c6439 100644
+--- a/stubdom/vtpmmgr/marshal.h
++++ b/stubdom/vtpmmgr/marshal.h
+@@ -47,16 +47,19 @@ typedef enum UnpackPtr {
+ UNPACK_ALLOC
+ } UnpackPtr;
+
++static
+ inline BYTE* pack_BYTE(BYTE* ptr, BYTE t) {
+ ptr[0] = t;
+ return ++ptr;
+ }
+
++static
+ inline BYTE* unpack_BYTE(BYTE* ptr, BYTE* t) {
+ t[0] = ptr[0];
+ return ++ptr;
+ }
+
++static
+ inline int unpack3_BYTE(BYTE* ptr, UINT32* pos, UINT32 max, BYTE *t)
+ {
+ if (*pos + 1 > max)
+@@ -72,18 +75,21 @@ inline int unpack3_BYTE(BYTE* ptr, UINT32* pos, UINT32 max, BYTE *t)
+ #define unpack3_BOOL(p, x, m, t) unpack3_BYTE(p, x, m, t)
+ #define sizeof_BOOL(t) 1
+
++static
+ inline BYTE* pack_UINT16(void* ptr, UINT16 t) {
+ UINT16* p = ptr;
+ *p = cpu_to_be16(t);
+ return ptr + sizeof(UINT16);
+ }
+
++static
+ inline BYTE* unpack_UINT16(void* ptr, UINT16* t) {
+ UINT16* p = ptr;
+ *t = be16_to_cpu(*p);
+ return ptr + sizeof(UINT16);
+ }
+
++static
+ inline int unpack3_UINT16(BYTE* ptr, UINT32* pos, UINT32 max, UINT16 *t)
+ {
+ if (*pos + 2 > max)
+@@ -93,18 +99,21 @@ inline int unpack3_UINT16(BYTE* ptr, UINT32* pos, UINT32 max, UINT16 *t)
+ return 0;
+ }
+
++static
+ inline BYTE* pack_UINT32(void* ptr, UINT32 t) {
+ UINT32* p = ptr;
+ *p = cpu_to_be32(t);
+ return ptr + sizeof(UINT32);
+ }
+
++static
+ inline BYTE* unpack_UINT32(void* ptr, UINT32* t) {
+ UINT32* p = ptr;
+ *t = be32_to_cpu(*p);
+ return ptr + sizeof(UINT32);
+ }
+
++static
+ inline int unpack3_UINT32(BYTE* ptr, UINT32* pos, UINT32 max, UINT32 *t)
+ {
+ if (*pos + 4 > max)
+@@ -236,16 +245,19 @@ inline int unpack3_UINT32(BYTE* ptr, UINT32* pos, UINT32 max, UINT32 *t)
+ #define sizeof_TCS_KEY_HANDLE(t) sizeof_UINT32(t)
+
+
++static
+ inline BYTE* pack_BUFFER(BYTE* ptr, const BYTE* buf, UINT32 size) {
+ memcpy(ptr, buf, size);
+ return ptr + size;
+ }
+
++static
+ inline BYTE* unpack_BUFFER(BYTE* ptr, BYTE* buf, UINT32 size) {
+ memcpy(buf, ptr, size);
+ return ptr + size;
+ }
+
++static
+ inline int unpack3_BUFFER(BYTE* ptr, UINT32* pos, UINT32 max, BYTE* buf, UINT32 size) {
+ if (*pos + size > max)
+ return TPM_SIZE;
+@@ -256,11 +268,13 @@ inline int unpack3_BUFFER(BYTE* ptr, UINT32* pos, UINT32 max, BYTE* buf, UINT32
+
+ #define sizeof_BUFFER(b, s) s
+
++static
+ inline BYTE* unpack_ALIAS(BYTE* ptr, BYTE** buf, UINT32 size) {
+ *buf = ptr;
+ return ptr + size;
+ }
+
++static
+ inline BYTE* unpack_ALLOC(BYTE* ptr, BYTE** buf, UINT32 size) {
+ if(size) {
+ *buf = malloc(size);
+@@ -271,6 +285,7 @@ inline BYTE* unpack_ALLOC(BYTE* ptr, BYTE** buf, UINT32 size) {
+ return ptr + size;
+ }
+
++static
+ inline BYTE* unpack_PTR(BYTE* ptr, BYTE** buf, UINT32 size, UnpackPtr alloc) {
+ if(alloc == UNPACK_ALLOC) {
+ return unpack_ALLOC(ptr, buf, size);
+@@ -279,6 +294,7 @@ inline BYTE* unpack_PTR(BYTE* ptr, BYTE** buf, UINT32 size, UnpackPtr alloc) {
+ }
+ }
+
++static
+ inline int unpack3_PTR(BYTE* ptr, UINT32* pos, UINT32 max, BYTE** buf, UINT32 size, UnpackPtr alloc) {
+ if (size > max || *pos + size > max)
+ return TPM_SIZE;
+@@ -292,14 +308,17 @@ inline int unpack3_PTR(BYTE* ptr, UINT32* pos, UINT32 max, BYTE** buf, UINT32 si
+ }
+ #define unpack3_VPTR(ptr, pos, max, buf, size, alloc) unpack3_PTR(ptr, pos, max, (void*)(buf), size, alloc)
+
++static
+ inline BYTE* pack_TPM_AUTHDATA(BYTE* ptr, const TPM_AUTHDATA* d) {
+ return pack_BUFFER(ptr, *d, TPM_DIGEST_SIZE);
+ }
+
++static
+ inline BYTE* unpack_TPM_AUTHDATA(BYTE* ptr, TPM_AUTHDATA* d) {
+ return unpack_BUFFER(ptr, *d, TPM_DIGEST_SIZE);
+ }
+
++static
+ inline int unpack3_TPM_AUTHDATA(BYTE* ptr, UINT32* pos, UINT32 len, TPM_AUTHDATA* d) {
+ return unpack3_BUFFER(ptr, pos, len, *d, TPM_DIGEST_SIZE);
+ }
+@@ -325,6 +344,7 @@ inline int unpack3_TPM_AUTHDATA(BYTE* ptr, UINT32* pos, UINT32 len, TPM_AUTHDATA
+ #define sizeof_TPM_TAG(t) sizeof_UINT16(t)
+ #define sizeof_TPM_STRUCTURE_TAG(t) sizeof_UINT16(t)
+
++static
+ inline BYTE* pack_TPM_VERSION(BYTE* ptr, const TPM_VERSION* t) {
+ ptr[0] = t->major;
+ ptr[1] = t->minor;
+@@ -333,6 +353,7 @@ inline BYTE* pack_TPM_VERSION(BYTE* ptr, const TPM_VERSION* t) {
+ return ptr + 4;
+ }
+
++static
+ inline BYTE* unpack_TPM_VERSION(BYTE* ptr, TPM_VERSION* t) {
+ t->major = ptr[0];
+ t->minor = ptr[1];
+@@ -341,6 +362,7 @@ inline BYTE* unpack_TPM_VERSION(BYTE* ptr, TPM_VERSION* t) {
+ return ptr + 4;
+ }
+
++static
+ inline int unpack3_TPM_VERSION(BYTE* ptr, UINT32 *pos, UINT32 max, TPM_VERSION* t) {
+ if (*pos + 4 > max)
+ return TPM_SIZE;
+@@ -355,6 +377,7 @@ inline int unpack3_TPM_VERSION(BYTE* ptr, UINT32 *pos, UINT32 max, TPM_VERSION*
+
+ #define sizeof_TPM_VERSION(x) 4
+
++static
+ inline BYTE* pack_TPM_CAP_VERSION_INFO(BYTE* ptr, const TPM_CAP_VERSION_INFO* v) {
+ ptr = pack_TPM_STRUCTURE_TAG(ptr, v->tag);
+ ptr = pack_TPM_VERSION(ptr, &v->version);
+@@ -366,6 +389,7 @@ inline BYTE* pack_TPM_CAP_VERSION_INFO(BYTE* ptr, const TPM_CAP_VERSION_INFO* v)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM_CAP_VERSION_INFO(BYTE* ptr, TPM_CAP_VERSION_INFO* v, UnpackPtr alloc) {
+ ptr = unpack_TPM_STRUCTURE_TAG(ptr, &v->tag);
+ ptr = unpack_TPM_VERSION(ptr, &v->version);
+@@ -377,14 +401,17 @@ inline BYTE* unpack_TPM_CAP_VERSION_INFO(BYTE* ptr, TPM_CAP_VERSION_INFO* v, Unp
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM_DIGEST(BYTE* ptr, const TPM_DIGEST* d) {
+ return pack_BUFFER(ptr, d->digest, TPM_DIGEST_SIZE);
+ }
+
++static
+ inline BYTE* unpack_TPM_DIGEST(BYTE* ptr, TPM_DIGEST* d) {
+ return unpack_BUFFER(ptr, d->digest, TPM_DIGEST_SIZE);
+ }
+
++static
+ inline int unpack3_TPM_DIGEST(BYTE* ptr, UINT32* pos, UINT32 max, TPM_DIGEST* d) {
+ return unpack3_BUFFER(ptr, pos, max, d->digest, TPM_DIGEST_SIZE);
+ }
+@@ -409,20 +436,24 @@ inline int unpack3_TPM_DIGEST(BYTE* ptr, UINT32* pos, UINT32 max, TPM_DIGEST* d)
+ #define pack_TPM_CHOSENID_HASH(ptr, d) pack_TPM_DIGEST(ptr, d)
+ #define unpack_TPM_CHOSENID_HASH(ptr, d) unpack_TPM_DIGEST(ptr, d)
+
++static
+ inline BYTE* pack_TPM_NONCE(BYTE* ptr, const TPM_NONCE* n) {
+ return pack_BUFFER(ptr, n->nonce, TPM_DIGEST_SIZE);
+ }
+
++static
+ inline BYTE* unpack_TPM_NONCE(BYTE* ptr, TPM_NONCE* n) {
+ return unpack_BUFFER(ptr, n->nonce, TPM_DIGEST_SIZE);
+ }
+
+ #define sizeof_TPM_NONCE(x) TPM_DIGEST_SIZE
+
++static
+ inline int unpack3_TPM_NONCE(BYTE* ptr, UINT32* pos, UINT32 max, TPM_NONCE* n) {
+ return unpack3_BUFFER(ptr, pos, max, n->nonce, TPM_DIGEST_SIZE);
+ }
+
++static
+ inline BYTE* pack_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, const TPM_SYMMETRIC_KEY_PARMS* k) {
+ ptr = pack_UINT32(ptr, k->keyLength);
+ ptr = pack_UINT32(ptr, k->blockSize);
+@@ -430,6 +461,7 @@ inline BYTE* pack_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, const TPM_SYMMETRIC_KEY_PAR
+ return pack_BUFFER(ptr, k->IV, k->ivSize);
+ }
+
++static
+ inline BYTE* pack_TPM_SYMMETRIC_KEY(BYTE* ptr, const TPM_SYMMETRIC_KEY* k) {
+ ptr = pack_UINT32(ptr, k->algId);
+ ptr = pack_UINT16(ptr, k->encScheme);
+@@ -437,6 +469,7 @@ inline BYTE* pack_TPM_SYMMETRIC_KEY(BYTE* ptr, const TPM_SYMMETRIC_KEY* k) {
+ return pack_BUFFER(ptr, k->data, k->size);
+ }
+
++static
+ inline int unpack3_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 max, TPM_SYMMETRIC_KEY_PARMS* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->keyLength) ||
+ unpack3_UINT32(ptr, pos, max, &k->blockSize) ||
+@@ -444,10 +477,12 @@ inline int unpack3_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 max, T
+ unpack3_PTR(ptr, pos, max, &k->IV, k->ivSize, alloc);
+ }
+
++static
+ inline int sizeof_TPM_SYMMETRIC_KEY_PARMS(const TPM_SYMMETRIC_KEY_PARMS* k) {
+ return 12 + k->ivSize;
+ }
+
++static
+ inline int unpack3_TPM_SYMMETRIC_KEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_SYMMETRIC_KEY* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->algId) ||
+ unpack3_UINT16(ptr, pos, max, &k->encScheme) ||
+@@ -455,6 +490,7 @@ inline int unpack3_TPM_SYMMETRIC_KEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_SYM
+ unpack3_PTR(ptr, pos, max, &k->data, k->size, alloc);
+ }
+
++static
+ inline BYTE* pack_TPM_RSA_KEY_PARMS(BYTE* ptr, const TPM_RSA_KEY_PARMS* k) {
+ ptr = pack_UINT32(ptr, k->keyLength);
+ ptr = pack_UINT32(ptr, k->numPrimes);
+@@ -462,6 +498,7 @@ inline BYTE* pack_TPM_RSA_KEY_PARMS(BYTE* ptr, const TPM_RSA_KEY_PARMS* k) {
+ return pack_BUFFER(ptr, k->exponent, k->exponentSize);
+ }
+
++static
+ inline int unpack3_TPM_RSA_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 max, TPM_RSA_KEY_PARMS* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->keyLength) ||
+ unpack3_UINT32(ptr, pos, max, &k->numPrimes) ||
+@@ -469,11 +506,13 @@ inline int unpack3_TPM_RSA_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 max, TPM_RSA
+ unpack3_PTR(ptr, pos, max, &k->exponent, k->exponentSize, alloc);
+ }
+
++static
+ inline int sizeof_TPM_RSA_KEY_PARMS(const TPM_RSA_KEY_PARMS* k) {
+ return 12 + k->exponentSize;
+ }
+
+
++static
+ inline BYTE* pack_TPM_KEY_PARMS(BYTE* ptr, const TPM_KEY_PARMS* k) {
+ ptr = pack_TPM_ALGORITHM_ID(ptr, k->algorithmID);
+ ptr = pack_TPM_ENC_SCHEME(ptr, k->encScheme);
+@@ -493,6 +532,7 @@ inline BYTE* pack_TPM_KEY_PARMS(BYTE* ptr, const TPM_KEY_PARMS* k) {
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 len, TPM_KEY_PARMS* k, UnpackPtr alloc) {
+ int rc = unpack3_TPM_ALGORITHM_ID(ptr, pos, len, &k->algorithmID) ||
+ unpack3_TPM_ENC_SCHEME(ptr, pos, len, &k->encScheme) ||
+@@ -511,6 +551,7 @@ inline int unpack3_TPM_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 len, TPM_KEY_PAR
+ return TPM_FAIL;
+ }
+
++static
+ inline int sizeof_TPM_KEY_PARMS(const TPM_KEY_PARMS* k) {
+ int rc = 0;
+ rc += sizeof_TPM_ALGORITHM_ID(&k->algorithmID);
+@@ -532,52 +573,62 @@ inline int sizeof_TPM_KEY_PARMS(const TPM_KEY_PARMS* k) {
+ return rc;
+ }
+
++static
+ inline BYTE* pack_TPM_STORE_PUBKEY(BYTE* ptr, const TPM_STORE_PUBKEY* k) {
+ ptr = pack_UINT32(ptr, k->keyLength);
+ ptr = pack_BUFFER(ptr, k->key, k->keyLength);
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_STORE_PUBKEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_STORE_PUBKEY* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->keyLength) ||
+ unpack3_PTR(ptr, pos, max, &k->key, k->keyLength, alloc);
+ }
+
++static
+ inline int sizeof_TPM_STORE_PUBKEY(const TPM_STORE_PUBKEY* k) {
+ return 4 + k->keyLength;
+ }
+
++static
+ inline BYTE* pack_TPM_PUBKEY(BYTE* ptr, const TPM_PUBKEY* k) {
+ ptr = pack_TPM_KEY_PARMS(ptr, &k->algorithmParms);
+ return pack_TPM_STORE_PUBKEY(ptr, &k->pubKey);
+ }
+
++static
+ inline int unpack3_TPM_PUBKEY(BYTE* ptr, UINT32* pos, UINT32 len, TPM_PUBKEY* k, UnpackPtr alloc) {
+ return unpack3_TPM_KEY_PARMS(ptr, pos, len, &k->algorithmParms, alloc) ||
+ unpack3_TPM_STORE_PUBKEY(ptr, pos, len, &k->pubKey, alloc);
+ }
+
++static
+ inline BYTE* pack_TPM_PCR_SELECTION(BYTE* ptr, const TPM_PCR_SELECTION* p) {
+ ptr = pack_UINT16(ptr, p->sizeOfSelect);
+ ptr = pack_BUFFER(ptr, p->pcrSelect, p->sizeOfSelect);
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM_PCR_SELECTION(BYTE* ptr, TPM_PCR_SELECTION* p, UnpackPtr alloc) {
+ ptr = unpack_UINT16(ptr, &p->sizeOfSelect);
+ ptr = unpack_PTR(ptr, &p->pcrSelect, p->sizeOfSelect, alloc);
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_PCR_SELECTION(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_SELECTION* p, UnpackPtr alloc) {
+ return unpack3_UINT16(ptr, pos, max, &p->sizeOfSelect) ||
+ unpack3_PTR(ptr, pos, max, &p->pcrSelect, p->sizeOfSelect, alloc);
+ }
+
++static
+ inline int sizeof_TPM_PCR_SELECTION(const TPM_PCR_SELECTION* p) {
+ return 2 + p->sizeOfSelect;
+ }
+
++static
+ inline BYTE* pack_TPM_PCR_INFO(BYTE* ptr, const TPM_PCR_INFO* p) {
+ ptr = pack_TPM_PCR_SELECTION(ptr, &p->pcrSelection);
+ ptr = pack_TPM_COMPOSITE_HASH(ptr, &p->digestAtRelease);
+@@ -585,12 +636,14 @@ inline BYTE* pack_TPM_PCR_INFO(BYTE* ptr, const TPM_PCR_INFO* p) {
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_PCR_INFO(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_INFO* p, UnpackPtr alloc) {
+ return unpack3_TPM_PCR_SELECTION(ptr, pos, max, &p->pcrSelection, alloc) ||
+ unpack3_TPM_COMPOSITE_HASH(ptr, pos, max, &p->digestAtRelease) ||
+ unpack3_TPM_COMPOSITE_HASH(ptr, pos, max, &p->digestAtCreation);
+ }
+
++static
+ inline int sizeof_TPM_PCR_INFO(const TPM_PCR_INFO* p) {
+ int rc = 0;
+ rc += sizeof_TPM_PCR_SELECTION(&p->pcrSelection);
+@@ -599,6 +652,7 @@ inline int sizeof_TPM_PCR_INFO(const TPM_PCR_INFO* p) {
+ return rc;
+ }
+
++static
+ inline BYTE* pack_TPM_PCR_INFO_LONG(BYTE* ptr, const TPM_PCR_INFO_LONG* p) {
+ ptr = pack_TPM_STRUCTURE_TAG(ptr, p->tag);
+ ptr = pack_TPM_LOCALITY_SELECTION(ptr, p->localityAtCreation);
+@@ -610,6 +664,7 @@ inline BYTE* pack_TPM_PCR_INFO_LONG(BYTE* ptr, const TPM_PCR_INFO_LONG* p) {
+ return ptr;
+ }
+
++static
+ inline int sizeof_TPM_PCR_INFO_LONG(const TPM_PCR_INFO_LONG* p) {
+ int rc = 0;
+ rc += sizeof_TPM_STRUCTURE_TAG(p->tag);
+@@ -622,6 +677,7 @@ inline int sizeof_TPM_PCR_INFO_LONG(const TPM_PCR_INFO_LONG* p) {
+ return rc;
+ }
+
++static
+ inline int unpack3_TPM_PCR_INFO_LONG(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_INFO_LONG* p, UnpackPtr alloc) {
+ return unpack3_TPM_STRUCTURE_TAG(ptr, pos, max, &p->tag) ||
+ unpack3_TPM_LOCALITY_SELECTION(ptr, pos, max,
+@@ -637,6 +693,7 @@ inline int unpack3_TPM_PCR_INFO_LONG(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR
+ unpack3_TPM_COMPOSITE_HASH(ptr, pos, max, &p->digestAtRelease);
+ }
+
++static
+ inline BYTE* pack_TPM_PCR_COMPOSITE(BYTE* ptr, const TPM_PCR_COMPOSITE* p) {
+ ptr = pack_TPM_PCR_SELECTION(ptr, &p->select);
+ ptr = pack_UINT32(ptr, p->valueSize);
+@@ -644,12 +701,14 @@ inline BYTE* pack_TPM_PCR_COMPOSITE(BYTE* ptr, const TPM_PCR_COMPOSITE* p) {
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_PCR_COMPOSITE(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_COMPOSITE* p, UnpackPtr alloc) {
+ return unpack3_TPM_PCR_SELECTION(ptr, pos, max, &p->select, alloc) ||
+ unpack3_UINT32(ptr, pos, max, &p->valueSize) ||
+ unpack3_PTR(ptr, pos, max, (BYTE**)&p->pcrValue, p->valueSize, alloc);
+ }
+
++static
+ inline BYTE* pack_TPM_KEY(BYTE* ptr, const TPM_KEY* k) {
+ ptr = pack_TPM_VERSION(ptr, &k->ver);
+ ptr = pack_TPM_KEY_USAGE(ptr, k->keyUsage);
+@@ -665,6 +724,7 @@ inline BYTE* pack_TPM_KEY(BYTE* ptr, const TPM_KEY* k) {
+ return pack_BUFFER(ptr, k->encData, k->encDataSize);
+ }
+
++static
+ inline int unpack3_TPM_KEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_KEY* k, UnpackPtr alloc) {
+ int rc = unpack3_TPM_VERSION(ptr, pos, max, &k->ver) ||
+ unpack3_TPM_KEY_USAGE(ptr, pos, max, &k->keyUsage) ||
+@@ -682,6 +742,7 @@ inline int unpack3_TPM_KEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_KEY* k, Unpac
+ unpack3_PTR(ptr, pos, max, &k->encData, k->encDataSize, alloc);
+ }
+
++static
+ inline int sizeof_TPM_KEY(const TPM_KEY* k) {
+ int rc = 0;
+ rc += sizeof_TPM_VERSION(&k->ver);
+@@ -699,18 +760,21 @@ inline int sizeof_TPM_KEY(const TPM_KEY* k) {
+ return rc;
+ }
+
++static
+ inline BYTE* pack_TPM_BOUND_DATA(BYTE* ptr, const TPM_BOUND_DATA* b, UINT32 payloadSize) {
+ ptr = pack_TPM_VERSION(ptr, &b->ver);
+ ptr = pack_TPM_PAYLOAD_TYPE(ptr, b->payload);
+ return pack_BUFFER(ptr, b->payloadData, payloadSize);
+ }
+
++static
+ inline BYTE* unpack_TPM_BOUND_DATA(BYTE* ptr, TPM_BOUND_DATA* b, UINT32 payloadSize, UnpackPtr alloc) {
+ ptr = unpack_TPM_VERSION(ptr, &b->ver);
+ ptr = unpack_TPM_PAYLOAD_TYPE(ptr, &b->payload);
+ return unpack_PTR(ptr, &b->payloadData, payloadSize, alloc);
+ }
+
++static
+ inline BYTE* pack_TPM_STORED_DATA(BYTE* ptr, const TPM_STORED_DATA* d) {
+ ptr = pack_TPM_VERSION(ptr, &d->ver);
+ ptr = pack_UINT32(ptr, d->sealInfoSize);
+@@ -722,6 +786,7 @@ inline BYTE* pack_TPM_STORED_DATA(BYTE* ptr, const TPM_STORED_DATA* d) {
+ return ptr;
+ }
+
++static
+ inline int sizeof_TPM_STORED_DATA(const TPM_STORED_DATA* d) {
+ int rv = sizeof_TPM_VERSION(&d->ver) + sizeof_UINT32(d->sealInfoSize);
+ if (d->sealInfoSize) {
+@@ -732,6 +797,7 @@ inline int sizeof_TPM_STORED_DATA(const TPM_STORED_DATA* d) {
+ return rv;
+ }
+
++static
+ inline int unpack3_TPM_STORED_DATA(BYTE* ptr, UINT32* pos, UINT32 len, TPM_STORED_DATA* d, UnpackPtr alloc) {
+ int rc = unpack3_TPM_VERSION(ptr, pos, len, &d->ver) ||
+ unpack3_UINT32(ptr, pos, len, &d->sealInfoSize);
+@@ -746,6 +812,7 @@ inline int unpack3_TPM_STORED_DATA(BYTE* ptr, UINT32* pos, UINT32 len, TPM_STORE
+ return rc;
+ }
+
++static
+ inline BYTE* pack_TPM_STORED_DATA12(BYTE* ptr, const TPM_STORED_DATA12* d) {
+ ptr = pack_TPM_STRUCTURE_TAG(ptr, d->tag);
+ ptr = pack_TPM_ENTITY_TYPE(ptr, d->et);
+@@ -758,6 +825,7 @@ inline BYTE* pack_TPM_STORED_DATA12(BYTE* ptr, const TPM_STORED_DATA12* d) {
+ return ptr;
+ }
+
++static
+ inline int sizeof_TPM_STORED_DATA12(const TPM_STORED_DATA12* d) {
+ int rv = sizeof_TPM_STRUCTURE_TAG(&d->ver) +
+ sizeof_TPM_ENTITY_TYPE(&d->et) +
+@@ -770,6 +838,7 @@ inline int sizeof_TPM_STORED_DATA12(const TPM_STORED_DATA12* d) {
+ return rv;
+ }
+
++static
+ inline int unpack3_TPM_STORED_DATA12(BYTE* ptr, UINT32* pos, UINT32 len, TPM_STORED_DATA12* d, UnpackPtr alloc) {
+ int rc = unpack3_TPM_STRUCTURE_TAG(ptr, pos, len, &d->tag) ||
+ unpack3_TPM_ENTITY_TYPE(ptr, pos, len, &d->et) ||
+@@ -786,6 +855,7 @@ inline int unpack3_TPM_STORED_DATA12(BYTE* ptr, UINT32* pos, UINT32 len, TPM_STO
+ return rc;
+ }
+
++static
+ inline BYTE* pack_TPM_AUTH_SESSION(BYTE* ptr, const TPM_AUTH_SESSION* auth) {
+ ptr = pack_TPM_AUTH_HANDLE(ptr, auth->AuthHandle);
+ ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
+@@ -794,6 +864,7 @@ inline BYTE* pack_TPM_AUTH_SESSION(BYTE* ptr, const TPM_AUTH_SESSION* auth) {
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM_AUTH_SESSION(BYTE* ptr, TPM_AUTH_SESSION* auth) {
+ ptr = unpack_TPM_NONCE(ptr, &auth->NonceEven);
+ ptr = unpack_BOOL(ptr, &auth->fContinueAuthSession);
+@@ -801,6 +872,7 @@ inline BYTE* unpack_TPM_AUTH_SESSION(BYTE* ptr, TPM_AUTH_SESSION* auth) {
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_AUTH_SESSION(BYTE* ptr, UINT32* pos, UINT32 len, TPM_AUTH_SESSION* auth) {
+ return unpack3_TPM_NONCE(ptr, pos, len, &auth->NonceEven) ||
+ unpack3_BOOL(ptr, pos, len, &auth->fContinueAuthSession) ||
+@@ -808,6 +880,7 @@ inline int unpack3_TPM_AUTH_SESSION(BYTE* ptr, UINT32* pos, UINT32 len, TPM_AUTH
+ }
+
+
++static
+ inline int sizeof_TPM_AUTH_SESSION(const TPM_AUTH_SESSION* auth) {
+ int rv = 0;
+ rv += sizeof_TPM_AUTH_HANDLE(auth->AuthHandle);
+@@ -817,6 +890,7 @@ inline int sizeof_TPM_AUTH_SESSION(const TPM_AUTH_SESSION* auth) {
+ return rv;
+ }
+
++static
+ inline BYTE* pack_TPM_RQU_HEADER(BYTE* ptr,
+ TPM_TAG tag,
+ UINT32 size,
+@@ -826,6 +900,7 @@ inline BYTE* pack_TPM_RQU_HEADER(BYTE* ptr,
+ return pack_UINT32(ptr, ord);
+ }
+
++static
+ inline BYTE* unpack_TPM_RQU_HEADER(BYTE* ptr,
+ TPM_TAG* tag,
+ UINT32* size,
+@@ -836,6 +911,7 @@ inline BYTE* unpack_TPM_RQU_HEADER(BYTE* ptr,
+ return ptr;
+ }
+
++static
+ inline int unpack3_TPM_RQU_HEADER(BYTE* ptr, UINT32* pos, UINT32 max,
+ TPM_TAG* tag, UINT32* size, TPM_COMMAND_CODE* ord) {
+ return
+diff --git a/stubdom/vtpmmgr/tcg.h b/stubdom/vtpmmgr/tcg.h
+index 813ce57a2d..423131dc25 100644
+--- a/stubdom/vtpmmgr/tcg.h
++++ b/stubdom/vtpmmgr/tcg.h
+@@ -461,6 +461,7 @@ typedef struct TPM_CAP_VERSION_INFO {
+ BYTE* vendorSpecific;
+ } TPM_CAP_VERSION_INFO;
+
++static
+ inline void free_TPM_CAP_VERSION_INFO(TPM_CAP_VERSION_INFO* v) {
+ free(v->vendorSpecific);
+ v->vendorSpecific = NULL;
+@@ -494,6 +495,7 @@ typedef struct TPM_SYMMETRIC_KEY {
+ BYTE* data;
+ } TPM_SYMMETRIC_KEY;
+
++static
+ inline void free_TPM_SYMMETRIC_KEY_PARMS(TPM_SYMMETRIC_KEY_PARMS* p) {
+ free(p->IV);
+ p->IV = NULL;
+@@ -510,6 +512,7 @@ typedef struct TPM_RSA_KEY_PARMS {
+
+ #define TPM_RSA_KEY_PARMS_INIT { 0, 0, 0, NULL }
+
++static
+ inline void free_TPM_RSA_KEY_PARMS(TPM_RSA_KEY_PARMS* p) {
+ free(p->exponent);
+ p->exponent = NULL;
+@@ -528,6 +531,7 @@ typedef struct TPM_KEY_PARMS {
+
+ #define TPM_KEY_PARMS_INIT { 0, 0, 0, 0 }
+
++static
+ inline void free_TPM_KEY_PARMS(TPM_KEY_PARMS* p) {
+ if(p->parmSize) {
+ switch(p->algorithmID) {
+@@ -550,6 +554,7 @@ typedef struct TPM_STORE_PUBKEY {
+
+ #define TPM_STORE_PUBKEY_INIT { 0, NULL }
+
++static
+ inline void free_TPM_STORE_PUBKEY(TPM_STORE_PUBKEY* p) {
+ free(p->key);
+ p->key = NULL;
+@@ -562,6 +567,7 @@ typedef struct TPM_PUBKEY {
+
+ #define TPM_PUBKEY_INIT { TPM_KEY_PARMS_INIT, TPM_STORE_PUBKEY_INIT }
+
++static
+ inline void free_TPM_PUBKEY(TPM_PUBKEY* k) {
+ free_TPM_KEY_PARMS(&k->algorithmParms);
+ free_TPM_STORE_PUBKEY(&k->pubKey);
+@@ -574,6 +580,7 @@ typedef struct TPM_PCR_SELECTION {
+
+ #define TPM_PCR_SELECTION_INIT { 0, NULL }
+
++static
+ inline void free_TPM_PCR_SELECTION(TPM_PCR_SELECTION* p) {
+ free(p->pcrSelect);
+ p->pcrSelect = NULL;
+@@ -594,6 +601,7 @@ typedef struct TPM_PCR_INFO_LONG {
+ #define TPM_PCR_INFO_LONG_INIT { 0, 0, 0, TPM_PCR_SELECTION_INIT, \
+ TPM_PCR_SELECTION_INIT }
+
++static
+ inline void free_TPM_PCR_INFO_LONG(TPM_PCR_INFO_LONG* p) {
+ free_TPM_PCR_SELECTION(&p->creationPCRSelection);
+ free_TPM_PCR_SELECTION(&p->releasePCRSelection);
+@@ -607,6 +615,7 @@ typedef struct TPM_PCR_INFO {
+
+ #define TPM_PCR_INFO_INIT { TPM_PCR_SELECTION_INIT }
+
++static
+ inline void free_TPM_PCR_INFO(TPM_PCR_INFO* p) {
+ free_TPM_PCR_SELECTION(&p->pcrSelection);
+ }
+@@ -619,6 +628,7 @@ typedef struct TPM_PCR_COMPOSITE {
+
+ #define TPM_PCR_COMPOSITE_INIT { TPM_PCR_SELECTION_INIT, 0, NULL }
+
++static
+ inline void free_TPM_PCR_COMPOSITE(TPM_PCR_COMPOSITE* p) {
+ free_TPM_PCR_SELECTION(&p->select);
+ free(p->pcrValue);
+@@ -643,6 +653,7 @@ typedef struct TPM_KEY {
+ .pubKey = TPM_STORE_PUBKEY_INIT, \
+ .encDataSize = 0, .encData = NULL }
+
++static
+ inline void free_TPM_KEY(TPM_KEY* k) {
+ if(k->PCRInfoSize) {
+ free_TPM_PCR_INFO(&k->PCRInfo);
+@@ -660,6 +671,7 @@ typedef struct TPM_BOUND_DATA {
+
+ #define TPM_BOUND_DATA_INIT { .payloadData = NULL }
+
++static
+ inline void free_TPM_BOUND_DATA(TPM_BOUND_DATA* d) {
+ free(d->payloadData);
+ d->payloadData = NULL;
+@@ -676,6 +688,7 @@ typedef struct TPM_STORED_DATA {
+ #define TPM_STORED_DATA_INIT { .sealInfoSize = 0, sealInfo = TPM_PCR_INFO_INIT,\
+ .encDataSize = 0, .encData = NULL }
+
++static
+ inline void free_TPM_STORED_DATA(TPM_STORED_DATA* d) {
+ if(d->sealInfoSize) {
+ free_TPM_PCR_INFO(&d->sealInfo);
+@@ -696,6 +709,7 @@ typedef struct TPM_STORED_DATA12 {
+ #define TPM_STORED_DATA12_INIT { .sealInfoLongSize = 0, \
+ sealInfoLong = TPM_PCR_INFO_INIT, .encDataSize = 0, .encData = NULL }
+
++static
+ inline void free_TPM_STORED_DATA12(TPM_STORED_DATA12* d) {
+ if(d->sealInfoLongSize) {
+ free_TPM_PCR_INFO_LONG(&d->sealInfoLong);
+diff --git a/stubdom/vtpmmgr/tpm2_marshal.h b/stubdom/vtpmmgr/tpm2_marshal.h
+index aaa44645a2..ba070ad38e 100644
+--- a/stubdom/vtpmmgr/tpm2_marshal.h
++++ b/stubdom/vtpmmgr/tpm2_marshal.h
+@@ -52,6 +52,7 @@
+ #define pack_TPM_BUFFER(ptr, buf, size) pack_BUFFER(ptr, buf, size)
+ #define unpack_TPM_BUFFER(ptr, buf, size) unpack_BUFFER(ptr, buf, size)
+
++static
+ inline BYTE* pack_BYTE_ARRAY(BYTE* ptr, const BYTE* array, UINT32 size)
+ {
+ int i;
+@@ -60,21 +61,25 @@ inline BYTE* pack_BYTE_ARRAY(BYTE* ptr, const BYTE* array, UINT32 size)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMA_SESSION(BYTE* ptr, const TPMA_SESSION *attr)
+ {
+ return pack_BYTE(ptr, (BYTE)(*attr));
+ }
+
++static
+ inline BYTE* unpack_TPMA_SESSION(BYTE* ptr, TPMA_SESSION *attr)
+ {
+ return unpack_BYTE(ptr, (BYTE *)attr);
+ }
+
++static
+ inline BYTE* pack_TPMI_ALG_HASH(BYTE* ptr, const TPMI_ALG_HASH *hash)
+ {
+ return pack_UINT16(ptr, *hash);
+ }
+
++static
+ inline BYTE* unpack_TPMI_ALG_HASH(BYTE *ptr, TPMI_ALG_HASH *hash)
+ {
+ return unpack_UINT16(ptr, hash);
+@@ -125,6 +130,7 @@ inline BYTE* unpack_TPMI_ALG_HASH(BYTE *ptr, TPMI_ALG_HASH *hash)
+ #define pack_TPMI_RH_LOCKOUT(ptr, l) pack_TPM2_HANDLE(ptr, l)
+ #define unpack_TPMI_RH_LOCKOUT(ptr, l) unpack_TPM2_HANDLE(ptr, l)
+
++static
+ inline BYTE* pack_TPM2B_DIGEST(BYTE* ptr, const TPM2B_DIGEST *digest)
+ {
+ ptr = pack_UINT16(ptr, digest->size);
+@@ -132,6 +138,7 @@ inline BYTE* pack_TPM2B_DIGEST(BYTE* ptr, const TPM2B_DIGEST *digest)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM2B_DIGEST(BYTE* ptr, TPM2B_DIGEST *digest)
+ {
+ ptr = unpack_UINT16(ptr, &digest->size);
+@@ -139,6 +146,7 @@ inline BYTE* unpack_TPM2B_DIGEST(BYTE* ptr, TPM2B_DIGEST *digest)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMT_TK_CREATION(BYTE* ptr,const TPMT_TK_CREATION *ticket )
+ {
+ ptr = pack_TPM_ST(ptr , &ticket->tag);
+@@ -147,6 +155,7 @@ inline BYTE* pack_TPMT_TK_CREATION(BYTE* ptr,const TPMT_TK_CREATION *ticket )
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMT_TK_CREATION(BYTE* ptr, TPMT_TK_CREATION *ticket )
+ {
+ ptr = unpack_TPM_ST(ptr, &ticket->tag);
+@@ -155,6 +164,7 @@ inline BYTE* unpack_TPMT_TK_CREATION(BYTE* ptr, TPMT_TK_CREATION *ticket )
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM2B_NAME(BYTE* ptr,const TPM2B_NAME *name )
+ {
+ ptr = pack_UINT16(ptr, name->size);
+@@ -162,6 +172,7 @@ inline BYTE* pack_TPM2B_NAME(BYTE* ptr,const TPM2B_NAME *name )
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM2B_NAME(BYTE* ptr, TPM2B_NAME *name)
+ {
+ ptr = unpack_UINT16(ptr, &name->size);
+@@ -169,6 +180,7 @@ inline BYTE* unpack_TPM2B_NAME(BYTE* ptr, TPM2B_NAME *name)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM2B_NONCE(BYTE* ptr, const TPM2B_NONCE *nonce)
+ {
+ return pack_TPM2B_DIGEST(ptr, (const TPM2B_DIGEST*)nonce);
+@@ -176,6 +188,7 @@ inline BYTE* pack_TPM2B_NONCE(BYTE* ptr, const TPM2B_NONCE *nonce)
+
+ #define unpack_TPM2B_NONCE(ptr, nonce) unpack_TPM2B_DIGEST(ptr, (TPM2B_DIGEST*)nonce)
+
++static
+ inline BYTE* pack_TPM2B_AUTH(BYTE* ptr, const TPM2B_AUTH *auth)
+ {
+ return pack_TPM2B_DIGEST(ptr, (const TPM2B_DIGEST*)auth);
+@@ -183,6 +196,7 @@ inline BYTE* pack_TPM2B_AUTH(BYTE* ptr, const TPM2B_AUTH *auth)
+
+ #define unpack_TPM2B_AUTH(ptr, auth) unpack_TPM2B_DIGEST(ptr, (TPM2B_DIGEST*)auth)
+
++static
+ inline BYTE* pack_TPM2B_DATA(BYTE* ptr, const TPM2B_DATA *data)
+ {
+ return pack_TPM2B_DIGEST(ptr, (const TPM2B_DIGEST*)data);
+@@ -190,6 +204,7 @@ inline BYTE* pack_TPM2B_DATA(BYTE* ptr, const TPM2B_DATA *data)
+
+ #define unpack_TPM2B_DATA(ptr, data) unpack_TPM2B_DIGEST(ptr, (TPM2B_DIGEST*)data)
+
++static
+ inline BYTE* pack_TPM2B_SENSITIVE_DATA(BYTE* ptr, const TPM2B_SENSITIVE_DATA *data)
+ {
+ return pack_TPM2B_DIGEST(ptr, (const TPM2B_DIGEST*)data);
+@@ -197,6 +212,7 @@ inline BYTE* pack_TPM2B_SENSITIVE_DATA(BYTE* ptr, const TPM2B_SENSITIVE_DATA *da
+
+ #define unpack_TPM2B_SENSITIVE_DATA(ptr, data) unpack_TPM2B_DIGEST(ptr, (TPM2B_DIGEST*)data)
+
++static
+ inline BYTE* pack_TPM2B_PUBLIC_KEY_RSA(BYTE* ptr, const TPM2B_PUBLIC_KEY_RSA *rsa)
+ {
+ return pack_TPM2B_DIGEST(ptr, (const TPM2B_DIGEST*)rsa);
+@@ -204,6 +220,7 @@ inline BYTE* pack_TPM2B_PUBLIC_KEY_RSA(BYTE* ptr, const TPM2B_PUBLIC_KEY_RSA *rs
+
+ #define unpack_TPM2B_PUBLIC_KEY_RSA(ptr, rsa) unpack_TPM2B_DIGEST(ptr, (TPM2B_DIGEST*)rsa)
+
++static
+ inline BYTE* pack_TPM2B_PRIVATE(BYTE* ptr, const TPM2B_PRIVATE *Private)
+ {
+ ptr = pack_UINT16(ptr, Private->size);
+@@ -211,6 +228,7 @@ inline BYTE* pack_TPM2B_PRIVATE(BYTE* ptr, const TPM2B_PRIVATE *Private)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM2B_PRIVATE(BYTE* ptr, TPM2B_PRIVATE *Private)
+ {
+ ptr = unpack_UINT16(ptr, &Private->size);
+@@ -218,6 +236,7 @@ inline BYTE* unpack_TPM2B_PRIVATE(BYTE* ptr, TPM2B_PRIVATE *Private)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMS_PCR_SELECTION_ARRAY(BYTE* ptr, const TPMS_PCR_SELECTION *sel, UINT32 count)
+ {
+ int i;
+@@ -229,6 +248,7 @@ inline BYTE* pack_TPMS_PCR_SELECTION_ARRAY(BYTE* ptr, const TPMS_PCR_SELECTION *
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMS_PCR_SELECTION_ARRAY(BYTE* ptr, TPMS_PCR_SELECTION *sel, UINT32 count)
+ {
+ int i;
+@@ -240,6 +260,7 @@ inline BYTE* unpack_TPMS_PCR_SELECTION_ARRAY(BYTE* ptr, TPMS_PCR_SELECTION *sel,
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPML_PCR_SELECTION(BYTE* ptr, const TPML_PCR_SELECTION *sel)
+ {
+ ptr = pack_UINT32(ptr, sel->count);
+@@ -247,6 +268,7 @@ inline BYTE* pack_TPML_PCR_SELECTION(BYTE* ptr, const TPML_PCR_SELECTION *sel)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPML_PCR_SELECTION(BYTE* ptr, TPML_PCR_SELECTION *sel)
+ {
+ ptr = unpack_UINT32(ptr, &sel->count);
+@@ -254,6 +276,7 @@ inline BYTE* unpack_TPML_PCR_SELECTION(BYTE* ptr, TPML_PCR_SELECTION *sel)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPML_DIGEST(BYTE* ptr,TPML_DIGEST *digest)
+ {
+ int i;
+@@ -265,6 +288,7 @@ inline BYTE* unpack_TPML_DIGEST(BYTE* ptr,TPML_DIGEST *digest)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMS_CREATION_DATA(BYTE* ptr,const TPMS_CREATION_DATA *data)
+ {
+ ptr = pack_TPML_PCR_SELECTION(ptr, &data->pcrSelect);
+@@ -276,6 +300,7 @@ inline BYTE* pack_TPMS_CREATION_DATA(BYTE* ptr,const TPMS_CREATION_DATA *data)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMS_CREATION_DATA(BYTE* ptr, TPMS_CREATION_DATA *data)
+ {
+ ptr = unpack_TPML_PCR_SELECTION(ptr, &data->pcrSelect);
+@@ -288,6 +313,7 @@ inline BYTE* unpack_TPMS_CREATION_DATA(BYTE* ptr, TPMS_CREATION_DATA *data)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM2B_CREATION_DATA(BYTE* ptr, const TPM2B_CREATION_DATA *data )
+ {
+ ptr = pack_UINT16(ptr, data->size);
+@@ -295,6 +321,7 @@ inline BYTE* pack_TPM2B_CREATION_DATA(BYTE* ptr, const TPM2B_CREATION_DATA *data
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM2B_CREATION_DATA(BYTE* ptr, TPM2B_CREATION_DATA * data)
+ {
+ ptr = unpack_UINT16(ptr, &data->size);
+@@ -302,6 +329,7 @@ inline BYTE* unpack_TPM2B_CREATION_DATA(BYTE* ptr, TPM2B_CREATION_DATA * data)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMS_SENSITIVE_CREATE(BYTE* ptr, const TPMS_SENSITIVE_CREATE *create)
+ {
+ ptr = pack_TPM2B_AUTH(ptr, &create->userAuth);
+@@ -309,6 +337,7 @@ inline BYTE* pack_TPMS_SENSITIVE_CREATE(BYTE* ptr, const TPMS_SENSITIVE_CREATE *
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM2B_SENSITIVE_CREATE(BYTE* ptr, const TPM2B_SENSITIVE_CREATE *create)
+ {
+ BYTE* sizePtr = ptr;
+@@ -318,6 +347,7 @@ inline BYTE* pack_TPM2B_SENSITIVE_CREATE(BYTE* ptr, const TPM2B_SENSITIVE_CREATE
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMU_SYM_MODE(BYTE* ptr, const TPMU_SYM_MODE *p,
+ const TPMI_ALG_SYM_OBJECT *sel)
+ {
+@@ -336,6 +366,7 @@ inline BYTE* pack_TPMU_SYM_MODE(BYTE* ptr, const TPMU_SYM_MODE *p,
+ }
+ return ptr;
+ }
++static
+ inline BYTE* unpack_TPMU_SYM_MODE(BYTE* ptr, TPMU_SYM_MODE *p,
+ const TPMI_ALG_SYM_OBJECT *sel)
+ {
+@@ -355,6 +386,7 @@ inline BYTE* unpack_TPMU_SYM_MODE(BYTE* ptr, TPMU_SYM_MODE *p,
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMU_SYM_KEY_BITS(BYTE* ptr, const TPMU_SYM_KEY_BITS *p,
+ const TPMI_ALG_SYM_OBJECT *sel)
+ {
+@@ -376,6 +408,7 @@ inline BYTE* pack_TPMU_SYM_KEY_BITS(BYTE* ptr, const TPMU_SYM_KEY_BITS *p,
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMU_SYM_KEY_BITS(BYTE* ptr, TPMU_SYM_KEY_BITS *p,
+ const TPMI_ALG_SYM_OBJECT *sel)
+ {
+@@ -397,6 +430,7 @@ inline BYTE* unpack_TPMU_SYM_KEY_BITS(BYTE* ptr, TPMU_SYM_KEY_BITS *p,
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMT_SYM_DEF_OBJECT(BYTE* ptr, const TPMT_SYM_DEF_OBJECT *p)
+ {
+ ptr = pack_TPMI_ALG_SYM_OBJECT(ptr, &p->algorithm);
+@@ -405,6 +439,7 @@ inline BYTE* pack_TPMT_SYM_DEF_OBJECT(BYTE* ptr, const TPMT_SYM_DEF_OBJECT *p)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMT_SYM_DEF_OBJECT(BYTE *ptr, TPMT_SYM_DEF_OBJECT *p)
+ {
+ ptr = unpack_TPMI_ALG_SYM_OBJECT(ptr, &p->algorithm);
+@@ -416,6 +451,7 @@ inline BYTE* unpack_TPMT_SYM_DEF_OBJECT(BYTE *ptr, TPMT_SYM_DEF_OBJECT *p)
+ #define pack_TPMS_SCHEME_OAEP(p, t) pack_TPMI_ALG_HASH(p, &((t)->hashAlg))
+ #define unpack_TPMS_SCHEME_OAEP(p, t) unpack_TPMI_ALG_HASH(p, &((t)->hashAlg))
+
++static
+ inline BYTE* pack_TPMU_ASYM_SCHEME(BYTE *ptr, const TPMU_ASYM_SCHEME *p,
+ const TPMI_ALG_RSA_SCHEME *s)
+ {
+@@ -438,6 +474,7 @@ inline BYTE* pack_TPMU_ASYM_SCHEME(BYTE *ptr, const TPMU_ASYM_SCHEME *p,
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMU_ASYM_SCHEME(BYTE *ptr, TPMU_ASYM_SCHEME *p,
+ const TPMI_ALG_RSA_SCHEME *s)
+ {
+@@ -462,6 +499,7 @@ inline BYTE* unpack_TPMU_ASYM_SCHEME(BYTE *ptr, TPMU_ASYM_SCHEME *p,
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMT_RSA_SCHEME(BYTE* ptr, const TPMT_RSA_SCHEME *p)
+ {
+ ptr = pack_TPMI_ALG_RSA_SCHEME(ptr, &p->scheme);
+@@ -469,6 +507,7 @@ inline BYTE* pack_TPMT_RSA_SCHEME(BYTE* ptr, const TPMT_RSA_SCHEME *p)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMT_RSA_SCHEME(BYTE* ptr, TPMT_RSA_SCHEME *p)
+ {
+ ptr = unpack_TPMI_ALG_RSA_SCHEME(ptr, &p->scheme);
+@@ -476,6 +515,7 @@ inline BYTE* unpack_TPMT_RSA_SCHEME(BYTE* ptr, TPMT_RSA_SCHEME *p)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMT_RSA_DECRYPT(BYTE* ptr, const TPMT_RSA_DECRYPT *p)
+ {
+ ptr = pack_TPMI_ALG_RSA_SCHEME(ptr, &p->scheme);
+@@ -483,6 +523,7 @@ inline BYTE* pack_TPMT_RSA_DECRYPT(BYTE* ptr, const TPMT_RSA_DECRYPT *p)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMS_RSA_PARMS(BYTE* ptr, const TPMS_RSA_PARMS *p)
+ {
+ ptr = pack_TPMT_SYM_DEF_OBJECT(ptr, &p->symmetric);
+@@ -492,6 +533,7 @@ inline BYTE* pack_TPMS_RSA_PARMS(BYTE* ptr, const TPMS_RSA_PARMS *p)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMS_RSA_PARMS(BYTE *ptr, TPMS_RSA_PARMS *p)
+ {
+ ptr = unpack_TPMT_SYM_DEF_OBJECT(ptr, &p->symmetric);
+@@ -501,6 +543,7 @@ inline BYTE* unpack_TPMS_RSA_PARMS(BYTE *ptr, TPMS_RSA_PARMS *p)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMU_PUBLIC_PARMS(BYTE* ptr, const TPMU_PUBLIC_PARMS *param,
+ const TPMI_ALG_PUBLIC *selector)
+ {
+@@ -518,6 +561,7 @@ inline BYTE* pack_TPMU_PUBLIC_PARMS(BYTE* ptr, const TPMU_PUBLIC_PARMS *param,
+ return NULL;
+ }
+
++static
+ inline BYTE* unpack_TPMU_PUBLIC_PARMS(BYTE* ptr, TPMU_PUBLIC_PARMS *param,
+ const TPMI_ALG_PUBLIC *selector)
+ {
+@@ -535,18 +579,21 @@ inline BYTE* unpack_TPMU_PUBLIC_PARMS(BYTE* ptr, TPMU_PUBLIC_PARMS *param,
+ return NULL;
+ }
+
++static
+ inline BYTE* pack_TPMS_ECC_POINT(BYTE* ptr, const TPMS_ECC_POINT *point)
+ {
+ assert(false);
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMS_ECC_POINT(BYTE* ptr, TPMS_ECC_POINT *point)
+ {
+ assert(false);
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMU_PUBLIC_ID(BYTE* ptr, const TPMU_PUBLIC_ID *id,
+ const TPMI_ALG_PUBLIC *selector)
+ {
+@@ -564,6 +611,7 @@ inline BYTE* pack_TPMU_PUBLIC_ID(BYTE* ptr, const TPMU_PUBLIC_ID *id,
+ return NULL;
+ }
+
++static
+ inline BYTE* unpack_TPMU_PUBLIC_ID(BYTE* ptr, TPMU_PUBLIC_ID *id, TPMI_ALG_PUBLIC *selector)
+ {
+ switch (*selector) {
+@@ -580,6 +628,7 @@ inline BYTE* unpack_TPMU_PUBLIC_ID(BYTE* ptr, TPMU_PUBLIC_ID *id, TPMI_ALG_PUBLI
+ return NULL;
+ }
+
++static
+ inline BYTE* pack_TPMT_PUBLIC(BYTE* ptr, const TPMT_PUBLIC *public)
+ {
+ ptr = pack_TPMI_ALG_PUBLIC(ptr, &public->type);
+@@ -591,6 +640,7 @@ inline BYTE* pack_TPMT_PUBLIC(BYTE* ptr, const TPMT_PUBLIC *public)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPMT_PUBLIC(BYTE* ptr, TPMT_PUBLIC *public)
+ {
+ ptr = unpack_TPMI_ALG_PUBLIC(ptr, &public->type);
+@@ -602,6 +652,7 @@ inline BYTE* unpack_TPMT_PUBLIC(BYTE* ptr, TPMT_PUBLIC *public)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM2B_PUBLIC(BYTE* ptr, const TPM2B_PUBLIC *public)
+ {
+ BYTE *sizePtr = ptr;
+@@ -611,6 +662,7 @@ inline BYTE* pack_TPM2B_PUBLIC(BYTE* ptr, const TPM2B_PUBLIC *public)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM2B_PUBLIC(BYTE* ptr, TPM2B_PUBLIC *public)
+ {
+ ptr = unpack_UINT16(ptr, &public->size);
+@@ -618,6 +670,7 @@ inline BYTE* unpack_TPM2B_PUBLIC(BYTE* ptr, TPM2B_PUBLIC *public)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMS_PCR_SELECTION(BYTE* ptr, const TPMS_PCR_SELECTION *selection)
+ {
+ ptr = pack_TPMI_ALG_HASH(ptr, &selection->hash);
+@@ -626,6 +679,7 @@ inline BYTE* pack_TPMS_PCR_SELECTION(BYTE* ptr, const TPMS_PCR_SELECTION *select
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPMS_PCR_SELECTION_Array(BYTE* ptr, const TPMS_PCR_SELECTION *selections,
+ const UINT32 cnt)
+ {
+@@ -635,6 +689,7 @@ inline BYTE* pack_TPMS_PCR_SELECTION_Array(BYTE* ptr, const TPMS_PCR_SELECTION *
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM_AuthArea(BYTE* ptr, const TPM_AuthArea *auth)
+ {
+ BYTE* sizePtr = ptr;
+@@ -647,6 +702,7 @@ inline BYTE* pack_TPM_AuthArea(BYTE* ptr, const TPM_AuthArea *auth)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM_AuthArea(BYTE* ptr, TPM_AuthArea *auth)
+ {
+ ptr = unpack_UINT32(ptr, &auth->size);
+@@ -657,6 +713,7 @@ inline BYTE* unpack_TPM_AuthArea(BYTE* ptr, TPM_AuthArea *auth)
+ return ptr;
+ }
+
++static
+ inline BYTE* pack_TPM2_RSA_KEY(BYTE* ptr, const TPM2_RSA_KEY *key)
+ {
+ ptr = pack_TPM2B_PRIVATE(ptr, &key->Private);
+@@ -664,6 +721,7 @@ inline BYTE* pack_TPM2_RSA_KEY(BYTE* ptr, const TPM2_RSA_KEY *key)
+ return ptr;
+ }
+
++static
+ inline BYTE* unpack_TPM2_RSA_KEY(BYTE* ptr, TPM2_RSA_KEY *key)
+ {
+ ptr = unpack_TPM2B_PRIVATE(ptr, &key->Private);
+diff --git a/stubdom/vtpmmgr/tpmrsa.h b/stubdom/vtpmmgr/tpmrsa.h
+index 08213bbb7a..65fd32a45c 100644
+--- a/stubdom/vtpmmgr/tpmrsa.h
++++ b/stubdom/vtpmmgr/tpmrsa.h
+@@ -62,6 +62,7 @@ TPM_RESULT tpmrsa_pub_encrypt_oaep( tpmrsa_context *ctx,
+ unsigned char *output );
+
+ /* free tpmrsa key */
++static
+ inline void tpmrsa_free( tpmrsa_context *ctx ) {
+ mpi_free( &ctx->RN ); mpi_free( &ctx->E ); mpi_free( &ctx->N );
+ }
+
+_______________________________________________
+Xen-devel mailing list
+Xen-devel@lists.xen.org
+https://lists.xen.org/xen-devel
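The reasoning behind the mechanical change above, restated: with the inline semantics current gcc releases use, a plain "inline" definition in a header only provides an inline definition; if the compiler decides not to inline a call (which gcc 7 does more readily here), the call needs an external definition that no object file provides, and linking fails. "static inline" gives every translation unit its own copy and sidesteps the problem. A minimal sketch with a hypothetical helper, in the spirit of the marshal.h pack functions:

    /* pack.h -- safe to include from many .c files */
    static inline unsigned char *pack_u16(unsigned char *p, unsigned short v)
    {
        p[0] = (unsigned char)(v >> 8);   /* big-endian, like cpu_to_be16() */
        p[1] = (unsigned char)(v & 0xff);
        return p + 2;
    }

Every .c file that includes such a header gets its own (usually inlined) copy, so the linker never has to find an external definition.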
diff --git a/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-ath5k.patch b/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-ath5k.patch
new file mode 100644
index 0000000000000..2de261aa02700
--- /dev/null
+++ b/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-ath5k.patch
@@ -0,0 +1,28 @@
+From 45f2265bfcbbf2afd7fac24372ae26e453f2b52d Mon Sep 17 00:00:00 2001
+From: Michael Brown <mcb30@ipxe.org>
+Date: Wed, 22 Mar 2017 11:52:09 +0200
+Subject: [PATCH] [ath] Add missing break statements
+
+Signed-off-by: Michael Brown <mcb30@ipxe.org>
+---
+ src/drivers/net/ath/ath5k/ath5k_desc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/drivers/net/ath/ath5k/ath5k_desc.c b/src/drivers/net/ath/ath5k/ath5k_desc.c
+index 30fe1c777..816d26ede 100644
+--- a/src/drivers/net/ath/ath5k/ath5k_desc.c
++++ b/src/drivers/net/ath/ath5k/ath5k_desc.c
+@@ -104,10 +104,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ case AR5K_PKT_TYPE_BEACON:
+ case AR5K_PKT_TYPE_PROBE_RESP:
+ frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
++ break;
+ case AR5K_PKT_TYPE_PIFS:
+ frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
++ break;
+ default:
+ frame_type = type /*<< 2 ?*/;
++ break;
+ }
+
+ tx_ctl->tx_control_0 |=
diff --git a/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-curses.patch b/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-curses.patch
new file mode 100644
index 0000000000000..5faa5600ba580
--- /dev/null
+++ b/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough-curses.patch
@@ -0,0 +1,24 @@
+From 28e26dd2503e6006fabb26f8c33050ba93a99623 Mon Sep 17 00:00:00 2001
+From: Michael Brown <mcb30@ipxe.org>
+Date: Wed, 29 Mar 2017 10:35:05 +0300
+Subject: [PATCH] [mucurses] Fix erroneous __nonnull attribute
+
+Signed-off-by: Michael Brown <mcb30@ipxe.org>
+---
+ src/include/curses.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/include/curses.h b/src/include/curses.h
+index 04060fe27..1f6fe029b 100644
+--- a/src/include/curses.h
++++ b/src/include/curses.h
+@@ -443,7 +443,8 @@ extern int wborder ( WINDOW *, chtype, chtype, chtype, chtype, chtype, chtype,
+ extern int wclrtobot ( WINDOW * ) __nonnull;
+ extern int wclrtoeol ( WINDOW * ) __nonnull;
+ extern void wcursyncup ( WINDOW * );
+-extern int wcolour_set ( WINDOW *, short, void * ) __nonnull;
++extern int wcolour_set ( WINDOW *, short, void * )
++ __attribute__ (( nonnull (1)));
+ #define wcolor_set(w,s,v) wcolour_set((w),(s),(v))
+ extern int wdelch ( WINDOW * ) __nonnull;
+ extern int wdeleteln ( WINDOW * ) __nonnull;
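For reference, iPXE's __nonnull expands to an attribute declaring every pointer parameter non-NULL, and gcc 7 diagnoses callers that pass a literal NULL to such a parameter; wcolour_set() is presumably called with NULL for its reserved third argument, hence the narrowing to nonnull(1). A hypothetical declaration using the narrowed form (not iPXE code):

    #include <stddef.h>

    /* Only the first argument must be non-NULL; the third may be NULL. */
    extern int colour_set_like ( void *win, short pair, void *opts )
        __attribute__ (( nonnull (1) ));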
diff --git a/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough.patch b/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough.patch
new file mode 100644
index 0000000000000..fe379699b36ee
--- /dev/null
+++ b/system/xen/patches/patch-inbuild-ipxe-gcc7-implicit-fallthrough.patch
@@ -0,0 +1,163 @@
+From 5f85cbb9ee1c00cec81a848a9e871ad5d1e7f53f Mon Sep 17 00:00:00 2001
+From: Michael Brown <mcb30@ipxe.org>
+Date: Wed, 29 Mar 2017 10:36:03 +0300
+Subject: [PATCH] [build] Avoid implicit-fallthrough warnings on GCC 7
+
+Reported-by: Vinson Lee <vlee@freedesktop.org>
+Reported-by: Liang Yan <lyan@suse.com>
+Signed-off-by: Michael Brown <mcb30@ipxe.org>
+---
+ src/arch/x86/image/bzimage.c | 2 ++
+ src/drivers/infiniband/golan.c | 1 +
+ src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c | 2 ++
+ src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c | 1 +
+ src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c | 1 +
+ src/drivers/net/igbvf/igbvf_vf.c | 1 +
+ src/drivers/net/tg3/tg3_hw.c | 12 ++++++++++++
+ src/tests/setjmp_test.c | 5 +++--
+ 8 files changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/src/arch/x86/image/bzimage.c b/src/arch/x86/image/bzimage.c
+index e3c4cb83d..51498bf95 100644
+--- a/src/arch/x86/image/bzimage.c
++++ b/src/arch/x86/image/bzimage.c
+@@ -282,9 +282,11 @@ static int bzimage_parse_cmdline ( struct image *image,
+ case 'G':
+ case 'g':
+ bzimg->mem_limit <<= 10;
++ /* Fall through */
+ case 'M':
+ case 'm':
+ bzimg->mem_limit <<= 10;
++ /* Fall through */
+ case 'K':
+ case 'k':
+ bzimg->mem_limit <<= 10;
+diff --git a/src/drivers/infiniband/golan.c b/src/drivers/infiniband/golan.c
+index 30eaabab2..61331d4c1 100755
+--- a/src/drivers/infiniband/golan.c
++++ b/src/drivers/infiniband/golan.c
+@@ -1956,6 +1956,7 @@ static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe
+ case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
+ golan_ib_update ( ibdev );
++ /* Fall through */
+ case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
+ case GOLAN_PORT_CHANGE_SUBTYPE_LID:
+ case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
+diff --git a/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c b/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c
+index 2b6c133cb..a98e4bb66 100644
+--- a/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c
++++ b/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c
+@@ -640,12 +640,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
++ /* Fall through */
+ case 0x3:
+ if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
++ /* Fall through */
+ case 0x1:
+ case 0x2:
+ case 0x7:
+diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c b/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c
+index 72203ba48..65cfad597 100644
+--- a/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c
++++ b/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c
+@@ -122,6 +122,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+ aModeRefSel = 2;
+ if (aModeRefSel)
+ break;
++ /* Fall through */
+ case 1:
+ default:
+ aModeRefSel = 0;
+diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c b/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c
+index 2244b775a..b66358b92 100644
+--- a/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c
++++ b/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c
+@@ -539,6 +539,7 @@ void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
++ /* Fall through */
+ case 0x3:
+ case 0x1:
+ case 0x2:
+diff --git a/src/drivers/net/igbvf/igbvf_vf.c b/src/drivers/net/igbvf/igbvf_vf.c
+index f2dac8be7..f841d5e3d 100644
+--- a/src/drivers/net/igbvf/igbvf_vf.c
++++ b/src/drivers/net/igbvf/igbvf_vf.c
+@@ -357,6 +357,7 @@ s32 igbvf_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type)
+ break;
+ case e1000_promisc_enabled:
+ msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
++ /* Fall through */
+ case e1000_promisc_unicast:
+ msgbuf |= E1000_VF_SET_PROMISC_UNICAST;
+ case e1000_promisc_disabled:
+diff --git a/src/drivers/net/tg3/tg3_hw.c b/src/drivers/net/tg3/tg3_hw.c
+index 50353cf36..798f8519f 100644
+--- a/src/drivers/net/tg3/tg3_hw.c
++++ b/src/drivers/net/tg3/tg3_hw.c
+@@ -2518,28 +2518,40 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ switch (limit) {
+ case 16:
+ tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
++ /* Fall through */
+ case 15:
+ tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
++ /* Fall through */
+ case 14:
+ tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
++ /* Fall through */
+ case 13:
+ tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
++ /* Fall through */
+ case 12:
+ tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
++ /* Fall through */
+ case 11:
+ tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
++ /* Fall through */
+ case 10:
+ tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
++ /* Fall through */
+ case 9:
+ tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
++ /* Fall through */
+ case 8:
+ tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
++ /* Fall through */
+ case 7:
+ tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
++ /* Fall through */
+ case 6:
+ tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
++ /* Fall through */
+ case 5:
+ tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
++ /* Fall through */
+ case 4:
+ /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
+ case 3:
+diff --git a/src/tests/setjmp_test.c b/src/tests/setjmp_test.c
+index 50ad13f3c..deafcee09 100644
+--- a/src/tests/setjmp_test.c
++++ b/src/tests/setjmp_test.c
+@@ -111,8 +111,9 @@ static void setjmp_return_ok ( struct setjmp_test *test, int value ) {
+ * @v file Test code file
+ * @v line Test code line
+ */
+-static void longjmp_okx ( struct setjmp_test *test, int value,
+- const char *file, unsigned int line ) {
++static void __attribute__ (( noreturn ))
++longjmp_okx ( struct setjmp_test *test, int value,
++ const char *file, unsigned int line ) {
+
+ /* Record expected value. A zero passed to longjmp() should
+ * result in setjmp() returning a value of one.
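
For background, gcc 7 introduced -Wimplicit-fallthrough (pulled in by -Wextra), and the iPXE tree promotes warnings to errors, which is why every silent fall-through above gains an annotation; a comment such as /* Fall through */ placed directly before the next case label is one of the patterns gcc recognises and accepts. A self-contained sketch of the effect, using a hypothetical demo file rather than the iPXE sources:

# Write a tiny switch with an annotated fall-through and compile it with the
# flags gcc 7 would complain under; the /* Fall through */ comment keeps it quiet.
cat > /tmp/fallthrough-demo.c <<'EOF'
int classify(int x)
{
	int r = 0;
	switch (x) {
	case 2:
		r += 2;
		/* Fall through */
	case 1:
		r += 1;
		break;
	default:
		r = -1;
	}
	return r;
}
EOF
gcc -Wextra -Werror -c /tmp/fallthrough-demo.c -o /tmp/fallthrough-demo.o
# Deleting the comment makes gcc 7 reject the file with a diagnostic along
# the lines of: error: this statement may fall through [-Werror=implicit-fallthrough=]
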
diff --git a/system/xen/patches/patch-ipxe-patches-series.patch b/system/xen/patches/patch-ipxe-patches-series.patch
new file mode 100644
index 0000000000000..30e9164177bff
--- /dev/null
+++ b/system/xen/patches/patch-ipxe-patches-series.patch
@@ -0,0 +1,18 @@
+Subject: [PATCH] Fix gcc7 warnings
+
+---
+ tools/firmware/etherboot/patches/series | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/firmware/etherboot/patches/series b/tools/firmware/etherboot/patches/series
+index 86cb300..780c6c6 100644
+--- a/tools/firmware/etherboot/patches/series
++++ b/tools/firmware/etherboot/patches/series
+@@ -1 +1,4 @@
+ boot_prompt_option.patch
++patch-inbuild-ipxe-gcc7-implicit-fallthrough.patch
++patch-inbuild-ipxe-gcc7-implicit-fallthrough-ath5k.patch
++patch-inbuild-ipxe-gcc7-implicit-fallthrough-curses.patch
+--
+2.13.0
+
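
The three file names added to the series above are picked up automatically: the etherboot build unpacks the bundled iPXE tarball and then applies every patch listed in patches/series, in order, so extending the series is all that is needed to get the new patches applied. A rough sketch of that mechanism (the extracted directory name is an assumption; the real rule lives in the etherboot makefiles):

# Simplified sketch of how patches/series drives patching of the iPXE tree.
cd tools/firmware/etherboot
tar xf _ipxe.tar.gz                # tarball staged by xen.SlackBuild
while read -r p; do
    [ -n "$p" ] && patch -d ipxe -p1 < "patches/$p"   # "ipxe" dir name assumed
done < patches/series
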
diff --git a/system/xen/patches/stubdom_zlib_disable_man_install.diff b/system/xen/patches/stubdom_zlib_disable_man_install.diff
new file mode 100644
index 0000000000000..e59d377005f28
--- /dev/null
+++ b/system/xen/patches/stubdom_zlib_disable_man_install.diff
@@ -0,0 +1,32 @@
+--- xen-4.9.0/stubdom/Makefile.orig 2017-06-27 20:13:19.000000000 +0200
++++ xen-4.9.0/stubdom/Makefile 2017-08-08 19:03:03.968141229 +0200
+@@ -117,6 +117,7 @@
+ cross-zlib: $(ZLIB_STAMPFILE)
+ $(ZLIB_STAMPFILE): zlib-$(XEN_TARGET_ARCH) $(NEWLIB_STAMPFILE)
+ ( cd $< && \
++ patch -p1 < ../zlib_disable_man_install.diff && \
+ CFLAGS="$(TARGET_CPPFLAGS) $(TARGET_CFLAGS)" CC=$(CC) ./configure --prefix=$(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf && \
+ $(MAKE) DESTDIR= libz.a && \
+ $(MAKE) DESTDIR= install )
+--- xen-4.9.0/stubdom/zlib_disable_man_install.diff.orig 1970-01-01 01:00:00.000000000 +0100
++++ xen-4.9.0/stubdom/zlib_disable_man_install.diff 2017-08-08 18:57:11.541178819 +0200
+@@ -0,0 +1,19 @@
++--- zlib-1.2.3/Makefile.in.orig 2005-07-18 04:25:21.000000000 +0200
+++++ zlib-1.2.3/Makefile.in 2017-08-08 18:56:06.611161407 +0200
++@@ -93,7 +93,6 @@
++ -@if [ ! -d $(exec_prefix) ]; then mkdir -p $(exec_prefix); fi
++ -@if [ ! -d $(includedir) ]; then mkdir -p $(includedir); fi
++ -@if [ ! -d $(libdir) ]; then mkdir -p $(libdir); fi
++- -@if [ ! -d $(man3dir) ]; then mkdir -p $(man3dir); fi
++ cp zlib.h zconf.h $(includedir)
++ chmod 644 $(includedir)/zlib.h $(includedir)/zconf.h
++ cp $(LIBS) $(libdir)
++@@ -105,8 +104,6 @@
++ ln -s $(SHAREDLIBV) $(SHAREDLIBM); \
++ (ldconfig || true) >/dev/null 2>&1; \
++ fi
++- cp zlib.3 $(man3dir)
++- chmod 644 $(man3dir)/zlib.3
++ # The ranlib in install is needed on NeXTSTEP which checks file times
++ # ldconfig is for Linux
++
diff --git a/system/xen/xen.SlackBuild b/system/xen/xen.SlackBuild
index 7421c54c05619..54e278aad4976 100644
--- a/system/xen/xen.SlackBuild
+++ b/system/xen/xen.SlackBuild
@@ -23,12 +23,12 @@
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
PRGNAM=xen
-VERSION=${VERSION:-4.8.1}
-BUILD=${BUILD:-2}
+VERSION=${VERSION:-4.9.0}
+BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
SEABIOS=${SEABIOS:-1.10.0}
-OVMF=${OVMF:-20160905_bc54e50}
+OVMF=${OVMF:-20170321_5920a9d}
IPXE=${IPXE:-827dd1bfee67daa683935ce65316f7e0f057fe1c}
if [ -z "$ARCH" ]; then
@@ -158,6 +158,25 @@ cp $CWD/ipxe-git-$IPXE.tar.gz tools/firmware/etherboot/_ipxe.tar.gz
cp $CWD/{lwip,zlib,newlib,pciutils,grub,gmp,tpm_emulator}-*.tar.?z* \
$CWD/polarssl-*.tgz stubdom
+# Prevent leaks during the build
+patch -p1 <$CWD/patches/stubdom_zlib_disable_man_install.diff
+
+# GCC7 support with help from Mark Pryor (PryMar56) and ArchLinux folks
+if [ $(gcc -dumpfullversion | cut -d. -f1) -eq 7 ]; then
+ # OVMF
+ patch -d tools/firmware/ovmf-dir \
+ -p1 <$CWD/patches/gcc7-fix-incorrect-comparison.patch
+ # vTPM
+ patch -p1 <$CWD/patches/gcc7-vtpmmgr-make-inline-static.patch
+ patch -p1 <$CWD/patches/gcc7-vtpm-implicit-fallthrough.patch
+ # Mini-OS
+ patch -d extras/mini-os \
+ -p1 <$CWD/patches/gcc7-minios-implement-udivmoddi4.patch
+ # IPXE
+ patch -p1 <$CWD/patches/patch-ipxe-patches-series.patch
+ cp $CWD/patches/patch-inbuild-ipxe*.patch tools/firmware/etherboot/patches/
+fi
+
CFLAGS="$SLKCFLAGS" \
CXXFLAGS="$SLKCFLAGS" \
./configure \
@@ -214,6 +233,9 @@ rmdir $PKG/etc/rc.d/init.d/
# Append .new to config files
for i in $PKG/etc/{default/*,xen/*.conf} ; do mv $i $i.new ; done
+# Remove some executable flags
+chmod -x $PKG/usr/libexec/xen/boot/*.gz 2>/dev/null || true
+
find $PKG | xargs file | grep -e "executable" -e "shared object" | grep ELF \
| cut -f 1 -d : | xargs strip --strip-unneeded 2> /dev/null || true
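
One note on the gcc -dumpfullversion test added to xen.SlackBuild above: that option only exists from gcc 7 onwards, so on older compilers the command fails and the comparison sees an empty string. A slightly more defensive variant of the same check (a sketch, not what the SlackBuild ships):

# Fall back to -dumpversion on compilers that predate -dumpfullversion, so
# the major-version test never compares against an empty string:
GCC_MAJOR=$( (gcc -dumpfullversion 2>/dev/null || gcc -dumpversion) | cut -d. -f1 )
if [ "${GCC_MAJOR:-0}" -eq 7 ]; then
  echo "gcc $GCC_MAJOR detected, gcc7 compatibility patches will be applied"
fi
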
diff --git a/system/xen/xen.info b/system/xen/xen.info
index fff59055e697c..946878c42bd77 100644
--- a/system/xen/xen.info
+++ b/system/xen/xen.info
@@ -1,7 +1,7 @@
PRGNAM="xen"
-VERSION="4.8.1"
+VERSION="4.9.0"
HOMEPAGE="http://www.xenproject.org/"
-DOWNLOAD="http://mirror.slackware.hr/sources/xen/xen-4.8.1.tar.gz \
+DOWNLOAD="http://mirror.slackware.hr/sources/xen/xen-4.9.0.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/ipxe-git-827dd1bfee67daa683935ce65316f7e0f057fe1c.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/lwip-1.3.0.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/zlib-1.2.3.tar.gz \
@@ -12,8 +12,8 @@ DOWNLOAD="http://mirror.slackware.hr/sources/xen/xen-4.8.1.tar.gz \
http://mirror.slackware.hr/sources/xen-extfiles/gmp-4.3.2.tar.bz2 \
http://mirror.slackware.hr/sources/xen-extfiles/tpm_emulator-0.7.4.tar.gz \
http://mirror.slackware.hr/sources/xen-seabios/seabios-1.10.0.tar.gz \
- http://mirror.slackware.hr/sources/xen-ovmf/xen-ovmf-20160905_bc54e50.tar.bz2"
-MD5SUM="2ef8a991752bf71614651bf2ab0df390 \
+ http://mirror.slackware.hr/sources/xen-ovmf/xen-ovmf-20170321_5920a9d.tar.bz2"
+MD5SUM="f0a753637630f982dfbdb64121fd71e1 \
71c69b5e1db9e01d5f246226eca03c22 \
36cc57650cffda9a0269493be2a169bb \
debc62758716a169df9f62e6ab2bc634 \
@@ -24,7 +24,7 @@ MD5SUM="2ef8a991752bf71614651bf2ab0df390 \
dd60683d7057917e34630b4a787932e8 \
e26becb8a6a2b6695f6b3e8097593db8 \
633ffc9df0295eeeb4182444eb0300ee \
- 87ba85cfec3993e4ee566dc58724d8a6"
+ 8caf4ea54fcc035d604f35556066e312"
DOWNLOAD_x86_64=""
MD5SUM_x86_64=""
REQUIRES="acpica yajl"
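
The DOWNLOAD and MD5SUM lists in the .info file pair up by position, so the updated checksums above can be cross-checked with a small loop once the tarballs have been fetched into the current directory (a sketch, not part of the SlackBuilds tooling):

# xen.info is plain sh variable assignments, so it can simply be sourced;
# each tarball is then checked against the md5 at the same list position.
. ./xen.info
set -- $MD5SUM
for url in $DOWNLOAD; do
  echo "$1  $(basename "$url")" | md5sum -c -
  shift
done
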
diff --git a/system/xen/xsa/xsa213-4.8.patch b/system/xen/xsa/xsa213-4.8.patch
deleted file mode 100644
index 2f9fa6ab11b04..0000000000000
--- a/system/xen/xsa/xsa213-4.8.patch
+++ /dev/null
@@ -1,177 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: multicall: deal with early exit conditions
-
-In particular changes to guest privilege level require the multicall
-sequence to be aborted, as hypercalls are permitted from kernel mode
-only. While likely not very useful in a multicall, also properly handle
-the return value in the HYPERVISOR_iret case (which should be the guest
-specified value).
-
-This is XSA-213.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Acked-by: Julien Grall <julien.grall@arm.com>
-
---- a/xen/arch/arm/traps.c
-+++ b/xen/arch/arm/traps.c
-@@ -1550,7 +1550,7 @@ static bool_t check_multicall_32bit_clea
- return true;
- }
-
--void arch_do_multicall_call(struct mc_state *state)
-+enum mc_disposition arch_do_multicall_call(struct mc_state *state)
- {
- struct multicall_entry *multi = &state->call;
- arm_hypercall_fn_t call = NULL;
-@@ -1558,23 +1558,26 @@ void arch_do_multicall_call(struct mc_st
- if ( multi->op >= ARRAY_SIZE(arm_hypercall_table) )
- {
- multi->result = -ENOSYS;
-- return;
-+ return mc_continue;
- }
-
- call = arm_hypercall_table[multi->op].fn;
- if ( call == NULL )
- {
- multi->result = -ENOSYS;
-- return;
-+ return mc_continue;
- }
-
- if ( is_32bit_domain(current->domain) &&
- !check_multicall_32bit_clean(multi) )
-- return;
-+ return mc_continue;
-
- multi->result = call(multi->args[0], multi->args[1],
- multi->args[2], multi->args[3],
- multi->args[4]);
-+
-+ return likely(!psr_mode_is_user(guest_cpu_user_regs()))
-+ ? mc_continue : mc_preempt;
- }
-
- /*
---- a/xen/arch/x86/hypercall.c
-+++ b/xen/arch/x86/hypercall.c
-@@ -255,15 +255,19 @@ void pv_hypercall(struct cpu_user_regs *
- perfc_incr(hypercalls);
- }
-
--void arch_do_multicall_call(struct mc_state *state)
-+enum mc_disposition arch_do_multicall_call(struct mc_state *state)
- {
-- if ( !is_pv_32bit_vcpu(current) )
-+ struct vcpu *curr = current;
-+ unsigned long op;
-+
-+ if ( !is_pv_32bit_vcpu(curr) )
- {
- struct multicall_entry *call = &state->call;
-
-- if ( (call->op < ARRAY_SIZE(pv_hypercall_table)) &&
-- pv_hypercall_table[call->op].native )
-- call->result = pv_hypercall_table[call->op].native(
-+ op = call->op;
-+ if ( (op < ARRAY_SIZE(pv_hypercall_table)) &&
-+ pv_hypercall_table[op].native )
-+ call->result = pv_hypercall_table[op].native(
- call->args[0], call->args[1], call->args[2],
- call->args[3], call->args[4], call->args[5]);
- else
-@@ -274,15 +278,21 @@ void arch_do_multicall_call(struct mc_st
- {
- struct compat_multicall_entry *call = &state->compat_call;
-
-- if ( (call->op < ARRAY_SIZE(pv_hypercall_table)) &&
-- pv_hypercall_table[call->op].compat )
-- call->result = pv_hypercall_table[call->op].compat(
-+ op = call->op;
-+ if ( (op < ARRAY_SIZE(pv_hypercall_table)) &&
-+ pv_hypercall_table[op].compat )
-+ call->result = pv_hypercall_table[op].compat(
- call->args[0], call->args[1], call->args[2],
- call->args[3], call->args[4], call->args[5]);
- else
- call->result = -ENOSYS;
- }
- #endif
-+
-+ return unlikely(op == __HYPERVISOR_iret)
-+ ? mc_exit
-+ : likely(guest_kernel_mode(curr, guest_cpu_user_regs()))
-+ ? mc_continue : mc_preempt;
- }
-
- /*
---- a/xen/common/multicall.c
-+++ b/xen/common/multicall.c
-@@ -40,6 +40,7 @@ do_multicall(
- struct mc_state *mcs = &current->mc_state;
- uint32_t i;
- int rc = 0;
-+ enum mc_disposition disp = mc_continue;
-
- if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) )
- {
-@@ -50,7 +51,7 @@ do_multicall(
- if ( unlikely(!guest_handle_okay(call_list, nr_calls)) )
- rc = -EFAULT;
-
-- for ( i = 0; !rc && i < nr_calls; i++ )
-+ for ( i = 0; !rc && disp == mc_continue && i < nr_calls; i++ )
- {
- if ( i && hypercall_preempt_check() )
- goto preempted;
-@@ -63,7 +64,7 @@ do_multicall(
-
- trace_multicall_call(&mcs->call);
-
-- arch_do_multicall_call(mcs);
-+ disp = arch_do_multicall_call(mcs);
-
- #ifndef NDEBUG
- {
-@@ -77,7 +78,14 @@ do_multicall(
- }
- #endif
-
-- if ( unlikely(__copy_field_to_guest(call_list, &mcs->call, result)) )
-+ if ( unlikely(disp == mc_exit) )
-+ {
-+ if ( __copy_field_to_guest(call_list, &mcs->call, result) )
-+ /* nothing, best effort only */;
-+ rc = mcs->call.result;
-+ }
-+ else if ( unlikely(__copy_field_to_guest(call_list, &mcs->call,
-+ result)) )
- rc = -EFAULT;
- else if ( mcs->flags & MCSF_call_preempted )
- {
-@@ -93,6 +101,9 @@ do_multicall(
- guest_handle_add_offset(call_list, 1);
- }
-
-+ if ( unlikely(disp == mc_preempt) && i < nr_calls )
-+ goto preempted;
-+
- perfc_incr(calls_to_multicall);
- perfc_add(calls_from_multicall, i);
- mcs->flags = 0;
---- a/xen/include/xen/multicall.h
-+++ b/xen/include/xen/multicall.h
-@@ -24,6 +24,10 @@ struct mc_state {
- };
- };
-
--void arch_do_multicall_call(struct mc_state *mc);
-+enum mc_disposition {
-+ mc_continue,
-+ mc_exit,
-+ mc_preempt,
-+} arch_do_multicall_call(struct mc_state *mc);
-
- #endif /* __XEN_MULTICALL_H__ */
diff --git a/system/xen/xsa/xsa214.patch b/system/xen/xsa/xsa214.patch
deleted file mode 100644
index 46a3d3a4c6030..0000000000000
--- a/system/xen/xsa/xsa214.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: discard type information when stealing pages
-
-While a page having just a single general reference left necessarily
-has a zero type reference count too, its type may still be valid (and
-in validated state; at present this is only possible and relevant for
-PGT_seg_desc_page, as page tables have their type forcibly zapped when
-their type reference count drops to zero, and
-PGT_{writable,shared}_page pages don't require any validation). In
-such a case when the page is being re-used with the same type again,
-validation is being skipped. As validation criteria differ between
-32- and 64-bit guests, pages to be transferred between guests need to
-have their validation indicator zapped (and with it we zap all other
-type information at once).
-
-This is XSA-214.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -4466,6 +4466,17 @@ int steal_page(
- y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
- } while ( y != x );
-
-+ /*
-+ * With the sole reference dropped temporarily, no-one can update type
-+ * information. Type count also needs to be zero in this case, but e.g.
-+ * PGT_seg_desc_page may still have PGT_validated set, which we need to
-+ * clear before transferring ownership (as validation criteria vary
-+ * depending on domain type).
-+ */
-+ BUG_ON(page->u.inuse.type_info & (PGT_count_mask | PGT_locked |
-+ PGT_pinned));
-+ page->u.inuse.type_info = 0;
-+
- /* Swizzle the owner then reinstate the PGC_allocated reference. */
- page_set_owner(page, NULL);
- y = page->count_info;
diff --git a/system/xen/xsa/xsa216-qemuu.patch b/system/xen/xsa/xsa216-qemuu.patch
deleted file mode 100644
index dd4d90d2ce31f..0000000000000
--- a/system/xen/xsa/xsa216-qemuu.patch
+++ /dev/null
@@ -1,113 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: xen/disk: don't leak stack data via response ring
-
-Rather than constructing a local structure instance on the stack, fill
-the fields directly on the shared ring, just like other (Linux)
-backends do. Build on the fact that all response structure flavors are
-actually identical (the old code did make this assumption too).
-
-This is XSA-216.
-
-Reported-by: Anthony Perard <anthony.perard@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Acked-by: Anthony PERARD <anthony.perard@citrix.com>
----
-v2: Add QEMU_PACKED to fix handling 32-bit guests by 64-bit qemu.
-
---- a/hw/block/xen_blkif.h
-+++ b/hw/block/xen_blkif.h
-@@ -14,9 +14,6 @@
- struct blkif_common_request {
- char dummy;
- };
--struct blkif_common_response {
-- char dummy;
--};
-
- /* i386 protocol version */
- #pragma pack(push, 4)
-@@ -36,13 +33,7 @@ struct blkif_x86_32_request_discard {
- blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
- uint64_t nr_sectors; /* # of contiguous sectors to discard */
- };
--struct blkif_x86_32_response {
-- uint64_t id; /* copied from request */
-- uint8_t operation; /* copied from request */
-- int16_t status; /* BLKIF_RSP_??? */
--};
- typedef struct blkif_x86_32_request blkif_x86_32_request_t;
--typedef struct blkif_x86_32_response blkif_x86_32_response_t;
- #pragma pack(pop)
-
- /* x86_64 protocol version */
-@@ -62,20 +53,14 @@ struct blkif_x86_64_request_discard {
- blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
- uint64_t nr_sectors; /* # of contiguous sectors to discard */
- };
--struct blkif_x86_64_response {
-- uint64_t __attribute__((__aligned__(8))) id;
-- uint8_t operation; /* copied from request */
-- int16_t status; /* BLKIF_RSP_??? */
--};
- typedef struct blkif_x86_64_request blkif_x86_64_request_t;
--typedef struct blkif_x86_64_response blkif_x86_64_response_t;
-
- DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-- struct blkif_common_response);
-+ struct blkif_response);
- DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-- struct blkif_x86_32_response);
-+ struct blkif_response QEMU_PACKED);
- DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-- struct blkif_x86_64_response);
-+ struct blkif_response);
-
- union blkif_back_rings {
- blkif_back_ring_t native;
---- a/hw/block/xen_disk.c
-+++ b/hw/block/xen_disk.c
-@@ -769,31 +769,30 @@ static int blk_send_response_one(struct
- struct XenBlkDev *blkdev = ioreq->blkdev;
- int send_notify = 0;
- int have_requests = 0;
-- blkif_response_t resp;
-- void *dst;
--
-- resp.id = ioreq->req.id;
-- resp.operation = ioreq->req.operation;
-- resp.status = ioreq->status;
-+ blkif_response_t *resp;
-
- /* Place on the response ring for the relevant domain. */
- switch (blkdev->protocol) {
- case BLKIF_PROTOCOL_NATIVE:
-- dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
-+ resp = RING_GET_RESPONSE(&blkdev->rings.native,
-+ blkdev->rings.native.rsp_prod_pvt);
- break;
- case BLKIF_PROTOCOL_X86_32:
-- dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
-- blkdev->rings.x86_32_part.rsp_prod_pvt);
-+ resp = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
-+ blkdev->rings.x86_32_part.rsp_prod_pvt);
- break;
- case BLKIF_PROTOCOL_X86_64:
-- dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
-- blkdev->rings.x86_64_part.rsp_prod_pvt);
-+ resp = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
-+ blkdev->rings.x86_64_part.rsp_prod_pvt);
- break;
- default:
-- dst = NULL;
- return 0;
- }
-- memcpy(dst, &resp, sizeof(resp));
-+
-+ resp->id = ioreq->req.id;
-+ resp->operation = ioreq->req.operation;
-+ resp->status = ioreq->status;
-+
- blkdev->rings.common.rsp_prod_pvt++;
-
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
diff --git a/system/xen/xsa/xsa217.patch b/system/xen/xsa/xsa217.patch
deleted file mode 100644
index 1d4eb01f232b5..0000000000000
--- a/system/xen/xsa/xsa217.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/mm: disallow page stealing from HVM domains
-
-The operation's success can't be controlled by the guest, as the device
-model may have an active mapping of the page. If we nevertheless
-permitted this operation, we'd have to add further TLB flushing to
-prevent scenarios like
-
-"Domains A (HVM), B (PV), C (PV); B->target==A
- Steps:
- 1. B maps page X from A as writable
- 2. B unmaps page X without a TLB flush
- 3. A sends page X to C via GNTTABOP_transfer
- 4. C maps page X as pagetable (potentially causing a TLB flush in C,
- but not in B)
-
- At this point, X would be mapped as a pagetable in C while being
- writable through a stale TLB entry in B."
-
-A similar scenario could be constructed for A using XENMEM_exchange and
-some arbitrary PV domain C then having this page allocated.
-
-This is XSA-217.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Acked-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -4449,6 +4449,9 @@ int steal_page(
- bool_t drop_dom_ref = 0;
- const struct domain *owner = dom_xen;
-
-+ if ( paging_mode_external(d) )
-+ return -1;
-+
- spin_lock(&d->page_alloc_lock);
-
- if ( is_xen_heap_page(page) || ((owner = page_get_owner(page)) != d) )
diff --git a/system/xen/xsa/xsa218-0001-gnttab-fix-unmap-pin-accounting-race.patch b/system/xen/xsa/xsa218-0001-gnttab-fix-unmap-pin-accounting-race.patch
deleted file mode 100644
index ecdf0943ef16d..0000000000000
--- a/system/xen/xsa/xsa218-0001-gnttab-fix-unmap-pin-accounting-race.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From 25263d50f1440e3c1ff7782892e81f2612bcfce1 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Fri, 2 Jun 2017 12:22:42 +0100
-Subject: [PATCH 1/3] gnttab: fix unmap pin accounting race
-
-Once all {writable} mappings of a grant entry have been unmapped, the
-hypervisor informs the guest that the grant entry has been released by
-clearing the _GTF_{reading,writing} usage flags in the guest's grant
-table as appropriate.
-
-Unfortunately, at the moment, the code that updates the accounting
-happens in a different critical section than the one which updates the
-usage flags; this means that under the right circumstances, there may be
-a window in time after the hypervisor reported the grant as being free
-during which the grant referee still had access to the page.
-
-Move the grant accounting code into the same critical section as the
-reporting code to make sure this kind of race can't happen.
-
-This is part of XSA-218.
-
-Reported-by: Jann Horn <jannh.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/common/grant_table.c | 32 +++++++++++++++++---------------
- 1 file changed, 17 insertions(+), 15 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index e2c4097..d80bd49 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -1150,15 +1150,8 @@ __gnttab_unmap_common(
- PIN_FAIL(act_release_out, GNTST_general_error,
- "Bad frame number doesn't match gntref. (%lx != %lx)\n",
- op->frame, act->frame);
-- if ( op->flags & GNTMAP_device_map )
-- {
-- ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
-- op->map->flags &= ~GNTMAP_device_map;
-- if ( op->flags & GNTMAP_readonly )
-- act->pin -= GNTPIN_devr_inc;
-- else
-- act->pin -= GNTPIN_devw_inc;
-- }
-+
-+ op->map->flags &= ~GNTMAP_device_map;
- }
-
- if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
-@@ -1168,12 +1161,7 @@ __gnttab_unmap_common(
- op->flags)) < 0 )
- goto act_release_out;
-
-- ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
- op->map->flags &= ~GNTMAP_host_map;
-- if ( op->flags & GNTMAP_readonly )
-- act->pin -= GNTPIN_hstr_inc;
-- else
-- act->pin -= GNTPIN_hstw_inc;
- }
-
- act_release_out:
-@@ -1266,6 +1254,12 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- else
- put_page_and_type(pg);
- }
-+
-+ ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
-+ if ( op->flags & GNTMAP_readonly )
-+ act->pin -= GNTPIN_devr_inc;
-+ else
-+ act->pin -= GNTPIN_devw_inc;
- }
-
- if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
-@@ -1274,7 +1268,9 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- {
- /*
- * Suggests that __gntab_unmap_common failed in
-- * replace_grant_host_mapping() so nothing further to do
-+ * replace_grant_host_mapping() or IOMMU handling, so nothing
-+ * further to do (short of re-establishing the mapping in the
-+ * latter case).
- */
- goto act_release_out;
- }
-@@ -1285,6 +1281,12 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- put_page_type(pg);
- put_page(pg);
- }
-+
-+ ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
-+ if ( op->flags & GNTMAP_readonly )
-+ act->pin -= GNTPIN_hstr_inc;
-+ else
-+ act->pin -= GNTPIN_hstw_inc;
- }
-
- if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa218-0002-gnttab-Avoid-potential-double-put-of-maptrack-entry.patch b/system/xen/xsa/xsa218-0002-gnttab-Avoid-potential-double-put-of-maptrack-entry.patch
deleted file mode 100644
index b07c978b3c810..0000000000000
--- a/system/xen/xsa/xsa218-0002-gnttab-Avoid-potential-double-put-of-maptrack-entry.patch
+++ /dev/null
@@ -1,231 +0,0 @@
-From bb6d476b09e635baf5e9fb22540ab7c3530d1d98 Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Thu, 15 Jun 2017 12:05:14 +0100
-Subject: [PATCH 2/3] gnttab: Avoid potential double-put of maptrack entry
-
-Each grant mapping for a particular domain is tracked by an in-Xen
-"maptrack" entry. This entry is is referenced by a "handle", which is
-given to the guest when it calls gnttab_map_grant_ref().
-
-There are two types of mapping a particular handle can refer to:
-GNTMAP_host_map and GNTMAP_device_map. A given
-gnttab_unmap_grant_ref() call can remove either only one or both of
-these entries. When a particular handle has no entries left, it must
-be freed.
-
-gnttab_unmap_grant_ref() loops through its grant unmap request list
-twice. It first removes entries from any host pagetables and (if
-appropraite) iommus; then it does a single domain TLB flush; then it
-does the clean-up, including telling the granter that entries are no
-longer being used (if appropriate).
-
-At the moment, it's during the first pass that the maptrack flags are
-cleared, but the second pass that the maptrack entry is freed.
-
-Unfortunately this allows the following race, which results in a
-double-free:
-
- A: (pass 1) clear host_map
- B: (pass 1) clear device_map
- A: (pass 2) See that maptrack entry has no mappings, free it
- B: (pass 2) See that maptrack entry has no mappings, free it #
-
-Unfortunately, unlike the active entry pinning update, we can't simply
-move the maptrack flag changes to the second half, because the
-maptrack flags are used to determine if iommu entries need to be
-added: a domain's iommu must never have fewer permissions than the
-maptrack flags indicate, or a subsequent map_grant_ref() might fail to
-add the necessary iommu entries.
-
-Instead, free the maptrack entry in the first pass if there are no
-further mappings.
-
-This is part of XSA-218.
-
-Reported-by: Jan Beulich <jbeulich.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/common/grant_table.c | 77 +++++++++++++++++++++++++++++++++---------------
- 1 file changed, 53 insertions(+), 24 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index d80bd49..ba10e76 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -98,8 +98,8 @@ struct gnttab_unmap_common {
- /* Shared state beteen *_unmap and *_unmap_complete */
- u16 flags;
- unsigned long frame;
-- struct grant_mapping *map;
- struct domain *rd;
-+ grant_ref_t ref;
- };
-
- /* Number of unmap operations that are done between each tlb flush */
-@@ -1079,6 +1079,8 @@ __gnttab_unmap_common(
- struct grant_table *lgt, *rgt;
- struct active_grant_entry *act;
- s16 rc = 0;
-+ struct grant_mapping *map;
-+ bool put_handle = false;
-
- ld = current->domain;
- lgt = ld->grant_table;
-@@ -1092,11 +1094,11 @@ __gnttab_unmap_common(
- return;
- }
-
-- op->map = &maptrack_entry(lgt, op->handle);
-+ map = &maptrack_entry(lgt, op->handle);
-
- grant_read_lock(lgt);
-
-- if ( unlikely(!read_atomic(&op->map->flags)) )
-+ if ( unlikely(!read_atomic(&map->flags)) )
- {
- grant_read_unlock(lgt);
- gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
-@@ -1104,7 +1106,7 @@ __gnttab_unmap_common(
- return;
- }
-
-- dom = op->map->domid;
-+ dom = map->domid;
- grant_read_unlock(lgt);
-
- if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
-@@ -1129,16 +1131,43 @@ __gnttab_unmap_common(
-
- grant_read_lock(rgt);
-
-- op->flags = read_atomic(&op->map->flags);
-- if ( unlikely(!op->flags) || unlikely(op->map->domid != dom) )
-+ op->rd = rd;
-+ op->ref = map->ref;
-+
-+ /*
-+ * We can't assume there was no racing unmap for this maptrack entry,
-+ * and hence we can't assume map->ref is valid for rd. While the checks
-+ * below (with the active entry lock held) will reject any such racing
-+ * requests, we still need to make sure we don't attempt to acquire an
-+ * invalid lock.
-+ */
-+ smp_rmb();
-+ if ( unlikely(op->ref >= nr_grant_entries(rgt)) )
- {
- gdprintk(XENLOG_WARNING, "Unstable handle %u\n", op->handle);
- rc = GNTST_bad_handle;
-- goto unmap_out;
-+ goto unlock_out;
- }
-
-- op->rd = rd;
-- act = active_entry_acquire(rgt, op->map->ref);
-+ act = active_entry_acquire(rgt, op->ref);
-+
-+ /*
-+ * Note that we (ab)use the active entry lock here to protect against
-+ * multiple unmaps of the same mapping here. We don't want to hold lgt's
-+ * lock, and we only hold rgt's lock for reading (but the latter wouldn't
-+ * be the right one anyway). Hence the easiest is to rely on a lock we
-+ * hold anyway; see docs/misc/grant-tables.txt's "Locking" section.
-+ */
-+
-+ op->flags = read_atomic(&map->flags);
-+ smp_rmb();
-+ if ( unlikely(!op->flags) || unlikely(map->domid != dom) ||
-+ unlikely(map->ref != op->ref) )
-+ {
-+ gdprintk(XENLOG_WARNING, "Unstable handle %#x\n", op->handle);
-+ rc = GNTST_bad_handle;
-+ goto act_release_out;
-+ }
-
- if ( op->frame == 0 )
- {
-@@ -1151,7 +1180,7 @@ __gnttab_unmap_common(
- "Bad frame number doesn't match gntref. (%lx != %lx)\n",
- op->frame, act->frame);
-
-- op->map->flags &= ~GNTMAP_device_map;
-+ map->flags &= ~GNTMAP_device_map;
- }
-
- if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
-@@ -1161,14 +1190,23 @@ __gnttab_unmap_common(
- op->flags)) < 0 )
- goto act_release_out;
-
-- op->map->flags &= ~GNTMAP_host_map;
-+ map->flags &= ~GNTMAP_host_map;
-+ }
-+
-+ if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
-+ {
-+ map->flags = 0;
-+ put_handle = true;
- }
-
- act_release_out:
- active_entry_release(act);
-- unmap_out:
-+ unlock_out:
- grant_read_unlock(rgt);
-
-+ if ( put_handle )
-+ put_maptrack_handle(lgt, op->handle);
-+
- if ( rc == GNTST_okay && gnttab_need_iommu_mapping(ld) )
- {
- unsigned int kind;
-@@ -1205,7 +1243,6 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- grant_entry_header_t *sha;
- struct page_info *pg;
- uint16_t *status;
-- bool_t put_handle = 0;
-
- if ( rd == NULL )
- {
-@@ -1226,13 +1263,13 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- if ( rgt->gt_version == 0 )
- goto unlock_out;
-
-- act = active_entry_acquire(rgt, op->map->ref);
-- sha = shared_entry_header(rgt, op->map->ref);
-+ act = active_entry_acquire(rgt, op->ref);
-+ sha = shared_entry_header(rgt, op->ref);
-
- if ( rgt->gt_version == 1 )
- status = &sha->flags;
- else
-- status = &status_entry(rgt, op->map->ref);
-+ status = &status_entry(rgt, op->ref);
-
- if ( unlikely(op->frame != act->frame) )
- {
-@@ -1289,9 +1326,6 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- act->pin -= GNTPIN_hstw_inc;
- }
-
-- if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
-- put_handle = 1;
--
- if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
- !(op->flags & GNTMAP_readonly) )
- gnttab_clear_flag(_GTF_writing, status);
-@@ -1304,11 +1338,6 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- unlock_out:
- grant_read_unlock(rgt);
-
-- if ( put_handle )
-- {
-- op->map->flags = 0;
-- put_maptrack_handle(ld->grant_table, op->handle);
-- }
- rcu_unlock_domain(rd);
- }
-
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa218-0003-gnttab-correct-maptrack-table-accesses.patch b/system/xen/xsa/xsa218-0003-gnttab-correct-maptrack-table-accesses.patch
deleted file mode 100644
index 60e1583f0f64f..0000000000000
--- a/system/xen/xsa/xsa218-0003-gnttab-correct-maptrack-table-accesses.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From 29f04a077972e07c86c9e911005220f6d691ffa6 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Thu, 15 Jun 2017 12:05:29 +0100
-Subject: [PATCH 3/3] gnttab: correct maptrack table accesses
-
-In order to observe a consistent (limit,pointer-table) pair, the reader
-needs to either hold the maptrack lock (in line with documentation) or
-both sides need to order their accesses suitably (the writer side
-barrier was removed by commit dff515dfea ["gnttab: use per-VCPU
-maptrack free lists"], and a read side barrier has never been there).
-
-Make the writer publish a new table page before limit (for bounds
-checks to work), and new list head last (for racing maptrack_entry()
-invocations to work). At the same time add read barriers to lockless
-readers.
-
-Additionally get_maptrack_handle() must not assume ->maptrack_head to
-not change behind its back: Another handle may be put (updating only
-->maptrack_tail) and then got or stolen (updating ->maptrack_head).
-
-This is part of XSA-218.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
----
- xen/common/grant_table.c | 13 +++++++++----
- 1 file changed, 9 insertions(+), 4 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index ba10e76..627947a 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -395,7 +395,7 @@ get_maptrack_handle(
- struct grant_table *lgt)
- {
- struct vcpu *curr = current;
-- int i;
-+ unsigned int i, head;
- grant_handle_t handle;
- struct grant_mapping *new_mt;
-
-@@ -451,17 +451,20 @@ get_maptrack_handle(
- new_mt[i].ref = handle + i + 1;
- new_mt[i].vcpu = curr->vcpu_id;
- }
-- new_mt[i - 1].ref = curr->maptrack_head;
-
- /* Set tail directly if this is the first page for this VCPU. */
- if ( curr->maptrack_tail == MAPTRACK_TAIL )
- curr->maptrack_tail = handle + MAPTRACK_PER_PAGE - 1;
-
-- write_atomic(&curr->maptrack_head, handle + 1);
--
- lgt->maptrack[nr_maptrack_frames(lgt)] = new_mt;
-+ smp_wmb();
- lgt->maptrack_limit += MAPTRACK_PER_PAGE;
-
-+ do {
-+ new_mt[i - 1].ref = read_atomic(&curr->maptrack_head);
-+ head = cmpxchg(&curr->maptrack_head, new_mt[i - 1].ref, handle + 1);
-+ } while ( head != new_mt[i - 1].ref );
-+
- spin_unlock(&lgt->maptrack_lock);
-
- return handle;
-@@ -727,6 +730,7 @@ static unsigned int mapkind(
- for ( handle = 0; !(kind & MAPKIND_WRITE) &&
- handle < lgt->maptrack_limit; handle++ )
- {
-+ smp_rmb();
- map = &maptrack_entry(lgt, handle);
- if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) ||
- map->domid != rd->domain_id )
-@@ -1094,6 +1098,7 @@ __gnttab_unmap_common(
- return;
- }
-
-+ smp_rmb();
- map = &maptrack_entry(lgt, op->handle);
-
- grant_read_lock(lgt);
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa219-4.8.patch b/system/xen/xsa/xsa219-4.8.patch
deleted file mode 100644
index 68c4677da3c69..0000000000000
--- a/system/xen/xsa/xsa219-4.8.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 3986b845e87c3f963227ece86bb633450761ec18 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Thu, 11 May 2017 14:47:00 +0100
-Subject: [PATCH] x86/shadow: Hold references for the duration of emulated
- writes
-
-The (misnamed) emulate_gva_to_mfn() function translates a linear address to an
-mfn, but releases its page reference before returning the mfn to its caller.
-
-sh_emulate_map_dest() uses the results of one or two translations to construct
-a virtual mapping to the underlying frames, completes an emulated
-write/cmpxchg, then unmaps the virtual mappings.
-
-The page references need holding until the mappings are unmapped, or the
-frames can change ownership before the writes occurs.
-
-This is XSA-219
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
----
- xen/arch/x86/mm/shadow/common.c | 54 +++++++++++++++++++++++++++--------------
- 1 file changed, 36 insertions(+), 18 deletions(-)
-
-diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
-index ced2313..13305d2 100644
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -1703,7 +1703,10 @@ static unsigned int shadow_get_allocation(struct domain *d)
- /**************************************************************************/
- /* Handling guest writes to pagetables. */
-
--/* Translate a VA to an MFN, injecting a page-fault if we fail. */
-+/*
-+ * Translate a VA to an MFN, injecting a page-fault if we fail. If the
-+ * mapping succeeds, a reference will be held on the underlying page.
-+ */
- #define BAD_GVA_TO_GFN (~0UL)
- #define BAD_GFN_TO_MFN (~1UL)
- #define READONLY_GFN (~2UL)
-@@ -1751,16 +1754,15 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
- ASSERT(mfn_valid(mfn));
-
- v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
-- /*
-- * Note shadow cannot page out or unshare this mfn, so the map won't
-- * disappear. Otherwise, caller must hold onto page until done.
-- */
-- put_page(page);
-
- return mfn;
- }
-
--/* Check that the user is allowed to perform this write. */
-+/*
-+ * Check that the user is allowed to perform this write. If a mapping is
-+ * returned, page references will be held on sh_ctxt->mfn[0] and
-+ * sh_ctxt->mfn[1] iff !INVALID_MFN.
-+ */
- void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
- unsigned int bytes,
- struct sh_emulate_ctxt *sh_ctxt)
-@@ -1768,13 +1770,6 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
- struct domain *d = v->domain;
- void *map;
-
-- sh_ctxt->mfn[0] = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
-- if ( !mfn_valid(sh_ctxt->mfn[0]) )
-- return ((mfn_x(sh_ctxt->mfn[0]) == BAD_GVA_TO_GFN) ?
-- MAPPING_EXCEPTION :
-- (mfn_x(sh_ctxt->mfn[0]) == READONLY_GFN) ?
-- MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
--
- #ifndef NDEBUG
- /* We don't emulate user-mode writes to page tables. */
- if ( has_hvm_container_domain(d)
-@@ -1787,6 +1782,17 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
- }
- #endif
-
-+ sh_ctxt->mfn[0] = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
-+ if ( !mfn_valid(sh_ctxt->mfn[0]) )
-+ {
-+ switch ( mfn_x(sh_ctxt->mfn[0]) )
-+ {
-+ case BAD_GVA_TO_GFN: return MAPPING_EXCEPTION;
-+ case READONLY_GFN: return MAPPING_SILENT_FAIL;
-+ default: return MAPPING_UNHANDLEABLE;
-+ }
-+ }
-+
- /* Unaligned writes mean probably this isn't a pagetable. */
- if ( vaddr & (bytes - 1) )
- sh_remove_shadows(d, sh_ctxt->mfn[0], 0, 0 /* Slow, can fail. */ );
-@@ -1803,6 +1809,7 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
- * Cross-page emulated writes are only supported for HVM guests;
- * PV guests ought to know better.
- */
-+ put_page(mfn_to_page(sh_ctxt->mfn[0]));
- return MAPPING_UNHANDLEABLE;
- }
- else
-@@ -1810,17 +1817,26 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
- /* This write crosses a page boundary. Translate the second page. */
- sh_ctxt->mfn[1] = emulate_gva_to_mfn(v, vaddr + bytes - 1, sh_ctxt);
- if ( !mfn_valid(sh_ctxt->mfn[1]) )
-- return ((mfn_x(sh_ctxt->mfn[1]) == BAD_GVA_TO_GFN) ?
-- MAPPING_EXCEPTION :
-- (mfn_x(sh_ctxt->mfn[1]) == READONLY_GFN) ?
-- MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
-+ {
-+ put_page(mfn_to_page(sh_ctxt->mfn[0]));
-+ switch ( mfn_x(sh_ctxt->mfn[1]) )
-+ {
-+ case BAD_GVA_TO_GFN: return MAPPING_EXCEPTION;
-+ case READONLY_GFN: return MAPPING_SILENT_FAIL;
-+ default: return MAPPING_UNHANDLEABLE;
-+ }
-+ }
-
- /* Cross-page writes mean probably not a pagetable. */
- sh_remove_shadows(d, sh_ctxt->mfn[1], 0, 0 /* Slow, can fail. */ );
-
- map = vmap(sh_ctxt->mfn, 2);
- if ( !map )
-+ {
-+ put_page(mfn_to_page(sh_ctxt->mfn[0]));
-+ put_page(mfn_to_page(sh_ctxt->mfn[1]));
- return MAPPING_UNHANDLEABLE;
-+ }
- map += (vaddr & ~PAGE_MASK);
- }
-
-@@ -1890,10 +1906,12 @@ void sh_emulate_unmap_dest(struct vcpu *v, void *addr, unsigned int bytes,
- }
-
- paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[0]));
-+ put_page(mfn_to_page(sh_ctxt->mfn[0]));
-
- if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
- {
- paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[1]));
-+ put_page(mfn_to_page(sh_ctxt->mfn[1]));
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
- }
- else
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa220-4.8.patch b/system/xen/xsa/xsa220-4.8.patch
deleted file mode 100644
index 4a1ecd0d6d078..0000000000000
--- a/system/xen/xsa/xsa220-4.8.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: avoid leaking PKRU and BND* between vCPU-s
-
-PKRU is explicitly "XSAVE-managed but not XSAVE-enabled", so guests
-might access the register (via {RD,WR}PKRU) without setting XCR0.PKRU.
-Force context switching as well as migrating the register as soon as
-CR4.PKE is being set the first time.
-
-For MPX (BND<n>, BNDCFGU, and BNDSTATUS) the situation is less clear,
-and the SDM has not entirely consistent information for that case.
-While experimentally the instructions don't change register state as
-long as the two XCR0 bits aren't both 1, be on the safe side and enable
-both if BNDCFGS.EN is being set the first time.
-
-This is XSA-220.
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -311,10 +311,39 @@ int hvm_set_guest_pat(struct vcpu *v, u6
-
- bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val)
- {
-- return hvm_funcs.set_guest_bndcfgs &&
-- is_canonical_address(val) &&
-- !(val & IA32_BNDCFGS_RESERVED) &&
-- hvm_funcs.set_guest_bndcfgs(v, val);
-+ if ( !hvm_funcs.set_guest_bndcfgs ||
-+ !is_canonical_address(val) ||
-+ (val & IA32_BNDCFGS_RESERVED) )
-+ return false;
-+
-+ /*
-+ * While MPX instructions are supposed to be gated on XCR0.BND*, let's
-+ * nevertheless force the relevant XCR0 bits on when the feature is being
-+ * enabled in BNDCFGS.
-+ */
-+ if ( (val & IA32_BNDCFGS_ENABLE) &&
-+ !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
-+ {
-+ uint64_t xcr0 = get_xcr0();
-+ int rc;
-+
-+ if ( v != current )
-+ return false;
-+
-+ rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
-+ xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
-+
-+ if ( rc )
-+ {
-+ HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.BND*: %d", rc);
-+ return false;
-+ }
-+
-+ if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0) )
-+ /* nothing, best effort only */;
-+ }
-+
-+ return hvm_funcs.set_guest_bndcfgs(v, val);
- }
-
- /*
-@@ -2477,6 +2506,27 @@ int hvm_set_cr4(unsigned long value, boo
- paging_update_paging_modes(v);
- }
-
-+ /*
-+ * {RD,WR}PKRU are not gated on XCR0.PKRU and hence an oddly behaving
-+ * guest may enable the feature in CR4 without enabling it in XCR0. We
-+ * need to context switch / migrate PKRU nevertheless.
-+ */
-+ if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & XSTATE_PKRU) )
-+ {
-+ int rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
-+ get_xcr0() | XSTATE_PKRU);
-+
-+ if ( rc )
-+ {
-+ HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.PKRU: %d", rc);
-+ goto gpf;
-+ }
-+
-+ if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
-+ get_xcr0() & ~XSTATE_PKRU) )
-+ /* nothing, best effort only */;
-+ }
-+
- return X86EMUL_OKAY;
-
- gpf:
diff --git a/system/xen/xsa/xsa221.patch b/system/xen/xsa/xsa221.patch
deleted file mode 100644
index c7fec966683ff..0000000000000
--- a/system/xen/xsa/xsa221.patch
+++ /dev/null
@@ -1,194 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: evtchn: avoid NULL derefs
-
-Commit fbbd5009e6 ("evtchn: refactor low-level event channel port ops")
-added a de-reference of the struct evtchn pointer for a port without
-first making sure the bucket pointer is non-NULL. This de-reference is
-actually entirely unnecessary, as all relevant callers (beyond the
-problematic do_poll()) already hold the port number in their hands, and
-the actual leaf functions need nothing else.
-
-For FIFO event channels there's a second problem in that the ordering
-of reads and updates to ->num_evtchns and ->event_array[] was so far
-undefined (the read side isn't always holding the domain's event lock).
-Add respective barriers.
-
-This is XSA-221.
-
-Reported-by: Ankur Arora <ankur.a.arora@oracle.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/irq.c
-+++ b/xen/arch/x86/irq.c
-@@ -1486,7 +1486,7 @@ int pirq_guest_unmask(struct domain *d)
- {
- pirq = pirqs[i]->pirq;
- if ( pirqs[i]->masked &&
-- !evtchn_port_is_masked(d, evtchn_from_port(d, pirqs[i]->evtchn)) )
-+ !evtchn_port_is_masked(d, pirqs[i]->evtchn) )
- pirq_guest_eoi(pirqs[i]);
- }
- } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
-@@ -2244,7 +2244,6 @@ static void dump_irqs(unsigned char key)
- int i, irq, pirq;
- struct irq_desc *desc;
- irq_guest_action_t *action;
-- struct evtchn *evtchn;
- struct domain *d;
- const struct pirq *info;
- unsigned long flags;
-@@ -2287,11 +2286,10 @@ static void dump_irqs(unsigned char key)
- d = action->guest[i];
- pirq = domain_irq_to_pirq(d, irq);
- info = pirq_info(d, pirq);
-- evtchn = evtchn_from_port(d, info->evtchn);
- printk("%u:%3d(%c%c%c)",
- d->domain_id, pirq,
-- (evtchn_port_is_pending(d, evtchn) ? 'P' : '-'),
-- (evtchn_port_is_masked(d, evtchn) ? 'M' : '-'),
-+ evtchn_port_is_pending(d, info->evtchn) ? 'P' : '-',
-+ evtchn_port_is_masked(d, info->evtchn) ? 'M' : '-',
- (info->masked ? 'M' : '-'));
- if ( i != action->nr_guests )
- printk(",");
---- a/xen/common/event_2l.c
-+++ b/xen/common/event_2l.c
-@@ -61,16 +61,20 @@ static void evtchn_2l_unmask(struct doma
- }
- }
-
--static bool_t evtchn_2l_is_pending(struct domain *d,
-- const struct evtchn *evtchn)
-+static bool_t evtchn_2l_is_pending(struct domain *d, evtchn_port_t port)
- {
-- return test_bit(evtchn->port, &shared_info(d, evtchn_pending));
-+ unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
-+
-+ ASSERT(port < max_ports);
-+ return port < max_ports && test_bit(port, &shared_info(d, evtchn_pending));
- }
-
--static bool_t evtchn_2l_is_masked(struct domain *d,
-- const struct evtchn *evtchn)
-+static bool_t evtchn_2l_is_masked(struct domain *d, evtchn_port_t port)
- {
-- return test_bit(evtchn->port, &shared_info(d, evtchn_mask));
-+ unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
-+
-+ ASSERT(port < max_ports);
-+ return port >= max_ports || test_bit(port, &shared_info(d, evtchn_mask));
- }
-
- static void evtchn_2l_print_state(struct domain *d,
---- a/xen/common/event_channel.c
-+++ b/xen/common/event_channel.c
-@@ -1380,8 +1380,8 @@ static void domain_dump_evtchn_info(stru
-
- printk(" %4u [%d/%d/",
- port,
-- !!evtchn_port_is_pending(d, chn),
-- !!evtchn_port_is_masked(d, chn));
-+ evtchn_port_is_pending(d, port),
-+ evtchn_port_is_masked(d, port));
- evtchn_port_print_state(d, chn);
- printk("]: s=%d n=%d x=%d",
- chn->state, chn->notify_vcpu_id, chn->xen_consumer);
---- a/xen/common/event_fifo.c
-+++ b/xen/common/event_fifo.c
-@@ -27,6 +27,12 @@ static inline event_word_t *evtchn_fifo_
- if ( unlikely(port >= d->evtchn_fifo->num_evtchns) )
- return NULL;
-
-+ /*
-+ * Callers aren't required to hold d->event_lock, so we need to synchronize
-+ * with add_page_to_event_array().
-+ */
-+ smp_rmb();
-+
- p = port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
- w = port % EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
-
-@@ -287,24 +293,22 @@ static void evtchn_fifo_unmask(struct do
- evtchn_fifo_set_pending(v, evtchn);
- }
-
--static bool_t evtchn_fifo_is_pending(struct domain *d,
-- const struct evtchn *evtchn)
-+static bool_t evtchn_fifo_is_pending(struct domain *d, evtchn_port_t port)
- {
- event_word_t *word;
-
-- word = evtchn_fifo_word_from_port(d, evtchn->port);
-+ word = evtchn_fifo_word_from_port(d, port);
- if ( unlikely(!word) )
- return 0;
-
- return test_bit(EVTCHN_FIFO_PENDING, word);
- }
-
--static bool_t evtchn_fifo_is_masked(struct domain *d,
-- const struct evtchn *evtchn)
-+static bool_t evtchn_fifo_is_masked(struct domain *d, evtchn_port_t port)
- {
- event_word_t *word;
-
-- word = evtchn_fifo_word_from_port(d, evtchn->port);
-+ word = evtchn_fifo_word_from_port(d, port);
- if ( unlikely(!word) )
- return 1;
-
-@@ -593,6 +597,10 @@ static int add_page_to_event_array(struc
- return rc;
-
- d->evtchn_fifo->event_array[slot] = virt;
-+
-+ /* Synchronize with evtchn_fifo_word_from_port(). */
-+ smp_wmb();
-+
- d->evtchn_fifo->num_evtchns += EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
-
- /*
---- a/xen/common/schedule.c
-+++ b/xen/common/schedule.c
-@@ -965,7 +965,7 @@ static long do_poll(struct sched_poll *s
- goto out;
-
- rc = 0;
-- if ( evtchn_port_is_pending(d, evtchn_from_port(d, port)) )
-+ if ( evtchn_port_is_pending(d, port) )
- goto out;
- }
-
---- a/xen/include/xen/event.h
-+++ b/xen/include/xen/event.h
-@@ -137,8 +137,8 @@ struct evtchn_port_ops {
- void (*set_pending)(struct vcpu *v, struct evtchn *evtchn);
- void (*clear_pending)(struct domain *d, struct evtchn *evtchn);
- void (*unmask)(struct domain *d, struct evtchn *evtchn);
-- bool_t (*is_pending)(struct domain *d, const struct evtchn *evtchn);
-- bool_t (*is_masked)(struct domain *d, const struct evtchn *evtchn);
-+ bool_t (*is_pending)(struct domain *d, evtchn_port_t port);
-+ bool_t (*is_masked)(struct domain *d, evtchn_port_t port);
- /*
- * Is the port unavailable because it's still being cleaned up
- * after being closed?
-@@ -175,15 +175,15 @@ static inline void evtchn_port_unmask(st
- }
-
- static inline bool_t evtchn_port_is_pending(struct domain *d,
-- const struct evtchn *evtchn)
-+ evtchn_port_t port)
- {
-- return d->evtchn_port_ops->is_pending(d, evtchn);
-+ return d->evtchn_port_ops->is_pending(d, port);
- }
-
- static inline bool_t evtchn_port_is_masked(struct domain *d,
-- const struct evtchn *evtchn)
-+ evtchn_port_t port)
- {
-- return d->evtchn_port_ops->is_masked(d, evtchn);
-+ return d->evtchn_port_ops->is_masked(d, port);
- }
-
- static inline bool_t evtchn_port_is_busy(struct domain *d, evtchn_port_t port)
diff --git a/system/xen/xsa/xsa222-1.patch b/system/xen/xsa/xsa222-1.patch
deleted file mode 100644
index 6f1290b6a8d62..0000000000000
--- a/system/xen/xsa/xsa222-1.patch
+++ /dev/null
@@ -1,124 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: xen/memory: Fix return value handing of guest_remove_page()
-
-Despite the description in mm.h, guest_remove_page() previously returned 0 for
-paging errors.
-
-Switch guest_remove_page() to having regular 0/-error semantics, and propagate
-the return values from clear_mmio_p2m_entry() and mem_sharing_unshare_page()
-to the callers (although decrease_reservation() is the only caller which
-currently cares).
-
-This is part of XSA-222.
-
-Reported-by: Julien Grall <julien.grall@arm.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
-diff --git a/xen/common/memory.c b/xen/common/memory.c
-index 52879e7..a40bc1c 100644
---- a/xen/common/memory.c
-+++ b/xen/common/memory.c
-@@ -265,6 +265,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
- p2m_type_t p2mt;
- #endif
- mfn_t mfn;
-+ int rc;
-
- #ifdef CONFIG_X86
- mfn = get_gfn_query(d, gmfn, &p2mt);
-@@ -282,13 +283,15 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
- put_page(page);
- }
- p2m_mem_paging_drop_page(d, gmfn, p2mt);
-- return 1;
-+
-+ return 0;
- }
- if ( p2mt == p2m_mmio_direct )
- {
-- clear_mmio_p2m_entry(d, gmfn, mfn, 0);
-+ rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
- put_gfn(d, gmfn);
-- return 1;
-+
-+ return rc;
- }
- #else
- mfn = gfn_to_mfn(d, _gfn(gmfn));
-@@ -298,21 +301,25 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
- put_gfn(d, gmfn);
- gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
- d->domain_id, gmfn);
-- return 0;
-+
-+ return -EINVAL;
- }
-
- #ifdef CONFIG_X86
- if ( p2m_is_shared(p2mt) )
- {
-- /* Unshare the page, bail out on error. We unshare because
-- * we might be the only one using this shared page, and we
-- * need to trigger proper cleanup. Once done, this is
-- * like any other page. */
-- if ( mem_sharing_unshare_page(d, gmfn, 0) )
-+ /*
-+ * Unshare the page, bail out on error. We unshare because we
-+ * might be the only one using this shared page, and we need to
-+ * trigger proper cleanup. Once done, this is like any other page.
-+ */
-+ rc = mem_sharing_unshare_page(d, gmfn, 0);
-+ if ( rc )
- {
- put_gfn(d, gmfn);
- (void)mem_sharing_notify_enomem(d, gmfn, 0);
-- return 0;
-+
-+ return rc;
- }
- /* Maybe the mfn changed */
- mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
-@@ -325,7 +332,8 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
- {
- put_gfn(d, gmfn);
- gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
-- return 0;
-+
-+ return -ENXIO;
- }
-
- if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
-@@ -348,7 +356,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
- put_page(page);
- put_gfn(d, gmfn);
-
-- return 1;
-+ return 0;
- }
-
- static void decrease_reservation(struct memop_args *a)
-@@ -392,7 +400,7 @@ static void decrease_reservation(struct memop_args *a)
- continue;
-
- for ( j = 0; j < (1 << a->extent_order); j++ )
-- if ( !guest_remove_page(a->domain, gmfn + j) )
-+ if ( guest_remove_page(a->domain, gmfn + j) )
- goto out;
- }
-
-diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
-index 88de3c1..b367930 100644
---- a/xen/include/xen/mm.h
-+++ b/xen/include/xen/mm.h
-@@ -553,9 +553,8 @@ int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
- union xen_add_to_physmap_batch_extra extra,
- unsigned long idx, gfn_t gfn);
-
--/* Returns 1 on success, 0 on error, negative if the ring
-- * for event propagation is full in the presence of paging */
--int guest_remove_page(struct domain *d, unsigned long gfn);
-+/* Returns 0 on success, or negative on error. */
-+int guest_remove_page(struct domain *d, unsigned long gmfn);
-
- #define RAM_TYPE_CONVENTIONAL 0x00000001
- #define RAM_TYPE_RESERVED 0x00000002
diff --git a/system/xen/xsa/xsa222-2-4.8.patch b/system/xen/xsa/xsa222-2-4.8.patch
deleted file mode 100644
index 2825d2a0614af..0000000000000
--- a/system/xen/xsa/xsa222-2-4.8.patch
+++ /dev/null
@@ -1,405 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: guest_physmap_remove_page() needs its return value checked
-
-Callers, namely such subsequently freeing the page, must not blindly
-assume success - the function may namely fail when needing to shatter a
-super page, but there not being memory available for the then needed
-intermediate page table.
-
-As it happens, guest_remove_page() callers now also all check the
-return value.
-
-Furthermore a missed put_gfn() on an error path in gnttab_transfer() is
-also being taken care of.
-
-This is part of XSA-222.
-
-Reported-by: Julien Grall <julien.grall@arm.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Julien Grall <julien.grall@arm.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/arm/mm.c
-+++ b/xen/arch/arm/mm.c
-@@ -1340,13 +1340,14 @@ int replace_grant_host_mapping(unsigned
- {
- gfn_t gfn = _gfn(addr >> PAGE_SHIFT);
- struct domain *d = current->domain;
-+ int rc;
-
- if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
- return GNTST_general_error;
-
-- guest_physmap_remove_page(d, gfn, _mfn(mfn), 0);
-+ rc = guest_physmap_remove_page(d, gfn, _mfn(mfn), 0);
-
-- return GNTST_okay;
-+ return rc ? GNTST_general_error : GNTST_okay;
- }
-
- int is_iomem_page(unsigned long mfn)
---- a/xen/arch/arm/p2m.c
-+++ b/xen/arch/arm/p2m.c
-@@ -1211,11 +1211,10 @@ int guest_physmap_add_entry(struct domai
- return p2m_insert_mapping(d, gfn, (1 << page_order), mfn, t);
- }
-
--void guest_physmap_remove_page(struct domain *d,
-- gfn_t gfn,
-- mfn_t mfn, unsigned int page_order)
-+int guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
-+ unsigned int page_order)
- {
-- p2m_remove_mapping(d, gfn, (1 << page_order), mfn);
-+ return p2m_remove_mapping(d, gfn, (1 << page_order), mfn);
- }
-
- static int p2m_alloc_table(struct domain *d)
---- a/xen/arch/x86/domain.c
-+++ b/xen/arch/x86/domain.c
-@@ -808,7 +808,15 @@ int arch_domain_soft_reset(struct domain
- ret = -ENOMEM;
- goto exit_put_gfn;
- }
-- guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
-+
-+ ret = guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
-+ if ( ret )
-+ {
-+ printk(XENLOG_G_ERR "Failed to remove Dom%d's shared_info frame %lx\n",
-+ d->domain_id, gfn);
-+ free_domheap_page(new_page);
-+ goto exit_put_gfn;
-+ }
-
- ret = guest_physmap_add_page(d, _gfn(gfn), _mfn(page_to_mfn(new_page)),
- PAGE_ORDER_4K);
---- a/xen/arch/x86/domain_build.c
-+++ b/xen/arch/x86/domain_build.c
-@@ -427,7 +427,9 @@ static __init void pvh_add_mem_mapping(s
- if ( !iomem_access_permitted(d, mfn + i, mfn + i) )
- {
- omfn = get_gfn_query_unlocked(d, gfn + i, &t);
-- guest_physmap_remove_page(d, _gfn(gfn + i), omfn, PAGE_ORDER_4K);
-+ if ( guest_physmap_remove_page(d, _gfn(gfn + i), omfn,
-+ PAGE_ORDER_4K) )
-+ /* nothing, best effort only */;
- continue;
- }
-
---- a/xen/arch/x86/hvm/ioreq.c
-+++ b/xen/arch/x86/hvm/ioreq.c
-@@ -267,8 +267,9 @@ bool_t is_ioreq_server_page(struct domai
- static void hvm_remove_ioreq_gmfn(
- struct domain *d, struct hvm_ioreq_page *iorp)
- {
-- guest_physmap_remove_page(d, _gfn(iorp->gmfn),
-- _mfn(page_to_mfn(iorp->page)), 0);
-+ if ( guest_physmap_remove_page(d, _gfn(iorp->gmfn),
-+ _mfn(page_to_mfn(iorp->page)), 0) )
-+ domain_crash(d);
- clear_page(iorp->va);
- }
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -4276,7 +4276,11 @@ static int replace_grant_p2m_mapping(
- type, mfn_x(old_mfn), frame);
- return GNTST_general_error;
- }
-- guest_physmap_remove_page(d, _gfn(gfn), _mfn(frame), PAGE_ORDER_4K);
-+ if ( guest_physmap_remove_page(d, _gfn(gfn), _mfn(frame), PAGE_ORDER_4K) )
-+ {
-+ put_gfn(d, gfn);
-+ return GNTST_general_error;
-+ }
-
- put_gfn(d, gfn);
- return GNTST_okay;
-@@ -4798,7 +4802,7 @@ int xenmem_add_to_physmap_one(
- struct page_info *page = NULL;
- unsigned long gfn = 0; /* gcc ... */
- unsigned long prev_mfn, mfn = 0, old_gpfn;
-- int rc;
-+ int rc = 0;
- p2m_type_t p2mt;
-
- switch ( space )
-@@ -4872,25 +4876,30 @@ int xenmem_add_to_physmap_one(
- {
- if ( is_xen_heap_mfn(prev_mfn) )
- /* Xen heap frames are simply unhooked from this phys slot. */
-- guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), PAGE_ORDER_4K);
-+ rc = guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), PAGE_ORDER_4K);
- else
- /* Normal domain memory is freed, to avoid leaking memory. */
-- guest_remove_page(d, gfn_x(gpfn));
-+ rc = guest_remove_page(d, gfn_x(gpfn));
- }
- /* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
- put_gfn(d, gfn_x(gpfn));
-
-+ if ( rc )
-+ goto put_both;
-+
- /* Unmap from old location, if any. */
- old_gpfn = get_gpfn_from_mfn(mfn);
- ASSERT( old_gpfn != SHARED_M2P_ENTRY );
- if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
- ASSERT( old_gpfn == gfn );
- if ( old_gpfn != INVALID_M2P_ENTRY )
-- guest_physmap_remove_page(d, _gfn(old_gpfn), _mfn(mfn), PAGE_ORDER_4K);
-+ rc = guest_physmap_remove_page(d, _gfn(old_gpfn), _mfn(mfn), PAGE_ORDER_4K);
-
- /* Map at new location. */
-- rc = guest_physmap_add_page(d, gpfn, _mfn(mfn), PAGE_ORDER_4K);
-+ if ( !rc )
-+ rc = guest_physmap_add_page(d, gpfn, _mfn(mfn), PAGE_ORDER_4K);
-
-+ put_both:
- /* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
- if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
- put_gfn(d, gfn);
---- a/xen/arch/x86/mm/p2m.c
-+++ b/xen/arch/x86/mm/p2m.c
-@@ -2925,10 +2925,12 @@ int p2m_add_foreign(struct domain *tdom,
- {
- if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
- /* Xen heap frames are simply unhooked from this phys slot */
-- guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
-+ rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
- else
- /* Normal domain memory is freed, to avoid leaking memory. */
-- guest_remove_page(tdom, gpfn);
-+ rc = guest_remove_page(tdom, gpfn);
-+ if ( rc )
-+ goto put_both;
- }
- /*
- * Create the new mapping. Can't use guest_physmap_add_page() because it
-@@ -2941,6 +2943,7 @@ int p2m_add_foreign(struct domain *tdom,
- "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
- gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
-
-+ put_both:
- put_page(page);
-
- /*
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -1768,6 +1768,7 @@ gnttab_transfer(
- for ( i = 0; i < count; i++ )
- {
- bool_t okay;
-+ int rc;
-
- if (i && hypercall_preempt_check())
- return i;
-@@ -1818,27 +1819,33 @@ gnttab_transfer(
- goto copyback;
- }
-
-- guest_physmap_remove_page(d, _gfn(gop.mfn), _mfn(mfn), 0);
-+ rc = guest_physmap_remove_page(d, _gfn(gop.mfn), _mfn(mfn), 0);
- gnttab_flush_tlb(d);
-+ if ( rc )
-+ {
-+ gdprintk(XENLOG_INFO,
-+ "gnttab_transfer: can't remove GFN %"PRI_xen_pfn" (MFN %lx)\n",
-+ gop.mfn, mfn);
-+ gop.status = GNTST_general_error;
-+ goto put_gfn_and_copyback;
-+ }
-
- /* Find the target domain. */
- if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
- {
-- put_gfn(d, gop.mfn);
- gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
- gop.domid);
-- page->count_info &= ~(PGC_count_mask|PGC_allocated);
-- free_domheap_page(page);
- gop.status = GNTST_bad_domain;
-- goto copyback;
-+ goto put_gfn_and_copyback;
- }
-
- if ( xsm_grant_transfer(XSM_HOOK, d, e) )
- {
-- put_gfn(d, gop.mfn);
- gop.status = GNTST_permission_denied;
- unlock_and_copyback:
- rcu_unlock_domain(e);
-+ put_gfn_and_copyback:
-+ put_gfn(d, gop.mfn);
- page->count_info &= ~(PGC_count_mask|PGC_allocated);
- free_domheap_page(page);
- goto copyback;
-@@ -1887,12 +1894,8 @@ gnttab_transfer(
- "Transferee (d%d) has no headroom (tot %u, max %u)\n",
- e->domain_id, e->tot_pages, e->max_pages);
-
-- rcu_unlock_domain(e);
-- put_gfn(d, gop.mfn);
-- page->count_info &= ~(PGC_count_mask|PGC_allocated);
-- free_domheap_page(page);
- gop.status = GNTST_general_error;
-- goto copyback;
-+ goto unlock_and_copyback;
- }
-
- /* Okay, add the page to 'e'. */
-@@ -1921,13 +1924,8 @@ gnttab_transfer(
-
- if ( drop_dom_ref )
- put_domain(e);
-- rcu_unlock_domain(e);
--
-- put_gfn(d, gop.mfn);
-- page->count_info &= ~(PGC_count_mask|PGC_allocated);
-- free_domheap_page(page);
- gop.status = GNTST_general_error;
-- goto copyback;
-+ goto unlock_and_copyback;
- }
-
- page_list_add_tail(page, &e->page_list);
---- a/xen/common/memory.c
-+++ b/xen/common/memory.c
-@@ -272,8 +272,12 @@ int guest_remove_page(struct domain *d,
- mfn = get_gfn_query(d, gmfn, &p2mt);
- if ( unlikely(p2m_is_paging(p2mt)) )
- {
-- guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
-+ rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
- put_gfn(d, gmfn);
-+
-+ if ( rc )
-+ return rc;
-+
- /* If the page hasn't yet been paged out, there is an
- * actual page that needs to be released. */
- if ( p2mt == p2m_ram_paging_out )
-@@ -337,7 +341,9 @@ int guest_remove_page(struct domain *d,
- return -ENXIO;
- }
-
-- if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
-+ rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
-+
-+ if ( !rc && test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
- put_page_and_type(page);
-
- /*
-@@ -348,16 +354,14 @@ int guest_remove_page(struct domain *d,
- * For this purpose (and to match populate_physmap() behavior), the page
- * is kept allocated.
- */
-- if ( !is_domain_direct_mapped(d) &&
-+ if ( !rc && !is_domain_direct_mapped(d) &&
- test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
-
-- guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
--
- put_page(page);
- put_gfn(d, gmfn);
-
-- return 0;
-+ return rc;
- }
-
- static void decrease_reservation(struct memop_args *a)
-@@ -592,7 +596,8 @@ static long memory_exchange(XEN_GUEST_HA
- gfn = mfn_to_gmfn(d, mfn);
- /* Pages were unshared above */
- BUG_ON(SHARED_M2P(gfn));
-- guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), 0);
-+ if ( guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), 0) )
-+ domain_crash(d);
- put_page(page);
- }
-
-@@ -1151,8 +1156,8 @@ long do_memory_op(unsigned long cmd, XEN
- page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
- if ( page )
- {
-- guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
-- _mfn(page_to_mfn(page)), 0);
-+ rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
-+ _mfn(page_to_mfn(page)), 0);
- put_page(page);
- }
- else
---- a/xen/drivers/passthrough/arm/smmu.c
-+++ b/xen/drivers/passthrough/arm/smmu.c
-@@ -2786,9 +2786,7 @@ static int __must_check arm_smmu_unmap_p
- if ( !is_domain_direct_mapped(d) )
- return -EINVAL;
-
-- guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);
--
-- return 0;
-+ return guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);
- }
-
- static const struct iommu_ops arm_smmu_iommu_ops = {
---- a/xen/include/asm-arm/p2m.h
-+++ b/xen/include/asm-arm/p2m.h
-@@ -268,10 +268,6 @@ static inline int guest_physmap_add_page
- return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
- }
-
--void guest_physmap_remove_page(struct domain *d,
-- gfn_t gfn,
-- mfn_t mfn, unsigned int page_order);
--
- mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
-
- /*
---- a/xen/include/asm-x86/p2m.h
-+++ b/xen/include/asm-x86/p2m.h
-@@ -561,10 +561,6 @@ static inline int guest_physmap_add_page
- return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
- }
-
--/* Remove a page from a domain's p2m table */
--int guest_physmap_remove_page(struct domain *d,
-- gfn_t gfn, mfn_t mfn, unsigned int page_order);
--
- /* Set a p2m range as populate-on-demand */
- int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
- unsigned int order);
---- a/xen/include/xen/p2m-common.h
-+++ b/xen/include/xen/p2m-common.h
-@@ -1,6 +1,7 @@
- #ifndef _XEN_P2M_COMMON_H
- #define _XEN_P2M_COMMON_H
-
-+#include <xen/mm.h>
- #include <public/vm_event.h>
-
- /*
-@@ -33,6 +34,11 @@ typedef enum {
- /* NOTE: Assumed to be only 4 bits right now on x86. */
- } p2m_access_t;
-
-+/* Remove a page from a domain's p2m table */
-+int __must_check
-+guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
-+ unsigned int page_order);
-+
- /* Map MMIO regions in the p2m: start_gfn and nr describe the range in
- * * the guest physical address space to map, starting from the machine
- * * frame number mfn. */
---- a/xen/include/xen/mm.h
-+++ b/xen/include/xen/mm.h
-@@ -554,7 +554,7 @@ int xenmem_add_to_physmap_one(struct dom
- unsigned long idx, gfn_t gfn);
-
- /* Returns 0 on success, or negative on error. */
--int guest_remove_page(struct domain *d, unsigned long gmfn);
-+int __must_check guest_remove_page(struct domain *d, unsigned long gmfn);
-
- #define RAM_TYPE_CONVENTIONAL 0x00000001
- #define RAM_TYPE_RESERVED 0x00000002
diff --git a/system/xen/xsa/xsa223.patch b/system/xen/xsa/xsa223.patch
deleted file mode 100644
index 42f116b70ce31..0000000000000
--- a/system/xen/xsa/xsa223.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From: Julien Grall <julien.grall@arm.com>
-Subject: arm: vgic: Don't update the LR when the IRQ is not enabled
-
-gic_raise_inflight_irq will be called if the IRQ is already inflight
-(i.e. the IRQ has been injected into the guest). If the IRQ is already
-in the LRs, then the associated LR will be updated.
-
-To know if the interrupt is already in the LR, the function checks if
-the interrupt is queued. However, if the interrupt is not enabled then
-the interrupt may be neither queued nor in the LR. So gic_update_one_lr
-may be called (if we inject on the current vCPU) and read the LR.
-
-Because the interrupt is not in the LR, Xen will either read:
- * LR 0 if the interrupt was never injected before
- * LR 255 (GIC_INVALID_LR) if the interrupt was injected once. This
- is because gic_update_one_lr will reset p->lr.
-
-Reading LR 0 will result in potentially updating the wrong interrupt and
-not keeping the LRs in sync with Xen.
-
-Reading LR 255 will result in:
- * Crashing Xen on GICv3, as the LR index is bigger than supported (see
-   gicv3_ich_read_lr).
- * Always reading/writing GICH_LR + 255 * 4, which is not part of the
-   mapped memory.
-
-The problem can be prevented by checking whether the interrupt is
-enabled in gic_raise_inflight_irq before calling gic_update_one_lr.
-
-A follow-up of this patch is expected to mitigate the issue in the
-future.
-
-This is XSA-223.
-
-Reported-by: Julien Grall <julien.grall@arm.com>
-Signed-off-by: Julien Grall <julien.grall@arm.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
----
- xen/arch/arm/gic.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/xen/arch/arm/gic.c
-+++ b/xen/arch/arm/gic.c
-@@ -417,6 +417,10 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
-+ /* Don't try to update the LR if the interrupt is disabled */
-+ if ( !test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
-+ return;
-+
- if ( list_empty(&n->lr_queue) )
- {
- if ( v == current )
diff --git a/system/xen/xsa/xsa224-0001-gnttab-Fix-handling-of-dev_bus_addr-during-unmap.patch b/system/xen/xsa/xsa224-0001-gnttab-Fix-handling-of-dev_bus_addr-during-unmap.patch
deleted file mode 100644
index 6a55f86b07986..0000000000000
--- a/system/xen/xsa/xsa224-0001-gnttab-Fix-handling-of-dev_bus_addr-during-unmap.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From 9808ed0b1ebc3a5d2aa08a9ff91fcf3ecb42bc9f Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Thu, 15 Jun 2017 16:24:02 +0100
-Subject: [PATCH 1/4] gnttab: Fix handling of dev_bus_addr during unmap
-
-If a grant has been mapped with the GNTTAB_device_map flag, calling
-grant_unmap_ref() with dev_bus_addr set to zero should cause the
-GNTTAB_device_map part of the mapping to be left alone.
-
-Unfortunately, at the moment, op->dev_bus_addr is implicitly checked
-before clearing the map and adjusting the pin count, but only the bits
-above 12; and it is not checked at all before dropping page
-references. This means a guest can repeatedly make such a call to
-cause the reference count to drop to zero, causing the page to be
-freed and re-used, even though it's still mapped in its pagetables.
-
-To fix this, always check op->dev_bus_addr explicitly for being
-non-zero, as well as op->flags & GNTMAP_device_map, before doing
-operations on the device_map.
-
-While we're here, make the logic a bit cleaner:
-
-* Always initialize op->frame to zero and set it from act->frame, to reduce the
-chance of untrusted input being used
-
-* Explicitly check the full dev_bus_addr against act->frame <<
- PAGE_SHIFT, rather than ignoring the lower 12 bits
-
-This is part of XSA-224.
-
-Reported-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/common/grant_table.c | 23 +++++++++++------------
- 1 file changed, 11 insertions(+), 12 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index ba10e76..2671761 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -1085,8 +1085,6 @@ __gnttab_unmap_common(
- ld = current->domain;
- lgt = ld->grant_table;
-
-- op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
--
- if ( unlikely(op->handle >= lgt->maptrack_limit) )
- {
- gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
-@@ -1169,16 +1167,14 @@ __gnttab_unmap_common(
- goto act_release_out;
- }
-
-- if ( op->frame == 0 )
-- {
-- op->frame = act->frame;
-- }
-- else
-+ op->frame = act->frame;
-+
-+ if ( op->dev_bus_addr )
- {
-- if ( unlikely(op->frame != act->frame) )
-+ if ( unlikely(op->dev_bus_addr != pfn_to_paddr(act->frame)) )
- PIN_FAIL(act_release_out, GNTST_general_error,
-- "Bad frame number doesn't match gntref. (%lx != %lx)\n",
-- op->frame, act->frame);
-+ "Bus address doesn't match gntref (%"PRIx64" != %"PRIpaddr")\n",
-+ op->dev_bus_addr, pfn_to_paddr(act->frame));
-
- map->flags &= ~GNTMAP_device_map;
- }
-@@ -1271,7 +1267,8 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- else
- status = &status_entry(rgt, op->ref);
-
-- if ( unlikely(op->frame != act->frame) )
-+ if ( op->dev_bus_addr &&
-+ unlikely(op->dev_bus_addr != pfn_to_paddr(act->frame)) )
- {
- /*
- * Suggests that __gntab_unmap_common failed early and so
-@@ -1282,7 +1279,7 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
-
- pg = mfn_to_page(op->frame);
-
-- if ( op->flags & GNTMAP_device_map )
-+ if ( op->dev_bus_addr && (op->flags & GNTMAP_device_map) )
- {
- if ( !is_iomem_page(act->frame) )
- {
-@@ -1353,6 +1350,7 @@ __gnttab_unmap_grant_ref(
- /* Intialise these in case common contains old state */
- common->new_addr = 0;
- common->rd = NULL;
-+ common->frame = 0;
-
- __gnttab_unmap_common(common);
- op->status = common->status;
-@@ -1417,6 +1415,7 @@ __gnttab_unmap_and_replace(
- /* Intialise these in case common contains old state */
- common->dev_bus_addr = 0;
- common->rd = NULL;
-+ common->frame = 0;
-
- __gnttab_unmap_common(common);
- op->status = common->status;
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa224-0002-gnttab-never-create-host-mapping-unless-asked-to.patch b/system/xen/xsa/xsa224-0002-gnttab-never-create-host-mapping-unless-asked-to.patch
deleted file mode 100644
index bea0214db0e10..0000000000000
--- a/system/xen/xsa/xsa224-0002-gnttab-never-create-host-mapping-unless-asked-to.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 2d6357522946bd5a105066db8079e5dd46cb3047 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Fri, 2 Jun 2017 15:21:27 +0100
-Subject: [PATCH 2/4] gnttab: never create host mapping unless asked to
-
-We shouldn't create a host mapping unless asked to, even in the case of
-mapping a granted MMIO page. In particular, the mapping wouldn't be torn
-down when processing the matching unmap request.
-
-This is part of XSA-224.
-
-Reported-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/common/grant_table.c | 11 +++++++----
- 1 file changed, 7 insertions(+), 4 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index 2671761..5baae24 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -907,10 +907,13 @@ __gnttab_map_grant_ref(
- goto undo_out;
- }
-
-- rc = create_grant_host_mapping(
-- op->host_addr, frame, op->flags, cache_flags);
-- if ( rc != GNTST_okay )
-- goto undo_out;
-+ if ( op->flags & GNTMAP_host_map )
-+ {
-+ rc = create_grant_host_mapping(op->host_addr, frame, op->flags,
-+ cache_flags);
-+ if ( rc != GNTST_okay )
-+ goto undo_out;
-+ }
- }
- else if ( owner == rd || owner == dom_cow )
- {
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa224-0003-gnttab-correct-logic-to-get-page-references-during-m.patch b/system/xen/xsa/xsa224-0003-gnttab-correct-logic-to-get-page-references-during-m.patch
deleted file mode 100644
index f2d26d5fff2c8..0000000000000
--- a/system/xen/xsa/xsa224-0003-gnttab-correct-logic-to-get-page-references-during-m.patch
+++ /dev/null
@@ -1,186 +0,0 @@
-From 4e718be6f59526927d5cd31ecd80c5c758dca3f5 Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Fri, 2 Jun 2017 15:21:27 +0100
-Subject: [PATCH 3/4] gnttab: correct logic to get page references during map
- requests
-
-The rules for reference counting are somewhat complicated:
-
-* Each of GNTTAB_host_map and GNTTAB_device_map needs its own
-reference count
-
-* If the mapping is writeable:
- - GNTTAB_host_map needs a type count under only some conditions
- - GNTTAB_device_map always needs a type count
-
-If the mapping succeeds, we need to keep all of these; if the mapping
-fails, we need to release whatever references we have acquired so far.
-
-Additionally, the code that does a lot of this calculation "inherits"
-a reference as part of the process of finding out who the owner is.
-
-Finally, if the grant is mapped as writeable (without the
-GNTMAP_readonly flag), but the hypervisor cannot grab a
-PGT_writeable_page type, the entire operation should fail.
-
-Unfortunately, the current code has several logic holes:
-
-* If a grant is mapped only GNTTAB_device_map, and with a writeable
- mapping, but in conditions where a *host* type count is not
- necessary, the code will fail to grab the necessary type count.
-
-* If a grant is mapped both GNTTAB_device_map and GNTTAB_host_map,
- with a writeable mapping, in conditions where the host type count is
- not necessary, *and* where the page cannot be changed to type
- PGT_writable, the condition will not be detected.
-
-In both cases, this means that on success, the type count will be
-erroneously reduced when the grant is unmapped. In the second case,
-the type count will be erroneously reduced on the failure path as
-well. (In the first case the failure path logic has the same hole
-as the reference grabbing logic.)
-
-Additionally, the return value of get_page() is not checked; it may
-fail even if the first get_page() succeeded, due to a reference
-counting overflow.
-
-First of all, simplify the restoration logic by explicitly counting
-the page references and type references acquired.
-
-Consider each mapping type separately, explicitly marking the
-'incoming' reference as used so we know when we need to grab a second
-one.
-
-Finally, always check the return value of get_page[_type]() and go to
-the failure path if appropriate.
-
-This is part of XSA-224.
-
-Reported-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/common/grant_table.c | 58 +++++++++++++++++++++++++++---------------------
- 1 file changed, 33 insertions(+), 25 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index 5baae24..d07b931 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -754,12 +754,12 @@ __gnttab_map_grant_ref(
- struct grant_table *lgt, *rgt;
- struct vcpu *led;
- int handle;
-- unsigned long frame = 0, nr_gets = 0;
-+ unsigned long frame = 0;
- struct page_info *pg = NULL;
- int rc = GNTST_okay;
- u32 old_pin;
- u32 act_pin;
-- unsigned int cache_flags;
-+ unsigned int cache_flags, refcnt = 0, typecnt = 0;
- struct active_grant_entry *act = NULL;
- struct grant_mapping *mt;
- grant_entry_header_t *shah;
-@@ -885,11 +885,17 @@ __gnttab_map_grant_ref(
- else
- owner = page_get_owner(pg);
-
-+ if ( owner )
-+ refcnt++;
-+
- if ( !pg || (owner == dom_io) )
- {
- /* Only needed the reference to confirm dom_io ownership. */
- if ( pg )
-+ {
- put_page(pg);
-+ refcnt--;
-+ }
-
- if ( paging_mode_external(ld) )
- {
-@@ -917,27 +923,38 @@ __gnttab_map_grant_ref(
- }
- else if ( owner == rd || owner == dom_cow )
- {
-- if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
-+ if ( (op->flags & GNTMAP_device_map) && !(op->flags & GNTMAP_readonly) )
- {
- if ( (owner == dom_cow) ||
- !get_page_type(pg, PGT_writable_page) )
- goto could_not_pin;
-+ typecnt++;
- }
-
-- nr_gets++;
- if ( op->flags & GNTMAP_host_map )
- {
-- rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
-- if ( rc != GNTST_okay )
-- goto undo_out;
--
-+ /*
-+ * Only need to grab another reference if device_map claimed
-+ * the other one.
-+ */
- if ( op->flags & GNTMAP_device_map )
- {
-- nr_gets++;
-- (void)get_page(pg, rd);
-- if ( !(op->flags & GNTMAP_readonly) )
-- get_page_type(pg, PGT_writable_page);
-+ if ( !get_page(pg, rd) )
-+ goto could_not_pin;
-+ refcnt++;
-+ }
-+
-+ if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
-+ {
-+ if ( (owner == dom_cow) ||
-+ !get_page_type(pg, PGT_writable_page) )
-+ goto could_not_pin;
-+ typecnt++;
- }
-+
-+ rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
-+ if ( rc != GNTST_okay )
-+ goto undo_out;
- }
- }
- else
-@@ -946,8 +963,6 @@ __gnttab_map_grant_ref(
- if ( !rd->is_dying )
- gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
- frame);
-- if ( owner != NULL )
-- put_page(pg);
- rc = GNTST_general_error;
- goto undo_out;
- }
-@@ -1010,18 +1025,11 @@ __gnttab_map_grant_ref(
- return;
-
- undo_out:
-- if ( nr_gets > 1 )
-- {
-- if ( !(op->flags & GNTMAP_readonly) )
-- put_page_type(pg);
-- put_page(pg);
-- }
-- if ( nr_gets > 0 )
-- {
-- if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
-- put_page_type(pg);
-+ while ( typecnt-- )
-+ put_page_type(pg);
-+
-+ while ( refcnt-- )
- put_page(pg);
-- }
-
- grant_read_lock(rgt);
-
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa224-0004-gnttab-__gnttab_unmap_common_complete-is-all-or-noth.patch b/system/xen/xsa/xsa224-0004-gnttab-__gnttab_unmap_common_complete-is-all-or-noth.patch
deleted file mode 100644
index 9c1bd40becf5d..0000000000000
--- a/system/xen/xsa/xsa224-0004-gnttab-__gnttab_unmap_common_complete-is-all-or-noth.patch
+++ /dev/null
@@ -1,319 +0,0 @@
-From d27237abe37e45a1f245e23484062b09ff3477ed Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Thu, 15 Jun 2017 16:25:27 +0100
-Subject: [PATCH 4/4] gnttab: __gnttab_unmap_common_complete() is
- all-or-nothing
-
-All failures have to be detected in __gnttab_unmap_common(); the
-completion function must not skip part of its processing. In particular,
-the GNTMAP_device_map related putting of page references and adjustment
-of pin count must not occur if __gnttab_unmap_common() signaled an
-error. Furthermore, the function must not make adjustments to global
-state (here: clearing GNTTAB_device_map) before all possibly failing
-operations have been performed.
-
-There's one exception for IOMMU related failures: As IOMMU manipulation
-occurs after GNTMAP_*_map have been cleared already, the related page
-reference and pin count adjustments need to be done nevertheless. A
-fundamental requirement for the correctness of this is that
-iommu_{,un}map_page() crash any affected DomU in case of failure.
-
-The version check appears to be pointless (or could perhaps be a
-BUG_ON() or ASSERT()), but for the moment also move it.
-
-This is part of XSA-224.
-
-Reported-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/common/grant_table.c | 108 ++++++++++++++++++--------------------
- xen/include/asm-arm/grant_table.h | 2 +-
- xen/include/asm-x86/grant_table.h | 5 +-
- 3 files changed, 55 insertions(+), 60 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index d07b931..7ea68b1 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -96,7 +96,7 @@ struct gnttab_unmap_common {
- int16_t status;
-
- /* Shared state beteen *_unmap and *_unmap_complete */
-- u16 flags;
-+ u16 done;
- unsigned long frame;
- struct domain *rd;
- grant_ref_t ref;
-@@ -944,7 +944,8 @@ __gnttab_map_grant_ref(
- refcnt++;
- }
-
-- if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
-+ if ( gnttab_host_mapping_get_page_type(op->flags & GNTMAP_readonly,
-+ ld, rd) )
- {
- if ( (owner == dom_cow) ||
- !get_page_type(pg, PGT_writable_page) )
-@@ -1091,6 +1092,7 @@ __gnttab_unmap_common(
- struct active_grant_entry *act;
- s16 rc = 0;
- struct grant_mapping *map;
-+ unsigned int flags;
- bool put_handle = false;
-
- ld = current->domain;
-@@ -1140,6 +1142,20 @@ __gnttab_unmap_common(
-
- grant_read_lock(rgt);
-
-+ if ( rgt->gt_version == 0 )
-+ {
-+ /*
-+ * This ought to be impossible, as such a mapping should not have
-+ * been established (see the nr_grant_entries(rgt) bounds check in
-+ * __gnttab_map_grant_ref()). Doing this check only in
-+ * __gnttab_unmap_common_complete() - as it used to be done - would,
-+ * however, be too late.
-+ */
-+ rc = GNTST_bad_gntref;
-+ flags = 0;
-+ goto unlock_out;
-+ }
-+
- op->rd = rd;
- op->ref = map->ref;
-
-@@ -1155,6 +1171,7 @@ __gnttab_unmap_common(
- {
- gdprintk(XENLOG_WARNING, "Unstable handle %u\n", op->handle);
- rc = GNTST_bad_handle;
-+ flags = 0;
- goto unlock_out;
- }
-
-@@ -1168,9 +1185,9 @@ __gnttab_unmap_common(
- * hold anyway; see docs/misc/grant-tables.txt's "Locking" section.
- */
-
-- op->flags = read_atomic(&map->flags);
-+ flags = read_atomic(&map->flags);
- smp_rmb();
-- if ( unlikely(!op->flags) || unlikely(map->domid != dom) ||
-+ if ( unlikely(!flags) || unlikely(map->domid != dom) ||
- unlikely(map->ref != op->ref) )
- {
- gdprintk(XENLOG_WARNING, "Unstable handle %#x\n", op->handle);
-@@ -1180,24 +1197,27 @@ __gnttab_unmap_common(
-
- op->frame = act->frame;
-
-- if ( op->dev_bus_addr )
-- {
-- if ( unlikely(op->dev_bus_addr != pfn_to_paddr(act->frame)) )
-- PIN_FAIL(act_release_out, GNTST_general_error,
-- "Bus address doesn't match gntref (%"PRIx64" != %"PRIpaddr")\n",
-- op->dev_bus_addr, pfn_to_paddr(act->frame));
--
-- map->flags &= ~GNTMAP_device_map;
-- }
-+ if ( op->dev_bus_addr &&
-+ unlikely(op->dev_bus_addr != pfn_to_paddr(act->frame)) )
-+ PIN_FAIL(act_release_out, GNTST_general_error,
-+ "Bus address doesn't match gntref (%"PRIx64" != %"PRIpaddr")\n",
-+ op->dev_bus_addr, pfn_to_paddr(act->frame));
-
-- if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
-+ if ( op->host_addr && (flags & GNTMAP_host_map) )
- {
- if ( (rc = replace_grant_host_mapping(op->host_addr,
- op->frame, op->new_addr,
-- op->flags)) < 0 )
-+ flags)) < 0 )
- goto act_release_out;
-
- map->flags &= ~GNTMAP_host_map;
-+ op->done |= GNTMAP_host_map | (flags & GNTMAP_readonly);
-+ }
-+
-+ if ( op->dev_bus_addr && (flags & GNTMAP_device_map) )
-+ {
-+ map->flags &= ~GNTMAP_device_map;
-+ op->done |= GNTMAP_device_map | (flags & GNTMAP_readonly);
- }
-
- if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
-@@ -1234,7 +1254,7 @@ __gnttab_unmap_common(
- }
-
- /* If just unmapped a writable mapping, mark as dirtied */
-- if ( rc == GNTST_okay && !(op->flags & GNTMAP_readonly) )
-+ if ( rc == GNTST_okay && !(flags & GNTMAP_readonly) )
- gnttab_mark_dirty(rd, op->frame);
-
- op->status = rc;
-@@ -1251,13 +1271,9 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- struct page_info *pg;
- uint16_t *status;
-
-- if ( rd == NULL )
-+ if ( !op->done )
- {
-- /*
-- * Suggests that __gntab_unmap_common failed in
-- * rcu_lock_domain_by_id() or earlier, and so we have nothing
-- * to complete
-- */
-+ /* __gntab_unmap_common() didn't do anything - nothing to complete. */
- return;
- }
-
-@@ -1267,8 +1283,6 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- rgt = rd->grant_table;
-
- grant_read_lock(rgt);
-- if ( rgt->gt_version == 0 )
-- goto unlock_out;
-
- act = active_entry_acquire(rgt, op->ref);
- sha = shared_entry_header(rgt, op->ref);
-@@ -1278,72 +1292,50 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
- else
- status = &status_entry(rgt, op->ref);
-
-- if ( op->dev_bus_addr &&
-- unlikely(op->dev_bus_addr != pfn_to_paddr(act->frame)) )
-- {
-- /*
-- * Suggests that __gntab_unmap_common failed early and so
-- * nothing further to do
-- */
-- goto act_release_out;
-- }
--
- pg = mfn_to_page(op->frame);
-
-- if ( op->dev_bus_addr && (op->flags & GNTMAP_device_map) )
-+ if ( op->done & GNTMAP_device_map )
- {
- if ( !is_iomem_page(act->frame) )
- {
-- if ( op->flags & GNTMAP_readonly )
-+ if ( op->done & GNTMAP_readonly )
- put_page(pg);
- else
- put_page_and_type(pg);
- }
-
- ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
-- if ( op->flags & GNTMAP_readonly )
-+ if ( op->done & GNTMAP_readonly )
- act->pin -= GNTPIN_devr_inc;
- else
- act->pin -= GNTPIN_devw_inc;
- }
-
-- if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
-+ if ( op->done & GNTMAP_host_map )
- {
-- if ( op->status != 0 )
-+ if ( !is_iomem_page(op->frame) )
- {
-- /*
-- * Suggests that __gntab_unmap_common failed in
-- * replace_grant_host_mapping() or IOMMU handling, so nothing
-- * further to do (short of re-establishing the mapping in the
-- * latter case).
-- */
-- goto act_release_out;
-- }
--
-- if ( !is_iomem_page(op->frame) )
-- {
-- if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
-+ if ( gnttab_host_mapping_get_page_type(op->done & GNTMAP_readonly,
-+ ld, rd) )
- put_page_type(pg);
- put_page(pg);
- }
-
- ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
-- if ( op->flags & GNTMAP_readonly )
-+ if ( op->done & GNTMAP_readonly )
- act->pin -= GNTPIN_hstr_inc;
- else
- act->pin -= GNTPIN_hstw_inc;
- }
-
- if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
-- !(op->flags & GNTMAP_readonly) )
-+ !(op->done & GNTMAP_readonly) )
- gnttab_clear_flag(_GTF_writing, status);
-
- if ( act->pin == 0 )
- gnttab_clear_flag(_GTF_reading, status);
-
-- act_release_out:
- active_entry_release(act);
-- unlock_out:
- grant_read_unlock(rgt);
-
- rcu_unlock_domain(rd);
-@@ -1359,6 +1351,7 @@ __gnttab_unmap_grant_ref(
- common->handle = op->handle;
-
- /* Intialise these in case common contains old state */
-+ common->done = 0;
- common->new_addr = 0;
- common->rd = NULL;
- common->frame = 0;
-@@ -1424,6 +1417,7 @@ __gnttab_unmap_and_replace(
- common->handle = op->handle;
-
- /* Intialise these in case common contains old state */
-+ common->done = 0;
- common->dev_bus_addr = 0;
- common->rd = NULL;
- common->frame = 0;
-@@ -3385,7 +3379,9 @@ gnttab_release_mappings(
- if ( gnttab_release_host_mappings(d) &&
- !is_iomem_page(act->frame) )
- {
-- if ( gnttab_host_mapping_get_page_type(map, d, rd) )
-+ if ( gnttab_host_mapping_get_page_type((map->flags &
-+ GNTMAP_readonly),
-+ d, rd) )
- put_page_type(pg);
- put_page(pg);
- }
-diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-arm/grant_table.h
-index eb02423..bc4d61a 100644
---- a/xen/include/asm-arm/grant_table.h
-+++ b/xen/include/asm-arm/grant_table.h
-@@ -9,7 +9,7 @@ void gnttab_clear_flag(unsigned long nr, uint16_t *addr);
- int create_grant_host_mapping(unsigned long gpaddr,
- unsigned long mfn, unsigned int flags, unsigned int
- cache_flags);
--#define gnttab_host_mapping_get_page_type(op, d, rd) (0)
-+#define gnttab_host_mapping_get_page_type(ro, ld, rd) (0)
- int replace_grant_host_mapping(unsigned long gpaddr, unsigned long mfn,
- unsigned long new_gpaddr, unsigned int flags);
- void gnttab_mark_dirty(struct domain *d, unsigned long l);
-diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
-index 8c9bbcf..9ca631c 100644
---- a/xen/include/asm-x86/grant_table.h
-+++ b/xen/include/asm-x86/grant_table.h
-@@ -58,9 +58,8 @@ static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
- }
-
- /* Foreign mappings of HHVM-guest pages do not modify the type count. */
--#define gnttab_host_mapping_get_page_type(op, ld, rd) \
-- (!((op)->flags & GNTMAP_readonly) && \
-- (((ld) == (rd)) || !paging_mode_external(rd)))
-+#define gnttab_host_mapping_get_page_type(ro, ld, rd) \
-+ (!(ro) && (((ld) == (rd)) || !paging_mode_external(rd)))
-
- /* Done implicitly when page tables are destroyed. */
- #define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) )
---
-2.1.4
-
diff --git a/system/xen/xsa/xsa225.patch b/system/xen/xsa/xsa225.patch
deleted file mode 100644
index 900487a631cfb..0000000000000
--- a/system/xen/xsa/xsa225.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From b0547f9c9707e0dc473601a166da32dfec1f526e Mon Sep 17 00:00:00 2001
-From: Julien Grall <julien.grall@arm.com>
-Date: Tue, 6 Jun 2017 15:35:42 +0100
-Subject: [PATCH] xen/arm: vgic: Sanitize target mask used to send SGI
-
-The current function vgic_to_sgi does not sanitize the target mask and
-may therefore get an invalid vCPU ID. This will result in an
-out-of-bounds access of d->vcpu[...] as there is no check whether the
-vCPU ID is within the maximum supported by the guest.
-
-This was introduced by commit ea37fd2111 "xen/arm: split vgic driver
-into generic and vgic-v2 driver".
-
-Signed-off-by: Julien Grall <julien.grall@arm.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
----
- xen/arch/arm/vgic.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
-index 83569b09e7..c6c6f8cb66 100644
---- a/xen/arch/arm/vgic.c
-+++ b/xen/arch/arm/vgic.c
-@@ -399,7 +399,8 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
- for_each_set_bit( i, &bitmap, sizeof(target->list) * 8 )
- {
- vcpuid = base + i;
-- if ( d->vcpu[vcpuid] == NULL || !is_vcpu_online(d->vcpu[vcpuid]) )
-+ if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL ||
-+ !is_vcpu_online(d->vcpu[vcpuid]) )
- {
- gprintk(XENLOG_WARNING, "VGIC: write r=%"PRIregister" \
- target->list=%hx, wrong CPUTargetList \n",
---
-2.11.0