author    Mario Preksavec <mario@slackware.hr>    2016-03-31 22:20:20 +0200
committer Robby Workman <rworkman@slackbuilds.org>    2016-04-03 02:33:04 -0500
commit    50cf57ce56b4877aca6148cd3de5efb13157cea9 (patch)
tree      dbd3a9c1c903b266b9ac216cdba631a785ac87ff /system/xen/patches
parent    7859671412ce6e2cbe91dfd00657f675e44dca07 (diff)
system/xen: Updated for kernel 4.4.6 and fixes.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/patches')
-rw-r--r--  system/xen/patches/xsa172.patch | 39
1 file changed, 39 insertions, 0 deletions
diff --git a/system/xen/patches/xsa172.patch b/system/xen/patches/xsa172.patch
new file mode 100644
index 0000000000000..8b1d01fa8449f
--- /dev/null
+++ b/system/xen/patches/xsa172.patch
@@ -0,0 +1,39 @@
+x86: fix information leak on AMD CPUs
+
+The fix for XSA-52 was wrong, and so was the change synchronizing that
+new behavior to the FXRSTOR logic: AMD's manuals explicitly state that
+writes to the ES bit are ignored, and it instead gets calculated from
+the exception and mask bits (it gets set whenever there is an unmasked
+exception, and cleared otherwise). Hence we need to follow that model
+in our workaround.
+
+This is XSA-172.
+
+The first hunk (xen/arch/x86/i387.c:fpu_fxrstor) is CVE-2016-3159.
+The second hunk (xen/arch/x86/xstate.c:xrstor) is CVE-2016-3158.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/i387.c
++++ b/xen/arch/x86/i387.c
+@@ -49,7 +49,7 @@ static inline void fpu_fxrstor(struct vc
+ * sometimes new user value. Both should be ok. Use the FPU saved
+ * data block as a safe address because it should be in L1.
+ */
+- if ( !(fpu_ctxt->fsw & 0x0080) &&
++ if ( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ {
+ asm volatile ( "fnclex\n\t"
+--- a/xen/arch/x86/xstate.c
++++ b/xen/arch/x86/xstate.c
+@@ -344,7 +344,7 @@ void xrstor(struct vcpu *v, uint64_t mas
+ * data block as a safe address because it should be in L1.
+ */
+ if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
+- !(ptr->fpu_sse.fsw & 0x0080) &&
++ !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ asm volatile ( "fnclex\n\t" /* clear exceptions */
+ "ffree %%st(7)\n\t" /* clear stack tag */
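
The predicate changed by both hunks is small enough to test in isolation. The following standalone C sketch (not Xen code; the function names are hypothetical, and Xen open-codes the expressions) shows why the old ES-bit test and the new unmasked-exception test disagree in exactly the cases the advisory describes: bits 0-5 of FSW are the x87 exception flags, bits 0-5 of FCW are the corresponding mask bits, and FSW bit 7 is ES. On AMD CPUs, FXRSTOR/XRSTOR ignore the ES bit in the save image and recompute it as "any unmasked exception flag set", so only the new test matches the hardware model.

/*
 * Minimal sketch of the XSA-172 predicate change (hypothetical helper
 * names, not Xen's).  FSW bits 0-5: exception flags; FCW bits 0-5:
 * exception masks; FSW bit 7: ES (exception summary).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Old test: trust the ES bit stored in the save image. */
static bool no_pending_exception_old(uint16_t fsw)
{
    return !(fsw & 0x0080);
}

/* New test: an exception is pending only if some flag set in FSW is
 * not masked in FCW, matching how AMD hardware recomputes ES. */
static bool no_pending_exception_new(uint16_t fsw, uint16_t fcw)
{
    return !(fsw & ~fcw & 0x003f);
}

int main(void)
{
    /* Stale ES: ES set in the image but every exception masked.  The
     * old test skips the scrubbing workaround; the new one applies it. */
    uint16_t fsw = 0x0080, fcw = 0x037f;  /* 0x037f: default FCW, all masked */
    printf("stale ES:    old=%d new=%d\n",
           no_pending_exception_old(fsw),
           no_pending_exception_new(fsw, fcw));

    /* Unmasked IE flag with ES clear in the image: hardware will set ES
     * on restore, so the workaround must not run; only the new test
     * gets this right. */
    fsw = 0x0001;  /* IE flag set */
    fcw = 0x037e;  /* IE unmasked */
    printf("unmasked IE: old=%d new=%d\n",
           no_pending_exception_old(fsw),
           no_pending_exception_new(fsw, fcw));
    return 0;
}

In both hunks above, a true result gates the fnclex/ffree scrubbing sequence, so the corrected test ensures the leaked FIP/FDP state is cleaned exactly when AMD hardware would leave it stale.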