author     fanquake <fanquake@gmail.com>   2022-04-01 10:43:32 +0100
committer  fanquake <fanquake@gmail.com>   2022-04-04 10:48:33 +0100
commit     db8a5d60944dd565e15f4aba776a475972b4a9c9 (patch)
tree       0cd3ab540516c6a77f7846692267e9d499e2fb52
parent     e089c68aa17a5f605b295907e80dd355fef4f185 (diff)
download   bitcoin-db8a5d60944dd565e15f4aba776a475972b4a9c9.tar.xz
guix: fix vmov alignment issues with gcc 10.3.0 & mingw-w64
This introduces a patch to our GCC (10.3.0) mingw-w64 compiler, in Guix, to make it avoid using aligned vmov instructions. This works around a longstanding issue in GCC, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412, which was recently discovered to be causing issues, see #24726.

Note that distros like Debian are also patching around this issue, and that is where this patch comes from. This would also explain why we haven't run into this problem earlier in development builds. See: https://salsa.debian.org/mingw-w64-team/gcc-mingw-w64/-/blob/master/debian/patches/vmov-alignment.patch.

Fixes #24726.
Alternative to #24727.

See also: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=939559

Github-Pull: #24736
Rebased-From: d6fae988eff78e28756d9b6219ec0239c420f51b
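For context, the distinction the patch papers over is between GCC's aligned vector moves (vmovaps/vmovdqa), which fault if the memory operand is not aligned to the full vector width, and their unaligned counterparts (vmovups/vmovdqu), which accept any address. A hypothetical reproducer along the lines of the sketch below (file layout, function names, and whether this exact snippet faults are assumptions, not taken from #24726) illustrates the failure mode: the unpatched cross-compiler may spill a 32-byte AVX vector to a stack slot it assumes is 32-byte aligned, while the Windows x64 ABI only guarantees 16-byte stack alignment.

    /* repro.c -- illustrative sketch only; build with something like
     *   x86_64-w64-mingw32-gcc -O2 -mavx repro.c
     * Whether the aligned-vmov crash actually triggers depends on how the
     * compiler chooses to spill and reload the vector. */
    #include <immintrin.h>
    #include <stdio.h>

    __attribute__((noinline))
    static float sum8(__m256 v)
    {
        /* volatile forces the 32-byte vector through a stack slot; the reload
         * from that slot is where an aligned vmov would fault if the frame is
         * only 16-byte aligned. */
        volatile __m256 spilled = v;
        __m256 w = spilled;

        float out[8];
        _mm256_storeu_ps(out, w);   /* explicitly unaligned store of the lanes */
        return out[0] + out[1] + out[2] + out[3]
             + out[4] + out[5] + out[6] + out[7];
    }

    int main(void)
    {
        __m256 ones = _mm256_set1_ps(1.0f);
        printf("sum = %f\n", (double)sum8(ones));
        return 0;
    }

With the patched compiler, such spills and reloads are emitted as vmovups/vmovdqu, which tolerate a 16-byte-aligned frame; on modern CPUs the unaligned forms carry no penalty when the data happens to be aligned, so the workaround costs essentially nothing.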
-rw-r--r--  contrib/guix/manifest.scm                  |   6
-rw-r--r--  contrib/guix/patches/vmov-alignment.patch  | 267
2 files changed, 272 insertions(+), 1 deletion(-)
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 052c1066e2..5bf8949c37 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -166,13 +166,17 @@ desirable for building Bitcoin Core release binaries."
(define (make-gcc-without-newlib gcc)
(package-with-extra-configure-variable gcc "--with-newlib" "no"))
+(define (make-mingw-w64-cross-gcc-vmov-alignment cross-gcc)
+ (package-with-extra-patches cross-gcc
+ (search-our-patches "vmov-alignment.patch")))
+
(define (make-mingw-pthreads-cross-toolchain target)
"Create a cross-compilation toolchain package for TARGET"
(let* ((xbinutils (cross-binutils target))
(pthreads-xlibc mingw-w64-x86_64-winpthreads)
(pthreads-xgcc (make-gcc-with-pthreads
(cross-gcc target
- #:xgcc (make-gcc-without-newlib (make-ssp-fixed-gcc base-gcc))
+ #:xgcc (make-gcc-without-newlib (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc-vmov-alignment base-gcc)))
#:xbinutils xbinutils
#:libc pthreads-xlibc))))
;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and
diff --git a/contrib/guix/patches/vmov-alignment.patch b/contrib/guix/patches/vmov-alignment.patch
new file mode 100644
index 0000000000..072f76eafd
--- /dev/null
+++ b/contrib/guix/patches/vmov-alignment.patch
@@ -0,0 +1,267 @@
+Description: Use unaligned VMOV instructions
+Author: Stephen Kitt <skitt@debian.org>
+Bug-Debian: https://bugs.debian.org/939559
+
+Based on a patch originally by Claude Heiland-Allen <claude@mathr.co.uk>
+
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -1058,17 +1058,11 @@
+ {
+ if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
+ {
+- if (misaligned_operand (operands[1], <MODE>mode))
+- return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+- else
+- return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
++ return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+ }
+ else
+ {
+- if (misaligned_operand (operands[1], <MODE>mode))
+- return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+- else
+- return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
++ return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+ }
+ }
+ [(set_attr "type" "ssemov")
+@@ -1184,17 +1178,11 @@
+ {
+ if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
+ {
+- if (misaligned_operand (operands[0], <MODE>mode))
+- return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+- else
+- return "vmova<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++ return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+ }
+ else
+ {
+- if (misaligned_operand (operands[0], <MODE>mode))
+- return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+- else
+- return "vmovdqa<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++ return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+ }
+ }
+ [(set_attr "type" "ssemov")
+@@ -7806,7 +7794,7 @@
+ "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ %vmovlps\t{%1, %0|%q0, %1}
+- %vmovaps\t{%1, %0|%0, %1}
++ %vmovups\t{%1, %0|%0, %1}
+ %vmovlps\t{%1, %d0|%d0, %q1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_vex")
+@@ -13997,29 +13985,15 @@
+ switch (<MODE>mode)
+ {
+ case E_V8DFmode:
+- if (misaligned_operand (operands[2], <ssequartermode>mode))
+- return "vmovupd\t{%2, %x0|%x0, %2}";
+- else
+- return "vmovapd\t{%2, %x0|%x0, %2}";
++ return "vmovupd\t{%2, %x0|%x0, %2}";
+ case E_V16SFmode:
+- if (misaligned_operand (operands[2], <ssequartermode>mode))
+- return "vmovups\t{%2, %x0|%x0, %2}";
+- else
+- return "vmovaps\t{%2, %x0|%x0, %2}";
++ return "vmovups\t{%2, %x0|%x0, %2}";
+ case E_V8DImode:
+- if (misaligned_operand (operands[2], <ssequartermode>mode))
+- return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
++ return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
+ : "vmovdqu\t{%2, %x0|%x0, %2}";
+- else
+- return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}"
+- : "vmovdqa\t{%2, %x0|%x0, %2}";
+ case E_V16SImode:
+- if (misaligned_operand (operands[2], <ssequartermode>mode))
+- return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
++ return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
+ : "vmovdqu\t{%2, %x0|%x0, %2}";
+- else
+- return which_alternative == 2 ? "vmovdqa32\t{%2, %x0|%x0, %2}"
+- : "vmovdqa\t{%2, %x0|%x0, %2}";
+ default:
+ gcc_unreachable ();
+ }
+@@ -21225,63 +21199,27 @@
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V16SF:
+- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
+- return "vmovups\t{%1, %t0|%t0, %1}";
+- else
+- return "vmovaps\t{%1, %t0|%t0, %1}";
++ return "vmovups\t{%1, %t0|%t0, %1}";
+ case MODE_V8DF:
+- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
+- return "vmovupd\t{%1, %t0|%t0, %1}";
+- else
+- return "vmovapd\t{%1, %t0|%t0, %1}";
++ return "vmovupd\t{%1, %t0|%t0, %1}";
+ case MODE_V8SF:
+- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
+- return "vmovups\t{%1, %x0|%x0, %1}";
+- else
+- return "vmovaps\t{%1, %x0|%x0, %1}";
++ return "vmovups\t{%1, %x0|%x0, %1}";
+ case MODE_V4DF:
+- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
+- return "vmovupd\t{%1, %x0|%x0, %1}";
+- else
+- return "vmovapd\t{%1, %x0|%x0, %1}";
++ return "vmovupd\t{%1, %x0|%x0, %1}";
+ case MODE_XI:
+- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
+- {
+- if (which_alternative == 2)
+- return "vmovdqu\t{%1, %t0|%t0, %1}";
+- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+- return "vmovdqu64\t{%1, %t0|%t0, %1}";
+- else
+- return "vmovdqu32\t{%1, %t0|%t0, %1}";
+- }
++ if (which_alternative == 2)
++ return "vmovdqu\t{%1, %t0|%t0, %1}";
++ else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
++ return "vmovdqu64\t{%1, %t0|%t0, %1}";
+ else
+- {
+- if (which_alternative == 2)
+- return "vmovdqa\t{%1, %t0|%t0, %1}";
+- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+- return "vmovdqa64\t{%1, %t0|%t0, %1}";
+- else
+- return "vmovdqa32\t{%1, %t0|%t0, %1}";
+- }
++ return "vmovdqu32\t{%1, %t0|%t0, %1}";
+ case MODE_OI:
+- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
+- {
+- if (which_alternative == 2)
+- return "vmovdqu\t{%1, %x0|%x0, %1}";
+- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+- return "vmovdqu64\t{%1, %x0|%x0, %1}";
+- else
+- return "vmovdqu32\t{%1, %x0|%x0, %1}";
+- }
++ if (which_alternative == 2)
++ return "vmovdqu\t{%1, %x0|%x0, %1}";
++ else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
++ return "vmovdqu64\t{%1, %x0|%x0, %1}";
+ else
+- {
+- if (which_alternative == 2)
+- return "vmovdqa\t{%1, %x0|%x0, %1}";
+- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+- return "vmovdqa64\t{%1, %x0|%x0, %1}";
+- else
+- return "vmovdqa32\t{%1, %x0|%x0, %1}";
+- }
++ return "vmovdqu32\t{%1, %x0|%x0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+--- a/gcc/config/i386/i386.c
++++ b/gcc/config/i386/i386.c
+@@ -4981,13 +4981,13 @@
+ switch (type)
+ {
+ case opcode_int:
+- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
++ opcode = "vmovdqu32";
+ break;
+ case opcode_float:
+- opcode = misaligned_p ? "vmovups" : "vmovaps";
++ opcode = "vmovups";
+ break;
+ case opcode_double:
+- opcode = misaligned_p ? "vmovupd" : "vmovapd";
++ opcode = "vmovupd";
+ break;
+ }
+ }
+@@ -4996,16 +4996,16 @@
+ switch (scalar_mode)
+ {
+ case E_SFmode:
+- opcode = misaligned_p ? "%vmovups" : "%vmovaps";
++ opcode = "%vmovups";
+ break;
+ case E_DFmode:
+- opcode = misaligned_p ? "%vmovupd" : "%vmovapd";
++ opcode = "%vmovupd";
+ break;
+ case E_TFmode:
+ if (evex_reg_p)
+- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
++ opcode = "vmovdqu64";
+ else
+- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
++ opcode = "%vmovdqu";
+ break;
+ default:
+ gcc_unreachable ();
+@@ -5017,48 +5017,32 @@
+ {
+ case E_QImode:
+ if (evex_reg_p)
+- opcode = (misaligned_p
+- ? (TARGET_AVX512BW
+- ? "vmovdqu8"
+- : "vmovdqu64")
+- : "vmovdqa64");
++ opcode = TARGET_AVX512BW ? "vmovdqu8" : "vmovdqu64";
+ else
+- opcode = (misaligned_p
+- ? (TARGET_AVX512BW
+- ? "vmovdqu8"
+- : "%vmovdqu")
+- : "%vmovdqa");
++ opcode = TARGET_AVX512BW ? "vmovdqu8" : "%vmovdqu";
+ break;
+ case E_HImode:
+ if (evex_reg_p)
+- opcode = (misaligned_p
+- ? (TARGET_AVX512BW
+- ? "vmovdqu16"
+- : "vmovdqu64")
+- : "vmovdqa64");
++ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64";
+ else
+- opcode = (misaligned_p
+- ? (TARGET_AVX512BW
+- ? "vmovdqu16"
+- : "%vmovdqu")
+- : "%vmovdqa");
++ opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu";
+ break;
+ case E_SImode:
+ if (evex_reg_p)
+- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
++ opcode = "vmovdqu32";
+ else
+- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
++ opcode = "%vmovdqu";
+ break;
+ case E_DImode:
+ case E_TImode:
+ case E_OImode:
+ if (evex_reg_p)
+- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
++ opcode = "vmovdqu64";
+ else
+- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
++ opcode = "%vmovdqu";
+ break;
+ case E_XImode:
+- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
++ opcode = "vmovdqu64";
+ break;
+ default:
+ gcc_unreachable ();