-rw-r--r--  contrib/guix/manifest.scm                                 9
-rw-r--r--  contrib/guix/patches/binutils-unaligned-default.patch    22
-rw-r--r--  contrib/guix/patches/vmov-alignment.patch                288
3 files changed, 28 insertions(+), 291 deletions(-)
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 41a87eec57..8f13c642d3 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -110,12 +110,15 @@ desirable for building Bitcoin Core release binaries."
(define (gcc-mingw-patches gcc)
(package-with-extra-patches gcc
- (search-our-patches "gcc-remap-guix-store.patch"
- "vmov-alignment.patch")))
+ (search-our-patches "gcc-remap-guix-store.patch")))
+
+(define (binutils-mingw-patches binutils)
+ (package-with-extra-patches binutils
+ (search-our-patches "binutils-unaligned-default.patch")))

(define (make-mingw-pthreads-cross-toolchain target)
"Create a cross-compilation toolchain package for TARGET"
- (let* ((xbinutils (cross-binutils target))
+ (let* ((xbinutils (binutils-mingw-patches (cross-binutils target)))
(pthreads-xlibc mingw-w64-x86_64-winpthreads)
(pthreads-xgcc (cross-gcc target
#:xgcc (gcc-mingw-patches mingw-w64-base-gcc)
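
For context on why a patch is needed at all: the Windows x64 ABI only guarantees 16-byte stack alignment, while 32-byte AVX values moved with an aligned instruction (vmovaps/vmovdqa) fault unless their operand is 32-byte aligned; the unaligned forms (vmovups/vmovdqu) accept both and cost the same on aligned data (see the Debian and GCC bug links in the deleted patch below). A minimal C sketch of the kind of code affected, assuming an AVX-enabled mingw-w64 build; the function name is illustrative:

#include <immintrin.h>

/* Under register pressure the 32-byte __m256d temporaries below get
   spilled to the stack; a spill encoded as vmovaps faults when the
   slot is only 16-byte aligned, while vmovups never does. */
__m256d sum4(const double *a, const double *b)
{
    __m256d va = _mm256_loadu_pd(a);   /* explicit unaligned loads are safe */
    __m256d vb = _mm256_loadu_pd(b);
    return _mm256_add_pd(va, vb);
}
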
diff --git a/contrib/guix/patches/binutils-unaligned-default.patch b/contrib/guix/patches/binutils-unaligned-default.patch
new file mode 100644
index 0000000000..d1bc71aee1
--- /dev/null
+++ b/contrib/guix/patches/binutils-unaligned-default.patch
@@ -0,0 +1,22 @@
+commit 6537181f59ed186a341db621812a6bc35e22eaf6
+Author: fanquake <fanquake@gmail.com>
+Date: Wed Apr 10 12:15:52 2024 +0200
+
+ build: turn on -muse-unaligned-vector-move by default
+
+ This allows us to avoid (more invasively) patching GCC to prevent
+ the use of aligned vector move instructions.
+
+diff --git a/gas/config/tc-i386.c b/gas/config/tc-i386.c
+index e0632681477..14a9653abdf 100644
+--- a/gas/config/tc-i386.c
++++ b/gas/config/tc-i386.c
+@@ -801,7 +801,7 @@ static unsigned int no_cond_jump_promotion = 0;
+ static unsigned int sse2avx;
+
+ /* Encode aligned vector move as unaligned vector move. */
+-static unsigned int use_unaligned_vector_move;
++static unsigned int use_unaligned_vector_move = 1;
+
+ /* Encode scalar AVX instructions with specific vector length. */
+ static enum
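
The one-line default flip above is equivalent to passing -muse-unaligned-vector-move on every gas invocation: the assembler still accepts the aligned mnemonics but emits the unaligned encodings. A sketch of the observable effect, again assuming an AVX-enabled build (identifiers are illustrative):

#include <immintrin.h>

void copy32(double *dst, const double *src)
{
    __m256d v;
    /* We write the aligned mnemonic, but with the patched binutils the
       bytes assembled are those of vmovups, so this no longer faults
       when src or dst is not 32-byte aligned. */
    __asm__ ("vmovaps (%1), %0" : "=x"(v) : "r"(src) : "memory");
    __asm__ ("vmovaps %1, (%0)" : : "r"(dst), "x"(v) : "memory");
}
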
diff --git a/contrib/guix/patches/vmov-alignment.patch b/contrib/guix/patches/vmov-alignment.patch
deleted file mode 100644
index 96e1cb7cd1..0000000000
--- a/contrib/guix/patches/vmov-alignment.patch
+++ /dev/null
@@ -1,288 +0,0 @@
-Description: Use unaligned VMOV instructions
-Author: Stephen Kitt <skitt@debian.org>
-Bug-Debian: https://bugs.debian.org/939559
-See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
-
-Based on a patch originally by Claude Heiland-Allen <claude@mathr.co.uk>
-
---- a/gcc/config/i386/sse.md
-+++ b/gcc/config/i386/sse.md
-@@ -1058,17 +1058,11 @@
- {
- if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
- {
-- if (misaligned_operand (operands[1], <MODE>mode))
-- return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
-- else
-- return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
-+ return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
- }
- else
- {
-- if (misaligned_operand (operands[1], <MODE>mode))
-- return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
-- else
-- return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
-+ return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
- }
- }
- [(set_attr "type" "ssemov")
-@@ -1184,17 +1178,11 @@
- {
- if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
- {
-- if (misaligned_operand (operands[0], <MODE>mode))
-- return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
-- else
-- return "vmova<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
-+ return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
- }
- else
- {
-- if (misaligned_operand (operands[0], <MODE>mode))
-- return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
-- else
-- return "vmovdqa<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
-+ return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
- }
- }
- [(set_attr "type" "ssemov")
-@@ -7806,7 +7794,7 @@
- "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "@
- %vmovlps\t{%1, %0|%q0, %1}
-- %vmovaps\t{%1, %0|%0, %1}
-+ %vmovups\t{%1, %0|%0, %1}
- %vmovlps\t{%1, %d0|%d0, %q1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "maybe_vex")
-@@ -13997,29 +13985,15 @@
- switch (<MODE>mode)
- {
- case E_V8DFmode:
-- if (misaligned_operand (operands[2], <ssequartermode>mode))
-- return "vmovupd\t{%2, %x0|%x0, %2}";
-- else
-- return "vmovapd\t{%2, %x0|%x0, %2}";
-+ return "vmovupd\t{%2, %x0|%x0, %2}";
- case E_V16SFmode:
-- if (misaligned_operand (operands[2], <ssequartermode>mode))
-- return "vmovups\t{%2, %x0|%x0, %2}";
-- else
-- return "vmovaps\t{%2, %x0|%x0, %2}";
-+ return "vmovups\t{%2, %x0|%x0, %2}";
- case E_V8DImode:
-- if (misaligned_operand (operands[2], <ssequartermode>mode))
-- return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
-+ return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
- : "vmovdqu\t{%2, %x0|%x0, %2}";
-- else
-- return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}"
-- : "vmovdqa\t{%2, %x0|%x0, %2}";
- case E_V16SImode:
-- if (misaligned_operand (operands[2], <ssequartermode>mode))
-- return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
-+ return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
- : "vmovdqu\t{%2, %x0|%x0, %2}";
-- else
-- return which_alternative == 2 ? "vmovdqa32\t{%2, %x0|%x0, %2}"
-- : "vmovdqa\t{%2, %x0|%x0, %2}";
- default:
- gcc_unreachable ();
- }
-@@ -21225,63 +21199,27 @@
- switch (get_attr_mode (insn))
- {
- case MODE_V16SF:
-- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-- return "vmovups\t{%1, %t0|%t0, %1}";
-- else
-- return "vmovaps\t{%1, %t0|%t0, %1}";
-+ return "vmovups\t{%1, %t0|%t0, %1}";
- case MODE_V8DF:
-- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-- return "vmovupd\t{%1, %t0|%t0, %1}";
-- else
-- return "vmovapd\t{%1, %t0|%t0, %1}";
-+ return "vmovupd\t{%1, %t0|%t0, %1}";
- case MODE_V8SF:
-- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-- return "vmovups\t{%1, %x0|%x0, %1}";
-- else
-- return "vmovaps\t{%1, %x0|%x0, %1}";
-+ return "vmovups\t{%1, %x0|%x0, %1}";
- case MODE_V4DF:
-- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-- return "vmovupd\t{%1, %x0|%x0, %1}";
-- else
-- return "vmovapd\t{%1, %x0|%x0, %1}";
-+ return "vmovupd\t{%1, %x0|%x0, %1}";
- case MODE_XI:
-- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-- {
-- if (which_alternative == 2)
-- return "vmovdqu\t{%1, %t0|%t0, %1}";
-- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-- return "vmovdqu64\t{%1, %t0|%t0, %1}";
-- else
-- return "vmovdqu32\t{%1, %t0|%t0, %1}";
-- }
-+ if (which_alternative == 2)
-+ return "vmovdqu\t{%1, %t0|%t0, %1}";
-+ else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-+ return "vmovdqu64\t{%1, %t0|%t0, %1}";
- else
-- {
-- if (which_alternative == 2)
-- return "vmovdqa\t{%1, %t0|%t0, %1}";
-- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-- return "vmovdqa64\t{%1, %t0|%t0, %1}";
-- else
-- return "vmovdqa32\t{%1, %t0|%t0, %1}";
-- }
-+ return "vmovdqu32\t{%1, %t0|%t0, %1}";
- case MODE_OI:
-- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-- {
-- if (which_alternative == 2)
-- return "vmovdqu\t{%1, %x0|%x0, %1}";
-- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-- return "vmovdqu64\t{%1, %x0|%x0, %1}";
-- else
-- return "vmovdqu32\t{%1, %x0|%x0, %1}";
-- }
-+ if (which_alternative == 2)
-+ return "vmovdqu\t{%1, %x0|%x0, %1}";
-+ else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-+ return "vmovdqu64\t{%1, %x0|%x0, %1}";
- else
-- {
-- if (which_alternative == 2)
-- return "vmovdqa\t{%1, %x0|%x0, %1}";
-- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-- return "vmovdqa64\t{%1, %x0|%x0, %1}";
-- else
-- return "vmovdqa32\t{%1, %x0|%x0, %1}";
-- }
-+ return "vmovdqu32\t{%1, %x0|%x0, %1}";
- default:
- gcc_unreachable ();
- }
---- a/gcc/config/i386/i386.cc
-+++ b/gcc/config/i386/i386.cc
-@@ -5418,17 +5418,15 @@ ix86_get_ssemov (rtx *operands, unsigned size,
- {
- case opcode_int:
- if (scalar_mode == E_HFmode)
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64")
-- : "vmovdqa64");
-+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64";
- else
-- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
-+ opcode = "vmovdqu32";
- break;
- case opcode_float:
-- opcode = misaligned_p ? "vmovups" : "vmovaps";
-+ opcode = "vmovups";
- break;
- case opcode_double:
-- opcode = misaligned_p ? "vmovupd" : "vmovapd";
-+ opcode = "vmovupd";
- break;
- }
- }
-@@ -5438,29 +5436,21 @@ ix86_get_ssemov (rtx *operands, unsigned size,
- {
- case E_HFmode:
- if (evex_reg_p)
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW
-- ? "vmovdqu16"
-- : "vmovdqu64")
-- : "vmovdqa64");
-+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64";
- else
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW
-- ? "vmovdqu16"
-- : "%vmovdqu")
-- : "%vmovdqa");
-+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu";
- break;
- case E_SFmode:
-- opcode = misaligned_p ? "%vmovups" : "%vmovaps";
-+ opcode = "%vmovups";
- break;
- case E_DFmode:
-- opcode = misaligned_p ? "%vmovupd" : "%vmovapd";
-+ opcode = "%vmovupd";
- break;
- case E_TFmode:
- if (evex_reg_p)
-- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
-+ opcode = "vmovdqu64";
- else
-- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
-+ opcode = "%vmovdqu";
- break;
- default:
- gcc_unreachable ();
-@@ -5472,48 +5462,32 @@ ix86_get_ssemov (rtx *operands, unsigned size,
- {
- case E_QImode:
- if (evex_reg_p)
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW
-- ? "vmovdqu8"
-- : "vmovdqu64")
-- : "vmovdqa64");
-+ opcode = TARGET_AVX512BW ? "vmovdqu8" : "vmovdqu64";
- else
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW
-- ? "vmovdqu8"
-- : "%vmovdqu")
-- : "%vmovdqa");
-+ opcode = TARGET_AVX512BW ? "vmovdqu8" : "%vmovdqu";
- break;
- case E_HImode:
- if (evex_reg_p)
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW
-- ? "vmovdqu16"
-- : "vmovdqu64")
-- : "vmovdqa64");
-+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64";
- else
-- opcode = (misaligned_p
-- ? (TARGET_AVX512BW
-- ? "vmovdqu16"
-- : "%vmovdqu")
-- : "%vmovdqa");
-+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu";
- break;
- case E_SImode:
- if (evex_reg_p)
-- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
-+ opcode = "vmovdqu32";
- else
-- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
-+ opcode = "%vmovdqu";
- break;
- case E_DImode:
- case E_TImode:
- case E_OImode:
- if (evex_reg_p)
-- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
-+ opcode = "vmovdqu64";
- else
-- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
-+ opcode = "%vmovdqu";
- break;
- case E_XImode:
-- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
-+ opcode = "vmovdqu64";
- break;
- default:
- gcc_unreachable ();
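
For comparison, the GCC patch removed above performed the same substitution one layer up: ix86_get_ssemov and the sse.md templates normally pick between aligned and unaligned opcodes per operand via misaligned_operand/misaligned_p, and the patch hard-wired the unaligned branch. A plain-C paraphrase of that selection, with illustrative names rather than GCC internals:

#include <stdbool.h>

/* Stock GCC chooses by alignment; the deleted patch collapsed this to
   always return the unaligned mnemonic. The binutils default now gets
   the same effect at assembly time, leaving GCC itself unpatched. */
static const char *float_mov_opcode(bool misaligned_p)
{
    return misaligned_p ? "vmovups" : "vmovaps";
}

Doing the rewrite in the assembler rather than the compiler is the maintenance win the commit message points to: a large, version-specific GCC patch is replaced by a one-line binutils default.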