Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/aarch64/Makefile                                 |    4
-rw-r--r--  sysdeps/aarch64/chacha20-aarch64.S                       |  314
-rw-r--r--  sysdeps/aarch64/chacha20_arch.h                          |   40
-rw-r--r--  sysdeps/generic/chacha20_arch.h                          |   24
-rw-r--r--  sysdeps/generic/not-cancel.h                             |    3
-rw-r--r--  sysdeps/generic/tls-internal-struct.h                    |    1
-rw-r--r--  sysdeps/generic/tls-internal.c                           |   10
-rw-r--r--  sysdeps/mach/hurd/_Fork.c                                |    2
-rw-r--r--  sysdeps/mach/hurd/not-cancel.h                           |    4
-rw-r--r--  sysdeps/nptl/_Fork.c                                     |    2
-rw-r--r--  sysdeps/powerpc/powerpc64/be/multiarch/Makefile          |    4
-rw-r--r--  sysdeps/powerpc/powerpc64/be/multiarch/chacha20-ppc.c    |    1
-rw-r--r--  sysdeps/powerpc/powerpc64/be/multiarch/chacha20_arch.h   |   42
-rw-r--r--  sysdeps/powerpc/powerpc64/power8/Makefile                |    5
-rw-r--r--  sysdeps/powerpc/powerpc64/power8/chacha20-ppc.c          |  256
-rw-r--r--  sysdeps/powerpc/powerpc64/power8/chacha20_arch.h         |   37
-rw-r--r--  sysdeps/s390/s390-64/Makefile                            |    6
-rw-r--r--  sysdeps/s390/s390-64/chacha20-s390x.S                    |  573
-rw-r--r--  sysdeps/s390/s390-64/chacha20_arch.h                     |   45
-rw-r--r--  sysdeps/unix/sysv/linux/not-cancel.h                     |    8
-rw-r--r--  sysdeps/unix/sysv/linux/tls-internal.c                   |   10
-rw-r--r--  sysdeps/unix/sysv/linux/tls-internal.h                   |    1
-rw-r--r--  sysdeps/x86_64/Makefile                                  |    7
-rw-r--r--  sysdeps/x86_64/chacha20-amd64-avx2.S                     |  328
-rw-r--r--  sysdeps/x86_64/chacha20-amd64-sse2.S                     |  311
-rw-r--r--  sysdeps/x86_64/chacha20_arch.h                           |   55
26 files changed, 14 insertions, 2079 deletions
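
Note on the not-cancel.h changes: alongside removing the per-architecture
ChaCha20 backends, this diff adds a __poll_infinity_nocancel helper to the
generic, Hurd, and Linux not-cancel.h headers. The following is a minimal
sketch of how a caller might use it; it is not glibc source. The function
name wait_for_input is a hypothetical example, and the sketch assumes
glibc's internal <not-cancel.h> is in scope.

#include <poll.h>
#include <not-cancel.h>  /* internal glibc header providing the macro */

/* Block until FD is readable, with an infinite timeout, without
   introducing a thread-cancellation point.  Per the hunks below, the
   macro expands to __poll (fds, nfds, -1) generically and to a direct
   ppoll syscall on Linux.  */
static int
wait_for_input (int fd)
{
  struct pollfd pfd = { .fd = fd, .events = POLLIN };
  return __poll_infinity_nocancel (&pfd, 1);
}
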
diff --git a/sysdeps/aarch64/Makefile b/sysdeps/aarch64/Makefile
index 7dfd1b62dd..17fb1c5b72 100644
--- a/sysdeps/aarch64/Makefile
+++ b/sysdeps/aarch64/Makefile
@@ -51,10 +51,6 @@ ifeq ($(subdir),csu)
gen-as-const-headers += tlsdesc.sym
endif
-ifeq ($(subdir),stdlib)
-sysdep_routines += chacha20-aarch64
-endif
-
ifeq ($(subdir),gmon)
CFLAGS-mcount.c += -mgeneral-regs-only
endif
diff --git a/sysdeps/aarch64/chacha20-aarch64.S b/sysdeps/aarch64/chacha20-aarch64.S
deleted file mode 100644
index cce5291c5c..0000000000
--- a/sysdeps/aarch64/chacha20-aarch64.S
+++ /dev/null
@@ -1,314 +0,0 @@
-/* Optimized AArch64 implementation of ChaCha20 cipher.
- Copyright (C) 2022 Free Software Foundation, Inc.
-
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-/* Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
-
- This file is part of Libgcrypt.
-
- Libgcrypt is free software; you can redistribute it and/or modify
- it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of
- the License, or (at your option) any later version.
-
- Libgcrypt is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, see <https://www.gnu.org/licenses/>.
- */
-
-/* Based on D. J. Bernstein reference implementation at
- http://cr.yp.to/chacha.html:
-
- chacha-regs.c version 20080118
- D. J. Bernstein
- Public domain. */
-
-#include <sysdep.h>
-
-/* Only LE is supported. */
-#ifdef __AARCH64EL__
-
-#define GET_DATA_POINTER(reg, name) \
- adrp reg, name ; \
- add reg, reg, :lo12:name
-
-/* 'ret' instruction replacement for straight-line speculation mitigation */
-#define ret_spec_stop \
- ret; dsb sy; isb;
-
-.cpu generic+simd
-
-.text
-
-/* register macros */
-#define INPUT x0
-#define DST x1
-#define SRC x2
-#define NBLKS x3
-#define ROUND x4
-#define INPUT_CTR x5
-#define INPUT_POS x6
-#define CTR x7
-
-/* vector registers */
-#define X0 v16
-#define X4 v17
-#define X8 v18
-#define X12 v19
-
-#define X1 v20
-#define X5 v21
-
-#define X9 v22
-#define X13 v23
-#define X2 v24
-#define X6 v25
-
-#define X3 v26
-#define X7 v27
-#define X11 v28
-#define X15 v29
-
-#define X10 v30
-#define X14 v31
-
-#define VCTR v0
-#define VTMP0 v1
-#define VTMP1 v2
-#define VTMP2 v3
-#define VTMP3 v4
-#define X12_TMP v5
-#define X13_TMP v6
-#define ROT8 v7
-
-/**********************************************************************
- helper macros
- **********************************************************************/
-
-#define _(...) __VA_ARGS__
-
-#define vpunpckldq(s1, s2, dst) \
- zip1 dst.4s, s2.4s, s1.4s;
-
-#define vpunpckhdq(s1, s2, dst) \
- zip2 dst.4s, s2.4s, s1.4s;
-
-#define vpunpcklqdq(s1, s2, dst) \
- zip1 dst.2d, s2.2d, s1.2d;
-
-#define vpunpckhqdq(s1, s2, dst) \
- zip2 dst.2d, s2.2d, s1.2d;
-
-/* 4x4 32-bit integer matrix transpose */
-#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
- vpunpckhdq(x1, x0, t2); \
- vpunpckldq(x1, x0, x0); \
- \
- vpunpckldq(x3, x2, t1); \
- vpunpckhdq(x3, x2, x2); \
- \
- vpunpckhqdq(t1, x0, x1); \
- vpunpcklqdq(t1, x0, x0); \
- \
- vpunpckhqdq(x2, t2, x3); \
- vpunpcklqdq(x2, t2, x2);
-
-/**********************************************************************
- 4-way chacha20
- **********************************************************************/
-
-#define XOR(d,s1,s2) \
- eor d.16b, s2.16b, s1.16b;
-
-#define PLUS(ds,s) \
- add ds.4s, ds.4s, s.4s;
-
-#define ROTATE4(dst1,dst2,dst3,dst4,c,src1,src2,src3,src4) \
- shl dst1.4s, src1.4s, #(c); \
- shl dst2.4s, src2.4s, #(c); \
- shl dst3.4s, src3.4s, #(c); \
- shl dst4.4s, src4.4s, #(c); \
- sri dst1.4s, src1.4s, #(32 - (c)); \
- sri dst2.4s, src2.4s, #(32 - (c)); \
- sri dst3.4s, src3.4s, #(32 - (c)); \
- sri dst4.4s, src4.4s, #(32 - (c));
-
-#define ROTATE4_8(dst1,dst2,dst3,dst4,src1,src2,src3,src4) \
- tbl dst1.16b, {src1.16b}, ROT8.16b; \
- tbl dst2.16b, {src2.16b}, ROT8.16b; \
- tbl dst3.16b, {src3.16b}, ROT8.16b; \
- tbl dst4.16b, {src4.16b}, ROT8.16b;
-
-#define ROTATE4_16(dst1,dst2,dst3,dst4,src1,src2,src3,src4) \
- rev32 dst1.8h, src1.8h; \
- rev32 dst2.8h, src2.8h; \
- rev32 dst3.8h, src3.8h; \
- rev32 dst4.8h, src4.8h;
-
-#define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4,ign,tmp1,tmp2,tmp3,tmp4) \
- PLUS(a1,b1); PLUS(a2,b2); \
- PLUS(a3,b3); PLUS(a4,b4); \
- XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); \
- XOR(tmp3,d3,a3); XOR(tmp4,d4,a4); \
- ROTATE4_16(d1, d2, d3, d4, tmp1, tmp2, tmp3, tmp4); \
- PLUS(c1,d1); PLUS(c2,d2); \
- PLUS(c3,d3); PLUS(c4,d4); \
- XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); \
- XOR(tmp3,b3,c3); XOR(tmp4,b4,c4); \
- ROTATE4(b1, b2, b3, b4, 12, tmp1, tmp2, tmp3, tmp4) \
- PLUS(a1,b1); PLUS(a2,b2); \
- PLUS(a3,b3); PLUS(a4,b4); \
- XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); \
- XOR(tmp3,d3,a3); XOR(tmp4,d4,a4); \
- ROTATE4_8(d1, d2, d3, d4, tmp1, tmp2, tmp3, tmp4) \
- PLUS(c1,d1); PLUS(c2,d2); \
- PLUS(c3,d3); PLUS(c4,d4); \
- XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); \
- XOR(tmp3,b3,c3); XOR(tmp4,b4,c4); \
- ROTATE4(b1, b2, b3, b4, 7, tmp1, tmp2, tmp3, tmp4) \
-
-.align 4
-L(__chacha20_blocks4_data_inc_counter):
- .long 0,1,2,3
-
-.align 4
-L(__chacha20_blocks4_data_rot8):
- .byte 3,0,1,2
- .byte 7,4,5,6
- .byte 11,8,9,10
- .byte 15,12,13,14
-
-.hidden __chacha20_neon_blocks4
-ENTRY (__chacha20_neon_blocks4)
- /* input:
- * x0: input
- * x1: dst
- * x2: src
- * x3: nblks (multiple of 4)
- */
-
- GET_DATA_POINTER(CTR, L(__chacha20_blocks4_data_rot8))
- add INPUT_CTR, INPUT, #(12*4);
- ld1 {ROT8.16b}, [CTR];
- GET_DATA_POINTER(CTR, L(__chacha20_blocks4_data_inc_counter))
- mov INPUT_POS, INPUT;
- ld1 {VCTR.16b}, [CTR];
-
-L(loop4):
- /* Construct counter vectors X12 and X13 */
-
- ld1 {X15.16b}, [INPUT_CTR];
- mov ROUND, #20;
- ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS];
-
- dup X12.4s, X15.s[0];
- dup X13.4s, X15.s[1];
- ldr CTR, [INPUT_CTR];
- add X12.4s, X12.4s, VCTR.4s;
- dup X0.4s, VTMP1.s[0];
- dup X1.4s, VTMP1.s[1];
- dup X2.4s, VTMP1.s[2];
- dup X3.4s, VTMP1.s[3];
- dup X14.4s, X15.s[2];
- cmhi VTMP0.4s, VCTR.4s, X12.4s;
- dup X15.4s, X15.s[3];
- add CTR, CTR, #4; /* Update counter */
- dup X4.4s, VTMP2.s[0];
- dup X5.4s, VTMP2.s[1];
- dup X6.4s, VTMP2.s[2];
- dup X7.4s, VTMP2.s[3];
- sub X13.4s, X13.4s, VTMP0.4s;
- dup X8.4s, VTMP3.s[0];
- dup X9.4s, VTMP3.s[1];
- dup X10.4s, VTMP3.s[2];
- dup X11.4s, VTMP3.s[3];
- mov X12_TMP.16b, X12.16b;
- mov X13_TMP.16b, X13.16b;
- str CTR, [INPUT_CTR];
-
-L(round2):
- subs ROUND, ROUND, #2
- QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13,
- X2, X6, X10, X14, X3, X7, X11, X15,
- tmp:=,VTMP0,VTMP1,VTMP2,VTMP3)
- QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12,
- X2, X7, X8, X13, X3, X4, X9, X14,
- tmp:=,VTMP0,VTMP1,VTMP2,VTMP3)
- b.ne L(round2);
-
- ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32;
-
- PLUS(X12, X12_TMP); /* INPUT + 12 * 4 + counter */
- PLUS(X13, X13_TMP); /* INPUT + 13 * 4 + counter */
-
- dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */
- dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */
- dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */
- dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */
- PLUS(X0, VTMP2);
- PLUS(X1, VTMP3);
- PLUS(X2, X12_TMP);
- PLUS(X3, X13_TMP);
-
- dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */
- dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */
- dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */
- dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */
- ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS];
- mov INPUT_POS, INPUT;
- PLUS(X4, VTMP2);
- PLUS(X5, VTMP3);
- PLUS(X6, X12_TMP);
- PLUS(X7, X13_TMP);
-
- dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */
- dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */
- dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */
- dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */
- dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */
- dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */
- PLUS(X8, VTMP2);
- PLUS(X9, VTMP3);
- PLUS(X10, X12_TMP);
- PLUS(X11, X13_TMP);
- PLUS(X14, VTMP0);
- PLUS(X15, VTMP1);
-
- transpose_4x4(X0, X1, X2, X3, VTMP0, VTMP1, VTMP2);
- transpose_4x4(X4, X5, X6, X7, VTMP0, VTMP1, VTMP2);
- transpose_4x4(X8, X9, X10, X11, VTMP0, VTMP1, VTMP2);
- transpose_4x4(X12, X13, X14, X15, VTMP0, VTMP1, VTMP2);
-
- subs NBLKS, NBLKS, #4;
-
- st1 {X0.16b,X4.16B,X8.16b, X12.16b}, [DST], #64
- st1 {X1.16b,X5.16b}, [DST], #32;
- st1 {X9.16b, X13.16b, X2.16b, X6.16b}, [DST], #64
- st1 {X10.16b,X14.16b}, [DST], #32;
- st1 {X3.16b, X7.16b, X11.16b, X15.16b}, [DST], #64;
-
- b.ne L(loop4);
-
- ret_spec_stop
-END (__chacha20_neon_blocks4)
-
-#endif
diff --git a/sysdeps/aarch64/chacha20_arch.h b/sysdeps/aarch64/chacha20_arch.h
deleted file mode 100644
index 37dbb917f1..0000000000
--- a/sysdeps/aarch64/chacha20_arch.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Chacha20 implementation, used on arc4random.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#include <ldsodefs.h>
-#include <stdbool.h>
-
-unsigned int __chacha20_neon_blocks4 (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t nblks)
- attribute_hidden;
-
-static void
-chacha20_crypt (uint32_t *state, uint8_t *dst, const uint8_t *src,
- size_t bytes)
-{
- _Static_assert (CHACHA20_BUFSIZE % 4 == 0,
- "CHACHA20_BUFSIZE not multiple of 4");
- _Static_assert (CHACHA20_BUFSIZE > CHACHA20_BLOCK_SIZE * 4,
- "CHACHA20_BUFSIZE <= CHACHA20_BLOCK_SIZE * 4");
-#ifdef __AARCH64EL__
- __chacha20_neon_blocks4 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
-#else
- chacha20_crypt_generic (state, dst, src, bytes);
-#endif
-}
diff --git a/sysdeps/generic/chacha20_arch.h b/sysdeps/generic/chacha20_arch.h
deleted file mode 100644
index 1b4559ccbc..0000000000
--- a/sysdeps/generic/chacha20_arch.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Chacha20 implementation, generic interface for encrypt.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-static inline void
-chacha20_crypt (uint32_t *state, uint8_t *dst, const uint8_t *src,
- size_t bytes)
-{
- chacha20_crypt_generic (state, dst, src, bytes);
-}
diff --git a/sysdeps/generic/not-cancel.h b/sysdeps/generic/not-cancel.h
index acceb9b67f..b5a42c70d6 100644
--- a/sysdeps/generic/not-cancel.h
+++ b/sysdeps/generic/not-cancel.h
@@ -20,6 +20,7 @@
# define NOT_CANCEL_H
#include <fcntl.h>
+#include <poll.h>
#include <unistd.h>
#include <sys/wait.h>
#include <time.h>
@@ -50,5 +51,7 @@
__fcntl64 (fd, cmd, __VA_ARGS__)
#define __getrandom_nocancel(buf, size, flags) \
__getrandom (buf, size, flags)
+#define __poll_infinity_nocancel(fds, nfds) \
+ __poll (fds, nfds, -1)
#endif /* NOT_CANCEL_H */
diff --git a/sysdeps/generic/tls-internal-struct.h b/sysdeps/generic/tls-internal-struct.h
index a91915831b..d76c715a96 100644
--- a/sysdeps/generic/tls-internal-struct.h
+++ b/sysdeps/generic/tls-internal-struct.h
@@ -23,7 +23,6 @@ struct tls_internal_t
{
char *strsignal_buf;
char *strerror_l_buf;
- struct arc4random_state_t *rand_state;
};
#endif
diff --git a/sysdeps/generic/tls-internal.c b/sysdeps/generic/tls-internal.c
index 8a0f37d509..b32b31b5a9 100644
--- a/sysdeps/generic/tls-internal.c
+++ b/sysdeps/generic/tls-internal.c
@@ -16,7 +16,6 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
-#include <stdlib/arc4random.h>
#include <string.h>
#include <tls-internal.h>
@@ -27,13 +26,4 @@ __glibc_tls_internal_free (void)
{
free (__tls_internal.strsignal_buf);
free (__tls_internal.strerror_l_buf);
-
- if (__tls_internal.rand_state != NULL)
- {
- /* Clear any lingering random state prior so if the thread stack is
- cached it won't leak any data. */
- explicit_bzero (__tls_internal.rand_state,
- sizeof (*__tls_internal.rand_state));
- free (__tls_internal.rand_state);
- }
}
diff --git a/sysdeps/mach/hurd/_Fork.c b/sysdeps/mach/hurd/_Fork.c
index 667068c8cf..e60b86fab1 100644
--- a/sysdeps/mach/hurd/_Fork.c
+++ b/sysdeps/mach/hurd/_Fork.c
@@ -662,8 +662,6 @@ retry:
_hurd_malloc_fork_child ();
call_function_static_weak (__malloc_fork_unlock_child);
- call_function_static_weak (__arc4random_fork_subprocess);
-
/* Run things that want to run in the child task to set up. */
RUN_HOOK (_hurd_fork_child_hook, ());
diff --git a/sysdeps/mach/hurd/not-cancel.h b/sysdeps/mach/hurd/not-cancel.h
index 9a3a7ed59a..ae58b734e3 100644
--- a/sysdeps/mach/hurd/not-cancel.h
+++ b/sysdeps/mach/hurd/not-cancel.h
@@ -21,6 +21,7 @@
#include <fcntl.h>
#include <unistd.h>
+#include <poll.h>
#include <sys/wait.h>
#include <time.h>
#include <sys/uio.h>
@@ -77,6 +78,9 @@ __typeof (__fcntl) __fcntl_nocancel;
#define __getrandom_nocancel(buf, size, flags) \
__getrandom (buf, size, flags)
+#define __poll_infinity_nocancel(fds, nfds) \
+ __poll (fds, nfds, -1)
+
#if IS_IN (libc)
hidden_proto (__close_nocancel)
hidden_proto (__close_nocancel_nostatus)
diff --git a/sysdeps/nptl/_Fork.c b/sysdeps/nptl/_Fork.c
index 7dc02569f6..dd568992e2 100644
--- a/sysdeps/nptl/_Fork.c
+++ b/sysdeps/nptl/_Fork.c
@@ -43,8 +43,6 @@ _Fork (void)
self->robust_head.list = &self->robust_head;
INTERNAL_SYSCALL_CALL (set_robust_list, &self->robust_head,
sizeof (struct robust_list_head));
-
- call_function_static_weak (__arc4random_fork_subprocess);
}
return pid;
}
diff --git a/sysdeps/powerpc/powerpc64/be/multiarch/Makefile b/sysdeps/powerpc/powerpc64/be/multiarch/Makefile
deleted file mode 100644
index 8c75165f7f..0000000000
--- a/sysdeps/powerpc/powerpc64/be/multiarch/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-ifeq ($(subdir),stdlib)
-sysdep_routines += chacha20-ppc
-CFLAGS-chacha20-ppc.c += -mcpu=power8
-endif
diff --git a/sysdeps/powerpc/powerpc64/be/multiarch/chacha20-ppc.c b/sysdeps/powerpc/powerpc64/be/multiarch/chacha20-ppc.c
deleted file mode 100644
index cf9e735326..0000000000
--- a/sysdeps/powerpc/powerpc64/be/multiarch/chacha20-ppc.c
+++ /dev/null
@@ -1 +0,0 @@
-#include <sysdeps/powerpc/powerpc64/power8/chacha20-ppc.c>
diff --git a/sysdeps/powerpc/powerpc64/be/multiarch/chacha20_arch.h b/sysdeps/powerpc/powerpc64/be/multiarch/chacha20_arch.h
deleted file mode 100644
index 08494dc045..0000000000
--- a/sysdeps/powerpc/powerpc64/be/multiarch/chacha20_arch.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* PowerPC optimization for ChaCha20.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#include <stdbool.h>
-#include <ldsodefs.h>
-
-unsigned int __chacha20_power8_blocks4 (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t nblks)
- attribute_hidden;
-
-static void
-chacha20_crypt (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t bytes)
-{
- _Static_assert (CHACHA20_BUFSIZE % 4 == 0,
- "CHACHA20_BUFSIZE not multiple of 4");
- _Static_assert (CHACHA20_BUFSIZE >= CHACHA20_BLOCK_SIZE * 4,
- "CHACHA20_BUFSIZE < CHACHA20_BLOCK_SIZE * 4");
-
- unsigned long int hwcap = GLRO(dl_hwcap);
- unsigned long int hwcap2 = GLRO(dl_hwcap2);
- if (hwcap2 & PPC_FEATURE2_ARCH_2_07 && hwcap & PPC_FEATURE_HAS_ALTIVEC)
- __chacha20_power8_blocks4 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
- else
- chacha20_crypt_generic (state, dst, src, bytes);
-}
diff --git a/sysdeps/powerpc/powerpc64/power8/Makefile b/sysdeps/powerpc/powerpc64/power8/Makefile
index abb0aa3f11..71a59529f3 100644
--- a/sysdeps/powerpc/powerpc64/power8/Makefile
+++ b/sysdeps/powerpc/powerpc64/power8/Makefile
@@ -1,8 +1,3 @@
ifeq ($(subdir),string)
sysdep_routines += strcasestr-ppc64
endif
-
-ifeq ($(subdir),stdlib)
-sysdep_routines += chacha20-ppc
-CFLAGS-chacha20-ppc.c += -mcpu=power8
-endif
diff --git a/sysdeps/powerpc/powerpc64/power8/chacha20-ppc.c b/sysdeps/powerpc/powerpc64/power8/chacha20-ppc.c
deleted file mode 100644
index 0bbdcb9363..0000000000
--- a/sysdeps/powerpc/powerpc64/power8/chacha20-ppc.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/* Optimized PowerPC implementation of ChaCha20 cipher.
- Copyright (C) 2022 Free Software Foundation, Inc.
-
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-/* chacha20-ppc.c - PowerPC vector implementation of ChaCha20
- Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
-
- This file is part of Libgcrypt.
-
- Libgcrypt is free software; you can redistribute it and/or modify
- it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of
- the License, or (at your option) any later version.
-
- Libgcrypt is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, see <https://www.gnu.org/licenses/>.
- */
-
-#include <altivec.h>
-#include <endian.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/cdefs.h>
-
-typedef vector unsigned char vector16x_u8;
-typedef vector unsigned int vector4x_u32;
-typedef vector unsigned long long vector2x_u64;
-
-#if __BYTE_ORDER == __BIG_ENDIAN
-static const vector16x_u8 le_bswap_const =
- { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-#endif
-
-static inline vector4x_u32
-vec_rol_elems (vector4x_u32 v, unsigned int idx)
-{
-#if __BYTE_ORDER != __BIG_ENDIAN
- return vec_sld (v, v, (16 - (4 * idx)) & 15);
-#else
- return vec_sld (v, v, (4 * idx) & 15);
-#endif
-}
-
-static inline vector4x_u32
-vec_load_le (unsigned long offset, const unsigned char *ptr)
-{
- vector4x_u32 vec;
- vec = vec_vsx_ld (offset, (const uint32_t *)ptr);
-#if __BYTE_ORDER == __BIG_ENDIAN
- vec = (vector4x_u32) vec_perm ((vector16x_u8)vec, (vector16x_u8)vec,
- le_bswap_const);
-#endif
- return vec;
-}
-
-static inline void
-vec_store_le (vector4x_u32 vec, unsigned long offset, unsigned char *ptr)
-{
-#if __BYTE_ORDER == __BIG_ENDIAN
- vec = (vector4x_u32)vec_perm((vector16x_u8)vec, (vector16x_u8)vec,
- le_bswap_const);
-#endif
- vec_vsx_st (vec, offset, (uint32_t *)ptr);
-}
-
-
-static inline vector4x_u32
-vec_add_ctr_u64 (vector4x_u32 v, vector4x_u32 a)
-{
-#if __BYTE_ORDER == __BIG_ENDIAN
- static const vector16x_u8 swap32 =
- { 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11 };
- vector2x_u64 vec, add, sum;
-
- vec = (vector2x_u64)vec_perm ((vector16x_u8)v, (vector16x_u8)v, swap32);
- add = (vector2x_u64)vec_perm ((vector16x_u8)a, (vector16x_u8)a, swap32);
- sum = vec + add;
- return (vector4x_u32)vec_perm ((vector16x_u8)sum, (vector16x_u8)sum, swap32);
-#else
- return (vector4x_u32)((vector2x_u64)(v) + (vector2x_u64)(a));
-#endif
-}
-
-/**********************************************************************
- 4-way chacha20
- **********************************************************************/
-
-#define ROTATE(v1,rolv) \
- __asm__ ("vrlw %0,%1,%2\n\t" : "=v" (v1) : "v" (v1), "v" (rolv))
-
-#define PLUS(ds,s) \
- ((ds) += (s))
-
-#define XOR(ds,s) \
- ((ds) ^= (s))
-
-#define ADD_U64(v,a) \
- (v = vec_add_ctr_u64(v, a))
-
-/* 4x4 32-bit integer matrix transpose */
-#define transpose_4x4(x0, x1, x2, x3) ({ \
- vector4x_u32 t1 = vec_mergeh(x0, x2); \
- vector4x_u32 t2 = vec_mergel(x0, x2); \
- vector4x_u32 t3 = vec_mergeh(x1, x3); \
- x3 = vec_mergel(x1, x3); \
- x0 = vec_mergeh(t1, t3); \
- x1 = vec_mergel(t1, t3); \
- x2 = vec_mergeh(t2, x3); \
- x3 = vec_mergel(t2, x3); \
- })
-
-#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2) \
- PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
- ROTATE(d1, rotate_16); ROTATE(d2, rotate_16); \
- PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
- ROTATE(b1, rotate_12); ROTATE(b2, rotate_12); \
- PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
- ROTATE(d1, rotate_8); ROTATE(d2, rotate_8); \
- PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
- ROTATE(b1, rotate_7); ROTATE(b2, rotate_7);
-
-unsigned int attribute_hidden
-__chacha20_power8_blocks4 (uint32_t *state, uint8_t *dst, const uint8_t *src,
- size_t nblks)
-{
- vector4x_u32 counters_0123 = { 0, 1, 2, 3 };
- vector4x_u32 counter_4 = { 4, 0, 0, 0 };
- vector4x_u32 rotate_16 = { 16, 16, 16, 16 };
- vector4x_u32 rotate_12 = { 12, 12, 12, 12 };
- vector4x_u32 rotate_8 = { 8, 8, 8, 8 };
- vector4x_u32 rotate_7 = { 7, 7, 7, 7 };
- vector4x_u32 state0, state1, state2, state3;
- vector4x_u32 v0, v1, v2, v3, v4, v5, v6, v7;
- vector4x_u32 v8, v9, v10, v11, v12, v13, v14, v15;
- vector4x_u32 tmp;
- int i;
-
- /* Force preload of constants to vector registers. */
- __asm__ ("": "+v" (counters_0123) :: "memory");
- __asm__ ("": "+v" (counter_4) :: "memory");
- __asm__ ("": "+v" (rotate_16) :: "memory");
- __asm__ ("": "+v" (rotate_12) :: "memory");
- __asm__ ("": "+v" (rotate_8) :: "memory");
- __asm__ ("": "+v" (rotate_7) :: "memory");
-
- state0 = vec_vsx_ld (0 * 16, state);
- state1 = vec_vsx_ld (1 * 16, state);
- state2 = vec_vsx_ld (2 * 16, state);
- state3 = vec_vsx_ld (3 * 16, state);
-
- do
- {
- v0 = vec_splat (state0, 0);
- v1 = vec_splat (state0, 1);
- v2 = vec_splat (state0, 2);
- v3 = vec_splat (state0, 3);
- v4 = vec_splat (state1, 0);
- v5 = vec_splat (state1, 1);
- v6 = vec_splat (state1, 2);
- v7 = vec_splat (state1, 3);
- v8 = vec_splat (state2, 0);
- v9 = vec_splat (state2, 1);
- v10 = vec_splat (state2, 2);
- v11 = vec_splat (state2, 3);
- v12 = vec_splat (state3, 0);
- v13 = vec_splat (state3, 1);
- v14 = vec_splat (state3, 2);
- v15 = vec_splat (state3, 3);
-
- v12 += counters_0123;
- v13 -= vec_cmplt (v12, counters_0123);
-
- for (i = 20; i > 0; i -= 2)
- {
- QUARTERROUND2 (v0, v4, v8, v12, v1, v5, v9, v13)
- QUARTERROUND2 (v2, v6, v10, v14, v3, v7, v11, v15)
- QUARTERROUND2 (v0, v5, v10, v15, v1, v6, v11, v12)
- QUARTERROUND2 (v2, v7, v8, v13, v3, v4, v9, v14)
- }
-
- v0 += vec_splat (state0, 0);
- v1 += vec_splat (state0, 1);
- v2 += vec_splat (state0, 2);
- v3 += vec_splat (state0, 3);
- v4 += vec_splat (state1, 0);
- v5 += vec_splat (state1, 1);
- v6 += vec_splat (state1, 2);
- v7 += vec_splat (state1, 3);
- v8 += vec_splat (state2, 0);
- v9 += vec_splat (state2, 1);
- v10 += vec_splat (state2, 2);
- v11 += vec_splat (state2, 3);
- tmp = vec_splat( state3, 0);
- tmp += counters_0123;
- v12 += tmp;
- v13 += vec_splat (state3, 1) - vec_cmplt (tmp, counters_0123);
- v14 += vec_splat (state3, 2);
- v15 += vec_splat (state3, 3);
- ADD_U64 (state3, counter_4);
-
- transpose_4x4 (v0, v1, v2, v3);
- transpose_4x4 (v4, v5, v6, v7);
- transpose_4x4 (v8, v9, v10, v11);
- transpose_4x4 (v12, v13, v14, v15);
-
- vec_store_le (v0, (64 * 0 + 16 * 0), dst);
- vec_store_le (v1, (64 * 1 + 16 * 0), dst);
- vec_store_le (v2, (64 * 2 + 16 * 0), dst);
- vec_store_le (v3, (64 * 3 + 16 * 0), dst);
-
- vec_store_le (v4, (64 * 0 + 16 * 1), dst);
- vec_store_le (v5, (64 * 1 + 16 * 1), dst);
- vec_store_le (v6, (64 * 2 + 16 * 1), dst);
- vec_store_le (v7, (64 * 3 + 16 * 1), dst);
-
- vec_store_le (v8, (64 * 0 + 16 * 2), dst);
- vec_store_le (v9, (64 * 1 + 16 * 2), dst);
- vec_store_le (v10, (64 * 2 + 16 * 2), dst);
- vec_store_le (v11, (64 * 3 + 16 * 2), dst);
-
- vec_store_le (v12, (64 * 0 + 16 * 3), dst);
- vec_store_le (v13, (64 * 1 + 16 * 3), dst);
- vec_store_le (v14, (64 * 2 + 16 * 3), dst);
- vec_store_le (v15, (64 * 3 + 16 * 3), dst);
-
- src += 4*64;
- dst += 4*64;
-
- nblks -= 4;
- }
- while (nblks);
-
- vec_vsx_st (state3, 3 * 16, state);
-
- return 0;
-}
diff --git a/sysdeps/powerpc/powerpc64/power8/chacha20_arch.h b/sysdeps/powerpc/powerpc64/power8/chacha20_arch.h
deleted file mode 100644
index ded06762b6..0000000000
--- a/sysdeps/powerpc/powerpc64/power8/chacha20_arch.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* PowerPC optimization for ChaCha20.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#include <stdbool.h>
-#include <ldsodefs.h>
-
-unsigned int __chacha20_power8_blocks4 (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t nblks)
- attribute_hidden;
-
-static void
-chacha20_crypt (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t bytes)
-{
- _Static_assert (CHACHA20_BUFSIZE % 4 == 0,
- "CHACHA20_BUFSIZE not multiple of 4");
- _Static_assert (CHACHA20_BUFSIZE >= CHACHA20_BLOCK_SIZE * 4,
- "CHACHA20_BUFSIZE < CHACHA20_BLOCK_SIZE * 4");
-
- __chacha20_power8_blocks4 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
-}
diff --git a/sysdeps/s390/s390-64/Makefile b/sysdeps/s390/s390-64/Makefile
index 96c110f490..66ed844e68 100644
--- a/sysdeps/s390/s390-64/Makefile
+++ b/sysdeps/s390/s390-64/Makefile
@@ -67,9 +67,3 @@ tests-container += tst-glibc-hwcaps-cache
endif
endif # $(subdir) == elf
-
-ifeq ($(subdir),stdlib)
-sysdep_routines += \
- chacha20-s390x \
- # sysdep_routines
-endif
diff --git a/sysdeps/s390/s390-64/chacha20-s390x.S b/sysdeps/s390/s390-64/chacha20-s390x.S
deleted file mode 100644
index e38504d370..0000000000
--- a/sysdeps/s390/s390-64/chacha20-s390x.S
+++ /dev/null
@@ -1,573 +0,0 @@
-/* Optimized s390x implementation of ChaCha20 cipher.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-/* chacha20-s390x.S - zSeries implementation of ChaCha20 cipher
-
- Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
-
- This file is part of Libgcrypt.
-
- Libgcrypt is free software; you can redistribute it and/or modify
- it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of
- the License, or (at your option) any later version.
-
- Libgcrypt is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, see <https://www.gnu.org/licenses/>.
- */
-
-#include <sysdep.h>
-
-#ifdef HAVE_S390_VX_ASM_SUPPORT
-
-/* CFA expressions are used for pointing CFA and registers to
- * SP relative offsets. */
-# define DW_REGNO_SP 15
-
-/* Fixed length encoding used for integers for now. */
-# define DW_SLEB128_7BIT(value) \
- 0x00|((value) & 0x7f)
-# define DW_SLEB128_28BIT(value) \
- 0x80|((value)&0x7f), \
- 0x80|(((value)>>7)&0x7f), \
- 0x80|(((value)>>14)&0x7f), \
- 0x00|(((value)>>21)&0x7f)
-
-# define cfi_cfa_on_stack(rsp_offs,cfa_depth) \
- .cfi_escape \
- 0x0f, /* DW_CFA_def_cfa_expression */ \
- DW_SLEB128_7BIT(11), /* length */ \
- 0x7f, /* DW_OP_breg15, rsp + constant */ \
- DW_SLEB128_28BIT(rsp_offs), \
- 0x06, /* DW_OP_deref */ \
- 0x23, /* DW_OP_plus_constu */ \
- DW_SLEB128_28BIT((cfa_depth)+160)
-
-.machine "z13+vx"
-.text
-
-.balign 16
-.Lconsts:
-.Lwordswap:
- .byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
-.Lbswap128:
- .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-.Lbswap32:
- .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
-.Lone:
- .long 0, 0, 0, 1
-.Ladd_counter_0123:
- .long 0, 1, 2, 3
-.Ladd_counter_4567:
- .long 4, 5, 6, 7
-
-/* register macros */
-#define INPUT %r2
-#define DST %r3
-#define SRC %r4
-#define NBLKS %r0
-#define ROUND %r1
-
-/* stack structure */
-
-#define STACK_FRAME_STD (8 * 16 + 8 * 4)
-#define STACK_FRAME_F8_F15 (8 * 8)
-#define STACK_FRAME_Y0_Y15 (16 * 16)
-#define STACK_FRAME_CTR (4 * 16)
-#define STACK_FRAME_PARAMS (6 * 8)
-
-#define STACK_MAX (STACK_FRAME_STD + STACK_FRAME_F8_F15 + \
- STACK_FRAME_Y0_Y15 + STACK_FRAME_CTR + \
- STACK_FRAME_PARAMS)
-
-#define STACK_F8 (STACK_MAX - STACK_FRAME_F8_F15)
-#define STACK_F9 (STACK_F8 + 8)
-#define STACK_F10 (STACK_F9 + 8)
-#define STACK_F11 (STACK_F10 + 8)
-#define STACK_F12 (STACK_F11 + 8)
-#define STACK_F13 (STACK_F12 + 8)
-#define STACK_F14 (STACK_F13 + 8)
-#define STACK_F15 (STACK_F14 + 8)
-#define STACK_Y0_Y15 (STACK_F8 - STACK_FRAME_Y0_Y15)
-#define STACK_CTR (STACK_Y0_Y15 - STACK_FRAME_CTR)
-#define STACK_INPUT (STACK_CTR - STACK_FRAME_PARAMS)
-#define STACK_DST (STACK_INPUT + 8)
-#define STACK_SRC (STACK_DST + 8)
-#define STACK_NBLKS (STACK_SRC + 8)
-#define STACK_POCTX (STACK_NBLKS + 8)
-#define STACK_POSRC (STACK_POCTX + 8)
-
-#define STACK_G0_H3 STACK_Y0_Y15
-
-/* vector registers */
-#define A0 %v0
-#define A1 %v1
-#define A2 %v2
-#define A3 %v3
-
-#define B0 %v4
-#define B1 %v5
-#define B2 %v6
-#define B3 %v7
-
-#define C0 %v8
-#define C1 %v9
-#define C2 %v10
-#define C3 %v11
-
-#define D0 %v12
-#define D1 %v13
-#define D2 %v14
-#define D3 %v15
-
-#define E0 %v16
-#define E1 %v17
-#define E2 %v18
-#define E3 %v19
-
-#define F0 %v20
-#define F1 %v21
-#define F2 %v22
-#define F3 %v23
-
-#define G0 %v24
-#define G1 %v25
-#define G2 %v26
-#define G3 %v27
-
-#define H0 %v28
-#define H1 %v29
-#define H2 %v30
-#define H3 %v31
-
-#define IO0 E0
-#define IO1 E1
-#define IO2 E2
-#define IO3 E3
-#define IO4 F0
-#define IO5 F1
-#define IO6 F2
-#define IO7 F3
-
-#define S0 G0
-#define S1 G1
-#define S2 G2
-#define S3 G3
-
-#define TMP0 H0
-#define TMP1 H1
-#define TMP2 H2
-#define TMP3 H3
-
-#define X0 A0
-#define X1 A1
-#define X2 A2
-#define X3 A3
-#define X4 B0
-#define X5 B1
-#define X6 B2
-#define X7 B3
-#define X8 C0
-#define X9 C1
-#define X10 C2
-#define X11 C3
-#define X12 D0
-#define X13 D1
-#define X14 D2
-#define X15 D3
-
-#define Y0 E0
-#define Y1 E1
-#define Y2 E2
-#define Y3 E3
-#define Y4 F0
-#define Y5 F1
-#define Y6 F2
-#define Y7 F3
-#define Y8 G0
-#define Y9 G1
-#define Y10 G2
-#define Y11 G3
-#define Y12 H0
-#define Y13 H1
-#define Y14 H2
-#define Y15 H3
-
-/**********************************************************************
- helper macros
- **********************************************************************/
-
-#define _ /*_*/
-
-#define START_STACK(last_r) \
- lgr %r0, %r15; \
- lghi %r1, ~15; \
- stmg %r6, last_r, 6 * 8(%r15); \
- aghi %r0, -STACK_MAX; \
- ngr %r0, %r1; \
- lgr %r1, %r15; \
- cfi_def_cfa_register(1); \
- lgr %r15, %r0; \
- stg %r1, 0(%r15); \
- cfi_cfa_on_stack(0, 0); \
- std %f8, STACK_F8(%r15); \
- std %f9, STACK_F9(%r15); \
- std %f10, STACK_F10(%r15); \
- std %f11, STACK_F11(%r15); \
- std %f12, STACK_F12(%r15); \
- std %f13, STACK_F13(%r15); \
- std %f14, STACK_F14(%r15); \
- std %f15, STACK_F15(%r15);
-
-#define END_STACK(last_r) \
- lg %r1, 0(%r15); \
- ld %f8, STACK_F8(%r15); \
- ld %f9, STACK_F9(%r15); \
- ld %f10, STACK_F10(%r15); \
- ld %f11, STACK_F11(%r15); \
- ld %f12, STACK_F12(%r15); \
- ld %f13, STACK_F13(%r15); \
- ld %f14, STACK_F14(%r15); \
- ld %f15, STACK_F15(%r15); \
- lmg %r6, last_r, 6 * 8(%r1); \
- lgr %r15, %r1; \
- cfi_def_cfa_register(DW_REGNO_SP);
-
-#define PLUS(dst,src) \
- vaf dst, dst, src;
-
-#define XOR(dst,src) \
- vx dst, dst, src;
-
-#define ROTATE(v1,c) \
- verllf v1, v1, (c)(0);
-
-#define WORD_ROTATE(v1,s) \
- vsldb v1, v1, v1, ((s) * 4);
-
-#define DST_8(OPER, I, J) \
- OPER(A##I, J); OPER(B##I, J); OPER(C##I, J); OPER(D##I, J); \
- OPER(E##I, J); OPER(F##I, J); OPER(G##I, J); OPER(H##I, J);
-
-/**********************************************************************
- round macros
- **********************************************************************/
-
-/**********************************************************************
- 8-way chacha20 ("vertical")
- **********************************************************************/
-
-#define QUARTERROUND4_V8_POLY(x0,x1,x2,x3,x4,x5,x6,x7,\
- x8,x9,x10,x11,x12,x13,x14,x15,\
- y0,y1,y2,y3,y4,y5,y6,y7,\
- y8,y9,y10,y11,y12,y13,y14,y15,\
- op1,op2,op3,op4,op5,op6,op7,op8,\
- op9,op10,op11,op12) \
- op1; \
- PLUS(x0, x1); PLUS(x4, x5); \
- PLUS(x8, x9); PLUS(x12, x13); \
- PLUS(y0, y1); PLUS(y4, y5); \
- PLUS(y8, y9); PLUS(y12, y13); \
- op2; \
- XOR(x3, x0); XOR(x7, x4); \
- XOR(x11, x8); XOR(x15, x12); \
- XOR(y3, y0); XOR(y7, y4); \
- XOR(y11, y8); XOR(y15, y12); \
- op3; \
- ROTATE(x3, 16); ROTATE(x7, 16); \
- ROTATE(x11, 16); ROTATE(x15, 16); \
- ROTATE(y3, 16); ROTATE(y7, 16); \
- ROTATE(y11, 16); ROTATE(y15, 16); \
- op4; \
- PLUS(x2, x3); PLUS(x6, x7); \
- PLUS(x10, x11); PLUS(x14, x15); \
- PLUS(y2, y3); PLUS(y6, y7); \
- PLUS(y10, y11); PLUS(y14, y15); \
- op5; \
- XOR(x1, x2); XOR(x5, x6); \
- XOR(x9, x10); XOR(x13, x14); \
- XOR(y1, y2); XOR(y5, y6); \
- XOR(y9, y10); XOR(y13, y14); \
- op6; \
- ROTATE(x1,12); ROTATE(x5,12); \
- ROTATE(x9,12); ROTATE(x13,12); \
- ROTATE(y1,12); ROTATE(y5,12); \
- ROTATE(y9,12); ROTATE(y13,12); \
- op7; \
- PLUS(x0, x1); PLUS(x4, x5); \
- PLUS(x8, x9); PLUS(x12, x13); \
- PLUS(y0, y1); PLUS(y4, y5); \
- PLUS(y8, y9); PLUS(y12, y13); \
- op8; \
- XOR(x3, x0); XOR(x7, x4); \
- XOR(x11, x8); XOR(x15, x12); \
- XOR(y3, y0); XOR(y7, y4); \
- XOR(y11, y8); XOR(y15, y12); \
- op9; \
- ROTATE(x3,8); ROTATE(x7,8); \
- ROTATE(x11,8); ROTATE(x15,8); \
- ROTATE(y3,8); ROTATE(y7,8); \
- ROTATE(y11,8); ROTATE(y15,8); \
- op10; \
- PLUS(x2, x3); PLUS(x6, x7); \
- PLUS(x10, x11); PLUS(x14, x15); \
- PLUS(y2, y3); PLUS(y6, y7); \
- PLUS(y10, y11); PLUS(y14, y15); \
- op11; \
- XOR(x1, x2); XOR(x5, x6); \
- XOR(x9, x10); XOR(x13, x14); \
- XOR(y1, y2); XOR(y5, y6); \
- XOR(y9, y10); XOR(y13, y14); \
- op12; \
- ROTATE(x1,7); ROTATE(x5,7); \
- ROTATE(x9,7); ROTATE(x13,7); \
- ROTATE(y1,7); ROTATE(y5,7); \
- ROTATE(y9,7); ROTATE(y13,7);
-
-#define QUARTERROUND4_V8(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,\
- y0,y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y13,y14,y15) \
- QUARTERROUND4_V8_POLY(x0,x1,x2,x3,x4,x5,x6,x7,\
- x8,x9,x10,x11,x12,x13,x14,x15,\
- y0,y1,y2,y3,y4,y5,y6,y7,\
- y8,y9,y10,y11,y12,y13,y14,y15,\
- ,,,,,,,,,,,)
-
-#define TRANSPOSE_4X4_2(v0,v1,v2,v3,va,vb,vc,vd,tmp0,tmp1,tmp2,tmpa,tmpb,tmpc) \
- vmrhf tmp0, v0, v1; \
- vmrhf tmp1, v2, v3; \
- vmrlf tmp2, v0, v1; \
- vmrlf v3, v2, v3; \
- vmrhf tmpa, va, vb; \
- vmrhf tmpb, vc, vd; \
- vmrlf tmpc, va, vb; \
- vmrlf vd, vc, vd; \
- vpdi v0, tmp0, tmp1, 0; \
- vpdi v1, tmp0, tmp1, 5; \
- vpdi v2, tmp2, v3, 0; \
- vpdi v3, tmp2, v3, 5; \
- vpdi va, tmpa, tmpb, 0; \
- vpdi vb, tmpa, tmpb, 5; \
- vpdi vc, tmpc, vd, 0; \
- vpdi vd, tmpc, vd, 5;
-
-.balign 8
-.globl __chacha20_s390x_vx_blocks8
-ENTRY (__chacha20_s390x_vx_blocks8)
- /* input:
- * %r2: input
- * %r3: dst
- * %r4: src
- * %r5: nblks (multiple of 8)
- */
-
- START_STACK(%r8);
- lgr NBLKS, %r5;
-
- larl %r7, .Lconsts;
-
- /* Load counter. */
- lg %r8, (12 * 4)(INPUT);
- rllg %r8, %r8, 32;
-
-.balign 4
- /* Process eight chacha20 blocks per loop. */
-.Lloop8:
- vlm Y0, Y3, 0(INPUT);
-
- slgfi NBLKS, 8;
- lghi ROUND, (20 / 2);
-
- /* Construct counter vectors X12/X13 & Y12/Y13. */
- vl X4, (.Ladd_counter_0123 - .Lconsts)(%r7);
- vl Y4, (.Ladd_counter_4567 - .Lconsts)(%r7);
- vrepf Y12, Y3, 0;
- vrepf Y13, Y3, 1;
- vaccf X5, Y12, X4;
- vaccf Y5, Y12, Y4;
- vaf X12, Y12, X4;
- vaf Y12, Y12, Y4;
- vaf X13, Y13, X5;
- vaf Y13, Y13, Y5;
-
- vrepf X0, Y0, 0;
- vrepf X1, Y0, 1;
- vrepf X2, Y0, 2;
- vrepf X3, Y0, 3;
- vrepf X4, Y1, 0;
- vrepf X5, Y1, 1;
- vrepf X6, Y1, 2;
- vrepf X7, Y1, 3;
- vrepf X8, Y2, 0;
- vrepf X9, Y2, 1;
- vrepf X10, Y2, 2;
- vrepf X11, Y2, 3;
- vrepf X14, Y3, 2;
- vrepf X15, Y3, 3;
-
- /* Store counters for blocks 0-7. */
- vstm X12, X13, (STACK_CTR + 0 * 16)(%r15);
- vstm Y12, Y13, (STACK_CTR + 2 * 16)(%r15);
-
- vlr Y0, X0;
- vlr Y1, X1;
- vlr Y2, X2;
- vlr Y3, X3;
- vlr Y4, X4;
- vlr Y5, X5;
- vlr Y6, X6;
- vlr Y7, X7;
- vlr Y8, X8;
- vlr Y9, X9;
- vlr Y10, X10;
- vlr Y11, X11;
- vlr Y14, X14;
- vlr Y15, X15;
-
- /* Update and store counter. */
- agfi %r8, 8;
- rllg %r5, %r8, 32;
- stg %r5, (12 * 4)(INPUT);
-
-.balign 4
-.Lround2_8:
- QUARTERROUND4_V8(X0, X4, X8, X12, X1, X5, X9, X13,
- X2, X6, X10, X14, X3, X7, X11, X15,
- Y0, Y4, Y8, Y12, Y1, Y5, Y9, Y13,
- Y2, Y6, Y10, Y14, Y3, Y7, Y11, Y15);
- QUARTERROUND4_V8(X0, X5, X10, X15, X1, X6, X11, X12,
- X2, X7, X8, X13, X3, X4, X9, X14,
- Y0, Y5, Y10, Y15, Y1, Y6, Y11, Y12,
- Y2, Y7, Y8, Y13, Y3, Y4, Y9, Y14);
- brctg ROUND, .Lround2_8;
-
- /* Store blocks 4-7. */
- vstm Y0, Y15, STACK_Y0_Y15(%r15);
-
- /* Load counters for blocks 0-3. */
- vlm Y0, Y1, (STACK_CTR + 0 * 16)(%r15);
-
- lghi ROUND, 1;
- j .Lfirst_output_4blks_8;
-
-.balign 4
-.Lsecond_output_4blks_8:
- /* Load blocks 4-7. */
- vlm X0, X15, STACK_Y0_Y15(%r15);
-
- /* Load counters for blocks 4-7. */
- vlm Y0, Y1, (STACK_CTR + 2 * 16)(%r15);
-
- lghi ROUND, 0;
-
-.balign 4
- /* Output four chacha20 blocks per loop. */
-.Lfirst_output_4blks_8:
- vlm Y12, Y15, 0(INPUT);
- PLUS(X12, Y0);
- PLUS(X13, Y1);
- vrepf Y0, Y12, 0;
- vrepf Y1, Y12, 1;
- vrepf Y2, Y12, 2;
- vrepf Y3, Y12, 3;
- vrepf Y4, Y13, 0;
- vrepf Y5, Y13, 1;
- vrepf Y6, Y13, 2;
- vrepf Y7, Y13, 3;
- vrepf Y8, Y14, 0;
- vrepf Y9, Y14, 1;
- vrepf Y10, Y14, 2;
- vrepf Y11, Y14, 3;
- vrepf Y14, Y15, 2;
- vrepf Y15, Y15, 3;
- PLUS(X0, Y0);
- PLUS(X1, Y1);
- PLUS(X2, Y2);
- PLUS(X3, Y3);
- PLUS(X4, Y4);
- PLUS(X5, Y5);
- PLUS(X6, Y6);
- PLUS(X7, Y7);
- PLUS(X8, Y8);
- PLUS(X9, Y9);
- PLUS(X10, Y10);
- PLUS(X11, Y11);
- PLUS(X14, Y14);
- PLUS(X15, Y15);
-
- vl Y15, (.Lbswap32 - .Lconsts)(%r7);
- TRANSPOSE_4X4_2(X0, X1, X2, X3, X4, X5, X6, X7,
- Y9, Y10, Y11, Y12, Y13, Y14);
- TRANSPOSE_4X4_2(X8, X9, X10, X11, X12, X13, X14, X15,
- Y9, Y10, Y11, Y12, Y13, Y14);
-
- vlm Y0, Y14, 0(SRC);
- vperm X0, X0, X0, Y15;
- vperm X1, X1, X1, Y15;
- vperm X2, X2, X2, Y15;
- vperm X3, X3, X3, Y15;
- vperm X4, X4, X4, Y15;
- vperm X5, X5, X5, Y15;
- vperm X6, X6, X6, Y15;
- vperm X7, X7, X7, Y15;
- vperm X8, X8, X8, Y15;
- vperm X9, X9, X9, Y15;
- vperm X10, X10, X10, Y15;
- vperm X11, X11, X11, Y15;
- vperm X12, X12, X12, Y15;
- vperm X13, X13, X13, Y15;
- vperm X14, X14, X14, Y15;
- vperm X15, X15, X15, Y15;
- vl Y15, (15 * 16)(SRC);
-
- XOR(Y0, X0);
- XOR(Y1, X4);
- XOR(Y2, X8);
- XOR(Y3, X12);
- XOR(Y4, X1);
- XOR(Y5, X5);
- XOR(Y6, X9);
- XOR(Y7, X13);
- XOR(Y8, X2);
- XOR(Y9, X6);
- XOR(Y10, X10);
- XOR(Y11, X14);
- XOR(Y12, X3);
- XOR(Y13, X7);
- XOR(Y14, X11);
- XOR(Y15, X15);
- vstm Y0, Y15, 0(DST);
-
- aghi SRC, 256;
- aghi DST, 256;
-
- clgije ROUND, 1, .Lsecond_output_4blks_8;
-
- clgijhe NBLKS, 8, .Lloop8;
-
-
- END_STACK(%r8);
- xgr %r2, %r2;
- br %r14;
-END (__chacha20_s390x_vx_blocks8)
-
-#endif /* HAVE_S390_VX_ASM_SUPPORT */
diff --git a/sysdeps/s390/s390-64/chacha20_arch.h b/sysdeps/s390/s390-64/chacha20_arch.h
deleted file mode 100644
index 0c6abf77e8..0000000000
--- a/sysdeps/s390/s390-64/chacha20_arch.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* s390x optimization for ChaCha20.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#include <stdbool.h>
-#include <ldsodefs.h>
-#include <sys/auxv.h>
-
-unsigned int __chacha20_s390x_vx_blocks8 (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t nblks)
- attribute_hidden;
-
-static inline void
-chacha20_crypt (uint32_t *state, uint8_t *dst, const uint8_t *src,
- size_t bytes)
-{
-#ifdef HAVE_S390_VX_ASM_SUPPORT
- _Static_assert (CHACHA20_BUFSIZE % 8 == 0,
- "CHACHA20_BUFSIZE not multiple of 8");
- _Static_assert (CHACHA20_BUFSIZE >= CHACHA20_BLOCK_SIZE * 8,
- "CHACHA20_BUFSIZE < CHACHA20_BLOCK_SIZE * 8");
-
- if (GLRO(dl_hwcap) & HWCAP_S390_VX)
- {
- __chacha20_s390x_vx_blocks8 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
- return;
- }
-#endif
- chacha20_crypt_generic (state, dst, src, bytes);
-}
diff --git a/sysdeps/unix/sysv/linux/not-cancel.h b/sysdeps/unix/sysv/linux/not-cancel.h
index 2c58d5ae2f..a263d294b1 100644
--- a/sysdeps/unix/sysv/linux/not-cancel.h
+++ b/sysdeps/unix/sysv/linux/not-cancel.h
@@ -23,6 +23,7 @@
#include <sysdep.h>
#include <errno.h>
#include <unistd.h>
+#include <sys/poll.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <time.h>
@@ -70,9 +71,14 @@ __writev_nocancel_nostatus (int fd, const struct iovec *iov, int iovcnt)
static inline int
__getrandom_nocancel (void *buf, size_t buflen, unsigned int flags)
{
- return INTERNAL_SYSCALL_CALL (getrandom, buf, buflen, flags);
+ return INLINE_SYSCALL_CALL (getrandom, buf, buflen, flags);
}
+static inline int
+__poll_infinity_nocancel (struct pollfd *fds, nfds_t nfds)
+{
+ return INLINE_SYSCALL_CALL (ppoll, fds, nfds, NULL, NULL, 0);
+}
/* Uncancelable fcntl. */
__typeof (__fcntl) __fcntl64_nocancel;
diff --git a/sysdeps/unix/sysv/linux/tls-internal.c b/sysdeps/unix/sysv/linux/tls-internal.c
index 0326ebb767..c8a9ed2d40 100644
--- a/sysdeps/unix/sysv/linux/tls-internal.c
+++ b/sysdeps/unix/sysv/linux/tls-internal.c
@@ -16,7 +16,6 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
-#include <stdlib/arc4random.h>
#include <string.h>
#include <tls-internal.h>
@@ -26,13 +25,4 @@ __glibc_tls_internal_free (void)
struct pthread *self = THREAD_SELF;
free (self->tls_state.strsignal_buf);
free (self->tls_state.strerror_l_buf);
-
- if (self->tls_state.rand_state != NULL)
- {
- /* Clear any lingering random state prior so if the thread stack is
- cached it won't leak any data. */
- explicit_bzero (self->tls_state.rand_state,
- sizeof (*self->tls_state.rand_state));
- free (self->tls_state.rand_state);
- }
}
diff --git a/sysdeps/unix/sysv/linux/tls-internal.h b/sysdeps/unix/sysv/linux/tls-internal.h
index ebc65d896a..2ebe977802 100644
--- a/sysdeps/unix/sysv/linux/tls-internal.h
+++ b/sysdeps/unix/sysv/linux/tls-internal.h
@@ -28,7 +28,6 @@ __glibc_tls_internal (void)
return &THREAD_SELF->tls_state;
}
-/* Reset the arc4random TCB state on fork. */
extern void __glibc_tls_internal_free (void) attribute_hidden;
#endif
diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
index 1178475d75..c19bef2dec 100644
--- a/sysdeps/x86_64/Makefile
+++ b/sysdeps/x86_64/Makefile
@@ -5,13 +5,6 @@ ifeq ($(subdir),csu)
gen-as-const-headers += link-defines.sym
endif
-ifeq ($(subdir),stdlib)
-sysdep_routines += \
- chacha20-amd64-sse2 \
- chacha20-amd64-avx2 \
- # sysdep_routines
-endif
-
ifeq ($(subdir),gmon)
sysdep_routines += _mcount
# We cannot compile _mcount.S with -pg because that would create
diff --git a/sysdeps/x86_64/chacha20-amd64-avx2.S b/sysdeps/x86_64/chacha20-amd64-avx2.S
deleted file mode 100644
index aefd1cdbd0..0000000000
--- a/sysdeps/x86_64/chacha20-amd64-avx2.S
+++ /dev/null
@@ -1,328 +0,0 @@
-/* Optimized AVX2 implementation of ChaCha20 cipher.
- Copyright (C) 2022 Free Software Foundation, Inc.
-
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-/* chacha20-amd64-avx2.S - AVX2 implementation of ChaCha20 cipher
-
- Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
-
- This file is part of Libgcrypt.
-
- Libgcrypt is free software; you can redistribute it and/or modify
- it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of
- the License, or (at your option) any later version.
-
- Libgcrypt is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, see <https://www.gnu.org/licenses/>.
-*/
-
-/* Based on D. J. Bernstein reference implementation at
- http://cr.yp.to/chacha.html:
-
- chacha-regs.c version 20080118
- D. J. Bernstein
- Public domain. */
-
-#include <sysdep.h>
-
-#ifdef PIC
-# define rRIP (%rip)
-#else
-# define rRIP
-#endif
-
-/* register macros */
-#define INPUT %rdi
-#define DST %rsi
-#define SRC %rdx
-#define NBLKS %rcx
-#define ROUND %eax
-
-/* stack structure */
-#define STACK_VEC_X12 (32)
-#define STACK_VEC_X13 (32 + STACK_VEC_X12)
-#define STACK_TMP (32 + STACK_VEC_X13)
-#define STACK_TMP1 (32 + STACK_TMP)
-
-#define STACK_MAX (32 + STACK_TMP1)
-
-/* vector registers */
-#define X0 %ymm0
-#define X1 %ymm1
-#define X2 %ymm2
-#define X3 %ymm3
-#define X4 %ymm4
-#define X5 %ymm5
-#define X6 %ymm6
-#define X7 %ymm7
-#define X8 %ymm8
-#define X9 %ymm9
-#define X10 %ymm10
-#define X11 %ymm11
-#define X12 %ymm12
-#define X13 %ymm13
-#define X14 %ymm14
-#define X15 %ymm15
-
-#define X0h %xmm0
-#define X1h %xmm1
-#define X2h %xmm2
-#define X3h %xmm3
-#define X4h %xmm4
-#define X5h %xmm5
-#define X6h %xmm6
-#define X7h %xmm7
-#define X8h %xmm8
-#define X9h %xmm9
-#define X10h %xmm10
-#define X11h %xmm11
-#define X12h %xmm12
-#define X13h %xmm13
-#define X14h %xmm14
-#define X15h %xmm15
-
-/**********************************************************************
- helper macros
- **********************************************************************/
-
-/* 4x4 32-bit integer matrix transpose */
-#define transpose_4x4(x0,x1,x2,x3,t1,t2) \
- vpunpckhdq x1, x0, t2; \
- vpunpckldq x1, x0, x0; \
- \
- vpunpckldq x3, x2, t1; \
- vpunpckhdq x3, x2, x2; \
- \
- vpunpckhqdq t1, x0, x1; \
- vpunpcklqdq t1, x0, x0; \
- \
- vpunpckhqdq x2, t2, x3; \
- vpunpcklqdq x2, t2, x2;
-
-/* 2x2 128-bit matrix transpose */
-#define transpose_16byte_2x2(x0,x1,t1) \
- vmovdqa x0, t1; \
- vperm2i128 $0x20, x1, x0, x0; \
- vperm2i128 $0x31, x1, t1, x1;
-
-/**********************************************************************
- 8-way chacha20
- **********************************************************************/
-
-#define ROTATE2(v1,v2,c,tmp) \
- vpsrld $(32 - (c)), v1, tmp; \
- vpslld $(c), v1, v1; \
- vpaddb tmp, v1, v1; \
- vpsrld $(32 - (c)), v2, tmp; \
- vpslld $(c), v2, v2; \
- vpaddb tmp, v2, v2;
-
-#define ROTATE_SHUF_2(v1,v2,shuf) \
- vpshufb shuf, v1, v1; \
- vpshufb shuf, v2, v2;
-
-#define XOR(ds,s) \
- vpxor s, ds, ds;
-
-#define PLUS(ds,s) \
- vpaddd s, ds, ds;
-
-#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,\
- interleave_op1,interleave_op2,\
- interleave_op3,interleave_op4) \
- vbroadcasti128 .Lshuf_rol16 rRIP, tmp1; \
- interleave_op1; \
- PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
- ROTATE_SHUF_2(d1, d2, tmp1); \
- interleave_op2; \
- PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
- ROTATE2(b1, b2, 12, tmp1); \
- vbroadcasti128 .Lshuf_rol8 rRIP, tmp1; \
- interleave_op3; \
- PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
- ROTATE_SHUF_2(d1, d2, tmp1); \
- interleave_op4; \
- PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
- ROTATE2(b1, b2, 7, tmp1);
-
- .section .text.avx2, "ax", @progbits
- .align 32
-chacha20_data:
-L(shuf_rol16):
- .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
-L(shuf_rol8):
- .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
-L(inc_counter):
- .byte 0,1,2,3,4,5,6,7
-L(unsigned_cmp):
- .long 0x80000000
-
- .hidden __chacha20_avx2_blocks8
-ENTRY (__chacha20_avx2_blocks8)
- /* input:
- * %rdi: input
- * %rsi: dst
- * %rdx: src
- * %rcx: nblks (multiple of 8)
- */
- vzeroupper;
-
- pushq %rbp;
- cfi_adjust_cfa_offset(8);
- cfi_rel_offset(rbp, 0)
- movq %rsp, %rbp;
- cfi_def_cfa_register(rbp);
-
- subq $STACK_MAX, %rsp;
- andq $~31, %rsp;
-
-L(loop8):
- mov $20, ROUND;
-
- /* Construct counter vectors X12 and X13 */
- vpmovzxbd L(inc_counter) rRIP, X0;
- vpbroadcastd L(unsigned_cmp) rRIP, X2;
- vpbroadcastd (12 * 4)(INPUT), X12;
- vpbroadcastd (13 * 4)(INPUT), X13;
- vpaddd X0, X12, X12;
- vpxor X2, X0, X0;
- vpxor X2, X12, X1;
- vpcmpgtd X1, X0, X0;
- vpsubd X0, X13, X13;
- vmovdqa X12, (STACK_VEC_X12)(%rsp);
- vmovdqa X13, (STACK_VEC_X13)(%rsp);
-
- /* Load vectors */
- vpbroadcastd (0 * 4)(INPUT), X0;
- vpbroadcastd (1 * 4)(INPUT), X1;
- vpbroadcastd (2 * 4)(INPUT), X2;
- vpbroadcastd (3 * 4)(INPUT), X3;
- vpbroadcastd (4 * 4)(INPUT), X4;
- vpbroadcastd (5 * 4)(INPUT), X5;
- vpbroadcastd (6 * 4)(INPUT), X6;
- vpbroadcastd (7 * 4)(INPUT), X7;
- vpbroadcastd (8 * 4)(INPUT), X8;
- vpbroadcastd (9 * 4)(INPUT), X9;
- vpbroadcastd (10 * 4)(INPUT), X10;
- vpbroadcastd (11 * 4)(INPUT), X11;
- vpbroadcastd (14 * 4)(INPUT), X14;
- vpbroadcastd (15 * 4)(INPUT), X15;
- vmovdqa X15, (STACK_TMP)(%rsp);
-
-L(round2):
- QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X15,,,,)
- vmovdqa (STACK_TMP)(%rsp), X15;
- vmovdqa X8, (STACK_TMP)(%rsp);
- QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,,,,)
- QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,,,,)
- vmovdqa (STACK_TMP)(%rsp), X8;
- vmovdqa X15, (STACK_TMP)(%rsp);
- QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X15,,,,)
- sub $2, ROUND;
- jnz L(round2);
-
- vmovdqa X8, (STACK_TMP1)(%rsp);
-
- /* tmp := X15 */
- vpbroadcastd (0 * 4)(INPUT), X15;
- PLUS(X0, X15);
- vpbroadcastd (1 * 4)(INPUT), X15;
- PLUS(X1, X15);
- vpbroadcastd (2 * 4)(INPUT), X15;
- PLUS(X2, X15);
- vpbroadcastd (3 * 4)(INPUT), X15;
- PLUS(X3, X15);
- vpbroadcastd (4 * 4)(INPUT), X15;
- PLUS(X4, X15);
- vpbroadcastd (5 * 4)(INPUT), X15;
- PLUS(X5, X15);
- vpbroadcastd (6 * 4)(INPUT), X15;
- PLUS(X6, X15);
- vpbroadcastd (7 * 4)(INPUT), X15;
- PLUS(X7, X15);
- transpose_4x4(X0, X1, X2, X3, X8, X15);
- transpose_4x4(X4, X5, X6, X7, X8, X15);
- vmovdqa (STACK_TMP1)(%rsp), X8;
- transpose_16byte_2x2(X0, X4, X15);
- transpose_16byte_2x2(X1, X5, X15);
- transpose_16byte_2x2(X2, X6, X15);
- transpose_16byte_2x2(X3, X7, X15);
- vmovdqa (STACK_TMP)(%rsp), X15;
- vmovdqu X0, (64 * 0 + 16 * 0)(DST)
- vmovdqu X1, (64 * 1 + 16 * 0)(DST)
- vpbroadcastd (8 * 4)(INPUT), X0;
- PLUS(X8, X0);
- vpbroadcastd (9 * 4)(INPUT), X0;
- PLUS(X9, X0);
- vpbroadcastd (10 * 4)(INPUT), X0;
- PLUS(X10, X0);
- vpbroadcastd (11 * 4)(INPUT), X0;
- PLUS(X11, X0);
- vmovdqa (STACK_VEC_X12)(%rsp), X0;
- PLUS(X12, X0);
- vmovdqa (STACK_VEC_X13)(%rsp), X0;
- PLUS(X13, X0);
- vpbroadcastd (14 * 4)(INPUT), X0;
- PLUS(X14, X0);
- vpbroadcastd (15 * 4)(INPUT), X0;
- PLUS(X15, X0);
- vmovdqu X2, (64 * 2 + 16 * 0)(DST)
- vmovdqu X3, (64 * 3 + 16 * 0)(DST)
-
- /* Update counter */
- addq $8, (12 * 4)(INPUT);
-
- transpose_4x4(X8, X9, X10, X11, X0, X1);
- transpose_4x4(X12, X13, X14, X15, X0, X1);
- vmovdqu X4, (64 * 4 + 16 * 0)(DST)
- vmovdqu X5, (64 * 5 + 16 * 0)(DST)
- transpose_16byte_2x2(X8, X12, X0);
- transpose_16byte_2x2(X9, X13, X0);
- transpose_16byte_2x2(X10, X14, X0);
- transpose_16byte_2x2(X11, X15, X0);
- vmovdqu X6, (64 * 6 + 16 * 0)(DST)
- vmovdqu X7, (64 * 7 + 16 * 0)(DST)
- vmovdqu X8, (64 * 0 + 16 * 2)(DST)
- vmovdqu X9, (64 * 1 + 16 * 2)(DST)
- vmovdqu X10, (64 * 2 + 16 * 2)(DST)
- vmovdqu X11, (64 * 3 + 16 * 2)(DST)
- vmovdqu X12, (64 * 4 + 16 * 2)(DST)
- vmovdqu X13, (64 * 5 + 16 * 2)(DST)
- vmovdqu X14, (64 * 6 + 16 * 2)(DST)
- vmovdqu X15, (64 * 7 + 16 * 2)(DST)
-
- sub $8, NBLKS;
- lea (8 * 64)(DST), DST;
- lea (8 * 64)(SRC), SRC;
- jnz L(loop8);
-
- vzeroupper;
-
- /* eax zeroed by round loop. */
- leave;
- cfi_adjust_cfa_offset(-8)
- cfi_def_cfa_register(%rsp);
- ret;
- int3;
-END(__chacha20_avx2_blocks8)
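
The eight-block kernel deleted above broadcasts each of the 16 ChaCha20 state words into its own ymm register, so every 32-bit lane works on a different block; the only per-lane state is the block counter. The vpxor/vpcmpgtd/vpsubd sequence that follows the vpaddd is a standard SIMD idiom for propagating the carry from the 32-bit counter (state word 12) into word 13 when the ISA offers only signed compares. A standalone C sketch of that idiom, one lane per loop iteration (the constants mirror the assembly, but this is an illustration, not glibc code):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t ctr_lo = 0xfffffffdu;  /* state word 12: low counter half.  */
  uint32_t ctr_hi = 7;            /* state word 13: high half, in this
                                     code's 64-bit-counter layout.  */

  for (uint32_t i = 0; i < 8; i++)      /* vpmovzxbd L(inc_counter).  */
    {
      uint32_t lo = ctr_lo + i;         /* vpaddd.  */
      /* XORing both sides with L(unsigned_cmp) = 0x80000000 maps
         unsigned order onto signed order, so the signed vpcmpgtd
         implements the unsigned test "i > lo", which holds exactly
         when the 32-bit addition wrapped.  The int32_t casts are the
         two's-complement lane reinterpretation the vector unit does.  */
      int wrapped = (int32_t) (i ^ 0x80000000u)
                    > (int32_t) (lo ^ 0x80000000u);
      /* vpcmpgtd writes 0xffffffff into wrapped lanes; subtracting it
         (vpsubd) increments the high word.  */
      uint32_t hi = ctr_hi - (wrapped ? 0xffffffffu : 0u);
      printf ("block %u: counter %08x%08x\n",
              (unsigned) i, (unsigned) hi, (unsigned) lo);
    }
  return 0;
}

With ctr_lo = 0xfffffffd, lanes 3 through 7 wrap and pick up the carry, which is exactly the case the vpcmpgtd sequence exists to handle.
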
diff --git a/sysdeps/x86_64/chacha20-amd64-sse2.S b/sysdeps/x86_64/chacha20-amd64-sse2.S
deleted file mode 100644
index 351a1109c6..0000000000
--- a/sysdeps/x86_64/chacha20-amd64-sse2.S
+++ /dev/null
@@ -1,311 +0,0 @@
-/* Optimized SSE2 implementation of ChaCha20 cipher.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-/* chacha20-amd64-ssse3.S - SSSE3 implementation of ChaCha20 cipher
-
- Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
-
- This file is part of Libgcrypt.
-
- Libgcrypt is free software; you can redistribute it and/or modify
- it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of
- the License, or (at your option) any later version.
-
- Libgcrypt is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, see <https://www.gnu.org/licenses/>.
-*/
-
-/* Based on D. J. Bernstein reference implementation at
- http://cr.yp.to/chacha.html:
-
- chacha-regs.c version 20080118
- D. J. Bernstein
- Public domain. */
-
-#include <sysdep.h>
-#include <isa-level.h>
-
-#if MINIMUM_X86_ISA_LEVEL <= 2
-
-#ifdef PIC
-# define rRIP (%rip)
-#else
-# define rRIP
-#endif
-
-/* 'ret' instruction replacement for straight-line speculation mitigation */
-#define ret_spec_stop \
- ret; int3;
-
-/* register macros */
-#define INPUT %rdi
-#define DST %rsi
-#define SRC %rdx
-#define NBLKS %rcx
-#define ROUND %eax
-
-/* stack structure */
-#define STACK_VEC_X12 (16)
-#define STACK_VEC_X13 (16 + STACK_VEC_X12)
-#define STACK_TMP (16 + STACK_VEC_X13)
-#define STACK_TMP1 (16 + STACK_TMP)
-#define STACK_TMP2 (16 + STACK_TMP1)
-
-#define STACK_MAX (16 + STACK_TMP2)
-
-/* vector registers */
-#define X0 %xmm0
-#define X1 %xmm1
-#define X2 %xmm2
-#define X3 %xmm3
-#define X4 %xmm4
-#define X5 %xmm5
-#define X6 %xmm6
-#define X7 %xmm7
-#define X8 %xmm8
-#define X9 %xmm9
-#define X10 %xmm10
-#define X11 %xmm11
-#define X12 %xmm12
-#define X13 %xmm13
-#define X14 %xmm14
-#define X15 %xmm15
-
-/**********************************************************************
- helper macros
- **********************************************************************/
-
-/* 4x4 32-bit integer matrix transpose */
-#define TRANSPOSE_4x4(x0, x1, x2, x3, t1, t2, t3) \
- movdqa x0, t2; \
- punpckhdq x1, t2; \
- punpckldq x1, x0; \
- \
- movdqa x2, t1; \
- punpckldq x3, t1; \
- punpckhdq x3, x2; \
- \
- movdqa x0, x1; \
- punpckhqdq t1, x1; \
- punpcklqdq t1, x0; \
- \
- movdqa t2, x3; \
- punpckhqdq x2, x3; \
- punpcklqdq x2, t2; \
- movdqa t2, x2;
-
-/* fill xmm register with 32-bit value from memory */
-#define PBROADCASTD(mem32, xreg) \
- movd mem32, xreg; \
- pshufd $0, xreg, xreg;
-
-/**********************************************************************
- 4-way chacha20
- **********************************************************************/
-
-#define ROTATE2(v1,v2,c,tmp1,tmp2) \
- movdqa v1, tmp1; \
- movdqa v2, tmp2; \
- psrld $(32 - (c)), v1; \
- pslld $(c), tmp1; \
- paddb tmp1, v1; \
- psrld $(32 - (c)), v2; \
- pslld $(c), tmp2; \
- paddb tmp2, v2;
-
-#define XOR(ds,s) \
- pxor s, ds;
-
-#define PLUS(ds,s) \
- paddd s, ds;
-
-#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2) \
- PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
- ROTATE2(d1, d2, 16, tmp1, tmp2); \
- PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
- ROTATE2(b1, b2, 12, tmp1, tmp2); \
- PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
- ROTATE2(d1, d2, 8, tmp1, tmp2); \
- PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
- ROTATE2(b1, b2, 7, tmp1, tmp2);
-
- .section .text.sse2,"ax",@progbits
-
-chacha20_data:
- .align 16
-L(counter1):
- .long 1,0,0,0
-L(inc_counter):
- .long 0,1,2,3
-L(unsigned_cmp):
- .long 0x80000000,0x80000000,0x80000000,0x80000000
-
- .hidden __chacha20_sse2_blocks4
-ENTRY (__chacha20_sse2_blocks4)
- /* input:
- * %rdi: input
- * %rsi: dst
- * %rdx: src
- * %rcx: nblks (multiple of 4)
- */
-
- pushq %rbp;
- cfi_adjust_cfa_offset(8);
- cfi_rel_offset(rbp, 0)
- movq %rsp, %rbp;
- cfi_def_cfa_register(%rbp);
-
- subq $STACK_MAX, %rsp;
- andq $~15, %rsp;
-
-L(loop4):
- mov $20, ROUND;
-
- /* Construct counter vectors X12 and X13 */
- movdqa L(inc_counter) rRIP, X0;
- movdqa L(unsigned_cmp) rRIP, X2;
- PBROADCASTD((12 * 4)(INPUT), X12);
- PBROADCASTD((13 * 4)(INPUT), X13);
- paddd X0, X12;
- movdqa X12, X1;
- pxor X2, X0;
- pxor X2, X1;
- pcmpgtd X1, X0;
- psubd X0, X13;
- movdqa X12, (STACK_VEC_X12)(%rsp);
- movdqa X13, (STACK_VEC_X13)(%rsp);
-
- /* Load vectors */
- PBROADCASTD((0 * 4)(INPUT), X0);
- PBROADCASTD((1 * 4)(INPUT), X1);
- PBROADCASTD((2 * 4)(INPUT), X2);
- PBROADCASTD((3 * 4)(INPUT), X3);
- PBROADCASTD((4 * 4)(INPUT), X4);
- PBROADCASTD((5 * 4)(INPUT), X5);
- PBROADCASTD((6 * 4)(INPUT), X6);
- PBROADCASTD((7 * 4)(INPUT), X7);
- PBROADCASTD((8 * 4)(INPUT), X8);
- PBROADCASTD((9 * 4)(INPUT), X9);
- PBROADCASTD((10 * 4)(INPUT), X10);
- PBROADCASTD((11 * 4)(INPUT), X11);
- PBROADCASTD((14 * 4)(INPUT), X14);
- PBROADCASTD((15 * 4)(INPUT), X15);
- movdqa X11, (STACK_TMP)(%rsp);
- movdqa X15, (STACK_TMP1)(%rsp);
-
-L(round2_4):
- QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15)
- movdqa (STACK_TMP)(%rsp), X11;
- movdqa (STACK_TMP1)(%rsp), X15;
- movdqa X8, (STACK_TMP)(%rsp);
- movdqa X9, (STACK_TMP1)(%rsp);
- QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9)
- QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9)
- movdqa (STACK_TMP)(%rsp), X8;
- movdqa (STACK_TMP1)(%rsp), X9;
- movdqa X11, (STACK_TMP)(%rsp);
- movdqa X15, (STACK_TMP1)(%rsp);
- QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15)
- sub $2, ROUND;
- jnz L(round2_4);
-
- /* tmp := X15 */
- movdqa (STACK_TMP)(%rsp), X11;
- PBROADCASTD((0 * 4)(INPUT), X15);
- PLUS(X0, X15);
- PBROADCASTD((1 * 4)(INPUT), X15);
- PLUS(X1, X15);
- PBROADCASTD((2 * 4)(INPUT), X15);
- PLUS(X2, X15);
- PBROADCASTD((3 * 4)(INPUT), X15);
- PLUS(X3, X15);
- PBROADCASTD((4 * 4)(INPUT), X15);
- PLUS(X4, X15);
- PBROADCASTD((5 * 4)(INPUT), X15);
- PLUS(X5, X15);
- PBROADCASTD((6 * 4)(INPUT), X15);
- PLUS(X6, X15);
- PBROADCASTD((7 * 4)(INPUT), X15);
- PLUS(X7, X15);
- PBROADCASTD((8 * 4)(INPUT), X15);
- PLUS(X8, X15);
- PBROADCASTD((9 * 4)(INPUT), X15);
- PLUS(X9, X15);
- PBROADCASTD((10 * 4)(INPUT), X15);
- PLUS(X10, X15);
- PBROADCASTD((11 * 4)(INPUT), X15);
- PLUS(X11, X15);
- movdqa (STACK_VEC_X12)(%rsp), X15;
- PLUS(X12, X15);
- movdqa (STACK_VEC_X13)(%rsp), X15;
- PLUS(X13, X15);
- movdqa X13, (STACK_TMP)(%rsp);
- PBROADCASTD((14 * 4)(INPUT), X15);
- PLUS(X14, X15);
- movdqa (STACK_TMP1)(%rsp), X15;
- movdqa X14, (STACK_TMP1)(%rsp);
- PBROADCASTD((15 * 4)(INPUT), X13);
- PLUS(X15, X13);
- movdqa X15, (STACK_TMP2)(%rsp);
-
- /* Update counter */
- addq $4, (12 * 4)(INPUT);
-
- TRANSPOSE_4x4(X0, X1, X2, X3, X13, X14, X15);
- movdqu X0, (64 * 0 + 16 * 0)(DST)
- movdqu X1, (64 * 1 + 16 * 0)(DST)
- movdqu X2, (64 * 2 + 16 * 0)(DST)
- movdqu X3, (64 * 3 + 16 * 0)(DST)
- TRANSPOSE_4x4(X4, X5, X6, X7, X0, X1, X2);
- movdqa (STACK_TMP)(%rsp), X13;
- movdqa (STACK_TMP1)(%rsp), X14;
- movdqa (STACK_TMP2)(%rsp), X15;
- movdqu X4, (64 * 0 + 16 * 1)(DST)
- movdqu X5, (64 * 1 + 16 * 1)(DST)
- movdqu X6, (64 * 2 + 16 * 1)(DST)
- movdqu X7, (64 * 3 + 16 * 1)(DST)
- TRANSPOSE_4x4(X8, X9, X10, X11, X0, X1, X2);
- movdqu X8, (64 * 0 + 16 * 2)(DST)
- movdqu X9, (64 * 1 + 16 * 2)(DST)
- movdqu X10, (64 * 2 + 16 * 2)(DST)
- movdqu X11, (64 * 3 + 16 * 2)(DST)
- TRANSPOSE_4x4(X12, X13, X14, X15, X0, X1, X2);
- movdqu X12, (64 * 0 + 16 * 3)(DST)
- movdqu X13, (64 * 1 + 16 * 3)(DST)
- movdqu X14, (64 * 2 + 16 * 3)(DST)
- movdqu X15, (64 * 3 + 16 * 3)(DST)
-
- sub $4, NBLKS;
- lea (4 * 64)(DST), DST;
- lea (4 * 64)(SRC), SRC;
- jnz L(loop4);
-
- /* eax zeroed by round loop. */
- leave;
- cfi_adjust_cfa_offset(-8)
- cfi_def_cfa_register(%rsp);
- ret_spec_stop;
-END (__chacha20_sse2_blocks4)
-
-#endif /* if MINIMUM_X86_ISA_LEVEL <= 2 */
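
As its header notes, the SSE2 kernel deleted above follows D. J. Bernstein's reference chacha-regs.c. QUARTERROUND2 interleaves two independent quarter-rounds to hide instruction latency; each (a,b,c,d) column computes the standard ChaCha20 quarter-round. A scalar C rendering of one column, annotated with the corresponding macros (a reference illustration, not the glibc code):

#include <stdint.h>

/* ROTATE2 in the file above: a 32-bit left-rotate built from a
   pslld/psrld pair, done on two registers at once.  */
static inline uint32_t
rotl32 (uint32_t v, int c)
{
  return (v << c) | (v >> (32 - c));
}

/* One (a,b,c,d) column of QUARTERROUND2; the macro runs two such
   columns side by side.  */
static inline void
quarterround (uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
  *a += *b; *d ^= *a; *d = rotl32 (*d, 16);   /* PLUS, XOR, ROTATE2 16 */
  *c += *d; *b ^= *c; *b = rotl32 (*b, 12);   /* PLUS, XOR, ROTATE2 12 */
  *a += *b; *d ^= *a; *d = rotl32 (*d, 8);    /* PLUS, XOR, ROTATE2  8 */
  *c += *d; *b ^= *c; *b = rotl32 (*b, 7);    /* PLUS, XOR, ROTATE2  7 */
}

One detail worth calling out: ROTATE2 recombines the pslld/psrld halves with paddb rather than por. The two halves have no set bits in common, so a byte-wise add cannot carry and is exactly equivalent to OR; the add form presumably widens the choice of execution ports on some microarchitectures.
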
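Similarly, TRANSPOSE_4x4 uses the punpckldq/punpckhdq and punpcklqdq/punpckhqdq pairs to transpose a 4x4 matrix of 32-bit words held across four xmm registers: during the rounds each register holds one state word for four blocks, while the movdqu stores need four consecutive words of a single block. In scalar terms (an illustrative equivalent, not the in-register permutation itself):

#include <stdint.h>
#include <string.h>

/* Scalar equivalent of TRANSPOSE_4x4: on entry x[i] holds state word i
   for four blocks (one per lane); on exit x[i] holds four consecutive
   words of block i, ready for the contiguous stores to DST.  */
static void
transpose_4x4 (uint32_t x[4][4])
{
  uint32_t t[4][4];
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      t[j][i] = x[i][j];
  memcpy (x, t, sizeof t);
}
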
diff --git a/sysdeps/x86_64/chacha20_arch.h b/sysdeps/x86_64/chacha20_arch.h
deleted file mode 100644
index 6f3784e392..0000000000
--- a/sysdeps/x86_64/chacha20_arch.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* ChaCha20 implementation, used in arc4random.
- Copyright (C) 2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#include <isa-level.h>
-#include <ldsodefs.h>
-#include <cpu-features.h>
-#include <sys/param.h>
-
-unsigned int __chacha20_sse2_blocks4 (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t nblks)
- attribute_hidden;
-unsigned int __chacha20_avx2_blocks8 (uint32_t *state, uint8_t *dst,
- const uint8_t *src, size_t nblks)
- attribute_hidden;
-
-static inline void
-chacha20_crypt (uint32_t *state, uint8_t *dst, const uint8_t *src,
- size_t bytes)
-{
- _Static_assert (CHACHA20_BUFSIZE % 4 == 0 && CHACHA20_BUFSIZE % 8 == 0,
- "CHACHA20_BUFSIZE not multiple of 4 or 8");
- _Static_assert (CHACHA20_BUFSIZE >= CHACHA20_BLOCK_SIZE * 8,
- "CHACHA20_BUFSIZE < CHACHA20_BLOCK_SIZE * 8");
-
-#if MINIMUM_X86_ISA_LEVEL > 2
- __chacha20_avx2_blocks8 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
-#else
- const struct cpu_features* cpu_features = __get_cpu_features ();
-
- /* AVX2 version uses vzeroupper, so disable it if RTM is enabled. */
- if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2)
- && X86_ISA_CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER, !))
- __chacha20_avx2_blocks8 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
- else
- __chacha20_sse2_blocks4 (state, dst, src,
- CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
-#endif
-}
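
The deleted chacha20_arch.h captures the dispatch contract: when glibc is built with MINIMUM_X86_ISA_LEVEL > 2 (x86-64-v3 or later), the AVX2 kernel is called unconditionally; otherwise AVX2 is chosen at run time unless Prefer_No_VZEROUPPER is set, since the AVX2 path executes vzeroupper, which interacts badly with RTM transactions. A self-contained sketch of the same shape; the cpu_has_avx2/cpu_prefers_no_vzeroupper probes, the kernel names, and the 8-block CHACHA20_BUFSIZE value are illustrative assumptions, not glibc interfaces:

#include <stddef.h>
#include <stdint.h>

#define CHACHA20_BLOCK_SIZE 64
/* Assumption for the sketch: an 8-block buffer, the minimum the deleted
   _Static_asserts allow.  */
#define CHACHA20_BUFSIZE (8 * CHACHA20_BLOCK_SIZE)

_Static_assert (CHACHA20_BUFSIZE % CHACHA20_BLOCK_SIZE == 0
                && (CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE) % 8 == 0,
                "buffer must cover a whole multiple of 8 blocks");

/* Stand-ins for the deleted __chacha20_avx2_blocks8 and
   __chacha20_sse2_blocks4 entry points and the cpu-features probes.  */
unsigned int chacha20_blocks8 (uint32_t *state, uint8_t *dst,
                               const uint8_t *src, size_t nblks);
unsigned int chacha20_blocks4 (uint32_t *state, uint8_t *dst,
                               const uint8_t *src, size_t nblks);
int cpu_has_avx2 (void);
int cpu_prefers_no_vzeroupper (void);

void
chacha20_crypt_sketch (uint32_t *state, uint8_t *dst, const uint8_t *src)
{
  /* Both kernels require nblks to be a multiple of their lane count
     (8 for AVX2, 4 for SSE2), so the whole buffer is processed in one
     call; the asserts above guarantee this for the caller.  */
  size_t nblks = CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE;

  /* Mirror of the run-time branch: prefer AVX2 unless vzeroupper must
     be avoided (the RTM caveat in the deleted header).  */
  if (cpu_has_avx2 () && !cpu_prefers_no_vzeroupper ())
    chacha20_blocks8 (state, dst, src, nblks);
  else
    chacha20_blocks4 (state, dst, src, nblks);
}
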