author	Roland McGrath <roland@redhat.com>	2010-03-02 11:57:25 -0800
committer	Roland McGrath <roland@redhat.com>	2010-03-02 11:58:09 -0800
commit	42488a4d317ffdc9274b8e2b430fc930db4da8b8
tree	5f715e0176c6934c5bf71fe3646586c44167581e /sysdeps/sparc
parent	f2899a62cfd037ac59dddb4dc62c7a2d91238fa7
parent	3afd5a3b555694361181f854f658f669105d0ad6
Merge commit 'origin/davem/sparc'
Diffstat (limited to 'sysdeps/sparc')
34 files changed, 720 insertions(+), 1854 deletions(-)
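
The bulk of this merge replaces the old per-CPU sysdeps directories (sparcv9b, sparcv9v, sparcv9v2) with multiarch variants — __memcpy_ultra3, __memcpy_niagara1, __memcpy_niagara2, __memset_niagara1 — chosen once at startup by a STT_GNU_IFUNC resolver that inspects _dl_hwcap (read through _rtld_global_ro in shared builds, which is what the new rtld-global-offsets.sym provides the offset for). It also drops the hand-written bcopy/memmove assembly and adds the V9-ABI __align_cpy_* routines. The sketch below restates the resolver's dispatch order in C purely for clarity; the actual resolver in the new sysdeps/sparc/sparc64/multiarch/memcpy.S is hand-written assembly, and the hwcap bit values are taken from its comments.

/* Illustrative sketch only, not part of the patch: the C analogue of
   the hwcap-driven selection done in multiarch/memcpy.S.  */

#include <stddef.h>

#define HWCAP_SPARC_ULTRA3  0x20  /* UltraSPARC-III block copy.  */
#define HWCAP_SPARC_BLKINIT 0x40  /* Niagara-1 block-init stores.  */
#define HWCAP_SPARC_N2      0x80  /* Niagara-2.  */

typedef void *(*memcpy_fn) (void *dst, const void *src, size_t n);

extern void *__memcpy_ultra1 (void *, const void *, size_t);
extern void *__memcpy_ultra3 (void *, const void *, size_t);
extern void *__memcpy_niagara1 (void *, const void *, size_t);
extern void *__memcpy_niagara2 (void *, const void *, size_t);

/* Mirrors the branch order of the assembly resolver: newest CPU
   first, falling back to the generic UltraSPARC routine.  */
static memcpy_fn
select_memcpy (unsigned long hwcap)
{
  if (hwcap & HWCAP_SPARC_N2)
    return __memcpy_niagara2;
  if (hwcap & HWCAP_SPARC_BLKINIT)
    return __memcpy_niagara1;
  if (hwcap & HWCAP_SPARC_ULTRA3)
    return __memcpy_ultra3;
  return __memcpy_ultra1;
}

memset and __bzero are dispatched the same way in the new multiarch/memset.S, but with only one special case: HWCAP_SPARC_BLKINIT selects the Niagara-1 variant, and everything else falls back to the generic ultra1 code.
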
diff --git a/sysdeps/sparc/Makefile b/sysdeps/sparc/Makefile index 73b926554e..735e4a40db 100644 --- a/sysdeps/sparc/Makefile +++ b/sysdeps/sparc/Makefile @@ -10,3 +10,8 @@ endif ifeq ($(subdir),db2) CPPFLAGS += -DHAVE_SPINLOCKS=1 -DHAVE_ASSEM_SPARC_GCC=1 endif + +ifeq ($(subdir),csu) +# get offset to rtld_global._dl_hwcap +gen-as-const-headers += rtld-global-offsets.sym +endif diff --git a/sysdeps/sparc/elf/rtld-global-offsets.sym b/sysdeps/sparc/elf/rtld-global-offsets.sym new file mode 100644 index 0000000000..ff4e97f2a6 --- /dev/null +++ b/sysdeps/sparc/elf/rtld-global-offsets.sym @@ -0,0 +1,7 @@ +#define SHARED 1 + +#include <ldsodefs.h> + +#define rtld_global_ro_offsetof(mem) offsetof (struct rtld_global_ro, mem) + +RTLD_GLOBAL_RO_DL_HWCAP_OFFSET rtld_global_ro_offsetof (_dl_hwcap) diff --git a/sysdeps/sparc/sparc32/bcopy.c b/sysdeps/sparc/sparc32/bcopy.c deleted file mode 100644 index 9a455f33c4..0000000000 --- a/sysdeps/sparc/sparc32/bcopy.c +++ /dev/null @@ -1 +0,0 @@ -/* bcopy is in memcpy.S */ diff --git a/sysdeps/sparc/sparc32/memcpy.S b/sysdeps/sparc/sparc32/memcpy.S index 6bd55c06a1..c9c7c40e81 100644 --- a/sysdeps/sparc/sparc32/memcpy.S +++ b/sysdeps/sparc/sparc32/memcpy.S @@ -68,45 +68,6 @@ stb %t0, [%dst - offset - 0x02]; \ stb %t1, [%dst - offset - 0x01]; -/* Both these macros have to start with exactly the same insn */ -#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src - offset - 0x20], %t0; \ - ldd [%src - offset - 0x18], %t2; \ - ldd [%src - offset - 0x10], %t4; \ - ldd [%src - offset - 0x08], %t6; \ - st %t0, [%dst - offset - 0x20]; \ - st %t1, [%dst - offset - 0x1c]; \ - st %t2, [%dst - offset - 0x18]; \ - st %t3, [%dst - offset - 0x14]; \ - st %t4, [%dst - offset - 0x10]; \ - st %t5, [%dst - offset - 0x0c]; \ - st %t6, [%dst - offset - 0x08]; \ - st %t7, [%dst - offset - 0x04]; - -#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src - offset - 0x20], %t0; \ - ldd [%src - offset - 0x18], %t2; \ - ldd [%src - offset - 0x10], %t4; \ - ldd [%src - offset - 0x08], %t6; \ - std %t0, [%dst - offset - 0x20]; \ - std %t2, [%dst - offset - 0x18]; \ - std %t4, [%dst - offset - 0x10]; \ - std %t6, [%dst - offset - 0x08]; - -#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldd [%src + offset + 0x00], %t0; \ - ldd [%src + offset + 0x08], %t2; \ - st %t0, [%dst + offset + 0x00]; \ - st %t1, [%dst + offset + 0x04]; \ - st %t2, [%dst + offset + 0x08]; \ - st %t3, [%dst + offset + 0x0c]; - -#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ - ldub [%src + offset + 0x00], %t0; \ - ldub [%src + offset + 0x01], %t1; \ - stb %t0, [%dst + offset + 0x00]; \ - stb %t1, [%dst + offset + 0x01]; - #define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ ldd [%src + offset + 0x00], %t0; \ ldd [%src + offset + 0x08], %t2; \ @@ -146,312 +107,6 @@ .text .align 4 -ENTRY(bcopy) - mov %o0, %o3 - mov %o1, %o0 - mov %o3, %o1 -END(bcopy) - -ENTRY(memmove) - cmp %o0, %o1 - st %o0, [%sp + 64] - bleu 9f - sub %o0, %o1, %o4 - - add %o1, %o2, %o3 - cmp %o3, %o0 - bleu 0f - andcc %o4, 3, %o5 - - add %o1, %o2, %o1 - add %o0, %o2, %o0 - bne 77f - cmp %o2, 15 - bleu 91f - andcc %o1, 3, %g0 - be 3f - nop - - andcc %o1, 1, %g0 - be 4f - andcc %o1, 2, %g0 - - ldub [%o1 - 1], %g2 - sub %o1, 1, %o1 - stb %g2, [%o0 - 1] - sub %o2, 1, %o2 - be 3f - sub %o0, 1, %o0 -4: lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sth %g2, [%o0 - 2] - sub %o2, 2, %o2 - sub %o0, 2, %o0 - -3: andcc %o1, 4, %g0 - - be 
2f - mov %o2, %g1 - - ld [%o1 - 4], %o4 - sub %g1, 4, %g1 - st %o4, [%o0 - 4] - sub %o1, 4, %o1 - sub %o0, 4, %o0 -2: andcc %g1, 0xffffff80, %g6 - be 3f - andcc %o0, 4, %g0 - - be 74f + 4 -5: RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) - subcc %g6, 128, %g6 - sub %o1, 128, %o1 - bne 5b - sub %o0, 128, %o0 - -3: andcc %g1, 0x70, %g6 - be 72f - andcc %g1, 8, %g0 - - srl %g6, 1, %o4 - mov %o7, %g2 - add %g6, %o4, %o4 -101: call 100f - sub %o1, %g6, %o1 - mov %g2, %o7 - jmpl %o5 + (72f - 101b), %g0 - sub %o0, %g6, %o0 - -71: RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5) -72: be 73f - andcc %g1, 4, %g0 - - ldd [%o1 - 0x08], %g2 - sub %o0, 8, %o0 - sub %o1, 8, %o1 - st %g2, [%o0] - st %g3, [%o0 + 0x04] -73: be 1f - andcc %g1, 2, %g0 - - ld [%o1 - 4], %g2 - sub %o1, 4, %o1 - st %g2, [%o0 - 4] - sub %o0, 4, %o0 -1: be 1f - andcc %g1, 1, %g0 - - lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sth %g2, [%o0 - 2] - sub %o0, 2, %o0 -1: be 1f - nop - - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -1: retl - ld [%sp + 64], %o0 - -74: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) - subcc %g6, 128, %g6 - sub %o1, 128, %o1 - bne 74b - sub %o0, 128, %o0 - - andcc %g1, 0x70, %g6 - be 72b - andcc %g1, 8, %g0 - - srl %g6, 1, %o4 - mov %o7, %g2 - add %g6, %o4, %o4 -102: call 100f - sub %o1, %g6, %o1 - mov %g2, %o7 - jmpl %o5 + (72b - 102b), %g0 - sub %o0, %g6, %o0 - -75: and %o2, 0xe, %o3 - mov %o7, %g2 - sll %o3, 3, %o4 - sub %o0, %o3, %o0 -103: call 100f - sub %o1, %o3, %o1 - mov %g2, %o7 - jmpl %o5 + (76f - 103b), %g0 - andcc %o2, 1, %g0 - - RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3) - -76: be 1f - nop - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -1: retl - ld [%sp + 64], %o0 - -91: bne 75b - andcc %o2, 8, %g0 - - be 1f - andcc %o2, 4, %g0 - - ld [%o1 - 0x08], %g2 - ld [%o1 - 0x04], %g3 - sub %o1, 8, %o1 - st %g2, [%o0 - 0x08] - st %g3, [%o0 - 0x04] - sub %o0, 8, %o0 -1: b 73b - mov %o2, %g1 - -77: cmp %o2, 15 - bleu 75b - andcc %o0, 3, %g0 - be 64f - andcc %o0, 1, %g0 - be 63f - andcc %o0, 2, %g0 - ldub [%o1 - 1], %g5 - sub %o1, 1, %o1 - stb %g5, [%o0 - 1] - sub %o0, 1, %o0 - be 64f - sub %o2, 1, %o2 - -63: ldub [%o1 - 1], %g5 - sub %o1, 2, %o1 - stb %g5, [%o0 - 1] - sub %o0, 2, %o0 - ldub [%o1], %g5 - sub %o2, 2, %o2 - stb %g5, [%o0] -64: and %o1, 3, %g2 - and %o1, -4, %o1 - and %o2, 0xc, %g3 - add %o1, 4, %o1 - cmp %g3, 4 - sll %g2, 3, %g4 - mov 32, %g2 - be 4f - sub %g2, %g4, %g6 - - blu 3f - cmp %g3, 8 - - be 2f - srl %o2, 2, %g3 - - ld [%o1 - 4], %o3 - add %o0, -8, %o0 - ld [%o1 - 8], %o4 - add %o1, -16, %o1 - b 7f - add %g3, 1, %g3 -2: ld [%o1 - 4], %o4 - add %o0, -4, %o0 - 
ld [%o1 - 8], %g1 - add %o1, -12, %o1 - b 8f - add %g3, 2, %g3 -3: ld [%o1 - 4], %o5 - add %o0, -12, %o0 - ld [%o1 - 8], %o3 - add %o1, -20, %o1 - b 6f - srl %o2, 2, %g3 -4: ld [%o1 - 4], %g1 - srl %o2, 2, %g3 - ld [%o1 - 8], %o5 - add %o1, -24, %o1 - add %o0, -16, %o0 - add %g3, -1, %g3 - - ld [%o1 + 12], %o3 -5: sll %o5, %g4, %g2 - srl %g1, %g6, %g5 - or %g2, %g5, %g2 - st %g2, [%o0 + 12] -6: ld [%o1 + 8], %o4 - sll %o3, %g4, %g2 - srl %o5, %g6, %g5 - or %g2, %g5, %g2 - st %g2, [%o0 + 8] -7: ld [%o1 + 4], %g1 - sll %o4, %g4, %g2 - srl %o3, %g6, %g5 - or %g2, %g5, %g2 - st %g2, [%o0 + 4] -8: ld [%o1], %o5 - sll %g1, %g4, %g2 - srl %o4, %g6, %g5 - addcc %g3, -4, %g3 - or %g2, %g5, %g2 - add %o1, -16, %o1 - st %g2, [%o0] - add %o0, -16, %o0 - bne,a 5b - ld [%o1 + 12], %o3 - sll %o5, %g4, %g2 - srl %g1, %g6, %g5 - srl %g4, 3, %g3 - or %g2, %g5, %g2 - add %o1, %g3, %o1 - andcc %o2, 2, %g0 - st %g2, [%o0 + 12] - be 1f - andcc %o2, 1, %g0 - - ldub [%o1 + 15], %g5 - add %o1, -2, %o1 - stb %g5, [%o0 + 11] - add %o0, -2, %o0 - ldub [%o1 + 16], %g5 - stb %g5, [%o0 + 12] -1: be 1f - nop - ldub [%o1 + 15], %g5 - stb %g5, [%o0 + 11] -1: retl - ld [%sp + 64], %o0 - -78: andcc %o1, 1, %g0 - be 4f - andcc %o1, 2, %g0 - - ldub [%o1], %g2 - add %o1, 1, %o1 - stb %g2, [%o0] - sub %o2, 1, %o2 - bne 3f - add %o0, 1, %o0 -4: lduh [%o1], %g2 - add %o1, 2, %o1 - sth %g2, [%o0] - sub %o2, 2, %o2 - b 3f - add %o0, 2, %o0 -END(memmove) - ENTRY(memcpy) /* %o0=dst %o1=src %o2=len */ sub %o0, %o1, %o4 st %o0, [%sp + 64] @@ -968,5 +623,5 @@ ENTRY(memcpy) /* %o0=dst %o1=src %o2=len */ 110: retl sub %o7, %g6, %o5 END(memcpy) + libc_hidden_builtin_def (memcpy) -libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc32/memmove.c b/sysdeps/sparc/sparc32/memmove.c deleted file mode 100644 index a8d2d49948..0000000000 --- a/sysdeps/sparc/sparc32/memmove.c +++ /dev/null @@ -1 +0,0 @@ -/* memmove is in memcpy.S */ diff --git a/sysdeps/sparc/sparc32/sparcv9/bcopy.c b/sysdeps/sparc/sparc32/sparcv9/bcopy.c deleted file mode 100644 index 9a455f33c4..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/bcopy.c +++ /dev/null @@ -1 +0,0 @@ -/* bcopy is in memcpy.S */ diff --git a/sysdeps/sparc/sparc32/sparcv9/memmove.c b/sysdeps/sparc/sparc32/sparcv9/memmove.c deleted file mode 100644 index a8d2d49948..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/memmove.c +++ /dev/null @@ -1 +0,0 @@ -/* memmove is in memcpy.S */ diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/Makefile b/sysdeps/sparc/sparc32/sparcv9/multiarch/Makefile new file mode 100644 index 0000000000..4d45042a95 --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/Makefile @@ -0,0 +1,4 @@ +ifeq ($(subdir),string) +sysdep_routines += memcpy-ultra3 memcpy-niagara1 memcpy-niagara2 \ + memset-niagara1 +endif diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-niagara1.S b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-niagara1.S new file mode 100644 index 0000000000..10aef85fe1 --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-niagara1.S @@ -0,0 +1,2 @@ +#define XCC icc +#include <sparc64/multiarch/memcpy-niagara1.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-niagara2.S b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-niagara2.S new file mode 100644 index 0000000000..6b1bf6ea70 --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-niagara2.S @@ -0,0 +1,2 @@ +#define XCC icc +#include <sparc64/multiarch/memcpy-niagara2.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-ultra3.S 
b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-ultra3.S new file mode 100644 index 0000000000..77adf151aa --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy-ultra3.S @@ -0,0 +1,2 @@ +#define XCC icc +#include <sparc64/multiarch/memcpy-ultra3.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy.S b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy.S new file mode 100644 index 0000000000..14df91e005 --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/memcpy.S @@ -0,0 +1,4 @@ +#define ASI_PNF 0x82 +#define ASI_BLK_P 0xf0 +#define XCC icc +#include <sparc64/multiarch/memcpy.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/memset-niagara1.S b/sysdeps/sparc/sparc32/sparcv9/multiarch/memset-niagara1.S new file mode 100644 index 0000000000..b432420876 --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/memset-niagara1.S @@ -0,0 +1,2 @@ +#define XCC icc +#include <sparc64/multiarch/memset-niagara1.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/multiarch/memset.S b/sysdeps/sparc/sparc32/sparcv9/multiarch/memset.S new file mode 100644 index 0000000000..8f8264337d --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/multiarch/memset.S @@ -0,0 +1,4 @@ +#define ASI_PNF 0x82 +#define ASI_BLK_P 0xf0 +#define XCC icc +#include <sparc64/multiarch/memset.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/sparcv9b/memcpy.S b/sysdeps/sparc/sparc32/sparcv9/sparcv9b/memcpy.S deleted file mode 100644 index 61960dce61..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/sparcv9b/memcpy.S +++ /dev/null @@ -1,2 +0,0 @@ -#define XCC icc -#include <sparc64/sparcv9b/memcpy.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/sparcv9v/memcpy.S b/sysdeps/sparc/sparc32/sparcv9/sparcv9v/memcpy.S deleted file mode 100644 index 4c05f57bc2..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/sparcv9v/memcpy.S +++ /dev/null @@ -1,2 +0,0 @@ -#define XCC icc -#include <sparc64/sparcv9v/memcpy.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/sparcv9v/memset.S b/sysdeps/sparc/sparc32/sparcv9/sparcv9v/memset.S deleted file mode 100644 index 5e46c7489f..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/sparcv9v/memset.S +++ /dev/null @@ -1,2 +0,0 @@ -#define XCC icc -#include <sparc64/sparcv9v/memset.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/sparcv9v2/memcpy.S b/sysdeps/sparc/sparc32/sparcv9/sparcv9v2/memcpy.S deleted file mode 100644 index 7f4606037c..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/sparcv9v2/memcpy.S +++ /dev/null @@ -1,2 +0,0 @@ -#define XCC icc -#include <sparc64/sparcv9v2/memcpy.S> diff --git a/sysdeps/sparc/sparc32/sparcv9/sparcv9v2/memset.S b/sysdeps/sparc/sparc32/sparcv9/sparcv9v2/memset.S deleted file mode 100644 index 72de7bb0cf..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/sparcv9v2/memset.S +++ /dev/null @@ -1,2 +0,0 @@ -#define XCC icc -#include <sparc64/sparcv9v2/memset.S> diff --git a/sysdeps/sparc/sparc64/Makefile b/sysdeps/sparc/sparc64/Makefile index 3bb0238832..1a859dffc0 100644 --- a/sysdeps/sparc/sparc64/Makefile +++ b/sysdeps/sparc/sparc64/Makefile @@ -6,3 +6,7 @@ endif ifeq ($(subdir),csu) CFLAGS-initfini.s += -mcpu=v9 endif + +ifeq ($(subdir),string) +sysdep_routines += align-cpy +endif diff --git a/sysdeps/sparc/sparc64/align-cpy.S b/sysdeps/sparc/sparc64/align-cpy.S new file mode 100644 index 0000000000..bae788fe44 --- /dev/null +++ b/sysdeps/sparc/sparc64/align-cpy.S @@ -0,0 +1,85 @@ +/* Aligned copy routines specified by Sparc V9 ABI. + For 64-bit sparc. + Copyright (C) 2010 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ Contributed by David S. Miller (davem@davemloft.net) + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#include <sysdep.h> + + .text + .align 8 +ENTRY(__align_cpy_8) +10: cmp %o0, %o1 + be,pn %xcc, 9f + mov %o0, %o3 + subcc %o2, 0x08, %o2 + be,pn %xcc, 8f +1: ldx [%o1 + 0x00], %o5 + ldx [%o1 + 0x08], %o4 + subcc %o2, 0x10, %o2 + add %o1, 0x10, %o1 + stx %o5, [%o3 + 0x00] + stx %o4, [%o3 + 0x08] + bg,pt %xcc, 1b + add %o3, 0x10, %o3 + bne,pn %xcc, 9f + nop + ldx [%o1 + 0x00], %o5 +8: stx %o5, [%o3 + 0x00] +9: retl + nop +END(__align_cpy_8) + + .align 8 +ENTRY(__align_cpy_4) +20: cmp %o0, %o1 + be,pn %xcc, 9f + mov %o0, %o3 + subcc %o2, 0x04, %o2 + be,pn %xcc, 8f +1: lduw [%o1 + 0x00], %o5 + lduw [%o1 + 0x04], %o4 + subcc %o2, 0x08, %o2 + add %o1, 0x08, %o1 + stw %o5, [%o3 + 0x00] + stw %o4, [%o3 + 0x04] + bg,pt %xcc, 1b + add %o3, 0x08, %o3 + bne,pn %xcc, 9f + nop + lduw [%o1 + 0x00], %o5 +8: stw %o5, [%o3 + 0x00] +9: retl + nop +END(__align_cpy_4) + + .align 8 +ENTRY(__align_cpy_2) + or %o0, %o1, %o3 + or %o2, %o3, %o3 + andcc %o3, 0x7, %g0 + be,pt %xcc, 10b + andcc %o3, 0x3, %g0 + be,pt %xcc, 20b + mov %o7, %g1 + call HIDDEN_JUMPTARGET(memcpy) + mov %o7, %g1 +END(__align_cpy_2) + +weak_alias (__align_cpy_8, __align_cpy_16) +weak_alias (__align_cpy_2, __align_cpy_1) diff --git a/sysdeps/sparc/sparc64/bcopy.c b/sysdeps/sparc/sparc64/bcopy.c deleted file mode 100644 index 9a455f33c4..0000000000 --- a/sysdeps/sparc/sparc64/bcopy.c +++ /dev/null @@ -1 +0,0 @@ -/* bcopy is in memcpy.S */ diff --git a/sysdeps/sparc/sparc64/memcopy.h b/sysdeps/sparc/sparc64/memcopy.h new file mode 100644 index 0000000000..ec978e3c80 --- /dev/null +++ b/sysdeps/sparc/sparc64/memcopy.h @@ -0,0 +1 @@ +#include <sparc32/memcopy.h> diff --git a/sysdeps/sparc/sparc64/memcpy.S b/sysdeps/sparc/sparc64/memcpy.S index 5993358017..71e7100658 100644 --- a/sysdeps/sparc/sparc64/memcpy.S +++ b/sysdeps/sparc/sparc64/memcpy.S @@ -136,79 +136,8 @@ stx %t0, [%dst - offset - 0x10]; \ stx %t1, [%dst - offset - 0x08]; - /* Macros for non-VIS memmove code. 
*/ -#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src - offset - 0x20], %t0; \ - ldx [%src - offset - 0x18], %t1; \ - ldx [%src - offset - 0x10], %t2; \ - ldx [%src - offset - 0x08], %t3; \ - stw %t0, [%dst - offset - 0x1c]; \ - srlx %t0, 32, %t0; \ - stw %t0, [%dst - offset - 0x20]; \ - stw %t1, [%dst - offset - 0x14]; \ - srlx %t1, 32, %t1; \ - stw %t1, [%dst - offset - 0x18]; \ - stw %t2, [%dst - offset - 0x0c]; \ - srlx %t2, 32, %t2; \ - stw %t2, [%dst - offset - 0x10]; \ - stw %t3, [%dst - offset - 0x04]; \ - srlx %t3, 32, %t3; \ - stw %t3, [%dst - offset - 0x08]; - -#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src - offset - 0x20], %t0; \ - ldx [%src - offset - 0x18], %t1; \ - ldx [%src - offset - 0x10], %t2; \ - ldx [%src - offset - 0x08], %t3; \ - stx %t0, [%dst - offset - 0x20]; \ - stx %t1, [%dst - offset - 0x18]; \ - stx %t2, [%dst - offset - 0x10]; \ - stx %t3, [%dst - offset - 0x08]; \ - ldx [%src - offset - 0x40], %t0; \ - ldx [%src - offset - 0x38], %t1; \ - ldx [%src - offset - 0x30], %t2; \ - ldx [%src - offset - 0x28], %t3; \ - stx %t0, [%dst - offset - 0x40]; \ - stx %t1, [%dst - offset - 0x38]; \ - stx %t2, [%dst - offset - 0x30]; \ - stx %t3, [%dst - offset - 0x28]; - -#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src + offset + 0x00], %t0; \ - ldx [%src + offset + 0x08], %t1; \ - stw %t0, [%dst + offset + 0x04]; \ - srlx %t0, 32, %t2; \ - stw %t2, [%dst + offset + 0x00]; \ - stw %t1, [%dst + offset + 0x0c]; \ - srlx %t1, 32, %t3; \ - stw %t3, [%dst + offset + 0x08]; - -#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \ - ldx [%src + offset + 0x00], %t0; \ - ldx [%src + offset + 0x08], %t1; \ - stx %t0, [%dst + offset + 0x00]; \ - stx %t1, [%dst + offset + 0x08]; - .text .align 32 - -ENTRY(bcopy) - sub %o1, %o0, %o4 /* IEU0 Group */ - mov %o0, %g3 /* IEU1 */ - cmp %o4, %o2 /* IEU1 Group */ - mov %o1, %o0 /* IEU0 */ - bgeu,pt %XCC, 210f /* CTI */ - mov %g3, %o1 /* IEU0 Group */ -#ifndef USE_BPR - srl %o2, 0, %o2 /* IEU1 */ -#endif - brnz,pn %o2, 220f /* CTI Group */ - add %o0, %o2, %o0 /* IEU0 */ - retl - nop -END(bcopy) - - .align 32 ENTRY(__memcpy_large) 200: be,pt %xcc, 201f /* CTI */ andcc %o0, 0x38, %g5 /* IEU1 Group */ @@ -446,65 +375,6 @@ ENTRY(__memcpy_large) mov %g4, %o0 END(__memcpy_large) -#ifdef USE_BPR - - /* void *__align_cpy_4(void *dest, void *src, size_t n) - * SPARC v9 SYSV ABI - * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 3)) - */ - - .align 32 -ENTRY(__align_cpy_4) - mov %o0, %g4 /* IEU0 Group */ - cmp %o2, 15 /* IEU1 */ - bleu,pn %xcc, 208b /* CTI */ - cmp %o2, (64 * 6) /* IEU1 Group */ - bgeu,pn %xcc, 200b /* CTI */ - andcc %o0, 7, %g2 /* IEU1 Group */ - ba,pt %xcc, 216f /* CTI */ - andcc %o1, 4, %g0 /* IEU1 Group */ -END(__align_cpy_4) - - /* void *__align_cpy_8(void *dest, void *src, size_t n) - * SPARC v9 SYSV ABI - * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 7)) - */ - - .align 32 -ENTRY(__align_cpy_8) - mov %o0, %g4 /* IEU0 Group */ - cmp %o2, 15 /* IEU1 */ - bleu,pn %xcc, 208b /* CTI */ - cmp %o2, (64 * 6) /* IEU1 Group */ - bgeu,pn %xcc, 201b /* CTI */ - andcc %o0, 0x38, %g5 /* IEU1 Group */ - andcc %o2, -128, %g6 /* IEU1 Group */ - bne,a,pt %xcc, 82f + 4 /* CTI */ - ldx [%o1], %g1 /* Load */ - ba,pt %xcc, 41f /* CTI Group */ - andcc %o2, 0x70, %g6 /* IEU1 */ -END(__align_cpy_8) - - /* void *__align_cpy_16(void *dest, void *src, size_t n) - * SPARC v9 SYSV ABI - * Like memcpy, but results are undefined if (!n || 
((dest | src | n) & 15)) - */ - - .align 32 -ENTRY(__align_cpy_16) - mov %o0, %g4 /* IEU0 Group */ - cmp %o2, (64 * 6) /* IEU1 */ - bgeu,pn %xcc, 201b /* CTI */ - andcc %o0, 0x38, %g5 /* IEU1 Group */ - andcc %o2, -128, %g6 /* IEU1 Group */ - bne,a,pt %xcc, 82f + 4 /* CTI */ - ldx [%o1], %g1 /* Load */ - ba,pt %xcc, 41f /* CTI Group */ - andcc %o2, 0x70, %g6 /* IEU1 */ -END(__align_cpy_16) - -#endif - .align 32 ENTRY(memcpy) 210: @@ -699,227 +569,4 @@ ENTRY(memcpy) mov %g4, %o0 END(memcpy) - .align 32 -ENTRY(__memmove_slowpath) -228: andcc %o2, 1, %g0 /* IEU1 Group */ - be,pt %icc, 2f+4 /* CTI */ -1: ldub [%o1 - 1], %o5 /* LOAD Group */ - sub %o1, 1, %o1 /* IEU0 */ - sub %o0, 1, %o0 /* IEU1 */ - subcc %o2, 1, %o2 /* IEU1 Group */ - be,pn %xcc, 229f /* CTI */ - stb %o5, [%o0] /* Store */ -2: ldub [%o1 - 1], %o5 /* LOAD Group */ - sub %o0, 2, %o0 /* IEU0 */ - ldub [%o1 - 2], %g5 /* LOAD Group */ - sub %o1, 2, %o1 /* IEU0 */ - subcc %o2, 2, %o2 /* IEU1 Group */ - stb %o5, [%o0 + 1] /* Store */ - bne,pt %xcc, 2b /* CTI */ - stb %g5, [%o0] /* Store */ -229: retl - mov %g4, %o0 -219: retl - nop -END(__memmove_slowpath) - - .align 32 -ENTRY(memmove) -#ifndef USE_BPR - srl %o2, 0, %o2 /* IEU1 Group */ -#endif - brz,pn %o2, 219b /* CTI Group */ - sub %o0, %o1, %o4 /* IEU0 */ - cmp %o4, %o2 /* IEU1 Group */ - bgeu,pt %XCC, 218b /* CTI */ - mov %o0, %g4 /* IEU0 */ - add %o0, %o2, %o0 /* IEU0 Group */ -220: add %o1, %o2, %o1 /* IEU1 */ - cmp %o2, 15 /* IEU1 Group */ - bleu,pn %xcc, 228b /* CTI */ - andcc %o0, 7, %g2 /* IEU1 Group */ - sub %o0, %o1, %g5 /* IEU0 */ - andcc %g5, 3, %o5 /* IEU1 Group */ - bne,pn %xcc, 232f /* CTI */ - andcc %o1, 3, %g0 /* IEU1 Group */ - be,a,pt %xcc, 236f /* CTI */ - andcc %o1, 4, %g0 /* IEU1 Group */ - andcc %o1, 1, %g0 /* IEU1 Group */ - be,pn %xcc, 4f /* CTI */ - andcc %o1, 2, %g0 /* IEU1 Group */ - ldub [%o1 - 1], %g2 /* Load Group */ - sub %o1, 1, %o1 /* IEU0 */ - sub %o0, 1, %o0 /* IEU1 */ - sub %o2, 1, %o2 /* IEU0 Group */ - be,pn %xcc, 5f /* CTI Group */ - stb %g2, [%o0] /* Store */ -4: lduh [%o1 - 2], %g2 /* Load Group */ - sub %o1, 2, %o1 /* IEU0 */ - sub %o0, 2, %o0 /* IEU1 */ - sub %o2, 2, %o2 /* IEU0 */ - sth %g2, [%o0] /* Store Group + bubble */ -5: andcc %o1, 4, %g0 /* IEU1 */ -236: be,a,pn %xcc, 2f /* CTI */ - andcc %o2, -128, %g6 /* IEU1 Group */ - lduw [%o1 - 4], %g5 /* Load Group */ - sub %o1, 4, %o1 /* IEU0 */ - sub %o0, 4, %o0 /* IEU1 */ - sub %o2, 4, %o2 /* IEU0 Group */ - stw %g5, [%o0] /* Store */ - andcc %o2, -128, %g6 /* IEU1 Group */ -2: be,pn %xcc, 235f /* CTI */ - andcc %o0, 4, %g0 /* IEU1 Group */ - be,pn %xcc, 282f + 4 /* CTI Group */ -5: RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5) - subcc %g6, 128, %g6 /* IEU1 Group */ - sub %o1, 128, %o1 /* IEU0 */ - bne,pt %xcc, 5b /* CTI */ - sub %o0, 128, %o0 /* IEU0 Group */ -235: andcc %o2, 0x70, %g6 /* IEU1 Group */ -41: be,pn %xcc, 280f /* CTI */ - andcc %o2, 8, %g0 /* IEU1 Group */ - /* Clk1 8-( */ - /* Clk2 8-( */ - /* Clk3 8-( */ - /* Clk4 8-( */ -279: rd %pc, %o5 /* PDU Group */ - sll %g6, 1, %g5 /* IEU0 Group */ - sub %o1, %g6, %o1 /* IEU1 */ - sub %o5, %g5, %o5 /* IEU0 Group */ - jmpl %o5 + %lo(280f - 279b), %g0 /* CTI Group brk forced*/ - sub %o0, %g6, %o0 /* IEU0 Group */ - RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x30, g2, 
g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5) -280: be,pt %xcc, 281f /* CTI */ - andcc %o2, 4, %g0 /* IEU1 */ - ldx [%o1 - 8], %g2 /* Load Group */ - sub %o0, 8, %o0 /* IEU0 */ - stw %g2, [%o0 + 4] /* Store Group */ - sub %o1, 8, %o1 /* IEU1 */ - srlx %g2, 32, %g2 /* IEU0 Group */ - stw %g2, [%o0] /* Store */ -281: be,pt %xcc, 1f /* CTI */ - andcc %o2, 2, %g0 /* IEU1 Group */ - lduw [%o1 - 4], %g2 /* Load Group */ - sub %o1, 4, %o1 /* IEU0 */ - stw %g2, [%o0 - 4] /* Store Group */ - sub %o0, 4, %o0 /* IEU0 */ -1: be,pt %xcc, 1f /* CTI */ - andcc %o2, 1, %g0 /* IEU1 Group */ - lduh [%o1 - 2], %g2 /* Load Group */ - sub %o1, 2, %o1 /* IEU0 */ - sth %g2, [%o0 - 2] /* Store Group */ - sub %o0, 2, %o0 /* IEU0 */ -1: be,pt %xcc, 211f /* CTI */ - nop /* IEU1 */ - ldub [%o1 - 1], %g2 /* Load Group */ - stb %g2, [%o0 - 1] /* Store Group + bubble */ -211: retl - mov %g4, %o0 - -282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5) - subcc %g6, 128, %g6 /* IEU1 Group */ - sub %o1, 128, %o1 /* IEU0 */ - bne,pt %xcc, 282b /* CTI */ - sub %o0, 128, %o0 /* IEU0 Group */ - andcc %o2, 0x70, %g6 /* IEU1 */ - be,pn %xcc, 284f /* CTI */ - andcc %o2, 8, %g0 /* IEU1 Group */ - /* Clk1 8-( */ - /* Clk2 8-( */ - /* Clk3 8-( */ - /* Clk4 8-( */ -283: rd %pc, %o5 /* PDU Group */ - sub %o1, %g6, %o1 /* IEU0 Group */ - sub %o5, %g6, %o5 /* IEU1 */ - jmpl %o5 + %lo(284f - 283b), %g0 /* CTI Group brk forced*/ - sub %o0, %g6, %o0 /* IEU0 Group */ - RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3) -284: be,pt %xcc, 285f /* CTI Group */ - andcc %o2, 4, %g0 /* IEU1 */ - ldx [%o1 - 8], %g2 /* Load Group */ - sub %o0, 8, %o0 /* IEU0 */ - sub %o1, 8, %o1 /* IEU0 Group */ - stx %g2, [%o0] /* Store */ -285: be,pt %xcc, 1f /* CTI */ - andcc %o2, 2, %g0 /* IEU1 Group */ - lduw [%o1 - 4], %g2 /* Load Group */ - sub %o0, 4, %o0 /* IEU0 */ - sub %o1, 4, %o1 /* IEU0 Group */ - stw %g2, [%o0] /* Store */ -1: be,pt %xcc, 1f /* CTI */ - andcc %o2, 1, %g0 /* IEU1 Group */ - lduh [%o1 - 2], %g2 /* Load Group */ - sub %o0, 2, %o0 /* IEU0 */ - sub %o1, 2, %o1 /* IEU0 Group */ - sth %g2, [%o0] /* Store */ -1: be,pt %xcc, 1f /* CTI */ - nop /* IEU0 Group */ - ldub [%o1 - 1], %g2 /* Load Group */ - stb %g2, [%o0 - 1] /* Store Group + bubble */ -1: retl - mov %g4, %o0 - -232: brz,pt %g2, 2f /* CTI Group */ - sub %o2, %g2, %o2 /* IEU0 Group */ -1: ldub [%o1 - 1], %g5 /* Load Group */ - sub %o1, 1, %o1 /* IEU0 */ - sub %o0, 1, %o0 /* IEU1 */ - subcc %g2, 1, %g2 /* IEU1 Group */ - bne,pt %xcc, 1b /* CTI */ - stb %g5, [%o0] /* Store */ -2: andn %o2, 7, %g5 /* IEU0 Group */ - and %o2, 7, %o2 /* IEU1 */ - fmovd %f0, %f2 /* FPU */ - alignaddr %o1, %g0, %g1 /* GRU Group */ - ldd [%g1], %f4 /* Load Group */ -1: ldd [%g1 - 8], %f6 /* Load Group */ - sub %g1, 8, %g1 /* IEU0 Group */ - subcc %g5, 8, %g5 /* IEU1 */ - faligndata %f6, %f4, %f0 /* GRU Group */ - std %f0, [%o0 - 8] /* Store */ - sub %o1, 8, %o1 /* IEU0 Group */ - be,pn %xcc, 233f /* CTI */ - sub %o0, 8, %o0 /* IEU1 */ - ldd [%g1 - 8], %f4 /* Load Group */ - sub %g1, 8, %g1 /* IEU0 */ - subcc %g5, 8, %g5 /* IEU1 */ - faligndata %f4, %f6, %f0 /* GRU Group */ - std %f0, [%o0 - 
8] /* Store */ - sub %o1, 8, %o1 /* IEU0 */ - bne,pn %xcc, 1b /* CTI Group */ - sub %o0, 8, %o0 /* IEU0 */ -233: brz,pn %o2, 234f /* CTI Group */ - nop /* IEU0 */ -237: ldub [%o1 - 1], %g5 /* LOAD */ - sub %o1, 1, %o1 /* IEU0 */ - sub %o0, 1, %o0 /* IEU1 */ - subcc %o2, 1, %o2 /* IEU1 */ - bne,pt %xcc, 237b /* CTI */ - stb %g5, [%o0] /* Store Group */ -234: wr %g0, FPRS_FEF, %fprs - retl - mov %g4, %o0 -END(memmove) - -#ifdef USE_BPR -weak_alias (memcpy, __align_cpy_1) -weak_alias (memcpy, __align_cpy_2) -#endif libc_hidden_builtin_def (memcpy) -libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc64/memmove.c b/sysdeps/sparc/sparc64/memmove.c deleted file mode 100644 index a8d2d49948..0000000000 --- a/sysdeps/sparc/sparc64/memmove.c +++ /dev/null @@ -1 +0,0 @@ -/* memmove is in memcpy.S */ diff --git a/sysdeps/sparc/sparc64/multiarch/Makefile b/sysdeps/sparc/sparc64/multiarch/Makefile new file mode 100644 index 0000000000..4d45042a95 --- /dev/null +++ b/sysdeps/sparc/sparc64/multiarch/Makefile @@ -0,0 +1,4 @@ +ifeq ($(subdir),string) +sysdep_routines += memcpy-ultra3 memcpy-niagara1 memcpy-niagara2 \ + memset-niagara1 +endif diff --git a/sysdeps/sparc/sparc64/sparcv9v/memcpy.S b/sysdeps/sparc/sparc64/multiarch/memcpy-niagara1.S index ad2b0f742c..6a78295e81 100644 --- a/sysdeps/sparc/sparc64/sparcv9v/memcpy.S +++ b/sysdeps/sparc/sparc64/multiarch/memcpy-niagara1.S @@ -36,34 +36,19 @@ #define XCC xcc #endif +#if !defined NOT_IN_libc + .register %g2,#scratch .register %g3,#scratch .register %g6,#scratch .text - .align 32 - -ENTRY(bcopy) - sub %o1, %o0, %o4 - mov %o0, %g4 - cmp %o4, %o2 - mov %o1, %o0 - bgeu,pt %XCC, 100f - mov %g4, %o1 -#ifndef USE_BPR - srl %o2, 0, %o2 -#endif - brnz,pn %o2, 220f - add %o0, %o2, %o0 - retl - nop -END(bcopy) .align 32 -ENTRY(memcpy) -#ifndef USE_BPR +ENTRY(__memcpy_niagara1) +# ifndef USE_BPR srl %o2, 0, %o2 -#endif +# endif 100: /* %o0=dst, %o1=src, %o2=len */ mov %o0, %g5 cmp %o2, 0 @@ -352,245 +337,6 @@ ENTRY(memcpy) retl mov %g5, %o0 -END(memcpy) - -#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src - offset - 0x20], %t0; \ - ldx [%src - offset - 0x18], %t1; \ - ldx [%src - offset - 0x10], %t2; \ - ldx [%src - offset - 0x08], %t3; \ - stw %t0, [%dst - offset - 0x1c]; \ - srlx %t0, 32, %t0; \ - stw %t0, [%dst - offset - 0x20]; \ - stw %t1, [%dst - offset - 0x14]; \ - srlx %t1, 32, %t1; \ - stw %t1, [%dst - offset - 0x18]; \ - stw %t2, [%dst - offset - 0x0c]; \ - srlx %t2, 32, %t2; \ - stw %t2, [%dst - offset - 0x10]; \ - stw %t3, [%dst - offset - 0x04]; \ - srlx %t3, 32, %t3; \ - stw %t3, [%dst - offset - 0x08]; - -#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src - offset - 0x20], %t0; \ - ldx [%src - offset - 0x18], %t1; \ - ldx [%src - offset - 0x10], %t2; \ - ldx [%src - offset - 0x08], %t3; \ - stx %t0, [%dst - offset - 0x20]; \ - stx %t1, [%dst - offset - 0x18]; \ - stx %t2, [%dst - offset - 0x10]; \ - stx %t3, [%dst - offset - 0x08]; \ - ldx [%src - offset - 0x40], %t0; \ - ldx [%src - offset - 0x38], %t1; \ - ldx [%src - offset - 0x30], %t2; \ - ldx [%src - offset - 0x28], %t3; \ - stx %t0, [%dst - offset - 0x40]; \ - stx %t1, [%dst - offset - 0x38]; \ - stx %t2, [%dst - offset - 0x30]; \ - stx %t3, [%dst - offset - 0x28]; - -#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src + offset + 0x00], %t0; \ - ldx [%src + offset + 0x08], %t1; \ - stw %t0, [%dst + offset + 0x04]; \ - srlx %t0, 32, %t2; \ - stw %t2, [%dst + offset + 0x00]; \ - stw %t1, [%dst + offset + 0x0c]; \ 
- srlx %t1, 32, %t3; \ - stw %t3, [%dst + offset + 0x08]; - -#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \ - ldx [%src + offset + 0x00], %t0; \ - ldx [%src + offset + 0x08], %t1; \ - stx %t0, [%dst + offset + 0x00]; \ - stx %t1, [%dst + offset + 0x08]; +END(__memcpy_niagara1) - .align 32 -228: andcc %o2, 1, %g0 - be,pt %icc, 2f+4 -1: ldub [%o1 - 1], %o5 - sub %o1, 1, %o1 - sub %o0, 1, %o0 - subcc %o2, 1, %o2 - be,pn %xcc, 229f - stb %o5, [%o0] -2: ldub [%o1 - 1], %o5 - sub %o0, 2, %o0 - ldub [%o1 - 2], %g5 - sub %o1, 2, %o1 - subcc %o2, 2, %o2 - stb %o5, [%o0 + 1] - bne,pt %xcc, 2b - stb %g5, [%o0] -229: retl - mov %g4, %o0 -out: retl - mov %g5, %o0 - - .align 32 -ENTRY(memmove) - mov %o0, %g5 -#ifndef USE_BPR - srl %o2, 0, %o2 -#endif - brz,pn %o2, out - sub %o0, %o1, %o4 - cmp %o4, %o2 - bgeu,pt %XCC, 218b - mov %o0, %g4 - add %o0, %o2, %o0 -220: add %o1, %o2, %o1 - cmp %o2, 15 - bleu,pn %xcc, 228b - andcc %o0, 7, %g2 - sub %o0, %o1, %g5 - andcc %g5, 3, %o5 - bne,pn %xcc, 232f - andcc %o1, 3, %g0 - be,a,pt %xcc, 236f - andcc %o1, 4, %g0 - andcc %o1, 1, %g0 - be,pn %xcc, 4f - andcc %o1, 2, %g0 - ldub [%o1 - 1], %g2 - sub %o1, 1, %o1 - sub %o0, 1, %o0 - sub %o2, 1, %o2 - be,pn %xcc, 5f - stb %g2, [%o0] -4: lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sub %o0, 2, %o0 - sub %o2, 2, %o2 - sth %g2, [%o0] -5: andcc %o1, 4, %g0 -236: be,a,pn %xcc, 2f - andcc %o2, -128, %g6 - lduw [%o1 - 4], %g5 - sub %o1, 4, %o1 - sub %o0, 4, %o0 - sub %o2, 4, %o2 - stw %g5, [%o0] - andcc %o2, -128, %g6 -2: be,pn %xcc, 235f - andcc %o0, 4, %g0 - be,pn %xcc, 282f + 4 -5: RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5) - subcc %g6, 128, %g6 - sub %o1, 128, %o1 - bne,pt %xcc, 5b - sub %o0, 128, %o0 -235: andcc %o2, 0x70, %g6 -41: be,pn %xcc, 280f - andcc %o2, 8, %g0 - -279: rd %pc, %o5 - sll %g6, 1, %g5 - sub %o1, %g6, %o1 - sub %o5, %g5, %o5 - jmpl %o5 + %lo(280f - 279b), %g0 - sub %o0, %g6, %o0 - RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5) -280: be,pt %xcc, 281f - andcc %o2, 4, %g0 - ldx [%o1 - 8], %g2 - sub %o0, 8, %o0 - stw %g2, [%o0 + 4] - sub %o1, 8, %o1 - srlx %g2, 32, %g2 - stw %g2, [%o0] -281: be,pt %xcc, 1f - andcc %o2, 2, %g0 - lduw [%o1 - 4], %g2 - sub %o1, 4, %o1 - stw %g2, [%o0 - 4] - sub %o0, 4, %o0 -1: be,pt %xcc, 1f - andcc %o2, 1, %g0 - lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sth %g2, [%o0 - 2] - sub %o0, 2, %o0 -1: be,pt %xcc, 211f - nop - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -211: retl - mov %g4, %o0 - -282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5) - subcc %g6, 128, %g6 - sub %o1, 128, %o1 - bne,pt %xcc, 282b - sub %o0, 128, %o0 - andcc %o2, 0x70, %g6 - be,pn %xcc, 284f - andcc %o2, 8, %g0 - -283: rd %pc, %o5 - sub %o1, %g6, %o1 - sub %o5, %g6, %o5 - jmpl %o5 + %lo(284f - 283b), %g0 - sub %o0, %g6, %o0 - RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, 
g2, g3) -284: be,pt %xcc, 285f - andcc %o2, 4, %g0 - ldx [%o1 - 8], %g2 - sub %o0, 8, %o0 - sub %o1, 8, %o1 - stx %g2, [%o0] -285: be,pt %xcc, 1f - andcc %o2, 2, %g0 - lduw [%o1 - 4], %g2 - sub %o0, 4, %o0 - sub %o1, 4, %o1 - stw %g2, [%o0] -1: be,pt %xcc, 1f - andcc %o2, 1, %g0 - lduh [%o1 - 2], %g2 - sub %o0, 2, %o0 - sub %o1, 2, %o1 - sth %g2, [%o0] -1: be,pt %xcc, 1f - nop - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -1: retl - mov %g4, %o0 - -232: ldub [%o1 - 1], %g5 - sub %o1, 1, %o1 - sub %o0, 1, %o0 - subcc %o2, 1, %o2 - bne,pt %xcc, 232b - stb %g5, [%o0] -234: retl - mov %g4, %o0 -END(memmove) - -#ifdef USE_BPR -weak_alias (memcpy, __align_cpy_1) -weak_alias (memcpy, __align_cpy_2) -weak_alias (memcpy, __align_cpy_4) -weak_alias (memcpy, __align_cpy_8) -weak_alias (memcpy, __align_cpy_16) #endif -libc_hidden_builtin_def (memcpy) -libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc64/sparcv9v2/memcpy.S b/sysdeps/sparc/sparc64/multiarch/memcpy-niagara2.S index b261f461a4..35f6989aca 100644 --- a/sysdeps/sparc/sparc64/sparcv9v2/memcpy.S +++ b/sysdeps/sparc/sparc64/multiarch/memcpy-niagara2.S @@ -138,34 +138,19 @@ LOAD(ldd, base + 0x28, %x5); \ LOAD(ldd, base + 0x30, %x6); +#if !defined NOT_IN_libc + .register %g2,#scratch .register %g3,#scratch .register %g6,#scratch .text - .align 32 - -ENTRY(bcopy) - sub %o1, %o0, %o4 - mov %o0, %g4 - cmp %o4, %o2 - mov %o1, %o0 - bgeu,pt %XCC, 100f - mov %g4, %o1 -#ifndef USE_BPR - srl %o2, 0, %o2 -#endif - brnz,pn %o2, 220f - add %o0, %o2, %o0 - retl - nop -END(bcopy) .align 32 -ENTRY(memcpy) -#ifndef USE_BPR +ENTRY(__memcpy_niagara2) +# ifndef USE_BPR srl %o2, 0, %o2 -#endif +# endif 100: /* %o0=dst, %o1=src, %o2=len */ mov %o0, %g5 cmp %o2, 0 @@ -502,245 +487,6 @@ ENTRY(memcpy) retl mov %g5, %o0 -END(memcpy) - -#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src - offset - 0x20], %t0; \ - ldx [%src - offset - 0x18], %t1; \ - ldx [%src - offset - 0x10], %t2; \ - ldx [%src - offset - 0x08], %t3; \ - stw %t0, [%dst - offset - 0x1c]; \ - srlx %t0, 32, %t0; \ - stw %t0, [%dst - offset - 0x20]; \ - stw %t1, [%dst - offset - 0x14]; \ - srlx %t1, 32, %t1; \ - stw %t1, [%dst - offset - 0x18]; \ - stw %t2, [%dst - offset - 0x0c]; \ - srlx %t2, 32, %t2; \ - stw %t2, [%dst - offset - 0x10]; \ - stw %t3, [%dst - offset - 0x04]; \ - srlx %t3, 32, %t3; \ - stw %t3, [%dst - offset - 0x08]; - -#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src - offset - 0x20], %t0; \ - ldx [%src - offset - 0x18], %t1; \ - ldx [%src - offset - 0x10], %t2; \ - ldx [%src - offset - 0x08], %t3; \ - stx %t0, [%dst - offset - 0x20]; \ - stx %t1, [%dst - offset - 0x18]; \ - stx %t2, [%dst - offset - 0x10]; \ - stx %t3, [%dst - offset - 0x08]; \ - ldx [%src - offset - 0x40], %t0; \ - ldx [%src - offset - 0x38], %t1; \ - ldx [%src - offset - 0x30], %t2; \ - ldx [%src - offset - 0x28], %t3; \ - stx %t0, [%dst - offset - 0x40]; \ - stx %t1, [%dst - offset - 0x38]; \ - stx %t2, [%dst - offset - 0x30]; \ - stx %t3, [%dst - offset - 0x28]; - -#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldx [%src + offset + 0x00], %t0; \ - ldx [%src + offset + 0x08], %t1; \ - stw %t0, [%dst + offset + 0x04]; \ - srlx %t0, 32, %t2; \ - stw %t2, [%dst + offset + 0x00]; \ - stw %t1, [%dst + offset + 0x0c]; \ - srlx %t1, 32, %t3; \ - stw %t3, [%dst + offset + 0x08]; - -#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \ - ldx [%src + offset + 0x00], %t0; \ - ldx [%src + offset + 0x08], %t1; \ - stx %t0, [%dst + offset + 0x00]; \ - 
stx %t1, [%dst + offset + 0x08]; +END(__memcpy_niagara2) - .align 32 -228: andcc %o2, 1, %g0 - be,pt %icc, 2f+4 -1: ldub [%o1 - 1], %o5 - sub %o1, 1, %o1 - sub %o0, 1, %o0 - subcc %o2, 1, %o2 - be,pn %XCC, 229f - stb %o5, [%o0] -2: ldub [%o1 - 1], %o5 - sub %o0, 2, %o0 - ldub [%o1 - 2], %g5 - sub %o1, 2, %o1 - subcc %o2, 2, %o2 - stb %o5, [%o0 + 1] - bne,pt %XCC, 2b - stb %g5, [%o0] -229: retl - mov %g4, %o0 -out: retl - mov %g5, %o0 - - .align 32 -ENTRY(memmove) - mov %o0, %g5 -#ifndef USE_BPR - srl %o2, 0, %o2 -#endif - brz,pn %o2, out - sub %o0, %o1, %o4 - cmp %o4, %o2 - bgeu,pt %XCC, 218b - mov %o0, %g4 - add %o0, %o2, %o0 -220: add %o1, %o2, %o1 - cmp %o2, 15 - bleu,pn %XCC, 228b - andcc %o0, 7, %g2 - sub %o0, %o1, %g5 - andcc %g5, 3, %o5 - bne,pn %XCC, 232f - andcc %o1, 3, %g0 - be,a,pt %XCC, 236f - andcc %o1, 4, %g0 - andcc %o1, 1, %g0 - be,pn %XCC, 4f - andcc %o1, 2, %g0 - ldub [%o1 - 1], %g2 - sub %o1, 1, %o1 - sub %o0, 1, %o0 - sub %o2, 1, %o2 - be,pn %XCC, 5f - stb %g2, [%o0] -4: lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sub %o0, 2, %o0 - sub %o2, 2, %o2 - sth %g2, [%o0] -5: andcc %o1, 4, %g0 -236: be,a,pn %XCC, 2f - andcc %o2, -128, %g6 - lduw [%o1 - 4], %g5 - sub %o1, 4, %o1 - sub %o0, 4, %o0 - sub %o2, 4, %o2 - stw %g5, [%o0] - andcc %o2, -128, %g6 -2: be,pn %XCC, 235f - andcc %o0, 4, %g0 - be,pn %XCC, 282f + 4 -5: RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5) - RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5) - subcc %g6, 128, %g6 - sub %o1, 128, %o1 - bne,pt %XCC, 5b - sub %o0, 128, %o0 -235: andcc %o2, 0x70, %g6 -41: be,pn %XCC, 280f - andcc %o2, 8, %g0 - -279: rd %pc, %o5 - sll %g6, 1, %g5 - sub %o1, %g6, %o1 - sub %o5, %g5, %o5 - jmpl %o5 + %lo(280f - 279b), %g0 - sub %o0, %g6, %o0 - RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5) - RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5) -280: be,pt %XCC, 281f - andcc %o2, 4, %g0 - ldx [%o1 - 8], %g2 - sub %o0, 8, %o0 - stw %g2, [%o0 + 4] - sub %o1, 8, %o1 - srlx %g2, 32, %g2 - stw %g2, [%o0] -281: be,pt %XCC, 1f - andcc %o2, 2, %g0 - lduw [%o1 - 4], %g2 - sub %o1, 4, %o1 - stw %g2, [%o0 - 4] - sub %o0, 4, %o0 -1: be,pt %XCC, 1f - andcc %o2, 1, %g0 - lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sth %g2, [%o0 - 2] - sub %o0, 2, %o0 -1: be,pt %XCC, 211f - nop - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -211: retl - mov %g4, %o0 - -282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5) - subcc %g6, 128, %g6 - sub %o1, 128, %o1 - bne,pt %XCC, 282b - sub %o0, 128, %o0 - andcc %o2, 0x70, %g6 - be,pn %XCC, 284f - andcc %o2, 8, %g0 - -283: rd %pc, %o5 - sub %o1, %g6, %o1 - sub %o5, %g6, %o5 - jmpl %o5 + %lo(284f - 283b), %g0 - sub %o0, %g6, %o0 - RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3) - RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3) -284: be,pt %XCC, 285f - andcc %o2, 4, %g0 - ldx [%o1 - 8], %g2 - sub %o0, 8, %o0 - sub %o1, 8, %o1 - stx %g2, [%o0] -285: be,pt %XCC, 1f - andcc %o2, 2, %g0 - lduw [%o1 - 4], %g2 - sub %o0, 4, %o0 - sub %o1, 4, %o1 - stw 
%g2, [%o0] -1: be,pt %XCC, 1f - andcc %o2, 1, %g0 - lduh [%o1 - 2], %g2 - sub %o0, 2, %o0 - sub %o1, 2, %o1 - sth %g2, [%o0] -1: be,pt %XCC, 1f - nop - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -1: retl - mov %g4, %o0 - -232: ldub [%o1 - 1], %g5 - sub %o1, 1, %o1 - sub %o0, 1, %o0 - subcc %o2, 1, %o2 - bne,pt %XCC, 232b - stb %g5, [%o0] -234: retl - mov %g4, %o0 -END(memmove) - -#ifdef USE_BPR -weak_alias (memcpy, __align_cpy_1) -weak_alias (memcpy, __align_cpy_2) -weak_alias (memcpy, __align_cpy_4) -weak_alias (memcpy, __align_cpy_8) -weak_alias (memcpy, __align_cpy_16) #endif -libc_hidden_builtin_def (memcpy) -libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc64/multiarch/memcpy-ultra3.S b/sysdeps/sparc/sparc64/multiarch/memcpy-ultra3.S new file mode 100644 index 0000000000..34ca089f93 --- /dev/null +++ b/sysdeps/sparc/sparc64/multiarch/memcpy-ultra3.S @@ -0,0 +1,320 @@ +/* Copy SIZE bytes from SRC to DEST. + For UltraSPARC-III. + Copyright (C) 2001, 2003 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by David S. Miller (davem@redhat.com) + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#include <sysdep.h> + +#define ASI_BLK_P 0xf0 +#define FPRS_FEF 0x04 +#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs + +#ifndef XCC +#define USE_BPR +#define XCC xcc +#endif + +#if !defined NOT_IN_libc + + .register %g2,#scratch + .register %g3,#scratch + .register %g6,#scratch + + .text + + /* Special/non-trivial issues of this code: + * + * 1) %o5 is preserved from VISEntryHalf to VISExitHalf + * 2) Only low 32 FPU registers are used so that only the + * lower half of the FPU register set is dirtied by this + * code. This is especially important in the kernel. + * 3) This code never prefetches cachelines past the end + * of the source buffer. + * + * The cheetah's flexible spine, oversized liver, enlarged heart, + * slender muscular body, and claws make it the swiftest hunter + * in Africa and the fastest animal on land. Can reach speeds + * of up to 2.4GB per second. + */ + .align 32 +ENTRY(__memcpy_ultra3) + +100: /* %o0=dst, %o1=src, %o2=len */ + mov %o0, %g5 + cmp %o2, 0 + be,pn %XCC, out +218: or %o0, %o1, %o3 + cmp %o2, 16 + bleu,a,pn %XCC, small_copy + or %o3, %o2, %o3 + + cmp %o2, 256 + blu,pt %XCC, medium_copy + andcc %o3, 0x7, %g0 + + ba,pt %xcc, enter + andcc %o0, 0x3f, %g2 + + /* Here len >= 256 and condition codes reflect execution + * of "andcc %o0, 0x7, %g2", done by caller. + */ + .align 64 +enter: + /* Is 'dst' already aligned on an 64-byte boundary? */ + be,pt %XCC, 2f + + /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number + * of bytes to copy to make 'dst' 64-byte aligned. We pre- + * subtract this from 'len'. 
+ */ + sub %g2, 0x40, %g2 + sub %g0, %g2, %g2 + sub %o2, %g2, %o2 + + /* Copy %g2 bytes from src to dst, one byte at a time. */ +1: ldub [%o1 + 0x00], %o3 + add %o1, 0x1, %o1 + add %o0, 0x1, %o0 + subcc %g2, 0x1, %g2 + + bg,pt %XCC, 1b + stb %o3, [%o0 + -1] + +2: VISEntryHalf + and %o1, 0x7, %g1 + ba,pt %xcc, begin + alignaddr %o1, %g0, %o1 + + .align 64 +begin: + prefetch [%o1 + 0x000], #one_read + prefetch [%o1 + 0x040], #one_read + andn %o2, (0x40 - 1), %o4 + prefetch [%o1 + 0x080], #one_read + prefetch [%o1 + 0x0c0], #one_read + ldd [%o1 + 0x000], %f0 + prefetch [%o1 + 0x100], #one_read + ldd [%o1 + 0x008], %f2 + prefetch [%o1 + 0x140], #one_read + ldd [%o1 + 0x010], %f4 + prefetch [%o1 + 0x180], #one_read + faligndata %f0, %f2, %f16 + ldd [%o1 + 0x018], %f6 + faligndata %f2, %f4, %f18 + ldd [%o1 + 0x020], %f8 + faligndata %f4, %f6, %f20 + ldd [%o1 + 0x028], %f10 + faligndata %f6, %f8, %f22 + + ldd [%o1 + 0x030], %f12 + faligndata %f8, %f10, %f24 + ldd [%o1 + 0x038], %f14 + faligndata %f10, %f12, %f26 + ldd [%o1 + 0x040], %f0 + + sub %o4, 0x80, %o4 + add %o1, 0x40, %o1 + ba,pt %xcc, loop + srl %o4, 6, %o3 + + .align 64 +loop: + ldd [%o1 + 0x008], %f2 + faligndata %f12, %f14, %f28 + ldd [%o1 + 0x010], %f4 + faligndata %f14, %f0, %f30 + stda %f16, [%o0] ASI_BLK_P + ldd [%o1 + 0x018], %f6 + faligndata %f0, %f2, %f16 + + ldd [%o1 + 0x020], %f8 + faligndata %f2, %f4, %f18 + ldd [%o1 + 0x028], %f10 + faligndata %f4, %f6, %f20 + ldd [%o1 + 0x030], %f12 + faligndata %f6, %f8, %f22 + ldd [%o1 + 0x038], %f14 + faligndata %f8, %f10, %f24 + + ldd [%o1 + 0x040], %f0 + prefetch [%o1 + 0x180], #one_read + faligndata %f10, %f12, %f26 + subcc %o3, 0x01, %o3 + add %o1, 0x40, %o1 + bg,pt %XCC, loop + add %o0, 0x40, %o0 + + /* Finally we copy the last full 64-byte block. */ +loopfini: + ldd [%o1 + 0x008], %f2 + faligndata %f12, %f14, %f28 + ldd [%o1 + 0x010], %f4 + faligndata %f14, %f0, %f30 + stda %f16, [%o0] ASI_BLK_P + ldd [%o1 + 0x018], %f6 + faligndata %f0, %f2, %f16 + ldd [%o1 + 0x020], %f8 + faligndata %f2, %f4, %f18 + ldd [%o1 + 0x028], %f10 + faligndata %f4, %f6, %f20 + ldd [%o1 + 0x030], %f12 + faligndata %f6, %f8, %f22 + ldd [%o1 + 0x038], %f14 + faligndata %f8, %f10, %f24 + cmp %g1, 0 + be,pt %XCC, 1f + add %o0, 0x40, %o0 + ldd [%o1 + 0x040], %f0 +1: faligndata %f10, %f12, %f26 + faligndata %f12, %f14, %f28 + faligndata %f14, %f0, %f30 + stda %f16, [%o0] ASI_BLK_P + add %o0, 0x40, %o0 + add %o1, 0x40, %o1 + membar #Sync + + /* Now we copy the (len modulo 64) bytes at the end. + * Note how we borrow the %f0 loaded above. + * + * Also notice how this code is careful not to perform a + * load past the end of the src buffer. + */ +loopend: + and %o2, 0x3f, %o2 + andcc %o2, 0x38, %g2 + be,pn %XCC, endcruft + subcc %g2, 0x8, %g2 + be,pn %XCC, endcruft + cmp %g1, 0 + + be,a,pt %XCC, 1f + ldd [%o1 + 0x00], %f0 + +1: ldd [%o1 + 0x08], %f2 + add %o1, 0x8, %o1 + sub %o2, 0x8, %o2 + subcc %g2, 0x8, %g2 + faligndata %f0, %f2, %f8 + std %f8, [%o0 + 0x00] + be,pn %XCC, endcruft + add %o0, 0x8, %o0 + ldd [%o1 + 0x08], %f0 + add %o1, 0x8, %o1 + sub %o2, 0x8, %o2 + subcc %g2, 0x8, %g2 + faligndata %f2, %f0, %f8 + std %f8, [%o0 + 0x00] + bne,pn %XCC, 1b + add %o0, 0x8, %o0 + + /* If anything is left, we copy it one byte at a time. + * Note that %g1 is (src & 0x3) saved above before the + * alignaddr was performed. 
+ */ +endcruft: + cmp %o2, 0 + add %o1, %g1, %o1 + VISExitHalf + be,pn %XCC, out + sub %o0, %o1, %o3 + + andcc %g1, 0x7, %g0 + bne,pn %icc, small_copy_unaligned + andcc %o2, 0x8, %g0 + be,pt %icc, 1f + nop + ldx [%o1], %o5 + stx %o5, [%o1 + %o3] + add %o1, 0x8, %o1 + +1: andcc %o2, 0x4, %g0 + be,pt %icc, 1f + nop + lduw [%o1], %o5 + stw %o5, [%o1 + %o3] + add %o1, 0x4, %o1 + +1: andcc %o2, 0x2, %g0 + be,pt %icc, 1f + nop + lduh [%o1], %o5 + sth %o5, [%o1 + %o3] + add %o1, 0x2, %o1 + +1: andcc %o2, 0x1, %g0 + be,pt %icc, out + nop + ldub [%o1], %o5 + ba,pt %xcc, out + stb %o5, [%o1 + %o3] + +medium_copy: /* 16 < len <= 64 */ + bne,pn %XCC, small_copy_unaligned + sub %o0, %o1, %o3 + +medium_copy_aligned: + andn %o2, 0x7, %o4 + and %o2, 0x7, %o2 +1: subcc %o4, 0x8, %o4 + ldx [%o1], %o5 + stx %o5, [%o1 + %o3] + bgu,pt %XCC, 1b + add %o1, 0x8, %o1 + andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x4, %o2 + lduw [%o1], %o5 + stw %o5, [%o1 + %o3] + add %o1, 0x4, %o1 +1: cmp %o2, 0 + be,pt %XCC, out + nop + ba,pt %xcc, small_copy_unaligned + nop + +small_copy: /* 0 < len <= 16 */ + andcc %o3, 0x3, %g0 + bne,pn %XCC, small_copy_unaligned + sub %o0, %o1, %o3 + +small_copy_aligned: + subcc %o2, 4, %o2 + lduw [%o1], %g1 + stw %g1, [%o1 + %o3] + bgu,pt %XCC, small_copy_aligned + add %o1, 4, %o1 + +out: retl + mov %g5, %o0 + + .align 32 +small_copy_unaligned: + subcc %o2, 1, %o2 + ldub [%o1], %g1 + stb %g1, [%o1 + %o3] + bgu,pt %XCC, small_copy_unaligned + add %o1, 1, %o1 + retl + mov %g5, %o0 + +END(__memcpy_ultra3) + +#endif
\ No newline at end of file diff --git a/sysdeps/sparc/sparc64/multiarch/memcpy.S b/sysdeps/sparc/sparc64/multiarch/memcpy.S new file mode 100644 index 0000000000..a708de10e2 --- /dev/null +++ b/sysdeps/sparc/sparc64/multiarch/memcpy.S @@ -0,0 +1,107 @@ +/* Multiple versions of memcpy + Copyright (C) 2010 Free Software Foundation, Inc. + Contributed by David S. Miller (davem@davemloft.net) + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#include <sysdep.h> +#include <rtld-global-offsets.h> + +#if !defined NOT_IN_libc + .text +ENTRY(memcpy) + .type memcpy, @gnu_indirect_function +# ifdef SHARED + mov %o7, %o5 + sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %o3 + call 1f + or %o3, %lo(_GLOBAL_OFFSET_TABLE_+4), %o3 +1: add %o7, %o3, %o3 + mov %o5, %o7 + sethi %hi(_rtld_global_ro), %o2 + or %o2, %lo(_rtld_global_ro), %o2 +# ifdef __arch64__ + ldx [%o3 + %o2], %o2 + ldx [%o2 + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET], %o2 +# else + ld [%o3 + %o2], %o2 + ld [%o2 + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET + 4], %o2 +# endif +# else + set _dl_hwcap, %o3 +# ifdef __arch64__ + ldx [%o3], %o2 +# else + ld [%o3 + 4], %o2 +# endif +# endif + andcc %o2, 0x80, %g0 ! HWCAP_SPARC_N2 + be 1f + andcc %o2, 0x40, %g0 ! HWCAP_SPARC_BLKINIT +# ifdef SHARED + sethi %gdop_hix22(__memcpy_niagara2), %o1 + xor %o1, %gdop_lox10(__memcpy_niagara2), %o1 +# else + set __memcpy_niagara2, %o1 +# endif + ba 10f + nop +1: be 1f + andcc %o2, 0x20, %g0 ! HWCAP_SPARC_ULTRA3 +# ifdef SHARED + sethi %gdop_hix22(__memcpy_niagara1), %o1 + xor %o1, %gdop_lox10(__memcpy_niagara1), %o1 +# else + set __memcpy_niagara1, %o1 +# endif + ba 10f + nop +1: be 9f + nop +# ifdef SHARED + sethi %gdop_hix22(__memcpy_ultra3), %o1 + xor %o1, %gdop_lox10(__memcpy_ultra3), %o1 +# else + set __memcpy_ultra3, %o1 +# endif + ba 10f + nop +9: +# ifdef SHARED + sethi %gdop_hix22(__memcpy_ultra1), %o1 + xor %o1, %gdop_lox10(__memcpy_ultra1), %o1 +# else + set __memcpy_ultra1, %o1 +# endif +10: +# ifdef SHARED + add %o3, %o1, %o1 +# endif + retl + mov %o1, %o0 +END(memcpy) + +# undef libc_hidden_builtin_def +/* IFUNC doesn't work with the hidden functions in a shared library. 
*/ +# define libc_hidden_builtin_def(name) \ + .globl __GI_memcpy; __GI_memcpy = __memcpy_ultra1 + +#define memcpy __memcpy_ultra1 + +#endif + +#include "../memcpy.S" diff --git a/sysdeps/sparc/sparc64/sparcv9v/memset.S b/sysdeps/sparc/sparc64/multiarch/memset-niagara1.S index 64817b8871..20ea056216 100644 --- a/sysdeps/sparc/sparc64/sparcv9v/memset.S +++ b/sysdeps/sparc/sparc64/multiarch/memset-niagara1.S @@ -29,12 +29,14 @@ #define XCC xcc #endif +#if !defined NOT_IN_libc + .register %g2,#scratch .text .align 32 -ENTRY(memset) +ENTRY(__memset_niagara1) /* %o0=buf, %o1=pat, %o2=len */ and %o1, 0xff, %o3 mov %o2, %o1 @@ -45,14 +47,14 @@ ENTRY(memset) sllx %o2, 32, %g1 ba,pt %XCC, 1f or %g1, %o2, %o2 -END(memset) +END(__memset_niagara1) -ENTRY(__bzero) +ENTRY(__bzero_niagara1) clr %o2 1: -#ifndef USE_BRP +# ifndef USE_BRP srl %o1, 0, %o1 -#endif +# endif brz,pn %o1, 90f mov %o0, %o3 @@ -125,7 +127,6 @@ ENTRY(__bzero) 90: retl mov %o3, %o0 -END(__bzero) +END(__bzero_niagara1) -libc_hidden_builtin_def (memset) -weak_alias (__bzero, bzero) +#endif diff --git a/sysdeps/sparc/sparc64/multiarch/memset.S b/sysdeps/sparc/sparc64/multiarch/memset.S new file mode 100644 index 0000000000..23e513f18f --- /dev/null +++ b/sysdeps/sparc/sparc64/multiarch/memset.S @@ -0,0 +1,145 @@ +/* Multiple versions of memset and bzero + Copyright (C) 2010 Free Software Foundation, Inc. + Contributed by David S. Miller (davem@davemloft.net) + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#include <sysdep.h> +#include <rtld-global-offsets.h> + +#if !defined NOT_IN_libc + .text +ENTRY(memset) + .type memset, @gnu_indirect_function +# ifdef SHARED + mov %o7, %o5 + sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %o3 + call 1f + or %o3, %lo(_GLOBAL_OFFSET_TABLE_+4), %o3 +1: add %o7, %o3, %o3 + mov %o5, %o7 + sethi %hi(_rtld_global_ro), %o2 + or %o2, %lo(_rtld_global_ro), %o2 +# ifdef __arch64__ + ldx [%o3 + %o2], %o2 + ldx [%o2 + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET], %o2 +# else + ld [%o3 + %o2], %o2 + ld [%o2 + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET + 4], %o2 +# endif +# else + set _dl_hwcap, %o3 +# ifdef __arch64__ + ldx [%o3], %o2 +# else + ld [%o3 + 4], %o2 +# endif +# endif + andcc %o2, 0x40, %g0 ! 
HWCAP_SPARC_BLKINIT + be 9f + nop +# ifdef SHARED + sethi %gdop_hix22(__memset_niagara1), %o1 + xor %o1, %gdop_lox10(__memset_niagara1), %o1 +# else + set __memset_niagara1, %o1 +# endif + ba 10f + nop +9: +# ifdef SHARED + sethi %gdop_hix22(__memset_ultra1), %o1 + xor %o1, %gdop_lox10(__memset_ultra1), %o1 +# else + set __memset_ultra1, %o1 +# endif +10: +# ifdef SHARED + add %o3, %o1, %o1 +# endif + retl + mov %o1, %o0 +END(memset) + +ENTRY(__bzero) + .type bzero, @gnu_indirect_function +# ifdef SHARED + mov %o7, %o5 + sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %o3 + call 1f + or %o3, %lo(_GLOBAL_OFFSET_TABLE_+4), %o3 +1: add %o7, %o3, %o3 + mov %o5, %o7 + sethi %hi(_rtld_global_ro), %o2 + or %o2, %lo(_rtld_global_ro), %o2 +# ifdef __arch64__ + ldx [%o3 + %o2], %o2 + ldx [%o2 + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET], %o2 +# else + ld [%o3 + %o2], %o2 + ld [%o2 + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET + 4], %o2 +# endif +# else + set _dl_hwcap, %o3 +# ifdef __arch64__ + ldx [%o3], %o2 +# else + ld [%o3 + 4], %o2 +# endif +# endif + andcc %o2, 0x40, %g0 ! HWCAP_SPARC_BLKINIT + be 9f + nop +# ifdef SHARED + sethi %gdop_hix22(__bzero_niagara1), %o1 + xor %o1, %gdop_lox10(__bzero_niagara1), %o1 +# else + set __bzero_niagara1, %o1 +# endif + ba 10f + nop +9: +# ifdef SHARED + sethi %gdop_hix22(__memset_ultra1), %o1 + xor %o1, %gdop_lox10(__memset_ultra1), %o1 +# else + set __bzero_ultra1, %o1 +# endif +10: +# ifdef SHARED + add %o3, %o1, %o1 +# endif + retl + mov %o1, %o0 +END(__bzero) + +weak_alias (__bzero, bzero) + +# undef weak_alias +# define weak_alias(a, b) + +# undef libc_hidden_builtin_def +/* IFUNC doesn't work with the hidden functions in a shared library. */ +# define libc_hidden_builtin_def(name) \ + .globl __GI_memset; __GI_memset = __memset_ultra1 + +#define memset __memset_ultra1 +#define __bzero __bzero_ultra1 + +#endif + +#include "../memset.S" diff --git a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S deleted file mode 100644 index 760d526630..0000000000 --- a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S +++ /dev/null @@ -1,610 +0,0 @@ -/* Copy SIZE bytes from SRC to DEST. - For UltraSPARC-III. - Copyright (C) 2001, 2003 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by David S. Miller (davem@redhat.com) - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. 
diff --git a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
deleted file mode 100644
index 760d526630..0000000000
--- a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S
+++ /dev/null
@@ -1,610 +0,0 @@
-/* Copy SIZE bytes from SRC to DEST.
-   For UltraSPARC-III.
-   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by David S. Miller (davem@redhat.com)
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
-
-#include <sysdep.h>
-
-#define ASI_BLK_P 0xf0
-#define FPRS_FEF  0x04
-#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
-#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-
-#ifndef XCC
-#define USE_BPR
-#define XCC xcc
-#endif
-
-	.register	%g2, #scratch
-	.register	%g3, #scratch
-	.register	%g6, #scratch
-
-	.text
-	.align	32
-
-ENTRY(bcopy)
-	sub	%o1, %o0, %o4
-	mov	%o0, %g4
-	cmp	%o4, %o2
-	mov	%o1, %o0
-	bgeu,pt	%XCC, 100f
-	 mov	%g4, %o1
-#ifndef USE_BPR
-	srl	%o2, 0, %o2
-#endif
-	brnz,pn	%o2, 220f
-	 add	%o0, %o2, %o0
-	retl
-	 nop
-END(bcopy)
-
-	/* Special/non-trivial issues of this code:
-	 *
-	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
-	 * 2) Only low 32 FPU registers are used so that only the
-	 *    lower half of the FPU register set is dirtied by this
-	 *    code.  This is especially important in the kernel.
-	 * 3) This code never prefetches cachelines past the end
-	 *    of the source buffer.
-	 *
-	 * The cheetah's flexible spine, oversized liver, enlarged heart,
-	 * slender muscular body, and claws make it the swiftest hunter
-	 * in Africa and the fastest animal on land.  Can reach speeds
-	 * of up to 2.4GB per second.
-	 */
-
-	.align	32
-ENTRY(memcpy)
-
-100: /* %o0=dst, %o1=src, %o2=len */
-	mov	%o0, %g5
-	cmp	%o2, 0
-	be,pn	%XCC, out
-218:	 or	%o0, %o1, %o3
-	cmp	%o2, 16
-	bleu,a,pn %XCC, small_copy
-	 or	%o3, %o2, %o3
-
-	cmp	%o2, 256
-	blu,pt	%XCC, medium_copy
-	 andcc	%o3, 0x7, %g0
-
-	ba,pt	%xcc, enter
-	 andcc	%o0, 0x3f, %g2
-
-	/* Here len >= 256 and condition codes reflect execution
-	 * of "andcc %o0, 0x3f, %g2", done by caller.
-	 */
-	.align	64
-enter:
-	/* Is 'dst' already aligned on a 64-byte boundary? */
-	be,pt	%XCC, 2f
-
-	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
-	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
-	 * subtract this from 'len'.
-	 */
-	 sub	%g2, 0x40, %g2
-	sub	%g0, %g2, %g2
-	sub	%o2, %g2, %o2
-
-	/* Copy %g2 bytes from src to dst, one byte at a time.  */
-1:	ldub	[%o1 + 0x00], %o3
-	add	%o1, 0x1, %o1
-	add	%o0, 0x1, %o0
-	subcc	%g2, 0x1, %g2
-	bg,pt	%XCC, 1b
-	 stb	%o3, [%o0 + -1]
-
-2:	VISEntryHalf
-	and	%o1, 0x7, %g1
-	ba,pt	%xcc, begin
-	 alignaddr %o1, %g0, %o1
-
-	.align	64
-begin:
-	prefetch	[%o1 + 0x000], #one_read
-	prefetch	[%o1 + 0x040], #one_read
-	andn		%o2, (0x40 - 1), %o4
-	prefetch	[%o1 + 0x080], #one_read
-	prefetch	[%o1 + 0x0c0], #one_read
-	ldd		[%o1 + 0x000], %f0
-	prefetch	[%o1 + 0x100], #one_read
-	ldd		[%o1 + 0x008], %f2
-	prefetch	[%o1 + 0x140], #one_read
-	ldd		[%o1 + 0x010], %f4
-	prefetch	[%o1 + 0x180], #one_read
-	faligndata	%f0, %f2, %f16
-	ldd		[%o1 + 0x018], %f6
-	faligndata	%f2, %f4, %f18
-	ldd		[%o1 + 0x020], %f8
-	faligndata	%f4, %f6, %f20
-	ldd		[%o1 + 0x028], %f10
-	faligndata	%f6, %f8, %f22
-
-	ldd		[%o1 + 0x030], %f12
-	faligndata	%f8, %f10, %f24
-	ldd		[%o1 + 0x038], %f14
-	faligndata	%f10, %f12, %f26
-	ldd		[%o1 + 0x040], %f0
-
-	sub		%o4, 0x80, %o4
-	add		%o1, 0x40, %o1
-	ba,pt		%xcc, loop
-	 srl		%o4, 6, %o3
-
-	.align	64
-loop:
-	ldd		[%o1 + 0x008], %f2
-	faligndata	%f12, %f14, %f28
-	ldd		[%o1 + 0x010], %f4
-	faligndata	%f14, %f0, %f30
-	stda		%f16, [%o0] ASI_BLK_P
-	ldd		[%o1 + 0x018], %f6
-	faligndata	%f0, %f2, %f16
-
-	ldd		[%o1 + 0x020], %f8
-	faligndata	%f2, %f4, %f18
-	ldd		[%o1 + 0x028], %f10
-	faligndata	%f4, %f6, %f20
-	ldd		[%o1 + 0x030], %f12
-	faligndata	%f6, %f8, %f22
-	ldd		[%o1 + 0x038], %f14
-	faligndata	%f8, %f10, %f24
-
-	ldd		[%o1 + 0x040], %f0
-	prefetch	[%o1 + 0x180], #one_read
-	faligndata	%f10, %f12, %f26
-	subcc		%o3, 0x01, %o3
-	add		%o1, 0x40, %o1
-	bg,pt		%XCC, loop
-	 add		%o0, 0x40, %o0
-
-	/* Finally we copy the last full 64-byte block. */
-loopfini:
-	ldd		[%o1 + 0x008], %f2
-	faligndata	%f12, %f14, %f28
-	ldd		[%o1 + 0x010], %f4
-	faligndata	%f14, %f0, %f30
-	stda		%f16, [%o0] ASI_BLK_P
-	ldd		[%o1 + 0x018], %f6
-	faligndata	%f0, %f2, %f16
-	ldd		[%o1 + 0x020], %f8
-	faligndata	%f2, %f4, %f18
-	ldd		[%o1 + 0x028], %f10
-	faligndata	%f4, %f6, %f20
-	ldd		[%o1 + 0x030], %f12
-	faligndata	%f6, %f8, %f22
-	ldd		[%o1 + 0x038], %f14
-	faligndata	%f8, %f10, %f24
-	cmp		%g1, 0
-	be,pt		%XCC, 1f
-	 add		%o0, 0x40, %o0
-	ldd		[%o1 + 0x040], %f0
-1:	faligndata	%f10, %f12, %f26
-	faligndata	%f12, %f14, %f28
-	faligndata	%f14, %f0, %f30
-	stda		%f16, [%o0] ASI_BLK_P
-	add		%o0, 0x40, %o0
-	add		%o1, 0x40, %o1
-	membar		#Sync
-
-	/* Now we copy the (len modulo 64) bytes at the end.
-	 * Note how we borrow the %f0 loaded above.
-	 *
-	 * Also notice how this code is careful not to perform a
-	 * load past the end of the src buffer.
-	 */
-loopend:
-	and		%o2, 0x3f, %o2
-	andcc		%o2, 0x38, %g2
-	be,pn		%XCC, endcruft
-	 subcc		%g2, 0x8, %g2
-	be,pn		%XCC, endcruft
-	 cmp		%g1, 0
-
-	be,a,pt		%XCC, 1f
-	 ldd		[%o1 + 0x00], %f0
-
-1:	ldd		[%o1 + 0x08], %f2
-	add		%o1, 0x8, %o1
-	sub		%o2, 0x8, %o2
-	subcc		%g2, 0x8, %g2
-	faligndata	%f0, %f2, %f8
-	std		%f8, [%o0 + 0x00]
-	be,pn		%XCC, endcruft
-	 add		%o0, 0x8, %o0
-	ldd		[%o1 + 0x08], %f0
-	add		%o1, 0x8, %o1
-	sub		%o2, 0x8, %o2
-	subcc		%g2, 0x8, %g2
-	faligndata	%f2, %f0, %f8
-	std		%f8, [%o0 + 0x00]
-	bne,pn		%XCC, 1b
-	 add		%o0, 0x8, %o0
-
-	/* If anything is left, we copy it one byte at a time.
-	 * Note that %g1 is (src & 0x3) saved above before the
-	 * alignaddr was performed.
-	 */
-endcruft:
-	cmp	%o2, 0
-	add	%o1, %g1, %o1
-	VISExitHalf
-	be,pn	%XCC, out
-	 sub	%o0, %o1, %o3
-
-	andcc	%g1, 0x7, %g0
-	bne,pn	%icc, small_copy_unaligned
-	 andcc	%o2, 0x8, %g0
-	be,pt	%icc, 1f
-	 nop
-	ldx	[%o1], %o5
-	stx	%o5, [%o1 + %o3]
-	add	%o1, 0x8, %o1
-
-1:	andcc	%o2, 0x4, %g0
-	be,pt	%icc, 1f
-	 nop
-	lduw	[%o1], %o5
-	stw	%o5, [%o1 + %o3]
-	add	%o1, 0x4, %o1
-
-1:	andcc	%o2, 0x2, %g0
-	be,pt	%icc, 1f
-	 nop
-	lduh	[%o1], %o5
-	sth	%o5, [%o1 + %o3]
-	add	%o1, 0x2, %o1
-
-1:	andcc	%o2, 0x1, %g0
-	be,pt	%icc, out
-	 nop
-	ldub	[%o1], %o5
-	ba,pt	%xcc, out
-	 stb	%o5, [%o1 + %o3]
-
-medium_copy: /* 16 < len < 256 */
-	bne,pn	%XCC, small_copy_unaligned
-	 sub	%o0, %o1, %o3
-
-medium_copy_aligned:
-	andn	%o2, 0x7, %o4
-	and	%o2, 0x7, %o2
-1:	subcc	%o4, 0x8, %o4
-	ldx	[%o1], %o5
-	stx	%o5, [%o1 + %o3]
-	bgu,pt	%XCC, 1b
-	 add	%o1, 0x8, %o1
-	andcc	%o2, 0x4, %g0
-	be,pt	%XCC, 1f
-	 nop
-	sub	%o2, 0x4, %o2
-	lduw	[%o1], %o5
-	stw	%o5, [%o1 + %o3]
-	add	%o1, 0x4, %o1
-1:	cmp	%o2, 0
-	be,pt	%XCC, out
-	 nop
-	ba,pt	%xcc, small_copy_unaligned
-	 nop
-
-small_copy: /* 0 < len <= 16 */
-	andcc	%o3, 0x3, %g0
-	bne,pn	%XCC, small_copy_unaligned
-	 sub	%o0, %o1, %o3
-
-small_copy_aligned:
-	subcc	%o2, 4, %o2
-	lduw	[%o1], %g1
-	stw	%g1, [%o1 + %o3]
-	bgu,pt	%XCC, small_copy_aligned
-	 add	%o1, 4, %o1
-
-out:	retl
-	 mov	%g5, %o0
-
-	.align	32
-small_copy_unaligned:
-	subcc	%o2, 1, %o2
-	ldub	[%o1], %g1
-	stb	%g1, [%o1 + %o3]
-	bgu,pt	%XCC, small_copy_unaligned
-	 add	%o1, 1, %o1
-	retl
-	 mov	%g5, %o0
-
-END(memcpy)
-
-#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldx	[%src - offset - 0x20], %t0; \
-	ldx	[%src - offset - 0x18], %t1; \
-	ldx	[%src - offset - 0x10], %t2; \
-	ldx	[%src - offset - 0x08], %t3; \
-	stw	%t0, [%dst - offset - 0x1c]; \
-	srlx	%t0, 32, %t0; \
-	stw	%t0, [%dst - offset - 0x20]; \
-	stw	%t1, [%dst - offset - 0x14]; \
-	srlx	%t1, 32, %t1; \
-	stw	%t1, [%dst - offset - 0x18]; \
-	stw	%t2, [%dst - offset - 0x0c]; \
-	srlx	%t2, 32, %t2; \
-	stw	%t2, [%dst - offset - 0x10]; \
-	stw	%t3, [%dst - offset - 0x04]; \
-	srlx	%t3, 32, %t3; \
-	stw	%t3, [%dst - offset - 0x08];
-
-#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldx	[%src - offset - 0x20], %t0; \
-	ldx	[%src - offset - 0x18], %t1; \
-	ldx	[%src - offset - 0x10], %t2; \
-	ldx	[%src - offset - 0x08], %t3; \
-	stx	%t0, [%dst - offset - 0x20]; \
-	stx	%t1, [%dst - offset - 0x18]; \
-	stx	%t2, [%dst - offset - 0x10]; \
-	stx	%t3, [%dst - offset - 0x08]; \
-	ldx	[%src - offset - 0x40], %t0; \
-	ldx	[%src - offset - 0x38], %t1; \
-	ldx	[%src - offset - 0x30], %t2; \
-	ldx	[%src - offset - 0x28], %t3; \
-	stx	%t0, [%dst - offset - 0x40]; \
-	stx	%t1, [%dst - offset - 0x38]; \
-	stx	%t2, [%dst - offset - 0x30]; \
-	stx	%t3, [%dst - offset - 0x28];
-
-#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldx	[%src + offset + 0x00], %t0; \
-	ldx	[%src + offset + 0x08], %t1; \
-	stw	%t0, [%dst + offset + 0x04]; \
-	srlx	%t0, 32, %t2; \
-	stw	%t2, [%dst + offset + 0x00]; \
-	stw	%t1, [%dst + offset + 0x0c]; \
-	srlx	%t1, 32, %t3; \
-	stw	%t3, [%dst + offset + 0x08];
-
-#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
-	ldx	[%src + offset + 0x00], %t0; \
-	ldx	[%src + offset + 0x08], %t1; \
-	stx	%t0, [%dst + offset + 0x00]; \
-	stx	%t1, [%dst + offset + 0x08];
-
-	.align	32
-228:	andcc	%o2, 1, %g0		/* IEU1 Group */
-	be,pt	%icc, 2f+4		/* CTI */
-1:	 ldub	[%o1 - 1], %o5		/* LOAD Group */
-	sub	%o1, 1, %o1		/* IEU0 */
-	sub	%o0, 1, %o0		/* IEU1 */
-	subcc	%o2, 1, %o2		/* IEU1 Group */
-	be,pn	%xcc, 229f		/* CTI */
-	 stb	%o5, [%o0]		/* Store */
-2:	ldub	[%o1 - 1], %o5		/* LOAD Group */
-	sub	%o0, 2, %o0		/* IEU0 */
-	ldub	[%o1 - 2], %g5		/* LOAD Group */
-	sub	%o1, 2, %o1		/* IEU0 */
-	subcc	%o2, 2, %o2		/* IEU1 Group */
-	stb	%o5, [%o0 + 1]		/* Store */
-	bne,pt	%xcc, 2b		/* CTI */
-	 stb	%g5, [%o0]		/* Store */
-229:	retl
-	 mov	%g4, %o0
-
-	.align	32
-ENTRY(memmove)
-	mov	%o0, %g5
-#ifndef USE_BPR
-	srl	%o2, 0, %o2		/* IEU1 Group */
-#endif
-	brz,pn	%o2, out		/* CTI Group */
-	 sub	%o0, %o1, %o4		/* IEU0 */
-	cmp	%o4, %o2		/* IEU1 Group */
-	bgeu,pt	%XCC, 218b		/* CTI */
-	 mov	%o0, %g4		/* IEU0 */
-	add	%o0, %o2, %o0		/* IEU0 Group */
-220:	add	%o1, %o2, %o1		/* IEU1 */
-	cmp	%o2, 15			/* IEU1 Group */
-	bleu,pn	%xcc, 228b		/* CTI */
-	 andcc	%o0, 7, %g2		/* IEU1 Group */
-	sub	%o0, %o1, %g5		/* IEU0 */
-	andcc	%g5, 3, %o5		/* IEU1 Group */
-	bne,pn	%xcc, 232f		/* CTI */
-	 andcc	%o1, 3, %g0		/* IEU1 Group */
-	be,a,pt	%xcc, 236f		/* CTI */
-	 andcc	%o1, 4, %g0		/* IEU1 Group */
-	andcc	%o1, 1, %g0		/* IEU1 Group */
-	be,pn	%xcc, 4f		/* CTI */
-	 andcc	%o1, 2, %g0		/* IEU1 Group */
-	ldub	[%o1 - 1], %g2		/* Load Group */
-	sub	%o1, 1, %o1		/* IEU0 */
-	sub	%o0, 1, %o0		/* IEU1 */
-	sub	%o2, 1, %o2		/* IEU0 Group */
-	be,pn	%xcc, 5f		/* CTI Group */
-	 stb	%g2, [%o0]		/* Store */
-4:	lduh	[%o1 - 2], %g2		/* Load Group */
-	sub	%o1, 2, %o1		/* IEU0 */
-	sub	%o0, 2, %o0		/* IEU1 */
-	sub	%o2, 2, %o2		/* IEU0 */
-	sth	%g2, [%o0]		/* Store Group + bubble */
-5:	andcc	%o1, 4, %g0		/* IEU1 */
-236:	be,a,pn	%xcc, 2f		/* CTI */
-	 andcc	%o2, -128, %g6		/* IEU1 Group */
-	lduw	[%o1 - 4], %g5		/* Load Group */
-	sub	%o1, 4, %o1		/* IEU0 */
-	sub	%o0, 4, %o0		/* IEU1 */
-	sub	%o2, 4, %o2		/* IEU0 Group */
-	stw	%g5, [%o0]		/* Store */
-	andcc	%o2, -128, %g6		/* IEU1 Group */
-2:	be,pn	%xcc, 235f		/* CTI */
-	 andcc	%o0, 4, %g0		/* IEU1 Group */
-	be,pn	%xcc, 282f + 4		/* CTI Group */
-5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
-	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
-	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
-	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
-	subcc	%g6, 128, %g6		/* IEU1 Group */
-	sub	%o1, 128, %o1		/* IEU0 */
-	bne,pt	%xcc, 5b		/* CTI */
-	 sub	%o0, 128, %o0		/* IEU0 Group */
-235:	andcc	%o2, 0x70, %g6		/* IEU1 Group */
-41:	be,pn	%xcc, 280f		/* CTI */
-	 andcc	%o2, 8, %g0		/* IEU1 Group */
-					/* Clk1 8-( */
-					/* Clk2 8-( */
-					/* Clk3 8-( */
-					/* Clk4 8-( */
-279:	rd	%pc, %o5		/* PDU Group */
-	sll	%g6, 1, %g5		/* IEU0 Group */
-	sub	%o1, %g6, %o1		/* IEU1 */
-	sub	%o5, %g5, %o5		/* IEU0 Group */
-	jmpl	%o5 + %lo(280f - 279b), %g0	/* CTI Group brk forced*/
-	 sub	%o0, %g6, %o0		/* IEU0 Group */
-	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
-	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
-	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
-	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
-	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
-	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
-	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
-280:	be,pt	%xcc, 281f		/* CTI */
-	 andcc	%o2, 4, %g0		/* IEU1 */
-	ldx	[%o1 - 8], %g2		/* Load Group */
-	sub	%o0, 8, %o0		/* IEU0 */
-	stw	%g2, [%o0 + 4]		/* Store Group */
-	sub	%o1, 8, %o1		/* IEU1 */
-	srlx	%g2, 32, %g2		/* IEU0 Group */
-	stw	%g2, [%o0]		/* Store */
-281:	be,pt	%xcc, 1f		/* CTI */
-	 andcc	%o2, 2, %g0		/* IEU1 Group */
-	lduw	[%o1 - 4], %g2		/* Load Group */
-	sub	%o1, 4, %o1		/* IEU0 */
-	stw	%g2, [%o0 - 4]		/* Store Group */
-	sub	%o0, 4, %o0		/* IEU0 */
-1:	be,pt	%xcc, 1f		/* CTI */
-	 andcc	%o2, 1, %g0		/* IEU1 Group */
-	lduh	[%o1 - 2], %g2		/* Load Group */
-	sub	%o1, 2, %o1		/* IEU0 */
-	sth	%g2, [%o0 - 2]		/* Store Group */
-	sub	%o0, 2, %o0		/* IEU0 */
-1:	be,pt	%xcc, 211f		/* CTI */
-	 nop				/* IEU1 */
-	ldub	[%o1 - 1], %g2		/* Load Group */
-	stb	%g2, [%o0 - 1]		/* Store Group + bubble */
-211:	retl
-	 mov	%g4, %o0
-
-282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
-	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
-	subcc	%g6, 128, %g6		/* IEU1 Group */
-	sub	%o1, 128, %o1		/* IEU0 */
-	bne,pt	%xcc, 282b		/* CTI */
-	 sub	%o0, 128, %o0		/* IEU0 Group */
-	andcc	%o2, 0x70, %g6		/* IEU1 */
-	be,pn	%xcc, 284f		/* CTI */
-	 andcc	%o2, 8, %g0		/* IEU1 Group */
-					/* Clk1 8-( */
-					/* Clk2 8-( */
-					/* Clk3 8-( */
-					/* Clk4 8-( */
-283:	rd	%pc, %o5		/* PDU Group */
-	sub	%o1, %g6, %o1		/* IEU0 Group */
-	sub	%o5, %g6, %o5		/* IEU1 */
-	jmpl	%o5 + %lo(284f - 283b), %g0	/* CTI Group brk forced*/
-	 sub	%o0, %g6, %o0		/* IEU0 Group */
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
-	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
-284:	be,pt	%xcc, 285f		/* CTI Group */
-	 andcc	%o2, 4, %g0		/* IEU1 */
-	ldx	[%o1 - 8], %g2		/* Load Group */
-	sub	%o0, 8, %o0		/* IEU0 */
-	sub	%o1, 8, %o1		/* IEU0 Group */
-	stx	%g2, [%o0]		/* Store */
-285:	be,pt	%xcc, 1f		/* CTI */
-	 andcc	%o2, 2, %g0		/* IEU1 Group */
-	lduw	[%o1 - 4], %g2		/* Load Group */
-	sub	%o0, 4, %o0		/* IEU0 */
-	sub	%o1, 4, %o1		/* IEU0 Group */
-	stw	%g2, [%o0]		/* Store */
-1:	be,pt	%xcc, 1f		/* CTI */
-	 andcc	%o2, 1, %g0		/* IEU1 Group */
-	lduh	[%o1 - 2], %g2		/* Load Group */
-	sub	%o0, 2, %o0		/* IEU0 */
-	sub	%o1, 2, %o1		/* IEU0 Group */
-	sth	%g2, [%o0]		/* Store */
-1:	be,pt	%xcc, 1f		/* CTI */
-	 nop				/* IEU0 Group */
-	ldub	[%o1 - 1], %g2		/* Load Group */
-	stb	%g2, [%o0 - 1]		/* Store Group + bubble */
-1:	retl
-	 mov	%g4, %o0
-
-232:	brz,pt	%g2, 2f			/* CTI Group */
-	 sub	%o2, %g2, %o2		/* IEU0 Group */
-1:	ldub	[%o1 - 1], %g5		/* Load Group */
-	sub	%o1, 1, %o1		/* IEU0 */
-	sub	%o0, 1, %o0		/* IEU1 */
-	subcc	%g2, 1, %g2		/* IEU1 Group */
-	bne,pt	%xcc, 1b		/* CTI */
-	 stb	%g5, [%o0]		/* Store */
-2:	andn	%o2, 7, %g5		/* IEU0 Group */
-	and	%o2, 7, %o2		/* IEU1 */
-	fmovd	%f0, %f2		/* FPU */
-	alignaddr %o1, %g0, %g1		/* GRU Group */
-	ldd	[%g1], %f4		/* Load Group */
-1:	ldd	[%g1 - 8], %f6		/* Load Group */
-	sub	%g1, 8, %g1		/* IEU0 Group */
-	subcc	%g5, 8, %g5		/* IEU1 */
-	faligndata %f6, %f4, %f0	/* GRU Group */
-	std	%f0, [%o0 - 8]		/* Store */
-	sub	%o1, 8, %o1		/* IEU0 Group */
-	be,pn	%xcc, 233f		/* CTI */
-	 sub	%o0, 8, %o0		/* IEU1 */
-	ldd	[%g1 - 8], %f4		/* Load Group */
-	sub	%g1, 8, %g1		/* IEU0 */
-	subcc	%g5, 8, %g5		/* IEU1 */
-	faligndata %f4, %f6, %f0	/* GRU Group */
-	std	%f0, [%o0 - 8]		/* Store */
-	sub	%o1, 8, %o1		/* IEU0 */
-	bne,pn	%xcc, 1b		/* CTI Group */
-	 sub	%o0, 8, %o0		/* IEU0 */
-233:	brz,pn	%o2, 234f		/* CTI Group */
-	 nop				/* IEU0 */
-237:	ldub	[%o1 - 1], %g5		/* LOAD */
-	sub	%o1, 1, %o1		/* IEU0 */
-	sub	%o0, 1, %o0		/* IEU1 */
-	subcc	%o2, 1, %o2		/* IEU1 */
-	bne,pt	%xcc, 237b		/* CTI */
-	 stb	%g5, [%o0]		/* Store Group */
-234:	wr	%g0, FPRS_FEF, %fprs
-	retl
-	 mov	%g4, %o0
-END(memmove)
-
-#ifdef USE_BPR
-weak_alias (memcpy, __align_cpy_1)
-weak_alias (memcpy, __align_cpy_2)
-weak_alias (memcpy, __align_cpy_4)
-weak_alias (memcpy, __align_cpy_8)
-weak_alias (memcpy, __align_cpy_16)
-#endif
-libc_hidden_builtin_def (memcpy)
-libc_hidden_builtin_def (memmove)
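The deleted UltraSPARC-III file carried its own bcopy and memmove entry points, and both reduce the overlap question to a single unsigned comparison: if dst - src, treated as an unsigned value, is at least len, a forward copy is safe and control falls into the plain memcpy path (the sub/cmp/bgeu sequences above); otherwise the buffers overlap in the dangerous direction and the copy runs backward from the ends. A minimal C rendering of that decision, with illustrative names:

#include <stddef.h>
#include <stdint.h>

/* Forward copy is safe unless dst lands inside [src, src + n); the
   unsigned wrap-around makes one compare cover both directions, just
   like "sub %o0, %o1, %o4; cmp %o4, %o2; bgeu" above.  */
void *
my_memmove (void *dstv, const void *srcv, size_t n)
{
  char *dst = dstv;
  const char *src = srcv;

  if ((uintptr_t) dst - (uintptr_t) src >= n)
    for (size_t i = 0; i < n; i++)      /* forward, memcpy-style */
      dst[i] = src[i];
  else
    for (size_t i = n; i-- > 0; )       /* backward, like label 220 */
      dst[i] = src[i];
  return dstv;
}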
diff --git a/sysdeps/sparc/sparc64/sparcv9v2/memset.S b/sysdeps/sparc/sparc64/sparcv9v2/memset.S
deleted file mode 100644
index 809d3ed9c6..0000000000
--- a/sysdeps/sparc/sparc64/sparcv9v2/memset.S
+++ /dev/null
@@ -1 +0,0 @@
-#include <sparc64/sparcv9v/memset.S>
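This one-line forwarder is the tail end of the old scheme, in which a whole implementation file was selected per CPU family through the sysdeps directory layout (sparcv9v, sparcv9v2, ...) rather than at run time; the multiarch files above replace that with IFUNC dispatch. On toolchains without IFUNC support, the usual substitute is a self-replacing function pointer. A sketch of that alternative, with the variant symbols again assumed to be provided elsewhere and getauxval standing in for the hwcap reads shown earlier:

#include <stddef.h>
#include <sys/auxv.h>

typedef void *(*memset_fn) (void *, int, size_t);

extern void *__memset_niagara1 (void *, int, size_t);
extern void *__memset_ultra1 (void *, int, size_t);

static void *memset_pick (void *, int, size_t);
static memset_fn memset_impl = memset_pick;

/* First call probes AT_HWCAP and swaps the pointer; every later call
   through my_memset goes straight to the selected routine.  */
static void *
memset_pick (void *s, int c, size_t n)
{
  memset_impl = (getauxval (AT_HWCAP) & 0x40)   /* HWCAP_SPARC_BLKINIT */
                ? __memset_niagara1 : __memset_ultra1;
  return memset_impl (s, c, n);
}

void *
my_memset (void *s, int c, size_t n)
{
  return memset_impl (s, c, n);
}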