author    Roland McGrath <roland@gnu.org>  2002-10-11 07:22:18 +0000
committer Roland McGrath <roland@gnu.org>  2002-10-11 07:22:18 +0000
commit    679e4c434f755644cc2093c9940ac58d0c2b51cf
tree      9b854f1c5d1bb85bdb6698b6051e77dfd877147e
parent    704bb2fd8e613322b308ed53c98b3d0d1bd98526
2002-10-08  Roland McGrath  <roland@redhat.com>

	* locale/newlocale.c (__newlocale): If setting all categories to "C",
	just return &_nl_C_locobj instead of copying it.
	* locale/freelocale.c (__freelocale): Check for &_nl_C_locobj.
	* locale/duplocale.c (__duplocale): Likewise.

2002-10-07  Roland McGrath  <roland@frob.com>

	* config.h.in (HAVE_I386_SET_GDT): New #undef.
	* sysdeps/mach/configure.in: Define it with new check for i386_set_gdt.
	* sysdeps/mach/configure: Regenerated.

2002-10-06  Franz Sirl  <Franz.Sirl-kernel@lauterbach.com>

	* sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h (INLINE_SYSCALL):
	Add all necessary register outputs for syscall-clobbered registers.

2002-10-02  David Mosberger  <davidm@hpl.hp.com>

	* sysdeps/ia64/bzero.S: Rewritten by Sverre Jarp to tune for
	Itanium 2 (and Itanium).  Fix unwind directives and make it fit
	in 80 columns.
	* sysdeps/ia64/memset.S: Ditto.
	* sysdeps/ia64/memcpy.S: Ditto.  Move jump table to .rodata section.

2002-10-03  Roland McGrath  <roland@frob.com>

	* sysdeps/mach/hurd/i386/init-first.c (_hurd_stack_setup): Add
	clobbers to asm.
-rw-r--r--  ChangeLog                             32
-rw-r--r--  config.h.in                            3
-rw-r--r--  locale/duplocale.c                     4
-rw-r--r--  locale/freelocale.c                    4
-rw-r--r--  locale/newlocale.c                    11
-rw-r--r--  sysdeps/ia64/bzero.S                 334
-rw-r--r--  sysdeps/ia64/memcpy.S                470
-rw-r--r--  sysdeps/ia64/memset.S                412
-rw-r--r--  sysdeps/mach/configure                30
-rw-r--r--  sysdeps/mach/configure.in              9
-rw-r--r--  sysdeps/mach/hurd/i386/init-first.c    2
11 files changed, 1069 insertions, 242 deletions
diff --git a/ChangeLog b/ChangeLog
index 16ef96aeb8..b1b5601ddd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,35 @@
+2002-10-08 Roland McGrath <roland@redhat.com>
+
+ * locale/newlocale.c (__newlocale): If setting all categories to "C",
+ just return &_nl_C_locobj instead of copying it.
+ * locale/freelocale.c (__freelocale): Check for &_nl_C_locobj.
+ * locale/duplocale.c (__duplocale): Likewise.
+
+2002-10-07 Roland McGrath <roland@frob.com>
+
+ * config.h.in (HAVE_I386_SET_GDT): New #undef.
+ * sysdeps/mach/configure.in: Define it with new check for i386_set_gdt.
+ * sysdeps/mach/configure: Regenerated.
+
+2002-10-06 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+ * sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h (INLINE_SYSCALL):
+ Add all necessary register outputs for syscall-clobbered registers.
+
+2002-10-02 David Mosberger <davidm@hpl.hp.com>
+
+ * sysdeps/ia64/bzero.S: Rewritten by Sverre Jarp to tune for
+ Itanium 2 (and Itanium).
+ Fix unwind directives and make it fit in 80 columns.
+ * sysdeps/ia64/memset.S: Ditto.
+ * sysdeps/ia64/memcpy.S: Ditto.
+ Move jump table to .rodata section.
+
+2002-10-03 Roland McGrath <roland@frob.com>
+
+ * sysdeps/mach/hurd/i386/init-first.c (_hurd_stack_setup): Add
+ clobbers to asm.
+
2002-10-10 Andreas Jaeger <aj@suse.de>
* sysdeps/x86_64/_mcount.S: Restore correct registers.
diff --git a/config.h.in b/config.h.in
index b8a6c09270..86ebe1b750 100644
--- a/config.h.in
+++ b/config.h.in
@@ -179,6 +179,9 @@
/* Mach/i386 specific: define if the `i386_io_perm_*' RPCs are available. */
#undef HAVE_I386_IO_PERM_MODIFY
+/* Mach/i386 specific: define if the `i386_set_gdt' RPC is available. */
+#undef HAVE_I386_SET_GDT
+
/*
*/
diff --git a/locale/duplocale.c b/locale/duplocale.c
index 2fa29d14d6..77dfc01046 100644
--- a/locale/duplocale.c
+++ b/locale/duplocale.c
@@ -33,6 +33,10 @@ __libc_lock_define (extern , __libc_setlocale_lock attribute_hidden)
__locale_t
__duplocale (__locale_t dataset)
{
+ /* This static object is returned for newlocale (LC_ALL_MASK, "C"). */
+ if (dataset == &_nl_C_locobj)
+ return dataset;
+
__locale_t result;
int cnt;
size_t names_len = 0;
diff --git a/locale/freelocale.c b/locale/freelocale.c
index ba0ae85d84..ec169bcb66 100644
--- a/locale/freelocale.c
+++ b/locale/freelocale.c
@@ -34,6 +34,10 @@ __freelocale (__locale_t dataset)
{
int cnt;
+ /* This static object is returned for newlocale (LC_ALL_MASK, "C"). */
+ if (dataset == &_nl_C_locobj)
+ return;
+
/* We modify global data (the usage counts). */
__libc_lock_lock (__libc_setlocale_lock);
diff --git a/locale/newlocale.c b/locale/newlocale.c
index 3b8676ceeb..1131f62c0b 100644
--- a/locale/newlocale.c
+++ b/locale/newlocale.c
@@ -60,6 +60,17 @@ __newlocale (int category_mask, const char *locale, __locale_t base)
if (locale == NULL)
ERROR_RETURN;
+ if (base == &_nl_C_locobj)
+ /* We're to modify BASE, returned for a previous call with "C".
+ We can't really modify the read-only structure, so instead
+ start over by copying it. */
+ base = NULL;
+
+ if ((base == NULL || category_mask == (1 << __LC_LAST) - 1 - (1 << LC_ALL))
+ && (category_mask == 0 || !strcmp (locale, "C")))
+ /* Asking for the "C" locale needn't allocate a new object. */
+ return &_nl_C_locobj;
+
/* Allocate memory for the result. */
if (base != NULL)
result = *base;
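
Taken together, the three locale hunks above make the "C" locale a shared, statically allocated object: __newlocale hands out &_nl_C_locobj directly, __duplocale returns the same pointer when it sees that object, and __freelocale leaves it alone. A minimal sketch of the resulting behavior, as a hypothetical test program (it assumes only the public API from <locale.h>, using the modern locale_t name for the handle type):

#define _GNU_SOURCE
#include <locale.h>
#include <stdio.h>

int
main (void)
{
  /* With this patch, asking for the "C" locale returns the static
     object instead of a freshly allocated copy.  */
  locale_t c1 = newlocale (LC_ALL_MASK, "C", NULL);
  locale_t c2 = duplocale (c1);	/* Recognized; same pointer comes back.  */

  printf ("shared: %d\n", c1 == c2);

  /* freelocale checks for the static object and returns early,
     so neither call frees anything.  */
  freelocale (c2);
  freelocale (c1);
  return 0;
}

With the patch applied this prints "shared: 1" and none of the four calls touches the heap.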
diff --git a/sysdeps/ia64/bzero.S b/sysdeps/ia64/bzero.S
index 3225ed4975..bcca41d5e9 100644
--- a/sysdeps/ia64/bzero.S
+++ b/sysdeps/ia64/bzero.S
@@ -1,7 +1,8 @@
/* Optimized version of the standard bzero() function.
This file is part of the GNU C Library.
Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
- Contributed by Dan Pop <Dan.Pop@cern.ch>.
+ Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
+ Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -25,8 +26,11 @@
in1: count
The algorithm is fairly straightforward: set byte by byte until we
- we get to a word aligned address, then set word by word as much as
- possible; the remaining few bytes are set one by one. */
+ we get to a 16B-aligned address, then loop on 128B chunks using an
+ early store as prefetching, then loop on 32B chunks, then clear remaining
+ words, finally clear remaining bytes.
+ Since a stf.spill f0 can store 16B in one go, we use this instruction
+ to get peak speed. */
#include <sysdep.h>
#undef ret
@@ -34,62 +38,278 @@
#define dest in0
#define cnt in1
-#define save_pfs loc0
-#define ptr1 loc1
-#define ptr2 loc2
-#define tmp loc3
-#define loopcnt loc4
-#define save_lc loc5
+#define tmp r31
+#define save_lc r30
+#define ptr0 r29
+#define ptr1 r28
+#define ptr2 r27
+#define ptr3 r26
+#define ptr9 r24
+#define loopcnt r23
+#define linecnt r22
+#define bytecnt r21
-ENTRY(__bzero)
+// This routine uses only scratch predicate registers (p6 - p15)
+#define p_scr p6 // default register for same-cycle branches
+#define p_unalgn p9
+#define p_y p11
+#define p_n p12
+#define p_yy p13
+#define p_nn p14
+
+#define movi0 mov
+
+#define MIN1 15
+#define MIN1P1HALF 8
+#define LINE_SIZE 128
+#define LSIZE_SH 7 // shift amount
+#define PREF_AHEAD 8
+
+#define USE_FLP
+#if defined(USE_INT)
+#define store st8
+#define myval r0
+#elif defined(USE_FLP)
+#define store stf8
+#define myval f0
+#endif
+
+.align 64
+ENTRY(bzero)
+{ .mmi
.prologue
- alloc save_pfs = ar.pfs, 2, 6, 0, 0
- .save ar.lc, save_lc
- mov save_lc = ar.lc
+ alloc tmp = ar.pfs, 2, 0, 0, 0
+ lfetch.nt1 [dest]
+ .save ar.lc, save_lc
+ movi0 save_lc = ar.lc
+} { .mmi
.body
- mov ret0 = dest
- and tmp = 7, dest
- cmp.eq p6, p0 = cnt, r0
-(p6) br.cond.spnt .restore_and_exit ;;
+ mov ret0 = dest // return value
+ nop.m 0
+ cmp.eq p_scr, p0 = cnt, r0
+;; }
+{ .mmi
+ and ptr2 = -(MIN1+1), dest // aligned address
+ and tmp = MIN1, dest // prepare to check for alignment
+ tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
+} { .mib
mov ptr1 = dest
- sub loopcnt = 8, tmp
- cmp.gt p6, p0 = 16, cnt
-(p6) br.cond.spnt .set_few;;
- cmp.eq p6, p0 = tmp, r0
-(p6) br.cond.sptk .dest_aligned
- sub cnt = cnt, loopcnt
- adds loopcnt = -1, loopcnt;;
- mov ar.lc = loopcnt;;
-.l1:
- st1 [ptr1] = r0, 1
- br.cloop.dptk .l1 ;;
-.dest_aligned:
- adds ptr2 = 8, ptr1
- shr.u loopcnt = cnt, 4 ;; // loopcnt = cnt / 16
- cmp.eq p6, p0 = loopcnt, r0
-(p6) br.cond.spnt .one_more
- and cnt = 0xf, cnt // compute the remaining cnt
- adds loopcnt = -1, loopcnt;;
- mov ar.lc = loopcnt;;
-.l2:
- st8 [ptr1] = r0, 16
- st8 [ptr2] = r0, 16
- br.cloop.dptk .l2
- cmp.le p6, p0 = 8, cnt ;;
-.one_more:
-(p6) st8 [ptr1] = r0, 8
-(p6) adds cnt = -8, cnt ;;
- cmp.eq p6, p0 = cnt, r0
-(p6) br.cond.spnt .restore_and_exit
-.set_few:
- adds loopcnt = -1, cnt;;
- mov ar.lc = loopcnt;;
-.l3:
- st1 [ptr1] = r0, 1
- br.cloop.dptk .l3 ;;
+ nop.i 0
+(p_scr) br.ret.dpnt.many rp // return immediately if count = 0
+;; }
+{ .mib
+ cmp.ne p_unalgn, p0 = tmp, r0
+} { .mib // NB: # of bytes to move is 1
+ sub bytecnt = (MIN1+1), tmp // higher than loopcnt
+ cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
+(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
+;; }
+{ .mmi
+(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
+(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
+;; }
+{ .mib
+(p_y) add cnt = -8, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
+} { .mib
+(p_y) st8 [ptr2] = r0,-4
+(p_n) add ptr2 = 4, ptr2
+;; }
+{ .mib
+(p_yy) add cnt = -4, cnt
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
+} { .mib
+(p_yy) st4 [ptr2] = r0,-2
+(p_nn) add ptr2 = 2, ptr2
+;; }
+{ .mmi
+ mov tmp = LINE_SIZE+1 // for compare
+(p_y) add cnt = -2, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
+} { .mmi
+ nop.m 0
+(p_y) st2 [ptr2] = r0,-1
+(p_n) add ptr2 = 1, ptr2
+;; }
+
+{ .mmi
+(p_yy) st1 [ptr2] = r0
+ cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
+} { .mbb
+(p_yy) add cnt = -1, cnt
+(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
+;; }
+{ .mib
+ nop.m 0
+ shr.u linecnt = cnt, LSIZE_SH
+ nop.b 0
+;; }
+
+ .align 32
+.l1b: // ------------------// L1B: store ahead into cache lines; fill later
+{ .mmi
+ and tmp = -(LINE_SIZE), cnt // compute end of range
+ mov ptr9 = ptr1 // used for prefetching
+ and cnt = (LINE_SIZE-1), cnt // remainder
+} { .mmi
+ mov loopcnt = PREF_AHEAD-1 // default prefetch loop
+ cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+;; }
+{ .mmi
+(p_scr) add loopcnt = -1, linecnt
+ add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
+ add ptr1 = tmp, ptr1 // first address beyond total range
+;; }
+{ .mmi
+ add tmp = -1, linecnt // next loop count
+ movi0 ar.lc = loopcnt
+;; }
+.pref_l1b:
+{ .mib
+ stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
+ nop.i 0
+ br.cloop.dptk.few .pref_l1b
+;; }
+{ .mmi
+ add ptr0 = 16, ptr2 // Two stores in parallel
+ movi0 ar.lc = tmp
+;; }
+.l1bx:
+ { .mmi
+ stf.spill [ptr2] = f0, 32
+ stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+ stf.spill [ptr2] = f0, 32
+ stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+ stf.spill [ptr2] = f0, 32
+ stf.spill [ptr0] = f0, 64
+ cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
+ ;; }
+{ .mmb
+ stf.spill [ptr2] = f0, 32
+(p_scr) stf.spill [ptr9] = f0, 128
+ br.cloop.dptk.few .l1bx
+;; }
+{ .mib
+ cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
+;; }
+
+.fraction_of_line:
+{ .mib
+ add ptr2 = 16, ptr1
+ shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
+;; }
+{ .mib
+ cmp.eq p_scr, p0 = loopcnt, r0
+ add loopcnt = -1, loopcnt
+(p_scr) br.cond.dpnt.many .store_words
+;; }
+{ .mib
+ and cnt = 0x1f, cnt // compute the remaining cnt
+ movi0 ar.lc = loopcnt
+;; }
+ .align 32
+.l2: // -----------------------------// L2A: store 32B in 2 cycles
+{ .mmb
+ store [ptr1] = myval, 8
+ store [ptr2] = myval, 8
+;; } { .mmb
+ store [ptr1] = myval, 24
+ store [ptr2] = myval, 24
+ br.cloop.dptk.many .l2
+;; }
+.store_words:
+{ .mib
+ cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
+;; }
+
+{ .mmi
+ store [ptr1] = myval, 8 // store
+ cmp.le p_y, p_n = 16, cnt //
+ add cnt = -8, cnt // subtract
+;; }
+{ .mmi
+(p_y) store [ptr1] = myval, 8 // store
+(p_y) cmp.le.unc p_yy, p_nn = 16, cnt
+(p_y) add cnt = -8, cnt // subtract
+;; }
+{ .mmi // store
+(p_yy) store [ptr1] = myval, 8
+(p_yy) add cnt = -8, cnt // subtract
+;; }
+
+.move_bytes_from_alignment:
+{ .mib
+ cmp.eq p_scr, p0 = cnt, r0
+ tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
+(p_scr) br.cond.dpnt.few .restore_and_exit
+;; }
+{ .mib
+(p_y) st4 [ptr1] = r0,4
+ tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
+;; }
+{ .mib
+(p_yy) st2 [ptr1] = r0,2
+ tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ?
+;; }
+
+{ .mib
+(p_y) st1 [ptr1] = r0
+;; }
.restore_and_exit:
- mov ar.lc = save_lc
- mov ar.pfs = save_pfs
- br.ret.sptk.many b0
-END(__bzero)
-weak_alias (__bzero, bzero)
+{ .mib
+ nop.m 0
+ movi0 ar.lc = save_lc
+ br.ret.sptk.many rp
+;; }
+
+.move_bytes_unaligned:
+{ .mmi
+ .pred.rel "mutex",p_y, p_n
+ .pred.rel "mutex",p_yy, p_nn
+(p_n) cmp.le p_yy, p_nn = 4, cnt
+(p_y) cmp.le p_yy, p_nn = 5, cnt
+(p_n) add ptr2 = 2, ptr1
+} { .mmi
+(p_y) add ptr2 = 3, ptr1
+(p_y) st1 [ptr1] = r0, 1 // fill 1 (odd-aligned) byte
+(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
+;; }
+{ .mmi
+(p_yy) cmp.le.unc p_y, p0 = 8, cnt
+ add ptr3 = ptr1, cnt // prepare last store
+ movi0 ar.lc = save_lc
+} { .mmi
+(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
+(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
+(p_yy) add cnt = -4, cnt // [11, 10 (or less) left]
+;; }
+{ .mmi
+(p_y) cmp.le.unc p_yy, p0 = 8, cnt
+ add ptr3 = -1, ptr3 // last store
+ tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
+} { .mmi
+(p_y) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
+(p_y) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
+(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
+;; }
+{ .mmi
+(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
+(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
+ // [3, 2 (or less) left]
+ tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
+} { .mmi
+(p_yy) add cnt = -4, cnt
+;; }
+{ .mmb
+(p_scr) st2 [ptr1] = r0 // fill 2 (aligned) bytes
+(p_y) st1 [ptr3] = r0 // fill last byte (using ptr3)
+ br.ret.sptk.many rp
+;; }
+END(bzero)
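
The new header comment describes a staged algorithm; here is a hypothetical C rendering of those stages (the real routine aligns with predicated st8/st4/st2/st1 stores rather than a byte loop, and does the line loop with stf.spill, using the first store into each 128-byte line as a software prefetch):

#include <stddef.h>
#include <stdint.h>

void
bzero_sketch (void *s, size_t n)
{
  unsigned char *p = s;

  /* Stage 1: store bytes until P is 16-byte aligned.  */
  while (n > 0 && ((uintptr_t) p & 15) != 0)
    {
      *p++ = 0;
      n--;
    }

  uint64_t *w = (uint64_t *) p;

  /* Stage 2: 128-byte lines; the asm writes into each line well
     ahead of filling it (the .pref_l1b loop) to warm the cache.  */
  for (; n >= 128; n -= 128)
    for (int i = 0; i < 16; i++)
      *w++ = 0;

  /* Stage 3: 32-byte chunks (the .l2 loop: 32B in 2 cycles).  */
  for (; n >= 32; n -= 32)
    {
      w[0] = w[1] = w[2] = w[3] = 0;
      w += 4;
    }

  /* Stage 4: remaining 8-byte words (.store_words).  */
  for (; n >= 8; n -= 8)
    *w++ = 0;

  /* Stage 5: trailing bytes (.move_bytes_from_alignment).  */
  for (p = (unsigned char *) w; n > 0; n--)
    *p++ = 0;
}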
diff --git a/sysdeps/ia64/memcpy.S b/sysdeps/ia64/memcpy.S
index b49f99e286..257e1aa341 100644
--- a/sysdeps/ia64/memcpy.S
+++ b/sysdeps/ia64/memcpy.S
@@ -1,7 +1,8 @@
/* Optimized version of the standard memcpy() function.
This file is part of the GNU C Library.
Copyright (C) 2000, 2001 Free Software Foundation, Inc.
- Contributed by Dan Pop <Dan.Pop@cern.ch>.
+ Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
+ Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -26,27 +27,39 @@
in2: byte count
An assembly implementation of the algorithm used by the generic C
- version from glibc. The case when all three arguments are multiples
- of 8 is treated separatedly, for extra performance.
+ version from glibc. The case when source and dest are aligned is
+ treated separately, for extra performance.
- In this form, it assumes little endian mode. For big endian mode,
+ In this form, memcpy assumes little endian mode. For big endian mode,
sh1 must be computed using an extra instruction: sub sh1 = 64, sh1
and the order of r[MEMLAT] and r[MEMLAT+1] must be reverted in the
shrp instruction. */
+#define USE_LFETCH
+#define USE_FLP
#include <sysdep.h>
#undef ret
+#define LFETCH_DIST 500
+
+#define ALIGN_UNROLL_no 4 // no. of elements
+#define ALIGN_UNROLL_sh 2 // (shift amount)
+
+#define MEMLAT 8
+#define Nrot ((4*(MEMLAT+2) + 7) & ~7)
+
#define OP_T_THRES 16
#define OPSIZ 8
-#define adest r15
-#define saved_pr r17
-#define saved_lc r18
+#define loopcnt r14
+#define elemcnt r15
+#define saved_pr r16
+#define saved_lc r17
+#define adest r18
#define dest r19
-#define src r20
-#define len r21
-#define asrc r22
+#define asrc r20
+#define src r21
+#define len r22
#define tmp2 r23
#define tmp3 r24
#define tmp4 r25
@@ -54,113 +67,339 @@
#define ploop56 r27
#define loopaddr r28
#define sh1 r29
-#define loopcnt r30
-#define value r31
-
-#define LOOP(shift) \
- .align 32 ; \
-.loop##shift##: \
-(p[0]) ld8 r[0] = [asrc], 8 ; /* w1 */ \
-(p[MEMLAT+1]) st8 [dest] = value, 8 ; \
-(p[MEMLAT]) shrp value = r[MEMLAT], r[MEMLAT+1], shift ; \
- nop.b 0 ; \
- nop.b 0 ; \
- br.ctop.sptk .loop##shift ; \
- br.cond.sptk .cpyfew ; /* deal with the remaining bytes */
-
-#define MEMLAT 21
-#define Nrot (((2*MEMLAT+3) + 7) & ~7)
+#define ptr1 r30
+#define ptr2 r31
+
+#define movi0 mov
+
+#define p_scr p6
+#define p_xtr p7
+#define p_nxtr p8
+#define p_few p9
+
+#if defined(USE_FLP)
+#define load ldf8
+#define store stf8
+#define tempreg f6
+#define the_r fr
+#define the_s fs
+#define the_t ft
+#define the_q fq
+#define the_w fw
+#define the_x fx
+#define the_y fy
+#define the_z fz
+#elif defined(USE_INT)
+#define load ld8
+#define store st8
+#define tempreg tmp2
+#define the_r r
+#define the_s s
+#define the_t t
+#define the_q q
+#define the_w w
+#define the_x x
+#define the_y y
+#define the_z z
+#endif
+
+
+#if defined(USE_LFETCH)
+#define LOOP(shift) \
+ .align 32 ; \
+.loop##shift##: \
+{ .mmb \
+(p[0]) ld8.nt1 r[0] = [asrc], 8 ; \
+(p[0]) lfetch.nt1 [ptr1], 16 ; \
+ nop.b 0 ; \
+} { .mib \
+(p[MEMLAT+1]) st8 [dest] = tmp3, 8 ; \
+(p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ; \
+ nop.b 0 ;; \
+ } { .mmb \
+(p[0]) ld8.nt1 s[0] = [asrc], 8 ; \
+(p[0]) lfetch.nt1 [ptr2], 16 ; \
+ nop.b 0 ; \
+} { .mib \
+(p[MEMLAT+1]) st8 [dest] = tmp4, 8 ; \
+(p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ; \
+ br.ctop.sptk.many .loop##shift \
+;; } \
+{ .mib \
+ br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */ \
+}
+#else
+#define LOOP(shift) \
+ .align 32 ; \
+.loop##shift##: \
+{ .mmb \
+(p[0]) ld8.nt1 r[0] = [asrc], 8 ; \
+ nop.b 0 ; \
+} { .mib \
+(p[MEMLAT+1]) st8 [dest] = tmp3, 8 ; \
+(p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ; \
+ nop.b 0 ;; \
+ } { .mmb \
+(p[0]) ld8.nt1 s[0] = [asrc], 8 ; \
+ nop.b 0 ; \
+} { .mib \
+(p[MEMLAT+1]) st8 [dest] = tmp4, 8 ; \
+(p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ; \
+ br.ctop.sptk.many .loop##shift \
+;; } \
+{ .mib \
+ br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */ \
+}
+#endif
+
ENTRY(memcpy)
+{ .mmi
.prologue
alloc r2 = ar.pfs, 3, Nrot - 3, 0, Nrot
- .rotr r[MEMLAT + 2], q[MEMLAT + 1]
- .rotp p[MEMLAT + 2]
- mov ret0 = in0 // return value = dest
- .save pr, saved_pr
- mov saved_pr = pr // save the predicate registers
- .save ar.lc, saved_lc
- mov saved_lc = ar.lc // save the loop counter
- .body
- or tmp3 = in0, in1 ;; // tmp3 = dest | src
- or tmp3 = tmp3, in2 // tmp3 = dest | src | len
+ .rotr r[MEMLAT+1], s[MEMLAT+2], q[MEMLAT+1], t[MEMLAT+1]
+ .rotp p[MEMLAT+2]
+ .rotf fr[MEMLAT+1], fq[MEMLAT+1], fs[MEMLAT+1], ft[MEMLAT+1]
+ mov ret0 = in0 // return tmp2 = dest
+ .save pr, saved_pr
+ movi0 saved_pr = pr // save the predicate registers
+} { .mmi
+ and tmp4 = 7, in0 // check if destination is aligned
mov dest = in0 // dest
mov src = in1 // src
+;; }
+{ .mii
+ cmp.eq p_scr, p0 = in2, r0 // if (len == 0)
+ .save ar.lc, saved_lc
+ movi0 saved_lc = ar.lc // save the loop counter
+ .body
+ cmp.ge p_few, p0 = OP_T_THRES, in2 // is len <= OP_T_THRESH
+} { .mbb
mov len = in2 // len
- sub tmp2 = r0, in0 // tmp2 = -dest
- cmp.eq p6, p0 = in2, r0 // if (len == 0)
-(p6) br.cond.spnt .restore_and_exit;;// return dest;
- and tmp4 = 7, tmp3 // tmp4 = (dest | src | len) & 7
- shr.u loopcnt = len, 4 ;; // loopcnt = len / 16
- cmp.ne p6, p0 = tmp4, r0 // if ((dest | src | len) & 7 != 0)
-(p6) br.cond.sptk .next // goto next;
-
-// The optimal case, when dest, src and len are all multiples of 8
-
- and tmp3 = 0xf, len // tmp3 = len % 16
- mov pr.rot = 1 << 16 // set rotating predicates
- mov ar.ec = MEMLAT + 1 ;; // set the epilog counter
- cmp.ne p6, p0 = tmp3, r0 // do we have to copy an extra word?
- adds loopcnt = -1, loopcnt;; // --loopcnt
-(p6) ld8 value = [src], 8;;
-(p6) st8 [dest] = value, 8 // copy the "extra" word
- mov ar.lc = loopcnt // set the loop counter
- cmp.eq p6, p0 = 8, len
-(p6) br.cond.spnt .restore_and_exit;;// there was only one word to copy
- adds adest = 8, dest
- adds asrc = 8, src ;;
+(p_scr) br.cond.dpnt.few .restore_and_exit // Branch no. 1: return dest
+(p_few) br.cond.dpnt.many .copy_bytes // Branch no. 2: copy byte by byte
+;; }
+{ .mmi
+#if defined(USE_LFETCH)
+ lfetch.nt1 [dest] //
+ lfetch.nt1 [src] //
+#endif
+ shr.u elemcnt = len, 3 // elemcnt = len / 8
+} { .mib
+ cmp.eq p_scr, p0 = tmp4, r0 // is destination aligned?
+ sub loopcnt = 7, tmp4 //
+(p_scr) br.cond.dptk.many .dest_aligned
+;; }
+{ .mmi
+ ld1 tmp2 = [src], 1 //
+ sub len = len, loopcnt, 1 // reduce len
+ movi0 ar.lc = loopcnt //
+} { .mib
+ cmp.ne p_scr, p0 = 0, loopcnt // avoid loading beyond end-point
+;; }
+
+.l0: // ---------------------------- // L0: Align src on 8-byte boundary
+{ .mmi
+ st1 [dest] = tmp2, 1 //
+(p_scr) ld1 tmp2 = [src], 1 //
+} { .mib
+ cmp.lt p_scr, p0 = 1, loopcnt // avoid load beyond end-point
+ add loopcnt = -1, loopcnt
+ br.cloop.dptk.few .l0 //
+;; }
+
+.dest_aligned:
+{ .mmi
+ and tmp4 = 7, src // ready for alignment check
+ shr.u elemcnt = len, 3 // elemcnt = len / 8
+;; }
+{ .mib
+ cmp.ne p_scr, p0 = tmp4, r0 // is source also aligned
+ tbit.nz p_xtr, p_nxtr = src, 3 // prepare a separate move if src
+} { .mib // is not 16B aligned
+ add ptr2 = LFETCH_DIST, dest // prefetch address
+ add ptr1 = LFETCH_DIST, src
+(p_scr) br.cond.dptk.many .src_not_aligned
+;; }
+
+// The optimal case, when dest, and src are aligned
+
+.both_aligned:
+{ .mmi
+ .pred.rel "mutex",p_xtr,p_nxtr
+(p_xtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no+1, elemcnt // Need N + 1 to qualify
+(p_nxtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no, elemcnt // Need only N to qualify
+ movi0 pr.rot = 1 << 16 // set rotating predicates
+} { .mib
+(p_scr) br.cond.dpnt.many .copy_full_words
+;; }
+
+{ .mmi
+(p_xtr) load tempreg = [src], 8
+(p_xtr) add elemcnt = -1, elemcnt
+ movi0 ar.ec = MEMLAT + 1 // set the epilog counter
+;; }
+{ .mmi
+(p_xtr) add len = -8, len //
+ add asrc = 16, src // one bank apart (for USE_INT)
+ shr.u loopcnt = elemcnt, ALIGN_UNROLL_sh // cater for unrolling
+;;}
+{ .mmi
+ add loopcnt = -1, loopcnt
+(p_xtr) store [dest] = tempreg, 8 // copy the "extra" word
+ nop.i 0
+;; }
+{ .mib
+ add adest = 16, dest
+ movi0 ar.lc = loopcnt // set the loop counter
+;; }
+
.align 32
-.l0:
-(p[0]) ld8 r[0] = [src], 16
-(p[0]) ld8 q[0] = [asrc], 16
-(p[MEMLAT]) st8 [dest] = r[MEMLAT], 16
-(p[MEMLAT]) st8 [adest] = q[MEMLAT], 16
- br.ctop.dptk .l0 ;;
-
- mov pr = saved_pr, -1 // restore the predicate registers
- mov ar.lc = saved_lc // restore the loop counter
+#if defined(USE_FLP)
+.l1: // ------------------------------- // L1: Everything a multiple of 8
+{ .mmi
+#if defined(USE_LFETCH)
+(p[0]) lfetch.nt1 [ptr2],32
+#endif
+(p[0]) ldfp8 the_r[0],the_q[0] = [src], 16
+(p[0]) add len = -32, len
+} {.mmb
+(p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
+(p[MEMLAT]) store [adest] = the_s[MEMLAT], 8
+;; }
+{ .mmi
+#if defined(USE_LFETCH)
+(p[0]) lfetch.nt1 [ptr1],32
+#endif
+(p[0]) ldfp8 the_s[0], the_t[0] = [src], 16
+} {.mmb
+(p[MEMLAT]) store [dest] = the_q[MEMLAT], 24
+(p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
+ br.ctop.dptk.many .l1
+;; }
+#elif defined(USE_INT)
+.l1: // ------------------------------- // L1: Everything a multiple of 8
+{ .mmi
+(p[0]) load the_r[0] = [src], 8
+(p[0]) load the_q[0] = [asrc], 8
+(p[0]) add len = -32, len
+} {.mmb
+(p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
+(p[MEMLAT]) store [adest] = the_q[MEMLAT], 8
+;; }
+{ .mmi
+(p[0]) load the_s[0] = [src], 24
+(p[0]) load the_t[0] = [asrc], 24
+} {.mmb
+(p[MEMLAT]) store [dest] = the_s[MEMLAT], 24
+(p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
+#if defined(USE_LFETCH)
+;; }
+{ .mmb
+(p[0]) lfetch.nt1 [ptr2],32
+(p[0]) lfetch.nt1 [ptr1],32
+#endif
+ br.ctop.dptk.many .l1
+;; }
+#endif
+
+.copy_full_words:
+{ .mib
+ cmp.gt p_scr, p0 = 8, len //
+ shr.u elemcnt = len, 3 //
+(p_scr) br.cond.dpnt.many .copy_bytes
+;; }
+{ .mii
+ load tempreg = [src], 8
+ add loopcnt = -1, elemcnt //
+;; }
+{ .mii
+ cmp.ne p_scr, p0 = 0, loopcnt //
+ mov ar.lc = loopcnt //
+;; }
+
+.l2: // ------------------------------- // L2: Max 4 words copied separately
+{ .mmi
+ store [dest] = tempreg, 8
+(p_scr) load tempreg = [src], 8 //
+ add len = -8, len
+} { .mib
+ cmp.lt p_scr, p0 = 1, loopcnt // avoid load beyond end-point
+ add loopcnt = -1, loopcnt
+ br.cloop.dptk.few .l2
+;; }
+
+.copy_bytes:
+{ .mib
+ cmp.eq p_scr, p0 = len, r0 // is len == 0 ?
+ add loopcnt = -1, len // len--;
+(p_scr) br.cond.spnt .restore_and_exit
+;; }
+{ .mii
+ ld1 tmp2 = [src], 1
+ movi0 ar.lc = loopcnt
+ cmp.ne p_scr, p0 = 0, loopcnt // avoid load beyond end-point
+;; }
+
+.l3: // ------------------------------- // L3: Final byte move
+{ .mmi
+ st1 [dest] = tmp2, 1
+(p_scr) ld1 tmp2 = [src], 1
+} { .mib
+ cmp.lt p_scr, p0 = 1, loopcnt // avoid load beyond end-point
+ add loopcnt = -1, loopcnt
+ br.cloop.dptk.few .l3
+;; }
+
+.restore_and_exit:
+{ .mmi
+ movi0 pr = saved_pr, -1 // restore the predicate registers
+;; }
+{ .mib
+ movi0 ar.lc = saved_lc // restore the loop counter
br.ret.sptk.many b0
-.next:
- cmp.ge p6, p0 = OP_T_THRES, len // is len <= OP_T_THRES
- and loopcnt = 7, tmp2 // loopcnt = -dest % 8
-(p6) br.cond.spnt .cpyfew // copy byte by byte
- ;;
- cmp.eq p6, p0 = loopcnt, r0
-(p6) br.cond.sptk .dest_aligned
- sub len = len, loopcnt // len -= -dest % 8
- adds loopcnt = -1, loopcnt // --loopcnt
- ;;
- mov ar.lc = loopcnt
-.l1: // copy -dest % 8 bytes
- ld1 value = [src], 1 // value = *src++
- ;;
- st1 [dest] = value, 1 // *dest++ = value
- br.cloop.dptk .l1 ;;
-.dest_aligned:
+;; }
+
+
+.src_not_aligned:
+{ .mmi
+ cmp.gt p_scr, p0 = 16, len
and sh1 = 7, src // sh1 = src % 8
- and tmp2 = -8, len // tmp2 = len & -OPSIZ
- and asrc = -8, src // asrc = src & -OPSIZ -- align src
- shr.u loopcnt = len, 3 // loopcnt = len / 8
- and len = 7, len;; // len = len % 8
- adds loopcnt = -1, loopcnt // --loopcnt
- addl tmp4 = @ltoff(.table), gp
- addl tmp3 = @ltoff(.loop56), gp
- mov ar.ec = MEMLAT + 1 // set EC
- mov pr.rot = 1 << 16;; // set rotating predicates
- mov ar.lc = loopcnt // set LC
- cmp.eq p6, p0 = sh1, r0 // is the src aligned?
-(p6) br.cond.sptk .src_aligned
- add src = src, tmp2 // src += len & -OPSIZ
+ shr.u loopcnt = len, 4 // element-cnt = len / 16
+} { .mib
+ add tmp4 = @ltoff(.table), gp
+ add tmp3 = @ltoff(.loop56), gp
+(p_scr) br.cond.dpnt.many .copy_bytes // do byte by byte if too few
+;; }
+{ .mmi
+ and asrc = -8, src // asrc = (-8) -- align src for loop
+ add loopcnt = -1, loopcnt // loopcnt--
shl sh1 = sh1, 3 // sh1 = 8 * (src % 8)
+} { .mmi
+ ld8 ptable = [tmp4] // ptable = &table
ld8 ploop56 = [tmp3] // ploop56 = &loop56
- ld8 ptable = [tmp4];; // ptable = &table
- add tmp3 = ptable, sh1;; // tmp3 = &table + sh1
- mov ar.ec = MEMLAT + 1 + 1 // one more pass needed
- ld8 tmp4 = [tmp3];; // tmp4 = loop offset
+ and tmp2 = -16, len // tmp2 = len & -OPSIZ
+;; }
+{ .mmi
+ add tmp3 = ptable, sh1 // tmp3 = &table + sh1
+ add src = src, tmp2 // src += len & (-16)
+ movi0 ar.lc = loopcnt // set LC
+;; }
+{ .mmi
+ ld8 tmp4 = [tmp3] // tmp4 = loop offset
+ sub len = len, tmp2 // len -= len & (-16)
+ movi0 ar.ec = MEMLAT + 2 // one more pass needed
+;; }
+{ .mmi
+ ld8 s[1] = [asrc], 8 // preload
sub loopaddr = ploop56,tmp4 // loopadd = &loop56 - loop offset
- ld8 r[1] = [asrc], 8;; // w0
- mov b6 = loopaddr;;
+ movi0 pr.rot = 1 << 16 // set rotating predicates
+;; }
+{ .mib
+ nop.m 0
+ movi0 b6 = loopaddr
br b6 // jump to the appropriate loop
+;; }
LOOP(8)
LOOP(16)
@@ -169,26 +408,9 @@ ENTRY(memcpy)
LOOP(40)
LOOP(48)
LOOP(56)
-
-.src_aligned:
-.l3:
-(p[0]) ld8 r[0] = [src], 8
-(p[MEMLAT]) st8 [dest] = r[MEMLAT], 8
- br.ctop.dptk .l3 ;;
-.cpyfew:
- cmp.eq p6, p0 = len, r0 // is len == 0 ?
- adds len = -1, len // --len;
-(p6) br.cond.spnt .restore_and_exit ;;
- mov ar.lc = len
-.l4:
- ld1 value = [src], 1
- ;;
- st1 [dest] = value, 1
- br.cloop.dptk .l4 ;;
-.restore_and_exit:
- mov pr = saved_pr, -1 // restore the predicate registers
- mov ar.lc = saved_lc // restore the loop counter
- br.ret.sptk.many b0
+END(memcpy)
+
+ .rodata
.align 8
.table:
data8 0 // dummy entry
@@ -199,5 +421,3 @@ ENTRY(memcpy)
data8 .loop56 - .loop40
data8 .loop56 - .loop48
data8 .loop56 - .loop56
-
-END(memcpy)
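
The distinctive part of the rewrite is the .src_not_aligned path: eight unrolled loops, LOOP(8) through LOOP(56), one per possible misalignment of src within an 8-byte word, selected by loading an offset from .table and taking an indirect branch (movi0 b6 = loopaddr; br b6). Each iteration merges two aligned 8-byte loads into one aligned 8-byte store with shrp. A hypothetical little-endian C rendering of one loop body (it assumes src is not 8-byte aligned, so the shift is in 8..56, and mirrors the asm's one-word preload):

#include <stddef.h>
#include <stdint.h>

void
copy_unaligned_words (uint64_t *dest, const unsigned char *src,
                      size_t nwords)
{
  unsigned sh1 = 8 * ((uintptr_t) src % 8);	/* bit offset, 8..56 */
  const uint64_t *asrc = (const uint64_t *) ((uintptr_t) src & ~(uintptr_t) 7);
  uint64_t w0 = *asrc++;	/* preload, like the ld8 issued before br b6 */

  for (size_t i = 0; i < nwords; i++)
    {
      uint64_t w1 = *asrc++;
      /* This is what shrp computes: high bits of the older word,
         low bits of the newer one (little-endian byte order).  */
      dest[i] = (w0 >> sh1) | (w1 << (64 - sh1));
      w0 = w1;
    }
}

As the header comment notes, a big-endian build would compute sh1 = 64 - sh1 and swap the operands of the merge.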
diff --git a/sysdeps/ia64/memset.S b/sysdeps/ia64/memset.S
index 56db66fd0b..2257b16047 100644
--- a/sysdeps/ia64/memset.S
+++ b/sysdeps/ia64/memset.S
@@ -1,7 +1,8 @@
/* Optimized version of the standard memset() function.
This file is part of the GNU C Library.
- Copyright (C) 2000, 2001 Free Software Foundation, Inc.
- Contributed by Dan Pop <Dan.Pop@cern.ch>.
+ Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
+ Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -19,80 +20,373 @@
02111-1307 USA. */
/* Return: dest
-
+
Inputs:
in0: dest
in1: value
in2: count
The algorithm is fairly straightforward: set byte by byte until we
- we get to a word aligned address, then set word by word as much as
- possible; the remaining few bytes are set one by one. */
+ we get to a 16B-aligned address, then loop on 128B chunks using an
+ early store as prefetching, then loop on 32B chunks, then clear remaining
+ words, finally clear remaining bytes.
+ Since a stf.spill f0 can store 16B in one go, we use this instruction
+ to get peak speed when value = 0. */
#include <sysdep.h>
#undef ret
#define dest in0
-#define byteval in1
+#define value in1
#define cnt in2
-#define save_pfs loc0
-#define ptr1 loc1
-#define ptr2 loc2
-#define tmp loc3
-#define loopcnt loc4
-#define save_lc loc5
-#define wordval loc6
+#define tmp r31
+#define save_lc r30
+#define ptr0 r29
+#define ptr1 r28
+#define ptr2 r27
+#define ptr3 r26
+#define ptr9 r24
+#define loopcnt r23
+#define linecnt r22
+#define bytecnt r21
+
+#define fvalue f6
+
+// This routine uses only scratch predicate registers (p6 - p15)
+#define p_scr p6 // default register for same-cycle branches
+#define p_nz p7
+#define p_zr p8
+#define p_unalgn p9
+#define p_y p11
+#define p_n p12
+#define p_yy p13
+#define p_nn p14
+
+#define movi0 mov
+#define MIN1 15
+#define MIN1P1HALF 8
+#define LINE_SIZE 128
+#define LSIZE_SH 7 // shift amount
+#define PREF_AHEAD 8
+
+#define USE_FLP
+#if defined(USE_INT)
+#define store st8
+#define myval value
+#elif defined(USE_FLP)
+#define store stf8
+#define myval fvalue
+#endif
+
+.align 64
ENTRY(memset)
+{ .mmi
.prologue
- alloc save_pfs = ar.pfs, 3, 7, 0, 0
- .save ar.lc, save_lc
- mov save_lc = ar.lc
+ alloc tmp = ar.pfs, 3, 0, 0, 0
+ lfetch.nt1 [dest]
+ .save ar.lc, save_lc
+ movi0 save_lc = ar.lc
+} { .mmi
.body
- mov ret0 = dest
- and tmp = 7, dest
- cmp.eq p6, p0 = cnt, r0
-(p6) br.cond.spnt .restore_and_exit ;;
+ mov ret0 = dest // return value
+ cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
+ cmp.eq p_scr, p0 = cnt, r0
+;; }
+{ .mmi
+ and ptr2 = -(MIN1+1), dest // aligned address
+ and tmp = MIN1, dest // prepare to check for alignment
+ tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
+} { .mib
mov ptr1 = dest
- sub loopcnt = 8, tmp
- cmp.gt p6, p0 = 16, cnt
-(p6) br.cond.spnt .set_few;;
- cmp.eq p6, p0 = tmp, r0
-(p6) br.cond.sptk .dest_aligned
- sub cnt = cnt, loopcnt
- adds loopcnt = -1, loopcnt;;
- mov ar.lc = loopcnt;;
-.l1:
- st1 [ptr1] = byteval, 1
- br.cloop.dptk .l1 ;;
-.dest_aligned:
- adds ptr2 = 8, ptr1
- mux1 wordval = byteval, @brcst
- shr.u loopcnt = cnt, 4 ;; // loopcnt = cnt / 16
- cmp.eq p6, p0 = loopcnt, r0
-(p6) br.cond.spnt .one_more
- and cnt = 0xf, cnt // compute the remaining cnt
- adds loopcnt = -1, loopcnt;;
- mov ar.lc = loopcnt;;
-.l2:
- st8 [ptr1] = wordval, 16
- st8 [ptr2] = wordval, 16
- br.cloop.dptk .l2
- cmp.le p6, p0 = 8, cnt ;;
-.one_more:
-(p6) st8 [ptr1] = wordval, 8
-(p6) adds cnt = -8, cnt ;;
- cmp.eq p6, p0 = cnt, r0
-(p6) br.cond.spnt .restore_and_exit
-.set_few:
- adds loopcnt = -1, cnt;;
- mov ar.lc = loopcnt;;
-.l3:
- st1 [ptr1] = byteval, 1
- br.cloop.dptk .l3 ;;
+ mux1 value = value, @brcst // create 8 identical bytes in word
+(p_scr) br.ret.dpnt.many rp // return immediately if count = 0
+;; }
+{ .mib
+ cmp.ne p_unalgn, p0 = tmp, r0
+} { .mib // NB: # of bytes to move is 1 higher
+ sub bytecnt = (MIN1+1), tmp // than loopcnt
+ cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
+(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
+;; }
+{ .mmi
+(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
+(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
+;; }
+{ .mib
+(p_y) add cnt = -8, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
+} { .mib
+(p_y) st8 [ptr2] = value, -4
+(p_n) add ptr2 = 4, ptr2
+;; }
+{ .mib
+(p_yy) add cnt = -4, cnt
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
+} { .mib
+(p_yy) st4 [ptr2] = value, -2
+(p_nn) add ptr2 = 2, ptr2
+;; }
+{ .mmi
+ mov tmp = LINE_SIZE+1 // for compare
+(p_y) add cnt = -2, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
+} { .mmi
+ setf.sig fvalue=value // transfer value to FLP side
+(p_y) st2 [ptr2] = value, -1
+(p_n) add ptr2 = 1, ptr2
+;; }
+
+{ .mmi
+(p_yy) st1 [ptr2] = value
+ cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
+} { .mbb
+(p_yy) add cnt = -1, cnt
+(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
+;; }
+
+{ .mib
+ nop.m 0
+ shr.u linecnt = cnt, LSIZE_SH
+(p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill
+;; }
+
+ .align 32 // -------- // L1A: store ahead into cache lines; fill later
+{ .mmi
+ and tmp = -(LINE_SIZE), cnt // compute end of range
+ mov ptr9 = ptr1 // used for prefetching
+ and cnt = (LINE_SIZE-1), cnt // remainder
+} { .mmi
+ mov loopcnt = PREF_AHEAD-1 // default prefetch loop
+ cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+;; }
+{ .mmi
+(p_scr) add loopcnt = -1, linecnt // start of stores
+ add ptr2 = 8, ptr1 // (beyond prefetch stores)
+ add ptr1 = tmp, ptr1 // first address beyond total
+;; } // range
+{ .mmi
+ add tmp = -1, linecnt // next loop count
+ movi0 ar.lc = loopcnt
+;; }
+.pref_l1a:
+{ .mib
+ store [ptr9] = myval, 128 // Do stores one cache line apart
+ nop.i 0
+ br.cloop.dptk.few .pref_l1a
+;; }
+{ .mmi
+ add ptr0 = 16, ptr2 // Two stores in parallel
+ movi0 ar.lc = tmp
+;; }
+.l1ax:
+ { .mmi
+ store [ptr2] = myval, 8
+ store [ptr0] = myval, 8
+ ;; }
+ { .mmi
+ store [ptr2] = myval, 24
+ store [ptr0] = myval, 24
+ ;; }
+ { .mmi
+ store [ptr2] = myval, 8
+ store [ptr0] = myval, 8
+ ;; }
+ { .mmi
+ store [ptr2] = myval, 24
+ store [ptr0] = myval, 24
+ ;; }
+ { .mmi
+ store [ptr2] = myval, 8
+ store [ptr0] = myval, 8
+ ;; }
+ { .mmi
+ store [ptr2] = myval, 24
+ store [ptr0] = myval, 24
+ ;; }
+ { .mmi
+ store [ptr2] = myval, 8
+ store [ptr0] = myval, 32
+ cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
+ ;; }
+{ .mmb
+ store [ptr2] = myval, 24
+(p_scr) store [ptr9] = myval, 128
+ br.cloop.dptk.few .l1ax
+;; }
+{ .mbb
+ cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
+(p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2
+ br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3
+;; }
+
+ .align 32
+.l1b: // ------------------ // L1B: store ahead into cache lines; fill later
+{ .mmi
+ and tmp = -(LINE_SIZE), cnt // compute end of range
+ mov ptr9 = ptr1 // used for prefetching
+ and cnt = (LINE_SIZE-1), cnt // remainder
+} { .mmi
+ mov loopcnt = PREF_AHEAD-1 // default prefetch loop
+ cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+;; }
+{ .mmi
+(p_scr) add loopcnt = -1, linecnt
+ add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
+ add ptr1 = tmp, ptr1 // first address beyond total range
+;; }
+{ .mmi
+ add tmp = -1, linecnt // next loop count
+ movi0 ar.lc = loopcnt
+;; }
+.pref_l1b:
+{ .mib
+ stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
+ nop.i 0
+ br.cloop.dptk.few .pref_l1b
+;; }
+{ .mmi
+ add ptr0 = 16, ptr2 // Two stores in parallel
+ movi0 ar.lc = tmp
+;; }
+.l1bx:
+ { .mmi
+ stf.spill [ptr2] = f0, 32
+ stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+ stf.spill [ptr2] = f0, 32
+ stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+ stf.spill [ptr2] = f0, 32
+ stf.spill [ptr0] = f0, 64
+ cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
+ ;; }
+{ .mmb
+ stf.spill [ptr2] = f0, 32
+(p_scr) stf.spill [ptr9] = f0, 128
+ br.cloop.dptk.few .l1bx
+;; }
+{ .mib
+ cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
+;; }
+
+.fraction_of_line:
+{ .mib
+ add ptr2 = 16, ptr1
+ shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
+;; }
+{ .mib
+ cmp.eq p_scr, p0 = loopcnt, r0
+ add loopcnt = -1, loopcnt
+(p_scr) br.cond.dpnt.many .store_words
+;; }
+{ .mib
+ and cnt = 0x1f, cnt // compute the remaining cnt
+ movi0 ar.lc = loopcnt
+;; }
+ .align 32
+.l2: // ---------------------------- // L2A: store 32B in 2 cycles
+{ .mmb
+ store [ptr1] = myval, 8
+ store [ptr2] = myval, 8
+;; } { .mmb
+ store [ptr1] = myval, 24
+ store [ptr2] = myval, 24
+ br.cloop.dptk.many .l2
+;; }
+.store_words:
+{ .mib
+ cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
+;; }
+
+{ .mmi
+ store [ptr1] = myval, 8 // store
+ cmp.le p_y, p_n = 16, cnt //
+ add cnt = -8, cnt // subtract
+;; }
+{ .mmi
+(p_y) store [ptr1] = myval, 8 // store
+(p_y) cmp.le.unc p_yy, p_nn = 16, cnt //
+(p_y) add cnt = -8, cnt // subtract
+;; }
+{ .mmi // store
+(p_yy) store [ptr1] = myval, 8 //
+(p_yy) add cnt = -8, cnt // subtract
+;; }
+
+.move_bytes_from_alignment:
+{ .mib
+ cmp.eq p_scr, p0 = cnt, r0
+ tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
+(p_scr) br.cond.dpnt.few .restore_and_exit
+;; }
+{ .mib
+(p_y) st4 [ptr1] = value, 4
+ tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
+;; }
+{ .mib
+(p_yy) st2 [ptr1] = value, 2
+ tbit.nz.unc p_y, p0 = cnt, 0
+;; }
+
+{ .mib
+(p_y) st1 [ptr1] = value
+;; }
.restore_and_exit:
- mov ar.lc = save_lc
- mov ar.pfs = save_pfs
- br.ret.sptk.many b0
+{ .mib
+ nop.m 0
+ movi0 ar.lc = save_lc
+ br.ret.sptk.many rp
+;; }
+
+.move_bytes_unaligned:
+{ .mmi
+ .pred.rel "mutex",p_y, p_n
+ .pred.rel "mutex",p_yy, p_nn
+(p_n) cmp.le p_yy, p_nn = 4, cnt
+(p_y) cmp.le p_yy, p_nn = 5, cnt
+(p_n) add ptr2 = 2, ptr1
+} { .mmi
+(p_y) add ptr2 = 3, ptr1
+(p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte
+(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
+;; }
+{ .mmi
+(p_yy) cmp.le.unc p_y, p0 = 8, cnt
+ add ptr3 = ptr1, cnt // prepare last store
+ movi0 ar.lc = save_lc
+} { .mmi
+(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
+(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
+(p_yy) add cnt = -4, cnt // [11, 10 (or less) left]
+;; }
+{ .mmi
+(p_y) cmp.le.unc p_yy, p0 = 8, cnt
+ add ptr3 = -1, ptr3 // last store
+ tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
+} { .mmi
+(p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
+(p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
+(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
+;; }
+{ .mmi
+(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
+(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
+ // [3, 2 (or less) left]
+ tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
+} { .mmi
+(p_yy) add cnt = -4, cnt
+;; }
+{ .mmb
+(p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes
+(p_y) st1 [ptr3] = value // fill last byte (using ptr3)
+ br.ret.sptk.many rp
+;; }
END(memset)
+
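Two details of the memset rewrite deserve a note. `mux1 value = value, @brcst' replicates the low byte of VALUE into all eight bytes of a register, so each st8/stf8 sets eight bytes per store; and the p_zr predicate computed at entry diverts value == 0 to the stf.spill path (.l1b), shared with bzero, which clears sixteen bytes per store. A sketch of the broadcast step in plain C (hypothetical helper, not glibc code):

#include <stdint.h>

/* Equivalent of `mux1 value = value, @brcst': copy one byte into
   every byte lane of a 64-bit word.  */
static inline uint64_t
byte_broadcast (unsigned char c)
{
  return (uint64_t) c * 0x0101010101010101ULL;
}

/* byte_broadcast (0xab) == 0xabababababababab, ready for 8-byte stores.  */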
diff --git a/sysdeps/mach/configure b/sysdeps/mach/configure
index 6f1013236a..d255328188 100644
--- a/sysdeps/mach/configure
+++ b/sysdeps/mach/configure
@@ -338,3 +338,33 @@ if test $libc_cv_mach_i386_ioports = yes; then
EOF
fi
+
+echo $ac_n "checking for i386_set_gdt in mach_i386.defs""... $ac_c" 1>&6
+echo "configure:344: checking for i386_set_gdt in mach_i386.defs" >&5
+if eval "test \"`echo '$''{'libc_cv_mach_i386_gdt'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 349 "configure"
+#include "confdefs.h"
+#include <mach/i386/mach_i386.defs>
+EOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ egrep "i386_set_gdt" >/dev/null 2>&1; then
+ rm -rf conftest*
+ libc_cv_mach_i386_gdt=yes
+else
+ rm -rf conftest*
+ libc_cv_mach_i386_gdt=no
+fi
+rm -f conftest*
+
+fi
+
+echo "$ac_t""$libc_cv_mach_i386_gdt" 1>&6
+if test $libc_cv_mach_i386_gdt = yes; then
+ cat >> confdefs.h <<\EOF
+#define HAVE_I386_SET_GDT 1
+EOF
+
+fi
diff --git a/sysdeps/mach/configure.in b/sysdeps/mach/configure.in
index 6efd39f58d..92b7777304 100644
--- a/sysdeps/mach/configure.in
+++ b/sysdeps/mach/configure.in
@@ -89,3 +89,12 @@ AC_EGREP_HEADER(i386_io_perm_modify, mach/i386/mach_i386.defs,
if test $libc_cv_mach_i386_ioports = yes; then
AC_DEFINE([HAVE_I386_IO_PERM_MODIFY])
fi
+
+AC_CACHE_CHECK(for i386_set_gdt in mach_i386.defs,
+ libc_cv_mach_i386_gdt, [dnl
+AC_EGREP_HEADER(i386_set_gdt, mach/i386/mach_i386.defs,
+ libc_cv_mach_i386_gdt=yes,
+ libc_cv_mach_i386_gdt=no)])
+if test $libc_cv_mach_i386_gdt = yes; then
+ AC_DEFINE([HAVE_I386_SET_GDT])
+fi
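
The new check is a preprocessor grep: AC_EGREP_HEADER runs the preprocessor over <mach/i386/mach_i386.defs> and searches the output for the name i386_set_gdt, caching the answer in libc_cv_mach_i386_gdt and defining HAVE_I386_SET_GDT on success. Consumers can then guard their use of the RPC; a hypothetical shape (the actual callers are not part of this commit):

#include <config.h>

#ifdef HAVE_I386_SET_GDT
/* The installed Mach headers declare the i386_set_gdt RPC, so code
   here may reference it.  */
#else
/* Older Mach headers: leave the GDT-based code path out.  */
#endif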
diff --git a/sysdeps/mach/hurd/i386/init-first.c b/sysdeps/mach/hurd/i386/init-first.c
index 47edae5086..b3804cba92 100644
--- a/sysdeps/mach/hurd/i386/init-first.c
+++ b/sysdeps/mach/hurd/i386/init-first.c
@@ -336,7 +336,7 @@ _hurd_stack_setup (volatile int argc, ...)
*--data = (&argc)[-1];
asm volatile ("movl %0, %%esp\n" /* Switch to new outermost stack. */
"movl $0, %%ebp\n" /* Clear outermost frame pointer. */
- "jmp *%1" : : "r" (data), "r" (&doinit1));
+ "jmp *%1" : : "r" (data), "r" (&doinit1) : "sp", "bp");
/* NOTREACHED */
}
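
The init-first.c hunk is small but consequential: adding "sp" and "bp" to the clobber list tells GCC that the asm leaves %esp and %ebp in new states (it switches stacks and clears the frame pointer), so the compiler must not rely on anything it had cached in those registers. A minimal generic illustration of the same rule, as a hypothetical example rather than the glibc code:

int
add_one (int x)
{
  int result;
  /* The sequence scribbles on %eax, so %eax must be declared as
     clobbered; otherwise GCC may keep X or RESULT there across
     the statement and read back garbage.  */
  asm ("movl %1, %%eax\n\t"
       "incl %%eax\n\t"
       "movl %%eax, %0"
       : "=r" (result)
       : "r" (x)
       : "eax");
  return result;
}

Listing a register as clobbered also stops GCC from allocating the asm's operands to it, which is the guarantee the stack-switching asm needs for the stack and frame pointers.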