Diffstat (limited to 'sysdeps')
-rw-r--r--   sysdeps/generic/rawmemchr.c              | 182
-rw-r--r--   sysdeps/i386/i486/bits/string.h          |  30
-rw-r--r--   sysdeps/i386/rawmemchr.S                 | 218
-rw-r--r--   sysdeps/unix/sysv/linux/bits/posix_opt.h |   7
4 files changed, 434 insertions, 3 deletions
diff --git a/sysdeps/generic/rawmemchr.c b/sysdeps/generic/rawmemchr.c
new file mode 100644
index 0000000000..c205968668
--- /dev/null
+++ b/sysdeps/generic/rawmemchr.c
@@ -0,0 +1,182 @@
+/* Copyright (C) 1991, 1993, 1996, 1997, 1999 Free Software Foundation, Inc.
+   Based on strlen implementation by Torbjorn Granlund (tege@sics.se),
+   with help from Dan Sahlin (dan@sics.se) and
+   commentary by Jim Blandy (jimb@ai.mit.edu);
+   adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu),
+   and implemented by Roland McGrath (roland@ai.mit.edu).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If not,
+   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#undef __ptr_t
+#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
+# define __ptr_t void *
+#else /* Not C++ or ANSI C.  */
+# define __ptr_t char *
+#endif /* C++ or ANSI C.  */
+
+#if defined (_LIBC)
+# include <string.h>
+#endif
+
+#if defined (HAVE_LIMITS_H) || defined (_LIBC)
+# include <limits.h>
+#endif
+
+#define LONG_MAX_32_BITS 2147483647
+
+#ifndef LONG_MAX
+#define LONG_MAX LONG_MAX_32_BITS
+#endif
+
+#include <sys/types.h>
+
+#undef memchr
+
+
+/* Search no more than N bytes of S for C.  */
+__ptr_t
+__rawmemchr (s, c)
+     const __ptr_t s;
+     int c;
+{
+  const unsigned char *char_ptr;
+  const unsigned long int *longword_ptr;
+  unsigned long int longword, magic_bits, charmask;
+
+  c = (unsigned char) c;
+
+  /* Handle the first few characters by reading one character at a time.
+     Do this until CHAR_PTR is aligned on a longword boundary.  */
+  for (char_ptr = (const unsigned char *) s;
+       ((unsigned long int) char_ptr & (sizeof (longword) - 1)) != 0;
+       ++char_ptr)
+    if (*char_ptr == c)
+      return (__ptr_t) char_ptr;
+
+  /* All these elucidatory comments refer to 4-byte longwords,
+     but the theory applies equally well to 8-byte longwords.  */
+
+  longword_ptr = (unsigned long int *) char_ptr;
+
+  /* Bits 31, 24, 16, and 8 of this number are zero.  Call these bits
+     the "holes."  Note that there is a hole just to the left of
+     each byte, with an extra at the end:
+
+     bits:  01111110 11111110 11111110 11111111
+     bytes: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
+
+     The 1-bits make sure that carries propagate to the next 0-bit.
+     The 0-bits provide holes for carries to fall into.  */
+
+  if (sizeof (longword) != 4 && sizeof (longword) != 8)
+    abort ();
+
+#if LONG_MAX <= LONG_MAX_32_BITS
+  magic_bits = 0x7efefeff;
+#else
+  magic_bits = ((unsigned long int) 0x7efefefe << 32) | 0xfefefeff;
+#endif
+
+  /* Set up a longword, each of whose bytes is C.  */
+  charmask = c | (c << 8);
+  charmask |= charmask << 16;
+#if LONG_MAX > LONG_MAX_32_BITS
+  charmask |= charmask << 32;
+#endif
+
+  /* Instead of the traditional loop which tests each character,
+     we will test a longword at a time.  The tricky part is testing
+     if *any of the four* bytes in the longword in question are zero.  */
+  while (1)
+    {
+      /* We tentatively exit the loop if adding MAGIC_BITS to
+	 LONGWORD fails to change any of the hole bits of LONGWORD.
+
+	 1) Is this safe?  Will it catch all the zero bytes?
+	 Suppose there is a byte with all zeros.  Any carry bits
+	 propagating from its left will fall into the hole at its
+	 least significant bit and stop.  Since there will be no
+	 carry from its most significant bit, the LSB of the
+	 byte to the left will be unchanged, and the zero will be
+	 detected.
+
+	 2) Is this worthwhile?  Will it ignore everything except
+	 zero bytes?  Suppose every byte of LONGWORD has a bit set
+	 somewhere.  There will be a carry into bit 8.  If bit 8
+	 is set, this will carry into bit 16.  If bit 8 is clear,
+	 one of bits 9-15 must be set, so there will be a carry
+	 into bit 16.  Similarly, there will be a carry into bit
+	 24.  If one of bits 24-30 is set, there will be a carry
+	 into bit 31, so all of the hole bits will be changed.
+
+	 The one misfire occurs when bits 24-30 are clear and bit
+	 31 is set; in this case, the hole at bit 31 is not
+	 changed.  If we had access to the processor carry flag,
+	 we could close this loophole by putting the fourth hole
+	 at bit 32!
+
+	 So it ignores everything except 128's, when they're aligned
+	 properly.
+
+	 3) But wait!  Aren't we looking for C, not zero?
+	 Good point.  So what we do is XOR LONGWORD with a longword,
+	 each of whose bytes is C.  This turns each byte that is C
+	 into a zero.  */
+
+      longword = *longword_ptr++ ^ charmask;
+
+      /* Add MAGIC_BITS to LONGWORD.  */
+      if ((((longword + magic_bits)
+
+	    /* Set those bits that were unchanged by the addition.  */
+	    ^ ~longword)
+
+	   /* Look at only the hole bits.  If any of the hole bits
+	      are unchanged, most likely one of the bytes was a
+	      zero.  */
+	   & ~magic_bits) != 0)
+	{
+	  /* Which of the bytes was C?  If none of them were, it was
+	     a misfire; continue the search.  */
+
+	  const unsigned char *cp = (const unsigned char *) (longword_ptr - 1);
+
+	  if (cp[0] == c)
+	    return (__ptr_t) cp;
+	  if (cp[1] == c)
+	    return (__ptr_t) &cp[1];
+	  if (cp[2] == c)
+	    return (__ptr_t) &cp[2];
+	  if (cp[3] == c)
+	    return (__ptr_t) &cp[3];
+#if LONG_MAX > 2147483647
+	  if (cp[4] == c)
+	    return (__ptr_t) &cp[4];
+	  if (cp[5] == c)
+	    return (__ptr_t) &cp[5];
+	  if (cp[6] == c)
+	    return (__ptr_t) &cp[6];
+	  if (cp[7] == c)
+	    return (__ptr_t) &cp[7];
+#endif
+	}
+    }
+}
+weak_alias (__rawmemchr, rawmemchr)
diff --git a/sysdeps/i386/i486/bits/string.h b/sysdeps/i386/i486/bits/string.h
index f810de06ea..e20f037ab7 100644
--- a/sysdeps/i386/i486/bits/string.h
+++ b/sysdeps/i386/i486/bits/string.h
@@ -1,5 +1,5 @@
 /* Optimized, inlined string functions.  i486 version.
-   Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -377,6 +377,30 @@ memchr (__const void *__s, int __c, size_t __n)
 }
 
 
+/* Return pointer to C in S.  */
+#define _HAVE_STRING_ARCH_rawmemchr 1
+__STRING_INLINE void *
+__rawmemchr (const void *__s, int __c)
+{
+  register unsigned long int __d0;
+  register unsigned char *__res;
+  __asm__ __volatile__
+    ("cld\n\t"
+     "repne; scasb\n\t"
+     : "=D" (__res), "=&c" (__d0)
+     : "a" (__c), "0" (__s), "1" (0xffffffff)
+     : "cc");
+  return __res - 1;
+}
+#ifdef __USE_GNU
+__STRING_INLINE void *
+rawmemchr (const void *__s, int __c)
+{
+  return __rawmemchr (__s, __c);
+}
+#endif /* use GNU */
+
+
 /* Return the length of S.  */
 #define _HAVE_STRING_ARCH_strlen 1
 #define strlen(str) \
@@ -1049,7 +1073,9 @@ __strncmp_g (__const char *__s1, __const char *__s2, size_t __n)
 #define _HAVE_STRING_ARCH_strchr 1
 #define strchr(s, c) \
   (__extension__ (__builtin_constant_p (c) \
-		  ? __strchr_c (s, ((c) & 0xff) << 8) \
+		  ? ((c) == '\0' \
+		     ? (char *) __rawmemchr (s, c) \
+		     : __strchr_c (s, ((c) & 0xff) << 8)) \
 		  : __strchr_g (s, c)))
 
 __STRING_INLINE char *__strchr_c (__const char *__s, int __c);
diff --git a/sysdeps/i386/rawmemchr.S b/sysdeps/i386/rawmemchr.S
new file mode 100644
index 0000000000..83626e0473
--- /dev/null
+++ b/sysdeps/i386/rawmemchr.S
@@ -0,0 +1,218 @@
+/* rawmemchr (str, ch) -- Return pointer to first occurrence of CH in STR.
+   For Intel 80x86, x>=3.
+   Copyright (C) 1994, 95, 96, 97, 98, 99 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@gnu.ai.mit.edu>
+   Optimised a little by Alan Modra <Alan@SPRI.Levels.UniSA.Edu.Au>
+
+   This version is developed using the same algorithm as the fast C
+   version which carries the following introduction:
+
+   Based on strlen implementation by Torbjorn Granlund (tege@sics.se),
+   with help from Dan Sahlin (dan@sics.se) and
+   commentary by Jim Blandy (jimb@ai.mit.edu);
+   adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu),
+   and implemented by Roland McGrath (roland@ai.mit.edu).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If not,
+   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+#include <sysdep.h>
+#include "asm-syntax.h"
+
+/*
+   INPUT PARAMETERS:
+   str		(sp + 4)
+   c		(sp + 8)
+*/
+
+	.text
+ENTRY (__rawmemchr)
+	/* Save callee-safe register used in this function.  */
+	pushl %edi
+
+	/* Load parameters into registers.  */
+	movl 8(%esp), %eax	/* str: pointer to memory block.  */
+	movl 12(%esp), %edx	/* c: byte we are looking for.  */
+
+	/* At the moment %edx contains C.  What we need for the
+	   algorithm is C in all bytes of the dword.  Avoid
+	   operations on 16 bit words because these require an
+	   prefix byte (and one more cycle).  */
+	movb %dl, %dh		/* Now it is 0|0|c|c */
+	movl %edx, %ecx
+	shll $16, %edx		/* Now c|c|0|0 */
+	movw %cx, %dx		/* And finally c|c|c|c */
+
+	/* Better performance can be achieved if the word (32
+	   bit) memory access is aligned on a four-byte-boundary.
+	   So process first bytes one by one until boundary is
+	   reached. Don't use a loop for better performance.  */
+
+	testb $3, %al		/* correctly aligned ? */
+	je L(1)			/* yes => begin loop */
+	cmpb %dl, (%eax)	/* compare byte */
+	je L(9)			/* target found => return */
+	incl %eax		/* increment source pointer */
+
+	testb $3, %al		/* correctly aligned ? */
+	je L(1)			/* yes => begin loop */
+	cmpb %dl, (%eax)	/* compare byte */
+	je L(9)			/* target found => return */
+	incl %eax		/* increment source pointer */
+
+	testb $3, %al		/* correctly aligned ? */
+	je L(1)			/* yes => begin loop */
+	cmpb %dl, (%eax)	/* compare byte */
+	je L(9)			/* target found => return */
+	incl %eax		/* increment source pointer */
+
+	/* We exit the loop if adding MAGIC_BITS to LONGWORD fails to
+	   change any of the hole bits of LONGWORD.
+
+	   1) Is this safe?  Will it catch all the zero bytes?
+	   Suppose there is a byte with all zeros.  Any carry bits
+	   propagating from its left will fall into the hole at its
+	   least significant bit and stop.  Since there will be no
+	   carry from its most significant bit, the LSB of the
+	   byte to the left will be unchanged, and the zero will be
+	   detected.
+
+	   2) Is this worthwhile?  Will it ignore everything except
+	   zero bytes?  Suppose every byte of LONGWORD has a bit set
+	   somewhere.  There will be a carry into bit 8.  If bit 8
+	   is set, this will carry into bit 16.  If bit 8 is clear,
+	   one of bits 9-15 must be set, so there will be a carry
+	   into bit 16.  Similarly, there will be a carry into bit
+	   24.  If one of bits 24-31 is set, there will be a carry
+	   into bit 32 (=carry flag), so all of the hole bits will
+	   be changed.
+
+	   3) But wait!  Aren't we looking for C, not zero?
+	   Good point.  So what we do is XOR LONGWORD with a longword,
+	   each of whose bytes is C.  This turns each byte that is C
+	   into a zero.  */
+
+
+	/* Each round the main loop processes 16 bytes.  */
+	ALIGN (4)
+
+L(1):	movl (%eax), %ecx	/* get word (= 4 bytes) in question */
+	movl $0xfefefeff, %edi	/* magic value */
+	xorl %edx, %ecx		/* XOR with word c|c|c|c => bytes of str == c
+				   are now 0 */
+	addl %ecx, %edi		/* add the magic value to the word.  We get
+				   carry bits reported for each byte which
+				   is *not* 0 */
+
+	/* According to the algorithm we had to reverse the effect of the
+	   XOR first and then test the overflow bits.  But because the
+	   following XOR would destroy the carry flag and it would (in a
+	   representation with more than 32 bits) not alter then last
+	   overflow, we can now test this condition.  If no carry is signaled
+	   no overflow must have occurred in the last byte => it was 0.  */
+	jnc L(8)
+
+	/* We are only interested in carry bits that change due to the
+	   previous add, so remove original bits */
+	xorl %ecx, %edi		/* ((word^charmask)+magic)^(word^charmask) */
+
+	/* Now test for the other three overflow bits.  */
+	orl $0xfefefeff, %edi	/* set all non-carry bits */
+	incl %edi		/* add 1: if one carry bit was *not* set
+				   the addition will not result in 0.  */
+
+	/* If at least one byte of the word is C we don't get 0 in %edi.  */
+	jnz L(8)		/* found it => return pointer */
+
+	/* This process is unfolded four times for better performance.
+	   we don't increment the source pointer each time.  Instead we
+	   use offsets and increment by 16 in each run of the loop.  But
+	   before probing for the matching byte we need some extra code
+	   (following LL(13) below).  Even the len can be compared with
+	   constants instead of decrementing each time.  */
+
+	movl 4(%eax), %ecx	/* get word (= 4 bytes) in question */
+	movl $0xfefefeff, %edi	/* magic value */
+	xorl %edx, %ecx		/* XOR with word c|c|c|c => bytes of str == c
+				   are now 0 */
+	addl %ecx, %edi		/* add the magic value to the word.  We get
+				   carry bits reported for each byte which
+				   is *not* 0 */
+	jnc L(7)		/* highest byte is C => return pointer */
+	xorl %ecx, %edi		/* ((word^charmask)+magic)^(word^charmask) */
+	orl $0xfefefeff, %edi	/* set all non-carry bits */
+	incl %edi		/* add 1: if one carry bit was *not* set
+				   the addition will not result in 0.  */
+	jnz L(7)		/* found it => return pointer */
+
+	movl 8(%eax), %ecx	/* get word (= 4 bytes) in question */
+	movl $0xfefefeff, %edi	/* magic value */
+	xorl %edx, %ecx		/* XOR with word c|c|c|c => bytes of str == c
+				   are now 0 */
+	addl %ecx, %edi		/* add the magic value to the word.  We get
+				   carry bits reported for each byte which
+				   is *not* 0 */
+	jnc L(6)		/* highest byte is C => return pointer */
+	xorl %ecx, %edi		/* ((word^charmask)+magic)^(word^charmask) */
+	orl $0xfefefeff, %edi	/* set all non-carry bits */
+	incl %edi		/* add 1: if one carry bit was *not* set
+				   the addition will not result in 0.  */
+	jnz L(6)		/* found it => return pointer */
+
+	movl 12(%eax), %ecx	/* get word (= 4 bytes) in question */
+	movl $0xfefefeff, %edi	/* magic value */
+	xorl %edx, %ecx		/* XOR with word c|c|c|c => bytes of str == c
+				   are now 0 */
+	addl %ecx, %edi		/* add the magic value to the word.  We get
+				   carry bits reported for each byte which
+				   is *not* 0 */
+	jnc L(5)		/* highest byte is C => return pointer */
+	xorl %ecx, %edi		/* ((word^charmask)+magic)^(word^charmask) */
+	orl $0xfefefeff, %edi	/* set all non-carry bits */
+	incl %edi		/* add 1: if one carry bit was *not* set
+				   the addition will not result in 0.  */
+	jnz L(5)		/* found it => return pointer */
+
+	/* Adjust both counters for a full round, i.e. 16 bytes.  */
+	addl $16, %eax
+	jmp L(1)
+	/* add missing source pointer increments */
+L(5):	addl $4, %eax
+L(6):	addl $4, %eax
+L(7):	addl $4, %eax
+
+	/* Test for the matching byte in the word.  %ecx contains a NUL
+	   char in the byte which originally was the byte we are looking
+	   at.  */
+L(8):	testb %cl, %cl		/* test first byte in dword */
+	jz L(9)			/* if zero => return pointer */
+	incl %eax		/* increment source pointer */
+
+	testb %ch, %ch		/* test second byte in dword */
+	jz L(9)			/* if zero => return pointer */
+	incl %eax		/* increment source pointer */
+
+	testl $0xff0000, %ecx	/* test third byte in dword */
+	jz L(9)			/* if zero => return pointer */
+	incl %eax		/* increment source pointer */
+
+	/* No further test needed we we know it is one of the four bytes.  */
+
+L(9):	popl %edi		/* pop saved register */
+
+	ret
+END (__rawmemchr)
+weak_alias (__rawmemchr, rawmemchr)
diff --git a/sysdeps/unix/sysv/linux/bits/posix_opt.h b/sysdeps/unix/sysv/linux/bits/posix_opt.h
index 5ef6d0563d..2a53420578 100644
--- a/sysdeps/unix/sysv/linux/bits/posix_opt.h
+++ b/sysdeps/unix/sysv/linux/bits/posix_opt.h
@@ -1,5 +1,5 @@
 /* Define POSIX options for Linux.
-   Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+   Copyright (C) 1996, 1997, 1999 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -76,4 +76,9 @@
 /* Real-time signals are supported.  */
 #define _POSIX_REALTIME_SIGNALS	1
 
+/* The LFS interface is available, except for the asynchronous I/O.  */
+#define _LFS_LARGEFILE		1
+#define _LFS64_LARGEFILE	1
+#define _LFS64_STDIO		1
+
 #endif	/* bits/posix_opt.h */