author    | Andrew Senkevich <andrew.n.senkevich@gmail.com> | 2018-03-23 16:19:45 +0100
committer | Aurelien Jarno <aurelien@aurel32.net>            | 2018-12-20 08:14:01 +0100
commit    | 3f949b03473b4ca8b8e69a4e540511dfee39e493
tree      | 21f4fbcbb118638fbc37917154312e97c0073b04
parent    | 682f24d0f3995689f407dee842002099d3604586
Fix i386 memmove issue (bug 22644).
[BZ #22644]
* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
branch conditions.
* string/test-memmove.c (do_test2): New testcase.
(cherry picked from commit cd66c0e584c6d692bc8347b5e72723d02b8a8ada)
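
Background on the fix (summarized here; the details are in the assembly hunks below): the memmove dispatch in memcpy-sse2-unaligned.S chose between the forward and backward copy paths using signed branch conditions (jg/jle) on addresses and lengths. On 32-bit x86 an address at or above 0x80000000 is negative when treated as a signed 32-bit integer, so once a buffer crossed the 2GB boundary the overlap check branched the wrong way and the forward path overwrote source bytes before they were copied. The patch switches those branches to their unsigned counterparts (ja/jbe). The following stand-alone C sketch (illustration only, not glibc code; the addresses are made up) shows how the signed comparison goes wrong:

```c
/* Illustration only (not glibc code): why signed jg/jle-style checks
   misbehave above the 2GB boundary on 32-bit x86.  Addresses are made up.  */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t src = 0x7ffffff0u;       /* source just below 2GB */
  uint32_t len = 0x40u;
  uint32_t src_end = src + len;     /* 0x80000030: crosses the boundary */
  uint32_t dst = 0x7ffffff8u;       /* destination overlaps the source */

  /* Unsigned compare (the fixed ja/jbe branches): correctly reports that
     the destination lies inside the source region, so a plain forward
     copy is not safe.  Prints 0.  */
  printf ("unsigned: src_end <= dst? %d\n", src_end <= dst);

  /* Signed compare (the old jg/jle branches): src_end looks negative, so
     the check wrongly claims the regions do not overlap and the forward
     path would clobber source bytes before they are copied.  Prints 1.  */
  printf ("signed:   src_end <= dst? %d\n",
          (int32_t) src_end <= (int32_t) dst);
  return 0;
}
```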
-rw-r--r-- | ChangeLog                                           |  8
-rw-r--r-- | NEWS                                                |  2
-rw-r--r-- | string/test-memmove.c                               | 57
-rw-r--r-- | sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S | 12
4 files changed, 73 insertions, 6 deletions
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2018-03-23  Andrew Senkevich  <andrew.senkevich@intel.com>
+	    Max Horn  <max@quendi.de>
+
+	[BZ #22644]
+	* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
+	branch conditions.
+	* string/test-memmove.c (do_test2): New testcase.
+
 2018-09-06  Stefan Liebler  <stli@linux.ibm.com>
 
 	* sysdeps/unix/sysv/linux/spawni.c (maybe_script_execute):
--- a/NEWS
+++ b/NEWS
@@ -60,6 +60,8 @@ The following bugs are resolved with this release:
   [21609] x86-64: Align the stack in __tls_get_addr
   [21624] Unsafe alloca allows local attackers to alias stack and heap (CVE-2017-1000366)
   [21654] nss: Fix invalid cast in group merging
+  [22644] string: memmove-sse2-unaligned on 32bit x86 produces garbage when
+    crossing 2GB threshold (CVE-2017-18269)
   [22715] x86-64: Properly align La_x86_64_retval to VEC_SIZE
 
 Version 2.24
diff --git a/string/test-memmove.c b/string/test-memmove.c
index 43433297e5..f44c05d669 100644
--- a/string/test-memmove.c
+++ b/string/test-memmove.c
@@ -245,6 +245,60 @@ do_random_tests (void)
     }
 }
 
+static void
+do_test2 (void)
+{
+  size_t size = 0x20000000;
+  uint32_t * large_buf;
+
+  large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
+		    MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (large_buf == MAP_FAILED)
+    error (77, errno, "Large mmap failed");
+
+  if ((uintptr_t) large_buf > 0x80000000 - 128
+      || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
+    {
+      error (0, 0, "Large mmap allocated improperly");
+      ret = 77;
+      munmap ((void *) large_buf, size);
+      return;
+    }
+
+  size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
+  size_t arr_size = bytes_move / sizeof (uint32_t);
+  size_t i;
+
+  FOR_EACH_IMPL (impl, 0)
+    {
+      for (i = 0; i < arr_size; i++)
+        large_buf[i] = (uint32_t) i;
+
+      uint32_t * dst = &large_buf[33];
+
+#ifdef TEST_BCOPY
+      CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
+#else
+      CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
+#endif
+
+      for (i = 0; i < arr_size; i++)
+	{
+	  if (dst[i] != (uint32_t) i)
+	    {
+	      error (0, 0,
+		     "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
+		     impl->name, dst, large_buf, i);
+	      ret = 1;
+	      break;
+	    }
+	}
+    }
+
+  munmap ((void *) large_buf, size);
+}
+
 int
 test_main (void)
 {
@@ -284,6 +338,9 @@ test_main (void)
     }
 
   do_random_tests ();
+
+  do_test2 ();
+
   return ret;
 }
diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
index 76f34291a3..bb26708d67 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
 	cmp	%edx, %eax
 
 # ifdef USE_AS_MEMMOVE
-	jg	L(check_forward)
+	ja	L(check_forward)
 
 L(mm_len_0_or_more_backward):
 /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
 	jbe	L(mm_len_0_16_bytes_backward)
 
 	cmpl	$32, %ecx
-	jg	L(mm_len_32_or_more_backward)
+	ja	L(mm_len_32_or_more_backward)
 
 /* Copy [0..32] and return.  */
 	movdqu	(%eax), %xmm0
@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
 
 L(mm_len_32_or_more_backward):
 	cmpl	$64, %ecx
-	jg	L(mm_len_64_or_more_backward)
+	ja	L(mm_len_64_or_more_backward)
 
 /* Copy [0..64] and return.  */
 	movdqu	(%eax), %xmm0
@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
 
 L(mm_len_64_or_more_backward):
 	cmpl	$128, %ecx
-	jg	L(mm_len_128_or_more_backward)
+	ja	L(mm_len_128_or_more_backward)
 
 /* Copy [0..128] and return.  */
 	movdqu	(%eax), %xmm0
@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
 	add	%ecx, %eax
 	cmp	%edx, %eax
 	movl	SRC(%esp), %eax
-	jle	L(forward)
+	jbe	L(forward)
 	PUSH (%esi)
 	PUSH (%edi)
 	PUSH (%ebx)
@@ -269,7 +269,7 @@ L(check_forward):
 	add	%edx, %ecx
 	cmp	%eax, %ecx
 	movl	LEN(%esp), %ecx
-	jle	L(forward)
+	jbe	L(forward)
 
 /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
    separately.  */
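
A note on how the new test reaches the bug, assuming the kernel honours the mmap hint used in do_test2 (otherwise the test reports status 77, i.e. unsupported): the 0x20000000-byte mapping is requested at 0x70000000, so copying bytes_move = 0x80000000 - (uintptr_t) large_buf bytes from large_buf to large_buf + 33 is an overlapping move whose destination range ends just past the 2GB boundary, which is exactly where the old signed branches misdispatch. A small stand-alone sketch of that arithmetic (hypothetical values, not glibc code):

```c
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Hypothetical values mirroring do_test2 when the mmap hint is honoured.  */
  uintptr_t buf = 0x70000000u;                     /* start of the large mapping */
  uintptr_t bytes_move = 0x80000000u - buf;        /* bytes up to the 2GB boundary */
  uintptr_t dst = buf + 33 * sizeof (uint32_t);    /* overlapping destination */

  /* The destination range ends slightly past 0x80000000, so the copy
     crosses the 2GB boundary and exercises the fixed branch conditions.  */
  printf ("src: [%#lx, %#lx)\n", (unsigned long) buf,
          (unsigned long) (buf + bytes_move));
  printf ("dst: [%#lx, %#lx)\n", (unsigned long) dst,
          (unsigned long) (dst + bytes_move));
  return 0;
}
```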