From 09e0389eb12491d3e9ef74b299b66efdd67adb1c Mon Sep 17 00:00:00 2001
From: "H.J. Lu"
Date: Wed, 29 Jul 2009 08:40:54 -0700
Subject: Properly restore AVX registers on x86-64.

tst-audit4 and tst-audit5 fail under the AVX emulator because je was
used instead of jne. This patch fixes them.
---
 ChangeLog                      |  4 ++++
 sysdeps/x86_64/dl-trampoline.S | 20 ++++++++++----------
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 23e6906d06..856689f1c0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2009-07-28  H.J. Lu
+
+	* sysdeps/x86_64/dl-trampoline.S: Properly restore AVX registers.
+
 2009-07-29  Ulrich Drepper
 
 	* elf/dl-runtime.c (_dl_fixup): Indicate before _dl_lookup_symbol_x
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 7ecf1b0c64..5a4c6ddecd 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -203,49 +203,49 @@ L(no_avx1):
 	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET)(%rsp), %ymm0
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
 
 L(no_avx2):
@@ -361,13 +361,13 @@ L(no_avx3):
 	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
 	vpmovmskb %xmm2, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
 
 1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
 	vpmovmskb %xmm2, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
 
 L(no_avx4):
-- 
cgit v1.2.3
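
For context on the corrected condition (this note and the sketch below are not part of the patch): the trampoline restores a full 256-bit ymm register from its save slot only when the live low 128 bits still match the copy saved on entry; if they differ, the register was deliberately modified and must be kept. The buggy je inverted that test. The following is a small standalone C sketch using AVX intrinsics that mirrors the check; the function and variable names are invented for illustration and the code is not taken from glibc.

    /* Mirrors the vpcmpeqq / vpmovmskb / cmpl $0xffff / jne sequence above.
       Illustrative only -- not glibc code.  Build with: gcc -mavx demo.c  */
    #include <immintrin.h>
    #include <stdio.h>

    static __m256i
    maybe_restore (__m256i live, __m256i saved)
    {
      /* vpcmpeqq: compare the low 128 bits in 64-bit lanes.  */
      __m128i eq = _mm_cmpeq_epi64 (_mm256_castsi256_si128 (live),
                                    _mm256_castsi256_si128 (saved));
      /* vpmovmskb; cmpl $0xffff: 0xffff means every byte compared equal.  */
      if (_mm_movemask_epi8 (eq) == 0xffff)
        return saved;   /* unchanged: restore the saved 256-bit value */
      return live;      /* changed: keep the new value (the "jne 1f" path) */
    }

    int
    main (void)
    {
      __m256i saved = _mm256_set1_epi64x (42);
      __m256i untouched = saved;
      __m256i modified = _mm256_set1_epi64x (7);
      long long out[4];

      _mm256_storeu_si256 ((__m256i *) out, maybe_restore (untouched, saved));
      printf ("untouched register -> %lld (saved value restored)\n", out[0]);
      _mm256_storeu_si256 ((__m256i *) out, maybe_restore (modified, saved));
      printf ("modified register  -> %lld (new value kept)\n", out[0]);
      return 0;
    }

In the trampoline itself the same test is applied to each of ymm0-ymm7 on entry and to ymm0/ymm1 of the return value, which is why the je-to-jne flip appears ten times in the diff.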