Diffstat (limited to 'sysdeps/x86_64/addmul_1.S')
-rw-r--r--	sysdeps/x86_64/addmul_1.S	115
1 file changed, 92 insertions(+), 23 deletions(-)
diff --git a/sysdeps/x86_64/addmul_1.S b/sysdeps/x86_64/addmul_1.S
index bdb5226a33..e997896703 100644
--- a/sysdeps/x86_64/addmul_1.S
+++ b/sysdeps/x86_64/addmul_1.S
@@ -1,6 +1,6 @@
-/* AMD64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+/* x86-64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
    the result to a second limb vector.
-   Copyright (C) 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003,2004,2005,2007,2008,2009 Free Software Foundation, Inc.
    This file is part of the GNU MP Library.
 
    The GNU MP Library is free software; you can redistribute it and/or modify
@@ -21,26 +21,95 @@
 #include "sysdep.h"
 #include "asm-syntax.h"
 
+#define rp	%rdi
+#define up	%rsi
+#define n	%rdx
+#define v0	%rcx
+
+#ifndef func
+# define func __mpn_addmul_1
+# define ADDSUB add
+#endif
+
 	.text
-ENTRY (__mpn_addmul_1)
-	movq	%rdx, %r11
-	leaq	(%rsi,%rdx,8), %rsi
-	leaq	(%rdi,%rdx,8), %rdi
-	negq	%r11
-	xorl	%r8d, %r8d
-	xorl	%r10d, %r10d
-	.p2align 2
-L(loop):
-	movq	(%rsi,%r11,8), %rax
-	mulq	%rcx
-	addq	(%rdi,%r11,8), %rax
-	adcq	%r10, %rdx
-	addq	%r8, %rax
-	movq	%r10, %r8
-	movq	%rax, (%rdi,%r11,8)
-	adcq	%rdx, %r8
-	incq	%r11
-	jne	L(loop)
-	movq	%r8, %rax
+ENTRY (func)
+	push	%rbx
+	push	%rbp
+	lea	(%rdx), %rbx
+	neg	%rbx
+
+	mov	(up), %rax
+	mov	(rp), %r10
+
+	lea	-16(rp,%rdx,8), rp
+	lea	(up,%rdx,8), up
+	mul	%rcx
+
+	bt	$0, %ebx
+	jc	L(odd)
+
+	lea	(%rax), %r11
+	mov	8(up,%rbx,8), %rax
+	lea	(%rdx), %rbp
+	mul	%rcx
+	add	$2, %rbx
+	jns	L(n2)
+
+	lea	(%rax), %r8
+	mov	(up,%rbx,8), %rax
+	lea	(%rdx), %r9
+	jmp	L(mid)
+
+L(odd):	add	$1, %rbx
+	jns	L(n1)
+
+	lea	(%rax), %r8
+	mov	(up,%rbx,8), %rax
+	lea	(%rdx), %r9
+	mul	%rcx
+	lea	(%rax), %r11
+	mov	8(up,%rbx,8), %rax
+	lea	(%rdx), %rbp
+	jmp	L(e)
+
+	.p2align 4
+L(top):	mul	%rcx
+	ADDSUB	%r8, %r10
+	lea	(%rax), %r8
+	mov	(up,%rbx,8), %rax
+	adc	%r9, %r11
+	mov	%r10, -8(rp,%rbx,8)
+	mov	(rp,%rbx,8), %r10
+	lea	(%rdx), %r9
+	adc	$0, %rbp
+L(mid):	mul	%rcx
+	ADDSUB	%r11, %r10
+	lea	(%rax), %r11
+	mov	8(up,%rbx,8), %rax
+	adc	%rbp, %r8
+	mov	%r10, (rp,%rbx,8)
+	mov	8(rp,%rbx,8), %r10
+	lea	(%rdx), %rbp
+	adc	$0, %r9
+L(e):	add	$2, %rbx
+	js	L(top)
+
+	mul	%rcx
+	ADDSUB	%r8, %r10
+	adc	%r9, %r11
+	mov	%r10, -8(rp)
+	adc	$0, %rbp
+L(n2):	mov	(rp), %r10
+	ADDSUB	%r11, %r10
+	adc	%rbp, %rax
+	mov	%r10, (rp)
+	adc	$0, %rdx
+L(n1):	mov	8(rp), %r10
+	ADDSUB	%rax, %r10
+	mov	%r10, 8(rp)
+	mov	%ebx, %eax	/* zero rax */
+	adc	%rdx, %rax
+	pop	%rbp
+	pop	%rbx
 	ret
-END (__mpn_addmul_1)
+END (func)
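
For reference, both the old and new bodies implement the mpn "addmul_1" primitive: multiply the n-limb vector at up by the single limb v0 and accumulate the products into the n-limb vector at rp, returning the final carry limb. Below is a minimal C sketch of that semantics (not glibc code: it assumes 64-bit limbs and a compiler providing unsigned __int128, and the names ref_addmul_1 and limb_t are illustrative only):

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t limb_t;   /* one 64-bit limb, as in the assembly above */

    /* Reference model of __mpn_addmul_1: rp[0..n) += up[0..n) * v0,
       returning the carry out of the top limb.  */
    static limb_t
    ref_addmul_1 (limb_t *rp, const limb_t *up, size_t n, limb_t v0)
    {
      limb_t cy = 0;
      for (size_t i = 0; i < n; i++)
        {
          /* 64x64->128 multiply, the C analogue of mul's %rdx:%rax result;
             adding rp[i] and cy cannot overflow 128 bits.  */
          unsigned __int128 p = (unsigned __int128) up[i] * v0 + rp[i] + cy;
          rp[i] = (limb_t) p;          /* low limb back to memory */
          cy = (limb_t) (p >> 64);     /* high limb feeds the next iteration */
        }
      return cy;
    }

The rewrite unrolls this loop two ways: the bt $0, %ebx parity test selects the entry point, the counter advances by 2 per iteration, and the L(top)/L(mid) halves alternate between the %r8/%r9 and %r11/%rbp register pairs so carry propagation for one limb overlaps the mul for the next. The #ifndef func / ADDSUB guard suggests the file is meant to be shared with the subtracting variant: pre-defining func as __mpn_submul_1 and ADDSUB as sub before including this file would turn each accumulate into a borrow-propagating subtract.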