path: root/sysdeps/x86_64/addmul_1.S
author     Ulrich Drepper <drepper@redhat.com>    2010-09-02 23:36:25 -0700
committer  Ulrich Drepper <drepper@redhat.com>    2010-09-02 23:36:25 -0700
commit     0959ffc97b738c489087bcf45578c1580a87e66d (patch)
tree       ac76fbfa5e53376a579a3220a4a7873624e4a296 /sysdeps/x86_64/addmul_1.S
parent     ece298407076558531796450af39199aa0b34bef (diff)
Update x86-64 mpn routines from GMP 5.0.1.
Diffstat (limited to 'sysdeps/x86_64/addmul_1.S')
-rw-r--r--  sysdeps/x86_64/addmul_1.S  115
1 file changed, 92 insertions, 23 deletions
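
For reference, __mpn_addmul_1 multiplies a limb vector {up, n} by a single limb v0 and adds the products into a second limb vector {rp, n}, returning the carry-out limb. The C sketch below shows only those semantics; it is not the GMP code, it assumes 64-bit limbs with unsigned __int128 available for the 64x64->128 multiply, and the ref_addmul_1 name is made up for illustration.

  #include <stdint.h>

  /* Reference semantics of __mpn_addmul_1 (sketch, not the GMP source):
     {rp, n} += {up, n} * v0, returning the carry-out limb.  */
  static uint64_t
  ref_addmul_1 (uint64_t *rp, const uint64_t *up, int64_t n, uint64_t v0)
  {
    uint64_t carry = 0;
    for (int64_t i = 0; i < n; i++)
      {
        /* 64x64 -> 128-bit multiply, plus the old limb and the carry-in;
           the sum cannot overflow 128 bits.  */
        unsigned __int128 prod = (unsigned __int128) up[i] * v0 + rp[i] + carry;
        rp[i] = (uint64_t) prod;                /* low limb back into rp  */
        carry = (uint64_t) (prod >> 64);        /* high limb propagates   */
      }
    return carry;
  }

The register defines in the new code (rp in %rdi, up in %rsi, n in %rdx, v0 in %rcx) match this argument order under the x86-64 SysV calling convention.
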
diff --git a/sysdeps/x86_64/addmul_1.S b/sysdeps/x86_64/addmul_1.S
index bdb5226a33..e997896703 100644
--- a/sysdeps/x86_64/addmul_1.S
+++ b/sysdeps/x86_64/addmul_1.S
@@ -1,6 +1,6 @@
-/* AMD64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+/* x86-64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
the result to a second limb vector.
- Copyright (C) 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003,2004,2005,2007,2008,2009 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
@@ -21,26 +21,95 @@
#include "sysdep.h"
#include "asm-syntax.h"
+#define rp %rdi
+#define up %rsi
+#define n %rdx
+#define v0 %rcx
+
+#ifndef func
+# define func __mpn_addmul_1
+# define ADDSUB add
+#endif
+
.text
-ENTRY (__mpn_addmul_1)
- movq %rdx, %r11
- leaq (%rsi,%rdx,8), %rsi
- leaq (%rdi,%rdx,8), %rdi
- negq %r11
- xorl %r8d, %r8d
- xorl %r10d, %r10d
- .p2align 2
-L(loop):
- movq (%rsi,%r11,8), %rax
- mulq %rcx
- addq (%rdi,%r11,8), %rax
- adcq %r10, %rdx
- addq %r8, %rax
- movq %r10, %r8
- movq %rax, (%rdi,%r11,8)
- adcq %rdx, %r8
- incq %r11
- jne L(loop)
- movq %r8, %rax
+ENTRY (func)
+ push %rbx
+ push %rbp
+ lea (%rdx), %rbx
+ neg %rbx
+
+ mov (up), %rax
+ mov (rp), %r10
+
+ lea -16(rp,%rdx,8), rp
+ lea (up,%rdx,8), up
+ mul %rcx
+
+ bt $0, %ebx
+ jc L(odd)
+
+ lea (%rax), %r11
+ mov 8(up,%rbx,8), %rax
+ lea (%rdx), %rbp
+ mul %rcx
+ add $2, %rbx
+ jns L(n2)
+
+ lea (%rax), %r8
+ mov (up,%rbx,8), %rax
+ lea (%rdx), %r9
+ jmp L(mid)
+
+L(odd): add $1, %rbx
+ jns L(n1)
+
+ lea (%rax), %r8
+ mov (up,%rbx,8), %rax
+ lea (%rdx), %r9
+ mul %rcx
+ lea (%rax), %r11
+ mov 8(up,%rbx,8), %rax
+ lea (%rdx), %rbp
+ jmp L(e)
+
+ .p2align 4
+L(top): mul %rcx
+ ADDSUB %r8, %r10
+ lea (%rax), %r8
+ mov (up,%rbx,8), %rax
+ adc %r9, %r11
+ mov %r10, -8(rp,%rbx,8)
+ mov (rp,%rbx,8), %r10
+ lea (%rdx), %r9
+ adc $0, %rbp
+L(mid): mul %rcx
+ ADDSUB %r11, %r10
+ lea (%rax), %r11
+ mov 8(up,%rbx,8), %rax
+ adc %rbp, %r8
+ mov %r10, (rp,%rbx,8)
+ mov 8(rp,%rbx,8), %r10
+ lea (%rdx), %rbp
+ adc $0, %r9
+L(e): add $2, %rbx
+ js L(top)
+
+ mul %rcx
+ ADDSUB %r8, %r10
+ adc %r9, %r11
+ mov %r10, -8(rp)
+ adc $0, %rbp
+L(n2): mov (rp), %r10
+ ADDSUB %r11, %r10
+ adc %rbp, %rax
+ mov %r10, (rp)
+ adc $0, %rdx
+L(n1): mov 8(rp), %r10
+ ADDSUB %rax, %r10
+ mov %r10, 8(rp)
+ mov %ebx, %eax /* zero rax */
+ adc %rdx, %rax
+ pop %rbp
+ pop %rbx
ret
-END (__mpn_addmul_1)
+END (func)
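
The #ifndef func block near the top of the new code parameterizes the routine: built as-is it defines __mpn_addmul_1 with ADDSUB expanding to add, and a sibling source can presumably define func and ADDSUB before including this file to get the subtractive variant __mpn_submul_1 from the same loop body. As a hedged illustration of what that variant would compute (a sketch under the same 64-bit-limb assumption as above, with a made-up ref_submul_1 name, not the GMP source):

  static uint64_t
  ref_submul_1 (uint64_t *rp, const uint64_t *up, int64_t n, uint64_t v0)
  {
    uint64_t borrow = 0;                        /* debt owed at the next limb */
    for (int64_t i = 0; i < n; i++)
      {
        unsigned __int128 prod = (unsigned __int128) up[i] * v0 + borrow;
        uint64_t lo = (uint64_t) prod;
        uint64_t hi = (uint64_t) (prod >> 64);
        uint64_t r  = rp[i] - lo;
        borrow = hi + (r > rp[i]);              /* extra borrow if lo > rp[i] */
        rp[i] = r;
      }
    return borrow;
  }

In the assembly the only textual difference between the two builds is whether ADDSUB expands to add or sub; the borrow generated by sub is absorbed by the same adc chain that propagates carries in the additive case.
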