aboutsummaryrefslogtreecommitdiff
path: root/sysdeps/sparc/sparc64/addmul_1.S
diff options
context:
space:
mode:
authorUlrich Drepper <drepper@redhat.com>1999-07-14 00:54:57 +0000
committerUlrich Drepper <drepper@redhat.com>1999-07-14 00:54:57 +0000
commitabfbdde177c3a7155070dda1b2cdc8292054cc26 (patch)
treee021306b596381fbf8311d2b7eb294e918ff17c8 /sysdeps/sparc/sparc64/addmul_1.S
parent86421aa57ecfd70963ae66848bd6a6dd3b8e0fe6 (diff)
downloadglibc-abfbdde177c3a7155070dda1b2cdc8292054cc26.tar
glibc-abfbdde177c3a7155070dda1b2cdc8292054cc26.tar.gz
glibc-abfbdde177c3a7155070dda1b2cdc8292054cc26.tar.bz2
glibc-abfbdde177c3a7155070dda1b2cdc8292054cc26.zip
Update.
Diffstat (limited to 'sysdeps/sparc/sparc64/addmul_1.S')
-rw-r--r--sysdeps/sparc/sparc64/addmul_1.S36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/sysdeps/sparc/sparc64/addmul_1.S b/sysdeps/sparc/sparc64/addmul_1.S
index 6782db77df..db8f53656e 100644
--- a/sysdeps/sparc/sparc64/addmul_1.S
+++ b/sysdeps/sparc/sparc64/addmul_1.S
@@ -1,7 +1,7 @@
/* SPARC v9 __mpn_addmul_1 -- Multiply a limb vector with a single limb and
add the product to a second limb vector.
- Copyright (C) 1996 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1999 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
@@ -31,29 +31,26 @@
ENTRY(__mpn_addmul_1)
- !#PROLOGUE# 0
save %sp,-192,%sp
- !#PROLOGUE# 1
sub %g0,%i2,%o7
- sllx %o7,3,%g5
- sub %i1,%g5,%o3
- sub %i0,%g5,%o4
mov 0,%o0 ! zero cy_limb
-
+ sllx %o7,3,%o7
+ sethi %hi(0x80000000),%o2
srl %i3,0,%o1 ! extract low 32 bits of s2_limb
+ sub %i1,%o7,%o3
srlx %i3,32,%i3 ! extract high 32 bits of s2_limb
- mov 1,%o2
- sllx %o2,32,%o2 ! o2 = 0x100000000
+ sub %i0,%o7,%o4
+ add %o2,%o2,%o2 ! o2 = 0x100000000
! hi !
! mid-1 !
! mid-2 !
! lo !
1:
- sllx %o7,3,%g1
- ldx [%o3+%g1],%g5
+ ldx [%o3+%o7],%g5
srl %g5,0,%i0 ! zero hi bits
+ ldx [%o4+%o7],%l1
srlx %g5,32,%g5
mulx %o1,%i0,%i4 ! lo product
mulx %i3,%i0,%i1 ! mid-1 product
@@ -64,25 +61,24 @@ ENTRY(__mpn_addmul_1)
addcc %i1,%l2,%i1 ! add mid products
mov 0,%l0 ! we need the carry from that add...
movcs %xcc,%o2,%l0 ! ...compute it and...
+ sllx %i1,32,%i0 ! align low bits of mid product
add %i5,%l0,%i5 ! ...add to bit 32 of the hi product
- sllx %i1,32,%i0 ! align low bits of mid product
srl %i4,0,%g5 ! zero high 32 bits of lo product
add %i0,%g5,%i0 ! combine into low 64 bits of result
srlx %i1,32,%i1 ! extract high bits of mid product...
+ addcc %i0,%o0,%i0 ! add cy_limb to low 64 bits of result
add %i5,%i1,%i1 ! ...and add them to the high result
- addcc %i0,%o0,%i0 ! add cy_limb to low 64 bits of result
mov 0,%g5
movcs %xcc,1,%g5
- add %o7,1,%o7
- ldx [%o4+%g1],%l1
addcc %l1,%i0,%i0
- movcs %xcc,1,%g5
- stx %i0,[%o4+%g1]
- brnz %o7,1b
+ stx %i0,[%o4+%o7]
+ add %g5,1,%l1
+ movcs %xcc,%l1,%g5
+ addcc %o7,8,%o7
+ bne,pt %xcc,1b
add %i1,%g5,%o0 ! compute new cy_limb
- mov %o0,%i0
jmpl %i7+8, %g0
- restore
+ restore %o0,%g0,%o0
END(__mpn_addmul_1)