author     Ulrich Drepper <drepper@redhat.com>  2004-12-22 20:10:10 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2004-12-22 20:10:10 +0000
commit     a334319f6530564d22e775935d9c91663623a1b4 (patch)
tree       b5877475619e4c938e98757d518bb1e9cbead751 /sysdeps/m68k/m68020
parent     0ecb606cb6cf65de1d9fc8a919bceb4be476c602 (diff)
(CFLAGS-tst-align.c): Add -mpreferred-stack-boundary=4.
Diffstat (limited to 'sysdeps/m68k/m68020')
-rw-r--r--  sysdeps/m68k/m68020/Makefile       |   3
-rw-r--r--  sysdeps/m68k/m68020/addmul_1.S     |  80
-rw-r--r--  sysdeps/m68k/m68020/bits/atomic.h  | 254
-rw-r--r--  sysdeps/m68k/m68020/bits/string.h  |  26
-rw-r--r--  sysdeps/m68k/m68020/mul_1.S        |  87
-rw-r--r--  sysdeps/m68k/m68020/submul_1.S     |  80
-rw-r--r--  sysdeps/m68k/m68020/wordcopy.S     |   1
7 files changed, 531 insertions(+), 0 deletions(-)
diff --git a/sysdeps/m68k/m68020/Makefile b/sysdeps/m68k/m68020/Makefile
new file mode 100644
index 0000000000..b17635467d
--- /dev/null
+++ b/sysdeps/m68k/m68020/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),db2)
+CPPFLAGS += -DHAVE_SPINLOCKS=1 -DHAVE_ASSEM_MC68020_GCC=1
+endif
diff --git a/sysdeps/m68k/m68020/addmul_1.S b/sysdeps/m68k/m68020/addmul_1.S
new file mode 100644
index 0000000000..05d1d8a804
--- /dev/null
+++ b/sysdeps/m68k/m68020/addmul_1.S
@@ -0,0 +1,80 @@
+/* mc68020 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+ the result to a second limb vector.
+
+Copyright (C) 1992, 1994, 1996, 1998 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 2.1 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+/*
+ INPUT PARAMETERS
+ res_ptr (sp + 4)
+ s1_ptr (sp + 8)
+ s1_size (sp + 12)
+ s2_limb (sp + 16)
+*/
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+ TEXT
+ENTRY(__mpn_addmul_1)
+
+#define res_ptr a0
+#define s1_ptr a1
+#define s1_size d2
+#define s2_limb d4
+
+/* Save used registers on the stack. */
+ moveml R(d2)-R(d5),MEM_PREDEC(sp)
+
+/* Copy the arguments to registers. Better use movem? */
+ movel MEM_DISP(sp,20),R(res_ptr)
+ movel MEM_DISP(sp,24),R(s1_ptr)
+ movel MEM_DISP(sp,28),R(s1_size)
+ movel MEM_DISP(sp,32),R(s2_limb)
+
+ eorw #1,R(s1_size)
+ clrl R(d1)
+ clrl R(d5)
+ lsrl #1,R(s1_size)
+ bcc L(L1)
+ subql #1,R(s1_size)
+ subl R(d0),R(d0) /* (d0,cy) <= (0,0) */
+
+L(Loop:)
+ movel MEM_POSTINC(s1_ptr),R(d3)
+ mulul R(s2_limb),R(d1):R(d3)
+ addxl R(d0),R(d3)
+ addxl R(d5),R(d1)
+ addl R(d3),MEM_POSTINC(res_ptr)
+L(L1:) movel MEM_POSTINC(s1_ptr),R(d3)
+ mulul R(s2_limb),R(d0):R(d3)
+ addxl R(d1),R(d3)
+ addxl R(d5),R(d0)
+ addl R(d3),MEM_POSTINC(res_ptr)
+
+ dbf R(s1_size),L(Loop)
+ addxl R(d5),R(d0)
+ subl #0x10000,R(s1_size)
+ bcc L(Loop)
+
+/* Restore used registers from stack frame. */
+ moveml MEM_POSTINC(sp),R(d2)-R(d5)
+
+ rts
+END(__mpn_addmul_1)
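
[Editorial note: the intended result of __mpn_addmul_1 can be stated as a short C reference — illustrative only, assuming 32-bit limbs and a 64-bit intermediate type, not the code glibc actually builds. Each limb of s1_ptr is multiplied by s2_limb, the product is added into the corresponding limb of res_ptr, and the carry out of the most significant limb is returned (in d0 on m68k).]

    #include <stdint.h>

    /* Illustrative C equivalent of __mpn_addmul_1 above; assumes 32-bit
       limbs.  Returns the carry out of the most significant limb.  */
    uint32_t
    ref_addmul_1 (uint32_t *res_ptr, const uint32_t *s1_ptr,
                  uint32_t s1_size, uint32_t s2_limb)
    {
      uint32_t cy = 0;
      for (uint32_t i = 0; i < s1_size; i++)
        {
          uint64_t prod = (uint64_t) s1_ptr[i] * s2_limb + cy;
          uint64_t sum = (uint64_t) res_ptr[i] + (uint32_t) prod;
          res_ptr[i] = (uint32_t) sum;
          cy = (uint32_t) (prod >> 32) + (uint32_t) (sum >> 32);
        }
      return cy;
    }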
diff --git a/sysdeps/m68k/m68020/bits/atomic.h b/sysdeps/m68k/m68020/bits/atomic.h
new file mode 100644
index 0000000000..6b6db71465
--- /dev/null
+++ b/sysdeps/m68k/m68020/bits/atomic.h
@@ -0,0 +1,254 @@
+/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Andreas Schwab <schwab@suse.de>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <stdint.h>
+
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __asm __volatile ("cas%.b %0,%2,%1" \
+ : "=d" (__ret), "+m" (*(mem)) \
+ : "d" (newval), "0" (oldval)); \
+ __ret; })
+
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __asm __volatile ("cas%.w %0,%2,%1" \
+ : "=d" (__ret), "+m" (*(mem)) \
+ : "d" (newval), "0" (oldval)); \
+ __ret; })
+
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __asm __volatile ("cas%.l %0,%2,%1" \
+ : "=d" (__ret), "+m" (*(mem)) \
+ : "d" (newval), "0" (oldval)); \
+ __ret; })
+
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*(mem)) __ret; \
+ __typeof (mem) __memp = (mem); \
+ __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \
+ : "=d" (__ret) \
+ : "d" (newval), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (oldval) \
+ : "memory"); \
+ __ret; })
+
+#define atomic_exchange_acq(mem, newvalue) \
+ ({ __typeof (*(mem)) __result = *(mem); \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("1: cas%.b %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "+m" (*(mem)) \
+ : "d" (newvalue), "0" (__result)); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("1: cas%.w %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "+m" (*(mem)) \
+ : "d" (newvalue), "0" (__result)); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("1: cas%.l %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "+m" (*(mem)) \
+ : "d" (newvalue), "0" (__result)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \
+ " jbne 1b" \
+ : "=d" (__result) \
+ : "d" (newvalue), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (__result) \
+ : "memory"); \
+ } \
+ __result; })
+
+#define atomic_exchange_and_add(mem, value) \
+ ({ __typeof (*(mem)) __result = *(mem); \
+ __typeof (*(mem)) __temp; \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("1: move%.b %0,%2;" \
+ " add%.b %3,%2;" \
+ " cas%.b %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "+m" (*(mem)), \
+ "=&d" (__temp) \
+ : "d" (value), "0" (__result)); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("1: move%.w %0,%2;" \
+ " add%.w %3,%2;" \
+ " cas%.w %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "+m" (*(mem)), \
+ "=&d" (__temp) \
+ : "d" (value), "0" (__result)); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("1: move%.l %0,%2;" \
+ " add%.l %3,%2;" \
+ " cas%.l %0,%2,%1;" \
+ " jbne 1b" \
+ : "=d" (__result), "+m" (*(mem)), \
+ "=&d" (__temp) \
+ : "d" (value), "0" (__result)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __asm __volatile ("1: move%.l %0,%1;" \
+ " move%.l %R0,%R1;" \
+ " add%.l %2,%1;" \
+ " addx%.l %R2,%R1;" \
+ " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
+ " jbne 1b" \
+ : "=d" (__result), "=&d" (__temp) \
+ : "d" (value), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (__result) \
+ : "memory"); \
+ } \
+ __result; })
+
+#define atomic_add(mem, value) \
+ (void) ({ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("add%.b %1,%0" \
+ : "+m" (*(mem)) \
+ : "id" (value)); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("add%.w %1,%0" \
+ : "+m" (*(mem)) \
+ : "id" (value)); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("add%.l %1,%0" \
+ : "+m" (*(mem)) \
+ : "id" (value)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __oldval = *__memp; \
+ __typeof (*(mem)) __temp; \
+ __asm __volatile ("1: move%.l %0,%1;" \
+ " move%.l %R0,%R1;" \
+ " add%.l %2,%1;" \
+ " addx%.l %R2,%R1;" \
+ " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
+ " jbne 1b" \
+ : "=d" (__oldval), "=&d" (__temp) \
+ : "d" (value), "r" (__memp), \
+ "r" ((char *) __memp + 4), "0" (__oldval) \
+ : "memory"); \
+ } \
+ })
+
+#define atomic_increment_and_test(mem) \
+ ({ char __result; \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("addq%.b %#1,%1; seq %0" \
+ : "=dm" (__result), "+m" (*(mem))); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("addq%.w %#1,%1; seq %0" \
+ : "=dm" (__result), "+m" (*(mem))); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("addq%.l %#1,%1; seq %0" \
+ : "=dm" (__result), "+m" (*(mem))); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __oldval = *__memp; \
+ __typeof (*(mem)) __temp; \
+ __asm __volatile ("1: move%.l %1,%2;" \
+ " move%.l %R1,%R2;" \
+ " addq%.l %#1,%2;" \
+ " addx%.l %5,%R2;" \
+ " seq %0;" \
+ " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
+ " jbne 1b" \
+ : "=&dm" (__result), "=d" (__oldval), \
+ "=&d" (__temp) \
+ : "r" (__memp), "r" ((char *) __memp + 4), \
+ "d" (0), "1" (__oldval) \
+ : "memory"); \
+ } \
+ __result; })
+
+#define atomic_decrement_and_test(mem) \
+ ({ char __result; \
+ if (sizeof (*(mem)) == 1) \
+ __asm __volatile ("subq%.b %#1,%1; seq %0" \
+ : "=dm" (__result), "+m" (*(mem))); \
+ else if (sizeof (*(mem)) == 2) \
+ __asm __volatile ("subq%.w %#1,%1; seq %0" \
+ : "=dm" (__result), "+m" (*(mem))); \
+ else if (sizeof (*(mem)) == 4) \
+ __asm __volatile ("subq%.l %#1,%1; seq %0" \
+ : "=dm" (__result), "+m" (*(mem))); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __oldval = *__memp; \
+ __typeof (*(mem)) __temp; \
+ __asm __volatile ("1: move%.l %1,%2;" \
+ " move%.l %R1,%R2;" \
+ " subq%.l %#1,%2;" \
+ " subx%.l %5,%R2;" \
+ " seq %0;" \
+ " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
+ " jbne 1b" \
+ : "=&dm" (__result), "=d" (__oldval), \
+ "=&d" (__temp) \
+ : "r" (__memp), "r" ((char *) __memp + 4), \
+ "d" (0), "1" (__oldval) \
+ : "memory"); \
+ } \
+ __result; })
+
+#define atomic_bit_set(mem, bit) \
+ __asm __volatile ("bfset %0{%1,#1}" \
+ : "+m" (*(mem)) \
+ : "di" (sizeof (*(mem)) * 8 - (bit) - 1))
+
+#define atomic_bit_test_set(mem, bit) \
+ ({ char __result; \
+ __asm __volatile ("bfset %1{%2,#1}; sne %0" \
+ : "=dm" (__result), "+m" (*(mem)) \
+ : "di" (sizeof (*(mem)) * 8 - (bit) - 1)); \
+ __result; })
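
[Editorial note: every read-modify-write macro above follows the same shape — read the old value, compute the new one, and retry a cas (or cas2 for 64-bit quantities) until the store succeeds with the memory word unchanged. A minimal sketch of that retry loop in plain C, using GCC's __sync_val_compare_and_swap as a stand-in for the cas-based glibc-internal macro; the function name is hypothetical.]

    #include <stdint.h>

    /* Illustrative only: the retry loop that atomic_exchange_and_add
       expands to, written in plain C.  __sync_val_compare_and_swap stands
       in for the cas-based __arch_compare_and_exchange_val_32_acq.  */
    static inline uint32_t
    fetch_and_add_32 (volatile uint32_t *mem, uint32_t value)
    {
      uint32_t oldval, newval;
      do
        {
          oldval = *mem;             /* Snapshot the current contents.  */
          newval = oldval + value;   /* Compute the desired new value.  */
        }
      while (__sync_val_compare_and_swap (mem, oldval, newval) != oldval);
      return oldval;                 /* Old value, as the macro returns.  */
    }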
diff --git a/sysdeps/m68k/m68020/bits/string.h b/sysdeps/m68k/m68020/bits/string.h
new file mode 100644
index 0000000000..84be224b73
--- /dev/null
+++ b/sysdeps/m68k/m68020/bits/string.h
@@ -0,0 +1,26 @@
+/* Optimized, inlined string functions. m680x0 version, x >= 2.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _STRING_H
+# error "Never use <bits/string.h> directly; include <string.h> instead."
+#endif
+
+/* Currently the only purpose of this file is to tell the generic inline
+ macros that unaligned memory access is possible. */
+#define _STRING_ARCH_unaligned 1
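
[Editorial note: the generic string inlines test _STRING_ARCH_unaligned to decide whether whole words may be fetched from arbitrary addresses, which the 68020 and later permit in hardware. A minimal sketch of the kind of choice this macro drives; the helper name is hypothetical, not a glibc interface.]

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper, for illustration only.  */
    static inline uint32_t
    load_word (const void *p)
    {
    #if defined _STRING_ARCH_unaligned && _STRING_ARCH_unaligned
      /* m68020 and later tolerate unaligned word loads.  */
      return *(const uint32_t *) p;
    #else
      uint32_t w;
      memcpy (&w, p, sizeof w);      /* Byte-safe on strict-alignment CPUs.  */
      return w;
    #endif
    }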
diff --git a/sysdeps/m68k/m68020/mul_1.S b/sysdeps/m68k/m68020/mul_1.S
new file mode 100644
index 0000000000..f3e450ed93
--- /dev/null
+++ b/sysdeps/m68k/m68020/mul_1.S
@@ -0,0 +1,87 @@
+/* mc68020 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+ the result in a second limb vector.
+
+Copyright (C) 1992, 1994, 1996, 1998 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 2.1 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+/*
+ INPUT PARAMETERS
+ res_ptr (sp + 4)
+ s1_ptr (sp + 8)
+ s1_size (sp + 12)
+ s2_limb (sp + 16)
+*/
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+ TEXT
+ENTRY(__mpn_mul_1)
+
+#define res_ptr a0
+#define s1_ptr a1
+#define s1_size d2
+#define s2_limb d4
+
+/* Save used registers on the stack. */
+ moveml R(d2)-R(d4),MEM_PREDEC(sp)
+#if 0
+ movel R(d2),MEM_PREDEC(sp)
+ movel R(d3),MEM_PREDEC(sp)
+ movel R(d4),MEM_PREDEC(sp)
+#endif
+
+/* Copy the arguments to registers. Better use movem? */
+ movel MEM_DISP(sp,16),R(res_ptr)
+ movel MEM_DISP(sp,20),R(s1_ptr)
+ movel MEM_DISP(sp,24),R(s1_size)
+ movel MEM_DISP(sp,28),R(s2_limb)
+
+ eorw #1,R(s1_size)
+ clrl R(d1)
+ lsrl #1,R(s1_size)
+ bcc L(L1)
+ subql #1,R(s1_size)
+ subl R(d0),R(d0) /* (d0,cy) <= (0,0) */
+
+L(Loop:)
+ movel MEM_POSTINC(s1_ptr),R(d3)
+ mulul R(s2_limb),R(d1):R(d3)
+ addxl R(d0),R(d3)
+ movel R(d3),MEM_POSTINC(res_ptr)
+L(L1:) movel MEM_POSTINC(s1_ptr),R(d3)
+ mulul R(s2_limb),R(d0):R(d3)
+ addxl R(d1),R(d3)
+ movel R(d3),MEM_POSTINC(res_ptr)
+
+ dbf R(s1_size),L(Loop)
+ clrl R(d3)
+ addxl R(d3),R(d0)
+ subl #0x10000,R(s1_size)
+ bcc L(Loop)
+
+/* Restore used registers from stack frame. */
+ moveml MEM_POSTINC(sp),R(d2)-R(d4)
+#if 0
+ movel MEM_POSTINC(sp),R(d4)
+ movel MEM_POSTINC(sp),R(d3)
+ movel MEM_POSTINC(sp),R(d2)
+#endif
+ rts
+END(__mpn_mul_1)
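
[Editorial note: __mpn_mul_1 is the same inner loop as __mpn_addmul_1, except that each product limb is stored into res_ptr rather than accumulated into it (and __mpn_submul_1, below, subtracts instead). The loop handles two limbs per pass; the eorw/lsrl prologue halves the count and selects the entry point for odd sizes, dbf counts down the low 16 bits, and the trailing subl #0x10000/bcc pair carries the count past 65535 iterations. An illustrative C equivalent, again assuming 32-bit limbs:]

    #include <stdint.h>

    /* Illustrative C equivalent of __mpn_mul_1; assumes 32-bit limbs.  */
    uint32_t
    ref_mul_1 (uint32_t *res_ptr, const uint32_t *s1_ptr,
               uint32_t s1_size, uint32_t s2_limb)
    {
      uint32_t cy = 0;
      for (uint32_t i = 0; i < s1_size; i++)
        {
          uint64_t prod = (uint64_t) s1_ptr[i] * s2_limb + cy;
          res_ptr[i] = (uint32_t) prod;   /* Store, rather than add.  */
          cy = (uint32_t) (prod >> 32);
        }
      return cy;
    }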
diff --git a/sysdeps/m68k/m68020/submul_1.S b/sysdeps/m68k/m68020/submul_1.S
new file mode 100644
index 0000000000..7522046b43
--- /dev/null
+++ b/sysdeps/m68k/m68020/submul_1.S
@@ -0,0 +1,80 @@
+/* mc68020 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+ the result from a second limb vector.
+
+Copyright (C) 1992, 1994, 1996, 1998 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 2.1 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+/*
+ INPUT PARAMETERS
+ res_ptr (sp + 4)
+ s1_ptr (sp + 8)
+ s1_size (sp + 12)
+ s2_limb (sp + 16)
+*/
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+ TEXT
+ENTRY(__mpn_submul_1)
+
+#define res_ptr a0
+#define s1_ptr a1
+#define s1_size d2
+#define s2_limb d4
+
+/* Save used registers on the stack. */
+ moveml R(d2)-R(d5),MEM_PREDEC(sp)
+
+/* Copy the arguments to registers. Better use movem? */
+ movel MEM_DISP(sp,20),R(res_ptr)
+ movel MEM_DISP(sp,24),R(s1_ptr)
+ movel MEM_DISP(sp,28),R(s1_size)
+ movel MEM_DISP(sp,32),R(s2_limb)
+
+ eorw #1,R(s1_size)
+ clrl R(d1)
+ clrl R(d5)
+ lsrl #1,R(s1_size)
+ bcc L(L1)
+ subql #1,R(s1_size)
+ subl R(d0),R(d0) /* (d0,cy) <= (0,0) */
+
+L(Loop:)
+ movel MEM_POSTINC(s1_ptr),R(d3)
+ mulul R(s2_limb),R(d1):R(d3)
+ addxl R(d0),R(d3)
+ addxl R(d5),R(d1)
+ subl R(d3),MEM_POSTINC(res_ptr)
+L(L1:) movel MEM_POSTINC(s1_ptr),R(d3)
+ mulul R(s2_limb),R(d0):R(d3)
+ addxl R(d1),R(d3)
+ addxl R(d5),R(d0)
+ subl R(d3),MEM_POSTINC(res_ptr)
+
+ dbf R(s1_size),L(Loop)
+ addxl R(d5),R(d0)
+ subl #0x10000,R(s1_size)
+ bcc L(Loop)
+
+/* Restore used registers from stack frame. */
+ moveml MEM_POSTINC(sp),R(d2)-R(d5)
+
+ rts
+END(__mpn_submul_1)
diff --git a/sysdeps/m68k/m68020/wordcopy.S b/sysdeps/m68k/m68020/wordcopy.S
new file mode 100644
index 0000000000..4fb1a4518f
--- /dev/null
+++ b/sysdeps/m68k/m68020/wordcopy.S
@@ -0,0 +1 @@
+/* Empty, not needed. */