summaryrefslogtreecommitdiff
path: root/gcc/config/sparc
diff options
context:
space:
mode:
authorDoug Evans <dje@gnu.org>1994-11-18 00:10:29 +0000
committerDoug Evans <dje@gnu.org>1994-11-18 00:10:29 +0000
commitb3ab657fbf21a054e7deaf8a6e61f2f8fdd294b0 (patch)
tree18f486589a6fd5ce1fd03312d65698762fcf929b /gcc/config/sparc
parent0d64f74cb323b4f1d9ea6b8b1b4d525bf9cee469 (diff)
downloadgcc-b3ab657fbf21a054e7deaf8a6e61f2f8fdd294b0.tar.gz
Initial revision
From-SVN: r8497
Diffstat (limited to 'gcc/config/sparc')
-rw-r--r--gcc/config/sparc/aout.h25
-rw-r--r--gcc/config/sparc/lb1spc.asm486
-rw-r--r--gcc/config/sparc/lb1spl.asm246
-rw-r--r--gcc/config/sparc/litecoff.h126
-rw-r--r--gcc/config/sparc/t-sparcbare14
-rw-r--r--gcc/config/sparc/t-sparclite17
6 files changed, 914 insertions, 0 deletions
diff --git a/gcc/config/sparc/aout.h b/gcc/config/sparc/aout.h
new file mode 100644
index 00000000000..265d9ba63ff
--- /dev/null
+++ b/gcc/config/sparc/aout.h
@@ -0,0 +1,25 @@
+/* Definitions of target machine for GNU compiler, for SPARC using a.out.
+ Copyright (C) 1994 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "aoutos.h" /* A.out definitions */
+#include "sparc/sparc.h" /* SPARC definitions */
+
+/* Replace the inherited predefines: identify the target CPU and
+   advertise __GCC_NEW_VARARGS__ (presumably the newer varargs
+   convention — confirm against sparc.h/va-sparc.h).  */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__"
diff --git a/gcc/config/sparc/lb1spc.asm b/gcc/config/sparc/lb1spc.asm
new file mode 100644
index 00000000000..c74ef6a93a5
--- /dev/null
+++ b/gcc/config/sparc/lb1spc.asm
@@ -0,0 +1,486 @@
+/* This is an assembly language implementation of libgcc1.c for the sparc
+ processor.
+
+ These routines are derived from the Sparc Architecture Manual, version 8,
+ slightly edited to match the desired calling convention, and also to
+ optimize them for our purposes. */
+
+#ifdef L_mulsi3
+/* .umul: 32x32 -> 32-bit unsigned multiply via the mulscc step insn.
+   In:   %o0 = multiplier, %o1 = multiplicand.
+   Out:  %o0 = low 32 bits of the product (the high word is not valid;
+         see the note after the long-multiply loop).
+   Leaf routine: clobbers %o4, %o5, %y and the condition codes.
+   If the multiplier fits in 12 bits, only 12 mulscc steps are needed
+   (mul_shortway); otherwise all 32 steps are run.  */
+.text
+ .align 4
+ .global .umul
+ .proc 4
+.umul:
+ or %o0, %o1, %o4 ! logical or of multiplier and multiplicand
+ mov %o0, %y ! multiplier to Y register
+ andncc %o4, 0xfff, %o5 ! mask out lower 12 bits
+ be mul_shortway ! can do it the short way
+ andcc %g0, %g0, %o4 ! zero the partial product and clear NV cc
+ !
+ ! long multiply
+ !
+ mulscc %o4, %o1, %o4 ! first iteration of 33
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 32nd iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ ! the upper 32 bits of product are wrong, but we do not care
+ retl
+ rd %y, %o0 ! low word of product accumulated in %y
+ !
+ ! short multiply: multiplier < 2^12, so 12 steps suffice; the
+ ! result is split between the partial product (%o4) and %y and
+ ! must be merged with a 12/20-bit shift pair below.
+ !
+mul_shortway:
+ mulscc %o4, %o1, %o4 ! first iteration of 13
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 12th iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ rd %y, %o5
+ sll %o4, 12, %o4 ! left shift partial product by 12 bits
+ srl %o5, 20, %o5 ! right shift partial product by 20 bits
+ retl
+ or %o5, %o4, %o0 ! merge for true product
+#endif
+
+#ifdef L_divsi3
+/* .udiv / .div: 32-bit unsigned / signed integer division (quotient only).
+   In:   %o0 = dividend, %o1 = divisor (seen as %i0/%i1 after the SAVE).
+   Out:  %o0 = quotient.  Executes trap 2 ("te 2") when the divisor is 0.
+   Each entry point does a SAVE, so the body works in a fresh register
+   window; %i2 records the sign of the result (always 0 for .udiv, the
+   XOR of the operand signs for .div), and the magnitudes are divided by
+   a radix-8 non-restoring algorithm (3 quotient bits per divloop pass),
+   with a single-bit path for very large dividends.  */
+.text
+ .align 4
+ .global .udiv
+ .proc 4
+.udiv:
+ save %sp, -64, %sp
+ b divide
+ mov 0, %i2 ! result always positive
+ .global .div
+ .proc 4
+.div:
+ save %sp, -64, %sp
+ orcc %i1, %i0, %g0 ! is either operand negative
+ bge divide ! if not, skip this junk
+ xor %i1, %i0, %i2 ! record sign of result in sign of %i2
+ tst %i1
+ bge 2f
+ tst %i0
+ ! %i1 < 0
+ bge divide
+ neg %i1
+2: ! %i0 < 0
+ neg %i0
+ ! FALL THROUGH
+divide:
+ ! Compute size of quotient, scale comparand.
+ orcc %i1, %g0, %l1 ! movcc %i1, %l1
+ te 2 ! if %i1 = 0
+ mov %i0, %i3
+ mov 0, %i2
+ sethi %hi(1<<(32-2-1)), %l3
+ cmp %i3, %l3
+ blu not_really_big
+ mov 0, %l0
+ !
+ ! Here, the %i0 is >= 2^(31-3) or so. We must be careful here,
+ ! as our usual 3-at-a-shot divide step will cause overflow and havoc.
+ ! The total number of bits in the result here is 3*%l0+%l4, where
+ ! %l4 <= 3.
+ ! Compute %l0 in an unorthodox manner: know we need to Shift %l1 into
+ ! the top decade: so do not even bother to compare to %i3.
+1: cmp %l1, %l3
+ bgeu 3f
+ mov 1, %l4
+ sll %l1, 3, %l1
+ b 1b
+ inc %l0
+ !
+ ! Now compute %l4
+ !
+2: addcc %l1, %l1, %l1
+ bcc not_too_big
+ add %l4, 1, %l4
+ !
+ ! We are here if the %i1 overflowed when Shifting.
+ ! This means that %i3 has the high-order bit set.
+ ! Restore %l1 and subtract from %i3.
+ sll %l3, 2, %l3
+ srl %l1, 1, %l1
+ add %l1, %l3, %l1
+ b do_single_div
+ dec %l4
+not_too_big:
+3: cmp %l1, %i3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ ! %l1 > %i3: went too far: back up 1 step
+ ! srl %l1, 1, %l1
+ ! dec %l4
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %i3 >= %l1, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %i3 >= 0. Because both %i3 and %l1 may have the
+ ! high-order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+do_single_div:
+ deccc %l4
+ bl end_regular_divide
+ nop
+ sub %i3, %l1, %i3
+ mov 1, %i2
+ b end_single_divloop
+ nop
+single_divloop:
+ sll %i2, 1, %i2
+ bl 1f
+ srl %l1, 1, %l1
+ ! %i3 >= 0
+ sub %i3, %l1, %i3
+ b 2f
+ inc %i2
+1: ! %i3 < 0
+ add %i3, %l1, %i3
+ dec %i2
+end_single_divloop:
+2: deccc %l4
+ bge single_divloop
+ tst %i3
+ b end_regular_divide
+ nop
+not_really_big:
+1: sll %l1, 3, %l1
+ cmp %l1, %i3
+ bleu 1b
+ inccc %l0
+ be got_result
+ dec %l0
+do_regular_divide:
+ ! Do the main division iteration
+ tst %i3
+ ! Fall through into divide loop
+divloop:
+ sll %i2, 3, %i2
+ ! depth 1, accumulated bits 0
+ bl L.1.8
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ ! depth 2, accumulated bits 1
+ bl L.2.9
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ ! depth 3, accumulated bits 3
+ bl L.3.11
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (3*2+1), %i2
+L.3.11: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (3*2-1), %i2
+L.2.9: ! remainder is negative
+ addcc %i3,%l1,%i3
+ ! depth 3, accumulated bits 1
+ bl L.3.9
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (1*2+1), %i2
+L.3.9: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (1*2-1), %i2
+L.1.8: ! remainder is negative
+ addcc %i3,%l1,%i3
+ ! depth 2, accumulated bits -1
+ bl L.2.7
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ ! depth 3, accumulated bits -1
+ bl L.3.7
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-1*2+1), %i2
+L.3.7: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-1*2-1), %i2
+L.2.7: ! remainder is negative
+ addcc %i3,%l1,%i3
+ ! depth 3, accumulated bits -3
+ bl L.3.5
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-3*2+1), %i2
+L.3.5: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-3*2-1), %i2
+end_regular_divide:
+9: deccc %l0
+ bge divloop
+ tst %i3
+ bge got_result
+ nop
+ ! non-restoring fixup here
+ dec %i2
+got_result:
+ tst %i2
+ bge 1f
+ restore
+ ! answer < 0
+ ! note: after the RESTORE above, %i2 is visible here as %o2
+ retl ! leaf-routine return
+ neg %o2, %o0 ! quotient <- -%i2
+1: retl ! leaf-routine return
+ mov %o2, %o0 ! quotient <- %i2
+#endif
+
+#ifdef L_modsi3
+/* .urem / .rem: 32-bit unsigned / signed integer remainder.
+   In:   %o0 = dividend, %o1 = divisor (seen as %i0/%i1 after the SAVE).
+   Out:  %o0 = remainder.  Executes trap 2 ("te 2") when the divisor is 0.
+   The sign of the result follows the dividend (recorded in %i2; always
+   0 for .urem).  The division itself is the same radix-8 non-restoring
+   loop as .div in lb1spc.asm's L_divsi3 section, but the fixup adds the
+   divisor back into the remainder and the epilogue returns %i3 (as %o3
+   after RESTORE) instead of the quotient.  */
+.text
+ .align 4
+ .global .urem
+ .proc 4
+.urem:
+ save %sp, -64, %sp
+ b divide
+ mov 0, %i2 ! result always positive
+ .global .rem
+ .proc 4
+.rem:
+ save %sp, -64, %sp
+ orcc %i1, %i0, %g0 ! is either operand negative
+ bge divide ! if not, skip this junk
+ mov %i0, %i2 ! record sign of result in sign of %i2
+ tst %i1
+ bge 2f
+ tst %i0
+ ! %i1 < 0
+ bge divide
+ neg %i1
+2: ! %i0 < 0
+ neg %i0
+ ! FALL THROUGH
+divide:
+ ! Compute size of quotient, scale comparand.
+ orcc %i1, %g0, %l1 ! movcc %i1, %l1
+ te 2 ! if %i1 = 0
+ mov %i0, %i3
+ mov 0, %i2
+ sethi %hi(1<<(32-2-1)), %l3
+ cmp %i3, %l3
+ blu not_really_big
+ mov 0, %l0
+ !
+ ! Here, the %i0 is >= 2^(31-3) or so. We must be careful here,
+ ! as our usual 3-at-a-shot divide step will cause overflow and havoc.
+ ! The total number of bits in the result here is 3*%l0+%l4, where
+ ! %l4 <= 3.
+ ! Compute %l0 in an unorthodox manner: know we need to Shift %l1 into
+ ! the top decade: so do not even bother to compare to %i3.
+1: cmp %l1, %l3
+ bgeu 3f
+ mov 1, %l4
+ sll %l1, 3, %l1
+ b 1b
+ inc %l0
+ !
+ ! Now compute %l4
+ !
+2: addcc %l1, %l1, %l1
+ bcc not_too_big
+ add %l4, 1, %l4
+ !
+ ! We are here if the %i1 overflowed when Shifting.
+ ! This means that %i3 has the high-order bit set.
+ ! Restore %l1 and subtract from %i3.
+ sll %l3, 2, %l3
+ srl %l1, 1, %l1
+ add %l1, %l3, %l1
+ b do_single_div
+ dec %l4
+not_too_big:
+3: cmp %l1, %i3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ ! %l1 > %i3: went too far: back up 1 step
+ ! srl %l1, 1, %l1
+ ! dec %l4
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %i3 >= %l1, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %i3 >= 0. Because both %i3 and %l1 may have the
+ ! high-order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+do_single_div:
+ deccc %l4
+ bl end_regular_divide
+ nop
+ sub %i3, %l1, %i3
+ mov 1, %i2
+ b end_single_divloop
+ nop
+single_divloop:
+ sll %i2, 1, %i2
+ bl 1f
+ srl %l1, 1, %l1
+ ! %i3 >= 0
+ sub %i3, %l1, %i3
+ b 2f
+ inc %i2
+1: ! %i3 < 0
+ add %i3, %l1, %i3
+ dec %i2
+end_single_divloop:
+2: deccc %l4
+ bge single_divloop
+ tst %i3
+ b end_regular_divide
+ nop
+not_really_big:
+1: sll %l1, 3, %l1
+ cmp %l1, %i3
+ bleu 1b
+ inccc %l0
+ be got_result
+ dec %l0
+do_regular_divide:
+ ! Do the main division iteration
+ tst %i3
+ ! Fall through into divide loop
+divloop:
+ sll %i2, 3, %i2
+ ! depth 1, accumulated bits 0
+ bl L.1.8
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ ! depth 2, accumulated bits 1
+ bl L.2.9
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ ! depth 3, accumulated bits 3
+ bl L.3.11
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (3*2+1), %i2
+L.3.11: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (3*2-1), %i2
+L.2.9: ! remainder is negative
+ addcc %i3,%l1,%i3
+ ! depth 3, accumulated bits 1
+ bl L.3.9
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (1*2+1), %i2
+L.3.9: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (1*2-1), %i2
+L.1.8: ! remainder is negative
+ addcc %i3,%l1,%i3
+ ! depth 2, accumulated bits -1
+ bl L.2.7
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ ! depth 3, accumulated bits -1
+ bl L.3.7
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-1*2+1), %i2
+L.3.7: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-1*2-1), %i2
+L.2.7: ! remainder is negative
+ addcc %i3,%l1,%i3
+ ! depth 3, accumulated bits -3
+ bl L.3.5
+ srl %l1,1,%l1
+ ! remainder is positive
+ subcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-3*2+1), %i2
+L.3.5: ! remainder is negative
+ addcc %i3,%l1,%i3
+ b 9f
+ add %i2, (-3*2-1), %i2
+end_regular_divide:
+9: deccc %l0
+ bge divloop
+ tst %i3
+ bge got_result
+ nop
+ ! non-restoring fixup here
+ add %i3, %i1, %i3
+got_result:
+ tst %i2
+ bge 1f
+ restore
+ ! answer < 0
+ ! note: after the RESTORE above, %i3 is visible here as %o3
+ retl ! leaf-routine return
+ neg %o3, %o0 ! remainder <- -%i3
+1: retl ! leaf-routine return
+ mov %o3, %o0 ! remainder <- %i3
+#endif
+
+
diff --git a/gcc/config/sparc/lb1spl.asm b/gcc/config/sparc/lb1spl.asm
new file mode 100644
index 00000000000..4c8bc30b83d
--- /dev/null
+++ b/gcc/config/sparc/lb1spl.asm
@@ -0,0 +1,246 @@
+/* This is an assembly language implementation of libgcc1.c for the sparclite
+ processor.
+
+ These routines are all from the Sparclite User's Guide, slightly edited
+ to match the desired calling convention, and also to optimize them. */
+
+#ifdef L_udivsi3
+/* .udiv: 32-bit unsigned divide using the SPARClite divscc step insn.
+   In:   %o0 = dividend, %o1 = divisor.
+   Out:  %o0 = quotient.
+   Leaf routine: clobbers %g1, %y and the condition codes.
+   %y is cleared and the N flag reset (tst %g0), then one divscc per
+   result bit; the 32nd step writes the quotient directly to %o0.  */
+.text
+ .align 4
+ .global .udiv
+ .proc 04
+.udiv:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ tst %g0
+ divscc %o0,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ retl
+ divscc %g1,%o1,%o0 ! 32nd step; quotient lands in %o0
+#endif
+
+#ifdef L_umodsi3
+/* .urem: 32-bit unsigned remainder via 32 divscc steps.
+   In:   %o0 = dividend, %o1 = divisor.
+   Out:  %o0 = remainder (read back from %y after the last step).
+   Leaf routine: clobbers %g1, %y and the condition codes.
+   A negative final partial remainder is corrected by adding the
+   divisor back (non-restoring correction at label 1).  */
+.text
+ .align 4
+ .global .urem
+ .proc 04
+.urem:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ tst %g0
+ divscc %o0,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ bl 1f ! partial remainder negative -> needs correction
+ rd %y,%o0
+ retl
+ nop
+1: retl
+ add %o0,%o1,%o0 ! restore: remainder += divisor
+#endif
+
+#ifdef L_divsi3
+/* .div: 32-bit signed divide using divscc.
+   In:   %o0 = dividend, %o1 = divisor.
+   Out:  %o0 = quotient (computed in %g1).
+   Leaf routine: clobbers %g1, %o3, %o4, %y and the condition codes.
+   The divisor's magnitude is taken up front (%o4); %y is seeded with
+   -1 when the dividend is negative.  The code at labels 4-6 adjusts
+   the remainder/quotient pair for negative operands, and label 7
+   applies the final quotient sign (negate when the divisor was
+   negative).  */
+.text
+ .align 4
+ .global .div
+ .proc 04
+! ??? This routine could be made faster if was optimized, and if it was
+! rewritten to only calculate the quotient.
+.div:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ mov %o1,%o4
+ tst %o1
+ bl,a 1f
+ sub %g0,%o4,%o4 ! %o4 = |divisor|
+1: tst %o0
+ bl,a 2f
+ mov -1,%y ! sign-extend a negative dividend into %y
+2: divscc %o0,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ be 6f ! remainder zero: no fixup needed
+ mov %y,%o3 ! %o3 = final partial remainder
+ bg 4f
+ addcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bl 5f
+ tst %g1
+ ba 5f
+ add %o3,%o4,%o3
+4: subcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bge 5f
+ tst %g1
+ sub %o3,%o4,%o3
+5: bl,a 6f
+ add %g1,1,%g1
+6: tst %o1
+ bl,a 7f ! divisor was negative: negate quotient
+ sub %g0,%g1,%g1
+7: retl
+ mov %g1,%o0 ! Quotient is in %g1.
+#endif
+
+#ifdef L_modsi3
+/* .rem: 32-bit signed remainder using divscc.
+   In:   %o0 = dividend, %o1 = divisor.
+   Out:  %o0 = remainder (computed in %o3).
+   Leaf routine: clobbers %g1, %o3, %o4, %y and the condition codes.
+   Same divide sequence and sign fixup as .div in this file; the only
+   difference is that the corrected remainder (%o3), not the quotient
+   (%g1), is returned.  */
+.text
+ .align 4
+ .global .rem
+ .proc 04
+! ??? This routine could be made faster if was optimized, and if it was
+! rewritten to only calculate the remainder.
+.rem:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ mov %o1,%o4
+ tst %o1
+ bl,a 1f
+ sub %g0,%o4,%o4 ! %o4 = |divisor|
+1: tst %o0
+ bl,a 2f
+ mov -1,%y ! sign-extend a negative dividend into %y
+2: divscc %o0,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ be 6f ! remainder zero: no fixup needed
+ mov %y,%o3 ! %o3 = final partial remainder
+ bg 4f
+ addcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bl 5f
+ tst %g1
+ ba 5f
+ add %o3,%o4,%o3
+4: subcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bge 5f
+ tst %g1
+ sub %o3,%o4,%o3
+5: bl,a 6f
+ add %g1,1,%g1
+6: tst %o1
+ bl,a 7f
+ sub %g0,%g1,%g1
+7: retl
+ mov %o3,%o0 ! Remainder is in %o3.
+#endif
diff --git a/gcc/config/sparc/litecoff.h b/gcc/config/sparc/litecoff.h
new file mode 100644
index 00000000000..a471cb3194a
--- /dev/null
+++ b/gcc/config/sparc/litecoff.h
@@ -0,0 +1,126 @@
+/* Definitions of target machine for GNU compiler, for SPARClite w/o FPU, COFF.
+ Copyright (C) 1994 Free Software Foundation, Inc.
+ Written by Ken Raeburn (raeburn@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "sparc/lite.h"
+
+/* NOTE(review): .ident output is suppressed here — presumably the COFF
+   assembler does not accept it; confirm against the target assembler.  */
+#undef ASM_OUTPUT_IDENT
+
+/* This is copied from final.c and sparc.h. */
+#undef ASM_OUTPUT_SOURCE_LINE
+#define ASM_OUTPUT_SOURCE_LINE(FILE, LINE) \
+{ if (write_symbols == SDB_DEBUG) { \
+ fprintf ((FILE), "\t.ln\t%d\n", \
+ ((sdb_begin_function_line > -1) \
+ ? (LINE) - sdb_begin_function_line : 1)); \
+ } else if (write_symbols == DBX_DEBUG) { \
+ static int sym_lineno = 1; \
+ fprintf ((FILE), ".stabn 68,0,%d,LM%d\nLM%d:\n", \
+ (LINE), sym_lineno, sym_lineno); \
+ sym_lineno += 1; \
+ } }
+
+/* Drop the inherited section-selection macros so svr3.h (included just
+   below) can supply its own.  */
+#undef SELECT_SECTION
+#undef SELECT_RTX_SECTION
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+
+#include "svr3.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Dsparclite -Acpu(sparc) -Amachine(sparc)"
+
+/* just in case */
+#undef DBX_DEBUGGING_INFO
+#undef PREFERRED_DEBUGGING_TYPE
+#define DBX_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* Support the ctors and dtors sections for g++. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Support the ctors and dtors sections for g++. */
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_bss, in_ctors, in_dtors
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+/* Switch the assembler output into the .ctors section, tracking the
+   current section in `in_section' so the directive is emitted once.  */
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+/* Same as CTORS_SECTION_FUNCTION, but for the .dtors section.  */
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Assembler pseudo-op for emitting a word-sized constant; used by the
+   ctor/dtor table entries below.  */
+#define INT_ASM_OP ".long"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* Remove any inherited ctor/dtor invocation bodies so the compiler's
+   default handling is used.  */
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
diff --git a/gcc/config/sparc/t-sparcbare b/gcc/config/sparc/t-sparcbare
new file mode 100644
index 00000000000..5cda0642a8b
--- /dev/null
+++ b/gcc/config/sparc/t-sparcbare
@@ -0,0 +1,14 @@
+# configuration file for a bare sparc cpu, aout format files
+
+# libgcc1 is built from the assembler routines in lb1spc.asm; only the
+# multiply, divide and modulo helpers are provided there.
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# MULTILIB_OPTIONS should have msparclite too, but we'd have to make
+# gas build...
+MULTILIB_OPTIONS=msoft-float mv8
+MULTILIB_DIRNAMES=soft v8
+# Treat -mno-fpu as an alias for -msoft-float when selecting a multilib.
+MULTILIB_MATCHES=msoft-float=mno-fpu
+
+# Build and install libgcc once per multilib variant.
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc/config/sparc/t-sparclite b/gcc/config/sparc/t-sparclite
new file mode 100644
index 00000000000..84a4d50098f
--- /dev/null
+++ b/gcc/config/sparc/t-sparclite
@@ -0,0 +1,17 @@
+# SPARClite: libgcc1 comes from the assembler routines in lb1spl.asm.
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spl.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+# dp-bit.c: double-precision software FP from config/fp-bit.c, with the
+# US_SOFTWARE_GOFAST entry points enabled.
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define US_SOFTWARE_GOFAST' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# fp-bit.c: same source compiled for single precision (FLOAT defined).
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define US_SOFTWARE_GOFAST' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c