/* libgcc routines for the MCore.
   Copyright (C) 1993, 1999, 2000, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__, x)
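
/* The two-level CONCAT is the usual token-pasting idiom: routing the
   arguments through CONCAT1 first lets any macros among them expand
   before ## glues the tokens together, so SYM (udiv32) becomes the
   label __udiv32.  A guarded illustration (NAME is ours, and the block
   is not part of the build):  */
#if 0
#define NAME udiv32
CONCAT2 (__, NAME)	/* pastes the raw tokens:  __NAME    */
CONCAT1 (__, NAME)	/* expands NAME first:     __udiv32  */
SYM (udiv32)		/* the form used below:    __udiv32  */
#endif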

#ifdef __ELF__
#define TYPE(x) .type SYM (x),@function
#define SIZE(x) .size SYM (x), . - SYM (x)
#else
#define TYPE(x)
#define SIZE(x)
#endif

.macro FUNC_START name
	.text
	.globl SYM (\name)
	TYPE (\name)
SYM (\name):
.endm

.macro FUNC_END name
	SIZE (\name)
.endm

#ifdef	L_udivsi3
FUNC_START udiv32
FUNC_START udivsi32

	movi	r1,0		// r1-r2 form 64 bit dividend
	movi	r4,1		// r4 is quotient (1 for a sentinel)

	cmpnei	r3,0		// look for 0 divisor
	bt	9f
	trap	3		// divide by 0
9:
	// control iterations; skip across high order 0 bits in dividend
	mov	r7,r2
	cmpnei	r7,0
	bt	8f
	movi	r2,0		// 0 dividend
	jmp	r15		// quick return
8:
	ff1	r7		// figure distance to skip
	lsl	r4,r7		// move the sentinel along (with 0's behind)
	lsl	r2,r7		// and the low 32 bits of numerator

// appears to be wrong...
// tested out incorrectly in our OS work...
//	mov	r7,r3		// looking at divisor
//	ff1	r7		// I can move 32-r7 more bits to left.
//	addi	r7,1		// ok, one short of that...
//	mov	r1,r2
//	lsr	r1,r7		// bits that came from low order...
//	rsubi	r7,31		// r7 == "32-n" == LEFT distance
//	addi	r7,1		// this is (32-n)
//	lsl	r4,r7		// fixes the high 32 (quotient)
//	lsl	r2,r7
//	cmpnei	r4,0
//	bf	4f		// the sentinel went away...

	// run the remaining bits

1:	lslc	r2,1		// 1 bit left shift of r1-r2
	addc	r1,r1
	cmphs	r1,r3		// upper 32 of dividend >= divisor?
	bf	2f
	sub	r1,r3		// if yes, subtract divisor
2:	addc	r4,r4		// shift by 1 and count subtracts
	bf	1b		// if sentinel falls out of quotient, stop

4:	mov	r2,r4		// return quotient
	mov	r3,r1		// and piggyback the remainder
	jmp	r15
FUNC_END udiv32
FUNC_END udivsi32
#endif

#ifdef	L_umodsi3
FUNC_START urem32
FUNC_START umodsi3
	movi	r1,0		// r1-r2 form 64 bit dividend
	movi	r4,1		// r4 is quotient (1 for a sentinel)
	cmpnei	r3,0		// look for 0 divisor
	bt	9f
	trap	3		// divide by 0
9:
	// control iterations; skip across high order 0 bits in dividend
	mov	r7,r2
	cmpnei	r7,0
	bt	8f
	movi	r2,0		// 0 dividend
	jmp	r15		// quick return
8:
	ff1	r7		// figure distance to skip
	lsl	r4,r7		// move the sentinel along (with 0's behind)
	lsl	r2,r7		// and the low 32 bits of numerator

1:	lslc	r2,1		// 1 bit left shift of r1-r2
	addc	r1,r1
	cmphs	r1,r3		// upper 32 of dividend >= divisor?
	bf	2f
	sub	r1,r3		// if yes, subtract divisor
2:	addc	r4,r4		// shift by 1 and count subtracts
	bf	1b		// if sentinel falls out of quotient, stop
	mov	r2,r1		// return remainder
	jmp	r15
FUNC_END urem32
FUNC_END umodsi3
#endif
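
/* A hedged C reference model of the shift-and-subtract core shared by
   udiv32/udivsi32 and urem32/umodsi3 above.  It mirrors the register
   roles (hi:lo for r1:r2, q for the sentinel quotient in r4) but is
   only a sketch for reading along: the names are ours, the asm raises
   trap 3 on a zero divisor instead of returning, and the asm's
   zero-dividend quick return simply clears r2.  Guarded out of the
   build with #if 0.  */
#if 0
#include <stdint.h>

typedef struct { uint32_t quot, rem; } udiv32_result;

static udiv32_result
udiv32_model (uint32_t n, uint32_t d)
{
  udiv32_result res = { 0, 0 };

  if (d == 0)                   // the asm executes trap 3 here
    return res;
  if (n == 0)                   // "quick return" for a zero dividend
    return res;

  uint32_t hi = 0, lo = n;      // r1:r2, the 64-bit dividend
  uint32_t q = 1;               // r4, quotient with sentinel bit

  // ff1/lsl: skip the high-order zero bits of the dividend,
  // moving the sentinel up by the same amount.
  while (!(lo & 0x80000000u))
    {
      lo <<= 1;
      q <<= 1;
    }

  for (;;)
    {
      // lslc/addc: one-bit left shift of the 64-bit value hi:lo.
      uint32_t carry = lo >> 31;
      lo <<= 1;
      hi = (hi << 1) | carry;

      // cmphs/sub: trial subtraction decides the next quotient bit.
      uint32_t bit = (hi >= d);
      if (bit)
        hi -= d;

      // addc r4,r4: shift the bit into the quotient; stop once the
      // sentinel bit falls out of the top of q.
      uint32_t sentinel_out = q >> 31;
      q = (q << 1) | bit;
      if (sentinel_out)
        break;
    }

  res.quot = q;                 // udiv32 returns this in r2 ...
  res.rem = hi;                 // ... and piggybacks this in r3; urem32 returns it in r2
  return res;
}
#endif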

#ifdef	L_divsi3
FUNC_START div32
FUNC_START divsi3
	mov	r5,r2		// calc sign of quotient
	xor	r5,r3
	abs	r2		// do unsigned divide
	abs	r3
	movi	r1,0		// r1-r2 form 64 bit dividend
	movi	r4,1		// r4 is quotient (1 for a sentinel)
	cmpnei	r3,0		// look for 0 divisor
	bt	9f
	trap	3		// divide by 0
9:
	// control iterations; skip across high order 0 bits in dividend
	mov	r7,r2
	cmpnei	r7,0
	bt	8f
	movi	r2,0		// 0 dividend
	jmp	r15		// quick return
8:
	ff1	r7		// figure distance to skip
	lsl	r4,r7		// move the sentinel along (with 0's behind)
	lsl	r2,r7		// and the low 32 bits of numerator

// tested out incorrectly in our OS work...
//	mov	r7,r3		// looking at divisor
//	ff1	r7		// I can move 32-r7 more bits to left.
//	addi	r7,1		// ok, one short of that...
//	mov	r1,r2
//	lsr	r1,r7		// bits that came from low order...
//	rsubi	r7,31		// r7 == "32-n" == LEFT distance
//	addi	r7,1		// this is (32-n)
//	lsl	r4,r7		// fixes the high 32 (quotient)
//	lsl	r2,r7
//	cmpnei	r4,0
//	bf	4f		// the sentinel went away...

	// run the remaining bits
1:	lslc	r2,1		// 1 bit left shift of r1-r2
	addc	r1,r1
	cmphs	r1,r3		// upper 32 of dividend >= divisor?
	bf	2f
	sub	r1,r3		// if yes, subtract divisor
2:	addc	r4,r4		// shift by 1 and count subtracts
	bf	1b		// if sentinel falls out of quotient, stop

4:	mov	r2,r4		// return quotient
	mov	r3,r1		// piggyback the remainder
	btsti	r5,31		// after adjusting for sign
	bf	3f
	rsubi	r2,0
	rsubi	r3,0
3:	jmp	r15
FUNC_END div32
FUNC_END divsi3
#endif

#ifdef	L_modsi3
FUNC_START rem32
FUNC_START modsi3
	mov	r5,r2		// calc sign of remainder
	abs	r2		// do unsigned divide
	abs	r3
	movi	r1,0		// r1-r2 form 64 bit dividend
	movi	r4,1		// r4 is quotient (1 for a sentinel)
	cmpnei	r3,0		// look for 0 divisor
	bt	9f
	trap	3		// divide by 0
9: 
	// control iterations; skip across high order 0 bits in dividend
	mov	r7,r2
	cmpnei	r7,0
	bt	8f
	movi	r2,0		// 0 dividend
	jmp	r15		// quick return
8:
	ff1	r7		// figure distance to skip
	lsl	r4,r7		// move the sentinel along (with 0's behind)
	lsl	r2,r7		// and the low 32 bits of numerator

1:	lslc	r2,1		// 1 bit left shift of r1-r2
	addc	r1,r1
	cmphs	r1,r3		// upper 32 of dividend >= divisor?
	bf	2f
	sub	r1,r3		// if yes, subtract divisor
2:	addc	r4,r4		// shift by 1 and count subtracts
	bf	1b		// if sentinel falls out of quotient, stop
	mov	r2,r1		// return remainder
	btsti	r5,31		// after adjusting for sign
	bf	3f
	rsubi	r2,0
3:	jmp	r15
FUNC_END rem32
FUNC_END modsi3
#endif
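
/* A hedged C sketch of the sign handling in div32/divsi3 and
   rem32/modsi3 above, reusing udiv32_model from the earlier sketch.
   The quotient is negated when the operand signs differ (r5 = r2 ^ r3
   in div32), while the remainder follows the dividend's sign (r5 = r2
   in rem32).  The _model names are ours, edge cases around INT_MIN are
   glossed over, and the block is guarded out of the build.  */
#if 0
static int32_t
div32_model (int32_t n, int32_t d)
{
  uint32_t un = n < 0 ? -(uint32_t) n : (uint32_t) n;   // abs r2
  uint32_t ud = d < 0 ? -(uint32_t) d : (uint32_t) d;   // abs r3
  uint32_t q = udiv32_model (un, ud).quot;              // unsigned divide
  return (n ^ d) < 0 ? -(int32_t) q : (int32_t) q;      // btsti r5,31 / rsubi
}

static int32_t
rem32_model (int32_t n, int32_t d)
{
  uint32_t un = n < 0 ? -(uint32_t) n : (uint32_t) n;
  uint32_t ud = d < 0 ? -(uint32_t) d : (uint32_t) d;
  uint32_t r = udiv32_model (un, ud).rem;
  return n < 0 ? -(int32_t) r : (int32_t) r;            // remainder keeps the dividend's sign
}
#endif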


/* GCC expects {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2} to behave like
   __cmpdf2 and __cmpsf2, so each of them is implemented as a stub that
   simply jumps to the corresponding generic compare routine.

   All of these stubs short-circuit the return path, so __cmp{s,d}f2
   returns directly to the original caller.  */
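
/* What each stub amounts to, as a hedged C sketch (the _model names
   and prototypes are ours, written from the description above, and the
   block is guarded out of the build): every comparison entry point
   tail-calls the generic three-way compare, whose result is handed
   straight back to the original caller.  */
#if 0
extern int __cmpdf2 (double, double);	/* provided elsewhere in libgcc */
extern int __cmpsf2 (float, float);

int __eqdf2_model (double a, double b) { return __cmpdf2 (a, b); }	/* jmpi __cmpdf2 */
int __ltsf2_model (float a, float b)   { return __cmpsf2 (a, b); }	/* jmpi __cmpsf2 */
#endif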

.macro  COMPARE_DF_JUMP name
	.import SYM (cmpdf2)
FUNC_START \name
	jmpi SYM (cmpdf2)
FUNC_END \name
.endm
		
#ifdef  L_eqdf2
COMPARE_DF_JUMP eqdf2
#endif /* L_eqdf2 */

#ifdef  L_nedf2
COMPARE_DF_JUMP nedf2
#endif /* L_nedf2 */

#ifdef  L_gtdf2
COMPARE_DF_JUMP gtdf2
#endif /* L_gtdf2 */

#ifdef  L_gedf2
COMPARE_DF_JUMP gedf2
#endif /* L_gedf2 */

#ifdef  L_ltdf2
COMPARE_DF_JUMP ltdf2
#endif /* L_ltdf2 */
	
#ifdef  L_ledf2
COMPARE_DF_JUMP ledf2
#endif /* L_ledf2 */

/* SINGLE PRECISION FLOATING POINT STUBS */

.macro  COMPARE_SF_JUMP name
	.import SYM (cmpsf2)
FUNC_START \name
	jmpi SYM (cmpsf2)
FUNC_END \name
.endm
		
#ifdef  L_eqsf2
COMPARE_SF_JUMP eqsf2
#endif /* L_eqsf2 */
	
#ifdef  L_nesf2
COMPARE_SF_JUMP nesf2
#endif /* L_nesf2 */
	
#ifdef  L_gtsf2
COMPARE_SF_JUMP gtsf2
#endif /* L_gtsf2 */
	
#ifdef  L_gesf2
COMPARE_SF_JUMP gesf2
#endif /* L_gesf2 */
	
#ifdef  L_ltsf2
COMPARE_SF_JUMP ltsf2
#endif /* L_ltsf2 */
	
#ifdef  L_lesf2
COMPARE_SF_JUMP lesf2
#endif /* L_lesf2 */