sysdeps/i386/i586/rshift.S
/* Pentium optimized __mpn_rshift --
   Copyright (C) 1992-2020 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not,
   see <https://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include "asm-syntax.h"

#define PARMS	4+16		/* space for 4 saved regs */
#define RES	PARMS
#define S	RES+4
#define SIZE	S+4
#define CNT	SIZE+4
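
/* For reference (not part of the build): __mpn_rshift has the C
   prototype

     mp_limb_t __mpn_rshift (mp_ptr res_ptr, mp_srcptr s_ptr,
                             mp_size_t size, unsigned int cnt);

   and, for 1 <= cnt < 32, computes roughly

     mp_limb_t retval = s_ptr[0] << (32 - cnt);
     for (mp_size_t i = 0; i < size - 1; i++)
       res_ptr[i] = (s_ptr[i] >> cnt) | (s_ptr[i + 1] << (32 - cnt));
     res_ptr[size - 1] = s_ptr[size - 1] >> cnt;
     return retval;

   i.e. it shifts a size-limb number right by cnt bits and returns the
   bits shifted out at the low end, left-justified in a limb.  */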

	.text
ENTRY (__mpn_rshift)

	pushl	%edi
	cfi_adjust_cfa_offset (4)
	pushl	%esi
	cfi_adjust_cfa_offset (4)
	pushl	%ebp
	cfi_adjust_cfa_offset (4)
	cfi_rel_offset (ebp, 0)
	pushl	%ebx
	cfi_adjust_cfa_offset (4)

	movl	RES(%esp),%edi
	cfi_rel_offset (edi, 12)
	movl	S(%esp),%esi
	cfi_rel_offset (esi, 8)
	movl	SIZE(%esp),%ebx
	cfi_rel_offset (ebx, 0)
	movl	CNT(%esp),%ecx

/* We can use faster code for shift-by-1 under certain conditions.  */
	cmp	$1,%ecx
	jne	L(normal)
	leal	4(%edi),%eax
	cmpl	%esi,%eax
	jnc	L(special)		/* jump if res_ptr + 1 >= s_ptr */
	leal	(%edi,%ebx,4),%eax
	cmpl	%eax,%esi
	jnc	L(special)		/* jump if s_ptr >= res_ptr + size */
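
/* The special loop below runs from the most significant end downward,
   which is unsafe when the destination starts below an overlapping
   source.  The two checks above therefore take it only when
   s_ptr <= res_ptr + 1 (in limbs), i.e. the destination is at or above
   the source, or when the regions are completely disjoint; every other
   case falls through to the generic loop.  */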

L(normal):
	movl	(%esi),%edx
	addl	$4,%esi
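/* With %eax cleared, the shrdl below deposits the low CNT bits of the
   first source limb into the top of %eax: these are the bits shifted
   out at the low end, i.e. the function's return value.  */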
	xorl	%eax,%eax
	shrdl	%cl,%edx,%eax		/* compute carry limb */
	pushl	%eax			/* push carry limb onto stack */
	cfi_adjust_cfa_offset (4)

	decl	%ebx
	pushl	%ebx
	cfi_adjust_cfa_offset (4)
	shrl	$3,%ebx
	jz	L(end)

	movl	(%edi),%eax		/* fetch destination cache line */
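
/* The main loop below handles eight limbs per iteration.  Each result
   limb is formed by shrdl from two adjacent source limbs, with the
   last limb of one iteration carried around in %edx.  The load from
   28(%edi) at the loop head pre-touches the destination cache line,
   presumably because the P5 does not allocate a line on a write miss,
   so the eight stores would otherwise each go to memory.  */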

	ALIGN	(2)
L(oop):	movl	28(%edi),%eax		/* fetch destination cache line */
	movl	%edx,%ebp

	movl	(%esi),%eax
	movl	4(%esi),%edx
	shrdl	%cl,%eax,%ebp
	shrdl	%cl,%edx,%eax
	movl	%ebp,(%edi)
	movl	%eax,4(%edi)

	movl	8(%esi),%ebp
	movl	12(%esi),%eax
	shrdl	%cl,%ebp,%edx
	shrdl	%cl,%eax,%ebp
	movl	%edx,8(%edi)
	movl	%ebp,12(%edi)

	movl	16(%esi),%edx
	movl	20(%esi),%ebp
	shrdl	%cl,%edx,%eax
	shrdl	%cl,%ebp,%edx
	movl	%eax,16(%edi)
	movl	%edx,20(%edi)

	movl	24(%esi),%eax
	movl	28(%esi),%edx
	shrdl	%cl,%eax,%ebp
	shrdl	%cl,%edx,%eax
	movl	%ebp,24(%edi)
	movl	%eax,28(%edi)

	addl	$32,%esi
	addl	$32,%edi
	decl	%ebx
	jnz	L(oop)
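
/* At most seven limbs remain: recover size-1 from the stack and handle
   (size-1) mod 8 limbs one at a time in L(oop2).  */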

L(end):	popl	%ebx
	cfi_adjust_cfa_offset (-4)
	andl	$7,%ebx
	jz	L(end2)
L(oop2):
	movl	(%esi),%eax
	shrdl	%cl,%eax,%edx		/* compute result limb */
	movl	%edx,(%edi)
	movl	%eax,%edx
	addl	$4,%esi
	addl	$4,%edi
	decl	%ebx
	jnz	L(oop2)

L(end2):
	shrl	%cl,%edx		/* compute most significant limb */
	movl	%edx,(%edi)		/* store it */

	popl	%eax			/* pop carry limb */
	cfi_adjust_cfa_offset (-4)

	popl	%ebx
	cfi_adjust_cfa_offset (-4)
	cfi_restore (ebx)
	popl	%ebp
	cfi_adjust_cfa_offset (-4)
	cfi_restore (ebp)
	popl	%esi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (esi)
	popl	%edi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (edi)

	ret

/* Shift-by-1 code.  This loops from the most significant end of the
   arrays, which is only permissible when the source and destination do
   not overlap in a harmful way; since the function is documented to
   work for overlapping source and destination, the checks at function
   entry rule out the unsafe cases before branching here.
*/

	cfi_adjust_cfa_offset (16)
	cfi_rel_offset (edi, 12)
	cfi_rel_offset (esi, 8)
	cfi_rel_offset (ebp, 4)
	cfi_rel_offset (ebx, 0)
L(special):
	leal	-4(%edi,%ebx,4),%edi
	leal	-4(%esi,%ebx,4),%esi

	movl	(%esi),%edx
	subl	$4,%esi

	decl	%ebx
	pushl	%ebx
	cfi_adjust_cfa_offset (4)
	shrl	$3,%ebx
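
/* The shrl below seeds the carry chain: CF receives the low bit of the
   most significant limb.  The incl/decl pair then tests %ebx for zero
   while leaving CF intact (testl would clear it).  */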

	shrl	$1,%edx
	incl	%ebx
	decl	%ebx
	jz	L(Lend)

	movl	(%edi),%eax		/* fetch destination cache line */
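
/* Shift-by-1 main loop, eight limbs per iteration, walking downward.
   Each rcrl $1 shifts a limb right by one bit while pulling the bit
   shifted out of the previous (more significant) limb in through CF,
   so the carry flag chains the bit from limb to limb.  The pointer
   updates use leal precisely because leal leaves CF intact.  */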

	ALIGN	(2)
L(Loop):
	movl	-28(%edi),%eax		/* fetch destination cache line */
	movl	%edx,%ebp

	movl	(%esi),%eax
	movl	-4(%esi),%edx
	rcrl	$1,%eax
	movl	%ebp,(%edi)
	rcrl	$1,%edx
	movl	%eax,-4(%edi)

	movl	-8(%esi),%ebp
	movl	-12(%esi),%eax
	rcrl	$1,%ebp
	movl	%edx,-8(%edi)
	rcrl	$1,%eax
	movl	%ebp,-12(%edi)

	movl	-16(%esi),%edx
	movl	-20(%esi),%ebp
	rcrl	$1,%edx
	movl	%eax,-16(%edi)
	rcrl	$1,%ebp
	movl	%edx,-20(%edi)

	movl	-24(%esi),%eax
	movl	-28(%esi),%edx
	rcrl	$1,%eax
	movl	%ebp,-24(%edi)
	rcrl	$1,%edx
	movl	%eax,-28(%edi)

	leal	-32(%esi),%esi		/* use leal so as not to clobber the carry flag */
	leal	-32(%edi),%edi
	decl	%ebx
	jnz	L(Loop)

L(Lend):
	popl	%ebx
	cfi_adjust_cfa_offset (-4)
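/* popl leaves the flags alone, so CF still holds the bit pending from
   the last rcrl.  sbbl of a register with itself converts CF into 0 or
   -1; the andl below clobbers CF, and the addl that follows (doubling
   0 or -1) re-creates it.  */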
	sbbl	%eax,%eax		/* save carry in %eax */
	andl	$7,%ebx
	jz	L(Lend2)
	addl	%eax,%eax		/* restore carry from eax */
L(Loop2):
	movl	%edx,%ebp
	movl	(%esi),%edx
	rcrl	$1,%edx
	movl	%ebp,(%edi)

	leal	-4(%esi),%esi		/* use leal so as not to clobber the carry flag */
	leal	-4(%edi),%edi
	decl	%ebx
	jnz	L(Loop2)

	jmp	L(L1)
L(Lend2):
	addl	%eax,%eax		/* restore carry from eax */
L(L1):	movl	%edx,(%edi)		/* store last limb */
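
/* Rotate the final carry into bit 31 of %eax: the return value is the
   shifted-out bit left-justified in a limb, matching the carry limb
   the generic path computes with shrdl.  */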

	movl	$0,%eax
	rcrl	$1,%eax

	popl	%ebx
	cfi_adjust_cfa_offset (-4)
	cfi_restore (ebx)
	popl	%ebp
	cfi_adjust_cfa_offset (-4)
	cfi_restore (ebp)
	popl	%esi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (esi)
	popl	%edi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (edi)

	ret
END (__mpn_rshift)