dnl  AMD64 mpn_lshift -- mpn left shift.

dnl  Copyright 2003, 2005, 2007, 2009, 2011, 2012 Free Software Foundation,
dnl  Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')


C	     cycles/limb   cycles/limb cnt=1
C AMD K8,K9	 2.375		 1.375
C AMD K10	 2.375		 1.375
C Intel P4	 8		10.5
C Intel core2	 2.11		 4.28
C Intel corei	 ?		 ?
C Intel atom	 5.75		 3.5
C VIA nano	 3.5		 2.25


C INPUT PARAMETERS
define(`rp',	`%rdi')
define(`up',	`%rsi')
define(`n',	`%rdx')
define(`cnt',	`%rcx')
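
C This routine implements the documented mpn_lshift semantics: shift
C {up,n} left by cnt bits (1 <= cnt < 64), store the low limbs at
C {rp,n}, and return the bits shifted out of the top limb.  In C it
C does roughly the following (a reference sketch assuming 64-bit
C limbs, not GMP's portable implementation):
C
C	mp_limb_t
C	mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned cnt)
C	{
C	  unsigned tnc = 64 - cnt;
C	  mp_limb_t retval = up[n - 1] >> tnc;	/* bits shifted out */
C	  for (mp_size_t i = n - 1; i > 0; i--)
C	    rp[i] = (up[i] << cnt) | (up[i - 1] >> tnc);
C	  rp[0] = up[0] << cnt;
C	  return retval;
C	}
C
C A typical call doubles a number in place: mpn_lshift (xp, xp, n, 1).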

ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)

ASM_START()
	TEXT
	ALIGN(32)
PROLOGUE(mpn_lshift)
	FUNC_ENTRY(4)
	cmp	$1, R8(%rcx)
	jne	L(gen)

C For cnt=1 we want to work from the lowest limb towards higher limbs.
C Check for bad overlap: rp = up is OK, but rp = up+1..up+n-1 is bad.
C FIXME: this could surely be done more cleverly.
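C
C In C terms, the test below is roughly this (a sketch, assuming
C 8-byte limbs; unsigned wraparound makes rp < up fall through to
C the forward code):
C
C	if (rp != up && (size_t) (rp - up) < (size_t) n)
C	  goto gen;	/* rp inside up[1..n-1]; fwd would clobber */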

	mov	rp, %rax
	sub	up, %rax		C byte distance rp - up
	je	L(fwd)			C rp = up
	shr	$3, %rax		C limb distance
	cmp	n, %rax
	jb	L(gen)			C 0 < rp-up < n: overlap, use bwd code

L(fwd):	mov	R32(n), R32(%rax)
	shr	$2, n
	je	L(e1)
	and	$3, R32(%rax)

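C Left shift by 1 bit is x + x: the loop below doubles four limbs per
C iteration with an adc chain, and since mov, lea, dec and jne all
C leave CF alone, the carry also propagates from one iteration to the
C next.  The two nops pad the loop top for alignment.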
	ALIGN(8)
	nop
	nop
L(t1):	mov	(up), %r8
	mov	8(up), %r9
	mov	16(up), %r10
	mov	24(up), %r11
	lea	32(up), up
	adc	%r8, %r8
	mov	%r8, (rp)
	adc	%r9, %r9
	mov	%r9, 8(rp)
	adc	%r10, %r10
	mov	%r10, 16(rp)
	adc	%r11, %r11
	mov	%r11, 24(rp)
	lea	32(rp), rp
	dec	n
	jne	L(t1)

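C Here 0-3 limbs remain (the low two bits of n, saved in eax).  The
C inc/dec pair sets ZF from eax without touching CF, so the carry
C from the adc chain above survives the remainder dispatch below.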
	inc	R32(%rax)
	dec	R32(%rax)
	jne	L(n00)
	adc	R32(%rax), R32(%rax)
	FUNC_EXIT()
	ret
L(e1):	test	R32(%rax), R32(%rax)	C clear cy
L(n00):	mov	(up), %r8
	dec	R32(%rax)
	jne	L(n01)
	adc	%r8, %r8
	mov	%r8, (rp)
L(ret):	adc	R32(%rax), R32(%rax)
	FUNC_EXIT()
	ret
L(n01):	dec	R32(%rax)
	mov	8(up), %r9
	jne	L(n10)
	adc	%r8, %r8
	adc	%r9, %r9
	mov	%r8, (rp)
	mov	%r9, 8(rp)
	adc	R32(%rax), R32(%rax)
	FUNC_EXIT()
	ret
L(n10):	mov	16(up), %r10
	adc	%r8, %r8
	adc	%r9, %r9
	adc	%r10, %r10
	mov	%r8, (rp)
	mov	%r9, 8(rp)
	mov	%r10, 16(rp)
	adc	$-1, R32(%rax)		C eax = 1 here; 1 - 1 + cy = cy
	FUNC_EXIT()
	ret

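C The general case computes, from the top limb downwards,
C
C	rp[i] = (up[i] << cnt) | (up[i-1] >> (64 - cnt))
C
C Only one shift count fits in cl at a time, so the code toggles
C between cnt and 64-cnt with neg: x86 shifts use cl mod 64, and
C -cnt = 64 - cnt (mod 64).  The entry blocks below dispatch on
C n mod 4 to line the remaining work up with the 4-way unrolled loop.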
L(gen):	neg	R32(%rcx)		C put rsh count in cl
	mov	-8(up,n,8), %rax
	shr	R8(%rcx), %rax		C function return value

	neg	R32(%rcx)		C put lsh count in cl
	lea	1(n), R32(%r8)
	and	$3, R32(%r8)
	je	L(rlx)			C jump for n = 3, 7, 11, ...

	dec	R32(%r8)
	jne	L(1)
C	n = 4, 8, 12, ...
	mov	-8(up,n,8), %r10
	shl	R8(%rcx), %r10
	neg	R32(%rcx)		C put rsh count in cl
	mov	-16(up,n,8), %r8
	shr	R8(%rcx), %r8
	or	%r8, %r10
	mov	%r10, -8(rp,n,8)
	dec	n
	jmp	L(rll)

L(1):	dec	R32(%r8)
	je	L(1x)			C jump for n = 1, 5, 9, 13, ...
C	n = 2, 6, 10, 14, ...
	mov	-8(up,n,8), %r10
	shl	R8(%rcx), %r10
	neg	R32(%rcx)		C put rsh count in cl
	mov	-16(up,n,8), %r8
	shr	R8(%rcx), %r8
	or	%r8, %r10
	mov	%r10, -8(rp,n,8)
	dec	n
	neg	R32(%rcx)		C put lsh count in cl
L(1x):
	cmp	$1, n
	je	L(ast)
	mov	-8(up,n,8), %r10
	shl	R8(%rcx), %r10
	mov	-16(up,n,8), %r11
	shl	R8(%rcx), %r11
	neg	R32(%rcx)		C put rsh count in cl
	mov	-16(up,n,8), %r8
	mov	-24(up,n,8), %r9
	shr	R8(%rcx), %r8
	or	%r8, %r10
	shr	R8(%rcx), %r9
	or	%r9, %r11
	mov	%r10, -8(rp,n,8)
	mov	%r11, -16(rp,n,8)
	sub	$2, n

L(rll):	neg	R32(%rcx)		C put lsh count in cl
L(rlx):	mov	-8(up,n,8), %r10
	shl	R8(%rcx), %r10
	mov	-16(up,n,8), %r11
	shl	R8(%rcx), %r11

	sub	$4, n			C				      4
	jb	L(end)			C				      2
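C Software-pipelined main loop, four limbs per iteration: each half
C finishes two output limbs whose first shift parts were computed
C earlier, starts the opposite shifts for the next pair, and flips
C cl between the two counts with neg.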
	ALIGN(16)
L(top):
	C finish stuff from lsh block
	neg	R32(%rcx)		C put rsh count in cl
	mov	16(up,n,8), %r8
	mov	8(up,n,8), %r9
	shr	R8(%rcx), %r8
	or	%r8, %r10
	shr	R8(%rcx), %r9
	or	%r9, %r11
	mov	%r10, 24(rp,n,8)
	mov	%r11, 16(rp,n,8)
	C start two new rsh
	mov	0(up,n,8), %r8
	mov	-8(up,n,8), %r9
	shr	R8(%rcx), %r8
	shr	R8(%rcx), %r9

	C finish stuff from rsh block
	neg	R32(%rcx)		C put lsh count in cl
	mov	8(up,n,8), %r10
	mov	0(up,n,8), %r11
	shl	R8(%rcx), %r10
	or	%r10, %r8
	shl	R8(%rcx), %r11
	or	%r11, %r9
	mov	%r8, 8(rp,n,8)
	mov	%r9, 0(rp,n,8)
	C start two new lsh
	mov	-8(up,n,8), %r10
	mov	-16(up,n,8), %r11
	shl	R8(%rcx), %r10
	shl	R8(%rcx), %r11

	sub	$4, n
	jae	L(top)			C				      2
L(end):
	neg	R32(%rcx)		C put rsh count in cl
	mov	8(up), %r8
	shr	R8(%rcx), %r8
	or	%r8, %r10
	mov	(up), %r9
	shr	R8(%rcx), %r9
	or	%r9, %r11
	mov	%r10, 16(rp)
	mov	%r11, 8(rp)

	neg	R32(%rcx)		C put lsh count in cl
L(ast):	mov	(up), %r10
	shl	R8(%rcx), %r10
	mov	%r10, (rp)
	FUNC_EXIT()
	ret
EPILOGUE()