;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    .globl vp8_sad16x16_ppc
    .globl vp8_sad16x8_ppc
    .globl vp8_sad8x16_ppc
    .globl vp8_sad8x8_ppc
    .globl vp8_sad4x4_ppc

.macro load_aligned_16 V R O
    lvsl    v3,  0, \R          ;# permute vector for alignment

    lvx     v1,  0, \R
    lvx     v2, \O, \R

    vperm   \V, v1, v2, v3
.endm
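
;# Rough C equivalent of load_aligned_16 (illustrative sketch only, not
;# part of the build), using AltiVec intrinsics from <altivec.h>:
;#
;#   vector unsigned char load_aligned_16(const unsigned char *p)
;#   {
;#       vector unsigned char perm = vec_lvsl(0, p);  /* alignment permute */
;#       vector unsigned char lo   = vec_ld(0, p);    /* 16B at p & ~15    */
;#       vector unsigned char hi   = vec_ld(16, p);   /* next 16B          */
;#       return vec_perm(lo, hi, perm);               /* shift into place  */
;#   }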

.macro prologue
    mfspr   r11, 256            ;# get old VRSAVE
    oris    r12, r11, 0xffc0
    mtspr   256, r12            ;# set VRSAVE

    stwu    r1, -32(r1)         ;# create space on the stack

    li      r10, 16             ;# offset for the second vector load

    vspltisw v8, 0              ;# zero out total to start
.endm

.macro epilogue
    addi    r1, r1, 32          ;# recover stack

    mtspr   256, r11            ;# reset old VRSAVE
.endm

.macro SAD_16
    ;# v6 = abs (v4 - v5)
    vsububs v6, v4, v5
    vsububs v7, v5, v4
    vor     v6, v6, v7

    ;# v8 += abs (v4 - v5)
    vsum4ubs v8, v6, v8
.endm
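
;# Rough C equivalent of SAD_16 (illustrative sketch only): the two
;# saturating subtracts OR'd together yield |a - b| per byte, and
;# vec_sum4s (vsum4ubs) adds each group of four byte differences into
;# the corresponding word lane of the running total:
;#
;#   vector unsigned int sad_16(vector unsigned char a,
;#                              vector unsigned char b,
;#                              vector unsigned int total)
;#   {
;#       vector unsigned char d = vec_or(vec_subs(a, b), vec_subs(b, a));
;#       return vec_sum4s(d, total);
;#   }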

.macro sad_16_loop loop_label
    lvsl    v3,  0, r5          ;# only needs to be done once per block

    ;# preload a line of data before getting into the loop
    lvx     v4, 0, r3
    lvx     v1,  0, r5
    lvx     v2, r10, r5

    add     r5, r5, r6
    add     r3, r3, r4

    vperm   v5, v1, v2, v3

    .align 4
\loop_label:
    ;# compute difference on first row
    vsububs v6, v4, v5
    vsububs v7, v5, v4

    ;# load up next set of data
    lvx     v9, 0, r3
    lvx     v1,  0, r5
    lvx     v2, r10, r5

    ;# perform abs() of difference
    vor     v6, v6, v7
    add     r3, r3, r4

    ;# add to the running tally
    vsum4ubs v8, v6, v8

    ;# now onto the next line
    vperm   v5, v1, v2, v3
    add     r5, r5, r6
    lvx     v4, 0, r3

    ;# compute difference on second row
    vsububs v6, v9, v5
    lvx     v1,  0, r5
    vsububs v7, v5, v9
    lvx     v2, r10, r5
    vor     v6, v6, v7
    add     r3, r3, r4
    vsum4ubs v8, v6, v8
    vperm   v5, v1, v2, v3
    add     r5, r5, r6

    bdnz    \loop_label

    vspltisw v7, 0

    vsumsws v8, v8, v7

    stvx    v8, 0, r1
    lwz     r3, 12(r1)
.endm
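
;# The loop above is software pipelined: each iteration computes the SAD
;# of two rows while the loads for the following rows are in flight. The
;# final reduction folds the four word lanes of v8 into one scalar;
;# roughly, in C (illustrative sketch only):
;#
;#   unsigned int reduce(vector unsigned int total)
;#   {
;#       unsigned int buf[4] __attribute__((aligned(16)));
;#       vector signed int zero = vec_splat_s32(0);
;#       vec_st(vec_sums((vector signed int)total, zero), 0,
;#              (vector signed int *)buf);
;#       return buf[3];   /* vsumsws leaves the sum in word element 3,
;#                           hence the lwz from offset 12 above */
;#   }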

.macro sad_8_loop loop_label
    .align 4
\loop_label:
    ;# neither input needs to be 16-byte aligned; load_aligned_16 copes.
    load_aligned_16 v4, r3, r10
    load_aligned_16 v5, r5, r10

    ;# move onto the next line
    add     r3, r3, r4
    add     r5, r5, r6

    ;# alignment of either input is again handled by load_aligned_16.
    load_aligned_16 v6, r3, r10
    load_aligned_16 v7, r5, r10

    ;# move onto the next line
    add     r3, r3, r4
    add     r5, r5, r6

    vmrghb  v4, v4, v6
    vmrghb  v5, v5, v7

    SAD_16

    bdnz    \loop_label

    vspltisw v7, 0

    vsumsws v8, v8, v7

    stvx    v8, 0, r1
    lwz     r3, 12(r1)
.endm
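
;# For the 8-pixel-wide blocks, vmrghb interleaves the high 8 bytes of
;# two consecutive rows into a single 16-byte vector. Source and
;# reference are interleaved the same way, so corresponding bytes still
;# line up and one SAD_16 covers two rows per loop iteration.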

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  src_stride
;# r5 unsigned char *ref_ptr
;# r6 int  ref_stride
;#
;# r3 return value
vp8_sad16x16_ppc:

    prologue

    li      r9, 8
    mtctr   r9

    sad_16_loop sad16x16_loop

    epilogue

    blr
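
;# Presumed C prototype, inferred from the register comments above (an
;# assumption; the actual declaration lives elsewhere in the tree):
;#
;#   unsigned int vp8_sad16x16_ppc(unsigned char *src_ptr, int src_stride,
;#                                 unsigned char *ref_ptr, int ref_stride);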

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  src_stride
;# r5 unsigned char *ref_ptr
;# r6 int  ref_stride
;#
;# r3 return value
vp8_sad16x8_ppc:

    prologue

    li      r9, 4
    mtctr   r9

    sad_16_loop sad16x8_loop

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  src_stride
;# r5 unsigned char *ref_ptr
;# r6 int  ref_stride
;#
;# r3 return value
vp8_sad8x16_ppc:

    prologue

    li      r9, 8
    mtctr   r9

    sad_8_loop sad8x16_loop

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  src_stride
;# r5 unsigned char *ref_ptr
;# r6 int  ref_stride
;#
;# r3 return value
vp8_sad8x8_ppc:

    prologue

    li      r9, 4
    mtctr   r9

    sad_8_loop sad8x8_loop

    epilogue

    blr

.macro transfer_4x4 I P
    lwz     r0, 0(\I)
    add     \I, \I, \P

    lwz     r7, 0(\I)
    add     \I, \I, \P

    lwz     r8, 0(\I)
    add     \I, \I, \P

    lwz     r9, 0(\I)

    stw     r0,  0(r1)
    stw     r7,  4(r1)
    stw     r8,  8(r1)
    stw     r9, 12(r1)
.endm
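
;# Rough C equivalent of transfer_4x4 (illustrative sketch only,
;# assuming memcpy from <string.h>): gather four 4-byte rows into one
;# contiguous 16-byte buffer so a single vector load can fetch them:
;#
;#   void transfer_4x4(const unsigned char *p, int stride,
;#                     unsigned char buf[16])
;#   {
;#       int i;
;#       for (i = 0; i < 4; i++)
;#           memcpy(buf + 4 * i, p + (long)i * stride, 4);
;#   }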

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  src_stride
;# r5 unsigned char *ref_ptr
;# r6 int  ref_stride
;#
;# r3 return value
vp8_sad4x4_ppc:

    prologue

    transfer_4x4 r3, r4
    lvx     v4, 0, r1

    transfer_4x4 r5, r6
    lvx     v5, 0, r1

    vspltisw v8, 0              ;# zero out total to start

    ;# v6 = abs (v4 - v5)
    vsububs v6, v4, v5
    vsububs v7, v5, v4
    vor     v6, v6, v7

    ;# v7 = sum of abs (v4 - v5); v8 is still zero here, serving as both
    ;# the vsum4ubs accumulator and the vsumsws addend
    vsum4ubs v7, v6, v8
    vsumsws v7, v7, v8

    stvx    v7, 0, r1
    lwz     r3, 12(r1)

    epilogue

    blr