;# vp8/encoder/ppc/variance_altivec.asm
;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    .globl vp8_get8x8var_ppc
    .globl vp8_get16x16var_ppc
    .globl vp8_mse16x16_ppc
    .globl vp8_variance16x16_ppc
    .globl vp8_variance16x8_ppc
    .globl vp8_variance8x16_ppc
    .globl vp8_variance8x8_ppc
    .globl vp8_variance4x4_ppc

.macro load_aligned_16 V R O
    lvsl    v3,  0, \R          ;# permute vector for alignment

    lvx     v1,  0, \R
    lvx     v2, \O, \R

    vperm   \V, v1, v2, v3
.endm
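
;# Note: the lvsl/lvx/vperm sequence above is the usual AltiVec idiom for an
;# unaligned 16-byte load: lvsl builds a permute control vector from the low
;# bits of the address, the two lvx instructions fetch the aligned quadwords
;# that straddle it, and vperm selects the 16 bytes starting at the unaligned
;# address.  The net effect is roughly V = 16 unaligned bytes at R.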

.macro prologue
    mfspr   r11, 256            ;# get old VRSAVE
    oris    r12, r11, 0xffc0
    mtspr   256, r12            ;# set VRSAVE

    stwu    r1, -32(r1)         ;# create space on the stack

    li      r10, 16             ;# load offset and loop counter

    vspltisw v7, 0              ;# zero for merging
    vspltisw v8, 0              ;# zero out total to start
    vspltisw v9, 0              ;# zero out total for dif^2
.endm

.macro epilogue
    addi    r1, r1, 32          ;# recover stack

    mtspr   256, r11            ;# reset old VRSAVE
.endm
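
;# Note: the prologue/epilogue pair flags the vector registers used here as
;# live in VRSAVE (restoring the old value on exit) and carves out 32 bytes
;# of stack, which the routines below use as a scratch buffer for moving
;# vector results into general registers (stvx followed by lwz).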

.macro compute_sum_sse
    ;# Compute sum first.  Unpack so that the signed
    ;#  subtract can be used; only a halfword signed
    ;#  subtract is available.  Do high, then low.
    vmrghb  v2, v7, v4
    vmrghb  v3, v7, v5
    vsubshs v2, v2, v3
    vsum4shs v8, v2, v8

    vmrglb  v2, v7, v4
    vmrglb  v3, v7, v5
    vsubshs v2, v2, v3
    vsum4shs v8, v2, v8

    ;# Now compute sse.
    vsububs v2, v4, v5
    vsububs v3, v5, v4
    vor     v2, v2, v3

    vmsumubm v9, v2, v2, v9
.endm
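
;# compute_sum_sse accumulates, for one 16-byte row in v4 (src) and v5 (ref),
;# roughly the following (scalar sketch; assumes the v8/v9 accumulators start
;# at zero and the saturating adds never actually saturate):
;#   for (i = 0; i < 16; i++) {
;#       sum += src[i] - ref[i];                        ;# gathered in v8
;#       sse += (src[i] - ref[i]) * (src[i] - ref[i]);  ;# gathered in v9
;#   }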

.macro variance_16 DS loop_label store_sum
\loop_label:
    ;# only one of the inputs should need to be aligned.
    load_aligned_16 v4, r3, r10
    load_aligned_16 v5, r5, r10

    ;# move onto the next line
    add     r3, r3, r4
    add     r5, r5, r6

    compute_sum_sse

    bdnz    \loop_label

    vsumsws v8, v8, v7
    vsumsws v9, v9, v7

    stvx    v8, 0, r1
    lwz     r3, 12(r1)

    stvx    v9, 0, r1
    lwz     r4, 12(r1)

.if \store_sum
    stw     r3, 0(r8)           ;# sum
.endif
    stw     r4, 0(r7)           ;# sse

    mullw   r3, r3, r3          ;# sum*sum
    srawi   r3, r3, \DS         ;# (sum*sum) >> DS
    subf    r3, r3, r4          ;# sse - ((sum*sum) >> DS)
.endm
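
;# variance_16 walks CTR rows of 16 pixels and leaves in r3 the usual
;# variance estimate
;#   sse - ((sum * sum) >> DS)
;# where DS is log2 of the pixel count of the block (8 for 16x16, 7 for
;# 16x8).  The caller loads CTR with the row count before expanding the
;# macro.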

.macro variance_8 DS loop_label store_sum
\loop_label:
    ;# only one of the inputs should need to be aligned.
    load_aligned_16 v4, r3, r10
    load_aligned_16 v5, r5, r10

    ;# move onto the next line
    add     r3, r3, r4
    add     r5, r5, r6

    ;# only one of the inputs should need to be aligned.
    load_aligned_16 v6, r3, r10
    load_aligned_16 v0, r5, r10

    ;# move onto the next line
    add     r3, r3, r4
    add     r5, r5, r6

    vmrghb  v4, v4, v6
    vmrghb  v5, v5, v0

    compute_sum_sse

    bdnz    \loop_label

    vsumsws v8, v8, v7
    vsumsws v9, v9, v7

    stvx    v8, 0, r1
    lwz     r3, 12(r1)

    stvx    v9, 0, r1
    lwz     r4, 12(r1)

.if \store_sum
    stw     r3, 0(r8)           ;# sum
.endif
    stw     r4, 0(r7)           ;# sse

    mullw   r3, r3, r3          ;# sum*sum
    srawi   r3, r3, \DS         ;# (sum*sum) >> DS
    subf    r3, r3, r4          ;# sse - ((sum*sum) >> DS)
.endm
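
;# variance_8 is the same computation for 8-pixel-wide blocks: each loop
;# iteration reads two rows and merges them into a single 16-byte vector
;# with vmrghb, so CTR is set to half the row count (4 for 8x8, 8 for 8x16)
;# and DS is again log2 of the pixel count (6 and 7 respectively).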

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *SSE
;# r8 int *Sum
;#
;# r3 return value
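;#
;# Returns the 8x8 variance in r3 and also writes the raw sum and sse
;# through the Sum/SSE pointers (store_sum is 1 here); the loop counter is
;# 4 because variance_8 consumes two rows per iteration.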
vp8_get8x8var_ppc:

    prologue

    li      r9, 4
    mtctr   r9

    variance_8 6, get8x8var_loop, 1

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *SSE
;# r8 int *Sum
;#
;# r3 return value
vp8_get16x16var_ppc:

    prologue

    mtctr   r10

    variance_16 8, get16x16var_loop, 1

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *sse
;#
;# r3 return value
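;#
;# Unlike the variance routines, this one only accumulates the sum of
;# squared differences: nothing is subtracted, and the same value is both
;# stored through *sse and returned in r3.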
vp8_mse16x16_ppc:
    prologue

    mtctr   r10

mse16x16_loop:
    ;# only one of the inputs should need to be aligned.
    load_aligned_16 v4, r3, r10
    load_aligned_16 v5, r5, r10

    ;# move onto the next line
    add     r3, r3, r4
    add     r5, r5, r6

    ;# Now compute sse.
    vsububs v2, v4, v5
    vsububs v3, v5, v4
    vor     v2, v2, v3

    vmsumubm v9, v2, v2, v9

    bdnz    mse16x16_loop

    vsumsws v9, v9, v7

    stvx    v9, 0, r1
    lwz     r3, 12(r1)

    stw     r3, 0(r7)           ;# sse

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *sse
;#
;# r3 return value
vp8_variance16x16_ppc:

    prologue

    mtctr   r10

    variance_16 8, variance16x16_loop, 0

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *sse
;#
;# r3 return value
vp8_variance16x8_ppc:

    prologue

    li      r9, 8
    mtctr   r9

    variance_16 7, variance16x8_loop, 0

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *sse
;#
;# r3 return value
vp8_variance8x16_ppc:

    prologue

    li      r9, 8
    mtctr   r9

    variance_8 7, variance8x16_loop, 0

    epilogue

    blr

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *sse
;#
;# r3 return value
vp8_variance8x8_ppc:

    prologue

    li      r9, 4
    mtctr   r9

    variance_8 6, variance8x8_loop, 0

    epilogue

    blr

.macro transfer_4x4 I P
    lwz     r0, 0(\I)
    add     \I, \I, \P

    lwz     r10,0(\I)
    add     \I, \I, \P

    lwz     r8, 0(\I)
    add     \I, \I, \P

    lwz     r9, 0(\I)

    stw     r0,  0(r1)
    stw     r10, 4(r1)
    stw     r8,  8(r1)
    stw     r9, 12(r1)
.endm
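
;# transfer_4x4 copies four 4-byte rows into the 16-byte stack buffer so
;# that an entire 4x4 block fits in one vector register and can be handled
;# by a single pass of compute_sum_sse.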

    .align 2
;# r3 unsigned char *src_ptr
;# r4 int  source_stride
;# r5 unsigned char *ref_ptr
;# r6 int  recon_stride
;# r7 unsigned int *sse
;#
;# r3 return value
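;#
;# The 4x4 case gathers both blocks into vector registers via transfer_4x4
;# and runs compute_sum_sse once; the shift of 4 below is log2(16), the
;# pixel count of a 4x4 block.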
vp8_variance4x4_ppc:

    prologue

    transfer_4x4 r3, r4
    lvx     v4, 0, r1

    transfer_4x4 r5, r6
    lvx     v5, 0, r1

    compute_sum_sse

    vsumsws v8, v8, v7
    vsumsws v9, v9, v7

    stvx    v8, 0, r1
    lwz     r3, 12(r1)

    stvx    v9, 0, r1
    lwz     r4, 12(r1)

    stw     r4, 0(r7)           ;# sse

    mullw   r3, r3, r3          ;# sum*sum
    srawi   r3, r3, 4           ;# (sum*sum) >> 4
    subf    r3, r3, r4          ;# sse - ((sum*sum) >> 4)

    epilogue

    blr