author     clang-format <noreply@google.com>  2016-07-22 20:07:03 -0700
committer  James Zern <jzern@google.com>      2016-07-25 14:14:19 -0700
commit     099bd7f07e510a4ea26acd9c1f2820be7785a510 (patch)
tree       cfbf7f94ce8365ff7271b6365b4e8ef3b0f6440e /vpx_dsp/inv_txfm.c
parent     82070ae9393b1e79559d81fcf1aa89c2e4aa58ee (diff)
vpx_dsp: apply clang-format
Change-Id: I3ea3e77364879928bd916f2b0a7838073ade5975
Diffstat (limited to 'vpx_dsp/inv_txfm.c')
-rw-r--r--  vpx_dsp/inv_txfm.c | 316
1 file changed, 146 insertions(+), 170 deletions(-)
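The change is purely mechanical; three clang-format rules account for most of the churn in the hunks below (plus removal of hand-aligned operands around operators). A distilled sketch of the output style, illustrative only and not part of the commit; the typedef is a placeholder for the real one in vpx_dsp/inv_txfm.h:

typedef int tran_low_t; /* placeholder for the libvpx type */
static void style_example(tran_low_t *temp_in, const tran_low_t *out, int i) {
  int j;
  tran_low_t buf[32] = { 0 };  /* brace initializers gain inner spaces: {0} -> { 0 } */
  (void)buf;                   /* void casts lose their space: (void) bd -> (void)bd */
  /* single-statement loop bodies fold onto the for line */
  for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
}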
diff --git a/vpx_dsp/inv_txfm.c b/vpx_dsp/inv_txfm.c
index e18d31d7a..1526663d8 100644
--- a/vpx_dsp/inv_txfm.c
+++ b/vpx_dsp/inv_txfm.c
@@ -15,8 +15,8 @@
#include "vpx_dsp/inv_txfm.h"
void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
-/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
- 0.5 shifts per pixel. */
+ /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
+ 0.5 shifts per pixel. */
int i;
tran_low_t output[16];
tran_high_t a1, b1, c1, d1, e1;
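The "3.5 adds, 0.5 shifts per pixel" figure comes from the 1-D butterfly this function applies: 7 add/subtract operations and one shift per 4-point transform, run once over rows and once over columns, i.e. 14 adds and 2 shifts for every 4 pixels. A sketch of that 4-point kernel mirroring the function body (a hypothetical helper with the first pass's dequantization shift omitted; libvpx keeps the code inline; types and WRAPLOW per vpx_dsp/inv_txfm.h):

static void iwht4_1d(const tran_low_t *ip, tran_low_t *op) {
  tran_high_t a1 = ip[0], c1 = ip[1], d1 = ip[2], b1 = ip[3];
  tran_high_t e1;
  a1 += c1;             /* 7 adds/subs ...                */
  d1 -= b1;
  e1 = (a1 - d1) >> 1;  /* ... and 1 shift per 4 samples  */
  b1 = e1 - b1;
  c1 = e1 - c1;
  a1 -= b1;
  d1 += c1;
  op[0] = WRAPLOW(a1);
  op[1] = WRAPLOW(b1);
  op[2] = WRAPLOW(c1);
  op[3] = WRAPLOW(d1);
}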
@@ -127,8 +127,7 @@ void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Columns
for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j)
- temp_in[j] = out[j * 4 + i];
+ for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
idct4_c(temp_in, temp_out);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -223,8 +222,7 @@ void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Then transform columns
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = out[j * 8 + i];
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -240,8 +238,7 @@ void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
a1 = ROUND_POWER_OF_TWO(out, 5);
for (j = 0; j < 8; ++j) {
- for (i = 0; i < 8; ++i)
- dest[i] = clip_pixel_add(dest[i], a1);
+ for (i = 0; i < 8; ++i) dest[i] = clip_pixel_add(dest[i], a1);
dest += stride;
}
}
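For this DC-only path the whole 8x8 inverse transform collapses to one constant. With cospi_16_64 = 11585, i.e. roughly 2^14 cos(pi/4), and dct_const_round_shift() rounding away 14 bits after each multiply, the two multiplies halve the DC coefficient and ROUND_POWER_OF_TWO(out, 5) supplies the 8x8 output normalization; roughly:

  a_1 \approx \mathrm{DC} \cdot \left(\tfrac{11585}{2^{14}}\right)^{2} \cdot 2^{-5} \approx \tfrac{\mathrm{DC}}{64}

The 16x16 and 32x32 DC paths further down are the same except the final shift is 6, giving approximately DC/128.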
@@ -296,20 +293,20 @@ void iadst8_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t x7 = input[6];
if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
- output[0] = output[1] = output[2] = output[3] = output[4]
- = output[5] = output[6] = output[7] = 0;
+ output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+ output[6] = output[7] = 0;
return;
}
// stage 1
- s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1);
- s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1);
+ s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1);
+ s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1);
s2 = (int)(cospi_10_64 * x2 + cospi_22_64 * x3);
s3 = (int)(cospi_22_64 * x2 - cospi_10_64 * x3);
s4 = (int)(cospi_18_64 * x4 + cospi_14_64 * x5);
s5 = (int)(cospi_14_64 * x4 - cospi_18_64 * x5);
- s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7);
- s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7);
+ s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7);
+ s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7);
x0 = WRAPLOW(dct_const_round_shift(s0 + s4));
x1 = WRAPLOW(dct_const_round_shift(s1 + s5));
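Each stage-1 pair above is a 2x2 butterfly in Q14 fixed point: cospi_k_64 is roughly 2^14 cos(k*pi/64), and since cos(30pi/64) = sin(2pi/64), the first pair can be read as (a sketch of the underlying math, with theta = 2pi/64):

  s_0 = 2^{14}\,(x_0\cos\theta + x_1\sin\theta), \qquad s_1 = 2^{14}\,(x_0\sin\theta - x_1\cos\theta)

after which dct_const_round_shift() removes the 2^14 scale with rounding.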
@@ -376,8 +373,7 @@ void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Then transform columns
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = out[j * 8 + i];
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -391,22 +387,22 @@ void idct16_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t temp1, temp2;
// stage 1
- step1[0] = input[0/2];
- step1[1] = input[16/2];
- step1[2] = input[8/2];
- step1[3] = input[24/2];
- step1[4] = input[4/2];
- step1[5] = input[20/2];
- step1[6] = input[12/2];
- step1[7] = input[28/2];
- step1[8] = input[2/2];
- step1[9] = input[18/2];
- step1[10] = input[10/2];
- step1[11] = input[26/2];
- step1[12] = input[6/2];
- step1[13] = input[22/2];
- step1[14] = input[14/2];
- step1[15] = input[30/2];
+ step1[0] = input[0 / 2];
+ step1[1] = input[16 / 2];
+ step1[2] = input[8 / 2];
+ step1[3] = input[24 / 2];
+ step1[4] = input[4 / 2];
+ step1[5] = input[20 / 2];
+ step1[6] = input[12 / 2];
+ step1[7] = input[28 / 2];
+ step1[8] = input[2 / 2];
+ step1[9] = input[18 / 2];
+ step1[10] = input[10 / 2];
+ step1[11] = input[26 / 2];
+ step1[12] = input[6 / 2];
+ step1[13] = input[22 / 2];
+ step1[14] = input[14 / 2];
+ step1[15] = input[30 / 2];
// stage 2
step2[0] = step1[0];
@@ -567,8 +563,7 @@ void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
// Then transform columns
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = out[j * 16 + i];
+ for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -598,21 +593,20 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t x14 = input[1];
tran_high_t x15 = input[14];
- if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
- | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
- output[0] = output[1] = output[2] = output[3] = output[4]
- = output[5] = output[6] = output[7] = output[8]
- = output[9] = output[10] = output[11] = output[12]
- = output[13] = output[14] = output[15] = 0;
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+ x13 | x14 | x15)) {
+ output[0] = output[1] = output[2] = output[3] = output[4] = output[5] =
+ output[6] = output[7] = output[8] = output[9] = output[10] =
+ output[11] = output[12] = output[13] = output[14] = output[15] = 0;
return;
}
// stage 1
- s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
- s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
- s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -621,9 +615,9 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
- s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
- s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
x0 = WRAPLOW(dct_const_round_shift(s0 + s8));
x1 = WRAPLOW(dct_const_round_shift(s1 + s9));
@@ -651,14 +645,14 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
s5 = x5;
s6 = x6;
s7 = x7;
- s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
- s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
- s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
- s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
- s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
- s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
- s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
- s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
x0 = WRAPLOW(s0 + s4);
x1 = WRAPLOW(s1 + s5);
@@ -682,18 +676,18 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
s1 = x1;
s2 = x2;
s3 = x3;
- s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
- s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
- s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
s8 = x8;
s9 = x9;
s10 = x10;
s11 = x11;
- s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
- s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
- s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+ s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
x0 = WRAPLOW(s0 + s2);
x1 = WRAPLOW(s1 + s3);
@@ -713,13 +707,13 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) {
x15 = WRAPLOW(dct_const_round_shift(s13 - s15));
// stage 4
- s2 = (- cospi_16_64) * (x2 + x3);
+ s2 = (-cospi_16_64) * (x2 + x3);
s3 = cospi_16_64 * (x2 - x3);
s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (- x6 + x7);
+ s7 = cospi_16_64 * (-x6 + x7);
s10 = cospi_16_64 * (x10 + x11);
- s11 = cospi_16_64 * (- x10 + x11);
- s14 = (- cospi_16_64) * (x14 + x15);
+ s11 = cospi_16_64 * (-x10 + x11);
+ s14 = (-cospi_16_64) * (x14 + x15);
s15 = cospi_16_64 * (x14 - x15);
x2 = WRAPLOW(dct_const_round_shift(s2));
@@ -766,8 +760,7 @@ void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
// Then transform columns
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = out[j*16 + i];
+ for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -783,8 +776,7 @@ void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
a1 = ROUND_POWER_OF_TWO(out, 6);
for (j = 0; j < 16; ++j) {
- for (i = 0; i < 16; ++i)
- dest[i] = clip_pixel_add(dest[i], a1);
+ for (i = 0; i < 16; ++i) dest[i] = clip_pixel_add(dest[i], a1);
dest += stride;
}
}
@@ -1166,8 +1158,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
// Rows
for (i = 0; i < 32; ++i) {
int16_t zero_coeff[16];
- for (j = 0; j < 16; ++j)
- zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+ for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
for (j = 0; j < 8; ++j)
zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
for (j = 0; j < 4; ++j)
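The zero_coeff tree in this hunk tests each 32-coefficient row for all-zeros with five pairwise OR passes (log2 of 32) instead of 32 comparisons, so all-zero rows can skip idct32_c entirely. The same reduction as a standalone helper, a hypothetical refactoring rather than part of the commit (types per vpx_dsp/inv_txfm.h):

static int row_is_all_zero(const tran_low_t *row) {
  tran_low_t acc[16];
  int j;
  /* fold 32 -> 16 -> 8 -> 4 -> 2 values by pairwise OR */
  for (j = 0; j < 16; ++j) acc[j] = row[2 * j] | row[2 * j + 1];
  for (j = 0; j < 8; ++j) acc[j] = acc[2 * j] | acc[2 * j + 1];
  for (j = 0; j < 4; ++j) acc[j] = acc[2 * j] | acc[2 * j + 1];
  for (j = 0; j < 2; ++j) acc[j] = acc[2 * j] | acc[2 * j + 1];
  return !(acc[0] | acc[1]); /* nonzero only if every coefficient was zero */
}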
@@ -1185,8 +1176,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
// Columns
for (i = 0; i < 32; ++i) {
- for (j = 0; j < 32; ++j)
- temp_in[j] = out[j * 32 + i];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1197,7 +1187,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
- tran_low_t out[32 * 32] = {0};
+ tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
int i, j;
tran_low_t temp_in[32], temp_out[32];
@@ -1212,8 +1202,7 @@ void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
// Columns
for (i = 0; i < 32; ++i) {
- for (j = 0; j < 32; ++j)
- temp_in[j] = out[j * 32 + i];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1224,7 +1213,7 @@ void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
- tran_low_t out[32 * 32] = {0};
+ tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
int i, j;
tran_low_t temp_in[32], temp_out[32];
@@ -1239,8 +1228,7 @@ void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
// Columns
for (i = 0; i < 32; ++i) {
- for (j = 0; j < 32; ++j)
- temp_in[j] = out[j * 32 + i];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
@@ -1258,8 +1246,7 @@ void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
a1 = ROUND_POWER_OF_TWO(out, 6);
for (j = 0; j < 32; ++j) {
- for (i = 0; i < 32; ++i)
- dest[i] = clip_pixel_add(dest[i], a1);
+ for (i = 0; i < 32; ++i) dest[i] = clip_pixel_add(dest[i], a1);
dest += stride;
}
}
@@ -1309,14 +1296,14 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
c1 = e1 - c1;
a1 -= b1;
d1 += c1;
- dest[stride * 0] = highbd_clip_pixel_add(dest[stride * 0],
- HIGHBD_WRAPLOW(a1, bd), bd);
- dest[stride * 1] = highbd_clip_pixel_add(dest[stride * 1],
- HIGHBD_WRAPLOW(b1, bd), bd);
- dest[stride * 2] = highbd_clip_pixel_add(dest[stride * 2],
- HIGHBD_WRAPLOW(c1, bd), bd);
- dest[stride * 3] = highbd_clip_pixel_add(dest[stride * 3],
- HIGHBD_WRAPLOW(d1, bd), bd);
+ dest[stride * 0] =
+ highbd_clip_pixel_add(dest[stride * 0], HIGHBD_WRAPLOW(a1, bd), bd);
+ dest[stride * 1] =
+ highbd_clip_pixel_add(dest[stride * 1], HIGHBD_WRAPLOW(b1, bd), bd);
+ dest[stride * 2] =
+ highbd_clip_pixel_add(dest[stride * 2], HIGHBD_WRAPLOW(c1, bd), bd);
+ dest[stride * 3] =
+ highbd_clip_pixel_add(dest[stride * 3], HIGHBD_WRAPLOW(d1, bd), bd);
ip++;
dest++;
@@ -1331,7 +1318,7 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
const tran_low_t *ip = in;
tran_low_t *op = tmp;
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- (void) bd;
+ (void)bd;
a1 = ip[0] >> UNIT_QUANT_SHIFT;
e1 = a1 >> 1;
@@ -1343,14 +1330,14 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
for (i = 0; i < 4; i++) {
e1 = ip[0] >> 1;
a1 = ip[0] - e1;
- dest[dest_stride * 0] = highbd_clip_pixel_add(
- dest[dest_stride * 0], a1, bd);
- dest[dest_stride * 1] = highbd_clip_pixel_add(
- dest[dest_stride * 1], e1, bd);
- dest[dest_stride * 2] = highbd_clip_pixel_add(
- dest[dest_stride * 2], e1, bd);
- dest[dest_stride * 3] = highbd_clip_pixel_add(
- dest[dest_stride * 3], e1, bd);
+ dest[dest_stride * 0] =
+ highbd_clip_pixel_add(dest[dest_stride * 0], a1, bd);
+ dest[dest_stride * 1] =
+ highbd_clip_pixel_add(dest[dest_stride * 1], e1, bd);
+ dest[dest_stride * 2] =
+ highbd_clip_pixel_add(dest[dest_stride * 2], e1, bd);
+ dest[dest_stride * 3] =
+ highbd_clip_pixel_add(dest[dest_stride * 3], e1, bd);
ip++;
dest++;
}
@@ -1359,7 +1346,7 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step[4];
tran_high_t temp1, temp2;
- (void) bd;
+ (void)bd;
// stage 1
temp1 = (input[0] + input[2]) * cospi_16_64;
temp2 = (input[0] - input[2]) * cospi_16_64;
@@ -1394,8 +1381,7 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
// Columns
for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j)
- temp_in[j] = out[j * 4 + i];
+ for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
vpx_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1408,8 +1394,8 @@ void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
int dest_stride, int bd) {
int i;
tran_high_t a1;
- tran_low_t out = HIGHBD_WRAPLOW(
- highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out =
+ HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd);
@@ -1486,8 +1472,7 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
// Then transform columns.
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = out[j * 8 + i];
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
vpx_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1500,14 +1485,13 @@ void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
tran_high_t a1;
- tran_low_t out = HIGHBD_WRAPLOW(
- highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out =
+ HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd);
a1 = ROUND_POWER_OF_TWO(out, 5);
for (j = 0; j < 8; ++j) {
- for (i = 0; i < 8; ++i)
- dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+ for (i = 0; i < 8; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
dest += stride;
}
}
@@ -1519,7 +1503,7 @@ void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t x1 = input[1];
tran_low_t x2 = input[2];
tran_low_t x3 = input[3];
- (void) bd;
+ (void)bd;
if (!(x0 | x1 | x2 | x3)) {
memset(output, 0, 4 * sizeof(*output));
@@ -1561,7 +1545,7 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t x5 = input[4];
tran_low_t x6 = input[1];
tran_low_t x7 = input[6];
- (void) bd;
+ (void)bd;
if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
memset(output, 0, 8 * sizeof(*output));
@@ -1569,14 +1553,14 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
}
// stage 1
- s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
- s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
- s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
- s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
x0 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s4), bd);
x1 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 + s5), bd);
@@ -1592,10 +1576,10 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
s1 = x1;
s2 = x2;
s3 = x3;
- s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
- s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
- s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
- s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
@@ -1644,8 +1628,7 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
}
// Then transform columns.
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = out[j * 8 + i];
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
vpx_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1657,25 +1640,25 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
- (void) bd;
+ (void)bd;
// stage 1
- step1[0] = input[0/2];
- step1[1] = input[16/2];
- step1[2] = input[8/2];
- step1[3] = input[24/2];
- step1[4] = input[4/2];
- step1[5] = input[20/2];
- step1[6] = input[12/2];
- step1[7] = input[28/2];
- step1[8] = input[2/2];
- step1[9] = input[18/2];
- step1[10] = input[10/2];
- step1[11] = input[26/2];
- step1[12] = input[6/2];
- step1[13] = input[22/2];
- step1[14] = input[14/2];
- step1[15] = input[30/2];
+ step1[0] = input[0 / 2];
+ step1[1] = input[16 / 2];
+ step1[2] = input[8 / 2];
+ step1[3] = input[24 / 2];
+ step1[4] = input[4 / 2];
+ step1[5] = input[20 / 2];
+ step1[6] = input[12 / 2];
+ step1[7] = input[28 / 2];
+ step1[8] = input[2 / 2];
+ step1[9] = input[18 / 2];
+ step1[10] = input[10 / 2];
+ step1[11] = input[26 / 2];
+ step1[12] = input[6 / 2];
+ step1[13] = input[22 / 2];
+ step1[14] = input[14 / 2];
+ step1[15] = input[30 / 2];
// stage 2
step2[0] = step1[0];
@@ -1837,8 +1820,7 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
// Then transform columns.
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = out[j * 16 + i];
+ for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
vpx_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -1867,20 +1849,20 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
tran_low_t x13 = input[12];
tran_low_t x14 = input[1];
tran_low_t x15 = input[14];
- (void) bd;
+ (void)bd;
- if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
- | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+ x13 | x14 | x15)) {
memset(output, 0, 16 * sizeof(*output));
return;
}
// stage 1
- s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
- s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
- s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -1889,9 +1871,9 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
- s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
- s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
x0 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s8), bd);
x1 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 + s9), bd);
@@ -1901,8 +1883,8 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
x5 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 + s13), bd);
x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s6 + s14), bd);
x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s7 + s15), bd);
- x8 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 - s8), bd);
- x9 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 - s9), bd);
+ x8 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 - s8), bd);
+ x9 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 - s9), bd);
x10 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2 - s10), bd);
x11 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3 - s11), bd);
x12 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 - s12), bd);
@@ -1981,13 +1963,13 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
x15 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s13 - s15), bd);
// stage 4
- s2 = (- cospi_16_64) * (x2 + x3);
+ s2 = (-cospi_16_64) * (x2 + x3);
s3 = cospi_16_64 * (x2 - x3);
s6 = cospi_16_64 * (x6 + x7);
s7 = cospi_16_64 * (-x6 + x7);
s10 = cospi_16_64 * (x10 + x11);
s11 = cospi_16_64 * (-x10 + x11);
- s14 = (- cospi_16_64) * (x14 + x15);
+ s14 = (-cospi_16_64) * (x14 + x15);
s15 = cospi_16_64 * (x14 - x15);
x2 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2), bd);
@@ -2035,8 +2017,7 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
// Then transform columns.
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = out[j*16 + i];
+ for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
vpx_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2049,24 +2030,23 @@ void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
int i, j;
tran_high_t a1;
- tran_low_t out = HIGHBD_WRAPLOW(
- highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out =
+ HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd);
a1 = ROUND_POWER_OF_TWO(out, 6);
for (j = 0; j < 16; ++j) {
- for (i = 0; i < 16; ++i)
- dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+ for (i = 0; i < 16; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
dest += stride;
}
}
-static void highbd_idct32_c(const tran_low_t *input,
- tran_low_t *output, int bd) {
+static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output,
+ int bd) {
tran_low_t step1[32], step2[32];
tran_high_t temp1, temp2;
- (void) bd;
+ (void)bd;
// stage 1
step1[0] = input[0];
@@ -2442,8 +2422,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
// Rows
for (i = 0; i < 32; ++i) {
tran_low_t zero_coeff[16];
- for (j = 0; j < 16; ++j)
- zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+ for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
for (j = 0; j < 8; ++j)
zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
for (j = 0; j < 4; ++j)
@@ -2461,8 +2440,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
// Columns
for (i = 0; i < 32; ++i) {
- for (j = 0; j < 32; ++j)
- temp_in[j] = out[j * 32 + i];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
highbd_idct32_c(temp_in, temp_out, bd);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2473,7 +2451,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
- tran_low_t out[32 * 32] = {0};
+ tran_low_t out[32 * 32] = { 0 };
tran_low_t *outptr = out;
int i, j;
tran_low_t temp_in[32], temp_out[32];
@@ -2488,8 +2466,7 @@ void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
}
// Columns
for (i = 0; i < 32; ++i) {
- for (j = 0; j < 32; ++j)
- temp_in[j] = out[j * 32 + i];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
highbd_idct32_c(temp_in, temp_out, bd);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
@@ -2504,14 +2481,13 @@ void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
int a1;
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- tran_low_t out = HIGHBD_WRAPLOW(
- highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out =
+ HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd);
out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd);
a1 = ROUND_POWER_OF_TWO(out, 6);
for (j = 0; j < 32; ++j) {
- for (i = 0; i < 32; ++i)
- dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
+ for (i = 0; i < 32; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
dest += stride;
}
}