path: root/vpx_dsp/x86/inv_txfm_sse2.c
author    Jingning Han <jingning@google.com>  2015-08-03 14:51:10 -0700
committer Jingning Han <jingning@google.com>  2015-08-04 13:46:11 -0700
commit    08a453b9de37c796f52e439da98364923726d095 (patch)
tree      4ceea9ae4b52a2abc0a714f81946d91fc2e3f5db /vpx_dsp/x86/inv_txfm_sse2.c
parent    bfad9d2fe6e3a17e1deff19977c8c0907828bb6f (diff)
Replace vp9_ prefix with vpx_ prefix in vpx_dsp function names
This commit cleans up the function naming convention in vpx_dsp: it replaces the vp9_ prefix on global functions with the vpx_ prefix, and removes the vp9_ prefix from static functions.

Change-Id: I6394359a63b71a51dda01342eec6a3cc08dfeedf
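As an illustrative sketch (not part of this patch), the rename is a pure prefix change for callers: the declaration below is taken verbatim from the diff, while the wrapper reconstruct_4x4 is a hypothetical call site invented here for illustration.

#include <stdint.h>

/* Declaration after this patch; it was vp9_idct4x4_16_add_sse2 before.
   The argument list is unchanged by the rename. */
void vpx_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride);

/* Hypothetical call site: only the function prefix changes. */
static void reconstruct_4x4(const int16_t *coeffs, uint8_t *dst, int stride) {
  /* Before this patch: vp9_idct4x4_16_add_sse2(coeffs, dst, stride); */
  vpx_idct4x4_16_add_sse2(coeffs, dst, stride);
}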
Diffstat (limited to 'vpx_dsp/x86/inv_txfm_sse2.c')
-rw-r--r--  vpx_dsp/x86/inv_txfm_sse2.c | 56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/vpx_dsp/x86/inv_txfm_sse2.c b/vpx_dsp/x86/inv_txfm_sse2.c
index 125fe2281..f3af68f08 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -21,7 +21,7 @@
*(int *)(dest) = _mm_cvtsi128_si32(d0); \
}
-void vp9_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
const __m128i cst = _mm_setr_epi16(
@@ -151,7 +151,7 @@ void vp9_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
}
}
-void vp9_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -449,7 +449,7 @@ void iadst4_sse2(__m128i *in) {
out7 = _mm_subs_epi16(stp1_0, stp2_7); \
}
-void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -480,7 +480,7 @@ void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
// 2-D
for (i = 0; i < 2; i++) {
- // 8x8 Transpose is copied from vp9_fdct8x8_sse2()
+ // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
@@ -518,7 +518,7 @@ void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
RECON_AND_STORE(dest + 7 * stride, in7);
}
-void vp9_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -555,7 +555,7 @@ void idct8_sse2(__m128i *in) {
__m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- // 8x8 Transpose is copied from vp9_fdct8x8_sse2()
+ // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7],
in0, in1, in2, in3, in4, in5, in6, in7);
@@ -792,7 +792,7 @@ void iadst8_sse2(__m128i *in) {
in[7] = _mm_sub_epi16(k__const_0, s1);
}
-void vp9_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1169,7 +1169,7 @@ void vp9_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
stp2_10, stp2_13, stp2_11, stp2_12) \
}
-void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -1294,7 +1294,7 @@ void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
}
}
-void vp9_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a, i;
@@ -2152,7 +2152,7 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
iadst16_8col(in1);
}
-void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3029,7 +3029,7 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
}
// Only upper-left 8x8 has non-zero coeff
-void vp9_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1<<5);
@@ -3188,7 +3188,7 @@ void vp9_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
}
}
-void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3464,7 +3464,7 @@ void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
}
}
-void vp9_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a, i;
@@ -3498,7 +3498,7 @@ static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
return retval;
}
-void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -3561,7 +3561,7 @@ void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp9_highbd_idct4_c(input, outptr, bd);
+ vpx_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
@@ -3605,7 +3605,7 @@ void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j)
temp_in[j] = out[j * 4 + i];
- vp9_highbd_idct4_c(temp_in, temp_out, bd);
+ vpx_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3614,7 +3614,7 @@ void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
@@ -3679,7 +3679,7 @@ void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 8; ++i) {
- vp9_highbd_idct8_c(input, outptr, bd);
+ vpx_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -3706,7 +3706,7 @@ void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j)
temp_in[j] = out[j * 8 + i];
- vp9_highbd_idct8_c(temp_in, temp_out, bd);
+ vpx_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3715,7 +3715,7 @@ void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -3783,7 +3783,7 @@ void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp9_highbd_idct8_c(input, outptr, bd);
+ vpx_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -3810,7 +3810,7 @@ void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j)
temp_in[j] = out[j * 8 + i];
- vp9_highbd_idct8_c(temp_in, temp_out, bd);
+ vpx_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3819,7 +3819,7 @@ void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -3892,7 +3892,7 @@ void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 16; ++i) {
- vp9_highbd_idct16_c(input, outptr, bd);
+ vpx_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -3924,7 +3924,7 @@ void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j)
temp_in[j] = out[j * 16 + i];
- vp9_highbd_idct16_c(temp_in, temp_out, bd);
+ vpx_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3933,7 +3933,7 @@ void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -4011,7 +4011,7 @@ void vp9_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp9_highbd_idct16_c(input, outptr, bd);
+ vpx_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -4043,7 +4043,7 @@ void vp9_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j)
temp_in[j] = out[j * 16 + i];
- vp9_highbd_idct16_c(temp_in, temp_out, bd);
+ vpx_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);