Diffstat (limited to 'vpx_dsp')
-rw-r--r--  vpx_dsp/x86/inv_txfm_sse2.c   74
-rw-r--r--  vpx_dsp/x86/inv_txfm_sse2.h   48
-rw-r--r--  vpx_dsp/x86/inv_txfm_ssse3.c  29
3 files changed, 52 insertions, 99 deletions
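
At a glance: this change de-duplicates the final rounding/shift/reconstruct epilogue of the 8x8 inverse transforms. The three hand-unrolled copies in vpx_idct8x8_64_add_sse2(), vpx_idct8x8_12_add_sse2(), and vpx_idct8x8_12_add_ssse3() are replaced by calls to a new inline helper, write_buffer_8x8(), and the unrolled loads in vpx_idct8x8_64_add_sse2() move into a matching load_buffer_8x8(), mirroring the existing load_buffer_8x16()/write_buffer_8x16() pair in inv_txfm_sse2.h.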
diff --git a/vpx_dsp/x86/inv_txfm_sse2.c b/vpx_dsp/x86/inv_txfm_sse2.c
index 32f1b63b8..9e77bdbdb 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -219,57 +219,18 @@ static INLINE void idct8(const __m128i *const in, __m128i *const out) {
void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
- const __m128i final_rounding = _mm_set1_epi16(1 << 4);
-
__m128i in[8];
int i;
// Load input data.
- in[0] = load_input_data(input);
- in[1] = load_input_data(input + 8 * 1);
- in[2] = load_input_data(input + 8 * 2);
- in[3] = load_input_data(input + 8 * 3);
- in[4] = load_input_data(input + 8 * 4);
- in[5] = load_input_data(input + 8 * 5);
- in[6] = load_input_data(input + 8 * 6);
- in[7] = load_input_data(input + 8 * 7);
+ load_buffer_8x8(input, in);
// 2-D
for (i = 0; i < 2; i++) {
- // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
- transpose_16bit_8x8(in, in);
-
- // 4-stage 1D idct8x8
- idct8(in, in);
+ idct8_sse2(in);
}
- // Final rounding and shift
- in[0] = _mm_adds_epi16(in[0], final_rounding);
- in[1] = _mm_adds_epi16(in[1], final_rounding);
- in[2] = _mm_adds_epi16(in[2], final_rounding);
- in[3] = _mm_adds_epi16(in[3], final_rounding);
- in[4] = _mm_adds_epi16(in[4], final_rounding);
- in[5] = _mm_adds_epi16(in[5], final_rounding);
- in[6] = _mm_adds_epi16(in[6], final_rounding);
- in[7] = _mm_adds_epi16(in[7], final_rounding);
-
- in[0] = _mm_srai_epi16(in[0], 5);
- in[1] = _mm_srai_epi16(in[1], 5);
- in[2] = _mm_srai_epi16(in[2], 5);
- in[3] = _mm_srai_epi16(in[3], 5);
- in[4] = _mm_srai_epi16(in[4], 5);
- in[5] = _mm_srai_epi16(in[5], 5);
- in[6] = _mm_srai_epi16(in[6], 5);
- in[7] = _mm_srai_epi16(in[7], 5);
-
- recon_and_store(dest + 0 * stride, in[0]);
- recon_and_store(dest + 1 * stride, in[1]);
- recon_and_store(dest + 2 * stride, in[2]);
- recon_and_store(dest + 3 * stride, in[3]);
- recon_and_store(dest + 4 * stride, in[4]);
- recon_and_store(dest + 5 * stride, in[5]);
- recon_and_store(dest + 6 * stride, in[6]);
- recon_and_store(dest + 7 * stride, in[7]);
+ write_buffer_8x8(in, dest, stride);
}
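
idct8_sse2() is not defined in this hunk; judging from the lines it replaces, it presumably bundles the 8x8 transpose with the 4-stage 1-D idct8. A sketch under that assumption (the real definition lives elsewhere in inv_txfm_sse2.c):

void idct8_sse2(__m128i *in) {
  // 8x8 transpose; the removed comment notes it is copied from vpx_fdct8x8_sse2()
  transpose_16bit_8x8(in, in);
  // 4-stage 1D idct8x8
  idct8(in, in);
}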
void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
@@ -505,7 +466,6 @@ void iadst8_sse2(__m128i *in) {
void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i zero = _mm_setzero_si128();
- const __m128i final_rounding = _mm_set1_epi16(1 << 4);
const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
@@ -575,33 +535,7 @@ void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
in[4] = in[5] = in[6] = in[7] = zero;
idct8(in, in);
- // Final rounding and shift
- in[0] = _mm_adds_epi16(in[0], final_rounding);
- in[1] = _mm_adds_epi16(in[1], final_rounding);
- in[2] = _mm_adds_epi16(in[2], final_rounding);
- in[3] = _mm_adds_epi16(in[3], final_rounding);
- in[4] = _mm_adds_epi16(in[4], final_rounding);
- in[5] = _mm_adds_epi16(in[5], final_rounding);
- in[6] = _mm_adds_epi16(in[6], final_rounding);
- in[7] = _mm_adds_epi16(in[7], final_rounding);
-
- in[0] = _mm_srai_epi16(in[0], 5);
- in[1] = _mm_srai_epi16(in[1], 5);
- in[2] = _mm_srai_epi16(in[2], 5);
- in[3] = _mm_srai_epi16(in[3], 5);
- in[4] = _mm_srai_epi16(in[4], 5);
- in[5] = _mm_srai_epi16(in[5], 5);
- in[6] = _mm_srai_epi16(in[6], 5);
- in[7] = _mm_srai_epi16(in[7], 5);
-
- recon_and_store(dest + 0 * stride, in[0]);
- recon_and_store(dest + 1 * stride, in[1]);
- recon_and_store(dest + 2 * stride, in[2]);
- recon_and_store(dest + 3 * stride, in[3]);
- recon_and_store(dest + 4 * stride, in[4]);
- recon_and_store(dest + 5 * stride, in[5]);
- recon_and_store(dest + 6 * stride, in[6]);
- recon_and_store(dest + 7 * stride, in[7]);
+ write_buffer_8x8(in, dest, stride);
}
#define IDCT16 \
diff --git a/vpx_dsp/x86/inv_txfm_sse2.h b/vpx_dsp/x86/inv_txfm_sse2.h
index a739fd1a4..acaf86178 100644
--- a/vpx_dsp/x86/inv_txfm_sse2.h
+++ b/vpx_dsp/x86/inv_txfm_sse2.h
@@ -99,6 +99,18 @@ static INLINE __m128i load_input_data(const tran_low_t *data) {
#endif
}
+static INLINE void load_buffer_8x8(const tran_low_t *const input,
+ __m128i *const in) {
+ in[0] = load_input_data(input + 0 * 8);
+ in[1] = load_input_data(input + 1 * 8);
+ in[2] = load_input_data(input + 2 * 8);
+ in[3] = load_input_data(input + 3 * 8);
+ in[4] = load_input_data(input + 4 * 8);
+ in[5] = load_input_data(input + 5 * 8);
+ in[6] = load_input_data(input + 6 * 8);
+ in[7] = load_input_data(input + 7 * 8);
+}
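+
+A scalar model of what load_buffer_8x8() gathers, assuming load_input_data() (the #if/#endif function above) narrows tran_low_t to int16_t when CONFIG_VP9_HIGHBITDEPTH makes tran_low_t 32 bits wide, and is a plain 128-bit load otherwise. A sketch for illustration, not the library's code:
+
+static void load_buffer_8x8_c(const tran_low_t *input, int16_t out[8][8]) {
+  int i, j;
+  for (i = 0; i < 8; ++i)
+    for (j = 0; j < 8; ++j)
+      out[i][j] = (int16_t)input[i * 8 + j];  // row i fills one __m128i
+}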
+
static INLINE void load_buffer_8x16(const tran_low_t *const input,
__m128i *const in) {
in[0] = load_input_data(input + 0 * 16);
@@ -129,7 +141,41 @@ static INLINE void recon_and_store(uint8_t *const dest, const __m128i in_x) {
_mm_storel_epi64((__m128i *)(dest), d0);
}
-static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
+static INLINE void write_buffer_8x8(const __m128i *const in,
+ uint8_t *const dest, const int stride) {
+ const __m128i final_rounding = _mm_set1_epi16(1 << 4);
+ __m128i t[8];
+ // Final rounding and shift
+ t[0] = _mm_adds_epi16(in[0], final_rounding);
+ t[1] = _mm_adds_epi16(in[1], final_rounding);
+ t[2] = _mm_adds_epi16(in[2], final_rounding);
+ t[3] = _mm_adds_epi16(in[3], final_rounding);
+ t[4] = _mm_adds_epi16(in[4], final_rounding);
+ t[5] = _mm_adds_epi16(in[5], final_rounding);
+ t[6] = _mm_adds_epi16(in[6], final_rounding);
+ t[7] = _mm_adds_epi16(in[7], final_rounding);
+
+ t[0] = _mm_srai_epi16(t[0], 5);
+ t[1] = _mm_srai_epi16(t[1], 5);
+ t[2] = _mm_srai_epi16(t[2], 5);
+ t[3] = _mm_srai_epi16(t[3], 5);
+ t[4] = _mm_srai_epi16(t[4], 5);
+ t[5] = _mm_srai_epi16(t[5], 5);
+ t[6] = _mm_srai_epi16(t[6], 5);
+ t[7] = _mm_srai_epi16(t[7], 5);
+
+ recon_and_store(dest + 0 * stride, t[0]);
+ recon_and_store(dest + 1 * stride, t[1]);
+ recon_and_store(dest + 2 * stride, t[2]);
+ recon_and_store(dest + 3 * stride, t[3]);
+ recon_and_store(dest + 4 * stride, t[4]);
+ recon_and_store(dest + 5 * stride, t[5]);
+ recon_and_store(dest + 6 * stride, t[6]);
+ recon_and_store(dest + 7 * stride, t[7]);
+}
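+
+In scalar terms the helper computes ROUND_POWER_OF_TWO(x, 5): adding 1 << 4 before the arithmetic shift by 5 is round-to-nearest division by 32. A scalar model of write_buffer_8x8() plus recon_and_store(), assuming recon_and_store() does the usual add-to-prediction with a clip to uint8; the saturating _mm_adds_epi16 adds are modeled with plain int arithmetic, which agrees for in-range coefficients:
+
+static void write_buffer_8x8_c(const int16_t in[8][8], uint8_t *dest,
+                               int stride) {
+  int i, j;
+  for (i = 0; i < 8; ++i) {
+    for (j = 0; j < 8; ++j) {
+      // Final rounding and shift, then clipped reconstruction.
+      const int v = dest[i * stride + j] + ((in[i][j] + 16) >> 5);
+      dest[i * stride + j] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
+    }
+  }
+}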
+
+static INLINE void write_buffer_8x16(uint8_t *const dest, __m128i *const in,
+ const int stride) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
// Final rounding and shift
in[0] = _mm_adds_epi16(in[0], final_rounding);
diff --git a/vpx_dsp/x86/inv_txfm_ssse3.c b/vpx_dsp/x86/inv_txfm_ssse3.c
index ae0b59194..0e86e43f1 100644
--- a/vpx_dsp/x86/inv_txfm_ssse3.c
+++ b/vpx_dsp/x86/inv_txfm_ssse3.c
@@ -19,7 +19,6 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
- const __m128i final_rounding = _mm_set1_epi16(1 << 4);
const __m128i stg1_0 = pair_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
const __m128i stg1_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
const __m128i stg1_2 = pair_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
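
A note on why these SSSE3 constants are doubled (an inference from the constants themselves, not shown in this diff): they are presumably operands of _mm_mulhrs_epi16, which computes (a * b + (1 << 14)) >> 15 per lane. With b = 2 * cospi that reduces to (a * cospi + (1 << 13)) >> 14, i.e. ROUND_POWER_OF_TWO(a * cospi, DCT_CONST_BITS) with DCT_CONST_BITS == 14, the same rounding the SSE2 path obtains from _mm_madd_epi16 plus DCT_CONST_ROUNDING, but in one instruction per vector:

// Scalar model of one lane of _mm_mulhrs_epi16(a, 2 * cospi):
static INLINE int16_t mulhrs_by_2cospi(int16_t a, int16_t cospi) {
  return (int16_t)((a * cospi + (1 << 13)) >> 14);
}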
@@ -148,33 +147,7 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
in[6] = _mm_sub_epi16(stp1_1, stp1_6);
in[7] = _mm_sub_epi16(stp1_0, stp2_7);
- // Final rounding and shift
- in[0] = _mm_adds_epi16(in[0], final_rounding);
- in[1] = _mm_adds_epi16(in[1], final_rounding);
- in[2] = _mm_adds_epi16(in[2], final_rounding);
- in[3] = _mm_adds_epi16(in[3], final_rounding);
- in[4] = _mm_adds_epi16(in[4], final_rounding);
- in[5] = _mm_adds_epi16(in[5], final_rounding);
- in[6] = _mm_adds_epi16(in[6], final_rounding);
- in[7] = _mm_adds_epi16(in[7], final_rounding);
-
- in[0] = _mm_srai_epi16(in[0], 5);
- in[1] = _mm_srai_epi16(in[1], 5);
- in[2] = _mm_srai_epi16(in[2], 5);
- in[3] = _mm_srai_epi16(in[3], 5);
- in[4] = _mm_srai_epi16(in[4], 5);
- in[5] = _mm_srai_epi16(in[5], 5);
- in[6] = _mm_srai_epi16(in[6], 5);
- in[7] = _mm_srai_epi16(in[7], 5);
-
- recon_and_store(dest + 0 * stride, in[0]);
- recon_and_store(dest + 1 * stride, in[1]);
- recon_and_store(dest + 2 * stride, in[2]);
- recon_and_store(dest + 3 * stride, in[3]);
- recon_and_store(dest + 4 * stride, in[4]);
- recon_and_store(dest + 5 * stride, in[5]);
- recon_and_store(dest + 6 * stride, in[6]);
- recon_and_store(dest + 7 * stride, in[7]);
+ write_buffer_8x8(in, dest, stride);
}
// Only do addition and subtraction butterfly, size = 16, 32