-rw-r--r--  vp9/common/vp9_rtcd_defs.sh           |   9
-rw-r--r--  vp9/decoder/vp9_dequantize.c          |  21
-rw-r--r--  vp9/decoder/x86/vp9_dequantize_x86.c  | 207
3 files changed, 234 insertions, 3 deletions
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index 87628659b..48ae860a9 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -151,6 +151,15 @@ specialize vp9_add_residual_16x16 sse2
 
 prototype void vp9_add_residual_32x32 "const int16_t *diff, const uint8_t *pred, int pitch, uint8_t *dest, int stride"
 specialize vp9_add_residual_32x32 sse2
+
+prototype void vp9_add_constant_residual_8x8 "const int16_t diff, const uint8_t *pred, int pitch, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_8x8 sse2
+
+prototype void vp9_add_constant_residual_16x16 "const int16_t diff, const uint8_t *pred, int pitch, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_16x16 sse2
+
+prototype void vp9_add_constant_residual_32x32 "const int16_t diff, const uint8_t *pred, int pitch, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_32x32 sse2
 fi
 
 #
diff --git a/vp9/decoder/vp9_dequantize.c b/vp9/decoder/vp9_dequantize.c
index 271a75d16..92b78ed19 100644
--- a/vp9/decoder/vp9_dequantize.c
+++ b/vp9/decoder/vp9_dequantize.c
@@ -64,6 +64,21 @@ static void add_constant_residual(const int16_t diff, const uint8_t *pred,
   }
 }
 
+void vp9_add_constant_residual_8x8_c(const int16_t diff, const uint8_t *pred,
+                                     int pitch, uint8_t *dest, int stride) {
+  add_constant_residual(diff, pred, pitch, dest, stride, 8, 8);
+}
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, const uint8_t *pred,
+                                       int pitch, uint8_t *dest, int stride) {
+  add_constant_residual(diff, pred, pitch, dest, stride, 16, 16);
+}
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, const uint8_t *pred,
+                                       int pitch, uint8_t *dest, int stride) {
+  add_constant_residual(diff, pred, pitch, dest, stride, 32, 32);
+}
+
 void vp9_ht_dequant_idct_add_c(TX_TYPE tx_type, int16_t *input,
                                const int16_t *dq,
                                uint8_t *pred, uint8_t *dest,
@@ -202,7 +217,7 @@ void vp9_dequant_idct_add_8x8_c(int16_t *input, const int16_t *dq,
       vp9_short_idct1_8x8_c(&in, &out);
       input[0] = 0;
 
-      add_constant_residual(out, pred, pitch, dest, stride, 8, 8);
+      vp9_add_constant_residual_8x8(out, pred, pitch, dest, stride);
     } else if (eob <= 10) {
       input[1] *= dq[1];
       input[2] *= dq[1];
@@ -285,7 +300,7 @@ void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
       vp9_short_idct1_16x16_c(&in, &out);
       input[0] = 0;
 
-      add_constant_residual(out, pred, pitch, dest, stride, 16, 16);
+      vp9_add_constant_residual_16x16(out, pred, pitch, dest, stride);
     } else if (eob <= 10) {
       input[0] *= dq[0];
@@ -335,7 +350,7 @@ void vp9_dequant_idct_add_32x32_c(int16_t *input, const int16_t *dq,
   input[0] = input[0] * dq[0] / 2;
   if (eob == 1) {
     vp9_short_idct1_32x32(input, output);
-    add_constant_residual(output[0], pred, pitch, dest, stride, 32, 32);
+    vp9_add_constant_residual_32x32(output[0], pred, pitch, dest, stride);
     input[0] = 0;
   } else if (eob <= 10) {
     input[1] = input[1] * dq[1] / 2;
diff --git a/vp9/decoder/x86/vp9_dequantize_x86.c b/vp9/decoder/x86/vp9_dequantize_x86.c
index 4fa408690..acfae2a27 100644
--- a/vp9/decoder/x86/vp9_dequantize_x86.c
+++ b/vp9/decoder/x86/vp9_dequantize_x86.c
@@ -245,4 +245,211 @@ void vp9_add_residual_32x32_sse2(const int16_t *diff, const uint8_t *pred,
     dest += 2 * stride;
   } while (--i);
 }
+
+void vp9_add_constant_residual_8x8_sse2(const int16_t diff, const uint8_t *pred,
+                                        int pitch, uint8_t *dest, int stride) {
+  uint8_t abs_diff;
+  __m128i d;
+
+  // Prediction data.
+  __m128i p0 = _mm_loadl_epi64((const __m128i *)(pred + 0 * pitch));
+  __m128i p1 = _mm_loadl_epi64((const __m128i *)(pred + 1 * pitch));
+  __m128i p2 = _mm_loadl_epi64((const __m128i *)(pred + 2 * pitch));
+  __m128i p3 = _mm_loadl_epi64((const __m128i *)(pred + 3 * pitch));
+  __m128i p4 = _mm_loadl_epi64((const __m128i *)(pred + 4 * pitch));
+  __m128i p5 = _mm_loadl_epi64((const __m128i *)(pred + 5 * pitch));
+  __m128i p6 = _mm_loadl_epi64((const __m128i *)(pred + 6 * pitch));
+  __m128i p7 = _mm_loadl_epi64((const __m128i *)(pred + 7 * pitch));
+
+  p0 = _mm_unpacklo_epi64(p0, p1);
+  p2 = _mm_unpacklo_epi64(p2, p3);
+  p4 = _mm_unpacklo_epi64(p4, p5);
+  p6 = _mm_unpacklo_epi64(p6, p7);
+
+  // Clip diff value to [0, 255] range. Then, do addition or subtraction
+  // according to its sign.
+  if (diff >= 0) {
+    abs_diff = (diff > 255) ? 255 : diff;
+    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+    p0 = _mm_adds_epu8(p0, d);
+    p2 = _mm_adds_epu8(p2, d);
+    p4 = _mm_adds_epu8(p4, d);
+    p6 = _mm_adds_epu8(p6, d);
+  } else {
+    abs_diff = (diff < -255) ? 255 : -diff;
+    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+    p0 = _mm_subs_epu8(p0, d);
+    p2 = _mm_subs_epu8(p2, d);
+    p4 = _mm_subs_epu8(p4, d);
+    p6 = _mm_subs_epu8(p6, d);
+  }
+
+  _mm_storel_epi64((__m128i *)(dest + 0 * stride), p0);
+  p0 = _mm_srli_si128(p0, 8);
+  _mm_storel_epi64((__m128i *)(dest + 1 * stride), p0);
+
+  _mm_storel_epi64((__m128i *)(dest + 2 * stride), p2);
+  p2 = _mm_srli_si128(p2, 8);
+  _mm_storel_epi64((__m128i *)(dest + 3 * stride), p2);
+
+  _mm_storel_epi64((__m128i *)(dest + 4 * stride), p4);
+  p4 = _mm_srli_si128(p4, 8);
+  _mm_storel_epi64((__m128i *)(dest + 5 * stride), p4);
+
+  _mm_storel_epi64((__m128i *)(dest + 6 * stride), p6);
+  p6 = _mm_srli_si128(p6, 8);
+  _mm_storel_epi64((__m128i *)(dest + 7 * stride), p6);
+}
+
+void vp9_add_constant_residual_16x16_sse2(const int16_t diff,
+                                          const uint8_t *pred, int pitch,
+                                          uint8_t *dest, int stride) {
+  uint8_t abs_diff;
+  __m128i d;
+
+  // Prediction data.
+  __m128i p0 = _mm_load_si128((const __m128i *)(pred + 0 * pitch));
+  __m128i p1 = _mm_load_si128((const __m128i *)(pred + 1 * pitch));
+  __m128i p2 = _mm_load_si128((const __m128i *)(pred + 2 * pitch));
+  __m128i p3 = _mm_load_si128((const __m128i *)(pred + 3 * pitch));
+  __m128i p4 = _mm_load_si128((const __m128i *)(pred + 4 * pitch));
+  __m128i p5 = _mm_load_si128((const __m128i *)(pred + 5 * pitch));
+  __m128i p6 = _mm_load_si128((const __m128i *)(pred + 6 * pitch));
+  __m128i p7 = _mm_load_si128((const __m128i *)(pred + 7 * pitch));
+  __m128i p8 = _mm_load_si128((const __m128i *)(pred + 8 * pitch));
+  __m128i p9 = _mm_load_si128((const __m128i *)(pred + 9 * pitch));
+  __m128i p10 = _mm_load_si128((const __m128i *)(pred + 10 * pitch));
+  __m128i p11 = _mm_load_si128((const __m128i *)(pred + 11 * pitch));
+  __m128i p12 = _mm_load_si128((const __m128i *)(pred + 12 * pitch));
+  __m128i p13 = _mm_load_si128((const __m128i *)(pred + 13 * pitch));
+  __m128i p14 = _mm_load_si128((const __m128i *)(pred + 14 * pitch));
+  __m128i p15 = _mm_load_si128((const __m128i *)(pred + 15 * pitch));
+
+  // Clip diff value to [0, 255] range. Then, do addition or subtraction
+  // according to its sign.
+  if (diff >= 0) {
+    abs_diff = (diff > 255) ? 255 : diff;
+    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+    p0 = _mm_adds_epu8(p0, d);
+    p1 = _mm_adds_epu8(p1, d);
+    p2 = _mm_adds_epu8(p2, d);
+    p3 = _mm_adds_epu8(p3, d);
+    p4 = _mm_adds_epu8(p4, d);
+    p5 = _mm_adds_epu8(p5, d);
+    p6 = _mm_adds_epu8(p6, d);
+    p7 = _mm_adds_epu8(p7, d);
+    p8 = _mm_adds_epu8(p8, d);
+    p9 = _mm_adds_epu8(p9, d);
+    p10 = _mm_adds_epu8(p10, d);
+    p11 = _mm_adds_epu8(p11, d);
+    p12 = _mm_adds_epu8(p12, d);
+    p13 = _mm_adds_epu8(p13, d);
+    p14 = _mm_adds_epu8(p14, d);
+    p15 = _mm_adds_epu8(p15, d);
+  } else {
+    abs_diff = (diff < -255) ? 255 : -diff;
+    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+    p0 = _mm_subs_epu8(p0, d);
+    p1 = _mm_subs_epu8(p1, d);
+    p2 = _mm_subs_epu8(p2, d);
+    p3 = _mm_subs_epu8(p3, d);
+    p4 = _mm_subs_epu8(p4, d);
+    p5 = _mm_subs_epu8(p5, d);
+    p6 = _mm_subs_epu8(p6, d);
+    p7 = _mm_subs_epu8(p7, d);
+    p8 = _mm_subs_epu8(p8, d);
+    p9 = _mm_subs_epu8(p9, d);
+    p10 = _mm_subs_epu8(p10, d);
+    p11 = _mm_subs_epu8(p11, d);
+    p12 = _mm_subs_epu8(p12, d);
+    p13 = _mm_subs_epu8(p13, d);
+    p14 = _mm_subs_epu8(p14, d);
+    p15 = _mm_subs_epu8(p15, d);
+  }
+
+  // Store results
+  _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+  _mm_store_si128((__m128i *)(dest + 1 * stride), p1);
+  _mm_store_si128((__m128i *)(dest + 2 * stride), p2);
+  _mm_store_si128((__m128i *)(dest + 3 * stride), p3);
+  _mm_store_si128((__m128i *)(dest + 4 * stride), p4);
+  _mm_store_si128((__m128i *)(dest + 5 * stride), p5);
+  _mm_store_si128((__m128i *)(dest + 6 * stride), p6);
+  _mm_store_si128((__m128i *)(dest + 7 * stride), p7);
+  _mm_store_si128((__m128i *)(dest + 8 * stride), p8);
+  _mm_store_si128((__m128i *)(dest + 9 * stride), p9);
+  _mm_store_si128((__m128i *)(dest + 10 * stride), p10);
+  _mm_store_si128((__m128i *)(dest + 11 * stride), p11);
+  _mm_store_si128((__m128i *)(dest + 12 * stride), p12);
+  _mm_store_si128((__m128i *)(dest + 13 * stride), p13);
+  _mm_store_si128((__m128i *)(dest + 14 * stride), p14);
+  _mm_store_si128((__m128i *)(dest + 15 * stride), p15);
+}
+
+void vp9_add_constant_residual_32x32_sse2(const int16_t diff,
+                                          const uint8_t *pred, int pitch,
+                                          uint8_t *dest, int stride) {
+  uint8_t abs_diff;
+  __m128i d;
+  int i = 8;
+
+  if (diff >= 0) {
+    abs_diff = (diff > 255) ? 255 : diff;
+    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+  } else {
+    abs_diff = (diff < -255) ? 255 : -diff;
+    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+  }
+
+  do {
+    // Prediction data.
+    __m128i p0 = _mm_load_si128((const __m128i *)(pred + 0 * pitch));
+    __m128i p1 = _mm_load_si128((const __m128i *)(pred + 0 * pitch + 16));
+    __m128i p2 = _mm_load_si128((const __m128i *)(pred + 1 * pitch));
+    __m128i p3 = _mm_load_si128((const __m128i *)(pred + 1 * pitch + 16));
+    __m128i p4 = _mm_load_si128((const __m128i *)(pred + 2 * pitch));
+    __m128i p5 = _mm_load_si128((const __m128i *)(pred + 2 * pitch + 16));
+    __m128i p6 = _mm_load_si128((const __m128i *)(pred + 3 * pitch));
+    __m128i p7 = _mm_load_si128((const __m128i *)(pred + 3 * pitch + 16));
+
+    // Clip diff value to [0, 255] range. Then, do addition or subtraction
+    // according to its sign.
+    if (diff >= 0) {
+      p0 = _mm_adds_epu8(p0, d);
+      p1 = _mm_adds_epu8(p1, d);
+      p2 = _mm_adds_epu8(p2, d);
+      p3 = _mm_adds_epu8(p3, d);
+      p4 = _mm_adds_epu8(p4, d);
+      p5 = _mm_adds_epu8(p5, d);
+      p6 = _mm_adds_epu8(p6, d);
+      p7 = _mm_adds_epu8(p7, d);
+    } else {
+      p0 = _mm_subs_epu8(p0, d);
+      p1 = _mm_subs_epu8(p1, d);
+      p2 = _mm_subs_epu8(p2, d);
+      p3 = _mm_subs_epu8(p3, d);
+      p4 = _mm_subs_epu8(p4, d);
+      p5 = _mm_subs_epu8(p5, d);
+      p6 = _mm_subs_epu8(p6, d);
+      p7 = _mm_subs_epu8(p7, d);
+    }
+
+    // Store results
+    _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+    _mm_store_si128((__m128i *)(dest + 0 * stride + 16), p1);
+    _mm_store_si128((__m128i *)(dest + 1 * stride), p2);
+    _mm_store_si128((__m128i *)(dest + 1 * stride + 16), p3);
+    _mm_store_si128((__m128i *)(dest + 2 * stride), p4);
+    _mm_store_si128((__m128i *)(dest + 2 * stride + 16), p5);
+    _mm_store_si128((__m128i *)(dest + 3 * stride), p6);
+    _mm_store_si128((__m128i *)(dest + 3 * stride + 16), p7);
+
+    pred += 4 * pitch;
+    dest += 4 * stride;
+  } while (--i);
+}
 #endif
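
Note: the scalar helper add_constant_residual() that the new vp9_add_constant_residual_*_c wrappers forward to already exists in vp9_dequantize.c and is not part of this diff. As a rough reference for what the SSE2 paths above vectorize, a minimal scalar sketch of the assumed per-pixel behavior (an illustration only, not the file's actual body) could look like:

  #include <stdint.h>

  /* Illustrative sketch only: add a single DC residual value to every pixel
   * of an 8-bit prediction block, saturating the result to [0, 255]. This
   * mirrors what the SSE2 functions above compute. */
  static void add_constant_residual_sketch(int16_t diff, const uint8_t *pred,
                                           int pitch, uint8_t *dest, int stride,
                                           int width, int height) {
    int r, c;
    for (r = 0; r < height; r++) {
      for (c = 0; c < width; c++) {
        int v = pred[c] + diff;                                /* widen to avoid wrap */
        dest[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));  /* saturate */
      }
      pred += pitch;
      dest += stride;
    }
  }

The SSE2 versions avoid the per-pixel clamp by clipping |diff| to [0, 255] once, splatting it into all sixteen byte lanes of a register, and then using saturating byte adds or subtracts (_mm_adds_epu8 / _mm_subs_epu8) on whole rows of the prediction block.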