Diffstat (limited to 'vp9/decoder/x86/vp9_dequantize_x86.c')
-rw-r--r--  vp9/decoder/x86/vp9_dequantize_x86.c | 207
1 file changed, 207 insertions(+), 0 deletions(-)
diff --git a/vp9/decoder/x86/vp9_dequantize_x86.c b/vp9/decoder/x86/vp9_dequantize_x86.c
index 4fa408690..acfae2a27 100644
--- a/vp9/decoder/x86/vp9_dequantize_x86.c
+++ b/vp9/decoder/x86/vp9_dequantize_x86.c
@@ -245,4 +245,211 @@ void vp9_add_residual_32x32_sse2(const int16_t *diff, const uint8_t *pred,
dest += 2 * stride;
} while (--i);
}
+
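+// Add a constant residual value to an entire predicted block. These kernels
+// are presumably used when only the DC coefficient survives dequantization,
+// so every pixel receives the same residual:
+//   dest[r * stride + c] = clamp(pred[r * pitch + c] + diff, 0, 255)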
+void vp9_add_constant_residual_8x8_sse2(const int16_t diff, const uint8_t *pred,
+ int pitch, uint8_t *dest, int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+
+ // Prediction data.
+ __m128i p0 = _mm_loadl_epi64((const __m128i *)(pred + 0 * pitch));
+ __m128i p1 = _mm_loadl_epi64((const __m128i *)(pred + 1 * pitch));
+ __m128i p2 = _mm_loadl_epi64((const __m128i *)(pred + 2 * pitch));
+ __m128i p3 = _mm_loadl_epi64((const __m128i *)(pred + 3 * pitch));
+ __m128i p4 = _mm_loadl_epi64((const __m128i *)(pred + 4 * pitch));
+ __m128i p5 = _mm_loadl_epi64((const __m128i *)(pred + 5 * pitch));
+ __m128i p6 = _mm_loadl_epi64((const __m128i *)(pred + 6 * pitch));
+ __m128i p7 = _mm_loadl_epi64((const __m128i *)(pred + 7 * pitch));
+
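+  // Pack pairs of 8-pixel rows into single 128-bit registers so the whole
+  // 8x8 block is covered by four saturating operations.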
+ p0 = _mm_unpacklo_epi64(p0, p1);
+ p2 = _mm_unpacklo_epi64(p2, p3);
+ p4 = _mm_unpacklo_epi64(p4, p5);
+ p6 = _mm_unpacklo_epi64(p6, p7);
+
+  // Clip |diff| to [0, 255], then add or subtract it according to the sign
+  // of diff. The saturating epu8 operations keep each result in [0, 255].
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
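+    // Broadcast abs_diff to all 16 byte lanes: multiplying by 0x01010101
+    // replicates the byte within a 32-bit word (e.g. 3 -> 0x03030303), and
+    // the shuffle copies that word across the register.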
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_adds_epu8(p0, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p6 = _mm_adds_epu8(p6, d);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_subs_epu8(p0, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p6 = _mm_subs_epu8(p6, d);
+ }
+
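+  // Each register holds two output rows: store the low 8 bytes, shift the
+  // high half down, and store it as the next row.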
+ _mm_storel_epi64((__m128i *)(dest + 0 * stride), p0);
+ p0 = _mm_srli_si128(p0, 8);
+ _mm_storel_epi64((__m128i *)(dest + 1 * stride), p0);
+
+ _mm_storel_epi64((__m128i *)(dest + 2 * stride), p2);
+ p2 = _mm_srli_si128(p2, 8);
+ _mm_storel_epi64((__m128i *)(dest + 3 * stride), p2);
+
+ _mm_storel_epi64((__m128i *)(dest + 4 * stride), p4);
+ p4 = _mm_srli_si128(p4, 8);
+ _mm_storel_epi64((__m128i *)(dest + 5 * stride), p4);
+
+ _mm_storel_epi64((__m128i *)(dest + 6 * stride), p6);
+ p6 = _mm_srli_si128(p6, 8);
+ _mm_storel_epi64((__m128i *)(dest + 7 * stride), p6);
+}
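+// For reference, a scalar sketch of what the kernel above computes
+// (illustrative only; clip_pixel clamps an int to [0, 255]):
+//   for (r = 0; r < 8; r++)
+//     for (c = 0; c < 8; c++)
+//       dest[r * stride + c] = clip_pixel(pred[r * pitch + c] + diff);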
+
+void vp9_add_constant_residual_16x16_sse2(const int16_t diff,
+ const uint8_t *pred, int pitch,
+ uint8_t *dest, int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+
+  // Prediction data: one full 16-pixel row per 128-bit register.
+ __m128i p0 = _mm_load_si128((const __m128i *)(pred + 0 * pitch));
+ __m128i p1 = _mm_load_si128((const __m128i *)(pred + 1 * pitch));
+ __m128i p2 = _mm_load_si128((const __m128i *)(pred + 2 * pitch));
+ __m128i p3 = _mm_load_si128((const __m128i *)(pred + 3 * pitch));
+ __m128i p4 = _mm_load_si128((const __m128i *)(pred + 4 * pitch));
+ __m128i p5 = _mm_load_si128((const __m128i *)(pred + 5 * pitch));
+ __m128i p6 = _mm_load_si128((const __m128i *)(pred + 6 * pitch));
+ __m128i p7 = _mm_load_si128((const __m128i *)(pred + 7 * pitch));
+ __m128i p8 = _mm_load_si128((const __m128i *)(pred + 8 * pitch));
+ __m128i p9 = _mm_load_si128((const __m128i *)(pred + 9 * pitch));
+ __m128i p10 = _mm_load_si128((const __m128i *)(pred + 10 * pitch));
+ __m128i p11 = _mm_load_si128((const __m128i *)(pred + 11 * pitch));
+ __m128i p12 = _mm_load_si128((const __m128i *)(pred + 12 * pitch));
+ __m128i p13 = _mm_load_si128((const __m128i *)(pred + 13 * pitch));
+ __m128i p14 = _mm_load_si128((const __m128i *)(pred + 14 * pitch));
+ __m128i p15 = _mm_load_si128((const __m128i *)(pred + 15 * pitch));
+
+  // Clip |diff| to [0, 255], then add or subtract it according to the sign
+  // of diff, with unsigned saturation.
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_adds_epu8(p0, d);
+ p1 = _mm_adds_epu8(p1, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p3 = _mm_adds_epu8(p3, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p5 = _mm_adds_epu8(p5, d);
+ p6 = _mm_adds_epu8(p6, d);
+ p7 = _mm_adds_epu8(p7, d);
+ p8 = _mm_adds_epu8(p8, d);
+ p9 = _mm_adds_epu8(p9, d);
+ p10 = _mm_adds_epu8(p10, d);
+ p11 = _mm_adds_epu8(p11, d);
+ p12 = _mm_adds_epu8(p12, d);
+ p13 = _mm_adds_epu8(p13, d);
+ p14 = _mm_adds_epu8(p14, d);
+ p15 = _mm_adds_epu8(p15, d);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_subs_epu8(p0, d);
+ p1 = _mm_subs_epu8(p1, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p3 = _mm_subs_epu8(p3, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p5 = _mm_subs_epu8(p5, d);
+ p6 = _mm_subs_epu8(p6, d);
+ p7 = _mm_subs_epu8(p7, d);
+ p8 = _mm_subs_epu8(p8, d);
+ p9 = _mm_subs_epu8(p9, d);
+ p10 = _mm_subs_epu8(p10, d);
+ p11 = _mm_subs_epu8(p11, d);
+ p12 = _mm_subs_epu8(p12, d);
+ p13 = _mm_subs_epu8(p13, d);
+ p14 = _mm_subs_epu8(p14, d);
+ p15 = _mm_subs_epu8(p15, d);
+ }
+
+ // Store results
+ _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+ _mm_store_si128((__m128i *)(dest + 1 * stride), p1);
+ _mm_store_si128((__m128i *)(dest + 2 * stride), p2);
+ _mm_store_si128((__m128i *)(dest + 3 * stride), p3);
+ _mm_store_si128((__m128i *)(dest + 4 * stride), p4);
+ _mm_store_si128((__m128i *)(dest + 5 * stride), p5);
+ _mm_store_si128((__m128i *)(dest + 6 * stride), p6);
+ _mm_store_si128((__m128i *)(dest + 7 * stride), p7);
+ _mm_store_si128((__m128i *)(dest + 8 * stride), p8);
+ _mm_store_si128((__m128i *)(dest + 9 * stride), p9);
+ _mm_store_si128((__m128i *)(dest + 10 * stride), p10);
+ _mm_store_si128((__m128i *)(dest + 11 * stride), p11);
+ _mm_store_si128((__m128i *)(dest + 12 * stride), p12);
+ _mm_store_si128((__m128i *)(dest + 13 * stride), p13);
+ _mm_store_si128((__m128i *)(dest + 14 * stride), p14);
+ _mm_store_si128((__m128i *)(dest + 15 * stride), p15);
+}
+
+void vp9_add_constant_residual_32x32_sse2(const int16_t diff,
+ const uint8_t *pred, int pitch,
+ uint8_t *dest, int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+ int i = 8;
+
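+  // Clip |diff| to [0, 255] and broadcast it to all 16 byte lanes once,
+  // outside the loop.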
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+ }
+
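+  // Each iteration processes four 32-pixel rows (two registers per row);
+  // eight iterations cover the 32x32 block.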
+ do {
+ // Prediction data.
+ __m128i p0 = _mm_load_si128((const __m128i *)(pred + 0 * pitch));
+ __m128i p1 = _mm_load_si128((const __m128i *)(pred + 0 * pitch + 16));
+ __m128i p2 = _mm_load_si128((const __m128i *)(pred + 1 * pitch));
+ __m128i p3 = _mm_load_si128((const __m128i *)(pred + 1 * pitch + 16));
+ __m128i p4 = _mm_load_si128((const __m128i *)(pred + 2 * pitch));
+ __m128i p5 = _mm_load_si128((const __m128i *)(pred + 2 * pitch + 16));
+ __m128i p6 = _mm_load_si128((const __m128i *)(pred + 3 * pitch));
+ __m128i p7 = _mm_load_si128((const __m128i *)(pred + 3 * pitch + 16));
+
+    // The residual was clipped and broadcast above; add or subtract it
+    // according to the sign of diff, with unsigned saturation.
+ if (diff >= 0) {
+ p0 = _mm_adds_epu8(p0, d);
+ p1 = _mm_adds_epu8(p1, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p3 = _mm_adds_epu8(p3, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p5 = _mm_adds_epu8(p5, d);
+ p6 = _mm_adds_epu8(p6, d);
+ p7 = _mm_adds_epu8(p7, d);
+ } else {
+ p0 = _mm_subs_epu8(p0, d);
+ p1 = _mm_subs_epu8(p1, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p3 = _mm_subs_epu8(p3, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p5 = _mm_subs_epu8(p5, d);
+ p6 = _mm_subs_epu8(p6, d);
+ p7 = _mm_subs_epu8(p7, d);
+ }
+
+ // Store results
+ _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+ _mm_store_si128((__m128i *)(dest + 0 * stride + 16), p1);
+ _mm_store_si128((__m128i *)(dest + 1 * stride), p2);
+ _mm_store_si128((__m128i *)(dest + 1 * stride + 16), p3);
+ _mm_store_si128((__m128i *)(dest + 2 * stride), p4);
+ _mm_store_si128((__m128i *)(dest + 2 * stride + 16), p5);
+ _mm_store_si128((__m128i *)(dest + 3 * stride), p6);
+ _mm_store_si128((__m128i *)(dest + 3 * stride + 16), p7);
+
+ pred += 4 * pitch;
+ dest += 4 * stride;
+ } while (--i);
+}
#endif