-rw-r--r--  test/datarate_test.cc                      8
-rw-r--r--  test/variance_test.cc                    472
-rw-r--r--  vp8/common/rtcd_defs.pl                    2
-rw-r--r--  vp8/encoder/arm/neon/denoising_neon.c     18
-rw-r--r--  vp8/encoder/denoising.c                   34
-rw-r--r--  vp8/encoder/x86/denoising_sse2.c          20
-rw-r--r--  vp8/encoder/x86/quantize_ssse3.c           4
-rw-r--r--  vp9/common/vp9_idct.h                      3
-rw-r--r--  vp9/encoder/vp9_encodeframe.c             67
-rw-r--r--  vp9/encoder/vp9_encoder.c                  4
-rw-r--r--  vp9/encoder/vp9_firstpass.c                4
-rw-r--r--  vp9/encoder/vp9_mcomp.c                    6
-rw-r--r--  vp9/encoder/vp9_mcomp.h                    4
-rw-r--r--  vp9/encoder/vp9_pickmode.c                12
-rw-r--r--  vp9/encoder/vp9_ratectrl.c                80
-rw-r--r--  vp9/encoder/vp9_rdopt.c                    8
-rw-r--r--  vp9/encoder/vp9_speed_features.h           2
-rw-r--r--  vp9/encoder/vp9_svc_layercontext.c         2
-rw-r--r--  vp9/encoder/x86/vp9_dct32x32_sse2.c        3
-rw-r--r--  vp9/vp9_dx_iface.c                        98
-rw-r--r--  webmdec.cc                                 4
21 files changed, 318 insertions, 537 deletions
diff --git a/test/datarate_test.cc b/test/datarate_test.cc
index 2b4aa3acb..80be05ee9 100644
--- a/test/datarate_test.cc
+++ b/test/datarate_test.cc
@@ -522,10 +522,14 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting3TemporalLayers) {
cfg_.ts_target_bitrate[2] = cfg_.rc_target_bitrate;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (int j = 0; j < static_cast<int>(cfg_.ts_number_layers); ++j) {
- ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.85)
+      // TODO(yaowu): Work out a more stable rc control strategy and
+      // adjust the thresholds to be tighter than .75.
+ ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75)
<< " The datarate for the file is lower than target by too much, "
"for layer: " << j;
- ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.15)
+      // TODO(yaowu): Work out a more stable rc control strategy and
+      // adjust the thresholds to be tighter than 1.25.
+ ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25)
<< " The datarate for the file is greater than target by too much, "
"for layer: " << j;
}
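
For reference, the new bounds amount to a symmetric ±25% tolerance on each temporal layer's effective datarate, relaxed from the previous ±15%. A minimal sketch of the same check outside the test harness — the helper name and plain-double types are hypothetical:

static bool LayerRateWithinTolerance(double effective_kbps,
                                     double target_kbps) {
  // Mirrors the relaxed assertions above: accept an effective rate
  // within [0.75, 1.25] of the layer target.
  return effective_kbps >= target_kbps * 0.75 &&
         effective_kbps <= target_kbps * 1.25;
}
// Example: a 200 kbps layer now passes anywhere in [150, 250] kbps,
// where the old 0.85/1.15 bounds required [170, 230] kbps.
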
diff --git a/test/variance_test.cc b/test/variance_test.cc
index c9bf13a6b..817ba1411 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -294,60 +294,41 @@ TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
TEST_P(VP8VarianceTest, Ref) { RefTest(); }
TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
-const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
-const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
-const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
-const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
-const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
INSTANTIATE_TEST_CASE_P(
C, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c)));
+ ::testing::Values(make_tuple(2, 2, vp8_variance4x4_c),
+ make_tuple(3, 3, vp8_variance8x8_c),
+ make_tuple(3, 4, vp8_variance8x16_c),
+ make_tuple(4, 3, vp8_variance16x8_c),
+ make_tuple(4, 4, vp8_variance16x16_c)));
#if HAVE_NEON
-const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
-const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
-const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
-const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
INSTANTIATE_TEST_CASE_P(
NEON, VP8VarianceTest,
- ::testing::Values(make_tuple(3, 3, variance8x8_neon),
- make_tuple(3, 4, variance8x16_neon),
- make_tuple(4, 3, variance16x8_neon),
- make_tuple(4, 4, variance16x16_neon)));
+ ::testing::Values(make_tuple(3, 3, vp8_variance8x8_neon),
+ make_tuple(3, 4, vp8_variance8x16_neon),
+ make_tuple(4, 3, vp8_variance16x8_neon),
+ make_tuple(4, 4, vp8_variance16x16_neon)));
#endif
#if HAVE_MMX
-const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
-const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
-const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
-const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
-const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
INSTANTIATE_TEST_CASE_P(
MMX, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
+ ::testing::Values(make_tuple(2, 2, vp8_variance4x4_mmx),
+ make_tuple(3, 3, vp8_variance8x8_mmx),
+ make_tuple(3, 4, vp8_variance8x16_mmx),
+ make_tuple(4, 3, vp8_variance16x8_mmx),
+ make_tuple(4, 4, vp8_variance16x16_mmx)));
#endif
#if HAVE_SSE2
-const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
-const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
-const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
-const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
-const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
INSTANTIATE_TEST_CASE_P(
SSE2, VP8VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
- make_tuple(3, 3, variance8x8_wmt),
- make_tuple(3, 4, variance8x16_wmt),
- make_tuple(4, 3, variance16x8_wmt),
- make_tuple(4, 4, variance16x16_wmt)));
+ ::testing::Values(make_tuple(2, 2, vp8_variance4x4_wmt),
+ make_tuple(3, 3, vp8_variance8x8_wmt),
+ make_tuple(3, 4, vp8_variance8x16_wmt),
+ make_tuple(4, 3, vp8_variance16x8_wmt),
+ make_tuple(4, 4, vp8_variance16x16_wmt)));
#endif
#endif // CONFIG_VP8_ENCODER
@@ -369,337 +350,150 @@ TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
-const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
-const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
-const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
-const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
-const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
-const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
-const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
-const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
-const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
-const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
-const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
-const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
-const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
INSTANTIATE_TEST_CASE_P(
C, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_c),
- make_tuple(2, 3, variance4x8_c),
- make_tuple(3, 2, variance8x4_c),
- make_tuple(3, 3, variance8x8_c),
- make_tuple(3, 4, variance8x16_c),
- make_tuple(4, 3, variance16x8_c),
- make_tuple(4, 4, variance16x16_c),
- make_tuple(4, 5, variance16x32_c),
- make_tuple(5, 4, variance32x16_c),
- make_tuple(5, 5, variance32x32_c),
- make_tuple(5, 6, variance32x64_c),
- make_tuple(6, 5, variance64x32_c),
- make_tuple(6, 6, variance64x64_c)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_c =
- vp9_sub_pixel_variance4x4_c;
-const vp9_subpixvariance_fn_t subpel_variance4x8_c =
- vp9_sub_pixel_variance4x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x4_c =
- vp9_sub_pixel_variance8x4_c;
-const vp9_subpixvariance_fn_t subpel_variance8x8_c =
- vp9_sub_pixel_variance8x8_c;
-const vp9_subpixvariance_fn_t subpel_variance8x16_c =
- vp9_sub_pixel_variance8x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x8_c =
- vp9_sub_pixel_variance16x8_c;
-const vp9_subpixvariance_fn_t subpel_variance16x16_c =
- vp9_sub_pixel_variance16x16_c;
-const vp9_subpixvariance_fn_t subpel_variance16x32_c =
- vp9_sub_pixel_variance16x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x16_c =
- vp9_sub_pixel_variance32x16_c;
-const vp9_subpixvariance_fn_t subpel_variance32x32_c =
- vp9_sub_pixel_variance32x32_c;
-const vp9_subpixvariance_fn_t subpel_variance32x64_c =
- vp9_sub_pixel_variance32x64_c;
-const vp9_subpixvariance_fn_t subpel_variance64x32_c =
- vp9_sub_pixel_variance64x32_c;
-const vp9_subpixvariance_fn_t subpel_variance64x64_c =
- vp9_sub_pixel_variance64x64_c;
+ ::testing::Values(make_tuple(2, 2, vp9_variance4x4_c),
+ make_tuple(2, 3, vp9_variance4x8_c),
+ make_tuple(3, 2, vp9_variance8x4_c),
+ make_tuple(3, 3, vp9_variance8x8_c),
+ make_tuple(3, 4, vp9_variance8x16_c),
+ make_tuple(4, 3, vp9_variance16x8_c),
+ make_tuple(4, 4, vp9_variance16x16_c),
+ make_tuple(4, 5, vp9_variance16x32_c),
+ make_tuple(5, 4, vp9_variance32x16_c),
+ make_tuple(5, 5, vp9_variance32x32_c),
+ make_tuple(5, 6, vp9_variance32x64_c),
+ make_tuple(6, 5, vp9_variance64x32_c),
+ make_tuple(6, 6, vp9_variance64x64_c)));
+
INSTANTIATE_TEST_CASE_P(
C, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
- make_tuple(2, 3, subpel_variance4x8_c),
- make_tuple(3, 2, subpel_variance8x4_c),
- make_tuple(3, 3, subpel_variance8x8_c),
- make_tuple(3, 4, subpel_variance8x16_c),
- make_tuple(4, 3, subpel_variance16x8_c),
- make_tuple(4, 4, subpel_variance16x16_c),
- make_tuple(4, 5, subpel_variance16x32_c),
- make_tuple(5, 4, subpel_variance32x16_c),
- make_tuple(5, 5, subpel_variance32x32_c),
- make_tuple(5, 6, subpel_variance32x64_c),
- make_tuple(6, 5, subpel_variance64x32_c),
- make_tuple(6, 6, subpel_variance64x64_c)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
- vp9_sub_pixel_avg_variance4x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
- vp9_sub_pixel_avg_variance4x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
- vp9_sub_pixel_avg_variance8x4_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
- vp9_sub_pixel_avg_variance8x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
- vp9_sub_pixel_avg_variance8x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
- vp9_sub_pixel_avg_variance16x8_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
- vp9_sub_pixel_avg_variance16x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
- vp9_sub_pixel_avg_variance16x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
- vp9_sub_pixel_avg_variance32x16_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
- vp9_sub_pixel_avg_variance32x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
- vp9_sub_pixel_avg_variance32x64_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
- vp9_sub_pixel_avg_variance64x32_c;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
- vp9_sub_pixel_avg_variance64x64_c;
+ ::testing::Values(make_tuple(2, 2, vp9_sub_pixel_variance4x4_c),
+ make_tuple(2, 3, vp9_sub_pixel_variance4x8_c),
+ make_tuple(3, 2, vp9_sub_pixel_variance8x4_c),
+ make_tuple(3, 3, vp9_sub_pixel_variance8x8_c),
+ make_tuple(3, 4, vp9_sub_pixel_variance8x16_c),
+ make_tuple(4, 3, vp9_sub_pixel_variance16x8_c),
+ make_tuple(4, 4, vp9_sub_pixel_variance16x16_c),
+ make_tuple(4, 5, vp9_sub_pixel_variance16x32_c),
+ make_tuple(5, 4, vp9_sub_pixel_variance32x16_c),
+ make_tuple(5, 5, vp9_sub_pixel_variance32x32_c),
+ make_tuple(5, 6, vp9_sub_pixel_variance32x64_c),
+ make_tuple(6, 5, vp9_sub_pixel_variance64x32_c),
+ make_tuple(6, 6, vp9_sub_pixel_variance64x64_c)));
+
INSTANTIATE_TEST_CASE_P(
C, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
- make_tuple(2, 3, subpel_avg_variance4x8_c),
- make_tuple(3, 2, subpel_avg_variance8x4_c),
- make_tuple(3, 3, subpel_avg_variance8x8_c),
- make_tuple(3, 4, subpel_avg_variance8x16_c),
- make_tuple(4, 3, subpel_avg_variance16x8_c),
- make_tuple(4, 4, subpel_avg_variance16x16_c),
- make_tuple(4, 5, subpel_avg_variance16x32_c),
- make_tuple(5, 4, subpel_avg_variance32x16_c),
- make_tuple(5, 5, subpel_avg_variance32x32_c),
- make_tuple(5, 6, subpel_avg_variance32x64_c),
- make_tuple(6, 5, subpel_avg_variance64x32_c),
- make_tuple(6, 6, subpel_avg_variance64x64_c)));
+ ::testing::Values(make_tuple(2, 2, vp9_sub_pixel_avg_variance4x4_c),
+ make_tuple(2, 3, vp9_sub_pixel_avg_variance4x8_c),
+ make_tuple(3, 2, vp9_sub_pixel_avg_variance8x4_c),
+ make_tuple(3, 3, vp9_sub_pixel_avg_variance8x8_c),
+ make_tuple(3, 4, vp9_sub_pixel_avg_variance8x16_c),
+ make_tuple(4, 3, vp9_sub_pixel_avg_variance16x8_c),
+ make_tuple(4, 4, vp9_sub_pixel_avg_variance16x16_c),
+ make_tuple(4, 5, vp9_sub_pixel_avg_variance16x32_c),
+ make_tuple(5, 4, vp9_sub_pixel_avg_variance32x16_c),
+ make_tuple(5, 5, vp9_sub_pixel_avg_variance32x32_c),
+ make_tuple(5, 6, vp9_sub_pixel_avg_variance32x64_c),
+ make_tuple(6, 5, vp9_sub_pixel_avg_variance64x32_c),
+ make_tuple(6, 6, vp9_sub_pixel_avg_variance64x64_c)));
#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
INSTANTIATE_TEST_CASE_P(
MMX, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
- make_tuple(3, 3, variance8x8_mmx),
- make_tuple(3, 4, variance8x16_mmx),
- make_tuple(4, 3, variance16x8_mmx),
- make_tuple(4, 4, variance16x16_mmx)));
+ ::testing::Values(make_tuple(2, 2, vp9_variance4x4_mmx),
+ make_tuple(3, 3, vp9_variance8x8_mmx),
+ make_tuple(3, 4, vp9_variance8x16_mmx),
+ make_tuple(4, 3, vp9_variance16x8_mmx),
+ make_tuple(4, 4, vp9_variance16x16_mmx)));
#endif
#if HAVE_SSE2
#if CONFIG_USE_X86INC
-const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
-const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
-const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
-const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
-const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
-const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
-const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
-const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
-const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
-const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
-const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
-const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
-const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
INSTANTIATE_TEST_CASE_P(
SSE2, VP9VarianceTest,
- ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
- make_tuple(2, 3, variance4x8_sse2),
- make_tuple(3, 2, variance8x4_sse2),
- make_tuple(3, 3, variance8x8_sse2),
- make_tuple(3, 4, variance8x16_sse2),
- make_tuple(4, 3, variance16x8_sse2),
- make_tuple(4, 4, variance16x16_sse2),
- make_tuple(4, 5, variance16x32_sse2),
- make_tuple(5, 4, variance32x16_sse2),
- make_tuple(5, 5, variance32x32_sse2),
- make_tuple(5, 6, variance32x64_sse2),
- make_tuple(6, 5, variance64x32_sse2),
- make_tuple(6, 6, variance64x64_sse2)));
-
-const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
- vp9_sub_pixel_variance4x4_sse;
-const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
- vp9_sub_pixel_variance4x8_sse;
-const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
- vp9_sub_pixel_variance8x4_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
- vp9_sub_pixel_variance8x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
- vp9_sub_pixel_variance8x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
- vp9_sub_pixel_variance16x8_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
- vp9_sub_pixel_variance16x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
- vp9_sub_pixel_variance16x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
- vp9_sub_pixel_variance32x16_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
- vp9_sub_pixel_variance32x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
- vp9_sub_pixel_variance32x64_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
- vp9_sub_pixel_variance64x32_sse2;
-const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
- vp9_sub_pixel_variance64x64_sse2;
+ ::testing::Values(make_tuple(2, 2, vp9_variance4x4_sse2),
+ make_tuple(2, 3, vp9_variance4x8_sse2),
+ make_tuple(3, 2, vp9_variance8x4_sse2),
+ make_tuple(3, 3, vp9_variance8x8_sse2),
+ make_tuple(3, 4, vp9_variance8x16_sse2),
+ make_tuple(4, 3, vp9_variance16x8_sse2),
+ make_tuple(4, 4, vp9_variance16x16_sse2),
+ make_tuple(4, 5, vp9_variance16x32_sse2),
+ make_tuple(5, 4, vp9_variance32x16_sse2),
+ make_tuple(5, 5, vp9_variance32x32_sse2),
+ make_tuple(5, 6, vp9_variance32x64_sse2),
+ make_tuple(6, 5, vp9_variance64x32_sse2),
+ make_tuple(6, 6, vp9_variance64x64_sse2)));
+
INSTANTIATE_TEST_CASE_P(
SSE2, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
- make_tuple(2, 3, subpel_variance4x8_sse),
- make_tuple(3, 2, subpel_variance8x4_sse2),
- make_tuple(3, 3, subpel_variance8x8_sse2),
- make_tuple(3, 4, subpel_variance8x16_sse2),
- make_tuple(4, 3, subpel_variance16x8_sse2),
- make_tuple(4, 4, subpel_variance16x16_sse2),
- make_tuple(4, 5, subpel_variance16x32_sse2),
- make_tuple(5, 4, subpel_variance32x16_sse2),
- make_tuple(5, 5, subpel_variance32x32_sse2),
- make_tuple(5, 6, subpel_variance32x64_sse2),
- make_tuple(6, 5, subpel_variance64x32_sse2),
- make_tuple(6, 6, subpel_variance64x64_sse2)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
- vp9_sub_pixel_avg_variance4x4_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
- vp9_sub_pixel_avg_variance4x8_sse;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
- vp9_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
- vp9_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
- vp9_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
- vp9_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
- vp9_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
- vp9_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
- vp9_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
- vp9_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
- vp9_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
- vp9_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
- vp9_sub_pixel_avg_variance64x64_sse2;
+ ::testing::Values(make_tuple(2, 2, vp9_sub_pixel_variance4x4_sse),
+ make_tuple(2, 3, vp9_sub_pixel_variance4x8_sse),
+ make_tuple(3, 2, vp9_sub_pixel_variance8x4_sse2),
+ make_tuple(3, 3, vp9_sub_pixel_variance8x8_sse2),
+ make_tuple(3, 4, vp9_sub_pixel_variance8x16_sse2),
+ make_tuple(4, 3, vp9_sub_pixel_variance16x8_sse2),
+ make_tuple(4, 4, vp9_sub_pixel_variance16x16_sse2),
+ make_tuple(4, 5, vp9_sub_pixel_variance16x32_sse2),
+ make_tuple(5, 4, vp9_sub_pixel_variance32x16_sse2),
+ make_tuple(5, 5, vp9_sub_pixel_variance32x32_sse2),
+ make_tuple(5, 6, vp9_sub_pixel_variance32x64_sse2),
+ make_tuple(6, 5, vp9_sub_pixel_variance64x32_sse2),
+ make_tuple(6, 6, vp9_sub_pixel_variance64x64_sse2)));
+
INSTANTIATE_TEST_CASE_P(
SSE2, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
- make_tuple(2, 3, subpel_avg_variance4x8_sse),
- make_tuple(3, 2, subpel_avg_variance8x4_sse2),
- make_tuple(3, 3, subpel_avg_variance8x8_sse2),
- make_tuple(3, 4, subpel_avg_variance8x16_sse2),
- make_tuple(4, 3, subpel_avg_variance16x8_sse2),
- make_tuple(4, 4, subpel_avg_variance16x16_sse2),
- make_tuple(4, 5, subpel_avg_variance16x32_sse2),
- make_tuple(5, 4, subpel_avg_variance32x16_sse2),
- make_tuple(5, 5, subpel_avg_variance32x32_sse2),
- make_tuple(5, 6, subpel_avg_variance32x64_sse2),
- make_tuple(6, 5, subpel_avg_variance64x32_sse2),
- make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
+ ::testing::Values(make_tuple(2, 2, vp9_sub_pixel_avg_variance4x4_sse),
+ make_tuple(2, 3, vp9_sub_pixel_avg_variance4x8_sse),
+ make_tuple(3, 2, vp9_sub_pixel_avg_variance8x4_sse2),
+ make_tuple(3, 3, vp9_sub_pixel_avg_variance8x8_sse2),
+ make_tuple(3, 4, vp9_sub_pixel_avg_variance8x16_sse2),
+ make_tuple(4, 3, vp9_sub_pixel_avg_variance16x8_sse2),
+ make_tuple(4, 4, vp9_sub_pixel_avg_variance16x16_sse2),
+ make_tuple(4, 5, vp9_sub_pixel_avg_variance16x32_sse2),
+ make_tuple(5, 4, vp9_sub_pixel_avg_variance32x16_sse2),
+ make_tuple(5, 5, vp9_sub_pixel_avg_variance32x32_sse2),
+ make_tuple(5, 6, vp9_sub_pixel_avg_variance32x64_sse2),
+ make_tuple(6, 5, vp9_sub_pixel_avg_variance64x32_sse2),
+ make_tuple(6, 6, vp9_sub_pixel_avg_variance64x64_sse2)));
#endif
#endif
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
- vp9_sub_pixel_variance4x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
- vp9_sub_pixel_variance4x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
- vp9_sub_pixel_variance8x4_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
- vp9_sub_pixel_variance8x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
- vp9_sub_pixel_variance8x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
- vp9_sub_pixel_variance16x8_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
- vp9_sub_pixel_variance16x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
- vp9_sub_pixel_variance16x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
- vp9_sub_pixel_variance32x16_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
- vp9_sub_pixel_variance32x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
- vp9_sub_pixel_variance32x64_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
- vp9_sub_pixel_variance64x32_ssse3;
-const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
- vp9_sub_pixel_variance64x64_ssse3;
INSTANTIATE_TEST_CASE_P(
SSSE3, VP9SubpelVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
- make_tuple(2, 3, subpel_variance4x8_ssse3),
- make_tuple(3, 2, subpel_variance8x4_ssse3),
- make_tuple(3, 3, subpel_variance8x8_ssse3),
- make_tuple(3, 4, subpel_variance8x16_ssse3),
- make_tuple(4, 3, subpel_variance16x8_ssse3),
- make_tuple(4, 4, subpel_variance16x16_ssse3),
- make_tuple(4, 5, subpel_variance16x32_ssse3),
- make_tuple(5, 4, subpel_variance32x16_ssse3),
- make_tuple(5, 5, subpel_variance32x32_ssse3),
- make_tuple(5, 6, subpel_variance32x64_ssse3),
- make_tuple(6, 5, subpel_variance64x32_ssse3),
- make_tuple(6, 6, subpel_variance64x64_ssse3)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
- vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
- vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
- vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
- vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
- vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
- vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
- vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
- vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
- vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
- vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
- vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
- vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
- vp9_sub_pixel_avg_variance64x64_ssse3;
+ ::testing::Values(make_tuple(2, 2, vp9_sub_pixel_variance4x4_ssse3),
+ make_tuple(2, 3, vp9_sub_pixel_variance4x8_ssse3),
+ make_tuple(3, 2, vp9_sub_pixel_variance8x4_ssse3),
+ make_tuple(3, 3, vp9_sub_pixel_variance8x8_ssse3),
+ make_tuple(3, 4, vp9_sub_pixel_variance8x16_ssse3),
+ make_tuple(4, 3, vp9_sub_pixel_variance16x8_ssse3),
+ make_tuple(4, 4, vp9_sub_pixel_variance16x16_ssse3),
+ make_tuple(4, 5, vp9_sub_pixel_variance16x32_ssse3),
+ make_tuple(5, 4, vp9_sub_pixel_variance32x16_ssse3),
+ make_tuple(5, 5, vp9_sub_pixel_variance32x32_ssse3),
+ make_tuple(5, 6, vp9_sub_pixel_variance32x64_ssse3),
+ make_tuple(6, 5, vp9_sub_pixel_variance64x32_ssse3),
+ make_tuple(6, 6, vp9_sub_pixel_variance64x64_ssse3)));
+
INSTANTIATE_TEST_CASE_P(
SSSE3, VP9SubpelAvgVarianceTest,
- ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
- make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
- make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
- make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
- make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
- make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
- make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
- make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
- make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
- make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
- make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
- make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
- make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
+ ::testing::Values(make_tuple(2, 2, vp9_sub_pixel_avg_variance4x4_ssse3),
+ make_tuple(2, 3, vp9_sub_pixel_avg_variance4x8_ssse3),
+ make_tuple(3, 2, vp9_sub_pixel_avg_variance8x4_ssse3),
+ make_tuple(3, 3, vp9_sub_pixel_avg_variance8x8_ssse3),
+ make_tuple(3, 4, vp9_sub_pixel_avg_variance8x16_ssse3),
+ make_tuple(4, 3, vp9_sub_pixel_avg_variance16x8_ssse3),
+ make_tuple(4, 4, vp9_sub_pixel_avg_variance16x16_ssse3),
+ make_tuple(4, 5, vp9_sub_pixel_avg_variance16x32_ssse3),
+ make_tuple(5, 4, vp9_sub_pixel_avg_variance32x16_ssse3),
+ make_tuple(5, 5, vp9_sub_pixel_avg_variance32x32_ssse3),
+ make_tuple(5, 6, vp9_sub_pixel_avg_variance32x64_ssse3),
+ make_tuple(6, 5, vp9_sub_pixel_avg_variance64x32_ssse3),
+ make_tuple(6, 6, vp9_sub_pixel_avg_variance64x64_ssse3)));
#endif
#endif
#endif // CONFIG_VP9_ENCODER
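
The change in this file is mechanical: the per-size function-pointer constants added nothing over the vp8_/vp9_ symbols themselves, so the symbols are now passed straight into make_tuple. The first two tuple members are log2(width) and log2(height). A sketch (fixture name hypothetical, assuming the gtest and tr1::tuple headers variance_test.cc already includes) of how a value-parameterized fixture consumes these tuples:

class VarianceSketchTest
    : public ::testing::TestWithParam<
          std::tr1::tuple<int, int, vp9_variance_fn_t> > {
 protected:
  virtual void SetUp() {
    width_  = 1 << std::tr1::get<0>(GetParam());  // e.g. 2 -> 4 pixels
    height_ = 1 << std::tr1::get<1>(GetParam());  // e.g. 3 -> 8 pixels
    variance_fn_ = std::tr1::get<2>(GetParam());  // e.g. vp9_variance4x8_c
  }
  int width_, height_;
  vp9_variance_fn_t variance_fn_;
};
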
diff --git a/vp8/common/rtcd_defs.pl b/vp8/common/rtcd_defs.pl
index 789b0de54..130d96535 100644
--- a/vp8/common/rtcd_defs.pl
+++ b/vp8/common/rtcd_defs.pl
@@ -532,7 +532,7 @@ specialize qw/vp8_yv12_copy_partial_frame neon/;
# Denoiser filter
#
if (vpx_config("CONFIG_TEMPORAL_DENOISING") eq "yes") {
- add_proto qw/int vp8_denoiser_filter/, "unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude";
+ add_proto qw/int vp8_denoiser_filter/, "struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset";
specialize qw/vp8_denoiser_filter sse2 neon/;
}
diff --git a/vp8/encoder/arm/neon/denoising_neon.c b/vp8/encoder/arm/neon/denoising_neon.c
index 1bebe8fba..23dc0a967 100644
--- a/vp8/encoder/arm/neon/denoising_neon.c
+++ b/vp8/encoder/arm/neon/denoising_neon.c
@@ -45,12 +45,10 @@
* [16, 255] 3 6 7
*/
-int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
- int mc_running_avg_y_stride,
- unsigned char *running_avg_y,
- int running_avg_y_stride,
- unsigned char *sig, int sig_stride,
- unsigned int motion_magnitude) {
+int vp8_denoiser_filter_neon(YV12_BUFFER_CONFIG *mc_running_avg,
+ YV12_BUFFER_CONFIG *running_avg,
+ MACROBLOCK *signal, unsigned int motion_magnitude,
+ int y_offset, int uv_offset) {
/* If motion_magnitude is small, making the denoiser more aggressive by
* increasing the adjustment for each level, level1 adjustment is
* increased, the deltas stay the same.
@@ -62,6 +60,14 @@ int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
const uint8x16_t v_level1_threshold = vdupq_n_u8(4);
const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+
+ /* Local variables for array pointers and strides. */
+ unsigned char *sig = signal->thismb;
+ int sig_stride = 16;
+ unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
+ int mc_running_avg_y_stride = mc_running_avg->y_stride;
+ unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
+ int running_avg_y_stride = running_avg->y_stride;
int64x2_t v_sum_diff_total = vdupq_n_s64(0);
/* Go over lines. */
diff --git a/vp8/encoder/denoising.c b/vp8/encoder/denoising.c
index bfce28052..781926547 100644
--- a/vp8/encoder/denoising.c
+++ b/vp8/encoder/denoising.c
@@ -51,13 +51,17 @@ static const unsigned int SSE_THRESHOLD = 16 * 16 * 40;
* [16, 255] 6 7
*/
-int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
- unsigned char *running_avg_y, int avg_y_stride,
- unsigned char *sig, int sig_stride,
- unsigned int motion_magnitude)
+int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
+ YV12_BUFFER_CONFIG *running_avg, MACROBLOCK *signal,
+ unsigned int motion_magnitude, int y_offset,
+ int uv_offset)
{
- unsigned char *running_avg_y_start = running_avg_y;
- unsigned char *sig_start = sig;
+ unsigned char *sig = signal->thismb;
+ int sig_stride = 16;
+ unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
+ int mc_avg_y_stride = mc_running_avg->y_stride;
+ unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
+ int avg_y_stride = running_avg->y_stride;
int r, c, i;
int sum_diff = 0;
int adj_val[3] = {3, 4, 6};
@@ -126,7 +130,8 @@ int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
return COPY_BLOCK;
- vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride);
+ vp8_copy_mem16x16(running_avg->y_buffer + y_offset, avg_y_stride,
+ signal->thismb, sig_stride);
return FILTER_BLOCK;
}
@@ -280,17 +285,12 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
if (decision == FILTER_BLOCK)
{
- unsigned char *mc_running_avg_y =
- denoiser->yv12_mc_running_avg.y_buffer + recon_yoffset;
- int mc_avg_y_stride = denoiser->yv12_mc_running_avg.y_stride;
- unsigned char *running_avg_y =
- denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset;
- int avg_y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride;
-
/* Filter. */
- decision = vp8_denoiser_filter(mc_running_avg_y, mc_avg_y_stride,
- running_avg_y, avg_y_stride,
- x->thismb, 16, motion_magnitude2);
+ decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
+ &denoiser->yv12_running_avg[INTRA_FRAME],
+ x,
+ motion_magnitude2,
+ recon_yoffset, recon_uvoffset);
}
if (decision == COPY_BLOCK)
{
diff --git a/vp8/encoder/x86/denoising_sse2.c b/vp8/encoder/x86/denoising_sse2.c
index d1f76b2cb..cceb8263f 100644
--- a/vp8/encoder/x86/denoising_sse2.c
+++ b/vp8/encoder/x86/denoising_sse2.c
@@ -22,14 +22,17 @@ union sum_union {
signed char e[16];
};
-int vp8_denoiser_filter_sse2(unsigned char *mc_running_avg_y,
- int mc_avg_y_stride,
- unsigned char *running_avg_y, int avg_y_stride,
- unsigned char *sig, int sig_stride,
- unsigned int motion_magnitude)
+int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
+ YV12_BUFFER_CONFIG *running_avg,
+ MACROBLOCK *signal, unsigned int motion_magnitude,
+ int y_offset, int uv_offset)
{
- unsigned char *running_avg_y_start = running_avg_y;
- unsigned char *sig_start = sig;
+ unsigned char *sig = signal->thismb;
+ int sig_stride = 16;
+ unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
+ int mc_avg_y_stride = mc_running_avg->y_stride;
+ unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
+ int avg_y_stride = running_avg->y_stride;
int r;
__m128i acc_diff = _mm_setzero_si128();
const __m128i k_0 = _mm_setzero_si128();
@@ -111,6 +114,7 @@ int vp8_denoiser_filter_sse2(unsigned char *mc_running_avg_y,
}
}
- vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride);
+ vp8_copy_mem16x16(running_avg->y_buffer + y_offset, avg_y_stride,
+ signal->thismb, sig_stride);
return FILTER_BLOCK;
}
diff --git a/vp8/encoder/x86/quantize_ssse3.c b/vp8/encoder/x86/quantize_ssse3.c
index 9b4471d4f..448217ff4 100644
--- a/vp8/encoder/x86/quantize_ssse3.c
+++ b/vp8/encoder/x86/quantize_ssse3.c
@@ -27,7 +27,11 @@ static int bsr(int mask) {
#else
static int bsr(int mask) {
int eob;
+#if defined(__GNUC__) && __GNUC__
+ __asm__ __volatile__("bsr %1, %0" : "=r" (eob) : "r" (mask) : "flags");
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
asm volatile("bsr %1, %0" : "=r" (eob) : "r" (mask) : "flags");
+#endif
eob++;
if (mask == 0)
eob = 0;
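
bsr() returns the bit index of the highest set bit via the x86 BSR instruction; the hunk routes GCC-style compilers through __asm__ __volatile__ and keeps plain asm volatile for the Sun Studio branch. On GCC/Clang the same result is available without inline assembly — a hypothetical alternative, not part of the patch, valid for mask != 0:

#include <limits.h>

static int bsr_builtin(int mask) {
  /* __builtin_clz counts leading zero bits, so the highest set bit
   * sits at (bits - 1) - clz. */
  return (int)(sizeof(unsigned int) * CHAR_BIT) - 1 -
         __builtin_clz((unsigned int)mask);
}
/* Example: bsr_builtin(0x90) == 7, matching the BSR instruction. */
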
diff --git a/vp9/common/vp9_idct.h b/vp9/common/vp9_idct.h
index ceca7951b..d86877622 100644
--- a/vp9/common/vp9_idct.h
+++ b/vp9/common/vp9_idct.h
@@ -33,9 +33,6 @@ extern "C" {
#define pair_set_epi16(a, b) \
_mm_set_epi16(b, a, b, a, b, a, b, a)
-#define pair_set_epi32(a, b) \
- _mm_set_epi32(b, a, b, a)
-
// Constants:
// for (int i = 1; i< 32; ++i)
// printf("static const int cospi_%d_64 = %.0f;\n", i,
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index ef1d4e60e..87051d56b 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -3063,6 +3063,23 @@ static void encode_frame_internal(VP9_COMP *cpi) {
#endif
}
+static INTERP_FILTER get_interp_filter(
+ const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
+ if (!is_alt_ref &&
+ threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
+ threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
+ threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
+ return EIGHTTAP_SMOOTH;
+ } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
+ threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
+ return EIGHTTAP_SHARP;
+ } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
+ return EIGHTTAP;
+ } else {
+ return SWITCHABLE;
+ }
+}
+
void vp9_encode_frame(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
@@ -3098,59 +3115,41 @@ void vp9_encode_frame(VP9_COMP *cpi) {
// that for subsequent frames.
// It does the same analysis for transform size selection also.
const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
- const int64_t *mode_thresh = rd_opt->prediction_type_threshes[frame_type];
- const int64_t *filter_thresh = rd_opt->filter_threshes[frame_type];
+ int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
+ int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
+ int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
+ const int is_alt_ref = frame_type == ALTREF_FRAME;
/* prediction (compound, single or hybrid) mode selection */
- if (frame_type == ALTREF_FRAME || !cm->allow_comp_inter_inter)
+ if (is_alt_ref || !cm->allow_comp_inter_inter)
cm->reference_mode = SINGLE_REFERENCE;
- else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] &&
- mode_thresh[COMPOUND_REFERENCE] >
- mode_thresh[REFERENCE_MODE_SELECT] &&
+ else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
+ mode_thrs[COMPOUND_REFERENCE] >
+ mode_thrs[REFERENCE_MODE_SELECT] &&
check_dual_ref_flags(cpi) &&
cpi->static_mb_pct == 100)
cm->reference_mode = COMPOUND_REFERENCE;
- else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT])
+ else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
cm->reference_mode = SINGLE_REFERENCE;
else
cm->reference_mode = REFERENCE_MODE_SELECT;
- if (cm->interp_filter == SWITCHABLE) {
- if (frame_type != ALTREF_FRAME &&
- filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] &&
- filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] &&
- filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) {
- cm->interp_filter = EIGHTTAP_SMOOTH;
- } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] &&
- filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) {
- cm->interp_filter = EIGHTTAP_SHARP;
- } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) {
- cm->interp_filter = EIGHTTAP;
- }
- }
+ if (cm->interp_filter == SWITCHABLE)
+ cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
encode_frame_internal(cpi);
- for (i = 0; i < REFERENCE_MODES; ++i) {
- const int diff = (int) (rd_opt->comp_pred_diff[i] / cm->MBs);
- rd_opt->prediction_type_threshes[frame_type][i] += diff;
- rd_opt->prediction_type_threshes[frame_type][i] >>= 1;
- }
+ for (i = 0; i < REFERENCE_MODES; ++i)
+ mode_thrs[i] = (mode_thrs[i] + rd_opt->comp_pred_diff[i] / cm->MBs) / 2;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
- const int64_t diff = rd_opt->filter_diff[i] / cm->MBs;
- rd_opt->filter_threshes[frame_type][i] =
- (rd_opt->filter_threshes[frame_type][i] + diff) / 2;
- }
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
+ filter_thrs[i] = (filter_thrs[i] + rd_opt->filter_diff[i] / cm->MBs) / 2;
for (i = 0; i < TX_MODES; ++i) {
int64_t pd = rd_opt->tx_select_diff[i];
- int diff;
if (i == TX_MODE_SELECT)
pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0);
- diff = (int) (pd / cm->MBs);
- rd_opt->tx_select_threshes[frame_type][i] += diff;
- rd_opt->tx_select_threshes[frame_type][i] /= 2;
+ tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
}
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
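
After this refactor, all three threshold updates collapse to the same recurrence: each per-frame-type threshold moves halfway toward the newly measured per-macroblock rd difference, i.e. a running average with weight 1/2 (the mode thresholds also no longer truncate the diff to int first). A worked illustration with hypothetical numbers:

/* thr <- (thr + diff / cm->MBs) / 2
 * Suppose thr starts at 1000 and diff / cm->MBs is 400 for three frames:
 *   frame 1: (1000 + 400) / 2 = 700
 *   frame 2: (700 + 400) / 2  = 550
 *   frame 3: (550 + 400) / 2  = 475   -- converging toward 400
 */
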
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index a9a9695de..cc2c552a7 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -2061,7 +2061,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
cm->lf.mode_ref_delta_update = 0;
// Initialize cpi->mv_step_param to default based on max resolution.
- cpi->mv_step_param = vp9_init_search_range(cpi, max_mv_def);
+ cpi->mv_step_param = vp9_init_search_range(sf, max_mv_def);
// Initialize cpi->max_mv_magnitude and cpi->mv_step_param if appropriate.
if (sf->auto_mv_step_size) {
if (frame_is_intra_only(cm)) {
@@ -2073,7 +2073,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
// Allow mv_steps to correspond to twice the max mv magnitude found
// in the previous frame, capped by the default max_mv_magnitude based
// on resolution.
- cpi->mv_step_param = vp9_init_search_range(cpi, MIN(max_mv_def, 2 *
+ cpi->mv_step_param = vp9_init_search_range(sf, MIN(max_mv_def, 2 *
cpi->max_mv_magnitude));
cpi->max_mv_magnitude = 0;
}
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 5867fb3fc..efa320f3f 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -2402,8 +2402,8 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) {
const double progress =
(double)(cpi->twopass.stats_in - cpi->twopass.stats_in_start) /
(cpi->twopass.stats_in_end - cpi->twopass.stats_in_start);
- const int bits_used = progress * rc->this_frame_target +
- (1.0 - progress) * rc->projected_frame_size;
+ const int bits_used = (int)(progress * rc->this_frame_target +
+ (1.0 - progress) * rc->projected_frame_size);
#endif
cpi->twopass.bits_left -= bits_used;
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 15afa1ab5..43c8ab868 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -51,7 +51,7 @@ void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
x->mv_row_max = row_max;
}
-int vp9_init_search_range(VP9_COMP *cpi, int size) {
+int vp9_init_search_range(const SPEED_FEATURES *sf, int size) {
int sr = 0;
// Minimum search size no matter what the passed in value.
@@ -60,8 +60,8 @@ int vp9_init_search_range(VP9_COMP *cpi, int size) {
while ((size << sr) < MAX_FULL_PEL_VAL)
sr++;
- sr += cpi->sf.reduce_first_step_size;
- sr = MIN(sr, (cpi->sf.max_step_search_steps - 2));
+ sr += sf->reduce_first_step_size;
+ sr = MIN(sr, (sf->max_step_search_steps - 2));
return sr;
}
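
With the VP9_COMP dependency gone, vp9_init_search_range is a pure function of the speed features and the passed-in size: it finds the smallest sr such that (size << sr) reaches MAX_FULL_PEL_VAL, then applies the two speed-feature adjustments. A worked example — the MAX_FULL_PEL_VAL value below is illustrative, not quoted from the source:

/* Suppose MAX_FULL_PEL_VAL were 1023 and size == 64:
 *   64<<0 .. 64<<3 give 64, 128, 256, 512 (all < 1023), 64<<4 == 1024,
 * so the loop leaves sr == 4, after which
 *   sr += sf->reduce_first_step_size;
 *   sr = MIN(sr, sf->max_step_search_steps - 2);
 */
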
diff --git a/vp9/encoder/vp9_mcomp.h b/vp9/encoder/vp9_mcomp.h
index 1f524f1f6..827957d62 100644
--- a/vp9/encoder/vp9_mcomp.h
+++ b/vp9/encoder/vp9_mcomp.h
@@ -62,7 +62,9 @@ int vp9_get_mvpred_av_var(const MACROBLOCK *x,
int use_mvcost);
struct VP9_COMP;
-int vp9_init_search_range(struct VP9_COMP *cpi, int size);
+struct SPEED_FEATURES;
+
+int vp9_init_search_range(const struct SPEED_FEATURES *sf, int size);
// Runs sequence of diamond searches in smaller steps for RD
int vp9_full_pixel_diamond(const struct VP9_COMP *cpi, MACROBLOCK *x,
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index e750a53b6..adaa04447 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -161,10 +161,16 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &xd->plane[0];
- int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
- pd->dst.buf, pd->dst.stride, &sse);
+ unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
+ pd->dst.buf, pd->dst.stride, &sse);
- vp9_model_rd_from_var_lapndz(sse + var, 1 << num_pels_log2_lookup[bsize],
+ // TODO(jingning) This is a temporary solution to account for frames with
+ // light changes. Need to customize the rate-distortion modeling for non-RD
+ // mode decision.
+ if ((sse >> 3) > var)
+ sse = var;
+
+ vp9_model_rd_from_var_lapndz(var + sse, 1 << num_pels_log2_lookup[bsize],
pd->dequant[1] >> 3, &rate, &dist);
*out_rate_sum = rate;
*out_dist_sum = dist << 3;
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 985743d88..fe43f3a07 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -45,11 +45,9 @@
// Tables relating active max Q to active min Q
static int kf_low_motion_minq[QINDEX_RANGE];
static int kf_high_motion_minq[QINDEX_RANGE];
-static int gf_low_motion_minq[QINDEX_RANGE];
-static int gf_high_motion_minq[QINDEX_RANGE];
+static int arfgf_low_motion_minq[QINDEX_RANGE];
+static int arfgf_high_motion_minq[QINDEX_RANGE];
static int inter_minq[QINDEX_RANGE];
-static int afq_low_motion_minq[QINDEX_RANGE];
-static int afq_high_motion_minq[QINDEX_RANGE];
static int gf_high = 2000;
static int gf_low = 400;
static int kf_high = 5000;
@@ -81,13 +79,11 @@ void vp9_rc_init_minq_luts() {
for (i = 0; i < QINDEX_RANGE; i++) {
const double maxq = vp9_convert_qindex_to_q(i);
- kf_low_motion_minq[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.15);
+ kf_low_motion_minq[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.125);
kf_high_motion_minq[i] = get_minq_index(maxq, 0.000002, -0.0012, 0.50);
- gf_low_motion_minq[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.32);
- gf_high_motion_minq[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.50);
- afq_low_motion_minq[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.33);
- afq_high_motion_minq[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55);
- inter_minq[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.75);
+ arfgf_low_motion_minq[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30);
+ arfgf_high_motion_minq[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.50);
+ inter_minq[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.90);
}
}
@@ -548,7 +544,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi,
}
active_best_quality = get_active_quality(
q, rc->gfu_boost, gf_low, gf_high,
- gf_low_motion_minq, gf_high_motion_minq);
+ arfgf_low_motion_minq, arfgf_high_motion_minq);
} else {
// Use the lower of active_worst_quality and recent/average Q.
if (cm->current_video_frame > 1) {
@@ -676,17 +672,12 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi,
if (oxcf->rc_mode == RC_MODE_CONSTRAINED_QUALITY) {
if (q < cq_level)
q = cq_level;
- if (rc->frames_since_key > 1) {
- active_best_quality = get_active_quality(q, rc->gfu_boost,
- gf_low, gf_high,
- afq_low_motion_minq,
- afq_high_motion_minq);
- } else {
- active_best_quality = get_active_quality(q, rc->gfu_boost,
- gf_low, gf_high,
- gf_low_motion_minq,
- gf_high_motion_minq);
- }
+
+ active_best_quality = get_active_quality(q, rc->gfu_boost,
+ gf_low, gf_high,
+ arfgf_low_motion_minq,
+ arfgf_high_motion_minq);
+
// Constrained quality use slightly lower active best.
active_best_quality = active_best_quality * 15 / 16;
@@ -694,20 +685,14 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi,
if (!cpi->refresh_alt_ref_frame) {
active_best_quality = cq_level;
} else {
- if (rc->frames_since_key > 1) {
- active_best_quality = get_active_quality(
- q, rc->gfu_boost, gf_low, gf_high,
- afq_low_motion_minq, afq_high_motion_minq);
- } else {
- active_best_quality = get_active_quality(
- q, rc->gfu_boost, gf_low, gf_high,
- gf_low_motion_minq, gf_high_motion_minq);
- }
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ arfgf_low_motion_minq, arfgf_high_motion_minq);
}
} else {
active_best_quality = get_active_quality(
q, rc->gfu_boost, gf_low, gf_high,
- gf_low_motion_minq, gf_high_motion_minq);
+ arfgf_low_motion_minq, arfgf_high_motion_minq);
}
} else {
if (oxcf->rc_mode == RC_MODE_CONSTANT_QUALITY) {
@@ -867,17 +852,12 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
if (oxcf->rc_mode == RC_MODE_CONSTRAINED_QUALITY) {
if (q < cq_level)
q = cq_level;
- if (rc->frames_since_key > 1) {
- active_best_quality = get_active_quality(q, rc->gfu_boost,
- gf_low, gf_high,
- afq_low_motion_minq,
- afq_high_motion_minq);
- } else {
- active_best_quality = get_active_quality(q, rc->gfu_boost,
- gf_low, gf_high,
- gf_low_motion_minq,
- gf_high_motion_minq);
- }
+
+ active_best_quality = get_active_quality(q, rc->gfu_boost,
+ gf_low, gf_high,
+ arfgf_low_motion_minq,
+ arfgf_high_motion_minq);
+
// Constrained quality use slightly lower active best.
active_best_quality = active_best_quality * 15 / 16;
@@ -885,20 +865,14 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
if (!cpi->refresh_alt_ref_frame) {
active_best_quality = cq_level;
} else {
- if (rc->frames_since_key > 1) {
- active_best_quality = get_active_quality(
- q, rc->gfu_boost, gf_low, gf_high,
- afq_low_motion_minq, afq_high_motion_minq);
- } else {
- active_best_quality = get_active_quality(
- q, rc->gfu_boost, gf_low, gf_high,
- gf_low_motion_minq, gf_high_motion_minq);
- }
+ active_best_quality = get_active_quality(
+ q, rc->gfu_boost, gf_low, gf_high,
+ arfgf_low_motion_minq, arfgf_high_motion_minq);
}
} else {
active_best_quality = get_active_quality(
q, rc->gfu_boost, gf_low, gf_high,
- gf_low_motion_minq, gf_high_motion_minq);
+ arfgf_low_motion_minq, arfgf_high_motion_minq);
}
} else {
if (oxcf->rc_mode == RC_MODE_CONSTANT_QUALITY) {
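
The table merge means ARF and GF frames now share a single pair of low/high-motion minq curves (arfgf_*), so every branch that previously chose between gf_* and afq_* on frames_since_key collapses to one get_active_quality call. get_active_quality itself is not shown in this hunk; the sketch below is reconstructed from its call sites (q index, boost, low/high boost bounds, two LUTs), not quoted from the source:

static int active_quality_sketch(int q, int gfu_boost, int low, int high,
                                 const int *low_motion_minq,
                                 const int *high_motion_minq) {
  if (gfu_boost > high) {
    return low_motion_minq[q];      /* strong boost: low-motion curve */
  } else if (gfu_boost < low) {
    return high_motion_minq[q];     /* weak boost: high-motion curve */
  } else {
    /* Linearly blend the two table lookups between the boost bounds. */
    const int gap = high - low;
    const int offset = high - gfu_boost;
    return ((gap - offset) * low_motion_minq[q] +
            offset * high_motion_minq[q]) / gap;
  }
}
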
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index b9cd7d554..f4def1eef 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1833,8 +1833,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
// Take wtd average of the step_params based on the last frame's
// max mv magnitude and the best ref mvs of the current block for
// the given reference.
- step_param = (vp9_init_search_range(cpi, max_mv) +
- cpi->mv_step_param) >> 1;
+ step_param = (vp9_init_search_range(&cpi->sf, max_mv) +
+ cpi->mv_step_param) / 2;
} else {
step_param = cpi->mv_step_param;
}
@@ -2352,8 +2352,8 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
// Take wtd average of the step_params based on the last frame's
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
- step_param = (vp9_init_search_range(cpi, x->max_mv_context[ref]) +
- cpi->mv_step_param) >> 1;
+ step_param = (vp9_init_search_range(&cpi->sf, x->max_mv_context[ref]) +
+ cpi->mv_step_param) / 2;
} else {
step_param = cpi->mv_step_param;
}
diff --git a/vp9/encoder/vp9_speed_features.h b/vp9/encoder/vp9_speed_features.h
index cff99a6dc..a384a4360 100644
--- a/vp9/encoder/vp9_speed_features.h
+++ b/vp9/encoder/vp9_speed_features.h
@@ -129,7 +129,7 @@ typedef enum {
ONE_LOOP_REDUCED = 2
} FAST_COEFF_UPDATE;
-typedef struct {
+typedef struct SPEED_FEATURES {
// Frame level coding parameter update
int frame_parameter_update;
diff --git a/vp9/encoder/vp9_svc_layercontext.c b/vp9/encoder/vp9_svc_layercontext.c
index 792e8d21b..2e98fa717 100644
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -221,7 +221,7 @@ void vp9_inc_frame_in_layer(SVC *svc) {
++lc->current_video_frame_in_layer;
}
-int vp9_is_upper_layer_key_frame(const VP9_COMP *cpi) {
+int vp9_is_upper_layer_key_frame(const VP9_COMP *const cpi) {
return cpi->use_svc &&
cpi->svc.number_temporal_layers == 1 &&
cpi->svc.spatial_layer_id > 0 &&
diff --git a/vp9/encoder/x86/vp9_dct32x32_sse2.c b/vp9/encoder/x86/vp9_dct32x32_sse2.c
index 2d59775ce..42fdbbdc5 100644
--- a/vp9/encoder/x86/vp9_dct32x32_sse2.c
+++ b/vp9/encoder/x86/vp9_dct32x32_sse2.c
@@ -12,6 +12,9 @@
#include "vp9/common/vp9_idct.h" // for cospi constants
#include "vpx_ports/mem.h"
+#define pair_set_epi32(a, b) \
+ _mm_set_epi32(b, a, b, a)
+
#if FDCT32x32_HIGH_PRECISION
static INLINE __m128i k_madd_epi32(__m128i a, __m128i b) {
__m128i buf0, buf1;
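
pair_set_epi32 moves out of the shared vp9_idct.h and into its only user. For reference, _mm_set_epi32 lists lanes from highest to lowest, so the macro interleaves its two constants across the vector — from the lowest 32-bit lane upward, pair_set_epi32(a, b) yields {a, b, a, b}:

#include <emmintrin.h>

/* Illustration: pair_set_epi32(3, 7) expands to _mm_set_epi32(7, 3, 7, 3),
 * whose lanes, low to high, are 3, 7, 3, 7. */
static __m128i pair_example(void) {
  return _mm_set_epi32(7, 3, 7, 3);
}
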
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index 06b482305..6dca8399f 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -375,80 +375,68 @@ static void parse_superframe_index(const uint8_t *data, size_t data_sz,
}
}
+static vpx_codec_err_t decode_one_iter(vpx_codec_alg_priv_t *ctx,
+ const uint8_t **data_start_ptr,
+ const uint8_t *data_end,
+ uint32_t frame_size, void *user_priv,
+ long deadline) {
+ const vpx_codec_err_t res = decode_one(ctx, data_start_ptr, frame_size,
+ user_priv, deadline);
+ if (res != VPX_CODEC_OK)
+ return res;
+
+ // Account for suboptimal termination by the encoder.
+ while (*data_start_ptr < data_end) {
+ const uint8_t marker = read_marker(ctx->decrypt_cb, ctx->decrypt_state,
+ *data_start_ptr);
+ if (marker)
+ break;
+ (*data_start_ptr)++;
+ }
+
+ return VPX_CODEC_OK;
+}
+
static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
const uint8_t *data_start = data;
- const uint8_t *data_end = data + data_sz;
- vpx_codec_err_t res = VPX_CODEC_OK;
- uint32_t sizes[8];
- int frames_this_pts, frame_count = 0;
+ const uint8_t *const data_end = data + data_sz;
+ vpx_codec_err_t res;
+ uint32_t frame_sizes[8];
+ int frame_count;
if (data == NULL || data_sz == 0)
return VPX_CODEC_INVALID_PARAM;
- parse_superframe_index(data, data_sz, sizes, &frames_this_pts,
+ parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
ctx->decrypt_cb, ctx->decrypt_state);
- do {
- if (data_sz) {
- uint8_t marker = read_marker(ctx->decrypt_cb, ctx->decrypt_state,
- data_start);
- // Skip over the superframe index, if present
- if ((marker & 0xe0) == 0xc0) {
- const uint32_t frames = (marker & 0x7) + 1;
- const uint32_t mag = ((marker >> 3) & 0x3) + 1;
- const uint32_t index_sz = 2 + mag * frames;
-
- if (data_sz >= index_sz) {
- uint8_t marker2 = read_marker(ctx->decrypt_cb, ctx->decrypt_state,
- data_start + index_sz - 1);
- if (marker2 == marker) {
- data_start += index_sz;
- data_sz -= index_sz;
- if (data_start < data_end)
- continue;
- else
- break;
- }
- }
- }
- }
-
- // Use the correct size for this frame, if an index is present.
- if (frames_this_pts) {
- uint32_t this_sz = sizes[frame_count];
+ if (frame_count > 0) {
+ int i;
- if (data_sz < this_sz) {
+ for (i = 0; i < frame_count; ++i) {
+ const uint32_t frame_size = frame_sizes[i];
+ if (data_start < data || data_start + frame_size >= data_end) {
ctx->base.err_detail = "Invalid frame size in index";
return VPX_CODEC_CORRUPT_FRAME;
}
- data_sz = this_sz;
- frame_count++;
+ res = decode_one_iter(ctx, &data_start, data_end, frame_size,
+ user_priv, deadline);
+ if (res != VPX_CODEC_OK)
+ return res;
}
-
- res = decode_one(ctx, &data_start, data_sz, user_priv, deadline);
- assert(data_start >= data);
- assert(data_start <= data_end);
-
- // Early exit if there was a decode error
- if (res)
- break;
-
- // Account for suboptimal termination by the encoder.
+ } else {
while (data_start < data_end) {
- uint8_t marker3 = read_marker(ctx->decrypt_cb, ctx->decrypt_state,
- data_start);
- if (marker3)
- break;
- data_start++;
+ res = decode_one_iter(ctx, &data_start, data_end, data_end - data_start,
+ user_priv, deadline);
+ if (res != VPX_CODEC_OK)
+ return res;
}
+ }
- data_sz = (unsigned int)(data_end - data_start);
- } while (data_start < data_end);
-
- return res;
+ return VPX_CODEC_OK;
}
static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
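
decoder_decode now resolves the superframe index once up front (parse_superframe_index fills frame_sizes/frame_count) and hands each frame to decode_one_iter, instead of re-detecting the index inside the loop. The marker layout the deleted inline code handled is visible in the removed lines above; a sketch of that marker parse, written from those lines rather than from parse_superframe_index itself:

#include <stdint.h>

/* A superframe index trails the data, bracketed by a marker byte whose
 * top three bits are 110; the low bits give frame count and size width. */
static int parse_marker_sketch(uint8_t marker,
                               uint32_t *frames, uint32_t *index_sz) {
  uint32_t mag;
  if ((marker & 0xe0) != 0xc0)
    return 0;                        /* not a superframe index marker */
  *frames = (marker & 0x7) + 1;      /* 1..8 frames in this superframe */
  mag = ((marker >> 3) & 0x3) + 1;   /* 1..4 bytes per frame size */
  *index_sz = 2 + mag * *frames;     /* two marker bytes + size fields */
  return 1;
}
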
diff --git a/webmdec.cc b/webmdec.cc
index eb89befd8..4383e8efd 100644
--- a/webmdec.cc
+++ b/webmdec.cc
@@ -108,8 +108,8 @@ int file_is_webm(struct WebmInputContext *webm_ctx,
vpx_ctx->framerate.denominator = 0;
vpx_ctx->framerate.numerator = 0;
- vpx_ctx->width = video_track->GetWidth();
- vpx_ctx->height = video_track->GetHeight();
+ vpx_ctx->width = static_cast<uint32_t>(video_track->GetWidth());
+ vpx_ctx->height = static_cast<uint32_t>(video_track->GetHeight());
get_first_cluster(webm_ctx);