-rw-r--r--  test/dct16x16_test.cc                         64
-rw-r--r--  test/sad_test.cc                              12
-rw-r--r--  third_party/googletest/README.libvpx           2
-rw-r--r--  third_party/googletest/src/src/gtest-all.cc    2
-rw-r--r--  vp8/encoder/onyx_if.c                          8
-rw-r--r--  vp9/common/vp9_blockd.h                        2
-rw-r--r--  vp9/common/x86/vp9_idct_intrin_ssse3.c       110
-rw-r--r--  vp9/decoder/vp9_decodeframe.c                 50
-rw-r--r--  vp9/encoder/vp9_aq_complexity.c               16
-rw-r--r--  vp9/encoder/vp9_block.h                        1
-rw-r--r--  vp9/encoder/vp9_encodeframe.c                  7
-rw-r--r--  vp9/encoder/vp9_encodemb.c                     6
-rw-r--r--  vp9/encoder/vp9_encoder.c                      4
-rw-r--r--  vp9/encoder/vp9_firstpass.c                    2
-rw-r--r--  vp9/encoder/vp9_mcomp.c                        3
-rw-r--r--  vp9/encoder/vp9_pickmode.c                     3
-rw-r--r--  vp9/encoder/vp9_ratectrl.c                    36
-rw-r--r--  vp9/encoder/vp9_ratectrl.h                     3
-rw-r--r--  vp9/encoder/vp9_rdopt.c                       52
-rw-r--r--  vp9/encoder/vp9_speed_features.c              61
-rw-r--r--  vp9/encoder/vp9_speed_features.h               4
-rw-r--r--  vp9/encoder/vp9_svc_layercontext.c            15
-rw-r--r--  vp9/vp9_dx_iface.c                            29
-rw-r--r--  vpx/exports_enc                                3
24 files changed, 282 insertions(+), 213 deletions(-)
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index 20b1c8fbd..92976e14c 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -272,10 +272,18 @@ void fdct16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fdct16x16_c(in, out, stride);
}
+void idct16x16_ref(const int16_t *in, uint8_t *dest, int stride, int tx_type) {
+ vp9_idct16x16_256_add_c(in, dest, stride);
+}
+
void fht16x16_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
vp9_fht16x16_c(in, out, stride, tx_type);
}
+void iht16x16_ref(const int16_t *in, uint8_t *dest, int stride, int tx_type) {
+ vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
+}
+
class Trans16x16TestBase {
public:
virtual ~Trans16x16TestBase() {}
@@ -378,6 +386,47 @@ class Trans16x16TestBase {
}
}
+ void RunQuantCheck(int dc_thred, int ac_thred) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
+
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ }
+ if (i == 0)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = 255;
+ if (i == 1)
+ for (int j = 0; j < kNumCoeffs; ++j)
+ input_extreme_block[j] = -255;
+
+ fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
+
+ // clear reconstructed pixel buffers
+ vpx_memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ vpx_memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+
+ // quantization with maximum allowed step sizes
+ output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
+ for (int j = 1; j < kNumCoeffs; ++j)
+ output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
+ inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
+ REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
+
+ for (int j = 0; j < kNumCoeffs; ++j)
+ EXPECT_EQ(ref[j], dst[j]);
+ }
+ }
+
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
@@ -414,6 +463,7 @@ class Trans16x16TestBase {
int pitch_;
int tx_type_;
fht_t fwd_txfm_ref;
+ iht_t inv_txfm_ref;
};
class Trans16x16DCT
@@ -428,6 +478,7 @@ class Trans16x16DCT
tx_type_ = GET_PARAM(2);
pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
+ inv_txfm_ref = idct16x16_ref;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -455,6 +506,12 @@ TEST_P(Trans16x16DCT, MemCheck) {
RunMemCheck();
}
+TEST_P(Trans16x16DCT, QuantCheck) {
+ // Use maximally allowed quantization step sizes for DC and AC
+ // coefficients respectively.
+ RunQuantCheck(1336, 1828);
+}
+
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
RunInvAccuracyCheck();
}
@@ -471,6 +528,7 @@ class Trans16x16HT
tx_type_ = GET_PARAM(2);
pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
+ inv_txfm_ref = iht16x16_ref;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -498,6 +556,12 @@ TEST_P(Trans16x16HT, MemCheck) {
RunMemCheck();
}
+TEST_P(Trans16x16HT, QuantCheck) {
+ // The encoder skips any non-DC intra prediction modes,
+ // when the quantization step size goes beyond 988.
+ RunQuantCheck(549, 988);
+}
+
using std::tr1::make_tuple;
INSTANTIATE_TEST_CASE_P(
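The coarse requantization in RunQuantCheck above relies on C integer division truncating toward zero, so each coefficient is snapped to the nearest multiple of the step size on the zero side before the inverse transform is run. A minimal standalone sketch of that step (the thresholds are the ones passed by the new TEST_P cases; everything else here is illustrative only):

#include <stdio.h>

/* Mirrors output_ref_block[j] = (output_ref_block[j] / thred) * thred. */
static int requantize(int coeff, int step) {
  return (coeff / step) * step;  /* integer division truncates toward zero */
}

int main(void) {
  const int dc_thred = 1336, ac_thred = 1828;  /* from Trans16x16DCT.QuantCheck */
  printf("%d -> %d\n", 2000, requantize(2000, dc_thred));    /* 1336 */
  printf("%d -> %d\n", -3000, requantize(-3000, ac_thred));  /* -1828 */
  return 0;
}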
diff --git a/test/sad_test.cc b/test/sad_test.cc
index adb191fd0..f9ffa92de 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -185,10 +185,8 @@ class SADTest
}
void CheckSAD(unsigned int max_sad) {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(max_sad, 0);
- exp_sad = SAD(max_sad, 0);
+ const unsigned int reference_sad = ReferenceSAD(max_sad, 0);
+ const unsigned int exp_sad = SAD(max_sad, 0);
if (reference_sad <= max_sad) {
ASSERT_EQ(exp_sad, reference_sad);
@@ -218,10 +216,8 @@ class SADVP9Test
}
void CheckSAD() {
- unsigned int reference_sad, exp_sad;
-
- reference_sad = ReferenceSAD(UINT_MAX, 0);
- exp_sad = SAD(0);
+ const unsigned int reference_sad = ReferenceSAD(UINT_MAX, 0);
+ const unsigned int exp_sad = SAD(0);
ASSERT_EQ(reference_sad, exp_sad);
}
diff --git a/third_party/googletest/README.libvpx b/third_party/googletest/README.libvpx
index 6fdeb8731..7201a67d3 100644
--- a/third_party/googletest/README.libvpx
+++ b/third_party/googletest/README.libvpx
@@ -12,4 +12,4 @@ failures, various options for running the tests, and XML test report
generation.
Local Modifications:
-None.
\ No newline at end of file
+Removed unused declarations of kPathSeparatorString to have warning free build.
\ No newline at end of file
diff --git a/third_party/googletest/src/src/gtest-all.cc b/third_party/googletest/src/src/gtest-all.cc
index a9a03b2e3..8d906279a 100644
--- a/third_party/googletest/src/src/gtest-all.cc
+++ b/third_party/googletest/src/src/gtest-all.cc
@@ -7904,7 +7904,6 @@ namespace internal {
// of them.
const char kPathSeparator = '\\';
const char kAlternatePathSeparator = '/';
-const char kPathSeparatorString[] = "\\";
const char kAlternatePathSeparatorString[] = "/";
# if GTEST_OS_WINDOWS_MOBILE
// Windows CE doesn't have a current directory. You should not use
@@ -7918,7 +7917,6 @@ const char kCurrentDirectoryString[] = ".\\";
# endif // GTEST_OS_WINDOWS_MOBILE
#else
const char kPathSeparator = '/';
-const char kPathSeparatorString[] = "/";
const char kCurrentDirectoryString[] = "./";
#endif // GTEST_OS_WINDOWS
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index e95e44fd5..762b7e345 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -2611,9 +2611,8 @@ int vp8_update_entropy(VP8_COMP *cpi, int update)
#if OUTPUT_YUV_SRC
-void vp8_write_yuv_frame(const char *name, YV12_BUFFER_CONFIG *s)
+void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s)
{
- FILE *yuv_file = fopen(name, "ab");
unsigned char *src = s->y_buffer;
int h = s->y_height;
@@ -2643,12 +2642,9 @@ void vp8_write_yuv_frame(const char *name, YV12_BUFFER_CONFIG *s)
src += s->uv_stride;
}
while (--h);
-
- fclose(yuv_file);
}
#endif
-
static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
@@ -3895,7 +3891,7 @@ static void encode_frame_to_data_rate
#endif
#ifdef OUTPUT_YUV_SRC
- vp8_write_yuv_frame(cpi->Source);
+ vp8_write_yuv_frame(yuv_file, cpi->Source);
#endif
do
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 8ca356dd6..9088b0bde 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -228,8 +228,6 @@ typedef struct macroblockd {
DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
int lossless;
- /* Inverse transform function pointers. */
- void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob);
int corrupted;
diff --git a/vp9/common/x86/vp9_idct_intrin_ssse3.c b/vp9/common/x86/vp9_idct_intrin_ssse3.c
index e5d3cb5f4..0930e7805 100644
--- a/vp9/common/x86/vp9_idct_intrin_ssse3.c
+++ b/vp9/common/x86/vp9_idct_intrin_ssse3.c
@@ -16,7 +16,7 @@
#include <tmmintrin.h> // SSSE3
#include "vp9/common/x86/vp9_idct_intrin_sse2.h"
-static void idct16_8col(__m128i *in) {
+static void idct16_8col(__m128i *in, int round) {
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
@@ -36,6 +36,8 @@ static void idct16_8col(__m128i *in) {
const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i k__cospi_p16_p16_x2 = pair_set_epi16(23170, 23170);
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
__m128i v[16], u[16], s[16], t[16];
@@ -266,28 +268,80 @@ static void idct16_8col(__m128i *in) {
t[15] = _mm_add_epi16(s[12], s[15]);
// stage 6
- s[0] = _mm_add_epi16(t[0], t[7]);
- s[1] = _mm_add_epi16(t[1], t[6]);
- s[2] = _mm_add_epi16(t[2], t[5]);
- s[3] = _mm_add_epi16(t[3], t[4]);
- s[4] = _mm_sub_epi16(t[3], t[4]);
- s[5] = _mm_sub_epi16(t[2], t[5]);
- s[6] = _mm_sub_epi16(t[1], t[6]);
- s[7] = _mm_sub_epi16(t[0], t[7]);
- s[8] = t[8];
- s[9] = t[9];
-
- u[0] = _mm_sub_epi16(t[13], t[10]);
- u[1] = _mm_add_epi16(t[13], t[10]);
- u[2] = _mm_sub_epi16(t[12], t[11]);
- u[3] = _mm_add_epi16(t[12], t[11]);
-
- s[10] = _mm_mulhrs_epi16(u[0], k__cospi_p16_p16_x2);
- s[13] = _mm_mulhrs_epi16(u[1], k__cospi_p16_p16_x2);
- s[11] = _mm_mulhrs_epi16(u[2], k__cospi_p16_p16_x2);
- s[12] = _mm_mulhrs_epi16(u[3], k__cospi_p16_p16_x2);
- s[14] = t[14];
- s[15] = t[15];
+ if (round == 1) {
+ s[0] = _mm_add_epi16(t[0], t[7]);
+ s[1] = _mm_add_epi16(t[1], t[6]);
+ s[2] = _mm_add_epi16(t[2], t[5]);
+ s[3] = _mm_add_epi16(t[3], t[4]);
+ s[4] = _mm_sub_epi16(t[3], t[4]);
+ s[5] = _mm_sub_epi16(t[2], t[5]);
+ s[6] = _mm_sub_epi16(t[1], t[6]);
+ s[7] = _mm_sub_epi16(t[0], t[7]);
+ s[8] = t[8];
+ s[9] = t[9];
+
+ u[0] = _mm_unpacklo_epi16(t[10], t[13]);
+ u[1] = _mm_unpackhi_epi16(t[10], t[13]);
+ u[2] = _mm_unpacklo_epi16(t[11], t[12]);
+ u[3] = _mm_unpackhi_epi16(t[11], t[12]);
+
+ v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+ v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+ v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+ v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+ v[4] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+ v[5] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+ v[6] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+ v[7] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+
+ u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+ u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+ u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+ u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+ u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+ u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+ u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+ u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+ u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+ u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+ u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+ u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+ u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+ u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+ u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+ u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+ s[10] = _mm_packs_epi32(u[0], u[1]);
+ s[13] = _mm_packs_epi32(u[2], u[3]);
+ s[11] = _mm_packs_epi32(u[4], u[5]);
+ s[12] = _mm_packs_epi32(u[6], u[7]);
+ s[14] = t[14];
+ s[15] = t[15];
+ } else {
+ s[0] = _mm_add_epi16(t[0], t[7]);
+ s[1] = _mm_add_epi16(t[1], t[6]);
+ s[2] = _mm_add_epi16(t[2], t[5]);
+ s[3] = _mm_add_epi16(t[3], t[4]);
+ s[4] = _mm_sub_epi16(t[3], t[4]);
+ s[5] = _mm_sub_epi16(t[2], t[5]);
+ s[6] = _mm_sub_epi16(t[1], t[6]);
+ s[7] = _mm_sub_epi16(t[0], t[7]);
+ s[8] = t[8];
+ s[9] = t[9];
+
+ u[0] = _mm_sub_epi16(t[13], t[10]);
+ u[1] = _mm_add_epi16(t[13], t[10]);
+ u[2] = _mm_sub_epi16(t[12], t[11]);
+ u[3] = _mm_add_epi16(t[12], t[11]);
+
+ s[10] = _mm_mulhrs_epi16(u[0], k__cospi_p16_p16_x2);
+ s[13] = _mm_mulhrs_epi16(u[1], k__cospi_p16_p16_x2);
+ s[11] = _mm_mulhrs_epi16(u[2], k__cospi_p16_p16_x2);
+ s[12] = _mm_mulhrs_epi16(u[3], k__cospi_p16_p16_x2);
+ s[14] = t[14];
+ s[15] = t[15];
+ }
// stage 7
in[0] = _mm_add_epi16(s[0], s[15]);
@@ -308,10 +362,10 @@ static void idct16_8col(__m128i *in) {
in[15] = _mm_sub_epi16(s[0], s[15]);
}
-static void idct16_sse2(__m128i *in0, __m128i *in1) {
+static void idct16_sse2(__m128i *in0, __m128i *in1, int round) {
array_transpose_16x16(in0, in1);
- idct16_8col(in0);
- idct16_8col(in1);
+ idct16_8col(in0, round);
+ idct16_8col(in1, round);
}
void vp9_idct16x16_256_add_ssse3(const int16_t *input, uint8_t *dest,
@@ -322,8 +376,8 @@ void vp9_idct16x16_256_add_ssse3(const int16_t *input, uint8_t *dest,
input += 8;
load_buffer_8x16(input, in1);
- idct16_sse2(in0, in1);
- idct16_sse2(in0, in1);
+ idct16_sse2(in0, in1, 0);
+ idct16_sse2(in0, in1, 1);
write_buffer_8x16(dest, in0, stride);
dest += 8;
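One plausible reading of the new round parameter: the final transform pass (round == 1) keeps stage 6 in 32-bit precision via _mm_madd_epi16, because forming t[13] - t[10] in 16 bits first, as the _mm_mulhrs_epi16 path does, can wrap for the extreme quantized inputs the new QuantCheck test feeds in. A scalar sketch of that wrap, with made-up values and assuming the usual two's-complement narrowing:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical stage-6 inputs whose difference exceeds int16_t range. */
  int16_t t10 = -20000, t13 = 20000;

  /* Fast path: the 16-bit subtraction wraps before the multiply. */
  int16_t wrapped = (int16_t)((int32_t)t13 - (int32_t)t10);  /* 40000 -> -25536 */

  /* Exact path: _mm_madd_epi16 forms the +/- cospi_16_64 combination in
     32 bits, so the full 40000 survives to the rounding shift. */
  int32_t exact = (int32_t)t13 - (int32_t)t10;               /* 40000 */

  printf("wrapped = %d, exact = %d\n", wrapped, exact);
  return 0;
}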
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 121b1f2cd..fc70035f2 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -195,30 +195,32 @@ static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
struct macroblockd_plane *const pd = &xd->plane[plane];
if (eob > 0) {
TX_TYPE tx_type;
- const PLANE_TYPE plane_type = pd->plane_type;
int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
- switch (tx_size) {
- case TX_4X4:
- tx_type = get_tx_type_4x4(plane_type, xd, block);
- if (tx_type == DCT_DCT)
- xd->itxm_add(dqcoeff, dst, stride, eob);
- else
- vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type);
- break;
- case TX_8X8:
- tx_type = get_tx_type(plane_type, xd);
- vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
- break;
- case TX_16X16:
- tx_type = get_tx_type(plane_type, xd);
- vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
- break;
- case TX_32X32:
- tx_type = DCT_DCT;
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
- break;
- default:
- assert(0 && "Invalid transform size");
+ if (xd->lossless) {
+ tx_type = DCT_DCT;
+ vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
+ } else {
+ const PLANE_TYPE plane_type = pd->plane_type;
+ switch (tx_size) {
+ case TX_4X4:
+ tx_type = get_tx_type_4x4(plane_type, xd, block);
+ vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_8X8:
+ tx_type = get_tx_type(plane_type, xd);
+ vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_16X16:
+ tx_type = get_tx_type(plane_type, xd);
+ vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_32X32:
+ tx_type = DCT_DCT;
+ vp9_idct32x32_add(dqcoeff, dst, stride, eob);
+ break;
+ default:
+ assert(0 && "Invalid transform size");
+ }
}
if (eob == 1) {
@@ -588,8 +590,6 @@ static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 &&
cm->uv_ac_delta_q == 0;
-
- xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}
static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) {
diff --git a/vp9/encoder/vp9_aq_complexity.c b/vp9/encoder/vp9_aq_complexity.c
index 47ad8d8cc..0d6b41d15 100644
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -47,11 +47,21 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) {
// Use some of the segments for in frame Q adjustment.
for (segment = 1; segment < 2; segment++) {
- const int qindex_delta =
+ int qindex_delta =
vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
in_frame_q_adj_ratio[segment]);
- vp9_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
- vp9_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+
+ // For AQ mode 2, we dont allow Q0 in a segment if the base Q is not 0.
+ // Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment Q delta
+ // is sometimes applied without going back around the rd loop.
+ // This could lead to an illegal combination of partition size and q.
+ if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) {
+ qindex_delta = -cm->base_qindex + 1;
+ }
+ if ((cm->base_qindex + qindex_delta) > 0) {
+ vp9_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
+ vp9_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+ }
}
}
}
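The clamp added above keeps a segment from landing on Q index 0 (lossless) unless the base Q index is already 0. A tiny numeric sketch of the two checks, with made-up values:

#include <stdio.h>

int main(void) {
  int base_qindex = 10;
  int qindex_delta = -10;  /* would drive the segment to Q index 0 */

  if (base_qindex != 0 && base_qindex + qindex_delta == 0)
    qindex_delta = -base_qindex + 1;  /* -9: segment Q index becomes 1 */

  if (base_qindex + qindex_delta > 0)
    printf("segment qindex = %d\n", base_qindex + qindex_delta);  /* 1 */
  return 0;
}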
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index c406860a0..c3cd93b78 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -109,6 +109,7 @@ struct macroblock {
MV pred_mv[MAX_REF_FRAMES];
void (*fwd_txm4x4)(const int16_t *input, int16_t *output, int stride);
+ void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob);
};
#ifdef __cplusplus
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index c449e4755..37fb0f3c6 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -2373,7 +2373,7 @@ static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
// printf("Switching to lossless\n");
cpi->mb.fwd_txm4x4 = vp9_fwht4x4;
- cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add;
+ cpi->mb.itxm_add = vp9_iwht4x4_add;
cpi->mb.optimize = 0;
cpi->common.lf.filter_level = 0;
cpi->zbin_mode_boost_enabled = 0;
@@ -2381,7 +2381,7 @@ static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
} else {
// printf("Not lossless\n");
cpi->mb.fwd_txm4x4 = vp9_fdct4x4;
- cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add;
+ cpi->mb.itxm_add = vp9_idct4x4_add;
}
}
@@ -3357,7 +3357,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else {
mbmi->skip = 1;
- if (output_enabled)
+ if (output_enabled &&
+ !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
cm->counts.skip[vp9_get_skip_context(xd)][1]++;
reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
}
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 3b231b7f2..8581e6117 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -406,7 +406,7 @@ static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize,
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+ x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
break;
default:
assert(0 && "Invalid transform size");
@@ -428,7 +428,7 @@ static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize,
vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
if (p->eobs[block] > 0)
- xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+ x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
}
void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
@@ -574,7 +574,7 @@ static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- xd->itxm_add(dqcoeff, dst, dst_stride, *eob);
+ x->itxm_add(dqcoeff, dst, dst_stride, *eob);
else
vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type);
}
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index 2ce5483d6..90155f3a9 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -602,9 +602,9 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
// is set.
cpi->oxcf.worst_allowed_q = 0;
cpi->oxcf.best_allowed_q = 0;
- cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add;
+ cpi->mb.itxm_add = vp9_iwht4x4_add;
} else {
- cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add;
+ cpi->mb.itxm_add = vp9_idct4x4_add;
}
rc->baseline_gf_interval = DEFAULT_GF_INTERVAL;
cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 0d4f2c72c..c1d925ab4 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -1576,7 +1576,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
// Break out conditions.
if (
- // Break at cpi->max_gf_interval unless almost totally static.
+ // Break at active_max_gf_interval unless almost totally static.
(i >= active_max_gf_interval && (zero_motion_accumulator < 0.995)) ||
(
// Don't break out with a very short interval.
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 9d2b2a497..dbd19a2d6 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -524,7 +524,8 @@ static int vp9_pattern_search(const MACROBLOCK *x,
// Work out the start point for the search
bestsad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride);
+ get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
// Search all possible scales upto the search param around the center point
// pick the scale of the point that is best as the starting scale of
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 11633a73d..913b8ead4 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -280,8 +280,7 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
int rate_mv = 0;
- if (cpi->sf.disable_inter_mode_mask[bsize] &
- (1 << INTER_OFFSET(this_mode)))
+ if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
continue;
if (rd_less_than_thresh(best_rd, rd_threshes[mode_idx[this_mode]],
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 6f646ea0e..b58eac981 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -1388,6 +1388,24 @@ int vp9_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
return target_index - qindex;
}
+void vp9_rc_set_gf_max_interval(const VP9EncoderConfig *const oxcf,
+ RATE_CONTROL *const rc) {
+ // Set Maximum gf/arf interval
+ rc->max_gf_interval = 16;
+
+ // Extended interval for genuinely static scenes
+ rc->static_scene_max_gf_interval = oxcf->key_freq >> 1;
+
+ // Special conditions when alt ref frame enabled
+ if (oxcf->play_alternate && oxcf->lag_in_frames) {
+ if (rc->static_scene_max_gf_interval > oxcf->lag_in_frames - 1)
+ rc->static_scene_max_gf_interval = oxcf->lag_in_frames - 1;
+ }
+
+ if (rc->max_gf_interval > rc->static_scene_max_gf_interval)
+ rc->max_gf_interval = rc->static_scene_max_gf_interval;
+}
+
void vp9_rc_update_framerate(VP9_COMP *cpi) {
const VP9_COMMON *const cm = &cpi->common;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
@@ -1412,21 +1430,5 @@ void vp9_rc_update_framerate(VP9_COMP *cpi) {
rc->max_frame_bandwidth = MAX(MAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P),
vbr_max_bits);
- // Set Maximum gf/arf interval
- rc->max_gf_interval = 16;
-
- // Extended interval for genuinely static scenes
- rc->static_scene_max_gf_interval = cpi->oxcf.key_freq >> 1;
-
- // Special conditions when alt ref frame enabled in lagged compress mode
- if (oxcf->play_alternate && oxcf->lag_in_frames) {
- if (rc->max_gf_interval > oxcf->lag_in_frames - 1)
- rc->max_gf_interval = oxcf->lag_in_frames - 1;
-
- if (rc->static_scene_max_gf_interval > oxcf->lag_in_frames - 1)
- rc->static_scene_max_gf_interval = oxcf->lag_in_frames - 1;
- }
-
- if (rc->max_gf_interval > rc->static_scene_max_gf_interval)
- rc->max_gf_interval = rc->static_scene_max_gf_interval;
+ vp9_rc_set_gf_max_interval(oxcf, rc);
}
diff --git a/vp9/encoder/vp9_ratectrl.h b/vp9/encoder/vp9_ratectrl.h
index 614078eef..d6a0151b6 100644
--- a/vp9/encoder/vp9_ratectrl.h
+++ b/vp9/encoder/vp9_ratectrl.h
@@ -178,6 +178,9 @@ int vp9_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
void vp9_rc_update_framerate(struct VP9_COMP *cpi);
+void vp9_rc_set_gf_max_interval(const struct VP9EncoderConfig *const oxcf,
+ RATE_CONTROL *const rc);
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index d402d7b40..f68aa2738 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1675,9 +1675,9 @@ static INLINE int mv_has_subpel(const MV *mv) {
static int check_best_zero_mv(
const VP9_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
- int disable_inter_mode_mask, int this_mode,
+ int inter_mode_mask, int this_mode,
const MV_REFERENCE_FRAME ref_frames[2]) {
- if (!(disable_inter_mode_mask & (1 << INTER_OFFSET(ZEROMV))) &&
+ if ((inter_mode_mask & (1 << ZEROMV)) &&
(this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
(ref_frames[1] == NONE ||
@@ -1743,7 +1743,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
ENTROPY_CONTEXT t_above[2], t_left[2];
int subpelmv = 1, have_ref = 0;
const int has_second_rf = has_second_ref(mbmi);
- const int disable_inter_mode_mask = cpi->sf.disable_inter_mode_mask[bsize];
+ const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
vp9_zero(*bsi);
@@ -1792,11 +1792,11 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
mode_idx = INTER_OFFSET(this_mode);
bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
- if (disable_inter_mode_mask & (1 << mode_idx))
+ if (!(inter_mode_mask & (1 << this_mode)))
continue;
if (!check_best_zero_mv(cpi, mbmi->mode_context, frame_mv,
- disable_inter_mode_mask,
+ inter_mode_mask,
this_mode, mbmi->ref_frame))
continue;
@@ -3063,7 +3063,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
const int mode_search_skip_flags = cpi->sf.mode_search_skip_flags;
const int intra_y_mode_mask =
cpi->sf.intra_y_mode_mask[max_txsize_lookup[bsize]];
- int disable_inter_mode_mask = cpi->sf.disable_inter_mode_mask[bsize];
+ int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
vp9_zero(best_mbmode);
x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
@@ -3130,7 +3130,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
const int inter_non_zero_mode_mask = 0x1F7F7;
mode_skip_mask |= inter_non_zero_mode_mask;
mode_skip_mask &= ~(1 << THR_ZEROMV);
- disable_inter_mode_mask = ~(1 << INTER_OFFSET(ZEROMV));
+ inter_mode_mask = (1 << ZEROMV);
}
// Disable this drop out case if the ref frame
@@ -3182,7 +3182,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
mode_index = THR_ZEROMV;
mode_skip_mask = ~(1 << mode_index);
mode_skip_start = MAX_MODES;
- disable_inter_mode_mask = 0;
+ inter_mode_mask = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) |
+ (1 << NEWMV);
}
for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
@@ -3229,8 +3230,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
this_mode = vp9_mode_order[mode_index].mode;
ref_frame = vp9_mode_order[mode_index].ref_frame[0];
- if (ref_frame != INTRA_FRAME &&
- disable_inter_mode_mask & (1 << INTER_OFFSET(this_mode)))
+ if (ref_frame != INTRA_FRAME && !(inter_mode_mask & (1 << this_mode)))
continue;
second_ref_frame = vp9_mode_order[mode_index].ref_frame[1];
@@ -3279,7 +3279,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
if (!check_best_zero_mv(cpi, mbmi->mode_context, frame_mv,
- disable_inter_mode_mask, this_mode, ref_frames))
+ inter_mode_mask, this_mode, ref_frames))
continue;
}
}
@@ -3665,7 +3665,6 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
int_mv seg_mvs[4][MAX_REF_FRAMES];
b_mode_info best_bmodes[4];
int best_skip2 = 0;
- int ref_frame_mask = 0;
int mode_skip_mask = 0;
x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
@@ -3700,17 +3699,6 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
frame_mv[ZEROMV][ref_frame].as_int = 0;
}
- for (ref_frame = LAST_FRAME;
- ref_frame <= ALTREF_FRAME && cpi->sf.reference_masking; ++ref_frame) {
- int i;
- for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
- if ((x->pred_mv_sad[ref_frame] >> 1) > x->pred_mv_sad[i]) {
- ref_frame_mask |= (1 << ref_frame);
- break;
- }
- }
- }
-
for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
int mode_excluded = 0;
int64_t this_rd = INT64_MAX;
@@ -3805,11 +3793,6 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) !=
(int)ref_frame) {
continue;
- // If the segment skip feature is enabled....
- // then do nothing if the current mode is not allowed..
- } else if (vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) &&
- ref_frame != INTRA_FRAME) {
- continue;
// Disable this drop out case if the ref frame
// segment level feature is enabled for this segment. This is to
// prevent the possibility that we end up unable to pick any mode.
@@ -4034,15 +4017,10 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
}
if (!disable_skip) {
- // Test for the condition where skip block will be activated
- // because there are no non zero coefficients and make any
- // necessary adjustment for rate. Ignore if skip is coded at
- // segment level as the cost wont have been added in.
- // Is Mb level skip allowed (i.e. not coded at segment level).
- const int mb_skip_allowed = !vp9_segfeature_active(seg, segment_id,
- SEG_LVL_SKIP);
+ // Skip is never coded at the segment level for sub8x8 blocks and instead
+ // always coded in the bitstream at the mode info level.
- if (mb_skip_allowed && ref_frame != INTRA_FRAME && !xd->lossless) {
+ if (ref_frame != INTRA_FRAME && !xd->lossless) {
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
// Add in the cost of the no skip flag.
@@ -4057,7 +4035,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
rate_uv = 0;
this_skip2 = 1;
}
- } else if (mb_skip_allowed) {
+ } else {
// Add in the cost of the no skip flag.
rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
}
diff --git a/vp9/encoder/vp9_speed_features.c b/vp9/encoder/vp9_speed_features.c
index 7c3abd5d7..15b986197 100644
--- a/vp9/encoder/vp9_speed_features.c
+++ b/vp9/encoder/vp9_speed_features.c
@@ -14,20 +14,23 @@
#include "vp9/encoder/vp9_speed_features.h"
enum {
- ALL_INTRA_MODES = (1 << DC_PRED) |
+ INTRA_ALL = (1 << DC_PRED) |
(1 << V_PRED) | (1 << H_PRED) |
(1 << D45_PRED) | (1 << D135_PRED) |
(1 << D117_PRED) | (1 << D153_PRED) |
(1 << D207_PRED) | (1 << D63_PRED) |
(1 << TM_PRED),
-
- INTRA_DC_ONLY = (1 << DC_PRED),
-
- INTRA_DC_TM = (1 << TM_PRED) | (1 << DC_PRED),
-
+ INTRA_DC = (1 << DC_PRED),
+ INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
+ INTRA_DC_TM_H_V = (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) |
+ (1 << H_PRED)
+};
- INTRA_DC_TM_H_V = INTRA_DC_TM | (1 << V_PRED) | (1 << H_PRED)
+enum {
+ INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) | (1 << NEWMV),
+ INTER_NEAREST = (1 << NEARESTMV),
+ INTER_NEAREST_NEAR_NEW = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV)
};
enum {
@@ -140,8 +143,8 @@ static void set_good_speed_feature(VP9_COMP *cpi, VP9_COMMON *cm,
sf->search_method = HEX;
sf->disable_filter_search_var_thresh = 500;
for (i = 0; i < TX_SIZES; ++i) {
- sf->intra_y_mode_mask[i] = INTRA_DC_ONLY;
- sf->intra_uv_mode_mask[i] = INTRA_DC_ONLY;
+ sf->intra_y_mode_mask[i] = INTRA_DC;
+ sf->intra_uv_mode_mask[i] = INTRA_DC;
}
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
}
@@ -156,7 +159,7 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
sf->adaptive_rd_thresh = 1;
sf->use_fast_coef_costing = 1;
- if (speed == 1) {
+ if (speed >= 1) {
sf->use_square_partition_only = !frame_is_intra_only(cm);
sf->less_rectangular_check = 1;
sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
@@ -179,13 +182,9 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
}
if (speed >= 2) {
- sf->use_square_partition_only = !frame_is_intra_only(cm);
- sf->less_rectangular_check = 1;
- sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
- : USE_LARGESTALL;
if (MIN(cm->width, cm->height) >= 720)
- sf->disable_split_mask = cm->show_frame ?
- DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
+ : DISABLE_ALL_INTER_SPLIT;
else
sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY;
@@ -193,28 +192,18 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
FLAG_SKIP_INTRA_BESTINTER |
FLAG_SKIP_COMP_BESTINTRA |
FLAG_SKIP_INTRA_LOWVAR;
- sf->use_rd_breakout = 1;
- sf->adaptive_motion_search = 1;
sf->adaptive_pred_interp_filter = 2;
- sf->auto_mv_step_size = 1;
sf->reference_masking = 1;
-
sf->disable_filter_search_var_thresh = 50;
sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
-
sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX;
sf->use_lastframe_partitioning = LAST_FRAME_PARTITION_LOW_MOTION;
sf->lf_motion_threshold = LOW_MOITION_THRESHOLD;
sf->adjust_partitioning_from_last_frame = 1;
sf->last_partitioning_redo_frequency = 3;
-
- sf->adaptive_rd_thresh = 2;
sf->use_lp32x32fdct = 1;
sf->mode_skip_start = 11;
- sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V;
sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V;
- sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V;
- sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V;
}
if (speed >= 3) {
@@ -246,15 +235,15 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
sf->subpel_force_stop = 1;
for (i = 0; i < TX_SIZES; i++) {
sf->intra_y_mode_mask[i] = INTRA_DC_H_V;
- sf->intra_uv_mode_mask[i] = INTRA_DC_ONLY;
+ sf->intra_uv_mode_mask[i] = INTRA_DC;
}
- sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_ONLY;
+ sf->intra_y_mode_mask[TX_32X32] = INTRA_DC;
sf->frame_parameter_update = 0;
sf->search_method = FAST_HEX;
- sf->disable_inter_mode_mask[BLOCK_32X32] = 1 << INTER_OFFSET(ZEROMV);
- sf->disable_inter_mode_mask[BLOCK_32X64] = ~(1 << INTER_OFFSET(NEARESTMV));
- sf->disable_inter_mode_mask[BLOCK_64X32] = ~(1 << INTER_OFFSET(NEARESTMV));
- sf->disable_inter_mode_mask[BLOCK_64X64] = ~(1 << INTER_OFFSET(NEARESTMV));
+ sf->inter_mode_mask[BLOCK_32X32] = INTER_NEAREST_NEAR_NEW;
+ sf->inter_mode_mask[BLOCK_32X64] = INTER_NEAREST;
+ sf->inter_mode_mask[BLOCK_64X32] = INTER_NEAREST;
+ sf->inter_mode_mask[BLOCK_64X64] = INTER_NEAREST;
sf->max_intra_bsize = BLOCK_32X32;
sf->allow_skip_recode = 1;
}
@@ -285,7 +274,7 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
if (speed >= 7) {
int i;
for (i = 0; i < BLOCK_SIZES; ++i)
- sf->disable_inter_mode_mask[i] = ~(1 << INTER_OFFSET(NEARESTMV));
+ sf->inter_mode_mask[i] = INTER_NEAREST;
}
}
@@ -330,8 +319,8 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->disable_split_var_thresh = 0;
sf->disable_filter_search_var_thresh = 0;
for (i = 0; i < TX_SIZES; i++) {
- sf->intra_y_mode_mask[i] = ALL_INTRA_MODES;
- sf->intra_uv_mode_mask[i] = ALL_INTRA_MODES;
+ sf->intra_y_mode_mask[i] = INTRA_ALL;
+ sf->intra_uv_mode_mask[i] = INTRA_ALL;
}
sf->use_rd_breakout = 0;
sf->skip_encode_sb = 0;
@@ -343,7 +332,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set
sf->use_nonrd_pick_mode = 0;
for (i = 0; i < BLOCK_SIZES; ++i)
- sf->disable_inter_mode_mask[i] = 0;
+ sf->inter_mode_mask[i] = INTER_ALL;
sf->max_intra_bsize = BLOCK_64X64;
// This setting only takes effect when partition_search_type is set
// to FIXED_PARTITION.
diff --git a/vp9/encoder/vp9_speed_features.h b/vp9/encoder/vp9_speed_features.h
index a54599e6a..3e7cd27d8 100644
--- a/vp9/encoder/vp9_speed_features.h
+++ b/vp9/encoder/vp9_speed_features.h
@@ -331,8 +331,8 @@ typedef struct SPEED_FEATURES {
int use_nonrd_pick_mode;
// A binary mask indicating if NEARESTMV, NEARMV, ZEROMV, NEWMV
- // modes are disabled in order from LSB to MSB for each BLOCK_SIZE.
- int disable_inter_mode_mask[BLOCK_SIZES];
+ // modes are used in order from LSB to MSB for each BLOCK_SIZE.
+ int inter_mode_mask[BLOCK_SIZES];
// This feature controls whether we do the expensive context update and
// calculation in the rd coefficient costing loop.
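The polarity of the mask also flips with the rename: the old disable_inter_mode_mask listed modes to skip, indexed by INTER_OFFSET(mode), while the new inter_mode_mask lists modes to keep, indexed by the mode value itself (as the updated checks in vp9_pickmode.c and vp9_rdopt.c show). A small sketch of the equivalent tests, using VP9's NEARESTMV..NEWMV values (10..13) as an assumption:

#include <stdio.h>

enum { NEARESTMV = 10, NEARMV, ZEROMV, NEWMV };   /* assumed enum values */
#define INTER_OFFSET(mode) ((mode) - NEARESTMV)

int main(void) {
  const int this_mode = NEWMV;

  /* Old form: a set bit disables the mode. */
  const int disable_inter_mode_mask = ~(1 << INTER_OFFSET(NEARESTMV));
  const int old_skip =
      (disable_inter_mode_mask & (1 << INTER_OFFSET(this_mode))) != 0;

  /* New form: a set bit allows the mode (INTER_NEAREST in the .c file). */
  const int inter_mode_mask = 1 << NEARESTMV;
  const int new_skip = !(inter_mode_mask & (1 << this_mode));

  printf("old_skip = %d, new_skip = %d\n", old_skip, new_skip);  /* both 1 */
  return 0;
}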
diff --git a/vp9/encoder/vp9_svc_layercontext.c b/vp9/encoder/vp9_svc_layercontext.c
index 95ea1072d..c25314b42 100644
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -149,20 +149,7 @@ void vp9_update_spatial_layer_framerate(VP9_COMP *const cpi, double framerate) {
oxcf->two_pass_vbrmin_section / 100);
lrc->max_frame_bandwidth = (int)(((int64_t)lrc->avg_frame_bandwidth *
oxcf->two_pass_vbrmax_section) / 100);
- lrc->max_gf_interval = 16;
-
- lrc->static_scene_max_gf_interval = cpi->oxcf.key_freq >> 1;
-
- if (oxcf->play_alternate && oxcf->lag_in_frames) {
- if (lrc->max_gf_interval > oxcf->lag_in_frames - 1)
- lrc->max_gf_interval = oxcf->lag_in_frames - 1;
-
- if (lrc->static_scene_max_gf_interval > oxcf->lag_in_frames - 1)
- lrc->static_scene_max_gf_interval = oxcf->lag_in_frames - 1;
- }
-
- if (lrc->max_gf_interval > lrc->static_scene_max_gf_interval)
- lrc->max_gf_interval = lrc->static_scene_max_gf_interval;
+ vp9_rc_set_gf_max_interval(oxcf, lrc);
}
void vp9_restore_layer_context(VP9_COMP *const cpi) {
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index 6e56c8418..48110b414 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -38,7 +38,6 @@ struct vpx_codec_alg_priv {
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
vpx_image_t img;
- int img_avail;
int invert_tile_order;
// External frame buffer info to save for VP9 common.
@@ -245,15 +244,11 @@ static void init_decoder(vpx_codec_alg_priv_t *ctx) {
static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const uint8_t **data, unsigned int data_sz,
void *user_priv, int64_t deadline) {
- YV12_BUFFER_CONFIG sd;
- vp9_ppflags_t flags = {0, 0, 0};
+ vp9_ppflags_t flags = {0};
VP9_COMMON *cm = NULL;
(void)deadline;
- vp9_zero(sd);
- ctx->img_avail = 0;
-
// Determine the stream parameters. Note that we rely on peek_si to
// validate that we have a buffer that does not wrap around the top
// of the heap.
@@ -288,13 +283,6 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
set_ppflags(ctx, &flags);
- if (vp9_get_raw_frame(ctx->pbi, &sd, &flags))
- return update_error_state(ctx, &cm->error);
-
- yuvconfig2image(&ctx->img, &sd, user_priv);
- ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
- ctx->img_avail = 1;
-
return VPX_CODEC_OK;
}
@@ -423,15 +411,20 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
vpx_image_t *img = NULL;
- if (ctx->img_avail) {
- // iter acts as a flip flop, so an image is only returned on the first
- // call to get_frame.
- if (!(*iter)) {
+ // iter acts as a flip flop, so an image is only returned on the first
+ // call to get_frame.
+ if (*iter == NULL && ctx->pbi != NULL) {
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0, 0, 0};
+
+ if (vp9_get_raw_frame(ctx->pbi, &sd, &flags) == 0) {
+ VP9_COMMON *cm = &ctx->pbi->common;
+ yuvconfig2image(&ctx->img, &sd, NULL);
+ ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
img = &ctx->img;
*iter = img;
}
}
- ctx->img_avail = 0;
return img;
}
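For context on the flip-flop comment above: a typical caller drives the decoder with vpx_codec_decode() followed by a vpx_codec_get_frame() loop over an iterator, and after this change the raw-frame fetch happens inside that second call rather than during decode. A sketch of such a caller (setup and error handling omitted; not part of the patch):

#include "vpx/vpx_decoder.h"

static void decode_and_consume(vpx_codec_ctx_t *codec,
                               const uint8_t *data, unsigned int data_sz) {
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;

  if (vpx_codec_decode(codec, data, data_sz, NULL, 0) != VPX_CODEC_OK)
    return;

  /* get_frame now calls vp9_get_raw_frame() on the first iteration only. */
  while ((img = vpx_codec_get_frame(codec, &iter)) != NULL) {
    /* consume img->planes[] / img->stride[] here */
  }
}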
diff --git a/vpx/exports_enc b/vpx/exports_enc
index 155faf6f6..07f0280ec 100644
--- a/vpx/exports_enc
+++ b/vpx/exports_enc
@@ -8,7 +8,6 @@ text vpx_codec_get_preview_frame
text vpx_codec_set_cx_data_buf
text vpx_svc_dump_statistics
text vpx_svc_encode
-text vpx_svc_free
text vpx_svc_get_buffer
text vpx_svc_get_encode_frame_count
text vpx_svc_get_frame_size
@@ -22,4 +21,4 @@ text vpx_svc_set_quantizers
text vpx_svc_set_scale_factors
text vpx_svc_get_layer_resolution
text vpx_svc_get_rc_stats_buffer_size
-text vpx_svc_get_rc_stats_buffer
\ No newline at end of file
+text vpx_svc_get_rc_stats_buffer
+text vpx_svc_get_rc_stats_buffer