 examples/vp9_spatial_svc_encoder.c |   6
 test/decode_test_driver.cc         |   6
 test/svc_test.cc                   | 121
 test/test.mk                       |   2
 test/user_priv_test.cc             |  33
 vp9/decoder/vp9_decoder.c          |   9
 vp9/decoder/vp9_dthread.h          |  17
 vp9/encoder/vp9_encodeframe.c      |   5
 vp9/encoder/vp9_encoder.c          |  95
 vp9/encoder/vp9_encoder.h          |   4
 vp9/encoder/vp9_firstpass.c        |   3
 vp9/encoder/vp9_lookahead.c        |  12
 vp9/encoder/vp9_lookahead.h        |  19
 vp9/encoder/vp9_pickmode.c         | 154
 vp9/encoder/vp9_pickmode.h         |   6
 vp9/encoder/vp9_rdopt.c            |  24
 vp9/encoder/vp9_rdopt.h            |  10
 vp9/encoder/vp9_speed_features.c   |   4
 vp9/encoder/vp9_speed_features.h   |   5
 vp9/encoder/vp9_svc_layercontext.c |  99
 vp9/encoder/vp9_svc_layercontext.h |  18
 vp9/vp9_cx_iface.c                 |  90
 vp9/vp9_dx_iface.c                 | 288
 vp9/vp9cx.mk                       |   6
 vpx/src/svc_encodeframe.c          | 385
 vpx/svc_context.h                  |   8
 26 files changed, 944 insertions(+), 485 deletions(-)
diff --git a/examples/vp9_spatial_svc_encoder.c b/examples/vp9_spatial_svc_encoder.c
index 5bc657576..8c87b2a44 100644
--- a/examples/vp9_spatial_svc_encoder.c
+++ b/examples/vp9_spatial_svc_encoder.c
@@ -296,6 +296,7 @@ int main(int argc, const char **argv) {
int frame_duration = 1; /* 1 timebase tick per frame */
FILE *infile = NULL;
int end_of_stream = 0;
+ int frame_size;
memset(&svc_ctx, 0, sizeof(svc_ctx));
svc_ctx.log_print = 1;
@@ -351,11 +352,10 @@ int main(int argc, const char **argv) {
die_codec(&codec, "Failed to encode frame");
}
if (!(app_input.passes == 2 && app_input.pass == 1)) {
- if (vpx_svc_get_frame_size(&svc_ctx) > 0) {
+ while ((frame_size = vpx_svc_get_frame_size(&svc_ctx)) > 0) {
vpx_video_writer_write_frame(writer,
vpx_svc_get_buffer(&svc_ctx),
- vpx_svc_get_frame_size(&svc_ctx),
- pts);
+ frame_size, pts);
}
}
if (vpx_svc_get_rc_stats_buffer_size(&svc_ctx) > 0) {
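[Note on the example change above: with the deferred SVC output introduced later in this patch, one vpx_svc_encode() call may leave zero or several finished superframes in the SVC buffer, so the writer loop has to drain them all. A minimal sketch of the resulting encode-and-drain pattern, reusing the example's own locals (svc_ctx, codec, writer, pts, frame_duration, end_of_stream) and assuming `raw` is the current input image:]

    /* Sketch only: encode one input image (or flush with NULL at EOF) and
     * drain every complete superframe produced so far. */
    int frame_size;
    if (vpx_svc_encode(&svc_ctx, &codec, end_of_stream ? NULL : &raw,
                       pts, frame_duration, VPX_DL_GOOD_QUALITY) != VPX_CODEC_OK)
      die_codec(&codec, "Failed to encode frame");
    while ((frame_size = vpx_svc_get_frame_size(&svc_ctx)) > 0) {
      vpx_video_writer_write_frame(writer, vpx_svc_get_buffer(&svc_ctx),
                                   frame_size, pts);
    }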
diff --git a/test/decode_test_driver.cc b/test/decode_test_driver.cc
index 655b09055..8bea4ccf9 100644
--- a/test/decode_test_driver.cc
+++ b/test/decode_test_driver.cc
@@ -19,7 +19,8 @@ const char kVP8Name[] = "WebM Project VP8";
vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
vpx_codec_stream_info_t *stream_info) {
- return vpx_codec_peek_stream_info(CodecInterface(), cxdata, size,
+ return vpx_codec_peek_stream_info(CodecInterface(),
+ cxdata, static_cast<unsigned int>(size),
stream_info);
}
@@ -46,7 +47,8 @@ void DecoderTest::RunLoop(CompressedVideoSource *video) {
const bool is_vp8 = strncmp(kVP8Name, codec_name, sizeof(kVP8Name) - 1) == 0;
// Decode frames.
- for (video->Begin(); video->cxdata(); video->Next()) {
+ for (video->Begin(); !::testing::Test::HasFailure() && video->cxdata();
+ video->Next()) {
PreDecodeFrameHook(*video, decoder);
vpx_codec_stream_info_t stream_info;
diff --git a/test/svc_test.cc b/test/svc_test.cc
index db26a8e9d..f831e751c 100644
--- a/test/svc_test.cc
+++ b/test/svc_test.cc
@@ -265,9 +265,17 @@ TEST_F(SvcTest, FirstFrameHasLayers) {
video.duration(), VPX_DL_GOOD_QUALITY);
EXPECT_EQ(VPX_CODEC_OK, res);
+ if (vpx_svc_get_frame_size(&svc_) == 0) {
+ // Flush encoder
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ }
+
+ int frame_size = vpx_svc_get_frame_size(&svc_);
+ EXPECT_GT(frame_size, 0);
const vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
// this test fails with a decoder error
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
@@ -277,6 +285,9 @@ TEST_F(SvcTest, EncodeThreeFrames) {
svc_.spatial_layers = 2;
vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
vpx_svc_set_quantizers(&svc_, "40,30", 0);
+ int decoded_frames = 0;
+ vpx_codec_err_t res_dec;
+ int frame_size;
vpx_codec_err_t res =
vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
@@ -291,13 +302,14 @@ TEST_F(SvcTest, EncodeThreeFrames) {
// This frame is a keyframe.
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 1
video.Next();
@@ -305,12 +317,14 @@ TEST_F(SvcTest, EncodeThreeFrames) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 2
video.Next();
@@ -318,12 +332,29 @@ TEST_F(SvcTest, EncodeThreeFrames) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ // Flush encoder
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+
+ while ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ EXPECT_EQ(decoded_frames, 3);
}
TEST_F(SvcTest, GetLayerResolution) {
@@ -413,6 +444,9 @@ TEST_F(SvcTest, TwoPassEncode) {
vpx_codec_destroy(&codec_);
// Second pass encode
+ int decoded_frames = 0;
+ vpx_codec_err_t res_dec;
+ int frame_size;
codec_enc_.g_pass = VPX_RC_LAST_PASS;
codec_enc_.rc_twopass_stats_in.buf = &stats_buf[0];
codec_enc_.rc_twopass_stats_in.sz = stats_buf.size();
@@ -427,12 +461,14 @@ TEST_F(SvcTest, TwoPassEncode) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 1
video.Next();
@@ -440,12 +476,14 @@ TEST_F(SvcTest, TwoPassEncode) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 2
video.Next();
@@ -453,12 +491,29 @@ TEST_F(SvcTest, TwoPassEncode) {
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ // Flush encoder
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+
+ while ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ EXPECT_EQ(decoded_frames, 3);
}
} // namespace
diff --git a/test/test.mk b/test/test.mk
index af344e52e..e7c4036e7 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -758,8 +758,6 @@ LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey_adpq.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey_adpq.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-fuzz-flicker.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-fuzz-flicker.webm.md5
# Invalid files for testing libvpx error checking.
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-01.webm
diff --git a/test/user_priv_test.cc b/test/user_priv_test.cc
index 38eef1c92..f9aef33da 100644
--- a/test/user_priv_test.cc
+++ b/test/user_priv_test.cc
@@ -13,6 +13,7 @@
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
+#include "test/acm_random.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/ivf_video_source.h"
@@ -22,17 +23,27 @@
#include "test/webm_video_source.h"
#endif
#include "vpx_mem/vpx_mem.h"
+#include "vpx/vp8.h"
namespace {
using std::string;
+using libvpx_test::ACMRandom;
#if CONFIG_WEBM_IO
+
+void CheckUserPrivateData(void *user_priv, int *target) {
+ // actual pointer value should be the same as expected.
+ EXPECT_EQ(reinterpret_cast<void *>(target), user_priv) <<
+ "user_priv pointer value does not match.";
+}
+
// Decodes |filename|. Passes in user_priv data when calling DecodeFrame and
// compares the user_priv from return img with the original user_priv to see if
// they match. Both the pointer values and the values inside the addresses
// should match.
string DecodeFile(const string &filename) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
libvpx_test::WebMVideoSource video(filename);
video.Init();
@@ -41,7 +52,8 @@ string DecodeFile(const string &filename) {
libvpx_test::MD5 md5;
int frame_num = 0;
- for (video.Begin(); video.cxdata(); video.Next()) {
+ for (video.Begin(); !::testing::Test::HasFailure() && video.cxdata();
+ video.Next()) {
void *user_priv = reinterpret_cast<void *>(&frame_num);
const vpx_codec_err_t res =
decoder.DecodeFrame(video.cxdata(), video.frame_size(),
@@ -56,16 +68,17 @@ string DecodeFile(const string &filename) {
// Get decompressed data.
while ((img = dec_iter.Next())) {
if (frame_num == 0) {
- // user_priv pointer value should be the same.
- EXPECT_EQ(img->user_priv, reinterpret_cast<void *>(NULL)) <<
- "user_priv pointer value does not match.";
+ CheckUserPrivateData(img->user_priv, NULL);
} else {
- // user_priv pointer value should be the same.
- EXPECT_EQ(img->user_priv, reinterpret_cast<void *>(&frame_num)) <<
- "user_priv pointer value does not match.";
- // value in user_priv pointer should also be the same.
- EXPECT_EQ(*reinterpret_cast<int *>(img->user_priv), frame_num) <<
- "Value in user_priv does not match.";
+ CheckUserPrivateData(img->user_priv, &frame_num);
+
+ // Also test ctrl_get_reference api.
+ struct vp9_ref_frame ref;
+ // Randomly fetch a reference frame.
+ ref.idx = rnd.Rand8() % 3;
+ decoder.Control(VP9_GET_REFERENCE, &ref);
+
+ CheckUserPrivateData(ref.img.user_priv, &frame_num);
}
md5.Add(img);
}
diff --git a/vp9/decoder/vp9_decoder.c b/vp9/decoder/vp9_decoder.c
index 84cb84aec..245c5f195 100644
--- a/vp9/decoder/vp9_decoder.c
+++ b/vp9/decoder/vp9_decoder.c
@@ -210,7 +210,10 @@ static void swap_frame_buffers(VP9Decoder *pbi) {
}
cm->frame_to_show = get_frame_new_buffer(cm);
- cm->frame_bufs[cm->new_fb_idx].ref_count--;
+
+ if (!pbi->frame_parallel_decode || !cm->show_frame) {
+ --cm->frame_bufs[cm->new_fb_idx].ref_count;
+ }
// Invalidate these references until the next frame starts.
for (ref_index = 0; ref_index < 3; ref_index++)
@@ -239,7 +242,9 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
}
// Check if the previous frame was a frame without any references to it.
- if (cm->new_fb_idx >= 0 && cm->frame_bufs[cm->new_fb_idx].ref_count == 0)
+ // Release frame buffer if not decoding in frame parallel mode.
+ if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0
+ && cm->frame_bufs[cm->new_fb_idx].ref_count == 0)
cm->release_fb_cb(cm->cb_priv,
&cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer);
cm->new_fb_idx = get_free_fb(cm);
diff --git a/vp9/decoder/vp9_dthread.h b/vp9/decoder/vp9_dthread.h
index a727e2aef..01c07f1a0 100644
--- a/vp9/decoder/vp9_dthread.h
+++ b/vp9/decoder/vp9_dthread.h
@@ -40,6 +40,23 @@ typedef struct VP9LfSyncData {
int sync_range;
} VP9LfSync;
+// WorkerData for the FrameWorker thread. It contains all the information of
+// the worker and decode structures for decoding a frame.
+typedef struct FrameWorkerData {
+ struct VP9Decoder *pbi;
+ const uint8_t *data;
+ const uint8_t *data_end;
+ size_t data_size;
+ void *user_priv;
+ int result;
+ int worker_id;
+
+ // scratch_buffer is used in frame parallel mode only.
+ // It is used to make a copy of the compressed data.
+ uint8_t *scratch_buffer;
+ size_t scratch_buffer_size;
+} FrameWorkerData;
+
// Allocate memory for loopfilter row synchronization.
void vp9_loop_filter_alloc(struct VP9Common *cm, VP9LfSync *lf_sync,
int rows, int width);
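[The FrameWorkerData added above bundles everything one FrameWorker thread needs to decode a frame. A hedged sketch of how a worker is wired to it, mirroring init_decoder() in the vp9_dx_iface.c hunks further down (allocation-failure handling omitted):]

    VP9Worker worker;
    FrameWorkerData *worker_data;

    vp9_worker_init(&worker);
    worker.data1 = vpx_memalign(32, sizeof(FrameWorkerData));  /* freed later with vpx_free() */
    worker_data = (FrameWorkerData *)worker.data1;
    worker_data->pbi = vp9_decoder_create();
    worker_data->pbi->frame_parallel_decode = 0;      /* serial mode in this sketch */
    worker.hook = (VP9WorkerHook)frame_worker_hook;   /* hook defined in vp9_dx_iface.c */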
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index f031c18d9..b9349a49a 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -3318,7 +3318,10 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
&xd->block_refs[ref]->sf);
}
- vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
+ if (!cpi->sf.reuse_inter_pred_sby)
+ vp9_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
+
+ vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
if (!x->skip) {
mbmi->skip = 1;
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index ef31c7478..08968b247 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -106,7 +106,7 @@ static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
}
}
-static void set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
+void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
MACROBLOCK *const mb = &cpi->mb;
cpi->common.allow_high_precision_mv = allow_high_precision_mv;
if (cpi->common.allow_high_precision_mv) {
@@ -572,7 +572,7 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) {
cm->reset_frame_context = 0;
vp9_reset_segment_features(&cm->seg);
- set_high_precision_mv(cpi, 0);
+ vp9_set_high_precision_mv(cpi, 0);
{
int i;
@@ -2117,7 +2117,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
if (!frame_is_intra_only(cm)) {
cm->interp_filter = DEFAULT_INTERP_FILTER;
/* TODO: Decide this more intelligently */
- set_high_precision_mv(cpi, q < HIGH_PRECISION_MV_QTHRESH);
+ vp9_set_high_precision_mv(cpi, q < HIGH_PRECISION_MV_QTHRESH);
}
if (cpi->sf.recode_loop == DISALLOW_RECODE) {
@@ -2298,12 +2298,22 @@ int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags,
int res = 0;
const int subsampling_x = sd->uv_width < sd->y_width;
const int subsampling_y = sd->uv_height < sd->y_height;
+ const int is_spatial_svc = cpi->use_svc &&
+ (cpi->svc.number_temporal_layers == 1);
check_initial_width(cpi, subsampling_x, subsampling_y);
vpx_usec_timer_start(&timer);
- if (vp9_lookahead_push(cpi->lookahead,
- sd, time_stamp, end_time, frame_flags))
+
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ res = vp9_svc_lookahead_push(cpi, cpi->lookahead, sd, time_stamp, end_time,
+ frame_flags);
+ else
+#endif
+ res = vp9_lookahead_push(cpi->lookahead,
+ sd, time_stamp, end_time, frame_flags);
+ if (res)
res = -1;
vpx_usec_timer_mark(&timer);
cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
@@ -2419,11 +2429,14 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
MV_REFERENCE_FRAME ref_frame;
int arf_src_index;
+ const int is_spatial_svc = cpi->use_svc &&
+ (cpi->svc.number_temporal_layers == 1);
if (!cpi)
return -1;
- if (cpi->svc.number_spatial_layers > 1 && cpi->pass == 2) {
+ if (is_spatial_svc && cpi->pass == 2) {
+ vp9_svc_lookahead_peek(cpi, cpi->lookahead, 0, 1);
vp9_restore_layer_context(cpi);
}
@@ -2432,7 +2445,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
cpi->source = NULL;
cpi->last_source = NULL;
- set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+ vp9_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
// Normal defaults
cm->reset_frame_context = 0;
@@ -2446,7 +2459,14 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
if (arf_src_index) {
assert(arf_src_index <= rc->frames_to_key);
- if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, arf_src_index))) {
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ cpi->source = vp9_svc_lookahead_peek(cpi, cpi->lookahead,
+ arf_src_index, 1);
+ else
+#endif
+ cpi->source = vp9_lookahead_peek(cpi->lookahead, arf_src_index);
+ if (cpi->source != NULL) {
cpi->alt_ref_source = cpi->source;
if (cpi->oxcf.arnr_max_frames > 0) {
@@ -2472,12 +2492,24 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
if (!cpi->source) {
// Get last frame source.
if (cm->current_video_frame > 0) {
- if ((cpi->last_source = vp9_lookahead_peek(cpi->lookahead, -1)) == NULL)
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ cpi->last_source = vp9_svc_lookahead_peek(cpi, cpi->lookahead, -1, 0);
+ else
+#endif
+ cpi->last_source = vp9_lookahead_peek(cpi->lookahead, -1);
+ if (cpi->last_source == NULL)
return -1;
}
// Read in the source frame.
- if ((cpi->source = vp9_lookahead_pop(cpi->lookahead, flush))) {
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ cpi->source = vp9_svc_lookahead_pop(cpi, cpi->lookahead, flush);
+ else
+#endif
+ cpi->source = vp9_lookahead_pop(cpi->lookahead, flush);
+ if (cpi->source != NULL) {
cm->show_frame = 1;
cm->intra_only = 0;
@@ -2498,7 +2530,9 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
*time_stamp = cpi->source->ts_start;
*time_end = cpi->source->ts_end;
- *frame_flags = cpi->source->flags;
+ *frame_flags =
+ (cpi->source->flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+
} else {
*size = 0;
if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
@@ -2829,3 +2863,42 @@ int vp9_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b) {
int vp9_get_quantizer(VP9_COMP *cpi) {
return cpi->common.base_qindex;
}
+
+void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags) {
+ if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_ARF)) {
+ int ref = 7;
+
+ if (flags & VP8_EFLAG_NO_REF_LAST)
+ ref ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_GF)
+ ref ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_ARF)
+ ref ^= VP9_ALT_FLAG;
+
+ vp9_use_as_reference(cpi, ref);
+ }
+
+ if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
+ VP8_EFLAG_FORCE_ARF)) {
+ int upd = 7;
+
+ if (flags & VP8_EFLAG_NO_UPD_LAST)
+ upd ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_GF)
+ upd ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_ARF)
+ upd ^= VP9_ALT_FLAG;
+
+ vp9_update_reference(cpi, upd);
+ }
+
+ if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
+ vp9_update_entropy(cpi, 0);
+ }
+}
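[The per-frame reference/update flag translation that used to live in vp9_cx_iface.c (removed further down) is now the shared helper vp9_apply_encoding_flags(). A minimal usage sketch, assuming a valid VP9_COMP pointer `cpi` and the VP8_EFLAG_* names from vpx/vp8cx.h:]

    /* Encode the next frame without referencing or refreshing the ARF. */
    const vpx_enc_frame_flags_t flags = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF;
    vp9_apply_encoding_flags(cpi, flags);
    /* Internally this reduces to vp9_use_as_reference(cpi, VP9_LAST_FLAG | VP9_GOLD_FLAG)
     * and vp9_update_reference(cpi, VP9_LAST_FLAG | VP9_GOLD_FLAG). */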
diff --git a/vp9/encoder/vp9_encoder.h b/vp9/encoder/vp9_encoder.h
index fc2e007ad..47649a863 100644
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -517,10 +517,14 @@ void vp9_update_reference_frames(VP9_COMP *cpi);
int64_t vp9_rescale(int64_t val, int64_t num, int denom);
+void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv);
+
YV12_BUFFER_CONFIG *vp9_scale_if_required(VP9_COMMON *cm,
YV12_BUFFER_CONFIG *unscaled,
YV12_BUFFER_CONFIG *scaled);
+void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags);
+
static INLINE void set_ref_ptrs(VP9_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 5e5a85d36..d0dd18213 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -607,7 +607,8 @@ void vp9_first_pass(VP9_COMP *cpi) {
&unscaled_last_source_buf_2d);
// TODO(pengchong): Replace the hard-coded threshold
- if (raw_motion_error > 25) {
+ if (raw_motion_error > 25 ||
+ (cpi->use_svc && cpi->svc.number_temporal_layers == 1)) {
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search.
first_pass_motion_search(cpi, x, &best_ref_mv.as_mv, &mv.as_mv,
diff --git a/vp9/encoder/vp9_lookahead.c b/vp9/encoder/vp9_lookahead.c
index abe71e681..e7435170e 100644
--- a/vp9/encoder/vp9_lookahead.c
+++ b/vp9/encoder/vp9_lookahead.c
@@ -18,18 +18,6 @@
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_lookahead.h"
-// The max of past frames we want to keep in the queue.
-#define MAX_PRE_FRAMES 1
-
-struct lookahead_ctx {
- unsigned int max_sz; /* Absolute size of the queue */
- unsigned int sz; /* Number of buffers currently in the queue */
- unsigned int read_idx; /* Read index */
- unsigned int write_idx; /* Write index */
- struct lookahead_entry *buf; /* Buffer list */
-};
-
-
/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
unsigned int *idx) {
diff --git a/vp9/encoder/vp9_lookahead.h b/vp9/encoder/vp9_lookahead.h
index ff63c0d0d..f9cc3c8db 100644
--- a/vp9/encoder/vp9_lookahead.h
+++ b/vp9/encoder/vp9_lookahead.h
@@ -14,6 +14,11 @@
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
+#ifdef CONFIG_SPATIAL_SVC
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -25,10 +30,22 @@ struct lookahead_entry {
int64_t ts_start;
int64_t ts_end;
unsigned int flags;
+
+#ifdef CONFIG_SPATIAL_SVC
+ vpx_svc_parameters_t svc_params[VPX_SS_MAX_LAYERS];
+#endif
};
+// The max of past frames we want to keep in the queue.
+#define MAX_PRE_FRAMES 1
-struct lookahead_ctx;
+struct lookahead_ctx {
+ unsigned int max_sz; /* Absolute size of the queue */
+ unsigned int sz; /* Number of buffers currently in the queue */
+ unsigned int read_idx; /* Read index */
+ unsigned int write_idx; /* Write index */
+ struct lookahead_entry *buf; /* Buffer list */
+};
/**\brief Initializes the lookahead stage
*
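[Moving struct lookahead_ctx into the header lets the SVC code index the ring buffer directly. A hedged sketch of the wraparound arithmetic it relies on; the same computation appears in vp9_svc_lookahead_push() later in this patch:]

    #include "vp9/encoder/vp9_lookahead.h"

    /* Return the entry most recently written by vp9_lookahead_push();
     * write_idx has already been advanced, so step back one slot. */
    static struct lookahead_entry *newest_entry(struct lookahead_ctx *ctx) {
      int index = (int)ctx->write_idx - 1;
      if (index < 0)
        index += ctx->max_sz;
      return ctx->buf + index;
    }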
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 29896aa9e..e33d52b0c 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -23,6 +23,7 @@
#include "vp9/common/vp9_reconintra.h"
#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rdopt.h"
@@ -183,6 +184,22 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
*out_dist_sum += dist << 4;
}
+static int get_pred_buffer(PRED_BUFFER *p, int len) {
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (!p[i].in_use) {
+ p[i].in_use = 1;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static void free_pred_buffer(PRED_BUFFER *p) {
+ p->in_use = 0;
+}
+
// TODO(jingning) placeholder for inter-frame non-RD mode decision.
// this needs various further optimizations. to be continued..
int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
@@ -229,6 +246,31 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
const int pred_filter_search = (((mi_row + mi_col) >> bsl) +
get_chessboard_index(cm)) % 2;
+ // For speed 6, the result of interp filter is reused later in actual encoding
+ // process.
+ int bh = num_4x4_blocks_high_lookup[bsize] << 2;
+ int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
+ int pixels_in_block = bh * bw;
+ // tmp[3] points to dst buffer, and the other 3 point to allocated buffers.
+ PRED_BUFFER tmp[4];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, pred_buf, 3 * 64 * 64);
+ struct buf_2d orig_dst = pd->dst;
+ PRED_BUFFER *best_pred = NULL;
+ PRED_BUFFER *this_mode_pred = NULL;
+ int i;
+
+ if (cpi->sf.reuse_inter_pred_sby) {
+ for (i = 0; i < 3; i++) {
+ tmp[i].data = &pred_buf[pixels_in_block * i];
+ tmp[i].stride = bw;
+ tmp[i].in_use = 0;
+ }
+
+ tmp[3].data = pd->dst.buf;
+ tmp[3].stride = pd->dst.stride;
+ tmp[3].in_use = 0;
+ }
+
x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
x->skip = 0;
@@ -241,9 +283,9 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
mbmi->ref_frame[0] = NONE;
mbmi->ref_frame[1] = NONE;
mbmi->tx_size = MIN(max_txsize_lookup[bsize],
- tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
- mbmi->interp_filter = cpi->common.interp_filter == SWITCHABLE ?
- EIGHTTAP : cpi->common.interp_filter;
+ tx_mode_to_biggest_tx_size[cm->tx_mode]);
+ mbmi->interp_filter = cm->interp_filter == SWITCHABLE ?
+ EIGHTTAP : cm->interp_filter;
mbmi->skip = 0;
mbmi->segment_id = segment_id;
@@ -324,6 +366,16 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// Search for the best prediction filter type, when the resulting
// motion vector is at sub-pixel accuracy level for luma component, i.e.,
// the last three bits are all zeros.
+ if (cpi->sf.reuse_inter_pred_sby) {
+ if (this_mode == NEARESTMV) {
+ this_mode_pred = &tmp[3];
+ } else {
+ this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
+ pd->dst.buf = this_mode_pred->data;
+ pd->dst.stride = bw;
+ }
+ }
+
if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
pred_filter_search &&
((mbmi->mv[0].as_mv.row & 0x07) != 0 ||
@@ -334,6 +386,7 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
unsigned int pf_sse[3];
int64_t best_cost = INT64_MAX;
INTERP_FILTER best_filter = SWITCHABLE, filter;
+ PRED_BUFFER *current_pred = this_mode_pred;
for (filter = EIGHTTAP; filter <= EIGHTTAP_SHARP; ++filter) {
int64_t cost;
@@ -345,12 +398,28 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
vp9_get_switchable_rate(cpi) + pf_rate[filter],
pf_dist[filter]);
if (cost < best_cost) {
- best_filter = filter;
- best_cost = cost;
- skip_txfm = x->skip_txfm;
+ best_filter = filter;
+ best_cost = cost;
+ skip_txfm = x->skip_txfm;
+
+ if (cpi->sf.reuse_inter_pred_sby) {
+ if (this_mode_pred != current_pred) {
+ free_pred_buffer(this_mode_pred);
+ this_mode_pred = current_pred;
+ }
+
+ if (filter < EIGHTTAP_SHARP) {
+ current_pred = &tmp[get_pred_buffer(tmp, 3)];
+ pd->dst.buf = current_pred->data;
+ pd->dst.stride = bw;
+ }
+ }
}
}
+ if (cpi->sf.reuse_inter_pred_sby && this_mode_pred != current_pred)
+ free_pred_buffer(current_pred);
+
mbmi->interp_filter = best_filter;
rate = pf_rate[mbmi->interp_filter];
dist = pf_dist[mbmi->interp_filter];
@@ -370,29 +439,35 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// Skipping checking: test to see if this block can be reconstructed by
// prediction only.
- if (cpi->allow_encode_breakout && x->encode_breakout) {
+ if (cpi->allow_encode_breakout) {
const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
unsigned int var = var_y, sse = sse_y;
// Skipping threshold for ac.
unsigned int thresh_ac;
// Skipping threshold for dc.
unsigned int thresh_dc;
- // Set a maximum for threshold to avoid big PSNR loss in low bit rate
- // case. Use extreme low threshold for static frames to limit skipping.
- const unsigned int max_thresh = 36000;
- // The encode_breakout input
- const unsigned int min_thresh =
- MIN(((unsigned int)x->encode_breakout << 4), max_thresh);
-
- // Calculate threshold according to dequant value.
- thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9;
- thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);
-
- // Adjust ac threshold according to partition size.
- thresh_ac >>= 8 - (b_width_log2_lookup[bsize] +
- b_height_log2_lookup[bsize]);
-
- thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
+ if (x->encode_breakout > 0) {
+ // Set a maximum for threshold to avoid big PSNR loss in low bit rate
+ // case. Use extreme low threshold for static frames to limit
+ // skipping.
+ const unsigned int max_thresh = 36000;
+ // The encode_breakout input
+ const unsigned int min_thresh =
+ MIN(((unsigned int)x->encode_breakout << 4), max_thresh);
+
+ // Calculate threshold according to dequant value.
+ thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9;
+ thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);
+
+ // Adjust ac threshold according to partition size.
+ thresh_ac >>=
+ 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
+
+ thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
+ } else {
+ thresh_ac = 0;
+ thresh_dc = 0;
+ }
// Y skipping condition checking for ac and dc.
if (var <= thresh_ac && (sse - var) <= thresh_dc) {
@@ -449,6 +524,16 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
best_pred_filter = mbmi->interp_filter;
best_ref_frame = ref_frame;
skip_txfm = x->skip_txfm;
+
+ if (cpi->sf.reuse_inter_pred_sby) {
+ if (best_pred != NULL)
+ free_pred_buffer(best_pred);
+
+ best_pred = this_mode_pred;
+ }
+ } else {
+ if (cpi->sf.reuse_inter_pred_sby)
+ free_pred_buffer(this_mode_pred);
}
if (x->skip)
@@ -456,6 +541,19 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
}
+ // If best prediction is not in dst buf, then copy the prediction block from
+ // temp buf to dst buf.
+ if (cpi->sf.reuse_inter_pred_sby && best_pred->data != orig_dst.buf) {
+ uint8_t *copy_from, *copy_to;
+
+ pd->dst = orig_dst;
+ copy_to = pd->dst.buf;
+
+ copy_from = best_pred->data;
+
+ vp9_convolve_copy(copy_from, bw, copy_to, pd->dst.stride, NULL, 0, NULL, 0,
+ bw, bh);
+ }
mbmi->mode = best_mode;
mbmi->interp_filter = best_pred_filter;
@@ -469,12 +567,21 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (!x->skip && best_rd > inter_mode_thresh &&
bsize <= cpi->sf.max_intra_bsize) {
for (this_mode = DC_PRED; this_mode <= DC_PRED; ++this_mode) {
+ if (cpi->sf.reuse_inter_pred_sby) {
+ pd->dst.buf = tmp[0].data;
+ pd->dst.stride = bw;
+ }
+
vp9_predict_intra_block(xd, 0, b_width_log2(bsize),
mbmi->tx_size, this_mode,
&p->src.buf[0], p->src.stride,
&pd->dst.buf[0], pd->dst.stride, 0, 0, 0);
model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
+
+ if (cpi->sf.reuse_inter_pred_sby)
+ pd->dst = orig_dst;
+
rate += cpi->mbmode_cost[this_mode];
rate += intra_cost_penalty;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
@@ -492,6 +599,7 @@ int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
}
}
+
#if CONFIG_DENOISING
vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col, bsize);
#endif
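[When reuse_inter_pred_sby is on, vp9_pick_inter_mode() keeps its luma predictions in a small pool: tmp[0..2] are scratch buffers and tmp[3] aliases the real destination. A hedged sketch of the acquire/release pattern built on the new helpers:]

    /* Sketch only: tmp[] is assumed to be initialized exactly as in
     * vp9_pick_inter_mode() above (data, stride, in_use cleared). */
    const int slot = get_pred_buffer(tmp, 3);   /* first free scratch slot, or -1 */
    if (slot >= 0) {
      PRED_BUFFER *const pred = &tmp[slot];
      /* ... build this mode's prediction into pred->data with stride bw ... */
      /* If the mode loses the RD comparison, hand the slot back: */
      free_pred_buffer(pred);
    }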
diff --git a/vp9/encoder/vp9_pickmode.h b/vp9/encoder/vp9_pickmode.h
index a9c948d31..3d89974fc 100644
--- a/vp9/encoder/vp9_pickmode.h
+++ b/vp9/encoder/vp9_pickmode.h
@@ -17,6 +17,12 @@
extern "C" {
#endif
+typedef struct {
+ uint8_t *data;
+ int stride;
+ int in_use;
+} PRED_BUFFER;
+
int64_t vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
const struct TileInfo *const tile,
int mi_row, int mi_col,
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 9bd9bc64c..9402d4a4e 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -2066,9 +2066,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
return bsi->segment_rd;
}
-static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
- uint8_t *ref_y_buffer, int ref_y_stride,
- int ref_frame, BLOCK_SIZE block_size ) {
+void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
+ uint8_t *ref_y_buffer, int ref_y_stride,
+ int ref_frame, BLOCK_SIZE block_size) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
int_mv this_mv;
@@ -2207,12 +2207,12 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
}
-static void setup_pred_block(const MACROBLOCKD *xd,
- struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col,
- const struct scale_factors *scale,
- const struct scale_factors *scale_uv) {
+void vp9_setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
int i;
dst[0].buf = src->y_buffer;
@@ -2250,7 +2250,7 @@ void vp9_setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
// TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
// use the UV scaling factors.
- setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
+ vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
// Gets an initial list of candidate vectors from neighbours and orders them
vp9_find_mv_refs(cm, xd, tile, mi, ref_frame, candidates, mi_row, mi_col);
@@ -2264,8 +2264,8 @@ void vp9_setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
// in full and choose the best as the centre point for subsequent searches.
// The current implementation doesn't support scaling.
if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
- mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
- ref_frame, block_size);
+ vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
+ ref_frame, block_size);
}
const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi,
diff --git a/vp9/encoder/vp9_rdopt.h b/vp9/encoder/vp9_rdopt.h
index 4f5729389..3dfe2d07f 100644
--- a/vp9/encoder/vp9_rdopt.h
+++ b/vp9/encoder/vp9_rdopt.h
@@ -191,6 +191,16 @@ static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
+void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x,
+ uint8_t *ref_y_buffer, int ref_y_stride,
+ int ref_frame, BLOCK_SIZE block_size);
+
+void vp9_setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv);
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp9/encoder/vp9_speed_features.c b/vp9/encoder/vp9_speed_features.c
index 9c3fb5ea0..d7017f269 100644
--- a/vp9/encoder/vp9_speed_features.c
+++ b/vp9/encoder/vp9_speed_features.c
@@ -277,6 +277,9 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
// is checked for a partition block. Later, we can try to allow large
// partitions to do intra mode checking.
sf->max_intra_bsize = BLOCK_8X8;
+
+ // This feature is only enabled when partition search is disabled.
+ sf->reuse_inter_pred_sby = 1;
}
if (speed >= 7) {
@@ -342,6 +345,7 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
for (i = 0; i < BLOCK_SIZES; ++i)
sf->inter_mode_mask[i] = INTER_ALL;
sf->max_intra_bsize = BLOCK_64X64;
+ sf->reuse_inter_pred_sby = 0;
// This setting only takes effect when partition_search_type is set
// to FIXED_PARTITION.
sf->always_this_block_size = BLOCK_16X16;
diff --git a/vp9/encoder/vp9_speed_features.h b/vp9/encoder/vp9_speed_features.h
index f5d0b85e2..75070a70f 100644
--- a/vp9/encoder/vp9_speed_features.h
+++ b/vp9/encoder/vp9_speed_features.h
@@ -353,6 +353,11 @@ typedef struct SPEED_FEATURES {
// The threshold used in SOURCE_VAR_BASED_PARTITION search type.
unsigned int source_var_thresh;
+
+ // When partition is pre-set, the inter prediction result from pick_inter_mode
+ // can be reused in final block encoding process. It is enabled only for real-
+ // time mode speed 6.
+ int reuse_inter_pred_sby;
} SPEED_FEATURES;
struct VP9_COMP;
diff --git a/vp9/encoder/vp9_svc_layercontext.c b/vp9/encoder/vp9_svc_layercontext.c
index 84f344945..07c17b22a 100644
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -12,6 +12,7 @@
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
+#include "vp9/encoder/vp9_extend.h"
void vp9_init_layer_context(VP9_COMP *const cpi) {
SVC *const svc = &cpi->svc;
@@ -209,3 +210,101 @@ int vp9_is_upper_layer_key_frame(const VP9_COMP *const cpi) {
cpi->svc.spatial_layer_id > 0 &&
cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame;
}
+
+int vp9_svc_lookahead_push(const VP9_COMP *const cpi, struct lookahead_ctx *ctx,
+ YV12_BUFFER_CONFIG *src, int64_t ts_start,
+ int64_t ts_end, unsigned int flags) {
+ struct lookahead_entry *buf;
+ int i, index;
+
+ if (vp9_lookahead_push(ctx, src, ts_start, ts_end, flags))
+ return 1;
+
+ index = ctx->write_idx - 1;
+ if (index < 0)
+ index += ctx->max_sz;
+
+ buf = ctx->buf + index;
+
+ if (buf == NULL)
+ return 1;
+
+ // Store svc parameters for each layer
+ for (i = 0; i < cpi->svc.number_spatial_layers; ++i)
+ buf->svc_params[i] = cpi->svc.layer_context[i].svc_params_received;
+
+ return 0;
+}
+
+static int copy_svc_params(VP9_COMP *const cpi, struct lookahead_entry *buf) {
+ int layer_id;
+ vpx_svc_parameters_t *layer_param;
+ vpx_enc_frame_flags_t flags;
+
+ // Find the next layer to be encoded
+ for (layer_id = 0; layer_id < cpi->svc.number_spatial_layers; ++layer_id) {
+ if (buf->svc_params[layer_id].spatial_layer >=0)
+ break;
+ }
+
+ if (layer_id == cpi->svc.number_spatial_layers)
+ return 1;
+
+ layer_param = &buf->svc_params[layer_id];
+ buf->flags = flags = layer_param->flags;
+ cpi->svc.spatial_layer_id = layer_param->spatial_layer;
+ cpi->svc.temporal_layer_id = layer_param->temporal_layer;
+ cpi->lst_fb_idx = layer_param->lst_fb_idx;
+ cpi->gld_fb_idx = layer_param->gld_fb_idx;
+ cpi->alt_fb_idx = layer_param->alt_fb_idx;
+
+ if (vp9_set_size_literal(cpi, layer_param->width, layer_param->height) != 0)
+ return VPX_CODEC_INVALID_PARAM;
+
+ cpi->oxcf.worst_allowed_q =
+ vp9_quantizer_to_qindex(layer_param->max_quantizer);
+ cpi->oxcf.best_allowed_q =
+ vp9_quantizer_to_qindex(layer_param->min_quantizer);
+
+ vp9_change_config(cpi, &cpi->oxcf);
+
+ vp9_set_high_precision_mv(cpi, 1);
+
+ // Retrieve the encoding flags for each layer and apply it to encoder.
+ // It includes reference frame flags and update frame flags.
+ vp9_apply_encoding_flags(cpi, flags);
+
+ return 0;
+}
+
+struct lookahead_entry *vp9_svc_lookahead_peek(VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int index, int copy_params) {
+ struct lookahead_entry *buf = vp9_lookahead_peek(ctx, index);
+
+ if (buf != NULL && copy_params != 0) {
+ if (copy_svc_params(cpi, buf) != 0)
+ return NULL;
+ }
+ return buf;
+}
+
+struct lookahead_entry *vp9_svc_lookahead_pop(VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int drain) {
+ struct lookahead_entry *buf = NULL;
+
+ if (ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
+ buf = vp9_svc_lookahead_peek(cpi, ctx, 0, 1);
+ if (buf != NULL) {
+ // Only remove the buffer when pop the highest layer. Simply set the
+ // spatial_layer to -1 for lower layers.
+ buf->svc_params[cpi->svc.spatial_layer_id].spatial_layer = -1;
+ if (cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1) {
+ vp9_lookahead_pop(ctx, drain);
+ }
+ }
+ }
+
+ return buf;
+}
diff --git a/vp9/encoder/vp9_svc_layercontext.h b/vp9/encoder/vp9_svc_layercontext.h
index 36e2027fd..3ebb831b5 100644
--- a/vp9/encoder/vp9_svc_layercontext.h
+++ b/vp9/encoder/vp9_svc_layercontext.h
@@ -28,6 +28,7 @@ typedef struct {
struct vpx_fixed_buf rc_twopass_stats_in;
unsigned int current_video_frame_in_layer;
int is_key_frame;
+ vpx_svc_parameters_t svc_params_received;
} LAYER_CONTEXT;
typedef struct {
@@ -74,6 +75,23 @@ void vp9_inc_frame_in_layer(SVC *svc);
// Check if current layer is key frame in spatial upper layer
int vp9_is_upper_layer_key_frame(const struct VP9_COMP *const cpi);
+// Copy the source image, flags and svc parameters into a new framebuffer
+// with the expected stride/border
+int vp9_svc_lookahead_push(const struct VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end,
+ unsigned int flags);
+
+// Get the next source buffer to encode
+struct lookahead_entry *vp9_svc_lookahead_pop(struct VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int drain);
+
+// Get a future source buffer to encode
+struct lookahead_entry *vp9_svc_lookahead_peek(struct VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int index, int copy_params);
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp9/vp9_cx_iface.c b/vp9/vp9_cx_iface.c
index edd59ab74..b1501619e 100644
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -88,8 +88,8 @@ struct vpx_codec_alg_priv {
size_t pending_frame_magnitude;
vpx_image_t preview_img;
vp8_postproc_cfg_t preview_ppcfg;
- vpx_codec_pkt_list_decl(64) pkt_list;
- unsigned int fixed_kf_cntr;
+ vpx_codec_pkt_list_decl(128) pkt_list;
+ unsigned int fixed_kf_cntr;
};
static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
@@ -795,42 +795,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_INVALID_PARAM;
}
- if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF)) {
- int ref = 7;
-
- if (flags & VP8_EFLAG_NO_REF_LAST)
- ref ^= VP9_LAST_FLAG;
-
- if (flags & VP8_EFLAG_NO_REF_GF)
- ref ^= VP9_GOLD_FLAG;
-
- if (flags & VP8_EFLAG_NO_REF_ARF)
- ref ^= VP9_ALT_FLAG;
-
- vp9_use_as_reference(ctx->cpi, ref);
- }
-
- if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_FORCE_ARF)) {
- int upd = 7;
-
- if (flags & VP8_EFLAG_NO_UPD_LAST)
- upd ^= VP9_LAST_FLAG;
-
- if (flags & VP8_EFLAG_NO_UPD_GF)
- upd ^= VP9_GOLD_FLAG;
-
- if (flags & VP8_EFLAG_NO_UPD_ARF)
- upd ^= VP9_ALT_FLAG;
-
- vp9_update_reference(ctx->cpi, upd);
- }
-
- if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
- vp9_update_entropy(ctx->cpi, 0);
- }
+ vp9_apply_encoding_flags(ctx->cpi, flags);
// Handle fixed keyframe intervals
if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
@@ -843,7 +808,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
// Initialize the encoder instance on the first frame.
if (res == VPX_CODEC_OK && ctx->cpi != NULL) {
- unsigned int lib_flags;
+ unsigned int lib_flags = 0;
YV12_BUFFER_CONFIG sd;
int64_t dst_time_stamp, dst_end_time_stamp;
size_t size, cx_data_sz;
@@ -853,9 +818,6 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
((VP9_COMP *)ctx->cpi)->b_calculate_psnr = 1;
- // Convert API flags to internal codec lib flags
- lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
-
/* vp9 use 10,000,000 ticks/second as time stamp */
dst_time_stamp = (pts * 10000000 * ctx->cfg.g_timebase.num)
/ ctx->cfg.g_timebase.den;
@@ -865,7 +827,9 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
if (img != NULL) {
res = image2yuvconfig(img, &sd);
- if (vp9_receive_raw_frame(ctx->cpi, lib_flags,
+ // Store the original flags in to the frame buffer. Will extract the
+ // key frame flag when we actually encode this frame.
+ if (vp9_receive_raw_frame(ctx->cpi, flags,
&sd, dst_time_stamp, dst_end_time_stamp)) {
VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
res = update_error_state(ctx, &cpi->common.error);
@@ -874,7 +838,6 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
cx_data = ctx->cx_data;
cx_data_sz = ctx->cx_data_sz;
- lib_flags = 0;
/* Any pending invisible frames? */
if (ctx->pending_cx_data) {
@@ -902,7 +865,12 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
VP9_COMP *const cpi = (VP9_COMP *)ctx->cpi;
// Pack invisible frames with the next visible frame
- if (cpi->common.show_frame == 0) {
+ if (cpi->common.show_frame == 0
+#ifdef CONFIG_SPATIAL_SVC
+ || (cpi->use_svc && cpi->svc.number_temporal_layers == 1 &&
+ cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)
+#endif
+ ) {
if (ctx->pending_cx_data == 0)
ctx->pending_cx_data = cx_data;
ctx->pending_cx_data_sz += size;
@@ -925,7 +893,12 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
/ ctx->cfg.g_timebase.num / 10000000);
pkt.data.frame.flags = lib_flags << 16;
- if (lib_flags & FRAMEFLAGS_KEY)
+ if (lib_flags & FRAMEFLAGS_KEY
+#ifdef CONFIG_SPATIAL_SVC
+ || (cpi->use_svc && cpi->svc.number_temporal_layers == 1 &&
+ cpi->svc.layer_context[0].is_key_frame)
+#endif
+ )
pkt.data.frame.flags |= VPX_FRAME_IS_KEY;
if (cpi->common.show_frame == 0) {
@@ -1165,24 +1138,19 @@ static vpx_codec_err_t ctrl_set_svc_parameters(vpx_codec_alg_priv_t *ctx,
VP9_COMP *const cpi = ctx->cpi;
vpx_svc_parameters_t *const params = va_arg(args, vpx_svc_parameters_t *);
- if (params == NULL)
- return VPX_CODEC_INVALID_PARAM;
-
- cpi->svc.spatial_layer_id = params->spatial_layer;
- cpi->svc.temporal_layer_id = params->temporal_layer;
-
- cpi->lst_fb_idx = params->lst_fb_idx;
- cpi->gld_fb_idx = params->gld_fb_idx;
- cpi->alt_fb_idx = params->alt_fb_idx;
-
- if (vp9_set_size_literal(ctx->cpi, params->width, params->height) != 0)
+ if (params == NULL || params->spatial_layer < 0 ||
+ params->spatial_layer >= cpi->svc.number_spatial_layers)
return VPX_CODEC_INVALID_PARAM;
- ctx->cfg.rc_max_quantizer = params->max_quantizer;
- ctx->cfg.rc_min_quantizer = params->min_quantizer;
+ if (params->spatial_layer == 0) {
+ int i;
+ for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
+ cpi->svc.layer_context[i].svc_params_received.spatial_layer = -1;
+ }
+ }
- set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
- vp9_change_config(ctx->cpi, &ctx->oxcf);
+ cpi->svc.layer_context[params->spatial_layer].svc_params_received =
+ *params;
return VPX_CODEC_OK;
}
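[After the ctrl_set_svc_parameters() change above, the control no longer resizes or reconfigures the encoder on the spot; it only caches the vpx_svc_parameters_t per spatial layer, and copy_svc_params() (in vp9_svc_layercontext.c above) applies them when that layer is actually pulled from the lookahead. A hedged sketch of the caller side, assuming the VP9E_SET_SVC_PARAMETERS control ID from vpx/vp8cx.h and an initialized encoder context `codec`:]

    /* Set layer 0 first: per the new code above, spatial_layer == 0 resets the
     * cached parameters of every layer before this superframe is encoded. */
    vpx_svc_parameters_t params;
    memset(&params, 0, sizeof(params));
    params.spatial_layer  = 0;
    params.temporal_layer = 0;
    params.width  = 320;
    params.height = 240;
    params.min_quantizer = 30;
    params.max_quantizer = 40;
    params.lst_fb_idx = 0;
    params.gld_fb_idx = 1;
    params.alt_fb_idx = 2;
    /* Returns VPX_CODEC_INVALID_PARAM if spatial_layer is out of range. */
    vpx_codec_control(&codec, VP9E_SET_SVC_PARAMETERS, &params);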
diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c
index c3ca7ee8f..fd868ae73 100644
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -32,15 +32,19 @@ struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
vpx_codec_dec_cfg_t cfg;
vp9_stream_info_t si;
- struct VP9Decoder *pbi;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
vpx_image_t img;
- int img_avail;
int invert_tile_order;
int frame_parallel_decode; // frame-based threading.
+ int last_show_frame; // Index of last output frame.
+
+ VP9Worker *frame_workers;
+ int num_frame_workers;
+ int next_submit_thread_id;
+ int next_output_thread_id;
// External frame buffer info to save for VP9 common.
void *ext_priv; // Private data associated with the external frame buffers.
@@ -85,11 +89,17 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
}
static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
- if (ctx->pbi) {
- vp9_decoder_remove(ctx->pbi);
- ctx->pbi = NULL;
+ if (ctx->frame_workers != NULL) {
+ int i;
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ vp9_decoder_remove(worker_data->pbi);
+ vpx_free(worker_data);
+ }
}
+ vpx_free(ctx->frame_workers);
vpx_free(ctx);
return VPX_CODEC_OK;
@@ -188,32 +198,42 @@ static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_OK;
}
+static void set_error_detail(vpx_codec_alg_priv_t *ctx,
+ const char *const error) {
+ ctx->base.err_detail = error;
+}
+
static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
const struct vpx_internal_error_info *error) {
if (error->error_code)
- ctx->base.err_detail = error->has_detail ? error->detail : NULL;
+ set_error_detail(ctx, error->has_detail ? error->detail : NULL);
return error->error_code;
}
static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) {
- VP9_COMMON *const cm = &ctx->pbi->common;
-
- cm->new_fb_idx = -1;
-
- if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
- cm->get_fb_cb = ctx->get_ext_fb_cb;
- cm->release_fb_cb = ctx->release_ext_fb_cb;
- cm->cb_priv = ctx->ext_priv;
- } else {
- cm->get_fb_cb = vp9_get_frame_buffer;
- cm->release_fb_cb = vp9_release_frame_buffer;
+ int i;
+
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ VP9_COMMON *const cm = &worker_data->pbi->common;
+
+ cm->new_fb_idx = -1;
+ if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
+ cm->get_fb_cb = ctx->get_ext_fb_cb;
+ cm->release_fb_cb = ctx->release_ext_fb_cb;
+ cm->cb_priv = ctx->ext_priv;
+ } else {
+ cm->get_fb_cb = vp9_get_frame_buffer;
+ cm->release_fb_cb = vp9_release_frame_buffer;
- if (vp9_alloc_internal_frame_buffers(&cm->int_frame_buffers))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
- "Failed to initialize internal frame buffers");
+ if (vp9_alloc_internal_frame_buffers(&cm->int_frame_buffers))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to initialize internal frame buffers");
- cm->cb_priv = &cm->int_frame_buffers;
+ cm->cb_priv = &cm->int_frame_buffers;
+ }
}
}
@@ -232,14 +252,58 @@ static void set_ppflags(const vpx_codec_alg_priv_t *ctx,
flags->noise_level = ctx->postproc_cfg.noise_level;
}
-static void init_decoder(vpx_codec_alg_priv_t *ctx) {
- ctx->pbi = vp9_decoder_create();
- if (ctx->pbi == NULL)
- return;
+static int frame_worker_hook(void *arg1, void *arg2) {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)arg1;
+ const uint8_t *data = worker_data->data;
+ (void)arg2;
+ worker_data->result = vp9_receive_compressed_data(worker_data->pbi,
+ worker_data->data_size,
+ &data);
+ worker_data->data_end = data;
+ return !worker_data->result;
+}
+
+static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
+ int i;
+
+ ctx->last_show_frame = -1;
+ ctx->next_submit_thread_id = 0;
+ ctx->next_output_thread_id = 0;
+ ctx->num_frame_workers =
+ (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
- ctx->pbi->max_threads = ctx->cfg.threads;
- ctx->pbi->inv_tile_order = ctx->invert_tile_order;
- ctx->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
+ ctx->frame_workers = (VP9Worker *)
+ vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
+ if (ctx->frame_workers == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_workers");
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *worker_data = NULL;
+ vp9_worker_init(worker);
+ worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData));
+ if (worker->data1 == NULL) {
+ set_error_detail(ctx, "Failed to allocate worker_data");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ worker_data = (FrameWorkerData *)worker->data1;
+ worker_data->pbi = vp9_decoder_create();
+ if (worker_data->pbi == NULL) {
+ set_error_detail(ctx, "Failed to allocate worker_data");
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ // If decoding in serial mode, FrameWorker thread could create tile worker
+ // thread or loopfilter thread.
+ worker_data->pbi->max_threads =
+ (ctx->frame_parallel_decode == 0) ? ctx->cfg.threads : 0;
+
+ worker_data->pbi->inv_tile_order = ctx->invert_tile_order;
+ worker_data->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
+ worker->hook = (VP9WorkerHook)frame_worker_hook;
+ }
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
@@ -248,20 +312,16 @@ static void init_decoder(vpx_codec_alg_priv_t *ctx) {
set_default_ppflags(&ctx->postproc_cfg);
init_buffer_callbacks(ctx);
+
+ return VPX_CODEC_OK;
}
static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const uint8_t **data, unsigned int data_sz,
void *user_priv, int64_t deadline) {
- YV12_BUFFER_CONFIG sd;
- vp9_ppflags_t flags = {0, 0, 0};
- VP9_COMMON *cm = NULL;
-
+ vp9_ppflags_t flags = {0};
(void)deadline;
- vp9_zero(sd);
- ctx->img_avail = 0;
-
// Determine the stream parameters. Note that we rely on peek_si to
// validate that we have a buffer that does not wrap around the top
// of the heap.
@@ -276,33 +336,39 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_ERROR;
}
- // Initialize the decoder instance on the first frame
- if (ctx->pbi == NULL) {
- init_decoder(ctx);
- if (ctx->pbi == NULL)
- return VPX_CODEC_ERROR;
+ // Initialize the decoder workers on the first frame
+ if (ctx->frame_workers == NULL) {
+ const vpx_codec_err_t res = init_decoder(ctx);
+ if (res != VPX_CODEC_OK)
+ return res;
}
- // Set these even if already initialized. The caller may have changed the
- // decrypt config between frames.
- ctx->pbi->decrypt_cb = ctx->decrypt_cb;
- ctx->pbi->decrypt_state = ctx->decrypt_state;
+ if (!ctx->frame_parallel_decode) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ worker_data->data = *data;
+ worker_data->data_size = data_sz;
+ worker_data->user_priv = user_priv;
- cm = &ctx->pbi->common;
+ // Set these even if already initialized. The caller may have changed the
+ // decrypt config between frames.
+ worker_data->pbi->decrypt_cb = ctx->decrypt_cb;
+ worker_data->pbi->decrypt_state = ctx->decrypt_state;
- if (vp9_receive_compressed_data(ctx->pbi, data_sz, data))
- return update_error_state(ctx, &cm->error);
+ vp9_worker_execute(worker);
+ if (worker->had_error)
+ return update_error_state(ctx, &worker_data->pbi->common.error);
+
+ // Update data pointer after decode.
+ *data = worker_data->data_end;
+ } else {
+ // TODO(hkuang): Implement frame parallel decode.
+ return VPX_CODEC_INCAPABLE;
+ }
if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
set_ppflags(ctx, &flags);
- if (vp9_get_raw_frame(ctx->pbi, &sd, &flags))
- return update_error_state(ctx, &cm->error);
-
- yuvconfig2image(&ctx->img, &sd, user_priv);
- ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
- ctx->img_avail = 1;
-
return VPX_CODEC_OK;
}
@@ -412,7 +478,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
- ctx->base.err_detail = "Invalid frame size in index";
+ set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
@@ -430,7 +496,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
// Extra data detected after the frame.
if (data_start < data_end - 1) {
- ctx->base.err_detail = "Fail to decode frame in parallel mode";
+ set_error_detail(ctx, "Fail to decode frame in parallel mode");
return VPX_CODEC_INCAPABLE;
}
}
@@ -445,7 +511,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
- ctx->base.err_detail = "Invalid frame size in index";
+ set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
@@ -483,15 +549,31 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
vpx_image_t *img = NULL;
- if (ctx->img_avail) {
- // iter acts as a flip flop, so an image is only returned on the first
- // call to get_frame.
- if (!(*iter)) {
+ // iter acts as a flip flop, so an image is only returned on the first
+ // call to get_frame.
+ if (*iter == NULL && ctx->frame_workers != NULL) {
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0, 0, 0};
+
+ VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_thread_id];
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ if (vp9_get_raw_frame(worker_data->pbi, &sd, &flags) == 0) {
+ VP9_COMMON *const cm = &worker_data->pbi->common;
+ yuvconfig2image(&ctx->img, &sd, worker_data->user_priv);
+ ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
img = &ctx->img;
*iter = img;
+ // Decrease reference count of last output frame in frame parallel mode.
+ if (ctx->frame_parallel_decode && ctx->last_show_frame >= 0) {
+ --cm->frame_bufs[ctx->last_show_frame].ref_count;
+ if (cm->frame_bufs[ctx->last_show_frame].ref_count == 0) {
+ cm->release_fb_cb(cm->cb_priv,
+ &cm->frame_bufs[ctx->last_show_frame].raw_frame_buffer);
+ }
+ }
+ ctx->last_show_frame = worker_data->pbi->common.new_fb_idx;
}
}
- ctx->img_avail = 0;
return img;
}
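
For API users, the flip-flop iterator above is driven through the public vpx_codec_get_frame() call. A minimal consumer-side sketch, assuming a decoder context that has already been fed a packet with vpx_codec_decode() (error handling omitted; not part of this patch):

  #include "vpx/vpx_decoder.h"

  /* Drain all images produced by the last vpx_codec_decode() call.
   * In serial mode this loop returns at most one image, since *iter
   * flips after the first successful vpx_codec_get_frame(). */
  static void drain_decoded_images(vpx_codec_ctx_t *decoder) {
    vpx_codec_iter_t iter = NULL;
    vpx_image_t *img;
    while ((img = vpx_codec_get_frame(decoder, &iter)) != NULL) {
      /* consume img->planes[] / img->stride[] here */
    }
  }
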
@@ -502,7 +584,7 @@ static vpx_codec_err_t decoder_set_fb_fn(
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
return VPX_CODEC_INVALID_PARAM;
- } else if (ctx->pbi == NULL) {
+ } else if (ctx->frame_workers == NULL) {
// If the decoder has already been initialized, do not accept changes to
// the frame buffer functions.
ctx->get_ext_fb_cb = cb_get;
@@ -518,12 +600,19 @@ static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_ref_frame_t *const data = va_arg(args, vpx_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
vpx_ref_frame_t *const frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
-
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
- return vp9_set_reference_dec(&ctx->pbi->common,
+ return vp9_set_reference_dec(&worker_data->pbi->common,
(VP9_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -534,13 +623,19 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
- vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
YV12_BUFFER_CONFIG sd;
-
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
-
- return vp9_copy_reference_dec(ctx->pbi,
+ return vp9_copy_reference_dec(worker_data->pbi,
(VP9_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -551,11 +646,18 @@ static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
va_list args) {
vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
YV12_BUFFER_CONFIG* fb;
-
- vp9_get_reference_dec(ctx->pbi, data->idx, &fb);
- yuvconfig2image(&data->img, fb, NULL);
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ vp9_get_reference_dec(worker_data->pbi, data->idx, &fb);
+ yuvconfig2image(&data->img, fb, worker_data->user_priv);
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -592,11 +694,20 @@ static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const update_info = va_arg(args, int *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (update_info) {
- if (ctx->pbi)
- *update_info = ctx->pbi->refresh_frame_flags;
- else
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ *update_info = worker_data->pbi->refresh_frame_flags;
+ } else {
return VPX_CODEC_ERROR;
+ }
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -608,11 +719,20 @@ static vpx_codec_err_t ctrl_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *corrupted = va_arg(args, int *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (corrupted) {
- if (ctx->pbi)
- *corrupted = ctx->pbi->common.frame_to_show->corrupted;
- else
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ *corrupted = worker_data->pbi->common.frame_to_show->corrupted;
+ } else {
return VPX_CODEC_ERROR;
+ }
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
@@ -623,9 +743,17 @@ static vpx_codec_err_t ctrl_get_display_size(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const display_size = va_arg(args, int *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (display_size) {
- if (ctx->pbi) {
- const VP9_COMMON *const cm = &ctx->pbi->common;
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ const VP9_COMMON *const cm = &worker_data->pbi->common;
display_size[0] = cm->display_width;
display_size[1] = cm->display_height;
} else {
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index 9dbb67810..6a34f7e0f 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -105,11 +105,9 @@ VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm
ifeq ($(CONFIG_USE_X86INC),yes)
VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_dct_mmx.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_error_sse2.asm
-VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_error_intrin_avx2.c
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subtract_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_sse2.c
-VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_avx2.c
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance.asm
endif
@@ -124,7 +122,9 @@ VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt_x86_64.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct32x32_sse2.c
-VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct_avx2.c
VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct32x32_avx2.c
+VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct_avx2.c
+VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_error_intrin_avx2.c
+VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_avx2.c
VP9_CX_SRCS-yes := $(filter-out $(VP9_CX_SRCS_REMOVE-yes),$(VP9_CX_SRCS-yes))
diff --git a/vpx/src/svc_encodeframe.c b/vpx/src/svc_encodeframe.c
index 17e165bfb..4efba9c00 100644
--- a/vpx/src/svc_encodeframe.c
+++ b/vpx/src/svc_encodeframe.c
@@ -24,6 +24,7 @@
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
+#include "vpx_mem/vpx_mem.h"
#ifdef __MINGW32__
#define strtok_r strtok_s
@@ -47,6 +48,14 @@ _CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context);
static const char *DEFAULT_QUANTIZER_VALUES = "60,53,39,33,27";
static const char *DEFAULT_SCALE_FACTORS = "4/16,5/16,7/16,11/16,16/16";
+// One encoded frame
+typedef struct FrameData {
+ void *buf; // compressed data buffer
+ size_t size; // length of compressed data
+  vpx_codec_frame_flags_t flags;  // flags for this frame
+ struct FrameData *next;
+} FrameData;
+
typedef struct SvcInternal {
char options[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_options
char quantizers[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_quantizers
@@ -72,15 +81,15 @@ typedef struct SvcInternal {
// state variables
int encode_frame_count;
+ int frame_received;
int frame_within_gop;
vpx_enc_frame_flags_t enc_frame_flags;
int layers;
int layer;
int is_keyframe;
- size_t frame_size;
- size_t buffer_size;
- void *buffer;
+ FrameData *frame_list;
+ FrameData *frame_temp;
char *rc_stats_buf;
size_t rc_stats_buf_size;
@@ -90,128 +99,54 @@ typedef struct SvcInternal {
vpx_codec_ctx_t *codec_ctx;
} SvcInternal;
-// Superframe is used to generate an index of individual frames (i.e., layers)
-struct Superframe {
- int count;
- uint32_t sizes[SUPERFRAME_SLOTS];
- uint32_t magnitude;
- uint8_t buffer[SUPERFRAME_BUFFER_SIZE];
- size_t index_size;
-};
-
-// One encoded frame layer
-struct LayerData {
- void *buf; // compressed data buffer
- size_t size; // length of compressed data
- struct LayerData *next;
-};
-
-// create LayerData from encoder output
-static struct LayerData *ld_create(void *buf, size_t size) {
- struct LayerData *const layer_data =
- (struct LayerData *)malloc(sizeof(*layer_data));
- if (layer_data == NULL) {
+// create FrameData from encoder output
+static struct FrameData *fd_create(void *buf, size_t size,
+ vpx_codec_frame_flags_t flags) {
+ struct FrameData *const frame_data =
+ (struct FrameData *)vpx_malloc(sizeof(*frame_data));
+ if (frame_data == NULL) {
return NULL;
}
- layer_data->buf = malloc(size);
- if (layer_data->buf == NULL) {
- free(layer_data);
+ frame_data->buf = vpx_malloc(size);
+ if (frame_data->buf == NULL) {
+ vpx_free(frame_data);
return NULL;
}
- memcpy(layer_data->buf, buf, size);
- layer_data->size = size;
- return layer_data;
+ vpx_memcpy(frame_data->buf, buf, size);
+ frame_data->size = size;
+ frame_data->flags = flags;
+ return frame_data;
}
-// free LayerData
-static void ld_free(struct LayerData *layer_data) {
- if (layer_data) {
- if (layer_data->buf) {
- free(layer_data->buf);
- layer_data->buf = NULL;
- }
- free(layer_data);
+// free FrameData
+static void fd_free(struct FrameData *p) {
+ if (p) {
+ if (p->buf)
+ vpx_free(p->buf);
+ vpx_free(p);
}
}
-// add layer data to list
-static void ld_list_add(struct LayerData **list, struct LayerData *layer_data) {
- struct LayerData **p = list;
+// add FrameData to list
+static void fd_list_add(struct FrameData **list, struct FrameData *layer_data) {
+ struct FrameData **p = list;
while (*p != NULL) p = &(*p)->next;
*p = layer_data;
layer_data->next = NULL;
}
-// get accumulated size of layer data
-static size_t ld_list_get_buffer_size(struct LayerData *list) {
- struct LayerData *p;
- size_t size = 0;
-
- for (p = list; p != NULL; p = p->next) {
- size += p->size;
- }
- return size;
-}
-
-// copy layer data to buffer
-static void ld_list_copy_to_buffer(struct LayerData *list, uint8_t *buffer) {
- struct LayerData *p;
-
- for (p = list; p != NULL; p = p->next) {
- buffer[0] = 1;
- memcpy(buffer, p->buf, p->size);
- buffer += p->size;
- }
-}
-
-// free layer data list
-static void ld_list_free(struct LayerData *list) {
- struct LayerData *p = list;
+// free FrameData list
+static void fd_free_list(struct FrameData *list) {
+ struct FrameData *p = list;
while (p) {
list = list->next;
- ld_free(p);
+ fd_free(p);
p = list;
}
}
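
fd_list_add() above relies on the pointer-to-pointer tail walk, which handles an empty and a non-empty list with the same code path. A standalone sketch of that idiom with a hypothetical node type (illustration only, not part of the patch):

  #include <stddef.h>

  struct node {
    struct node *next;
  };

  /* Walk the links until *p is the terminating NULL, then overwrite it.
   * No special case is needed for an empty list: *p is then the head. */
  static void list_append(struct node **head, struct node *n) {
    struct node **p = head;
    while (*p != NULL)
      p = &(*p)->next;
    *p = n;
    n->next = NULL;
  }
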
-static void sf_create_index(struct Superframe *sf) {
- uint8_t marker = 0xc0;
- int i;
- uint32_t mag, mask;
- uint8_t *bufp;
-
- if (sf->count == 0 || sf->count >= 8) return;
-
- // Add the number of frames to the marker byte
- marker |= sf->count - 1;
-
- // Choose the magnitude
- for (mag = 0, mask = 0xff; mag < 4; ++mag) {
- if (sf->magnitude < mask) break;
- mask <<= 8;
- mask |= 0xff;
- }
- marker |= mag << 3;
-
- // Write the index
- sf->index_size = 2 + (mag + 1) * sf->count;
- bufp = sf->buffer;
-
- *bufp++ = marker;
- for (i = 0; i < sf->count; ++i) {
- int this_sz = sf->sizes[i];
- uint32_t j;
-
- for (j = 0; j <= mag; ++j) {
- *bufp++ = this_sz & 0xff;
- this_sz >>= 8;
- }
- }
- *bufp++ = marker;
-}
-
static SvcInternal *get_svc_internal(SvcContext *svc_ctx) {
if (svc_ctx == NULL) return NULL;
if (svc_ctx->internal == NULL) {
@@ -574,8 +509,6 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
// modify encoder configuration
enc_cfg->ss_number_layers = si->layers;
enc_cfg->ts_number_layers = 1; // Temporal layers not used in this encoder.
- // Lag in frames not currently supported
- enc_cfg->g_lag_in_frames = 0;
// TODO(ivanmaltz): determine if these values need to be set explicitly for
// svc, or if the normal default/override mechanism can be used
@@ -608,6 +541,34 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
return VPX_CODEC_OK;
}
+static void accumulate_frame_size_for_each_layer(SvcInternal *const si,
+ const uint8_t *const buf,
+ const size_t size) {
+ uint8_t marker = buf[size - 1];
+ if ((marker & 0xe0) == 0xc0) {
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frames;
+
+    // Read the leading index marker only if the index actually fits.
+    const uint8_t marker2 = (size >= index_sz) ? buf[size - index_sz] : 0;
+
+    if (size >= index_sz && marker2 == marker) {
+ // found a valid superframe index
+ uint32_t i, j;
+ const uint8_t *x = &buf[size - index_sz + 1];
+
+ // frames has a maximum of 8 and mag has a maximum of 4.
+ for (i = 0; i < frames; i++) {
+ uint32_t this_sz = 0;
+
+ for (j = 0; j < mag; j++)
+ this_sz |= (*x++) << (j * 8);
+ si->bytes_sum[i] += this_sz;
+ }
+ }
+ }
+}
+
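
The byte layout walked above is the standard VP9 superframe index: the final byte is a marker whose top three bits are 0b110, whose low three bits give the frame count minus one, and whose bits 3-4 give the per-frame size field width minus one; the same marker byte is repeated at the start of the index. A self-contained sketch that only validates such an index and reports the frame count (it mirrors the checks above but is not code from this patch):

  #include <stddef.h>
  #include <stdint.h>

  /* Return the number of frames described by a trailing superframe index,
   * or 0 if the buffer does not end in a valid index. */
  static int count_superframe_frames(const uint8_t *buf, size_t size) {
    if (size > 0) {
      const uint8_t marker = buf[size - 1];
      if ((marker & 0xe0) == 0xc0) {                /* 110x xxxx marker */
        const int frames = (marker & 0x7) + 1;      /* 1..8 frames */
        const int mag = ((marker >> 3) & 0x3) + 1;  /* 1..4 bytes per size */
        const size_t index_sz = 2 + (size_t)mag * frames;
        if (size >= index_sz && buf[size - index_sz] == marker)
          return frames;                            /* leading marker matches */
      }
    }
    return 0;
  }
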
// SVC Algorithm flags - these get mapped to VP8_EFLAG_* defined in vp8cx.h
// encoder should reference the last frame
@@ -846,15 +807,12 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
vpx_codec_err_t res;
vpx_codec_iter_t iter;
const vpx_codec_cx_pkt_t *cx_pkt;
- struct LayerData *cx_layer_list = NULL;
- struct LayerData *layer_data;
- struct Superframe superframe;
+ int layer_for_psnr = 0;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || codec_ctx == NULL || si == NULL) {
return VPX_CODEC_INVALID_PARAM;
}
- memset(&superframe, 0, sizeof(superframe));
svc_log_reset(svc_ctx);
si->rc_stats_buf_used = 0;
@@ -863,7 +821,6 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
si->frame_within_gop = 0;
}
si->is_keyframe = (si->frame_within_gop == 0);
- si->frame_size = 0;
if (rawimg != NULL) {
svc_log(svc_ctx, SVC_LOG_DEBUG,
@@ -872,124 +829,90 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
si->frame_within_gop);
}
- // encode each layer
- for (si->layer = 0; si->layer < si->layers; ++si->layer) {
- if (svc_ctx->encoding_mode == ALT_INTER_LAYER_PREDICTION_IP &&
- si->is_keyframe && (si->layer == 1 || si->layer == 3)) {
- svc_log(svc_ctx, SVC_LOG_DEBUG, "Skip encoding layer %d\n", si->layer);
- continue;
- }
-
- if (rawimg != NULL) {
+ if (rawimg != NULL) {
+ // encode each layer
+ for (si->layer = 0; si->layer < si->layers; ++si->layer) {
+ if (svc_ctx->encoding_mode == ALT_INTER_LAYER_PREDICTION_IP &&
+ si->is_keyframe && (si->layer == 1 || si->layer == 3)) {
+ svc_log(svc_ctx, SVC_LOG_DEBUG, "Skip encoding layer %d\n", si->layer);
+ continue;
+ }
calculate_enc_frame_flags(svc_ctx);
set_svc_parameters(svc_ctx, codec_ctx);
}
+ }
- res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration,
- si->enc_frame_flags, deadline);
- if (res != VPX_CODEC_OK) {
- return res;
- }
- // save compressed data
- iter = NULL;
- while ((cx_pkt = vpx_codec_get_cx_data(codec_ctx, &iter))) {
- switch (cx_pkt->kind) {
- case VPX_CODEC_CX_FRAME_PKT: {
- const uint32_t frame_pkt_size = (uint32_t)(cx_pkt->data.frame.sz);
- si->bytes_sum[si->layer] += frame_pkt_size;
- svc_log(svc_ctx, SVC_LOG_DEBUG,
- "SVC frame: %d, layer: %d, size: %u\n",
- si->encode_frame_count, si->layer, frame_pkt_size);
- layer_data =
- ld_create(cx_pkt->data.frame.buf, (size_t)frame_pkt_size);
- if (layer_data == NULL) {
- svc_log(svc_ctx, SVC_LOG_ERROR, "Error allocating LayerData\n");
- return VPX_CODEC_OK;
- }
- ld_list_add(&cx_layer_list, layer_data);
-
- // save layer size in superframe index
- superframe.sizes[superframe.count++] = frame_pkt_size;
- superframe.magnitude |= frame_pkt_size;
- break;
+ res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0,
+ deadline);
+ if (res != VPX_CODEC_OK) {
+ return res;
+ }
+ // save compressed data
+ iter = NULL;
+ while ((cx_pkt = vpx_codec_get_cx_data(codec_ctx, &iter))) {
+ switch (cx_pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT: {
+ fd_list_add(&si->frame_list, fd_create(cx_pkt->data.frame.buf,
+ cx_pkt->data.frame.sz,
+ cx_pkt->data.frame.flags));
+ accumulate_frame_size_for_each_layer(si, cx_pkt->data.frame.buf,
+ cx_pkt->data.frame.sz);
+
+ svc_log(svc_ctx, SVC_LOG_DEBUG, "SVC frame: %d, kf: %d, size: %d, "
+ "pts: %d\n", si->frame_received,
+ (cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? 1 : 0,
+ (int)cx_pkt->data.frame.sz, (int)cx_pkt->data.frame.pts);
+
+ ++si->frame_received;
+ layer_for_psnr = 0;
+ break;
+ }
+ case VPX_CODEC_PSNR_PKT: {
+ int i;
+ svc_log(svc_ctx, SVC_LOG_DEBUG,
+ "SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): "
+ "%2.3f %2.3f %2.3f %2.3f \n",
+ si->frame_received, layer_for_psnr,
+ cx_pkt->data.psnr.psnr[0], cx_pkt->data.psnr.psnr[1],
+ cx_pkt->data.psnr.psnr[2], cx_pkt->data.psnr.psnr[3]);
+ svc_log(svc_ctx, SVC_LOG_DEBUG,
+ "SVC frame: %d, layer: %d, SSE(Total/Y/U/V): "
+ "%2.3f %2.3f %2.3f %2.3f \n",
+ si->frame_received, layer_for_psnr,
+ cx_pkt->data.psnr.sse[0], cx_pkt->data.psnr.sse[1],
+ cx_pkt->data.psnr.sse[2], cx_pkt->data.psnr.sse[3]);
+ for (i = 0; i < COMPONENTS; i++) {
+ si->psnr_sum[layer_for_psnr][i] += cx_pkt->data.psnr.psnr[i];
+ si->sse_sum[layer_for_psnr][i] += cx_pkt->data.psnr.sse[i];
}
- case VPX_CODEC_PSNR_PKT: {
- int i;
- svc_log(svc_ctx, SVC_LOG_DEBUG,
- "SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): "
- "%2.3f %2.3f %2.3f %2.3f \n",
- si->encode_frame_count, si->layer,
- cx_pkt->data.psnr.psnr[0], cx_pkt->data.psnr.psnr[1],
- cx_pkt->data.psnr.psnr[2], cx_pkt->data.psnr.psnr[3]);
- svc_log(svc_ctx, SVC_LOG_DEBUG,
- "SVC frame: %d, layer: %d, SSE(Total/Y/U/V): "
- "%2.3f %2.3f %2.3f %2.3f \n",
- si->encode_frame_count, si->layer,
- cx_pkt->data.psnr.sse[0], cx_pkt->data.psnr.sse[1],
- cx_pkt->data.psnr.sse[2], cx_pkt->data.psnr.sse[3]);
- for (i = 0; i < COMPONENTS; i++) {
- si->psnr_sum[si->layer][i] += cx_pkt->data.psnr.psnr[i];
- si->sse_sum[si->layer][i] += cx_pkt->data.psnr.sse[i];
+ ++layer_for_psnr;
+ break;
+ }
+ case VPX_CODEC_STATS_PKT: {
+ size_t new_size = si->rc_stats_buf_used +
+ cx_pkt->data.twopass_stats.sz;
+
+ if (new_size > si->rc_stats_buf_size) {
+ char *p = (char*)realloc(si->rc_stats_buf, new_size);
+ if (p == NULL) {
+ svc_log(svc_ctx, SVC_LOG_ERROR, "Error allocating stats buf\n");
+ return VPX_CODEC_MEM_ERROR;
}
- break;
+ si->rc_stats_buf = p;
+ si->rc_stats_buf_size = new_size;
}
- case VPX_CODEC_STATS_PKT: {
- size_t new_size = si->rc_stats_buf_used +
- cx_pkt->data.twopass_stats.sz;
-
- if (new_size > si->rc_stats_buf_size) {
- char *p = (char*)realloc(si->rc_stats_buf, new_size);
- if (p == NULL) {
- svc_log(svc_ctx, SVC_LOG_ERROR, "Error allocating stats buf\n");
- break;
- }
- si->rc_stats_buf = p;
- si->rc_stats_buf_size = new_size;
- }
- memcpy(si->rc_stats_buf + si->rc_stats_buf_used,
- cx_pkt->data.twopass_stats.buf, cx_pkt->data.twopass_stats.sz);
- si->rc_stats_buf_used += cx_pkt->data.twopass_stats.sz;
- break;
- }
- default: {
- break;
- }
+ memcpy(si->rc_stats_buf + si->rc_stats_buf_used,
+ cx_pkt->data.twopass_stats.buf, cx_pkt->data.twopass_stats.sz);
+ si->rc_stats_buf_used += cx_pkt->data.twopass_stats.sz;
+ break;
}
- }
- if (rawimg == NULL) {
- break;
- }
- }
- if (codec_ctx->config.enc->g_pass != VPX_RC_FIRST_PASS) {
- // add superframe index to layer data list
- sf_create_index(&superframe);
- layer_data = ld_create(superframe.buffer, superframe.index_size);
- ld_list_add(&cx_layer_list, layer_data);
-
- // get accumulated size of layer data
- si->frame_size = ld_list_get_buffer_size(cx_layer_list);
- if (si->frame_size > 0) {
- // all layers encoded, create single buffer with concatenated layers
- if (si->frame_size > si->buffer_size) {
- free(si->buffer);
- si->buffer = malloc(si->frame_size);
- if (si->buffer == NULL) {
- ld_list_free(cx_layer_list);
- return VPX_CODEC_MEM_ERROR;
- }
- si->buffer_size = si->frame_size;
+ default: {
+ break;
}
- // copy layer data into packet
- ld_list_copy_to_buffer(cx_layer_list, (uint8_t *)si->buffer);
-
- ld_list_free(cx_layer_list);
-
- svc_log(svc_ctx, SVC_LOG_DEBUG, "SVC frame: %d, kf: %d, size: %d, "
- "pts: %d\n", si->encode_frame_count, si->is_keyframe,
- (int)si->frame_size, (int)pts);
}
}
+
if (rawimg != NULL) {
++si->frame_within_gop;
++si->encode_frame_count;
@@ -1004,16 +927,27 @@ const char *vpx_svc_get_message(const SvcContext *svc_ctx) {
return si->message_buffer;
}
-void *vpx_svc_get_buffer(const SvcContext *svc_ctx) {
- const SvcInternal *const si = get_const_svc_internal(svc_ctx);
- if (svc_ctx == NULL || si == NULL) return NULL;
- return si->buffer;
+// We will maintain a list of output frame buffers since with lag_in_frames
+// we need to output all frame buffers at the end. vpx_svc_get_buffer() will
+// remove a frame buffer from the list and hold it in a temporary pointer,
+// which is freed on the next vpx_svc_get_buffer() call or when the encoder
+// is released.
+void *vpx_svc_get_buffer(SvcContext *svc_ctx) {
+ SvcInternal *const si = get_svc_internal(svc_ctx);
+ if (svc_ctx == NULL || si == NULL || si->frame_list == NULL) return NULL;
+
+ if (si->frame_temp)
+ fd_free(si->frame_temp);
+
+ si->frame_temp = si->frame_list;
+ si->frame_list = si->frame_list->next;
+
+ return si->frame_temp->buf;
}
size_t vpx_svc_get_frame_size(const SvcContext *svc_ctx) {
const SvcInternal *const si = get_const_svc_internal(svc_ctx);
- if (svc_ctx == NULL || si == NULL) return 0;
- return si->frame_size;
+ if (svc_ctx == NULL || si == NULL || si->frame_list == NULL) return 0;
+ return si->frame_list->size;
}
int vpx_svc_get_encode_frame_count(const SvcContext *svc_ctx) {
@@ -1024,8 +958,8 @@ int vpx_svc_get_encode_frame_count(const SvcContext *svc_ctx) {
int vpx_svc_is_keyframe(const SvcContext *svc_ctx) {
const SvcInternal *const si = get_const_svc_internal(svc_ctx);
- if (svc_ctx == NULL || si == NULL) return 0;
- return si->is_keyframe;
+ if (svc_ctx == NULL || si == NULL || si->frame_list == NULL) return 0;
+ return (si->frame_list->flags & VPX_FRAME_IS_KEY) != 0;
}
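
Since vpx_svc_get_buffer() pops the head of the list, the per-frame getters (size, keyframe flag) have to be read before the pop. A minimal consumer sketch, assuming svc_ctx was set up with vpx_svc_init() and fed through vpx_svc_encode(); handle_frame() is a hypothetical sink, not part of this API:

  size_t frame_size;
  while ((frame_size = vpx_svc_get_frame_size(&svc_ctx)) > 0) {
    const int keyframe = vpx_svc_is_keyframe(&svc_ctx);  /* flags of the head frame */
    /* vpx_svc_get_buffer() removes the head frame; its buffer stays valid
     * until the next vpx_svc_get_buffer() call or vpx_svc_release(). */
    handle_frame(vpx_svc_get_buffer(&svc_ctx), frame_size, keyframe);
  }
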
void vpx_svc_set_keyframe(SvcContext *svc_ctx) {
@@ -1112,7 +1046,8 @@ void vpx_svc_release(SvcContext *svc_ctx) {
// SvcInternal if it was not already allocated
si = (SvcInternal *)svc_ctx->internal;
if (si != NULL) {
- free(si->buffer);
+ fd_free(si->frame_temp);
+ fd_free_list(si->frame_list);
if (si->rc_stats_buf) {
free(si->rc_stats_buf);
}
diff --git a/vpx/svc_context.h b/vpx/svc_context.h
index 5d0fbbd77..058ee2094 100644
--- a/vpx/svc_context.h
+++ b/vpx/svc_context.h
@@ -104,14 +104,16 @@ const char *vpx_svc_dump_statistics(SvcContext *svc_ctx);
const char *vpx_svc_get_message(const SvcContext *svc_ctx);
/**
- * return size of encoded data to be returned by vpx_svc_get_buffer
+ * return size of encoded data to be returned by vpx_svc_get_buffer.
+ * It must be called before vpx_svc_get_buffer().
*/
size_t vpx_svc_get_frame_size(const SvcContext *svc_ctx);
/**
- * return buffer with encoded data
+ * return buffer with encoded data. The encoder maintains a list of frame
+ * buffers; each call to vpx_svc_get_buffer() returns one frame.
*/
-void *vpx_svc_get_buffer(const SvcContext *svc_ctx);
+void *vpx_svc_get_buffer(SvcContext *svc_ctx);
/**
* return size of two pass rate control stats data to be returned by