-rw-r--r--   vp8/encoder/firstpass.c          3
-rw-r--r--   vp9/common/vp9_thread_common.c  84
-rw-r--r--   vp9/common/vp9_thread_common.h   2
-rw-r--r--   vp9/decoder/vp9_decodeframe.c    4
-rw-r--r--   vp9/encoder/vp9_encoder.c        5
-rw-r--r--   vpx_ports/mem.h                 14
6 files changed, 64 insertions, 48 deletions
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 72c9642fe..981c0fde3 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -2072,9 +2072,10 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
* score, otherwise it may be worse off than an "un-boosted" frame
*/
else {
+ // Avoid division by 0 by clamping cpi->twopass.kf_group_error_left to a minimum of 1
int alt_gf_bits =
(int)((double)cpi->twopass.kf_group_bits * mod_frame_err /
- DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left));
+ (double)VPXMAX(cpi->twopass.kf_group_error_left, 1));
if (alt_gf_bits > gf_bits) {
gf_bits = alt_gf_bits;
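
The old DOUBLE_DIVIDE_CHECK macro nudges a floating-point denominator away from zero by adding a tiny epsilon; the patch instead clamps the integer denominator before the conversion to double, so a kf_group_error_left of 0 divides by 1.0. A minimal sketch of the arithmetic, using hypothetical stand-in values and VPXMAX written out as defined in vpx_dsp/vpx_dsp_common.h:

#include <stdio.h>

#define VPXMAX(x, y) (((x) > (y)) ? (x) : (y))

int main(void) {
  /* Hypothetical stand-ins for the cpi->twopass fields. */
  long long kf_group_bits = 1000000;
  double mod_frame_err = 0.25;
  int kf_group_error_left = 0; /* the problematic case: would divide by 0 */

  /* After the patch: the denominator is clamped to at least 1. */
  int alt_gf_bits = (int)((double)kf_group_bits * mod_frame_err /
                          (double)VPXMAX(kf_group_error_left, 1));
  printf("alt_gf_bits = %d\n", alt_gf_bits); /* prints 250000 */
  return 0;
}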
diff --git a/vp9/common/vp9_thread_common.c b/vp9/common/vp9_thread_common.c
index c79d9b7f0..b3d50162b 100644
--- a/vp9/common/vp9_thread_common.c
+++ b/vp9/common/vp9_thread_common.c
@@ -298,7 +298,10 @@ void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
pthread_cond_init(&lf_sync->cond[i], NULL);
}
}
- pthread_mutex_init(&lf_sync->lf_mutex, NULL);
+
+ CHECK_MEM_ERROR(cm, lf_sync->lf_mutex,
+ vpx_malloc(sizeof(*lf_sync->lf_mutex)));
+ pthread_mutex_init(lf_sync->lf_mutex, NULL);
CHECK_MEM_ERROR(cm, lf_sync->recon_done_mutex,
vpx_malloc(sizeof(*lf_sync->recon_done_mutex) * rows));
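
lf_mutex now lives on the heap, so the allocation is checked (via CHECK_MEM_ERROR) before pthread_mutex_init touches the memory. A sketch of the allocate-then-init pattern, with plain malloc standing in for vpx_malloc and the error macro:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  /* Allocate first; initialize the mutex only if the allocation worked. */
  pthread_mutex_t *lf_mutex = malloc(sizeof(*lf_mutex));
  if (lf_mutex == NULL) {
    fprintf(stderr, "allocation failed\n");
    return 1;
  }
  pthread_mutex_init(lf_mutex, NULL);

  pthread_mutex_lock(lf_mutex);
  /* ... critical section ... */
  pthread_mutex_unlock(lf_mutex);

  /* Teardown mirrors vp9_loop_filter_dealloc(): destroy, then free. */
  pthread_mutex_destroy(lf_mutex);
  free(lf_mutex);
  return 0;
}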
@@ -339,47 +342,50 @@ void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
// Deallocate lf synchronization related mutex and data
void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) {
- if (lf_sync != NULL) {
+ assert(lf_sync != NULL);
+
#if CONFIG_MULTITHREAD
+ if (lf_sync->mutex != NULL) {
int i;
-
- if (lf_sync->mutex != NULL) {
- for (i = 0; i < lf_sync->rows; ++i) {
- pthread_mutex_destroy(&lf_sync->mutex[i]);
- }
- vpx_free(lf_sync->mutex);
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_mutex_destroy(&lf_sync->mutex[i]);
}
- if (lf_sync->cond != NULL) {
- for (i = 0; i < lf_sync->rows; ++i) {
- pthread_cond_destroy(&lf_sync->cond[i]);
- }
- vpx_free(lf_sync->cond);
+ vpx_free(lf_sync->mutex);
+ }
+ if (lf_sync->cond != NULL) {
+ int i;
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_cond_destroy(&lf_sync->cond[i]);
}
- if (lf_sync->recon_done_mutex != NULL) {
- int i;
- for (i = 0; i < lf_sync->rows; ++i) {
- pthread_mutex_destroy(&lf_sync->recon_done_mutex[i]);
- }
- vpx_free(lf_sync->recon_done_mutex);
+ vpx_free(lf_sync->cond);
+ }
+ if (lf_sync->recon_done_mutex != NULL) {
+ int i;
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_mutex_destroy(&lf_sync->recon_done_mutex[i]);
}
+ vpx_free(lf_sync->recon_done_mutex);
+ }
- pthread_mutex_destroy(&lf_sync->lf_mutex);
- if (lf_sync->recon_done_cond != NULL) {
- int i;
- for (i = 0; i < lf_sync->rows; ++i) {
- pthread_cond_destroy(&lf_sync->recon_done_cond[i]);
- }
- vpx_free(lf_sync->recon_done_cond);
+ if (lf_sync->lf_mutex != NULL) {
+ pthread_mutex_destroy(lf_sync->lf_mutex);
+ vpx_free(lf_sync->lf_mutex);
+ }
+ if (lf_sync->recon_done_cond != NULL) {
+ int i;
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_cond_destroy(&lf_sync->recon_done_cond[i]);
}
+ vpx_free(lf_sync->recon_done_cond);
+ }
#endif // CONFIG_MULTITHREAD
- vpx_free(lf_sync->lfdata);
- vpx_free(lf_sync->cur_sb_col);
- vpx_free(lf_sync->num_tiles_done);
- // clear the structure as the source of this call may be a resize in which
- // case this call will be followed by an _alloc() which may fail.
- vp9_zero(*lf_sync);
- }
+ vpx_free(lf_sync->lfdata);
+ vpx_free(lf_sync->cur_sb_col);
+ vpx_free(lf_sync->num_tiles_done);
+ // clear the structure as the source of this call may be a resize in which
+ // case this call will be followed by an _alloc() which may fail.
+ vp9_zero(*lf_sync);
}
static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) {
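
The restructured dealloc drops the outer lf_sync != NULL guard (now an assert) and NULL-checks each member individually, so it can safely tear down a partially completed _alloc(). A sketch of the per-array idiom, with a hypothetical row count:

#include <pthread.h>
#include <stdlib.h>

/* Destroy and free an array of mutexes that may never have been
 * allocated; NULL simply means "nothing to clean up". */
static void destroy_mutex_array(pthread_mutex_t *mutexes, int rows) {
  if (mutexes != NULL) {
    int i;
    for (i = 0; i < rows; ++i) pthread_mutex_destroy(&mutexes[i]);
    free(mutexes);
  }
}

int main(void) {
  const int rows = 4; /* hypothetical */
  pthread_mutex_t *m = malloc(sizeof(*m) * rows);
  if (m != NULL) {
    int i;
    for (i = 0; i < rows; ++i) pthread_mutex_init(&m[i], NULL);
  }
  destroy_mutex_array(m, rows);    /* allocated case */
  destroy_mutex_array(NULL, rows); /* never-allocated case is a no-op */
  return 0;
}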
@@ -390,7 +396,7 @@ static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) {
#if CONFIG_MULTITHREAD
const int tile_cols = 1 << cm->log2_tile_cols;
- pthread_mutex_lock(&lf_sync->lf_mutex);
+ pthread_mutex_lock(lf_sync->lf_mutex);
if (cm->lf_row < max_rows) {
cur_row = cm->lf_row >> MI_BLOCK_SIZE_LOG2;
return_val = cm->lf_row;
@@ -401,7 +407,7 @@ static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) {
cur_row += 1;
}
}
- pthread_mutex_unlock(&lf_sync->lf_mutex);
+ pthread_mutex_unlock(lf_sync->lf_mutex);
if (return_val == -1) return return_val;
@@ -411,7 +417,7 @@ static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) {
&lf_sync->recon_done_mutex[cur_row]);
}
pthread_mutex_unlock(&lf_sync->recon_done_mutex[cur_row]);
- pthread_mutex_lock(&lf_sync->lf_mutex);
+ pthread_mutex_lock(lf_sync->lf_mutex);
if (lf_sync->corrupted) {
int row = return_val >> MI_BLOCK_SIZE_LOG2;
pthread_mutex_lock(&lf_sync->mutex[row]);
@@ -420,7 +426,7 @@ static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) {
pthread_mutex_unlock(&lf_sync->mutex[row]);
return_val = -1;
}
- pthread_mutex_unlock(&lf_sync->lf_mutex);
+ pthread_mutex_unlock(lf_sync->lf_mutex);
#else
(void)lf_sync;
if (cm->lf_row < max_rows) {
@@ -455,9 +461,9 @@ void vp9_loopfilter_rows(LFWorkerData *lf_data, VP9LfSync *lf_sync) {
void vp9_set_row(VP9LfSync *lf_sync, int num_tiles, int row, int is_last_row,
int corrupted) {
#if CONFIG_MULTITHREAD
- pthread_mutex_lock(&lf_sync->lf_mutex);
+ pthread_mutex_lock(lf_sync->lf_mutex);
lf_sync->corrupted |= corrupted;
- pthread_mutex_unlock(&lf_sync->lf_mutex);
+ pthread_mutex_unlock(lf_sync->lf_mutex);
pthread_mutex_lock(&lf_sync->recon_done_mutex[row]);
lf_sync->num_tiles_done[row] += 1;
if (num_tiles == lf_sync->num_tiles_done[row]) {
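
The corrupted flag is shared by all loop-filter workers, so vp9_set_row() ORs each worker's status into it while holding lf_mutex (now locked through the pointer). A reduced sketch of that guard:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t g_lf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int g_corrupted = 0;

/* Mirrors the pattern in vp9_set_row(): fold a worker's status into the
 * shared flag only while the mutex is held. */
static void set_corrupted(int corrupted) {
  pthread_mutex_lock(&g_lf_mutex);
  g_corrupted |= corrupted;
  pthread_mutex_unlock(&g_lf_mutex);
}

int main(void) {
  set_corrupted(0);
  set_corrupted(1); /* one corrupt worker taints the whole frame */
  printf("corrupted = %d\n", g_corrupted);
  return 0;
}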
diff --git a/vp9/common/vp9_thread_common.h b/vp9/common/vp9_thread_common.h
index 94c9de659..5df0117f1 100644
--- a/vp9/common/vp9_thread_common.h
+++ b/vp9/common/vp9_thread_common.h
@@ -40,7 +40,7 @@ typedef struct VP9LfSyncData {
int num_active_workers; // number of scheduled workers.
#if CONFIG_MULTITHREAD
- pthread_mutex_t lf_mutex;
+ pthread_mutex_t *lf_mutex;
pthread_mutex_t *recon_done_mutex;
pthread_cond_t *recon_done_cond;
#endif
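
Turning the embedded pthread_mutex_t into a pointer is what makes the NULL checks above possible: after vp9_zero() the field is NULL, so dealloc can tell "never initialized" apart from "initialized" and avoid destroying an uninitialized mutex. A trimmed-down sketch of the struct (hypothetical name):

#include <pthread.h>
#include <string.h>

typedef struct {
  pthread_mutex_t *lf_mutex; /* NULL until _alloc() runs */
} LfSyncSketch;

int main(void) {
  LfSyncSketch sync;
  memset(&sync, 0, sizeof(sync)); /* what vp9_zero() boils down to */

  /* Safe even if _alloc() never ran or failed part-way: the NULL check
   * skips pthread_mutex_destroy() on memory that was never initialized. */
  if (sync.lf_mutex != NULL) {
    pthread_mutex_destroy(sync.lf_mutex);
  }
  return 0;
}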
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 9e9f1eda9..e8b386994 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -1733,9 +1733,9 @@ static int lpf_map_write_check(VP9LfSync *lf_sync, int row, int num_tile_cols) {
int return_val = 0;
#if CONFIG_MULTITHREAD
int corrupted;
- pthread_mutex_lock(&lf_sync->lf_mutex);
+ pthread_mutex_lock(lf_sync->lf_mutex);
corrupted = lf_sync->corrupted;
- pthread_mutex_unlock(&lf_sync->lf_mutex);
+ pthread_mutex_unlock(lf_sync->lf_mutex);
if (!corrupted) {
pthread_mutex_lock(&lf_sync->recon_done_mutex[row]);
lf_sync->num_tiles_done[row] += 1;
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index 8ca7eb653..c7610ef06 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -2171,6 +2171,8 @@ static void init_ref_frame_bufs(VP9_COMMON *cm) {
}
}
+static void init_motion_estimation(VP9_COMP *cpi);
+
static void update_initial_width(VP9_COMP *cpi, int use_highbitdepth,
int subsampling_x, int subsampling_y) {
VP9_COMMON *const cm = &cpi->common;
@@ -2190,7 +2192,8 @@ static void update_initial_width(VP9_COMP *cpi, int use_highbitdepth,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth = use_highbitdepth;
#endif
-
+ alloc_util_frame_buffers(cpi);
+ init_motion_estimation(cpi);
cpi->initial_width = cm->width;
cpi->initial_height = cm->height;
cpi->initial_mbs = cm->MBs;
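
update_initial_width() now (re)allocates the utility frame buffers and reinitializes motion estimation; since init_motion_estimation() is defined later in vp9_encoder.c, a forward declaration is added above the caller. A minimal sketch of that pattern:

#include <stdio.h>

/* Forward declaration: the definition follows later in the file, but
 * callers above it can already reference the function. */
static void init_motion_estimation(void);

static void update_initial_width(void) {
  /* ... set dimensions and allocate buffers first ... */
  init_motion_estimation();
}

static void init_motion_estimation(void) {
  printf("motion estimation (re)initialized\n");
}

int main(void) {
  update_initial_width();
  return 0;
}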
diff --git a/vpx_ports/mem.h b/vpx_ports/mem.h
index 737e9e071..4e9041304 100644
--- a/vpx_ports/mem.h
+++ b/vpx_ports/mem.h
@@ -51,13 +51,19 @@
#define VPX_WITH_ASAN 0
#endif // __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+#if !defined(__has_attribute)
+#define __has_attribute(x) 0
+#endif // !defined(__has_attribute)
+
#if __has_attribute(uninitialized)
-// Attribute disables -ftrivial-auto-var-init=pattern for specific variables.
+// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
+// the specified variable.
+//
// -ftrivial-auto-var-init is security risk mitigation feature, so attribute
// should not be used "just in case", but only to fix real performance
-// bottlenecks when other approaches do not work. In general compiler is quite
-// effective eleminating unneeded initializations introduced by the flag, e.g.
-// when they are followed by actual initialization by a program.
+// bottlenecks when other approaches do not work. In general the compiler is
+// quite effective at eliminating unneeded initializations introduced by the
+// flag, e.g. when they are followed by actual initialization by a program.
// However if compiler optimization fails and code refactoring is hard, the
// attribute can be used as a workaround.
#define VPX_UNINITIALIZED __attribute__((uninitialized))
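
Defining __has_attribute(x) as 0 when the compiler does not provide it keeps the #if above valid everywhere: compilers without the builtin (e.g. older GCC or MSVC) simply take the empty-definition branch. A self-contained sketch of the same shim, with a hypothetical macro name in place of VPX_UNINITIALIZED:

#include <stdio.h>

/* Portability shim: compilers lacking __has_attribute see a macro that
 * always answers "no". */
#if !defined(__has_attribute)
#define __has_attribute(x) 0
#endif

#if __has_attribute(uninitialized)
#define MY_UNINITIALIZED __attribute__((uninitialized))
#else
#define MY_UNINITIALIZED
#endif

int main(void) {
  /* Under clang with -ftrivial-auto-var-init=pattern, the attribute opts
   * this buffer out of the forced pattern fill; elsewhere it expands to
   * nothing and the declaration is unchanged. */
  char buf[64] MY_UNINITIALIZED;
  buf[0] = '\0';
  printf("buf size: %zu\n", sizeof buf);
  return 0;
}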