summaryrefslogtreecommitdiff
path: root/vp9/common
diff options
context:
space:
mode:
Diffstat (limited to 'vp9/common')
-rw-r--r--vp9/common/vp9_blockd.h35
-rw-r--r--vp9/common/vp9_coefupdateprobs.h8
-rw-r--r--vp9/common/vp9_convolve.c474
-rw-r--r--vp9/common/vp9_convolve.h44
-rw-r--r--vp9/common/vp9_default_coef_probs.h343
-rw-r--r--vp9/common/vp9_entropy.c1394
-rw-r--r--vp9/common/vp9_entropy.h143
-rw-r--r--vp9/common/vp9_mbpitch.c22
-rw-r--r--vp9/common/vp9_onyxc_int.h52
-rw-r--r--vp9/common/vp9_pred_common.c38
-rw-r--r--vp9/common/vp9_pred_common.h5
-rw-r--r--vp9/common/vp9_recon.c85
-rw-r--r--vp9/common/vp9_reconinter.c798
-rw-r--r--vp9/common/vp9_reconinter.h78
-rw-r--r--vp9/common/vp9_reconintra.c39
-rw-r--r--vp9/common/vp9_reconintra.h2
-rw-r--r--vp9/common/vp9_reconintra4x4.c23
-rw-r--r--vp9/common/vp9_rtcd_defs.sh71
-rw-r--r--vp9/common/x86/vp9_recon_wrapper_sse2.c22
19 files changed, 441 insertions, 3235 deletions
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index d30cd4960..0baadfc14 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -282,9 +282,6 @@ typedef struct {
INTERPOLATIONFILTERTYPE interp_filter;
BLOCK_SIZE_TYPE sb_type;
-#if CONFIG_CODE_NONZEROCOUNT
- uint16_t nzcs[256+64*2];
-#endif
} MB_MODE_INFO;
typedef struct {
@@ -329,15 +326,16 @@ struct scale_factors {
int den,
int offset_q4);
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
- convolve_fn_t predict[2][2][8]; // horiz, vert, weight (0 - 7)
-#else
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
-#endif
};
enum { MAX_MB_PLANE = 3 };
+struct buf_2d {
+ uint8_t *buf;
+ int stride;
+};
+
struct mb_plane {
DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
@@ -346,6 +344,8 @@ struct mb_plane {
PLANE_TYPE plane_type;
int subsampling_x;
int subsampling_y;
+ struct buf_2d dst;
+ struct buf_2d pre[2];
};
#define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n))
@@ -356,17 +356,11 @@ struct mb_plane {
BLOCK_OFFSET((x)->plane[2].field, ((i) - 20), 16))
typedef struct macroblockd {
-#if CONFIG_CODE_NONZEROCOUNT
- DECLARE_ALIGNED(16, uint16_t, nzcs[256+64*2]);
-#endif
struct mb_plane plane[MAX_MB_PLANE];
/* 16 Y blocks, 4 U, 4 V, each with 16 entries. */
BLOCKD block[24];
- YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
- YV12_BUFFER_CONFIG second_pre;
- YV12_BUFFER_CONFIG dst;
struct scale_factors scale_factor[2];
struct scale_factors scale_factor_uv[2];
@@ -747,12 +741,6 @@ static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
return size;
}
-#if CONFIG_CODE_NONZEROCOUNT
-static int get_nzc_used(TX_SIZE tx_size) {
- return (tx_size >= TX_16X16);
-}
-#endif
-
struct plane_block_idx {
int plane;
int block;
@@ -761,7 +749,7 @@ struct plane_block_idx {
// TODO(jkoleszar): returning a struct so it can be used in a const context,
// expect to refactor this further later.
static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
- int b_idx) {
+ int b_idx) {
const int v_offset = y_blocks * 5 / 4;
struct plane_block_idx res;
@@ -933,6 +921,9 @@ static INLINE void foreach_predicted_block_uv(
}
}
-
-
+#if CONFIG_CODE_ZEROGROUP
+static int get_zpc_used(TX_SIZE tx_size) {
+ return (tx_size >= TX_16X16);
+}
+#endif
#endif // VP9_COMMON_VP9_BLOCKD_H_
diff --git a/vp9/common/vp9_coefupdateprobs.h b/vp9/common/vp9_coefupdateprobs.h
index b4d892df9..e49935c0c 100644
--- a/vp9/common/vp9_coefupdateprobs.h
+++ b/vp9/common/vp9_coefupdateprobs.h
@@ -18,12 +18,8 @@ static const vp9_prob vp9_coef_update_prob[ENTROPY_NODES] = {
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252
};
-#if CONFIG_CODE_NONZEROCOUNT
-#define NZC_UPDATE_PROB_4X4 252
-#define NZC_UPDATE_PROB_8X8 252
-#define NZC_UPDATE_PROB_16X16 252
-#define NZC_UPDATE_PROB_32X32 252
-#define NZC_UPDATE_PROB_PCAT 252
+#if CONFIG_CODE_ZEROGROUP
+#define ZPC_UPDATE_PROB 248
#endif
#if CONFIG_MODELCOEFPROB
diff --git a/vp9/common/vp9_convolve.c b/vp9/common/vp9_convolve.c
index a27ca6f5d..46ae50349 100644
--- a/vp9/common/vp9_convolve.c
+++ b/vp9/common/vp9_convolve.c
@@ -122,78 +122,6 @@ static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
}
}
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-
-static inline uint8_t combine_qtr(uint8_t a, uint8_t b) {
- return (((a) + (b) * 3 + 2) >> 2);
-}
-
-static inline uint8_t combine_3qtr(uint8_t a, uint8_t b) {
- return (((a) * 3 + (b) + 2) >> 2);
-}
-
-static inline uint8_t combine_1by8(uint8_t a, uint8_t b) {
- return (((a) * 1 + (b) * 7 + 4) >> 3);
-}
-
-static inline uint8_t combine_3by8(uint8_t a, uint8_t b) {
- return (((a) * 3 + (b) * 5 + 4) >> 3);
-}
-
-static inline uint8_t combine_5by8(uint8_t a, uint8_t b) {
- return (((a) * 5 + (b) * 3 + 4) >> 3);
-}
-
-static inline uint8_t combine_7by8(uint8_t a, uint8_t b) {
- return (((a) * 7 + (b) * 1 + 4) >> 3);
-}
-
-// TODO(debargha): Implment with a separate weight parameter
-static void convolve_wtd_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x0, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h, int taps,
- uint8_t (*combine)(uint8_t a, uint8_t b)) {
- int x, y, k, sum;
- const int16_t *filter_x_base = filter_x0;
-
-#if ALIGN_FILTERS_256
- filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
-#endif
-
- /* Adjust base pointer address for this source line */
- src -= taps / 2 - 1;
-
- for (y = 0; y < h; ++y) {
- /* Pointer to filter to use */
- const int16_t *filter_x = filter_x0;
-
- /* Initial phase offset */
- int x0_q4 = (filter_x - filter_x_base) / taps;
- int x_q4 = x0_q4;
-
- for (x = 0; x < w; ++x) {
- /* Per-pixel src offset */
- int src_x = (x_q4 - x0_q4) >> 4;
-
- for (sum = 0, k = 0; k < taps; ++k) {
- sum += src[src_x + k] * filter_x[k];
- }
- sum += (VP9_FILTER_WEIGHT >> 1);
- dst[x] = combine(dst[x], clip_pixel(sum >> VP9_FILTER_SHIFT));
-
- /* Adjust source and filter to use for the next pixel */
- x_q4 += x_step_q4;
- filter_x = filter_x_base + (x_q4 & 0xf) * taps;
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-
-#endif
-
static void convolve_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -279,52 +207,6 @@ static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
}
}
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-static void convolve_wtd_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y0, int y_step_q4,
- int w, int h, int taps,
- uint8_t (*combine)(uint8_t a, uint8_t b)) {
- int x, y, k, sum;
-
- const int16_t *filter_y_base = filter_y0;
-
-#if ALIGN_FILTERS_256
- filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
-#endif
-
- /* Adjust base pointer address for this source column */
- src -= src_stride * (taps / 2 - 1);
- for (x = 0; x < w; ++x) {
- /* Pointer to filter to use */
- const int16_t *filter_y = filter_y0;
-
- /* Initial phase offset */
- int y0_q4 = (filter_y - filter_y_base) / taps;
- int y_q4 = y0_q4;
-
- for (y = 0; y < h; ++y) {
- /* Per-pixel src offset */
- int src_y = (y_q4 - y0_q4) >> 4;
-
- for (sum = 0, k = 0; k < taps; ++k) {
- sum += src[(src_y + k) * src_stride] * filter_y[k];
- }
- sum += (VP9_FILTER_WEIGHT >> 1);
- dst[y * dst_stride] = combine(dst[y * dst_stride],
- clip_pixel(sum >> VP9_FILTER_SHIFT));
-
- /* Adjust source and filter to use for the next pixel */
- y_q4 += y_step_q4;
- filter_y = filter_y_base + (y_q4 & 0xf) * taps;
- }
- ++src;
- ++dst;
- }
-}
-#endif
-
static void convolve_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -403,68 +285,6 @@ void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride,
w, h, 8);
}
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-void vp9_convolve8_1by8_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_1by8);
-}
-
-void vp9_convolve8_qtr_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_qtr);
-}
-
-void vp9_convolve8_3by8_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_3by8);
-}
-
-void vp9_convolve8_5by8_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_5by8);
-}
-
-void vp9_convolve8_3qtr_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_3qtr);
-}
-
-void vp9_convolve8_7by8_horiz_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_7by8);
-}
-#endif
-
void vp9_convolve8_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -485,68 +305,6 @@ void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride,
w, h, 8);
}
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-void vp9_convolve8_1by8_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_1by8);
-}
-
-void vp9_convolve8_qtr_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_qtr);
-}
-
-void vp9_convolve8_3by8_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_3by8);
-}
-
-void vp9_convolve8_5by8_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_5by8);
-}
-
-void vp9_convolve8_3qtr_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_3qtr);
-}
-
-void vp9_convolve8_7by8_vert_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h, 8, combine_7by8);
-}
-#endif
-
void vp9_convolve8_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -579,140 +337,6 @@ void vp9_convolve8_avg_c(const uint8_t *src, int src_stride,
w, h);
}
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-void vp9_convolve8_1by8_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- /* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
- assert(w <= 16);
- assert(h <= 16);
-
- vp9_convolve8(src, src_stride,
- temp, 16,
- filter_x, x_step_q4,
- filter_y, y_step_q4,
- w, h);
- vp9_convolve_1by8(temp, 16,
- dst, dst_stride,
- NULL, 0, /* These unused parameter should be removed! */
- NULL, 0, /* These unused parameter should be removed! */
- w, h);
-}
-
-void vp9_convolve8_qtr_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- /* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
- assert(w <= 16);
- assert(h <= 16);
-
- vp9_convolve8(src, src_stride,
- temp, 16,
- filter_x, x_step_q4,
- filter_y, y_step_q4,
- w, h);
- vp9_convolve_qtr(temp, 16,
- dst, dst_stride,
- NULL, 0, /* These unused parameter should be removed! */
- NULL, 0, /* These unused parameter should be removed! */
- w, h);
-}
-
-void vp9_convolve8_3by8_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- /* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
- assert(w <= 16);
- assert(h <= 16);
-
- vp9_convolve8(src, src_stride,
- temp, 16,
- filter_x, x_step_q4,
- filter_y, y_step_q4,
- w, h);
- vp9_convolve_3by8(temp, 16,
- dst, dst_stride,
- NULL, 0, /* These unused parameter should be removed! */
- NULL, 0, /* These unused parameter should be removed! */
- w, h);
-}
-
-void vp9_convolve8_5by8_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- /* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
- assert(w <= 16);
- assert(h <= 16);
-
- vp9_convolve8(src, src_stride,
- temp, 16,
- filter_x, x_step_q4,
- filter_y, y_step_q4,
- w, h);
- vp9_convolve_5by8(temp, 16,
- dst, dst_stride,
- NULL, 0, /* These unused parameter should be removed! */
- NULL, 0, /* These unused parameter should be removed! */
- w, h);
-}
-
-void vp9_convolve8_3qtr_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- /* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
- assert(w <= 16);
- assert(h <= 16);
-
- vp9_convolve8(src, src_stride,
- temp, 16,
- filter_x, x_step_q4,
- filter_y, y_step_q4,
- w, h);
- vp9_convolve_3qtr(temp, 16,
- dst, dst_stride,
- NULL, 0, /* These unused parameter should be removed! */
- NULL, 0, /* These unused parameter should be removed! */
- w, h);
-}
-
-void vp9_convolve8_7by8_c(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h) {
- /* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
- assert(w <= 16);
- assert(h <= 16);
-
- vp9_convolve8(src, src_stride,
- temp, 16,
- filter_x, x_step_q4,
- filter_y, y_step_q4,
- w, h);
- vp9_convolve_7by8(temp, 16,
- dst, dst_stride,
- NULL, 0, /* These unused parameter should be removed! */
- NULL, 0, /* These unused parameter should be removed! */
- w, h);
-}
-#endif
-
void vp9_convolve_copy(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
@@ -750,101 +374,3 @@ void vp9_convolve_avg(const uint8_t *src, int src_stride,
dst += dst_stride;
}
}
-
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-void vp9_convolve_1by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h) {
- int x, y;
-
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x] = combine_1by8(dst[x], src[x]);
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-
-void vp9_convolve_qtr(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h) {
- int x, y;
-
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x] = combine_qtr(dst[x], src[x]);
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-
-void vp9_convolve_3by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h) {
- int x, y;
-
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x] = combine_3by8(dst[x], src[x]);
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-
-void vp9_convolve_5by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h) {
- int x, y;
-
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x] = combine_5by8(dst[x], src[x]);
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-
-void vp9_convolve_3qtr(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h) {
- int x, y;
-
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x] = combine_3qtr(dst[x], src[x]);
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-
-void vp9_convolve_7by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int filter_x_stride,
- const int16_t *filter_y, int filter_y_stride,
- int w, int h) {
- int x, y;
-
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x] = combine_7by8(dst[x], src[x]);
- }
- src += src_stride;
- dst += dst_stride;
- }
-}
-#endif
diff --git a/vp9/common/vp9_convolve.h b/vp9/common/vp9_convolve.h
index bef2d8564..0596080c0 100644
--- a/vp9/common/vp9_convolve.h
+++ b/vp9/common/vp9_convolve.h
@@ -33,50 +33,6 @@ void vp9_convolve_avg(const uint8_t *src, int src_stride,
const int16_t *filter_y, int y_step_q4,
int w, int h);
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-// Not a convolution, a block wtd (1/8, 7/8) average for (dst, src)
-void vp9_convolve_1by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h);
-
-// Not a convolution, a block wtd (1/4, 3/4) average for (dst, src)
-void vp9_convolve_qtr(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h);
-
-// Not a convolution, a block wtd (3/8, 5/8) average for (dst, src)
-void vp9_convolve_3by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h);
-
-// Not a convolution, a block wtd (5/8, 3/8) average for (dst, src)
-void vp9_convolve_5by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h);
-
-// Not a convolution, a block wtd (3/4, 1/4) average for (dst, src)
-void vp9_convolve_3qtr(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h);
-
-// Not a convolution, a block wtd (7/8, 1/8) average for (dst, src)
-void vp9_convolve_7by8(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, int y_step_q4,
- int w, int h);
-#endif
-
struct subpix_fn_table {
const int16_t (*filter_x)[8];
const int16_t (*filter_y)[8];
diff --git a/vp9/common/vp9_default_coef_probs.h b/vp9/common/vp9_default_coef_probs.h
index 5a781fb0a..453b4a243 100644
--- a/vp9/common/vp9_default_coef_probs.h
+++ b/vp9/common/vp9_default_coef_probs.h
@@ -700,298 +700,85 @@ static const vp9_coeff_probs default_coef_probs_32x32[BLOCK_TYPES] = {
}
};
-#if CONFIG_CODE_NONZEROCOUNT
+#if CONFIG_CODE_ZEROGROUP
-// TODO(debargha): Remove the macro and count tables after experimentation
-#define NZC_DEFAULT_COUNTS /* Uncomment to use counts as defaults */
-
-#ifdef NZC_DEFAULT_COUNTS
-static const unsigned int default_nzc_counts_4x4[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC4X4_TOKENS] = {
- {
- {
- { 967652, 29023, 15039, 6952, 1568, 116 },
- { 289116, 22938, 4522, 1935, 520, 47 }
- }, {
- { 967652, 29023, 15039, 6952, 1568, 116 },
- { 689116, 22938, 4522, 1935, 520, 47 }
- },
- }, {
- {
- { 124684, 37167, 15270, 8483, 1777, 102 },
- { 10405, 12395, 3401, 3574, 2461, 771 }
- }, {
- { 124684, 37167, 15270, 8483, 1777, 102 },
- { 20405, 12395, 3401, 3574, 2461, 771 }
- }
- }, {
- {
- { 4100, 22976, 15627, 16137, 7982, 1793 },
- { 4249, 3084, 2131, 4081, 6439, 1653 }
- }, {
- { 21100, 22976, 15627, 16137, 7982, 1793 },
- { 4249, 3084, 2131, 4081, 2439, 1653 }
- }
- }
-};
-
-static const unsigned int default_nzc_counts_8x8[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC8X8_TOKENS] = {
- {
- {
- { 372988, 62777, 19440, 11812, 5145, 1917, 439, 10 },
- { 72052, 30468, 6973, 3250, 1500, 750, 375, 5 },
- }, {
- { 372988, 62777, 19440, 11812, 5145, 1917, 439, 10 },
- { 192052, 30468, 6973, 3250, 1500, 750, 375, 5 },
- }
- }, {
- {
- { 121533, 33527, 15655, 11920, 5723, 2009, 315, 7 },
- { 23772, 23120, 13127, 8115, 4000, 2000, 200, 6 },
- }, {
- { 121533, 33527, 15655, 11920, 5723, 2009, 315, 7 },
- { 23772, 23120, 13127, 8115, 4000, 2000, 200, 6 },
+// There are two probs: the first is the prob(0) of the isolated zero bit,
+// the second is the prob(0) of the end of orientation symbol [if 0 that
+// indicates a zerotree root].
+static const vp9_zpc_probs default_zpc_probs_4x4 = {
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 1 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 2 */
+ { 1, }, { 1, }, { 1, },
}
- }, {
- {
- { 29408, 11758, 8023, 10123, 6705, 2468, 369, 17 },
- { 11612, 13874, 13329, 13022, 6500, 3250, 300, 12 },
- }, {
- { 29408, 11758, 8023, 10123, 6705, 2468, 369, 17 },
- { 11612, 13874, 13329, 13022, 6500, 3250, 300, 12 },
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 1 */
+ { 1, }, { 1, }, { 1, },
+ }, { /* Coeff Band 2 */
+ { 1, }, { 1, }, { 1, },
}
}
};
-
-static const unsigned int default_nzc_counts_16x16[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC16X16_TOKENS] = {
- {
- {
- { 372988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2 },
- { 72052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1 },
- }, {
- { 32988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2 },
- { 92052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1 },
- }
- }, {
- {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2 },
- { 47772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2 },
- }, {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2 },
- { 27772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2 },
+static const vp9_zpc_probs default_zpc_probs_8x8 = {
+ { /* Intra */
+ { /* ZPC Band 0 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 1 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 2 */
+ { 4, }, { 2, }, { 1, },
}
- }, {
- {
- { 19408, 31758, 16023, 10123, 6705, 2468, 369, 17, 10, 5 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3 },
- }, {
- { 22408, 11758, 8023, 10123, 6705, 2468, 369, 17, 10, 5 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3 },
+ }, { /* Inter */
+ { /* ZPC Band 0 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 1 */
+ { 4, }, { 2, }, { 1, },
+ }, { /* ZPC Band 2 */
+ { 4, }, { 2, }, { 1, },
}
}
};
-
-static const unsigned int default_nzc_counts_32x32[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC32X32_TOKENS] = {
- {
- {
- { 72988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2, 1, 0 },
- { 52052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1, 0, 0 },
- }, {
- { 72988, 62777, 19440, 11812, 5145, 1917, 439, 10, 5, 2, 1, 0 },
- { 72052, 30468, 6973, 3250, 1500, 750, 375, 50, 8, 1, 0, 0 },
- }
- }, {
- {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2, 1, 0 },
- { 27772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2, 1, 0 },
- }, {
- { 21533, 33527, 15655, 11920, 5723, 2009, 315, 7, 4, 2, 1, 0 },
- { 27772, 23120, 13127, 8115, 4000, 2000, 200, 6, 4, 2, 1, 0 },
- }
- }, {
- {
- { 19408, 11758, 8023, 10123, 6705, 2468, 369, 17, 10, 5, 2, 1 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3, 2, 1 },
- }, {
- { 29408, 11758, 8023, 10123, 6705, 2468, 369, 17, 10, 5, 2, 1 },
- { 9612, 13874, 13329, 13022, 6500, 3250, 300, 12, 6, 3, 2, 1 },
- }
- }
-};
-
-#else
-
-static const vp9_prob default_nzc_probs_4x4[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC4X4_TOKENS] = {
- {
- {
- { 219, 162, 179, 142, 242, },
- { 214, 253, 228, 246, 255, },
- }, {
- { 225, 236, 190, 229, 253, },
- { 251, 253, 240, 248, 255, },
- },
- }, {
- {
- { 106, 126, 158, 126, 244, },
- { 118, 241, 201, 240, 255, },
- }, {
- { 165, 179, 143, 189, 242, },
- { 173, 239, 192, 255, 128, },
+static const vp9_zpc_probs default_zpc_probs_16x16 = {
+ { /* Intra */
+ { /* ZPC Band 0 */
+ { 57, }, { 30, }, { 13, },
+ }, { /* ZPC Band 1 */
+ { 46, }, { 23, }, { 4, },
+ }, { /* ZPC Band 1 */
+ { 36, }, { 11, }, { 2, },
},
- }, {
- {
- { 42 , 78 , 153, 92 , 223, },
- { 128, 128, 128, 128, 128, },
- }, {
- { 76 , 68 , 126, 110, 216, },
- { 128, 128, 128, 128, 128, },
+ }, { /* Inter */
+ { /* ZPC Band 0 */
+ { 45, }, { 21 }, { 10, },
+ }, { /* ZPC Band 1 */
+ { 24, }, { 14, }, { 3, },
+ }, { /* ZPC Band 2 */
+ { 16, }, { 6, }, { 1, },
},
},
};
-
-static const vp9_prob default_nzc_probs_8x8[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC8X8_TOKENS] = {
- {
- {
- { 134, 139, 170, 178, 142, 197, 255, },
- { 167, 224, 199, 252, 205, 255, 128, },
- }, {
- { 181, 210, 180, 241, 190, 235, 255, },
- { 234, 251, 235, 252, 219, 255, 128, },
- },
- }, {
- {
- { 33 , 64 , 155, 143, 86 , 216, 255, },
- { 73 , 160, 167, 251, 153, 255, 128, },
- }, {
- { 79 , 104, 153, 195, 119, 246, 255, },
- { 149, 183, 186, 249, 203, 255, 128, },
- },
- }, {
- {
- { 10 , 25 , 156, 61 , 69 , 156, 254, },
- { 32 , 1 , 128, 146, 64 , 255, 128, },
- }, {
- { 37 , 48 , 143, 113, 81 , 202, 255, },
- { 1 , 255, 128, 128, 128, 128, 128, },
- },
- },
-};
-
-static const vp9_prob default_nzc_probs_16x16[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC16X16_TOKENS] = {
- {
- {
- { 11 , 188, 210, 167, 141, 143, 152, 255, 128, },
- { 171, 201, 203, 244, 207, 255, 255, 128, 128, },
- }, {
- { 23 , 217, 207, 251, 198, 255, 219, 128, 128, },
- { 235, 249, 229, 255, 199, 128, 128, 128, 128, },
- },
- }, {
- {
- { 9 , 45 , 168, 85 , 66 , 221, 139, 246, 255, },
- { 51 , 110, 163, 238, 94 , 255, 255, 128, 128, },
- }, {
- { 4 , 149, 175, 240, 149, 255, 205, 128, 128, },
- { 141, 217, 186, 255, 128, 128, 128, 128, 128, },
- },
- }, {
- {
- { 1 , 12 , 173, 6 , 68 , 145, 41 , 204, 255, },
- { 39 , 47 , 128, 199, 110, 255, 128, 128, 128, },
- }, {
- { 1 , 121, 171, 149, 115, 242, 159, 255, 128, },
- { 1 , 255, 255, 128, 128, 128, 128, 128, 128, },
+static const vp9_zpc_probs default_zpc_probs_32x32 = {
+ { /* Intra */
+ { /* ZPC Band 0 */
+ { 132, }, { 60, }, { 19, },
+ }, { /* ZPC Band 1 */
+ { 64, }, { 32, }, { 8, },
+ }, { /* ZPC Band 2 */
+ { 25, }, { 11, }, { 1, },
},
- },
-};
-
-static const vp9_prob default_nzc_probs_32x32[MAX_NZC_CONTEXTS]
- [REF_TYPES]
- [BLOCK_TYPES]
- [NZC32X32_TOKENS] = {
- {
- {
- { 11 , 216, 195, 201, 160, 247, 217, 255, 255, 128, 128, },
- { 177, 240, 239, 255, 192, 128, 128, 128, 128, 128, 128, },
- }, {
- { 48 , 235, 213, 235, 199, 255, 255, 128, 128, 128, 128, },
- { 205, 255, 248, 128, 128, 128, 128, 128, 128, 128, 128, },
- },
- }, {
- {
- { 6 , 96 , 138, 99 , 125, 248, 188, 255, 128, 128, 128, },
- { 17 , 53 , 43 , 189, 1 , 255, 171, 128, 128, 128, 128, },
- }, {
- { 5 , 187, 235, 232, 117, 255, 219, 128, 128, 128, 128, },
- { 146, 255, 255, 128, 128, 128, 128, 128, 128, 128, 128, },
- },
- }, {
- {
- { 1 , 7 , 93 , 14 , 100, 30 , 85 , 65 , 81 , 210, 255, },
- { 1 , 1 , 128, 26 , 1 , 218, 78 , 255, 255, 128, 128, },
- }, {
- { 4 , 148, 206, 137, 160, 255, 255, 128, 128, 128, 128, },
- { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, },
+ }, { /* Inter */
+ { /* ZPC Band 0 */
+ { 134, }, { 39, }, { 25, },
+ }, { /* ZPC Band 1 */
+ { 64, }, { 24, }, { 12, },
+ }, { /* ZPC Band 2 */
+ { 21, }, { 10, }, { 1, },
},
},
};
-#endif
-
-static const vp9_prob default_nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA]
- [NZC_BITS_EXTRA] = {
- // Bit probabilities are in least to most significance order
- {
- {176, 128, 128, 128, 128, 128, 128, 128, 128}, // 3 - 4
- {164, 192, 128, 128, 128, 128, 128, 128, 128}, // 5 - 8
- {154, 184, 208, 128, 128, 128, 128, 128, 128}, // 9 - 16
- {144, 176, 200, 216, 128, 128, 128, 128, 128}, // 17 - 32
- {140, 172, 192, 208, 224, 128, 128, 128, 128}, // 33 - 64
- {136, 168, 188, 200, 220, 232, 128, 128, 128}, // 65 - 128
- {132, 164, 184, 196, 216, 228, 240, 128, 128}, // 129 - 256
- {130, 162, 178, 194, 212, 226, 240, 248, 128}, // 257 - 512
- {128, 160, 176, 192, 208, 224, 240, 248, 254}, // 513 - 1024
- }, {
- {168, 128, 128, 128, 128, 128, 128, 128, 128}, // 3 - 4
- {152, 184, 128, 128, 128, 128, 128, 128, 128}, // 5 - 8
- {152, 184, 208, 128, 128, 128, 128, 128, 128}, // 9 - 16
- {144, 176, 200, 216, 128, 128, 128, 128, 128}, // 17 - 32
- {140, 172, 192, 208, 224, 128, 128, 128, 128}, // 33 - 64
- {136, 168, 188, 200, 220, 232, 128, 128, 128}, // 65 - 128
- {132, 164, 184, 196, 216, 228, 240, 128, 128}, // 129 - 256
- {130, 162, 178, 194, 212, 226, 240, 248, 128}, // 257 - 512
- {128, 160, 176, 192, 208, 224, 240, 248, 254}, // 513 - 1024
- }, {
- {160, 128, 128, 128, 128, 128, 128, 128, 128}, // 3 - 4
- {152, 176, 128, 128, 128, 128, 128, 128, 128}, // 5 - 8
- {150, 184, 208, 128, 128, 128, 128, 128, 128}, // 9 - 16
- {144, 176, 200, 216, 128, 128, 128, 128, 128}, // 17 - 32
- {140, 172, 192, 208, 224, 128, 128, 128, 128}, // 33 - 64
- {136, 168, 188, 200, 220, 232, 128, 128, 128}, // 65 - 128
- {132, 164, 184, 196, 216, 228, 240, 128, 128}, // 129 - 256
- {130, 162, 178, 194, 212, 226, 240, 248, 128}, // 257 - 512
- {128, 160, 176, 192, 208, 224, 240, 248, 254}, // 513 - 1024
- },
-};
-
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index 5e6cba2ed..a5437d889 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -400,65 +400,6 @@ static const vp9_prob Pcat6[] = {
254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
};
-#if CONFIG_CODE_NONZEROCOUNT
-const vp9_tree_index vp9_nzc4x4_tree[2 * NZC4X4_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- -NZC_3TO4, 8,
- -NZC_5TO8, -NZC_9TO16,
-};
-struct vp9_token vp9_nzc4x4_encodings[NZC4X4_TOKENS];
-
-const vp9_tree_index vp9_nzc8x8_tree[2 * NZC8X8_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- 8, 10,
- -NZC_3TO4, -NZC_5TO8,
- -NZC_9TO16, 12,
- -NZC_17TO32, -NZC_33TO64,
-};
-struct vp9_token vp9_nzc8x8_encodings[NZC8X8_TOKENS];
-
-const vp9_tree_index vp9_nzc16x16_tree[2 * NZC16X16_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- 8, 10,
- -NZC_3TO4, -NZC_5TO8,
- 12, 14,
- -NZC_9TO16, -NZC_17TO32,
- -NZC_33TO64, 16,
- -NZC_65TO128, -NZC_129TO256,
-};
-struct vp9_token vp9_nzc16x16_encodings[NZC16X16_TOKENS];
-
-const vp9_tree_index vp9_nzc32x32_tree[2 * NZC32X32_NODES] = {
- -NZC_0, 2,
- 4, 6,
- -NZC_1, -NZC_2,
- 8, 10,
- -NZC_3TO4, -NZC_5TO8,
- 12, 14,
- -NZC_9TO16, -NZC_17TO32,
- 16, 18,
- -NZC_33TO64, -NZC_65TO128,
- -NZC_129TO256, 20,
- -NZC_257TO512, -NZC_513TO1024,
-};
-struct vp9_token vp9_nzc32x32_encodings[NZC32X32_TOKENS];
-
-const int vp9_extranzcbits[NZC32X32_TOKENS] = {
- 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
-};
-
-const int vp9_basenzcvalue[NZC32X32_TOKENS] = {
- 0, 1, 2, 3, 5, 9, 17, 33, 65, 129, 257, 513
-};
-
-#endif // CONFIG_CODE_NONZEROCOUNT
-
#if CONFIG_MODELCOEFPROB
#if UNCONSTRAINED_NODES == 2
@@ -1310,7 +1251,7 @@ static void init_bit_trees() {
init_bit_tree(cat6, 14);
}
-vp9_extra_bit_struct vp9_extra_bits[12] = {
+vp9_extra_bit vp9_extra_bits[12] = {
{ 0, 0, 0, 0},
{ 0, 0, 0, 1},
{ 0, 0, 0, 2},
@@ -1344,10 +1285,10 @@ int vp9_get_coef_context(const int *scan, const int *neighbors,
int ctx;
assert(neighbors[MAX_NEIGHBORS * c + 0] >= 0);
if (neighbors[MAX_NEIGHBORS * c + 1] >= 0) {
- ctx = (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
- token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1;
+ ctx = (1 + token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]] +
+ token_cache[scan[neighbors[MAX_NEIGHBORS * c + 1]]]) >> 1;
} else {
- ctx = token_cache[neighbors[MAX_NEIGHBORS * c + 0]];
+ ctx = token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]];
}
return vp9_pt_energy_class[ctx];
}
@@ -1357,55 +1298,6 @@ void vp9_default_coef_probs(VP9_COMMON *pc) {
#if CONFIG_MODELCOEFPROB
int b, r, c, p;
#endif
-#if CONFIG_CODE_NONZEROCOUNT
-#ifdef NZC_DEFAULT_COUNTS
- int h, g;
- for (h = 0; h < MAX_NZC_CONTEXTS; ++h) {
- for (g = 0; g < REF_TYPES; ++g) {
- int i;
- unsigned int branch_ct4x4[NZC4X4_NODES][2];
- unsigned int branch_ct8x8[NZC8X8_NODES][2];
- unsigned int branch_ct16x16[NZC16X16_NODES][2];
- unsigned int branch_ct32x32[NZC32X32_NODES][2];
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc4x4_tree,
- pc->fc.nzc_probs_4x4[h][g][i], branch_ct4x4,
- default_nzc_counts_4x4[h][g][i], 0);
- }
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc8x8_tree,
- pc->fc.nzc_probs_8x8[h][g][i], branch_ct8x8,
- default_nzc_counts_8x8[h][g][i], 0);
- }
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc16x16_tree,
- pc->fc.nzc_probs_16x16[h][g][i], branch_ct16x16,
- default_nzc_counts_16x16[h][g][i], 0);
- }
- for (i = 0; i < BLOCK_TYPES; ++i) {
- vp9_tree_probs_from_distribution(
- vp9_nzc32x32_tree,
- pc->fc.nzc_probs_32x32[h][g][i], branch_ct32x32,
- default_nzc_counts_32x32[h][g][i], 0);
- }
- }
- }
-#else
- vpx_memcpy(pc->fc.nzc_probs_4x4, default_nzc_probs_4x4,
- sizeof(pc->fc.nzc_probs_4x4));
- vpx_memcpy(pc->fc.nzc_probs_8x8, default_nzc_probs_8x8,
- sizeof(pc->fc.nzc_probs_8x8));
- vpx_memcpy(pc->fc.nzc_probs_16x16, default_nzc_probs_16x16,
- sizeof(pc->fc.nzc_probs_16x16));
- vpx_memcpy(pc->fc.nzc_probs_32x32, default_nzc_probs_32x32,
- sizeof(pc->fc.nzc_probs_32x32));
-#endif
- vpx_memcpy(pc->fc.nzc_pcat_probs, default_nzc_pcat_probs,
- sizeof(pc->fc.nzc_pcat_probs));
-#endif // CONFIG_CODE_NONZEROCOUNT
#if CONFIG_MODELCOEFPROB
for (b = 0; b < BLOCK_TYPES; ++b)
for (r = 0; r < REF_TYPES; ++r)
@@ -1447,6 +1339,16 @@ void vp9_default_coef_probs(VP9_COMMON *pc) {
vpx_memcpy(pc->fc.coef_probs_32x32, default_coef_probs_32x32,
sizeof(pc->fc.coef_probs_32x32));
#endif
+#if CONFIG_CODE_ZEROGROUP
+ vpx_memcpy(pc->fc.zpc_probs_4x4, default_zpc_probs_4x4,
+ sizeof(pc->fc.zpc_probs_4x4));
+ vpx_memcpy(pc->fc.zpc_probs_8x8, default_zpc_probs_8x8,
+ sizeof(pc->fc.zpc_probs_8x8));
+ vpx_memcpy(pc->fc.zpc_probs_16x16, default_zpc_probs_16x16,
+ sizeof(pc->fc.zpc_probs_16x16));
+ vpx_memcpy(pc->fc.zpc_probs_32x32, default_zpc_probs_32x32,
+ sizeof(pc->fc.zpc_probs_32x32));
+#endif
}
// Neighborhood 5-tuples for various scans and blocksizes,
@@ -1597,1099 +1499,8 @@ void vp9_coef_tree_initialize() {
vp9_init_neighbors();
init_bit_trees();
vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_tokens_from_tree(vp9_nzc4x4_encodings, vp9_nzc4x4_tree);
- vp9_tokens_from_tree(vp9_nzc8x8_encodings, vp9_nzc8x8_tree);
- vp9_tokens_from_tree(vp9_nzc16x16_encodings, vp9_nzc16x16_tree);
- vp9_tokens_from_tree(vp9_nzc32x32_encodings, vp9_nzc32x32_tree);
-#endif
-}
-
-#if CONFIG_CODE_NONZEROCOUNT
-
-#define mb_in_cur_tile(cm, mb_row, mb_col) \
- ((mb_col) >= (cm)->cur_tile_mb_col_start && \
- (mb_col) <= (cm)->cur_tile_mb_col_end && \
- (mb_row) >= 0)
-
-#define choose_nzc_context(nzc_exp, t2, t1) \
- ((nzc_exp) >= (t2) ? 2 : (nzc_exp) >= (t1) ? 1 : 0)
-
-#define NZC_T2_32X32 (16 << 6)
-#define NZC_T1_32X32 (4 << 6)
-
-#define NZC_T2_16X16 (12 << 6)
-#define NZC_T1_16X16 (3 << 6)
-
-#define NZC_T2_8X8 (8 << 6)
-#define NZC_T1_8X8 (2 << 6)
-
-#define NZC_T2_4X4 (4 << 6)
-#define NZC_T1_4X4 (1 << 6)
-
-// Transforms a mb16 block index to a sb64 block index
-static inline int mb16_to_sb64_index(int mb_row, int mb_col, int block) {
- int r = (mb_row & 3);
- int c = (mb_col & 3);
- int b;
- if (block < 16) { // Y
- int ib = block >> 2;
- int jb = block & 3;
- ib += r * 4;
- jb += c * 4;
- b = ib * 16 + jb;
- assert(b < 256);
- return b;
- } else { // UV
- int base = block - (block & 3);
- int ib = (block - base) >> 1;
- int jb = (block - base) & 1;
- ib += r * 2;
- jb += c * 2;
- b = base * 16 + ib * 8 + jb;
- assert(b >= 256 && b < 384);
- return b;
- }
-}
-
-// Transforms a mb16 block index to a sb32 block index
-static inline int mb16_to_sb32_index(int mb_row, int mb_col, int block) {
- int r = (mb_row & 1);
- int c = (mb_col & 1);
- int b;
- if (block < 16) { // Y
- int ib = block >> 2;
- int jb = block & 3;
- ib += r * 4;
- jb += c * 4;
- b = ib * 8 + jb;
- assert(b < 64);
- return b;
- } else { // UV
- int base = block - (block & 3);
- int ib = (block - base) >> 1;
- int jb = (block - base) & 1;
- ib += r * 2;
- jb += c * 2;
- b = base * 4 + ib * 4 + jb;
- assert(b >= 64 && b < 96);
- return b;
- }
-}
-
-static inline int block_to_txfm_index(int block, TX_SIZE tx_size, int s) {
- // s is the log of the number of 4x4 blocks in each row/col of larger block
- int b, ib, jb, nb;
- ib = block >> s;
- jb = block - (ib << s);
- ib >>= tx_size;
- jb >>= tx_size;
- nb = 1 << (s - tx_size);
- b = (ib * nb + jb) << (2 * tx_size);
- return b;
-}
-
-/* BEGIN - Helper functions to get the y nzcs */
-static unsigned int get_nzc_4x4_y_sb64(MB_MODE_INFO *mi, int block) {
- int b;
- assert(block < 256);
- b = block_to_txfm_index(block, mi->txfm_size, 4);
- assert(b < 256);
- return mi->nzcs[b] << (6 - 2 * mi->txfm_size);
-}
-
-static unsigned int get_nzc_4x4_y_sb32(MB_MODE_INFO *mi, int block) {
- int b;
- assert(block < 64);
- b = block_to_txfm_index(block, mi->txfm_size, 3);
- assert(b < 64);
- return mi->nzcs[b] << (6 - 2 * mi->txfm_size);
-}
-
-static unsigned int get_nzc_4x4_y_mb16(MB_MODE_INFO *mi, int block) {
- int b;
- assert(block < 16);
- b = block_to_txfm_index(block, mi->txfm_size, 2);
- assert(b < 16);
- return mi->nzcs[b] << (6 - 2 * mi->txfm_size);
-}
-/* END - Helper functions to get the y nzcs */
-
-/* Function to get y nzc where block index is in mb16 terms */
-static unsigned int get_nzc_4x4_y(VP9_COMMON *cm, MODE_INFO *m,
- int mb_row, int mb_col, int block) {
- // NOTE: All values returned are at 64 times the true value at 4x4 scale
- MB_MODE_INFO *const mi = &m->mbmi;
- const int mis = cm->mode_info_stride;
- if (mi->mb_skip_coeff || !mb_in_cur_tile(cm, mb_row, mb_col))
- return 0;
- if (mi->sb_type == BLOCK_SIZE_SB64X64) {
- int r = mb_row & 3;
- int c = mb_col & 3;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_y_sb64(
- &m->mbmi, mb16_to_sb64_index(mb_row, mb_col, block));
- } else if (mi->sb_type == BLOCK_SIZE_SB32X32) {
- int r = mb_row & 1;
- int c = mb_col & 1;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_y_sb32(
- &m->mbmi, mb16_to_sb32_index(mb_row, mb_col, block));
- } else {
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row, mb_col))
- return 0;
- return get_nzc_4x4_y_mb16(mi, block);
- }
-}
-
-/* BEGIN - Helper functions to get the uv nzcs */
-static unsigned int get_nzc_4x4_uv_sb64(MB_MODE_INFO *mi, int block) {
- int b;
- int base, uvtxfm_size;
- assert(block >= 256 && block < 384);
- uvtxfm_size = mi->txfm_size;
- base = 256 + (block & 64);
- block -= base;
- b = base + block_to_txfm_index(block, uvtxfm_size, 3);
- assert(b >= 256 && b < 384);
- return mi->nzcs[b] << (6 - 2 * uvtxfm_size);
-}
-
-static unsigned int get_nzc_4x4_uv_sb32(MB_MODE_INFO *mi, int block) {
- int b;
- int base, uvtxfm_size;
- assert(block >= 64 && block < 96);
- if (mi->txfm_size == TX_32X32)
- uvtxfm_size = TX_16X16;
- else
- uvtxfm_size = mi->txfm_size;
- base = 64 + (block & 16);
- block -= base;
- b = base + block_to_txfm_index(block, uvtxfm_size, 2);
- assert(b >= 64 && b < 96);
- return mi->nzcs[b] << (6 - 2 * uvtxfm_size);
-}
-
-static unsigned int get_nzc_4x4_uv_mb16(MB_MODE_INFO *mi, int block) {
- int b;
- int base, uvtxfm_size;
- assert(block >= 16 && block < 24);
- if (mi->txfm_size == TX_8X8 &&
- (mi->mode == SPLITMV || mi->mode == I8X8_PRED))
- uvtxfm_size = TX_4X4;
- else if (mi->txfm_size == TX_16X16)
- uvtxfm_size = TX_8X8;
- else
- uvtxfm_size = mi->txfm_size;
- base = 16 + (block & 4);
- block -= base;
- b = base + block_to_txfm_index(block, uvtxfm_size, 1);
- assert(b >= 16 && b < 24);
- return mi->nzcs[b] << (6 - 2 * uvtxfm_size);
-}
-/* END - Helper functions to get the uv nzcs */
-
-/* Function to get uv nzc where block index is in mb16 terms */
-static unsigned int get_nzc_4x4_uv(VP9_COMMON *cm, MODE_INFO *m,
- int mb_row, int mb_col, int block) {
- // NOTE: All values returned are at 64 times the true value at 4x4 scale
- MB_MODE_INFO *const mi = &m->mbmi;
- const int mis = cm->mode_info_stride;
- if (mi->mb_skip_coeff || !mb_in_cur_tile(cm, mb_row, mb_col))
- return 0;
- if (mi->sb_type == BLOCK_SIZE_SB64X64) {
- int r = mb_row & 3;
- int c = mb_col & 3;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_uv_sb64(
- &m->mbmi, mb16_to_sb64_index(mb_row, mb_col, block));
- } else if (mi->sb_type == BLOCK_SIZE_SB32X32) {
- int r = mb_row & 1;
- int c = mb_col & 1;
- m -= c + r * mis;
- if (m->mbmi.mb_skip_coeff || !mb_in_cur_tile(cm, mb_row - r, mb_col - c))
- return 0;
- else
- return get_nzc_4x4_uv_sb32(
- &m->mbmi, mb16_to_sb32_index(mb_row, mb_col, block));
- } else {
- return get_nzc_4x4_uv_mb16(mi, block);
- }
-}
-
-int vp9_get_nzc_context_y_sb64(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- assert(block < 256);
- switch (txfm_size) {
- case TX_32X32:
- assert((block & 63) == 0);
- if (block < 128) {
- int o = (block >> 6) * 2;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 12) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 13) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 14) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 15) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 12) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 13) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 14) +
- get_nzc_4x4_y(cm, cur - mis + o + 1,
- mb_row - 1, mb_col + o + 1, 15);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 128] << 3;
- }
- if ((block & 127) == 0) {
- int o = (block >> 7) * 2;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 15) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, 15);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 64] << 3;
- }
- nzc_exp <<= 2;
- // Note nzc_exp is 64 times the average value expected at 32x32 scale
- return choose_nzc_context(nzc_exp, NZC_T2_32X32, NZC_T1_32X32);
- break;
-
- case TX_16X16:
- assert((block & 15) == 0);
- if (block < 64) {
- int o = block >> 4;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 12) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 13) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 14) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 15);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 64] << 4;
- }
- if ((block & 63) == 0) {
- int o = block >> 6;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 15);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 16] << 4;
- }
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
- break;
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (block < 32) {
- int o = block >> 3;
- int p = ((block >> 2) & 1) ? 14 : 12;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p + 1);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 32] << 5;
- }
- if ((block & 31) == 0) {
- int o = block >> 6;
- int p = ((block >> 5) & 1) ? 11 : 3;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p + 4);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
- break;
-
- case TX_4X4:
- if (block < 16) {
- int o = block >> 2;
- int p = block & 3;
- nzc_exp = get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o,
- 12 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 16] << 6);
- }
- if ((block & 15) == 0) {
- int o = block >> 6;
- int p = (block >> 4) & 3;
- nzc_exp += get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- 3 + 4 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
- break;
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context_y_sb32(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- assert(block < 64);
- switch (txfm_size) {
- case TX_32X32:
- assert(block == 0);
- nzc_exp =
- (get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 12) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 13) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 14) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 15) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 12) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 13) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 14) +
- get_nzc_4x4_y(cm, cur - mis + 1, mb_row - 1, mb_col + 1, 15) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 15) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + mis, mb_row + 1, mb_col - 1, 15)) << 2;
- // Note nzc_exp is 64 times the average value expected at 32x32 scale
- return choose_nzc_context(nzc_exp, NZC_T2_32X32, NZC_T1_32X32);
- break;
-
- case TX_16X16:
- assert((block & 15) == 0);
- if (block < 32) {
- int o = (block >> 4) & 1;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 12) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 13) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 14) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, 15);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 32] << 4;
- }
- if ((block & 31) == 0) {
- int o = block >> 5;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, 15);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 16] << 4;
- }
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
- break;
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (block < 16) {
- int o = block >> 3;
- int p = ((block >> 2) & 1) ? 14 : 12;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p) +
- get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o, p + 1);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 16] << 5;
- }
- if ((block & 15) == 0) {
- int o = block >> 5;
- int p = ((block >> 4) & 1) ? 11 : 3;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p) +
- get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1, p + 4);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
- break;
-
- case TX_4X4:
- if (block < 8) {
- int o = block >> 2;
- int p = block & 3;
- nzc_exp = get_nzc_4x4_y(cm, cur - mis + o, mb_row - 1, mb_col + o,
- 12 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 8] << 6);
- }
- if ((block & 7) == 0) {
- int o = block >> 5;
- int p = (block >> 3) & 3;
- nzc_exp += get_nzc_4x4_y(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- 3 + 4 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
- break;
-
- default:
- return 0;
- break;
- }
-}
-
-int vp9_get_nzc_context_y_mb16(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- assert(block < 16);
- switch (txfm_size) {
- case TX_16X16:
- assert(block == 0);
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 12) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 13) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 14) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, 15) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 3) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 7) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 11) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, 15);
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (block < 8) {
- int p = ((block >> 2) & 1) ? 14 : 12;
- nzc_exp =
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, p) +
- get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col, p + 1);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 8] << 5;
- }
- if ((block & 7) == 0) {
- int p = ((block >> 3) & 1) ? 11 : 3;
- nzc_exp +=
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, p) +
- get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1, p + 4);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (block < 4) {
- int p = block & 3;
- nzc_exp = get_nzc_4x4_y(cm, cur - mis, mb_row - 1, mb_col,
- 12 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 4] << 6);
- }
- if ((block & 3) == 0) {
- int p = (block >> 2) & 3;
- nzc_exp += get_nzc_4x4_y(cm, cur - 1, mb_row, mb_col - 1,
- 3 + 4 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- break;
- }
-}
-
-int vp9_get_nzc_context_uv_sb64(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- const int base = block - (block & 63);
- const int boff = (block & 63);
- const int base_mb16 = base >> 4;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- TX_SIZE txfm_size_uv;
-
- assert(block >= 256 && block < 384);
- txfm_size_uv = txfm_size;
-
- switch (txfm_size_uv) {
- case TX_32X32:
- assert(block == 256 || block == 320);
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 2, mb_row - 1, mb_col + 2,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 2, mb_row - 1, mb_col + 2,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 3, mb_row - 1, mb_col + 3,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 3, mb_row - 1, mb_col + 3,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + 2 * mis, mb_row + 2, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + 2 * mis, mb_row + 2, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + 3 * mis, mb_row + 3, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + 3 * mis, mb_row + 3, mb_col - 1,
- base_mb16 + 3);
- nzc_exp <<= 2;
- // Note nzc_exp is 64 times the average value expected at 32x32 scale
- return choose_nzc_context(nzc_exp, NZC_T2_32X32, NZC_T1_32X32);
-
- case TX_16X16:
- // uv txfm_size 16x16
- assert((block & 15) == 0);
- if (boff < 32) {
- int o = (boff >> 4) & 1;
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + o + 1, mb_row - 1, mb_col + o + 1,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o + 1, mb_row - 1, mb_col + o + 1,
- base_mb16 + 3);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 32] << 4;
- }
- if ((boff & 31) == 0) {
- int o = boff >> 5;
- nzc_exp +=
- get_nzc_4x4_uv(cm, cur - 1 + o * mis,
- mb_row + o, mb_col - 1, base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis,
- mb_row + o, mb_col - 1, base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis + mis,
- mb_row + o + 1, mb_col - 1, base_mb16 + 3);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 16] << 4;
- }
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (boff < 16) {
- int o = boff >> 2;
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 3);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 16] << 5;
- }
- if ((boff & 15) == 0) {
- int o = boff >> 4;
- nzc_exp +=
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 3);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (boff < 8) {
- int o = boff >> 1;
- int p = boff & 1;
- nzc_exp = get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 8] << 6);
- }
- if ((boff & 7) == 0) {
- int o = boff >> 4;
- int p = (boff >> 3) & 1;
- nzc_exp += get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1 + 2 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context_uv_sb32(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- const int base = block - (block & 15);
- const int boff = (block & 15);
- const int base_mb16 = base >> 2;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- TX_SIZE txfm_size_uv;
-
- assert(block >= 64 && block < 96);
- if (txfm_size == TX_32X32)
- txfm_size_uv = TX_16X16;
- else
- txfm_size_uv = txfm_size;
-
- switch (txfm_size_uv) {
- case TX_16X16:
- // uv txfm_size 16x16
- assert(block == 64 || block == 80);
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + 1, mb_row - 1, mb_col + 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row, mb_col - 1,
- base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + mis, mb_row + 1, mb_col - 1,
- base_mb16 + 3);
- nzc_exp <<= 1;
- // Note nzc_exp is 64 times the average value expected at 16x16 scale
- return choose_nzc_context(nzc_exp, NZC_T2_16X16, NZC_T1_16X16);
- break;
-
- case TX_8X8:
- assert((block & 3) == 0);
- if (boff < 8) {
- int o = boff >> 2;
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 3);
- } else {
- nzc_exp = cur->mbmi.nzcs[block - 8] << 5;
- }
- if ((boff & 7) == 0) {
- int o = boff >> 3;
- nzc_exp +=
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 3);
- } else {
- nzc_exp += cur->mbmi.nzcs[block - 4] << 5;
- }
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (boff < 4) {
- int o = boff >> 1;
- int p = boff & 1;
- nzc_exp = get_nzc_4x4_uv(cm, cur - mis + o, mb_row - 1, mb_col + o,
- base_mb16 + 2 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 4] << 6);
- }
- if ((boff & 3) == 0) {
- int o = boff >> 3;
- int p = (boff >> 2) & 1;
- nzc_exp += get_nzc_4x4_uv(cm, cur - 1 + o * mis, mb_row + o, mb_col - 1,
- base_mb16 + 1 + 2 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context_uv_mb16(VP9_COMMON *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block) {
- // returns an index in [0, MAX_NZC_CONTEXTS - 1] to reflect how busy
- // neighboring blocks are
- int mis = cm->mode_info_stride;
- int nzc_exp = 0;
- const int base = block - (block & 3);
- const int boff = (block & 3);
- const int base_mb16 = base;
- TX_SIZE txfm_size = cur->mbmi.txfm_size;
- TX_SIZE txfm_size_uv;
-
- assert(block >= 16 && block < 24);
- if (txfm_size == TX_16X16)
- txfm_size_uv = TX_8X8;
- else if (txfm_size == TX_8X8 &&
- (cur->mbmi.mode == I8X8_PRED || cur->mbmi.mode == SPLITMV))
- txfm_size_uv = TX_4X4;
- else
- txfm_size_uv = txfm_size;
-
- switch (txfm_size_uv) {
- case TX_8X8:
- assert((block & 3) == 0);
- nzc_exp =
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col, base_mb16 + 2) +
- get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col, base_mb16 + 3) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1, base_mb16 + 1) +
- get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1, base_mb16 + 3);
- // Note nzc_exp is 64 times the average value expected at 8x8 scale
- return choose_nzc_context(nzc_exp, NZC_T2_8X8, NZC_T1_8X8);
-
- case TX_4X4:
- if (boff < 2) {
- int p = boff & 1;
- nzc_exp = get_nzc_4x4_uv(cm, cur - mis, mb_row - 1, mb_col,
- base_mb16 + 2 + p);
- } else {
- nzc_exp = (cur->mbmi.nzcs[block - 2] << 6);
- }
- if ((boff & 1) == 0) {
- int p = (boff >> 1) & 1;
- nzc_exp += get_nzc_4x4_uv(cm, cur - 1, mb_row, mb_col - 1,
- base_mb16 + 1 + 2 * p);
- } else {
- nzc_exp += (cur->mbmi.nzcs[block - 1] << 6);
- }
- nzc_exp >>= 1;
- // Note nzc_exp is 64 times the average value expected at 4x4 scale
- return choose_nzc_context(nzc_exp, NZC_T2_4X4, NZC_T1_4X4);
-
- default:
- return 0;
- }
-}
-
-int vp9_get_nzc_context(VP9_COMMON *cm, MACROBLOCKD *xd, int block) {
- if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
- assert(block < 384);
- if (block < 256)
- return vp9_get_nzc_context_y_sb64(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- else
- return vp9_get_nzc_context_uv_sb64(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- } else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
- assert(block < 96);
- if (block < 64)
- return vp9_get_nzc_context_y_sb32(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- else
- return vp9_get_nzc_context_uv_sb32(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- } else {
- assert(block < 64);
- if (block < 16)
- return vp9_get_nzc_context_y_mb16(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- else
- return vp9_get_nzc_context_uv_mb16(cm, xd->mode_info_context,
- get_mb_row(xd), get_mb_col(xd), block);
- }
-}
-
-static void update_nzc(VP9_COMMON *cm,
- uint16_t nzc,
- int nzc_context,
- TX_SIZE tx_size,
- int ref,
- int type) {
- int e, c;
- if (!get_nzc_used(tx_size)) return;
- c = codenzc(nzc);
- if (tx_size == TX_32X32)
- cm->fc.nzc_counts_32x32[nzc_context][ref][type][c]++;
- else if (tx_size == TX_16X16)
- cm->fc.nzc_counts_16x16[nzc_context][ref][type][c]++;
- else if (tx_size == TX_8X8)
- cm->fc.nzc_counts_8x8[nzc_context][ref][type][c]++;
- else if (tx_size == TX_4X4)
- cm->fc.nzc_counts_4x4[nzc_context][ref][type][c]++;
- else
- assert(0);
-
- if ((e = vp9_extranzcbits[c])) {
- int x = nzc - vp9_basenzcvalue[c];
- while (e--) {
- int b = (x >> e) & 1;
- cm->fc.nzc_pcat_counts[nzc_context][c - NZC_TOKENS_NOEXTRA][e][b]++;
- }
- }
-}
-
-static void update_nzcs_sb64(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 256; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 0);
- }
- for (j = 256; j < 384; j += 64) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 1);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 256; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0);
- }
- for (j = 256; j < 384; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 256; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0);
- }
- for (j = 256; j < 384; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 256; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0);
- }
- for (j = 256; j < 384; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb64(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- break;
-
- default:
- break;
- }
}
-static void update_nzcs_sb32(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_32X32:
- for (j = 0; j < 64; j += 64) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_32X32, ref, 0);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1);
- }
- break;
-
- case TX_16X16:
- for (j = 0; j < 64; j += 16) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0);
- }
- for (j = 64; j < 96; j += 16) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 1);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 64; j += 4) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0);
- }
- for (j = 64; j < 96; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 64; ++j) {
- nzc_context = vp9_get_nzc_context_y_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0);
- }
- for (j = 64; j < 96; ++j) {
- nzc_context = vp9_get_nzc_context_uv_sb32(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- break;
-
- default:
- break;
- }
-}
-
-static void update_nzcs_mb16(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- MODE_INFO *m = xd->mode_info_context;
- MB_MODE_INFO *const mi = &m->mbmi;
- int j, nzc_context;
- const int ref = m->mbmi.ref_frame != INTRA_FRAME;
-
- assert(mb_col == get_mb_col(xd));
- assert(mb_row == get_mb_row(xd));
-
- if (mi->mb_skip_coeff)
- return;
-
- switch (mi->txfm_size) {
- case TX_16X16:
- for (j = 0; j < 16; j += 16) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_16X16, ref, 0);
- }
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- break;
-
- case TX_8X8:
- for (j = 0; j < 16; j += 4) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 0);
- }
- if (mi->mode == I8X8_PRED || mi->mode == SPLITMV) {
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- } else {
- for (j = 16; j < 24; j += 4) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_8X8, ref, 1);
- }
- }
- break;
-
- case TX_4X4:
- for (j = 0; j < 16; ++j) {
- nzc_context = vp9_get_nzc_context_y_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 0);
- }
- for (j = 16; j < 24; ++j) {
- nzc_context = vp9_get_nzc_context_uv_mb16(cm, m, mb_row, mb_col, j);
- update_nzc(cm, m->mbmi.nzcs[j], nzc_context, TX_4X4, ref, 1);
- }
- break;
-
- default:
- break;
- }
-}
-
-void vp9_update_nzc_counts(VP9_COMMON *cm,
- MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64)
- update_nzcs_sb64(cm, xd, mb_row, mb_col);
- else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32)
- update_nzcs_sb32(cm, xd, mb_row, mb_col);
- else
- update_nzcs_mb16(cm, xd, mb_row, mb_col);
-}
-#endif // CONFIG_CODE_NONZEROCOUNT
-
// #define COEF_COUNT_TESTING
#define COEF_COUNT_SAT 24
@@ -2777,111 +1588,105 @@ void vp9_adapt_coef_probs(VP9_COMMON *cm) {
count_sat, update_factor);
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void adapt_nzc_probs_common(VP9_COMMON *cm,
+#if CONFIG_CODE_ZEROGROUP
+OrientationType vp9_get_orientation(int rc, TX_SIZE tx_size) {
+ int i = rc >> (tx_size + 2);
+ int j = rc & ((4 << tx_size) - 1);
+ if (i > 2 * j)
+ return VERTICAL;
+ else if (j > 2 * i)
+ return HORIZONTAL;
+ else
+ return DIAGONAL;
+ /*
+ if (i == 0 && j == 0) return DIAGONAL;
+ while (i > 1 || j > 1) {
+ i >>= 1;
+ j >>= 1;
+ }
+ if (i == 0 && j == 1)
+ return HORIZONTAL; // horizontal
+ else if (i == 1 && j == 1)
+ return DIAGONAL; // diagonal
+ else if (i == 1 && j == 0)
+ return VERTICAL; // vertical
+ assert(0);
+ */
+}
+
+int vp9_use_eoo(int c, int seg_eob, const int *scan,
+ TX_SIZE tx_size, int *is_last_zero, int *is_eoo) {
+ // NOTE: returning 0 from this function will turn off eoo symbols
+ // For instance we can experiment with turning eoo off for smaller blocks
+ // and/or lower bands
+ int o = vp9_get_orientation(scan[c], tx_size);
+ int band = get_coef_band(scan, tx_size, c);
+ int use_eoo = (!is_last_zero[o] &&
+ !is_eoo[o] &&
+ band <= ZPC_EOO_BAND_UPPER &&
+ band >= ZPC_EOO_BAND_LOWER &&
+ get_zpc_used(tx_size) &&
+ seg_eob - c > (ZPC_USEEOO_THRESH << tx_size) &&
+ is_eoo[0] + is_eoo[1] + is_eoo[2] < 2);
+ return use_eoo;
+}
+
+int vp9_is_eoo(int c, int eob, const int *scan, TX_SIZE tx_size,
+ const int16_t *qcoeff_ptr, int *last_nz_pos) {
+ int rc = scan[c];
+ int o = vp9_get_orientation(rc, tx_size);
+ int eoo = c > last_nz_pos[o];
+ return eoo;
+}
+
+static void adapt_zpc_probs_common(VP9_COMMON *cm,
TX_SIZE tx_size,
int count_sat,
int update_factor) {
- int c, r, b, n;
+ int r, b, p, n;
int count, factor;
- unsigned int nzc_branch_ct[NZC32X32_NODES][2];
- vp9_prob nzc_probs[NZC32X32_NODES];
- int tokens, nodes;
- const vp9_tree_index *nzc_tree;
- vp9_prob *dst_nzc_probs;
- vp9_prob *pre_nzc_probs;
- unsigned int *nzc_counts;
-
- if (!get_nzc_used(tx_size)) return;
+ vp9_zpc_probs *zpc_probs;
+ vp9_zpc_probs *pre_zpc_probs;
+ vp9_zpc_count *zpc_counts;
+ if (!get_zpc_used(tx_size)) return;
if (tx_size == TX_32X32) {
- tokens = NZC32X32_TOKENS;
- nzc_tree = vp9_nzc32x32_tree;
- dst_nzc_probs = cm->fc.nzc_probs_32x32[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_32x32[0][0][0];
- nzc_counts = cm->fc.nzc_counts_32x32[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_32x32;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_32x32;
+ zpc_counts = &cm->fc.zpc_counts_32x32;
} else if (tx_size == TX_16X16) {
- tokens = NZC16X16_TOKENS;
- nzc_tree = vp9_nzc16x16_tree;
- dst_nzc_probs = cm->fc.nzc_probs_16x16[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_16x16[0][0][0];
- nzc_counts = cm->fc.nzc_counts_16x16[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_16x16;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_16x16;
+ zpc_counts = &cm->fc.zpc_counts_16x16;
} else if (tx_size == TX_8X8) {
- tokens = NZC8X8_TOKENS;
- nzc_tree = vp9_nzc8x8_tree;
- dst_nzc_probs = cm->fc.nzc_probs_8x8[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_8x8[0][0][0];
- nzc_counts = cm->fc.nzc_counts_8x8[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_8x8;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_8x8;
+ zpc_counts = &cm->fc.zpc_counts_8x8;
} else {
- nzc_tree = vp9_nzc4x4_tree;
- tokens = NZC4X4_TOKENS;
- dst_nzc_probs = cm->fc.nzc_probs_4x4[0][0][0];
- pre_nzc_probs = cm->fc.pre_nzc_probs_4x4[0][0][0];
- nzc_counts = cm->fc.nzc_counts_4x4[0][0][0];
+ zpc_probs = &cm->fc.zpc_probs_4x4;
+ pre_zpc_probs = &cm->fc.pre_zpc_probs_4x4;
+ zpc_counts = &cm->fc.zpc_counts_4x4;
}
- nodes = tokens - 1;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c)
- for (r = 0; r < REF_TYPES; ++r)
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- int offset_tokens = offset * tokens;
- vp9_tree_probs_from_distribution(nzc_tree,
- nzc_probs, nzc_branch_ct,
- nzc_counts + offset_tokens, 0);
- for (n = 0; n < nodes; ++n) {
- count = nzc_branch_ct[n][0] + nzc_branch_ct[n][1];
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob prob = get_binary_prob((*zpc_counts)[r][b][p][n][0],
+ (*zpc_counts)[r][b][p][n][1]);
+ count = (*zpc_counts)[r][b][p][n][0] + (*zpc_counts)[r][b][p][n][1];
count = count > count_sat ? count_sat : count;
factor = (update_factor * count / count_sat);
- dst_nzc_probs[offset_nodes + n] =
- weighted_prob(pre_nzc_probs[offset_nodes + n],
- nzc_probs[n], factor);
+ (*zpc_probs)[r][b][p][n] = weighted_prob(
+ (*pre_zpc_probs)[r][b][p][n], prob, factor);
}
}
-}
-
-static void adapt_nzc_pcat(VP9_COMMON *cm, int count_sat, int update_factor) {
- int c, t;
- int count, factor;
- if (!(get_nzc_used(TX_4X4) || get_nzc_used(TX_8X8) ||
- get_nzc_used(TX_16X16) || get_nzc_used(TX_32X32)))
- return;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- int b;
- for (b = 0; b < bits; ++b) {
- vp9_prob prob = get_binary_prob(cm->fc.nzc_pcat_counts[c][t][b][0],
- cm->fc.nzc_pcat_counts[c][t][b][1]);
- count = cm->fc.nzc_pcat_counts[c][t][b][0] +
- cm->fc.nzc_pcat_counts[c][t][b][1];
- count = count > count_sat ? count_sat : count;
- factor = (update_factor * count / count_sat);
- cm->fc.nzc_pcat_probs[c][t][b] = weighted_prob(
- cm->fc.pre_nzc_pcat_probs[c][t][b], prob, factor);
- }
}
}
}
-// #define NZC_COUNT_TESTING
-void vp9_adapt_nzc_probs(VP9_COMMON *cm) {
+// #define ZPC_COUNT_TESTING
+void vp9_adapt_zpc_probs(VP9_COMMON *cm) {
int count_sat;
int update_factor; /* denominator 256 */
-#ifdef NZC_COUNT_TESTING
- int c, r, b, t;
- printf("\n");
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c)
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- printf(" {");
- for (t = 0; t < NZC4X4_TOKENS; ++t) {
- printf(" %d,", cm->fc.nzc_counts_4x4[c][r][b][t]);
- }
- printf("}\n");
- }
- printf("\n");
- }
-#endif
if (cm->frame_type == KEY_FRAME) {
update_factor = COEF_MAX_UPDATE_FACTOR_KEY;
@@ -2894,10 +1699,9 @@ void vp9_adapt_nzc_probs(VP9_COMMON *cm) {
count_sat = COEF_COUNT_SAT;
}
- adapt_nzc_probs_common(cm, TX_4X4, count_sat, update_factor);
- adapt_nzc_probs_common(cm, TX_8X8, count_sat, update_factor);
- adapt_nzc_probs_common(cm, TX_16X16, count_sat, update_factor);
- adapt_nzc_probs_common(cm, TX_32X32, count_sat, update_factor);
- adapt_nzc_pcat(cm, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_4X4, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_8X8, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_16X16, count_sat, update_factor);
+ adapt_zpc_probs_common(cm, TX_32X32, count_sat, update_factor);
}
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
diff --git a/vp9/common/vp9_entropy.h b/vp9/common/vp9_entropy.h
index db167420c..123b5e289 100644
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -45,11 +45,11 @@ extern struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
typedef struct {
vp9_tree_p tree;
const vp9_prob *prob;
- int Len;
+ int len;
int base_val;
-} vp9_extra_bit_struct;
+} vp9_extra_bit;
-extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
+extern vp9_extra_bit vp9_extra_bits[12]; /* indexed by token value */
#define PROB_UPDATE_BASELINE_COST 7
@@ -167,88 +167,61 @@ void vp9_get_model_distribution(vp9_prob model, vp9_prob *tree_probs,
int b, int r);
#endif // CONFIG_MODELCOEFPROB
-#if CONFIG_CODE_NONZEROCOUNT
-/* Alphabet for number of non-zero symbols in block */
-#define NZC_0 0 /* Used for all blocks */
-#define NZC_1 1 /* Used for all blocks */
-#define NZC_2 2 /* Used for all blocks */
-#define NZC_3TO4 3 /* Used for all blocks */
-#define NZC_5TO8 4 /* Used for all blocks */
-#define NZC_9TO16 5 /* Used for all blocks */
-#define NZC_17TO32 6 /* Used for 8x8 and larger blocks */
-#define NZC_33TO64 7 /* Used for 8x8 and larger blocks */
-#define NZC_65TO128 8 /* Used for 16x16 and larger blocks */
-#define NZC_129TO256 9 /* Used for 16x16 and larger blocks */
-#define NZC_257TO512 10 /* Used for 32x32 and larger blocks */
-#define NZC_513TO1024 11 /* Used for 32x32 and larger blocks */
-
-/* Number of tokens for each block size */
-#define NZC4X4_TOKENS 6
-#define NZC8X8_TOKENS 8
-#define NZC16X16_TOKENS 10
-#define NZC32X32_TOKENS 12
-
-/* Number of nodes for each block size */
-#define NZC4X4_NODES 5
-#define NZC8X8_NODES 7
-#define NZC16X16_NODES 9
-#define NZC32X32_NODES 11
-
-/* Max number of tokens with extra bits */
-#define NZC_TOKENS_EXTRA 9
-
-/* Max number of extra bits */
-#define NZC_BITS_EXTRA 9
-
-/* Tokens without extra bits */
-#define NZC_TOKENS_NOEXTRA (NZC32X32_TOKENS - NZC_TOKENS_EXTRA)
-
-#define MAX_NZC_CONTEXTS 3
-
-/* whether to update extra bit probabilities */
-#define NZC_PCAT_UPDATE
-
-/* nzc trees */
-extern const vp9_tree_index vp9_nzc4x4_tree[];
-extern const vp9_tree_index vp9_nzc8x8_tree[];
-extern const vp9_tree_index vp9_nzc16x16_tree[];
-extern const vp9_tree_index vp9_nzc32x32_tree[];
-
-/* nzc encodings */
-extern struct vp9_token vp9_nzc4x4_encodings[NZC4X4_TOKENS];
-extern struct vp9_token vp9_nzc8x8_encodings[NZC8X8_TOKENS];
-extern struct vp9_token vp9_nzc16x16_encodings[NZC16X16_TOKENS];
-extern struct vp9_token vp9_nzc32x32_encodings[NZC32X32_TOKENS];
-
-#define codenzc(x) (\
- (x) <= 3 ? (x) : (x) <= 4 ? 3 : (x) <= 8 ? 4 : \
- (x) <= 16 ? 5 : (x) <= 32 ? 6 : (x) <= 64 ? 7 :\
- (x) <= 128 ? 8 : (x) <= 256 ? 9 : (x) <= 512 ? 10 : 11)
-
-int vp9_get_nzc_context_y_sb64(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_y_sb32(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_y_mb16(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_uv_sb64(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_uv_sb32(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context_uv_mb16(struct VP9Common *cm, MODE_INFO *cur,
- int mb_row, int mb_col, int block);
-int vp9_get_nzc_context(struct VP9Common *cm, MACROBLOCKD *xd, int block);
-void vp9_update_nzc_counts(struct VP9Common *cm, MACROBLOCKD *xd,
- int mb_row, int mb_col);
-void vp9_adapt_nzc_probs(struct VP9Common *cm);
-
-/* Extra bits array */
-extern const int vp9_extranzcbits[NZC32X32_TOKENS];
-
-/* Base nzc values */
-extern const int vp9_basenzcvalue[NZC32X32_TOKENS];
-
-#endif // CONFIG_CODE_NONZEROCOUNT
+#if CONFIG_CODE_ZEROGROUP
+
+#define ZPC_STATS
+
+typedef enum {
+ HORIZONTAL = 0,
+ DIAGONAL,
+ VERTICAL,
+} OrientationType;
+
+/* Note EOB should become part of this symbol eventually,
+ * but holding off on this for now because that is a major
+ * change in the rest of the codebase */
+
+#define ZPC_ISOLATED (MAX_ENTROPY_TOKENS + 0) /* Isolated zero */
+
+/* ZPC_EOORIENT: All remaining coefficients in the same orientation are 0.
+ * In other words all remaining coeffs in the current subband, and all
+ * children of the current subband are zero. Subbands are defined by
+ * dyadic partitioning in the coeff domain */
+#define ZPC_EOORIENT (MAX_ENTROPY_TOKENS + 1) /* End of Orientation */
+
+/* Band limits over which the eoo bit is sent */
+#define ZPC_EOO_BAND_LOWER 0
+#define ZPC_EOO_BAND_UPPER 5
+
+#define USE_ZPC_EOORIENT 1 /* 0: not used */
+ /* 1: used */
+#define ZPC_NODES 1
+
+#define UNKNOWN_TOKEN 255 /* Not signalled, encoder only */
+
+#define ZPC_BANDS 3 /* context bands for izr */
+#define ZPC_PTOKS 3 /* context pt for zpcs */
+
+#define coef_to_zpc_band(b) ((b) >> 1)
+#define coef_to_zpc_ptok(p) ((p) > 2 ? 2 : (p))
+
+typedef vp9_prob vp9_zpc_probs[REF_TYPES][ZPC_BANDS]
+ [ZPC_PTOKS][ZPC_NODES];
+typedef unsigned int vp9_zpc_count[REF_TYPES][ZPC_BANDS]
+ [ZPC_PTOKS][ZPC_NODES][2];
+
+OrientationType vp9_get_orientation(int rc, TX_SIZE tx_size);
+int vp9_use_eoo(int c, int eob, const int *scan, TX_SIZE tx_size,
+ int *is_last_zero, int *is_eoo);
+int vp9_is_eoo(int c, int eob, const int *scan, TX_SIZE tx_size,
+ const int16_t *qcoeff_ptr, int *last_nz_pos);
+
+#define ZPC_USEEOO_THRESH 4
+#define ZPC_ZEROSSAVED_EOO 7 /* encoder only */
+
+void vp9_adapt_zpc_probs(struct VP9Common *cm);
+
+#endif // CONFIG_CODE_ZEROGROUP
#include "vp9/common/vp9_coefupdateprobs.h"
diff --git a/vp9/common/vp9_mbpitch.c b/vp9/common/vp9_mbpitch.c
index 00fe9aa15..d5f104d4d 100644
--- a/vp9/common/vp9_mbpitch.c
+++ b/vp9/common/vp9_mbpitch.c
@@ -36,32 +36,32 @@ static void setup_macroblock(MACROBLOCKD *mb, BLOCKSET bs) {
int i, stride;
if (bs == DEST) {
- y = &mb->dst.y_buffer;
- u = &mb->dst.u_buffer;
- v = &mb->dst.v_buffer;
+ y = &mb->plane[0].dst.buf;
+ u = &mb->plane[1].dst.buf;
+ v = &mb->plane[2].dst.buf;
y2 = NULL;
u2 = NULL;
v2 = NULL;
} else {
- y = &mb->pre.y_buffer;
- u = &mb->pre.u_buffer;
- v = &mb->pre.v_buffer;
+ y = &mb->plane[0].pre[0].buf;
+ u = &mb->plane[1].pre[0].buf;
+ v = &mb->plane[2].pre[0].buf;
- y2 = &mb->second_pre.y_buffer;
- u2 = &mb->second_pre.u_buffer;
- v2 = &mb->second_pre.v_buffer;
+ y2 = &mb->plane[0].pre[1].buf;
+ u2 = &mb->plane[1].pre[1].buf;
+ v2 = &mb->plane[2].pre[1].buf;
}
// luma
- stride = mb->dst.y_stride;
+ stride = mb->plane[0].dst.stride;
for (i = 0; i < 16; ++i) {
const int offset = (i >> 2) * 4 * stride + (i & 3) * 4;
setup_block(&blockd[i], y, y2, stride, offset, bs);
}
// chroma
- stride = mb->dst.uv_stride;
+ stride = mb->plane[1].dst.stride;
for (i = 16; i < 20; i++) {
const int offset = ((i - 16) >> 1) * 4 * stride + (i & 1) * 4;
setup_block(&blockd[i], u, u2, stride, offset, bs);
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index 7f2ae518b..f9d02e3ca 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -74,17 +74,11 @@ typedef struct frame_contexts {
vp9_coeff_probs coef_probs_8x8[BLOCK_TYPES];
vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES];
vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES];
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_prob nzc_probs_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_NODES];
- vp9_prob nzc_probs_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_NODES];
- vp9_prob nzc_probs_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_NODES];
- vp9_prob nzc_probs_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_NODES];
- vp9_prob nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_probs zpc_probs_4x4;
+ vp9_zpc_probs zpc_probs_8x8;
+ vp9_zpc_probs zpc_probs_16x16;
+ vp9_zpc_probs zpc_probs_32x32;
#endif
nmv_context nmvc;
@@ -110,17 +104,11 @@ typedef struct frame_contexts {
vp9_coeff_probs pre_coef_probs_8x8[BLOCK_TYPES];
vp9_coeff_probs pre_coef_probs_16x16[BLOCK_TYPES];
vp9_coeff_probs pre_coef_probs_32x32[BLOCK_TYPES];
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_prob pre_nzc_probs_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_NODES];
- vp9_prob pre_nzc_probs_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_NODES];
- vp9_prob pre_nzc_probs_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_NODES];
- vp9_prob pre_nzc_probs_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_NODES];
- vp9_prob pre_nzc_pcat_probs[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_probs pre_zpc_probs_4x4;
+ vp9_zpc_probs pre_zpc_probs_8x8;
+ vp9_zpc_probs pre_zpc_probs_16x16;
+ vp9_zpc_probs pre_zpc_probs_32x32;
#endif
vp9_coeff_count coef_counts_4x4[BLOCK_TYPES];
@@ -130,17 +118,11 @@ typedef struct frame_contexts {
unsigned int eob_branch_counts[TX_SIZE_MAX_SB][BLOCK_TYPES][REF_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS];
-#if CONFIG_CODE_NONZEROCOUNT
- unsigned int nzc_counts_4x4[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC4X4_TOKENS];
- unsigned int nzc_counts_8x8[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC8X8_TOKENS];
- unsigned int nzc_counts_16x16[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC16X16_TOKENS];
- unsigned int nzc_counts_32x32[MAX_NZC_CONTEXTS][REF_TYPES][BLOCK_TYPES]
- [NZC32X32_TOKENS];
- unsigned int nzc_pcat_counts[MAX_NZC_CONTEXTS]
- [NZC_TOKENS_EXTRA][NZC_BITS_EXTRA][2];
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zpc_count zpc_counts_4x4;
+ vp9_zpc_count zpc_counts_8x8;
+ vp9_zpc_count zpc_counts_16x16;
+ vp9_zpc_count zpc_counts_32x32;
#endif
nmv_context_counts NMVcount;
@@ -378,4 +360,8 @@ static int get_mb_row(const MACROBLOCKD *xd) {
static int get_mb_col(const MACROBLOCKD *xd) {
return ((-xd->mb_to_left_edge) >> 7);
}
+
+static int get_token_alloc(int mb_rows, int mb_cols) {
+ return mb_rows * mb_cols * (24 * 16 + 4);
+}
#endif // VP9_COMMON_VP9_ONYXC_INT_H_
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index ffdfa6782..e110cff44 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -9,6 +9,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <limits.h>
+
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
@@ -225,30 +227,26 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
// peredict various bitstream signals.
// Macroblock segment id prediction function
-unsigned char vp9_get_pred_mb_segid(const VP9_COMMON *const cm,
- const MACROBLOCKD *const xd, int MbIndex) {
- // Currently the prediction for the macroblock segment ID is
- // the value stored for this macroblock in the previous frame.
- if (!xd->mode_info_context->mbmi.sb_type) {
- return cm->last_frame_seg_map[MbIndex];
- } else {
- BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
- const int bh = 1 << mb_height_log2(bsize);
- const int bw = 1 << mb_width_log2(bsize);
- const int mb_col = MbIndex % cm->mb_cols;
- const int mb_row = MbIndex / cm->mb_cols;
- const int x_mbs = MIN(bw, cm->mb_cols - mb_col);
- const int y_mbs = MIN(bh, cm->mb_rows - mb_row);
+int vp9_get_pred_mb_segid(VP9_COMMON *cm, BLOCK_SIZE_TYPE sb_type,
+ int mb_row, int mb_col) {
+ const int mb_index = mb_row * cm->mb_cols + mb_col;
+ if (sb_type) {
+ const int bw = 1 << mb_width_log2(sb_type);
+ const int bh = 1 << mb_height_log2(sb_type);
+ const int ymbs = MIN(cm->mb_rows - mb_row, bh);
+ const int xmbs = MIN(cm->mb_cols - mb_col, bw);
+ int segment_id = INT_MAX;
int x, y;
- unsigned seg_id = -1;
- for (y = mb_row; y < mb_row + y_mbs; y++) {
- for (x = mb_col; x < mb_col + x_mbs; x++) {
- seg_id = MIN(seg_id, cm->last_frame_seg_map[cm->mb_cols * y + x]);
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++) {
+ const int index = mb_index + (y * cm->mb_cols + x);
+ segment_id = MIN(segment_id, cm->last_frame_seg_map[index]);
}
}
-
- return seg_id;
+ return segment_id;
+ } else {
+ return cm->last_frame_seg_map[mb_index];
}
}
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index 49dcf0a4c..222d5f3d0 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -43,9 +43,8 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
unsigned char pred_flag);
-unsigned char vp9_get_pred_mb_segid(const VP9_COMMON *const cm,
- const MACROBLOCKD *const xd,
- int MbIndex);
+int vp9_get_pred_mb_segid(VP9_COMMON *cm, BLOCK_SIZE_TYPE sb_type,
+ int mb_row, int mb_col);
MV_REFERENCE_FRAME vp9_get_pred_ref(const VP9_COMMON *const cm,
const MACROBLOCKD *const xd);
diff --git a/vp9/common/vp9_recon.c b/vp9/common/vp9_recon.c
index fae35844d..41194504b 100644
--- a/vp9/common/vp9_recon.c
+++ b/vp9/common/vp9_recon.c
@@ -14,103 +14,64 @@
#include "vp9/common/vp9_blockd.h"
static INLINE void recon(int rows, int cols,
- const uint8_t *pred_ptr, int pred_stride,
const int16_t *diff_ptr, int diff_stride,
uint8_t *dst_ptr, int dst_stride) {
int r, c;
for (r = 0; r < rows; r++) {
for (c = 0; c < cols; c++)
- dst_ptr[c] = clip_pixel(diff_ptr[c] + pred_ptr[c]);
+ dst_ptr[c] = clip_pixel(diff_ptr[c] + dst_ptr[c]);
dst_ptr += dst_stride;
diff_ptr += diff_stride;
- pred_ptr += pred_stride;
}
}
void vp9_recon_b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 4, pred_ptr, stride, diff_ptr, 16, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 4, diff_ptr, 16, dst_ptr, stride);
}
void vp9_recon_uv_b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 4, pred_ptr, stride, diff_ptr, 8, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 4, diff_ptr, 8, dst_ptr, stride);
}
void vp9_recon4b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 16, pred_ptr, stride, diff_ptr, 16, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 16, diff_ptr, 16, dst_ptr, stride);
}
void vp9_recon2b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
- recon(4, 8, pred_ptr, stride, diff_ptr, 8, dst_ptr, stride);
+ assert(pred_ptr == dst_ptr);
+ recon(4, 8, diff_ptr, 8, dst_ptr, stride);
}
-void vp9_recon_sby_s_c(MACROBLOCKD *mb, uint8_t *dst,
- BLOCK_SIZE_TYPE bsize) {
- const int bw = 16 << mb_width_log2(bsize), bh = 16 << mb_height_log2(bsize);
- int x, y;
- const int stride = mb->block[0].dst_stride;
- const int16_t *diff = mb->plane[0].diff;
-
- for (y = 0; y < bh; y++) {
- for (x = 0; x < bw; x++)
- dst[x] = clip_pixel(dst[x] + diff[x]);
-
- dst += stride;
- diff += bw;
- }
+static void recon_plane(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize, int plane) {
+ const int bw = 4 << (b_width_log2(bsize) - xd->plane[plane].subsampling_x);
+ const int bh = 4 << (b_height_log2(bsize) - xd->plane[plane].subsampling_y);
+ recon(bh, bw,
+ xd->plane[plane].diff, bw,
+ xd->plane[plane].dst.buf, xd->plane[plane].dst.stride);
}
-void vp9_recon_sbuv_s_c(MACROBLOCKD *mb, uint8_t *u_dst, uint8_t *v_dst,
- BLOCK_SIZE_TYPE bsize) {
- const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
- const int bw = 8 << bwl, bh = 8 << bhl;
- int x, y;
- const int stride = mb->block[16].dst_stride;
- const int16_t *u_diff = mb->plane[1].diff;
- const int16_t *v_diff = mb->plane[2].diff;
-
- for (y = 0; y < bh; y++) {
- for (x = 0; x < bw; x++) {
- u_dst[x] = clip_pixel(u_dst[x] + u_diff[x]);
- v_dst[x] = clip_pixel(v_dst[x] + v_diff[x]);
- }
-
- u_dst += stride;
- v_dst += stride;
- u_diff += bw;
- v_diff += bw;
- }
+void vp9_recon_sby_c(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
+ recon_plane(mb, bsize, 0);
}
-void vp9_recon_mby_c(MACROBLOCKD *xd) {
+void vp9_recon_sbuv_c(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
int i;
- for (i = 0; i < 16; i += 4) {
- BLOCKD *b = &xd->block[i];
-
- vp9_recon4b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ recon_plane(mb, bsize, i);
}
-void vp9_recon_mb_c(MACROBLOCKD *xd) {
- int i;
-
- for (i = 0; i < 16; i += 4) {
- BLOCKD *b = &xd->block[i];
- vp9_recon4b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
-
- for (i = 16; i < 24; i += 2) {
- BLOCKD *b = &xd->block[i];
- vp9_recon2b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
+void vp9_recon_sb_c(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
+ vp9_recon_sby(xd, bsize);
+ vp9_recon_sbuv(xd, bsize);
}
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 549993200..80eb2c690 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -57,127 +57,6 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
// applied in one direction only, and not at all for 0,0, seems to give the
// best quality, but it may be worth trying an additional mode that does
// do the filtering on full-pel.
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
- if (scale->x_step_q4 == 16) {
- if (scale->y_step_q4 == 16) {
- // No scaling in either direction.
- scale->predict[0][0][0] = vp9_convolve_copy;
- scale->predict[0][0][1] = vp9_convolve_1by8;
- scale->predict[0][0][2] = vp9_convolve_qtr;
- scale->predict[0][0][3] = vp9_convolve_3by8;
- scale->predict[0][0][4] = vp9_convolve_avg;
- scale->predict[0][0][5] = vp9_convolve_5by8;
- scale->predict[0][0][6] = vp9_convolve_3qtr;
- scale->predict[0][0][7] = vp9_convolve_7by8;
- scale->predict[0][1][0] = vp9_convolve8_vert;
- scale->predict[0][1][1] = vp9_convolve8_1by8_vert;
- scale->predict[0][1][2] = vp9_convolve8_qtr_vert;
- scale->predict[0][1][3] = vp9_convolve8_3by8_vert;
- scale->predict[0][1][4] = vp9_convolve8_avg_vert;
- scale->predict[0][1][5] = vp9_convolve8_5by8_vert;
- scale->predict[0][1][6] = vp9_convolve8_3qtr_vert;
- scale->predict[0][1][7] = vp9_convolve8_7by8_vert;
- scale->predict[1][0][0] = vp9_convolve8_horiz;
- scale->predict[1][0][1] = vp9_convolve8_1by8_horiz;
- scale->predict[1][0][2] = vp9_convolve8_qtr_horiz;
- scale->predict[1][0][3] = vp9_convolve8_3by8_horiz;
- scale->predict[1][0][4] = vp9_convolve8_avg_horiz;
- scale->predict[1][0][5] = vp9_convolve8_5by8_horiz;
- scale->predict[1][0][6] = vp9_convolve8_3qtr_horiz;
- scale->predict[1][0][7] = vp9_convolve8_7by8_horiz;
- } else {
- // No scaling in x direction. Must always scale in the y direction.
- scale->predict[0][0][0] = vp9_convolve8_vert;
- scale->predict[0][0][1] = vp9_convolve8_1by8_vert;
- scale->predict[0][0][2] = vp9_convolve8_qtr_vert;
- scale->predict[0][0][3] = vp9_convolve8_3by8_vert;
- scale->predict[0][0][4] = vp9_convolve8_avg_vert;
- scale->predict[0][0][5] = vp9_convolve8_5by8_vert;
- scale->predict[0][0][6] = vp9_convolve8_3qtr_vert;
- scale->predict[0][0][7] = vp9_convolve8_7by8_vert;
- scale->predict[0][1][0] = vp9_convolve8_vert;
- scale->predict[0][1][1] = vp9_convolve8_1by8_vert;
- scale->predict[0][1][2] = vp9_convolve8_qtr_vert;
- scale->predict[0][1][3] = vp9_convolve8_3by8_vert;
- scale->predict[0][1][4] = vp9_convolve8_avg_vert;
- scale->predict[0][1][5] = vp9_convolve8_5by8_vert;
- scale->predict[0][1][6] = vp9_convolve8_3qtr_vert;
- scale->predict[0][1][7] = vp9_convolve8_7by8_vert;
- scale->predict[1][0][0] = vp9_convolve8;
- scale->predict[1][0][1] = vp9_convolve8_1by8;
- scale->predict[1][0][2] = vp9_convolve8_qtr;
- scale->predict[1][0][3] = vp9_convolve8_3by8;
- scale->predict[1][0][4] = vp9_convolve8_avg;
- scale->predict[1][0][5] = vp9_convolve8_5by8;
- scale->predict[1][0][6] = vp9_convolve8_3qtr;
- scale->predict[1][0][7] = vp9_convolve8_7by8;
- }
- } else {
- if (scale->y_step_q4 == 16) {
- // No scaling in the y direction. Must always scale in the x direction.
- scale->predict[0][0][0] = vp9_convolve8_horiz;
- scale->predict[0][0][1] = vp9_convolve8_1by8_horiz;
- scale->predict[0][0][2] = vp9_convolve8_qtr_horiz;
- scale->predict[0][0][3] = vp9_convolve8_3by8_horiz;
- scale->predict[0][0][4] = vp9_convolve8_avg_horiz;
- scale->predict[0][0][5] = vp9_convolve8_5by8_horiz;
- scale->predict[0][0][6] = vp9_convolve8_3qtr_horiz;
- scale->predict[0][0][7] = vp9_convolve8_7by8_horiz;
- scale->predict[0][1][0] = vp9_convolve8;
- scale->predict[0][1][1] = vp9_convolve8_1by8;
- scale->predict[0][1][2] = vp9_convolve8_qtr;
- scale->predict[0][1][3] = vp9_convolve8_3by8;
- scale->predict[0][1][4] = vp9_convolve8_avg;
- scale->predict[0][1][5] = vp9_convolve8_5by8;
- scale->predict[0][1][6] = vp9_convolve8_3qtr;
- scale->predict[0][1][7] = vp9_convolve8_7by8;
- scale->predict[1][0][0] = vp9_convolve8_horiz;
- scale->predict[1][0][1] = vp9_convolve8_1by8_horiz;
- scale->predict[1][0][2] = vp9_convolve8_qtr_horiz;
- scale->predict[1][0][3] = vp9_convolve8_3by8_horiz;
- scale->predict[1][0][4] = vp9_convolve8_avg_horiz;
- scale->predict[1][0][5] = vp9_convolve8_5by8_horiz;
- scale->predict[1][0][6] = vp9_convolve8_3qtr_horiz;
- scale->predict[1][0][7] = vp9_convolve8_7by8_horiz;
- } else {
- // Must always scale in both directions.
- scale->predict[0][0][0] = vp9_convolve8;
- scale->predict[0][0][1] = vp9_convolve8_1by8;
- scale->predict[0][0][2] = vp9_convolve8_qtr;
- scale->predict[0][0][3] = vp9_convolve8_3by8;
- scale->predict[0][0][4] = vp9_convolve8_avg;
- scale->predict[0][0][5] = vp9_convolve8_5by8;
- scale->predict[0][0][6] = vp9_convolve8_3qtr;
- scale->predict[0][0][7] = vp9_convolve8_7by8;
- scale->predict[0][1][0] = vp9_convolve8;
- scale->predict[0][1][1] = vp9_convolve8_1by8;
- scale->predict[0][1][2] = vp9_convolve8_qtr;
- scale->predict[0][1][3] = vp9_convolve8_3by8;
- scale->predict[0][1][4] = vp9_convolve8_avg;
- scale->predict[0][1][5] = vp9_convolve8_5by8;
- scale->predict[0][1][6] = vp9_convolve8_3qtr;
- scale->predict[0][1][7] = vp9_convolve8_7by8;
- scale->predict[1][0][0] = vp9_convolve8;
- scale->predict[1][0][1] = vp9_convolve8_1by8;
- scale->predict[1][0][2] = vp9_convolve8_qtr;
- scale->predict[1][0][3] = vp9_convolve8_3by8;
- scale->predict[1][0][4] = vp9_convolve8_avg;
- scale->predict[1][0][5] = vp9_convolve8_5by8;
- scale->predict[1][0][6] = vp9_convolve8_3qtr;
- scale->predict[1][0][7] = vp9_convolve8_7by8;
- }
- }
- // 2D subpel motion always gets filtered in both directions
- scale->predict[1][1][0] = vp9_convolve8;
- scale->predict[1][1][1] = vp9_convolve8_1by8;
- scale->predict[1][1][2] = vp9_convolve8_qtr;
- scale->predict[1][1][3] = vp9_convolve8_3by8;
- scale->predict[1][1][4] = vp9_convolve8_avg;
- scale->predict[1][1][5] = vp9_convolve8_5by8;
- scale->predict[1][1][6] = vp9_convolve8_3qtr;
- scale->predict[1][1][7] = vp9_convolve8_7by8;
-}
-#else
if (scale->x_step_q4 == 16) {
if (scale->y_step_q4 == 16) {
// No scaling in either direction.
@@ -219,7 +98,6 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
scale->predict[1][1][0] = vp9_convolve8;
scale->predict[1][1][1] = vp9_convolve8_avg;
}
-#endif
void vp9_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE mcomp_filter_type,
@@ -383,60 +261,6 @@ void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
w, h);
}
-static void build_2x1_inter_predictor_wh(const BLOCKD *d0, const BLOCKD *d1,
- struct scale_factors *s,
- uint8_t *predictor,
- int block_size, int stride,
- int which_mv, int weight,
- int width, int height,
- const struct subpix_fn_table *subpix,
- int row, int col) {
- struct scale_factors * scale = &s[which_mv];
-
- assert(d1->dst - d0->dst == block_size);
- assert(d1->pre == d0->pre + block_size);
-
- scale->set_scaled_offsets(scale, row, col);
-
- if (d0->bmi.as_mv[which_mv].as_int == d1->bmi.as_mv[which_mv].as_int) {
- uint8_t **base_pre = which_mv ? d0->base_second_pre : d0->base_pre;
-
- vp9_build_inter_predictor(*base_pre + d0->pre,
- d0->pre_stride,
- predictor, stride,
- &d0->bmi.as_mv[which_mv],
- scale,
- width, height,
- weight, subpix);
-
- } else {
- uint8_t **base_pre0 = which_mv ? d0->base_second_pre : d0->base_pre;
- uint8_t **base_pre1 = which_mv ? d1->base_second_pre : d1->base_pre;
-
- vp9_build_inter_predictor(*base_pre0 + d0->pre,
- d0->pre_stride,
- predictor, stride,
- &d0->bmi.as_mv[which_mv],
- scale,
- width > block_size ? block_size : width, height,
- weight, subpix);
-
- if (width <= block_size) return;
-
- scale->set_scaled_offsets(scale, row, col + block_size);
-
- vp9_build_inter_predictor(*base_pre1 + d1->pre,
- d1->pre_stride,
- predictor + block_size, stride,
- &d1->bmi.as_mv[which_mv],
- scale,
- width - block_size, height,
- weight, subpix);
- }
-}
-
-#if !CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-
static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
}
@@ -575,16 +399,15 @@ static void build_inter_predictors(int plane, int block,
}
}
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
- uint8_t *dst_y,
- int dst_ystride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize) {
struct build_inter_predictors_args args = {
xd, mb_col * 16, mb_row * 16,
- {dst_y, NULL, NULL}, {dst_ystride, 0, 0},
- {{xd->pre.y_buffer, NULL, NULL}, {xd->second_pre.y_buffer, NULL, NULL}},
- {{xd->pre.y_stride, 0, 0}, {xd->second_pre.y_stride, 0, 0}},
+ {xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
+ {{xd->plane[0].pre[0].buf, NULL, NULL},
+ {xd->plane[0].pre[1].buf, NULL, NULL}},
+ {{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
};
// TODO(jkoleszar): This is a hack no matter where you put it, but does it
@@ -595,613 +418,45 @@ void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
}
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize) {
struct build_inter_predictors_args args = {
xd, mb_col * 16, mb_row * 16,
- {NULL, dst_u, dst_v}, {0, dst_uvstride, dst_uvstride},
- {{NULL, xd->pre.u_buffer, xd->pre.v_buffer},
- {NULL, xd->second_pre.u_buffer, xd->second_pre.v_buffer}},
- {{0, xd->pre.uv_stride, xd->pre.uv_stride},
- {0, xd->second_pre.uv_stride, xd->second_pre.uv_stride}},
+ {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
+ {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
+ {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
+ {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
+ {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
+ {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
};
foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
}
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd,
int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
- uint8_t *const y = xd->dst.y_buffer;
- uint8_t *const u = xd->dst.u_buffer;
- uint8_t *const v = xd->dst.v_buffer;
- const int y_stride = xd->dst.y_stride;
- const int uv_stride = xd->dst.uv_stride;
-
- vp9_build_inter_predictors_sby(xd, y, y_stride, mb_row, mb_col, bsize);
- vp9_build_inter_predictors_sbuv(xd, u, v, uv_stride, mb_row, mb_col, bsize);
#if CONFIG_COMP_INTERINTRA_PRED
- if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
- if (bsize == BLOCK_SIZE_SB32X32)
- vp9_build_interintra_32x32_predictors_sb(xd, y, u, v,
- y_stride, uv_stride);
- else
- vp9_build_interintra_64x64_predictors_sb(xd, y, u, v,
- y_stride, uv_stride);
- }
-#endif
-}
-#endif // !CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-
-#define AVERAGE_WEIGHT (1 << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT))
-
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-
-static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
- /* If the MV points so far into the UMV border that no visible pixels
- * are used for reconstruction, the subpel part of the MV can be
- * discarded and the MV limited to 16 pixels with equivalent results.
- *
- * This limit kicks in at 19 pixels for the top and left edges, for
- * the 16 pixels plus 3 taps right of the central pixel when subpel
- * filtering. The bottom and right edges use 16 pixels plus 2 pixels
- * left of the central pixel when filtering.
- */
- if (mv->col < (xd->mb_to_left_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
- mv->col = xd->mb_to_left_edge - (16 << 3);
- else if (mv->col > xd->mb_to_right_edge + ((15 + VP9_INTERP_EXTEND) << 3))
- mv->col = xd->mb_to_right_edge + (16 << 3);
-
- if (mv->row < (xd->mb_to_top_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
- mv->row = xd->mb_to_top_edge - (16 << 3);
- else if (mv->row > xd->mb_to_bottom_edge + ((15 + VP9_INTERP_EXTEND) << 3))
- mv->row = xd->mb_to_bottom_edge + (16 << 3);
-}
-
-// Whether to use implicit weighting for UV
-#define USE_IMPLICIT_WEIGHT_UV
-
-// Whether to use implicit weighting for SplitMV
-// #define USE_IMPLICIT_WEIGHT_SPLITMV
-
-// #define SEARCH_MIN3
-static int64_t get_consistency_metric(MACROBLOCKD *xd,
- uint8_t *tmp_y, int tmp_ystride) {
- int block_size = 16 << xd->mode_info_context->mbmi.sb_type;
- uint8_t *rec_y = xd->dst.y_buffer;
- int rec_ystride = xd->dst.y_stride;
- int64_t metric = 0;
- int i;
- if (xd->up_available) {
- for (i = 0; i < block_size; ++i) {
- int diff = abs(*(rec_y - rec_ystride + i) -
- *(tmp_y + i));
-#ifdef SEARCH_MIN3
- // Searches for the min abs diff among 3 pixel neighbors in the border
- int diff1 = xd->left_available ?
- abs(*(rec_y - rec_ystride + i - 1) - *(tmp_y + i)) : diff;
- int diff2 = i < block_size - 1 ?
- abs(*(rec_y - rec_ystride + i + 1) - *(tmp_y + i)) : diff;
- diff = diff <= diff1 ? diff : diff1;
- diff = diff <= diff2 ? diff : diff2;
-#endif
- metric += diff;
- }
- }
- if (xd->left_available) {
- for (i = 0; i < block_size; ++i) {
- int diff = abs(*(rec_y - 1 + i * rec_ystride) -
- *(tmp_y + i * tmp_ystride));
-#ifdef SEARCH_MIN3
- // Searches for the min abs diff among 3 pixel neighbors in the border
- int diff1 = xd->up_available ?
- abs(*(rec_y - 1 + (i - 1) * rec_ystride) -
- *(tmp_y + i * tmp_ystride)) : diff;
- int diff2 = i < block_size - 1 ?
- abs(*(rec_y - 1 + (i + 1) * rec_ystride) -
- *(tmp_y + i * tmp_ystride)) : diff;
- diff = diff <= diff1 ? diff : diff1;
- diff = diff <= diff2 ? diff : diff2;
+ uint8_t *const y = xd->plane[0].dst.buf;
+ uint8_t *const u = xd->plane[1].dst.buf;
+ uint8_t *const v = xd->plane[2].dst.buf;
+ const int y_stride = xd->plane[0].dst.stride;
+ const int uv_stride = xd->plane[1].dst.stride;
#endif
- metric += diff;
- }
- }
- return metric;
-}
-
-static int get_weight(MACROBLOCKD *xd, int64_t metric_1, int64_t metric_2) {
- int weight = AVERAGE_WEIGHT;
- if (2 * metric_1 < metric_2)
- weight = 6;
- else if (4 * metric_1 < 3 * metric_2)
- weight = 5;
- else if (2 * metric_2 < metric_1)
- weight = 2;
- else if (4 * metric_2 < 3 * metric_1)
- weight = 3;
- return weight;
-}
-
-#ifdef USE_IMPLICIT_WEIGHT_SPLITMV
-static int get_implicit_compoundinter_weight_splitmv(
- MACROBLOCKD *xd, int mb_row, int mb_col) {
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
- BLOCKD *blockd = xd->block;
- const int use_second_ref = mbmi->second_ref_frame > 0;
- int64_t metric_2 = 0, metric_1 = 0;
- int i, which_mv, weight;
- uint8_t tmp_y[256];
- const int tmp_ystride = 16;
-
- if (!use_second_ref) return 0;
- if (!(xd->up_available || xd->left_available))
- return AVERAGE_WEIGHT;
-
- assert(xd->mode_info_context->mbmi.mode == SPLITMV);
-
- which_mv = 1; // second predictor
- if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
- for (i = 0; i < 16; i += 8) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 2];
- const int y = i & 8;
-
- blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
- blockd[i + 2].bmi = xd->mode_info_context->bmi[i + 2];
-
- if (mbmi->need_to_clamp_mvs) {
- clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[which_mv].as_mv, xd);
- clamp_mv_to_umv_border(&blockd[i + 2].bmi.as_mv[which_mv].as_mv, xd);
- }
- if (i == 0) {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 8, 16,
- which_mv, 0, 16, 1,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 8, 16,
- which_mv, 0, 1, 8,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- } else {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y + 8 * 16,
- 8, 16, which_mv, 0, 1, 8,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- }
- }
- } else {
- for (i = 0; i < 16; i += 2) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 1];
- const int x = (i & 3) * 4;
- const int y = (i >> 2) * 4;
-
- blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
- blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
-
- if (i >= 4 && (i & 3) != 0) continue;
-
- if (i == 0) {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 4, 16,
- which_mv, 0, 8, 1, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 4, 16,
- which_mv, 0, 1, 4, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- } else if (i < 4) {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y + x, 4, 16,
- which_mv, 0, 8, 1, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- } else {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y + y * 16,
- 4, 16, which_mv, 0, 1, 4, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- }
- }
- }
- metric_2 = get_consistency_metric(xd, tmp_y, tmp_ystride);
-
- which_mv = 0; // first predictor
- if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
- for (i = 0; i < 16; i += 8) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 2];
- const int y = i & 8;
- blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
- blockd[i + 2].bmi = xd->mode_info_context->bmi[i + 2];
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, bsize);
+ vp9_build_inter_predictors_sbuv(xd, mb_row, mb_col, bsize);
- if (mbmi->need_to_clamp_mvs) {
- clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[which_mv].as_mv, xd);
- clamp_mv_to_umv_border(&blockd[i + 2].bmi.as_mv[which_mv].as_mv, xd);
- }
- if (i == 0) {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 8, 16,
- which_mv, 0, 16, 1,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 8, 16,
- which_mv, 0, 1, 8,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- } else {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y + 8 * 16,
- 8, 16, which_mv, 0, 1, 8,
- &xd->subpix, mb_row * 16 + y, mb_col * 16);
- }
- }
- } else {
- for (i = 0; i < 16; i += 2) {
- BLOCKD *d0 = &blockd[i];
- BLOCKD *d1 = &blockd[i + 1];
- const int x = (i & 3) * 4;
- const int y = (i >> 2) * 4;
-
- blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
- blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
-
- if (i >= 4 && (i & 3) != 0) continue;
-
- if (i == 0) {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 4, 16,
- which_mv, 0, 8, 1, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y, 4, 16,
- which_mv, 0, 1, 4, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- } else if (i < 4) {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y + x, 4, 16,
- which_mv, 0, 8, 1, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- } else {
- build_2x1_inter_predictor_wh(d0, d1, xd->scale_factor, tmp_y + y * 16,
- 4, 16, which_mv, 0, 1, 4, &xd->subpix,
- mb_row * 16 + y, mb_col * 16 + x);
- }
- }
- }
- metric_1 = get_consistency_metric(xd, tmp_y, tmp_ystride);
-
- // Choose final weight for averaging
- weight = get_weight(xd, metric_1, metric_2);
- return weight;
-}
-#endif
-
-static int get_implicit_compoundinter_weight(MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
- int64_t metric_2 = 0, metric_1 = 0;
- int n, clamp_mvs, pre_stride;
- uint8_t *base_pre;
- int_mv ymv;
- uint8_t tmp_y[4096];
- const int tmp_ystride = 64;
- int weight;
- int edge[4];
- int block_size = 16 << xd->mode_info_context->mbmi.sb_type;
- struct scale_factors *scale;
-
- if (!use_second_ref) return 0;
- if (!(xd->up_available || xd->left_available))
- return AVERAGE_WEIGHT;
-
- edge[0] = xd->mb_to_top_edge;
- edge[1] = xd->mb_to_bottom_edge;
- edge[2] = xd->mb_to_left_edge;
- edge[3] = xd->mb_to_right_edge;
-
- clamp_mvs = xd->mode_info_context->mbmi.need_to_clamp_secondmv;
- base_pre = xd->second_pre.y_buffer;
- pre_stride = xd->second_pre.y_stride;
- ymv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
- // First generate the second predictor
- scale = &xd->scale_factor[1];
- for (n = 0; n < block_size; n += 16) {
- xd->mb_to_left_edge = edge[2] - (n << 3);
- xd->mb_to_right_edge = edge[3] + ((16 - n) << 3);
- if (clamp_mvs)
- clamp_mv_to_umv_border(&ymv.as_mv, xd);
- scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16 + n);
- // predict a single row of pixels
- vp9_build_inter_predictor(base_pre +
- scaled_buffer_offset(n, 0, pre_stride, scale),
- pre_stride, tmp_y + n, tmp_ystride, &ymv, scale, 16, 1, 0, &xd->subpix);
- }
- xd->mb_to_left_edge = edge[2];
- xd->mb_to_right_edge = edge[3];
- for (n = 0; n < block_size; n += 16) {
- xd->mb_to_top_edge = edge[0] - (n << 3);
- xd->mb_to_bottom_edge = edge[1] + ((16 - n) << 3);
- if (clamp_mvs)
- clamp_mv_to_umv_border(&ymv.as_mv, xd);
- scale->set_scaled_offsets(scale, mb_row * 16 + n, mb_col * 16);
- // predict a single col of pixels
- vp9_build_inter_predictor(base_pre +
- scaled_buffer_offset(0, n, pre_stride, scale),
- pre_stride, tmp_y + n * tmp_ystride, tmp_ystride, &ymv,
- scale, 1, 16, 0, &xd->subpix);
- }
- xd->mb_to_top_edge = edge[0];
- xd->mb_to_bottom_edge = edge[1];
- // Compute consistency metric
- metric_2 = get_consistency_metric(xd, tmp_y, tmp_ystride);
-
- clamp_mvs = xd->mode_info_context->mbmi.need_to_clamp_mvs;
- base_pre = xd->pre.y_buffer;
- pre_stride = xd->pre.y_stride;
- ymv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
- // Now generate the first predictor
- scale = &xd->scale_factor[0];
- for (n = 0; n < block_size; n += 16) {
- xd->mb_to_left_edge = edge[2] - (n << 3);
- xd->mb_to_right_edge = edge[3] + ((16 - n) << 3);
- if (clamp_mvs)
- clamp_mv_to_umv_border(&ymv.as_mv, xd);
- scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16 + n);
- // predict a single row of pixels
- vp9_build_inter_predictor(base_pre +
- scaled_buffer_offset(n, 0, pre_stride, scale),
- pre_stride, tmp_y + n, tmp_ystride, &ymv, scale, 16, 1, 0, &xd->subpix);
- }
- xd->mb_to_left_edge = edge[2];
- xd->mb_to_right_edge = edge[3];
- for (n = 0; n < block_size; n += 16) {
- xd->mb_to_top_edge = edge[0] - (n << 3);
- xd->mb_to_bottom_edge = edge[1] + ((16 - n) << 3);
- if (clamp_mvs)
- clamp_mv_to_umv_border(&ymv.as_mv, xd);
- scale->set_scaled_offsets(scale, mb_row * 16 + n, mb_col * 16);
- // predict a single col of pixels
- vp9_build_inter_predictor(base_pre +
- scaled_buffer_offset(0, n, pre_stride, scale),
- pre_stride, tmp_y + n * tmp_ystride, tmp_ystride, &ymv,
- scale, 1, 16, 0, &xd->subpix);
- }
- xd->mb_to_top_edge = edge[0];
- xd->mb_to_bottom_edge = edge[1];
- metric_1 = get_consistency_metric(xd, tmp_y, tmp_ystride);
-
- // Choose final weight for averaging
- weight = get_weight(xd, metric_1, metric_2);
- return weight;
-}
-
-static void build_inter16x16_predictors_mby_w(MACROBLOCKD *xd,
- uint8_t *dst_y,
- int dst_ystride,
- int weight,
- int mb_row,
- int mb_col) {
- const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
- int which_mv;
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- const int clamp_mvs = which_mv ?
- xd->mode_info_context->mbmi.need_to_clamp_secondmv :
- xd->mode_info_context->mbmi.need_to_clamp_mvs;
-
- uint8_t *base_pre = which_mv ? xd->second_pre.y_buffer : xd->pre.y_buffer;
- int pre_stride = which_mv ? xd->second_pre.y_stride : xd->pre.y_stride;
- int_mv ymv;
- struct scale_factors *scale = &xd->scale_factor[which_mv];
-
- ymv.as_int = xd->mode_info_context->mbmi.mv[which_mv].as_int;
-
- if (clamp_mvs)
- clamp_mv_to_umv_border(&ymv.as_mv, xd);
-
- scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16);
-
- vp9_build_inter_predictor(base_pre, pre_stride, dst_y, dst_ystride,
- &ymv, scale, 16, 16,
- which_mv ? weight : 0, &xd->subpix);
- }
-}
-
-static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int weight,
- int mb_row,
- int mb_col) {
- const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
- int which_mv;
-
- for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
- const int clamp_mvs =
- which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
- : xd->mode_info_context->mbmi.need_to_clamp_mvs;
- uint8_t *uptr, *vptr;
- int pre_stride = which_mv ? xd->second_pre.uv_stride
- : xd->pre.uv_stride;
- int_mv mv;
-
- struct scale_factors *scale = &xd->scale_factor_uv[which_mv];
- mv.as_int = xd->mode_info_context->mbmi.mv[which_mv].as_int;
-
-
- if (clamp_mvs)
- clamp_mv_to_umv_border(&mv.as_mv, xd);
-
- uptr = (which_mv ? xd->second_pre.u_buffer : xd->pre.u_buffer);
- vptr = (which_mv ? xd->second_pre.v_buffer : xd->pre.v_buffer);
-
- scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16);
-
- vp9_build_inter_predictor_q4(
- uptr, pre_stride, dst_u, dst_uvstride, &mv,
- scale, 8, 8, which_mv ? weight : 0, &xd->subpix);
-
- vp9_build_inter_predictor_q4(
- vptr, pre_stride, dst_v, dst_uvstride, &mv,
- scale, 8, 8, which_mv ? weight : 0, &xd->subpix);
- }
-}
-static void build_inter_predictors_sby_w(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int weight,
- int mb_row,
- int mb_col,
- BLOCK_SIZE_TYPE bsize) {
- const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
- const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
- uint8_t *y1 = x->pre.y_buffer;
- uint8_t *y2 = x->second_pre.y_buffer;
- int edge[4], n;
-
- edge[0] = x->mb_to_top_edge;
- edge[1] = x->mb_to_bottom_edge;
- edge[2] = x->mb_to_left_edge;
- edge[3] = x->mb_to_right_edge;
-
- for (n = 0; n < bw * bh; n++) {
- const int x_idx = n & (bw - 1), y_idx = n >> bwl;
-
- x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
- x->mb_to_bottom_edge = edge[1] + (((bh - 1 - y_idx) * 16) << 3);
- x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
- x->mb_to_right_edge = edge[3] + (((bw - 1 - x_idx) * 16) << 3);
-
- x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 16,
- y_idx * 16,
- x->pre.y_stride,
- &x->scale_factor[0]);
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2 +
- scaled_buffer_offset(x_idx * 16,
- y_idx * 16,
- x->second_pre.y_stride,
- &x->scale_factor[1]);
- }
- build_inter16x16_predictors_mby_w(x,
- dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
- dst_ystride, weight, mb_row + y_idx, mb_col + x_idx);
- }
- x->mb_to_top_edge = edge[0];
- x->mb_to_bottom_edge = edge[1];
- x->mb_to_left_edge = edge[2];
- x->mb_to_right_edge = edge[3];
-
- x->pre.y_buffer = y1;
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2;
- }
-}
-
-void vp9_build_inter_predictors_sby(MACROBLOCKD *x,
- uint8_t *dst_y,
- int dst_ystride,
- int mb_row,
- int mb_col,
- BLOCK_SIZE_TYPE bsize) {
- int weight = get_implicit_compoundinter_weight(x, mb_row, mb_col);
- build_inter_predictors_sby_w(x, dst_y, dst_ystride, weight,
- mb_row, mb_col, bsize);
-}
-
-static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int weight,
- int mb_row,
- int mb_col,
- BLOCK_SIZE_TYPE bsize) {
- const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
- const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
- uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
- uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
- int edge[4], n;
-
- edge[0] = x->mb_to_top_edge;
- edge[1] = x->mb_to_bottom_edge;
- edge[2] = x->mb_to_left_edge;
- edge[3] = x->mb_to_right_edge;
-
- for (n = 0; n < bw * bh; n++) {
- int scaled_uv_offset;
- const int x_idx = n & (bw - 1), y_idx = n >> bwl;
-
- x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
- x->mb_to_bottom_edge = edge[1] + (((bh - 1 - y_idx) * 16) << 3);
- x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
- x->mb_to_right_edge = edge[3] + (((bw - 1 - x_idx) * 16) << 3);
-
- scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
- y_idx * 8,
- x->pre.uv_stride,
- &x->scale_factor_uv[0]);
- x->pre.u_buffer = u1 + scaled_uv_offset;
- x->pre.v_buffer = v1 + scaled_uv_offset;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
- y_idx * 8,
- x->second_pre.uv_stride,
- &x->scale_factor_uv[1]);
- x->second_pre.u_buffer = u2 + scaled_uv_offset;
- x->second_pre.v_buffer = v2 + scaled_uv_offset;
- }
-
- build_inter16x16_predictors_mbuv_w(x,
- dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
- dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
- dst_uvstride, weight, mb_row + y_idx, mb_col + x_idx);
- }
- x->mb_to_top_edge = edge[0];
- x->mb_to_bottom_edge = edge[1];
- x->mb_to_left_edge = edge[2];
- x->mb_to_right_edge = edge[3];
-
- x->pre.u_buffer = u1;
- x->pre.v_buffer = v1;
-
- if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.u_buffer = u2;
- x->second_pre.v_buffer = v2;
- }
-}
-
-void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
- int mb_row,
- int mb_col,
- BLOCK_SIZE_TYPE bsize) {
-#ifdef USE_IMPLICIT_WEIGHT_UV
- int weight = get_implicit_compoundinter_weight(xd, mb_row, mb_col);
-#else
- int weight = AVERAGE_WEIGHT;
-#endif
- build_inter_predictors_sbuv_w(xd, dst_u, dst_v, dst_uvstride,
- weight, mb_row, mb_col, bsize);
-}
-
-void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
- int mb_row, int mb_col,
- BLOCK_SIZE_TYPE bsize) {
- uint8_t *const y = mb->dst.y_buffer;
- uint8_t *const u = mb->dst.u_buffer;
- uint8_t *const v = mb->dst.v_buffer;
- const int y_stride = mb->dst.y_stride;
- const int uv_stride = mb->dst.uv_stride;
-
- vp9_build_inter_predictors_sby(mb, y, y_stride, mb_row, mb_col, bsize);
- vp9_build_inter_predictors_sbuv(mb, u, v, uv_stride, mb_row, mb_col, bsize);
#if CONFIG_COMP_INTERINTRA_PRED
- if (mb->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
+ if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
if (bsize == BLOCK_SIZE_SB32X32)
- vp9_build_interintra_32x32_predictors_sb(mb, y, u, v,
+ vp9_build_interintra_32x32_predictors_sb(xd, y, u, v,
y_stride, uv_stride);
else
- vp9_build_interintra_64x64_predictors_sb(mb, y, u, v,
+ vp9_build_interintra_64x64_predictors_sb(xd, y, u, v,
y_stride, uv_stride);
}
#endif
}
-#endif // CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static INLINE int round_mv_comp(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
@@ -1223,20 +478,9 @@ static int mi_mv_pred_col(MACROBLOCKD *mb, int off, int idx) {
return round_mv_comp(temp);
}
-void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
- int mb_row,
- int mb_col) {
- vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
-}
-
-
/*encoder only*/
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
int mb_row, int mb_col) {
- uint8_t *const u = xd->dst.u_buffer;
- uint8_t *const v = xd->dst.v_buffer;
- const int uv_stride = xd->dst.uv_stride;
-
- vp9_build_inter_predictors_sbuv(xd, u, v, uv_stride, mb_row, mb_col,
+ vp9_build_inter_predictors_sbuv(xd, mb_row, mb_col,
BLOCK_SIZE_MB16X16);
}
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index ee34fc5d2..51b705f71 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -17,31 +17,19 @@
struct subpix_fn_table;
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
- uint8_t *dst_y,
- int dst_ystride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize);
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize);
+
void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize);
-void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
- int mb_row,
- int mb_col);
-
-void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
- int mb_row,
- int mb_col);
-
void vp9_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE filter,
VP9_COMMON *cm);
@@ -83,8 +71,68 @@ static int scaled_buffer_offset(int x_offset,
int y_offset,
int stride,
const struct scale_factors *scale) {
- return scale->scale_value_y(y_offset, scale) * stride +
- scale->scale_value_x(x_offset, scale);
+ if (scale)
+ return scale->scale_value_y(y_offset, scale) * stride +
+ scale->scale_value_x(x_offset, scale);
+ return y_offset * stride + x_offset;
+}
+
+static void setup_pred_plane(struct buf_2d *dst,
+ uint8_t *src, int stride,
+ int mb_row, int mb_col,
+ const struct scale_factors *scale,
+ int subsampling_x, int subsampling_y) {
+ const int x = (16 * mb_col) >> subsampling_x;
+ const int y = (16 * mb_row) >> subsampling_y;
+ dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
+ dst->stride = stride;
+}
+
+// TODO(jkoleszar): audit all uses of this that don't set mb_row, mb_col
+static void setup_dst_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col) {
+ setup_pred_plane(&xd->plane[0].dst,
+ src->y_buffer, src->y_stride,
+ mb_row, mb_col, NULL,
+ xd->plane[0].subsampling_x, xd->plane[0].subsampling_y);
+ setup_pred_plane(&xd->plane[1].dst,
+ src->u_buffer, src->uv_stride,
+ mb_row, mb_col, NULL,
+ xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
+ setup_pred_plane(&xd->plane[2].dst,
+ src->v_buffer, src->uv_stride,
+ mb_row, mb_col, NULL,
+ xd->plane[2].subsampling_x, xd->plane[2].subsampling_y);
+}
+
+static void setup_pre_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src0,
+ const YV12_BUFFER_CONFIG *src1,
+ int mb_row, int mb_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ const YV12_BUFFER_CONFIG *src = i ? src1 : src0;
+
+ if (!src)
+ continue;
+
+ setup_pred_plane(&xd->plane[0].pre[i],
+ src->y_buffer, src->y_stride,
+ mb_row, mb_col, scale ? scale + i : NULL,
+ xd->plane[0].subsampling_x, xd->plane[0].subsampling_y);
+ setup_pred_plane(&xd->plane[1].pre[i],
+ src->u_buffer, src->uv_stride,
+ mb_row, mb_col, scale_uv ? scale_uv + i : NULL,
+ xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
+ setup_pred_plane(&xd->plane[2].pre[i],
+ src->v_buffer, src->uv_stride,
+ mb_row, mb_col, scale_uv ? scale_uv + i : NULL,
+ xd->plane[2].subsampling_x, xd->plane[2].subsampling_y);
+ }
}
static void setup_pred_block(YV12_BUFFER_CONFIG *dst,
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index 88c3f191e..6caebd784 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -269,15 +269,6 @@ static void corner_predictor(uint8_t *ypred_ptr, int y_stride, int n,
}
}
-void vp9_recon_intra_mbuv(MACROBLOCKD *xd) {
- int i;
- for (i = 16; i < 24; i += 2) {
- BLOCKD *b = &xd->block[i];
- vp9_recon2b(*(b->base_dst) + b->dst, b->diff,
- *(b->base_dst) + b->dst, b->dst_stride);
- }
-}
-
static INLINE int log2_minus_1(int n) {
switch (n) {
case 4: return 1;
@@ -609,7 +600,7 @@ void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd,
int ystride) {
uint8_t intrapredictor[256];
vp9_build_intra_predictors(
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, 16,
xd->mode_info_context->mbmi.interintra_mode, 16, 16,
xd->up_available, xd->left_available, xd->right_available);
@@ -624,12 +615,12 @@ void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd,
uint8_t uintrapredictor[64];
uint8_t vintrapredictor[64];
vp9_build_intra_predictors(
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
uintrapredictor, 8,
xd->mode_info_context->mbmi.interintra_uv_mode, 8, 8,
xd->up_available, xd->left_available, xd->right_available);
vp9_build_intra_predictors(
- xd->dst.v_buffer, xd->dst.uv_stride,
+      xd->plane[2].dst.buf, xd->plane[2].dst.stride,
vintrapredictor, 8,
xd->mode_info_context->mbmi.interintra_uv_mode, 8, 8,
xd->up_available, xd->left_available, xd->right_available);
@@ -644,7 +635,7 @@ void vp9_build_interintra_32x32_predictors_sby(MACROBLOCKD *xd,
int ystride) {
uint8_t intrapredictor[1024];
vp9_build_intra_predictors(
- xd->dst.y_buffer, xd->dst.y_stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, 32,
xd->mode_info_context->mbmi.interintra_mode, 32, 32,
xd->up_available, xd->left_available, xd->right_available);
@@ -659,12 +650,12 @@ void vp9_build_interintra_32x32_predictors_sbuv(MACROBLOCKD *xd,
uint8_t uintrapredictor[256];
uint8_t vintrapredictor[256];
vp9_build_intra_predictors(
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
uintrapredictor, 16,
xd->mode_info_context->mbmi.interintra_uv_mode, 16, 16,
xd->up_available, xd->left_available, xd->right_available);
vp9_build_intra_predictors(
- xd->dst.v_buffer, xd->dst.uv_stride,
+      xd->plane[2].dst.buf, xd->plane[2].dst.stride,
vintrapredictor, 16,
xd->mode_info_context->mbmi.interintra_uv_mode, 16, 16,
xd->up_available, xd->left_available, xd->right_available);
@@ -689,7 +680,7 @@ void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd,
int ystride) {
uint8_t intrapredictor[4096];
const int mode = xd->mode_info_context->mbmi.interintra_mode;
- vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride,
+ vp9_build_intra_predictors(xd->plane[0].dst.buf, xd->plane[0].dst.stride,
intrapredictor, 64, mode, 64, 64,
xd->up_available, xd->left_available,
xd->right_available);
@@ -704,11 +695,11 @@ void vp9_build_interintra_64x64_predictors_sbuv(MACROBLOCKD *xd,
uint8_t uintrapredictor[1024];
uint8_t vintrapredictor[1024];
const int mode = xd->mode_info_context->mbmi.interintra_uv_mode;
- vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride,
+ vp9_build_intra_predictors(xd->plane[1].dst.buf, xd->plane[1].dst.stride,
uintrapredictor, 32, mode, 32, 32,
xd->up_available, xd->left_available,
xd->right_available);
- vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride,
+  vp9_build_intra_predictors(xd->plane[2].dst.buf, xd->plane[2].dst.stride,
vintrapredictor, 32, mode, 32, 32,
xd->up_available, xd->left_available,
xd->right_available);
@@ -734,8 +725,8 @@ void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd,
const int bwl = b_width_log2(bsize), bw = 4 << bwl;
const int bhl = b_height_log2(bsize), bh = 4 << bhl;
- vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride,
- xd->dst.y_buffer, xd->dst.y_stride,
+ vp9_build_intra_predictors(xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
xd->mode_info_context->mbmi.mode,
bw, bh,
xd->up_available, xd->left_available,
@@ -747,13 +738,13 @@ void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd,
const int bwl = b_width_log2(bsize) - 1, bw = 4 << bwl;
const int bhl = b_height_log2(bsize) - 1, bh = 4 << bhl;
- vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ vp9_build_intra_predictors(xd->plane[1].dst.buf, xd->plane[1].dst.stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->mode_info_context->mbmi.uv_mode,
bw, bh, xd->up_available,
xd->left_available, xd->right_available);
- vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride,
- xd->dst.v_buffer, xd->dst.uv_stride,
+  vp9_build_intra_predictors(xd->plane[2].dst.buf, xd->plane[2].dst.stride,
+                             xd->plane[2].dst.buf, xd->plane[2].dst.stride,
xd->mode_info_context->mbmi.uv_mode,
bw, bh, xd->up_available,
xd->left_available, xd->right_available);
diff --git a/vp9/common/vp9_reconintra.h b/vp9/common/vp9_reconintra.h
index b97b6089d..1e0cfa42d 100644
--- a/vp9/common/vp9_reconintra.h
+++ b/vp9/common/vp9_reconintra.h
@@ -14,8 +14,6 @@
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_blockd.h"
-void vp9_recon_intra_mbuv(MACROBLOCKD *xd);
-
B_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr,
int stride, int n,
int tx, int ty);
diff --git a/vp9/common/vp9_reconintra4x4.c b/vp9/common/vp9_reconintra4x4.c
index a0700010b..7f81b051b 100644
--- a/vp9/common/vp9_reconintra4x4.c
+++ b/vp9/common/vp9_reconintra4x4.c
@@ -258,15 +258,22 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
switch (b_mode) {
case B_DC_PRED: {
- int expected_dc = 0;
-
- for (i = 0; i < 4; i++) {
- expected_dc += above[i];
- expected_dc += left[i];
+ int expected_dc = 128;
+ if (have_top || have_left) {
+ int average = 0;
+ int count = 0;
+ if (have_top) {
+ for (i = 0; i < 4; i++)
+ average += above[i];
+ count += 4;
+ }
+ if (have_left) {
+ for (i = 0; i < 4; i++)
+ average += left[i];
+ count += 4;
+ }
+ expected_dc = (average + (count >> 1)) / count;
}
-
- expected_dc = ROUND_POWER_OF_TWO(expected_dc, 3);
-
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++)
predictor[c] = expected_dc;
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index f9f2395f3..ecca21a78 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -78,17 +78,14 @@ prototype void vp9_recon4b "uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_p
specialize vp9_recon4b
# specialize vp9_recon4b sse2
-prototype void vp9_recon_mb "struct macroblockd *x"
-specialize vp9_recon_mb
+prototype void vp9_recon_sb "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_recon_sb
-prototype void vp9_recon_mby "struct macroblockd *x"
-specialize vp9_recon_mby
+prototype void vp9_recon_sby "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_recon_sby
-prototype void vp9_recon_sby_s "struct macroblockd *x, uint8_t *dst, enum BLOCK_SIZE_TYPE bsize"
-specialize vp9_recon_sby_s
-
-prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst, enum BLOCK_SIZE_TYPE bsize"
-specialize void vp9_recon_sbuv_s
+prototype void vp9_recon_sbuv "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_recon_sbuv
prototype void vp9_build_intra_predictors "uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available"
specialize void vp9_build_intra_predictors
@@ -233,62 +230,6 @@ specialize vp9_convolve8_avg_horiz ssse3
prototype void vp9_convolve8_avg_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_avg_vert ssse3
-#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
-prototype void vp9_convolve8_1by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_1by8
-
-prototype void vp9_convolve8_qtr "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_qtr
-
-prototype void vp9_convolve8_3by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_3by8
-
-prototype void vp9_convolve8_5by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_5by8
-
-prototype void vp9_convolve8_3qtr "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_3qtr
-
-prototype void vp9_convolve8_7by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_7by8
-
-prototype void vp9_convolve8_1by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_1by8_horiz
-
-prototype void vp9_convolve8_qtr_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_qtr_horiz
-
-prototype void vp9_convolve8_3by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_3by8_horiz
-
-prototype void vp9_convolve8_5by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_5by8_horiz
-
-prototype void vp9_convolve8_3qtr_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_3qtr_horiz
-
-prototype void vp9_convolve8_7by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_7by8_horiz
-
-prototype void vp9_convolve8_1by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_1by8_vert
-
-prototype void vp9_convolve8_qtr_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_qtr_vert
-
-prototype void vp9_convolve8_3by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_3by8_vert
-
-prototype void vp9_convolve8_5by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_5by8_vert
-
-prototype void vp9_convolve8_3qtr_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_3qtr_vert
-
-prototype void vp9_convolve8_7by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
-specialize vp9_convolve8_7by8_vert
-#endif
-
#
# dct
#
diff --git a/vp9/common/x86/vp9_recon_wrapper_sse2.c b/vp9/common/x86/vp9_recon_wrapper_sse2.c
index 12d2f970c..97148fbb8 100644
--- a/vp9/common/x86/vp9_recon_wrapper_sse2.c
+++ b/vp9/common/x86/vp9_recon_wrapper_sse2.c
@@ -35,7 +35,7 @@ static void build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
build_intra_pred_mbuv_fn_t ho_fn) {
int mode = xd->mode_info_context->mbmi.uv_mode;
build_intra_pred_mbuv_fn_t fn;
- int src_stride = xd->dst.uv_stride;
+ int src_stride = xd->plane[1].dst.stride;
switch (mode) {
case V_PRED:
@@ -68,34 +68,34 @@ static void build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
return;
}
- fn(dst_u, dst_stride, xd->dst.u_buffer, src_stride);
- fn(dst_v, dst_stride, xd->dst.v_buffer, src_stride);
+ fn(dst_u, dst_stride, xd->plane[1].dst.buf, src_stride);
+ fn(dst_v, dst_stride, xd->plane[2].dst.buf, src_stride);
}
void vp9_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_sse2,
vp9_intra_pred_uv_ho_mmx2);
}
void vp9_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_ssse3,
vp9_intra_pred_uv_ho_ssse3);
}
void vp9_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_sse2,
vp9_intra_pred_uv_ho_mmx2);
}
void vp9_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *xd) {
- build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
vp9_intra_pred_uv_tm_ssse3,
vp9_intra_pred_uv_ho_ssse3);
}