path: root/vp8/encoder/onyx_if.c
author    Jim Bankoski <jimbankoski@google.com>  2012-10-22 11:12:07 -0700
committer Gerrit Code Review <gerrit@gerrit.golo.chromium.org>  2012-10-22 11:12:07 -0700
commit    8fc5774f8bbb14d68bc3a8b460b4e0ab9dd26edd (patch)
tree      0befc55a0af625b479fe0f37988d3db80b774a4a /vp8/encoder/onyx_if.c
parent    6e5cb12886e464c7f744bb905ebccfde5bf378b9 (diff)
parent    118b2fe962e7a93a7989244d61dbd68354984c9e (diff)
Merge "Remove variance vtable from rtcd" into experimental
Diffstat (limited to 'vp8/encoder/onyx_if.c')
-rw-r--r--  vp8/encoder/onyx_if.c  | 188
1 file changed, 70 insertions(+), 118 deletions(-)
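
This merge drops the variance RTCD vtable (vp8_variance_rtcd_vtable_t, reached through the VARIANCE_INVOKE macro) in favor of direct calls to the vp8_-prefixed symbols, so call sites no longer thread an rtcd pointer through their argument lists. A minimal, self-contained sketch of the before/after dispatch pattern; the struct, macro, and function names below are illustrative stand-ins, not the encoder's real definitions:

#include <stdio.h>

/* Old style: a vtable of function pointers, filled at init time and
 * dispatched through a macro at every call site. */
typedef unsigned int (*mse_fn_t)(const unsigned char *a,
                                 const unsigned char *b, int n);
typedef struct { mse_fn_t mse16x16; } variance_vtable_t;
#define VARIANCE_INVOKE(ctx, fn) (ctx)->fn

static unsigned int mse_c(const unsigned char *a,
                          const unsigned char *b, int n) {
    unsigned int sse = 0;
    int i;
    for (i = 0; i < n; i++) {
        int d = a[i] - b[i];
        sse += (unsigned int)(d * d);
    }
    return sse;
}

/* New style: callers name the function directly; the build (or a
 * one-time RTCD setup) decides what sits behind the symbol. */
static unsigned int vp8_mse_direct(const unsigned char *a,
                                   const unsigned char *b, int n) {
    return mse_c(a, b, n);
}

int main(void) {
    unsigned char x[4] = {1, 2, 3, 4}, y[4] = {2, 2, 3, 6};
    variance_vtable_t vt = { mse_c };
    printf("vtable: %u\n", VARIANCE_INVOKE(&vt, mse16x16)(x, y, 4));
    printf("direct: %u\n", vp8_mse_direct(x, y, 4));
    return 0;
}
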
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 14e9e784a..0e46071a9 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -77,7 +77,7 @@ extern void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFF
extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
#endif
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
@@ -101,25 +101,14 @@ extern const int vp8_gf_interval_table[101];
#if CONFIG_INTERNAL_STATS
#include "math.h"
-extern double vp8_calc_ssim
-(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- int lumamask,
- double *weight,
- const vp8_variance_rtcd_vtable_t *rtcd
-);
+extern double vp8_calc_ssim(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int lumamask,
+ double *weight);
-extern double vp8_calc_ssimg
-(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *ssim_y,
- double *ssim_u,
- double *ssim_v,
- const vp8_variance_rtcd_vtable_t *rtcd
-);
+extern double vp8_calc_ssimg(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, double *ssim_y,
+ double *ssim_u, double *ssim_v);
#endif
@@ -2027,74 +2016,48 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
init_mv_ref_counts();
#endif
+#define BFP(BT, SDF, VF, SVF, SVFHH, SVFHV, SVFHHV, SDX3F, SDX8F, SDX4DF) \
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svf_halfpix_h = SVFHH; \
+ cpi->fn_ptr[BT].svf_halfpix_v = SVFHV; \
+ cpi->fn_ptr[BT].svf_halfpix_hv = SVFHHV; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
+
+
#if CONFIG_SUPERBLOCKS
- cpi->fn_ptr[BLOCK_32X32].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32);
- cpi->fn_ptr[BLOCK_32X32].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var32x32);
- cpi->fn_ptr[BLOCK_32X32].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar32x32);
- cpi->fn_ptr[BLOCK_32X32].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar32x32_h);
- cpi->fn_ptr[BLOCK_32X32].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar32x32_v);
- cpi->fn_ptr[BLOCK_32X32].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar32x32_hv);
- cpi->fn_ptr[BLOCK_32X32].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32x3);
- cpi->fn_ptr[BLOCK_32X32].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32x8);
- cpi->fn_ptr[BLOCK_32X32].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32x4d);
+ BFP(BLOCK_32X32, vp8_sad32x32, vp8_variance32x32, vp8_sub_pixel_variance32x32,
+ vp8_variance_halfpixvar32x32_h, vp8_variance_halfpixvar32x32_v,
+ vp8_variance_halfpixvar32x32_hv, vp8_sad32x32x3, vp8_sad32x32x8,
+ vp8_sad32x32x4d)
#endif
- cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
- cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
- cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
- cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
- cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
- cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
-
- cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
- cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
- cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
- cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
- cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
-
- cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
- cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
- cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
- cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
- cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
-
- cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
- cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
- cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
- cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
- cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
-
- cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
- cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
- cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
- cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
- cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
+ BFP(BLOCK_16X16, vp8_sad16x16, vp8_variance16x16, vp8_sub_pixel_variance16x16,
+ vp8_variance_halfpixvar16x16_h, vp8_variance_halfpixvar16x16_v,
+ vp8_variance_halfpixvar16x16_hv, vp8_sad16x16x3, vp8_sad16x16x8,
+ vp8_sad16x16x4d)
+
+ BFP(BLOCK_16X8, vp8_sad16x8, vp8_variance16x8, vp8_sub_pixel_variance16x8,
+ NULL, NULL, NULL, vp8_sad16x8x3, vp8_sad16x8x8, vp8_sad16x8x4d)
+
+ BFP(BLOCK_8X16, vp8_sad8x16, vp8_variance8x16, vp8_sub_pixel_variance8x16,
+ NULL, NULL, NULL, vp8_sad8x16x3, vp8_sad8x16x8, vp8_sad8x16x4d)
+
+ BFP(BLOCK_8X8, vp8_sad8x8, vp8_variance8x8, vp8_sub_pixel_variance8x8,
+ NULL, NULL, NULL, vp8_sad8x8x3, vp8_sad8x8x8, vp8_sad8x8x4d)
+
+ BFP(BLOCK_4X4, vp8_sad4x4, vp8_variance4x4, vp8_sub_pixel_variance4x4,
+ NULL, NULL, NULL, vp8_sad4x4x3, vp8_sad4x4x8, vp8_sad4x4x4d)
#if ARCH_X86 || ARCH_X86_64
- cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
#endif
cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
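
The new BFP macro wires up all nine function pointers for a block type in a single invocation, replacing nine assignments per block size. A standalone miniature of the same pattern, cut down to two fields with toy functions (everything here is illustrative, not the encoder's actual types):

#include <stdio.h>

typedef int (*sad_fn_t)(int);
typedef int (*var_fn_t)(int);

typedef struct {
    sad_fn_t sdf;
    var_fn_t vf;
} fn_ptr_t;

enum { BLOCK_16X16, BLOCK_8X8, BLOCK_TYPES };

/* Same shape as the diff's BFP: one macro invocation per block type
 * fills every function pointer for that entry. */
#define BFP(BT, SDF, VF) \
    fn_ptr[BT].sdf = SDF; \
    fn_ptr[BT].vf = VF;

static int sad16x16(int x) { return x + 16; }
static int var16x16(int x) { return x * 16; }
static int sad8x8(int x)   { return x + 8; }
static int var8x8(int x)   { return x * 8; }

int main(void) {
    fn_ptr_t fn_ptr[BLOCK_TYPES];
    BFP(BLOCK_16X16, sad16x16, var16x16)
    BFP(BLOCK_8X8,   sad8x8,   var8x8)
    printf("%d %d\n", fn_ptr[BLOCK_16X16].sdf(0), fn_ptr[BLOCK_8X8].vf(2));
    return 0;
}
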
@@ -2370,8 +2333,7 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
unsigned char *recon, int recon_stride,
- unsigned int cols, unsigned int rows,
- vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned int cols, unsigned int rows) {
unsigned int row, col;
uint64_t total_sse = 0;
int diff;
@@ -2380,9 +2342,7 @@ static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
for (col = 0; col + 16 <= cols; col += 16) {
unsigned int sse;
- VARIANCE_INVOKE(rtcd, mse16x16)(orig + col, orig_stride,
- recon + col, recon_stride,
- &sse);
+ vp8_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
total_sse += sse;
}
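
With the vtable gone, calc_plane_error calls vp8_mse16x16 by name, accumulating per-16x16-block SSE across the plane. A runnable sketch of that full-block accumulation, using a plain C stand-in for the optimized vp8_mse16x16 (the real function also handles leftover columns and rows, not shown here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Plain C stand-in for vp8_mse16x16: sum of squared differences over
 * a 16x16 block, written through *sse. */
static unsigned int mse16x16(const unsigned char *a, int a_stride,
                             const unsigned char *b, int b_stride,
                             unsigned int *sse) {
    unsigned int total = 0;
    int r, c;
    for (r = 0; r < 16; r++)
        for (c = 0; c < 16; c++) {
            int d = a[r * a_stride + c] - b[r * b_stride + c];
            total += (unsigned int)(d * d);
        }
    *sse = total;
    return total;
}

static uint64_t plane_sse(const unsigned char *orig, int orig_stride,
                          const unsigned char *recon, int recon_stride,
                          unsigned int cols, unsigned int rows) {
    uint64_t total_sse = 0;
    unsigned int row, col;
    for (row = 0; row + 16 <= rows; row += 16) {
        for (col = 0; col + 16 <= cols; col += 16) {
            unsigned int sse;
            mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
            total_sse += sse;
        }
        orig += 16 * orig_stride;
        recon += 16 * recon_stride;
    }
    return total_sse;
}

int main(void) {
    unsigned char a[32 * 32], b[32 * 32];
    memset(a, 100, sizeof(a));
    memset(b, 98, sizeof(b));  /* constant diff of 2 -> sse of 4 per pixel */
    printf("%llu\n", (unsigned long long)plane_sse(a, 32, b, 32, 32, 32));
    return 0;
}
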
@@ -2434,8 +2394,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
pkt.kind = VPX_CODEC_PSNR_PKT;
sse = calc_plane_error(orig->y_buffer, orig->y_stride,
recon->y_buffer, recon->y_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
+ width, height);
pkt.data.psnr.sse[0] = sse;
pkt.data.psnr.sse[1] = sse;
pkt.data.psnr.samples[0] = width * height;
@@ -2446,8 +2405,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
recon->u_buffer, recon->uv_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
+ width, height);
pkt.data.psnr.sse[0] += sse;
pkt.data.psnr.sse[2] = sse;
pkt.data.psnr.samples[0] += width * height;
@@ -2455,8 +2413,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
recon->v_buffer, recon->uv_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
+ width, height);
pkt.data.psnr.sse[0] += sse;
pkt.data.psnr.sse[3] = sse;
pkt.data.psnr.samples[0] += width * height;
@@ -3428,8 +3385,7 @@ static void encode_frame_to_data_rate
if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
int last_q = Q;
int kf_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
int high_err_target = cpi->ambient_err;
int low_err_target = (cpi->ambient_err >> 1);
@@ -3621,8 +3577,7 @@ static void encode_frame_to_data_rate
if (Loop == FALSE && cm->frame_type != KEY_FRAME && sf->search_best_filter) {
if (mcomp_filter_index < mcomp_filters) {
INT64 err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
INT64 rate = cpi->projected_frame_size << 8;
mcomp_filter_cost[mcomp_filter_index] =
(RDCOST(cpi->RDMULT, cpi->RDDIV, rate, err));
@@ -3684,8 +3639,7 @@ static void encode_frame_to_data_rate
// the force key frame
if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
}
// This frame's MVs are saved and will be used in next frame's MV
@@ -3903,8 +3857,7 @@ static void encode_frame_to_data_rate
vp8_clear_system_state(); // __asm emms;
recon_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
if (cpi->twopass.total_left_stats->coded_error != 0.0)
fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
@@ -4390,16 +4343,16 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
int64_t sq_error;
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
- recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
- IF_RTCD(&cpi->rtcd.variance));
+ recon->y_buffer, recon->y_stride, orig->y_width,
+ orig->y_height);
ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
- recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ recon->u_buffer, recon->uv_stride, orig->uv_width,
+ orig->uv_height);
ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
- recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ recon->v_buffer, recon->uv_stride, orig->uv_width,
+ orig->uv_height);
sq_error = ye + ue + ve;
@@ -4419,16 +4372,16 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
vp8_clear_system_state();
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
- pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
- IF_RTCD(&cpi->rtcd.variance));
+ pp->y_buffer, pp->y_stride, orig->y_width,
+ orig->y_height);
ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
- pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ pp->u_buffer, pp->uv_stride, orig->uv_width,
+ orig->uv_height);
ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
- pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ pp->v_buffer, pp->uv_stride, orig->uv_width,
+ orig->uv_height);
sq_error = ye + ue + ve;
@@ -4441,8 +4394,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
cpi->totalp += frame_psnr2;
frame_ssim2 = vp8_calc_ssim(cpi->Source,
- &cm->post_proc_buffer, 1, &weight,
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->post_proc_buffer, 1, &weight);
cpi->summed_quality += frame_ssim2 * weight;
cpi->summed_weights += weight;
@@ -4461,7 +4413,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
if (cpi->b_calculate_ssimg) {
double y, u, v, frame_all;
frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
- &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
+ &y, &u, &v);
cpi->total_ssimg_y += y;
cpi->total_ssimg_u += u;
cpi->total_ssimg_v += v;
@@ -4604,19 +4556,19 @@ int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) {
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
int i, j;
int Total = 0;
unsigned char *src = source->y_buffer;
unsigned char *dst = dest->y_buffer;
- (void)rtcd;
// Loop through the Y plane raw and reconstruction data summing (square differences)
for (i = 0; i < source->y_height; i += 16) {
for (j = 0; j < source->y_width; j += 16) {
unsigned int sse;
- Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
+ Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
}
src += 16 * source->y_stride;
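
The page cuts off inside vp8_calc_ss_err, but the pattern above repeats at every converted call site: the rtcd parameter and its (void)rtcd cast disappear, and vp8_mse16x16 is called by name. Direct naming does not give up CPU-specific code: a generated RTCD header can still route a plain symbol to the best implementation, chosen once at startup. An illustrative miniature of that arrangement (the names and feature flag are invented for the example):

#include <stdio.h>

static unsigned int mse16x16_c(int d)    { return (unsigned int)(d * d); }
/* Same math; stands in for a SIMD-accelerated variant. */
static unsigned int mse16x16_simd(int d) { return (unsigned int)(d * d); }

/* One global pointer per function; callers use the plain name. */
static unsigned int (*vp8_mse16x16_ptr)(int) = mse16x16_c;
#define vp8_mse16x16 vp8_mse16x16_ptr

static void rtcd_init(int have_simd) {
    if (have_simd)
        vp8_mse16x16_ptr = mse16x16_simd;
}

int main(void) {
    rtcd_init(1);
    printf("%u\n", vp8_mse16x16(3));  /* caller never sees a vtable */
    return 0;
}
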