Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_invtrans.c                                          20
-rw-r--r--  vp9/common/vp9_invtrans.h                                          20
-rw-r--r--  vp9/common/x86/vp9_subpixel_variance_sse2.c                        45
-rw-r--r--  vp9/decoder/vp9_decodframe.c                                        1
-rw-r--r--  vp9/encoder/vp9_encodeframe.c                                       1
-rw-r--r--  vp9/encoder/vp9_encodeintra.c                                       1
-rw-r--r--  vp9/encoder/vp9_encodemb.c                                         18
-rw-r--r--  vp9/encoder/vp9_variance_c.c                                       53
-rw-r--r--  vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm (renamed from vp9/common/x86/vp9_subpel_variance_impl_sse2.asm)   0
-rw-r--r--  vp9/vp9_common.mk                                                   4
-rw-r--r--  vp9/vp9cx.mk                                                        1
11 files changed, 41 insertions, 123 deletions
diff --git a/vp9/common/vp9_invtrans.c b/vp9/common/vp9_invtrans.c
deleted file mode 100644
index d47fca190..000000000
--- a/vp9/common/vp9_invtrans.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "vp9/common/vp9_invtrans.h"
-#include "./vp9_rtcd.h"
-
-void vp9_inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob, int16_t *dqcoeff,
- uint8_t *dest, int stride) {
- if (eob <= 1)
- xd->inv_txm4x4_1_add(dqcoeff, dest, stride);
- else
- xd->inv_txm4x4_add(dqcoeff, dest, stride);
-}
diff --git a/vp9/common/vp9_invtrans.h b/vp9/common/vp9_invtrans.h
deleted file mode 100644
index dbdc50a2a..000000000
--- a/vp9/common/vp9_invtrans.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VP9_COMMON_VP9_INVTRANS_H_
-#define VP9_COMMON_VP9_INVTRANS_H_
-
-#include "./vpx_config.h"
-#include "vpx/vpx_integer.h"
-#include "vp9/common/vp9_blockd.h"
-
-void vp9_inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob, int16_t *dqcoeff,
- uint8_t *dest, int stride);
-#endif // VP9_COMMON_VP9_INVTRANS_H_
diff --git a/vp9/common/x86/vp9_subpixel_variance_sse2.c b/vp9/common/x86/vp9_subpixel_variance_sse2.c
deleted file mode 100644
index c20b9fbe9..000000000
--- a/vp9/common/x86/vp9_subpixel_variance_sse2.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#define HALFNDX 8
-
-void vp9_half_horiz_variance16x_h_sse2(const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared);
-
-void vp9_half_vert_variance16x_h_sse2(const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared);
-
-void vp9_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared);
-
-void vp9_filter_block2d_bil_var_sse2(const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int xoffset,
- int yoffset,
- int *sum,
- unsigned int *sumsquared);
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index f17fd3c7a..49b181d69 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -20,7 +20,6 @@
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_entropy.h"
-#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index bb784db12..f0f2ef5d3 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -23,7 +23,6 @@
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_reconinter.h"
-#include "vp9/common/vp9_invtrans.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_reconintra.h"
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index fe4e9fd0f..f29dba0c1 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -13,7 +13,6 @@
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/encoder/vp9_encodemb.h"
-#include "vp9/common/vp9_invtrans.h"
#include "vp9/encoder/vp9_encodeintra.h"
int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 90f00d2be..f0202450e 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -13,7 +13,6 @@
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_tokenize.h"
-#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "vp9/encoder/vp9_rdopt.h"
@@ -39,6 +38,15 @@ void vp9_subtract_block(int rows, int cols,
}
}
+static void inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob,
+ int16_t *dqcoeff, uint8_t *dest,
+ int stride) {
+ if (eob <= 1)
+ xd->inv_txm4x4_1_add(dqcoeff, dest, stride);
+ else
+ xd->inv_txm4x4_add(dqcoeff, dest, stride);
+}
+
static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int plane) {
struct macroblock_plane *const p = &x->plane[plane];
@@ -527,8 +535,8 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp9_inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
- dst, pd->dst.stride);
+ inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
+ dst, pd->dst.stride);
else
vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type);
break;
@@ -667,8 +675,8 @@ static void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- vp9_inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
- dst, pd->dst.stride);
+ inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
+ dst, pd->dst.stride);
else
vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type);
break;
diff --git a/vp9/encoder/vp9_variance_c.c b/vp9/encoder/vp9_variance_c.c
index 3b9d50f34..23e776791 100644
--- a/vp9/encoder/vp9_variance_c.c
+++ b/vp9/encoder/vp9_variance_c.c
@@ -14,6 +14,7 @@
#include "vp9/common/vp9_subpelvar.h"
#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"
+#include "./vp9_rtcd.h"
unsigned int vp9_get_mb_ss_c(const int16_t *src_ptr) {
unsigned int i, sum = 0;
@@ -56,7 +57,7 @@ unsigned int vp9_sub_pixel_variance64x32_c(const uint8_t *src_ptr,
1, 33, 64, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
- return vp9_variance64x32_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance64x32(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr,
@@ -79,7 +80,7 @@ unsigned int vp9_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr,
1, 33, 64, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
comp_avg_pred(temp3, second_pred, 64, 32, temp2, 64);
- return vp9_variance64x32_c(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance64x32(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_variance32x64_c(const uint8_t *src_ptr,
@@ -113,7 +114,7 @@ unsigned int vp9_sub_pixel_variance32x64_c(const uint8_t *src_ptr,
1, 65, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
- return vp9_variance32x64_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance32x64(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr,
@@ -136,7 +137,7 @@ unsigned int vp9_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr,
1, 65, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
comp_avg_pred(temp3, second_pred, 32, 64, temp2, 32);
- return vp9_variance32x64_c(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance32x64(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_variance32x16_c(const uint8_t *src_ptr,
@@ -170,7 +171,7 @@ unsigned int vp9_sub_pixel_variance32x16_c(const uint8_t *src_ptr,
1, 17, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
- return vp9_variance32x16_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance32x16(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr,
@@ -193,7 +194,7 @@ unsigned int vp9_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr,
1, 17, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
comp_avg_pred(temp3, second_pred, 32, 16, temp2, 32);
- return vp9_variance32x16_c(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance32x16(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_variance16x32_c(const uint8_t *src_ptr,
@@ -227,7 +228,7 @@ unsigned int vp9_sub_pixel_variance16x32_c(const uint8_t *src_ptr,
1, 33, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
- return vp9_variance16x32_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance16x32(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr,
@@ -250,7 +251,7 @@ unsigned int vp9_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr,
1, 33, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
comp_avg_pred(temp3, second_pred, 16, 32, temp2, 16);
- return vp9_variance16x32_c(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance16x32(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
@@ -451,7 +452,7 @@ unsigned int vp9_sub_pixel_variance4x4_c(const uint8_t *src_ptr,
// Now filter Verticaly
var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
- return vp9_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance4x4(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr,
@@ -477,7 +478,7 @@ unsigned int vp9_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr,
// Now filter Verticaly
var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
comp_avg_pred(temp3, second_pred, 4, 4, temp2, 4);
- return vp9_variance4x4_c(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance4x4(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance8x8_c(const uint8_t *src_ptr,
@@ -498,7 +499,7 @@ unsigned int vp9_sub_pixel_variance8x8_c(const uint8_t *src_ptr,
1, 9, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
- return vp9_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance8x8(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr,
@@ -521,7 +522,7 @@ unsigned int vp9_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr,
1, 9, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
comp_avg_pred(temp3, second_pred, 8, 8, temp2, 8);
- return vp9_variance8x8_c(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance8x8(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
@@ -542,7 +543,7 @@ unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
1, 17, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
- return vp9_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance16x16(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr,
@@ -566,7 +567,7 @@ unsigned int vp9_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr,
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
comp_avg_pred(temp3, second_pred, 16, 16, temp2, 16);
- return vp9_variance16x16_c(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance16x16(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
@@ -587,7 +588,7 @@ unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
1, 65, 64, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
- return vp9_variance64x64_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance64x64(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr,
@@ -610,7 +611,7 @@ unsigned int vp9_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr,
1, 65, 64, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
comp_avg_pred(temp3, second_pred, 64, 64, temp2, 64);
- return vp9_variance64x64_c(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance64x64(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
@@ -631,7 +632,7 @@ unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
1, 33, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
- return vp9_variance32x32_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance32x32(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr,
@@ -654,7 +655,7 @@ unsigned int vp9_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr,
1, 33, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
comp_avg_pred(temp3, second_pred, 32, 32, temp2, 32);
- return vp9_variance32x32_c(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance32x32(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_variance_halfpixvar16x16_h_c(const uint8_t *src_ptr,
@@ -795,7 +796,7 @@ unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,
1, 9, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
- return vp9_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance16x8(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr,
@@ -818,7 +819,7 @@ unsigned int vp9_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr,
1, 9, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
comp_avg_pred(temp3, second_pred, 16, 8, temp2, 16);
- return vp9_variance16x8_c(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance16x8(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance8x16_c(const uint8_t *src_ptr,
@@ -839,7 +840,7 @@ unsigned int vp9_sub_pixel_variance8x16_c(const uint8_t *src_ptr,
1, 17, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
- return vp9_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance8x16(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr,
@@ -862,7 +863,7 @@ unsigned int vp9_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr,
1, 17, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
comp_avg_pred(temp3, second_pred, 8, 16, temp2, 8);
- return vp9_variance8x16_c(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance8x16(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance8x4_c(const uint8_t *src_ptr,
@@ -883,7 +884,7 @@ unsigned int vp9_sub_pixel_variance8x4_c(const uint8_t *src_ptr,
1, 5, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
- return vp9_variance8x4_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance8x4(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr,
@@ -906,7 +907,7 @@ unsigned int vp9_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr,
1, 5, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
comp_avg_pred(temp3, second_pred, 8, 4, temp2, 8);
- return vp9_variance8x4_c(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance8x4(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance4x8_c(const uint8_t *src_ptr,
@@ -929,7 +930,7 @@ unsigned int vp9_sub_pixel_variance4x8_c(const uint8_t *src_ptr,
1, 9, 4, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
- return vp9_variance4x8_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance4x8(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr,
@@ -952,5 +953,5 @@ unsigned int vp9_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr,
1, 9, 4, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
comp_avg_pred(temp3, second_pred, 4, 8, temp2, 4);
- return vp9_variance4x8_c(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
+ return vp9_variance4x8(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
}
diff --git a/vp9/common/x86/vp9_subpel_variance_impl_sse2.asm b/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
index 8a2a471f5..8a2a471f5 100644
--- a/vp9/common/x86/vp9_subpel_variance_impl_sse2.asm
+++ b/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
diff --git a/vp9/vp9_common.mk b/vp9/vp9_common.mk
index b6d50f8ef..7a7483332 100644
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -38,7 +38,6 @@ VP9_COMMON_SRCS-yes += common/vp9_enums.h
VP9_COMMON_SRCS-yes += common/vp9_extend.h
VP9_COMMON_SRCS-yes += common/vp9_findnearmv.h
VP9_COMMON_SRCS-yes += common/vp9_idct.h
-VP9_COMMON_SRCS-yes += common/vp9_invtrans.h
VP9_COMMON_SRCS-yes += common/vp9_loopfilter.h
VP9_COMMON_SRCS-yes += common/vp9_modecont.h
VP9_COMMON_SRCS-yes += common/vp9_mv.h
@@ -59,7 +58,6 @@ VP9_COMMON_SRCS-yes += common/vp9_textblit.h
VP9_COMMON_SRCS-yes += common/vp9_tile_common.h
VP9_COMMON_SRCS-yes += common/vp9_tile_common.c
VP9_COMMON_SRCS-yes += common/vp9_treecoder.h
-VP9_COMMON_SRCS-yes += common/vp9_invtrans.c
VP9_COMMON_SRCS-yes += common/vp9_loopfilter.c
VP9_COMMON_SRCS-yes += common/vp9_loopfilter_filters.c
VP9_COMMON_SRCS-yes += common/vp9_mbpitch.c
@@ -87,8 +85,6 @@ VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_iwalsh_sse2.asm
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_loopfilter_sse2.asm
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_recon_sse2.asm
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_recon_wrapper_sse2.c
-VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_subpel_variance_impl_sse2.asm
-VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_subpixel_variance_sse2.c
VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_8t_ssse3.asm
ifeq ($(CONFIG_POSTPROC),yes)
VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_postproc_mmx.asm
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index 86fd08850..4bed6c0d7 100644
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -86,6 +86,7 @@ VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad4d_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_fwalsh_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subtract_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance_impl_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm
VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm
VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm