author     James Zern <jzern@google.com>  2015-05-02 13:24:16 -0700
committer  James Zern <jzern@google.com>  2015-05-07 11:55:08 -0700
commit     fd3658b0e4b52b5045b4881900292258ed106f39
tree       2d44fe3f9faa39af60a67f06a79880ec0294be07 /vp9/encoder
parent     76a08210b667f5c7ce8d64d930382c948dbeaea7
replace DECLARE_ALIGNED_ARRAY w/DECLARE_ALIGNED
this macro was used inconsistently, and it differs in behavior from DECLARE_ALIGNED only when an alignment attribute is unavailable. the macro is used with calls to assembly, while generic c-code doesn't rely on it, so in a c-only build without an alignment attribute the code will still function as expected.

Change-Id: Ie9d06d4028c0de17c63b3a27e6c1b0491cc4ea79
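
For context, a rough sketch of the two declaration styles and of why they only diverge when no alignment attribute exists. The definitions below paraphrase the spirit of vpx_ports/mem.h rather than quoting the header verbatim:

#include <stdint.h>

#if defined(__GNUC__)
#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))
#define DECLARE_ALIGNED_ARRAY(a, typ, val, n) \
  typ val[n] __attribute__((aligned(a)))
#else
/* Without an alignment attribute, DECLARE_ALIGNED degrades to a plain
 * declaration, while DECLARE_ALIGNED_ARRAY still guaranteed alignment
 * by over-allocating and aligning a pointer by hand -- the only case
 * where the two differed. */
#define DECLARE_ALIGNED(n, typ, val) typ val
#define DECLARE_ALIGNED_ARRAY(a, typ, val, n) \
  typ val##_[(n) + (a) / sizeof(typ)]; \
  typ *val = (typ *)(((uintptr_t)val##_ + (a)-1) & ~(uintptr_t)((a)-1))
#endif

void example(void) {
  /* old style: the element count is a separate macro argument */
  DECLARE_ALIGNED_ARRAY(16, uint8_t, old_buf, 8 * 8);
  /* new style: the dimension is part of the declarator itself */
  DECLARE_ALIGNED(16, uint8_t, new_buf[8 * 8]);
  (void)old_buf;
  (void)new_buf;
}

With the attribute available the two expand to the same aligned array, which is what makes the replacement mechanical; only the attribute-less fallback loses the hard alignment guarantee, and as the message above notes, only the assembly paths depend on it.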
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/arm/neon/vp9_variance_neon.c  119
-rw-r--r--  vp9/encoder/vp9_encodeframe.c                2
-rw-r--r--  vp9/encoder/vp9_mcomp.c                      6
-rw-r--r--  vp9/encoder/vp9_pickmode.c                   4
-rw-r--r--  vp9/encoder/vp9_rdopt.c                      8
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c           10
-rw-r--r--  vp9/encoder/vp9_variance.c                   8
-rw-r--r--  vp9/encoder/x86/vp9_dct_impl_sse2.c          2
8 files changed, 72 insertions(+), 87 deletions(-)
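
Every variance kernel in this change returns the same identity: var = sse - sum^2 / (w * h). Because w * h is a power of two, the division is a right shift (8x8 -> >> 6, 16x16 -> >> 8, 32x32 -> >> 10, 64x64 -> >> 12), which is what inline comments like ">> 6 = / 8 * 8" are pointing out. A self-contained check of that arithmetic; block_variance is a hypothetical helper, not a libvpx function:

#include <assert.h>
#include <stdint.h>

/* the identity the NEON kernels below return, with the power-of-two
 * divide expressed as a shift */
static unsigned int block_variance(unsigned int sse, int sum, int shift) {
  return sse - (unsigned int)(((int64_t)sum * sum) >> shift);
}

int main(void) {
  /* 8x8 block: >> 6 is the same as dividing by 64 */
  assert(block_variance(1000, 64, 6) == 1000 - (64 * 64) / 64);
  /* 16x16 block: >> 8 is the same as dividing by 256 */
  assert(block_variance(5000, 512, 8) == 5000 - (512 * 512) / 256);
  return 0;
}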
diff --git a/vp9/encoder/arm/neon/vp9_variance_neon.c b/vp9/encoder/arm/neon/vp9_variance_neon.c
index b1ad83731..cf82dd75d 100644
--- a/vp9/encoder/arm/neon/vp9_variance_neon.c
+++ b/vp9/encoder/arm/neon/vp9_variance_neon.c
@@ -20,21 +20,6 @@
#include "vp9/encoder/vp9_variance.h"
-enum { kWidth8 = 8 };
-enum { kHeight8 = 8 };
-enum { kHeight8PlusOne = 9 };
-enum { kWidth16 = 16 };
-enum { kHeight16 = 16 };
-enum { kHeight16PlusOne = 17 };
-enum { kWidth32 = 32 };
-enum { kHeight32 = 32 };
-enum { kHeight32PlusOne = 33 };
-enum { kWidth64 = 64 };
-enum { kHeight64 = 64 };
-enum { kHeight64PlusOne = 65 };
-enum { kPixelStepOne = 1 };
-enum { kAlign16 = 16 };
-
static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
const int32x4_t a = vpaddlq_s16(v_16x8);
const int64x2_t b = vpaddlq_s32(a);
@@ -84,30 +69,30 @@ static void variance_neon_w8(const uint8_t *a, int a_stride,
void vp9_get8x8var_neon(const uint8_t *src_ptr, int source_stride,
const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse, int *sum) {
- variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, kWidth8,
- kHeight8, sse, sum);
+ variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, 8,
+ 8, sse, sum);
}
unsigned int vp9_variance8x8_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum;
- variance_neon_w8(a, a_stride, b, b_stride, kWidth8, kHeight8, sse, &sum);
+ variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, &sum);
return *sse - (((int64_t)sum * sum) >> 6); // >> 6 = / 8 * 8
}
void vp9_get16x16var_neon(const uint8_t *src_ptr, int source_stride,
const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse, int *sum) {
- variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, kWidth16,
- kHeight16, sse, sum);
+ variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, 16,
+ 16, sse, sum);
}
unsigned int vp9_variance16x16_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum;
- variance_neon_w8(a, a_stride, b, b_stride, kWidth16, kHeight16, sse, &sum);
+ variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, &sum);
return *sse - (((int64_t)sum * sum) >> 8); // >> 8 = / 16 * 16
}
@@ -169,15 +154,15 @@ unsigned int vp9_sub_pixel_variance8x8_neon(const uint8_t *src,
const uint8_t *dst,
int dst_stride,
unsigned int *sse) {
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight8 * kWidth8);
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight8PlusOne * kWidth8);
+ DECLARE_ALIGNED(16, uint8_t, temp2[8 * 8]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[9 * 8]);
- var_filter_block2d_bil_w8(src, fdata3, src_stride, kPixelStepOne,
- kHeight8PlusOne, kWidth8,
+ var_filter_block2d_bil_w8(src, fdata3, src_stride, 1,
+ 9, 8,
BILINEAR_FILTERS_2TAP(xoffset));
- var_filter_block2d_bil_w8(fdata3, temp2, kWidth8, kWidth8, kHeight8,
- kWidth8, BILINEAR_FILTERS_2TAP(yoffset));
- return vp9_variance8x8_neon(temp2, kWidth8, dst, dst_stride, sse);
+ var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8,
+ 8, BILINEAR_FILTERS_2TAP(yoffset));
+ return vp9_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
}
unsigned int vp9_sub_pixel_variance16x16_neon(const uint8_t *src,
@@ -187,29 +172,29 @@ unsigned int vp9_sub_pixel_variance16x16_neon(const uint8_t *src,
const uint8_t *dst,
int dst_stride,
unsigned int *sse) {
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight16 * kWidth16);
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight16PlusOne * kWidth16);
+ DECLARE_ALIGNED(16, uint8_t, temp2[16 * 16]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[17 * 16]);
- var_filter_block2d_bil_w16(src, fdata3, src_stride, kPixelStepOne,
- kHeight16PlusOne, kWidth16,
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, 1,
+ 17, 16,
BILINEAR_FILTERS_2TAP(xoffset));
- var_filter_block2d_bil_w16(fdata3, temp2, kWidth16, kWidth16, kHeight16,
- kWidth16, BILINEAR_FILTERS_2TAP(yoffset));
- return vp9_variance16x16_neon(temp2, kWidth16, dst, dst_stride, sse);
+ var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16,
+ 16, BILINEAR_FILTERS_2TAP(yoffset));
+ return vp9_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
}
void vp9_get32x32var_neon(const uint8_t *src_ptr, int source_stride,
const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse, int *sum) {
- variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, kWidth32,
- kHeight32, sse, sum);
+ variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, 32,
+ 32, sse, sum);
}
unsigned int vp9_variance32x32_neon(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
unsigned int *sse) {
int sum;
- variance_neon_w8(a, a_stride, b, b_stride, kWidth32, kHeight32, sse, &sum);
+ variance_neon_w8(a, a_stride, b, b_stride, 32, 32, sse, &sum);
return *sse - (((int64_t)sum * sum) >> 10); // >> 10 = / 32 * 32
}
@@ -218,9 +203,9 @@ unsigned int vp9_variance32x64_neon(const uint8_t *a, int a_stride,
unsigned int *sse) {
int sum1, sum2;
uint32_t sse1, sse2;
- variance_neon_w8(a, a_stride, b, b_stride, kWidth32, kHeight32, &sse1, &sum1);
- variance_neon_w8(a + (kHeight32 * a_stride), a_stride,
- b + (kHeight32 * b_stride), b_stride, kWidth32, kHeight32,
+ variance_neon_w8(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
+ variance_neon_w8(a + (32 * a_stride), a_stride,
+ b + (32 * b_stride), b_stride, 32, 32,
&sse2, &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
@@ -232,9 +217,9 @@ unsigned int vp9_variance64x32_neon(const uint8_t *a, int a_stride,
unsigned int *sse) {
int sum1, sum2;
uint32_t sse1, sse2;
- variance_neon_w8(a, a_stride, b, b_stride, kWidth64, kHeight16, &sse1, &sum1);
- variance_neon_w8(a + (kHeight16 * a_stride), a_stride,
- b + (kHeight16 * b_stride), b_stride, kWidth64, kHeight16,
+ variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
+ variance_neon_w8(a + (16 * a_stride), a_stride,
+ b + (16 * b_stride), b_stride, 64, 16,
&sse2, &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
@@ -247,22 +232,22 @@ unsigned int vp9_variance64x64_neon(const uint8_t *a, int a_stride,
int sum1, sum2;
uint32_t sse1, sse2;
- variance_neon_w8(a, a_stride, b, b_stride, kWidth64, kHeight16, &sse1, &sum1);
- variance_neon_w8(a + (kHeight16 * a_stride), a_stride,
- b + (kHeight16 * b_stride), b_stride, kWidth64, kHeight16,
+ variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
+ variance_neon_w8(a + (16 * a_stride), a_stride,
+ b + (16 * b_stride), b_stride, 64, 16,
&sse2, &sum2);
sse1 += sse2;
sum1 += sum2;
- variance_neon_w8(a + (kHeight16 * 2 * a_stride), a_stride,
- b + (kHeight16 * 2 * b_stride), b_stride,
- kWidth64, kHeight16, &sse2, &sum2);
+ variance_neon_w8(a + (16 * 2 * a_stride), a_stride,
+ b + (16 * 2 * b_stride), b_stride,
+ 64, 16, &sse2, &sum2);
sse1 += sse2;
sum1 += sum2;
- variance_neon_w8(a + (kHeight16 * 3 * a_stride), a_stride,
- b + (kHeight16 * 3 * b_stride), b_stride,
- kWidth64, kHeight16, &sse2, &sum2);
+ variance_neon_w8(a + (16 * 3 * a_stride), a_stride,
+ b + (16 * 3 * b_stride), b_stride,
+ 64, 16, &sse2, &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
return *sse - (((int64_t)sum1 * sum1) >> 12); // >> 12 = / 64 * 64
@@ -275,15 +260,15 @@ unsigned int vp9_sub_pixel_variance32x32_neon(const uint8_t *src,
const uint8_t *dst,
int dst_stride,
unsigned int *sse) {
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight32 * kWidth32);
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight32PlusOne * kWidth32);
+ DECLARE_ALIGNED(16, uint8_t, temp2[32 * 32]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[33 * 32]);
- var_filter_block2d_bil_w16(src, fdata3, src_stride, kPixelStepOne,
- kHeight32PlusOne, kWidth32,
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, 1,
+ 33, 32,
BILINEAR_FILTERS_2TAP(xoffset));
- var_filter_block2d_bil_w16(fdata3, temp2, kWidth32, kWidth32, kHeight32,
- kWidth32, BILINEAR_FILTERS_2TAP(yoffset));
- return vp9_variance32x32_neon(temp2, kWidth32, dst, dst_stride, sse);
+ var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32,
+ 32, BILINEAR_FILTERS_2TAP(yoffset));
+ return vp9_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
}
unsigned int vp9_sub_pixel_variance64x64_neon(const uint8_t *src,
@@ -293,13 +278,13 @@ unsigned int vp9_sub_pixel_variance64x64_neon(const uint8_t *src,
const uint8_t *dst,
int dst_stride,
unsigned int *sse) {
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight64 * kWidth64);
- DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight64PlusOne * kWidth64);
+ DECLARE_ALIGNED(16, uint8_t, temp2[64 * 64]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[65 * 64]);
- var_filter_block2d_bil_w16(src, fdata3, src_stride, kPixelStepOne,
- kHeight64PlusOne, kWidth64,
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, 1,
+ 65, 64,
BILINEAR_FILTERS_2TAP(xoffset));
- var_filter_block2d_bil_w16(fdata3, temp2, kWidth64, kWidth64, kHeight64,
- kWidth64, BILINEAR_FILTERS_2TAP(yoffset));
- return vp9_variance64x64_neon(temp2, kWidth64, dst, dst_stride, sse);
+ var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64,
+ 64, BILINEAR_FILTERS_2TAP(yoffset));
+ return vp9_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
}
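
One sizing detail worth calling out in the sub-pixel paths above: the horizontal pass writes H + 1 rows (fdata3 is (H + 1) * W) so that the vertical pass can blend every row with the one below it while producing the final H * W block (temp2). A rough standalone sketch of that two-pass 2-tap scheme, assuming filter pairs that sum to 128 as in the library's bilinear table; bilinear_2tap_8x8 is an illustrative helper, not a libvpx function:

#include <stdint.h>

static void bilinear_2tap_8x8(const uint8_t *src, int src_stride,
                              int f0h, int f1h, int f0v, int f1v,
                              uint8_t *out /* 8 * 8 */) {
  uint8_t tmp[9 * 8];  /* 9 rows: one extra so row r can blend with row r + 1 */
  int r, c;
  /* horizontal pass over 9 rows, stepping one pixel along each row */
  for (r = 0; r < 9; ++r)
    for (c = 0; c < 8; ++c)
      tmp[r * 8 + c] = (uint8_t)((src[r * src_stride + c] * f0h +
                                  src[r * src_stride + c + 1] * f1h + 64) >> 7);
  /* vertical pass: blend each row with the next to get the 8x8 result */
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c)
      out[r * 8 + c] = (uint8_t)((tmp[r * 8 + c] * f0v +
                                  tmp[(r + 1) * 8 + c] * f1v + 64) >> 7);
}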
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 39dffbdf2..0e74784e9 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -3657,7 +3657,7 @@ static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
(cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
(cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
- DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS);
+ DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
diff *var16 = cpi->source_diff_var;
int sum = 0;
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index ec089f17a..80c509a1b 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -301,13 +301,13 @@ static INLINE unsigned int setup_center_error(const MACROBLOCKD *xd,
#if CONFIG_VP9_HIGHBITDEPTH
if (second_pred != NULL) {
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, comp_pred16, 64 * 64);
+ DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
vp9_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
y_stride);
besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride,
sse1);
} else {
- DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64);
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
vp9_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
}
@@ -319,7 +319,7 @@ static INLINE unsigned int setup_center_error(const MACROBLOCKD *xd,
#else
(void) xd;
if (second_pred != NULL) {
- DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64);
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
vp9_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
} else {
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index 221e14d5b..9fb7cfba7 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -1099,9 +1099,9 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// process.
// tmp[3] points to dst buffer, and the other 3 point to allocated buffers.
PRED_BUFFER tmp[4];
- DECLARE_ALIGNED_ARRAY(16, uint8_t, pred_buf, 3 * 64 * 64);
+ DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 64 * 64]);
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, pred_buf_16, 3 * 64 * 64);
+ DECLARE_ALIGNED(16, uint16_t, pred_buf_16[3 * 64 * 64]);
#endif
struct buf_2d orig_dst = pd->dst;
PRED_BUFFER *best_pred = NULL;
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 676e3ca30..738256237 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1573,10 +1573,10 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
// Prediction buffer from second frame.
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, second_pred_alloc_16, 64 * 64);
+ DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
uint8_t *second_pred;
#else
- DECLARE_ALIGNED_ARRAY(16, uint8_t, second_pred, 64 * 64);
+ DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
#endif // CONFIG_VP9_HIGHBITDEPTH
for (ref = 0; ref < 2; ++ref) {
@@ -2420,10 +2420,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
(mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv cur_mv[2];
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, tmp_buf16, MAX_MB_PLANE * 64 * 64);
+ DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
uint8_t *tmp_buf;
#else
- DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf, MAX_MB_PLANE * 64 * 64);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
#endif // CONFIG_VP9_HIGHBITDEPTH
int pred_exists = 0;
int intpel_mv;
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index ed17c7c35..d7979ab53 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -280,17 +280,17 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
int mb_rows = (frames[alt_ref_index]->y_crop_height + 15) >> 4;
int mb_y_offset = 0;
int mb_uv_offset = 0;
- DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 * 3);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 * 3);
+ DECLARE_ALIGNED(16, unsigned int, accumulator[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint16_t, count[16 * 16 * 3]);
MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
uint8_t *dst1, *dst2;
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, predictor16, 16 * 16 * 3);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor8, 16 * 16 * 3);
+ DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
uint8_t *predictor;
#else
- DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 * 3);
+ DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
#endif
const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
diff --git a/vp9/encoder/vp9_variance.c b/vp9/encoder/vp9_variance.c
index fea5f3351..f38f96d6c 100644
--- a/vp9/encoder/vp9_variance.c
+++ b/vp9/encoder/vp9_variance.c
@@ -145,7 +145,7 @@ unsigned int vp9_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *second_pred) { \
uint16_t fdata3[(H + 1) * W]; \
uint8_t temp2[H * W]; \
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, H * W); \
+ DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
\
var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
BILINEAR_FILTERS_2TAP(xoffset)); \
@@ -464,7 +464,7 @@ unsigned int vp9_highbd_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *second_pred) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
- DECLARE_ALIGNED_ARRAY(16, uint16_t, temp3, H * W); \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
\
highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
W, BILINEAR_FILTERS_2TAP(xoffset)); \
@@ -486,7 +486,7 @@ unsigned int vp9_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *second_pred) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
- DECLARE_ALIGNED_ARRAY(16, uint16_t, temp3, H * W); \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
\
highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
W, BILINEAR_FILTERS_2TAP(xoffset)); \
@@ -508,7 +508,7 @@ unsigned int vp9_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *second_pred) { \
uint16_t fdata3[(H + 1) * W]; \
uint16_t temp2[H * W]; \
- DECLARE_ALIGNED_ARRAY(16, uint16_t, temp3, H * W); \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
\
highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
W, BILINEAR_FILTERS_2TAP(xoffset)); \
diff --git a/vp9/encoder/x86/vp9_dct_impl_sse2.c b/vp9/encoder/x86/vp9_dct_impl_sse2.c
index 12fa747e8..e03a76d2e 100644
--- a/vp9/encoder/x86/vp9_dct_impl_sse2.c
+++ b/vp9/encoder/x86/vp9_dct_impl_sse2.c
@@ -578,7 +578,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
// in normal/row positions).
int pass;
// We need an intermediate buffer between passes.
- DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256);
+ DECLARE_ALIGNED(16, int16_t, intermediate[256]);
const int16_t *in = input;
int16_t *out0 = intermediate;
tran_low_t *out1 = output;