summary | refs | log | tree | commit | diff
path: root/vp9/common
diff options
context:
space:
mode:
authorJames Zern <jzern@google.com>2015-05-02 13:24:16 -0700
committerJames Zern <jzern@google.com>2015-05-07 11:55:08 -0700
commitfd3658b0e4b52b5045b4881900292258ed106f39 (patch)
tree2d44fe3f9faa39af60a67f06a79880ec0294be07 /vp9/common
parent76a08210b667f5c7ce8d64d930382c948dbeaea7 (diff)
downloadlibvpx-fd3658b0e4b52b5045b4881900292258ed106f39.tar
libvpx-fd3658b0e4b52b5045b4881900292258ed106f39.tar.gz
libvpx-fd3658b0e4b52b5045b4881900292258ed106f39.tar.bz2
libvpx-fd3658b0e4b52b5045b4881900292258ed106f39.zip
replace DECLARE_ALIGNED_ARRAY w/DECLARE_ALIGNED
This macro was used inconsistently and only differs in behavior from DECLARE_ALIGNED when an alignment attribute is unavailable. This macro is used with calls to assembly, while generic C code doesn't rely on it, so in a C-only build without an alignment attribute the code will function as expected.

Change-Id: Ie9d06d4028c0de17c63b3a27e6c1b0491cc4ea79
Diffstat (limited to 'vp9/common')
-rw-r--r--vp9/common/arm/neon/vp9_convolve_neon.c4
-rw-r--r--vp9/common/mips/dspr2/vp9_convolve8_avg_dspr2.c2
-rw-r--r--vp9/common/mips/dspr2/vp9_convolve8_dspr2.c2
-rw-r--r--vp9/common/vp9_convolve.c4
-rw-r--r--vp9/common/vp9_reconintra.c8
-rw-r--r--vp9/common/x86/vp9_asm_stubs.c8
-rw-r--r--vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c24
-rw-r--r--vp9/common/x86/vp9_loopfilter_intrin_sse2.c34
8 files changed, 43 insertions, 43 deletions
diff --git a/vp9/common/arm/neon/vp9_convolve_neon.c b/vp9/common/arm/neon/vp9_convolve_neon.c
index f0881b5ae..2e28cb20e 100644
--- a/vp9/common/arm/neon/vp9_convolve_neon.c
+++ b/vp9/common/arm/neon/vp9_convolve_neon.c
@@ -20,7 +20,7 @@ void vp9_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride,
/* Given our constraints: w <= 64, h <= 64, taps == 8 we can reduce the
* maximum buffer size to 64 * 64 + 7 (+ 1 to make it divisible by 4).
*/
- DECLARE_ALIGNED_ARRAY(8, uint8_t, temp, 64 * 72);
+ DECLARE_ALIGNED(8, uint8_t, temp[64 * 72]);
// Account for the vertical phase needing 3 lines prior and 4 lines post
int intermediate_height = h + 7;
@@ -56,7 +56,7 @@ void vp9_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
- DECLARE_ALIGNED_ARRAY(8, uint8_t, temp, 64 * 72);
+ DECLARE_ALIGNED(8, uint8_t, temp[64 * 72]);
int intermediate_height = h + 7;
if (x_step_q4 != 16 || y_step_q4 != 16) {
diff --git a/vp9/common/mips/dspr2/vp9_convolve8_avg_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve8_avg_dspr2.c
index ab18490dc..17422798c 100644
--- a/vp9/common/mips/dspr2/vp9_convolve8_avg_dspr2.c
+++ b/vp9/common/mips/dspr2/vp9_convolve8_avg_dspr2.c
@@ -413,7 +413,7 @@ void vp9_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(32, uint8_t, temp, 64 * 135);
+ DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
assert(w <= 64);
diff --git a/vp9/common/mips/dspr2/vp9_convolve8_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve8_dspr2.c
index 0ef9dd508..58b50d2df 100644
--- a/vp9/common/mips/dspr2/vp9_convolve8_dspr2.c
+++ b/vp9/common/mips/dspr2/vp9_convolve8_dspr2.c
@@ -950,7 +950,7 @@ void vp9_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
- DECLARE_ALIGNED_ARRAY(32, uint8_t, temp, 64 * 135);
+ DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
uint32_t pos = 38;
diff --git a/vp9/common/vp9_convolve.c b/vp9/common/vp9_convolve.c
index 5fb56ecb6..90e337fd6 100644
--- a/vp9/common/vp9_convolve.c
+++ b/vp9/common/vp9_convolve.c
@@ -236,7 +236,7 @@ void vp9_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
- DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 64 * 64);
+ DECLARE_ALIGNED(16, uint8_t, temp[64 * 64]);
assert(w <= 64);
assert(h <= 64);
@@ -501,7 +501,7 @@ void vp9_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
const int16_t *filter_y, int y_step_q4,
int w, int h, int bd) {
// Fixed size intermediate buffer places limits on parameters.
- DECLARE_ALIGNED_ARRAY(16, uint16_t, temp, 64 * 64);
+ DECLARE_ALIGNED(16, uint16_t, temp[64 * 64]);
assert(w <= 64);
assert(h <= 64);
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index 709a78250..11dd3f325 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -657,8 +657,8 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
int i;
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, left_col, 64);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, above_data, 128 + 16);
+ DECLARE_ALIGNED(16, uint16_t, left_col[64]);
+ DECLARE_ALIGNED(16, uint16_t, above_data[128 + 16]);
uint16_t *above_row = above_data + 16;
const uint16_t *const_above_row = above_row;
const int bs = 4 << tx_size;
@@ -780,8 +780,8 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
int right_available, int x, int y,
int plane) {
int i;
- DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16);
+ DECLARE_ALIGNED(16, uint8_t, left_col[64]);
+ DECLARE_ALIGNED(16, uint8_t, above_data[128 + 16]);
uint8_t *above_row = above_data + 16;
const uint8_t *const_above_row = above_row;
const int bs = 4 << tx_size;
diff --git a/vp9/common/x86/vp9_asm_stubs.c b/vp9/common/x86/vp9_asm_stubs.c
index a0a599691..963023c53 100644
--- a/vp9/common/x86/vp9_asm_stubs.c
+++ b/vp9/common/x86/vp9_asm_stubs.c
@@ -118,7 +118,7 @@ void vp9_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
if (x_step_q4 == 16 && y_step_q4 == 16) { \
if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \
filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
- DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); \
+ DECLARE_ALIGNED(16, unsigned char, fdata2[64 * 71]); \
vp9_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \
filter_x, x_step_q4, filter_y, y_step_q4, \
w, h + 7); \
@@ -126,7 +126,7 @@ void vp9_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
filter_x, x_step_q4, filter_y, \
y_step_q4, w, h); \
} else { \
- DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 65); \
+ DECLARE_ALIGNED(16, unsigned char, fdata2[64 * 65]); \
vp9_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \
filter_x, x_step_q4, filter_y, y_step_q4, \
w, h + 1); \
@@ -259,7 +259,7 @@ void vp9_highbd_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
if (x_step_q4 == 16 && y_step_q4 == 16) { \
if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \
filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
- DECLARE_ALIGNED_ARRAY(16, uint16_t, fdata2, 64 * 71); \
+ DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]); \
vp9_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, \
CONVERT_TO_BYTEPTR(fdata2), 64, \
filter_x, x_step_q4, \
@@ -271,7 +271,7 @@ void vp9_highbd_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
filter_y, y_step_q4, \
w, h, bd); \
} else { \
- DECLARE_ALIGNED_ARRAY(16, uint16_t, fdata2, 64 * 65); \
+ DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]); \
vp9_highbd_convolve8_horiz_##opt(src, src_stride, \
CONVERT_TO_BYTEPTR(fdata2), 64, \
filter_x, x_step_q4, \
diff --git a/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c b/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
index 4bd8ac4a3..1637f0e54 100644
--- a/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
+++ b/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
@@ -524,12 +524,12 @@ void vp9_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
const uint8_t *_limit,
const uint8_t *_thresh,
int count, int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_op2, 16);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_op1, 16);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_op0, 16);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_oq2, 16);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_oq1, 16);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_oq0, 16);
+ DECLARE_ALIGNED(16, uint16_t, flat_op2[16]);
+ DECLARE_ALIGNED(16, uint16_t, flat_op1[16]);
+ DECLARE_ALIGNED(16, uint16_t, flat_op0[16]);
+ DECLARE_ALIGNED(16, uint16_t, flat_oq2[16]);
+ DECLARE_ALIGNED(16, uint16_t, flat_oq1[16]);
+ DECLARE_ALIGNED(16, uint16_t, flat_oq0[16]);
const __m128i zero = _mm_set1_epi16(0);
__m128i blimit, limit, thresh;
__m128i mask, hev, flat;
@@ -1059,7 +1059,7 @@ void vp9_highbd_lpf_vertical_4_sse2(uint16_t *s, int p,
const uint8_t *limit,
const uint8_t *thresh,
int count, int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 8 * 8);
+ DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
uint16_t *src[1];
uint16_t *dst[1];
(void)count;
@@ -1089,7 +1089,7 @@ void vp9_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p,
const uint8_t *limit1,
const uint8_t *thresh1,
int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 16 * 8);
+ DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
uint16_t *src[2];
uint16_t *dst[2];
@@ -1113,7 +1113,7 @@ void vp9_highbd_lpf_vertical_8_sse2(uint16_t *s, int p,
const uint8_t *limit,
const uint8_t *thresh,
int count, int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 8 * 8);
+ DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
uint16_t *src[1];
uint16_t *dst[1];
(void)count;
@@ -1143,7 +1143,7 @@ void vp9_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p,
const uint8_t *limit1,
const uint8_t *thresh1,
int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 16 * 8);
+ DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
uint16_t *src[2];
uint16_t *dst[2];
@@ -1168,7 +1168,7 @@ void vp9_highbd_lpf_vertical_16_sse2(uint16_t *s, int p,
const uint8_t *limit,
const uint8_t *thresh,
int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 8 * 16);
+ DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]);
uint16_t *src[2];
uint16_t *dst[2];
@@ -1198,7 +1198,7 @@ void vp9_highbd_lpf_vertical_16_dual_sse2(uint16_t *s,
const uint8_t *limit,
const uint8_t *thresh,
int bd) {
- DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 256);
+ DECLARE_ALIGNED(16, uint16_t, t_dst[256]);
// Transpose 16x16
highbd_transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16);
diff --git a/vp9/common/x86/vp9_loopfilter_intrin_sse2.c b/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
index 320328e21..8723d3283 100644
--- a/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
+++ b/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
@@ -729,12 +729,12 @@ void vp9_lpf_horizontal_8_sse2(unsigned char *s, int p,
const unsigned char *_blimit,
const unsigned char *_limit,
const unsigned char *_thresh, int count) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op2, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op1, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op0, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq2, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq1, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq0, 16);
+ DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
const __m128i zero = _mm_set1_epi16(0);
const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);
const __m128i limit = _mm_load_si128((const __m128i *)_limit);
@@ -948,12 +948,12 @@ void vp9_lpf_horizontal_8_dual_sse2(uint8_t *s, int p,
const uint8_t *_blimit1,
const uint8_t *_limit1,
const uint8_t *_thresh1) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op2, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op1, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_op0, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq2, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq1, 16);
- DECLARE_ALIGNED_ARRAY(16, unsigned char, flat_oq0, 16);
+ DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
const __m128i zero = _mm_set1_epi16(0);
const __m128i blimit =
_mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_blimit0),
@@ -1461,7 +1461,7 @@ void vp9_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *blimit1,
const uint8_t *limit1,
const uint8_t *thresh1) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 16 * 8);
+ DECLARE_ALIGNED(16, unsigned char, t_dst[16 * 8]);
unsigned char *src[2];
unsigned char *dst[2];
@@ -1484,7 +1484,7 @@ void vp9_lpf_vertical_8_sse2(unsigned char *s, int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh, int count) {
- DECLARE_ALIGNED_ARRAY(8, unsigned char, t_dst, 8 * 8);
+ DECLARE_ALIGNED(8, unsigned char, t_dst[8 * 8]);
unsigned char *src[1];
unsigned char *dst[1];
(void)count;
@@ -1511,7 +1511,7 @@ void vp9_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
const uint8_t *blimit1,
const uint8_t *limit1,
const uint8_t *thresh1) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 16 * 8);
+ DECLARE_ALIGNED(16, unsigned char, t_dst[16 * 8]);
unsigned char *src[2];
unsigned char *dst[2];
@@ -1535,7 +1535,7 @@ void vp9_lpf_vertical_16_sse2(unsigned char *s, int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh) {
- DECLARE_ALIGNED_ARRAY(8, unsigned char, t_dst, 8 * 16);
+ DECLARE_ALIGNED(8, unsigned char, t_dst[8 * 16]);
unsigned char *src[2];
unsigned char *dst[2];
@@ -1562,7 +1562,7 @@ void vp9_lpf_vertical_16_sse2(unsigned char *s, int p,
void vp9_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+ DECLARE_ALIGNED(16, unsigned char, t_dst[256]);
// Transpose 16x16
transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16);