Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/vp9_dct.c              |   2
-rw-r--r--  vp9/encoder/vp9_encodeframe.c      |  34
-rw-r--r--  vp9/encoder/vp9_encodemb.c         |  16
-rw-r--r--  vp9/encoder/vp9_mcomp.c            |   8
-rw-r--r--  vp9/encoder/vp9_onyx_if.c          |   3
-rw-r--r--  vp9/encoder/vp9_rdopt.c            |  24
-rw-r--r--  vp9/encoder/vp9_segmentation.c     |   1
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c  |  32
-rw-r--r--  vp9/encoder/vp9_tokenize.c         |  22
-rw-r--r--  vp9/encoder/x86/vp9_dct_sse2.c     | 607
10 files changed, 609 insertions(+), 140 deletions(-)
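The first hunk below, in vp9/encoder/vp9_dct.c, replaces the plain arithmetic down-shift at the end of vp9_short_fht8x8_c with a form that rounds negative intermediate values toward zero instead of toward negative infinity. A minimal standalone sketch of the difference (the helper names are illustrative, not part of the patch):

#include <stdint.h>

/* Plain arithmetic shift: rounds toward negative infinity,
 * e.g. -3 >> 1 == -2. */
static int16_t halve_floor(int16_t v) {
  return v >> 1;
}

/* Patched form: (v < 0) adds 1 only for negative inputs, so the
 * shift rounds toward zero instead, e.g. -3 becomes -1, matching
 * integer division by 2. */
static int16_t halve_toward_zero(int16_t v) {
  return (v + (v < 0)) >> 1;
}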
diff --git a/vp9/encoder/vp9_dct.c b/vp9/encoder/vp9_dct.c index a90bcf5df..8be893e85 100644 --- a/vp9/encoder/vp9_dct.c +++ b/vp9/encoder/vp9_dct.c @@ -587,7 +587,7 @@ void vp9_short_fht8x8_c(int16_t *input, int16_t *output, temp_in[j] = out[j + i * 8]; ht.rows(temp_in, temp_out); for (j = 0; j < 8; ++j) - output[j + i * 8] = temp_out[j] >> 1; + output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1; } } diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c index f655d456b..54629415d 100644 --- a/vp9/encoder/vp9_encodeframe.c +++ b/vp9/encoder/vp9_encodeframe.c @@ -455,25 +455,6 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, } } -static unsigned find_seg_id(VP9_COMMON *cm, uint8_t *buf, BLOCK_SIZE_TYPE bsize, - int start_y, int height, int start_x, int width) { - const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize); - const int end_x = MIN(start_x + bw, width); - const int end_y = MIN(start_y + bh, height); - int x, y; - unsigned seg_id = -1; - - buf += width * start_y; - assert(start_y < cm->mi_rows && start_x < cm->cur_tile_mi_col_end); - for (y = start_y; y < end_y; y++, buf += width) { - for (x = start_x; x < end_x; x++) { - seg_id = MIN(seg_id, buf[x]); - } - } - - return seg_id; -} - void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src, int mb_row, int mb_col) { uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, src @@ -551,11 +532,9 @@ static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col, /* segment ID */ if (xd->segmentation_enabled) { - uint8_t *map = - xd->update_mb_segmentation_map ? - cpi->segmentation_map : cm->last_frame_seg_map; - mbmi->segment_id = find_seg_id(cm, map, bsize, mi_row, cm->mi_rows, mi_col, - cm->mi_cols); + uint8_t *map = xd->update_mb_segmentation_map ? cpi->segmentation_map + : cm->last_frame_seg_map; + mbmi->segment_id = vp9_get_pred_mi_segid(cm, bsize, map, mi_row, mi_col); assert(mbmi->segment_id <= (MAX_MB_SEGMENTS-1)); vp9_mb_init_quantizer(cpi, x); @@ -1617,6 +1596,12 @@ static void switch_lossless_mode(VP9_COMP *cpi, int lossless) { } } +static void switch_txfm_mode(VP9_COMP *cpi) { + if (cpi->sf.use_largest_txform && + cpi->common.txfm_mode >= ALLOW_32X32) + cpi->common.txfm_mode = ALLOW_32X32; +} + static void encode_frame_internal(VP9_COMP *cpi) { int mi_row; MACROBLOCK * const x = &cpi->mb; @@ -1661,6 +1646,7 @@ static void encode_frame_internal(VP9_COMP *cpi) { vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q); vp9_initialize_me_consts(cpi, cm->base_qindex); + switch_txfm_mode(cpi); if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { // Initialize encode frame context. diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c index 2f133ccbc..ccd84b39c 100644 --- a/vp9/encoder/vp9_encodemb.c +++ b/vp9/encoder/vp9_encodemb.c @@ -164,14 +164,14 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb, break; } case TX_8X8: { - const TX_TYPE tx_type = plane == 0 ? get_tx_type_8x8(xd, ib) : DCT_DCT; + const TX_TYPE tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT; scan = get_scan_8x8(tx_type); default_eob = 64; band_translate = vp9_coefband_trans_8x8plus; break; } case TX_16X16: { - const TX_TYPE tx_type = plane == 0 ? get_tx_type_16x16(xd, ib) : DCT_DCT; + const TX_TYPE tx_type = plane == 0 ? 
get_tx_type_16x16(xd) : DCT_DCT; scan = get_scan_16x16(tx_type); default_eob = 256; band_translate = vp9_coefband_trans_8x8plus; @@ -468,14 +468,14 @@ static void xform_quant(int plane, int block, BLOCK_SIZE_TYPE bsize, vp9_short_fdct32x32(src_diff, coeff, bw * 2); break; case TX_16X16: - tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT; + tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT; if (tx_type != DCT_DCT) vp9_short_fht16x16(src_diff, coeff, bw, tx_type); else x->fwd_txm16x16(src_diff, coeff, bw * 2); break; case TX_8X8: - tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT; + tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT; if (tx_type != DCT_DCT) vp9_short_fht8x8(src_diff, coeff, bw, tx_type); else @@ -519,14 +519,14 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize, vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride); break; case TX_16X16: - tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT; + tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT; if (tx_type == DCT_DCT) vp9_short_idct16x16_add(dqcoeff, dst, pd->dst.stride); else vp9_short_iht16x16_add(dqcoeff, dst, pd->dst.stride, tx_type); break; case TX_8X8: - tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT; + tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT; if (tx_type == DCT_DCT) vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride); else @@ -659,14 +659,14 @@ static void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize, vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride); break; case TX_16X16: - tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT; + tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT; if (tx_type == DCT_DCT) vp9_short_idct16x16_add(dqcoeff, dst, pd->dst.stride); else vp9_short_iht16x16_add(dqcoeff, dst, pd->dst.stride, tx_type); break; case TX_8X8: - tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT; + tx_type = plane == 0 ? 
get_tx_type_8x8(xd) : DCT_DCT; if (tx_type == DCT_DCT) vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride); else diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c index 0f1062313..53b70adc4 100644 --- a/vp9/encoder/vp9_mcomp.c +++ b/vp9/encoder/vp9_mcomp.c @@ -447,7 +447,7 @@ int vp9_find_best_sub_pixel_comp(MACROBLOCK *x, int offset; int usehp = xd->allow_high_precision_mv; - uint8_t *comp_pred = vpx_memalign(16, w * h * sizeof(uint8_t)); + DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64); uint8_t *y = xd->plane[0].pre[0].buf + (bestmv->as_mv.row) * xd->plane[0].pre[0].stride + bestmv->as_mv.col; @@ -597,8 +597,6 @@ int vp9_find_best_sub_pixel_comp(MACROBLOCK *x, bestmv->as_mv.row = br; bestmv->as_mv.col = bc; - vpx_free(comp_pred); - if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) || (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3))) return INT_MAX; @@ -2356,7 +2354,7 @@ int vp9_refining_search_8p_c(MACROBLOCK *x, int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]}; /* Compound pred buffer */ - uint8_t *comp_pred = vpx_memalign(16, w * h * sizeof(uint8_t)); + DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64); fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; @@ -2420,10 +2418,8 @@ int vp9_refining_search_8p_c(MACROBLOCK *x, (unsigned int *)(&thissad)) + mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit, xd->allow_high_precision_mv); - vpx_free(comp_pred); return besterr; } else { - vpx_free(comp_pred); return INT_MAX; } } diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c index e02e73232..48a8b48c6 100644 --- a/vp9/encoder/vp9_onyx_if.c +++ b/vp9/encoder/vp9_onyx_if.c @@ -724,6 +724,9 @@ void vp9_set_speed_features(VP9_COMP *cpi) { sf->first_step = 1; sf->use_avoid_tested_higherror = 1; sf->adjust_thresholds_by_speed = 1; + sf->use_largest_txform = !(cpi->common.frame_type == KEY_FRAME || + cpi->common.intra_only || + cpi->common.show_frame == 0); } if (speed == 2) { sf->comp_inter_joint_search_thresh = BLOCK_SIZE_SB8X8; diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index a48e7dbb3..b2a19c61d 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -337,11 +337,8 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb, break; } case TX_8X8: { - const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type; - const int sz = 1 + b_width_log2(sb_type); - const int x = block & ((1 << sz) - 1), y = block - x; - TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ? - get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT; + const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ? + get_tx_type_8x8(xd) : DCT_DCT; above_ec = (A[0] + A[1]) != 0; left_ec = (L[0] + L[1]) != 0; scan = get_scan_8x8(tx_type); @@ -350,11 +347,8 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb, break; } case TX_16X16: { - const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type; - const int sz = 2 + b_width_log2(sb_type); - const int x = block & ((1 << sz) - 1), y = block - x; - TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ? - get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT; + const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ? 
+ get_tx_type_16x16(xd) : DCT_DCT; scan = get_scan_16x16(tx_type); seg_eob = 256; above_ec = (A[0] + A[1] + A[2] + A[3]) != 0; @@ -370,7 +364,7 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb, band_translate = vp9_coefband_trans_8x8plus; break; default: - abort(); + assert(0); break; } assert(eob <= seg_eob); @@ -1129,7 +1123,8 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm, xd->plane[0].dst.stride, &xd->mode_info_context->bmi[i].as_mv[0], &xd->scale_factor[0], - 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix); + 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix, + MV_PRECISION_Q3); // TODO(debargha): Make this work properly with the // implicit-compoundinter-weight experiment when implicit @@ -1143,7 +1138,7 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm, dst, xd->plane[0].dst.stride, &xd->mode_info_context->bmi[i].as_mv[1], &xd->scale_factor[1], 4 * bw, 4 * bh, 1, - &xd->subpix); + &xd->subpix, MV_PRECISION_Q3); } vp9_subtract_block(4 * bh, 4 * bw, src_diff, 8, @@ -1966,6 +1961,7 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize, int64_t dist; cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse); + model_rd_from_var_lapndz(sse, bw * bh, pd->dequant[1] >> 3, &rate, &dist); rate_sum += rate; @@ -2151,7 +2147,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, &frame_mv[refs[!id]], &xd->scale_factor[!id], pw, ph, 0, - &xd->subpix); + &xd->subpix, MV_PRECISION_Q3); // Compound motion search on first ref frame. if (id) diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c index fe995ad72..af218b7d6 100644 --- a/vp9/encoder/vp9_segmentation.c +++ b/vp9/encoder/vp9_segmentation.c @@ -139,6 +139,7 @@ static void count_segs(VP9_COMP *cpi, if (cm->frame_type != KEY_FRAME) { // Test to see if the segment id matches the predicted value. 
const int pred_seg_id = vp9_get_pred_mi_segid(cm, mi->mbmi.sb_type, + cm->last_frame_seg_map, mi_row, mi_col); const int seg_predicted = (segment_id == pred_seg_id); diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c index 47792fcc2..872bf267a 100644 --- a/vp9/encoder/vp9_temporal_filter.c +++ b/vp9/encoder/vp9_temporal_filter.c @@ -51,25 +51,25 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, &xd->scale_factor[which_mv], 16, 16, which_mv, - &xd->subpix); + &xd->subpix, MV_PRECISION_Q3); stride = (stride + 1) >> 1; - vp9_build_inter_predictor_q4(u_mb_ptr, stride, - &pred[256], 8, - &mv, - &xd->scale_factor_uv[which_mv], - 8, 8, - which_mv, - &xd->subpix); - - vp9_build_inter_predictor_q4(v_mb_ptr, stride, - &pred[320], 8, - &mv, - &xd->scale_factor_uv[which_mv], - 8, 8, - which_mv, - &xd->subpix); + vp9_build_inter_predictor(u_mb_ptr, stride, + &pred[256], 8, + &mv, + &xd->scale_factor_uv[which_mv], + 8, 8, + which_mv, + &xd->subpix, MV_PRECISION_Q4); + + vp9_build_inter_predictor(v_mb_ptr, stride, + &pred[320], 8, + &mv, + &xd->scale_factor_uv[which_mv], + 8, 8, + which_mv, + &xd->subpix, MV_PRECISION_Q4); } void vp9_temporal_filter_apply_c(uint8_t *frame1, diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c index 0a290e124..885079dc1 100644 --- a/vp9/encoder/vp9_tokenize.c +++ b/vp9/encoder/vp9_tokenize.c @@ -133,8 +133,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize, const int ref = mbmi->ref_frame[0] != INTRA_FRAME; ENTROPY_CONTEXT above_ec, left_ec; uint8_t token_cache[1024]; - TX_TYPE tx_type = DCT_DCT; - const uint8_t * band_translate; + const uint8_t *band_translate; assert((!type && !plane) || (type && plane)); counts = cpi->coef_counts[tx_size]; @@ -142,8 +141,8 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize, switch (tx_size) { default: case TX_4X4: { - tx_type = (type == PLANE_TYPE_Y_WITH_DC) ? - get_tx_type_4x4(xd, block) : DCT_DCT; + const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ? + get_tx_type_4x4(xd, block) : DCT_DCT; above_ec = A[0] != 0; left_ec = L[0] != 0; seg_eob = 16; @@ -152,10 +151,8 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize, break; } case TX_8X8: { - const int sz = 1 + b_width_log2(sb_type); - const int x = block & ((1 << sz) - 1), y = block - x; - tx_type = (type == PLANE_TYPE_Y_WITH_DC) ? - get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT; + const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ? + get_tx_type_8x8(xd) : DCT_DCT; above_ec = (A[0] + A[1]) != 0; left_ec = (L[0] + L[1]) != 0; seg_eob = 64; @@ -164,10 +161,8 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize, break; } case TX_16X16: { - const int sz = 2 + b_width_log2(sb_type); - const int x = block & ((1 << sz) - 1), y = block - x; - tx_type = (type == PLANE_TYPE_Y_WITH_DC) ? - get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT; + const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ? 
+ get_tx_type_16x16(xd) : DCT_DCT; above_ec = (A[0] + A[1] + A[2] + A[3]) != 0; left_ec = (L[0] + L[1] + L[2] + L[3]) != 0; seg_eob = 256; @@ -263,8 +258,7 @@ int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) { int vp9_sby_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) { int result = 1; struct is_skippable_args args = {xd, &result}; - foreach_transformed_block_in_plane(xd, bsize, 0, - is_skippable, &args); + foreach_transformed_block_in_plane(xd, bsize, 0, is_skippable, &args); return result; } diff --git a/vp9/encoder/x86/vp9_dct_sse2.c b/vp9/encoder/x86/vp9_dct_sse2.c index aaacebee2..f11200f62 100644 --- a/vp9/encoder/x86/vp9_dct_sse2.c +++ b/vp9/encoder/x86/vp9_dct_sse2.c @@ -10,6 +10,7 @@ #include <emmintrin.h> // SSE2 #include "vp9/common/vp9_idct.h" // for cospi constants +#include "vpx_ports/mem.h" void vp9_short_fdct4x4_sse2(int16_t *input, int16_t *output, int pitch) { // The 2D transform is done with two passes which are actually pretty @@ -133,14 +134,14 @@ void vp9_short_fdct8x8_sse2(int16_t *input, int16_t *output, int pitch) { const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); // Load input - __m128i in0 = _mm_loadu_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_loadu_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_loadu_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_loadu_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = _mm_loadu_si128((const __m128i *)(input + 4 * stride)); - __m128i in5 = _mm_loadu_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_loadu_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_loadu_si128((const __m128i *)(input + 7 * stride)); + __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); + __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); + __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); + __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); + __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); + __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); + __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); // Pre-condition input (shift by two) in0 = _mm_slli_epi16(in0, 2); in1 = _mm_slli_epi16(in1, 2); @@ -362,17 +363,509 @@ void vp9_short_fdct8x8_sse2(int16_t *input, int16_t *output, int pitch) { in6 = _mm_srai_epi16(in6, 1); in7 = _mm_srai_epi16(in7, 1); // store results - _mm_storeu_si128((__m128i *)(output + 0 * 8), in0); - _mm_storeu_si128((__m128i *)(output + 1 * 8), in1); - _mm_storeu_si128((__m128i *)(output + 2 * 8), in2); - _mm_storeu_si128((__m128i *)(output + 3 * 8), in3); - _mm_storeu_si128((__m128i *)(output + 4 * 8), in4); - _mm_storeu_si128((__m128i *)(output + 5 * 8), in5); - _mm_storeu_si128((__m128i *)(output + 6 * 8), in6); - _mm_storeu_si128((__m128i *)(output + 7 * 8), in7); + _mm_store_si128((__m128i *)(output + 0 * 8), in0); + _mm_store_si128((__m128i *)(output + 1 * 8), in1); + _mm_store_si128((__m128i *)(output + 2 * 8), in2); + _mm_store_si128((__m128i *)(output + 3 * 8), in3); + _mm_store_si128((__m128i *)(output + 4 * 8), in4); + _mm_store_si128((__m128i *)(output + 5 * 8), in5); + _mm_store_si128((__m128i *)(output + 6 * 8), in6); + _mm_store_si128((__m128i *)(output + 7 * 8), in7); } } +// load 8x8 array +static INLINE 
void load_buffer_8x8(int16_t *input, __m128i in[8], int stride) { + in[0] = _mm_load_si128((__m128i *)(input + 0 * stride)); + in[1] = _mm_load_si128((__m128i *)(input + 1 * stride)); + in[2] = _mm_load_si128((__m128i *)(input + 2 * stride)); + in[3] = _mm_load_si128((__m128i *)(input + 3 * stride)); + in[4] = _mm_load_si128((__m128i *)(input + 4 * stride)); + in[5] = _mm_load_si128((__m128i *)(input + 5 * stride)); + in[6] = _mm_load_si128((__m128i *)(input + 6 * stride)); + in[7] = _mm_load_si128((__m128i *)(input + 7 * stride)); + + in[0] = _mm_slli_epi16(in[0], 2); + in[1] = _mm_slli_epi16(in[1], 2); + in[2] = _mm_slli_epi16(in[2], 2); + in[3] = _mm_slli_epi16(in[3], 2); + in[4] = _mm_slli_epi16(in[4], 2); + in[5] = _mm_slli_epi16(in[5], 2); + in[6] = _mm_slli_epi16(in[6], 2); + in[7] = _mm_slli_epi16(in[7], 2); +} + +// write 8x8 array +static INLINE void write_buffer_8x8(int16_t *output, __m128i res[8]) { + res[0] = _mm_srai_epi16(res[0], 1); + res[1] = _mm_srai_epi16(res[1], 1); + res[2] = _mm_srai_epi16(res[2], 1); + res[3] = _mm_srai_epi16(res[3], 1); + res[4] = _mm_srai_epi16(res[4], 1); + res[5] = _mm_srai_epi16(res[5], 1); + res[6] = _mm_srai_epi16(res[6], 1); + res[7] = _mm_srai_epi16(res[7], 1); + + _mm_store_si128((__m128i *)(output + 0 * 8), res[0]); + _mm_store_si128((__m128i *)(output + 1 * 8), res[1]); + _mm_store_si128((__m128i *)(output + 2 * 8), res[2]); + _mm_store_si128((__m128i *)(output + 3 * 8), res[3]); + _mm_store_si128((__m128i *)(output + 4 * 8), res[4]); + _mm_store_si128((__m128i *)(output + 5 * 8), res[5]); + _mm_store_si128((__m128i *)(output + 6 * 8), res[6]); + _mm_store_si128((__m128i *)(output + 7 * 8), res[7]); +} + +// perform in-place transpose +static INLINE void array_transpose_8x8(__m128i res[8]) { + const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]); + const __m128i tr0_1 = _mm_unpacklo_epi16(res[2], res[3]); + const __m128i tr0_2 = _mm_unpackhi_epi16(res[0], res[1]); + const __m128i tr0_3 = _mm_unpackhi_epi16(res[2], res[3]); + const __m128i tr0_4 = _mm_unpacklo_epi16(res[4], res[5]); + const __m128i tr0_5 = _mm_unpacklo_epi16(res[6], res[7]); + const __m128i tr0_6 = _mm_unpackhi_epi16(res[4], res[5]); + const __m128i tr0_7 = _mm_unpackhi_epi16(res[6], res[7]); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + // 04 14 05 15 06 16 07 17 + // 24 34 25 35 26 36 27 37 + // 40 50 41 51 42 52 43 53 + // 60 70 61 71 62 72 63 73 + // 44 54 45 55 46 56 47 57 + // 64 74 65 75 66 76 67 77 + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5); + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); + const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5); + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3); + const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3); + const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); + // 00 10 20 30 01 11 21 31 + // 40 50 60 70 41 51 61 71 + // 02 12 22 32 03 13 23 33 + // 42 52 62 72 43 53 63 73 + // 04 14 24 34 05 15 25 35 + // 44 54 64 74 45 55 65 75 + // 06 16 26 36 07 17 27 37 + // 46 56 66 76 47 57 67 77 + res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1); + res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1); + res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3); + res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3); + res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5); + res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5); + res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7); + res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7); + // 00 10 20 
30 40 50 60 70 + // 01 11 21 31 41 51 61 71 + // 02 12 22 32 42 52 62 72 + // 03 13 23 33 43 53 63 73 + // 04 14 24 34 44 54 64 74 + // 05 15 25 35 45 55 65 75 + // 06 16 26 36 46 56 66 76 + // 07 17 27 37 47 57 67 77 +} + +void fdct8_1d_sse2(__m128i in[8]) { + // constants + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); + const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); + const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); + const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + __m128i u0, u1, u2, u3, u4, u5, u6, u7; + __m128i v0, v1, v2, v3, v4, v5, v6, v7; + __m128i s0, s1, s2, s3, s4, s5, s6, s7; + + // stage 1 + s0 = _mm_add_epi16(in[0], in[7]); + s1 = _mm_add_epi16(in[1], in[6]); + s2 = _mm_add_epi16(in[2], in[5]); + s3 = _mm_add_epi16(in[3], in[4]); + s4 = _mm_sub_epi16(in[3], in[4]); + s5 = _mm_sub_epi16(in[2], in[5]); + s6 = _mm_sub_epi16(in[1], in[6]); + s7 = _mm_sub_epi16(in[0], in[7]); + + u0 = _mm_add_epi16(s0, s3); + u1 = _mm_add_epi16(s1, s2); + u2 = _mm_sub_epi16(s1, s2); + u3 = _mm_sub_epi16(s0, s3); + // interleave and perform butterfly multiplication/addition + v0 = _mm_unpacklo_epi16(u0, u1); + v1 = _mm_unpackhi_epi16(u0, u1); + v2 = _mm_unpacklo_epi16(u2, u3); + v3 = _mm_unpackhi_epi16(u2, u3); + + u0 = _mm_madd_epi16(v0, k__cospi_p16_p16); + u1 = _mm_madd_epi16(v1, k__cospi_p16_p16); + u2 = _mm_madd_epi16(v0, k__cospi_p16_m16); + u3 = _mm_madd_epi16(v1, k__cospi_p16_m16); + u4 = _mm_madd_epi16(v2, k__cospi_p24_p08); + u5 = _mm_madd_epi16(v3, k__cospi_p24_p08); + u6 = _mm_madd_epi16(v2, k__cospi_m08_p24); + u7 = _mm_madd_epi16(v3, k__cospi_m08_p24); + + // shift and rounding + v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); + v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); + v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); + v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); + v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); + v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); + v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); + v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); + + u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + + in[0] = _mm_packs_epi32(u0, u1); + in[2] = _mm_packs_epi32(u4, u5); + in[4] = _mm_packs_epi32(u2, u3); + in[6] = _mm_packs_epi32(u6, u7); + + // stage 2 + // interleave and perform butterfly multiplication/addition + u0 = _mm_unpacklo_epi16(s6, s5); + u1 = _mm_unpackhi_epi16(s6, s5); + v0 = _mm_madd_epi16(u0, k__cospi_p16_m16); + v1 = _mm_madd_epi16(u1, k__cospi_p16_m16); + v2 = _mm_madd_epi16(u0, k__cospi_p16_p16); + v3 = _mm_madd_epi16(u1, k__cospi_p16_p16); + + // shift and rounding + u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); + u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); + u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); + u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); + + v0 = _mm_srai_epi32(u0, 
DCT_CONST_BITS); + v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); + v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); + v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); + + u0 = _mm_packs_epi32(v0, v1); + u1 = _mm_packs_epi32(v2, v3); + + // stage 3 + s0 = _mm_add_epi16(s4, u0); + s1 = _mm_sub_epi16(s4, u0); + s2 = _mm_sub_epi16(s7, u1); + s3 = _mm_add_epi16(s7, u1); + + // stage 4 + u0 = _mm_unpacklo_epi16(s0, s3); + u1 = _mm_unpackhi_epi16(s0, s3); + u2 = _mm_unpacklo_epi16(s1, s2); + u3 = _mm_unpackhi_epi16(s1, s2); + + v0 = _mm_madd_epi16(u0, k__cospi_p28_p04); + v1 = _mm_madd_epi16(u1, k__cospi_p28_p04); + v2 = _mm_madd_epi16(u2, k__cospi_p12_p20); + v3 = _mm_madd_epi16(u3, k__cospi_p12_p20); + v4 = _mm_madd_epi16(u2, k__cospi_m20_p12); + v5 = _mm_madd_epi16(u3, k__cospi_m20_p12); + v6 = _mm_madd_epi16(u0, k__cospi_m04_p28); + v7 = _mm_madd_epi16(u1, k__cospi_m04_p28); + + // shift and rounding + u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); + u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); + u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); + u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); + u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); + u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); + u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); + u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); + + v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); + v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); + v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); + v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); + v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); + v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); + v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); + v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); + + in[1] = _mm_packs_epi32(v0, v1); + in[3] = _mm_packs_epi32(v4, v5); + in[5] = _mm_packs_epi32(v2, v3); + in[7] = _mm_packs_epi32(v6, v7); + + // transpose + array_transpose_8x8(in); +} + +void fadst8_1d_sse2(__m128i in[8]) { + // Constants + const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); + const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); + const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); + const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64); + const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64); + const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); + const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64); + const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64); + const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); + const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); + const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); + const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); + const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); + const __m128i k__const_0 = _mm_set1_epi16(0); + const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); + + __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15; + __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15; + __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; + __m128i s0, s1, s2, s3, s4, s5, s6, s7; + __m128i in0, in1, in2, in3, in4, in5, in6, in7; + + // properly aligned for butterfly input + in0 = in[7]; + in1 = in[0]; + in2 = in[5]; + in3 = in[2]; + in4 = in[3]; + in5 = in[4]; + in6 = in[1]; + in7 = in[6]; + + // column transformation + // stage 1 + // interleave and 
multiply/add into 32-bit integer + s0 = _mm_unpacklo_epi16(in0, in1); + s1 = _mm_unpackhi_epi16(in0, in1); + s2 = _mm_unpacklo_epi16(in2, in3); + s3 = _mm_unpackhi_epi16(in2, in3); + s4 = _mm_unpacklo_epi16(in4, in5); + s5 = _mm_unpackhi_epi16(in4, in5); + s6 = _mm_unpacklo_epi16(in6, in7); + s7 = _mm_unpackhi_epi16(in6, in7); + + u0 = _mm_madd_epi16(s0, k__cospi_p02_p30); + u1 = _mm_madd_epi16(s1, k__cospi_p02_p30); + u2 = _mm_madd_epi16(s0, k__cospi_p30_m02); + u3 = _mm_madd_epi16(s1, k__cospi_p30_m02); + u4 = _mm_madd_epi16(s2, k__cospi_p10_p22); + u5 = _mm_madd_epi16(s3, k__cospi_p10_p22); + u6 = _mm_madd_epi16(s2, k__cospi_p22_m10); + u7 = _mm_madd_epi16(s3, k__cospi_p22_m10); + u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); + u9 = _mm_madd_epi16(s5, k__cospi_p18_p14); + u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); + u11 = _mm_madd_epi16(s5, k__cospi_p14_m18); + u12 = _mm_madd_epi16(s6, k__cospi_p26_p06); + u13 = _mm_madd_epi16(s7, k__cospi_p26_p06); + u14 = _mm_madd_epi16(s6, k__cospi_p06_m26); + u15 = _mm_madd_epi16(s7, k__cospi_p06_m26); + + // addition + w0 = _mm_add_epi32(u0, u8); + w1 = _mm_add_epi32(u1, u9); + w2 = _mm_add_epi32(u2, u10); + w3 = _mm_add_epi32(u3, u11); + w4 = _mm_add_epi32(u4, u12); + w5 = _mm_add_epi32(u5, u13); + w6 = _mm_add_epi32(u6, u14); + w7 = _mm_add_epi32(u7, u15); + w8 = _mm_sub_epi32(u0, u8); + w9 = _mm_sub_epi32(u1, u9); + w10 = _mm_sub_epi32(u2, u10); + w11 = _mm_sub_epi32(u3, u11); + w12 = _mm_sub_epi32(u4, u12); + w13 = _mm_sub_epi32(u5, u13); + w14 = _mm_sub_epi32(u6, u14); + w15 = _mm_sub_epi32(u7, u15); + + // shift and rounding + v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); + v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); + v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); + v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); + v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); + v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); + v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); + v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); + v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING); + v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING); + v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING); + v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING); + v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING); + v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING); + v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING); + v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING); + + u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + u8 = _mm_srai_epi32(v8, DCT_CONST_BITS); + u9 = _mm_srai_epi32(v9, DCT_CONST_BITS); + u10 = _mm_srai_epi32(v10, DCT_CONST_BITS); + u11 = _mm_srai_epi32(v11, DCT_CONST_BITS); + u12 = _mm_srai_epi32(v12, DCT_CONST_BITS); + u13 = _mm_srai_epi32(v13, DCT_CONST_BITS); + u14 = _mm_srai_epi32(v14, DCT_CONST_BITS); + u15 = _mm_srai_epi32(v15, DCT_CONST_BITS); + + // back to 16-bit and pack 8 integers into __m128i + in[0] = _mm_packs_epi32(u0, u1); + in[1] = _mm_packs_epi32(u2, u3); + in[2] = _mm_packs_epi32(u4, u5); + in[3] = _mm_packs_epi32(u6, u7); + in[4] = _mm_packs_epi32(u8, u9); + in[5] = _mm_packs_epi32(u10, u11); + in[6] = _mm_packs_epi32(u12, u13); + in[7] = _mm_packs_epi32(u14, u15); + + // stage 2 + s0 = _mm_add_epi16(in[0], in[2]); + s1 = _mm_add_epi16(in[1], in[3]); + s2 = _mm_sub_epi16(in[0], 
in[2]); + s3 = _mm_sub_epi16(in[1], in[3]); + u0 = _mm_unpacklo_epi16(in[4], in[5]); + u1 = _mm_unpackhi_epi16(in[4], in[5]); + u2 = _mm_unpacklo_epi16(in[6], in[7]); + u3 = _mm_unpackhi_epi16(in[6], in[7]); + + v0 = _mm_madd_epi16(u0, k__cospi_p08_p24); + v1 = _mm_madd_epi16(u1, k__cospi_p08_p24); + v2 = _mm_madd_epi16(u0, k__cospi_p24_m08); + v3 = _mm_madd_epi16(u1, k__cospi_p24_m08); + v4 = _mm_madd_epi16(u2, k__cospi_m24_p08); + v5 = _mm_madd_epi16(u3, k__cospi_m24_p08); + v6 = _mm_madd_epi16(u2, k__cospi_p08_p24); + v7 = _mm_madd_epi16(u3, k__cospi_p08_p24); + + w0 = _mm_add_epi32(v0, v4); + w1 = _mm_add_epi32(v1, v5); + w2 = _mm_add_epi32(v2, v6); + w3 = _mm_add_epi32(v3, v7); + w4 = _mm_sub_epi32(v0, v4); + w5 = _mm_sub_epi32(v1, v5); + w6 = _mm_sub_epi32(v2, v6); + w7 = _mm_sub_epi32(v3, v7); + + v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); + v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); + v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); + v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); + v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); + v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); + v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); + v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); + + u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); + u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); + u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); + u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); + u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); + u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); + u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); + u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); + + // back to 16-bit intergers + s4 = _mm_packs_epi32(u0, u1); + s5 = _mm_packs_epi32(u2, u3); + s6 = _mm_packs_epi32(u4, u5); + s7 = _mm_packs_epi32(u6, u7); + + // stage 3 + u0 = _mm_unpacklo_epi16(s2, s3); + u1 = _mm_unpackhi_epi16(s2, s3); + u2 = _mm_unpacklo_epi16(s6, s7); + u3 = _mm_unpackhi_epi16(s6, s7); + + v0 = _mm_madd_epi16(u0, k__cospi_p16_p16); + v1 = _mm_madd_epi16(u1, k__cospi_p16_p16); + v2 = _mm_madd_epi16(u0, k__cospi_p16_m16); + v3 = _mm_madd_epi16(u1, k__cospi_p16_m16); + v4 = _mm_madd_epi16(u2, k__cospi_p16_p16); + v5 = _mm_madd_epi16(u3, k__cospi_p16_p16); + v6 = _mm_madd_epi16(u2, k__cospi_p16_m16); + v7 = _mm_madd_epi16(u3, k__cospi_p16_m16); + + u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); + u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); + u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); + u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); + u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); + u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); + u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); + u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); + + v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); + v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); + v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); + v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); + v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); + v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); + v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); + v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); + + s2 = _mm_packs_epi32(v0, v1); + s3 = _mm_packs_epi32(v2, v3); + s6 = _mm_packs_epi32(v4, v5); + s7 = _mm_packs_epi32(v6, v7); + + // FIXME(jingning): do subtract using bit inversion? 
+ in[0] = s0; + in[1] = _mm_sub_epi16(k__const_0, s4); + in[2] = s6; + in[3] = _mm_sub_epi16(k__const_0, s2); + in[4] = s3; + in[5] = _mm_sub_epi16(k__const_0, s7); + in[6] = s5; + in[7] = _mm_sub_epi16(k__const_0, s1); + + // transpose + array_transpose_8x8(in); +} + +void vp9_short_fht8x8_sse2(int16_t *input, int16_t *output, + int stride, int tx_type) { + __m128i in[8]; + load_buffer_8x8(input, in, stride); + switch (tx_type) { + case 0: // DCT_DCT + fdct8_1d_sse2(in); + fadst8_1d_sse2(in); + break; + case 1: // ADST_DCT + fadst8_1d_sse2(in); + fdct8_1d_sse2(in); + break; + case 2: // DCT_ADST + fdct8_1d_sse2(in); + fadst8_1d_sse2(in); + break; + case 3: // ADST_ADST + fadst8_1d_sse2(in); + fadst8_1d_sse2(in); + break; + default: + assert(0); + break; + } + write_buffer_8x8(output, in); +} + void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) { // The 2D transform is done with two passes which are actually pretty // similar. In the first one, we transform the columns and transpose @@ -383,7 +876,7 @@ void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) { const int stride = pitch >> 1; int pass; // We need an intermediate buffer between passes. - int16_t intermediate[256]; + DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256); int16_t *in = input; int16_t *out = intermediate; // Constants @@ -426,22 +919,22 @@ void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) { __m128i res08, res09, res10, res11, res12, res13, res14, res15; // Load and pre-condition input. if (0 == pass) { - in00 = _mm_loadu_si128((const __m128i *)(in + 0 * stride)); - in01 = _mm_loadu_si128((const __m128i *)(in + 1 * stride)); - in02 = _mm_loadu_si128((const __m128i *)(in + 2 * stride)); - in03 = _mm_loadu_si128((const __m128i *)(in + 3 * stride)); - in04 = _mm_loadu_si128((const __m128i *)(in + 4 * stride)); - in05 = _mm_loadu_si128((const __m128i *)(in + 5 * stride)); - in06 = _mm_loadu_si128((const __m128i *)(in + 6 * stride)); - in07 = _mm_loadu_si128((const __m128i *)(in + 7 * stride)); - in08 = _mm_loadu_si128((const __m128i *)(in + 8 * stride)); - in09 = _mm_loadu_si128((const __m128i *)(in + 9 * stride)); - in10 = _mm_loadu_si128((const __m128i *)(in + 10 * stride)); - in11 = _mm_loadu_si128((const __m128i *)(in + 11 * stride)); - in12 = _mm_loadu_si128((const __m128i *)(in + 12 * stride)); - in13 = _mm_loadu_si128((const __m128i *)(in + 13 * stride)); - in14 = _mm_loadu_si128((const __m128i *)(in + 14 * stride)); - in15 = _mm_loadu_si128((const __m128i *)(in + 15 * stride)); + in00 = _mm_load_si128((const __m128i *)(in + 0 * stride)); + in01 = _mm_load_si128((const __m128i *)(in + 1 * stride)); + in02 = _mm_load_si128((const __m128i *)(in + 2 * stride)); + in03 = _mm_load_si128((const __m128i *)(in + 3 * stride)); + in04 = _mm_load_si128((const __m128i *)(in + 4 * stride)); + in05 = _mm_load_si128((const __m128i *)(in + 5 * stride)); + in06 = _mm_load_si128((const __m128i *)(in + 6 * stride)); + in07 = _mm_load_si128((const __m128i *)(in + 7 * stride)); + in08 = _mm_load_si128((const __m128i *)(in + 8 * stride)); + in09 = _mm_load_si128((const __m128i *)(in + 9 * stride)); + in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); + in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); + in12 = _mm_load_si128((const __m128i *)(in + 12 * stride)); + in13 = _mm_load_si128((const __m128i *)(in + 13 * stride)); + in14 = _mm_load_si128((const __m128i *)(in + 14 * stride)); + in15 = _mm_load_si128((const __m128i *)(in + 15 * stride)); // x 
= x << 2 in00 = _mm_slli_epi16(in00, 2); in01 = _mm_slli_epi16(in01, 2); @@ -460,22 +953,22 @@ void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) { in14 = _mm_slli_epi16(in14, 2); in15 = _mm_slli_epi16(in15, 2); } else { - in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 16)); - in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 16)); - in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 16)); - in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 16)); - in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 16)); - in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 16)); - in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 16)); - in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 16)); - in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 16)); - in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 16)); - in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 16)); - in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 16)); - in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 16)); - in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 16)); - in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 16)); - in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 16)); + in00 = _mm_load_si128((const __m128i *)(in + 0 * 16)); + in01 = _mm_load_si128((const __m128i *)(in + 1 * 16)); + in02 = _mm_load_si128((const __m128i *)(in + 2 * 16)); + in03 = _mm_load_si128((const __m128i *)(in + 3 * 16)); + in04 = _mm_load_si128((const __m128i *)(in + 4 * 16)); + in05 = _mm_load_si128((const __m128i *)(in + 5 * 16)); + in06 = _mm_load_si128((const __m128i *)(in + 6 * 16)); + in07 = _mm_load_si128((const __m128i *)(in + 7 * 16)); + in08 = _mm_load_si128((const __m128i *)(in + 8 * 16)); + in09 = _mm_load_si128((const __m128i *)(in + 9 * 16)); + in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); + in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); + in12 = _mm_load_si128((const __m128i *)(in + 12 * 16)); + in13 = _mm_load_si128((const __m128i *)(in + 13 * 16)); + in14 = _mm_load_si128((const __m128i *)(in + 14 * 16)); + in15 = _mm_load_si128((const __m128i *)(in + 15 * 16)); // x = (x + 1) >> 2 in00 = _mm_add_epi16(in00, kOne); in01 = _mm_add_epi16(in01, kOne); @@ -982,14 +1475,14 @@ void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) { // 06 16 26 36 46 56 66 76 // 07 17 27 37 47 57 67 77 // Store results - _mm_storeu_si128((__m128i *)(out + 8 + 0 * 16), tr2_0); - _mm_storeu_si128((__m128i *)(out + 8 + 1 * 16), tr2_1); - _mm_storeu_si128((__m128i *)(out + 8 + 2 * 16), tr2_2); - _mm_storeu_si128((__m128i *)(out + 8 + 3 * 16), tr2_3); - _mm_storeu_si128((__m128i *)(out + 8 + 4 * 16), tr2_4); - _mm_storeu_si128((__m128i *)(out + 8 + 5 * 16), tr2_5); - _mm_storeu_si128((__m128i *)(out + 8 + 6 * 16), tr2_6); - _mm_storeu_si128((__m128i *)(out + 8 + 7 * 16), tr2_7); + _mm_store_si128((__m128i *)(out + 8 + 0 * 16), tr2_0); + _mm_store_si128((__m128i *)(out + 8 + 1 * 16), tr2_1); + _mm_store_si128((__m128i *)(out + 8 + 2 * 16), tr2_2); + _mm_store_si128((__m128i *)(out + 8 + 3 * 16), tr2_3); + _mm_store_si128((__m128i *)(out + 8 + 4 * 16), tr2_4); + _mm_store_si128((__m128i *)(out + 8 + 5 * 16), tr2_5); + _mm_store_si128((__m128i *)(out + 8 + 6 * 16), tr2_6); + _mm_store_si128((__m128i *)(out + 8 + 7 * 16), tr2_7); } out += 8*16; } |
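One implementation note on the vp9_dct_sse2.c and vp9_mcomp.c changes above: swapping _mm_loadu_si128/_mm_storeu_si128 for their aligned counterparts, and vpx_memalign/vpx_free for DECLARE_ALIGNED_ARRAY, is only valid because every buffer involved is guaranteed to be 16-byte aligned. A rough sketch of the pattern, using a GCC/Clang alignment attribute as a stand-in for the vpx_ports/mem.h macro (a hypothetical helper, not the library's code):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* 16-byte aligned scratch buffer, standing in for
 * DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256). */
static int16_t intermediate[256] __attribute__((aligned(16)));

/* Copy one 16-element row; src must itself be 16-byte aligned,
 * or the aligned loads below will fault. With alignment
 * guaranteed, _mm_load_si128/_mm_store_si128 avoid the penalty
 * of their unaligned variants on older x86 cores. */
static void copy_row(const int16_t *src, int row) {
  const __m128i lo = _mm_load_si128((const __m128i *)(src + 0));
  const __m128i hi = _mm_load_si128((const __m128i *)(src + 8));
  _mm_store_si128((__m128i *)(intermediate + row * 16 + 0), lo);
  _mm_store_si128((__m128i *)(intermediate + row * 16 + 8), hi);
}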