path: root/vp8/encoder
author     John Koleszar <jkoleszar@google.com>    2011-04-26 08:28:51 -0400
committer  John Koleszar <jkoleszar@google.com>    2011-04-26 08:28:51 -0400
commit     0a77e59847967f76f022e3d8eacef302f4a16038 (patch)
tree       226aad9ddfa0d2d7bba975a6b009ae83b06b9f60 /vp8/encoder
parent     5227798c570af08d08dd6fdd7a3e96d5dc96977b (diff)
parent     d5c46bdfc0d7bcc25e3ff549a0dc03d759050c19 (diff)
Merge remote branch 'origin/master' into experimental
Conflicts:
    vp8/common/alloccommon.c
    vp8/encoder/rdopt.c

Change-Id: I142167d31d1b9cffe143774f6915bca463df67f0
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/encodeframe.c                 5
-rw-r--r--  vp8/encoder/onyx_if.c                    67
-rw-r--r--  vp8/encoder/onyx_int.h                    1
-rw-r--r--  vp8/encoder/pickinter.c                   1
-rw-r--r--  vp8/encoder/ratectrl.c                   90
-rw-r--r--  vp8/encoder/rdopt.c                       4
-rw-r--r--  vp8/encoder/tokenize.c                   60
-rw-r--r--  vp8/encoder/x86/quantize_sse4.asm       254
-rw-r--r--  vp8/encoder/x86/quantize_x86.h           13
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c    2
10 files changed, 323 insertions(+), 174 deletions(-)
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index e09f95b19..ee3c7bcd4 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1400,11 +1400,6 @@ int vp8cx_encode_inter_macroblock
{
if (cpi->common.mb_no_coeff_skip)
{
- if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
- xd->mode_info_context->mbmi.dc_diff = 0;
- else
- xd->mode_info_context->mbmi.dc_diff = 1;
-
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count ++;
vp8_fix_contexts(xd);
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 9798805b6..9ae6f56e2 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -1528,10 +1528,6 @@ static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
// Initialise the starting buffer levels
- cpi->oxcf.starting_buffer_level =
- rescale(cpi->oxcf.starting_buffer_level,
- cpi->oxcf.target_bandwidth, 1000);
-
cpi->buffer_level = cpi->oxcf.starting_buffer_level;
cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
@@ -1704,6 +1700,10 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
// Convert target bandwidth from Kbit/s to Bit/s
cpi->oxcf.target_bandwidth *= 1000;
+ cpi->oxcf.starting_buffer_level =
+ rescale(cpi->oxcf.starting_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
// Set or reset optimal and maximum buffer levels.
if (cpi->oxcf.optimal_buffer_level == 0)
cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
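
The starting_buffer_level rescale is only meaningful once target_bandwidth is in bit/s, which is why it moves from init_config() into vp8_change_config() right after the unit conversion. A minimal sketch of the resulting order of operations (rescale() is the existing libvpx helper; the unit comments are assumptions):

    /* sketch, not the literal source */
    cpi->oxcf.target_bandwidth *= 1000;             /* Kbit/s -> bit/s */
    cpi->oxcf.starting_buffer_level =
        rescale(cpi->oxcf.starting_buffer_level,    /* assumed: ms * (bit/s) / 1000 = bits */
                cpi->oxcf.target_bandwidth, 1000);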
@@ -1753,8 +1753,6 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
// Only allow dropped frames in buffered mode
cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
- cm->filter_type = (LOOPFILTERTYPE) cpi->filter_type;
-
if (!cm->use_bilinear_mc_filter)
cm->mcomp_filter_type = SIXTAP;
else
@@ -2730,16 +2728,17 @@ static int pick_frame_size(VP8_COMP *cpi)
if (cpi->pass == 2)
vp8_calc_auto_iframe_target_size(cpi);
- // 1 Pass there is no information on which to base size so use bandwidth per second * fixed fraction
else
#endif
- cpi->this_frame_target = cpi->oxcf.target_bandwidth / 2;
-
- // in error resilient mode the first frame is bigger since it likely contains
- // all the static background
- if (cpi->oxcf.error_resilient_mode == 1 || (cpi->compressor_speed == 2))
{
- cpi->this_frame_target *= 3; // 5;
+ /* 1 Pass there is no information on which to base size so use
+ * bandwidth per second * fraction of the initial buffer
+ * level
+ */
+ cpi->this_frame_target = cpi->oxcf.starting_buffer_level / 2;
+
+ if(cpi->this_frame_target > cpi->oxcf.target_bandwidth * 3 / 2)
+ cpi->this_frame_target = cpi->oxcf.target_bandwidth * 3 / 2;
}
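
A worked example of the new one-pass first-frame sizing (all numbers hypothetical):

    /* e.g. target_bandwidth = 800000 bit/s and starting_buffer_level = 3,200,000
     * bits (a 4000 ms buffer): half the buffer is 1,600,000 bits, the cap is
     * 1.5 * 800,000 = 1,200,000 bits, so the first frame target is 1,200,000 bits */
    cpi->this_frame_target = cpi->oxcf.starting_buffer_level / 2;

    if (cpi->this_frame_target > cpi->oxcf.target_bandwidth * 3 / 2)
        cpi->this_frame_target = cpi->oxcf.target_bandwidth * 3 / 2;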
// Key frame from VFW/auto-keyframe/first frame
@@ -2773,6 +2772,19 @@ static int pick_frame_size(VP8_COMP *cpi)
}
}
+ /* Apply limits on keyframe target.
+ *
+ * TODO: move this after consolidating
+ * vp8_calc_iframe_target_size() and vp8_calc_auto_iframe_target_size()
+ */
+ if (cm->frame_type == KEY_FRAME && cpi->oxcf.rc_max_intra_bitrate_pct)
+ {
+ unsigned int max_rate = cpi->av_per_frame_bandwidth
+ * cpi->oxcf.rc_max_intra_bitrate_pct / 100;
+
+ if (cpi->this_frame_target > max_rate)
+ cpi->this_frame_target = max_rate;
+ }
return 1;
}
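
rc_max_intra_bitrate_pct is expressed relative to the average per-frame bandwidth; a hypothetical worked example of the cap added above:

    /* e.g. av_per_frame_bandwidth = 26667 bits (800 kbit/s at 30 fps) and
     * rc_max_intra_bitrate_pct = 300  ->  key frames are capped at ~80,000 bits */
    unsigned int max_rate = cpi->av_per_frame_bandwidth
                            * cpi->oxcf.rc_max_intra_bitrate_pct / 100;

    if (cpi->this_frame_target > max_rate)
        cpi->this_frame_target = max_rate;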
@@ -5274,35 +5286,6 @@ int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const
}
-static int calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
-{
- int i, j;
- int Total = 0;
-
- unsigned char *src = source->y_buffer;
- unsigned char *dst = dest->y_buffer;
- (void)rtcd;
-
- // Loop through the Y plane raw and reconstruction data summing (square differences)
- for (i = 0; i < source->y_height; i += 16)
- {
- for (j = 0; j < source->y_width; j += 16)
- {
- unsigned int sse;
- VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
-
- if (sse < 8096)
- Total += sse;
- }
-
- src += 16 * source->y_stride;
- dst += 16 * dest->y_stride;
- }
-
- return Total;
-}
-
-
int vp8_get_quantizer(VP8_PTR c)
{
VP8_COMP *cpi = (VP8_COMP *) c;
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index fdd3b99bb..e7ad8f327 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -506,7 +506,6 @@ typedef struct
int auto_adjust_key_quantizer;
int keyquantizer;
int auto_worst_q;
- int filter_type;
int cpu_used;
int chroma_boost;
int horiz_scale;
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index c56593e0b..765ff26f6 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -938,7 +938,6 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int re
best_mbmode.uv_mode = 0;
best_mbmode.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
best_mbmode.partitioning = 0;
- best_mbmode.dc_diff = 0;
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 8d639ee28..fa9f04b15 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -707,8 +707,6 @@ void vp8_calc_pframe_target_size(VP8_COMP *cpi)
int min_frame_target;
int Adjustment;
- // Set the min frame bandwidth.
- //min_frame_target = estimate_min_frame_size( cpi );
min_frame_target = 0;
if (cpi->pass == 2)
@@ -862,11 +860,6 @@ void vp8_calc_pframe_target_size(VP8_COMP *cpi)
}
}
- // Set a reduced data rate target for our initial Q calculation.
- // This should help to save bits during earier sections.
- if ((cpi->oxcf.under_shoot_pct > 0) && (cpi->oxcf.under_shoot_pct <= 100))
- cpi->this_frame_target = (cpi->this_frame_target * cpi->oxcf.under_shoot_pct) / 100;
-
// Sanity check that the total sum of adjustments is not above the maximum allowed
// That is that having allowed for KF and GF penalties we have not pushed the
// current interframe target to low. If the adjustment we apply here is not capable of recovering
@@ -903,11 +896,6 @@ void vp8_calc_pframe_target_size(VP8_COMP *cpi)
percent_low =
(cpi->oxcf.optimal_buffer_level - cpi->buffer_level) /
one_percent_bits;
-
- if (percent_low > 100)
- percent_low = 100;
- else if (percent_low < 0)
- percent_low = 0;
}
// Are we overshooting the long term clip data rate...
else if (cpi->bits_off_target < 0)
@@ -915,16 +903,16 @@ void vp8_calc_pframe_target_size(VP8_COMP *cpi)
// Adjust per frame data target downwards to compensate.
percent_low = (int)(100 * -cpi->bits_off_target /
(cpi->total_byte_count * 8));
-
- if (percent_low > 100)
- percent_low = 100;
- else if (percent_low < 0)
- percent_low = 0;
}
+ if (percent_low > cpi->oxcf.under_shoot_pct)
+ percent_low = cpi->oxcf.under_shoot_pct;
+ else if (percent_low < 0)
+ percent_low = 0;
+
// lower the target bandwidth for this frame.
- cpi->this_frame_target =
- (cpi->this_frame_target * (100 - (percent_low / 2))) / 100;
+ cpi->this_frame_target -= (cpi->this_frame_target * percent_low)
+ / 200;
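
Net effect of the undershoot rework, condensed (sketch; percent_low is computed from the buffer state exactly as in the hunk above):

    /* clamp the correction to the configured undershoot percentage, then pull
     * the target down by at most half of it: e.g. under_shoot_pct = 50 and a
     * badly drained buffer give percent_low = 50, i.e. a 25% smaller target */
    if (percent_low > cpi->oxcf.under_shoot_pct)
        percent_low = cpi->oxcf.under_shoot_pct;
    else if (percent_low < 0)
        percent_low = 0;

    cpi->this_frame_target -= (cpi->this_frame_target * percent_low) / 200;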
// Are we using allowing control of active_worst_allowed_q
// according to buffer level.
@@ -995,20 +983,29 @@ void vp8_calc_pframe_target_size(VP8_COMP *cpi)
}
else
{
- int percent_high;
+ int percent_high = 0;
- if (cpi->bits_off_target > cpi->oxcf.optimal_buffer_level)
+ if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
+ && (cpi->buffer_level > cpi->oxcf.optimal_buffer_level))
{
- percent_high = (int)(100 * (cpi->bits_off_target - cpi->oxcf.optimal_buffer_level) / (cpi->total_byte_count * 8));
+ percent_high = (cpi->buffer_level
+ - cpi->oxcf.optimal_buffer_level)
+ / one_percent_bits;
+ }
+ else if (cpi->bits_off_target > cpi->oxcf.optimal_buffer_level)
+ {
+ percent_high = (int)((100 * cpi->bits_off_target)
+ / (cpi->total_byte_count * 8));
+ }
- if (percent_high > 100)
- percent_high = 100;
- else if (percent_high < 0)
- percent_high = 0;
+ if (percent_high > cpi->oxcf.over_shoot_pct)
+ percent_high = cpi->oxcf.over_shoot_pct;
+ else if (percent_high < 0)
+ percent_high = 0;
- cpi->this_frame_target = (cpi->this_frame_target * (100 + (percent_high / 2))) / 100;
+ cpi->this_frame_target += (cpi->this_frame_target *
+ percent_high) / 200;
- }
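
The overshoot side is symmetric; a condensed sketch of the new behaviour (one_percent_bits as defined earlier in this function):

    int percent_high = 0;

    /* CBR streaming keys the boost off current buffer fullness ... */
    if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER
        && cpi->buffer_level > cpi->oxcf.optimal_buffer_level)
        percent_high = (cpi->buffer_level - cpi->oxcf.optimal_buffer_level)
                       / one_percent_bits;
    /* ... other modes keep using the long-term bits_off_target measure */
    else if (cpi->bits_off_target > cpi->oxcf.optimal_buffer_level)
        percent_high = (int)((100 * cpi->bits_off_target)
                             / (cpi->total_byte_count * 8));

    /* clamp to over_shoot_pct, then raise the target by at most half of it */
    if (percent_high > cpi->oxcf.over_shoot_pct)
        percent_high = cpi->oxcf.over_shoot_pct;
    else if (percent_high < 0)
        percent_high = 0;

    cpi->this_frame_target += (cpi->this_frame_target * percent_high) / 200;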
// Are we allowing control of active_worst_allowed_q according to bufferl level.
if (cpi->auto_worst_q)
@@ -1464,39 +1461,6 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
return Q;
}
-static int estimate_min_frame_size(VP8_COMP *cpi)
-{
- double correction_factor;
- int bits_per_mb_at_max_q;
-
- // This funtion returns a default value for the first few frames untill the correction factor has had time to adapt.
- if (cpi->common.current_video_frame < 10)
- {
- if (cpi->pass == 2)
- return (cpi->min_frame_bandwidth);
- else
- return cpi->per_frame_bandwidth / 3;
- }
-
- /* // Select the appropriate correction factor based upon type of frame.
- if ( cpi->common.frame_type == KEY_FRAME )
- correction_factor = cpi->key_frame_rate_correction_factor;
- else
- {
- if ( cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame )
- correction_factor = cpi->gf_rate_correction_factor;
- else
- correction_factor = cpi->rate_correction_factor;
- }*/
-
- // We estimate at half the value we get from vp8_bits_per_mb
- correction_factor = cpi->rate_correction_factor / 2.0;
-
- bits_per_mb_at_max_q = (int)(.5 + correction_factor * vp8_bits_per_mb[cpi->common.frame_type][MAXQ]);
-
- return (bits_per_mb_at_max_q * cpi->common.MBs) >> BPER_MB_NORMBITS;
-}
-
static int estimate_keyframe_frequency(VP8_COMP *cpi)
{
@@ -1513,8 +1477,10 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
/* Assume a default of 1 kf every 2 seconds, or the max kf interval,
* whichever is smaller.
*/
+ int key_freq = cpi->oxcf.key_freq>0 ? cpi->oxcf.key_freq : 1;
av_key_frame_frequency = (int)cpi->output_frame_rate * 2;
- if (av_key_frame_frequency > cpi->oxcf.key_freq)
+
+ if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
av_key_frame_frequency = cpi->oxcf.key_freq;
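
Condensed, the reworked key frame frequency default reads as follows (sketch; the averaging over prior key frame distances below is unchanged):

    /* default to one key frame every two seconds; only cap it at oxcf.key_freq
     * when automatic key frame placement is enabled, and guard the comparison
     * against a zero/unset key_freq */
    int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
    int av_key_frame_frequency = (int)cpi->output_frame_rate * 2;

    if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
        av_key_frame_frequency = cpi->oxcf.key_freq;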
cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 54cfab8c5..dfc9bec95 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -298,9 +298,6 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex)
#endif
vp8_set_speed_features(cpi);
- if (cpi->common.simpler_lpf)
- cpi->common.filter_type = SIMPLE_LOOPFILTER;
-
q = (int)pow(vp8_dc_quant(QIndex,0), 1.25);
if (q < 8)
@@ -2526,7 +2523,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
best_mbmode.uv_mode = 0;
best_mbmode.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
best_mbmode.partitioning = 0;
- best_mbmode.dc_diff = 0;
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index ccf0c7f34..1c5923813 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -224,18 +224,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
int plane_type;
int b;
- TOKENEXTRA *start = *t;
- TOKENEXTRA *tp = *t;
-
- x->mode_info_context->mbmi.dc_diff = 1;
-
-
-#if 1
-
x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(x);
if (x->mode_info_context->mbmi.mb_skip_coeff)
{
-
cpi->skip_true_count++;
if (!cpi->common.mb_no_coeff_skip)
@@ -245,17 +236,11 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
vp8_fix_contexts(x);
}
- if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
- x->mode_info_context->mbmi.dc_diff = 0;
- else
- x->mode_info_context->mbmi.dc_diff = 1;
-
-
return;
}
cpi->skip_false_count++;
-#endif
+
#if 0
vpx_memcpy(cpi->coef_counts_backup, cpi->coef_counts, sizeof(cpi->coef_counts));
#endif
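
The skip decision now comes solely from mb_is_skippable(); a heavily simplified sketch of that style of check (illustrative only; the real libvpx routine also accounts for the Y2 block and the B_PRED/SPLITMV modes):

    /* sketch over a plain array of per-block end-of-block positions:
     * a macroblock is skippable when no block has coefficients left to code */
    static int mb_is_skippable_sketch(const int eob[25])   /* 16 Y + 8 chroma + 1 Y2 */
    {
        int skip = 1;
        int i;

        for (i = 0; i < 25; i++)
            skip &= (eob[i] == 0);

        return skip;
    }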
@@ -282,42 +267,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
A + vp8_block2above[b],
L + vp8_block2left[b], cpi);
-#if 0
-
- if (cpi->common.mb_no_coeff_skip)
- {
- int skip = 1;
-
- while ((tp != *t) && skip)
- {
- skip = (skip && (tp->Token == DCT_EOB_TOKEN));
- tp ++;
- }
-
- if (skip != x->mbmi.mb_skip_coeff)
- skip += 0;
-
- x->mbmi.mb_skip_coeff = skip;
-
- if (x->mbmi.mb_skip_coeff == 1)
- {
- x->mbmi.dc_diff = 0;
- //redo the coutnts
- vpx_memcpy(cpi->coef_counts, cpi->coef_counts_backup, sizeof(cpi->coef_counts));
-
- *t = start;
- cpi->skip_true_count++;
- //skip_true_count++;
- }
- else
- {
-
- cpi->skip_false_count++;
- //skip_false_count++;
- }
- }
-
-#endif
}
@@ -500,13 +449,6 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
A + vp8_block2above[24], L + vp8_block2left[24], cpi);
plane_type = 0;
-
- if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
- x->mode_info_context->mbmi.dc_diff = 0;
- else
- x->mode_info_context->mbmi.dc_diff = 1;
-
-
for (b = 0; b < 16; b++)
stuff1st_order_b(x->block + b, t, plane_type, x->frame_type,
A + vp8_block2above[b],
diff --git a/vp8/encoder/x86/quantize_sse4.asm b/vp8/encoder/x86/quantize_sse4.asm
new file mode 100644
index 000000000..258899eed
--- /dev/null
+++ b/vp8/encoder/x86/quantize_sse4.asm
@@ -0,0 +1,254 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license and patent
+; grant that can be found in the LICENSE file in the root of the source
+; tree. All contributing project authors may be found in the AUTHORS
+; file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+%include "asm_enc_offsets.asm"
+
+
+; void vp8_regular_quantize_b_sse4 | arg
+; (BLOCK *b, | 0
+; BLOCKD *d) | 1
+
+global sym(vp8_regular_quantize_b_sse4)
+sym(vp8_regular_quantize_b_sse4):
+
+%if ABI_IS_32BIT
+ push rbp
+ mov rbp, rsp
+ GET_GOT rbx
+ push rdi
+ push rsi
+
+ ALIGN_STACK 16, rax
+ %define qcoeff 0 ; 32
+ %define stack_size 32
+ sub rsp, stack_size
+%else
+ %ifidn __OUTPUT_FORMAT__,x64
+ SAVE_XMM 8, u
+ push rdi
+ push rsi
+ %endif
+%endif
+ ; end prolog
+
+%if ABI_IS_32BIT
+ mov rdi, arg(0) ; BLOCK *b
+ mov rsi, arg(1) ; BLOCKD *d
+%else
+ %ifidn __OUTPUT_FORMAT__,x64
+ mov rdi, rcx ; BLOCK *b
+ mov rsi, rdx ; BLOCKD *d
+ %else
+ ;mov rdi, rdi ; BLOCK *b
+ ;mov rsi, rsi ; BLOCKD *d
+ %endif
+%endif
+
+ mov rax, [rdi + vp8_block_coeff]
+ mov rcx, [rdi + vp8_block_zbin]
+ mov rdx, [rdi + vp8_block_round]
+ movd xmm7, [rdi + vp8_block_zbin_extra]
+
+ ; z
+ movdqa xmm0, [rax]
+ movdqa xmm1, [rax + 16]
+
+ ; duplicate zbin_oq_value
+ pshuflw xmm7, xmm7, 0
+ punpcklwd xmm7, xmm7
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+
+ ; sz
+ psraw xmm0, 15
+ psraw xmm1, 15
+
+ ; (z ^ sz)
+ pxor xmm2, xmm0
+ pxor xmm3, xmm1
+
+ ; x = abs(z)
+ psubw xmm2, xmm0
+ psubw xmm3, xmm1
+
+ ; zbin
+ movdqa xmm4, [rcx]
+ movdqa xmm5, [rcx + 16]
+
+ ; *zbin_ptr + zbin_oq_value
+ paddw xmm4, xmm7
+ paddw xmm5, xmm7
+
+ movdqa xmm6, xmm2
+ movdqa xmm7, xmm3
+
+ ; x - (*zbin_ptr + zbin_oq_value)
+ psubw xmm6, xmm4
+ psubw xmm7, xmm5
+
+ ; round
+ movdqa xmm4, [rdx]
+ movdqa xmm5, [rdx + 16]
+
+ mov rax, [rdi + vp8_block_quant_shift]
+ mov rcx, [rdi + vp8_block_quant]
+ mov rdx, [rdi + vp8_block_zrun_zbin_boost]
+
+ ; x + round
+ paddw xmm2, xmm4
+ paddw xmm3, xmm5
+
+ ; quant
+ movdqa xmm4, [rcx]
+ movdqa xmm5, [rcx + 16]
+
+ ; y = x * quant_ptr >> 16
+ pmulhw xmm4, xmm2
+ pmulhw xmm5, xmm3
+
+ ; y += x
+ paddw xmm2, xmm4
+ paddw xmm3, xmm5
+
+ pxor xmm4, xmm4
+%if ABI_IS_32BIT
+ movdqa [rsp + qcoeff], xmm4
+ movdqa [rsp + qcoeff + 16], xmm4
+%else
+ pxor xmm8, xmm8
+%endif
+
+ ; quant_shift
+ movdqa xmm5, [rax]
+
+ ; zrun_zbin_boost
+ mov rax, rdx
+
+%macro ZIGZAG_LOOP 5
+ ; x
+ pextrw ecx, %4, %2
+
+ ; if (x >= zbin)
+ sub cx, WORD PTR[rdx] ; x - zbin
+ lea rdx, [rdx + 2] ; zbin_boost_ptr++
+ jl rq_zigzag_loop_%1 ; x < zbin
+
+ pextrw edi, %3, %2 ; y
+
+ ; downshift by quant_shift[rc]
+ pextrb ecx, xmm5, %1 ; quant_shift[rc]
+ sar edi, cl ; also sets Z bit
+ je rq_zigzag_loop_%1 ; !y
+%if ABI_IS_32BIT
+ mov WORD PTR[rsp + qcoeff + %1 *2], di
+%else
+ pinsrw %5, edi, %2 ; qcoeff[rc]
+%endif
+ mov rdx, rax ; reset to b->zrun_zbin_boost
+rq_zigzag_loop_%1:
+%endmacro
+; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
+ZIGZAG_LOOP 0, 0, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 1, 1, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 4, 4, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 8, 0, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 5, 5, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 2, 2, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 3, 3, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 6, 6, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 9, 1, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 12, 4, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 13, 5, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 10, 2, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 7, 7, xmm2, xmm6, xmm4
+ZIGZAG_LOOP 11, 3, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 14, 6, xmm3, xmm7, xmm8
+ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
+
+ mov rcx, [rsi + vp8_blockd_dequant]
+ mov rdi, [rsi + vp8_blockd_dqcoeff]
+
+%if ABI_IS_32BIT
+ movdqa xmm4, [rsp + qcoeff]
+ movdqa xmm5, [rsp + qcoeff + 16]
+%else
+ %define xmm5 xmm8
+%endif
+
+ ; y ^ sz
+ pxor xmm4, xmm0
+ pxor xmm5, xmm1
+ ; x = (y ^ sz) - sz
+ psubw xmm4, xmm0
+ psubw xmm5, xmm1
+
+ ; dequant
+ movdqa xmm0, [rcx]
+ movdqa xmm1, [rcx + 16]
+
+ mov rcx, [rsi + vp8_blockd_qcoeff]
+
+ pmullw xmm0, xmm4
+ pmullw xmm1, xmm5
+
+ ; store qcoeff
+ movdqa [rcx], xmm4
+ movdqa [rcx + 16], xmm5
+
+ ; store dqcoeff
+ movdqa [rdi], xmm0
+ movdqa [rdi + 16], xmm1
+
+ ; select the last value (in zig_zag order) for EOB
+ pxor xmm6, xmm6
+ pcmpeqw xmm4, xmm6
+ pcmpeqw xmm5, xmm6
+
+ packsswb xmm4, xmm5
+ pshufb xmm4, [GLOBAL(zig_zag1d)]
+ pmovmskb edx, xmm4
+ xor rdi, rdi
+ mov eax, -1
+ xor dx, ax
+ bsr eax, edx
+ sub edi, edx
+ sar edi, 31
+ add eax, 1
+ and eax, edi
+
+ mov [rsi + vp8_blockd_eob], eax
+
+ ; begin epilog
+%if ABI_IS_32BIT
+ add rsp, stack_size
+ pop rsp
+
+ pop rsi
+ pop rdi
+ RESTORE_GOT
+ pop rbp
+%else
+ %undef xmm5
+ %ifidn __OUTPUT_FORMAT__,x64
+ pop rsi
+ pop rdi
+ RESTORE_XMM
+ %endif
+%endif
+
+ ret
+
+SECTION_RODATA
+align 16
+; vp8/common/entropy.c: vp8_default_zig_zag1d
+zig_zag1d:
+ db 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
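
For readers who prefer not to trace the SSE4.1 code, the algorithm it vectorises is the regular (non-fast) quantizer: take abs(z), test it against the zero bin plus zbin_oq and the zero-run boost, apply round/quant/quant_shift, restore the sign, dequantize, and track the last nonzero coefficient in zig-zag order for the EOB. A C sketch of that flow, using the field names implied by the asm_enc_offsets above (it mirrors the reference vp8_regular_quantize_b_c; exact types and members should be checked against vp8/encoder/quantize.c and block.h):

    #include <string.h>   /* memset */

    /* sketch; BLOCK, BLOCKD and vp8_default_zig_zag1d come from the encoder */
    static void regular_quantize_b_sketch(BLOCK *b, BLOCKD *d)
    {
        int i, eob = -1;
        short *boost = b->zrun_zbin_boost;           /* zero-run dependent zbin boost */

        memset(d->qcoeff,  0, 16 * sizeof(d->qcoeff[0]));
        memset(d->dqcoeff, 0, 16 * sizeof(d->dqcoeff[0]));

        for (i = 0; i < 16; i++)
        {
            int rc   = vp8_default_zig_zag1d[i];     /* process in zig-zag order */
            int z    = b->coeff[rc];
            int zbin = b->zbin[rc] + *boost + b->zbin_extra;
            int sz, x, y;

            boost++;
            sz = (z >> 31);                          /* sign of z (all 0s or all 1s) */
            x  = (z ^ sz) - sz;                      /* abs(z) */

            if (x >= zbin)
            {
                x += b->round[rc];
                y  = (((x * b->quant[rc]) >> 16) + x) >> b->quant_shift[rc];
                x  = (y ^ sz) - sz;                  /* restore the sign */

                d->qcoeff[rc]  = (short)x;
                d->dqcoeff[rc] = (short)(x * d->dequant[rc]);

                if (y)
                {
                    eob   = i;                       /* last nonzero, zig-zag position */
                    boost = b->zrun_zbin_boost;      /* restart the zero-run boost */
                }
            }
        }

        d->eob = eob + 1;
    }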
diff --git a/vp8/encoder/x86/quantize_x86.h b/vp8/encoder/x86/quantize_x86.h
index f09358061..bbe475f8c 100644
--- a/vp8/encoder/x86/quantize_x86.h
+++ b/vp8/encoder/x86/quantize_x86.h
@@ -51,4 +51,17 @@ extern prototype_quantize_block(vp8_fast_quantize_b_ssse3);
#endif /* HAVE_SSSE3 */
+
+#if HAVE_SSE4_1
+extern prototype_quantize_block(vp8_regular_quantize_b_sse4);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp8_quantize_quantb
+#define vp8_quantize_quantb vp8_regular_quantize_b_sse4
+
+#endif /* !CONFIG_RUNTIME_CPU_DETECT */
+
+#endif /* HAVE_SSE4_1 */
+
#endif /* QUANTIZE_X86_H */
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index 47a1788bc..b01319fa4 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -313,6 +313,8 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
cpi->rtcd.search.full_search = vp8_full_search_sadx8;
+
+ cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse4;
}
#endif