diff options
Diffstat (limited to 'vp9')
-rw-r--r-- | vp9/common/vp9_rtcd_defs.sh   |  15
-rw-r--r-- | vp9/encoder/vp9_dct.c         | 436
-rw-r--r-- | vp9/encoder/vp9_encodeintra.c |  12
-rw-r--r-- | vp9/encoder/vp9_encodemb.c    |  16
-rw-r--r-- | vp9/encoder/vp9_onyx_if.c     |  66
-rw-r--r-- | vp9/encoder/vp9_onyx_int.h    |   2
-rw-r--r-- | vp9/encoder/vp9_quantize.c    |  12
-rw-r--r-- | vp9/encoder/vp9_ratectrl.c    |  60
-rw-r--r-- | vp9/encoder/vp9_rdopt.c       |  22
9 files changed, 499 insertions, 142 deletions
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh index e4781aa05..5a7484d81 100644 --- a/vp9/common/vp9_rtcd_defs.sh +++ b/vp9/common/vp9_rtcd_defs.sh @@ -568,6 +568,21 @@ fi prototype void vp9_fht "const int16_t *input, int pitch, int16_t *output, int tx_type, int tx_dim" specialize vp9_fht +#if CONFIG_INTHT4X4 +prototype void vp9_short_fht4x4 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type" +specialize vp9_short_fht4x4 +#endif + +#if CONFIG_INTHT +prototype void vp9_short_fht8x8 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type" +specialize vp9_short_fht8x8 +#endif + +#if CONFIG_INTHT16X16 +prototype void vp9_short_fht16x16 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type" +specialize vp9_short_fht16x16 +#endif + prototype void vp9_short_fdct8x8 "int16_t *InputData, int16_t *OutputData, int pitch" specialize vp9_short_fdct8x8 diff --git a/vp9/encoder/vp9_dct.c b/vp9/encoder/vp9_dct.c index b9648638d..4a1e78e93 100644 --- a/vp9/encoder/vp9_dct.c +++ b/vp9/encoder/vp9_dct.c @@ -816,6 +816,101 @@ void vp9_short_fdct4x4_c(short *input, short *output, int pitch) { } } +#if CONFIG_INTHT4X4 +static void fadst4_1d(int16_t *input, int16_t *output) { + int x0, x1, x2, x3; + int s0, s1, s2, s3, s4, s5, s6, s7; + + x0 = input[0]; + x1 = input[1]; + x2 = input[2]; + x3 = input[3]; + + if (!(x0 | x1 | x2 | x3)) { + output[0] = output[1] = output[2] = output[3] = 0; + return; + } + + s0 = sinpi_1_9 * x0; + s1 = sinpi_4_9 * x0; + s2 = sinpi_2_9 * x1; + s3 = sinpi_1_9 * x1; + s4 = sinpi_3_9 * x2; + s5 = sinpi_4_9 * x3; + s6 = sinpi_2_9 * x3; + s7 = x0 + x1 - x3; + + x0 = s0 + s2 + s5; + x1 = sinpi_3_9 * s7; + x2 = s1 - s3 + s6; + x3 = s4; + + s0 = x0 + x3; + s1 = x1; + s2 = x2 - x3; + s3 = x2 - x0 + x3; + + // 1-D transform scaling factor is sqrt(2). 
+ output[0] = dct_const_round_shift(s0); + output[1] = dct_const_round_shift(s1); + output[2] = dct_const_round_shift(s2); + output[3] = dct_const_round_shift(s3); +} + +void vp9_short_fht4x4_c(int16_t *input, int16_t *output, + int pitch, TX_TYPE tx_type) { + int16_t out[4 * 4]; + int16_t *outptr = &out[0]; + const int short_pitch = pitch >> 1; + int i, j; + int16_t temp_in[4], temp_out[4]; + + void (*fwdr)(int16_t*, int16_t*); + void (*fwdc)(int16_t*, int16_t*); + + switch (tx_type) { + case ADST_ADST: + fwdc = &fadst4_1d; + fwdr = &fadst4_1d; + break; + case ADST_DCT: + fwdc = &fadst4_1d; + fwdr = &fdct4_1d; + break; + case DCT_ADST: + fwdc = &fdct4_1d; + fwdr = &fadst4_1d; + break; + case DCT_DCT: + fwdc = &fdct4_1d; + fwdr = &fdct4_1d; + break; + default: + assert(0); + } + + + // column transform + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) + temp_in[j] = input[j * short_pitch + i] << 4; + if (i == 0 && temp_in[0]) + temp_in[0] += 1; + fwdc(temp_in, temp_out); + for (j = 0; j < 4; ++j) + outptr[j * 4 + i] = temp_out[j]; + } + + // row transform + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) + temp_in[j] = out[j + i * 4]; + fwdr(temp_in, temp_out); + for (j = 0; j < 4; ++j) + output[j + i * 4] = (temp_out[j] + 1) >> 2; + } +} +#endif void vp9_short_fdct8x4_c(short *input, short *output, int pitch) { @@ -896,6 +991,131 @@ void vp9_short_fdct8x8_c(int16_t *input, int16_t *output, int pitch) { } #endif +#if CONFIG_INTHT +static void fadst8_1d(int16_t *input, int16_t *output) { + int x0, x1, x2, x3, x4, x5, x6, x7; + int s0, s1, s2, s3, s4, s5, s6, s7; + + x0 = input[7]; + x1 = input[0]; + x2 = input[5]; + x3 = input[2]; + x4 = input[3]; + x5 = input[4]; + x6 = input[1]; + x7 = input[6]; + + // stage 1 + s0 = cospi_2_64 * x0 + cospi_30_64 * x1; + s1 = cospi_30_64 * x0 - cospi_2_64 * x1; + s2 = cospi_10_64 * x2 + cospi_22_64 * x3; + s3 = cospi_22_64 * x2 - cospi_10_64 * x3; + s4 = cospi_18_64 * x4 + cospi_14_64 * x5; + s5 = cospi_14_64 * x4 - 
cospi_18_64 * x5; + s6 = cospi_26_64 * x6 + cospi_6_64 * x7; + s7 = cospi_6_64 * x6 - cospi_26_64 * x7; + + x0 = dct_const_round_shift(s0 + s4); + x1 = dct_const_round_shift(s1 + s5); + x2 = dct_const_round_shift(s2 + s6); + x3 = dct_const_round_shift(s3 + s7); + x4 = dct_const_round_shift(s0 - s4); + x5 = dct_const_round_shift(s1 - s5); + x6 = dct_const_round_shift(s2 - s6); + x7 = dct_const_round_shift(s3 - s7); + + // stage 2 + s0 = x0; + s1 = x1; + s2 = x2; + s3 = x3; + s4 = cospi_8_64 * x4 + cospi_24_64 * x5; + s5 = cospi_24_64 * x4 - cospi_8_64 * x5; + s6 = - cospi_24_64 * x6 + cospi_8_64 * x7; + s7 = cospi_8_64 * x6 + cospi_24_64 * x7; + + x0 = s0 + s2; + x1 = s1 + s3; + x2 = s0 - s2; + x3 = s1 - s3; + x4 = dct_const_round_shift(s4 + s6); + x5 = dct_const_round_shift(s5 + s7); + x6 = dct_const_round_shift(s4 - s6); + x7 = dct_const_round_shift(s5 - s7); + + // stage 3 + s2 = cospi_16_64 * (x2 + x3); + s3 = cospi_16_64 * (x2 - x3); + s6 = cospi_16_64 * (x6 + x7); + s7 = cospi_16_64 * (x6 - x7); + + x2 = dct_const_round_shift(s2); + x3 = dct_const_round_shift(s3); + x6 = dct_const_round_shift(s6); + x7 = dct_const_round_shift(s7); + + output[0] = x0; + output[1] = - x4; + output[2] = x6; + output[3] = - x2; + output[4] = x3; + output[5] = - x7; + output[6] = x5; + output[7] = - x1; +} + +void vp9_short_fht8x8_c(int16_t *input, int16_t *output, + int pitch, TX_TYPE tx_type) { + int16_t out[64]; + int16_t *outptr = &out[0]; + const int short_pitch = pitch >> 1; + int i, j; + int16_t temp_in[8], temp_out[8]; + + void (*fwdr)(int16_t*, int16_t*); + void (*fwdc)(int16_t*, int16_t*); + + switch (tx_type) { + case ADST_ADST: + fwdc = &fadst8_1d; + fwdr = &fadst8_1d; + break; + case ADST_DCT: + fwdc = &fadst8_1d; + fwdr = &fdct8_1d; + break; + case DCT_ADST: + fwdc = &fdct8_1d; + fwdr = &fadst8_1d; + break; + case DCT_DCT: + fwdc = &fdct8_1d; + fwdr = &fdct8_1d; + break; + default: + assert(0); + } + + // column transform + for (i = 0; i < 8; ++i) { + for (j = 0; j < 
8; ++j) + temp_in[j] = input[j * short_pitch + i] << 2; + fwdc(temp_in, temp_out); + for (j = 0; j < 8; ++j) + outptr[j * 8 + i] = temp_out[j]; + } + + // row transform + for (i = 0; i < 8; ++i) { + for (j = 0; j < 8; ++j) + temp_in[j] = out[j + i * 8]; + fwdr(temp_in, temp_out); + for (j = 0; j < 8; ++j) + output[j + i * 8] = temp_out[j] >> 1; + } +} +#endif + void vp9_short_walsh4x4_x8_c(short *input, short *output, int pitch) { int i; int a1, b1, c1, d1; @@ -1507,6 +1727,222 @@ void vp9_short_fdct16x16_c(int16_t *input, int16_t *out, int pitch) { #endif #endif +#if CONFIG_INTHT16X16 +void fadst16_1d(int16_t *input, int16_t *output) { + int x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; + int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15; + + x0 = input[15]; + x1 = input[0]; + x2 = input[13]; + x3 = input[2]; + x4 = input[11]; + x5 = input[4]; + x6 = input[9]; + x7 = input[6]; + x8 = input[7]; + x9 = input[8]; + x10 = input[5]; + x11 = input[10]; + x12 = input[3]; + x13 = input[12]; + x14 = input[1]; + x15 = input[14]; + + // stage 1 + s0 = x0 * cospi_1_64 + x1 * cospi_31_64; + s1 = x0 * cospi_31_64 - x1 * cospi_1_64; + s2 = x2 * cospi_5_64 + x3 * cospi_27_64; + s3 = x2 * cospi_27_64 - x3 * cospi_5_64; + s4 = x4 * cospi_9_64 + x5 * cospi_23_64; + s5 = x4 * cospi_23_64 - x5 * cospi_9_64; + s6 = x6 * cospi_13_64 + x7 * cospi_19_64; + s7 = x6 * cospi_19_64 - x7 * cospi_13_64; + s8 = x8 * cospi_17_64 + x9 * cospi_15_64; + s9 = x8 * cospi_15_64 - x9 * cospi_17_64; + s10 = x10 * cospi_21_64 + x11 * cospi_11_64; + s11 = x10 * cospi_11_64 - x11 * cospi_21_64; + s12 = x12 * cospi_25_64 + x13 * cospi_7_64; + s13 = x12 * cospi_7_64 - x13 * cospi_25_64; + s14 = x14 * cospi_29_64 + x15 * cospi_3_64; + s15 = x14 * cospi_3_64 - x15 * cospi_29_64; + + x0 = dct_const_round_shift(s0 + s8); + x1 = dct_const_round_shift(s1 + s9); + x2 = dct_const_round_shift(s2 + s10); + x3 = dct_const_round_shift(s3 + s11); + x4 = 
dct_const_round_shift(s4 + s12); + x5 = dct_const_round_shift(s5 + s13); + x6 = dct_const_round_shift(s6 + s14); + x7 = dct_const_round_shift(s7 + s15); + x8 = dct_const_round_shift(s0 - s8); + x9 = dct_const_round_shift(s1 - s9); + x10 = dct_const_round_shift(s2 - s10); + x11 = dct_const_round_shift(s3 - s11); + x12 = dct_const_round_shift(s4 - s12); + x13 = dct_const_round_shift(s5 - s13); + x14 = dct_const_round_shift(s6 - s14); + x15 = dct_const_round_shift(s7 - s15); + + // stage 2 + s0 = x0; + s1 = x1; + s2 = x2; + s3 = x3; + s4 = x4; + s5 = x5; + s6 = x6; + s7 = x7; + s8 = x8 * cospi_4_64 + x9 * cospi_28_64; + s9 = x8 * cospi_28_64 - x9 * cospi_4_64; + s10 = x10 * cospi_20_64 + x11 * cospi_12_64; + s11 = x10 * cospi_12_64 - x11 * cospi_20_64; + s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; + s13 = x12 * cospi_4_64 + x13 * cospi_28_64; + s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; + s15 = x14 * cospi_20_64 + x15 * cospi_12_64; + + x0 = s0 + s4; + x1 = s1 + s5; + x2 = s2 + s6; + x3 = s3 + s7; + x4 = s0 - s4; + x5 = s1 - s5; + x6 = s2 - s6; + x7 = s3 - s7; + x8 = dct_const_round_shift(s8 + s12); + x9 = dct_const_round_shift(s9 + s13); + x10 = dct_const_round_shift(s10 + s14); + x11 = dct_const_round_shift(s11 + s15); + x12 = dct_const_round_shift(s8 - s12); + x13 = dct_const_round_shift(s9 - s13); + x14 = dct_const_round_shift(s10 - s14); + x15 = dct_const_round_shift(s11 - s15); + + // stage 3 + s0 = x0; + s1 = x1; + s2 = x2; + s3 = x3; + s4 = x4 * cospi_8_64 + x5 * cospi_24_64; + s5 = x4 * cospi_24_64 - x5 * cospi_8_64; + s6 = - x6 * cospi_24_64 + x7 * cospi_8_64; + s7 = x6 * cospi_8_64 + x7 * cospi_24_64; + s8 = x8; + s9 = x9; + s10 = x10; + s11 = x11; + s12 = x12 * cospi_8_64 + x13 * cospi_24_64; + s13 = x12 * cospi_24_64 - x13 * cospi_8_64; + s14 = - x14 * cospi_24_64 + x15 * cospi_8_64; + s15 = x14 * cospi_8_64 + x15 * cospi_24_64; + + x0 = s0 + s2; + x1 = s1 + s3; + x2 = s0 - s2; + x3 = s1 - s3; + x4 = dct_const_round_shift(s4 + s6); + x5 = 
dct_const_round_shift(s5 + s7); + x6 = dct_const_round_shift(s4 - s6); + x7 = dct_const_round_shift(s5 - s7); + x8 = s8 + s10; + x9 = s9 + s11; + x10 = s8 - s10; + x11 = s9 - s11; + x12 = dct_const_round_shift(s12 + s14); + x13 = dct_const_round_shift(s13 + s15); + x14 = dct_const_round_shift(s12 - s14); + x15 = dct_const_round_shift(s13 - s15); + + // stage 4 + s2 = (- cospi_16_64) * (x2 + x3); + s3 = cospi_16_64 * (x2 - x3); + s6 = cospi_16_64 * (x6 + x7); + s7 = cospi_16_64 * (- x6 + x7); + s10 = cospi_16_64 * (x10 + x11); + s11 = cospi_16_64 * (- x10 + x11); + s14 = (- cospi_16_64) * (x14 + x15); + s15 = cospi_16_64 * (x14 - x15); + + x2 = dct_const_round_shift(s2); + x3 = dct_const_round_shift(s3); + x6 = dct_const_round_shift(s6); + x7 = dct_const_round_shift(s7); + x10 = dct_const_round_shift(s10); + x11 = dct_const_round_shift(s11); + x14 = dct_const_round_shift(s14); + x15 = dct_const_round_shift(s15); + + output[0] = x0; + output[1] = - x8; + output[2] = x12; + output[3] = - x4; + output[4] = x6; + output[5] = x14; + output[6] = x10; + output[7] = x2; + output[8] = x3; + output[9] = x11; + output[10] = x15; + output[11] = x7; + output[12] = x5; + output[13] = - x13; + output[14] = x9; + output[15] = - x1; +} + +void vp9_short_fht16x16_c(int16_t *input, int16_t *output, + int pitch, TX_TYPE tx_type) { + int16_t out[256]; + int16_t *outptr = &out[0]; + const int short_pitch = pitch >> 1; + int i, j; + int16_t temp_in[16], temp_out[16]; + + void (*fwdr)(int16_t*, int16_t*); + void (*fwdc)(int16_t*, int16_t*); + + switch (tx_type) { + case ADST_ADST: + fwdc = &fadst16_1d; + fwdr = &fadst16_1d; + break; + case ADST_DCT: + fwdc = &fadst16_1d; + fwdr = &fdct16_1d; + break; + case DCT_ADST: + fwdc = &fdct16_1d; + fwdr = &fadst16_1d; + break; + case DCT_DCT: + fwdc = &fdct16_1d; + fwdr = &fdct16_1d; + break; + default: + assert(0); + } + + // column transform + for (i = 0; i < 16; ++i) { + for (j = 0; j < 16; ++j) + temp_in[j] = input[j * short_pitch + i]; + 
fwdc(temp_in, temp_out); + for (j = 0; j < 16; ++j) + outptr[j * 16 + i] = temp_out[j]; + } + + // row transform + for (i = 0; i < 16; ++i) { + for (j = 0; j < 16; ++j) + temp_in[j] = out[j + i * 16]; + fwdr(temp_in, temp_out); + for (j = 0; j < 16; ++j) + output[j + i * 16] = temp_out[j]; + } +} +#endif + #define TEST_INT_32x32_DCT 1 #if !TEST_INT_32x32_DCT diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c index a52763080..e40127982 100644 --- a/vp9/encoder/vp9_encodeintra.c +++ b/vp9/encoder/vp9_encodeintra.c @@ -54,7 +54,11 @@ void vp9_encode_intra4x4block(MACROBLOCK *x, int ib) { tx_type = get_tx_type_4x4(&x->e_mbd, b); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT4X4 + vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type); +#else vp9_fht(be->src_diff, 32, be->coeff, tx_type, 4); +#endif vp9_ht_quantize_b_4x4(be, b, tx_type); #if CONFIG_INTHT4X4 vp9_short_iht4x4(b->dqcoeff, b->diff, 32, tx_type); @@ -153,8 +157,12 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) { tx_type = get_tx_type_8x8(xd, &xd->block[ib]); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT + vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 32, tx_type); +#else vp9_fht(be->src_diff, 32, (x->block + idx)->coeff, tx_type, 8); +#endif x->quantize_b_8x8(x->block + idx, xd->block + idx); #if CONFIG_INTHT @@ -175,7 +183,11 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) { be = &x->block[ib + iblock[i]]; tx_type = get_tx_type_4x4(xd, b); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT4X4 + vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type); +#else vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4); +#endif vp9_ht_quantize_b_4x4(be, b, tx_type); #if CONFIG_INTHT4X4 vp9_short_iht4x4(b->dqcoeff, b->diff, 32, tx_type); diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c index efe95c911..a4a0de6ed 100644 --- a/vp9/encoder/vp9_encodemb.c +++ b/vp9/encoder/vp9_encodemb.c @@ -174,7 +174,11 @@ void vp9_transform_mby_4x4(MACROBLOCK *x) { BLOCK *b = &x->block[i]; 
TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[i]); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT4X4 + vp9_short_fht4x4(b->src_diff, b->coeff, 32, tx_type); +#else vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 4); +#endif } else if (!(i & 1) && get_tx_type_4x4(xd, &xd->block[i + 1]) == DCT_DCT) { x->fwd_txm8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32); @@ -209,7 +213,11 @@ void vp9_transform_mby_8x8(MACROBLOCK *x) { BLOCK *b = &x->block[i]; tx_type = get_tx_type_8x8(xd, &xd->block[i]); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT + vp9_short_fht8x8(b->src_diff, b->coeff, 32, tx_type); +#else vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 8); +#endif } else { x->fwd_txm8x8(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32); @@ -219,7 +227,11 @@ void vp9_transform_mby_8x8(MACROBLOCK *x) { BLOCK *b = &x->block[i]; tx_type = get_tx_type_8x8(xd, &xd->block[i]); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT + vp9_short_fht8x8(b->src_diff, (b + 2)->coeff, 32, tx_type); +#else vp9_fht_c(b->src_diff, 32, (b + 2)->coeff, tx_type, 8); +#endif } else { x->fwd_txm8x8(&x->block[i].src_diff[0], &x->block[i + 2].coeff[0], 32); @@ -247,7 +259,11 @@ void vp9_transform_mby_16x16(MACROBLOCK *x) { TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]); vp9_clear_system_state(); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT16X16 + vp9_short_fht16x16(b->src_diff, b->coeff, 32, tx_type); +#else vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 16); +#endif } else { x->fwd_txm16x16(&x->block[0].src_diff[0], &x->block[0].coeff[0], 32); diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c index 9d1ae1131..33c1dd28c 100644 --- a/vp9/encoder/vp9_onyx_if.c +++ b/vp9/encoder/vp9_onyx_if.c @@ -2551,8 +2551,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, int q_low; int q_high; - int zbin_oq_high; - int zbin_oq_low = 0; int top_index; int bottom_index; @@ -2565,11 +2563,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, #if RESET_FOREACH_FILTER int q_low0; int 
q_high0; - int zbin_oq_high0; - int zbin_oq_low0 = 0; int Q0; - int last_zbin_oq; - int last_zbin_oq0; int active_best_quality0; int active_worst_quality0; double rate_correction_factor0; @@ -2603,7 +2597,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, } // Clear zbin over-quant value and mode boost values. - cpi->zbin_over_quant = 0; cpi->zbin_mode_boost = 0; // Enable or disable mode based tweaking of the zbin @@ -2794,18 +2787,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, // Determine initial Q to try Q = vp9_regulate_q(cpi, cpi->this_frame_target); } -#if RESET_FOREACH_FILTER - last_zbin_oq = cpi->zbin_over_quant; -#endif - - // Set highest allowed value for Zbin over quant - if (cm->frame_type == KEY_FRAME) - zbin_oq_high = 0; // ZBIN_OQ_MAX/16 - else if (cpi->refresh_alt_ref_frame - || (cpi->refresh_golden_frame && !cpi->source_alt_ref_active)) - zbin_oq_high = 16; - else - zbin_oq_high = ZBIN_OQ_MAX; vp9_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit); @@ -2888,9 +2869,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, q_low0 = q_low; q_high0 = q_high; Q0 = Q; - zbin_oq_low0 = zbin_oq_low; - zbin_oq_high0 = zbin_oq_high; - last_zbin_oq0 = last_zbin_oq; rate_correction_factor0 = cpi->rate_correction_factor; gf_rate_correction_factor0 = cpi->gf_rate_correction_factor; active_best_quality0 = cpi->active_best_quality; @@ -3049,23 +3027,12 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, if (cpi->projected_frame_size > cpi->this_frame_target) { q_low = (Q < q_high) ? (Q + 1) : q_high; // Raise Qlow as to at least the current value - if (cpi->zbin_over_quant > 0) // If we are using over quant do the same for zbin_oq_low - zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high; - if (undershoot_seen || (loop_count > 1)) { // Update rate_correction_factor unless cpi->active_worst_quality has changed. 
if (!active_worst_qchanged) vp9_update_rate_correction_factors(cpi, 1); Q = (q_high + q_low + 1) / 2; - - // Adjust cpi->zbin_over_quant (only allowed when Q is max) - if (Q < MAXQ) - cpi->zbin_over_quant = 0; - else { - zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high; - cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; - } } else { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) @@ -3073,7 +3040,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, Q = vp9_regulate_q(cpi, cpi->this_frame_target); - while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10)) { + while ((Q < q_low) && (Retries < 10)) { vp9_update_rate_correction_factors(cpi, 0); Q = vp9_regulate_q(cpi, cpi->this_frame_target); Retries++; @@ -3084,10 +3051,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, } // Frame is too small else { - if (cpi->zbin_over_quant == 0) - q_high = (Q > q_low) ? (Q - 1) : q_low; // Lower q_high if not using over quant - else // else lower zbin_oq_high - zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low; + q_high = (Q > q_low) ? (Q - 1) : q_low; if (overshoot_seen || (loop_count > 1)) { // Update rate_correction_factor unless cpi->active_worst_quality has changed. @@ -3095,12 +3059,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, vp9_update_rate_correction_factors(cpi, 1); Q = (q_high + q_low) / 2; - - // Adjust cpi->zbin_over_quant (only allowed when Q is max) - if (Q < MAXQ) - cpi->zbin_over_quant = 0; - else - cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; } else { // Update rate_correction_factor unless cpi->active_worst_quality has changed. 
if (!active_worst_qchanged) @@ -3117,7 +3075,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, q_low = Q; } - while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10)) { + while ((Q > q_high) && (Retries < 10)) { vp9_update_rate_correction_factors(cpi, 0); Q = vp9_regulate_q(cpi, cpi->this_frame_target); Retries++; @@ -3133,16 +3091,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, else if (Q < q_low) Q = q_low; - // Clamp cpi->zbin_over_quant - cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? - zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? - zbin_oq_high : cpi->zbin_over_quant; - - // Loop = ((Q != last_q) || (last_zbin_oq != cpi->zbin_over_quant)) ? TRUE : FALSE; Loop = ((Q != last_q)) ? TRUE : FALSE; -#if RESET_FOREACH_FILTER - last_zbin_oq = cpi->zbin_over_quant; -#endif } else Loop = FALSE; @@ -3186,12 +3135,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, if (Loop == TRUE) { overshoot_seen = FALSE; undershoot_seen = FALSE; - zbin_oq_low = zbin_oq_low0; - zbin_oq_high = zbin_oq_high0; q_low = q_low0; q_high = q_high0; Q = Q0; - cpi->zbin_over_quant = last_zbin_oq = last_zbin_oq0; cpi->rate_correction_factor = rate_correction_factor0; cpi->gf_rate_correction_factor = gf_rate_correction_factor0; cpi->active_best_quality = active_best_quality0; @@ -3440,8 +3386,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, cpi->avg_q, vp9_convert_qindex_to_q(cpi->ni_av_qi), vp9_convert_qindex_to_q(cpi->cq_target_quality), - cpi->zbin_over_quant, - // cpi->avg_frame_qindex, cpi->zbin_over_quant, cpi->refresh_golden_frame, cpi->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, cpi->twopass.est_max_qcorrection_factor, @@ -3454,7 +3398,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, else fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d" "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f" - "%6d %5d %5d %5d %8d %8.2f %10d %10.3f" + "%5d %5d %8d %8.2f %10d %10.3f" "%8d %10d %10d %10d\n", 
cpi->common.current_video_frame, cpi->this_frame_target, cpi->projected_frame_size, @@ -3470,8 +3414,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, cpi->avg_q, vp9_convert_qindex_to_q(cpi->ni_av_qi), vp9_convert_qindex_to_q(cpi->cq_target_quality), - cpi->zbin_over_quant, - // cpi->avg_frame_qindex, cpi->zbin_over_quant, cpi->refresh_golden_frame, cpi->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, cpi->twopass.est_max_qcorrection_factor, diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h index 813003388..cfd761aa9 100644 --- a/vp9/encoder/vp9_onyx_int.h +++ b/vp9/encoder/vp9_onyx_int.h @@ -53,7 +53,6 @@ #define GF_ZEROMV_ZBIN_BOOST 12 #define LF_ZEROMV_ZBIN_BOOST 6 #define MV_ZBIN_BOOST 4 -#define ZBIN_OQ_MAX 192 #define VP9_TEMPORAL_ALT_REF 1 @@ -421,7 +420,6 @@ typedef struct VP9_COMP { double tot_q; double avg_q; - int zbin_over_quant; int zbin_mode_boost; int zbin_mode_boost_enabled; diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c index aed379a5b..8ae53e60e 100644 --- a/vp9/encoder/vp9_quantize.c +++ b/vp9/encoder/vp9_quantize.c @@ -484,8 +484,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) { // Y zbin_extra = (cpi->common.Y1dequant[QIndex][1] * - (cpi->zbin_over_quant + - cpi->zbin_mode_boost + + (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7; for (i = 0; i < 16; i++) { @@ -504,8 +503,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) { // UV zbin_extra = (cpi->common.UVdequant[QIndex][1] * - (cpi->zbin_over_quant + - cpi->zbin_mode_boost + + (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7; for (i = 16; i < 24; i++) { @@ -533,8 +531,7 @@ void vp9_update_zbin_extra(VP9_COMP *cpi, MACROBLOCK *x) { // Y zbin_extra = (cpi->common.Y1dequant[QIndex][1] * - (cpi->zbin_over_quant + - cpi->zbin_mode_boost + + (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7; for (i = 0; i < 16; i++) { x->block[i].zbin_extra = (int16_t)zbin_extra; @@ -542,8 +539,7 @@ void vp9_update_zbin_extra(VP9_COMP 
*cpi, MACROBLOCK *x) { // UV zbin_extra = (cpi->common.UVdequant[QIndex][1] * - (cpi->zbin_over_quant + - cpi->zbin_mode_boost + + (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7; for (i = 16; i < 24; i++) { diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c index b4adb67c0..5bb0ef59d 100644 --- a/vp9/encoder/vp9_ratectrl.c +++ b/vp9/encoder/vp9_ratectrl.c @@ -410,23 +410,6 @@ void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) { vp9_bits_per_mb(cpi->common.frame_type, Q)) * cpi->common.MBs) / (1 << BPER_MB_NORMBITS)); - // Make some allowance for cpi->zbin_over_quant - if (cpi->zbin_over_quant > 0) { - int Z = cpi->zbin_over_quant; - double Factor = 0.99; - double factor_adjustment = 0.01 / 256.0; // (double)ZBIN_OQ_MAX; - - while (Z > 0) { - Z--; - projected_size_based_on_q = - (int)(Factor * projected_size_based_on_q); - Factor += factor_adjustment; - - if (Factor >= 0.999) - Factor = 0.999; - } - } - // Work out a size correction factor. // if ( cpi->this_frame_target > 0 ) // correction_factor = (100 * cpi->projected_frame_size) / cpi->this_frame_target; @@ -488,9 +471,6 @@ int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame) { int bits_per_mb_at_this_q; double correction_factor; - // Reset Zbin OQ value - cpi->zbin_over_quant = 0; - // Select the appropriate correction factor based upon type of frame. if (cpi->common.frame_type == KEY_FRAME) correction_factor = cpi->key_frame_rate_correction_factor; @@ -525,46 +505,6 @@ int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame) { last_error = bits_per_mb_at_this_q - target_bits_per_mb; } while (++i <= cpi->active_worst_quality); - - // If we are at MAXQ then enable Q over-run which seeks to claw back additional bits through things like - // the RD multiplier and zero bin size. 
- if (Q >= MAXQ) { - int zbin_oqmax; - - double Factor = 0.99; - double factor_adjustment = 0.01 / 256.0; // (double)ZBIN_OQ_MAX; - - if (cpi->common.frame_type == KEY_FRAME) - zbin_oqmax = 0; // ZBIN_OQ_MAX/16 - else if (cpi->refresh_alt_ref_frame - || (cpi->refresh_golden_frame && !cpi->source_alt_ref_active)) - zbin_oqmax = 16; - else - zbin_oqmax = ZBIN_OQ_MAX; - - // Each incrment in the zbin is assumed to have a fixed effect on bitrate. This is not of course true. - // The effect will be highly clip dependent and may well have sudden steps. - // The idea here is to acheive higher effective quantizers than the normal maximum by expanding the zero - // bin and hence decreasing the number of low magnitude non zero coefficients. - while (cpi->zbin_over_quant < zbin_oqmax) { - cpi->zbin_over_quant++; - - if (cpi->zbin_over_quant > zbin_oqmax) - cpi->zbin_over_quant = zbin_oqmax; - - // Adjust bits_per_mb_at_this_q estimate - bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q); - Factor += factor_adjustment; - - if (Factor >= 0.999) - Factor = 0.999; - - if (bits_per_mb_at_this_q <= target_bits_per_mb) // Break out if we get down to the target rate - break; - } - - } - return Q; } diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index 14b9a13db..8d005d7fd 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -217,16 +217,6 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int QIndex) { cpi->RDMULT = compute_rd_mult(QIndex); - // Extend rate multiplier along side quantizer zbin increases - if (cpi->zbin_over_quant > 0) { - double oq_factor; - - // Experimental code using the same basic equation as used for Q above - // The units of cpi->zbin_over_quant are 1/128 of Q bin size - oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant); - cpi->RDMULT = (int)((double)cpi->RDMULT * oq_factor * oq_factor); - } - if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) { if (cpi->twopass.next_iiratio > 31) cpi->RDMULT += 
(cpi->RDMULT * rd_iifactor[31]) >> 4; @@ -1046,7 +1036,11 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, BLOCK *be, b->bmi.as_mode.first = mode; tx_type = get_tx_type_4x4(xd, b); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT4X4 + vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type); +#else vp9_fht(be->src_diff, 32, be->coeff, tx_type, 4); +#endif vp9_ht_quantize_b_4x4(be, b, tx_type); } else { x->fwd_txm4x4(be->src_diff, be->coeff, 32); @@ -1346,7 +1340,11 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) { TX_TYPE tx_type = get_tx_type_8x8(xd, b); if (tx_type != DCT_DCT) +#if CONFIG_INTHT + vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 32, tx_type); +#else vp9_fht(be->src_diff, 32, (x->block + idx)->coeff, tx_type, 8); +#endif else x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32); x->quantize_b_8x8(x->block + idx, xd->block + idx); @@ -1379,7 +1377,11 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, be = &x->block[ib + iblock[i]]; tx_type = get_tx_type_4x4(xd, b); if (tx_type != DCT_DCT) { +#if CONFIG_INTHT4X4 + vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type); +#else vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4); +#endif vp9_ht_quantize_b_4x4(be, b, tx_type); } else if (!(i & 1) && get_tx_type_4x4(xd, b + 1) == DCT_DCT) { x->fwd_txm8x4(be->src_diff, be->coeff, 32); |