
Sync opus codec to upstream git

Change-Id: I0cfcc0005c4ad7bfbb1aaf454188ce70fb043dc1
William Wilgus 2019-01-04 02:01:18 -06:00 committed by Solomon Peachy
parent 75d9393796
commit 14c6bb798d
286 changed files with 48931 additions and 1278 deletions


@@ -30,6 +30,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #endif

 #include "SigProc_FIX.h"
+#include "define.h"

 #define QA 24
 #define A_LIMIT SILK_FIX_CONST( 0.99975, QA )
@@ -38,119 +39,103 @@ POSSIBILITY OF SUCH DAMAGE.
 /* Compute inverse of LPC prediction gain, and */
 /* test if LPC coefficients are stable (all poles within unit circle) */
-static opus_int32 LPC_inverse_pred_gain_QA( /* O Returns inverse prediction gain in energy domain, Q30 */
-    opus_int32 A_QA[ 2 ][ SILK_MAX_ORDER_LPC ], /* I Prediction coefficients */
+static opus_int32 LPC_inverse_pred_gain_QA_c( /* O Returns inverse prediction gain in energy domain, Q30 */
+    opus_int32 A_QA[ SILK_MAX_ORDER_LPC ], /* I Prediction coefficients */
     const opus_int order /* I Prediction order */
 )
 {
     opus_int k, n, mult2Q;
-    opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp_QA;
-    opus_int32 *Aold_QA, *Anew_QA;
+    opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp1, tmp2;

-    Anew_QA = A_QA[ order & 1 ];
-
-    invGain_Q30 = (opus_int32)1 << 30;
+    invGain_Q30 = SILK_FIX_CONST( 1, 30 );
     for( k = order - 1; k > 0; k-- ) {
         /* Check for stability */
-        if( ( Anew_QA[ k ] > A_LIMIT ) || ( Anew_QA[ k ] < -A_LIMIT ) ) {
+        if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) {
             return 0;
         }

         /* Set RC equal to negated AR coef */
-        rc_Q31 = -silk_LSHIFT( Anew_QA[ k ], 31 - QA );
+        rc_Q31 = -silk_LSHIFT( A_QA[ k ], 31 - QA );

         /* rc_mult1_Q30 range: [ 1 : 2^30 ] */
-        rc_mult1_Q30 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 );
+        rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) );
         silk_assert( rc_mult1_Q30 > ( 1 << 15 ) ); /* reduce A_LIMIT if fails */
         silk_assert( rc_mult1_Q30 <= ( 1 << 30 ) );

-        /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */
-        mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) );
-        rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 );
-
         /* Update inverse gain */
         /* invGain_Q30 range: [ 0 : 2^30 ] */
         invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
         silk_assert( invGain_Q30 >= 0 );
         silk_assert( invGain_Q30 <= ( 1 << 30 ) );
+        if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) {
+            return 0;
+        }

-        /* Swap pointers */
-        Aold_QA = Anew_QA;
-        Anew_QA = A_QA[ k & 1 ];
+        /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */
+        mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) );
+        rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 );

         /* Update AR coefficient */
-        for( n = 0; n < k; n++ ) {
-            tmp_QA = Aold_QA[ n ] - MUL32_FRAC_Q( Aold_QA[ k - n - 1 ], rc_Q31, 31 );
-            Anew_QA[ n ] = MUL32_FRAC_Q( tmp_QA, rc_mult2 , mult2Q );
+        for( n = 0; n < (k + 1) >> 1; n++ ) {
+            opus_int64 tmp64;
+            tmp1 = A_QA[ n ];
+            tmp2 = A_QA[ k - n - 1 ];
+            tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp1,
+                  MUL32_FRAC_Q( tmp2, rc_Q31, 31 ) ), rc_mult2 ), mult2Q);
+            if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
+                return 0;
+            }
+            A_QA[ n ] = ( opus_int32 )tmp64;
+            tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp2,
+                  MUL32_FRAC_Q( tmp1, rc_Q31, 31 ) ), rc_mult2), mult2Q);
+            if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
+                return 0;
+            }
+            A_QA[ k - n - 1 ] = ( opus_int32 )tmp64;
         }
     }

     /* Check for stability */
-    if( ( Anew_QA[ 0 ] > A_LIMIT ) || ( Anew_QA[ 0 ] < -A_LIMIT ) ) {
+    if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) {
         return 0;
     }

     /* Set RC equal to negated AR coef */
-    rc_Q31 = -silk_LSHIFT( Anew_QA[ 0 ], 31 - QA );
+    rc_Q31 = -silk_LSHIFT( A_QA[ 0 ], 31 - QA );

     /* Range: [ 1 : 2^30 ] */
-    rc_mult1_Q30 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 );
+    rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) );

     /* Update inverse gain */
     /* Range: [ 0 : 2^30 ] */
     invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
-    silk_assert( invGain_Q30 >= 0 );
-    silk_assert( invGain_Q30 <= 1<<30 );
+    silk_assert( invGain_Q30 >= 0 );
+    silk_assert( invGain_Q30 <= ( 1 << 30 ) );
+    if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) {
+        return 0;
+    }

     return invGain_Q30;
 }

 /* For input in Q12 domain */
-opus_int32 silk_LPC_inverse_pred_gain( /* O Returns inverse prediction gain in energy domain, Q30 */
+opus_int32 silk_LPC_inverse_pred_gain_c( /* O Returns inverse prediction gain in energy domain, Q30 */
     const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */
     const opus_int order /* I Prediction order */
 )
 {
     opus_int k;
-    opus_int32 Atmp_QA[ 2 ][ SILK_MAX_ORDER_LPC ];
-    opus_int32 *Anew_QA;
+    opus_int32 Atmp_QA[ SILK_MAX_ORDER_LPC ];
     opus_int32 DC_resp = 0;

-    Anew_QA = Atmp_QA[ order & 1 ];
-
     /* Increase Q domain of the AR coefficients */
     for( k = 0; k < order; k++ ) {
         DC_resp += (opus_int32)A_Q12[ k ];
-        Anew_QA[ k ] = silk_LSHIFT32( (opus_int32)A_Q12[ k ], QA - 12 );
+        Atmp_QA[ k ] = silk_LSHIFT32( (opus_int32)A_Q12[ k ], QA - 12 );
     }

     /* If the DC is unstable, we don't even need to do the full calculations */
     if( DC_resp >= 4096 ) {
         return 0;
     }
-    return LPC_inverse_pred_gain_QA( Atmp_QA, order );
+    return LPC_inverse_pred_gain_QA_c( Atmp_QA, order );
 }

 #ifdef FIXED_POINT
+#if 0
 /* For input in Q24 domain */
 opus_int32 silk_LPC_inverse_pred_gain_Q24( /* O Returns inverse prediction gain in energy domain, Q30 */
     const opus_int32 *A_Q24, /* I Prediction coefficients [order] */
     const opus_int order /* I Prediction order */
 )
 {
     opus_int k;
     opus_int32 Atmp_QA[ 2 ][ SILK_MAX_ORDER_LPC ];
     opus_int32 *Anew_QA;

     Anew_QA = Atmp_QA[ order & 1 ];

     /* Increase Q domain of the AR coefficients */
     for( k = 0; k < order; k++ ) {
         Anew_QA[ k ] = silk_RSHIFT32( A_Q24[ k ], 24 - QA );
     }

     return LPC_inverse_pred_gain_QA( Atmp_QA, order );
 }
+#endif
 #endif
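
Both the old and the new LPC_inverse_pred_gain_QA implement the step-down (reverse Levinson-Durbin) recursion: the last coefficient of each filter order gives a reflection coefficient rc, the inverse prediction gain accumulates a factor (1 - rc^2), and the remaining coefficients are mapped down one order. The synced version does the coefficient update in place with 64-bit intermediates and saturation checks, and additionally rejects filters whose prediction power gain would exceed MAX_PREDICTION_POWER_GAIN. The floating-point sketch below is illustrative only and is not part of this commit; lpc_inverse_pred_gain_flt, STABILITY_LIMIT and MAX_POWER_GAIN are made-up stand-ins for the fixed-point routine, A_LIMIT and MAX_PREDICTION_POWER_GAIN.

/* Illustrative sketch, not part of the commit: floating-point version of
 * the step-down recursion performed by LPC_inverse_pred_gain_QA_c. */
#include <stdio.h>

#define STABILITY_LIMIT 0.99975 /* plays the role of A_LIMIT (Q24 in the real code) */
#define MAX_POWER_GAIN  1e4     /* stand-in for MAX_PREDICTION_POWER_GAIN */

/* Returns the inverse prediction gain, or 0.0 if the filter is deemed unstable. */
static double lpc_inverse_pred_gain_flt( double a[], int order )
{
    double inv_gain = 1.0;
    for( int k = order - 1; k > 0; k-- ) {
        /* Reflection coefficient of the order-(k+1) filter is the negated last coef */
        double rc = -a[ k ];
        if( rc > STABILITY_LIMIT || rc < -STABILITY_LIMIT ) {
            return 0.0;                       /* pole (almost) on or outside the unit circle */
        }
        double rc_mult1 = 1.0 - rc * rc;
        inv_gain *= rc_mult1;                 /* prediction gain is 1 / inv_gain */
        if( inv_gain < 1.0 / MAX_POWER_GAIN ) {
            return 0.0;                       /* gain too large to be usable */
        }
        /* Step down to the order-k filter, updating both ends in place */
        for( int n = 0; n < ( k + 1 ) >> 1; n++ ) {
            double tmp1 = a[ n ], tmp2 = a[ k - n - 1 ];
            a[ n ]         = ( tmp1 - rc * tmp2 ) / rc_mult1;
            a[ k - n - 1 ] = ( tmp2 - rc * tmp1 ) / rc_mult1;
        }
    }
    /* Final first-order stage */
    double rc = -a[ 0 ];
    if( rc > STABILITY_LIMIT || rc < -STABILITY_LIMIT ) {
        return 0.0;
    }
    inv_gain *= 1.0 - rc * rc;
    if( inv_gain < 1.0 / MAX_POWER_GAIN ) {
        return 0.0;
    }
    return inv_gain;
}

int main( void )
{
    /* Stable two-tap predictor x[n] ~ 0.9*x[n-1] - 0.2*x[n-2] */
    double a[ 2 ] = { 0.9, -0.2 };
    printf( "inverse prediction gain: %f\n", lpc_inverse_pred_gain_flt( a, 2 ) );
    return 0;
}

For a[] = { 0.9, -0.2 } the reflection coefficients come out as 0.2 and -0.75, so the sketch returns 0.96 * 0.4375 = 0.42 (a prediction gain of about 2.4); the fixed-point routine returns roughly the same value scaled to Q30. silk_LPC_inverse_pred_gain_c differs only in that it takes Q12 input, scales it to QA = 24, and rejects the filter early when the coefficients' DC response reaches 1.0 (4096 in Q12).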