From 6349ffdb677f58fd4b96d6b003eaaa9a01408a59 Mon Sep 17 00:00:00 2001 From: Sandesh Venkatesh Date: Mon, 11 Mar 2024 15:07:05 +0530 Subject: [PATCH] ivas_mdct_core_invQ fixed point changes [x] PLC related functions are still in float point GetPLCModeDecision_flt() and TonalMdctConceal_create_concealment_noise_ivas() --- lib_com/ivas_mdct_core_com.c | 49 +++ lib_com/ivas_prot_fx.h | 54 +++ lib_dec/TonalComponentDetection_fx.c | 76 ++++ lib_dec/er_util_fx.c | 139 ++++++- lib_dec/ivas_mct_dec.c | 132 ++++++ lib_dec/ivas_mdct_core_dec.c | 542 +++++++++++++++++++++++++ lib_dec/ivas_stereo_mdct_core_dec_fx.c | 153 +++++++ lib_dec/ivas_stereo_mdct_stereo_dec.c | 20 +- lib_dec/tonalMDCTconcealment_fx.c | 293 +++++++++++++ 9 files changed, 1450 insertions(+), 8 deletions(-) diff --git a/lib_com/ivas_mdct_core_com.c b/lib_com/ivas_mdct_core_com.c index 35dc6b8ba..b98f3a470 100644 --- a/lib_com/ivas_mdct_core_com.c +++ b/lib_com/ivas_mdct_core_com.c @@ -36,12 +36,61 @@ #include "ivas_prot.h" #include "prot.h" #include "wmc_auto.h" +#ifdef IVAS_FLOAT_FIXED +#include "prot_fx1.h" +#include "prot_fx2.h" +#include "ivas_prot_fx.h" +#endif /*--------------------------------------------------------------------------* * ivas_mdct_tcx10_bit_distribution() * * *--------------------------------------------------------------------------*/ +#ifdef IVAS_FLOAT_FIXED +void ivas_mdct_tcx10_bit_distribution_fx( + Word16 target_bitsTCX10[NB_DIV], /* o : target bit distribution */ + const Word16 nbits_tcx, /* i : TCX bits */ + const Word16 nTnsBitsTCX10[NB_DIV] /* i : TNS bits */ +) +{ + Word16 k; + Word16 min_required_bits = add( add( NBITS_TCX_GAIN, i_mult2( NOISE_FILL_RANGES, NBITS_NOISE_FILL_LEVEL ) ), SMDCT_MINIMUM_ARITH_BITS ); + + FOR( k = 0; k < 2; k++ ) + { + IF( nbits_tcx & 1 ) + { + target_bitsTCX10[k] = sub( add( shr( nbits_tcx, 1 ), ( k == 0 ? 
1 : 0 ) ), nTnsBitsTCX10[k] ); + move16(); + } + ELSE + { + target_bitsTCX10[k] = sub( shr( nbits_tcx, 1 ), nTnsBitsTCX10[k] ); + move16(); + } + } + + IF( LT_16( target_bitsTCX10[0], min_required_bits ) ) + { + /*redistribute bits*/ + target_bitsTCX10[1] = sub( target_bitsTCX10[1], sub( min_required_bits, target_bitsTCX10[0] ) ); + move16(); + target_bitsTCX10[0] = min_required_bits; + move16(); + } + ELSE IF( LT_16( target_bitsTCX10[1], min_required_bits ) ) + { + /*redistribute bits*/ + target_bitsTCX10[0] = sub( target_bitsTCX10[0], sub( min_required_bits, target_bitsTCX10[1] ) ); + move16(); + target_bitsTCX10[1] = min_required_bits; + move16(); + } + + return; +} +#endif void ivas_mdct_tcx10_bit_distribution( int16_t target_bitsTCX10[NB_DIV], /* o : target bit distribution */ diff --git a/lib_com/ivas_prot_fx.h b/lib_com/ivas_prot_fx.h index f7e49d822..071738534 100644 --- a/lib_com/ivas_prot_fx.h +++ b/lib_com/ivas_prot_fx.h @@ -1606,4 +1606,58 @@ void ivas_param_ism_config_fx( PARAM_ISM_CONFIG_HANDLE hParamIsm, /* i/o: IVAS Param ISM Config Structure */ const Word16 nchan_obj /* i : number of ISM channels */ ); + +void ivas_mdct_core_invQ_fx( + CPE_DEC_HANDLE hCPE, /* i/o: CPE handle */ + Word16 nTnsBitsTCX10[CPE_CHANNELS][NB_DIV], /* i : number of TNS bits */ + Word16 p_param[CPE_CHANNELS][NB_DIV], /* i : pointer to param buffer */ + Word16 param_lpc[CPE_CHANNELS][NPRM_LPC_NEW], /* i : lpc parameters */ + Word16 param[CPE_CHANNELS][DEC_NPRM_DIV * NB_DIV], /* i : param buffer */ + Word16 fUseTns[CPE_CHANNELS][NB_DIV], /* i : flag TNS enabled */ + STnsData tnsData[CPE_CHANNELS][NB_DIV], /* i : TNS parameter */ + Word32 *x_0[CPE_CHANNELS][NB_DIV], /* i/o: signal buffer */ + Word16 x_0_e[CPE_CHANNELS][NB_DIV], + Word32 *x[CPE_CHANNELS][NB_DIV], /* i/o: signal buffer */ + Word16 x_e[CPE_CHANNELS][NB_DIV], + Word16 x_len[CPE_CHANNELS][NB_DIV], + Word16 Aq[CPE_CHANNELS][( NB_SUBFR16k + 1 ) * ( M + 1 )], /* i : LP coefficients */ + Word16 ms_mask[NB_DIV][MAX_SFB], /* i : M/S mask */ + const Word16 MCT_flag /* i : hMCT handle allocated (1) or not (0) */ +); + +void ivas_mdct_tcx10_bit_distribution_fx( + Word16 target_bitsTCX10[NB_DIV], /* o : target bit distribution */ + const Word16 nbits_tcx, /* i : TCX bits */ + const Word16 nTnsBitsTCX10[NB_DIV] /* i : TNS bits */ +); + +void TonalMDCTConceal_Detect_ivas_fx( + const TonalMDCTConcealPtr hTonalMDCTConc, + const Word32 pitchLag, + Word16 * numIndices, + const PsychoacousticParameters* psychParamsCurrent +); + +Word16 GetPLCModeDecision_ivas_fx( + Decoder_State *st /* i/o: decoder memory state pointer */ +); + +void ivas_DetectTonalComponents_fx( + Word16 indexOfTonalPeak[], + Word16 lowerIndex[], + Word16 upperIndex[], + Word16* pNumIndexes, + const Word32 lastPitchLag, + const Word32 currentPitchLag, + const Word16 lastMDCTSpectrum[], + const Word16 lastMDCTSpectrum_exp, + const Word16 scaleFactors[], + const Word16 scaleFactors_exp[], + const Word16 scaleFactors_max_e, + const Word32 secondLastPowerSpectrum[], + const Word16 nSamples, + const Word16 nSamplesCore, + Word16 floorPowerSpectrum, /* i: lower limit for power spectrum bins */ + const PsychoacousticParameters* psychParamsCurrent +); #endif diff --git a/lib_dec/TonalComponentDetection_fx.c b/lib_dec/TonalComponentDetection_fx.c index eb3ba5f01..5ca18f758 100644 --- a/lib_dec/TonalComponentDetection_fx.c +++ b/lib_dec/TonalComponentDetection_fx.c @@ -13,6 +13,10 @@ #include "rom_basop_util.h" #include "rom_com.h" #include "basop_util.h" +#ifdef IVAS_FLOAT_FIXED +#include 
"prot_fx2.h" +#include "ivas_prot_fx.h" +#endif /*---------------------------------------------------------------------* @@ -37,6 +41,78 @@ static void findTonalComponents(Word16 * indexOfTonalPeak, Word16 * lowerIndex, * store them in indexOfTonalPeak. Updates lowerIndex, upperIndex, * pNumIndexes accordingly. *-------------------------------------------------------------------*/ +#ifdef IVAS_FLOAT_FIXED +void ivas_DetectTonalComponents_fx( + Word16 indexOfTonalPeak[], + Word16 lowerIndex[], + Word16 upperIndex[], + Word16* pNumIndexes, + const Word32 lastPitchLag, + const Word32 currentPitchLag, + const Word16 lastMDCTSpectrum[], + const Word16 lastMDCTSpectrum_exp, + const Word16 scaleFactors[], + const Word16 scaleFactors_exp[], + const Word16 scaleFactors_max_e, + const Word32 secondLastPowerSpectrum[], + const Word16 nSamples, + const Word16 nSamplesCore, + Word16 floorPowerSpectrum, /* i: lower limit for power spectrum bins */ + const PsychoacousticParameters* psychParamsCurrent +) +{ + Word16 F0; + Word16 thresholdModification[L_FRAME_MAX], lastMDCTSpect_exp; + Word32 pScaledMdctSpectrum[L_FRAME_MAX]; + Word16 nBands; + Word32 sns_int_scf_fx[FDNS_NPTS]; + Word16 q_pScaledMdctSpectrum; + + FOR(Word16 i = 0; i < nSamples; i++) + { + pScaledMdctSpectrum[i] = L_shl(lastMDCTSpectrum[i], 16); // Q31 - lastMDCTSpectrum_exp + } + + IF (psychParamsCurrent == NULL) + { + nBands = FDNS_NPTS; + //PMT("add nBands argument to mdct_shaping_16") + + mdct_shaping_16(lastMDCTSpectrum, nSamplesCore, nSamples, scaleFactors, scaleFactors_exp, scaleFactors_max_e, pScaledMdctSpectrum); + lastMDCTSpect_exp = add(lastMDCTSpectrum_exp, scaleFactors_max_e); + } + ELSE + { + FOR(Word16 i = 0; i < FDNS_NPTS; i++) + { + sns_int_scf_fx[i] = L_shl(scaleFactors[i], 1 + scaleFactors_exp[i]); // Q16 + } + q_pScaledMdctSpectrum = 31 - lastMDCTSpectrum_exp; + sns_shape_spectrum_fx(pScaledMdctSpectrum, &q_pScaledMdctSpectrum, psychParamsCurrent, sns_int_scf_fx,16, nSamplesCore); + q_pScaledMdctSpectrum = q_pScaledMdctSpectrum + 1; + Word16 tmp_e = 31 - q_pScaledMdctSpectrum; + nBands = psychParamsCurrent->nBands; + // till nSamplesCore different Q and nSamples - nSamplesCore in different Q + Scale_sig32(pScaledMdctSpectrum + nSamplesCore, nSamples - nSamplesCore, lastMDCTSpectrum_exp - tmp_e); + lastMDCTSpect_exp = 31 - q_pScaledMdctSpectrum; + } + + for (int i = nSamplesCore; i < nSamples; ++i) { + Word64 tmp = W_mult_32_32(pScaledMdctSpectrum[i], sns_int_scf_fx[nBands - 1]); // q_pScaledMdctSpectrum + 16 + 1 + pScaledMdctSpectrum[i] = W_extract_h(W_shl(tmp, Q15)); // q_pScaledMdctSpectrum + } + + /* Find peak candidates in the last frame. 
*/ + findCandidates(nSamples, pScaledMdctSpectrum, lastMDCTSpect_exp, thresholdModification, floorPowerSpectrum); + + /* Refine peak candidates using the pitch information */ + RefineThresholdsUsingPitch(nSamples, nSamplesCore, secondLastPowerSpectrum, lastPitchLag, currentPitchLag, &F0, thresholdModification); + + /* Find peaks in the second last frame */ + findTonalComponents(indexOfTonalPeak, lowerIndex, upperIndex, pNumIndexes, nSamples, secondLastPowerSpectrum, F0, thresholdModification); +} +#endif + void DetectTonalComponents( Word16 indexOfTonalPeak[], Word16 lowerIndex[], diff --git a/lib_dec/er_util_fx.c b/lib_dec/er_util_fx.c index bb4bed299..85474cc90 100644 --- a/lib_dec/er_util_fx.c +++ b/lib_dec/er_util_fx.c @@ -9,7 +9,9 @@ #include "prot_fx2.h" #include "basop_util.h" #include - +#ifdef IVAS_FLOAT_FIXED +#include "ivas_prot_fx.h" +#endif /* static void setnoiseLevelMemory() @@ -364,6 +366,141 @@ void highPassFiltering( * PLC: [Common: mode decision] * PLC: Decide which Concealment to use. Update pitch lags if needed *----------------------------------------------------------------------------------*/ +#ifdef IVAS_FLOAT_FIXED +Word16 GetPLCModeDecision_ivas_fx( + Decoder_State *st /* i/o: decoder memory state pointer */ +) +{ + Word16 /*int*/ core; + Word16 numIndices = 0; + TCX_DEC_HANDLE hTcxDec; + + hTcxDec = st->hTcxDec; + + + IF( EQ_16(st->flagGuidedAcelp,1)) + { + st->old_pitch_buf_fx[2*st->nb_subfr] = L_deposit_h(st->guidedT0); + st->old_pitch_buf_fx[2*st->nb_subfr+1] = L_deposit_h(st->guidedT0); + st->mem_pitch_gain[0] = st->mem_pitch_gain[1] = 16384/*1.f Q14*/;/*Q14*/ + } + st->plc_use_future_lag = 0; + move16(); + test(); + test(); + if(( st->last_core > ACELP_CORE && hTcxDec->tcxltp_last_gain_unmodified!=0 ) + || ( EQ_16(st->flagGuidedAcelp,1) ) + ) + { + /* no updates needed here, because already updated in last good frame */ + st->plc_use_future_lag = 1; + move16(); + } + + IF (EQ_16(st->last_core,-1)) + { + core = TCX_20_CORE; + move16(); + st->last_core = ACELP_CORE; + move16(); + if(st->Opt_AMR_WB) + { + core = ACELP_CORE; + move16(); + } + st->tonal_mdct_plc_active = 0; + move16(); + } + ELSE + { + core = ACELP_CORE; + move16(); + if (GT_16(st->nbLostCmpt,1)) + { + core = st->last_core_bfi; + move16(); + } + IF (EQ_16(st->nbLostCmpt,1)) + { + st->tonal_mdct_plc_active = 0; + move16(); + test(); + test(); + test(); + IF ( !(st->rf_flag && st->use_partial_copy && (EQ_16(st->rf_frame_type, RF_TCXTD1)||EQ_16(st->rf_frame_type,RF_TCXTD2)))) + { + test(); + test(); + test(); + test(); + test(); + test(); +//PMT("handle to tonalMDCTconceal is missing") +//#ifdef ADD_IVAS_HTONALMDCTCONC + IF ((st->hTonalMDCTConc != NULL && EQ_16(st->last_core,TCX_20_CORE))&&(EQ_16(st->second_last_core,TCX_20_CORE)) + && ((LE_32(st->old_fpitch,L_deposit_h(shr(st->L_frame,1)))) + || (LE_16(hTcxDec->tcxltp_last_gain_unmodified,13107/*0.4f Q15*/))) + /* it is fine to call the detection even if no ltp information + is available, meaning that st->old_fpitch == + st->tcxltp_second_last_pitch == st->L_frame */ + && (EQ_32(st->old_fpitch, hTcxDec->tcxltp_second_last_pitch)) + && !st->last_tns_active && !st->second_last_tns_active) + { + Word32 pitch; + + + pitch = L_deposit_h(0); + if(hTcxDec->tcxltp_last_gain_unmodified > 0) + { + pitch = L_add(st->old_fpitch, 0); + } + //TonalMDCTConceal_Detect_ivas_fx(&st->tonalMDCTconceal, pitch, &numIndices + // , (st->element_mode == IVAS_CPE_MDCT ? 
&(st->hTcxCfg->psychParamsTCX20) : st->hTcxCfg->psychParamsCurrent) + //); + + TonalMDCTConceal_Detect_ivas_fx(st->hTonalMDCTConc, pitch, &numIndices + , (st->element_mode == IVAS_CPE_MDCT ? &(st->hTcxCfg->psychParamsTCX20) : st->hTcxCfg->psychParamsCurrent) + ); + + test(); + test(); + test(); + test(); + test(); + test(); + IF ((GT_16(numIndices,10)) + || ((GT_16(numIndices,5) ) + && (LT_32(L_abs(L_sub(hTcxDec->tcxltp_third_last_pitch, hTcxDec->tcxltp_second_last_pitch)),32768l/*0.5f Q16*/) )) + || ((numIndices > 0) && ((LE_16(st->last_good_fx,UNVOICED_TRANSITION))||(LE_16(hTcxDec->tcxltp_last_gain_unmodified,13107/*0.4f Q15*/))) + && (LT_32(L_abs(L_sub(hTcxDec->tcxltp_third_last_pitch, hTcxDec->tcxltp_second_last_pitch)),32768l/*0.5f Q16*/) ))) + { + core = TCX_20_CORE; + move16(); + st->tonal_mdct_plc_active = 1; + move16(); + } + ELSE IF (LE_16(st->last_good_fx,UNVOICED_TRANSITION)||LE_16(hTcxDec->tcxltp_last_gain_unmodified,13107/*0.4f Q15*/)) + { + core = TCX_20_CORE; + move16(); + } + } + ELSE IF (st->last_core != ACELP_CORE) + { + test(); + if (LE_16(st->last_good_fx,UNVOICED_TRANSITION)||LE_16(hTcxDec->tcxltp_last_gain_unmodified,13107/*0.4f Q15*/)) + { + core = st->last_core; + move16(); + } + } + } + } + } + return core; +} +#endif + Word16 GetPLCModeDecision( Decoder_State *st /* i/o: decoder memory state pointer */ ) diff --git a/lib_dec/ivas_mct_dec.c b/lib_dec/ivas_mct_dec.c index 231501857..046640f03 100644 --- a/lib_dec/ivas_mct_dec.c +++ b/lib_dec/ivas_mct_dec.c @@ -184,8 +184,140 @@ ivas_error ivas_mct_dec( set_zero( x[n][1], L_FRAME48k / 2 ); } +#ifdef IVAS_FLOAT_FIXED +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + hCPE = st_ivas->hCPE[cpe_id]; + sts = hCPE->hCoreCoder; + + Word16 Aq_fx[MCT_MAX_BLOCKS][CPE_CHANNELS][( NB_SUBFR16k + 1 ) * ( M + 1 )]; + Word16 ch, k, l, i, j; + + Word32 *x_fx[CPE_CHANNELS][NB_DIV]; + Word16 x_e[CPE_CHANNELS][NB_DIV]; + Word16 x_len[CPE_CHANNELS][NB_DIV] = { 0 }; + + FOR( i = 0; i < CPE_CHANNELS; ++i ) + { + x_fx[i][0] = malloc( L_FRAME48k * sizeof( Word32 ) ); + x_fx[i][1] = x_fx[i][0] + L_FRAME48k / 2; + floatToFixed_arrL( x[i][0], x_fx[i][0], 0, L_FRAME48k / 2 ); + floatToFixed_arrL( x[i][1], x_fx[i][1], 0, L_FRAME48k / 2 ); + + FOR( j = 0; j < NB_DIV; ++j ) + { + x_e[i][j] = 31; + } + } + + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->mct_chan_mode != MCT_CHAN_MODE_IGNORE ) + { + sts[ch]->gamma = float_to_fix16( sts[ch]->gamma_float, Q15 ); + sts[ch]->preemph_fac = float_to_fix16( sts[ch]->preemph_fac_float, Q15 ); + sts[ch]->Mode2_lp_gainp = float_to_fix( sts[ch]->lp_gainp, Q16 ); + sts[ch]->stab_fac_fx = float_to_fix16( sts[ch]->stab_fac, Q15 ); + sts[ch]->hTcxCfg->na_scale = float_to_fix16( sts[ch]->hTcxCfg->na_scale_flt, Q15 ); + sts[ch]->hTcxCfg->sq_rounding = float_to_fix16( sts[ch]->hTcxCfg->sq_rounding_flt, Q15 ); + sts[ch]->hTcxLtpDec->tcxltp_gain = float_to_fix16( sts[ch]->hTcxLtpDec->tcxltp_gain_float, Q15 ); + sts[ch]->inv_gamma = float_to_fix16( 1 / sts[ch]->gamma_float, Q14 ); + sts[ch]->hTcxCfg->preemph_fac = float_to_fix16( sts[ch]->hTcxCfg->preemph_fac_flt, Q15 ); + f2me_16( sts[ch]->last_gain_syn_deemph_float, &sts[ch]->last_gain_syn_deemph, &sts[ch]->last_gain_syn_deemph_e ); + f2me_16( sts[ch]->last_concealed_gain_syn_deemph_float, &sts[ch]->last_concealed_gain_syn_deemph, &sts[ch]->last_concealed_gain_syn_deemph_e ); + f2me_16( sts[ch]->hTcxDec->old_gaintcx_bfi_float, &sts[ch]->hTcxDec->old_gaintcx_bfi, &sts[ch]->hTcxDec->old_gaintcx_bfi_e ); + floatToFixed_arr( Aq[cpe_id][ch], Aq_fx[cpe_id][ch], Q12, ( 
NB_SUBFR16k + 1 ) * ( M + 1 ) ); + + sts[ch]->hTcxDec->tcxltp_last_gain_unmodified = float_to_fix16( sts[ch]->hTcxDec->tcxltp_last_gain_unmodified_float, Q15 ); + sts[ch]->old_fpitch = float_to_fix( sts[ch]->old_fpitch_float, Q16 ); + sts[ch]->hTonalMDCTConc->lastPitchLag = float_to_fix( sts[ch]->hTonalMDCTConc->lastPitchLag_float, Q16 ); + // u8bit to 16bit + FOR( l = 0; l < IGF_START_MX; l++ ) + { + sts[ch]->hIGFDec->infoTCXNoise_evs[l] = (Word16) sts[ch]->hIGFDec->infoTCXNoise[l]; + } + FOR( l = 0; l < N_LTP_GAIN_MEMS; l++ ) + { + sts[ch]->hTcxDec->ltpGainMemory_fx[l] = float_to_fix16( sts[ch]->hTcxDec->ltpGainMemory[l], Q15 ); + } + sts[ch]->hTcxDec->cummulative_damping_tcx = float_to_fix16( sts[ch]->hTcxDec->cummulative_damping_tcx_float, Q15 ); + } + } + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->rate_switching_reset ) + { + floatToFixed_arr( sts[ch]->lsp_old, sts[ch]->lsp_old_fx, Q15, M ); + } + } +#endif + ivas_mdct_core_invQ_fx( st_ivas->hCPE[cpe_id], nTnsBitsTCX10[cpe_id], p_param[cpe_id], param_lpc[cpe_id], param[cpe_id], + fUseTns[cpe_id], tnsData[cpe_id], x_fx, x_e, x_fx, x_e, x_len, Aq_fx[cpe_id], NULL, 1 ); +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->mct_chan_mode == MCT_CHAN_MODE_IGNORE ) + { + me2f_buf( x_fx[ch][0], x_e[ch][0], x[ch][0], sts[ch]->hTcxCfg->tcx_coded_lines ); + sts[ch]->hTcxDec->damping_float = fix16_to_float( sts[ch]->hTcxDec->damping, Q14 ); + sts[ch]->hTcxDec->gainHelper_float = me2f_16( sts[ch]->hTcxDec->gainHelper, sts[ch]->hTcxDec->gainHelper_e ); + sts[ch]->hTcxDec->stepCompensate_float = me2f_16( sts[ch]->hTcxDec->stepCompensate, sts[ch]->hTcxDec->stepCompensate_e ); + } + IF( sts[ch]->mct_chan_mode != MCT_CHAN_MODE_IGNORE ) + { + sts[ch]->lp_gainp = fix_to_float( sts[ch]->Mode2_lp_gainp, Q16 ); + sts[ch]->hTonalMDCTConc->lastPitchLag_float = fix_to_float( sts[ch]->hTonalMDCTConc->lastPitchLag, Q16 ); + sts[ch]->hTonalMDCTConc->nFramesLost_float = fix16_to_float( sts[ch]->hTonalMDCTConc->nFramesLost, Q1 ); + sts[ch]->hTcxDec->damping_float = fix16_to_float( sts[ch]->hTcxDec->damping, Q14 ); + sts[ch]->hTcxDec->stepCompensate_float = me2f_16( sts[ch]->hTcxDec->stepCompensate, sts[ch]->hTcxDec->stepCompensate_e ); + sts[ch]->hTcxDec->gainHelper_float = me2f_16( sts[ch]->hTcxDec->gainHelper, sts[ch]->hTcxDec->gainHelper_e ); + sts[ch]->last_concealed_gain_syn_deemph_float = me2f_16( sts[ch]->last_concealed_gain_syn_deemph, sts[ch]->last_concealed_gain_syn_deemph_e ); + sts[ch]->last_gain_syn_deemph_float = me2f_16( sts[ch]->last_gain_syn_deemph, sts[ch]->last_gain_syn_deemph_e ); + sts[ch]->hTcxDec->old_gaintcx_bfi_float = me2f_16( sts[ch]->hTcxDec->old_gaintcx_bfi, sts[ch]->hTcxDec->old_gaintcx_bfi_e ); + sts[ch]->stab_fac = fix16_to_float( sts[ch]->stab_fac_fx, Q15 ); + fixedToFloat_arr( Aq_fx[cpe_id][ch], Aq[cpe_id][ch], Q12, ( NB_SUBFR16k + 1 ) * ( M + 1 ) ); + // 16bit to u8bit + FOR( Word16 l = 0; l < IGF_START_MX; l++ ) + { + sts[ch]->hIGFDec->infoTCXNoise[l] = (uint8_t) sts[ch]->hIGFDec->infoTCXNoise_evs[l]; + } + FOR( Word16 l = 0; l < N_LTP_GAIN_MEMS; l++ ) + { + sts[ch]->hTcxDec->ltpGainMemory[l] = fix16_to_float( sts[ch]->hTcxDec->ltpGainMemory_fx[l], Q15 ); + } + + Word16 subFrames = ( sts[ch]->core == TCX_10_CORE ) ? 
NB_DIV : 1; + FOR( k = 0; k < subFrames; ++k ) + { + me2f_buf( x_fx[ch][k], x_e[ch][k], x[ch][k], x_len[ch][k] ); + // To be made into Q11 + // me2f_buf(x_fx[ch][k], x_e[ch][k], x[ch][k], L_FRAME48k / 2); + } + IF( !sts[0]->bfi || ( sts[0]->bfi && sts[ch]->core != ACELP_CORE ) ) + { + me2f_buf( sts[ch]->hIGFDec->virtualSpec, sts[ch]->hIGFDec->virtualSpec_e, sts[ch]->hIGFDec->virtualSpec_float, ( N_MAX_TCX - IGF_START_MN ) ); + } + sts[ch]->hTcxDec->cummulative_damping_tcx_float = fix16_to_float( sts[ch]->hTcxDec->cummulative_damping_tcx, Q15 ); + } + } + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->rate_switching_reset ) + { + Word16 old_Aq_12_8_e = norm_s( sts[ch]->old_Aq_12_8_fx[0] ); + fixedToFloat_arr( sts[ch]->old_Aq_12_8_fx, sts[ch]->old_Aq_12_8, ( 15 - old_Aq_12_8_e ), M + 1 ); + } + } + + FOR( i = 0; i < CPE_CHANNELS; ++i ) + { + free( x_fx[i][0] ); + } + +#endif +#else ivas_mdct_core_invQ( st_ivas->hCPE[cpe_id], nTnsBitsTCX10[cpe_id], p_param[cpe_id], param_lpc[cpe_id], param[cpe_id], fUseTns[cpe_id], tnsData[cpe_id], x, x, Aq[cpe_id], NULL, 1 ); +#endif st_ivas->BER_detect |= st_ivas->hCPE[cpe_id]->hCoreCoder[0]->BER_detect; st_ivas->BER_detect |= st_ivas->hCPE[cpe_id]->hCoreCoder[1]->BER_detect; diff --git a/lib_dec/ivas_mdct_core_dec.c b/lib_dec/ivas_mdct_core_dec.c index 88cd10fd1..fc43c4f22 100644 --- a/lib_dec/ivas_mdct_core_dec.c +++ b/lib_dec/ivas_mdct_core_dec.c @@ -341,6 +341,77 @@ static void dec_prm_tcx_sidebits( * * decode TCX core parameters *-----------------------------------------------------------------*/ +#ifdef IVAS_FLOAT_FIXED +static void dec_prm_tcx_spec_fx( + Decoder_State *st, /* i/o: decoder memory state */ + Word16 param[], /* o : decoded parameters */ + Word16 *total_nbbits, /* i/o: number of bits / decoded bits */ + Word16 *bitsRead, /* o : number of read bits */ + Word16 p_param[NB_DIV], /* o : pointer to parameters for next round of bs reading*/ + Word16 nTnsBitsTCX10[NB_DIV] /* i : number of TNS bits per TCX10 subframe */ +) +{ + Word16 nSubframes; + Word16 start_bit_pos, nbits_tcx; + Word16 nf_bits; + Word16 target_bitsTCX10[NB_DIV]; + Word16 indexBuffer[N_MAX + 1]; + CONTEXT_HM_CONFIG hm_cfg; + Word16 tmp; + + /*--------------------------------------------------------------------------------* + * Initialization + *--------------------------------------------------------------------------------*/ + + hm_cfg.indexBuffer = indexBuffer; + + start_bit_pos = st->next_bit_pos; + move16(); + + nf_bits = 0; + move16(); + nbits_tcx = 0; + move16(); + + nSubframes = ( EQ_16( st->core, TCX_10_CORE ) ) ? 
NB_DIV : 1; + move16(); + + /*calculate TCX10 target bits before to assure minimum amount is distributed between subframes*/ + IF( EQ_16( st->core, TCX_10_CORE ) && EQ_16( st->element_mode, IVAS_CPE_MDCT ) ) + { + Word16 nTnsBitsTCX10Tmp[NB_DIV]; + nTnsBitsTCX10Tmp[0] = nTnsBitsTCX10[0]; + move16(); + nTnsBitsTCX10Tmp[1] = nTnsBitsTCX10[1]; + move16(); + + /*compute target bits */ + tmp = i_mult2( nSubframes, add( NBITS_TCX_GAIN, i_mult2( NOISE_FILL_RANGES, NBITS_NOISE_FILL_LEVEL ) ) ); + nbits_tcx = add( add( add( st->bits_frame_channel, nTnsBitsTCX10[0] ), nTnsBitsTCX10[1] ), tmp ); + ivas_mdct_tcx10_bit_distribution_fx( target_bitsTCX10, nbits_tcx, nTnsBitsTCX10Tmp ); + } + + /*--------------------------------------------------------------------------------* + * TCX20/TCX10 parameters + *--------------------------------------------------------------------------------*/ + + getTCXparam( st, st, hm_cfg, param, 0, 0, NULL, p_param, target_bitsTCX10, 1 ); + + nf_bits = i_mult2( nSubframes, ( add( NBITS_TCX_GAIN, i_mult2( NOISE_FILL_RANGES, NBITS_NOISE_FILL_LEVEL ) ) ) ); + + IF( LT_16( add( sub( *total_nbbits, bitsRead[0] ), nf_bits ), sub( st->next_bit_pos, start_bit_pos ) ) ) + { + st->BER_detect = 1; + move16(); + st->next_bit_pos = add( start_bit_pos, sub( *total_nbbits, bitsRead[0] ) ); + } + + bitsRead[0] = sub( st->next_bit_pos, start_bit_pos ); + move16(); + + return; +} +#endif static void dec_prm_tcx_spec( Decoder_State *st, /* i/o: decoder memory state */ @@ -799,6 +870,477 @@ void ivas_mdct_dec_side_bits_frame_channel( * * Inverse processing steps up to inverse quantization *-----------------------------------------------------------------*/ +#ifdef IVAS_FLOAT_FIXED +void ivas_mdct_core_invQ_fx( + CPE_DEC_HANDLE hCPE, /* i/o: CPE handle */ + Word16 nTnsBitsTCX10[CPE_CHANNELS][NB_DIV], /* i : number of TNS bits */ + Word16 p_param[CPE_CHANNELS][NB_DIV], /* i : pointer to param buffer */ + Word16 param_lpc[CPE_CHANNELS][NPRM_LPC_NEW], /* i : lpc parameters */ + Word16 param[CPE_CHANNELS][DEC_NPRM_DIV * NB_DIV], /* i : param buffer */ + Word16 fUseTns[CPE_CHANNELS][NB_DIV], /* i : flag TNS enabled */ + STnsData tnsData[CPE_CHANNELS][NB_DIV], /* i : TNS parameter */ + Word32 *x_0[CPE_CHANNELS][NB_DIV], /* i/o: signal buffer */ + Word16 x_0_e[CPE_CHANNELS][NB_DIV], + Word32 *x[CPE_CHANNELS][NB_DIV], /* i/o: signal buffer */ + Word16 x_e[CPE_CHANNELS][NB_DIV], + Word16 x_len[CPE_CHANNELS][NB_DIV], + Word16 Aq[CPE_CHANNELS][( NB_SUBFR16k + 1 ) * ( M + 1 )], /* i : LP coefficients */ + Word16 ms_mask[NB_DIV][MAX_SFB], /* i : M/S mask */ + const Word16 MCT_flag /* i : hMCT handle allocated (1) or not (0) */ +) +{ + Word16 ch, bfi, k; + Decoder_State **sts, *st; + /* bitstream */ + Word16 total_nbbits[CPE_CHANNELS]; + Word16 bitsRead[CPE_CHANNELS]; + Word16 *prm[CPE_CHANNELS]; + /* LPC */ + Word16 Aind[CPE_CHANNELS][M + 1]; + Word32 sns[CPE_CHANNELS][NB_DIV][M]; + /* TCX */ + Word16 xn_buf[L_MDCT_OVLP_MAX + L_FRAME_PLUS + L_MDCT_OVLP_MAX]; + Word16 tcx_offset[CPE_CHANNELS]; + Word16 tcx_offsetFB[CPE_CHANNELS]; + Word16 left_rect[CPE_CHANNELS]; + Word16 L_spec[CPE_CHANNELS]; + /* Framing */ + Word16 L_frame[CPE_CHANNELS], nSubframes[CPE_CHANNELS], L_frameTCX[CPE_CHANNELS]; + Word16 tmp_concealment_method; + Word16 gain_tcx = 0, gain_tcx_e = 0; + Word16 nf_seed; + const Word16 *prm_sqQ; + Word16 L_frameTCX_global[CPE_CHANNELS]; + Word16 tmp_ms_sig[CPE_CHANNELS][N_MAX]; + Word16 tmp_ms_sig_e[CPE_CHANNELS]; +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + // PLC case kept + float 
concealment_noise[CPE_CHANNELS][L_FRAME48k]; +#endif + Word32 concealment_noise_fx[CPE_CHANNELS][L_FRAME48k]; + Word16 concealment_noise_e[CPE_CHANNELS] = { 0 }, concealment_noise_len[CPE_CHANNELS]; + TONALMDCTCONC_NOISE_GEN_MODE noise_gen_mode_bfi; + + Word16 q_l = 0, q_r = 0; + Word16 q_snsq = 0; + Word32 *spectralData_tmp[CPE_CHANNELS]; + Word16 shift; + Word16 common_exp = 0; + + FOR( k = 0; k < CPE_CHANNELS; ++k ) + { + spectralData_tmp[k] = malloc( N_MAX * sizeof( Word32 ) ); + } + + push_wmops( "mdct_core_invQ" ); + sts = hCPE->hCoreCoder; + bfi = sts[0]->bfi; + noise_gen_mode_bfi = NOISE_GEN_MODE_UNDEF; + move32(); + + set16_fx( xn_buf, 0, L_MDCT_OVLP_MAX + L_FRAME_PLUS + L_MDCT_OVLP_MAX ); + set_s( total_nbbits, 0, CPE_CHANNELS ); + set_s( bitsRead, 0, CPE_CHANNELS ); + tmp_concealment_method = 0; + move16(); + + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + sts[ch]->enablePlcWaveadjust = 0; + move16(); + } + + /* temporarily restore LR representation of previous frame for PLC mode decision (done on the individual channels) */ + test(); + test(); + test(); + IF( bfi && !MCT_flag && ( GT_16( hCPE->hStereoMdct->mdct_stereo_mode[0], SMDCT_DUAL_MONO ) || GT_16( hCPE->hStereoMdct->mdct_stereo_mode[1], SMDCT_DUAL_MONO ) ) ) + { + L_frameTCX[0] = sts[0]->L_frameTCX_past; + move16(); + L_frameTCX[1] = sts[1]->L_frameTCX_past; + move16(); + + Copy( sts[0]->hTonalMDCTConc->lastBlockData.spectralData, tmp_ms_sig[0], L_frameTCX[0] ); + Copy( sts[1]->hTonalMDCTConc->lastBlockData.spectralData, tmp_ms_sig[1], L_frameTCX[1] ); + tmp_ms_sig_e[0] = sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp; + move16(); + tmp_ms_sig_e[1] = sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp; + move16(); + + common_exp = s_max( sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp, sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp ); + + Copy_Scale_sig_16_32( sts[0]->hTonalMDCTConc->lastBlockData.spectralData, spectralData_tmp[0], L_FRAME_MAX, 15 - ( common_exp - sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp ) ); // 30 - spectral_exp1 + Copy_Scale_sig_16_32( sts[1]->hTonalMDCTConc->lastBlockData.spectralData, spectralData_tmp[1], L_FRAME_MAX, 15 - ( common_exp - sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp ) ); // 30 - spectral_exp2 + + sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp = sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp = common_exp; + move16(); + move16(); + + q_l = sub( 30, sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp ); + q_r = sub( 30, sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp ); + + /* both input in same Q */ + stereo_decoder_tcx_fx( hCPE->hStereoMdct, ms_mask, x_0[1], &spectralData_tmp[0], &spectralData_tmp[1], &hCPE->hStereoMdct->mdct_stereo_mode[0], sts[0]->core, sts[1]->core, sts[0]->igf, L_frameTCX[0], L_frameTCX[1], 0, sts[0]->last_core, sts[1]->last_core, 1, &q_r, &q_l ); + + Copy_Scale_sig_32_16( spectralData_tmp[0], sts[0]->hTonalMDCTConc->lastBlockData.spectralData, L_FRAME_MAX, -15 ); + Copy_Scale_sig_32_16( spectralData_tmp[1], sts[1]->hTonalMDCTConc->lastBlockData.spectralData, L_FRAME_MAX, -15 ); + sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp = sub( 30, q_l ); + sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp = sub( 30, q_r ); + } + + IF( bfi ) + { + test(); + test(); + IF( EQ_16( sts[0]->core, sts[1]->core ) ) + { + noise_gen_mode_bfi = EQUAL_CORES; + move32(); + } + ELSE IF( EQ_16( sts[0]->core, TCX_20 ) && EQ_16( sts[1]->core, TCX_10 ) ) + { + noise_gen_mode_bfi = TCX10_IN_0_TCX20_IN_1; + 
move32(); + } + ELSE IF( EQ_16( sts[0]->core, TCX_10 ) && EQ_16( sts[1]->core, TCX_20 ) ) + { + noise_gen_mode_bfi = TCX20_IN_0_TCX10_IN_1; + move32(); + } + } + + /* parameter decoding */ + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + st = sts[ch]; + + IF( EQ_32( st->mct_chan_mode, MCT_CHAN_MODE_IGNORE ) ) + { + st->total_brate = st->bits_frame_channel; + move32(); + CONTINUE; + } + prm[ch] = param[ch]; /* to avoid compilation warnings */ + + /*Adjust bit per frame*/ + IF( !bfi ) + { + st->bits_frame_core = st->bits_frame_channel; + move16(); + + sts[ch]->total_brate = L_mult0( add( sts[ch]->bits_frame_channel, add( sts[ch]->side_bits_frame_channel, i_mult( sts[ch]->core, NF_GAIN_BITS ) ) ), FRAMES_PER_SEC ); + } + + /* Framing parameters */ + L_frame[ch] = st->L_frame; + move16(); + + /*--------------------------------------------------------------------------------* + * BITSTREAM DECODING + *--------------------------------------------------------------------------------*/ + + total_nbbits[ch] = st->bits_frame_channel; + move16(); + + IF( !bfi ) + { + st->second_last_core = st->last_core; + move16(); + test(); + IF( EQ_16( hCPE->cpe_id, 0 ) && EQ_16( ch, 0 ) ) + { + /* add mct and side bits to first handle bitrate to avoid false BER detection */ + st->total_brate = L_add( st->total_brate, L_mult0( st->next_bit_pos, FRAMES_PER_SEC ) ); + dec_prm_tcx_spec_fx( st, param[ch], &total_nbbits[ch], &bitsRead[ch], p_param[ch], nTnsBitsTCX10[ch] ); + + /*revert to actual total bitrate assigned to ch0 */ + sts[ch]->total_brate = L_mult0( add( add( sts[ch]->bits_frame_channel, sts[ch]->side_bits_frame_channel ), i_mult2( sts[ch]->core, NF_GAIN_BITS ) ), FRAMES_PER_SEC ); + } + ELSE + { + dec_prm_tcx_spec_fx( st, param[ch], &total_nbbits[ch], &bitsRead[ch], p_param[ch], nTnsBitsTCX10[ch] ); + } + + assert( NE_16( st->BER_detect, 1 ) ); + } + ELSE + { + IF( GT_16( st->nbLostCmpt, 1 ) ) + { + st->flagGuidedAcelp = 0; + move16(); + } + + /* PLC: [Common: mode decision] + * PLC: Decide which Concealment to use. 
Update pitch lags if needed */ + + // PLC to be done + st->core = GetPLCModeDecision_flt( st ); + } + + test(); + test(); + IF( ( !st->bfi || st->hTcxCfg->psychParamsCurrent == NULL ) && GT_16( st->core, ACELP_CORE ) ) + { + Word16 last_frame_was_concealed_cng; + last_frame_was_concealed_cng = ( EQ_16( st->last_core, ACELP_CORE ) ) && ( NE_16( st->last_core, st->last_core_from_bs ) ); + SetCurrentPsychParams( st->core, last_frame_was_concealed_cng, st->hTcxCfg ); + } + + /* PLC: [Common: Memory update] + * PLC: Update the number of lost frames */ + IF( !bfi ) + { + IF( EQ_16( st->prev_bfi, 1 ) ) + { + st->prev_nbLostCmpt = st->nbLostCmpt; + move16(); + } + ELSE + { + st->prev_nbLostCmpt = 0; + move16(); + } + + st->nbLostCmpt = 0; + move16(); + } + } + + test(); + test(); + test(); + IF( bfi && !MCT_flag && ( GT_16( hCPE->hStereoMdct->mdct_stereo_mode[0], SMDCT_DUAL_MONO ) || GT_16( hCPE->hStereoMdct->mdct_stereo_mode[1], SMDCT_DUAL_MONO ) ) ) + { + /* avoid using TD-PLC in only one channel when stereo mode isn't dual mono */ + test(); + test(); + IF( NE_16( sts[0]->core, sts[1]->core ) && ( EQ_16( sts[0]->core, ACELP_CORE ) || EQ_16( sts[1]->core, ACELP_CORE ) ) ) + { + IF( EQ_16( sts[0]->core, ACELP_CORE ) ) + { + sts[0]->core = sts[0]->last_core; + move16(); + } + ELSE IF( EQ_16( sts[1]->core, ACELP_CORE ) ) + { + sts[1]->core = sts[1]->last_core; + move16(); + } + } + + Copy( tmp_ms_sig[0], sts[0]->hTonalMDCTConc->lastBlockData.spectralData, L_frameTCX[0] ); + Copy( tmp_ms_sig[1], sts[1]->hTonalMDCTConc->lastBlockData.spectralData, L_frameTCX[0] ); + sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp = tmp_ms_sig_e[0]; + move16(); + sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp = tmp_ms_sig_e[1]; + move16(); + } + + /*--------------------------------------------------------------------------------* + * LPC PARAMETERS + *--------------------------------------------------------------------------------*/ + + IF( EQ_16( bfi, 0 ) ) + { + test(); + test(); + test(); + IF( !MCT_flag && EQ_32( sts[0]->sr_core, 25600 ) && ( EQ_32( hCPE->element_brate, IVAS_48k ) || EQ_32( hCPE->element_brate, IVAS_64k ) ) ) + { + dequantize_sns_fx( param_lpc, sns, sts ); + } + ELSE + { + test(); + test(); + test(); + IF( EQ_16( sts[0]->core, TCX_20_CORE ) && EQ_16( sts[1]->core, TCX_20_CORE ) && NE_32( sts[0]->mct_chan_mode, MCT_CHAN_MODE_IGNORE ) && NE_32( sts[1]->mct_chan_mode, MCT_CHAN_MODE_IGNORE ) ) + { + sns_avq_dec_stereo_fx( param_lpc[0], param_lpc[1], sts[0]->L_frame, &sns[0][0][0], &q_l, &sns[1][0][0], &q_r ); + } + ELSE + { + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + st = sts[ch]; + IF( NE_32( st->mct_chan_mode, MCT_CHAN_MODE_IGNORE ) ) + { + sns_avq_dec_fx( param_lpc[ch], sns[ch], &q_snsq, st->L_frame, st->numlpc ); + } + } + } + } + } + + /*--------------------------------------------------------------* + * Rate switching + *---------------------------------------------------------------*/ + + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + st = sts[ch]; + + IF( st->rate_switching_reset ) + { + E_LPC_f_lsp_a_conversion( st->lsp_old_fx, st->old_Aq_12_8_fx, M ); + } + } + + /*--------------------------------------------------------------------------------* + * TCX20/10/5 + *--------------------------------------------------------------------------------*/ + +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + // PLC + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->mct_chan_mode != MCT_CHAN_MODE_IGNORE ) + { + IF( bfi && sts[ch]->tonal_mdct_plc_active && NE_16( sts[ch]->element_mode, 
IVAS_CPE_MDCT ) ) + { + FOR( Word16 i = 0; i < sts[ch]->hTonalMDCTConc->pTCI_float->numIndexes; i++ ) + { + float pd = sts[ch]->hTonalMDCTConc->pTCI_float->phaseDiff_float[i]; + IF( pd >= PI2 ) + pd = fmodf( pd, PI2 ) - PI2; + sts[ch]->hTonalMDCTConc->pTCI_fix->phaseDiff[i] = float_to_fix16( pd, Q12 ); + } + FOR( Word16 i = 0; i < MAX_NUMBER_OF_IDX * GROUP_LENGTH; i++ ) + { + float pd = sts[ch]->hTonalMDCTConc->pTCI_float->phase_currentFramePredicted_float[i]; + pd = fmodf( pd, PI2 ); + sts[ch]->hTonalMDCTConc->pTCI_fix->phase_currentFramePredicted[i] = (Word16) ( pd * ( 1u << Q13 ) ); + } + FOR( Word16 i = 0; i < FDNS_NPTS; i++ ) + { + f2me_16( sts[ch]->hTonalMDCTConc->secondLastBlockData.scaleFactors_float[i], &sts[ch]->hTonalMDCTConc->secondLastBlockData.scaleFactors[i], &sts[ch]->hTonalMDCTConc->secondLastBlockData.scaleFactors_exp[i] ); + sts[ch]->hTonalMDCTConc->secondLastBlockData.scaleFactors_max_e = s_max( sts[ch]->hTonalMDCTConc->secondLastBlockData.scaleFactors_max_e, sts[ch]->hTonalMDCTConc->secondLastBlockData.scaleFactors_exp[i] ); + } + } + IF( sts[ch]->enablePlcWaveadjust ) + { + f2me_buf( sts[ch]->hPlcInfo->data_reci2, sts[ch]->hPlcInfo->data_reci2_fx, &sts[ch]->hPlcInfo->data_reci2_scale, sts[ch]->hPlcInfo->L_frameTCX ); + } + } + } +#endif + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + st = sts[ch]; + + IF( EQ_32( st->mct_chan_mode, MCT_CHAN_MODE_IGNORE ) ) + { + set32_fx( x[ch][0], 0, st->hTcxCfg->tcx_coded_lines ); + x_e[ch][0] = 31; + move16(); + /* usually set in decoder_tcx_invQ(), needed for concealment */ + st->hTcxDec->damping = 0; + move16(); + st->hTcxDec->gainHelper = 16384; + move16(); + st->hTcxDec->gainHelper_e = 1; + move16(); + st->hTcxDec->stepCompensate = 0; + move16(); + st->hTcxDec->stepCompensate_e = 15; + move16(); + + CONTINUE; + } + + test(); + test(); + IF( !bfi || ( bfi && NE_16( st->core, ACELP_CORE ) ) ) + { + nSubframes[ch] = ( EQ_16( st->core, TCX_10_CORE ) ) ? NB_DIV : 1; + move16(); + + FOR( k = 0; k < nSubframes[ch]; k++ ) + { + /* Stability Factor */ + IF( !bfi ) + { + Copy_Scale_sig_32_16( sns[ch][k], &Aq[ch][k * M], M, -4 ); // sns in Q16 Word32. 
Aq in Q12 Word16 + } + ELSE + { + st->stab_fac_fx = MAX16B; + move16(); + } + + /* Set pointer to parameters */ + prm[ch] = param[ch] + ( k * DEC_NPRM_DIV ); + L_frameTCX_global[ch] = st->hTcxDec->L_frameTCX / nSubframes[ch]; + L_spec[ch] = st->hTcxCfg->tcx_coded_lines / nSubframes[ch]; + + init_tcx_info_fx( st, st->L_frame / nSubframes[ch], st->hTcxDec->L_frameTCX / nSubframes[ch], k, bfi, &tcx_offset[ch], &tcx_offsetFB[ch], &L_frame[ch], &L_frameTCX[ch], &left_rect[ch], &L_spec[ch] ); + + tmp_concealment_method = 0; + move16(); + nf_seed = prm[ch][1 + NOISE_FILL_RANGES + LTPSIZE]; + move16(); + test(); + test(); + test(); + IF( !bfi && st->hTcxCfg->fIsTNSAllowed && GT_16( ch, 0 ) && LT_16( nf_seed, 0 ) ) + { + mvs2s( param[0] + k * DEC_NPRM_DIV + 1 + NOISE_FILL_RANGES + LTPSIZE, prm[ch] + 1 + NOISE_FILL_RANGES + LTPSIZE, nf_seed * -1 ); + } + nf_seed = 0; + move16(); + + decoder_tcx_invQ_fx( st, prm[ch], Aq[ch], Aind[ch], L_spec[ch], L_frame[ch], L_frameTCX[ch], x[ch][k], &x_e[ch][k], NULL, NULL, xn_buf, &fUseTns[ch][k], &tnsData[ch][k], &gain_tcx, &gain_tcx_e, &prm_sqQ, &nf_seed, bfi, k ); + + shift = Find_Max_Norm32( x[ch][k], L_frameTCX[ch] ); + move16(); + Scale_sig32( x[ch][k], L_frameTCX[ch], shift ); + x_e[ch][k] = sub( x_e[ch][k], shift ); + move16(); + + Copy32( x[ch][k], x_0[ch][k], L_frameTCX[ch] ); + x_0_e[ch][k] = x_e[ch][k]; + move16(); + + // PLC to be done + test(); + IF( bfi && !MCT_flag ) + { +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + st->hTcxDec->cummulative_damping_tcx_float = fix16_to_float( st->hTcxDec->cummulative_damping_tcx, Q15 ); +#endif + TonalMdctConceal_create_concealment_noise_ivas( concealment_noise[ch], hCPE, L_frameTCX[ch], L_frame[ch], ch, k, st->core, st->hTcxDec->cummulative_damping_tcx_float, noise_gen_mode_bfi ); +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + // Float to fix. 
Temp or PLC + concealment_noise_len[ch] = get_igf_startline( st, L_frame[ch], L_frameTCX[ch] ); + f2me_buf( concealment_noise[ch], concealment_noise_fx[ch], &concealment_noise_e[ch], concealment_noise_len[ch] ); + st->hTcxDec->cummulative_damping_tcx = float_to_fix16( st->hTcxDec->cummulative_damping_tcx_float, Q15 ); +#endif + } + + decoder_tcx_noisefilling_fx( st, concealment_noise_fx[ch], concealment_noise_e[ch], Aq[ch], L_frameTCX_global[ch], L_spec[ch], L_frame[ch], L_frameTCX[ch], x[ch][k], &x_e[ch][k], NULL, NULL, &tmp_concealment_method, gain_tcx, gain_tcx_e, prm_sqQ, nf_seed, bfi, MCT_flag, k ); + + shift = Find_Max_Norm32( x[ch][k], L_frameTCX[ch] ); + move16(); + Scale_sig32( x[ch][k], L_frameTCX[ch], shift ); + x_e[ch][k] = sub( x_e[ch][k], shift ); + move16(); + + decoder_tcx_noiseshaping_igf_fx( st, L_spec[ch], L_frame[ch], L_frameTCX[ch], left_rect[ch], x[ch][k], &x_e[ch][k], &x_len[ch][k], NULL, NULL, &tmp_concealment_method, bfi ); + } + } + } + + FOR( k = 0; k < CPE_CHANNELS; ++k ) + { + free( spectralData_tmp[k] ); + } + + pop_wmops(); + return; +} +#endif void ivas_mdct_core_invQ( CPE_DEC_HANDLE hCPE, /* i/o: CPE handle */ diff --git a/lib_dec/ivas_stereo_mdct_core_dec_fx.c b/lib_dec/ivas_stereo_mdct_core_dec_fx.c index 2dfb79051..9ee0c65ba 100644 --- a/lib_dec/ivas_stereo_mdct_core_dec_fx.c +++ b/lib_dec/ivas_stereo_mdct_core_dec_fx.c @@ -207,7 +207,160 @@ void stereo_mdct_core_dec_fx( } } +#ifdef IVAS_FLOAT_FIXED +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + Word16 Aq_fx[CPE_CHANNELS][( NB_SUBFR16k + 1 ) * ( M + 1 )]; + + Word32 *x_0_fx[CPE_CHANNELS][NB_DIV]; + Word32 *x_fx[CPE_CHANNELS][NB_DIV]; + Word16 x_e[CPE_CHANNELS][NB_DIV]; + Word16 x_0_e[CPE_CHANNELS][NB_DIV]; + Word16 x_len[CPE_CHANNELS][NB_DIV] = { 0 }; + + Word16 i, l, j; + + FOR( i = 0; i < CPE_CHANNELS; ++i ) + { + x_fx[i][0] = malloc( L_FRAME_PLUS * sizeof( Word32 ) ); + x_fx[i][1] = x_fx[i][0] + L_FRAME_PLUS / 2; + floatToFixed_arrL( x[i][0], x_fx[i][0], 0, L_FRAME_PLUS / 2 ); + floatToFixed_arrL( x[i][1], x_fx[i][1], 0, L_FRAME_PLUS / 2 ); + + FOR( j = 0; j < NB_DIV; ++j ) + { + x_e[i][j] = 31; + } + } + + FOR( i = 0; i < CPE_CHANNELS; ++i ) + { + x_0_fx[i][0] = malloc( N_MAX * sizeof( Word32 ) ); + x_0_fx[i][1] = x_0_fx[i][0] + L_FRAME48k / 2; + floatToFixed_arrL( x_0[i][0], x_0_fx[i][0], 0, L_FRAME48k / 2 ); + floatToFixed_arrL( x_0[i][1], x_0_fx[i][1], 0, L_FRAME48k / 2 ); + + FOR( j = 0; j < NB_DIV; ++j ) + { + x_0_e[i][j] = 31; + } + } + + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->mct_chan_mode != MCT_CHAN_MODE_IGNORE ) + { + sts[ch]->gamma = float_to_fix16( sts[ch]->gamma_float, Q15 ); + sts[ch]->preemph_fac = float_to_fix16( sts[ch]->preemph_fac_float, Q15 ); + sts[ch]->Mode2_lp_gainp = float_to_fix( sts[ch]->lp_gainp, Q16 ); + sts[ch]->stab_fac_fx = float_to_fix16( sts[ch]->stab_fac, Q15 ); + sts[ch]->hTcxCfg->na_scale = float_to_fix16( sts[ch]->hTcxCfg->na_scale_flt, Q15 ); + sts[ch]->hTcxCfg->sq_rounding = float_to_fix16( sts[ch]->hTcxCfg->sq_rounding_flt, Q15 ); + sts[ch]->hTcxLtpDec->tcxltp_gain = float_to_fix16( sts[ch]->hTcxLtpDec->tcxltp_gain_float, Q15 ); + sts[ch]->inv_gamma = float_to_fix16( 1 / sts[ch]->gamma_float, Q14 ); + sts[ch]->hTcxCfg->preemph_fac = float_to_fix16( sts[ch]->hTcxCfg->preemph_fac_flt, Q15 ); + f2me_16( sts[ch]->last_gain_syn_deemph_float, &sts[ch]->last_gain_syn_deemph, &sts[ch]->last_gain_syn_deemph_e ); + f2me_16( sts[ch]->last_concealed_gain_syn_deemph_float, &sts[ch]->last_concealed_gain_syn_deemph, &sts[ch]->last_concealed_gain_syn_deemph_e ); + 
f2me_16( sts[ch]->hTcxDec->old_gaintcx_bfi_float, &sts[ch]->hTcxDec->old_gaintcx_bfi, &sts[ch]->hTcxDec->old_gaintcx_bfi_e ); + floatToFixed_arr( Aq[ch], Aq_fx[ch], Q12, ( NB_SUBFR16k + 1 ) * ( M + 1 ) ); + + sts[ch]->hTcxDec->tcxltp_last_gain_unmodified = float_to_fix16( sts[ch]->hTcxDec->tcxltp_last_gain_unmodified_float, Q15 ); + sts[ch]->old_fpitch = float_to_fix( sts[ch]->old_fpitch_float, Q16 ); + sts[ch]->hTonalMDCTConc->lastPitchLag = float_to_fix( sts[ch]->hTonalMDCTConc->lastPitchLag_float, Q16 ); + // u8bit to 16bit + FOR( l = 0; l < IGF_START_MX; l++ ) + { + sts[ch]->hIGFDec->infoTCXNoise_evs[l] = (Word16) sts[ch]->hIGFDec->infoTCXNoise[l]; + } + FOR( l = 0; l < N_LTP_GAIN_MEMS; l++ ) + { + sts[ch]->hTcxDec->ltpGainMemory_fx[l] = float_to_fix16( sts[ch]->hTcxDec->ltpGainMemory[l], Q15 ); + } + sts[ch]->hTcxDec->cummulative_damping_tcx = float_to_fix16( sts[ch]->hTcxDec->cummulative_damping_tcx_float, Q15 ); + } + } + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->rate_switching_reset ) + { + floatToFixed_arr( sts[ch]->lsp_old, sts[ch]->lsp_old_fx, Q15, M ); + } + } + IF( sts[0]->bfi && ( hCPE->hStereoMdct->mdct_stereo_mode[0] > SMDCT_DUAL_MONO || hCPE->hStereoMdct->mdct_stereo_mode[1] > SMDCT_DUAL_MONO ) ) + { + floatToFixed_arr( sts[0]->hTonalMDCTConc->lastBlockData.spectralData_float, sts[0]->hTonalMDCTConc->lastBlockData.spectralData, ( 15 - sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp ), L_FRAME_MAX ); + floatToFixed_arr( sts[1]->hTonalMDCTConc->lastBlockData.spectralData_float, sts[1]->hTonalMDCTConc->lastBlockData.spectralData, ( 15 - sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp ), L_FRAME_MAX ); + } +#endif + ivas_mdct_core_invQ_fx( hCPE, nTnsBitsTCX10, p_param, param_lpc, param, fUseTns, tnsData, x_0_fx, x_0_e, x_fx, x_e, x_len, Aq_fx, ms_mask, 0 ); +#ifndef IVAS_FLOAT_CONV_TO_BE_REMOVED + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->mct_chan_mode == MCT_CHAN_MODE_IGNORE ) + { + me2f_buf( x_fx[ch][0], x_e[ch][0], x[ch][0], sts[ch]->hTcxCfg->tcx_coded_lines ); + sts[ch]->hTcxDec->damping_float = fix16_to_float( sts[ch]->hTcxDec->damping, Q14 ); + sts[ch]->hTcxDec->gainHelper_float = me2f_16( sts[ch]->hTcxDec->gainHelper, sts[ch]->hTcxDec->gainHelper_e ); + sts[ch]->hTcxDec->stepCompensate_float = me2f_16( sts[ch]->hTcxDec->stepCompensate, sts[ch]->hTcxDec->stepCompensate_e ); + } + IF( sts[ch]->mct_chan_mode != MCT_CHAN_MODE_IGNORE ) + { + sts[ch]->lp_gainp = fix_to_float( sts[ch]->Mode2_lp_gainp, Q16 ); + sts[ch]->hTonalMDCTConc->lastPitchLag_float = fix_to_float( sts[ch]->hTonalMDCTConc->lastPitchLag, Q16 ); + sts[ch]->hTonalMDCTConc->nFramesLost_float = fix16_to_float( sts[ch]->hTonalMDCTConc->nFramesLost, Q1 ); + sts[ch]->hTcxDec->damping_float = fix16_to_float( sts[ch]->hTcxDec->damping, Q14 ); + sts[ch]->hTcxDec->stepCompensate_float = me2f_16( sts[ch]->hTcxDec->stepCompensate, sts[ch]->hTcxDec->stepCompensate_e ); + sts[ch]->hTcxDec->gainHelper_float = me2f_16( sts[ch]->hTcxDec->gainHelper, sts[ch]->hTcxDec->gainHelper_e ); + sts[ch]->last_concealed_gain_syn_deemph_float = me2f_16( sts[ch]->last_concealed_gain_syn_deemph, sts[ch]->last_concealed_gain_syn_deemph_e ); + sts[ch]->last_gain_syn_deemph_float = me2f_16( sts[ch]->last_gain_syn_deemph, sts[ch]->last_gain_syn_deemph_e ); + sts[ch]->hTcxDec->old_gaintcx_bfi_float = me2f_16( sts[ch]->hTcxDec->old_gaintcx_bfi, sts[ch]->hTcxDec->old_gaintcx_bfi_e ); + sts[ch]->stab_fac = fix16_to_float( sts[ch]->stab_fac_fx, Q15 ); + fixedToFloat_arr( Aq_fx[ch], Aq[ch], Q12, ( 
NB_SUBFR16k + 1 ) * ( M + 1 ) ); + // 16bit to u8bit + FOR( l = 0; l < IGF_START_MX; l++ ) + { + sts[ch]->hIGFDec->infoTCXNoise[l] = (uint8_t) sts[ch]->hIGFDec->infoTCXNoise_evs[l]; + } + FOR( l = 0; l < N_LTP_GAIN_MEMS; l++ ) + { + sts[ch]->hTcxDec->ltpGainMemory[l] = fix16_to_float( sts[ch]->hTcxDec->ltpGainMemory_fx[l], Q15 ); + } + + Word16 subFrames = ( sts[ch]->core == TCX_10_CORE ) ? NB_DIV : 1; + FOR( k = 0; k < subFrames; ++k ) + { + // To be made into Q11 + me2f_buf( x_fx[ch][k], x_e[ch][k], x[ch][k], x_len[ch][k] ); + me2f_buf( x_0_fx[ch][k], x_0_e[ch][k], x_0[ch][k], sts[ch]->L_frameTCX_past ); + } + IF( !sts[0]->bfi || ( sts[0]->bfi && sts[ch]->core != ACELP_CORE ) ) + { + me2f_buf( sts[ch]->hIGFDec->virtualSpec, sts[ch]->hIGFDec->virtualSpec_e, sts[ch]->hIGFDec->virtualSpec_float, ( N_MAX_TCX - IGF_START_MN ) ); + } + sts[ch]->hTcxDec->cummulative_damping_tcx_float = fix16_to_float( sts[ch]->hTcxDec->cummulative_damping_tcx, Q15 ); + } + } + FOR( ch = 0; ch < CPE_CHANNELS; ch++ ) + { + IF( sts[ch]->rate_switching_reset ) + { + Word16 old_Aq_12_8_e = norm_s( sts[ch]->old_Aq_12_8_fx[0] ); + fixedToFloat_arr( sts[ch]->old_Aq_12_8_fx, sts[ch]->old_Aq_12_8, ( 15 - old_Aq_12_8_e ), M + 1 ); + } + } + IF( sts[0]->bfi && ( hCPE->hStereoMdct->mdct_stereo_mode[0] > SMDCT_DUAL_MONO || hCPE->hStereoMdct->mdct_stereo_mode[1] > SMDCT_DUAL_MONO ) ) + { + fixedToFloat_arr( sts[0]->hTonalMDCTConc->lastBlockData.spectralData, sts[0]->hTonalMDCTConc->lastBlockData.spectralData_float, ( 15 - sts[0]->hTonalMDCTConc->lastBlockData.spectralData_exp ), sts[0]->L_frameTCX_past ); + fixedToFloat_arr( sts[1]->hTonalMDCTConc->lastBlockData.spectralData, sts[1]->hTonalMDCTConc->lastBlockData.spectralData_float, ( 15 - sts[1]->hTonalMDCTConc->lastBlockData.spectralData_exp ), sts[0]->L_frameTCX_past ); + } + FOR( i = 0; i < CPE_CHANNELS; ++i ) + { + free( x_fx[i][0] ); + free( x_0_fx[i][0] ); + } +#endif +#else ivas_mdct_core_invQ( hCPE, nTnsBitsTCX10, p_param, param_lpc, param, fUseTns, tnsData, x_0, x, Aq, ms_mask, 0 ); +#endif for ( ch = 0; ch < nChannels; ch++ ) { diff --git a/lib_dec/ivas_stereo_mdct_stereo_dec.c b/lib_dec/ivas_stereo_mdct_stereo_dec.c index fb827e887..f0069e9d9 100644 --- a/lib_dec/ivas_stereo_mdct_stereo_dec.c +++ b/lib_dec/ivas_stereo_mdct_stereo_dec.c @@ -553,8 +553,8 @@ void stereo_decoder_tcx_fx( { Word16 i, k, sfb, nSubframes; STEREO_MDCT_BAND_PARAMETERS *sfbConf = NULL; - Word32 nrgRatio; - Word16 tmp, tmp_e; + Word32 nrgRatio, inv_nrgRatio, tmp_32; + Word16 tmp, tmp_e, shift; nSubframes = 2; move16(); @@ -659,8 +659,9 @@ void stereo_decoder_tcx_fx( IF( !mct_on ) { - tmp = BASOP_Util_Divide3216_Scale( ( SMDCT_ILD_RANGE << Q26 ), sub( hStereoMdct->global_ild[k], 1 ), &tmp_e ); - nrgRatio = L_shr( (Word32) tmp, negate( add( 1, tmp_e ) ) ); /* nrgRatio = nrg[1]/nrg[0] */ // Q26 + tmp = BASOP_Util_Divide3216_Scale( ( SMDCT_ILD_RANGE << 26 ), hStereoMdct->global_ild[k], &tmp_e ); + tmp_32 = L_shr( (Word32) tmp, negate( add( 1, tmp_e ) ) ); /* nrgRatio = nrg[1]/nrg[0] */ // Q26 + nrgRatio = L_sub(tmp_32, 67108864); /* 1.0f in Q26 */ hStereoMdct->smooth_ratio_fx = L_add( Mpy_32_32( POINT_8_FIXED, hStereoMdct->smooth_ratio_fx ), Mpy_32_32( POINT_2_FIXED, nrgRatio ) ); // Q26 /* set flag to reverse dmx computation in case of right-side panning, only relevant for mono output */ @@ -679,14 +680,19 @@ void stereo_decoder_tcx_fx( test(); IF( ( GT_32( nrgRatio, ONE_IN_Q26 ) ) && ( LT_16( k, ( ( EQ_16( core_r, TCX_10_CORE ) ) ? 
NB_DIV : 1 ) ) ) ) { + shift = norm_l(nrgRatio); + nrgRatio = L_shl(nrgRatio, shift); v_multc_fixed( spec_r[k], nrgRatio, spec_r[k], L_frameTCX_r ); - *q_x_ch2 = *q_x_ch2 - 5; + *q_x_ch2 = *q_x_ch2 - 5 + shift; move16(); } ELSE IF( ( LT_32( nrgRatio, ONE_IN_Q26 ) ) && ( LT_16( k, ( ( EQ_16( core_l, TCX_10_CORE ) ) ? NB_DIV : 1 ) ) ) ) { - v_multc_fixed( spec_l[k], L_shl( (Word32) divide3232( ONE_IN_Q26, nrgRatio ), Q11 ), spec_l[k], L_frameTCX_l ); - *q_x_ch1 = *q_x_ch1 - 5; + tmp = BASOP_Util_Divide3232_Scale(ONE_IN_Q26, nrgRatio, &tmp_e); + inv_nrgRatio = L_deposit_h(tmp); + shift = 5 - tmp_e; + v_multc_fixed( spec_l[k], inv_nrgRatio, spec_l[k], L_frameTCX_l ); + *q_x_ch1 = *q_x_ch1 - 5 + shift; move16(); } } diff --git a/lib_dec/tonalMDCTconcealment_fx.c b/lib_dec/tonalMDCTconcealment_fx.c index 18f61e15a..c5879539b 100644 --- a/lib_dec/tonalMDCTconcealment_fx.c +++ b/lib_dec/tonalMDCTconcealment_fx.c @@ -741,6 +741,155 @@ static void FindPhaseDifferences( /* o: Phase } } +#ifdef IVAS_FLOAT_FIXED +static void ivas_CalcPowerSpecAndDetectTonalComponents_fx( + TonalMDCTConcealPtr const hTonalMDCTConc, + Word32 secondLastMDST[], + Word16 secondLastMDST_exp, + Word32 secondLastMDCT[], + Word16 secondLastMDCT_exp, + Word32 const pitchLag, + const PsychoacousticParameters* psychParamsCurrent +) +{ + Word16 nSamples; + Word16 i; + Word16 floorPowerSpectrum; /* Minimum significant value of a spectral line in the power spectrum */ + Word32 powerSpectrum[L_FRAME_MAX]; + Word16 invScaleFactors[FDNS_NPTS]; + Word16 invScaleFactors_exp[FDNS_NPTS]; + Word16 powerSpectrum_exp, tmp_exp, old_exp; +#ifdef BASOP_NOGLOB_DECLARE_LOCAL + Flag Overflow = 0; +#endif + + + nSamples = hTonalMDCTConc->nNonZeroSamples; + move16(); + + /* It is taken into account that the MDCT is not normalized. */ + floorPowerSpectrum/*Q0*/ = extract_l(Mpy_32_16_1(L_mult0(hTonalMDCTConc->nSamples,hTonalMDCTConc->nSamples),82)); /*1/400 = 82 Q15*/ + powerSpectrum_exp = 0; + move16(); + + CalcPowerSpec(secondLastMDCT, + secondLastMDCT_exp, + secondLastMDST, + secondLastMDST_exp, + nSamples, + floorPowerSpectrum, + powerSpectrum, + &powerSpectrum_exp); + + /* This setting to minimal level is required because the power spectrum is used in the threshold adaptation using the pitch up to hTonalMDCTConc->nSamples. 
*/ + set32_fx(powerSpectrum+nSamples, floorPowerSpectrum, sub(hTonalMDCTConc->nSamples, nSamples)); + /* this setting to zero is needed since the FDNS needs to be called + with hTonalMDCTConc->nSamplesCore; it relevant only for nb; it has no effect + to the output, but memory checker may complain otherwise due to the + usage of uninitialized values */ + IF ( GT_16(hTonalMDCTConc->nSamplesCore, hTonalMDCTConc->nSamples)) + { + set32_fx(powerSpectrum+hTonalMDCTConc->nSamples, 0, sub(hTonalMDCTConc->nSamplesCore, hTonalMDCTConc->nSamples)); + } + + //DetectTonalComponents(hTonalMDCTConc->pTCI_fix->indexOfTonalPeak, + // hTonalMDCTConc->pTCI_fix->lowerIndex, + // hTonalMDCTConc->pTCI_fix->upperIndex, + // &hTonalMDCTConc->pTCI_fix->numIndexes, + // hTonalMDCTConc->lastPitchLag, + // pitchLag, + // hTonalMDCTConc->lastBlockData.spectralData, + // add(hTonalMDCTConc->lastBlockData.spectralData_exp,hTonalMDCTConc->lastBlockData.gain_tcx_exp), + // hTonalMDCTConc->lastBlockData.scaleFactors, + // hTonalMDCTConc->lastBlockData.scaleFactors_exp, + // hTonalMDCTConc->lastBlockData.scaleFactors_max_e, + // powerSpectrum, + // nSamples, + // hTonalMDCTConc->nSamplesCore, + // floorPowerSpectrum); + + ivas_DetectTonalComponents_fx(hTonalMDCTConc->pTCI_fix->indexOfTonalPeak, + hTonalMDCTConc->pTCI_fix->lowerIndex, + hTonalMDCTConc->pTCI_fix->upperIndex, + &hTonalMDCTConc->pTCI_fix->numIndexes, + hTonalMDCTConc->lastPitchLag, + pitchLag, + hTonalMDCTConc->lastBlockData.spectralData, + add(hTonalMDCTConc->lastBlockData.spectralData_exp,hTonalMDCTConc->lastBlockData.gain_tcx_exp), + hTonalMDCTConc->lastBlockData.scaleFactors, + hTonalMDCTConc->lastBlockData.scaleFactors_exp, + hTonalMDCTConc->lastBlockData.scaleFactors_max_e, + powerSpectrum, + nSamples, + hTonalMDCTConc->nSamplesCore, + floorPowerSpectrum, psychParamsCurrent); + FindPhases(hTonalMDCTConc, secondLastMDCT, secondLastMDST, sub(secondLastMDST_exp,secondLastMDCT_exp)); + + FindPhaseDifferences(hTonalMDCTConc, powerSpectrum); + + IF (hTonalMDCTConc->pTCI_fix->numIndexes > 0) + { + + hTonalMDCTConc->secondLastPowerSpectrum = hTonalMDCTConc->secondLastBlockData.spectralData; + + /*sqrtFLOAT(powerSpectrum, powerSpectrum, nSamples);*/ + old_exp = powerSpectrum_exp; + powerSpectrum_exp = mult_r(sub(powerSpectrum_exp,2), 1 << 14); /*remove 2 bits of headroom from CalcPowerSpec*/ + FOR (i = 0; i < nSamples; i++) + { + tmp_exp = old_exp; + powerSpectrum[i] = Sqrt32(powerSpectrum[i], &tmp_exp); + powerSpectrum[i] = L_shr(powerSpectrum[i], sub(powerSpectrum_exp, tmp_exp)); + move32(); + } + + FOR (i = 0; i < hTonalMDCTConc->nScaleFactors; i++) + { + move16(); + move16(); + invScaleFactors_exp[i] = hTonalMDCTConc->secondLastBlockData.scaleFactors_exp[i]; + invScaleFactors[i] = Inv16(hTonalMDCTConc->secondLastBlockData.scaleFactors[i], &invScaleFactors_exp[i]); + } + + + /* here mdct_shaping() is intentionally used rather then mdct_shaping_16() */ +#ifdef IVAS_CODE_MDCT_GSHAPE + IF (psychParamsCurrent == NULL) +#endif + { + mdct_shaping(powerSpectrum, hTonalMDCTConc->nSamplesCore, invScaleFactors, invScaleFactors_exp); + } +#ifdef IVAS_CODE_MDCT_GSHAPE + ELSE + { +PMTE() + sns_shape_spectrum(powerSpectrum, psychParamsCurrent, invScaleFactors, hTonalMDCTConc->nSamplesCore); + nBands = psychParamsCurrent->nBands; + } +#endif + FOR (i = hTonalMDCTConc->nSamplesCore; i < nSamples; i++) + { + powerSpectrum[i] = L_shl(Mpy_32_16_1(powerSpectrum[i], invScaleFactors[FDNS_NPTS-1]), invScaleFactors_exp[FDNS_NPTS-1]); + move32(); + } + + /* 16 bits are now enough for 
storing the power spectrum */ + FOR (i = 0; i < nSamples; i++) + { +#ifdef BASOP_NOGLOB + hTonalMDCTConc->secondLastPowerSpectrum[i] = round_fx_o(powerSpectrum[i], &Overflow); +#else + hTonalMDCTConc->secondLastPowerSpectrum[i] = round_fx(powerSpectrum[i]); +#endif + } + + powerSpectrum_exp = sub(powerSpectrum_exp, hTonalMDCTConc->secondLastBlockData.gain_tcx_exp); + hTonalMDCTConc->secondLastPowerSpectrum_exp = powerSpectrum_exp; + move16(); + } +} +#endif + static void CalcPowerSpecAndDetectTonalComponents( TonalMDCTConcealPtr const hTonalMDCTConc, Word32 secondLastMDST[], @@ -1048,6 +1197,150 @@ void TonalMDCTConceal_Detect( return ; } +#ifdef IVAS_FLOAT_FIXED +void TonalMDCTConceal_Detect_ivas_fx( + const TonalMDCTConcealPtr hTonalMDCTConc, + const Word32 pitchLag, + Word16 * numIndices, + const PsychoacousticParameters* psychParamsCurrent +) +{ + Word32 secondLastMDST[L_FRAME_MAX]; + Word32 secondLastMDCT[L_FRAME_MAX]; + Word16 secondLastMDCT_exp; + Word32 * powerSpectrum = secondLastMDST; + Word16 i, powerSpectrum_exp, secondLastMDST_exp, s; + Word16 nSamples; + Word16 nBands; + Word32 sns_int_scf_fx[FDNS_NPTS]; + + nSamples = hTonalMDCTConc->nSamples; + move16(); + secondLastMDST_exp = 16; /*time signal Q-1*/ + secondLastMDCT_exp = 16; /*time signal Q-1*/ + test(); + test(); + test(); + test(); + test(); + IF(hTonalMDCTConc->lastBlockData.blockIsValid && hTonalMDCTConc->secondLastBlockData.blockIsValid + && (EQ_16(hTonalMDCTConc->lastBlockData.nSamples, nSamples)) && (EQ_16(hTonalMDCTConc->secondLastBlockData.nSamples, nSamples)) + && (!hTonalMDCTConc->secondLastBlockData.blockIsConcealed || hTonalMDCTConc->secondLastBlockData.tonalConcealmentActive || (pitchLag != 0))) + { + /* Safety if the second last frame was concealed and tonal concealment was inactive */ + + IF(hTonalMDCTConc->lastBlockData.blockIsConcealed == 0) + { + IF(hTonalMDCTConc->secondLastBlockData.tonalConcealmentActive == 0) + { + CalcMDXT(hTonalMDCTConc, 0, hTonalMDCTConc->secondLastPcmOut, secondLastMDST, &secondLastMDST_exp); + CalcMDXT(hTonalMDCTConc, 1, hTonalMDCTConc->secondLastPcmOut, secondLastMDCT, &secondLastMDCT_exp); + hTonalMDCTConc->nNonZeroSamples = 0; + FOR(i = 0; i < hTonalMDCTConc->nSamples; i++) + { + if (hTonalMDCTConc->secondLastBlockData.spectralData[i] != 0) + { + hTonalMDCTConc->nNonZeroSamples = i; + move16(); + } + } + + /* 23 is the maximum length of the MA filter in getEnvelope */ + hTonalMDCTConc->nNonZeroSamples = s_min(hTonalMDCTConc->nSamples, add(hTonalMDCTConc->nNonZeroSamples, 23)); + move16(); + nSamples = hTonalMDCTConc->nNonZeroSamples; + move16(); + + s = getScaleFactor32(secondLastMDST, nSamples); + + FOR(i = 0; i < nSamples; i++) + { + secondLastMDST[i] = L_shl(secondLastMDST[i], s); + move32(); + } + secondLastMDST_exp = sub(secondLastMDST_exp, s); + move16(); + s = getScaleFactor32(secondLastMDCT, nSamples); + + FOR(i = 0; i < nSamples; i++) + { + secondLastMDCT[i] = L_shl(secondLastMDCT[i], s); + move32(); + } + secondLastMDCT_exp = sub(secondLastMDCT_exp, s); + move16(); + //CalcPowerSpecAndDetectTonalComponents(hTonalMDCTConc, secondLastMDST, secondLastMDST_exp, secondLastMDCT, secondLastMDCT_exp, pitchLag); + ivas_CalcPowerSpecAndDetectTonalComponents_fx(hTonalMDCTConc, secondLastMDST, secondLastMDST_exp, secondLastMDCT, secondLastMDCT_exp, pitchLag, psychParamsCurrent); + } + ELSE + { + /* If the second last frame was also lost, it is expected that pastTimeSignal could hold a bit different signal (e.g. 
including fade-out) from the one stored in TonalMDCTConceal_SaveTimeSignal. */ + /* That is why we reuse the already stored information about the concealed spectrum in the second last frame */ + IF(psychParamsCurrent == NULL) + { + nSamples = hTonalMDCTConc->nNonZeroSamples; + move16(); + mdct_shaping_16(hTonalMDCTConc->secondLastPowerSpectrum, hTonalMDCTConc->nSamplesCore, nSamples, + hTonalMDCTConc->secondLastBlockData.scaleFactors, hTonalMDCTConc->secondLastBlockData.scaleFactors_exp, + hTonalMDCTConc->secondLastBlockData.scaleFactors_max_e, powerSpectrum); + } + ELSE + { + //sns_shape_spectrum(powerSpectrum, psychParamsCurrent, hTonalMDCTConc->secondLastBlockData.scaleFactors, hTonalMDCTConc->nSamplesCore); + Word16 power_spectrum_q; + FOR(Word16 i = 0; i < FDNS_NPTS; i++) + { + sns_int_scf_fx[i] = L_shl(hTonalMDCTConc->secondLastBlockData.scaleFactors[i], 1 + hTonalMDCTConc->secondLastBlockData.scaleFactors_exp[i]); // Q16 + } + sns_shape_spectrum_fx(powerSpectrum, &power_spectrum_q, psychParamsCurrent, sns_int_scf_fx, 16, hTonalMDCTConc->nSamplesCore); + nBands = psychParamsCurrent->nBands; + } + powerSpectrum_exp = getScaleFactor32(powerSpectrum, nSamples); + powerSpectrum_exp = sub(powerSpectrum_exp, 3); /*extra 3 bits of headroom for MA filter in getEnvelope*/ + + /* multFLOAT(powerSpectrum, powerSpectrum, powerSpectrum, nSamples); */ + FOR(i = 0; i < nSamples; i++) + { + Word32 const t = L_shl(powerSpectrum[i], powerSpectrum_exp); + powerSpectrum[i] = Mpy_32_32(t, t); + move32(); + } + + RefineTonalComponents(hTonalMDCTConc->pTCI_fix->indexOfTonalPeak, + hTonalMDCTConc->pTCI_fix->lowerIndex, + hTonalMDCTConc->pTCI_fix->upperIndex, + hTonalMDCTConc->pTCI_fix->phaseDiff, + hTonalMDCTConc->pTCI_fix->phase_currentFramePredicted, + &hTonalMDCTConc->pTCI_fix->numIndexes, + hTonalMDCTConc->lastPitchLag, + pitchLag, + hTonalMDCTConc->lastBlockData.spectralData, + add(hTonalMDCTConc->lastBlockData.spectralData_exp,hTonalMDCTConc->lastBlockData.gain_tcx_exp), + hTonalMDCTConc->lastBlockData.scaleFactors, + hTonalMDCTConc->lastBlockData.scaleFactors_exp, + hTonalMDCTConc->lastBlockData.scaleFactors_max_e, + powerSpectrum, + nSamples, + hTonalMDCTConc->nSamplesCore, + extract_l(Mpy_32_16_1(L_mult0(hTonalMDCTConc->nSamples,hTonalMDCTConc->nSamples),82))); /* floorPowerSpectrum */ + + } + } + } + ELSE + { + hTonalMDCTConc->pTCI_fix->numIndexes = 0; + move16(); + } + + *numIndices = hTonalMDCTConc->pTCI_fix->numIndexes; + move16(); + + + return; +} +#endif + void TonalMDCTConceal_InsertNoise_ivas_fx( const TonalMDCTConcealPtr hTonalMDCTConc, /*IN */ Word32 *mdctSpectrum, -- GitLab
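
Note (appended after the patch, not part of the diff): the conversion wrappers in ivas_mct_dec.c and ivas_stereo_mdct_core_dec_fx.c rely throughout on the mantissa/exponent convention of the f2me/me2f helpers, i.e. a Word32 buffer with exponent e represents value = mantissa * 2^(e - 31), equivalently Q = 31 - e (see q_pScaledMdctSpectrum = 31 - lastMDCTSpectrum_exp above, and the x_e[][] entries initialized to 31 for Q0 data). The code below is a minimal standalone sketch of that convention in plain C, for reference only; the helper names float_to_mant_exp32()/mant_exp32_to_float() are illustrative stand-ins, not the BASOP/IVAS API, and no saturation handling is done.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t Word32;
typedef int16_t Word16;

/* Split a float into a Q31 mantissa and an exponent so that
   value ~= mantissa * 2^(exp - 31); frexpf() yields |m| in [0.5, 1). */
static Word32 float_to_mant_exp32( float value, Word16 *exp )
{
    int e;
    float m = frexpf( value, &e );                 /* value = m * 2^e        */
    *exp = (Word16) e;                             /* exponent, as in x_e[]  */
    return (Word32) lrintf( m * 2147483648.0f );   /* mantissa scaled to Q31 */
}

/* Inverse mapping, mirroring what me2f_16()/me2f_buf() do in the wrappers. */
static float mant_exp32_to_float( Word32 mant, Word16 exp )
{
    return ldexpf( (float) mant, exp - 31 );
}

int main( void )
{
    Word16 e;
    Word32 m = float_to_mant_exp32( 0.4f, &e );
    printf( "0.4f -> mant=%ld exp=%d -> %f\n",
            (long) m, e, mant_exp32_to_float( m, e ) );
    return 0;
}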