Commit ead75aed authored by multrus's avatar multrus
Browse files

[cleanup] accept FIX_2480_HARM_TONALMDCT

parent 3728c466
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -1695,15 +1695,6 @@ void ivas_mdct_tcx10_bit_distribution_fx(
    const Word16 nTnsBitsTCX10[NB_DIV]                          /* i  : TNS bits                                */
);

#ifndef FIX_2480_HARM_TONALMDCT
/* Prototype: IVAS-mode tonal-component detector for MDCT packet-loss
   concealment. Compiled only while FIX_2480_HARM_TONALMDCT is undefined;
   with the flag defined (see the #else branch below) it is superseded by a
   harmonized TonalMDCTConceal_Detect_fx taking the same psychParamsCurrent
   argument. numIndices is the output count of detected tonal peaks. */
void TonalMDCTConceal_Detect_ivas_fx(
    const TonalMDCTConcealPtr hTonalMDCTConc,
    const Word32 pitchLag,
    Word16 *numIndices,
    const PsychoacousticParameters *psychParamsCurrent,
    Word16 element_mode 
);
#else
void TonalMDCTConceal_Detect_fx(
    const TonalMDCTConcealPtr hTonalMDCTConc,
    const Word32 pitchLag,
@@ -1711,7 +1702,6 @@ void TonalMDCTConceal_Detect_fx(
    const PsychoacousticParameters* psychParamsCurrent,
    Word16 element_mode
);
#endif // !FIX_2480_HARM_TONALMDCT
ivas_error stereo_dft_dec_create_fx(
    STEREO_DFT_DEC_DATA_HANDLE *hStereoDft,                     /* i/o: decoder DFT stereo handle               */
    const Word32 element_brate,                                 /* i  : element bitrate                         */
+0 −1
Original line number Diff line number Diff line
@@ -91,7 +91,6 @@
#define NONBE_FIX_ISSUE_2206_AVOID_OVERFLOW_SWB_fenv_fx2 /* FhG: Avoid overflow of SWB_fenv_fx in SWB_BWE_encoding_fx because of very small energies. */
#define NONBE_FIX_ISSUE_2206_AVOID_OVERFLOW_MSVQ_Interpol_Tran_fx /* FhG: Fix saturation crash in MSVQ_Interpol_Tran_fx() */
#define FIX_2452_HQ_CORE_PEAQ_AVR_RATIO_HARM            /* Eri: Basop issue 2453: Fix alignment of peak_avrg_ratio_fx */
#define FIX_2480_HARM_TONALMDCT                         /* FhG: basop issue 2480: Harmonize TonalMDCTConceal_Detect_fx() and TonalMDCTConceal_Detect_ivas_fx() */
#define FIX_2479_HARM_PITCH_GAIN                        /* FhG: basop issue 2479: Harmonize tcx_ltp_pitch_search_*(), tcx_ltp_find_gain_*fx() */
#define HARMONIZE_2481_EXTEND_SHRINK                    /* FhG: basop issue 2481: Harmonize extend_frm_*fx() and shrink_frm_*fx() */
#define FIX_2485_HARMONIZE_perform_noise_estimation_enc /* FhG: harmonize perform_noise_estimation_enc and perform_noise_estimation_enc_ivas */
+0 −8
Original line number Diff line number Diff line
@@ -5769,14 +5769,6 @@ void TonalMDCTConceal_UpdateState_fx(
    const Word16 badBlock,
    const Word8 tonalConcealmentActive );

#ifndef FIX_2480_HARM_TONALMDCT
/* Prototype: legacy (EVS_MONO) tonal-component detector for MDCT packet-loss
   concealment; compiled only while FIX_2480_HARM_TONALMDCT is undefined.
   Writes the number of detected tonal peak indices through numIndices. */
void TonalMDCTConceal_Detect_fx(
    const TonalMDCTConcealPtr self, /*IN */
    const Word32 pitchLag,          /*IN */
    Word16 *numIndices,             /*OUT*/
    const Word16 element_mode       /* IN */
);
#endif // !FIX_2480_HARM_TONALMDCT

void TonalMDCTConceal_Apply_fx(
    const TonalMDCTConcealPtr hTonalMDCTConc, /*IN */
+0 −11
Original line number Diff line number Diff line
@@ -449,18 +449,7 @@ Word16 GetPLCModeDecision_fx(
                    {
                        pitch = L_add( st->old_fpitch, 0 ); /*Q16*/
                    }
#ifndef FIX_2480_HARM_TONALMDCT
                    IF( NE_16( st->element_mode, EVS_MONO ) )
                    {
                        TonalMDCTConceal_Detect_ivas_fx( st->hTonalMDCTConc, pitch, &numIndices, ( EQ_16( st->element_mode, IVAS_CPE_MDCT ) ? &( st->hTcxCfg->psychParamsTCX20 ) : st->hTcxCfg->psychParamsCurrent ), st->element_mode );
                    }
                    ELSE
                    {
                        TonalMDCTConceal_Detect_fx( st->hTonalMDCTConc, pitch, &numIndices, st->element_mode );
                    }
#else
                    TonalMDCTConceal_Detect_fx( st->hTonalMDCTConc, pitch, &numIndices, ( EQ_16( st->element_mode, IVAS_CPE_MDCT ) ? &( st->hTcxCfg->psychParamsTCX20 ) : st->hTcxCfg->psychParamsCurrent ), st->element_mode );
#endif // !FIX_2480_HARM_TONALMDCT


                    test();
+0 −156
Original line number Diff line number Diff line
@@ -927,148 +927,12 @@ static void CalcMDXT(
    return;
}

#ifndef FIX_2480_HARM_TONALMDCT
/*
 * TonalMDCTConceal_Detect_fx() - legacy EVS_MONO tonal-component detector for
 * MDCT-domain packet-loss concealment. Detects tonal peaks in the spectrum of
 * the second-last (good or concealed) frame so they can be continued during
 * concealment, and reports how many were found.
 *
 * hTonalMDCTConc : concealment state (last / second-last block data, stored
 *                  PCM of the second-last frame, tonal-component index table)
 * pitchLag       : pitch lag, 15Q16; a nonzero lag allows detection even when
 *                  the second-last block was itself concealed
 * numIndices     : OUT - number of detected tonal peak indices
 *                  (copied from hTonalMDCTConc->pTCI->numIndexes)
 * element_mode   : forwarded to the detection/refinement helpers
 *
 * Compiled only while FIX_2480_HARM_TONALMDCT is undefined; the harmonized
 * variant (taking psychParamsCurrent) replaces it otherwise.
 *
 * NOTE(review): the bare move16()/test() calls appear to be STL/BASOP
 * complexity (WMOPS) counters with no data effect - confirm against the
 * project's instrumentation conventions.
 */
void TonalMDCTConceal_Detect_fx(
    const TonalMDCTConcealPtr hTonalMDCTConc,
    const Word32 pitchLag, /*15Q16*/
    Word16 *numIndices,
    const Word16 element_mode )
{
    Word32 secondLastMDST[L_FRAME_MAX];
    Word32 secondLastMDCT[L_FRAME_MAX];
    Word16 secondLastMDCT_exp;
    /* powerSpectrum aliases secondLastMDST: the MDST buffer is reused as the
       power-spectrum workspace in the "second-last frame also lost" path */
    Word32 *powerSpectrum = secondLastMDST;
    Word16 i, powerSpectrum_exp, secondLastMDST_exp, s;
    Word16 nSamples;

    nSamples = hTonalMDCTConc->nSamples;
    move16();
    secondLastMDST_exp = 16; /*time signal Q-1*/
    move16();
    secondLastMDCT_exp = 16; /*time signal Q-1*/
    move16();
    test();
    test();
    test();
    test();
    test();
    /* Detection requires: both stored blocks valid, both matching the current
       frame length, and the second-last block either not concealed, already
       tonal-concealed, or a usable (nonzero) pitch lag. Otherwise the tonal
       index table is cleared in the ELSE branch at the bottom. */
    IF( hTonalMDCTConc->lastBlockData.blockIsValid && hTonalMDCTConc->secondLastBlockData.blockIsValid && ( EQ_16( hTonalMDCTConc->lastBlockData.nSamples, nSamples ) ) && ( EQ_16( hTonalMDCTConc->secondLastBlockData.nSamples, nSamples ) ) && ( !hTonalMDCTConc->secondLastBlockData.blockIsConcealed || hTonalMDCTConc->secondLastBlockData.tonalConcealmentActive || ( pitchLag != 0 ) ) )
    {
        /* Safety if the second last frame was concealed and tonal concealment was inactive */

        /* Peaks are (re)detected only while the last frame was still good;
           once the last frame is itself concealed, the existing index table
           is kept as-is. */
        IF( hTonalMDCTConc->lastBlockData.blockIsConcealed == 0 )
        {
            IF( hTonalMDCTConc->secondLastBlockData.tonalConcealmentActive == 0 )
            {
                /* Fresh detection: recompute MDST/MDCT of the stored
                   second-last PCM (flag 0 -> MDST buffer, flag 1 -> MDCT
                   buffer, judging by the output arguments - see CalcMDXT). */
                CalcMDXT( hTonalMDCTConc, 0, hTonalMDCTConc->secondLastPcmOut, secondLastMDST, &secondLastMDST_exp );
                CalcMDXT( hTonalMDCTConc, 1, hTonalMDCTConc->secondLastPcmOut, secondLastMDCT, &secondLastMDCT_exp );
                /* Find the highest nonzero spectral bin of the second-last
                   block (index of the last nonzero coefficient). */
                hTonalMDCTConc->nNonZeroSamples = 0;
                move16();
                FOR( i = 0; i < hTonalMDCTConc->nSamples; i++ )
                {
                    if ( hTonalMDCTConc->secondLastBlockData.spectralData[i] != 0 )
                    {
                        hTonalMDCTConc->nNonZeroSamples = i;
                        move16();
                    }
                }

                /* 23 is the maximum length of the MA filter in getEnvelope */
                hTonalMDCTConc->nNonZeroSamples = s_min( hTonalMDCTConc->nSamples, add( hTonalMDCTConc->nNonZeroSamples, 23 ) );
                move16();
                nSamples = hTonalMDCTConc->nNonZeroSamples;
                move16();

                /* Block-normalize MDST and MDCT and fold the shift into the
                   respective exponents. */
                s = getScaleFactor32( secondLastMDST, nSamples );

                FOR( i = 0; i < nSamples; i++ )
                {
                    secondLastMDST[i] = L_shl( secondLastMDST[i], s );
                    move32();
                }
                secondLastMDST_exp = sub( secondLastMDST_exp, s );
                move16();
                s = getScaleFactor32( secondLastMDCT, nSamples );

                FOR( i = 0; i < nSamples; i++ )
                {
                    secondLastMDCT[i] = L_shl( secondLastMDCT[i], s );
                    move32();
                }
                secondLastMDCT_exp = sub( secondLastMDCT_exp, s );
                move16();
                CalcPowerSpecAndDetectTonalComponents_fx( hTonalMDCTConc, secondLastMDST, secondLastMDST_exp, secondLastMDCT, secondLastMDCT_exp, pitchLag, NULL, element_mode );
            }
            ELSE
            {
                /* If the second last frame was also lost, it is expected that pastTimeSignal could hold a bit different signal (e.g. including fade-out) from the one stored in TonalMDCTConceal_SaveTimeSignal. */
                /* That is why we reuse the already stored information about the concealed spectrum in the second last frame */
                {
                    nSamples = hTonalMDCTConc->nNonZeroSamples;
                    move16();
                    /* Re-apply the stored scale factors to the saved
                       second-last power spectrum (shaped magnitude). */
                    mdct_shaping_16( hTonalMDCTConc->secondLastPowerSpectrum, hTonalMDCTConc->nSamplesCore, nSamples,
                                     hTonalMDCTConc->secondLastBlockData.scaleFactors, hTonalMDCTConc->secondLastBlockData.scaleFactors_exp,
                                     hTonalMDCTConc->secondLastBlockData.scaleFactors_max_e, powerSpectrum );
                }
                powerSpectrum_exp = getScaleFactor32( powerSpectrum, nSamples );
                powerSpectrum_exp = sub( powerSpectrum_exp, 3 ); /*extra 3 bits of headroom for MA filter in getEnvelope*/

                /* multFLOAT(powerSpectrum, powerSpectrum, powerSpectrum, nSamples); */
                /* Square the shaped magnitudes in place to obtain the power
                   spectrum, after shifting in the headroom computed above. */
                FOR( i = 0; i < nSamples; i++ )
                {
                    Word32 const t = L_shl( powerSpectrum[i], powerSpectrum_exp ); // Q(31-secondLastMDST_exp+powerSpectrum_exp)
                    powerSpectrum[i] = Mpy_32_32( t, t );                          // Q2*(31-secondLastMDST_exp+powerSpectrum_exp) -31
                    move32();
                }

                /* Refine the previously detected tonal components against the
                   last block's spectral data instead of redetecting.
                   NOTE(review): the last argument, Mpy_32_16_1(L_mult0(n,n),82),
                   looks like a threshold proportional to nSamples^2 (82/32768
                   ~ 0.0025) - confirm its exact semantics in
                   RefineTonalComponents_fx. */
                RefineTonalComponents_fx( (Word16 *) hTonalMDCTConc->pTCI->indexOfTonalPeak,
                                          (Word16 *) hTonalMDCTConc->pTCI->lowerIndex,
                                          (Word16 *) hTonalMDCTConc->pTCI->upperIndex,
                                          hTonalMDCTConc->pTCI->phaseDiff,
                                          hTonalMDCTConc->pTCI->phase_currentFramePredicted,
                                          (Word16 *) &hTonalMDCTConc->pTCI->numIndexes,
                                          hTonalMDCTConc->lastPitchLag,
                                          pitchLag,
                                          hTonalMDCTConc->lastBlockData.spectralData,
                                          add( hTonalMDCTConc->lastBlockData.spectralData_exp, hTonalMDCTConc->lastBlockData.gain_tcx_exp ),
                                          hTonalMDCTConc->lastBlockData.scaleFactors,
                                          hTonalMDCTConc->lastBlockData.scaleFactors_exp,
                                          hTonalMDCTConc->lastBlockData.scaleFactors_max_e,
                                          powerSpectrum,
                                          -1,
                                          nSamples,
                                          hTonalMDCTConc->nSamplesCore,
                                          extract_l( Mpy_32_16_1( L_mult0( hTonalMDCTConc->nSamples, hTonalMDCTConc->nSamples ), 82 ) ), element_mode, NULL );
            }
        }
    }
    ELSE
    {
        /* Preconditions not met: no tonal components are tracked. */
        hTonalMDCTConc->pTCI->numIndexes = 0;
        move16();
    }

    *numIndices = hTonalMDCTConc->pTCI->numIndexes;
    move16();

    return;
}
#endif // !FIX_2480_HARM_TONALMDCT
#ifndef FIX_2480_HARM_TONALMDCT
void TonalMDCTConceal_Detect_ivas_fx(
    const TonalMDCTConcealPtr hTonalMDCTConc,
    const Word32 pitchLag, /*15Q16*/
    Word16 *numIndices,
    const PsychoacousticParameters *psychParamsCurrent,
    Word16 element_mode )
#else
void TonalMDCTConceal_Detect_fx(
    const TonalMDCTConcealPtr hTonalMDCTConc,
    const Word32 pitchLag, /*15Q16*/
    Word16 *numIndices,
    const PsychoacousticParameters *psychParamsCurrent,
    Word16 element_mode )
#endif // !FIX_2480_HARM_TONALMDCT
{
    Word32 secondLastMDST[L_FRAME_MAX];
    set32_fx( secondLastMDST, 0, L_FRAME_MAX );
@@ -1086,12 +950,6 @@ void TonalMDCTConceal_Detect_fx(
    nSamples = hTonalMDCTConc->nSamples;
    move16();

#ifndef FIX_2480_HARM_TONALMDCT
    secondLastMDST_exp = sub( 16, hTonalMDCTConc->q_lastPcmOut ); /*time signal Q-1 - hTonalMDCTConc->q_lastPcmOut*/
    move16();
    secondLastMDCT_exp = sub( 16, hTonalMDCTConc->q_lastPcmOut ); /*time signal Q-1 - hTonalMDCTConc->q_lastPcmOut*/
    move16();
#else
    IF( EQ_16( element_mode, EVS_MONO ) )
    {
        secondLastMDST_exp = 16; /*time signal Q-1*/
@@ -1102,7 +960,6 @@ void TonalMDCTConceal_Detect_fx(
        secondLastMDST_exp = sub( 16, hTonalMDCTConc->q_lastPcmOut ); /*time signal Q-1 - hTonalMDCTConc->q_lastPcmOut*/
        secondLastMDCT_exp = sub( 16, hTonalMDCTConc->q_lastPcmOut ); /*time signal Q-1 - hTonalMDCTConc->q_lastPcmOut*/
    }
#endif // !FIX_2480_HARM_TONALMDCT
    move16();
    move16();
    test();
@@ -1136,15 +993,11 @@ void TonalMDCTConceal_Detect_fx(
                move16();
                nSamples = hTonalMDCTConc->nNonZeroSamples;
                move16();
#ifndef FIX_2480_HARM_TONALMDCT
                s = sub( getScaleFactor32( secondLastMDST, nSamples ), 1 );
#else
                s = getScaleFactor32( secondLastMDST, nSamples );
                IF( NE_16( element_mode, EVS_MONO ) )
                {
                    s = sub( s, 1 );
                }
#endif // !FIX_2480_HARM_TONALMDCT

                FOR( i = 0; i < nSamples; i++ )
                {
@@ -1153,15 +1006,11 @@ void TonalMDCTConceal_Detect_fx(
                }
                secondLastMDST_exp = sub( secondLastMDST_exp, s );
                move16();
#ifndef FIX_2480_HARM_TONALMDCT
                s = sub( getScaleFactor32( secondLastMDCT, nSamples ), 1 );
#else
                s = getScaleFactor32( secondLastMDCT, nSamples );
                IF( NE_16( element_mode, EVS_MONO ) )
                {
                    s = sub( s, 1 );
                }
#endif // !FIX_2480_HARM_TONALMDCT

                FOR( i = 0; i < nSamples; i++ )
                {
@@ -1179,16 +1028,11 @@ void TonalMDCTConceal_Detect_fx(
                Word16 temp_power_spectrum_q = 0;
                nSamples = hTonalMDCTConc->nNonZeroSamples;
                move16();
#ifndef FIX_2480_HARM_TONALMDCT
                Copy_Scale_sig_16_32_DEPREC( hTonalMDCTConc->secondLastPowerSpectrum, powerSpectrum, nSamples, Q15 );
                temp_power_spectrum_q = add( Q15, sub( 15, hTonalMDCTConc->secondLastPowerSpectrum_exp ) );
#else
                IF( NE_16( element_mode, EVS_MONO ) )
                {
                    Copy_Scale_sig_16_32_DEPREC( hTonalMDCTConc->secondLastPowerSpectrum, powerSpectrum, nSamples, Q15 );
                    temp_power_spectrum_q = add( Q15, sub( 15, hTonalMDCTConc->secondLastPowerSpectrum_exp ) );
                }
#endif // !FIX_2480_HARM_TONALMDCT
                IF( psychParamsCurrent == NULL )
                {
                    mdct_shaping_16( hTonalMDCTConc->secondLastPowerSpectrum, hTonalMDCTConc->nSamplesCore, nSamples,