Commit 34e96575 authored by Sandesh Venkatesh's avatar Sandesh Venkatesh
Browse files

Merge branch 'enc_excitation_funcs_acelp_path_integration' into 'main'

Integration of encod_unvoiced_ivas_fx, encod_tran_ivas_fx,...

See merge request !774
parents 8adcc59e 1183f046
Loading
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -3008,7 +3008,7 @@ void encod_gen_2sbfr(
    Word16 *bwe_exc,                   /* o  : excitation for SWB TBE                  */
    const Word16 tdm_Pitch_reuse_flag, /* i  : primary channel pitch reuse flag        */
    const Word16 tdm_Pri_pitch_buf[],  /* i  : pitch values for primary channel        */
    Word16 *Q_new );
    Word16 Q_new );

void acelp_fast_fx(
    BSTR_ENC_HANDLE hBstr,  /* i/o: encoder bitstream handle                      */
@@ -4439,7 +4439,7 @@ void tdm_low_rate_enc(
    /* i  : current frame ISF vector                */ // x2.56
    Word16 *tmp_noise,
    /* o  : long-term noise energy                  */ // Q8
    Word16 *Q_new );
    Word16 Q_new );

void tdm_low_rate_dec_fx(
    Decoder_State *st,     /* i/o: decoder static memory							 */
+14 −13
Original line number Diff line number Diff line
@@ -6524,11 +6524,12 @@ void prep_tbe_exc_ivas_fx(
        {
            L_tmp = L_mult( gain_code16, tmp_code_fx[i] ); /* Q9 + Q_exc + 1*/
            L_tmp = L_shl( L_tmp, 5 );                     /* Q9 + Q_exc + Q6*/
            L_tmp = L_mac( L_tmp, gain_pit_fx, bwe_exc_fx[i + i_subfr_fx * HIBND_ACB_L_FAC] ); /*Q15+Q_exc */
#ifdef BASOP_NOGLOB
            L_tmp = L_mac_o( L_tmp, gain_pit_fx, bwe_exc_fx[i + i_subfr_fx * HIBND_ACB_L_FAC], &Overflow ); /*Q15+Q_exc */
            L_tmp = L_shl_o( L_tmp, 1, &Overflow ); /*16+Q_exc */                                           /* saturation can occur here */
            bwe_exc_fx[i + i_subfr_fx * HIBND_ACB_L_FAC] = round_fx_o( L_tmp, &Overflow );                  /*Q_exc */
#else                                                                                                       /* BASOP_NOGLOB */
            L_tmp = L_mac( L_tmp, gain_pit_fx, bwe_exc_fx[i + i_subfr_fx * HIBND_ACB_L_FAC] ); /*Q15+Q_exc */
            L_tmp = L_shl( L_tmp, 1 ); /*16+Q_exc */                                           /* saturation can occur here */
            bwe_exc_fx[i + i_subfr_fx * HIBND_ACB_L_FAC] = round_fx( L_tmp );                  /*Q_exc */
#endif                                                                                                      /* BASOP_NOGLOB */
+175 −267

File changed.

Preview size limit exceeded, changes collapsed.

+29 −0
Original line number Diff line number Diff line
@@ -197,6 +197,35 @@ void E_ACELP_conv(
    }
}

/*-------------------------------------------------------------------*
 * E_ACELP_conv_ivas()
 *
 * Per-subframe deconvolution by forward substitution: starting from
 * cn2[k] = xn2[k], subtract the contribution of all previously
 * computed output samples filtered through h2 (see the reference
 * float code kept in the comments below). Accumulation is done in
 * 64 bits and saturated back to 32 bits before rounding.
 *-------------------------------------------------------------------*/
void E_ACELP_conv_ivas(
    const Word16 xn2[], /* i : target signal                          */
    const Word16 h2[],  /* i : impulse response, 4Q11                 */
    Word16 cn2[]        /* o : deconvolved output                     */
)
{
    Word16 i, k;
    Word32 L_tmp;
#ifdef BASOP_NOGLOB_DECLARE_LOCAL
    Flag Overflow = 0; /* local overflow flag for the *_o basic operators */
#endif
    FOR( k = 0; k < L_SUBFR; k++ )
    {
        /*cn2[k] = xn2[k];     */
        Word64 L_tmp_64;
        /* seed the 64-bit accumulator with xn2[k] scaled by 0x800, i.e. 1.0
           in 4Q11 — presumably chosen to match the h2 format so the msu0
           terms below are format-aligned; TODO confirm against callers */
        L_tmp_64 = W_deposit32_l( L_mult0( xn2[k], 0x800 ) );
        FOR( i = 0; i < k; i++ )
        {
            /*cn2[k]-=cn2[i]*h2[k-i];*/
            L_tmp_64 = W_msu0_16_16( L_tmp_64, cn2[i], h2[k - i] ); /*h2 4Q11*/
        }
        /* saturate the 64-bit accumulator back to 32 bits */
        L_tmp = W_sat_l( L_tmp_64 );
#ifdef BASOP_NOGLOB
        cn2[k] = round_fx_o( L_shl_o( L_tmp, 5, &Overflow ), &Overflow );
#else
        cn2[k] = round_fx( L_shl( L_tmp, 5 ) );
#endif
    }
}
void E_ACELP_build_code(
    Word16 nb_pulse,       /* i */
    const Word16 codvec[], /* i */
+494 −2
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
 * Local prototypes
 *-------------------------------------------------------------------*/
static void wrte_cv( BSTR_ENC_HANDLE hBstr, const Word16 nq, const Word16 i_ind, const Word16 kv_ind, UWord16 I, Word16 kv[], Word16 *bits );
static void wrte_cv_ivas( BSTR_ENC_HANDLE hBstr, const Word16 nq, const Word16 i_ind, const Word16 kv_ind, UWord16 I, Word16 kv[], Word16 *bits );

/*-------------------------------------------------------------------*
 * Function AVQ_cod()                                                *
@@ -408,7 +409,7 @@ void AVQ_encmux_fx(
                bits = sub( bits, 1 );
            }

            wrte_cv( hBstr, nq[k], i_ind, kv_ind, I[k], &kv[shl( k, 3 )], &bits );
            wrte_cv( hBstr, nq[k], i_ind, kv_ind, I[k], &kv[k * 8], &bits );
        }
    } /* for */
    /* Bit Saving Solution */
@@ -545,7 +546,7 @@ void AVQ_encmux_fx(
        move16();

        /* write codebook indices (rank I and event. Voronoi index kv) */
        wrte_cv( hBstr, nq[i], i_ind, kv_ind, I[i], &kv[shl( i, 3 )], &bits );
        wrte_cv( hBstr, nq[i], i_ind, kv_ind, I[i], &kv[i * 8], &bits );

        bits = sub( bits, dummy_bits );

@@ -566,6 +567,437 @@ void AVQ_encmux_fx(
    return;
}

/*-------------------------------------------------------------------*
 * AVQ_encmux_ivas_fx()
 *
 * Encode and multiplex the AVQ subvector indices into the bitstream:
 * for each subvector a codebook number nq (unary coded), a base
 * codebook rank I and, for Voronoi-extended codebooks, the Voronoi
 * indices kv[]. Subvectors are first processed in decreasing order of
 * estimated bit allocation so that bit budget overflows can be
 * resolved by forcing late subvectors to Q0. When avq_bit_sFlag is
 * set, a bit-saving mode re-orders the write sequence (svOrder) so
 * that the target subvector trgtSvPos is written last and leftover /
 * dummy bits can be reclaimed for it.
 *
 * NOTE(review): divisions and modulo are emulated with subtract loops
 * and Q15 multiplies; the asserts check them against the exact C
 * arithmetic and are debug-only.
 *-------------------------------------------------------------------*/
void AVQ_encmux_ivas_fx(
    BSTR_ENC_HANDLE hBstr, /* i/o: bitstream handle            */
    const Word16 extl,     /* i  : extension layer                                 */
    Word16 xriq[],         /* i/o: rounded subvectors [0..8*Nsv-1] followed
                               by rounded bit allocations [8*Nsv..8*Nsv+Nsv-1] */
    Word16 *nb_bits,       /* i/o: number of allocated bits                        */
    const Word16 Nsv,      /* i:   number of subvectors                            */
    Word16 nq_out[],       /* o  : AVQ nq index                                    */
    Word16 avq_bit_sFlag,  /* i  : flag for AVQ bit saving solution                */
    Word16 trgtSvPos       /* i  : target SV for AVQ bit savings                   */
)
{
    Word16 i, j = 0, bits, pos, pos_max, overflow, pos_tmp, bit_tmp;
    Word16 sort_idx[NSV_MAX], nq[NSV_MAX], kv[NSV_MAX * 8];
    Word16 *t;
    UWord16 I[NSV_MAX];
    Word16 nq_ind, i_ind, kv_ind;
    Word16 nq_est, unused_bits, unused_bits_idx;
    Word16 bitsMod, Nsvm1, Nsvm2;
    Word16 unusedbitsFlag;
    Word16 svOrder[NSV_MAX], k, nullVec, dummy_bits;
    Word16 tmp;

    /* select the bitstream index tags according to the extension layer */
    test();
    IF( EQ_16( extl, SWB_BWE_HIGHRATE ) || EQ_16( extl, FB_BWE_HIGHRATE ) )
    {
        nq_ind = IND_NQ2;
        move16();
        i_ind = IND_I2;
        move16();
        kv_ind = IND_KV2;
        move16();
    }
    ELSE
    {
        nq_ind = IND_NQ;
        move16();
        i_ind = IND_I;
        move16();
        kv_ind = IND_KV;
        move16();
    }

    /* mark all base codebook ranks as "not coded" */
    FOR( i = 0; i < NSV_MAX; i++ )
    {
        I[i] = (UWord16) -1;
        move16();
    }
    unusedbitsFlag = 0;
    bitsMod = 0;
    move16();
    move16();
    /*-----------------------------------------------------------------
     * Encode subvectors and fix possible overflows in total bit budget,
     * i.e. find for each subvector a codebook index nq (nq=0,2,3,4,...,NSV_MAX),
     * a base codebook index (I), and a Voronoi index (kv)
     *-----------------------------------------------------------------*/

    /* sort subvectors by estimated bit allocations in decreasing order */
    t = kv;
    move16(); /* reuse vector to save memory */
    move16(); /*ptr init*/
    FOR( i = 0; i < Nsv; i++ )
    {
        t[i] = xriq[8 * Nsv + i]; /* estimated bit allocation of subvector i */
        move16();
    }

    /* selection sort: sort_idx[] receives subvector indices in
       decreasing order of estimated bits; used entries are marked -1 */
    FOR( i = 0; i < Nsv; i++ )
    {
        bits = t[0];
        move16();
        pos = 0;
        move16();
        FOR( j = 1; j < Nsv; j++ )
        {
            if ( GT_16( t[j], bits ) )
            {
                pos = j;
                move16();
            }
            bits = s_max( t[j], bits );
        }
        sort_idx[i] = pos;
        move16();
        t[pos] = -1;
        move16();
    }

    /* compute multi-rate indices and avoid bit budget overflow */
    pos_max = 0;
    move16();
    bits = 0;
    move16();
    FOR( i = 0; i < Nsv; i++ )
    {
        /* find vector to quantize (criteria: nb of estimated bits) */
        pos = sort_idx[i];
        move16();

        /* compute multi-rate index of rounded subvector (nq,I,kv[]) */
        re8_cod_fx( &xriq[pos * 8], &nq[pos], &I[pos], &kv[8 * pos] );

        IF( nq[pos] > 0 )
        {
            j = pos_max;
            move16();
            j = s_max( pos, j );

            /* compute (number of bits -1) to describe Q #nq */
            IF( GE_16( nq[pos], 2 ) )
            {
                overflow = sub( i_mult2( nq[pos], 5 ), 1 ); /* 5*nq - 1 bits */
            }
            ELSE
            {
                overflow = 0;
                move16();
            }

            /* check for overflow and compute number of bits-1 (n) */
            IF( GT_16( add( bits, add( overflow, j ) ), *nb_bits ) )
            {
                /* if budget overflow: zero the subvector and force Q0 */
                pos_tmp = add( shl( pos, 3 ), 8 ); /*(pos*8)+8*/
                FOR( j = pos * 8; j < pos_tmp; j++ )
                {
                    xriq[j] = 0;
                    move16();
                }
                nq[pos] = 0;
                move16(); /* force Q0 */
            }
            ELSE
            {
                bits = add( bits, overflow );
                pos_max = j;
                move16(); /* update index of the last described subvector */
            }
        }
    }
    /* build the write order: subvectors 0,1,... in natural order with the
       target subvector moved to the last slot; avq_bit_sFlag == 2
       additionally skips one position (j starts at i+1) */
    nullVec = 0;
    Nsvm1 = sub( Nsv, 1 );
    Nsvm2 = sub( Nsvm1, 1 );
    dummy_bits = 0;
    svOrder[Nsvm1] = trgtSvPos;
    svOrder[0] = 0;
    svOrder[1] = 1;
    i = 2;
    j = i;
    move16();
    move16();
    move16();
    move16();
    move16();
    move16();
    if ( EQ_16( avq_bit_sFlag, 2 ) )
    {
        j = add( i, 1 );
    }
    WHILE( LT_16( i, Nsvm1 ) )
    {
        svOrder[i] = j;
        move16();
        i++; /*ptr*/
        j = add( j, 1 );
    }
    /* write indexes to the bitstream */
    /* ============================== */

    bits = *nb_bits;
    move16();
    overflow = 0;
    move16();
    FOR( i = 0; i < Nsv; i++ )
    {
        k = svOrder[i];
        move16();
        test();
        test();
        test();
        test();
        test();
        /* tmp = bits % 5, computed by repeated subtraction (no division op) */
        tmp = bits;
        move16();
        WHILE( GE_16( tmp, 5 ) )
        {
            tmp = sub( tmp, 5 );
        }
        assert( tmp == bits % 5 );
        IF( EQ_16( avq_bit_sFlag, 2 ) && EQ_16( tmp, 4 ) && GT_16( bits, 8 ) && LT_16( bits, 30 ) && GE_16( k, trgtSvPos ) && LT_16( i, Nsvm1 ) )
        {
            /* re-estimate the remaining write order and drop to mode 1 */
            ordr_esti( sub( Nsv, i ), &trgtSvPos, &svOrder[i], Nsv );
            k = svOrder[i];
            move16();
            avq_bit_sFlag = 1;
            move16();
        }

        test();
        IF( EQ_16( k, trgtSvPos ) && avq_bit_sFlag > 0 )
        {
            test();
            test();
            /* target subvector reached: either give up on bit saving or
               leave the loop and handle it in the bit-saving block below */
            IF( EQ_16( sub( *nb_bits, bits ), 7 ) || LT_16( bits, BIT_SAVING_LOW_THR ) || GE_16( bits, BIT_SAVING_HIGH_THR ) )
            {
                avq_bit_sFlag = 0;
                move16();
            }
            ELSE
            {
                BREAK;
            }
        }

        if ( EQ_16( sub( i_mult2( 5, nq[k] ), 1 ), bits ) ) /* check the overflow */
        {
            overflow = 1;
            move16();
        }

        IF( GT_16( bits, 8 ) )
        {
            /* write the unary code for nq[i] */
            j = sub( nq[k], 1 );
            IF( nq[k] > 0 )
            {
                /* write the unary code (runs of 1s in 16-bit chunks) */
                FOR( ; j > 16; j -= 16 )
                {
                    push_indice( hBstr, nq_ind, 65535, 16 );
                    bits = sub( bits, 16 );
                }

                IF( j > 0 )
                {
                    push_indice( hBstr, nq_ind, extract_l( L_sub( L_shl( 1L, j ), 1L ) ), j );
                    bits = sub( bits, j );
                }
            }
            IF( !overflow )
            {
                /* write the stop bit */
                push_indice( hBstr, nq_ind, 0, 1 );
                bits = sub( bits, 1 );
            }

            /* write rank I and eventual Voronoi indices for this subvector */
            wrte_cv_ivas( hBstr, nq[k], i_ind, kv_ind, I[k], &kv[k * 8], &bits );
        }
    } /* for */
    /* Bit Saving Solution */
    test();
    IF( avq_bit_sFlag > 0 && bits > 8 )
    {
        // PMT("code not validated yet")
        // bitsMod = bits%5;
        bitsMod = bits;
        WHILE( GE_16( bitsMod, 5 ) )
        {
            bitsMod = sub( bitsMod, 5 );
        }
        assert( bitsMod == bits % 5 );
        i = svOrder[Nsvm1]; /* the target subvector, written last */
        move16();
        IF( NE_16( i, Nsvm1 ) )
        {
            /* count null (Q0) subvectors from the target position onwards */
            nullVec = 0;
            move16();
            FOR( j = i; j < Nsv - 1; j++ )
            {
                if ( nq[svOrder[j]] == 0 )
                {
                    nullVec = add( nullVec, 1 );
                }
            }
            /*nq_est = bits / 5;*/
            nq_est = mult( bits, 6554 ); /* 6554 ~ 1/5 in Q15 */
            assert( nq_est == bits / 5 );

            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();
            IF( ( bitsMod > 0 || ( EQ_16( nullVec, 4 ) && EQ_16( nq_est, 5 ) ) ) && NE_16( bitsMod, 4 ) && GE_16( add( bits, nullVec ), add( add( shl( nq_est, 2 ), nq_est ), 4 ) ) /*5 * nq_est + 4*/ && nq[svOrder[Nsvm2]] == 0 ) /* detect need for dummy bits */
            {
                dummy_bits = sub( 5, bitsMod );
                bits = add( bits, dummy_bits ); /* add dummy bits */
                bitsMod = 0;
                move16();
            }
            ELSE IF( nq_est > 4 && ( ( bitsMod == 0 && nullVec > 3 && nullVec < 6 ) || ( bitsMod == 4 && nullVec == 5 ) ) && nq[svOrder[Nsvm2]] == 0 ) /* wasted bits 4, 5 for nq 6,7..*/
            {
                overflow = 0;
                move16();
                /* tmp = (bitsMod + nullVec) % 5 via repeated subtraction */
                tmp = add( bitsMod, nullVec );
                WHILE( GE_16( tmp, 5 ) )
                {
                    tmp = sub( tmp, 5 );
                }
                assert( tmp == add( bitsMod, nullVec ) % 5 );
                if ( tmp != 0 )
                {
                    overflow = 1;
                    move16();
                }
                dummy_bits = add( nullVec, overflow );
                bits = add( bits, dummy_bits ); /* add dummy bits */
                bitsMod = 0;
                move16();
            }
        }

        overflow = 1;
        move16();
        IF( NE_16( bitsMod, 4 ) )
        {
            overflow = 0;
            move16();
            bits = sub( bits, bitsMod ); /* drop the remainder bits for now */
        }
        bits = add( bits, overflow ); /*add fake bit */
        /* unused_bits = bits - 5*nq[i] */
        unused_bits = sub( bits, add( shl( nq[i], 2 ), nq[i] ) );
        if ( nq[i] == 0 ) /*no bit savings*/
        {
            unused_bits = sub( unused_bits, 1 ); /*Stop Bit*/
        }
        /*unused_bits_idx = (int16_t)unused_bits / 5;*/
        /* signed division by 5, truncated toward zero as in C */
        IF( unused_bits >= 0 )
        {
            unused_bits_idx = mult( unused_bits, 6554 );
        }
        ELSE
        {
            unused_bits_idx = negate( mult( negate( unused_bits ), 6554 ) );
        }
        assert( unused_bits_idx == unused_bits / 5 );
        unusedbitsFlag = 0;
        move16();
        IF( dummy_bits == 0 )
        {
            test();
            test();
            /* nudge the unused-bits index by +/-1 near the low threshold;
               unusedbitsFlag remembers the correction for nq_est below */
            IF( EQ_16( unused_bits_idx, 1 ) && GT_16( bits, BIT_SAVING_LOW_THR ) )
            {
                unused_bits_idx = 0;
                unusedbitsFlag = 1;
                move16();
                move16();
            }
            ELSE IF( unused_bits_idx == 0 && GT_16( bits, BIT_SAVING_LOW_THR ) )
            {
                unused_bits_idx = 1;
                unusedbitsFlag = -1;
                move16();
                move16();
            }
        }

        j = unused_bits_idx;
        move16();
        /*Encode Unused Bit Unary Codeword */
        IF( j > 0 )
        {
            /* write the unary code */
            push_indice( hBstr, nq_ind, u_extract_l( L_sub( L_shl_sat( 1, j ), 1 ) ), j );
            assert( abs( ( 1 << j ) - 1 ) <= 65536 );
        }

        IF( nq[i] != 0 )
        {
            /* write the stop bit */
            push_indice( hBstr, nq_ind, 0, 1 );
        }

        /*Compute AVQ code book number from unused Bits */
        // bit_tmp = add( unusedbitsFlag, unused_bits_idx );
        /*nq_est = (int16_t)ceil(0.2f * (bits - 5 * (unusedbitsFlag + unused_bits_idx)));*/
        // nq_est = mult( 6554, sub( bits, add( shl( bit_tmp, 2 ), bit_tmp ) ) );
        /* ceil(bit_tmp / 5) by repeated subtraction */
        bit_tmp = sub( bits, imult1616( 5, add( unusedbitsFlag, unused_bits_idx ) ) );
        nq_est = 0;
        WHILE( bit_tmp > 0 )
        {
            nq_est = add( nq_est, 1 );
            bit_tmp = sub( bit_tmp, 5 );
        }
        assert( (int16_t) ceil( 0.2f * ( bits - 5 * ( unusedbitsFlag + unused_bits_idx ) ) ) == nq_est );

        if ( EQ_16( nq_est, 1 ) )
        {
            nq_est = 0; /* nq=1 is not a valid codebook number; fall back to Q0 */
            move16();
        }
        bits = sub( bits, overflow ); /* remove the fake bit again */

        bits = sub( bits, j ); /* account for the unary codeword */

        if ( nq_est != 0 )
        {
            bits = sub( bits, 1 ); /* stop bit */
        }
        nq[i] = nq_est;
        move16();

        /* write codebook indices (rank I and event. Voronoi index kv) */
        wrte_cv_ivas( hBstr, nq[i], i_ind, kv_ind, I[i], &kv[i * 8], &bits );

        bits = sub( bits, dummy_bits );

        if ( NE_16( bitsMod, 4 ) )
        {
            bits = add( bits, bitsMod ); /* restore the dropped remainder */
        }
    }
    *nb_bits = bits;
    move16();

    /* export the final per-subvector codebook numbers */
    FOR( i = 0; i < Nsv; i++ )
    {
        nq_out[i] = nq[i];
        move16();
    }

    return;
}

/*-------------------------------------------------------------------*
 * Function AVQ_cod_lpc_fx()                                            *
@@ -715,3 +1147,63 @@ static void wrte_cv(
    move16();
    return;
}

/*-------------------------------------------------------------------*
 * wrte_cv_ivas()
 *
 * Write the codebook description of one AVQ subvector to the
 * bitstream: the base codebook rank I (4*nq bits for Q2..Q4; 16 bits
 * for even, 12 bits for odd Voronoi-extended codebooks) and, for
 * nq >= 5, the eight Voronoi indices kv[] of "pos" bits each.
 * The bits consumed are subtracted from *nbits.
 *
 * Fixes vs. previous revision: locals use the codec's Word16 type
 * instead of plain int16_t (consistent with the rest of the file and
 * the basic-operator API), and the I/kv parameter direction comments
 * are corrected (they are inputs here, not outputs).
 *-------------------------------------------------------------------*/
static void wrte_cv_ivas(
    BSTR_ENC_HANDLE hBstr, /* i/o: encoder bitstream handle         */
    const Word16 nq,       /* i  : AVQ nq index                     */
    const Word16 i_ind,    /* i  : Base Bitstream index             */
    const Word16 kv_ind,   /* i  : Voronoi Bitstream index          */
    UWord16 I,             /* i  : rank I code book index           */
    Word16 kv[],           /* i  : Voronoi index kv                 */
    Word16 *nbits          /* i/o: remaining bit budget             */
)
{
    Word16 pos, j;
    Word16 bits, nq4;

    bits = *nbits;
    move16();

    /* write codebook indices (rank I and event. Voronoi index kv) */
    IF( nq == 0 ) /* Q0 */
    {
        /* nothing to write */
    }
    ELSE IF( LT_16( nq, 5 ) ) /* Q2, Q3, Q4 */
    {
        nq4 = shl( nq, 2 ); /* rank I uses 4*nq bits */
        push_indice( hBstr, i_ind, I, nq4 );
        bits = sub( bits, nq4 );
    }
    ELSE IF( EQ_16( s_and( nq, 1 ), 0 ) ) /* Q4 + Voronoi extensions r=1,2,3,... */
    {
        push_indice( hBstr, i_ind, I, 4 * 4 );
        bits = sub( bits, 4 * 4 );

        /*pos = (int16_t)(nq / 2 - 2);*/ /* Voronoi order determination */
        pos = sub( shr( nq, 1 ), 2 );
        FOR( j = 0; j < 8; j++ )
        {
            push_indice( hBstr, kv_ind, kv[j], pos );
        }

        bits = sub( bits, shl( pos, 3 ) ); /* 8 indices of pos bits each */
    }
    ELSE /* Q3 + Voronoi extensions r=1,2,3,... */
    {
        push_indice( hBstr, i_ind, I, 4 * 3 );
        bits = sub( bits, 4 * 3 );

        /*pos = (int16_t)(nq / 2 - 1);*/ /* Voronoi order determination */
        pos = sub( shr( nq, 1 ), 1 );
        FOR( j = 0; j < 8; j++ )
        {
            push_indice( hBstr, kv_ind, kv[j], pos );
        }

        bits = sub( bits, shl( pos, 3 ) ); /* 8 indices of pos bits each */
    }

    *nbits = bits;
    move16();
    return;
}
Loading