Commit 07d7b238 authored by multrus's avatar multrus
Browse files

[cleanup] accept REUSE_EVS_BE_ACELP_AVQ

parent 5c4be483
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -78,7 +78,6 @@



#define REUSE_EVS_BE_ACELP_AVQ
#define REUSE_EVS_BE_GAUSS          
#define REUSE_EVS_BE_GAINQ          
#define REUSE_EVS_BE_ACELP_PITCH
+0 −519
Original line number Diff line number Diff line
@@ -14,9 +14,6 @@
 * Local prototypes
 *-------------------------------------------------------------------*/
static void wrte_cv( BSTR_ENC_HANDLE hBstr, const Word16 nq, const Word16 i_ind, const Word16 kv_ind, UWord16 I, Word16 kv[], Word16 *bits );
#ifndef REUSE_EVS_BE_ACELP_AVQ
static void wrte_cv_ivas_fx( BSTR_ENC_HANDLE hBstr, const Word16 nq, const Word16 i_ind, const Word16 kv_ind, UWord16 I, Word16 kv[], Word16 *bits );
#endif
/*-------------------------------------------------------------------*
 * Function AVQ_cod()                                                *
 *                                                                   *
@@ -183,9 +180,7 @@ void AVQ_encmux_fx(
    Word16 bitsMod, Nsvm1, Nsvm2;
    Word16 unusedbitsFlag;
    Word16 svOrder[NSV_MAX], k, nullVec, dummy_bits;
#ifdef REUSE_EVS_BE_ACELP_AVQ
    Word16 tmp;
#endif
    test();
    IF( EQ_16( extl, SWB_BWE_HIGHRATE ) || EQ_16( extl, FB_BWE_HIGHRATE ) )
    {
@@ -347,7 +342,6 @@ void AVQ_encmux_fx(
        test();
        test();
        test();
#ifdef REUSE_EVS_BE_ACELP_AVQ
        tmp = bits;
        move16();
        WHILE( GE_16( tmp, 5 ) )
@@ -357,9 +351,6 @@ void AVQ_encmux_fx(
        assert( tmp == bits % 5 );
        IF( EQ_16( avq_bit_sFlag, 2 ) && EQ_16( tmp, 4 ) && GT_16( bits, 8 ) && LT_16( bits, 30 ) && GE_16( k, trgtSvPos ) && LT_16( i, Nsvm1 ) )

#else
        IF( EQ_16( avq_bit_sFlag, 2 ) && EQ_16( s_and( bits, 4 ), 4 ) && GT_16( bits, 8 ) && LT_16( bits, 30 ) && GE_16( k, trgtSvPos ) && LT_16( i, Nsvm1 ) )
#endif
        {
            ordr_esti( sub( Nsv, i ), &trgtSvPos, &svOrder[i], Nsv );
            k = svOrder[i];
@@ -423,10 +414,6 @@ void AVQ_encmux_fx(
    test();
    IF( avq_bit_sFlag > 0 && GT_16( bits, 8 ) )
    {
#ifndef REUSE_EVS_BE_ACELP_AVQ
        // PMT("code not validated yet")
        bitsMod = s_and( bits, 4 /*bits%5*/ );
#else
        /* bitsMod = bits % 5;*/
        bitsMod = bits;
        move16();
@@ -435,7 +422,6 @@ void AVQ_encmux_fx(
            bitsMod = sub( bitsMod, 5 );
        }
        assert( bitsMod == bits % 5 );
#endif
        i = svOrder[Nsvm1];
        move16();
        IF( NE_16( i, Nsvm1 ) )
@@ -471,15 +457,10 @@ void AVQ_encmux_fx(
                bitsMod = 0;
                move16();
            }
#ifdef REUSE_EVS_BE_ACELP_AVQ
            ELSE IF( GT_16( nq_est, 4 ) && ( ( bitsMod == 0 && GT_16( nullVec, 3 ) && LT_16( nullVec, 6 ) ) || ( EQ_16( bitsMod, 4 ) && EQ_16( nullVec, 5 ) ) ) && nq[svOrder[Nsvm2]] == 0 ) /* wasted bits 4, 5 for nq 6,7..*/
#else
            ELSE IF( nq_est > 4 && ( ( bitsMod == 0 && GT_16( nullVec, 3 ) && GT_16( nullVec, 6 ) ) || ( EQ_16( bitsMod, 4 ) && EQ_16( nullVec, 5 ) ) ) && nq[svOrder[Nsvm2]] == 0 ) /* wasted bits 4, 5 for nq 6,7..*/
#endif
            {
                overflow = 0;
                move16();
#ifdef REUSE_EVS_BE_ACELP_AVQ
                tmp = add( bitsMod, nullVec );
                WHILE( tmp >= 5 )
                {
@@ -487,9 +468,6 @@ void AVQ_encmux_fx(
                }
                assert( tmp == add( bitsMod, nullVec ) % 5 );
                if ( tmp != 0 )
#else
                if ( s_and( add( bitsMod, nullVec ), 4 ) != 0 )
#endif
                {
                    overflow = 1;
                    move16();
@@ -516,7 +494,6 @@ void AVQ_encmux_fx(
            unused_bits = sub( unused_bits, 1 ); /*Stop Bit*/
        }
        /*unused_bits_idx = (int16_t)unused_bits / 5;*/
#ifdef REUSE_EVS_BE_ACELP_AVQ
        IF( unused_bits >= 0 )
        {
            unused_bits_idx = mult( unused_bits, 6554 /*1/5 in Q15*/ );
@@ -525,9 +502,6 @@ void AVQ_encmux_fx(
        {
            unused_bits_idx = negate( mult( negate( unused_bits ), 6554 /*1/5 in Q15*/ ) );
        }
#else
        unused_bits_idx = mult( unused_bits, 6554 );
#endif
        assert( unused_bits_idx == unused_bits / 5 );
        unusedbitsFlag = 0;
        move16();
@@ -557,11 +531,7 @@ void AVQ_encmux_fx(
        IF( j > 0 )
        {
            /* write the unary code */
#ifdef REUSE_EVS_BE_ACELP_AVQ
            push_indice( hBstr, nq_ind, u_extract_l( L_sub( L_shl_sat( 1, j ), 1 ) ), j );
#else
            push_indice( hBstr, nq_ind, sub( shl( 1, j ), 1 ), j );
#endif
        }

        IF( nq[i] != 0 )
@@ -613,434 +583,6 @@ void AVQ_encmux_fx(

    return;
}
#ifndef REUSE_EVS_BE_ACELP_AVQ
/*-------------------------------------------------------------------*
 * AVQ_encmux_ivas_fx()
 *
 * Encode and multiplex AVQ (algebraic vector quantizer) indices into
 * the bitstream: for each 8-sample subvector, a codebook number nq
 * (unary coded), a base codebook rank I and an optional Voronoi index
 * kv are written via push_indice(). Subvectors are processed in
 * decreasing order of estimated bit demand so that bit-budget
 * overflows force the *cheapest* vectors to Q0 (zero vector).
 * When avq_bit_sFlag > 0, a bit-saving mode re-orders the subvectors
 * and spends leftover/dummy bits on a designated target subvector
 * (trgtSvPos).
 *
 * NOTE(review): this is the IVAS-specific variant; it is compiled out
 * (and AVQ_encmux_fx used instead) when REUSE_EVS_BE_ACELP_AVQ is
 * defined — the two are intended to be bit-exact equivalents.
 * The WHILE-subtract loops replace C '%' and '/' by 5 to stay within
 * the basic-operator set; the asserts cross-check them against the
 * native operators.
 *-------------------------------------------------------------------*/
void AVQ_encmux_ivas_fx(
    BSTR_ENC_HANDLE hBstr, /* i/o: bitstream handle            */
    const Word16 extl,     /* i  : extension layer                                 */
    Word16 xriq[],         /* i/o: rounded subvectors [0..8*Nsv-1] followed
                               by rounded bit allocations [8*Nsv..8*Nsv+Nsv-1] */
    Word16 *nb_bits,       /* i/o: number of allocated bits                        */
    const Word16 Nsv,      /* i:   number of subvectors                            */
    Word16 nq_out[],       /* o  : AVQ nq index                                    */
    Word16 avq_bit_sFlag,  /* i  : flag for AVQ bit saving solution                */
    Word16 trgtSvPos       /* i  : target SV for AVQ bit savings                   */
)
{
    Word16 i, j = 0, bits, pos, pos_max, overflow, pos_tmp, bit_tmp;
    Word16 sort_idx[NSV_MAX], nq[NSV_MAX], kv[NSV_MAX * 8];
    Word16 *t;
    UWord16 I[NSV_MAX];
    Word16 nq_ind, i_ind, kv_ind;
    Word16 nq_est, unused_bits, unused_bits_idx;
    Word16 bitsMod, Nsvm1, Nsvm2;
    Word16 unusedbitsFlag;
    Word16 svOrder[NSV_MAX], k, nullVec, dummy_bits;
    Word16 tmp;

    /* select the bitstream index set: high-rate BWE extensions use
       their own (IND_*2) indices, everything else the generic ones */
    test();
    IF( EQ_16( extl, SWB_BWE_HIGHRATE ) || EQ_16( extl, FB_BWE_HIGHRATE ) )
    {
        nq_ind = IND_NQ2;
        move16();
        i_ind = IND_I2;
        move16();
        kv_ind = IND_KV2;
        move16();
    }
    ELSE
    {
        nq_ind = IND_NQ;
        move16();
        i_ind = IND_I;
        move16();
        kv_ind = IND_KV;
        move16();
    }

    /* mark all rank indices as "not encoded" */
    FOR( i = 0; i < NSV_MAX; i++ )
    {
        I[i] = (UWord16) -1;
        move16();
    }
    unusedbitsFlag = 0;
    bitsMod = 0;
    move16();
    move16();
    /*-----------------------------------------------------------------
     * Encode subvectors and fix possible overflows in total bit budget,
     * i.e. find for each subvector a codebook index nq (nq=0,2,3,4,...,NSV_MAX),
     * a base codebook index (I), and a Voronoi index (kv)
     *-----------------------------------------------------------------*/

    /* sort subvectors by estimated bit allocations in decreasing order */
    t = kv;
    /* reuse vector to save memory */
    /*ptr init*/
    FOR( i = 0; i < Nsv; i++ )
    {
        t[i] = xriq[8 * Nsv + i];
        move16();
    }

    /* selection sort on the copied bit allocations: each pass finds the
       current maximum, records its position, then invalidates it (-1) */
    FOR( i = 0; i < Nsv; i++ )
    {
        bits = t[0];
        move16();
        pos = 0;
        move16();
        FOR( j = 1; j < Nsv; j++ )
        {
            if ( GT_16( t[j], bits ) )
            {
                pos = j;
                move16();
            }
            bits = s_max( t[j], bits );
        }
        sort_idx[i] = pos;
        move16();
        t[pos] = -1;
        move16();
    }

    /* compute multi-rate indices and avoid bit budget overflow */
    pos_max = 0;
    move16();
    bits = 0;
    move16();
    FOR( i = 0; i < Nsv; i++ )
    {
        /* find vector to quantize (criteria: nb of estimated bits) */
        pos = sort_idx[i];
        move16();

        /* compute multi-rate index of rounded subvector (nq,I,kv[]) */
        re8_cod_fx( &xriq[pos * 8], &nq[pos], &I[pos], &kv[8 * pos] );

        IF( nq[pos] > 0 )
        {
            j = pos_max;
            move16();
            j = s_max( pos, j );

            /* compute (number of bits -1) to describe Q #nq */
            IF( GE_16( nq[pos], 2 ) )
            {
                /* 5*nq - 1 bits: unary nq code + rank + Voronoi payload */
                overflow = sub( i_mult2( nq[pos], 5 ), 1 );
            }
            ELSE
            {
                overflow = 0;
                move16();
            }

            /* check for overflow and compute number of bits-1 (n) */
            IF( GT_16( add( bits, add( overflow, j ) ), *nb_bits ) )
            {
                /* if budget overflow: zero this subvector and force Q0 */
                pos_tmp = add( shl( pos, 3 ), 8 ); /*(pos*8)+8*/
                FOR( j = pos * 8; j < pos_tmp; j++ )
                {
                    xriq[j] = 0;
                    move16();
                }
                nq[pos] = 0;
                move16(); /* force Q0 */
            }
            ELSE
            {
                bits = add( bits, overflow );
                pos_max = j;
                move16(); /* update index of the last described subvector */
            }
        }
    }
    /* build the transmission order svOrder[]: identity order, except the
       bit-saving target subvector is moved to the last slot; with
       avq_bit_sFlag == 2 the intermediate entries are shifted by one */
    nullVec = 0;
    Nsvm1 = sub( Nsv, 1 );
    Nsvm2 = sub( Nsvm1, 1 );
    dummy_bits = 0;
    svOrder[Nsvm1] = trgtSvPos;
    svOrder[0] = 0;
    svOrder[1] = 1;
    i = 2;
    j = i;
    move16();
    move16();
    move16();
    move16();
    move16();
    move16();
    move16();
    if ( EQ_16( avq_bit_sFlag, 2 ) )
    {
        j = add( i, 1 );
    }
    WHILE( LT_16( i, Nsvm1 ) )
    {
        svOrder[i] = j;
        move16();
        i++; /*ptr*/
        j = add( j, 1 );
    }
    /* write indexes to the bitstream */
    /* ============================== */

    bits = *nb_bits;
    move16();
    overflow = 0;
    move16();
    FOR( i = 0; i < Nsv; i++ )
    {
        k = svOrder[i];
        move16();
        test();
        test();
        test();
        test();
        test();
        /* tmp = bits % 5 without the '%' operator (basic-op loop) */
        tmp = bits;
        move16();
        WHILE( GE_16( tmp, 5 ) )
        {
            tmp = sub( tmp, 5 );
        }
        assert( tmp == bits % 5 );
        /* re-estimate the target SV position when 4 bits would be wasted */
        IF( EQ_16( avq_bit_sFlag, 2 ) && EQ_16( tmp, 4 ) && GT_16( bits, 8 ) && LT_16( bits, 30 ) && GE_16( k, trgtSvPos ) && LT_16( i, Nsvm1 ) )
        {
            ordr_esti( sub( Nsv, i ), &trgtSvPos, &svOrder[i], Nsv );
            k = svOrder[i];
            move16();
            avq_bit_sFlag = 1;
            move16();
        }

        test();
        IF( EQ_16( k, trgtSvPos ) && avq_bit_sFlag > 0 )
        {
            test();
            test();
            /* disable bit saving when too few/too many bits remain;
               otherwise defer the target SV to the dedicated path below */
            IF( EQ_16( sub( *nb_bits, bits ), 7 ) || LT_16( bits, BIT_SAVING_LOW_THR ) || GE_16( bits, BIT_SAVING_HIGH_THR ) )
            {
                avq_bit_sFlag = 0;
                move16();
            }
            ELSE
            {
                BREAK;
            }
        }

        if ( EQ_16( sub( i_mult2( 5, nq[k] ), 1 ), bits ) ) /* check the overflow */
        {
            overflow = 1;
            move16();
        }

        IF( GT_16( bits, 8 ) )
        {
            /* write the unary code for nq[i] */
            j = sub( nq[k], 1 );
            IF( nq[k] > 0 )
            {
                /* write the unary code, in chunks of at most 16 one-bits */
                FOR( ; j > 16; j -= 16 )
                {
                    push_indice( hBstr, nq_ind, 65535, 16 );
                    bits = sub( bits, 16 );
                }

                IF( j > 0 )
                {
                    /* (1<<j)-1 = j one-bits */
                    push_indice( hBstr, nq_ind, extract_l( L_sub( L_shl( 1L, j ), 1L ) ), j );
                    bits = sub( bits, j );
                }
            }
            IF( !overflow )
            {
                /* write the stop bit */
                push_indice( hBstr, nq_ind, 0, 1 );
                bits = sub( bits, 1 );
            }

            wrte_cv( hBstr, nq[k], i_ind, kv_ind, I[k], &kv[k * 8], &bits );
        }
    } /* for */
    /* Bit Saving Solution: encode the deferred target subvector using the
       remaining bits (plus possible dummy bits) */
    test();
    IF( avq_bit_sFlag > 0 && GT_16( bits, 8 ) )
    {
        // PMT("code not validated yet")
        // bitsMod = bits%5;
        bitsMod = bits;
        move16();
        WHILE( bitsMod >= 5 )
        {
            bitsMod = sub( bitsMod, 5 );
        }
        assert( bitsMod == bits % 5 );
        i = svOrder[Nsvm1];
        move16();
        IF( NE_16( i, Nsvm1 ) )
        {
            /* count zero (Q0) subvectors between the target SV and the end */
            nullVec = 0;
            move16();
            FOR( j = i; j < Nsv - 1; j++ )
            {
                IF( nq[svOrder[j]] == 0 )
                {
                    nullVec = add( nullVec, 1 );
                }
            }
            /*nq_est = bits / 5;*/
            nq_est = mult( bits, 6554 /*1/5 in Q15*/ );
            assert( nq_est == bits / 5 );

            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();
            test();

            IF( ( bitsMod > 0 || ( EQ_16( nullVec, 4 ) && EQ_16( nq_est, 5 ) ) ) && NE_16( bitsMod, 4 ) && GE_16( add( bits, nullVec ), add( add( shl( nq_est, 2 ), nq_est ), 4 ) ) /*5 * nq_est + 4*/ && nq[svOrder[Nsvm2]] == 0 ) /* detect need for dummy bits */
            {
                /* pad the budget up to the next multiple of 5 */
                dummy_bits = sub( 5, bitsMod );
                bits = add( bits, dummy_bits ); /* add dummy bits */
                bitsMod = 0;
                move16();
            }
            ELSE IF( GT_16( nq_est, 4 ) && ( ( bitsMod == 0 && GT_16( nullVec, 3 ) && LT_16( nullVec, 6 ) ) || ( EQ_16( bitsMod, 4 ) && EQ_16( nullVec, 5 ) ) ) && nq[svOrder[Nsvm2]] == 0 ) /* wasted bits 4, 5 for nq 6,7..*/
            {
                overflow = 0;
                move16();
                /* tmp = (bitsMod + nullVec) % 5 via basic ops */
                tmp = add( bitsMod, nullVec );
                WHILE( tmp >= 5 )
                {
                    tmp = sub( tmp, 5 );
                }
                assert( tmp == add( bitsMod, nullVec ) % 5 );
                if ( tmp != 0 )
                {
                    overflow = 1;
                    move16();
                }
                dummy_bits = add( nullVec, overflow );
                bits = add( bits, dummy_bits ); /* add dummy bits */
                bitsMod = 0;
                move16();
            }
        }

        /* overflow here doubles as a "fake bit" marker when bitsMod == 4 */
        overflow = 1;
        move16();
        IF( NE_16( bitsMod, 4 ) )
        {
            overflow = 0;
            move16();
            bits = sub( bits, bitsMod );
        }
        bits = add( bits, overflow ); /*add fake bit */
        /* unused_bits = bits - 5*nq[i] (shl(x,2)+x == 5*x) */
        unused_bits = sub( bits, add( shl( nq[i], 2 ), nq[i] ) );
        IF( nq[i] == 0 ) /*no bit savings*/
        {
            unused_bits = sub( unused_bits, 1 ); /*Stop Bit*/
        }
        /*unused_bits_idx = (int16_t)unused_bits / 5;*/
        /* signed division by 5 with truncation toward zero: mult() alone
           would round toward -inf for negative values, hence the negate pair */
        IF( unused_bits >= 0 )
        {
            unused_bits_idx = mult( unused_bits, 6554 /*1/5 in Q15*/ );
        }
        ELSE
        {
            unused_bits_idx = negate( mult( negate( unused_bits ), 6554 /*1/5 in Q15*/ ) );
        }
        assert( unused_bits_idx == unused_bits / 5 );
        unusedbitsFlag = 0;
        move16();
        IF( dummy_bits == 0 )
        {
            test();
            test();
            /* swap idx 0<->1 above the low threshold; unusedbitsFlag
               remembers the adjustment for the nq estimate below */
            IF( EQ_16( unused_bits_idx, 1 ) && GT_16( bits, BIT_SAVING_LOW_THR ) )
            {
                unused_bits_idx = 0;
                unusedbitsFlag = 1;
                move16();
                move16();
            }
            ELSE IF( unused_bits_idx == 0 && GT_16( bits, BIT_SAVING_LOW_THR ) )
            {
                unused_bits_idx = 1;
                unusedbitsFlag = -1;
                move16();
                move16();
            }
        }

        j = unused_bits_idx;
        move16();
        /*Encode Unused Bit Unary Codeword */
        IF( j > 0 )
        {
            /* write the unary code */
            push_indice( hBstr, nq_ind, u_extract_l( L_sub( L_shl_sat( 1, j ), 1 ) ), j );
            assert( abs( ( 1 << j ) - 1 ) <= 65536 );
        }

        IF( nq[i] != 0 )
        {
            /* write the stop bit */
            push_indice( hBstr, nq_ind, 0, 1 );
        }

        /*Compute AVQ code book number from unused Bits */
        bit_tmp = add( unusedbitsFlag, unused_bits_idx );
        /*nq_est = (int16_t)ceil(0.2f * (bits - 5 * (unusedbitsFlag + unused_bits_idx)));*/
        nq_est = mult( 6554 /*.2 in Q15*/, sub( bits, add( shl( bit_tmp, 2 ), bit_tmp ) ) );
        assert( (Word16) ceil( 0.2f * ( bits - 5 * ( unusedbitsFlag + unused_bits_idx ) ) ) == nq_est );

        if ( EQ_16( nq_est, 1 ) )
        {
            /* nq == 1 is not a valid codebook; fall back to Q0 */
            nq_est = 0;
            move16();
        }
        bits = sub( bits, overflow );

        bits = sub( bits, j );

        IF( nq_est != 0 )
        {
            bits = sub( bits, 1 );
        }
        nq[i] = nq_est;
        move16();

        /* write codebook indices (rank I and event. Voronoi index kv) */
        wrte_cv( hBstr, nq[i], i_ind, kv_ind, I[i], &kv[i * 8], &bits );

        bits = sub( bits, dummy_bits );

        IF( NE_16( bitsMod, 4 ) )
        {
            bits = add( bits, bitsMod );
        }
    }
    /* return the number of unspent bits and the per-subvector nq indices */
    *nb_bits = bits;
    move16();

    FOR( i = 0; i < Nsv; i++ )
    {
        nq_out[i] = nq[i];
        move16();
    }

    return;
}
#endif
/*-------------------------------------------------------------------*
 * Function AVQ_cod_lpc_fx()                                            *
 *                                                                   *
@@ -1189,64 +731,3 @@ static void wrte_cv(
    move16();
    return;
}
#ifndef REUSE_EVS_BE_ACELP_AVQ
/*-------------------------------------------------------------------*
 * wrte_cv_ivas_fx()
 *
 * Write the codebook payload of one AVQ subvector to the bitstream:
 * the base codebook rank I (4*nq bits for Q2..Q4, else a fixed
 * 16-bit Q4 or 12-bit Q3 base) and, for nq >= 5, eight Voronoi
 * extension indices kv[0..7]. Decrements *nbits by the number of
 * bits written. nq == 0 (Q0) writes nothing.
 *
 * NOTE(review): IVAS variant of wrte_cv(); compiled out when
 * REUSE_EVS_BE_ACELP_AVQ is defined.
 *-------------------------------------------------------------------*/
static void wrte_cv_ivas_fx(
    BSTR_ENC_HANDLE hBstr, /* i/o: encoder bitstream handle         */
    const Word16 nq,       /* i  : AVQ nq index                     */
    const Word16 i_ind,    /* i  : Base Bitstream index             */
    const Word16 kv_ind,   /* i  : Vornoi Bitstream index           */
    UWord16 I,             /* o  : rank I code book index           */
    Word16 kv[],           /* o  : Vornoi index kv                  */
    Word16 *nbits          /* i/o: bits                             */
)
{
    Word16 pos, j;
    Word16 bits, nq4;

    bits = *nbits;
    move16();

    /* write codebook indices (rank I and event. Voronoi index kv) */
    IF( nq == 0 ) /* Q0 */
    {
        /* nothing to write */
    }
    ELSE IF( LT_16( nq, 5 ) ) /* Q2, Q3, Q4 */
    {
        /* base codebook only: rank I on 4*nq bits */
        nq4 = shl( nq, 2 );
        push_indice( hBstr, i_ind, I, nq4 );
        bits = sub( bits, nq4 );
    }
    ELSE IF( EQ_16( s_and( nq, 1 ), 0 ) ) /* Q4 + Voronoi extensions r=1,2,3,... */
    {
        /* even nq: Q4 base (16 bits) + 8 Voronoi indices of (nq/2 - 2) bits */
        push_indice( hBstr, i_ind, I, 4 * 4 );
        bits = sub( bits, 4 * 4 );
        /*pos = (int16_t)(nq / 2 - 2);*/ /* Voronoi order determination */
        pos = sub( shr( nq, 1 ), 2 );
        FOR( j = 0; j < 8; j++ )
        {
            push_indice( hBstr, kv_ind, kv[j], pos );
        }

        /* shl(pos,3) == 8*pos bits consumed by the Voronoi indices */
        bits = sub( bits, shl( pos, 3 ) );
    }
    ELSE /* Q3 + Voronoi extensions r=1,2,3,... */
    {
        /* odd nq: Q3 base (12 bits) + 8 Voronoi indices of (nq/2 - 1) bits */
        push_indice( hBstr, i_ind, I, 4 * 3 );
        bits = sub( bits, 4 * 3 );

        /*pos = (int16_t)(nq / 2 - 1);*/ /* Voronoi order determination */
        pos = sub( shr( nq, 1 ), 1 );
        FOR( j = 0; j < 8; j++ )
        {
            push_indice( hBstr, kv_ind, kv[j], pos );
        }

        bits = sub( bits, shl( pos, 3 ) );
    }

    *nbits = bits;
    move16();
    return;
}
#endif
+0 −4
Original line number Diff line number Diff line
@@ -612,11 +612,7 @@ void transf_cdbk_enc_ivas_fx(
        move16();
        move16();
    }
#ifndef REUSE_EVS_BE_ACELP_AVQ
    AVQ_encmux_ivas_fx( st_fx->hBstr, -1, x_norm, &nBits, Nsv, nq, avq_bit_sFlag, trgtSvPos );
#else
    AVQ_encmux_fx( st_fx->hBstr, -1, x_norm, &nBits, Nsv, nq, avq_bit_sFlag, trgtSvPos );
#endif
    /* save # of AVQ unused bits for next subframe */
    *unbits = nBits;
    move16();
+0 −13
Original line number Diff line number Diff line
@@ -106,19 +106,6 @@ void AVQ_encmux_fx(
    Word16 avq_bit_sFlag,  /* i  : flag for AVQ bit saving solution                */
    Word16 trgtSvPos       /* i  : target SV for AVQ bit savings                   */
);
#ifndef REUSE_EVS_BE_ACELP_AVQ
void AVQ_encmux_ivas_fx(
    BSTR_ENC_HANDLE hBstr, /* i/o: bitstream handle                                */
    const Word16 extl,     /* i  : extension layer                                 */
    Word16 xriq[],         /* i/o: rounded subvectors [0..8*Nsv-1] followed
                               by rounded bit allocations [8*Nsv..8*Nsv+Nsv-1] */
    Word16 *nb_bits,       /* i/o: number of allocated bits                        */
    const Word16 Nsv,      /* i:   number of subvectors                            */
    Word16 nq_out[],       /* o  : AVQ nq index                                    */
    Word16 avq_bit_sFlag,  /* i  : flag for AVQ bit saving solution                */
    Word16 trgtSvPos       /* i  : target SV for AVQ bit savings                   */
);
#endif
void bw_detect_fx(
    Encoder_State *st,               /* i/o: Encoder State           */
    const Word16 signal_in[],        /* i  : input signal           */