Commit a584f6ee authored by Sandesh Venkatesh's avatar Sandesh Venkatesh
Browse files

Merge branch 'acelp_prec_imp' into 'main'

[allow regression] Scaling fix in encode gen voice, enc gen audio, core-enc, acelp_fast_fx bug...

See merge request !1377
parents 36de3abc dae405df
Loading
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -2998,10 +2998,11 @@ void acelp_fast_fx(
    BSTR_ENC_HANDLE hBstr,  /* i/o: encoder bitstream handle                      */
    const Word16 cdk_index, /* i  : codebook index                                */
    const Word16 dn_orig[L_SUBFR],
    /* i  : corr. between target and h[].                 */ // Q_new + 1
    /* i  : corr. between target and h[].                 */ // Q_dn
    Word16 Q_dn,
    const Word16 cn[L_SUBFR],
    /* i  : residual after long term prediction           */ // Q_new + 1
    /* i  : residual after long term prediction           */ // q_cn
    const Word16 q_cn,
    const Word16 H[L_SUBFR],
    /* i  : impulse response of weighted synthesis filter */ // e(norm_s(H[0])+1)
    Word16 code[L_SUBFR],                                    /* o  : algebraic (fixed) codebook excitation         */
+6 −4
Original line number Diff line number Diff line
@@ -2658,14 +2658,16 @@ static void ivas_calc_p_coeffs_per_band_enc_fx(
            factor = L_max( factor, tmp ); // q_factor
        }

        tmp = L_shl_sat( IVAS_FIX_EPS_Q40, sub( q_factor, 40 ) );
        tmp = L_shl_sat( 189 /* 1e-20 in Q74 */, sub( q_factor, 74 ) );

        Word16 factor_exp = 0;
        move16();
        IF( LE_32( factor, tmp ) )
        {
            factor = 1250000000;
            factor_exp = Q31 - ( -4 );
            factor = 22204; // (1 / 1e-20) in Q(-52)
            factor_exp = Q15 - ( -52 );
            move32();
            move16();
        }
        ELSE
        {
@@ -2688,7 +2690,7 @@ static void ivas_calc_p_coeffs_per_band_enc_fx(
                    {
                        q_tmp = W_norm( W_tmp );
                    }
                    cov_uu_re[i - num_dmx][j - num_dmx] = W_extract_h( W_shl( W_mult0_32_32( cov_uu_re[i - num_dmx][j - num_dmx], factor ), q_tmp ) ); /*q_cov_uu_re+15-factor_exp+q_tmp-32*/
                    cov_uu_re[i - num_dmx][j - num_dmx] = W_extract_h( W_shl( W_tmp, q_tmp ) ); /*q_cov_uu_re+15-factor_exp+q_tmp-32*/
                    move32();
                    q_cov_uu_re_per_value[i - num_dmx][j - num_dmx] = sub( add( add( q_cov_uu_re, sub( 15, factor_exp ) ), q_tmp ), 32 );
                    move16();
+1 −8
Original line number Diff line number Diff line
@@ -5077,14 +5077,7 @@ void fine_gain_quant_fx(
    Word16 *fg_pred,         /* i/o: Predicted gains / Corrected gains        Q12 */
    const Word16 *gopt       /* i  : Optimal gains                            Q12 */
);
void fine_gain_quant_ivas_fx(
    BSTR_ENC_HANDLE hBstr,   /* i/o: encoder bitstream handle                        */
    const Word16 *ord,       /* i  : Indices for energy order                 Q0     */
    const Word16 num_sfm,    /* i  : Number of bands                          Q0     */
    const Word16 *gain_bits, /* i  : Gain adjustment bits per sub band        Q0     */
    Word16 *fg_pred,         /* i/o: Predicted gains / Corrected gains i:Q12 / o:Q11 */
    const Word16 *gopt       /* i  : Optimal gains                            Q12    */
);
void get_max_pulses_fx(
    const Word16 *band_start, /* i  : Sub band start indices    */
    const Word16 *band_end,   /* i  : Sub band end indices      */
+0 −67
Original line number Diff line number Diff line
@@ -509,73 +509,6 @@ void apply_gain_fx(
 *
 * Fine gain quantization
 *--------------------------------------------------------------------------*/
/* Fine gain quantization (IVAS variant): for each sub-band, quantize the dB
 * correction between the optimal gain gopt[] and the predicted gain fg_pred[],
 * write the index to the bitstream, and apply the quantized correction to
 * fg_pred[]. Note the Q-format contract: fg_pred[] enters in Q12 and leaves in
 * Q11 on BOTH branches (corrected or merely down-shifted), unlike the legacy
 * fine_gain_quant_fx which keeps Q12 throughout. All arithmetic uses the
 * ITU-T STL basic operators (add/sub/shl/div_s/...); move16()/test() calls are
 * WMOPS complexity-counter instrumentation, not functional code. */
void fine_gain_quant_ivas_fx(
    BSTR_ENC_HANDLE hBstr,   /* i/o: encoder bitstream handle                        */
    const Word16 *ord,       /* i  : Indices for energy order                 Q0     */
    const Word16 num_sfm,    /* i  : Number of bands                          Q0     */
    const Word16 *gain_bits, /* i  : Gain adjustment bits per sub band        Q0     */
    Word16 *fg_pred,         /* i/o: Predicted gains / Corrected gains i:Q12 / o:Q11 */
    const Word16 *gopt       /* i  : Optimal gains                            Q12    */
)
{
    Word16 band;
    Word16 gbits;
    Word16 idx;
    Word16 gain_db, gain_dbq;
    Word16 err;

    Word16 tmp1, tmp2, exp1, exp2;
    Word32 L_tmp;
    UWord16 lsb;
#ifdef BASOP_NOGLOB_DECLARE_LOCAL
    Flag Overflow = 0; /* local overflow flag for the *_o saturating basops */
    move32();
#endif

    FOR( band = 0; band < num_sfm; band++ )
    {
        /* bit budget for this band, looked up in energy order */
        gbits = gain_bits[ord[band]]; /* Q0 */
        move16();
        test();
        /* Only quantize when there is a non-zero predicted gain AND bits to spend;
         * div_s below requires tmp2 != 0, which fg_pred[band] != 0 guarantees. */
        IF( fg_pred[band] != 0 && gbits > 0 )
        {
            /* err = gopt/fg_pred as a Q15 fraction; exp1 accumulates the true
             * binary exponent of the ratio. The extra sub(exp1,1) headroom keeps
             * tmp1 < tmp2 as div_s requires. */
            exp1 = norm_s( gopt[band] );
            exp1 = sub( exp1, 1 );
            tmp1 = shl( gopt[band], exp1 );
            exp2 = norm_s( fg_pred[band] );
            tmp2 = shl( fg_pred[band], exp2 ); /* Q12 + exp2 */
            exp1 = add( 15, sub( exp1, exp2 ) );
            err = div_s( tmp1, tmp2 ); /* Q15 */
            /* gain_db = 20*log10(err) = log2(err) * 20*log10(2), built as a
             * 32-bit (integer, fraction) pair via L_Comp. */
            tmp1 = norm_s( err );
            exp2 = Log2_norm_lc( L_deposit_h( shl( err, tmp1 ) ) );
            tmp1 = sub( 14, tmp1 );
            tmp1 = sub( tmp1, exp1 );
            L_tmp = L_Comp( tmp1, exp2 );
            Mpy_32_16_ss( L_tmp, 24660, &L_tmp, &lsb ); /* 24660 = 20*log10(2) in Q12 */ /*16+12-15=13 */
            gain_db = round_fx_sat( L_shl_o( L_tmp, 17, &Overflow ) );                   /* Q14 */

            /* scalar-quantize the dB error against the codebook selected by the
             * bit budget, then send the index */
            idx = squant_fx( gain_db, &gain_dbq, finegain_fx[gbits - 1], gain_cb_size[gbits - 1] ); /* Q0 */
            push_indice( hBstr, IND_PVQ_FINE_GAIN, idx, gbits );

            /* Undo the log: fg_pred *= 10^(gain_dbq/20) = 2^(gain_dbq*0.05*log2(10)),
             * computed with Pow2 on the (exponent, fraction) split of L_tmp. */
            L_tmp = L_mult0( gain_dbq, 21771 ); /* 21771=0.05*log2(10) */ /* 14+17=31 */
            L_tmp = L_shr( L_tmp, 15 );                                   /* Q16 */
            tmp1 = L_Extract_lc( L_tmp, &exp1 );
            tmp1 = abs_s( tmp1 );
            tmp1 = extract_l( Pow2( 14, tmp1 ) );
            exp1 = sub( 14, exp1 );

            L_tmp = L_mult0( fg_pred[band], tmp1 );                      /*12+exp1 */
            /* round_fx drops one bit relative to the nominal Q12 scaling,
             * intentionally landing the result in Q11 */
            fg_pred[band] = round_fx( L_shl( L_tmp, sub( 15, exp1 ) ) ); /*12+exp1+16-exp1-16=12 - 1-> Q11*/
            move16();
        }
        ELSE
        {
            /* no correction applied; still shift so the output Q-format matches
             * the corrected bands */
            fg_pred[band] = shr( fg_pred[band], 1 ); // Q12 -> Q11 To align all the fg_pred indices in same Q.
        }
    }

    return;
}
void fine_gain_quant_fx(
    BSTR_ENC_HANDLE hBstr,   /* i/o: encoder bitstream handle					  */
    const Word16 *ord,       /* i  : Indices for energy order                 Q0  */
+121 −45
Original line number Diff line number Diff line
@@ -123,10 +123,11 @@ void acelp_fast_fx(
    BSTR_ENC_HANDLE hBstr,  /* i/o: encoder bitstream handle                      */
    const Word16 cdk_index, /* i  : codebook index                                */
    const Word16 dn_orig[L_SUBFR],
    /* i  : corr. between target and h[].                 */ // Q_new + 1
    /* i  : corr. between target and h[].                 */ // Q_dn
    Word16 Q_dn,
    const Word16 cn[L_SUBFR],
    /* i  : residual after long term prediction           */ // Q_new + 1
    /* i  : residual after long term prediction           */ // q_cn
    const Word16 q_cn,
    const Word16 H[L_SUBFR],
    /* i  : impulse response of weighted synthesis filter */ // e(norm_s(H[0])+1)
    Word16 code[L_SUBFR],
@@ -161,7 +162,9 @@ void acelp_fast_fx(
    Word64 s64;
    Word16 flag = 0;
    move16();

    Word32 temp1, temp2, temp3, temp4, temp5, temp6;
    Word16 q_temp1, q_temp2;
    Word16 scale_temp1, scale_temp2;
    /*-----------------------------------------------------------------*
     * Initialization
     *-----------------------------------------------------------------*/
@@ -294,16 +297,23 @@ void acelp_fast_fx(

    exp = sub( Q31, shl( Q_dn, 1 ) );

    s64 = 0;
    move64();
    FOR( i = 0; i < L_subfr; i++ )
    {
        s64 = W_mac_16_16( s64, dn_orig[i], dn_orig[i] ); // 2 * Q_dn + 1
    }
    dndn_fx = 21474836 /*0.01f in Q31 */;
    move32();
    dndn_e = 0;
    move16();
    FOR( i = 0; i < L_subfr; i++ )
    IF( s64 )
    {
        dndn_fx = BASOP_Util_Add_Mant32Exp( dndn_fx, dndn_e, L_mult0( dn_orig[i], dn_orig[i] ), exp, &dndn_e ); // Q(dndn_e)
        Word16 new_exp1 = W_norm( s64 );
        dndn_fx = W_extract_h( W_shl( s64, new_exp1 ) ); // 2 * Q_dyn + exp1 - 31
        dndn_e = sub( 31, sub( add( add( shl( Q_dn, 1 ), 1 ), new_exp1 ), 32 ) );
    }

    exp1 = sub( Q31, shl( sub( Q_dn, 1 ), 1 ) );

    cncn_fx = 214748365 /* 0.1f in Q31 */;
    move32();
@@ -312,31 +322,42 @@ void acelp_fast_fx(

    FOR( q = 0; q < nb_tracks; q++ )
    {
        s64 = 0;
        move64();
        FOR( i = 0; i < L_subfr; i += nb_tracks )
        {
            s64 = W_mac_16_16( s64, cn[i + q], cn[i + q] ); // 2 * q_cn + 1
        }

        cncn_track[q] = 214748365 /* 0.1f in Q31 */;
        move32();
        cncn_track_e[q] = 0;
        move16();

        FOR( i = 0; i < L_subfr; i += nb_tracks )
        IF( s64 )
        {
            Word32 L_tmp = L_mult0( cn[i + q], cn[i + q] );
            shift = norm_l( L_tmp );
            L_tmp = L_shl( L_tmp, shift );
            cncn_track[q] = BASOP_Util_Add_Mant32Exp( cncn_track[q], cncn_track_e[q], L_tmp, sub( exp1, shift ), &cncn_track_e[q] ); // Q(cncn_track_e[q])
            move32();
            Word16 new_exp1 = W_norm( s64 );
            cncn_track[q] = W_extract_h( W_shl( s64, new_exp1 ) ); // 2 * Q_dyn + exp1 - 31
            cncn_track_e[q] = sub( 31, sub( add( add( shl( q_cn, 1 ), 1 ), new_exp1 ), 32 ) );
        }
        cncn_fx = BASOP_Util_Add_Mant32Exp( cncn_fx, cncn_e, cncn_track[q], cncn_track_e[q], &cncn_e ); // Q(cncn_e)
    }

    Word16 tmp;
    Word16 tmp = 0;
    move16();
    s_coef_fx = BASOP_Util_Divide3232_Scale( dndn_fx, cncn_fx, &tmp );
    tmp = add( tmp, sub( dndn_e, cncn_e ) );
    s_coef_fx = Sqrt16( s_coef_fx, &tmp ); // Q(15 - tmp)


    q_temp1 = add( add( sub( Q15, tmp ), q_cn ), Q1 );
    scale_temp1 = sub( q_temp1, Q_dn );
    FOR( i = 0; i < L_subfr; i++ )
    {
        bn_orig_fx[i] = L_mac0( L_shr( L_mult( s_coef_fx, cn[i] ), sub( 15, tmp ) ), beta1_fx, dn_orig[i] ); // Q_dn
        temp1 = L_mult( s_coef_fx, cn[i] );     // Q(15 - tmp)+q_cn+1
        temp2 = L_mult( beta1_fx, dn_orig[i] ); // 1+Q_dn+1
        /* bn_orig_fx[i] is being used in Q_dn */
        temp2 = L_shr( temp2, 1 );
        temp1 = L_shr( temp1, scale_temp1 );
        bn_orig_fx[i] = L_add( temp1, temp2 ); // Q_dn
        move32();

        IF( bn_orig_fx[i] >= 0 )
@@ -576,12 +597,12 @@ void acelp_fast_fx(
        }
        ELSE
        {
            Gn = i_mult( s[0], shr( dn_orig[m[0]], 1 ) ); // Q_dn - 1
            Gn = i_mult( s[0], dn_orig[m[0]] ); // Q_dn
            Gd = alp[0];                        // Q6
            move16();
            G = Gn; // Q_dn - 1
            G = Gn; // Q_dn
            move16();
            G = i_mult( G, s[0] ); // Q_dn - 1
            G = i_mult( G, s[0] ); // Q_dn

            track = track_order[q * nb_tracks + 1]; // Q0
            move16();
@@ -596,14 +617,17 @@ void acelp_fast_fx(
            move64();
            FOR( i = track; i < L_subfr; i += nb_tracks )
            {
                dn[i] = L_shr( L_msu( L_mult0( Gd, dn_orig[i] ), G, *alp_pos0 ), 6 ); // Q_dn
                temp1 = L_mult0( Gd, dn_orig[i] );
                temp2 = L_mult0( G, *alp_pos0 );
                temp3 = L_sub( temp1, temp2 );
                dn[i] = L_shr( temp3, 6 );
                move32();
                alp_pos0 += nb_tracks;
                s64 = W_mac_32_32( s64, dn[i], dn[i] ); // 2 * Q_dn + 1
            }
            exp1 = W_norm( s64 );
            dndn_fx = W_extract_h( W_shl( s64, exp1 ) ); // 2 * Q_dyn + exp1 - 31
            dndn_e = sub( 31, sub( add( shl( Q_dn, 1 ), exp ), 31 ) );
            dndn_e = sub( 31, sub( add( add( shl( Q_dn, 1 ), 1 ), exp1 ), 32 ) );

            IF( dndn_fx == 0 )
            {
@@ -612,17 +636,28 @@ void acelp_fast_fx(
                dndn_e = 0;
                move16();
            }
            exp1 = 0;
            move16();
            s_coef_fx = BASOP_Util_Divide3232_Scale( dndn_fx, cncn_track[track], &exp1 );
            exp1 = add( exp1, sub( dndn_e, cncn_track_e[track] ) );
            s_coef_fx = Sqrt16( s_coef_fx, &exp1 );
            exp1 = sub( exp1, shift );
            max_val_fx = EPSILLON_FX;
            move16();
            m[1] = track; // Q0
            move16();
            q_temp1 = add( add( sub( Q15, exp1 ), q_cn ), 1 );
            q_temp2 = add( Q_dn, Q2 );
            scale_temp1 = sub( q_temp1, Q_dn );
            scale_temp2 = sub( q_temp2, Q_dn );
            FOR( i = track; i < L_subfr; i += nb_tracks )
            {
                dn[i] = L_add( L_shr( L_mult( s_coef_fx, cn[i] ), sub( 15, exp1 ) ), L_shr( imult3216( dn[i], beta2_fx ), 2 ) ); // Q_dn
                temp1 = L_mult( s_coef_fx, cn[i] );   // Q(15 - tmp)+q_cn+1
                temp2 = imult3216( dn[i], beta2_fx ); // Q_dn + 2

                /* bn_orig_fx[i] is being used in Q_dn */
                temp2 = L_shr( temp2, scale_temp2 );
                temp1 = L_shr( temp1, scale_temp1 );
                dn[i] = L_add( temp1, temp2 ); // Q_dn
                move32();
                temp_fx = imult3216( dn[i], sign_fx[i] ); // Q_dn

@@ -645,14 +680,14 @@ void acelp_fast_fx(

        IF( GE_16( nb_pulse, 3 ) )
        {
            Gn = add( Gn, i_mult( s[1], shr( dn_orig[m[1]], 1 ) ) ); // Q_dn -1
            Gn = add( Gn, i_mult( s[1], dn_orig[m[1]] ) ); // Q_dn
            Gd32 = Gd;
            move16();
            Gd32 = L_add( Gd32, L_add( alp[0], L_mult0( i_mult( shl( s[0], 1 ), s[1] ), alp[m[0] - m[1]] ) ) ); // Q6
            G = Gn;                                                                                             // Q_dn - 1
            G = Gn;                                                                                             // Q_dn
            move16();
            G1 = i_mult( G, s[1] ); // Q_dn-1
            G = i_mult( G, s[0] );  // Q_dn-1
            G1 = i_mult( G, s[1] ); // Q_dn
            G = i_mult( G, s[0] );  // Q_dn

            track = track_order[q * nb_tracks + 2]; // Q0
            move16();
@@ -661,7 +696,12 @@ void acelp_fast_fx(

            FOR( i = track; i < L_subfr; i += nb_tracks )
            {
                dn[i] = L_shr( L_msu( L_msu0( imult3216( Gd32, dn_orig[i] ), G, *alp_pos0 ), G1, *alp_pos1 ), 6 ); // Q_dn
                temp1 = imult3216( Gd32, dn_orig[i] );
                temp2 = L_mult0( G, *alp_pos0 );
                temp3 = L_mult0( G1, *alp_pos1 );
                temp4 = L_sub( temp1, temp2 );
                temp4 = L_sub( temp4, temp3 );
                dn[i] = L_shr( temp4, 6 );
                move32();
                alp_pos0 += nb_tracks;
                alp_pos1 += nb_tracks;
@@ -677,15 +717,19 @@ void acelp_fast_fx(

        IF( GE_16( nb_pulse, 4 ) )
        {
            Gn = add( Gn, i_mult( s[2], shr( dn_orig[m[2]], 1 ) ) ); // Q_dn-1
            Gn = add( Gn, i_mult( s[2], dn_orig[m[2]] ) ); // Q_dn
            Gd32 = Gd;
            move16();
            Gd32 = L_add( Gd32, L_add( L_add( alp[0], L_mult0( i_mult( shl( s[0], 1 ), s[2] ), alp[m[0] - m[2]] ) ), L_mult0( i_mult( shl( s[1], 1 ), s[2] ), alp[m[1] - m[2]] ) ) ); // Q6
            G = Gn;                                                                                                                                                                   // Q_dn-1
            temp1 = alp[0];
            move32();
            temp2 = L_mult0( i_mult( shl( s[0], 1 ), s[2] ), alp[m[0] - m[2]] );
            temp3 = L_mult0( i_mult( shl( s[1], 1 ), s[2] ), alp[m[1] - m[2]] );
            Gd32 = L_add( Gd32, L_add( L_add( temp1, temp2 ), temp3 ) ); // Q6
            G = Gn;                                                      // Q_dn
            move16();
            G1 = i_mult( G, s[1] ); // Q_dn-1
            G2 = i_mult( G, s[2] ); // Q_dn-1
            G = i_mult( G, s[0] );  // Q_dn-1
            G1 = i_mult( G, s[1] ); // Q_dn
            G2 = i_mult( G, s[2] ); // Q_dn
            G = i_mult( G, s[0] );  // Q_dn

            track = track_order[q * nb_tracks + 3];
            move16();
@@ -695,7 +739,15 @@ void acelp_fast_fx(

            FOR( i = track; i < L_subfr; i += nb_tracks )
            {
                dn[i] = L_shr( L_msu( L_msu( L_msu( imult3216( Gd32, dn_orig[i] ), G, *alp_pos0 ), G1, *alp_pos1 ), G2, *alp_pos2 ), 6 ); // Q_dn

                temp1 = imult3216( Gd32, dn_orig[i] );
                temp2 = L_mult0( G, *alp_pos0 );
                temp3 = L_mult0( G1, *alp_pos1 );
                temp4 = L_mult0( G2, *alp_pos2 );
                temp5 = L_sub( temp1, temp2 );
                temp5 = L_sub( temp5, temp3 );
                temp5 = L_sub( temp5, temp4 );
                dn[i] = L_shr( temp5, 6 );
                move32();
                alp_pos0 += nb_tracks;
                alp_pos1 += nb_tracks;
@@ -717,16 +769,22 @@ void acelp_fast_fx(

        IF( GE_16( nb_pulse, 5 ) )
        {
            Gn = add( Gn, i_mult( s[3], shr( dn_orig[m[3]], 1 ) ) ); // Q_dn-1
            Gn = add( Gn, i_mult( s[3], dn_orig[m[3]] ) ); // Q_dn
            Gd32 = Gd;
            move16();
            Gd32 = L_add( Gd32, L_add( L_add( L_add( alp[0], L_mult0( i_mult( shl( s[0], 1 ), s[3] ), alp[m[0] - m[3]] ) ), L_mult0( i_mult( shl( s[1], 1 ), s[3] ), alp[m[1] - m[3]] ) ), L_mult0( i_mult( shl( s[2], 1 ), s[3] ), alp[m[2] - m[3]] ) ) ); // Q6
            temp1 = alp[0];
            move32();
            temp2 = L_mult0( i_mult( shl( s[0], 1 ), s[3] ), alp[m[0] - m[3]] );
            temp3 = L_mult0( i_mult( shl( s[1], 1 ), s[3] ), alp[m[1] - m[3]] );
            temp4 = L_mult0( i_mult( shl( s[2], 1 ), s[3] ), alp[m[2] - m[3]] );

            Gd32 = L_add( Gd32, L_add( L_add( L_add( temp1, temp2 ), temp3 ), temp4 ) ); // Q6
            G = Gn;
            move16();               // Q_dn-1
            G1 = i_mult( G, s[1] ); // Q_dn-1
            G2 = i_mult( G, s[2] ); // Q_dn-1
            G3 = i_mult( G, s[3] ); // Q_dn-1
            G = i_mult( G, s[0] );  // Q_dn-1
            move16();               // Q_dn
            G1 = i_mult( G, s[1] ); // Q_dn
            G2 = i_mult( G, s[2] ); // Q_dn
            G3 = i_mult( G, s[3] ); // Q_dn
            G = i_mult( G, s[0] );  // Q_dn

            IF( EQ_16( cdk_index, 6 ) )
            {
@@ -740,7 +798,16 @@ void acelp_fast_fx(

                FOR( i = track; i < L_subfr; i += nb_tracks )
                {
                    dn[i] = L_shr( L_msu( L_msu( L_msu( L_msu( imult3216( Gd32, dn_orig[i] ), G, *alp_pos0 ), G1, *alp_pos1 ), G2, *alp_pos2 ), G3, *alp_pos3 ), 6 ); // Q_dn
                    temp1 = imult3216( Gd32, dn_orig[i] );
                    temp2 = L_mult0( G, *alp_pos0 );
                    temp3 = L_mult0( G1, *alp_pos1 );
                    temp4 = L_mult0( G2, *alp_pos2 );
                    temp5 = L_mult0( G3, *alp_pos3 );
                    temp6 = L_sub( temp1, temp2 );
                    temp6 = L_sub( temp6, temp3 );
                    temp6 = L_sub( temp6, temp4 );
                    temp6 = L_sub( temp6, temp5 );
                    dn[i] = L_shr( temp6, 6 );
                    move32();
                    alp_pos0 += nb_tracks;
                    alp_pos1 += nb_tracks;
@@ -760,7 +827,16 @@ void acelp_fast_fx(

                FOR( i = 0; i < L_subfr; i++ )
                {
                    dn[i] = L_shr( L_msu( L_msu( L_msu( L_msu( imult3216( Gd32, dn_orig[i] ), G, *alp_pos0 ), G1, *alp_pos1 ), G2, *alp_pos2 ), G3, *alp_pos3 ), 6 ); /*Q_dn*/
                    temp1 = imult3216( Gd32, dn_orig[i] );
                    temp2 = L_mult0( G, *alp_pos0 );
                    temp3 = L_mult0( G1, *alp_pos1 );
                    temp4 = L_mult0( G2, *alp_pos2 );
                    temp5 = L_mult0( G3, *alp_pos3 );
                    temp6 = L_sub( temp1, temp2 );
                    temp6 = L_sub( temp6, temp3 );
                    temp6 = L_sub( temp6, temp4 );
                    temp6 = L_sub( temp6, temp5 );
                    dn[i] = L_shr( temp6, 6 );
                    move16();
                    alp_pos0++;
                    alp_pos1++;
Loading