diff --git a/lib_com/common_api_types.h b/lib_com/common_api_types.h index 3e8a963cd9823787b22c1fdbc20b13d197c0a3e4..ed75c249b36c8f32c700ac4825c2c202d11ad2c0 100644 --- a/lib_com/common_api_types.h +++ b/lib_com/common_api_types.h @@ -119,13 +119,13 @@ typedef struct _IVAS_ENC_CHANNEL_AWARE_CONFIG typedef struct _IVAS_ISM_METADATA { #ifdef IVAS_FLOAT_FIXED - Word32 azimuth_fx; - Word32 elevation_fx; - Word16 radius_fx; - Word32 spread_fx; - Word32 gainFactor_fx; - Word32 yaw_fx; - Word32 pitch_fx; + Word32 azimuth_fx; /* Q22 */ + Word32 elevation_fx; /* Q22 */ + Word16 radius_fx; /* Q9 */ + Word32 spread_fx; /* Q22 */ + Word32 gainFactor_fx; /* Q31 */ + Word32 yaw_fx; /* Q22 */ + Word32 pitch_fx; /* Q22 */ #endif float azimuth; float elevation; diff --git a/lib_com/ivas_prot.h b/lib_com/ivas_prot.h index 876313d1bc82339c45625d5cfdf91e919016d88d..db7ec4ee263f57958fb0c86c6f61aa61d929e6b0 100644 --- a/lib_com/ivas_prot.h +++ b/lib_com/ivas_prot.h @@ -7570,11 +7570,11 @@ void panning_wrap_angles( float *ele_wrapped /* o : wrapped elevation component */ ); #ifdef IVAS_FLOAT_FIXED -void panning_wrap_angles_fixed( - const Word32 azi_deg, /* i : azimuth in degrees for panning direction (positive left) */ - const Word32 ele_deg, /* i : elevation in degrees for panning direction (positive up) */ - Word32 *azi_wrapped, /* o : wrapped azimuth component */ - Word32 *ele_wrapped /* o : wrapped elevation component */ +void panning_wrap_angles_fx( + const Word32 azi_deg, /* i : azimuth in degrees for panning direction (positive left) Q22 */ + const Word32 ele_deg, /* i : elevation in degrees for panning direction (positive up) Q22 */ + Word32 *azi_wrapped, /* o : wrapped azimuth component Q22 */ + Word32 *ele_wrapped /* o : wrapped elevation component Q22 */ ); #endif void v_sort_ind( diff --git a/lib_com/ivas_tools.c b/lib_com/ivas_tools.c index 76e84cc9a795adfff1bbb73d529aa6730de08b2d..20cce673c507c610971d524c534ab5b2474c9ed7 100644 --- a/lib_com/ivas_tools.c +++ b/lib_com/ivas_tools.c @@ -2938,8 +2938,8 @@ static float wrap_azi( } #ifdef IVAS_FLOAT_FIXED /* helper function for panning_wrap_angles */ -static Word32 wrap_azi_fixed( - const Word32 azi_deg ) +static Word32 wrap_azi_fx( + const Word32 azi_deg /* Q22 */ ) { Word32 azi = azi_deg; move32(); @@ -3027,19 +3027,18 @@ void panning_wrap_angles( } #ifdef IVAS_FLOAT_FIXED /*-------------------------------------------------------------------* - * panning_wrap_angles_fixed() + * panning_wrap_angles_fx() * * Wrap angles for amplitude panning to the range: * azimuth = (-180, 180] * elevation = [-90, 90] * Considers direction changes from large elevation values *-------------------------------------------------------------------*/ -void panning_wrap_angles_fixed( - const Word32 azi_deg, /* i : azimuth in degrees for panning direction (positive left) */ - const Word32 ele_deg, /* i : elevation in degrees for panning direction (positive up) */ - Word32 *azi_wrapped, /* o : wrapped azimuth component */ - Word32 *ele_wrapped /* o : wrapped elevation component */ - +void panning_wrap_angles_fx( + const Word32 azi_deg, /* i : azimuth in degrees for panning direction (positive left) Q22 */ + const Word32 ele_deg, /* i : elevation in degrees for panning direction (positive up) Q22 */ + Word32 *azi_wrapped, /* o : wrapped azimuth component Q22 */ + Word32 *ele_wrapped /* o : wrapped elevation component Q22 */ ) { Word32 azi, ele; @@ -3053,7 +3052,7 @@ void panning_wrap_angles_fixed( { *ele_wrapped = ele; move32(); - *azi_wrapped = wrap_azi_fixed( azi ); + 
*azi_wrapped = wrap_azi_fx( azi ); move32(); return; } @@ -3093,7 +3092,7 @@ void panning_wrap_angles_fixed( ele = L_sub( -ANGLE_180_DEG_Q22, ele ); } } - *azi_wrapped = wrap_azi_fixed( azi ); + *azi_wrapped = wrap_azi_fx( azi ); move32(); *ele_wrapped = ele; move32(); diff --git a/lib_com/tools.c b/lib_com/tools.c index 0222d6cc48fb0dcb8a476cda512b765fddb44f7a..072cfa7675260ea29e822eec3f6ee3e5442e5215 100644 --- a/lib_com/tools.c +++ b/lib_com/tools.c @@ -1101,9 +1101,9 @@ float dotp( /*! r: dot product of x[] and y[] */ Word32 dotp_fixed( - const Word32 x[], /* i : vector x[] */ - const Word32 y[], /* i : vector y[] */ - const Word16 n /* i : vector length */ + const Word32 x[], /* i : vector x[] Qx */ + const Word32 y[], /* i : vector y[] Qy */ + const Word16 n /* i : vector length */ ) { Word16 i; diff --git a/lib_dec/lead_deindexing_fx.c b/lib_dec/lead_deindexing_fx.c index 068d60bb1d60d979f1a5e9f3eacb51b75ee76256..4c13ee3925cadf38788674a21ea2ff2f3cbcf320 100644 --- a/lib_dec/lead_deindexing_fx.c +++ b/lib_dec/lead_deindexing_fx.c @@ -277,7 +277,7 @@ static void fcb_decode_pos_fx( FOR( i = 0; i < tmp_loop; i++ ) { select_table23 = select_table22[temp1]; - select_table24 = &select_table23[sub( pulse_num, l )]; + select_table24 = &select_table23[pulse_num - l]; k = sub( *select_table24, k ); WHILE( LE_16( k, *select_table24 ) ) @@ -286,7 +286,7 @@ static void fcb_decode_pos_fx( select_table24--; } - k = sub( select_table23[sub( temp2, l )], k ); + k = sub( select_table23[temp2 - l], k ); pos_vector[i] = sub( l, 1 ); move16(); temp1 = sub( temp1, 1 ); diff --git a/lib_dec/lib_dec.h b/lib_dec/lib_dec.h index 93604547bfe678f75723ead0a3df02b2b8a8f876..e44092fb60ae1f11095e92ec3a8d29da991da1af 100644 --- a/lib_dec/lib_dec.h +++ b/lib_dec/lib_dec.h @@ -430,7 +430,7 @@ ivas_error IVAS_DEC_Configure( const IVAS_HEAD_ORIENT_TRK_T orientation_tracking, /* i : head orientation tracking type */ const Word16 renderConfigEnabled, /* i : enable Renderer config. file for binaural output */ const Word16 Opt_non_diegetic_pan, /* i : diegetic or not */ - const Word16 non_diegetic_pan_gain_fx, /* i : non diegetic panning gain */ + const Word16 non_diegetic_pan_gain_fx, /* i : non diegetic panning gain Q15 */ const Word16 Opt_dpid_on, /* i : enable directivity pattern option */ const UWord16 acousticEnvironmentId, /* i : Acoustic environment ID */ const Word16 delayCompensationEnabled /* i : enable delay compensation */ @@ -455,7 +455,7 @@ ivas_error IVAS_DEC_FeedFrame_Serial( ivas_error IVAS_DEC_GetSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ const Word16 nSamplesAsked, /* i : number of samples wanted by the caller */ - Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q0 */ Word16 *nOutSamples, /* o : number of samples per channel written to output buffer */ bool *needNewFrame /* o : indication that the decoder needs a new frame */ ); @@ -527,14 +527,14 @@ ivas_error IVAS_DEC_VoIP_SetScale( ivas_error IVAS_DEC_TSM_SetQuality( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - const Word16 quality /* i : target TSM quality */ + const Word16 quality /* i : target TSM quality Q14 */ ); /*! 
r: error code */ ivas_error IVAS_DEC_VoIP_GetSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ UWord16 nSamplesPerChannel, /* i : number of samples per channel requested to be written to output buffer */ - Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q0 */ const UWord32 systemTimestamp_ms /* i : current system timestamp */ #ifdef SUPPORT_JBM_TRACEFILE , JbmTraceFileWriterFn jbmWriterFn, @@ -545,7 +545,7 @@ ivas_error IVAS_DEC_VoIP_GetSamples( ivas_error IVAS_DEC_Flush( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ const Word16 nSamplesPerChannel, /* i : number of samples per channel requested to be written to output buffer */ - Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q0 */ Word16 *nSamplesFlushed /* o : number of samples flushed */ ); @@ -571,12 +571,12 @@ ivas_error IVAS_DEC_GetRenderFramesize( ivas_error IVAS_DEC_GetRenderFramesizeSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word16 *render_framesize /* o : render framesize in samples */ + Word16 *render_framesize /* o : render framesize in samples Q0 */ ); ivas_error IVAS_DEC_GetReferencesUpdateFrequency( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word16 *update_frequency /* o : update frequency */ + Word16 *update_frequency /* o : update frequency Q0 */ ); ivas_error IVAS_DEC_GetNumOrientationSubframes( @@ -586,7 +586,7 @@ ivas_error IVAS_DEC_GetNumOrientationSubframes( ivas_error IVAS_DEC_GetRenderFramesizeMs( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - UWord32 *render_framesize /* o : render framesize in samples */ + UWord32 *render_framesize /* o : render framesize in samples Q0 */ ); @@ -678,7 +678,7 @@ ivas_error IVAS_DEC_HasDecodedFirstGoodFrame( /*! r: error code */ ivas_error IVAS_DEC_GetPcmFrameSize( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word32 *pcmFrameSize /* o : total size of the PCM output frame. This takes into account the number of output channels */ + Word32 *pcmFrameSize /* o : total size of the PCM output frame. This takes into account the number of output channels Q0 */ ); /*! 
r: true if decoder has no data in VoIP jitter buffer */ @@ -689,8 +689,8 @@ bool IVAS_DEC_VoIP_IsEmpty( ivas_error IVAS_DEC_VoIP_Get_CA_offset( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word16 *optimum_offset, - Word16 *FEC_hi + Word16 *optimum_offset, //Q0 + Word16 *FEC_hi //Q0 ); #ifdef SUPPORT_JBM_TRACEFILE diff --git a/lib_dec/lib_dec_fx.c b/lib_dec/lib_dec_fx.c index d3e26547e168d9dcf2964b80a5d2985c4b5d5580..05fd52bfbb2ece80f382ad842ccacdb1a8f6ba49 100644 --- a/lib_dec/lib_dec_fx.c +++ b/lib_dec/lib_dec_fx.c @@ -295,7 +295,7 @@ static void init_decoder_config( hDecoderConfig->Opt_RendConfigCustom = 0; hDecoderConfig->orientation_tracking = IVAS_HEAD_ORIENT_TRK_NONE; hDecoderConfig->Opt_non_diegetic_pan = 0; - hDecoderConfig->non_diegetic_pan_gain_fx = 0; + hDecoderConfig->non_diegetic_pan_gain_fx = 0; // Q15 hDecoderConfig->Opt_tsm = 0; hDecoderConfig->Opt_delay_comp = 0; hDecoderConfig->Opt_ExternalOrientation = 0; @@ -418,7 +418,7 @@ ivas_error IVAS_DEC_Configure( const IVAS_HEAD_ORIENT_TRK_T orientation_tracking, /* i : head orientation tracking type */ const Word16 renderConfigEnabled, /* i : enable Renderer config. file for binaural output */ const Word16 Opt_non_diegetic_pan, /* i : diegetic or not */ - const Word16 non_diegetic_pan_gain_fx, /* i : non diegetic panning gain */ + const Word16 non_diegetic_pan_gain_fx, /* i : non diegetic panning gain Q15 */ const Word16 Opt_dpid_on, /* i : enable directivity pattern option */ const UWord16 acousticEnvironmentId, /* i : Acoustic environment ID */ const Word16 delayCompensationEnabled /* i : enable delay compensation */ @@ -489,7 +489,7 @@ ivas_error IVAS_DEC_Configure( hDecoderConfig->Opt_HRTF_binary = hrtfReaderEnabled; hDecoderConfig->Opt_RendConfigCustom = renderConfigEnabled; hDecoderConfig->Opt_non_diegetic_pan = Opt_non_diegetic_pan; - hDecoderConfig->non_diegetic_pan_gain_fx = non_diegetic_pan_gain_fx; + hDecoderConfig->non_diegetic_pan_gain_fx = non_diegetic_pan_gain_fx; // Q15 hDecoderConfig->Opt_delay_comp = delayCompensationEnabled; hDecoderConfig->Opt_ExternalOrientation = enableExternalOrientation; hDecoderConfig->Opt_dpid_on = Opt_dpid_on; @@ -571,11 +571,11 @@ Word16 get_render_frame_size_ms( } ELSE IF( EQ_16( render_framesize, IVAS_RENDER_FRAMESIZE_10MS ) ) { - return shl( ( 1000 / ( FRAMES_PER_SEC * IVAS_MAX_PARAM_SPATIAL_SUBFRAMES ) ), 1 ); + return ( ( 1000 / ( FRAMES_PER_SEC * IVAS_MAX_PARAM_SPATIAL_SUBFRAMES ) ) << 1 ); } ELSE IF( EQ_16( render_framesize, IVAS_RENDER_FRAMESIZE_20MS ) ) { - return shl( ( 1000 / ( FRAMES_PER_SEC * IVAS_MAX_PARAM_SPATIAL_SUBFRAMES ) ), 2 ); + return ( ( 1000 / ( FRAMES_PER_SEC * IVAS_MAX_PARAM_SPATIAL_SUBFRAMES ) ) << 2 ); } return 0; } @@ -637,7 +637,7 @@ ivas_error IVAS_DEC_GetRenderFramesize( ivas_error IVAS_DEC_GetRenderFramesizeSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word16 *render_framesize /* o : render framesize in samples */ + Word16 *render_framesize /* o : render framesize in samples Q0 */ ) { Word16 tmp; @@ -679,7 +679,7 @@ ivas_error IVAS_DEC_GetRenderFramesizeSamples( ivas_error IVAS_DEC_GetRenderFramesizeMs( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - UWord32 *render_framesize /* o : render framesize in samples */ + UWord32 *render_framesize /* o : render framesize in samples Q0 */ ) { test(); @@ -703,7 +703,7 @@ ivas_error IVAS_DEC_GetRenderFramesizeMs( ivas_error IVAS_DEC_GetReferencesUpdateFrequency( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word16 *update_frequency /* o : update 
frequency */ + Word16 *update_frequency /* o : update frequency Q0 */ ) { test(); @@ -937,7 +937,7 @@ ivas_error IVAS_DEC_FeedFrame_Serial( IF( EQ_16( (Word16) hIvasDec->mode, IVAS_DEC_MODE_EVS ) && hIvasDec->hVoIP != NULL && hIvasDec->hVoIP->hCurrentDataUnit != NULL && - NE_16( (Word16) hIvasDec->hVoIP->hCurrentDataUnit->partial_frame, 0 ) ) + ( (Word16) hIvasDec->hVoIP->hCurrentDataUnit->partial_frame != 0 ) ) { DEC_CORE_HANDLE st = hIvasDec->st_ivas->hSCE[0]->hCoreCoder[0]; st->codec_mode = MODE2; @@ -969,7 +969,7 @@ ivas_error IVAS_DEC_FeedFrame_Serial( ivas_error IVAS_DEC_GetSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ const Word16 nSamplesAsked, /* i : number of samples wanted by the caller */ - Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q0 */ Word16 *nOutSamples, /* o : number of samples per channel written to output buffer */ bool *needNewFrame /* o :indication that the decoder needs a new frame */ ) @@ -1090,33 +1090,33 @@ ivas_error IVAS_DEC_GetSamples( Word16 tmp_apaExecBuffer[APA_BUF]; IF( EQ_16( (Word16) hIvasDec->mode, IVAS_DEC_MODE_EVS ) ) { - for ( int i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) + FOR( Word16 i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) { - tmp_apaExecBuffer[i] = extract_l( L_shr( hIvasDec->apaExecBuffer_fx[i], Q11 ) ); + tmp_apaExecBuffer[i] = extract_l( L_shr( hIvasDec->apaExecBuffer_fx[i], Q11 ) ); // Q0 } IF( apa_exec_fx( hIvasDec->hTimeScaler, tmp_apaExecBuffer, (UWord16) imult3216( hIvasDec->nSamplesFrame, nTransportChannels ), (UWord16) hIvasDec->tsm_max_scaling, tmp_apaExecBuffer, &nTimeScalerOutSamples ) != 0 ) { return IVAS_ERR_UNKNOWN; } - for ( int i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) + FOR( Word16 i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) { - hIvasDec->apaExecBuffer_fx[i] = L_shl( tmp_apaExecBuffer[i], Q11 ); + hIvasDec->apaExecBuffer_fx[i] = L_shl( tmp_apaExecBuffer[i], Q11 ); // Q11 } } ELSE { - for ( int i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) + FOR( Word16 i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) { - tmp_apaExecBuffer[i] = extract_l( L_shr( hIvasDec->apaExecBuffer_fx[i], Q12 ) ); + tmp_apaExecBuffer[i] = extract_l( L_shr( hIvasDec->apaExecBuffer_fx[i], Q12 ) ); // Q(-1) } IF( apa_exec_ivas_fx( hIvasDec->hTimeScaler, tmp_apaExecBuffer, (UWord16) imult3216( hIvasDec->nSamplesFrame, nTransportChannels ), (UWord16) hIvasDec->tsm_max_scaling, tmp_apaExecBuffer, &nTimeScalerOutSamples ) != 0 ) { return IVAS_ERR_UNKNOWN; } - for ( int i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) + FOR( Word16 i = 0; i < APA_BUF_PER_CHANNEL * nTransportChannels; ++i ) { - hIvasDec->apaExecBuffer_fx[i] = L_shl( tmp_apaExecBuffer[i], Q12 ); + hIvasDec->apaExecBuffer_fx[i] = L_shl( tmp_apaExecBuffer[i], Q12 ); // Q11 } } assert( LE_32( (Word32) nTimeScalerOutSamples, APA_BUF ) ); @@ -1189,7 +1189,7 @@ static ivas_error IVAS_DEC_Setup( UWord8 *nTransportChannels, /* o : number of decoded transport PCM channels */ UWord8 *nOutChannels, /* o : number of decoded out channels (PCM or CLDFB) */ UWord16 *nSamplesRendered, /* o : number of samples flushed from the last frame */ - 
Word16 *data /* o : output synthesis signal */ + Word16 *data /* o : output synthesis signal Q0 */ ) { ivas_error error; @@ -1258,7 +1258,7 @@ static ivas_error IVAS_DEC_Setup( test(); test(); test(); - IF( is_DTXrate( ivas_total_brate ) == 0 && st_ivas->ivas_format == MASA_ISM_FORMAT && GT_16( st_ivas->ini_frame, 0 ) && ( GT_32( ivas_total_brate, IVAS_SID_5k2 ) && NE_32( ivas_total_brate, st_ivas->hDecoderConfig->last_ivas_total_brate ) ) && st_ivas->ini_active_frame == 0 ) + IF( is_DTXrate( ivas_total_brate ) == 0 && EQ_16( (Word16) st_ivas->ivas_format, MASA_ISM_FORMAT ) && GT_16( st_ivas->ini_frame, 0 ) && ( GT_32( ivas_total_brate, IVAS_SID_5k2 ) && NE_32( ivas_total_brate, st_ivas->hDecoderConfig->last_ivas_total_brate ) ) && st_ivas->ini_active_frame == 0 ) { IF( st_ivas->hSpar ) { @@ -1317,7 +1317,7 @@ static ivas_error IVAS_DEC_Setup( test(); test(); test(); - IF( is_DTXrate( ivas_total_brate ) == 0 && st_ivas->ivas_format == MASA_ISM_FORMAT && st_ivas->ini_frame > 0 && ( GT_32( ivas_total_brate, IVAS_SID_5k2 ) && NE_32( ivas_total_brate, st_ivas->hDecoderConfig->last_ivas_total_brate ) ) && st_ivas->ini_active_frame == 0 ) + IF( is_DTXrate( ivas_total_brate ) == 0 && EQ_16( (Word16) st_ivas->ivas_format, MASA_ISM_FORMAT ) && st_ivas->ini_frame > 0 && ( GT_32( ivas_total_brate, IVAS_SID_5k2 ) && NE_32( ivas_total_brate, st_ivas->hDecoderConfig->last_ivas_total_brate ) ) && st_ivas->ini_active_frame == 0 ) { IF( EQ_16( st_ivas->ism_mode, ISM_MASA_MODE_DISC ) ) { @@ -1363,7 +1363,7 @@ static ivas_error IVAS_DEC_Setup( static ivas_error IVAS_DEC_GetTcSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word32 *pcmBuf_fx, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word32 *pcmBuf_fx, /* i/o: buffer for decoded PCM output. 
The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q11 */ Word16 *nOutSamples /* o : number of samples per channel written to output buffer */ ) { @@ -1403,7 +1403,7 @@ static ivas_error IVAS_DEC_GetTcSamples( FOR( n = 0; n < ivas_get_nchan_buffers_dec_ivas_fx( st_ivas, st_ivas->sba_analysis_order, st_ivas->hDecoderConfig->ivas_total_brate ); n++ ) { set32_fx( st_ivas->p_output_fx[n], 0, L_FRAME48k ); - st_ivas->hTcBuffer->tc_fx[n] = st_ivas->p_output_fx[n]; + st_ivas->hTcBuffer->tc_fx[n] = st_ivas->p_output_fx[n]; // Q11 } } @@ -1463,13 +1463,13 @@ static ivas_error IVAS_DEC_GetTcSamples( /*note : cldfb_size here signifies the original size which was assigned to cldfb_state_fx buffer not its current size*/ IF( sts[n]->cldfbAna != NULL ) { - scale_sig32( sts[n]->cldfbAna->cldfb_state_fx, sts[n]->cldfbAna->cldfb_size, sub( Q11, Q10 ) ); + scale_sig32( sts[n]->cldfbAna->cldfb_state_fx, sts[n]->cldfbAna->cldfb_size, sub( Q11, Q10 ) ); // Q11 sts[n]->cldfbAna->Q_cldfb_state = Q11; move16(); } IF( sts[n]->cldfbSyn != NULL ) { - scale_sig32( sts[n]->cldfbSyn->cldfb_state_fx, sts[n]->cldfbSyn->cldfb_size, sub( Q11, Q4 ) ); + scale_sig32( sts[n]->cldfbSyn->cldfb_state_fx, sts[n]->cldfbSyn->cldfb_size, sub( Q11, Q4 ) ); // Q11 sts[n]->cldfbSyn->Q_cldfb_state = Q11; move16(); } @@ -1498,7 +1498,7 @@ static ivas_error IVAS_DEC_RendererFeedTcSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ const Word16 nSamplesForRendering, /* i : number of TC samples wanted from the renderer */ Word16 *nSamplesResidual, /* o : number of samples not fitting into the renderer grid and buffer for the next call */ - Word32 *pcmBuf /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word32 *pcmBuf /* i/o: buffer for decoded PCM output. 
The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q11*/ ) { Decoder_Struct *st_ivas; @@ -1529,7 +1529,8 @@ static ivas_error IVAS_DEC_GetRenderedSamples( const UWord16 nSamplesForRendering, /* i : number of TC samples wanted from the renderer */ UWord16 *nSamplesRendered, /* o : number of samples rendered */ UWord16 *nSamplesAvailableNext, /* o : number of samples still available in the renerer pipeline */ - Word16 *pcmBuf ) + Word16 *pcmBuf // Q0 +) { Decoder_Struct *st_ivas; ivas_error error; @@ -1768,23 +1769,23 @@ ivas_error IVAS_DEC_GetObjectMetadata( IF( hIsmMeta == NULL || zero_flag ) { - metadata->azimuth_fx = 0; - metadata->elevation_fx = 0; - metadata->radius_fx = 512; - metadata->yaw_fx = 0; - metadata->pitch_fx = 0; - metadata->spread_fx = 0; - metadata->gainFactor_fx = ONE_IN_Q31; + metadata->azimuth_fx = 0; // Q22 + metadata->elevation_fx = 0; // Q22 + metadata->radius_fx = 512; // Q9 + metadata->yaw_fx = 0; // Q22 + metadata->pitch_fx = 0; // Q22 + metadata->spread_fx = 0; // Q22 + metadata->gainFactor_fx = ONE_IN_Q31; // Q31 metadata->non_diegetic_flag = 0; } ELSE { - metadata->azimuth_fx = hIsmMeta->azimuth_fx; - metadata->elevation_fx = hIsmMeta->elevation_fx; - metadata->radius_fx = hIsmMeta->radius_fx; - metadata->yaw_fx = hIsmMeta->yaw_fx; - metadata->pitch_fx = hIsmMeta->pitch_fx; - metadata->spread_fx = 0; + metadata->azimuth_fx = hIsmMeta->azimuth_fx; // Q22 + metadata->elevation_fx = hIsmMeta->elevation_fx; // Q22 + metadata->radius_fx = hIsmMeta->radius_fx; // Q9 + metadata->yaw_fx = hIsmMeta->yaw_fx; // Q22 + metadata->pitch_fx = hIsmMeta->pitch_fx; // Q22 + metadata->spread_fx = 0; // Q22 metadata->gainFactor_fx = ONE_IN_Q31; metadata->non_diegetic_flag = hIsmMeta->non_diegetic_flag; } @@ -1940,18 +1941,18 @@ ivas_error IVAS_DEC_FeedHeadTrackData( Word32 updateRate_fx = 1677721600; // value is 200 in Q23 move32(); - orientation.w_fx = L_shl( orientation.w_fx, sub( Q29, orientation.q_fact ) ); - orientation.x_fx = L_shl( orientation.x_fx, sub( Q29, orientation.q_fact ) ); - orientation.y_fx = L_shl( orientation.y_fx, sub( Q29, orientation.q_fact ) ); - orientation.z_fx = L_shl( orientation.z_fx, sub( Q29, orientation.q_fact ) ); - hHeadTrackData->OrientationTracker->refRot.w_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.w_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); - hHeadTrackData->OrientationTracker->refRot.x_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.x_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); - hHeadTrackData->OrientationTracker->refRot.y_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.y_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); - hHeadTrackData->OrientationTracker->refRot.z_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.z_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); - hHeadTrackData->OrientationTracker->absAvgRot.w_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.w_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); - hHeadTrackData->OrientationTracker->absAvgRot.x_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.x_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); - hHeadTrackData->OrientationTracker->absAvgRot.y_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.y_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); - 
hHeadTrackData->OrientationTracker->absAvgRot.z_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.z_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); + orientation.w_fx = L_shl( orientation.w_fx, sub( Q29, orientation.q_fact ) ); // Q29 + orientation.x_fx = L_shl( orientation.x_fx, sub( Q29, orientation.q_fact ) ); // Q29 + orientation.y_fx = L_shl( orientation.y_fx, sub( Q29, orientation.q_fact ) ); // Q29 + orientation.z_fx = L_shl( orientation.z_fx, sub( Q29, orientation.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->refRot.w_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.w_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->refRot.x_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.x_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->refRot.y_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.y_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->refRot.z_fx = L_shl( hHeadTrackData->OrientationTracker->refRot.z_fx, sub( Q29, hHeadTrackData->OrientationTracker->refRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->absAvgRot.w_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.w_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->absAvgRot.x_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.x_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->absAvgRot.y_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.y_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); // Q29 + hHeadTrackData->OrientationTracker->absAvgRot.z_fx = L_shl( hHeadTrackData->OrientationTracker->absAvgRot.z_fx, sub( Q29, hHeadTrackData->OrientationTracker->absAvgRot.q_fact ) ); // Q29 orientation.q_fact = Q29; hHeadTrackData->OrientationTracker->refRot.q_fact = Q29; @@ -2004,10 +2005,10 @@ ivas_error IVAS_DEC_FeedRefRotData( pOtr = hIvasDec->st_ivas->hHeadTrackData->OrientationTracker; - pOtr->refRot.w_fx = rotation.w_fx; - pOtr->refRot.x_fx = rotation.x_fx; - pOtr->refRot.z_fx = rotation.z_fx; - pOtr->refRot.y_fx = rotation.y_fx; + pOtr->refRot.w_fx = rotation.w_fx; // rotation.q_fact + pOtr->refRot.x_fx = rotation.x_fx; // rotation.q_fact + pOtr->refRot.z_fx = rotation.z_fx; // rotation.q_fact + pOtr->refRot.y_fx = rotation.y_fx; // rotation.q_fact pOtr->refRot.q_fact = rotation.q_fact; move32(); @@ -2140,15 +2141,16 @@ ivas_error IVAS_DEC_FeedCustomLsData( hLsSetupCustom->num_spk = hLsCustomData.num_spk; move16(); - Copy32( hLsCustomData.azimuth_fx, hLsSetupCustom->ls_azimuth_fx, hLsCustomData.num_spk ); - Copy32( hLsCustomData.elevation_fx, hLsSetupCustom->ls_elevation_fx, hLsCustomData.num_spk ); + Copy32( hLsCustomData.azimuth_fx, hLsSetupCustom->ls_azimuth_fx, hLsCustomData.num_spk ); // Q22 + Copy32( hLsCustomData.elevation_fx, hLsSetupCustom->ls_elevation_fx, hLsCustomData.num_spk ); // Q22 /* Set planar flag */ is_planar = 1; move16(); FOR( i = 0; i < hLsCustomData.num_spk; i++ ) { - IF( is_planar && hLsSetupCustom->ls_elevation_fx[i] != 0 ) + test(); + if ( is_planar && hLsSetupCustom->ls_elevation_fx[i] != 0 ) { is_planar = 0; move16(); @@ -2282,9 +2284,9 @@ static ivas_error copyRendererConfigStruct( hRCout->roomAcoustics.acousticPreDelay_fx = hRCin->roomAcoustics.acousticPreDelay_fx; 
hRCout->roomAcoustics.inputPreDelay_fx = hRCin->roomAcoustics.inputPreDelay_fx; - Copy32( hRCin->roomAcoustics.pFc_input_fx, hRCout->roomAcoustics.pFc_input_fx, CLDFB_NO_CHANNELS_MAX ); - Copy32( hRCin->roomAcoustics.pAcoustic_rt60_fx, hRCout->roomAcoustics.pAcoustic_rt60_fx, CLDFB_NO_CHANNELS_MAX ); - Copy32( hRCin->roomAcoustics.pAcoustic_dsr_fx, hRCout->roomAcoustics.pAcoustic_dsr_fx, CLDFB_NO_CHANNELS_MAX ); + Copy32( hRCin->roomAcoustics.pFc_input_fx, hRCout->roomAcoustics.pFc_input_fx, CLDFB_NO_CHANNELS_MAX ); // Q16 + Copy32( hRCin->roomAcoustics.pAcoustic_rt60_fx, hRCout->roomAcoustics.pAcoustic_rt60_fx, CLDFB_NO_CHANNELS_MAX ); // Q26 + Copy32( hRCin->roomAcoustics.pAcoustic_dsr_fx, hRCout->roomAcoustics.pAcoustic_dsr_fx, CLDFB_NO_CHANNELS_MAX ); // Q30 Copy( hRCin->directivity_fx, hRCout->directivity_fx, 3 * MAX_NUM_OBJECTS ); hRCout->roomAcoustics.use_er = hRCin->roomAcoustics.use_er; hRCout->roomAcoustics.lowComplexity = hRCin->roomAcoustics.lowComplexity; @@ -2382,9 +2384,9 @@ ivas_error IVAS_DEC_FeedRenderConfig( move32(); } - Copy32( renderConfig.roomAcoustics.pFc_input_fx, hRenderConfig->roomAcoustics.pFc_input_fx, CLDFB_NO_CHANNELS_MAX ); - Copy32( renderConfig.roomAcoustics.pAcoustic_rt60_fx, hRenderConfig->roomAcoustics.pAcoustic_rt60_fx, CLDFB_NO_CHANNELS_MAX ); - Copy32( renderConfig.roomAcoustics.pAcoustic_dsr_fx, hRenderConfig->roomAcoustics.pAcoustic_dsr_fx, CLDFB_NO_CHANNELS_MAX ); + Copy32( renderConfig.roomAcoustics.pFc_input_fx, hRenderConfig->roomAcoustics.pFc_input_fx, CLDFB_NO_CHANNELS_MAX ); // Q16 + Copy32( renderConfig.roomAcoustics.pAcoustic_rt60_fx, hRenderConfig->roomAcoustics.pAcoustic_rt60_fx, CLDFB_NO_CHANNELS_MAX ); // Q26 + Copy32( renderConfig.roomAcoustics.pAcoustic_dsr_fx, hRenderConfig->roomAcoustics.pAcoustic_dsr_fx, CLDFB_NO_CHANNELS_MAX ); // Q30 Copy( renderConfig.directivity_fx, hRenderConfig->directivity_fx, 3 * MAX_NUM_OBJECTS ); @@ -2444,7 +2446,7 @@ ivas_error IVAS_DEC_GetDelay( nSamples[1] = NS2SA_FX2( hDecoderConfig->output_Fs, get_delay_fx( DEC, hDecoderConfig->output_Fs, st_ivas->ivas_format, st_ivas->cldfbAnaDec[0] ) ); move16(); - nSamples[2] = (Word16) W_round64_L( W_mult0_32_32( L_shl( st_ivas->binaural_latency_ns, 1 ), out_fs_fx ) ); + nSamples[2] = extract_l( W_round64_L( W_mult0_32_32( L_shl( st_ivas->binaural_latency_ns, 1 ), out_fs_fx ) ) ); move16(); nSamples[0] = add( nSamples[1], nSamples[2] ); move16(); @@ -2496,7 +2498,7 @@ ivas_error IVAS_DEC_HasDecodedFirstGoodFrame( ivas_error IVAS_DEC_GetPcmFrameSize( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - Word32 *pcmFrameSize /* o : total size of the PCM output frame. This takes into account the number of output channels */ + Word32 *pcmFrameSize /* o : total size of the PCM output frame. 
This takes into account the number of output channels Q0 */ ) { test(); @@ -2572,7 +2574,7 @@ static void bsCompactToSerial( const UWord8 *compact, UWord16 *serial, UWord16 n /* Add 4 padding bytes required by core coder */ FOR( i = 0; i < 4 * 8; ++i ) { - serial[L_add( num_bits, (Word32) i )] = 0; + serial[( num_bits + i )] = 0; move16(); } #undef WMC_TOOL_SKIP @@ -2696,7 +2698,7 @@ ivas_error IVAS_DEC_VoIP_SetScale( error = IVAS_ERR_OK; move32(); - IF( EQ_16( hIvasDec->st_ivas->hDecoderConfig->Opt_tsm, false ) ) + IF( hIvasDec->st_ivas->hDecoderConfig->Opt_tsm == false ) { return IVAS_ERR_TSM_NOT_ENABLED; } @@ -2719,7 +2721,7 @@ ivas_error IVAS_DEC_VoIP_SetScale( ivas_error IVAS_DEC_TSM_SetQuality( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ - const Word16 quality /* i : target TSM quality */ + const Word16 quality /* i : target TSM quality Q14 */ ) { ivas_error error; @@ -2727,13 +2729,14 @@ ivas_error IVAS_DEC_TSM_SetQuality( error = IVAS_ERR_OK; move32(); - IF( EQ_16( hIvasDec->st_ivas->hDecoderConfig->Opt_tsm, false ) ) + IF( hIvasDec->st_ivas->hDecoderConfig->Opt_tsm == false ) { return IVAS_ERR_TSM_NOT_ENABLED; } ELSE { - hIvasDec->tsm_quality = quality; + hIvasDec->tsm_quality = quality; // Q14 + move16(); } return error; @@ -2749,7 +2752,7 @@ ivas_error IVAS_DEC_TSM_SetQuality( ivas_error IVAS_DEC_VoIP_GetSamples( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ UWord16 nSamplesPerChannel, /* i : number of samples per channel requested to be written to output buffer */ - Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q0 */ const UWord32 systemTimestamp_ms /* i : current system timestamp */ #ifdef SUPPORT_JBM_TRACEFILE , @@ -2804,7 +2807,7 @@ ivas_error IVAS_DEC_VoIP_GetSamples( extBufferedSamples = add( nSamplesRendered, nSamplesBuffered ); Word16 exp; extBufferedTime_ms = BASOP_Util_Divide3232_Scale( imult3216( extBufferedSamples, 1000 ), hDecoderConfig->output_Fs, &exp ); - extBufferedTime_ms = (UWord32) W_shr( extBufferedTime_ms, 15 - exp ); + extBufferedTime_ms = (UWord32) W_shr( extBufferedTime_ms, sub( 15, exp ) ); // Q0 dataUnit = NULL; /* pop one access unit from the jitter buffer */ @@ -2959,7 +2962,7 @@ ivas_error IVAS_DEC_VoIP_GetSamples( ivas_error IVAS_DEC_Flush( IVAS_DEC_HANDLE hIvasDec, /* i/o: IVAS decoder handle */ const Word16 nSamplesPerChannel, /* i : number of samples per channel requested to be written to output buffer */ - Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels */ + Word16 *pcmBuf, /* i/o: buffer for decoded PCM output. 
The memory must already be allocated and be able to hold the expected number of output samples, based on frame size and number of output channels Q0 */ Word16 *nSamplesFlushed /* o : number of samples flushed */ ) { @@ -3462,8 +3465,9 @@ void IVAS_DEC_PrintDisclaimer( void ) static ivas_error evs_dec_main_fx( Decoder_Struct *st_ivas, const Word16 nOutSamples, - Word32 *Buf_fx, - Word16 *pcmBuf ) + Word32 *Buf_fx, // Q11 + Word16 *pcmBuf // Q0 +) { DEC_CORE_HANDLE *hCoreCoder; Word16 mixer_left_fx, mixer_right_fx; @@ -3477,13 +3481,13 @@ static ivas_error evs_dec_main_fx( hCoreCoder[0]->total_num_bits = st_ivas->num_bits; move16(); move32(); - hCoreCoder[0]->output_frame_fx = extract_l( Mult_32_16( hCoreCoder[0]->output_Fs, 0x0290 ) ); + hCoreCoder[0]->output_frame_fx = extract_l( Mult_32_16( hCoreCoder[0]->output_Fs, 0x0290 /*Q0*/ ) ); // Q0 move16(); mdct_switching_dec( hCoreCoder[0] ); FOR( ch = 0; ch < MAX_OUTPUT_CHANNELS_IN_DIEGETIC_PAN; ch++ ) { - p_output_fx[ch] = st_ivas->p_output_fx[ch]; + p_output_fx[ch] = st_ivas->p_output_fx[ch]; // Q0 } /* run the main EVS decoding routine */ @@ -3538,7 +3542,7 @@ static ivas_error evs_dec_main_fx( move16(); IF( EQ_16( st_ivas->renderer_type, RENDERER_NON_DIEGETIC_DOWNMIX ) ) { - mixer_left_fx = add( shr( st_ivas->hDecoderConfig->non_diegetic_pan_gain_fx, Q1 ), ONE_IN_Q14 ); + mixer_left_fx = add( shr( st_ivas->hDecoderConfig->non_diegetic_pan_gain_fx, Q1 ), ONE_IN_Q14 ); // Q14 mixer_right_fx = sub( MAX16B, mixer_left_fx ); v_multc_fixed_16( p_output_fx[0], mixer_right_fx, p_output_fx[1], nOutSamples ); /* Q11 */ diff --git a/lib_dec/lp_exc_d_fx.c b/lib_dec/lp_exc_d_fx.c index e0bcb2c609e4b5d5d936e2b237c43a6370eba472..cb94c47f7c6da95863e461c46d82ea62ed3bc4a0 100644 --- a/lib_dec/lp_exc_d_fx.c +++ b/lib_dec/lp_exc_d_fx.c @@ -57,7 +57,7 @@ void lp_filt_exc_dec_fx( lp_flag = (Word16) get_next_indice_fx( st_fx, 1 ); } } - IF( EQ_16( lp_flag, LOW_PASS ) ) + IF( lp_flag == LOW_PASS ) { /* pointer positioning to avoid doing it inside the loop */ test(); @@ -76,9 +76,9 @@ void lp_filt_exc_dec_fx( FOR( i = 0; i < L_subfr; i++ ) { - L_tmp = L_mult( fac_n, exc[add( sub( i, 1 ), i_subfr )] ); - L_tmp = L_mac( L_tmp, fac_m, exc[add( i /*+ 0 */, i_subfr )] ); - code[i] = mac_r( L_tmp, fac_n, exc[add( add( i, 1 ), i_subfr )] ); + L_tmp = L_mult( fac_n, exc[( ( i - 1 ) + i_subfr )] ); + L_tmp = L_mac( L_tmp, fac_m, exc[( i /*+ 0 */ + i_subfr )] ); + code[i] = mac_r( L_tmp, fac_n, exc[( ( i + 1 ) + i_subfr )] ); move16(); } diff --git a/lib_rend/ivas_efap.c b/lib_rend/ivas_efap.c index ddaf6a41a3016f64c306e453c1dcab21c4d75b25..e9f7d3d33705de17266d1919fffb27cb805e362e 100644 --- a/lib_rend/ivas_efap.c +++ b/lib_rend/ivas_efap.c @@ -384,7 +384,7 @@ void efap_determine_gains_fx( set32_fx( hEFAPdata->bufferLong_fx, 0, hEFAPdata->vtxData.numVtx ); /* Wrap angles to correct range */ - panning_wrap_angles_fixed( azi_deg, ele_deg, &azi_wrap_int, &ele_wrap_int ); + panning_wrap_angles_fx( azi_deg, ele_deg, &azi_wrap_int, &ele_wrap_int ); /* Panning */ efap_panning_fx( azi_wrap_int, ele_wrap_int, &hEFAPdata->polyData, hEFAPdata->bufferLong_fx ); diff --git a/lib_rend/ivas_prot_rend.h b/lib_rend/ivas_prot_rend.h index 033086191940e169ae61d7b247fcb1c78ec195d8..8a9e8185d112cb9a33fb9c20ddb2089a36c397a1 100644 --- a/lib_rend/ivas_prot_rend.h +++ b/lib_rend/ivas_prot_rend.h @@ -254,19 +254,21 @@ void efap_determine_gains( #ifdef IVAS_FLOAT_FIXED ivas_error vbap_init_data_fx( - VBAP_HANDLE *hVBAPdata, /* i/o: handle for VBAP data structure that will be initialized */ - const 
Word32 *speaker_node_azi_deg, /* i : vector of speaker node azimuths (positive left) */ - const Word32 *speaker_node_ele_deg, /* i : vector of speaker node elevations (positive up) */ - const Word16 num_speaker_nodes, /* i : number of speaker nodes in the set */ - const IVAS_FORMAT ivas_format /* i : IVAS format */ + VBAP_HANDLE *hVBAPdata, /* i/o: handle for VBAP data structure that will be initialized */ + const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths (positive left) Q22 */ + const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations (positive up) Q22 */ + const Word16 num_speaker_nodes, /* i : number of speaker nodes in the set */ + const IVAS_FORMAT ivas_format /* i : IVAS format */ ); + void vbap_determine_gains_fx( - const VBAP_HANDLE hVBAPdata, /* i : prepared VBAP structure */ - Word32 *gains_fx, /* o : gain vector for loudspeakers for given direction */ - const Word16 azi_deg, /* i : azimuth in degrees for panning direction (positive left) */ - const Word16 ele_deg, /* i : elevation in degrees for panning direction (positive up) */ - const Word16 use_object_mode /* i : select between object mode panning and spatial mode panning */ + const VBAP_HANDLE hVBAPdata, /* i : prepared VBAP structure */ + Word32 *gains_fx, /* o : gain vector for loudspeakers for given direction Q29 */ + const Word16 azi_deg, /* i : azimuth in degrees for panning direction (positive left) Q0 */ + const Word16 ele_deg, /* i : elevation in degrees for panning direction (positive up) Q0 */ + const Word16 use_object_mode /* i : select between object mode panning and spatial mode panning */ ); + void vbap_free_data_fx( VBAP_HANDLE *hVBAPdata /* i/o: VBAP handle to be freed */ ); diff --git a/lib_rend/ivas_stat_rend.h b/lib_rend/ivas_stat_rend.h index 03b8fd46e334845852d76861260f6a8f42c44d05..49808bdd1c7936dd81301e37533bdb54424ca4f1 100644 --- a/lib_rend/ivas_stat_rend.h +++ b/lib_rend/ivas_stat_rend.h @@ -780,12 +780,12 @@ typedef struct vbap_data_structure Word16 bottom_virtual_speaker_node_index; Word16 back_virtual_speaker_node_index; #ifdef IVAS_FLOAT_FIXED - Word16 *bottom_virtual_speaker_node_division_gains_fx; - Word16 *top_virtual_speaker_node_division_gains_fx; - Word16 *back_virtual_speaker_node_division_gains_fx; - Word16 *object_mode_bottom_virtual_speaker_node_division_gains_fx; - Word16 *object_mode_top_virtual_speaker_node_division_gains_fx; - Word16 *object_mode_back_virtual_speaker_node_division_gains_fx; + Word16 *bottom_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *top_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *back_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *object_mode_bottom_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *object_mode_top_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *object_mode_back_virtual_speaker_node_division_gains_fx; /* Q16 */ #else float *bottom_virtual_speaker_node_division_gains; float *top_virtual_speaker_node_division_gains; @@ -2350,8 +2350,8 @@ typedef struct ivas_LS_setup_custom { Word16 is_planar_setup; /* flag to indicate if setup is planar or not */ Word16 num_spk; /* number of custom loudspeakers */ - Word32 ls_azimuth_fx[MAX_OUTPUT_CHANNELS]; /* custom loudspeaker azimuths */ - Word32 ls_elevation_fx[MAX_OUTPUT_CHANNELS]; /* custom loudspeaker elevations */ + Word32 ls_azimuth_fx[MAX_OUTPUT_CHANNELS]; /* custom loudspeaker azimuths Q22 */ + Word32 ls_elevation_fx[MAX_OUTPUT_CHANNELS]; /* custom loudspeaker elevations Q22 */ Word16 
num_lfe; /* number of LFE channels */ Word16 lfe_idx[MAX_OUTPUT_CHANNELS]; /* index for LFE channel insertion */ Word16 separate_ch_found; /* flag to indicate if a center channel was found */ diff --git a/lib_rend/ivas_vbap.c b/lib_rend/ivas_vbap.c index 3d62f595289f68e961bdd8c9636b893193245e46..9f461294721c16a1e89e1b967c52e83c24261985 100644 --- a/lib_rend/ivas_vbap.c +++ b/lib_rend/ivas_vbap.c @@ -94,8 +94,8 @@ typedef struct connection_option Word16 chA; Word16 chB; #ifdef IVAS_FLOAT_FIXED - Word32 arc_weighted_fx; - Word32 arc_fx; + Word32 arc_weighted_fx; /* Q29 */ + Word32 arc_fx; /* Q29 */ #else float arc; float arc_weighted; @@ -115,9 +115,9 @@ enum SpeakerNodeGroup typedef struct vbap_speaker_node_structure { #ifdef IVAS_FLOAT_FIXED - Word32 azi_deg_fx; - Word32 ele_deg_fx; - Word32 unit_vec_fx[3]; + Word32 azi_deg_fx; /* Q22 */ + Word32 ele_deg_fx; /* Q22 */ + Word32 unit_vec_fx[3]; /* Q30 */ #else float azi_deg; float ele_deg; @@ -137,13 +137,13 @@ static UWord8 vector_matrix_multiply_3x3_fx( const Word16 *src_vector, Word32 ma static UWord8 vector_matrix_multiply_3x3_32_fx( const Word32 *src_vector, Word32 matrix[3][3], Word32 *result, Word16 q_matrix ); -static void init_speaker_node_direction_data_fx( VBAP_SPEAKER_NODE *speaker_node_data, const Word32 *speaker_node_azi_deg_fx, const Word32 *speaker_node_ele_deg_fx, const int16_t num_speaker_nodes ); +static void init_speaker_node_direction_data_fx( VBAP_SPEAKER_NODE *speaker_node_data, const Word32 *speaker_node_azi_deg_fx, const Word32 *speaker_node_ele_deg_fx, const Word16 num_speaker_nodes ); static Word16 determine_virtual_surface_triplets_fx( const Word16 num_speaker_nodes, const VBAP_SPEAKER_NODE *speaker_node_data, Word16 connections[][2], const Word16 max_num_connections, VBAP_VS_TRIPLET *triplets, Word16 initial_search_indices[VBAP_NUM_SEARCH_SECTORS], enum SpeakerNodeGroup allowed_group ); -static void determine_initial_search_indices_fx( const int16_t num_triplets, const Word32 triplet_azidegs_fx[VBAP_MAX_NUM_TRIPLETS], int16_t initial_search_indices[VBAP_NUM_SEARCH_SECTORS] ); +static void determine_initial_search_indices_fx( const Word16 num_triplets, const Word32 triplet_azidegs_fx[VBAP_MAX_NUM_TRIPLETS], Word16 initial_search_indices[VBAP_NUM_SEARCH_SECTORS] ); -static ivas_error determine_connections_fx( const Word16 num_speaker_nodes, const VBAP_SPEAKER_NODE *speaker_node_data, Word16 connections[][2], const Word16 max_num_connections, Word16 *group1_count, Word16 *group2_start, int16_t *group2_count ); +static ivas_error determine_connections_fx( const Word16 num_speaker_nodes, const VBAP_SPEAKER_NODE *speaker_node_data, Word16 connections[][2], const Word16 max_num_connections, Word16 *group1_count, Word16 *group2_start, Word16 *group2_count ); static void formulate_horizontal_connections_fx( const VBAP_SPEAKER_NODE *speaker_node_data, const Word16 num_speaker_nodes, Word16 connections[][2], Word16 *connection_write_index ); @@ -157,7 +157,7 @@ static enum VirtualSpeakerNodeType check_need_of_virtual_speaker_node_fx( VBAP_H static Word16 determine_best_triplet_and_gains_fx( VBAP_SEARCH_STRUCT *search_struct, const Word16 panning_unit_vec_fx[3], const Word16 azi_deg, Word32 gains_fx[3] ); -static void determine_virtual_speaker_node_division_gains_fx( const Word16 virtual_speaker_node_index, Word16 *virtual_node_division_gains_fx, Word16 *max_exp, int16_t connections[][2], const enum VirtualSpeakerNodeType type, const Word16 max_num_connections, const Word16 num_speaker_nodes, const Word16 use_object_mode ); 
+static void determine_virtual_speaker_node_division_gains_fx( const Word16 virtual_speaker_node_index, Word16 *virtual_node_division_gains_fx, Word16 connections[][2], const enum VirtualSpeakerNodeType type, const Word16 max_num_connections, const Word16 num_speaker_nodes, const Word16 use_object_mode ); static void reorder_triplets_fx( VBAP_VS_TRIPLET *triplets, const Word16 *target_order, const Word16 num_triplets ); #else @@ -193,13 +193,14 @@ static void reorder_triplets( VBAP_VS_TRIPLET *triplets, const int16_t *target_o * * Initialize VBAP data structure for the speaker node set *-------------------------------------------------------------------------*/ + #ifdef IVAS_FLOAT_FIXED ivas_error vbap_init_data_fx( - VBAP_HANDLE *hVBAPdata, /* i/o: handle for VBAP data structure that will be initialized */ - const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths (positive left) */ - const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations (positive up) */ - const Word16 num_speaker_nodes, /* i : number of speaker nodes in the set */ - const IVAS_FORMAT ivas_format /* i : IVAS format */ + VBAP_HANDLE *hVBAPdata, /* i/o: handle for VBAP data structure that will be initialized */ + const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths (positive left) Q22 */ + const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations (positive up) Q22 */ + const Word16 num_speaker_nodes, /* i : number of speaker nodes in the set */ + const IVAS_FORMAT ivas_format /* i : IVAS format */ ) { /* Variables */ @@ -213,8 +214,8 @@ ivas_error vbap_init_data_fx( enum VirtualSpeakerNodeType virtual_bottom_type; enum VirtualSpeakerNodeType virtual_back_type; - Word32 speaker_node_azi_deg_internal_fx[VBAP_MAX_NUM_SPEAKER_NODES]; - Word32 speaker_node_ele_deg_internal_fx[VBAP_MAX_NUM_SPEAKER_NODES]; + Word32 speaker_node_azi_deg_internal_fx[VBAP_MAX_NUM_SPEAKER_NODES]; /* Q22 */ + Word32 speaker_node_ele_deg_internal_fx[VBAP_MAX_NUM_SPEAKER_NODES]; /* Q22 */ VBAP_SPEAKER_NODE speaker_node_data[VBAP_MAX_NUM_SPEAKER_NODES]; VBAP_DATA *vbap; @@ -299,7 +300,7 @@ ivas_error vbap_init_data_fx( is_success &= vbap->object_mode_bottom_virtual_speaker_node_division_gains_fx != NULL; } speaker_node_azi_deg_internal_fx[vbap->bottom_virtual_speaker_node_index] = 0; - speaker_node_ele_deg_internal_fx[vbap->bottom_virtual_speaker_node_index] = -377487360; /*-90.0f in Q22*/ + speaker_node_ele_deg_internal_fx[vbap->bottom_virtual_speaker_node_index] = -377487360; /* -90.0f in Q22 */ move32(); move32(); } @@ -323,7 +324,7 @@ ivas_error vbap_init_data_fx( is_success &= vbap->object_mode_top_virtual_speaker_node_division_gains_fx != NULL; } speaker_node_azi_deg_internal_fx[vbap->top_virtual_speaker_node_index] = 0; - speaker_node_ele_deg_internal_fx[vbap->top_virtual_speaker_node_index] = 377487360; /*90.0f in Q22*/ + speaker_node_ele_deg_internal_fx[vbap->top_virtual_speaker_node_index] = 377487360; /* 90.0f in Q22 */ move32(); move16(); } @@ -346,24 +347,14 @@ ivas_error vbap_init_data_fx( set16_fx( vbap->object_mode_back_virtual_speaker_node_division_gains_fx, 0, num_speaker_nodes ); is_success &= vbap->object_mode_back_virtual_speaker_node_division_gains_fx != NULL; } - speaker_node_azi_deg_internal_fx[vbap->back_virtual_speaker_node_index] = 754974720; + speaker_node_azi_deg_internal_fx[vbap->back_virtual_speaker_node_index] = 754974720; /* 180.0f in Q22 */ speaker_node_ele_deg_internal_fx[vbap->back_virtual_speaker_node_index] = 0; move32(); 
move16(); } + init_speaker_node_direction_data_fx( speaker_node_data, speaker_node_azi_deg_internal_fx, speaker_node_ele_deg_internal_fx, vbap->num_speaker_nodes_internal ); -#ifdef TRUE0 - /*TODO: Clean up of update float buffers*/ - for ( int ch = 0; ch < vbap->num_speaker_nodes_internal; ch++ ) - { - speaker_node_data[ch].unit_vec[0] = fix_to_float( speaker_node_data[ch].unit_vec_fx[0], Q30 ); - speaker_node_data[ch].unit_vec[1] = fix_to_float( speaker_node_data[ch].unit_vec_fx[1], Q30 ); - speaker_node_data[ch].unit_vec[2] = fix_to_float( speaker_node_data[ch].unit_vec_fx[2], Q30 ); - speaker_node_data[ch].ele_deg = fix_to_float( speaker_node_data[ch].ele_deg_fx, Q22 ); - speaker_node_data[ch].azi_deg = fix_to_float( speaker_node_data[ch].azi_deg_fx, Q22 ); - } -#endif /* Allocate and determine node-node connections */ max_num_connections = mult0( ( sub( vbap->num_speaker_nodes_internal, 2 ) ), 3 ); /* Theoretical maximum */ @@ -382,10 +373,12 @@ ivas_error vbap_init_data_fx( Word16 speaker_nodes_horiz_internal = 0; move16(); UWord8 loop_done = 0; + move16(); /* Count nodes in different groups to reserve correct memory */ FOR( ch = 0; ch < vbap->num_speaker_nodes_internal && !loop_done; ch++ ) { + test(); SWITCH( speaker_node_data[ch].group ) { case SPEAKER_NODE_ALL: @@ -411,21 +404,21 @@ ivas_error vbap_init_data_fx( } } - IF( ( vbap->search_struct[0].triplets = (VBAP_VS_TRIPLET *) malloc( ( ( speaker_nodes_group1_internal - 2 ) * 2 - ( max( 0, ( speaker_nodes_horiz_internal - 2 ) ) ) ) * sizeof( VBAP_VS_TRIPLET ) ) ) == NULL ) + IF( ( vbap->search_struct[0].triplets = (VBAP_VS_TRIPLET *) malloc( ( ( speaker_nodes_group1_internal - 2 ) * 2 - ( s_max( 0, ( speaker_nodes_horiz_internal - 2 ) ) ) ) * sizeof( VBAP_VS_TRIPLET ) ) ) == NULL ) { return ( IVAS_ERROR( IVAS_ERR_FAILED_ALLOC, "Can not allocate memory for VBAP data\n" ) ); } - is_success &= vbap->search_struct[0].triplets != NULL; + is_success = s_and( is_success, vbap->search_struct[0].triplets != NULL ); IF( speaker_nodes_group2_internal > 0 ) { vbap->num_search_structs = 2; move16(); - IF( ( vbap->search_struct[1].triplets = (VBAP_VS_TRIPLET *) malloc( ( ( speaker_nodes_group2_internal - 2 ) * 2 - ( max( 0, ( speaker_nodes_horiz_internal - 2 ) ) ) ) * sizeof( VBAP_VS_TRIPLET ) ) ) == NULL ) + IF( ( vbap->search_struct[1].triplets = (VBAP_VS_TRIPLET *) malloc( ( ( speaker_nodes_group2_internal - 2 ) * 2 - ( s_max( 0, ( speaker_nodes_horiz_internal - 2 ) ) ) ) * sizeof( VBAP_VS_TRIPLET ) ) ) == NULL ) { return ( IVAS_ERROR( IVAS_ERR_FAILED_ALLOC, "Can not allocate memory for VBAP data\n" ) ); } - is_success &= vbap->search_struct[1].triplets != NULL; + is_success = s_and( is_success, vbap->search_struct[1].triplets != NULL ); } ELSE { @@ -441,29 +434,29 @@ ivas_error vbap_init_data_fx( { /* If all speaker nodes belong to ALL set, then we only create one triplet set and search structure */ vbap->search_struct[0].num_triplets = determine_virtual_surface_triplets_fx( vbap->num_speaker_nodes_internal, speaker_node_data, connections, max_num_connections, vbap->search_struct[0].triplets, vbap->search_struct[0].initial_search_indices, SPEAKER_NODE_ALL ); + move16(); } ELSE { /* Otherwise, we have two sets and can handle them separately for more opmitized processing. 
*/ vbap->search_struct[0].num_triplets = determine_virtual_surface_triplets_fx( vbap->num_speaker_nodes_internal, speaker_node_data, connections, connection_group1_count, vbap->search_struct[0].triplets, vbap->search_struct[0].initial_search_indices, SPEAKER_NODE_BOTTOM_HALF ); + move16(); vbap->search_struct[1].num_triplets = determine_virtual_surface_triplets_fx( vbap->num_speaker_nodes_internal, speaker_node_data, connections + connection_group2_start, connection_group2_count, vbap->search_struct[1].triplets, vbap->search_struct[1].initial_search_indices, SPEAKER_NODE_TOP_HALF ); + move16(); } } /* Determine how the virtual node gains should be distributed to real nodes, if necessary (checked within function). */ IF( is_success ) { - Word16 max_exp = 31; - move16(); - - determine_virtual_speaker_node_division_gains_fx( vbap->top_virtual_speaker_node_index, vbap->top_virtual_speaker_node_division_gains_fx, &max_exp, connections, virtual_top_type, max_num_connections, num_speaker_nodes, 0 ); - determine_virtual_speaker_node_division_gains_fx( vbap->bottom_virtual_speaker_node_index, vbap->bottom_virtual_speaker_node_division_gains_fx, &max_exp, connections, virtual_bottom_type, max_num_connections, num_speaker_nodes, 0 ); - determine_virtual_speaker_node_division_gains_fx( vbap->back_virtual_speaker_node_index, vbap->back_virtual_speaker_node_division_gains_fx, &max_exp, connections, virtual_back_type, max_num_connections, num_speaker_nodes, 0 ); + determine_virtual_speaker_node_division_gains_fx( vbap->top_virtual_speaker_node_index, vbap->top_virtual_speaker_node_division_gains_fx, connections, virtual_top_type, max_num_connections, num_speaker_nodes, 0 ); + determine_virtual_speaker_node_division_gains_fx( vbap->bottom_virtual_speaker_node_index, vbap->bottom_virtual_speaker_node_division_gains_fx, connections, virtual_bottom_type, max_num_connections, num_speaker_nodes, 0 ); + determine_virtual_speaker_node_division_gains_fx( vbap->back_virtual_speaker_node_index, vbap->back_virtual_speaker_node_division_gains_fx, connections, virtual_back_type, max_num_connections, num_speaker_nodes, 0 ); IF( EQ_32( ivas_format, MASA_ISM_FORMAT ) ) { - determine_virtual_speaker_node_division_gains_fx( vbap->top_virtual_speaker_node_index, vbap->object_mode_top_virtual_speaker_node_division_gains_fx, &max_exp, connections, virtual_top_type == NO_VIRTUAL_SPEAKER_NODE ? NO_VIRTUAL_SPEAKER_NODE : VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, max_num_connections, num_speaker_nodes, 1 ); - determine_virtual_speaker_node_division_gains_fx( vbap->bottom_virtual_speaker_node_index, vbap->object_mode_bottom_virtual_speaker_node_division_gains_fx, &max_exp, connections, virtual_bottom_type == NO_VIRTUAL_SPEAKER_NODE ? NO_VIRTUAL_SPEAKER_NODE : VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, max_num_connections, num_speaker_nodes, 1 ); - determine_virtual_speaker_node_division_gains_fx( vbap->back_virtual_speaker_node_index, vbap->object_mode_back_virtual_speaker_node_division_gains_fx, &max_exp, connections, virtual_back_type == NO_VIRTUAL_SPEAKER_NODE ? NO_VIRTUAL_SPEAKER_NODE : VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, max_num_connections, num_speaker_nodes, 1 ); + determine_virtual_speaker_node_division_gains_fx( vbap->top_virtual_speaker_node_index, vbap->object_mode_top_virtual_speaker_node_division_gains_fx, connections, virtual_top_type == NO_VIRTUAL_SPEAKER_NODE ? 
NO_VIRTUAL_SPEAKER_NODE : VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, max_num_connections, num_speaker_nodes, 1 ); + determine_virtual_speaker_node_division_gains_fx( vbap->bottom_virtual_speaker_node_index, vbap->object_mode_bottom_virtual_speaker_node_division_gains_fx, connections, virtual_bottom_type == NO_VIRTUAL_SPEAKER_NODE ? NO_VIRTUAL_SPEAKER_NODE : VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, max_num_connections, num_speaker_nodes, 1 ); + determine_virtual_speaker_node_division_gains_fx( vbap->back_virtual_speaker_node_index, vbap->object_mode_back_virtual_speaker_node_division_gains_fx, connections, virtual_back_type == NO_VIRTUAL_SPEAKER_NODE ? NO_VIRTUAL_SPEAKER_NODE : VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, max_num_connections, num_speaker_nodes, 1 ); } } @@ -730,6 +723,8 @@ ivas_error vbap_init_data( return IVAS_ERR_OK; } #endif + + /*-------------------------------------------------------------------------* * vbap_free_data() * @@ -843,33 +838,33 @@ void vbap_free_data( *-------------------------------------------------------------------------*/ #ifdef IVAS_FLOAT_FIXED void vbap_determine_gains_fx( - const VBAP_HANDLE hVBAPdata, /* i : prepared VBAP structure */ - Word32 *gains_fx, /* o : gain vector for loudspeakers for given direction */ - const Word16 azi_deg, /* i : azimuth in degrees for panning direction (positive left) */ - const Word16 ele_deg, /* i : elevation in degrees for panning direction (positive up) */ - const Word16 use_object_mode /* i : select between object mode panning and spatial mode panning */ + const VBAP_HANDLE hVBAPdata, /* i : prepared VBAP structure */ + Word32 *gains_fx, /* o : gain vector for loudspeakers for given direction Q29 */ + const Word16 azi_deg, /* i : azimuth in degrees for panning direction (positive left) Q0 */ + const Word16 ele_deg, /* i : elevation in degrees for panning direction (positive up) Q0 */ + const Word16 use_object_mode /* i : select between object mode panning and spatial mode panning */ ) { /* This function formulates gains for the given angle. The triplet-selection has been pre-formulated. 
*/ Word16 ch, ch2; Word16 triplet_ch; Word16 triplet_index; - Word16 panning_unit_vec_fx[3]; - Word32 gain_triplet_fx[3]; + Word16 panning_unit_vec_fx[3]; /* Q15 */ + Word32 gain_triplet_fx[3]; /* Q16 */ Word32 norm_value_fx; Word32 gain_ene_fx; - Word32 azi_norm; - Word32 ele_norm; - Word32 azi_temp_fx; - Word32 ele_temp_fx; + Word16 azi_norm; /* Q15 */ + Word16 ele_norm; /* Q15 */ + Word32 azi_temp_fx; /* Q22 */ + Word32 ele_temp_fx; /* Q22 */ Word16 num_speaker_nodes; Word16 bottom_virtual_speaker_node_index; Word16 top_virtual_speaker_node_index; Word16 back_virtual_speaker_node_index; VBAP_VS_TRIPLET *selected_triplet; - Word16 *bottom_virtual_speaker_node_division_gains_fx; - Word16 *top_virtual_speaker_node_division_gains_fx; - Word16 *back_virtual_speaker_node_division_gains_fx; + Word16 *bottom_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *top_virtual_speaker_node_division_gains_fx; /* Q16 */ + Word16 *back_virtual_speaker_node_division_gains_fx; /* Q16 */ push_wmops( "vbap_gains" ); @@ -895,13 +890,14 @@ void vbap_determine_gains_fx( back_virtual_speaker_node_division_gains_fx = hVBAPdata->back_virtual_speaker_node_division_gains_fx; } - panning_wrap_angles_fixed( L_shl( azi_deg, 22 ), L_shl( ele_deg, 22 ), &azi_temp_fx, &ele_temp_fx ); - azi_norm = L_shr( Mpy_32_32( azi_temp_fx, 5965232 ), 7 ); - ele_norm = L_shr( Mpy_32_32( ele_temp_fx, 5965232 ), 7 ); + panning_wrap_angles_fx( L_shl( azi_deg, Q22 ), L_shl( ele_deg, Q22 ), &azi_temp_fx, &ele_temp_fx ); + azi_norm = extract_l( L_shr( Mpy_32_32( azi_temp_fx, ONE_BY_360_Q31 ), Q7 ) ); /* Q15 */ + ele_norm = extract_l( L_shr( Mpy_32_32( ele_temp_fx, ONE_BY_360_Q31 ), Q7 ) ); /* Q15 */ - panning_unit_vec_fx[0] = mult( getCosWord16R2( (Word16) azi_norm ), getCosWord16R2( (Word16) ele_norm ) ); - panning_unit_vec_fx[1] = mult( getSineWord16R2( (Word16) azi_norm ), getCosWord16R2( (Word16) ele_norm ) ); - panning_unit_vec_fx[2] = getSineWord16R2( (Word16) ele_norm ); + panning_unit_vec_fx[0] = mult( getCosWord16R2( azi_norm ), getCosWord16R2( ele_norm ) ); /* Q15 */ + panning_unit_vec_fx[1] = mult( getSineWord16R2( azi_norm ), getCosWord16R2( ele_norm ) ); /* Q15 */ + panning_unit_vec_fx[2] = getSineWord16R2( ele_norm ); /* Q15 */ + move16(); move16(); move16(); @@ -911,13 +907,11 @@ void vbap_determine_gains_fx( IF( EQ_16( hVBAPdata->num_search_structs, 2 ) && ele_deg > 0 ) { triplet_index = determine_best_triplet_and_gains_fx( &( hVBAPdata->search_struct[1] ), panning_unit_vec_fx, azi_deg, gain_triplet_fx ); - move16(); selected_triplet = &hVBAPdata->search_struct[1].triplets[triplet_index]; } ELSE { triplet_index = determine_best_triplet_and_gains_fx( &( hVBAPdata->search_struct[0] ), panning_unit_vec_fx, azi_deg, gain_triplet_fx ); - move16(); selected_triplet = &hVBAPdata->search_struct[0].triplets[triplet_index]; } @@ -926,14 +920,14 @@ void vbap_determine_gains_fx( move32(); FOR( ch = 0; ch < 3; ch++ ) { - gain_ene_fx = L_add( gain_ene_fx, Mpy_32_32( gain_triplet_fx[ch], gain_triplet_fx[ch] ) ); // 2q -31 = 27 + gain_ene_fx = L_add( gain_ene_fx, Mpy_32_32( gain_triplet_fx[ch], gain_triplet_fx[ch] ) ); /* Q(2 * VBAP_VS_TRIPLET.q_inverse_matrix - 31) */ } - norm_value_fx = Isqrt( L_shr( gain_ene_fx, 1 ) ); // q = 47 - hVBAPdata->search_struct[0].triplets->q_inverse_matrix = 18 + norm_value_fx = Isqrt( L_shr( gain_ene_fx, 1 ) ); /* Q(31 - (2 * VBAP_VS_TRIPLET.q_inverse_matrix - 31 - 1) / 2 ) = Q(47 - VBAP_VS_TRIPLET.q_inverse_matrix) */ FOR( ch = 0; ch < 3; ch++ ) { - gain_triplet_fx[ch] = Mpy_32_32( 
gain_triplet_fx[ch], norm_value_fx ); // Q16 + gain_triplet_fx[ch] = Mpy_32_32( gain_triplet_fx[ch], norm_value_fx ); /* 47 - VBAP_VS_TRIPLET.q_inverse_matrix + VBAP_VS_TRIPLET.q_inverse_matrix - 31 = Q16 */ move32(); /* Sanity check for rounding issues */ @@ -956,7 +950,7 @@ void vbap_determine_gains_fx( { FOR( ch2 = 0; ch2 < num_speaker_nodes; ch2++ ) { - gains_fx[ch2] = L_add( gains_fx[ch2], L_shl( Mpy_32_16_1( gain_triplet_fx[ch], bottom_virtual_speaker_node_division_gains_fx[ch2] ), 12 ) ); // Q29 + gains_fx[ch2] = L_add( gains_fx[ch2], L_shl( Mpy_32_16_1( gain_triplet_fx[ch], bottom_virtual_speaker_node_division_gains_fx[ch2] ), Q12 ) ); /* Q16 + Q16 - Q15 + Q12 = Q29 */ move32(); } } @@ -964,7 +958,7 @@ void vbap_determine_gains_fx( { FOR( ch2 = 0; ch2 < num_speaker_nodes; ch2++ ) { - gains_fx[ch2] = L_add( gains_fx[ch2], L_shl( Mpy_32_16_1( gain_triplet_fx[ch], top_virtual_speaker_node_division_gains_fx[ch2] ), 12 ) ); // Q29 + gains_fx[ch2] = L_add( gains_fx[ch2], L_shl( Mpy_32_16_1( gain_triplet_fx[ch], top_virtual_speaker_node_division_gains_fx[ch2] ), Q12 ) ); /* Q16 + Q16 - Q15 + Q12 = Q29 */ move32(); } } @@ -972,13 +966,13 @@ void vbap_determine_gains_fx( { FOR( ch2 = 0; ch2 < num_speaker_nodes; ch2++ ) { - gains_fx[ch2] = L_add( gains_fx[ch2], L_shl( Mpy_32_16_1( gain_triplet_fx[ch], back_virtual_speaker_node_division_gains_fx[ch2] ), 12 ) ); // Q29 + gains_fx[ch2] = L_add( gains_fx[ch2], L_shl( Mpy_32_16_1( gain_triplet_fx[ch], back_virtual_speaker_node_division_gains_fx[ch2] ), Q12 ) ); /* Q16 + Q16 - Q15 + Q12 = Q29 */ move32(); } } ELSE { - gains_fx[triplet_ch] = L_add( gains_fx[triplet_ch], L_shl( gain_triplet_fx[ch], 13 ) ); // Q29 + gains_fx[triplet_ch] = L_add( gains_fx[triplet_ch], L_shl( gain_triplet_fx[ch], Q13 ) ); /* Q16 + Q13 = Q29 */ move32(); } } @@ -1126,11 +1120,12 @@ void vbap_determine_gains( * * 3-by-3 vector cross product *-------------------------------------------------------------------------*/ + #ifdef IVAS_FLOAT_FIXED static void vbap_crossp_fx( - const Word32 *vec1_fx, /* i : input vector 1 */ - const Word32 *vec2_fx, /* i : input vector 2 */ - Word32 *crossProduct_fx /* o : cross product output */ + const Word32 *vec1_fx, /* i : input vector 1 Qx */ + const Word32 *vec2_fx, /* i : input vector 2 Qy */ + Word32 *crossProduct_fx /* o : cross product output Qx + Qy - 31 */ ) { @@ -1168,43 +1163,43 @@ static void vbap_crossp( #ifdef IVAS_FLOAT_FIXED /*! r: Status result if triplet is usable for panning. Allows early exit. 
*/ static UWord8 vector_matrix_multiply_3x3_fx( - const Word16 *src_vector, /* i : input vector */ - Word32 matrix[3][3], /* i : input matrix */ - Word32 *result, /* o : output vector */ + const Word16 *src_vector, /* i : input vector Q15 */ + Word32 matrix[3][3], /* i : input matrix Q(q_matrix) */ + Word32 *result, /* o : output vector Q(q_matrix) */ Word16 q_matrix ) { - result[0] = Mpy_32_16_1( matrix[0][0], src_vector[0] ); // q = q_matrix - result[0] = L_add( result[0], Mpy_32_16_1( matrix[1][0], src_vector[1] ) ); - result[0] = L_add( result[0], Mpy_32_16_1( matrix[2][0], src_vector[2] ) ); + result[0] = Mpy_32_16_1( matrix[0][0], src_vector[0] ); /* Q(q_matrix) */ + result[0] = L_add( result[0], Mpy_32_16_1( matrix[1][0], src_vector[1] ) ); /* Q(q_matrix) */ + result[0] = L_add( result[0], Mpy_32_16_1( matrix[2][0], src_vector[2] ) ); /* Q(q_matrix) */ move32(); move32(); move32(); - IF( LT_32( result[0], Mpy_32_16_1( L_lshl( 1, q_matrix ), -327 ) ) ) // 327 = 0.01 in Q = 15 + IF( LT_32( result[0], Mpy_32_16_1( L_lshl( 1, q_matrix ), -327 /* -0.01 in Q15 */ ) ) ) { return 0; } - result[1] = Mpy_32_16_1( matrix[0][1], src_vector[0] ); - result[1] = L_add( result[1], Mpy_32_16_1( matrix[1][1], src_vector[1] ) ); - result[1] = L_add( result[1], Mpy_32_16_1( matrix[2][1], src_vector[2] ) ); + result[1] = Mpy_32_16_1( matrix[0][1], src_vector[0] ); /* Q(q_matrix) */ + result[1] = L_add( result[1], Mpy_32_16_1( matrix[1][1], src_vector[1] ) ); /* Q(q_matrix) */ + result[1] = L_add( result[1], Mpy_32_16_1( matrix[2][1], src_vector[2] ) ); /* Q(q_matrix) */ move32(); move32(); move32(); - IF( LT_32( result[1], Mpy_32_16_1( L_shl( 1, q_matrix ), -327 ) ) ) + IF( LT_32( result[1], Mpy_32_16_1( L_shl( 1, q_matrix ), -327 /* -0.01 in Q15 */ ) ) ) { return 0; } - result[2] = Mpy_32_16_1( matrix[0][2], src_vector[0] ); - result[2] = L_add( result[2], Mpy_32_16_1( matrix[1][2], src_vector[1] ) ); - result[2] = L_add( result[2], Mpy_32_16_1( matrix[2][2], src_vector[2] ) ); + result[2] = Mpy_32_16_1( matrix[0][2], src_vector[0] ); /* Q(q_matrix) */ + result[2] = L_add( result[2], Mpy_32_16_1( matrix[1][2], src_vector[1] ) ); /* Q(q_matrix) */ + result[2] = L_add( result[2], Mpy_32_16_1( matrix[2][2], src_vector[2] ) ); /* Q(q_matrix) */ move32(); move32(); move32(); - IF( LT_32( result[2], Mpy_32_16_1( L_shl( 1, q_matrix ), -327 ) ) ) + IF( LT_32( result[2], Mpy_32_16_1( L_shl( 1, q_matrix ), -327 /* -0.01 in Q15 */ ) ) ) { return 0; } @@ -1213,42 +1208,42 @@ static UWord8 vector_matrix_multiply_3x3_fx( } static UWord8 vector_matrix_multiply_3x3_32_fx( - const Word32 *src_vector, /* i : input vector */ - Word32 matrix[3][3], /* i : input matrix */ - Word32 *result, /* o : output vector */ + const Word32 *src_vector, /* i : input vector Q30 */ + Word32 matrix[3][3], /* i : input matrix Q(q_matrix) */ + Word32 *result, /* o : output vector Q(q_matrix - 1) */ Word16 q_matrix ) { - result[0] = Mpy_32_32( matrix[0][0], src_vector[0] ); // q = ( q_matrix + 30 ) - 31 - result[0] = L_add( result[0], Mpy_32_32( matrix[1][0], src_vector[1] ) ); - result[0] = L_add( result[0], Mpy_32_32( matrix[2][0], src_vector[2] ) ); + result[0] = Mpy_32_32( matrix[0][0], src_vector[0] ); /* Q(q_matrix - 1) */ + result[0] = L_add( result[0], Mpy_32_32( matrix[1][0], src_vector[1] ) ); /* Q(q_matrix - 1) */ + result[0] = L_add( result[0], Mpy_32_32( matrix[2][0], src_vector[2] ) ); /* Q(q_matrix - 1) */ move32(); move32(); move32(); - IF( LT_32( result[0], Mpy_32_32( L_shl( 1, ( sub( q_matrix, 1 ) ) ), -21474836 ) ) ) // 
21474836 = 0.01 in Q = 31 + IF( LT_32( result[0], Mpy_32_32( L_shl( 1, ( sub( q_matrix, 1 ) ) ), -21474836 /* -0.01 in Q31 */ ) ) ) { return 0; } - result[1] = Mpy_32_32( matrix[0][1], src_vector[0] ); - result[1] = L_add( result[1], Mpy_32_32( matrix[1][1], src_vector[1] ) ); - result[1] = L_add( result[1], Mpy_32_32( matrix[2][1], src_vector[2] ) ); + result[1] = Mpy_32_32( matrix[0][1], src_vector[0] ); /* Q(q_matrix - 1) */ + result[1] = L_add( result[1], Mpy_32_32( matrix[1][1], src_vector[1] ) ); /* Q(q_matrix - 1) */ + result[1] = L_add( result[1], Mpy_32_32( matrix[2][1], src_vector[2] ) ); /* Q(q_matrix - 1) */ move32(); move32(); move32(); - IF( LT_32( result[1], Mpy_32_32( L_shl( 1, ( sub( q_matrix, 1 ) ) ), -21474836 ) ) ) // 21474836 = 0.01 in Q = 31 + IF( LT_32( result[1], Mpy_32_32( L_shl( 1, ( sub( q_matrix, 1 ) ) ), -21474836 /* -0.01 in Q31 */ ) ) ) { return 0; } - result[2] = Mpy_32_32( matrix[0][2], src_vector[0] ); - result[2] = L_add( result[2], Mpy_32_32( matrix[1][2], src_vector[1] ) ); - result[2] = L_add( result[2], Mpy_32_32( matrix[2][2], src_vector[2] ) ); + result[2] = Mpy_32_32( matrix[0][2], src_vector[0] ); /* Q(q_matrix - 1) */ + result[2] = L_add( result[2], Mpy_32_32( matrix[1][2], src_vector[1] ) ); /* Q(q_matrix - 1) */ + result[2] = L_add( result[2], Mpy_32_32( matrix[2][2], src_vector[2] ) ); /* Q(q_matrix - 1) */ move32(); move32(); move32(); - IF( LT_32( result[2], Mpy_32_32( L_shl( 1, ( sub( q_matrix, 1 ) ) ), -21474836 ) ) ) // 21474836 = 0.01 in Q = 31 + IF( LT_32( result[2], Mpy_32_32( L_shl( 1, ( sub( q_matrix, 1 ) ) ), -21474836 /* -0.01 in Q31 */ ) ) ) { return 0; } @@ -1292,8 +1287,8 @@ static uint8_t vector_matrix_multiply_3x3( return 1; } - #endif + /*----------------------------------------------------------------------------------------------* * determine_best_triplet_and_gains() * @@ -1301,14 +1296,12 @@ static uint8_t vector_matrix_multiply_3x3( *----------------------------------------------------------------------------------------------*/ /*! r: triplet id */ - - #ifdef IVAS_FLOAT_FIXED static Word16 determine_best_triplet_and_gains_fx( - VBAP_SEARCH_STRUCT *search_struct, /* i : VBAP search struct */ - const Word16 panning_unit_vec_fx[3], /* i : panning unit vector */ - const Word16 azi_deg, /* i : panning azimuth */ - Word32 gains_fx[3] /* o : panning gains */ + VBAP_SEARCH_STRUCT *search_struct, /* i : VBAP search struct */ + const Word16 panning_unit_vec_fx[3], /* i : panning unit vector Q15 */ + const Word16 azi_deg, /* i : panning azimuth */ + Word32 gains_fx[3] /* o : panning gains Q(VBAP_VS_TRIPLET.q_inverse_matrix) */ ) { Word16 i, tr, k; @@ -1334,13 +1327,23 @@ static Word16 determine_best_triplet_and_gains_fx( * the chosen four sectors. */ IF( GT_16( abs_s( azi_deg ), 90 ) ) { - sector = azi_deg < 0 ? 2 : 1; + sector = 1; move16(); + if ( azi_deg < 0 ) + { + sector = 2; + move16(); + } } ELSE { - sector = azi_deg < 0 ? 
3 : 0;
+ sector = 0;
move16();
+ if ( azi_deg < 0 )
+ {
+ sector = 3;
+ move16();
+ }
}
first_triplet = search_struct->initial_search_indices[sector];
move16();
@@ -1355,7 +1358,6 @@ static Word16 determine_best_triplet_and_gains_fx(
IF( triplet_ok )
{
min_gain_this_fx = L_min( ( L_min( unnormalized_gains_fx[0], unnormalized_gains_fx[1] ) ), unnormalized_gains_fx[2] );
- move32();
IF( GT_32( min_gain_this_fx, best_min_gain_fx ) )
{
@@ -1365,7 +1367,7 @@ static Word16 determine_best_triplet_and_gains_fx(
move16();
FOR( k = 0; k < 3; k++ )
{
- gains_fx[k] = unnormalized_gains_fx[k];
+ gains_fx[k] = unnormalized_gains_fx[k]; /* Q(VBAP_VS_TRIPLET.q_inverse_matrix) */
move32();
}
IF( best_min_gain_fx >= 0 )
@@ -1472,21 +1474,22 @@ static int16_t determine_best_triplet_and_gains(
return best_triplet;
}
#endif
+
/*-------------------------------------------------------------------------*
* determine_virtual_speaker_node_division_gains()
*
* Determines how the virtual node gains are distributed to real nodes
*-------------------------------------------------------------------------*/
+
#ifdef IVAS_FLOAT_FIXED
static void determine_virtual_speaker_node_division_gains_fx(
- const Word16 virtual_speaker_node_index, /* i : virtual speaker node index */
- Word16 *virtual_node_division_gains_fx,
- Word16 *max_exp,
- Word16 connections[][2], /* i : vector of all connections */
- const enum VirtualSpeakerNodeType type, /* i : virtual speaker node typel */
- const Word16 max_num_connections, /* i : max number of connections */
- const Word16 num_speaker_nodes, /* i : max number of speaker nodes */
- const Word16 use_object_mode /* i : use VBAP in object panning mode vs. spatial panning mode */
+ const Word16 virtual_speaker_node_index, /* i : virtual speaker node index */
+ Word16 *virtual_node_division_gains_fx, /* o : virtual speaker node division gains Q16 */
+ Word16 connections[][2], /* i : vector of all connections */
+ const enum VirtualSpeakerNodeType type, /* i : virtual speaker node type */
+ const Word16 max_num_connections, /* i : max number of connections */
+ const Word16 num_speaker_nodes, /* i : number of speaker nodes */
+ const Word16 use_object_mode /* i : use VBAP in object panning mode vs. 
spatial panning mode */ ) { /* When node type is VIRTUAL_SPEAKER_NODE_DISTRIBUTE_ENERGY, the gains of the virtual node @@ -1536,7 +1539,7 @@ static void determine_virtual_speaker_node_division_gains_fx( Word16 guard_bits = find_guarded_bits_fx( num_speaker_nodes ); FOR( ch = 0; ch < num_speaker_nodes; ch++ ) { - sum_val_fx = add( sum_val_fx, shr( virtual_node_division_gains_fx[ch], guard_bits ) ); // Q10 + sum_val_fx = add( sum_val_fx, shr( virtual_node_division_gains_fx[ch], guard_bits ) ); } Word16 final_exp = 0, res_exp; Word32 tmp_1, tmp_2, tmp_3; @@ -1545,7 +1548,7 @@ static void determine_virtual_speaker_node_division_gains_fx( { IF( virtual_node_division_gains_fx[ch] != 0 ) { - BASOP_Util_Divide_MantExp( virtual_node_division_gains_fx[ch], 1, sum_val_fx, add( guard_bits, 1 ), &virtual_node_division_gains_fx[ch], &final_exp ); // Q15 + BASOP_Util_Divide_MantExp( virtual_node_division_gains_fx[ch], 1, sum_val_fx, add( guard_bits, 1 ), &virtual_node_division_gains_fx[ch], &final_exp ); } ELSE { @@ -1565,9 +1568,9 @@ static void determine_virtual_speaker_node_division_gains_fx( } ELSE { - Word32 tmp32 = L_deposit_h( virtual_node_division_gains_fx[ch] ); // q is 15-final_exp// ldep -> 31-final_exp (q) + Word32 tmp32 = L_deposit_h( virtual_node_division_gains_fx[ch] ); tmp_1 = L_add( BASOP_Util_Log2( tmp32 ), L_shl( ( sub( 31, sub( 31, final_exp ) ) ), 25 ) ); // Q25 - tmp_2 = Mpy_32_32( 26843546 /*0.8f in Q25*/, tmp_1 ); + tmp_2 = Mpy_32_32( 26843546 /* 0.8f in Q25 */, tmp_1 ); tmp_3 = BASOP_util_Pow2( tmp_2, Q31 - Q19, &res_exp ); exp_virtual_node_division_gains[ch] = res_exp; move16(); @@ -1577,10 +1580,9 @@ static void determine_virtual_speaker_node_division_gains_fx( } } /*make a common exp*/ - *max_exp = -1; /*Q16*/ FOR( i = 0; i < num_speaker_nodes; i++ ) { - virtual_node_division_gains_fx[i] = shr( virtual_node_division_gains_fx[i], sub( *max_exp, exp_virtual_node_division_gains[i] ) ); + virtual_node_division_gains_fx[i] = shr( virtual_node_division_gains_fx[i], sub( -1, exp_virtual_node_division_gains[i] ) ); /* Q16 */ move16(); } } @@ -1662,13 +1664,13 @@ static void determine_virtual_speaker_node_division_gains( #ifdef IVAS_FLOAT_FIXED static enum VirtualSpeakerNodeType check_need_of_virtual_speaker_node_fx( VBAP_HANDLE hVBAPdata, /* i/o: VBAP structure */ - const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths */ - const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations */ + const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths Q22 */ + const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations Q22 */ enum SpeakerNodeGroup group /* i : group of speaker nodes where this belongs */ ) { Word16 ch; - Word32 max_elevation_fx = 0; + Word32 max_elevation_fx = 0; /* Q22 */ Word16 Flag1, Flag2, Flag3; move32(); @@ -1677,17 +1679,17 @@ static enum VirtualSpeakerNodeType check_need_of_virtual_speaker_node_fx( { Word16 virtual_back_needed = 1; move16(); - const Word16 virtual_back_epsilon_fx = -573; /* -0.0175f Q15*/ + const Word16 virtual_back_epsilon_fx = -573; /* -0.0175f in Q15 */ move16(); FOR( ch = 0; ch < hVBAPdata->num_speaker_nodes; ch++ ) { Flag1 = BASOP_Util_Cmp_Mant32Exp( speaker_node_ele_deg_fx[ch], Q31 - Q22, 23040 /*45.0f Q9*/, Q31 - Q9 ); - IF( EQ_16( Flag1, (Word16) -1 ) ) + IF( EQ_16( Flag1, -1 ) ) { Word16 azi_temp; - azi_temp = extract_l( L_shr( Mpy_32_32( speaker_node_azi_deg_fx[ch], ONE_BY_180_Q31 ), Q8 ) ); /* Q15 */ - Word16 cos_res = getCosWord16R2( azi_temp ); // Q15 + 
azi_temp = extract_l( L_shr( Mpy_32_32( speaker_node_azi_deg_fx[ch], ONE_BY_360_Q31 ), Q7 ) ); /* Q15 */ + Word16 cos_res = getCosWord16R2( azi_temp ); /* Q15 */ IF( LT_16( cos_res, virtual_back_epsilon_fx ) ) { @@ -1729,7 +1731,7 @@ static enum VirtualSpeakerNodeType check_need_of_virtual_speaker_node_fx( } } } - Flag2 = BASOP_Util_Cmp_Mant32Exp( max_elevation_fx, Q31 - Q22, 23039 /*44.9990005 Q9*/, Q31 - Q9 ); + Flag2 = BASOP_Util_Cmp_Mant32Exp( max_elevation_fx, Q31 - Q22, 23039 /* 44.9990005 in Q9 */, Q31 - Q9 ); IF( EQ_16( Flag2, 1 ) ) { return NO_VIRTUAL_SPEAKER_NODE; @@ -1749,7 +1751,7 @@ static enum VirtualSpeakerNodeType check_need_of_virtual_speaker_node_fx( hVBAPdata->num_speaker_nodes_internal = add( hVBAPdata->num_speaker_nodes_internal, 1 ); move16(); - Flag3 = BASOP_Util_Cmp_Mant32Exp( max_elevation_fx, Q31 - Q22, 20478 /*19.9990005 Q10*/, Q31 - Q10 ); + Flag3 = BASOP_Util_Cmp_Mant32Exp( max_elevation_fx, Q31 - Q22, 20478 /* 19.9990005 in Q10 */, Q31 - Q10 ); IF( EQ_16( Flag3, 1 ) ) { @@ -1849,31 +1851,28 @@ static enum VirtualSpeakerNodeType check_need_of_virtual_speaker_node( #ifdef IVAS_FLOAT_FIXED static void init_speaker_node_direction_data_fx( - VBAP_SPEAKER_NODE *speaker_node_data, /* o : storage for speaker node data */ - const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths */ - const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations */ - const Word16 num_speaker_nodes /* i : number of speaker nodes */ + VBAP_SPEAKER_NODE *speaker_node_data, /* o : storage for speaker node data */ + const Word32 *speaker_node_azi_deg_fx, /* i : vector of speaker node azimuths Q22 */ + const Word32 *speaker_node_ele_deg_fx, /* i : vector of speaker node elevations Q22 */ + const Word16 num_speaker_nodes /* i : number of speaker nodes */ ) { Word16 ch; - - Word16 azi_rad_fx = 0; - Word16 ele_rad_fx = 0; - + Word16 azi_rad_fx; + Word16 ele_rad_fx; Word16 num_horiz = 0; UWord8 in_all_mode = TRUE; - move32(); - move32(); - move32(); move16(); + move16(); + FOR( ch = 0; ch < num_speaker_nodes; ch++ ) { speaker_node_data[ch].azi_deg_fx = speaker_node_azi_deg_fx[ch]; move32(); - azi_rad_fx = extract_l( L_shr( Mpy_32_32( speaker_node_azi_deg_fx[ch], ONE_BY_180_Q31 ), Q8 ) ); + azi_rad_fx = extract_l( L_shr( Mpy_32_32( speaker_node_azi_deg_fx[ch], ONE_BY_360_Q31 ), Q7 ) ); /* Q15 */ test(); - IF( GE_32( L_shr( speaker_node_ele_deg_fx[ch], 22 ), -5 ) && LE_32( L_shr( speaker_node_ele_deg_fx[ch], 22 ), 5 ) ) + IF( GE_32( L_shr( speaker_node_ele_deg_fx[ch], Q22 ), -5 ) && LE_32( L_shr( speaker_node_ele_deg_fx[ch], Q22 ), 5 ) ) { speaker_node_data[ch].ele_deg_fx = 0; move32(); @@ -1887,7 +1886,7 @@ static void init_speaker_node_direction_data_fx( { speaker_node_data[ch].ele_deg_fx = speaker_node_ele_deg_fx[ch]; move32(); - ele_rad_fx = extract_l( L_shr( Mpy_32_32( speaker_node_ele_deg_fx[ch], ONE_BY_180_Q31 ), Q8 ) ); + ele_rad_fx = extract_l( L_shr( Mpy_32_32( speaker_node_ele_deg_fx[ch], ONE_BY_360_Q31 ), Q7 ) ); /* Q15 */ IF( ele_rad_fx < 0 ) { @@ -1901,11 +1900,11 @@ static void init_speaker_node_direction_data_fx( } } - speaker_node_data[ch].unit_vec_fx[0] = L_shr( L_mult( getCosWord16R2( azi_rad_fx ), getCosWord16R2( ele_rad_fx ) ), 1 ); // Q30 (add one gaurd bit , buffer being used in Q30) + speaker_node_data[ch].unit_vec_fx[0] = L_shr( L_mult( getCosWord16R2( azi_rad_fx ), getCosWord16R2( ele_rad_fx ) ), 1 ); /* Q15 + Q15 + Q1 - Q1 = Q30 */ move32(); - speaker_node_data[ch].unit_vec_fx[1] = L_shr( L_mult( getSineWord16R2( azi_rad_fx ), 
getCosWord16R2( ele_rad_fx ) ), 1 ); // Q30 + speaker_node_data[ch].unit_vec_fx[1] = L_shr( L_mult( getSineWord16R2( azi_rad_fx ), getCosWord16R2( ele_rad_fx ) ), 1 ); /* Q15 + Q15 + Q1 - Q1 = Q30 */ move32(); - speaker_node_data[ch].unit_vec_fx[2] = L_shr( L_deposit_h( getSineWord16R2( ele_rad_fx ) ), 1 ); // Q30 + speaker_node_data[ch].unit_vec_fx[2] = L_shr( L_deposit_h( getSineWord16R2( ele_rad_fx ) ), 1 ); /* Q15 + Q16 - Q1 = Q30 */ move32(); } /* Check for largest horizontal gap if there are at least 3 horizontal speaker nodes */ @@ -1921,18 +1920,19 @@ static void init_speaker_node_direction_data_fx( move16(); FOR( ch = 0; ch < num_speaker_nodes && i < num_horiz; ch++ ) { + test(); IF( EQ_16( speaker_node_data[ch].group, SPEAKER_NODE_HORIZONTAL ) ) { Word16 exp1; - Word32 Mant2 = BASOP_Util_Add_Mant32Exp( speaker_node_azi_deg_fx[ch], 31 - 22, 23040, 31 - 6, &exp1 ); + Word32 Mant2 = BASOP_Util_Add_Mant32Exp( speaker_node_azi_deg_fx[ch], Q31 - Q22, 23040 /* 360.0f in Q6 */, Q31 - Q6, &exp1 ); IF( L_shr( speaker_node_azi_deg_fx[ch], 22 ) < 0 ) { - horiz_azi[i] = (UWord16) L_shr( Mant2, sub( 31, exp1 ) ); + horiz_azi[i] = (UWord16) L_shr( Mant2, sub( 31, exp1 ) ); /* Q0 */ } ELSE { - horiz_azi[i] = (UWord16) L_shr( speaker_node_azi_deg_fx[ch], 22 ); + horiz_azi[i] = (UWord16) L_shr( speaker_node_azi_deg_fx[ch], Q22 ); /* Q0 */ } i = add( i, 1 ); } @@ -1944,7 +1944,7 @@ static void init_speaker_node_direction_data_fx( /* Find largest gap. Initialize with the wrap over gap. */ largest_gap = add( sub( horiz_azi[0], horiz_azi[num_horiz - 1] ), 360 ); - FOR( ch = 0; ch < sub( num_horiz, 1 ); ch++ ) + FOR( ch = 0; ch < num_horiz - 1; ch++ ) { temp = sub( horiz_azi[ch + 1], horiz_azi[ch] ); if ( GT_16( temp, largest_gap ) ) @@ -2082,26 +2082,25 @@ static void init_speaker_node_direction_data( #ifdef IVAS_FLOAT_FIXED static void matrix_inverse_3x3_32_fx( - const Word32 **input_matrix_fx, /* i : input matrix */ - Word32 inverse_matrix_fx[3][3], /* o : output matrix */ + const Word32 **input_matrix_fx, /* i : input matrix Q30 */ + Word32 inverse_matrix_fx[3][3], /* o : output matrix Q(31 - exp_inv_mat) */ Word16 *exp_inv_mat ) { Word16 k; - Word32 determinant_fx; - Word32 cross_vec_fx[3]; + Word32 determinant_fx; /* Q28 */ + Word32 cross_vec_fx[3]; /* Q29 */ Word16 exp_inverse_matrix_fx[3][3]; vbap_crossp_fx( input_matrix_fx[1], input_matrix_fx[2], cross_vec_fx ); - determinant_fx = dotp_fixed( input_matrix_fx[0], cross_vec_fx, 3 ); // Q30 + Q29 - Q31 = Q28 + determinant_fx = dotp_fixed( input_matrix_fx[0], cross_vec_fx, 3 ); Word16 inv_mat_exp = 0; move16(); FOR( k = 0; k < 3; k++ ) { inverse_matrix_fx[k][0] = L_deposit_h( BASOP_Util_Divide3232_Scale( cross_vec_fx[k], determinant_fx, &inv_mat_exp ) ); - inv_mat_exp = add( inv_mat_exp, ( ( 31 - 29 ) - ( 31 - 28 ) ) ); - exp_inverse_matrix_fx[k][0] = inv_mat_exp; - move32(); + inv_mat_exp = add( inv_mat_exp, ( ( Q31 - Q29 ) - ( Q31 - Q28 ) ) ); + exp_inverse_matrix_fx[k][0] = inv_mat_exp; move16(); } @@ -2110,10 +2109,9 @@ static void matrix_inverse_3x3_32_fx( FOR( k = 0; k < 3; k++ ) { inverse_matrix_fx[k][1] = L_deposit_h( BASOP_Util_Divide3232_Scale( cross_vec_fx[k], determinant_fx, &inv_mat_exp ) ); - inv_mat_exp = add( inv_mat_exp, ( ( 31 - 29 ) - ( 31 - 28 ) ) ); - exp_inverse_matrix_fx[k][1] = inv_mat_exp; - move32(); + inv_mat_exp = add( inv_mat_exp, ( ( Q31 - Q29 ) - ( Q31 - Q28 ) ) ); + exp_inverse_matrix_fx[k][1] = inv_mat_exp; move16(); } @@ -2123,7 +2121,7 @@ static void matrix_inverse_3x3_32_fx( { inverse_matrix_fx[k][2] = 
L_deposit_h( BASOP_Util_Divide3232_Scale( cross_vec_fx[k], determinant_fx, &inv_mat_exp ) ); move32(); - inv_mat_exp = add( inv_mat_exp, ( ( 31 - 29 ) - ( 31 - 28 ) ) ); + inv_mat_exp = add( inv_mat_exp, ( ( Q31 - Q29 ) - ( Q31 - Q28 ) ) ); exp_inverse_matrix_fx[k][2] = inv_mat_exp; move16(); } @@ -2149,13 +2147,13 @@ static void matrix_inverse_3x3_32_fx( IF( LT_16( exp_inverse_matrix_fx[i][j], -15 ) && inverse_matrix_fx[i][j] != 0 ) { inverse_matrix_fx[i][j] = 1; - exp_inverse_matrix_fx[i][j] = 0; move32(); + exp_inverse_matrix_fx[i][j] = 0; move16(); } ELSE { - inverse_matrix_fx[i][j] = L_shr( inverse_matrix_fx[i][j], *exp_inv_mat - exp_inverse_matrix_fx[i][j] ); + inverse_matrix_fx[i][j] = L_shr( inverse_matrix_fx[i][j], sub( *exp_inv_mat, exp_inverse_matrix_fx[i][j] ) ); /* Q(31 - *exp_inv_mat) */ move32(); } } @@ -2205,37 +2203,41 @@ static void matrix_inverse_3x3( * Check if the given loudspeaker triplet is a valid one and store data when * valid triplet is found. *-------------------------------------------------------------------------*/ + #ifdef IVAS_FLOAT_FIXED static Word16 check_and_store_triplet_fx( - const Word16 chA, /* i : first channel index that forms the loudspeaker triplet */ - const Word16 chB, /* i : second channel index that forms the loudspeaker triplet */ - const Word16 chC, /* i : third channel index that forms the loudspeaker triplet */ - const Word16 num_speaker_nodes, /* i : number of speaker nodes */ - const VBAP_SPEAKER_NODE *speaker_node_data, /* i : speaker node data structure */ - VBAP_VS_TRIPLET *triplets, /* o : vector of virtual surface triplets */ - Word16 *triplet_index, /* i/o: index for the next free triplet slot */ - Word32 *triplet_azidegs_fx, - Word16 *triplet_order /* o : initial order of triplet indices */ + const Word16 chA, /* i : first channel index that forms the loudspeaker triplet */ + const Word16 chB, /* i : second channel index that forms the loudspeaker triplet */ + const Word16 chC, /* i : third channel index that forms the loudspeaker triplet */ + const Word16 num_speaker_nodes, /* i : number of speaker nodes */ + const VBAP_SPEAKER_NODE *speaker_node_data, /* i : speaker node data structure */ + VBAP_VS_TRIPLET *triplets, /* o : vector of virtual surface triplets */ + Word16 *triplet_index, /* i/o: index for the next free triplet slot */ + Word32 *triplet_azidegs_fx, /* o : center azimuths of the found triplets Q19 */ + Word16 *triplet_order /* o : initial order of triplet indices */ ) { Word16 ch_check; Word16 k; Word16 speaker_node_found_inside_triplet; UWord8 triplet_ok; + Word16 exp_inv_mat; - Word32 inverse_matrix_fx[3][3], unnormalized_gains_fx[3]; + Word32 inverse_matrix_fx[3][3] /* Q(31 - exp_inv_mat) */, unnormalized_gains_fx[3] /* Q(31 - exp_inv_mat - 1) */; set32_fx( unnormalized_gains_fx, 0, 3 ); - const Word32 *speaker_node_triplet_unit_vec_matrix_fx[3]; + const Word32 *speaker_node_triplet_unit_vec_matrix_fx[3]; /* Q30 */ /* Triplet found, determine inverse matrix for VBAP formulation */ speaker_node_triplet_unit_vec_matrix_fx[0] = speaker_node_data[chA].unit_vec_fx; + move32(); speaker_node_triplet_unit_vec_matrix_fx[1] = speaker_node_data[chB].unit_vec_fx; + move32(); speaker_node_triplet_unit_vec_matrix_fx[2] = speaker_node_data[chC].unit_vec_fx; - Word16 exp_inv_mat = 31; - move16(); + move32(); matrix_inverse_3x3_32_fx( speaker_node_triplet_unit_vec_matrix_fx, inverse_matrix_fx, &exp_inv_mat ); triplets[*triplet_index].q_inverse_matrix = sub( 31, exp_inv_mat ); + move16(); /* Check through all speaker nodes that 
none of them are within the triplet. * Node within the triplet is identified by that all three panning gains are positive. @@ -2248,7 +2250,7 @@ static Word16 check_and_store_triplet_fx( test(); IF( ( NE_16( ch_check, chA ) ) && NE_16( ch_check, chB ) && NE_16( ch_check, chC ) ) { - triplet_ok = vector_matrix_multiply_3x3_32_fx( speaker_node_data[ch_check].unit_vec_fx, inverse_matrix_fx, unnormalized_gains_fx, sub( 31, exp_inv_mat ) ); + triplet_ok = vector_matrix_multiply_3x3_32_fx( speaker_node_data[ch_check].unit_vec_fx, inverse_matrix_fx, unnormalized_gains_fx, sub( Q31, exp_inv_mat ) ); test(); test(); test(); @@ -2275,16 +2277,16 @@ static Word16 check_and_store_triplet_fx( Copy32( inverse_matrix_fx[k], triplets[*triplet_index].inverse_matrix_fx[k], 3 ); } /* Get center azimuth for fast search use */ - Word32 tmp_a = L_add( L_shr( L_add( speaker_node_data[chA].unit_vec_fx[1], speaker_node_data[chB].unit_vec_fx[1] ), 2 ), L_shr( speaker_node_data[chC].unit_vec_fx[1], 2 ) ); // Q28 + Word32 tmp_a = L_add( L_shr( L_add( speaker_node_data[chA].unit_vec_fx[1], speaker_node_data[chB].unit_vec_fx[1] ), Q2 ), L_shr( speaker_node_data[chC].unit_vec_fx[1], Q2 ) ); /* Q28 */ /*Condition to make tmp_a 0 to adress precision loss seen*/ - if ( EQ_32( tmp_a, -8193 ) ) + if ( EQ_32( tmp_a, -8193 /* -0.0000305 in Q28 */ ) ) { tmp_a = 0; move32(); } - Word32 tmp_b = L_add( L_shr( L_add( speaker_node_data[chA].unit_vec_fx[0], speaker_node_data[chB].unit_vec_fx[0] ), 2 ), L_shr( speaker_node_data[chC].unit_vec_fx[0], 2 ) ); // Q28 - Word16 tmp_tan = shr( BASOP_util_atan2( tmp_a, tmp_b, 0 ), Q13 - Q9 ); - triplet_azidegs_fx[*triplet_index] = L_mult( tmp_tan, 29335 /*_180_OVER_PI in Q9*/ ); // Q3 + Word32 tmp_b = L_add( L_shr( L_add( speaker_node_data[chA].unit_vec_fx[0], speaker_node_data[chB].unit_vec_fx[0] ), 2 ), L_shr( speaker_node_data[chC].unit_vec_fx[0], 2 ) ); /* Q28 */ + Word16 tmp_tan = shr( BASOP_util_atan2( tmp_a, tmp_b, 0 ), Q13 - Q9 ); /* Q9 */ + triplet_azidegs_fx[*triplet_index] = L_mult( tmp_tan, 29335 /*_180_OVER_PI in Q9*/ ); /* Q19 */ move32(); /* Store increasing order indices for the later sorting step. */ triplet_order[*triplet_index] = *triplet_index; @@ -2300,7 +2302,6 @@ static Word16 check_and_store_triplet_fx( return 0; } #else - static int16_t check_and_store_triplet( const int16_t chA, /* i : first channel index that forms the loudspeaker triplet */ const int16_t chB, /* i : second channel index that forms the loudspeaker triplet */ @@ -2399,7 +2400,7 @@ static Word16 determine_virtual_surface_triplets_fx( Word16 num_connected_to_chA; Word16 connected_to_chA[VBAP_MAX_NUM_SPEAKER_NODES]; Word16 connection_uses_left[VBAP_MAX_NUM_SPEAKER_NODES]; - Word32 triplet_azidegs_fx[VBAP_MAX_NUM_TRIPLETS]; + Word32 triplet_azidegs_fx[VBAP_MAX_NUM_TRIPLETS]; /* Q19 */ Word16 triplet_order[VBAP_MAX_NUM_TRIPLETS]; /* Each connection can be used exactly by two different virtual surface triplets. */ @@ -2441,12 +2442,24 @@ static Word16 determine_virtual_surface_triplets_fx( { Word16 connect_index_k = connected_to_chA[k]; move16(); - chB = EQ_16( connections[connect_index_k][0], chA ) ? connections[connect_index_k][1] : connections[connect_index_k][0]; + chB = connections[connect_index_k][0]; + move16(); + if ( EQ_16( connections[connect_index_k][0], chA ) ) + { + chB = connections[connect_index_k][1]; + move16(); + } FOR( l = k + 1; l < num_connected_to_chA; l++ ) { Word16 connect_index_l = connected_to_chA[l]; move16(); - chC = EQ_16( connections[connect_index_l][0], chA ) ? 
connections[connect_index_l][1] : connections[connect_index_l][0]; + chC = connections[connect_index_l][0]; + move16(); + if ( EQ_16( connections[connect_index_l][0], chA ) ) + { + chC = connections[connect_index_l][1]; + move16(); + } /* With chA, chB, and chC selected, we still need to find connection between chB and chC and verify that the triplet is valid */ FOR( m = 0; m < max_num_connections; m++ ) @@ -2485,7 +2498,7 @@ static Word16 determine_virtual_surface_triplets_fx( * each search sector for this search struct. */ v_sort_ind_fixed( triplet_azidegs_fx, triplet_order, num_triplets ); reorder_triplets_fx( triplets, triplet_order, num_triplets ); - determine_initial_search_indices_fx( num_triplets, triplet_azidegs_fx /*Q19*/, initial_search_indices ); + determine_initial_search_indices_fx( num_triplets, triplet_azidegs_fx, initial_search_indices ); return num_triplets; } @@ -2593,18 +2606,18 @@ static int16_t determine_virtual_surface_triplets( #ifdef IVAS_FLOAT_FIXED static void determine_initial_search_indices_fx( - const Word16 num_triplets, /* i : number of triplets */ - const Word32 triplet_azidegs_fx[VBAP_MAX_NUM_TRIPLETS], - Word16 initial_search_indices[VBAP_NUM_SEARCH_SECTORS] /* o : initial search indices */ + const Word16 num_triplets, /* i : number of triplets */ + const Word32 triplet_azidegs_fx[VBAP_MAX_NUM_TRIPLETS], /* i : azimuths of triplets (in degrees) Q19 */ + Word16 initial_search_indices[VBAP_NUM_SEARCH_SECTORS] /* o : initial search indices */ ) { Word16 i, j; - Word32 sector_reference_azideg_fx; - Word32 sector_border_start_azideg_fx; - Word32 sector_border_end_azideg_fx; + Word32 sector_reference_azideg_fx; /* Q0 */ + Word32 sector_border_start_azideg_fx; /* Q0 */ + Word32 sector_border_end_azideg_fx; /* Q0 */ Word16 best_index; Word32 min_azideg_diff_fx; - Word32 azideg_diff_fx; + Word32 azideg_diff_fx; /* Q19 */ FOR( i = 0; i < VBAP_NUM_SEARCH_SECTORS; i++ ) { @@ -2622,13 +2635,13 @@ static void determine_initial_search_indices_fx( { azideg_diff_fx = L_sub( L_shl( sector_reference_azideg_fx, Q19 ), triplet_azidegs_fx[j] ); - IF( GT_32( azideg_diff_fx, 94371840 ) ) + IF( GT_32( azideg_diff_fx, 94371840 /* 180.0f in Q19 */ ) ) { - azideg_diff_fx = L_sub( azideg_diff_fx, 188743680 ); + azideg_diff_fx = L_sub( azideg_diff_fx, 188743680 /* 360.0f in Q19 */ ); } - ELSE IF( LT_32( azideg_diff_fx, -94371840 ) ) + ELSE IF( LT_32( azideg_diff_fx, -94371840 /* -180.0f in Q19 */ ) ) { - azideg_diff_fx = L_add( azideg_diff_fx, 188743680 ); + azideg_diff_fx = L_add( azideg_diff_fx, 188743680 /* 360.0f in Q19 */ ); } azideg_diff_fx = L_abs( azideg_diff_fx ); @@ -2718,7 +2731,7 @@ static ivas_error determine_connections_fx( Word16 c; Word16 connection_write_index = 0; move16(); - Word32 non_crossing_plane_elevation_deg_fx[VBAP_MAX_PLANES]; + Word32 non_crossing_plane_elevation_deg_fx[VBAP_MAX_PLANES]; /* Q14 */ ivas_error error; @@ -2844,15 +2857,15 @@ static enum ConnectionClass determine_connection_class_fx( Word16 ch, k; const Word32 *p1_fx, *v2_fx; - Word32 v1v1_fx, v1v2_fx, v2v2_fx, v1p1_fx, v2p1_fx; - Word32 determinant_fx; + Word32 v1v1_fx, v1v2_fx, v2v2_fx, v1p1_fx, v2p1_fx; /* Q25, Q27, Q29, Q27, Q29 */ + Word32 determinant_fx; /* Q23 */ Word32 norm_distance_on_v1_fx; Word32 vec_diff_fx[3]; - Word32 v1_fx[3]; + Word32 v1_fx[3]; /* Q28 */ Word32 vTarget_fx[3]; Word32 energy_sum_fx; Word32 eq_value_fx; - Word32 uvecdot_fx; + Word32 uvecdot_fx; /* Q30 */ /* Check if connection passes through origin. This is not desired. 
* When this happens, unit vectors point in opposite directions. */ @@ -2885,9 +2898,10 @@ static enum ConnectionClass determine_connection_class_fx( FOR( k = 0; k < 3; k++ ) { - v1_fx[k] = L_sub( L_shr( node_data[chB].unit_vec_fx[k], 2 ), L_shr( node_data[chA].unit_vec_fx[k], 2 ) ); // q28 (Add two guard bit) + v1_fx[k] = L_sub( L_shr( node_data[chB].unit_vec_fx[k], 2 ), L_shr( node_data[chA].unit_vec_fx[k], 2 ) ); /* Q28 (Add two guard bit) */ + move32(); } - v2_fx = node_data[ch].unit_vec_fx; // q30 + v2_fx = node_data[ch].unit_vec_fx; // Q30 move32(); v1v1_fx = dotp_fixed( v1_fx, v1_fx, 3 ); // Q25 @@ -2939,13 +2953,13 @@ static enum ConnectionClass determine_connection_class_fx( FOR( k = 0; k < 3; k++ ) { var1 = Mpy_32_32( norm_distance_on_v1_fx, v1_fx[k] ); // Q(25 - exp) + Q28 - 31 - vTarget_fx[k] = BASOP_Util_Add_Mant32Exp( p1_fx[k], 1, var1, Q31 - ( Q25 - exp + Q28 - Q31 ), &exp_vTarget ); + vTarget_fx[k] = BASOP_Util_Add_Mant32Exp( p1_fx[k], 1, var1, sub( Q31, add( sub( Q25, exp ), Q28 - Q31 ) ), &exp_vTarget ); move16(); vTarget_fx_e[k] = exp_vTarget; move16(); var2 = Mpy_32_32( vTarget_fx[k], vTarget_fx[k] ); // 2*exp_vTarget - energy_sum_fx = BASOP_Util_Add_Mant32Exp( energy_sum_fx, exp_energy_sum, var2, 2 * exp_vTarget, &exp_energy_sum ); + energy_sum_fx = BASOP_Util_Add_Mant32Exp( energy_sum_fx, exp_energy_sum, var2, shl( exp_vTarget, 1 ), &exp_energy_sum ); vec_diff_fx[k] = BASOP_Util_Add_Mant32Exp( vTarget_fx[k], exp_vTarget, L_negate( v2_fx[k] ), 1, &exp_vec_diff ); move16(); vec_diff_e[k] = exp_vec_diff; @@ -2990,7 +3004,7 @@ static enum ConnectionClass determine_connection_class_fx( Word32 res = dotp_fixed( vTarget_fx, v2_fx, 3 ); // 31 - (max_vTarget_e + 2) + 30 - 31 = 28 - max_vTarget_e move32(); - IF( GT_32( res, L_shr( 2147054208, sub( 31, sub( 28, max_vTarget_e ) ) ) ) ) + IF( GT_32( res, L_shr( 2147054208 /* 0.9998f in Q31 */, sub( 31, sub( 28, max_vTarget_e ) ) ) ) ) { return CONNECTION_WITH_SPEAKER_NODE_BEHIND; } @@ -3001,10 +3015,10 @@ static enum ConnectionClass determine_connection_class_fx( Word32 vec_diff_dotp = dotp_fixed( vec_diff_fx, vec_diff_fx, 3 ); // exp : 2 * max_vec_diff_e + 4 move32(); Word32 var = Mpy_32_32( vec_diff_dotp, 51200 /*25.0f in Q11*/ ); // exp : 2 * max_vec_diff_e + 4 + 20 - Word16 Flag1 = BASOP_Util_Cmp_Mant32Exp( v1v1_fx, Q31 - Q25, var, 2 * max_vec_diff_e + 4 + 20 ); + Word16 Flag1 = BASOP_Util_Cmp_Mant32Exp( v1v1_fx, Q31 - Q25, var, add( shl( max_vec_diff_e, 1 ), 4 + 20 ) ); IF( EQ_16( Flag1, 1 ) ) { - IF( LT_32( L_abs( L_sub( node_data[chB].unit_vec_fx[2], node_data[chA].unit_vec_fx[2] ) ), 2147483 ) ) + IF( LT_32( L_abs( L_sub( node_data[chB].unit_vec_fx[2], node_data[chA].unit_vec_fx[2] ) ), 1073742 /* 0.001f in Q30 */ ) ) { return ELEVATED_PLANE_THIN_TRIANGLE_CONNECTION; } @@ -3240,10 +3254,10 @@ static void formulate_horizontal_connections( /*! 
r: truth value for crossing */ #ifdef IVAS_FLOAT_FIXED static Word16 check_plane_crossing_fx( - const Word32 ele1_deg_fx, /* i : speaker node 1 elevation */ - const Word32 ele2_deg_fx, /* i : speaker node 2 elevation */ - const Word16 num_non_crossing_planes, /* i : number of non-crossing planes */ - const Word32 *non_crossing_plane_elevation_deg_fx /* i : vector non-crossing plane elevations*/ + const Word32 ele1_deg_fx, /* i : speaker node 1 elevation Q22 */ + const Word32 ele2_deg_fx, /* i : speaker node 2 elevation Q22 */ + const Word16 num_non_crossing_planes, /* i : number of non-crossing planes */ + const Word32 *non_crossing_plane_elevation_deg_fx /* i : vector non-crossing plane elevations Q14 */ ) { /* Find if the connection crosses a non-crossing plane, with 1-degree threshold. */ @@ -3300,13 +3314,13 @@ static int16_t check_plane_crossing( *-------------------------------------------------------------------------*/ #ifdef IVAS_FLOAT_FIXED static ivas_error get_half_sphere_connection_options_fx( - const VBAP_SPEAKER_NODE *speaker_node_data, /* i : speaker node data */ - const enum SpeakerNodeGroup group, /* i : speaker node group */ - const Word16 num_speaker_nodes, /* i : number of speaker nodes */ - const Word16 num_non_crossing_planes, /* i : number of non-crossing planes */ - const Word32 *non_crossing_plane_elevation_deg_fx, /* i : vector of non-crossing plane elevations */ - ConnectionOption **connection_options_pr, /* o : list of connection options */ - Word16 *num_connection_options /* o : number of connection options */ + const VBAP_SPEAKER_NODE *speaker_node_data, /* i : speaker node data */ + const enum SpeakerNodeGroup group, /* i : speaker node group */ + const Word16 num_speaker_nodes, /* i : number of speaker nodes */ + const Word16 num_non_crossing_planes, /* i : number of non-crossing planes */ + const Word32 *non_crossing_plane_elevation_deg_fx, /* i : vector of non-crossing plane elevations Q14 */ + ConnectionOption **connection_options_pr, /* o : list of connection options */ + Word16 *num_connection_options /* o : number of connection options */ ) { Word16 max_num_connection_options = 0; @@ -3380,8 +3394,8 @@ static ivas_error get_half_sphere_connection_options_fx( Word16 exp_uv = Q31 - Q27; move16(); one_minus_unit_vec_dotp_sq_root = Sqrt32( one_minus_unit_vec_dotp_sq, &exp_uv ); - acos_val = BASOP_util_atan2( one_minus_unit_vec_dotp_sq_root, unit_vec_dotp, exp_uv - 2 ); // Q13 - c_options[index].arc_fx = L_deposit_h( acos_val ); // Q29 + acos_val = BASOP_util_atan2( one_minus_unit_vec_dotp_sq_root, unit_vec_dotp, sub( exp_uv, 2 ) ); // Q13 + c_options[index].arc_fx = L_deposit_h( acos_val ); // Q29 move32(); c_options[index].arc_weighted_fx = c_options[index].arc_fx; // Q29 move32(); @@ -3397,7 +3411,7 @@ static ivas_error get_half_sphere_connection_options_fx( /* If the connection passes a pre-determined plane of speaker nodes, then add further penalty */ - IF( check_plane_crossing_fx( speaker_node_data[chA].ele_deg_fx /*q22*/, speaker_node_data[chB].ele_deg_fx, num_non_crossing_planes, non_crossing_plane_elevation_deg_fx ) ) + IF( check_plane_crossing_fx( speaker_node_data[chA].ele_deg_fx, speaker_node_data[chB].ele_deg_fx, num_non_crossing_planes, non_crossing_plane_elevation_deg_fx ) ) { c_options[index].arc_weighted_fx = L_shl( c_options[index].arc_weighted_fx, 1 ); move32(); @@ -3595,9 +3609,9 @@ static ivas_error formulate_half_sphere_connections_fx( const enum SpeakerNodeGroup group, /* i : speaker node group */ Word16 connections[][2], /* o : 
vector of connections */ Word16 *connection_write_index, - const Word16 max_num_connections, /* i : max number of connections */ - const Word16 num_non_crossing_planes, /* i : number of non-crossing planes */ - const Word32 *non_crossing_plane_elevation_deg_fx /* i : vector of non-crossing plane elevations */ + const Word16 max_num_connections, /* i : max number of connections */ + const Word16 num_non_crossing_planes, /* i : number of non-crossing planes */ + const Word32 *non_crossing_plane_elevation_deg_fx /* i : vector of non-crossing plane elevations Q14*/ ) { /* Variable initializations */ @@ -3607,11 +3621,11 @@ static ivas_error formulate_half_sphere_connections_fx( Word32 new_cross_fx[3]; Word32 planeCrossingVec_fx[3]; Word16 Q_planeCrossingVec; - Word32 new_arc_fx; - Word32 connection_arc_fx[( VBAP_MAX_NUM_SPEAKER_NODES - 2 ) * 3]; - Word32 connection_cross_fx[( VBAP_MAX_NUM_SPEAKER_NODES - 2 ) * 3][3]; + Word32 new_arc_fx; /* Q29 */ + Word32 connection_arc_fx[( VBAP_MAX_NUM_SPEAKER_NODES - 2 ) * 3]; /* Q29 */ + Word32 connection_cross_fx[( VBAP_MAX_NUM_SPEAKER_NODES - 2 ) * 3][3]; /* Q29 */ Word32 tmpFloat_fx; - Word32 cmp_arc_fx; + Word32 cmp_arc_fx; /* Q29 */ Word32 normVal_fx; Word16 angleCmp_fx; ConnectionOption *connection_options; @@ -3648,6 +3662,7 @@ static ivas_error formulate_half_sphere_connections_fx( move16(); WHILE( c_opt < num_connection_options && *connection_write_index < max_num_connections ) { + test(); chA = connection_options[c_opt].chA; move16(); chB = connection_options[c_opt].chB; @@ -3747,10 +3762,10 @@ static ivas_error formulate_half_sphere_connections_fx( sub_exp = 0, sub_exp_2 = 0, sub_final_exp = 0; var_a = BASOP_Util_Add_Mant32Exp( new_arc_fx, Q31 - Q29, L_negate( L_deposit_h( angleCmp_fx ) ), final_exp, &sub_exp ); - comp1 = BASOP_Util_Cmp_Mant32Exp( L_abs( var_a ), sub_exp, 21474836, 0 ); + comp1 = BASOP_Util_Cmp_Mant32Exp( L_abs( var_a ), sub_exp, 21474836 /* 0.01f in Q31 */, 0 ); var_b = BASOP_Util_Add_Mant32Exp( 25735, Q31 - Q12, L_negate( L_deposit_h( angleCmp_fx ) ), final_exp, &sub_exp_2 ); var_c = BASOP_Util_Add_Mant32Exp( new_arc_fx, Q31 - Q29, L_negate( var_b ), sub_exp_2, &sub_final_exp ); - comp2 = BASOP_Util_Cmp_Mant32Exp( L_abs( var_c ), sub_final_exp, 21474836, 0 ); + comp2 = BASOP_Util_Cmp_Mant32Exp( L_abs( var_c ), sub_final_exp, 21474836 /* 0.01f in Q31 */, 0 ); within_first_arc = 0; move16(); @@ -3771,7 +3786,7 @@ static ivas_error formulate_half_sphere_connections_fx( move32(); } /*update Q for planeCrossingVec */ - Q_planeCrossingVec = sub( sub( add( Q27, Q27 ), tmp_exp ), Q31 ); + Q_planeCrossingVec = sub( sub( Q27 + Q27, tmp_exp ), Q31 ); } /* Study if the crossing is also between arc cmp_chA-cmp_chB */ @@ -3803,13 +3818,13 @@ static ivas_error formulate_half_sphere_connections_fx( } one_minus_var1_sq = BASOP_Util_Add_Mant32Exp( ONE_IN_Q30, 1, L_negate( var1_sq ), exp_var1_sq, &final_exp_A ); var1_sqrt = Sqrt32( one_minus_var1_sq, &final_exp_A ); - var1_cos = BASOP_util_atan2( var1_sqrt, var1, final_exp_A - ( Q31 - ( Q_planeCrossingVec + Q30 - Q31 ) ) ); // Q13 + var1_cos = BASOP_util_atan2( var1_sqrt, var1, sub( final_exp_A, sub( Q31, add( Q_planeCrossingVec, Q30 - Q31 ) ) ) ); // Q13 angleCmp_fx = var1_cos; move16(); one_minus_var2_sq = BASOP_Util_Add_Mant32Exp( ONE_IN_Q30, 1, L_negate( var2_sq ), exp_var2_sq, &final_exp_B ); var2_sqrt = Sqrt32( one_minus_var2_sq, &final_exp_B ); - var2_cos = BASOP_util_atan2( var2_sqrt, var2, final_exp_B - ( Q31 - ( Q_planeCrossingVec + Q30 - Q31 ) ) ); // Q13 + var2_cos = 
BASOP_util_atan2( var2_sqrt, var2, sub( final_exp_B, sub( Q31, add( Q_planeCrossingVec, Q30 - Q31 ) ) ) ); // Q13 final_exp = BASOP_Util_Add_MantExp( angleCmp_fx, Q15 - Q13, var2_cos, Q15 - Q13, &angleCmp_fx ); @@ -3818,7 +3833,7 @@ static ivas_error formulate_half_sphere_connections_fx( move16(); move16(); var_a = BASOP_Util_Add_Mant32Exp( cmp_arc_fx, Q31 - Q29, L_negate( L_deposit_h( angleCmp_fx ) ), final_exp, &sub_exp ); - comp1 = BASOP_Util_Cmp_Mant32Exp( L_abs( var_a ), sub_exp, 21474836, 0 ); + comp1 = BASOP_Util_Cmp_Mant32Exp( L_abs( var_a ), sub_exp, 21474836 /* 0.01f in Q31 */, 0 ); if ( EQ_16( comp1, -1 ) ) { @@ -3839,13 +3854,13 @@ static ivas_error formulate_half_sphere_connections_fx( move16(); connections[*connection_write_index][1] = chB; move16(); - connection_arc_fx[*connection_write_index] = new_arc_fx; // Q29 + connection_arc_fx[*connection_write_index] = new_arc_fx; /* Q29 */ move16(); - connection_cross_fx[*connection_write_index][0] = new_cross_fx[0]; + connection_cross_fx[*connection_write_index][0] = new_cross_fx[0]; /* Q29 */ move16(); - connection_cross_fx[*connection_write_index][1] = new_cross_fx[1]; + connection_cross_fx[*connection_write_index][1] = new_cross_fx[1]; /* Q29 */ move16(); - connection_cross_fx[*connection_write_index][2] = new_cross_fx[2]; + connection_cross_fx[*connection_write_index][2] = new_cross_fx[2]; /* Q29 */ move16(); *connection_write_index = add( *connection_write_index, 1 ); } @@ -4013,15 +4028,15 @@ static ivas_error formulate_half_sphere_connections( #ifdef IVAS_FLOAT_FIXED static Word16 determine_non_crossing_planes_fx( - const Word16 num_speaker_nodes, /* i : number of speaker nodes */ - const VBAP_SPEAKER_NODE *node_data, /* i : speaker node data */ - Word32 *non_crossing_plane_elevation_deg_fx /* o : vector of non-crossing plane elevations */ + const Word16 num_speaker_nodes, /* i : number of speaker nodes */ + const VBAP_SPEAKER_NODE *node_data, /* i : speaker node data */ + Word32 *non_crossing_plane_elevation_deg_fx /* o : vector of non-crossing plane elevations Q14 */ ) { - Word32 next_ele_check_fx; - Word32 ele_check_fx; - Word32 max_gap_fx; - Word32 gap_to_next_ls_fx; + Word32 next_ele_check_fx; /* Q14 */ + Word32 ele_check_fx; /* Q14 */ + Word32 max_gap_fx; /* Q14 */ + Word32 gap_to_next_ls_fx; /* Q14 */ Word16 ch, ch_cmp; Word16 num_planes; @@ -4046,7 +4061,7 @@ static Word16 determine_non_crossing_planes_fx( tmp2 = L_sub( next_ele_check_fx, 16 /*VBAP_EPSILON in Q14*/ ); test(); test(); - IF( ( node_data[ch].group != SPEAKER_NODE_HORIZONTAL ) && GT_32( L_shr( node_data[ch].ele_deg_fx, 8 ), tmp1 ) && LT_32( L_shr( node_data[ch].ele_deg_fx, 8 ), tmp2 ) ) + IF( NE_32( node_data[ch].group, SPEAKER_NODE_HORIZONTAL ) && GT_32( L_shr( node_data[ch].ele_deg_fx, 8 ), tmp1 ) && LT_32( L_shr( node_data[ch].ele_deg_fx, 8 ), tmp2 ) ) { next_ele_check_fx = L_shr( node_data[ch].ele_deg_fx, 8 ); // shift due to comparision with 90.0f } @@ -4098,7 +4113,7 @@ static Word16 determine_non_crossing_planes_fx( test(); IF( LT_32( max_gap_fx, 2293776 /*Q14*/ ) && max_gap_fx > 0 ) { - non_crossing_plane_elevation_deg_fx[num_planes] = ele_check_fx; // q14 + non_crossing_plane_elevation_deg_fx[num_planes] = ele_check_fx; /* Q14 */ move32(); num_planes = add( num_planes, 1 ); IF( EQ_16( num_planes, VBAP_MAX_PLANES ) ) diff --git a/lib_rend/lib_rend.c b/lib_rend/lib_rend.c index a178953bad28b53c1d8b3b0d8ece6767ca112d91..90ac02ce1dcc9944e3f01d1b9319d0e03fe1dacc 100644 --- a/lib_rend/lib_rend.c +++ b/lib_rend/lib_rend.c @@ -133,7 +133,7 @@ typedef 
struct #ifndef IVAS_FLOAT_FIXED float gain; /* Linear, not in dB */ #else - Word32 gain_fx; /* Linear, not in dB */ + Word32 gain_fx; /* Linear, not in dB Q30 */ #endif rendering_context ctx; Word32 numNewSamplesPerChannel; /* Used to keep track how much new audio was fed before rendering current frame */ @@ -164,7 +164,7 @@ typedef struct rotation_matrix_fx rot_mat_prev_fx; #endif pan_vector prev_pan_gains; - int8_t firstFrameRendered; + Word8 firstFrameRendered; #ifndef IVAS_FLOAT_FIXED float *bufferData; #endif @@ -174,10 +174,10 @@ typedef struct float nonDiegeticPanGain; #endif #ifdef IVAS_FLOAT_FIXED - Word32 nonDiegeticPanGain_fx; + Word32 nonDiegeticPanGain_fx; /* Q31 */ #endif OMASA_ANA_HANDLE hOMasa; - uint16_t total_num_objects; + UWord16 total_num_objects; #ifdef IVAS_FLOAT_FIXED Word32 ism_metadata_delay_ms_fx; /* Q0 */ #else @@ -210,13 +210,13 @@ typedef struct Word16 numLfeChannels; bool pan_lfe; // float lfeInputGain; - Word32 lfeInputGain_fx; + Word32 lfeInputGain_fx; /* Q31 */ // float lfeOutputAzimuth; Word16 lfeOutputAzimuth_fx; // float lfeOutputElevation; Word16 lfeOutputElevation_fx; // IVAS_REND_LfePanMtx lfePanMtx; - IVAS_REND_LfePanMtx_fx lfePanMtx_fx; + IVAS_REND_LfePanMtx_fx lfePanMtx_fx; /* Q31 */ } lfe_routing; #else typedef struct @@ -236,16 +236,22 @@ typedef struct /* Full panning matrix. 1st index is input channel, 2nd index is output channel. All LFE channels should be included, both for inputs and outputs */ +#ifndef IVAS_FLOAT_FIXED pan_matrix panGains; - pan_matrix_fx panGains_fx; +#else + pan_matrix_fx panGains_fx; /* Q31 */ +#endif LSSETUP_CUSTOM_STRUCT customLsInput; EFAP_WRAPPER efapInWrapper; TDREND_WRAPPER tdRendWrapper; CREND_WRAPPER_HANDLE crendWrapper; REVERB_HANDLE hReverb; +#ifndef IVAS_FLOAT_FIXED rotation_gains rot_gains_prev; +#else rotation_gains_Word32 rot_gains_prev_fx; +#endif Word16 nonDiegeticPan; Word32 nonDiegeticPanGain_fx; lfe_routing lfeRouting; @@ -253,7 +259,7 @@ typedef struct #ifndef IVAS_FLOAT_FIXED float *bufferData; #endif - int16_t binauralDelaySmp; + Word16 binauralDelaySmp; #ifndef IVAS_FLOAT_FIXED float nonDiegeticPanGain; float *lfeDelayBuffer; @@ -292,12 +298,13 @@ typedef struct // pan_matrix hoaDecMtx; pan_matrix_fx hoaDecMtx_fx; CREND_WRAPPER_HANDLE crendWrapper; - rotation_gains rot_gains_prev; - rotation_gains_fx rot_gains_prev_fx; #ifndef IVAS_FLOAT_FIXED + rotation_gains rot_gains_prev; float *bufferData; -#endif +#else + rotation_gains_fx rot_gains_prev_fx; Word32 *bufferData_fx; +#endif DIRAC_ANA_HANDLE hDirAC; } input_sba; #else @@ -397,7 +404,7 @@ static void intermidiate_ext_dirac_render( *-------------------------------------------------------------------*/ #ifdef IVAS_FLOAT_FIXED static ivas_error allocateInputBaseBufferData_fx( - Word32 **data, + Word32 **data, /* Qx */ const Word16 data_size ) { *data = (Word32 *) malloc( data_size * sizeof( Word32 ) ); @@ -424,7 +431,7 @@ static ivas_error allocateInputBaseBufferData( #endif #ifdef IVAS_FLOAT_FIXED static void freeInputBaseBufferData_fx( - Word32 **data ) + Word32 **data /* Qx */ ) { IF( *data != NULL ) { @@ -449,7 +456,7 @@ static void freeInputBaseBufferData( #endif #ifdef IVAS_FLOAT_FIXED static ivas_error allocateMcLfeDelayBuffer_fx( - Word32 **lfeDelayBuffer, + Word32 **lfeDelayBuffer, /* Qx */ const Word16 data_size ) { *lfeDelayBuffer = (Word32 *) malloc( data_size * sizeof( Word32 ) ); @@ -477,7 +484,7 @@ static ivas_error allocateMcLfeDelayBuffer( #endif #ifdef IVAS_FLOAT_FIXED static void freeMcLfeDelayBuffer_fx( - Word32 **lfeDelayBuffer 
) + Word32 **lfeDelayBuffer /* Qx */ ) { IF( *lfeDelayBuffer != NULL ) { @@ -521,11 +528,6 @@ static IVAS_QUATERNION quaternionInit_fx( move16(); move16(); -#ifdef IVAS_FLOAT_FIXED_TO_BE_REMOVED - q.w = 1.0f; - q.x = q.y = q.z = 0.0f; -#endif - return q; } #else @@ -651,7 +653,7 @@ static void accumulate2dArrayToBuffer( /*! r: number of clipped output samples */ static Word32 limitRendererOutput_fx( IVAS_LIMITER_HANDLE hLimiter, /* i/o: limiter struct handle */ - Word32 *output, /* i/o: I/O buffer */ + Word32 *output, /* i/o: I/O buffer Q(q_factor) */ const Word16 output_frame, /* i : number of samples per channel in the buffer */ const Word32 threshold, /* i : signal amplitude above which limiting starts to be applied */ Word16 q_factor ) /* i : q factor of output samples */ @@ -1208,7 +1210,7 @@ static LSSETUP_CUSTOM_STRUCT defaultCustomLs( #ifdef IVAS_FLOAT_FIXED static ivas_error getSpeakerAzimuths_fx( AUDIO_CONFIG config, - const Word32 **azimuths ) + const Word32 **azimuths /* Q22 */ ) { SWITCH( config ) { @@ -1279,7 +1281,7 @@ static ivas_error getSpeakerAzimuths( #ifdef IVAS_FLOAT_FIXED static ivas_error getSpeakerElevations_fx( AUDIO_CONFIG config, - const Word32 **elevations ) + const Word32 **elevations /* Q22 */ ) { SWITCH( config ) { @@ -1514,8 +1516,8 @@ static ivas_error getNumNonLfeChannelsInSpeakerLayout( static ivas_error getMcConfigValues_fx( AUDIO_CONFIG inConfig, const LSSETUP_CUSTOM_STRUCT *pInCustomLs, - const Word32 **azimuth, - const Word32 **elevation, + const Word32 **azimuth, /* Q22 */ + const Word32 **elevation, /* Q22 */ Word16 *lfe_idx, Word16 *is_planar ) { @@ -1666,8 +1668,8 @@ static ivas_error initEfap( const LSSETUP_CUSTOM_STRUCT *pCustomLsOut ) { ivas_error error; - const Word32 *azimuths; - const Word32 *elevations; + const Word32 *azimuths; /* Q22 */ + const Word32 *elevations; /* Q22 */ Word16 numNonLfeChannels; test(); @@ -1798,11 +1800,11 @@ static ivas_error initEfap( #ifdef IVAS_FLOAT_FIXED static ivas_error getEfapGains_fx( EFAP_WRAPPER efapWrapper, - const Word32 azi, - const Word32 ele, - pan_vector_fx panGains ) + const Word32 azi, /* Q22 */ + const Word32 ele, /* Q22 */ + pan_vector_fx panGains /* Q31 */ ) { - pan_vector_fx tmpPanGains; /* tmp pan gain buffer without LFE channels */ + pan_vector_fx tmpPanGains; /* tmp pan gain buffer without LFE channels */ /* Q30 */ Word32 *readPtr; Word16 i; Word16 lfeCount; @@ -1826,7 +1828,7 @@ static ivas_error getEfapGains_fx( { panGains[i] = 0; move32(); - ++lfeCount; + lfeCount = add( lfeCount, 1 ); } ELSE { @@ -1947,7 +1949,7 @@ static ivas_error initHeadRotation_fx( IVAS_REND_HANDLE hIvasRend ) { Word16 i, crossfade_len; - Word32 tmp_fx; + Word32 tmp_fx; /* Q31 */ ivas_error error; /* Head rotation is enabled by default */ @@ -2117,7 +2119,7 @@ static void initRotGainsWord32_fx( return; } -#endif +#else static void initRotGains( rotation_gains rot_gains ) { @@ -2132,6 +2134,7 @@ static void initRotGains( return; } +#endif #ifdef IVAS_FLOAT_FIXED static void initRendInputBase_fx( input_base *inputBase, @@ -2232,6 +2235,7 @@ static IVAS_ISM_METADATA defaultObjectPosition( } #endif // IVAS_FLOAT_FIXED +#ifndef IVAS_FLOAT_FIXED static int8_t checkObjectPositionChanged( IVAS_ISM_METADATA *currentPos, IVAS_ISM_METADATA *previousPos ) @@ -2239,8 +2243,7 @@ static int8_t checkObjectPositionChanged( return !( fabs( currentPos->azimuth - previousPos->azimuth ) < EPSILON && fabs( currentPos->elevation - previousPos->elevation ) < EPSILON ); } - -#ifdef IVAS_FLOAT_FIXED +#else static Word8 
checkObjectPositionChanged_fx( IVAS_ISM_METADATA *currentPos, IVAS_ISM_METADATA *previousPos ) @@ -2409,6 +2412,7 @@ static ivas_error setRendInputActiveIsm( initRendInputBase_fx( &inputIsm->base, inConfig, id, rendCtx, inputIsm->bufferData_fx, MAX_BUFFER_LENGTH ); inputIsm->firstFrameRendered = FALSE; + move16(); inputIsm->currentPos = defaultObjectPosition(); inputIsm->previousPos = defaultObjectPosition(); @@ -2662,7 +2666,7 @@ static void copyLsConversionMatrixToPanMatrix_fx( } ELSE { - panMatrix[inCh][outCh] = L_shl( lsConvMatrix[i].value, 1 ); + panMatrix[inCh][outCh] = L_shl( lsConvMatrix[i].value, 1 ); /* Q30 + Q1 = Q31 */ } move32(); } @@ -2670,7 +2674,6 @@ static void copyLsConversionMatrixToPanMatrix_fx( return; } #else - static void copyLsConversionMatrixToPanMatrix( const LS_CONVERSION_MATRIX *lsConvMatrix, pan_matrix panMatrix ) @@ -2695,6 +2698,8 @@ static void copyLsConversionMatrixToPanMatrix( return; } #endif + +#ifndef IVAS_FLOAT_FIXED static void setZeroPanMatrix( pan_matrix panMatrix ) { @@ -2707,7 +2712,7 @@ static void setZeroPanMatrix( return; } -#ifdef IVAS_FLOAT_FIXED +#else static void setZeroPanMatrix_fx( pan_matrix_fx panMatrix ) { @@ -2721,6 +2726,7 @@ static void setZeroPanMatrix_fx( return; } #endif + #ifdef IVAS_FLOAT_FIXED /* Note: this only sets non-zero elements, call setZeroPanMatrix() to init first. */ static void fillIdentityPanMatrix_fx( @@ -2850,7 +2856,7 @@ static ivas_error initMcPanGainsWithEfap_fx( Word16 i; Word16 numNonLfeInChannels; Word16 inLfeChIdx, outChIdx; - const Word32 *spkAzi, *spkEle; + const Word32 *spkAzi, *spkEle; /* Q22 */ ivas_error error; IF( NE_32( inputMc->base.inConfig, IVAS_AUDIO_CONFIG_LS_CUSTOM ) ) @@ -2895,14 +2901,14 @@ static ivas_error initMcPanGainsWithEfap_fx( { IF( EQ_16( i, inLfeChIdx ) ) { - ++outChIdx; + outChIdx = add( outChIdx, 1 ); } IF( NE_32( ( error = getEfapGains_fx( *inputMc->base.ctx.pEfapOutWrapper, spkAzi[i], spkEle[i], inputMc->panGains_fx[outChIdx] ) ), IVAS_ERR_OK ) ) { return error; } - ++outChIdx; + outChIdx = add( outChIdx, 1 ); } test(); @@ -3085,6 +3091,7 @@ static ivas_error initMcPanGainsWithMonoOut_fx( /* ls_conversion_cicpX_stereo contains gains for side speakers. * These should be skipped with 5.1+X inputs. */ skipSideSpeakers = false; + move16(); test(); if ( EQ_32( inputMc->base.inConfig, IVAS_AUDIO_CONFIG_5_1_2 ) || EQ_32( inputMc->base.inConfig, IVAS_AUDIO_CONFIG_5_1_4 ) ) { @@ -3121,7 +3128,7 @@ static ivas_error initMcPanGainsWithMonoOut_fx( inputMc->panGains_fx[writeIdx][0] = L_shl( ls_conversion_cicpX_mono_fx[readIdx][0], 1 ); // Q31 } move32(); - ++readIdx; + readIdx = add( readIdx, 1 ); } } @@ -3198,9 +3205,9 @@ static ivas_error initMcPanGainsWithStereoLookup_fx( * Use gains for center CICP speaker and return early. 
*/ IF( EQ_32( inputMc->base.inConfig, IVAS_AUDIO_CONFIG_MONO ) ) { - inputMc->panGains_fx[0][0] = L_shl( ls_conversion_cicpX_stereo_fx[2][0], 1 ); // Q31 + inputMc->panGains_fx[0][0] = L_shl( ls_conversion_cicpX_stereo_fx[2][0], 1 ); /* Q30 + Q1 = Q31 */ move32(); - inputMc->panGains_fx[0][1] = L_shl( ls_conversion_cicpX_stereo_fx[2][1], 1 ); // Q31 + inputMc->panGains_fx[0][1] = L_shl( ls_conversion_cicpX_stereo_fx[2][1], 1 ); /* Q30 + Q1 = Q31 */ move32(); return IVAS_ERR_OK; } @@ -3237,7 +3244,7 @@ static ivas_error initMcPanGainsWithStereoLookup_fx( } ELSE { - inputMc->panGains_fx[writeIdx][0] = L_shl( ls_conversion_cicpX_stereo_fx[readIdx][0], 1 ); + inputMc->panGains_fx[writeIdx][0] = L_shl( ls_conversion_cicpX_stereo_fx[readIdx][0], 1 ); /* Q30 + Q1 = Q31 */ } move32(); @@ -3247,10 +3254,10 @@ static ivas_error initMcPanGainsWithStereoLookup_fx( } ELSE { - inputMc->panGains_fx[writeIdx][1] = L_shl( ls_conversion_cicpX_stereo_fx[readIdx][1], 1 ); + inputMc->panGains_fx[writeIdx][1] = L_shl( ls_conversion_cicpX_stereo_fx[readIdx][1], 1 ); /* Q30 + Q1 = Q31 */ } move32(); - ++readIdx; + readIdx = add( readIdx, 1 ); } return IVAS_ERR_OK; @@ -3449,7 +3456,7 @@ static ivas_error updateLfePanGainsForMcOut( } /* linear input gain */ - v_multc_fixed( inputMc->lfeRouting.lfePanMtx_fx[i], inputMc->lfeRouting.lfeInputGain_fx, inputMc->lfeRouting.lfePanMtx_fx[i], numOutChannels ); + v_multc_fixed( inputMc->lfeRouting.lfePanMtx_fx[i], inputMc->lfeRouting.lfeInputGain_fx, inputMc->lfeRouting.lfePanMtx_fx[i], numOutChannels ); /* Q31 */ } return error; @@ -3528,7 +3535,7 @@ static ivas_error updateLfePanGainsForAmbiOut( ivas_dirac_dec_get_response_fx( inputMc->lfeRouting.lfeOutputAzimuth_fx, inputMc->lfeRouting.lfeOutputElevation_fx, inputMc->lfeRouting.lfePanMtx_fx[i], outAmbiOrder, Q29 ); /* linear input gain */ - v_multc_fixed( inputMc->lfeRouting.lfePanMtx_fx[i], inputMc->lfeRouting.lfeInputGain_fx, inputMc->lfeRouting.lfePanMtx_fx[i], IVAS_MAX_OUTPUT_CHANNELS ); + v_multc_fixed( inputMc->lfeRouting.lfePanMtx_fx[i], inputMc->lfeRouting.lfeInputGain_fx, inputMc->lfeRouting.lfePanMtx_fx[i], IVAS_MAX_OUTPUT_CHANNELS ); /* Q31 */ } return error; @@ -3606,7 +3613,7 @@ static ivas_error updateMcPanGainsForMcOut( } ELSE { - inputMc->panGains_fx[0][0] = L_add( L_shr( inputMc->nonDiegeticPanGain_fx, 1 ), ONE_IN_Q30 ); + inputMc->panGains_fx[0][0] = L_add( L_shr( inputMc->nonDiegeticPanGain_fx, 1 ), ONE_IN_Q30 /* 0.5f in Q31 */ ); /* Q31 */ } move32(); inputMc->panGains_fx[0][1] = L_sub( ONE_IN_Q31, inputMc->panGains_fx[0][0] ); @@ -3713,7 +3720,7 @@ static ivas_error updateMcPanGainsForAmbiOut( { Word16 ch_in, ch_out, lfeIdx, i; Word16 numNonLfeInChannels, outAmbiOrder; - const Word32 *spkAzi_fx, *spkEle_fx; + const Word32 *spkAzi_fx, *spkEle_fx; /* Q22 */ ivas_error error; IF( NE_32( ( error = getAmbisonicsOrder_fx( outConfig, &outAmbiOrder ) ), IVAS_ERR_OK ) ) @@ -3768,11 +3775,11 @@ static ivas_error updateMcPanGainsForAmbiOut( } ELSE { - inputMc->panGains_fx[ch_out][i] = L_shl( temp, 2 ); + inputMc->panGains_fx[ch_out][i] = L_shl( temp, 2 ); /* Q29 + Q2 = Q31 */ move32(); } } - ++ch_in; + ch_in = add( ch_in, 1 ); } } ELSE @@ -3816,11 +3823,11 @@ static ivas_error updateMcPanGainsForAmbiOut( } ELSE { - inputMc->panGains_fx[ch_out][i] = L_shl( temp, 2 ); + inputMc->panGains_fx[ch_out][i] = L_shl( temp, 2 ); /* Q29 + Q2 = Q31 */ move32(); } } - ++ch_in; + ch_in = add( ch_in, 1 ); } } @@ -4053,11 +4060,13 @@ static ivas_error initMcBinauralRendering( IF( EQ_16( inConfig, IVAS_AUDIO_CONFIG_LS_CUSTOM ) && 
NE_16( outConfig, IVAS_AUDIO_CONFIG_BINAURAL_ROOM_REVERB ) ) { useTDRend = TRUE; + move16(); } ELSE IF( ( EQ_16( inConfig, IVAS_AUDIO_CONFIG_5_1 ) || EQ_16( inConfig, IVAS_AUDIO_CONFIG_7_1 ) ) && ( inputMc->base.ctx.pHeadRotData->headRotEnabled ) ) { useTDRend = TRUE; + move16(); } } @@ -4177,8 +4186,8 @@ static ivas_error initMcBinauralRendering( Word16 exp = 0; move16(); Word16 var1 = BASOP_Util_Divide3232_Scale( *inputMc->base.ctx.pOutSampleRate, 1000000000, &exp ); - Word32 var2 = L_shr_r( Mpy_32_32( binauralDelayNs, L_deposit_h( var1 ) ), negate( exp ) ); // 31 + exp - inputMc->binauralDelaySmp = (Word16) var2; + Word32 var2 = L_shr_r( Mpy_32_32( binauralDelayNs, L_deposit_h( var1 ) ), negate( exp ) ); /* Q0 */ + inputMc->binauralDelaySmp = extract_l( var2 ); move16(); // inputMc->binauralDelaySmp = (int16_t) roundf( (float) binauralDelayNs * *inputMc->base.ctx.pOutSampleRate / 1000000000.f ); @@ -4406,6 +4415,7 @@ static lfe_routing defaultLfeRouting( case IVAS_AUDIO_CONFIG_LS_CUSTOM: FOR( i = 0; i < routing.numLfeChannels && i < customLsOut.num_lfe; ++i ) { + test(); routing.lfePanMtx_fx[i][customLsOut.lfe_idx[i]] = ONE_IN_Q31; move32(); } @@ -4507,7 +4517,6 @@ static ivas_error setRendInputActiveMc( } initRendInputBase_fx( &inputMc->base, inConfig, id, rendCtx, inputMc->bufferData_fx, MAX_BUFFER_LENGTH ); - setZeroPanMatrix( inputMc->panGains ); setZeroPanMatrix_fx( inputMc->panGains_fx ); inputMc->customLsInput = defaultCustomLs(); @@ -4516,7 +4525,6 @@ static ivas_error setRendInputActiveMc( inputMc->hReverb = NULL; inputMc->hMcMasa = NULL; - initRotGains( inputMc->rot_gains_prev ); initRotGainsWord32_fx( inputMc->rot_gains_prev_fx ); inputMc->lfeRouting = defaultLfeRouting( inConfig, inputMc->customLsInput, outConfig, *inputMc->base.ctx.pCustomLsOut ); set32_fx( inputMc->lfeDelayBuffer_fx, 0, MAX_BIN_DELAY_SAMPLES ); @@ -4747,7 +4755,7 @@ static ivas_error initSbaPanGainsForMcOut( { CONTINUE; /* nothing to be rendered to LFE */ } - inputSba->hoaDecMtx_fx[chInIdx][chOutIdx] = L_shl_sat( *readPtr++, 2 ); + inputSba->hoaDecMtx_fx[chInIdx][chOutIdx] = L_shl_sat( *readPtr++, Q2 ); /* Q29 + Q2 = Q31 */ move32(); } } @@ -4876,7 +4884,6 @@ static ivas_error updateSbaPanGains( rendering_context rendCtx; /* Reset to all zeros - some functions below only write non-zero elements. 
*/ - // setZeroPanMatrix( inputSba->hoaDecMtx ); setZeroPanMatrix_fx( inputSba->hoaDecMtx_fx ); inConfig = inputSba->base.inConfig; @@ -5269,7 +5276,7 @@ static ivas_error setRendInputActiveMasa( } initRendInputBase_fx( &inputMasa->base, inConfig, id, rendCtx, inputMasa->bufferData_fx, MAX_BUFFER_LENGTH ); - IF( ( error = getAudioConfigNumChannels( inConfig, &numInChannels ) ) != IVAS_ERR_OK ) + IF( NE_32( ( error = getAudioConfigNumChannels( inConfig, &numInChannels ) ), IVAS_ERR_OK ) ) { return error; } @@ -5288,7 +5295,7 @@ static ivas_error setRendInputActiveMasa( temp = 2; } move16(); - IF( ( error = masaPrerendOpen_fx( &inputMasa->hMasaPrerend, temp, *( inputMasa->base.ctx.pOutSampleRate ) ) ) != IVAS_ERR_OK ) + IF( NE_32( ( error = masaPrerendOpen_fx( &inputMasa->hMasaPrerend, temp, *( inputMasa->base.ctx.pOutSampleRate ) ) ), IVAS_ERR_OK ) ) { return error; } @@ -5358,7 +5365,7 @@ ivas_error IVAS_REND_Open( const Word32 outputSampleRate, const AUDIO_CONFIG outConfig, const Word16 nonDiegeticPan, - const Word32 nonDiegeticPanGain, /*Q31*/ + const Word32 nonDiegeticPanGain, /* Q31 */ const Word16 num_subframes ) { Word16 i; @@ -5635,11 +5642,6 @@ static LSSETUP_CUSTOM_STRUCT makeCustomLsSetup( /* Copy layout description */ customLs.num_spk = rendCustomLsLayout.num_spk; move16(); - FOR( i = 0; i < rendCustomLsLayout.num_spk; i++ ) - { - customLs.ls_azimuth_fx[i] = (Word32) ( rendCustomLsLayout.azimuth[i] * ONE_IN_Q22 ); - customLs.ls_elevation_fx[i] = (Word32) ( rendCustomLsLayout.elevation[i] * ONE_IN_Q22 ); - } Copy32( rendCustomLsLayout.azimuth_fx, customLs.ls_azimuth_fx, rendCustomLsLayout.num_spk ); Copy32( rendCustomLsLayout.elevation_fx, customLs.ls_elevation_fx, rendCustomLsLayout.num_spk ); customLs.is_planar_setup = 1; @@ -6696,9 +6698,9 @@ ivas_error IVAS_REND_SetInputGain( } #else ivas_error IVAS_REND_SetInputGain_fx( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_REND_InputId inputId, /* i : ID of the input */ - const Word32 gain /* i : linear gain (not in dB) */ + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_REND_InputId inputId, /* i : ID of the input */ + const Word32 gain /* i : linear gain (not in dB) Q30 */ ) { input_base *inputBase; @@ -6823,11 +6825,11 @@ ivas_error IVAS_REND_SetInputLfeMtx_fx( *-------------------------------------------------------------------*/ ivas_error IVAS_REND_SetInputLfePos_fx( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_REND_InputId inputId, /* i : ID of the input */ - const Word32 inputGain, /* i : Input gain to be applied to the LFE channel(s) */ - const Word16 outputAzimuth, /* i : Output azimuth position */ - const Word16 outputElevation /* i : Output elevation position */ + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_REND_InputId inputId, /* i : ID of the input */ + const Word32 inputGain, /* i : Input gain to be applied to the LFE channel(s) Q31 */ + const Word16 outputAzimuth, /* i : Output azimuth position Q0 */ + const Word16 outputElevation /* i : Output elevation position Q0 */ ) { input_base *pInputBase; @@ -6854,11 +6856,11 @@ ivas_error IVAS_REND_SetInputLfePos_fx( pInputMc->lfeRouting.pan_lfe = true; move16(); - pInputMc->lfeRouting.lfeInputGain_fx = inputGain; // Q31 + pInputMc->lfeRouting.lfeInputGain_fx = inputGain; /* Q31 */ move32(); - pInputMc->lfeRouting.lfeOutputAzimuth_fx = (Word16) ( outputAzimuth ); // Q0 + pInputMc->lfeRouting.lfeOutputAzimuth_fx = outputAzimuth; /* Q0 */ move16(); - 
pInputMc->lfeRouting.lfeOutputElevation_fx = (Word16) ( outputElevation ); // Q0 + pInputMc->lfeRouting.lfeOutputElevation_fx = outputElevation; /* Q0 */ move16(); IF( NE_32( ( error = updateMcPanGains( pInputMc, hIvasRend->outputConfig ) ), IVAS_ERR_OK ) ) @@ -7160,6 +7162,13 @@ ivas_error IVAS_REND_GetDelay_fx( Word32 max_latency_ns; Word32 timescale_by_ns[7] = { 0, 17180, 34360, 0, 68719, 0, 103079 }; + move32(); + move32(); + move32(); + move32(); + move32(); + move32(); + move32(); /* Validate function arguments */ test(); @@ -7242,6 +7251,7 @@ ivas_error IVAS_REND_GetDelay_fx( IF( NE_32( hIvasRend->inputsMasa[i].base.inConfig, IVAS_AUDIO_CONFIG_INVALID ) ) { latency_ns = (Word32) ( IVAS_FB_DEC_DELAY_NS ); + move32(); max_latency_ns = L_max( max_latency_ns, latency_ns ); } } @@ -7508,7 +7518,9 @@ ivas_error IVAS_REND_FeedInputObjectMetadataToOMasa( /* Set position to OMasa struct */ hIvasRend->inputsIsm->hOMasa->ism_azimuth_fx[inputIndex] = objectPosition.azimuth_fx; + move32(); hIvasRend->inputsIsm->hOMasa->ism_elevation_fx[inputIndex] = objectPosition.elevation_fx; + move32(); return IVAS_ERR_OK; } @@ -7933,25 +7945,25 @@ ivas_error IVAS_REND_SetHeadRotation( rotQuat = headRot; } - Word32 updateRate_fx = 1677721600; // value is 200 in Q23 - rotQuat.w_fx = L_shl( rotQuat.w_fx, sub( Q29, rotQuat.q_fact ) ); - rotQuat.x_fx = L_shl( rotQuat.x_fx, sub( Q29, rotQuat.q_fact ) ); - rotQuat.y_fx = L_shl( rotQuat.y_fx, sub( Q29, rotQuat.q_fact ) ); - rotQuat.z_fx = L_shl( rotQuat.z_fx, sub( Q29, rotQuat.q_fact ) ); + Word32 updateRate_fx = 1677721600; // value is 200 in Q23 + rotQuat.w_fx = L_shl( rotQuat.w_fx, sub( Q29, rotQuat.q_fact ) ); /* Q29 */ + rotQuat.x_fx = L_shl( rotQuat.x_fx, sub( Q29, rotQuat.q_fact ) ); /* Q29 */ + rotQuat.y_fx = L_shl( rotQuat.y_fx, sub( Q29, rotQuat.q_fact ) ); /* Q29 */ + rotQuat.z_fx = L_shl( rotQuat.z_fx, sub( Q29, rotQuat.q_fact ) ); /* Q29 */ move32(); move32(); move32(); move32(); - hIvasRend->headRotData.hOrientationTracker->refRot.w_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.w_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ) ); - hIvasRend->headRotData.hOrientationTracker->refRot.x_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.x_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ); - hIvasRend->headRotData.hOrientationTracker->refRot.y_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.y_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ); - hIvasRend->headRotData.hOrientationTracker->refRot.z_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.z_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ); - hIvasRend->headRotData.hOrientationTracker->absAvgRot.w_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.w_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ); - hIvasRend->headRotData.hOrientationTracker->absAvgRot.x_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.x_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ); - hIvasRend->headRotData.hOrientationTracker->absAvgRot.y_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.y_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ); - hIvasRend->headRotData.hOrientationTracker->absAvgRot.z_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.z_fx, Q29 - hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ); + 
hIvasRend->headRotData.hOrientationTracker->refRot.w_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.w_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->refRot.x_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.x_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->refRot.y_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.y_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->refRot.z_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->refRot.z_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->refRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->absAvgRot.w_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.w_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->absAvgRot.x_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.x_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->absAvgRot.y_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.y_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ) ); /* Q29 */ + hIvasRend->headRotData.hOrientationTracker->absAvgRot.z_fx = L_shl( hIvasRend->headRotData.hOrientationTracker->absAvgRot.z_fx, sub( Q29, hIvasRend->headRotData.hOrientationTracker->absAvgRot.q_fact ) ); /* Q29 */ move32(); move32(); @@ -8504,8 +8516,8 @@ static void renderBufferChannelLerp( static void renderBufferChannelLerp_fx( const IVAS_REND_AudioBuffer inAudio, const Word32 inChannelIdx, - const Word32 *const gainsCurrent, - const Word32 *const gainsPrev, + const Word32 *const gainsCurrent, /* Q31 */ + const Word32 *const gainsPrev, /* Q31 */ IVAS_REND_AudioBuffer outAudio ) { const Word32 *inSmpl; @@ -8515,8 +8527,8 @@ static void renderBufferChannelLerp_fx( Word32 i; const Word32 *lastInSmpl; Word16 outChnlIdx; - Word32 currentGain; - Word32 previousGain; + Word32 currentGain; /* Q31 */ + Word32 previousGain; /* Q31 */ /* Pointer to behind last input sample */ lastInSmpl = getSmplPtr_fx( inAudio, inChannelIdx, inAudio.config.numSamplesPerChannel ); @@ -8524,9 +8536,17 @@ static void renderBufferChannelLerp_fx( FOR( outChnlIdx = 0; outChnlIdx < outAudio.config.numChannels; ++outChnlIdx ) { currentGain = gainsCurrent[outChnlIdx]; - previousGain = gainsPrev == NULL ? 
0 : gainsPrev[outChnlIdx]; - move32(); move32(); + if ( gainsPrev == NULL ) + { + previousGain = 0; + move32(); + } + else + { + previousGain = gainsPrev[outChnlIdx]; + move32(); + } /* Process current output channel only if applying non-zero gains */ test(); @@ -8621,7 +8641,7 @@ static void renderBufferChannelLerp_fx( static void renderBufferChannel_fx( const IVAS_REND_AudioBuffer inAudio, const Word32 inChannelIdx, - const Word32 *const outputGains, + const Word32 *const outputGains, /* Q31 */ IVAS_REND_AudioBuffer outAudio ) { renderBufferChannelLerp_fx( inAudio, inChannelIdx, outputGains, NULL, outAudio ); @@ -8656,7 +8676,7 @@ static ivas_error chooseCrossfade( #else static ivas_error chooseCrossfade_fx( const IVAS_REND_HeadRotData *headRotData, - const Word32 **pCrossfade ) + const Word32 **pCrossfade /* Q31 */ ) { *pCrossfade = headRotData->crossfade_fx; @@ -8679,10 +8699,10 @@ static ivas_error rotateFrameMc_fx( { Word16 i; Word16 j; - const Word32 *crossfade; + const Word32 *crossfade; /* Q31 */ Word16 num_subframes; Word16 subframe_idx, subframe_len; - Word32 azimuth_fx, elevation_fx; + Word32 azimuth_fx, elevation_fx; /* Q22 */ Word16 is_planar_setup, lfe_idx; Word16 nchan; Word16 ch_in, ch_out; @@ -8691,10 +8711,10 @@ static ivas_error rotateFrameMc_fx( const Word32 *ls_azimuth, *ls_elevation; rotation_matrix_fx Rmat_fx; rotation_gains_Word32 gains; - Word32 tmp_gains[MAX_INPUT_CHANNELS]; + Word32 tmp_gains[MAX_INPUT_CHANNELS]; /* Q30 */ ivas_error error; push_wmops( "rotateFrameMc_fx" ); - IF( NE_32( ( error = chooseCrossfade_fx( headRotData, &crossfade ) ), IVAS_ERR_OK ) ) // Q31 + IF( NE_32( ( error = chooseCrossfade_fx( headRotData, &crossfade ) ), IVAS_ERR_OK ) ) { return error; } @@ -8770,10 +8790,8 @@ static ivas_error rotateFrameMc_fx( } /* input channel index without LFE */ - ch_in_woLFE = ( ( GT_16( lfe_idx, 0 ) ) && ( GE_16( ch_in, lfe_idx ) ) ) ? 
sub( ch_in, 1 ) : ch_in; - test(); - IF( ( GT_16( lfe_idx, 0 ) ) && ( GE_16( ch_in, lfe_idx ) ) ) + IF( ( lfe_idx > 0 ) && ( GE_16( ch_in, lfe_idx ) ) ) { ch_in_woLFE = sub( ch_in, 1 ); } @@ -8824,14 +8842,14 @@ static ivas_error rotateFrameMc_fx( { FOR( ch_in = 0; ch_in < nchan; ch_in++ ) { - writePtr = getSmplPtr_fx( outAudio, ch_out, imult1616( subframe_idx, subframe_len ) ); - readPtr = getSmplPtr_fx( inAudio, ch_in, imult1616( subframe_idx, subframe_len ) ); + writePtr = getSmplPtr_fx( outAudio, ch_out, imult1616( subframe_idx, subframe_len ) ); /* Qx */ + readPtr = getSmplPtr_fx( inAudio, ch_in, imult1616( subframe_idx, subframe_len ) ); /* Qx */ /* crossfade with previous rotation gains */ FOR( i = 0; i < subframe_len; i++ ) { *writePtr = - L_add( *writePtr, L_add( Mpy_32_32( ( *readPtr ), Mpy_32_32( ( ONE_IN_Q31 - crossfade[i] ), gains_prev[ch_in][ch_out] ) ), - Mpy_32_32( ( *readPtr ), Mpy_32_32( crossfade[i], gains[ch_in][ch_out] ) ) ) ); // Qinp -1 + L_add( *writePtr, L_add( Mpy_32_32( ( *readPtr ), Mpy_32_32( L_sub( ONE_IN_Q31, crossfade[i] ), gains_prev[ch_in][ch_out] ) ), + Mpy_32_32( ( *readPtr ), Mpy_32_32( crossfade[i], gains[ch_in][ch_out] ) ) ) ); /* Qx - 1 */ move32(); readPtr++; writePtr++; @@ -8997,28 +9015,28 @@ static ivas_error rotateFrameMc( #ifdef IVAS_FLOAT_FIXED static ivas_error rotateFrameSba_fx( - IVAS_REND_AudioBuffer inAudio, /* i : Input Audio buffer */ - const AUDIO_CONFIG inConfig, /* i : Input Audio config */ - const IVAS_REND_HeadRotData *headRotData, /* i : Head rotation data */ + IVAS_REND_AudioBuffer inAudio, /* i : Input Audio buffer */ + const AUDIO_CONFIG inConfig, /* i : Input Audio config */ + const IVAS_REND_HeadRotData *headRotData, /* i : Head rotation data */ const COMBINED_ORIENTATION_HANDLE *hCombinedOrientationData, /* i : Combined head and external orientations */ - Word16 gains_prev[MAX_INPUT_CHANNELS][MAX_INPUT_CHANNELS], /* i/o: Previous frame rotation gains */ - IVAS_REND_AudioBuffer outAudio /* o : Output Audio buffer */ + Word16 gains_prev[MAX_INPUT_CHANNELS][MAX_INPUT_CHANNELS], /* i/o: Previous frame rotation gains Q14 */ + IVAS_REND_AudioBuffer outAudio /* o : Output Audio buffer */ ) { Word16 i, l, n, m; Word16 m1, m2; Word16 shd_rot_max_order; - const Word32 *crossfade; + const Word32 *crossfade; /* Q31 */ Word16 num_subframes; Word16 subframe_idx, subframe_len; Word32 *writePtr; Word32 tmpRot[2 * HEADROT_ORDER + 1]; - Word16 gains[HEADROT_SHMAT_DIM][HEADROT_SHMAT_DIM]; + Word16 gains[HEADROT_SHMAT_DIM][HEADROT_SHMAT_DIM]; /* Q14 */ Word32 temp; - Word32 Rmat[3][3]; + Word32 Rmat[3][3]; /* Q30 */ ivas_error error; Word16 idx, exp; - Word32 cf, oneminuscf; + Word32 cf, oneminuscf; /* Q31 */ Word32 val; push_wmops( "rotateFrameSba" ); @@ -9052,7 +9070,7 @@ static ivas_error rotateFrameSba_fx( { FOR( l = 0; l < 3; l++ ) { - Rmat[i][l] = ( *hCombinedOrientationData )->Rmat_fx[subframe_idx][i][l]; // Q30 + Rmat[i][l] = ( *hCombinedOrientationData )->Rmat_fx[subframe_idx][i][l]; /* Q30 */ move32(); } } @@ -9091,14 +9109,14 @@ static ivas_error rotateFrameSba_fx( /* compute mtx-vector product for this l */ FOR( n = m1; n < m2; n++ ) { - tmpRot[sub( n, m1 )] = 0; + tmpRot[n - m1] = 0; move32(); FOR( m = m1; m < m2; m++ ) { - val = inAudio.data_fx[add( imult1616( m, inAudio.config.numSamplesPerChannel ), idx )]; + val = inAudio.data_fx[m * inAudio.config.numSamplesPerChannel + idx]; /* crossfade with previous rotation gains */ temp = Mpy_32_32( L_add( Mpy_32_16_r( cf, gains[n][m] ), ( Mpy_32_16_r( oneminuscf, gains_prev[n][m] ) ) ), 
val ); - tmpRot[sub( n, m1 )] = L_add( L_shl( temp, 1 ), tmpRot[sub( n, m1 )] ); // Qexp + tmpRot[n - m1] = L_add( L_shl( temp, 1 ), tmpRot[n - m1] ); move32(); move32(); } @@ -9107,19 +9125,19 @@ static ivas_error rotateFrameSba_fx( FOR( n = m1; n < m2; n++ ) { writePtr = getSmplPtr_fx( outAudio, n, idx ); - ( *writePtr ) = tmpRot[sub( n, m1 )]; + ( *writePtr ) = tmpRot[n - m1]; move32(); } m1 = m2; move16(); - m2 = add( m2, 2 * ( l + 1 ) + 1 ); + m2 = add( m2, add( shl( add( l, 1 ), 1 ), 1 ) ); } } /* move SHrotmat to SHrotmat_prev */ FOR( i = 0; i < HEADROT_SHMAT_DIM; i++ ) { - Copy( gains[i], gains_prev[i], HEADROT_SHMAT_DIM ); // Q14 + Copy( gains[i], gains_prev[i], HEADROT_SHMAT_DIM ); } } pop_wmops(); @@ -9261,7 +9279,7 @@ static ivas_error renderIsmToBinaural( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpTDRendBuffer[i], L_FRAME48k, sub( 11, exp ) ); + Scale_sig32( tmpTDRendBuffer[i], L_FRAME48k, sub( Q11, exp ) ); /* Q11 */ } IF( NE_32( ( error = ivas_td_binaural_renderer_ext_fx( &ismInput->tdRendWrapper, ismInput->base.inConfig, NULL, ismInput->base.ctx.pCombinedOrientationData, &ismInput->currentPos, ismInput->hReverb, ism_md_subframe_update_ext, @@ -9273,7 +9291,7 @@ static ivas_error renderIsmToBinaural( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpTDRendBuffer[i], L_FRAME48k, negate( sub( 11, exp ) ) ); + Scale_sig32( tmpTDRendBuffer[i], L_FRAME48k, negate( sub( Q11, exp ) ) ); /* Q(exp) */ } IF( ismInput->hReverb != NULL ) @@ -9282,7 +9300,7 @@ static ivas_error renderIsmToBinaural( { FOR( Word16 j = 0; j < outAudio.config.numSamplesPerChannel; j++ ) { - tmpTDRendBuffer[i][j] = L_shl( tmpTDRendBuffer[i][j], 2 ); + tmpTDRendBuffer[i][j] = L_shl( tmpTDRendBuffer[i][j], Q2 ); /* Q(exp + 2) */ move32(); } } @@ -9333,9 +9351,9 @@ static Word16 getNumSubframesInBuffer( const IVAS_REND_AudioBuffer *buffer, const Word32 sampleRate ) { - Word16 scale, temp = extract_l( Mpy_32_32( sampleRate, 10737418 ) ); // Q0 + Q31 - Q31 -> Q0 + Word16 scale, temp = extract_l( Mpy_32_32( sampleRate, 10737418 /* 1 / FRAMES_PER_SEC * MAX_PARAM_SPATIAL_SUBFRAMES in Q31 */ ) ); temp = BASOP_Util_Divide1616_Scale( buffer->config.numSamplesPerChannel, temp, &scale ); - temp = shr( temp, sub( 15, scale ) ); + temp = shr( temp, sub( 15, scale ) ); /* Q0 */ return temp; } #else @@ -9358,7 +9376,7 @@ static ivas_error renderIsmToBinauralRoom( { Word16 position_changed; Word16 i, j; - Word32 azi_rot, ele_rot; + Word32 azi_rot, ele_rot; /* Q22 */ Word16 subframe_idx; Word16 tmp; rotation_matrix_fx Rmat; @@ -9389,7 +9407,6 @@ static ivas_error renderIsmToBinauralRoom( { FOR( subframe_idx = 0; subframe_idx < ( *hCombinedOrientationData )->num_subframes; subframe_idx++ ) { - IF( ( *hCombinedOrientationData )->enableCombinedOrientation[subframe_idx] != 0 ) { combinedOrientationEnabled = 1; @@ -9459,7 +9476,9 @@ static ivas_error renderIsmToBinauralRoom( move32(); } + test(); position_changed = !ismInput->firstFrameRendered || checkObjectPositionChanged_fx( &rotatedPos, &rotatedPosPrev ); + move16(); /* set previous gains if this is the first frame */ IF( NE_32( ( error = getEfapGains_fx( *ismInput->base.ctx.pEfapOutWrapper, rotatedPosPrev.azimuth_fx, rotatedPosPrev.elevation_fx, ismInput->prev_pan_gains_fx ) ), IVAS_ERR_OK ) ) @@ -9493,11 +9512,20 @@ static ivas_error renderIsmToBinauralRoom( tmpMcBuffer.data_fx = malloc( imult1616( tmpMcBuffer.config.numSamplesPerChannel, tmpMcBuffer.config.numChannels ) * sizeof( Word32 ) ); set_zero_fx( tmpMcBuffer.data_fx, imult1616( 
tmpMcBuffer.config.numSamplesPerChannel, tmpMcBuffer.config.numChannels ) ); - - renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, - position_changed ? currentPanGains : ismInput->prev_pan_gains_fx, - position_changed ? ismInput->prev_pan_gains_fx : NULL, - tmpMcBuffer ); + IF( position_changed ) + { + renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, + currentPanGains, + ismInput->prev_pan_gains_fx, + tmpMcBuffer ); + } + ELSE + { + renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, + ismInput->prev_pan_gains_fx, + NULL, + tmpMcBuffer ); + } copyBufferTo2dArray_fx( tmpMcBuffer, tmpRendBuffer ); @@ -9512,11 +9540,8 @@ static ivas_error renderIsmToBinauralRoom( Copy32( currentPanGains, ismInput->prev_pan_gains_fx, MAX_OUTPUT_CHANNELS ); } // Crend_process porting - move16(); CREND_HANDLE hCrend; hCrend = ismInput->crendWrapper->hCrend; - move16(); - move16(); IF( hCrend->reflections != NULL ) { test(); @@ -9524,7 +9549,7 @@ static ivas_error renderIsmToBinauralRoom( { FOR( i = 0; i < 150; i++ ) { - hCrend->reflections->shoebox_data.gains.data_fx[i] = L_shl( hCrend->reflections->shoebox_data.gains.data_fx[i], 9 ); + hCrend->reflections->shoebox_data.gains.data_fx[i] = L_shl( hCrend->reflections->shoebox_data.gains.data_fx[i], Q9 ); move32(); } } @@ -9735,7 +9760,7 @@ static ivas_error renderIsmToBinauralReverb( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, sub( 11, exp ) ); + Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, sub( Q11, exp ) ); /* Q11 */ } IF( NE_32( ( error = ivas_td_binaural_renderer_ext_fx( &ismInput->tdRendWrapper, ismInput->base.inConfig, NULL, ismInput->base.ctx.pCombinedOrientationData, &ismInput->currentPos, ismInput->hReverb, ism_md_subframe_update_ext, *ismInput->base.ctx.pOutSampleRate, outAudio.config.numSamplesPerChannel, tmpRendBuffer_fx, &exp ) ), IVAS_ERR_OK ) ) @@ -9745,7 +9770,7 @@ static ivas_error renderIsmToBinauralReverb( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, negate( sub( 11, exp ) ) ); + Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, negate( sub( 11, exp ) ) ); /* Q(exp) */ } IF( ismInput->hReverb != NULL ) @@ -9754,7 +9779,7 @@ static ivas_error renderIsmToBinauralReverb( { FOR( Word16 j = 0; j < outAudio.config.numSamplesPerChannel; j++ ) { - tmpRendBuffer_fx[i][j] = L_shl( tmpRendBuffer_fx[i][j], 2 ); + tmpRendBuffer_fx[i][j] = L_shl( tmpRendBuffer_fx[i][j], 2 ); /* Q(exp + 2) */ move16(); } } @@ -9799,26 +9824,31 @@ static ivas_error renderIsmToMc( const IVAS_REND_AudioBuffer outAudio ) { Word8 position_changed; - pan_vector_fx currentPanGains_fx; + pan_vector_fx currentPanGains_fx; /* Q31 */ ivas_error error; push_wmops( "renderIsmToMc" ); ismInput->currentPos.azimuth_fx = L_shl( L_shr( L_add( ismInput->currentPos.azimuth_fx, ONE_IN_Q21 ), Q22 ), Q22 ); + move32(); ismInput->currentPos.elevation_fx = L_shl( L_shr( L_add( ismInput->currentPos.elevation_fx, ONE_IN_Q21 ), Q22 ), Q22 ); + move32(); ismInput->previousPos.azimuth_fx = L_shl( L_shr( L_add( ismInput->previousPos.azimuth_fx, ONE_IN_Q21 ), Q22 ), Q22 ); + move32(); ismInput->previousPos.elevation_fx = L_shl( L_shr( L_add( ismInput->previousPos.elevation_fx, ONE_IN_Q21 ), Q22 ), Q22 ); + move32(); - position_changed = !ismInput->firstFrameRendered || checkObjectPositionChanged( &ismInput->currentPos, &ismInput->previousPos ); test(); + position_changed = !ismInput->firstFrameRendered || checkObjectPositionChanged_fx( &ismInput->currentPos, &ismInput->previousPos ); + 
move16(); IF( EQ_32( *ismInput->base.ctx.pOutConfig, IVAS_AUDIO_CONFIG_STEREO ) ) { IF( ismInput->nonDiegeticPan ) { - currentPanGains_fx[0] = L_add( L_shr( ismInput->nonDiegeticPanGain_fx, 1 ), ONE_IN_Q30 ); - currentPanGains_fx[1] = L_sub( ONE_IN_Q31, currentPanGains_fx[0] ); - ismInput->prev_pan_gains_fx[0] = currentPanGains_fx[0]; // Q31 - ismInput->prev_pan_gains_fx[1] = currentPanGains_fx[1]; // Q31 + currentPanGains_fx[0] = L_add( L_shr( ismInput->nonDiegeticPanGain_fx, 1 ), ONE_IN_Q30 ); /* Q31 */ + currentPanGains_fx[1] = L_sub( ONE_IN_Q31, currentPanGains_fx[0] ); /* Q31 */ + ismInput->prev_pan_gains_fx[0] = currentPanGains_fx[0]; /* Q31 */ + ismInput->prev_pan_gains_fx[1] = currentPanGains_fx[1]; /* Q31 */ move32(); move32(); move32(); @@ -9835,8 +9865,8 @@ static ivas_error renderIsmToMc( elevation_tmp = extract_l( L_shr( ismInput->currentPos.elevation_fx, Q22 ) ); ivas_ism_get_stereo_gains_fx( azimuth_tmp, elevation_tmp, &gains_fx[0], &gains_fx[1] ); - currentPanGains_fx[0] = L_deposit_h( gains_fx[0] ); // Q31 - currentPanGains_fx[1] = L_deposit_h( gains_fx[1] ); // Q31 + currentPanGains_fx[0] = L_deposit_h( gains_fx[0] ); /* Q31 */ + currentPanGains_fx[1] = L_deposit_h( gains_fx[1] ); /* Q31 */ move32(); move32(); @@ -9845,8 +9875,8 @@ static ivas_error renderIsmToMc( set32_fx( ismInput->prev_pan_gains_fx, 0, MAX_OUTPUT_CHANNELS ); ivas_ism_get_stereo_gains_fx( azimuth_tmp, elevation_tmp, &gains_fx[0], &gains_fx[1] ); - ismInput->prev_pan_gains_fx[0] = L_deposit_h( gains_fx[0] ); // Q31 - ismInput->prev_pan_gains_fx[1] = L_deposit_h( gains_fx[1] ); // Q31 + ismInput->prev_pan_gains_fx[0] = L_deposit_h( gains_fx[0] ); /* Q31 */ + ismInput->prev_pan_gains_fx[1] = L_deposit_h( gains_fx[1] ); /* Q31 */ move32(); move32(); } @@ -9885,10 +9915,20 @@ static ivas_error renderIsmToMc( /* Assume num channels in audio buffer to be 1. * This should have been validated in IVAS_REND_FeedInputAudio() */ - renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, - position_changed ? currentPanGains_fx : ismInput->prev_pan_gains_fx, - position_changed ? 
ismInput->prev_pan_gains_fx : NULL, - outAudio ); + IF( position_changed ) + { + renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, + currentPanGains_fx, + ismInput->prev_pan_gains_fx, + outAudio ); + } + ELSE + { + renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, + ismInput->prev_pan_gains_fx, + NULL, + outAudio ); + } IF( position_changed ) { @@ -9989,6 +10029,7 @@ static ivas_error renderIsmToSba( pan_vector_fx currentPanGains_fx; ivas_error error; error = IVAS_ERR_OK; + move32(); ismInput->currentPos.azimuth_fx = L_shl( L_shr( L_add( ismInput->currentPos.azimuth_fx, ONE_IN_Q21 ), Q22 ), Q22 ); ismInput->currentPos.elevation_fx = L_shl( L_shr( L_add( ismInput->currentPos.elevation_fx, ONE_IN_Q21 ), Q22 ), Q22 ); @@ -10011,8 +10052,9 @@ static ivas_error renderIsmToSba( return error; } - position_changed = !ismInput->firstFrameRendered || checkObjectPositionChanged( &ismInput->currentPos, &ismInput->previousPos ); test(); + position_changed = !ismInput->firstFrameRendered || checkObjectPositionChanged_fx( &ismInput->currentPos, &ismInput->previousPos ); + move16(); /* set previous gains if this is the first frame */ Word16 azimuth_tmp, elevation_tmp; @@ -10028,7 +10070,7 @@ static ivas_error renderIsmToSba( Q29 ); FOR( i = 0; i < MAX_OUTPUT_CHANNELS; i++ ) { - ismInput->prev_pan_gains_fx[i] = L_shl_sat( ismInput->prev_pan_gains_fx[i], Q2 ); + ismInput->prev_pan_gains_fx[i] = L_shl_sat( ismInput->prev_pan_gains_fx[i], Q2 ); /* Q29 + Q2 = Q31 */ move32(); } } @@ -10046,22 +10088,31 @@ static ivas_error renderIsmToSba( Q29 ); FOR( i = 0; i < MAX_OUTPUT_CHANNELS; i++ ) { - currentPanGains_fx[i] = L_shl_sat( currentPanGains_fx[i], Q2 ); + currentPanGains_fx[i] = L_shl_sat( currentPanGains_fx[i], Q2 ); /* Q29 + Q2 = Q31 */ move32(); } } /* Assume num channels in audio buffer to be 1. * This should have been validated in IVAS_REND_FeedInputAudio() */ - renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, - position_changed ? currentPanGains_fx : ismInput->prev_pan_gains_fx, - position_changed ? 
ismInput->prev_pan_gains_fx : NULL, - outAudio ); + IF( position_changed ) + { + renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, + currentPanGains_fx, + ismInput->prev_pan_gains_fx, + outAudio ); + } + ELSE + { + renderBufferChannelLerp_fx( ismInput->base.inputBuffer, 0, + ismInput->prev_pan_gains_fx, + NULL, + outAudio ); + } IF( position_changed ) { Copy32( currentPanGains_fx, ismInput->prev_pan_gains_fx, MAX_OUTPUT_CHANNELS ); - // mvr2r( currentPanGains, ismInput->prev_pan_gains, MAX_OUTPUT_CHANNELS ); } pop_wmops(); @@ -10158,16 +10209,18 @@ static void renderIsmToMasa( FOR( i = 1; i < MAX_NUM_OBJECTS; i++ ) { - IF( LT_16( max_e, input_e[0] ) ) - max_e = input_e[i]; - move16(); + if ( LT_16( max_e, input_e[0] ) ) + { + max_e = input_e[i]; + move16(); + } } FOR( i = 0; i < MAX_NUM_OBJECTS; i++ ) { FOR( j = 0; j < L_FRAME48k; j++ ) { - tmpRendBuffer_fx[i][j] = L_shr( tmpRendBuffer_fx[i][j], add( sub( max_e, sub( 31, *outAudio.pq_fact ) ), guard_bits ) ); + tmpRendBuffer_fx[i][j] = L_shr( tmpRendBuffer_fx[i][j], add( sub( max_e, sub( 31, *outAudio.pq_fact ) ), guard_bits ) ); /* Q(31 - (max_e + guard_bits)) */ move32(); } } @@ -10232,10 +10285,10 @@ static ivas_error renderInputIsm( /* Apply input gain to new audio */ v_multc_fixed( inAudio.data_fx, ismInput->base.gain_fx, inAudio.data_fx, imult1616( inAudio.config.numSamplesPerChannel, inAudio.config.numChannels ) ); *outAudio.pq_fact = sub( *outAudio.pq_fact, Q1 ); - exp = *outAudio.pq_fact; - move16(); + exp = *outAudio.pq_fact; move16(); + /* set combined orientation subframe info to start info */ ivas_combined_orientation_set_to_start_index( *ismInput->base.ctx.pCombinedOrientationData ); @@ -10283,11 +10336,11 @@ static ivas_error renderInputIsm( } ismInput->firstFrameRendered = TRUE; + move16(); *outAudio.pq_fact = exp; - - move16(); move16(); + return error; } #else @@ -10382,7 +10435,7 @@ static ivas_error renderActiveInputsIsm( } FOR( Word16 j = 0; j < outAudio.config.numSamplesPerChannel * outAudio.config.numChannels; ++j ) { - outAudio.data_fx[j] = L_shl( outAudio.data_fx[j], sub( sub( input_q, 1 ), ( *outAudio.pq_fact ) ) ); + outAudio.data_fx[j] = L_shl( outAudio.data_fx[j], sub( sub( input_q, 1 ), ( *outAudio.pq_fact ) ) ); /* Q(input_q - 1) */ move32(); } *outAudio.pq_fact = sub( input_q, 1 ); @@ -10434,7 +10487,7 @@ static ivas_error renderLfeToBinaural_fx( assert( ( outAudio.config.numChannels == 2 ) && "Must be binaural output" ); push_wmops( "renderLfeToBinaural" ); - gain_fx = GAIN_LFE_WORD32; + gain_fx = GAIN_LFE_WORD32; /* 1.88364911f in Q30 */ move32(); IF( NE_32( mcInput->base.inConfig, IVAS_AUDIO_CONFIG_LS_CUSTOM ) ) @@ -10455,7 +10508,6 @@ static ivas_error renderLfeToBinaural_fx( /* --- Prepare LFE signal to be added to binaural output --- */ lfeInput = getSmplPtr_fx( mcInput->base.inputBuffer, lfe_idx, 0 ); - move32(); frame_size = mcInput->base.inputBuffer.config.numSamplesPerChannel; move16(); num_cpy_smpl_prev_frame = mcInput->binauralDelaySmp; @@ -10466,10 +10518,10 @@ static ivas_error renderLfeToBinaural_fx( assert( mcInput->binauralDelaySmp < frame_size ); /* Get delayed LFE signal from previous frame, apply gain and save in tmp buffer */ - v_multc_fixed( mcInput->lfeDelayBuffer_fx, gain_fx, tmpLfeBuffer, num_cpy_smpl_prev_frame ); // Qinp-1 + v_multc_fixed( mcInput->lfeDelayBuffer_fx, gain_fx, tmpLfeBuffer, num_cpy_smpl_prev_frame ); /* Qx - 1 */ /* Continue filling tmp buffer, now with LFE signal from current frame */ - v_multc_fixed( lfeInput, gain_fx, tmpLfeBuffer + 
num_cpy_smpl_prev_frame, num_cpy_smpl_cur_frame ); // Qinp-1 + v_multc_fixed( lfeInput, gain_fx, tmpLfeBuffer + num_cpy_smpl_prev_frame, num_cpy_smpl_cur_frame ); /* Qx - 1 */ /* Save remaining LFE samples of current frame for next frame */ MVR2R_WORD32( lfeInput + num_cpy_smpl_cur_frame, mcInput->lfeDelayBuffer_fx, num_cpy_smpl_prev_frame ); @@ -10479,8 +10531,7 @@ static ivas_error renderLfeToBinaural_fx( { FOR( i = 0; i < add( num_cpy_smpl_prev_frame, num_cpy_smpl_cur_frame ); i++ ) { - tmpLfeBuffer[i] = L_shr( tmpLfeBuffer[i], r_shift ); // out_q - + tmpLfeBuffer[i] = L_shr( tmpLfeBuffer[i], r_shift ); /* Q(out_q) */ move32(); } } @@ -10489,7 +10540,7 @@ static ivas_error renderLfeToBinaural_fx( { writePtr = getSmplPtr_fx( outAudio, ear_idx, 0 ); move32(); - v_add_fixed( writePtr, tmpLfeBuffer, writePtr, frame_size, 0 ); // out_q + v_add_fixed( writePtr, tmpLfeBuffer, writePtr, frame_size, 0 ); /* Q(out_q) */ } pop_wmops(); @@ -10608,7 +10659,7 @@ static ivas_error renderMcToBinaural( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, sub( 11, exp ) ); + Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, sub( Q11, exp ) ); /* Q11 */ } IF( NE_32( ( error = ivas_td_binaural_renderer_ext_fx( &mcInput->tdRendWrapper, mcInput->base.inConfig, &mcInput->customLsInput, mcInput->base.ctx.pCombinedOrientationData, NULL, mcInput->hReverb, 0, *mcInput->base.ctx.pOutSampleRate, mcInput->base.inputBuffer.config.numSamplesPerChannel, tmpRendBuffer_fx, &exp ) ), @@ -10619,7 +10670,7 @@ static ivas_error renderMcToBinaural( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, -sub( Q11, exp ) ); + Scale_sig32( tmpRendBuffer_fx[i], L_FRAME48k, negate( sub( Q11, exp ) ) ); /* Q(exp) */ } } ELSE @@ -10791,9 +10842,8 @@ static ivas_error renderMcToBinauralRoom( inConfig = mcInput->base.inConfig; move32(); hCombinedOrientationData = mcInput->base.ctx.pCombinedOrientationData; - move32(); combinedOrientationEnabled = 0; - move32(); + move16(); IF( hCombinedOrientationData != NULL ) { FOR( subframe_idx = 0; subframe_idx < ( *hCombinedOrientationData )->num_subframes; subframe_idx++ ) @@ -10818,7 +10868,7 @@ static ivas_error renderMcToBinauralRoom( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpRendBuffer[i], L_FRAME48k, sub( Q11, exp ) ); + Scale_sig32( tmpRendBuffer[i], L_FRAME48k, sub( Q11, exp ) ); /* Q11 */ } IF( NE_32( ( error = ivas_td_binaural_renderer_ext_fx( &mcInput->tdRendWrapper, mcInput->base.inConfig, &mcInput->customLsInput, mcInput->base.ctx.pCombinedOrientationData, NULL, mcInput->hReverb, @@ -10830,9 +10880,8 @@ static ivas_error renderMcToBinauralRoom( FOR( i = 0; i < MAX_OUTPUT_CHANNELS; ++i ) { - Scale_sig32( tmpRendBuffer[i], L_FRAME48k, -sub( Q11, exp ) ); + Scale_sig32( tmpRendBuffer[i], L_FRAME48k, negate( sub( Q11, exp ) ) ); /* Q(exp) */ } - //*outAudio.pq_fact = exp; } ELSE { @@ -11273,28 +11322,35 @@ static void renderMcToSba( } #endif +#ifdef IVAS_FLOAT_FIXED static void renderMcToMasa( input_mc *mcInput, IVAS_REND_AudioBuffer outAudio ) { -#ifdef IVAS_FLOAT_FIXED push_wmops( "renderMcToMasa" ); Word32 tmpRendBuffer_fx[MAX_OUTPUT_CHANNELS][L_FRAME48k]; copyBufferTo2dArray_fx( mcInput->base.inputBuffer, tmpRendBuffer_fx ); ivas_mcmasa_ana_fx( mcInput->hMcMasa, tmpRendBuffer_fx, *( outAudio.pq_fact ), mcInput->base.inputBuffer.config.numSamplesPerChannel, outAudio.config.numChannels, mcInput->base.inputBuffer.config.numChannels ); accumulate2dArrayToBuffer_fx( tmpRendBuffer_fx, &outAudio 
); + pop_wmops(); + return; +} #else +static void renderMcToMasa( + input_mc *mcInput, + IVAS_REND_AudioBuffer outAudio ) +{ push_wmops( "renderMcToMasa" ); float tmpRendBuffer[MAX_OUTPUT_CHANNELS][L_FRAME48k]; copyBufferTo2dArray( mcInput->base.inputBuffer, tmpRendBuffer ); ivas_mcmasa_ana( mcInput->hMcMasa, tmpRendBuffer, mcInput->base.inputBuffer.config.numSamplesPerChannel, outAudio.config.numChannels, mcInput->base.inputBuffer.config.numChannels ); accumulate2dArrayToBuffer( tmpRendBuffer, &outAudio ); -#endif // IVAS_FLOAT_FIXED pop_wmops(); return; } +#endif #ifdef IVAS_FLOAT_FIXED static ivas_error renderInputMc( @@ -11317,6 +11373,7 @@ static ivas_error renderInputMc( move32(); v_multc_fixed( inAudio.data_fx, mcInput->base.gain_fx, inAudio.data_fx, inAudio.config.numSamplesPerChannel * inAudio.config.numChannels ); *outAudio.pq_fact = sub( *outAudio.pq_fact, Q1 ); // reducing the Q by 1 compensating for the v_mult_fixed done + move16(); /* set combined orientation subframe info to start info */ ivas_combined_orientation_set_to_start_index( *( mcInput->base.ctx.pCombinedOrientationData ) ); @@ -11595,7 +11652,7 @@ static ivas_error renderSbaToBinaural( tmpRotBuffer.data_fx = malloc( tmpRotBuffer.config.numSamplesPerChannel * tmpRotBuffer.config.numChannels * sizeof( Word32 ) ); /* copy input for in-place rotation */ - MVR2R_WORD32( sbaInput->base.inputBuffer.data_fx, tmpRotBuffer.data_fx, tmpRotBuffer.config.numChannels * tmpRotBuffer.config.numSamplesPerChannel ); + Copy32( sbaInput->base.inputBuffer.data_fx, tmpRotBuffer.data_fx, i_mult( tmpRotBuffer.config.numChannels, tmpRotBuffer.config.numSamplesPerChannel ) ); IF( NE_16( ( error = rotateFrameSba_fx( sbaInput->base.inputBuffer, sbaInput->base.inConfig, sbaInput->base.ctx.pHeadRotData, sbaInput->base.ctx.pCombinedOrientationData, sbaInput->rot_gains_prev_fx, tmpRotBuffer ) ), @@ -11765,7 +11822,7 @@ static ivas_error renderSbaToBinauralRoom( tmpRotBuffer.data_fx = malloc( imult1616( tmpRotBuffer.config.numSamplesPerChannel, tmpRotBuffer.config.numChannels ) * sizeof( Word32 ) ); /* copy input for in-place rotation */ - MVR2R_WORD32( sbaInput->base.inputBuffer.data_fx, tmpRotBuffer.data_fx, tmpRotBuffer.config.numChannels * tmpRotBuffer.config.numSamplesPerChannel ); + Copy32( sbaInput->base.inputBuffer.data_fx, tmpRotBuffer.data_fx, i_mult( tmpRotBuffer.config.numChannels, tmpRotBuffer.config.numSamplesPerChannel ) ); IF( NE_32( ( error = rotateFrameSba_fx( sbaInput->base.inputBuffer, sbaInput->base.inConfig, sbaInput->base.ctx.pHeadRotData, sbaInput->base.ctx.pCombinedOrientationData, @@ -11788,7 +11845,7 @@ static ivas_error renderSbaToBinauralRoom( tmpMcBuffer.config.numChannels = tmp; move16(); tmpMcBuffer.data_fx = malloc( tmpMcBuffer.config.numSamplesPerChannel * tmpMcBuffer.config.numChannels * sizeof( Word32 ) ); - set32_fx( tmpMcBuffer.data_fx, 0, tmpMcBuffer.config.numChannels * tmpMcBuffer.config.numSamplesPerChannel ); + set32_fx( tmpMcBuffer.data_fx, 0, i_mult( tmpMcBuffer.config.numChannels, tmpMcBuffer.config.numSamplesPerChannel ) ); IF( combinedOrientationEnabled ) { @@ -11980,7 +12037,7 @@ static ivas_error renderInputSba( *outAudio.pq_fact = outAudio.q_factor; move16(); /* Apply input gain to new audio */ - v_multc_fixed( inAudio.data_fx, sbaInput->base.gain_fx, inAudio.data_fx, inAudio.config.numSamplesPerChannel * inAudio.config.numChannels ); + v_multc_fixed( inAudio.data_fx, sbaInput->base.gain_fx, inAudio.data_fx, i_mult( inAudio.config.numSamplesPerChannel, inAudio.config.numChannels ) ); 
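/* ---------------------------------------------------------------------------
 * Editorial aside (illustrative sketch, not part of this patch): why applying
 * the linear input gain costs one bit of Q on the buffer.
 *
 * The fractional 32x32 multiplies used here (Mpy_32_32, and v_multc_fixed as
 * the surrounding Q comments indicate) follow Q(a) * Q(b) -> Q(a + b - 31).
 * With the input gain kept in Q30 (see IVAS_REND_SetInputGain_fx above), data
 * in Q(x) comes out in Q(x - 1), which is what the pq_fact decrement on the
 * next line compensates for. The helper below mimics that rule with plain
 * 64-bit arithmetic; mpy_q31() and the sample values are local illustrations,
 * not IVAS APIs.
 * ------------------------------------------------------------------------- */
#include <stdint.h>
#include <stdio.h>

static int32_t mpy_q31( int32_t a, int32_t b ) /* Q(a) * Q(b) -> Q(a + b - 31) */
{
    return (int32_t) ( ( (int64_t) a * b ) >> 31 );
}

int main( void )
{
    int32_t sample_q11 = 1536 << 11;                       /* 1536.0 in Q11   */
    int32_t gain_q30   = (int32_t) ( 0.5 * ( 1 << 30 ) );  /* 0.5 in Q30      */
    int32_t out_q10    = mpy_q31( sample_q11, gain_q30 );  /* Q11+Q30-31=Q10  */

    printf( "%f\n", out_q10 / (double) ( 1 << 10 ) );      /* prints 768.000000 */
    return 0;
}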
*outAudio.pq_fact = sub( *outAudio.pq_fact, 1 ); // to compensate for the qfactor reduction in gain multiplication. move16(); @@ -12149,9 +12206,9 @@ static void copyMasaMetadataToDiracRenderer_fx( { FOR( bin = MASA_band_grouping_24[band]; bin < MASA_band_grouping_24[band + 1] && bin < maxBin; bin++ ) { - hSpatParamRendCom->azimuth[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[0].azimuth_fx[sf][band], Q22 ) ); // Q22 -> Q0 + hSpatParamRendCom->azimuth[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[0].azimuth_fx[sf][band], Q22 ) ); /* Q22 - Q22 = Q0 */ move16(); - hSpatParamRendCom->elevation[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[0].elevation_fx[sf][band], Q22 ) ); // Q22 -> Q0 + hSpatParamRendCom->elevation[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[0].elevation_fx[sf][band], Q22 ) ); /* Q22 - Q22 = Q0 */ move16(); hSpatParamRendCom->energy_ratio1_fx[meta_write_index][bin] = meta->directional_meta[0].energy_ratio_fx[sf][band]; move32(); @@ -12164,9 +12221,9 @@ static void copyMasaMetadataToDiracRenderer_fx( IF( EQ_16( hSpatParamRendCom->numSimultaneousDirections, 2 ) ) { - hSpatParamRendCom->azimuth2[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[1].azimuth_fx[sf][band], Q22 ) ); // Q22 -> Q0 + hSpatParamRendCom->azimuth2[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[1].azimuth_fx[sf][band], Q22 ) ); /* Q22 - Q22 = Q0 */ move16(); - hSpatParamRendCom->elevation2[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[1].elevation_fx[sf][band], Q22 ) ); // Q22 -> Q0 + hSpatParamRendCom->elevation2[meta_write_index][bin] = extract_l( L_shr( meta->directional_meta[1].elevation_fx[sf][band], Q22 ) ); /* Q22 - Q22 = Q0 */ move16(); hSpatParamRendCom->energy_ratio2_fx[meta_write_index][bin] = meta->directional_meta[1].energy_ratio_fx[sf][band]; move32(); @@ -12179,7 +12236,7 @@ static void copyMasaMetadataToDiracRenderer_fx( } } - hSpatParamRendCom->dirac_bs_md_write_idx = ( hSpatParamRendCom->dirac_bs_md_write_idx + MAX_PARAM_SPATIAL_SUBFRAMES ) % hSpatParamRendCom->dirac_md_buffer_length; + hSpatParamRendCom->dirac_bs_md_write_idx = add( hSpatParamRendCom->dirac_bs_md_write_idx, MAX_PARAM_SPATIAL_SUBFRAMES ) % hSpatParamRendCom->dirac_md_buffer_length; move16(); return; @@ -12236,7 +12293,7 @@ static void renderMasaToMasa( IVAS_REND_AudioBuffer outAudio ) { Word16 sf, band, dir, numDirs; - Word32 ratioSum_fx; + Word32 ratioSum_fx; /* Q30 */ MASA_DECODER_EXT_OUT_META_HANDLE outMeta; MASA_METADATA_FRAME *inMeta; Word32 tmpBuffer_fx[MAX_OUTPUT_CHANNELS][L_FRAME48k]; @@ -12256,7 +12313,8 @@ static void renderMasaToMasa( move16(); move16(); /* Calculate energy */ - l_ts = masaInput->base.inputBuffer.config.numSamplesPerChannel / CLDFB_NO_COL_MAX; + // l_ts = masaInput->base.inputBuffer.config.numSamplesPerChannel / CLDFB_NO_COL_MAX; + l_ts = shr( masaInput->base.inputBuffer.config.numSamplesPerChannel, 4 ); numAnalysisChannels = masaInput->hMasaPrerend->num_Cldfb_instances; move16(); /* do processing over all CLDFB time slots */ @@ -12282,8 +12340,8 @@ static void renderMasaToMasa( cldfbAnalysis_ts_fx_fixed_q( &( tmpBuffer_fx[i][l_ts * ts] ), Chan_RealBuffer_fx[i], Chan_ImagBuffer_fx[i], l_ts, masaInput->hMasaPrerend->cldfbAnaEnc[i], &q_cldfb_out ); scale_factor = s_min( scale_factor, s_min( getScaleFactor32( Chan_RealBuffer_fx[i], CLDFB_NO_CHANNELS_MAX ), getScaleFactor32( Chan_ImagBuffer_fx[i], CLDFB_NO_CHANNELS_MAX ) ) ); scale_factor = sub( scale_factor, 1 
); - scale_sig32( Chan_RealBuffer_fx[i], CLDFB_NO_CHANNELS_MAX, scale_factor ); // Q17 - scale_sig32( Chan_ImagBuffer_fx[i], CLDFB_NO_CHANNELS_MAX, scale_factor ); // Q17 + scale_sig32( Chan_RealBuffer_fx[i], CLDFB_NO_CHANNELS_MAX, scale_factor ); /* Q(q_cldfb_out + scale_factor) */ + scale_sig32( Chan_ImagBuffer_fx[i], CLDFB_NO_CHANNELS_MAX, scale_factor ); /* Q(q_cldfb_out + scale_factor) */ } Word16 q_add = sub( 31, add( scale_factor, q_cldfb_out ) ); @@ -12298,8 +12356,8 @@ static void renderMasaToMasa( { FOR( i = 0; i < numAnalysisChannels; i++ ) { - Word32 temp = L_add( Mult_32_32( Chan_RealBuffer_fx[0][j], Chan_RealBuffer_fx[0][j] ), Mult_32_32( Chan_ImagBuffer_fx[0][j], Chan_ImagBuffer_fx[0][j] ) ); - masaInput->hMasaPrerend->energy_fx[block_m_idx][band_m_idx] = BASOP_Util_Add_Mant32Exp( masaInput->hMasaPrerend->energy_fx[block_m_idx][band_m_idx], tmp_energy_e[block_m_idx][band_m_idx], temp, ( 2 * q_add ), &tmp_energy_e[block_m_idx][band_m_idx] ); + Word32 temp = L_add( Mpy_32_32( Chan_RealBuffer_fx[0][j], Chan_RealBuffer_fx[0][j] ), Mpy_32_32( Chan_ImagBuffer_fx[0][j], Chan_ImagBuffer_fx[0][j] ) ); /* 2 * Q(q_cldfb_out + scale_factor) - 31 */ + masaInput->hMasaPrerend->energy_fx[block_m_idx][band_m_idx] = BASOP_Util_Add_Mant32Exp( masaInput->hMasaPrerend->energy_fx[block_m_idx][band_m_idx], tmp_energy_e[block_m_idx][band_m_idx], temp, shl( q_add, 1 ), &tmp_energy_e[block_m_idx][band_m_idx] ); move32(); } } @@ -12312,7 +12370,6 @@ static void renderMasaToMasa( move16(); FOR( j = 0; j < MASA_FREQUENCY_BANDS; j++ ) { - max_e = s_max( max_e, tmp_energy_e[i][j] ); } masaInput->hMasaPrerend->energy_e[i] = max_e; @@ -12320,7 +12377,7 @@ static void renderMasaToMasa( FOR( j = 0; j < MASA_FREQUENCY_BANDS; j++ ) { - masaInput->hMasaPrerend->energy_fx[i][j] = L_shr( masaInput->hMasaPrerend->energy_fx[i][j], sub( max_e, tmp_energy_e[i][j] ) ); + masaInput->hMasaPrerend->energy_fx[i][j] = L_shr( masaInput->hMasaPrerend->energy_fx[i][j], sub( max_e, tmp_energy_e[i][j] ) ); /* Q(31 - max_e) */ move32(); } } @@ -12329,7 +12386,7 @@ static void renderMasaToMasa( test(); IF( EQ_16( masaInput->base.inputBuffer.config.numChannels, 1 ) && EQ_16( outAudio.config.numChannels, 2 ) ) { - MVR2R_WORD32( tmpBuffer_fx[0], tmpBuffer_fx[1], masaInput->base.inputBuffer.config.numSamplesPerChannel ); + Copy32( tmpBuffer_fx[0], tmpBuffer_fx[1], masaInput->base.inputBuffer.config.numSamplesPerChannel ); } ELSE IF( EQ_16( masaInput->base.inputBuffer.config.numChannels, 2 ) && EQ_16( outAudio.config.numChannels, 1 ) ) { @@ -12378,14 +12435,16 @@ static void renderMasaToMasa( FOR( dir = 0; dir < numDirs; dir++ ) { tmp = BASOP_Util_Divide3232_Scale_cadence( inMeta->directional_meta[dir].energy_ratio_fx[sf][band], ratioSum_fx, &tmp_e ); - inMeta->directional_meta[dir].energy_ratio_fx[sf][band] = L_shl( tmp, sub( tmp_e, 1 ) ); // Q30 + inMeta->directional_meta[dir].energy_ratio_fx[sf][band] = L_shl( tmp, sub( tmp_e, 1 ) ); /* Q30 */ + move32(); } tmp_e = 0; move16(); tmp = 0; move32(); tmp = BASOP_Util_Divide3232_Scale_cadence( inMeta->common_meta.diffuse_to_total_ratio_fx[sf][band], ratioSum_fx, &tmp_e ); - inMeta->common_meta.diffuse_to_total_ratio_fx[sf][band] = L_shl( tmp, sub( tmp_e, 1 ) ); // Q30 + inMeta->common_meta.diffuse_to_total_ratio_fx[sf][band] = L_shl( tmp, sub( tmp_e, 1 ) ); /* Q30 */ + move32(); } } } @@ -12400,7 +12459,7 @@ static void renderMasaToMasa( { outMeta->directionIndex[dir][sf][band] = index_theta_phi_16_fx( &inMeta->directional_meta[dir].elevation_fx[sf][band], 
&inMeta->directional_meta[dir].azimuth_fx[sf][band], masaInput->hMasaPrerend->sph_grid16 ); outMeta->directToTotalRatio[dir][sf][band] = (UWord8) L_shr( inMeta->directional_meta[dir].energy_ratio_fx[sf][band], Q22 ); - outMeta->diffuseToTotalRatio[sf][band] -= outMeta->directToTotalRatio[dir][sf][band]; + outMeta->diffuseToTotalRatio[sf][band] = (UWord8) sub( outMeta->diffuseToTotalRatio[sf][band], outMeta->directToTotalRatio[dir][sf][band] ); outMeta->spreadCoherence[dir][sf][band] = (UWord8) shr( inMeta->directional_meta[dir].spread_coherence_fx[sf][band], Q7 ); move16(); @@ -12575,11 +12634,11 @@ static ivas_error renderInputMasa( *outAudio.pq_fact = outAudio.q_factor; move16(); /* Apply input gain to new audio */ - v_multc_fixed( inAudio.data_fx, masaInput->base.gain_fx, inAudio.data_fx, inAudio.config.numSamplesPerChannel * inAudio.config.numChannels ); + v_multc_fixed( inAudio.data_fx, masaInput->base.gain_fx, inAudio.data_fx, i_mult( inAudio.config.numSamplesPerChannel, inAudio.config.numChannels ) ); *outAudio.pq_fact = sub( *outAudio.pq_fact, 1 ); // to compensate for the qfactor reduction in gain multiplication. move16(); - maxBin = extract_l( Mpy_32_32( *masaInput->base.ctx.pOutSampleRate, INV_CLDFB_BANDWIDTH_Q31 ) ); + maxBin = extract_l( Mpy_32_32( *masaInput->base.ctx.pOutSampleRate, INV_CLDFB_BANDWIDTH_Q31 ) ); /* Q0 */ /* set combined orientation subframe info to start info */ ivas_combined_orientation_set_to_start_index( *( masaInput->base.ctx.pCombinedOrientationData ) ); @@ -12600,7 +12659,7 @@ static ivas_error renderInputMasa( copyBufferTo2dArray_fx( masaInput->base.inputBuffer, tmpBuffer_buff_fx ); num_subframes = BASOP_Util_Divide3232_Scale( L_mult0( masaInput->base.inputBuffer.config.numSamplesPerChannel, IVAS_NUM_FRAMES_PER_SEC * MAX_PARAM_SPATIAL_SUBFRAMES ), *masaInput->base.ctx.pOutSampleRate, &exp ); - num_subframes = shr( num_subframes, sub( 15, exp ) ); + num_subframes = shr( num_subframes, sub( 15, exp ) ); /* Q0 */ SWITCH( masaInput->hMasaExtRend->renderer_type ) { @@ -12611,10 +12670,11 @@ static ivas_error renderInputMasa( FOR( ch = 0; ch < masaInput->hMasaExtRend->hDirACRend->hOutSetup.nchan_out_woLFE + masaInput->hMasaExtRend->hDirACRend->hOutSetup.num_lfe; ch++ ) { masaInput->hMasaExtRend->cldfbAnaRend[0]->Q_cldfb_state = Q11; + move16(); } FOR( ch = 0; ch < MAX_OUTPUT_CHANNELS; ch++ ) { - Scale_sig32( tmpBuffer_buff_fx[ch], L_FRAME48k, sub( Q11, *outAudio.pq_fact ) ); + Scale_sig32( tmpBuffer_buff_fx[ch], L_FRAME48k, sub( Q11, *outAudio.pq_fact ) ); /* Q11 */ } ivas_masa_ext_dirac_render_fx( masaInput->hMasaExtRend, tmpBuffer_fx, num_subframes ); @@ -12623,8 +12683,9 @@ static ivas_error renderInputMasa( FOR( ch = 0; ch < masaInput->hMasaExtRend->hDirACRend->hOutSetup.nchan_out_woLFE + masaInput->hMasaExtRend->hDirACRend->hOutSetup.num_lfe; ch++ ) { - scale_sig32( masaInput->hMasaExtRend->cldfbSynRend[ch]->cldfb_state_fx, masaInput->hMasaExtRend->cldfbSynRend[ch]->cldfb_size, sub( Q11, masaInput->hMasaExtRend->cldfbSynRend[ch]->Q_cldfb_state ) ); + scale_sig32( masaInput->hMasaExtRend->cldfbSynRend[ch]->cldfb_state_fx, masaInput->hMasaExtRend->cldfbSynRend[ch]->cldfb_size, sub( Q11, masaInput->hMasaExtRend->cldfbSynRend[ch]->Q_cldfb_state ) ); /* Q11 */ masaInput->hMasaExtRend->cldfbSynRend[ch]->Q_cldfb_state = Q11; + move16(); } intermidiate_ext_dirac_render( masaInput->hMasaExtRend, 0 ); @@ -12635,8 +12696,8 @@ static ivas_error renderInputMasa( copyMasaMetadataToDiracRenderer_fx( &masaInput->masaMetadata, masaInput->hMasaExtRend->hSpatParamRendCom, 
maxBin ); - Scale_sig32( tmpBuffer_buff_fx[0], L_FRAME48k, sub( Q11, *outAudio.pq_fact ) ); - Scale_sig32( tmpBuffer_buff_fx[1], L_FRAME48k, sub( Q11, *outAudio.pq_fact ) ); + Scale_sig32( tmpBuffer_buff_fx[0], L_FRAME48k, sub( Q11, *outAudio.pq_fact ) ); /* Q11 */ + Scale_sig32( tmpBuffer_buff_fx[1], L_FRAME48k, sub( Q11, *outAudio.pq_fact ) ); /* Q11 */ ivas_masa_ext_rend_parambin_render_fx( masaInput->hMasaExtRend, *masaInput->base.ctx.pCombinedOrientationData, tmpBuffer_fx, num_subframes ); *outAudio.pq_fact = Q11; @@ -13157,7 +13218,7 @@ static ivas_error getSamplesInternal( test(); IF( EQ_32( getAudioConfigType( hIvasRend->outputConfig ), IVAS_REND_AUDIO_CONFIG_TYPE_BINAURAL ) && - NE_32( outAudio.config.numSamplesPerChannel * 1000, ( hIvasRend->num_subframes * BINAURAL_RENDERING_FRAME_SIZE_MS ) * hIvasRend->sampleRateOut ) ) + NE_32( L_mult0( outAudio.config.numSamplesPerChannel, 1000 ), imult3216( hIvasRend->sampleRateOut, i_mult( hIvasRend->num_subframes, BINAURAL_RENDERING_FRAME_SIZE_MS ) ) ) ) { return IVAS_ERROR( IVAS_ERR_INVALID_BUFFER_SIZE, "Binaural rendering requires specific frame size" ); } @@ -13581,13 +13642,13 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( hDirACRend->hOutSetup.ls_elevation_fx = inputMasa->base.ctx.pCustomLsOut->ls_elevation_fx; hDirACRend->hOutSetup.num_lfe = inputMasa->base.ctx.pCustomLsOut->num_lfe; + move16(); hDirACRend->hOutSetup.index_lfe[0] = inputMasa->base.ctx.pCustomLsOut->lfe_idx[0]; + move16(); hDirACRend->hOutSetup.is_loudspeaker_setup = TRUE; - hDirACRend->hOutSetup.is_planar_setup = (Word8) inputMasa->base.ctx.pCustomLsOut->is_planar_setup; - move16(); - move16(); move16(); + hDirACRend->hOutSetup.is_planar_setup = (Word8) inputMasa->base.ctx.pCustomLsOut->is_planar_setup; move16(); } @@ -13605,7 +13666,8 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( { hDirACRend->hOutSetup.ambisonics_order = SBA_HOA3_ORDER; /* Order 3 is used by default in DirAC for SHD processing */ move16(); - IF( EQ_16( hDirACRend->hOutSetup.output_config, IVAS_AUDIO_CONFIG_MONO ) || EQ_16( hDirACRend->hOutSetup.output_config, IVAS_AUDIO_CONFIG_STEREO ) ) + test(); + if ( EQ_16( hDirACRend->hOutSetup.output_config, IVAS_AUDIO_CONFIG_MONO ) || EQ_16( hDirACRend->hOutSetup.output_config, IVAS_AUDIO_CONFIG_STEREO ) ) { hDirACRend->hOutSetup.ambisonics_order = SBA_FOA_ORDER; move16(); @@ -13625,10 +13687,10 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( IF( EQ_16( hDirACRend->hOutSetup.output_config, IVAS_AUDIO_CONFIG_MONO ) ) { hDirACRend->synthesisConf = DIRAC_SYNTHESIS_MONO; - hDirACRend->panningConf = DIRAC_PANNING_HOA3; - nchan_out_woLFE = 1; move32(); + hDirACRend->panningConf = DIRAC_PANNING_HOA3; move32(); + nchan_out_woLFE = 1; move16(); } ELSE IF( hDirACRend->hOutSetup.is_loudspeaker_setup ) @@ -13641,15 +13703,15 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( ELSE IF( !hDirACRend->hOutSetup.is_loudspeaker_setup && GT_16( nchan_transport, 1 ) ) { hDirACRend->synthesisConf = DIRAC_SYNTHESIS_PSD_SHD; - hDirACRend->panningConf = DIRAC_PANNING_HOA3; move32(); + hDirACRend->panningConf = DIRAC_PANNING_HOA3; move32(); } ELSE { hDirACRend->synthesisConf = DIRAC_SYNTHESIS_GAIN_SHD; - hDirACRend->panningConf = DIRAC_PANNING_HOA3; move32(); + hDirACRend->panningConf = DIRAC_PANNING_HOA3; move32(); } @@ -13688,39 +13750,35 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( { /* Directional and diffuses components in output LS format */ hDirACRend->num_outputs_diff = nchan_out_woLFE; - hDirACRend->num_outputs_dir = 
nchan_out_woLFE; move16(); + hDirACRend->num_outputs_dir = nchan_out_woLFE; move16(); } ELSE IF( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_GAIN_SHD ) ) { /* Directional and diffuses components in SHD */ /* Diffuseness components up to 1st order */ - hDirACRend->num_outputs_diff = imult1616( ( s_min( hDirACRend->hOutSetup.ambisonics_order, 1 ) + 1 ), ( add( s_min( hDirACRend->hOutSetup.ambisonics_order, 1 ), 1 ) ) ); + hDirACRend->num_outputs_diff = imult1616( add( s_min( hDirACRend->hOutSetup.ambisonics_order, 1 ), 1 ), ( add( s_min( hDirACRend->hOutSetup.ambisonics_order, 1 ), 1 ) ) ); hDirACRend->num_outputs_dir = ivas_sba_get_nchan_fx( hDirACRend->hOutSetup.ambisonics_order, 0 ); - move16(); - move16(); } ELSE IF( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_PSD_SHD ) ) { hDirACRend->num_outputs_diff = DIRAC_HOA_RENDERING_NUM_VIRT_DECORR_LS; - hDirACRend->num_outputs_dir = nchan_out_woLFE; move16(); + hDirACRend->num_outputs_dir = nchan_out_woLFE; move16(); } ELSE IF( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_MONO ) ) { hDirACRend->num_outputs_diff = 1; /* There is one output channel in mono */ - hDirACRend->num_outputs_dir = 2; /* Two channels are pre-rendered for stereo type detection */ move16(); + hDirACRend->num_outputs_dir = 2; /* Two channels are pre-rendered for stereo type detection */ move16(); } ELSE { assert( 0 && "DirAC: not existing synthesis methods!" ); } - move16(); - move16(); IF( ( hDirACRend->proto_index_dir = (Word16 *) malloc( sizeof( Word16 ) * hDirACRend->num_outputs_dir ) ) == NULL ) { @@ -13740,10 +13798,10 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( IF( EQ_16( nchan_transport, 1 ) ) { hDirACRend->num_protos_ambi = 1; - hDirACRend->num_protos_dir = 1; - hDirACRend->num_protos_diff = 1; move16(); + hDirACRend->num_protos_dir = 1; move16(); + hDirACRend->num_protos_diff = 1; move16(); } ELSE IF( EQ_16( nchan_transport, 2 ) ) @@ -13751,33 +13809,33 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( IF( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_GAIN_SHD ) ) { hDirACRend->num_protos_ambi = 2; - hDirACRend->num_protos_diff = 1; - hDirACRend->num_protos_dir = 2; - hDirACRend->proto_index_dir[1] = 1; move16(); + hDirACRend->num_protos_diff = 1; move16(); + hDirACRend->num_protos_dir = 2; move16(); + hDirACRend->proto_index_dir[1] = 1; move16(); } ELSE IF( EQ_16( hDirACRend->hOutSetup.output_config, IVAS_AUDIO_CONFIG_MONO ) ) { /* Following the foa rendering for code compatibility */ hDirACRend->num_protos_ambi = 2; - hDirACRend->num_protos_dir = 2; - hDirACRend->num_protos_diff = 3; - hDirACRend->proto_index_dir[0] = 0; - hDirACRend->proto_index_diff[0] = 0; move16(); + hDirACRend->num_protos_dir = 2; move16(); + hDirACRend->num_protos_diff = 3; move16(); + hDirACRend->proto_index_dir[0] = 0; move16(); + hDirACRend->proto_index_diff[0] = 0; move16(); } ELSE { hDirACRend->num_protos_ambi = 2; - hDirACRend->num_protos_diff = 3; move16(); + hDirACRend->num_protos_diff = 3; move16(); FOR( k = 0; k < hDirACRend->num_outputs_diff; k++ ) @@ -13806,8 +13864,8 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( ELSE { hDirACRend->num_protos_dir = 2; - hDirACRend->proto_index_dir[1] = 1; move16(); + hDirACRend->proto_index_dir[1] = 1; move16(); } } @@ -13841,7 +13899,7 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( return ( IVAS_ERROR( IVAS_ERR_FAILED_ALLOC, "Can not allocate memory for DirAC\n" ) ); } - set32_fx( hDirACRend->hoa_encoder_fx, 0, nchan_out_woLFE * hDirACRend->num_outputs_diff ); + set32_fx( 
hDirACRend->hoa_encoder_fx, 0, imult1616( nchan_out_woLFE, hDirACRend->num_outputs_diff ) ); compute_hoa_encoder_mtx_fx( ls_azimuth_fx, ls_elevation_fx, hDirACRend->hoa_encoder_fx, hDirACRend->num_outputs_diff, hDirACRend->hOutSetup.ambisonics_order ); } @@ -13874,7 +13932,7 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( /* decorrelation */ hDirACRend->proto_signal_decorr_on = 1; move16(); - IF( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_MONO ) ) + if ( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_MONO ) ) { hDirACRend->proto_signal_decorr_on = 0; move16(); @@ -13906,7 +13964,7 @@ static ivas_error ivas_masa_ext_rend_dirac_rend_init( move16(); test(); - IF( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_PSD_SHD ) || EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_GAIN_SHD ) ) + if ( EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_PSD_SHD ) || EQ_16( hDirACRend->synthesisConf, DIRAC_SYNTHESIS_GAIN_SHD ) ) { hDirACRend->h_output_synthesis_psd_params.use_onset_filters = 0; move16(); @@ -14466,8 +14524,8 @@ static ivas_error ivas_masa_ext_rend_parambin_init( /* Set common variables and defaults */ output_Fs = *( inputMasa->base.ctx.pOutSampleRate ); - nBins = inputMasa->hMasaExtRend->hSpatParamRendCom->num_freq_bands; move32(); + nBins = inputMasa->hMasaExtRend->hSpatParamRendCom->num_freq_bands; move16(); renderer_type = inputMasa->hMasaExtRend->renderer_type; move32(); @@ -14543,7 +14601,7 @@ static ivas_error ivas_masa_ext_rend_parambin_init( tmpFloat_fx = s_max( 0, sub( shl_sat( 1, sub( 15, tmp_e ) ), tmp ) ) /*max( 0.0f, 1.0f - binCenterFreq / 2700.0f )*/; /*Q30*/ tmp2 = extract_l( Mult_32_32( binCenterFreq_fx, 1952258 /*=2^31*180/(550)/360*/ ) % 32767 ); //*binCenterFreq_fx * EVS_PI / 550.0f*/ hDiracDecBin->diffuseFieldCoherence_fx[bin] = L_shl( L_mult0( divide3232( tmpFloat_fx, Mult_32_16( binCenterFreq_fx, 187 /*2^15*pi/550*/ ) ), getSineWord16R2( tmp2 ) ), tmp_e ); /*tmpFloat * sinf( binCenterFreq * EVS_PI / 550.0f ) / ( binCenterFreq * EVS_PI / 550.0f );*/ - hDiracDecBin->diffuseFieldCoherence_fx[bin] = L_shl( hDiracDecBin->diffuseFieldCoherence_fx[bin], 1 ); // Q31 + hDiracDecBin->diffuseFieldCoherence_fx[bin] = L_shl( hDiracDecBin->diffuseFieldCoherence_fx[bin], 1 ); /* Q31 */ move32(); move32(); } @@ -14606,7 +14664,7 @@ static ivas_error ivas_masa_ext_rend_parambin_init( return error; } /* External renderer uses constant regularization factor */ - hDiracDecBin->reqularizationFactor_fx = 6554; + hDiracDecBin->reqularizationFactor_fx = 6554; /* 0.4f in Q14 */ move16(); inputMasa->hMasaExtRend->hDiracDecBin = hDiracDecBin; @@ -15118,7 +15176,7 @@ static void intermidiate_ext_dirac_render( move16(); FOR( slot_idx = 0; slot_idx < hSpatParamRendCom->subframe_nbslots[subframe_idx]; slot_idx++ ) { - Scale_sig32( hDirACRend->h_output_synthesis_psd_state.direct_responses_fx, i_mult( hSpatParamRendCom->num_freq_bands, hDirACRend->num_outputs_dir ), sub( Q30, hDirACRend->h_output_synthesis_psd_state.direct_responses_q ) ); + Scale_sig32( hDirACRend->h_output_synthesis_psd_state.direct_responses_fx, i_mult( hSpatParamRendCom->num_freq_bands, hDirACRend->num_outputs_dir ), sub( Q30, hDirACRend->h_output_synthesis_psd_state.direct_responses_q ) ); /* Q30 */ hDirACRend->h_output_synthesis_psd_state.direct_responses_q = Q30; move16(); } @@ -15126,22 +15184,22 @@ static void intermidiate_ext_dirac_render( IF( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_fx ) { Word16 shift = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_fx, 
hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_len ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_fx, hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_len, shift ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_fx, hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_len, shift ); /* Q(hDirACRend->h_output_synthesis_psd_state.q_cy_cross_dir_smooth + shift) */ hDirACRend->h_output_synthesis_psd_state.q_cy_cross_dir_smooth = add( hDirACRend->h_output_synthesis_psd_state.q_cy_cross_dir_smooth, shift ); move16(); } IF( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_fx ) { Word16 shift = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_len ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_len, shift ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_cross_dir_smooth_prev_len, shift ); /* Q(hDirACRend->h_output_synthesis_psd_state.q_cy_cross_dir_smooth_prev + shift) */ hDirACRend->h_output_synthesis_psd_state.q_cy_cross_dir_smooth_prev = add( hDirACRend->h_output_synthesis_psd_state.q_cy_cross_dir_smooth_prev, shift ); move16(); } IF( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_fx ) { - Word16 shift = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_fx, hDirACRend->num_outputs_dir * hSpatParamRendCom->num_freq_bands ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_len, shift ); + Word16 shift = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_fx, imult1616( hDirACRend->num_outputs_dir, hSpatParamRendCom->num_freq_bands ) ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_len, shift ); /* Q(hDirACRend->h_output_synthesis_psd_state.q_cy_auto_dir_smooth + shift) */ hDirACRend->h_output_synthesis_psd_state.q_cy_auto_dir_smooth = add( hDirACRend->h_output_synthesis_psd_state.q_cy_auto_dir_smooth, shift ); move16(); } @@ -15157,8 +15215,8 @@ static void intermidiate_ext_dirac_render( IF( h_dirac_output_synthesis_state->cy_auto_diff_smooth_fx ) { - tmp = L_norm_arr( h_dirac_output_synthesis_state->cy_auto_diff_smooth_fx, num_channels_dir * hSpatParamRendCom->num_freq_bands ); - scale_sig32( h_dirac_output_synthesis_state->cy_auto_diff_smooth_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_len, tmp ); + tmp = L_norm_arr( h_dirac_output_synthesis_state->cy_auto_diff_smooth_fx, imult1616( num_channels_dir, hSpatParamRendCom->num_freq_bands ) ); + scale_sig32( h_dirac_output_synthesis_state->cy_auto_diff_smooth_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_len, tmp ); /* Q(h_dirac_output_synthesis_state->q_cy_auto_diff_smooth + tmp) */ h_dirac_output_synthesis_state->q_cy_auto_diff_smooth = add( h_dirac_output_synthesis_state->q_cy_auto_diff_smooth, tmp ); move16(); } @@ -15166,21 +15224,21 @@ static void intermidiate_ext_dirac_render( IF( hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_fx ) { tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_fx, 
hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_len ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_len, tmp ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_diff_smooth_prev_len, tmp ); /* Q(hDirACRend->h_output_synthesis_psd_state.q_cy_auto_diff_smooth_prev + tmp) */ hDirACRend->h_output_synthesis_psd_state.q_cy_auto_diff_smooth_prev = add( hDirACRend->h_output_synthesis_psd_state.q_cy_auto_diff_smooth_prev, tmp ); move16(); } - scale_sig32( hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_fx, hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_len, Q26 - hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_q ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_fx, hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_len, sub( Q26, hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_q ) ); /* Q26 */ hDirACRend->h_output_synthesis_psd_state.gains_dir_prev_q = Q26; move16(); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_fx, hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_len, Q26 - hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_q ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_fx, hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_len, sub( Q26, hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_q ) ); /* Q26 */ hDirACRend->h_output_synthesis_psd_state.gains_diff_prev_q = Q26; move16(); IF( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_fx ) { - Word16 shift = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_fx, hSpatParamRendCom->num_freq_bands * hDirACRend->num_outputs_dir ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_len, shift ); + Word16 shift = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_fx, imult1616( hSpatParamRendCom->num_freq_bands, hDirACRend->num_outputs_dir ) ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.cy_auto_dir_smooth_prev_len, shift ); /* Q(hDirACRend->h_output_synthesis_psd_state.q_cy_auto_dir_smooth_prev + shift) */ hDirACRend->h_output_synthesis_psd_state.q_cy_auto_dir_smooth_prev = add( hDirACRend->h_output_synthesis_psd_state.q_cy_auto_dir_smooth_prev, shift ); move16(); } @@ -15188,7 +15246,7 @@ static void intermidiate_ext_dirac_render( IF( EQ_16( hDirACRend->proto_signal_decorr_on, 1 ) ) { tmp = L_norm_arr( hDirACRend->h_freq_domain_decorr_ap_state->decorr_buffer_fx, hDirACRend->h_freq_domain_decorr_ap_state->decorr_buffer_len ); - scale_sig32( hDirACRend->h_freq_domain_decorr_ap_state->decorr_buffer_fx, hDirACRend->h_freq_domain_decorr_ap_state->decorr_buffer_len, tmp ); + scale_sig32( hDirACRend->h_freq_domain_decorr_ap_state->decorr_buffer_fx, hDirACRend->h_freq_domain_decorr_ap_state->decorr_buffer_len, tmp ); /* Q(hDirACRend->h_freq_domain_decorr_ap_state->q_decorr_buffer + tmp) */ hDirACRend->h_freq_domain_decorr_ap_state->q_decorr_buffer = add( hDirACRend->h_freq_domain_decorr_ap_state->q_decorr_buffer, tmp ); move16(); } @@ -15224,8 +15282,9 @@ static void intermidiate_ext_dirac_render( Word16 hr_exp = sub( 31, shift ); - Scale_sig32( 
hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_fx, hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_len, sub( sub( 31, hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_q ), hr_exp ) ); + Scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_fx, hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_len, sub( sub( 31, hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_q ), hr_exp ) ); /* Q(31 - hr_exp) */ hDirACRend->h_output_synthesis_psd_state.proto_diffuse_buffer_f_q = sub( 31, hr_exp ); + move16(); } FOR( slot_idx = 0; slot_idx < hSpatParamRendCom->subframe_nbslots[subframe_idx]; slot_idx++ ) @@ -15242,25 +15301,25 @@ static void intermidiate_ext_dirac_render( IF( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_fx ) { - tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_fx, hDirACRend->num_protos_dir * hSpatParamRendCom->num_freq_bands ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_len, tmp ); + tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_fx, imult1616( hDirACRend->num_protos_dir, hSpatParamRendCom->num_freq_bands ) ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_len, tmp ); /* Q(hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_q + tmp) */ hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_q = add( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_q, tmp ); move16(); - tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_fx, hDirACRend->num_protos_dir * hSpatParamRendCom->num_freq_bands ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_len, tmp ); + tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_fx, imult1616( hDirACRend->num_protos_dir, hSpatParamRendCom->num_freq_bands ) ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_len, tmp ); /* Q(hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_q + tmp) */ hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_q = add( tmp, hDirACRend->h_output_synthesis_psd_state.proto_power_smooth_prev_q ); move16(); } tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_len ); - scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_len, tmp ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_len, tmp ); /* Q(hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_q + tmp) */ hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_q = add( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_q, tmp ); move16(); IF( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_fx != 0 ) { tmp = L_norm_arr( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_len ); - scale_sig32( 
hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_len, tmp ); + scale_sig32( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_fx, hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_len, tmp ); /* Q(hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_q + tmp) */ hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_q = add( hDirACRend->h_output_synthesis_psd_state.proto_power_diff_smooth_prev_q, tmp ); move16(); } @@ -15272,7 +15331,7 @@ static void intermidiate_ext_dirac_render( /* CLDFB Analysis*/ FOR( ch = 0; ch < nchan_transport; ch++ ) { - scale_sig32( hMasaExtRend->cldfbAnaRend[ch]->cldfb_state_fx, hMasaExtRend->cldfbAnaRend[ch]->cldfb_size, sub( Q11, hMasaExtRend->cldfbAnaRend[0]->Q_cldfb_state ) ); + scale_sig32( hMasaExtRend->cldfbAnaRend[ch]->cldfb_state_fx, hMasaExtRend->cldfbAnaRend[ch]->cldfb_size, sub( Q11, hMasaExtRend->cldfbAnaRend[0]->Q_cldfb_state ) ); /* Q11 */ hMasaExtRend->cldfbAnaRend[ch]->Q_cldfb_state = Q11; move16(); } @@ -15280,7 +15339,7 @@ static void intermidiate_ext_dirac_render( FOR( ch = 0; ch < hDirACRend->hOutSetup.nchan_out_woLFE + hDirACRend->hOutSetup.num_lfe; ch++ ) { - scale_sig32( hMasaExtRend->cldfbSynRend[ch]->cldfb_state_fx, hMasaExtRend->cldfbSynRend[ch]->cldfb_size, sub( Q11, hMasaExtRend->cldfbSynRend[0]->Q_cldfb_state ) ); + scale_sig32( hMasaExtRend->cldfbSynRend[ch]->cldfb_state_fx, hMasaExtRend->cldfbSynRend[ch]->cldfb_size, sub( Q11, hMasaExtRend->cldfbSynRend[0]->Q_cldfb_state ) ); /* Q11 */ hMasaExtRend->cldfbSynRend[ch]->Q_cldfb_state = Q11; move16(); } diff --git a/lib_rend/lib_rend.h b/lib_rend/lib_rend.h index beb2af28948b8439e4d588b077eb12fb69924fd1..1d8c36cf5e5bf8163877f5ca3d03a350fa2525f6 100644 --- a/lib_rend/lib_rend.h +++ b/lib_rend/lib_rend.h @@ -133,12 +133,12 @@ typedef enum _IVAS_REND_COMPLEXITY_LEVEL #ifdef IVAS_FLOAT_FIXED ivas_error IVAS_REND_Open( - IVAS_REND_HANDLE *phIvasRend, /* i/o: Pointer to renderer handle */ - const Word32 outputSampleRate, /* i : output sampling rate */ - const IVAS_AUDIO_CONFIG outConfig, /* i : output audio config */ - const Word16 nonDiegeticPan, /* i : non-diegetic object flag */ - const Word32 nonDiegeticPanGain, /* i : non-diegetic panning gain */ - const Word16 num_subframes /* i : number of subframes */ + IVAS_REND_HANDLE *phIvasRend, /* i/o: Pointer to renderer handle */ + const Word32 outputSampleRate, /* i : output sampling rate */ + const IVAS_AUDIO_CONFIG outConfig, /* i : output audio config */ + const Word16 nonDiegeticPan, /* i : non-diegetic object flag */ + const Word32 nonDiegeticPanGain, /* i : non-diegetic panning gain Q31 */ + const Word16 num_subframes /* i : number of subframes */ ); #else ivas_error IVAS_REND_Open( @@ -149,7 +149,7 @@ ivas_error IVAS_REND_Open( const float nonDiegeticPanGain, /* i : non-diegetic panning gain */ const int16_t num_subframes /* i : number of subframes */ ); -#endif // IVAS_FLOAT_FIXED +#endif /* Note: this will reset custom LFE routings set for any MC input */ ivas_error IVAS_REND_ConfigureCustomOutputLoudspeakerLayout( @@ -160,30 +160,30 @@ ivas_error IVAS_REND_ConfigureCustomOutputLoudspeakerLayout( /* Functions to be called before/during rendering */ -#ifndef IVAS_FLOAT_FIXED +#ifdef IVAS_FLOAT_FIXED ivas_error IVAS_REND_NumOutChannels( IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */ - int16_t *numOutChannels /* o : number of output channels */ + Word16 
*numOutChannels /* o : number of output channels */ +); + +ivas_error IVAS_REND_AddInput_fx( + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_AUDIO_CONFIG inConfig, /* i : audio config for a new input */ + IVAS_REND_InputId *inputId /* o : ID of the new input */ ); #else ivas_error IVAS_REND_NumOutChannels( IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */ - Word16 *numOutChannels /* o : number of output channels */ + int16_t *numOutChannels /* o : number of output channels */ ); -#endif ivas_error IVAS_REND_AddInput( IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ const IVAS_AUDIO_CONFIG inConfig, /* i : audio config for a new input */ IVAS_REND_InputId *inputId /* o : ID of the new input */ ); -#ifdef IVAS_FLOAT_FIXED -ivas_error IVAS_REND_AddInput_fx( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_AUDIO_CONFIG inConfig, /* i : audio config for a new input */ - IVAS_REND_InputId *inputId /* o : ID of the new input */ -); #endif + /* Note: this will reset any custom LFE routing set for the input */ ivas_error IVAS_REND_ConfigureCustomInputLoudspeakerLayout( IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ @@ -191,24 +191,33 @@ ivas_error IVAS_REND_ConfigureCustomInputLoudspeakerLayout( const IVAS_CUSTOM_LS_DATA layout /* i : custom loudspeaker layout for input */ ); -ivas_error IVAS_REND_SetInputGain( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_REND_InputId inputId, /* i : ID of the input */ - const float gain /* i : linear gain (not in dB) */ -); #ifdef IVAS_FLOAT_FIXED ivas_error IVAS_REND_SetInputGain_fx( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_REND_InputId inputId, /* i : ID of the input */ - const Word32 gain /* i : linear gain (not in dB) */ + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_REND_InputId inputId, /* i : ID of the input */ + const Word32 gain /* i : linear gain (not in dB) Q30 */ ); ivas_error IVAS_REND_SetInputLfeMtx_fx( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_REND_InputId inputId, /* i : ID of the input */ - const IVAS_REND_LfePanMtx_fx *lfePanMtx /* i : LFE panning matrix */ + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_REND_InputId inputId, /* i : ID of the input */ + const IVAS_REND_LfePanMtx_fx *lfePanMtx /* i : LFE panning matrix */ +); + +ivas_error IVAS_REND_SetInputLfePos_fx( + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_REND_InputId inputId, /* i : ID of the input */ + const Word32 inputGain, /* i : Input gain to be applied to the LFE channel(s) Q31 */ + const Word16 outputAzimuth, /* i : Output azimuth position Q0 */ + const Word16 outputElevation /* i : Output elevation position Q0 */ +); +#else +ivas_error IVAS_REND_SetInputGain( + IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ + const IVAS_REND_InputId inputId, /* i : ID of the input */ + const float gain /* i : linear gain (not in dB) */ ); -#endif // IVAS_FLOAT_FIXED + ivas_error IVAS_REND_SetInputLfeMtx( IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ const IVAS_REND_InputId inputId, /* i : ID of the input */ @@ -222,15 +231,8 @@ ivas_error IVAS_REND_SetInputLfePos( const float outputAzimuth, /* i : Output azimuth position */ const float outputElevation /* i : Output elevation position */ ); -#ifdef IVAS_FLOAT_FIXED -ivas_error IVAS_REND_SetInputLfePos_fx( - IVAS_REND_HANDLE hIvasRend, /* i/o: Renderer handle */ - const IVAS_REND_InputId inputId, /* i : ID of the input 
*/
-    const Word32 inputGain,           /* i : Input gain to be applied to the LFE channel(s) */
-    const Word16 outputAzimuth,       /* i : Output azimuth position */
-    const Word16 outputElevation      /* i : Output elevation position */
-);
-#endif // IVAS_FLOAT_FIXED
+#endif
+
 ivas_error IVAS_REND_RemoveInput(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
     const IVAS_REND_InputId inputId   /* i : ID of the input */
@@ -240,7 +242,13 @@ ivas_error IVAS_REND_RemoveInput(
 ivas_error IVAS_REND_GetInputNumChannels(
     IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */
     const IVAS_REND_InputId inputId,  /* i : ID of the input */
-    Word16 *numChannels /* o : number of channels of the input */
+    Word16 *numChannels               /* o : number of channels of the input */
+);
+
+ivas_error IVAS_REND_GetDelay_fx(
+    IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer state */
+    Word16 *nSamples,                 /* o : Renderer delay in samples */
+    Word32 *timeScale                 /* o : Time scale of the delay, equal to renderer output sampling rate */
 );
 #else
 ivas_error IVAS_REND_GetInputNumChannels(
@@ -248,46 +256,41 @@ ivas_error IVAS_REND_GetInputNumChannels(
     const IVAS_REND_InputId inputId,  /* i : ID of the input */
     int16_t *numChannels              /* o : number of channels of the input */
 );
-#endif
-#ifndef IVAS_FLOAT_FIXED
 ivas_error IVAS_REND_GetDelay(
     IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */
     int16_t *nSamples,                /* o : Renderer delay in samples */
     int32_t *timeScale                /* o : Time scale of the delay, equal to renderer output sampling rate */
 );
-#else
-ivas_error IVAS_REND_GetDelay(
-    IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */
-    Word16 *nSamples,                 /* o : Renderer delay in samples */
-    Word32 *timeScale                 /* o : Time scale of the delay, equal to renderer output sampling rate */
-);
 #endif
-#ifdef IVAS_FLOAT_FIXED
-ivas_error IVAS_REND_GetDelay_fx(
-    IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer state */
-    Word16 *nSamples,                 /* o : Renderer delay in samples */
-    Word32 *timeScale                 /* o : Time scale of the delay, equal to renderer output sampling rate */
-);
-#endif // IVAS_FLOAT_FIXED
-
 /* Functions to be called during rendering */
-ivas_error IVAS_REND_FeedInputAudio(
+#ifdef IVAS_FLOAT_FIXED
+ivas_error IVAS_REND_FeedInputAudio_fx(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
     const IVAS_REND_InputId inputId,  /* i : ID of the input */
     const IVAS_REND_ReadOnlyAudioBuffer inputAudio /* i : buffer with input audio */
 );
-#ifdef IVAS_FLOAT_FIXED
-ivas_error IVAS_REND_FeedInputAudio_fx(
+ivas_error IVAS_REND_FeedInputObjectMetadata(
+    IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
+    const IVAS_REND_InputId inputId,  /* i : ID of the input */
+    const IVAS_ISM_METADATA objectPosition /* i : object position struct */
+);
+
+ivas_error IVAS_REND_FeedInputObjectMetadataToOMasa(
+    IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
+    const Word16 inputIndex,          /* i : Index of the input */
+    const IVAS_ISM_METADATA objectPosition /* i : object position struct */
+);
+#else
+ivas_error IVAS_REND_FeedInputAudio(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
     const IVAS_REND_InputId inputId,  /* i : ID of the input */
     const IVAS_REND_ReadOnlyAudioBuffer inputAudio /* i : buffer with input audio */
 );
-#endif
 ivas_error IVAS_REND_FeedInputObjectMetadata(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
     const IVAS_REND_InputId inputId,  /* i : ID of the input */
@@ -300,6 +309,9 @@ ivas_error IVAS_REND_FeedInputObjectMetadataToOMasa(
     const IVAS_ISM_METADATA objectPosition /* i : object position struct */
 );
+#endif
+
+
 ivas_error IVAS_REND_FeedInputMasaMetadata(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
     const IVAS_REND_InputId inputId,  /* i : ID of the input */
@@ -311,45 +328,40 @@ ivas_error IVAS_REND_InitConfig(
     const IVAS_AUDIO_CONFIG outAudioConfig /* i : output audioConfig */
 );
-#ifndef IVAS_FLOAT_FIXED
-int16_t IVAS_REND_GetRenderConfig(
-    IVAS_REND_HANDLE hIvasRend,       /* i/o: IVAS decoder handle */
-    const IVAS_RENDER_CONFIG_HANDLE hRCout /* o : Render configuration handle */
-);
-#else
+#ifdef IVAS_FLOAT_FIXED
 Word16 IVAS_REND_GetRenderConfig(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: IVAS decoder handle */
     const IVAS_RENDER_CONFIG_HANDLE hRCout /* o : Render configuration handle */
 );
-#endif
-#ifndef IVAS_FLOAT_FIXED
-int16_t IVAS_REND_FeedRenderConfig(
+Word16 IVAS_REND_FeedRenderConfig(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: IVAS decoder handle */
     const IVAS_RENDER_CONFIG_DATA renderConfig /* i : Render configuration struct */
 );
+
+ivas_error IVAS_REND_SetHeadRotation(
+    IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
+    const IVAS_QUATERNION headRot,    /* i : head orientations for next rendering call */
+    const IVAS_VECTOR3 Pos,           /* i : listener positions for next rendering call */
+    const Word16 sf_idx               /* i : subframe index */
+);
 #else
-Word16 IVAS_REND_FeedRenderConfig(
+int16_t IVAS_REND_GetRenderConfig(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: IVAS decoder handle */
-    const IVAS_RENDER_CONFIG_DATA renderConfig /* i : Render configuration struct */
+    const IVAS_RENDER_CONFIG_HANDLE hRCout /* o : Render configuration handle */
 );
-#endif
+int16_t IVAS_REND_FeedRenderConfig(
+    IVAS_REND_HANDLE hIvasRend,       /* i/o: IVAS decoder handle */
+    const IVAS_RENDER_CONFIG_DATA renderConfig /* i : Render configuration struct */
 );
-#ifndef IVAS_FLOAT_FIXED
 ivas_error IVAS_REND_SetHeadRotation(
     IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
     const IVAS_QUATERNION headRot,    /* i : head orientations for next rendering call */
     const IVAS_VECTOR3 Pos,           /* i : listener positions for next rendering call */
     const int16_t sf_idx              /* i : subframe index */
 );
-#else
-ivas_error IVAS_REND_SetHeadRotation(
-    IVAS_REND_HANDLE hIvasRend,       /* i/o: Renderer handle */
-    const IVAS_QUATERNION headRot,    /* i : head orientations for next rendering call */
-    const IVAS_VECTOR3 Pos,           /* i : listener positions for next rendering call */
-    const Word16 sf_idx               /* i : subframe index */
-);
 #endif
 /* Head rotation becomes enabled by calling IVAS_REND_SetHeadRotation. Use this to disable.
*/ @@ -433,31 +445,27 @@ ivas_error IVAS_REND_SetTotalNumberOfObjects( IVAS_REND_HANDLE hIvasRend, /* i/o: IVAS renderer handle */ const UWord16 total_num_objects /* i : total number of objects */ ); -#else -ivas_error IVAS_REND_SetTotalNumberOfObjects( - IVAS_REND_HANDLE hIvasRend, /* i/o: IVAS renderer handle */ - const uint16_t total_num_objects /* i : total number of objects */ -); -#endif -#ifdef IVAS_FLOAT_FIXED ivas_error IVAS_REND_SetIsmMetadataDelay( IVAS_REND_HANDLE hIvasRend, /* i/o: IVAS renderer handle */ const Word32 sync_md_delay /* i : Metadata Delay in ms to sync with audio delay */ ); -#else -ivas_error IVAS_REND_SetIsmMetadataDelay( - IVAS_REND_HANDLE hIvasRend, /* i/o: IVAS renderer handle */ - const float sync_md_delay /* i : Metadata Delay in ms to sync with audio delay */ -); -#endif -#ifdef IVAS_FLOAT_FIXED ivas_error IVAS_REND_GetNumAllObjects( IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */ Word16 *numChannels /* o : number of all objects */ ); #else +ivas_error IVAS_REND_SetTotalNumberOfObjects( + IVAS_REND_HANDLE hIvasRend, /* i/o: IVAS renderer handle */ + const uint16_t total_num_objects /* i : total number of objects */ +); + +ivas_error IVAS_REND_SetIsmMetadataDelay( + IVAS_REND_HANDLE hIvasRend, /* i/o: IVAS renderer handle */ + const float sync_md_delay /* i : Metadata Delay in ms to sync with audio delay */ +); + ivas_error IVAS_REND_GetNumAllObjects( IVAS_REND_CONST_HANDLE hIvasRend, /* i : Renderer handle */ int16_t *numChannels /* o : number of all objects */