diff --git a/lib_com/options.h b/lib_com/options.h
index 0ccc660258a6232ed5925c92ff299778cf05c362..3e149ee2ff5b75fbdfd7f4fce1bc81365ca6476d 100644
--- a/lib_com/options.h
+++ b/lib_com/options.h
@@ -155,6 +155,7 @@
 /*#define FIX_I4_OL_PITCH*/ /* fix open-loop pitch used for EVS core switching */
 
 #define SPLIT_REND_WITH_HEAD_ROT /* Dlb,FhG: Split Rendering contributions 21 and 35 */
+#define SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
 
 #define FIX_NUM_SUBFRAME_UPDATE
diff --git a/lib_isar/isar_cnst.h b/lib_isar/isar_cnst.h
index c9391e37bb12e3e7a29ebaf296c4f92cf3ff6e2a..aa475a9c9a7506d80c801d0ebd334de6ba4ebfef 100644
--- a/lib_isar/isar_cnst.h
+++ b/lib_isar/isar_cnst.h
@@ -75,7 +75,12 @@ typedef enum
 #define MAX_SPLIT_REND_MD_BANDS 20
 #define MAX_SPLIT_MD_SUBFRAMES 1
 #define COMPLEX_MD_BAND_THRESH MAX_SPLIT_REND_MD_BANDS
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+#define COMPLEX_MD_BAND_THRESH_LOW 4
+#define COMPLEX_MD_BAND_THRESH_HIGH 10
+#else
 #define COMPLEX_MD_BAND_THRESH_LOW 5
+#endif
 #define SPLIT_REND_RO_MD_BAND_THRESH 4
 #define ISAR_SPLIT_REND_NUM_QUANT_STRATS 4
 
@@ -108,6 +113,7 @@ typedef enum
 #define ISAR_SPLIT_REND_DOF_BITS 2
 #define ISAR_SPLIT_REND_HQ_MODE_BITS 1
 #define ISAR_SPLIT_REND_ROT_AXIS_BITS 3
+#define ISAR_SPLIT_REND_RO_FLAG_BITS 1
 
 /*----------------------------------------------------------------------------------*
  * Split rendering bitrate constants
diff --git a/lib_isar/isar_prot.h b/lib_isar/isar_prot.h
index 3ff35ff73efa014d89ed957d8ec247fec4febd27..c5842bab7593a330e1cd7d2cda8670cb6a092e08 100644
--- a/lib_isar/isar_prot.h
+++ b/lib_isar/isar_prot.h
@@ -200,8 +200,15 @@ void isar_split_rend_get_quant_params(
     int16_t bands_pitch[ISAR_SPLIT_REND_NUM_QUANT_STRATS],
     int16_t pred_real_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS],
     int16_t pred_imag_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS],
-    int16_t *num_quant_strats,
-    int16_t *num_complex_bands );
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    const int16_t ro_flag,
+#endif
+    int16_t *num_quant_strats
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    ,
+    int16_t *num_complex_bands
+#endif
+);
 
 void isar_splitBinPostRendMdDec(
     ISAR_SPLIT_REND_BITS_HANDLE pBits,
@@ -269,6 +276,20 @@ void isar_renderSplitGetMultiBinPoseData(
     MULTI_BIN_REND_POSE_DATA *pMultiBinPoseData,
     const ISAR_SPLIT_REND_ROT_AXIS rot_axis );
 
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+int16_t isar_renderSplitGetRot_axisNumBits(
+    const int16_t dof );
+
+ISAR_SPLIT_REND_ROT_AXIS isar_renderSplitGetRot_axisFromCode(
+    const int16_t dof,
+    const int16_t code );
+
+int16_t isar_renderSplitGetCodeFromRot_axis(
+    const int16_t dof,
+    const ISAR_SPLIT_REND_ROT_AXIS rot_axis,
+    int16_t *num_bits );
+#endif
+
 void isar_init_split_post_rend_handles(
     ISAR_SPLIT_POST_REND_WRAPPER *hSplitRendWrapper );
diff --git a/lib_isar/isar_splitRendererPost.c b/lib_isar/isar_splitRendererPost.c
index 495caee2336037efd0197cedff0238a9c2444bf4..6ea2d2539f450e4fefa0aef920ecc18c29abbf43 100644
--- a/lib_isar/isar_splitRendererPost.c
+++ b/lib_isar/isar_splitRendererPost.c
@@ -242,7 +242,7 @@ static void isar_split_rend_unquant_md(
     float quantstep;
 
     quantstep = pred_quant_step;
-
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
     for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
     {
         for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
@@ -251,8 +251,20 @@ static void isar_split_rend_unquant_md(
             hMd->pred_mat_re[ch1][ch2] = hMd->pred_mat_re[ch1][ch2] + fix_pos_rot_mat[ch1][ch2];
         }
     }
+#endif
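+    /* With SPLIT_REND_POSE_CORRECTION_UNUSED_BITS the fixed offset is applied per
+       branch below: complex bands add the fixed-rotation offset fix_pos_rot_mat,
+       while real-only bands are re-centred around the identity ( 1.0 on the
+       diagonal ), mirroring the encoder-side isar_split_rend_quant_md(). */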
     if ( real_only )
     {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+        for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+        {
+            for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+            {
+                hMd->pred_mat_re[ch1][ch2] = hMd->pred_mat_re_idx[ch1][ch2] * quantstep;
+                hMd->pred_mat_re[ch1][ch2] = hMd->pred_mat_re[ch1][ch2] + ( ( ch1 == ch2 ) ? 1.0f : 0.0f );
+            }
+        }
+#endif
+
         for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
         {
             for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
@@ -263,6 +275,17 @@
     }
     else
     {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+        for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+        {
+            for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+            {
+                hMd->pred_mat_re[ch1][ch2] = hMd->pred_mat_re_idx[ch1][ch2] * quantstep;
+                hMd->pred_mat_re[ch1][ch2] = hMd->pred_mat_re[ch1][ch2] + fix_pos_rot_mat[ch1][ch2];
+            }
+        }
+#endif
+
         for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
         {
             for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
@@ -348,6 +371,7 @@
     {
         if ( hBinHrSplitPostRend->pose_type[pos_idx] == ANY_YAW )
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
@@ -372,6 +396,40 @@
                     }
                 }
             }
+#else
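+            /* Band layout under SPLIT_REND_POSE_CORRECTION_UNUSED_BITS: the first
+               pred_imag_bands_yaw bands carry full 2x2 real and imaginary index
+               matrices; the remaining bands up to pred_real_bands_yaw carry only
+               the two diagonal real gains, with the off-diagonals fixed to zero. */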
+            for ( b = 0; b < pred_imag_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, pred_code_len );
+                        hMd->pred_mat_re_idx[ch1][ch2] = code + min_pred_idx;
+                    }
+                }
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, pred_code_len );
+                        hMd->pred_mat_im_idx[ch1][ch2] = code + min_pred_idx;
+                    }
+                }
+            }
+
+            for ( ; b < pred_real_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, pred_code_len );
+                    hMd->pred_mat_re_idx[ch1][ch1] = code + min_pred_idx;
+                }
+                hMd->pred_mat_re_idx[0][1] = 0;
+                hMd->pred_mat_re_idx[1][0] = 0;
+            }
+#endif
             for ( b = 0; b < d_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
@@ -392,6 +450,7 @@
         }
         else
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_roll; b++ )
             {
                 hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
@@ -416,6 +475,40 @@
                     }
                 }
             }
+#else
+            for ( b = 0; b < pred_imag_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, pred_roll_code_len );
+                        hMd->pred_mat_re_idx[ch1][ch2] = code + min_pred_roll_idx;
+                    }
+                }
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, pred_roll_code_len );
+                        hMd->pred_mat_im_idx[ch1][ch2] = code + min_pred_roll_idx;
+                    }
+                }
+            }
+
+            for ( ; b < pred_real_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, pred_roll_code_len );
+                    hMd->pred_mat_re_idx[ch1][ch1] = code + min_pred_roll_idx;
+                }
+                hMd->pred_mat_re_idx[0][1] = 0;
+                hMd->pred_mat_re_idx[1][0] = 0;
+            }
+#endif
         }
     }
 }
@@ -473,6 +566,7 @@
     {
         if ( hBinHrSplitPostRend->pose_type[pos_idx] == ANY_YAW )
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
@@ -497,6 +591,39 @@
                 }
                 isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_im_idx, -1, min_pred_idx, max_pred_idx );
             }
+#else
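+            /* Huffman-coded variant of the same band layout: complex bands decode
+               full real and imaginary matrices as diagonal-referenced differences
+               via isar_SplitRenderer_getdiagdiff(); real-only bands decode only
+               the two diagonal symbols. */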
+            for ( b = 0; b < pred_imag_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        sym_adj_idx[ch1][ch2] = isar_split_rend_huffman_decode_opt( &pHuff_cfg->pred[pred_cb_idx], pBits, pHuff_cfg->pred_idx_trav[pred_cb_idx] );
+                    }
+                }
+                isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_re_idx, 1, min_pred_idx, max_pred_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        sym_adj_idx[ch1][ch2] = isar_split_rend_huffman_decode_opt( &pHuff_cfg->pred[pred_cb_idx], pBits, pHuff_cfg->pred_idx_trav[pred_cb_idx] );
+                    }
+                }
+                isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_im_idx, -1, min_pred_idx, max_pred_idx );
+            }
+            for ( ; b < pred_real_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    sym_adj_idx[ch1][ch1] = isar_split_rend_huffman_decode_opt( &pHuff_cfg->pred[pred_cb_idx], pBits, pHuff_cfg->pred_idx_trav[pred_cb_idx] );
+                }
+                sym_adj_idx[1][0] = 0;
+                sym_adj_idx[0][1] = 0;
+                isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_re_idx, -1, min_pred_idx, max_pred_idx );
+            }
+#endif
             for ( b = 0; b < d_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
@@ -515,6 +642,7 @@
         }
         else
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_roll; b++ )
             {
                 hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
@@ -540,6 +668,42 @@
                 }
                 isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_im_idx, -1, min_pred_roll_idx, max_pred_roll_idx );
             }
+#else
+            for ( b = 0; b < pred_imag_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        sym_adj_idx[ch1][ch2] = isar_split_rend_huffman_decode_opt( &pHuff_cfg->pred_roll, pBits, pHuff_cfg->pred_roll_idx_trav );
+                    }
+                }
+                isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_re_idx, 1, min_pred_roll_idx, max_pred_roll_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        sym_adj_idx[ch1][ch2] = isar_split_rend_huffman_decode_opt( &pHuff_cfg->pred_roll, pBits, pHuff_cfg->pred_roll_idx_trav );
+                    }
+                }
+                isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_im_idx, -1, min_pred_roll_idx, max_pred_roll_idx );
+            }
+
+            for ( ; b < pred_real_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPostRend->rot_md[pos_idx][sf_idx][b];
+
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    sym_adj_idx[ch1][ch1] = isar_split_rend_huffman_decode_opt( &pHuff_cfg->pred_roll, pBits, pHuff_cfg->pred_roll_idx_trav );
+                }
+                sym_adj_idx[1][0] = 0;
+                sym_adj_idx[0][1] = 0;
+                isar_SplitRenderer_getdiagdiff( sym_adj_idx, hMd->pred_mat_re_idx, -1, min_pred_roll_idx, max_pred_roll_idx );
+            }
+#endif
         }
     }
 }
@@ -568,7 +732,11 @@ void isar_splitBinPostRendMdDec(
     int16_t pred_real_bands_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS], pred_real_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
     int16_t pred_imag_bands_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS], pred_imag_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
     int16_t d_bands_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS], bands_pitch[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    int16_t num_quant_strats;
+#else
     int16_t num_complex_bands, num_quant_strats;
+#endif
     int32_t quant_strat_bits, is_huff_coding, quant_strat;
     int16_t pred_quant_pnts_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
     float pred_1byquantstep_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
@@ -579,12 +747,30 @@ void isar_splitBinPostRendMdDec(
     ISAR_BIN_HR_SPLIT_REND_MD_HANDLE hMd;
     ISAR_SPLIT_REND_CONFIG_DATA split_rend_config;
     ISAR_SPLIT_REND_ROT_AXIS rot_axis;
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    int16_t ro_md_flag, num_bits, axis_code;
+#endif
 
     hBinHrSplitPostRend->low_Res = 1;
 
     split_rend_config.dof = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, ISAR_SPLIT_REND_DOF_BITS );
     split_rend_config.hq_mode = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, ISAR_SPLIT_REND_HQ_MODE_BITS );
+
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    num_bits = isar_renderSplitGetRot_axisNumBits( split_rend_config.dof );
+    if ( num_bits > 0 )
+    {
+        axis_code = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, (int32_t) num_bits );
+    }
+    else
+    {
+        axis_code = 0;
+    }
+    rot_axis = isar_renderSplitGetRot_axisFromCode( split_rend_config.dof, axis_code );
+    ro_md_flag = (int16_t) ISAR_SPLIT_REND_BITStream_read_int32( pBits, ISAR_SPLIT_REND_RO_FLAG_BITS );
+#else
     rot_axis = (ISAR_SPLIT_REND_ROT_AXIS) ISAR_SPLIT_REND_BITStream_read_int32( pBits, ISAR_SPLIT_REND_ROT_AXIS_BITS );
+#endif
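+    /* New header layout: the fixed 3-bit rot_axis field is replaced by a
+       dof-dependent axis code ( 2 bits for 1 and 2 DOF, none for 3 DOF ) plus a
+       1-bit real-only-metadata flag, reclaiming previously unused bit patterns. */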
 
     isar_renderSplitGetMultiBinPoseData( &split_rend_config, pMultiBinPoseData, rot_axis );
 
@@ -611,6 +797,21 @@ void isar_splitBinPostRendMdDec(
         hBinHrSplitPostRend->QuaternionsPre[sf_idx].z = (float) angle;
     }
 
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    isar_split_rend_get_quant_params(
+        MAX_SPLIT_REND_MD_BANDS,
+        pred_real_bands_yaw,
+        pred_imag_bands_yaw,
+        pred_quant_pnts_yaw,
+        pred_quantstep_yaw,
+        pred_1byquantstep_yaw,
+        d_bands_yaw,
+        bands_pitch,
+        pred_real_bands_roll,
+        pred_imag_bands_roll,
+        ro_md_flag,
+        &num_quant_strats );
+#else
     isar_split_rend_get_quant_params(
         MAX_SPLIT_REND_MD_BANDS,
         pred_real_bands_yaw,
@@ -624,6 +825,7 @@
         pred_imag_bands_roll,
         &num_quant_strats,
         &num_complex_bands );
+#endif
 
     quant_strat_bits = (int32_t) ceilf( log2f( num_quant_strats ) );
     is_huff_coding = ISAR_SPLIT_REND_BITStream_read_int32( pBits, 1 );
diff --git a/lib_isar/isar_splitRendererPre.c b/lib_isar/isar_splitRendererPre.c
index 56e4b6b1e078cbdf5db3800b709cd8aee92cacaa..be899732246ca8d82b45dd2bdd8201fcbbdc8932 100644
--- a/lib_isar/isar_splitRendererPre.c
+++ b/lib_isar/isar_splitRendererPre.c
@@ -482,7 +482,11 @@ static void isar_split_rend_quant_md(
 {
     int16_t ch1, ch2;
     int16_t gd_idx_min;
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    float quant_val;
+#else
     float sign, quant_val;
+#endif
 
     if ( pose_type == PRED_ONLY || pose_type == PRED_ROLL_ONLY )
     {
@@ -491,6 +495,24 @@ static void isar_split_rend_quant_md(
         onebyquantstep = pred_1byquantstep;
         if ( real_only == 1 )
         {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+            for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+            {
+                hMd->pred_mat_re[ch1][ch1] = hMd->pred_mat_re2[ch1];
+            }
+            hMd->pred_mat_re[1][0] = 0.0f;
+            hMd->pred_mat_re[0][1] = 0.0f;
+
+            for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+            {
+                for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                {
+                    quant_val = hMd->pred_mat_re[ch1][ch2] - ( ( ch1 == ch2 ) ? 1.0f : 0.0f );
+                    quant_val = min( ISAR_SPLIT_REND_PRED_MAX_VAL, max( quant_val, ISAR_SPLIT_REND_PRED_MIN_VAL ) );
+                    hMd->pred_mat_re_idx[ch1][ch2] = (int16_t) roundf( onebyquantstep * quant_val );
+                }
+            }
+#else
             for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
             {
                 for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
@@ -501,17 +523,24 @@ static void isar_split_rend_quant_md(
                     hMd->pred_mat_im[ch1][ch2] = 0.0f;
                 }
             }
+#endif
         }
-
-        for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+        else
         {
-            for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+#endif
+            for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
             {
-                quant_val = hMd->pred_mat_re[ch1][ch2] - fix_pos_rot_mat[ch1][ch2];
-                quant_val = min( ISAR_SPLIT_REND_PRED_MAX_VAL, max( quant_val, ISAR_SPLIT_REND_PRED_MIN_VAL ) );
-                hMd->pred_mat_re_idx[ch1][ch2] = (int16_t) roundf( onebyquantstep * quant_val );
+                for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                {
+                    quant_val = hMd->pred_mat_re[ch1][ch2] - fix_pos_rot_mat[ch1][ch2];
+                    quant_val = min( ISAR_SPLIT_REND_PRED_MAX_VAL, max( quant_val, ISAR_SPLIT_REND_PRED_MIN_VAL ) );
+                    hMd->pred_mat_re_idx[ch1][ch2] = (int16_t) roundf( onebyquantstep * quant_val );
+                }
             }
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
         }
+#endif
 
         if ( real_only == 0 )
         {
@@ -548,6 +577,28 @@
     return;
 }
 
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
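+/* Per-channel L/R energy-matching gains: gains[i] = sqrtf( cov_out[i][i] / cov_in[i][i] ),
+   with a fallback to 1.0 when the input energy is below EPSILON; factored out so
+   that ComputeCoeffs() can reuse it on the pitch-only and real-only paths. */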
+static void get_lr_gains( float cov_in[][BINAURAL_CHANNELS],
+                          float cov_out[][BINAURAL_CHANNELS],
+                          float gains[BINAURAL_CHANNELS] )
+{
+    int16_t i;
+    for ( i = 0; i < BINAURAL_CHANNELS; i++ )
+    {
+        gains[i] = cov_in[i][i];
+        if ( gains[i] < EPSILON )
+        {
+            gains[i] = 1.0f;
+        }
+        else
+        {
+            gains[i] = ( cov_out[i][i] ) / gains[i];
+            gains[i] = sqrtf( gains[i] );
+        }
+    }
+    return;
+}
+#endif
 
 static void ComputeCoeffs(
     float cov_ii_re[][BINAURAL_CHANNELS],
@@ -571,7 +622,9 @@ static void ComputeCoeffs(
     if ( pose_type == PITCH_ONLY )
     {
         float gd_tmp[BINAURAL_CHANNELS];
-
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+        get_lr_gains( cov_ii_re, cov_oo_re, gd_tmp );
+#else
         for ( i = 0; i < BINAURAL_CHANNELS; i++ )
         {
             gd_tmp[i] = cov_ii_re[i][i];
@@ -585,6 +638,7 @@
                 gd_tmp[i] = sqrtf( gd_tmp[i] );
             }
         }
+#endif
         hMd->gd = gd_tmp[0];
         hMd->gd2 = gd_tmp[1];
     }
@@ -593,7 +647,15 @@ static void ComputeCoeffs(
         if ( real_only )
        {
             float gd_tmp[BINAURAL_CHANNELS];
-
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+            get_lr_gains( cov_ii_re, cov_oo_re, gd_tmp );
+            for ( i = 0; i < BINAURAL_CHANNELS; i++ )
+            {
+                hMd->pred_mat_re[i][i] = gd_tmp[i];
+                hMd->pred_mat_re2[i] = gd_tmp[i];
+                set_zero( hMd->pred_mat_im[i], BINAURAL_CHANNELS );
+            }
+#else
             for ( i = 0; i < BINAURAL_CHANNELS; i++ )
             {
                 gd_tmp[i] = cov_ii_re[i][i];
@@ -609,11 +671,15 @@
                 hMd->pred_mat_re[i][i] = gd_tmp[i];
                 set_zero( hMd->pred_mat_im[i], BINAURAL_CHANNELS );
             }
+#endif
             hMd->pred_mat_re[1][0] = 0.0f;
             hMd->pred_mat_re[0][1] = 0.0f;
         }
         else
         {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+            get_lr_gains( cov_ii_re, cov_oo_re, hMd->pred_mat_re2 );
+#endif
             cov_norm_fact = GetNormFact( cov_ii_re, cov_ii_im, cov_io_re, cov_io_im, cov_oo_re );
 
             /* normalize the covariance */
@@ -733,8 +799,14 @@ static void get_base2_bits(
         pose_type = hBinHrSplitPreRend->pose_type[pose_idx];
         if ( pose_type == ANY_YAW )
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             base2bits[q] += pred_yaw_bits[q] * pred_real_bands_yaw[q] * num_subframes * BINAURAL_CHANNELS * BINAURAL_CHANNELS;
             base2bits[q] += pred_yaw_bits[q] * pred_imag_bands_yaw[q] * num_subframes * BINAURAL_CHANNELS * BINAURAL_CHANNELS;
+#else
+            base2bits[q] += pred_yaw_bits[q] * pred_real_bands_yaw[q] * num_subframes * BINAURAL_CHANNELS;
+            base2bits[q] += pred_yaw_bits[q] * pred_imag_bands_yaw[q] * num_subframes * BINAURAL_CHANNELS;
+            base2bits[q] += pred_yaw_bits[q] * pred_imag_bands_yaw[q] * num_subframes * BINAURAL_CHANNELS * BINAURAL_CHANNELS;
+#endif
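+            /* Every coded yaw band sends the BINAURAL_CHANNELS diagonal real
+               gains; the first pred_imag_bands_yaw bands additionally send the
+               real off-diagonals and the full imaginary matrix. */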
             base2bits[q] += d_gain_bits * d_bands_yaw[q] * num_subframes;
         }
         else if ( pose_type == PITCH_ONLY )
         {
@@ -744,8 +816,14 @@ static void get_base2_bits(
         }
         else
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             base2bits[q] += pred_roll_bits * pred_real_bands_roll[q] * num_subframes * BINAURAL_CHANNELS * BINAURAL_CHANNELS;
             base2bits[q] += pred_roll_bits * pred_imag_bands_roll[q] * num_subframes * BINAURAL_CHANNELS * BINAURAL_CHANNELS;
+#else
+            base2bits[q] += pred_roll_bits * pred_real_bands_roll[q] * num_subframes * BINAURAL_CHANNELS;
+            base2bits[q] += pred_roll_bits * pred_imag_bands_roll[q] * num_subframes * BINAURAL_CHANNELS;
+            base2bits[q] += pred_roll_bits * pred_imag_bands_roll[q] * num_subframes * BINAURAL_CHANNELS * BINAURAL_CHANNELS;
+#endif
         }
     }
 }
@@ -802,6 +880,7 @@ static void isar_SplitRenderer_code_md_base2(
     {
         if ( hBinHrSplitPreRend->pose_type[pos_idx] == ANY_YAW )
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
@@ -827,6 +906,40 @@
                     }
                 }
             }
+#else
+
+            for ( b = 0; b < pred_imag_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = hMd->pred_mat_re_idx[ch1][ch2] - min_pred_idx;
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, pred_code_len );
+                    }
+                }
+
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = hMd->pred_mat_im_idx[ch1][ch2] - min_pred_idx;
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, pred_code_len );
+                    }
+                }
+            }
+
+            for ( ; b < pred_real_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    code = hMd->pred_mat_re_idx[ch1][ch1] - min_pred_idx;
+                    ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, pred_code_len );
+                }
+            }
+#endif
             for ( b = 0; b < d_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
@@ -848,6 +961,7 @@
         }
         else
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_roll; b++ )
             {
                 hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
@@ -873,6 +987,38 @@
                     }
                 }
             }
+#else
+            for ( b = 0; b < pred_imag_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = hMd->pred_mat_re_idx[ch1][ch2] - min_pred_roll_idx;
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, pred_roll_code_len );
+                    }
+                }
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        code = hMd->pred_mat_im_idx[ch1][ch2] - min_pred_roll_idx;
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, pred_roll_code_len );
+                    }
+                }
+            }
+
+            for ( ; b < pred_real_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    code = hMd->pred_mat_re_idx[ch1][ch1] - min_pred_roll_idx;
+                    ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, pred_roll_code_len );
+                }
+            }
+#endif
         }
     }
 }
@@ -945,6 +1091,7 @@ static void isar_SplitRenderer_code_md_huff(
     {
         if ( hBinHrSplitPreRend->pose_type[pos_idx] == ANY_YAW )
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
@@ -973,6 +1120,42 @@
                    }
                }
            }
+#else
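+            /* Encoder-side mirror of isar_splitBinPostRendMdHuffDec(): complex
+               bands Huffman-code the full real and imaginary index matrices as
+               diagonal-referenced differences; real-only bands code only the two
+               diagonal symbols. */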
+            for ( b = 0; b < pred_imag_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                isar_SplitRenderer_getdiagdiff( hMd->pred_mat_re_idx, sym_adj_idx, -1, min_pred_idx, max_pred_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        isar_split_rend_huffman_encode( &pHuff_cfg->pred[pred_cb_idx], sym_adj_idx[ch1][ch2], &code, &len );
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, len );
+                    }
+                }
+
+                isar_SplitRenderer_getdiagdiff( hMd->pred_mat_im_idx, sym_adj_idx, 1, min_pred_idx, max_pred_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        isar_split_rend_huffman_encode( &pHuff_cfg->pred[pred_cb_idx], sym_adj_idx[ch1][ch2], &code, &len );
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, len );
+                    }
+                }
+            }
+
+            for ( ; b < pred_real_bands_yaw; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                isar_SplitRenderer_getdiagdiff( hMd->pred_mat_re_idx, sym_adj_idx, 1, min_pred_idx, max_pred_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    isar_split_rend_huffman_encode( &pHuff_cfg->pred[pred_cb_idx], sym_adj_idx[ch1][ch1], &code, &len );
+                    ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, len );
+                }
+            }
+#endif
             for ( b = 0; b < d_bands_yaw; b++ )
             {
                 hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
@@ -994,6 +1177,7 @@
         }
         else
         {
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
             for ( b = 0; b < pred_real_bands_roll; b++ )
             {
                 hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
@@ -1020,6 +1204,42 @@
                     }
                 }
             }
+#else
+            for ( b = 0; b < pred_imag_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                isar_SplitRenderer_getdiagdiff( hMd->pred_mat_re_idx, sym_adj_idx, -1, min_pred_roll_idx, max_pred_roll_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        isar_split_rend_huffman_encode( &pHuff_cfg->pred_roll, sym_adj_idx[ch1][ch2], &code, &len );
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, len );
+                    }
+                }
+
+                isar_SplitRenderer_getdiagdiff( hMd->pred_mat_im_idx, sym_adj_idx, 1, min_pred_roll_idx, max_pred_roll_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    for ( ch2 = 0; ch2 < BINAURAL_CHANNELS; ch2++ )
+                    {
+                        isar_split_rend_huffman_encode( &pHuff_cfg->pred_roll, sym_adj_idx[ch1][ch2], &code, &len );
+                        ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, len );
+                    }
+                }
+            }
+
+            for ( ; b < pred_real_bands_roll; b++ )
+            {
+                hMd = &hBinHrSplitPreRend->rot_md[pos_idx][sf_idx][b];
+                isar_SplitRenderer_getdiagdiff( hMd->pred_mat_re_idx, sym_adj_idx, 1, min_pred_roll_idx, max_pred_roll_idx );
+                for ( ch1 = 0; ch1 < BINAURAL_CHANNELS; ch1++ )
+                {
+                    isar_split_rend_huffman_encode( &pHuff_cfg->pred_roll, sym_adj_idx[ch1][ch1], &code, &len );
+                    ISAR_SPLIT_REND_BITStream_write_int32( pBits, code, len );
+                }
+            }
+#endif
         }
     }
 }
@@ -1052,9 +1272,16 @@
     static void isar_SplitRenderer_quant_code(
     MULTI_BIN_REND_POSE_DATA *pMultiBinPoseData,
     ISAR_SPLIT_REND_BITS_HANDLE pBits,
     const int16_t low_res_pre_rend_rot,
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    const int16_t ro_md_flag,
+#endif
     const int32_t target_md_bits )
 {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    int16_t q, num_subframes, sf_idx, pos_idx, b, num_quant_strats;
+#else
     int16_t num_complex_bands, q, num_subframes, sf_idx, pos_idx, b, num_quant_strats;
+#endif
     int32_t overhead_bits, quant_strat_bits, huff_bits, start_bit;
     int16_t pred_real_bands_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS], pred_real_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
     int16_t pred_imag_bands_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS], pred_imag_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
@@ -1064,6 +1291,9 @@ static void isar_SplitRenderer_quant_code(
     float pred_1byquantstep_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
     float pred_quantstep_yaw[ISAR_SPLIT_REND_NUM_QUANT_STRATS];
     ISAR_BIN_HR_SPLIT_REND_MD_HANDLE hMd;
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    int16_t rot_axis_code, num_bits;
+#endif
 
     if ( low_res_pre_rend_rot )
     {
@@ -1078,7 +1308,16 @@
 
     ISAR_SPLIT_REND_BITStream_write_int32( pBits, pMultiBinPoseData->dof, ISAR_SPLIT_REND_DOF_BITS );
     ISAR_SPLIT_REND_BITStream_write_int32( pBits, pMultiBinPoseData->hq_mode, ISAR_SPLIT_REND_HQ_MODE_BITS );
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    rot_axis_code = isar_renderSplitGetCodeFromRot_axis( pMultiBinPoseData->dof, pMultiBinPoseData->rot_axis, &num_bits );
+    if ( num_bits > 0 )
+    {
+        ISAR_SPLIT_REND_BITStream_write_int32( pBits, (int32_t) rot_axis_code, num_bits );
+    }
+    ISAR_SPLIT_REND_BITStream_write_int32( pBits, (int32_t) ro_md_flag, ISAR_SPLIT_REND_RO_FLAG_BITS );
+#else
     ISAR_SPLIT_REND_BITStream_write_int32( pBits, (int32_t) pMultiBinPoseData->rot_axis, ISAR_SPLIT_REND_ROT_AXIS_BITS );
+#endif
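+    /* The write order here must match the read order in isar_splitBinPostRendMdDec():
+       dof, hq_mode, dof-dependent axis code ( omitted for 3 DOF ), then the 1-bit
+       real-only-metadata flag. */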
 
     /* code ref pose*/
     for ( sf_idx = 0; sf_idx < num_subframes; sf_idx++ )
@@ -1101,10 +1340,15 @@
         ISAR_SPLIT_REND_BITStream_write_int32( pBits, angle, ISAR_SPLIT_REND_HEAD_POSE_BITS );
     }
 
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    isar_split_rend_get_quant_params( MAX_SPLIT_REND_MD_BANDS, pred_real_bands_yaw, pred_imag_bands_yaw,
+                                      pred_quant_pnts_yaw, pred_quantstep_yaw, pred_1byquantstep_yaw,
+                                      d_bands_yaw, bands_pitch, pred_real_bands_roll, pred_imag_bands_roll, ro_md_flag, &num_quant_strats );
+#else
     isar_split_rend_get_quant_params( MAX_SPLIT_REND_MD_BANDS, pred_real_bands_yaw, pred_imag_bands_yaw,
                                       pred_quant_pnts_yaw, pred_quantstep_yaw, pred_1byquantstep_yaw,
                                       d_bands_yaw, bands_pitch, pred_real_bands_roll, pred_imag_bands_roll, &num_quant_strats, &num_complex_bands );
-
+#endif
     quant_strat_bits = (int32_t) ceilf( log2f( num_quant_strats ) );
 
     overhead_bits = pBits->bits_written - overhead_bits + quant_strat_bits + 1; /* 1 for base2 vs huff */
@@ -1392,7 +1636,11 @@ void isar_rend_CldfbSplitPreRendProcess(
 
     isar_SplitRenderer_GetRotMd( hBinHrSplitPreRend, pMultiBinPoseData, Cldfb_In_BinReal, Cldfb_In_BinImag, low_res_pre_rend_rot, ro_md_flag );
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    isar_SplitRenderer_quant_code( hBinHrSplitPreRend, headPosition, pMultiBinPoseData, pBits, low_res_pre_rend_rot, ro_md_flag, target_md_bits );
+#else
     isar_SplitRenderer_quant_code( hBinHrSplitPreRend, headPosition, pMultiBinPoseData, pBits, low_res_pre_rend_rot, target_md_bits );
+#endif
 
 #ifdef SPLIT_POSE_CORRECTION_DEBUG
     float tmpCrendBuffer[2][L_FRAME48k], quant_val, step, minv, maxv;
diff --git a/lib_isar/isar_splitRenderer_utils.c b/lib_isar/isar_splitRenderer_utils.c
index 8d03e297d74ec07ee30eb31f78d1757ce28a297f..b20791f4b9f6aaf12b823a5612f8e5ccb034ec20 100644
--- a/lib_isar/isar_splitRenderer_utils.c
+++ b/lib_isar/isar_splitRenderer_utils.c
@@ -765,24 +765,45 @@
     {
         if ( pSplitRendConfig->dof == 1 )
         {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+            if ( pSplitRendConfig->splitRendBitRate < 34000 )
+            {
+                return IVAS_ERROR( IVAS_ERR_INVALID_SPLIT_REND_CONFIG, "1DOF metadata needs at least 34 kbps" );
+            }
+#else
             if ( pSplitRendConfig->splitRendBitRate < 50000 )
             {
                 return IVAS_ERROR( IVAS_ERR_INVALID_SPLIT_REND_CONFIG, "1DOF metadata needs atleast 50 kbps" );
             }
+#endif
         }
         else if ( pSplitRendConfig->dof == 2 )
         {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+            if ( pSplitRendConfig->splitRendBitRate < 50000 )
+            {
+                return IVAS_ERROR( IVAS_ERR_INVALID_SPLIT_REND_CONFIG, "2DOF metadata needs at least 50 kbps" );
+            }
+#else
             if ( pSplitRendConfig->splitRendBitRate < 66000 )
             {
                 return IVAS_ERROR( IVAS_ERR_INVALID_SPLIT_REND_CONFIG, "2DOF metadata needs atleast 66 kbps" );
             }
+#endif
         }
         else if ( pSplitRendConfig->dof == 3 )
        {
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+            if ( pSplitRendConfig->splitRendBitRate < 82000 )
+            {
+                return IVAS_ERROR( IVAS_ERR_INVALID_SPLIT_REND_CONFIG, "3DOF metadata needs at least 82 kbps" );
+            }
+#else
             if ( pSplitRendConfig->splitRendBitRate < 128000 )
             {
                 return IVAS_ERROR( IVAS_ERR_INVALID_SPLIT_REND_CONFIG, "3DOF metadata needs atleast 128 kbps" );
             }
+#endif
         }
     }
 
@@ -807,14 +828,23 @@ void isar_split_rend_get_quant_params(
     int16_t bands_pitch[ISAR_SPLIT_REND_NUM_QUANT_STRATS],
     int16_t pred_real_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS],
     int16_t pred_imag_bands_roll[ISAR_SPLIT_REND_NUM_QUANT_STRATS],
-    int16_t *num_quant_strats,
-    int16_t *num_complex_bands )
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    const int16_t ro_flag,
+#endif
+    int16_t *num_quant_strats
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    ,
+    int16_t *num_complex_bands
+#endif
+)
 {
     int16_t q;
 
     *num_quant_strats = ISAR_SPLIT_REND_NUM_QUANT_STRATS;
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
     *num_complex_bands = COMPLEX_MD_BAND_THRESH_LOW;
     assert( *num_complex_bands <= num_md_bands );
+#endif
 
     pred_quant_pnts_yaw[0] = ISAR_SPLIT_REND_PRED_63QUANT_PNTS;
     pred_quantstep_yaw[0] = ISAR_SPLIT_REND_PRED63_Q_STEP;
@@ -831,16 +861,40 @@
         pred_real_bands_yaw[q] = num_md_bands;
         pred_real_bands_roll[q] = num_md_bands;
     }
+
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
     pred_imag_bands_yaw[0] = num_md_bands;
     pred_imag_bands_roll[0] = num_md_bands;
     pred_imag_bands_yaw[1] = num_md_bands;
     pred_imag_bands_roll[1] = num_md_bands;
-
     for ( q = 2; q < *num_quant_strats; q++ )
     {
         pred_imag_bands_yaw[q] = ( q < ( *num_quant_strats - 1 ) ) ? num_md_bands : *num_complex_bands;
         pred_imag_bands_roll[q] = *num_complex_bands;
     }
+#else
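+    /* When the real-only flag is set, imaginary prediction coefficients are
+       restricted to the first SPLIT_REND_RO_MD_BAND_THRESH yaw bands for every
+       quantization strategy; otherwise the two lowest-rate strategies cap the
+       complex yaw bands at COMPLEX_MD_BAND_THRESH_HIGH and
+       COMPLEX_MD_BAND_THRESH_LOW. Roll bands always use the RO threshold. */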
+    if ( ro_flag )
+    {
+        for ( q = 0; q < *num_quant_strats; q++ )
+        {
+            pred_imag_bands_yaw[q] = SPLIT_REND_RO_MD_BAND_THRESH;
+        }
+    }
+    else
+    {
+        for ( q = 0; q < *num_quant_strats - 2; q++ )
+        {
+            pred_imag_bands_yaw[q] = num_md_bands;
+        }
+        pred_imag_bands_yaw[( *num_quant_strats - 2 )] = COMPLEX_MD_BAND_THRESH_HIGH;
+        pred_imag_bands_yaw[( *num_quant_strats - 1 )] = COMPLEX_MD_BAND_THRESH_LOW;
+    }
+
+    for ( q = 0; q < *num_quant_strats; q++ )
+    {
+        pred_imag_bands_roll[q] = SPLIT_REND_RO_MD_BAND_THRESH;
+    }
+#endif
 
     for ( q = 0; q < *num_quant_strats; q++ )
     {
@@ -851,6 +905,99 @@ void isar_split_rend_get_quant_params(
     return;
 }
 
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
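+/* 2-bit axis signalling: for dof == 1 the code is the single-axis enum value
+   itself; for dof == 2, code 0 denotes DEFAULT_AXIS and codes 1..3 map to
+   rot_axis = YAW_PITCH + ( code - 1 ); for dof == 3 no code is transmitted and
+   DEFAULT_AXIS is implied. isar_renderSplitGetRot_axisFromCode() inverts
+   isar_renderSplitGetCodeFromRot_axis(). */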
+/*-------------------------------------------------------------------------
+ * Function isar_renderSplitGetRot_axisNumBits()
+ *
+ *
+ *------------------------------------------------------------------------*/
+
+int16_t isar_renderSplitGetRot_axisNumBits(
+    const int16_t dof )
+{
+    int16_t num_bits;
+    if ( dof < 3 )
+    {
+        num_bits = 2;
+    }
+    else
+    {
+        num_bits = 0;
+    }
+    return num_bits;
+}
+
+/*-------------------------------------------------------------------------
+ * Function isar_renderSplitGetRot_axisFromCode()
+ *
+ *
+ *------------------------------------------------------------------------*/
+
+ISAR_SPLIT_REND_ROT_AXIS isar_renderSplitGetRot_axisFromCode(
+    const int16_t dof,
+    const int16_t code )
+{
+    ISAR_SPLIT_REND_ROT_AXIS rot_axis;
+
+    if ( dof == 1 )
+    {
+        rot_axis = (ISAR_SPLIT_REND_ROT_AXIS) code;
+    }
+    else if ( dof == 2 )
+    {
+        if ( code == 0 )
+        {
+            rot_axis = (ISAR_SPLIT_REND_ROT_AXIS) code;
+        }
+        else
+        {
+            rot_axis = (ISAR_SPLIT_REND_ROT_AXIS) ( code - 1 ) + YAW_PITCH;
+        }
+    }
+    else
+    {
+        rot_axis = (ISAR_SPLIT_REND_ROT_AXIS) DEFAULT_AXIS;
+    }
+
+    return rot_axis;
+}
+
+/*-------------------------------------------------------------------------
+ * Function isar_renderSplitGetCodeFromRot_axis()
+ *
+ *
+ *------------------------------------------------------------------------*/
+
+int16_t isar_renderSplitGetCodeFromRot_axis(
+    const int16_t dof,
+    const ISAR_SPLIT_REND_ROT_AXIS rot_axis,
+    int16_t *num_bits )
+{
+    int16_t code = 0;
+    if ( dof == 1 )
+    {
+        code = (int16_t) rot_axis;
+    }
+    else if ( dof == 2 )
+    {
+        if ( rot_axis == DEFAULT_AXIS )
+        {
+            code = (int16_t) rot_axis;
+        }
+        else
+        {
+            code = (int16_t) ( rot_axis - YAW_PITCH ) + 1;
+        }
+    }
+    else
+    {
+        code = (int16_t) DEFAULT_AXIS;
+    }
+    *num_bits = isar_renderSplitGetRot_axisNumBits( dof );
+
+    return code;
+}
+#endif
 
 /*-------------------------------------------------------------------------
  * Function isar_renderSplitGetMultiBinPoseData()
  *
  *
  *------------------------------------------------------------------------*/
 
@@ -916,15 +1063,19 @@ void isar_renderSplitGetMultiBinPoseData(
     switch ( rot_axis )
     {
         case DEFAULT_AXIS:
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
         case YAW:
         case PITCH:
+#endif
         case YAW_PITCH:
         {
             num_yaw_poses = SPLIT_REND_MAX_YAW_ONLY_POSES;
             num_pitch_poses = SPLIT_REND_MAX_PITCH_ONLY_POSES;
             break;
         }
+#ifndef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
         case ROLL:
+#endif
         case YAW_ROLL:
         {
             num_yaw_poses = SPLIT_REND_MAX_YAW_ONLY_POSES;
diff --git a/lib_isar/isar_stat.h b/lib_isar/isar_stat.h
index e43ac88cceb905e8c249a648cc27c7f6a07697d3..627099d90afd3b5db987630b4953028ec529f486 100644
--- a/lib_isar/isar_stat.h
+++ b/lib_isar/isar_stat.h
@@ -113,6 +113,9 @@ typedef struct isar_binaural_head_rot_split_rendering_md_struct
 {
     float pred_mat_re[BINAURAL_CHANNELS][BINAURAL_CHANNELS];
     float pred_mat_im[BINAURAL_CHANNELS][BINAURAL_CHANNELS];
+#ifdef SPLIT_REND_POSE_CORRECTION_UNUSED_BITS
+    float pred_mat_re2[BINAURAL_CHANNELS];
+#endif
     float gd;
     float gd2;
     int16_t pred_mat_re_idx[BINAURAL_CHANNELS][BINAURAL_CHANNELS];