ivas_processing_scripts/generation/generate_omasa_items.py (+4 −13)

@@ -33,7 +33,6 @@
 import logging
 import sys
 from itertools import groupby, repeat
-from math import floor
 from pathlib import Path
 
 import numpy as np
@@ -245,14 +244,6 @@ def generate_OMASA_scene(
     else:
         source_shift = int(np.ceil(source_shift / frame_len) * frame_len)
 
-    # read the level
-    if "level" in scene.keys():
-        level = (
-            scene["level"][i]
-            if isinstance(scene["level"], list)
-            else scene["level"]
-        )
-    else:
-        level = -26
 
     # read the level
     if "level" in scene.keys():
         level = (
@@ -313,7 +304,7 @@ def generate_OMASA_scene(
     # get the number of frames (multiple of 20ms)
     N_frames = int(len(x.audio) / frame_len)
 
-    # convert input audio source signal to MASA or ISM
+    # convert the input audio source signal to MASA or ISM
     if fmt in ["FOA", "HOA2", "HOA3"]:
         # convert FOA/HOA2/HOA3 to MASA
         x_masa = audio.MetadataAssistedSpatialAudio(
@@ -410,7 +401,7 @@ def generate_OMASA_scene(
         # if MASA, append metadata file to the OMASA object
         y.metadata_files.append(x.metadata_file)
     else:
-        # if ISM, append metadata file to the OMASA object
+        # if ISM, append object position to the OMASA object
         y.object_pos = x.object_pos.copy()
 
     # if source_shift < 0:
@@ -465,7 +456,7 @@ def generate_OMASA_scene(
     if len(y.audio) != duration:
         metadata.trim_meta(y, limits=[0, len(y.audio) - duration], samples=True)
 
-    # write the ISMn output to .wav file in an interleaved format and ISM metadata in .csv files
+    # write the OMASA audio output to .wav file in an interleaved format and ISM metadata in .csv files
     audiofile.write(output_filename, y.audio, y.fs)
     metadata.write_ISM_metadata_in_file(y.object_pos, y.metadata_files[:-1])
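The main functional change is dropping the duplicated "read the level" block: the retained copy looks up an optional per-source level in the scene configuration and falls back to -26 dB when none is given. Below is a minimal sketch of that retained logic, assuming (as the diff suggests) that scene is a plain dict whose optional "level" entry is either a scalar or a per-source list; the helper name read_source_level and the example scene dict are hypothetical and only for illustration.

def read_source_level(scene: dict, i: int, default: float = -26.0) -> float:
    # "level" may be omitted entirely, given once for all sources,
    # or given as a list with one value per source index i
    if "level" in scene:
        value = scene["level"]
        return value[i] if isinstance(value, list) else value
    return default


# hypothetical scene configuration for illustration only
scene = {"input": ["talker1.wav", "music.wav"], "level": [-26, -31]}
print(read_source_level(scene, 1))                        # -31
print(read_source_level({"input": ["talker1.wav"]}, 0))   # -26.0 (default)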