Commit 098c27a8 authored by Dominik Weckbecker's avatar Dominik Weckbecker 💬
Browse files

Merge branch 'main' into 930-missing-test-case-for-osba-with-planar-foa-hoa-input

parents 5ba6d79f db360790
Loading
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -336,7 +336,11 @@ static const CmdLnParser_Option cliOptions[] = {
      .description = "LFE panning matrix. File (CSV table) containing a matrix of dimensions [ num_input_lfe x num_output_channels ] with elements specifying linear routing gain (like --gain, -g). \nIf specified, overrides the output LFE position option and the default behavior which attempts to map input to output LFE channel(s)" },
    {
        .id = CmdLnOptionId_noDelayCmp,
#ifdef FIX_929_RENDERER_CMDL
        .match = "no_delay_compensation",
#else
        .match = "no_delay_comparison",
#endif
        .matchShort = "no_delay_cmp",
        .description = "[flag] Turn off delay compensation",
    },
+2 −0
Original line number Diff line number Diff line
@@ -160,6 +160,7 @@
#define FIX_SPLITREND_WARNINGS                          /* FhG: fix warnings related to split rendering observed in build jobs */
#define FIX_923_EXTERNAL_REND_COMMAND_LINE              /* VA: issue 923: enable external renderer command-line options in UPPER case letters */
#define FIX_921_OMASA_DELAY_PRINTOUT                    /* VA: issue 921: correct OMASA decoder delay printout */
#define FIX_929_RENDERER_CMDL                           /* Nokia: issue #929: renderer command line option */

/* #################### End BE switches ################################## */

@@ -184,6 +185,7 @@
#define NONBE_FIX_855_JBM_FLUSH_OFFSET                        /* FhG: issue #855: add missing switch here for the code in JBM flushing                 */
#define NONBE_FIX_926_OSBA_DECODER_CRASH_PLANAR_SBA           /* FhG: issue 926: crash in OSBA decoding with planar FOA */
#define NONBE_FIX_908_OSBA_BR_SWITCHING_CRASH                 /* FhG: issue 908: fix crash in OSBA BR switching with long test vectors */
#define NONBE_FIX_BINAURAL_ROOM_IR_REVERBERATOR               /* FhG: re-enable acidentially disabled reverberator for BINAURAL_ROOM_IR */

/* ##################### End NON-BE switches ########################### */

+4 −0
Original line number Diff line number Diff line
@@ -1229,7 +1229,11 @@ ivas_error ivas_binRenderer_open(
    }

    /* Allocate memories needed for reverb module */
#ifdef NONBE_FIX_BINAURAL_ROOM_IR_REVERBERATOR
    if ( st_ivas->renderer_type == RENDERER_BINAURAL_FASTCONV_ROOM || ( st_ivas->renderer_type == RENDERER_BINAURAL_FASTCONV && st_ivas->hOutSetup.output_config == IVAS_AUDIO_CONFIG_BINAURAL_ROOM_REVERB ) )
#else
    if ( st_ivas->renderer_type == RENDERER_BINAURAL_FASTCONV && st_ivas->hIntSetup.output_config == IVAS_AUDIO_CONFIG_BINAURAL_ROOM_REVERB )
#endif
    {
        if ( ( error = ivas_binaural_reverb_open_fastconv( &( hBinRenderer->hReverb ), hBinRenderer->conv_band, hBinRenderer->timeSlots, &( st_ivas->hRenderConfig->roomAcoustics ), st_ivas->hIntSetup.output_config, st_ivas->hDecoderConfig->output_Fs, st_ivas->hHrtfFastConv ) ) != IVAS_ERR_OK )
        {

scripts/generate_test_items.py

deleted 100755 → 0
+0 −254
Original line number Diff line number Diff line
#!/usr/bin/env python3

"""
   (C) 2022-2023 IVAS codec Public Collaboration with portions copyright Dolby International AB, Ericsson AB,
   Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
   Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
   Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
   contributors to this repository. All Rights Reserved.

   This software is protected by copyright law and by international treaties.
   The IVAS codec Public Collaboration consisting of Dolby International AB, Ericsson AB,
   Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
   Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
   Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
   contributors to this repository retain full ownership rights in their respective contributions in
   the software. This notice grants no license of any kind, including but not limited to patent
   license, nor is any license granted by implication, estoppel or otherwise.

   Contributors are required to enter into the IVAS codec Public Collaboration agreement before making
   contributions.

   This software is provided "AS IS", without any express or implied warranties. The software is in the
   development stage. It is intended exclusively for experts who have experience with such software and
   solely for the purpose of inspection. All implied warranties of non-infringement, merchantability
   and fitness for a particular purpose are hereby disclaimed and excluded.

   Any dispute, controversy or claim arising under or in relation to providing this software shall be
   submitted to and settled by the final, binding jurisdiction of the courts of Munich, Germany in
   accordance with the laws of the Federal Republic of Germany excluding its conflict of law rules and
   the United Nations Convention on Contracts on the International Sales of Goods.
"""

import argparse
import logging
import multiprocessing as mp
import os
from typing import Iterable

from pyaudio3dtools import audiofile
from pyprocessing import processing, processing_configs, utils

# Global logging options
logger = logging.getLogger(__name__)  # module-level logger for this script
LOGGER_MAIN_LOG_FILENAME = "log.txt"  # main logfile written into each test's output path
LOGGER_PROC_ITEM_SUFFIX = "_log.txt"  # suffix for per-item logfiles in the tmp folder
LOGGER_FORMAT = "%(asctime)s | %(name)-12s | %(levelname)-8s | %(message)s"  # shared record format
LOGGER_DATEFMT = "%m-%d %H:%M"  # timestamp format for log records


def _run_condition_and_item(
    out_folder: str, tmp_folder: str, item: str, proc_chain: Iterable
) -> None:
    """Process one input item through a condition's processing chain.

    Writes the processed result to ``out_folder`` as ``<item>.wav`` and logs
    the run into a per-item logfile inside ``tmp_folder``.

    Args:
        out_folder: Destination folder for the processed .wav output.
        tmp_folder: Folder for temporary files and the per-item logfile.
        item: Path to the input audio item.
        proc_chain: Iterable of processing steps consumed by
            ``processing.process_chain``.
    """
    item_name, _ = os.path.splitext(os.path.basename(item))
    out_item = os.path.join(out_folder, item_name + ".wav")

    # Create a dedicated logfile for this item
    fh = logging.FileHandler(
        os.path.join(tmp_folder, item_name + LOGGER_PROC_ITEM_SUFFIX), mode="w"
    )
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT)
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    try:
        processing.process_chain(proc_chain, item, out_item, tmp_folder)
    finally:
        # Always detach and close the handler, even when process_chain raises;
        # the previous version leaked the open file descriptor on every call
        # and left the handler attached on error.
        logger.removeHandler(fh)
        fh.close()

    # Log after completion
    output_nickname = "Done " + os.path.join(
        os.path.basename(out_folder), item_name + ".wav"
    )
    logger.info(f"{output_nickname:-^100.100}")


def main(test_cfg):
    """Execute one test configuration.

    Optionally pre-processes the input items (consuming the first
    condition/folder triple), optionally concatenates the items into a single
    file, then runs every remaining condition's processing chain over all
    items — in parallel when multiprocessing is enabled.

    Args:
        test_cfg: Test configuration object (from
            ``processing_configs.test_config``) providing the item list,
            condition chains, folder lists and run options. Its item/condition
            lists are mutated in place as stages are consumed.
    """
    # Create pool if multiprocessing is enabled
    if test_cfg.enable_multiprocessing:
        pool = mp.Pool(mp.cpu_count())
        results = []

    # pre-process items if required; the first condition/folder triple is
    # consumed as the pre-processing stage
    if test_cfg.preproc_input:
        condition = test_cfg.list_of_conditions[0]
        out_folder = test_cfg.output_folders[0]
        tmp_folder = test_cfg.tmp_folders[0]

        proc_id = condition["id"]
        logger.info("  Condition: " + proc_id)

        proc_chain = condition["proc_chain"]

        for item in test_cfg.items_list:
            logger.info("    Item: " + item)
            if test_cfg.enable_multiprocessing:
                results.append(
                    pool.apply_async(
                        _run_condition_and_item,
                        args=(out_folder, tmp_folder, item, proc_chain),
                    )
                )
            else:
                _run_condition_and_item(out_folder, tmp_folder, item, proc_chain)

        if test_cfg.enable_multiprocessing:
            # Wait for pre-processing to finish (re-raising any worker error
            # via .get()) before starting the main conditions on a fresh pool.
            pool.close()
            pool.join()
            for r in results:
                r.get()
            pool = mp.Pool(mp.cpu_count())
            results = []

        # the pre-processed output becomes the new input item list
        test_cfg.items_list = utils.list_audio(out_folder, absolute=True)

        # drop the consumed pre-processing condition and folders
        test_cfg.list_of_conditions = test_cfg.list_of_conditions[1:]
        test_cfg.output_folders = test_cfg.output_folders[1:]
        test_cfg.tmp_folders = test_cfg.tmp_folders[1:]

    # concatenate items into a single file if required; a single item is
    # simply padded with silence (same call, different output name)
    if test_cfg.concatenate_input:
        if len(test_cfg.items_list) > 1:
            concat_basename = os.path.basename(test_cfg.input_path) + "_concat.wav"
        else:
            concat_basename = (
                os.path.splitext(os.path.basename(test_cfg.input_path))[0]
                + "_padded.wav"
            )
        concat_filename = os.path.join(test_cfg.output_path, concat_basename)
        audiofile.concatenatefiles(
            test_cfg.items_list,
            concat_filename,
            test_cfg.concat_silence_pre,
            test_cfg.concat_silence_post,
            test_cfg.in_fs,
        )
        test_cfg.items_list = [concat_filename]

    # run every remaining condition over all items
    for condition, out_folder, tmp_folder in zip(
        test_cfg.list_of_conditions, test_cfg.output_folders, test_cfg.tmp_folders
    ):
        proc_id = condition["id"]
        logger.info(f"  Condition: {proc_id} in {out_folder}")

        proc_chain = condition["proc_chain"]

        for item in test_cfg.items_list:
            logger.info(f"    Item: {item}")
            if test_cfg.enable_multiprocessing:
                results.append(
                    pool.apply_async(
                        _run_condition_and_item,
                        args=(out_folder, tmp_folder, item, proc_chain),
                    )
                )
            else:
                _run_condition_and_item(out_folder, tmp_folder, item, proc_chain)

    if test_cfg.enable_multiprocessing:
        pool.close()
        pool.join()
        # propagate any worker exception to the caller
        for r in results:
            r.get()

    # copy over JSON to main output directory
    output_json = os.path.join(test_cfg.output_path, test_cfg.name + ".json")
    with open(output_json, "w") as fp:
        fp.write(test_cfg.json_out)

    # remove concatenated file
    if (
        test_cfg.delete_tmp
        and test_cfg.concatenate_input
        and os.path.exists(concat_filename)
    ):
        os.remove(concat_filename)


if __name__ == "__main__":
    # Parse command line
    parser = argparse.ArgumentParser(
        description="Generate test items. Refer to README.md for detailed usage instructions."
    )
    parser.add_argument(
        "-i",
        "--infile",
        required=True,
        nargs="+",
        help="Configuration file(s): FILE1.json FILE2.json ...",
    )
    args = parser.parse_args()

    # Collect all test configuration files to process; a directory argument
    # expands to every .json file directly inside it
    tests_list = []
    for infile in args.infile:
        if os.path.isdir(infile):
            tests_list.extend(
                [
                    os.path.join(infile, f)
                    for f in os.listdir(infile)
                    if f.endswith(".json")
                ]
            )
        else:
            tests_list.append(infile)

    # Read each configuration file and run the test it describes
    for test in tests_list:
        test_cfg = processing_configs.test_config(test)

        # context manager to create output folders and clean up temporary folders
        delete_folders = test_cfg.tmp_folders if test_cfg.delete_tmp else []
        with utils.DirManager(
            test_cfg.output_folders + test_cfg.tmp_folders, delete_folders
        ):

            # Set up logging handlers: console plus a per-test main logfile
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            console_handler.setFormatter(logging.Formatter("%(message)s"))

            file_handler = logging.FileHandler(
                os.path.join(test_cfg.output_path, LOGGER_MAIN_LOG_FILENAME), mode="w"
            )
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(
                logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT)
            )

            # Configure loggers. force=True (Python >= 3.8) is required here:
            # basicConfig is a no-op once the root logger has handlers, so
            # without it the per-test file_handler would only ever be attached
            # for the first configuration file and later tests would log into
            # the first test's logfile.
            logging.basicConfig(
                format=LOGGER_FORMAT,
                datefmt=LOGGER_DATEFMT,
                level=logging.INFO,
                handlers=[console_handler, file_handler],
                force=True,
            )

            # Log some info
            logger.info(f"===Generate test: {test}===")
            logger.info(f"Test name: {test_cfg.name}")
            logger.info(f"Input path: {test_cfg.input_path}")
            logger.info(f"Output path: {test_cfg.output_path}")

            main(test_cfg)