Commit f95f1097 authored by Archit Tamarapu's avatar Archit Tamarapu
Browse files

Merge branch '932-remove-obsolete-python-script-generate_test_items-py' into 'main'

Resolve "Remove obsolete python script `generate_test_items.py`"

See merge request !1277
parents 2c21b46c 00fd3ee6
Loading
Loading
Loading
Loading
Loading

scripts/generate_test_items.py

deleted 100755 → 0
+0 −254
Original line number Diff line number Diff line
#!/usr/bin/env python3

"""
   (C) 2022-2023 IVAS codec Public Collaboration with portions copyright Dolby International AB, Ericsson AB,
   Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
   Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
   Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
   contributors to this repository. All Rights Reserved.

   This software is protected by copyright law and by international treaties.
   The IVAS codec Public Collaboration consisting of Dolby International AB, Ericsson AB,
   Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
   Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
   Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
   contributors to this repository retain full ownership rights in their respective contributions in
   the software. This notice grants no license of any kind, including but not limited to patent
   license, nor is any license granted by implication, estoppel or otherwise.

   Contributors are required to enter into the IVAS codec Public Collaboration agreement before making
   contributions.

   This software is provided "AS IS", without any express or implied warranties. The software is in the
   development stage. It is intended exclusively for experts who have experience with such software and
   solely for the purpose of inspection. All implied warranties of non-infringement, merchantability
   and fitness for a particular purpose are hereby disclaimed and excluded.

   Any dispute, controversy or claim arising under or in relation to providing this software shall be
   submitted to and settled by the final, binding jurisdiction of the courts of Munich, Germany in
   accordance with the laws of the Federal Republic of Germany excluding its conflict of law rules and
   the United Nations Convention on Contracts on the International Sales of Goods.
"""

import argparse
import logging
import multiprocessing as mp
import os
from typing import Iterable

from pyaudio3dtools import audiofile
from pyprocessing import processing, processing_configs, utils

# Global logging options
# Module-level logger; per-item file handlers are attached/removed in
# _run_condition_and_item, and root handlers are configured in __main__.
logger = logging.getLogger(__name__)
# Name of the main log file written into each test's output folder.
LOGGER_MAIN_LOG_FILENAME = "log.txt"
# Suffix appended to an item's name for its per-item log file in tmp_folder.
LOGGER_PROC_ITEM_SUFFIX = "_log.txt"
# Format / date format shared by all file handlers created in this script.
LOGGER_FORMAT = "%(asctime)s | %(name)-12s | %(levelname)-8s | %(message)s"
LOGGER_DATEFMT = "%m-%d %H:%M"


def _run_condition_and_item(
    out_folder: str, tmp_folder: str, item: str, proc_chain: Iterable
) -> None:
    """Process one audio item through a processing chain with per-item logging.

    Parameters
    ----------
    out_folder : str
        Folder receiving the processed output WAV file (item name is kept,
        extension replaced by ".wav").
    tmp_folder : str
        Folder for intermediate files; also receives the per-item log file.
    item : str
        Path to the input audio item.
    proc_chain : Iterable
        Processing chain definition forwarded to processing.process_chain.
    """
    item_name, _ = os.path.splitext(os.path.basename(item))
    out_item = os.path.join(out_folder, item_name + ".wav")

    # Create a dedicated logfile for the item
    fh = logging.FileHandler(
        os.path.join(tmp_folder, item_name + LOGGER_PROC_ITEM_SUFFIX), mode="w"
    )
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT))
    logger.addHandler(fh)

    try:
        processing.process_chain(proc_chain, item, out_item, tmp_folder)
    finally:
        # Always detach AND close the handler: the previous version never
        # closed it (leaking a file descriptor per item) and left it attached
        # when process_chain raised, duplicating log lines for later items.
        logger.removeHandler(fh)
        fh.close()

    # Log a fixed-width "Done <folder>/<item>.wav" banner after completion
    output_nickname = "Done " + os.path.join(
        os.path.basename(out_folder), item_name + ".wav"
    )
    logger.info(f"{output_nickname:-^100.100}")


def _dispatch_items(test_cfg, pool, results, out_folder, tmp_folder, proc_chain):
    """Run every item in test_cfg.items_list through proc_chain.

    Dispatches asynchronously to *pool* (appending AsyncResults to *results*)
    when multiprocessing is enabled, otherwise runs items sequentially.
    """
    for item in test_cfg.items_list:
        logger.info(f"    Item: {item}")
        if test_cfg.enable_multiprocessing:
            results.append(
                pool.apply_async(
                    _run_condition_and_item,
                    args=(out_folder, tmp_folder, item, proc_chain),
                )
            )
        else:
            _run_condition_and_item(out_folder, tmp_folder, item, proc_chain)


def _drain_pool(pool, results):
    """Close and join *pool*, then re-raise any worker exception via get()."""
    pool.close()
    pool.join()
    for r in results:
        r.get()


def main(test_cfg):
    """Generate all test items described by *test_cfg*.

    Optionally pre-processes and/or concatenates the input items, then runs
    every remaining condition over every item, writes the resolved JSON
    configuration next to the outputs, and removes the concatenated temp
    file when requested.

    Parameters
    ----------
    test_cfg
        Test configuration object from processing_configs.test_config().
    """
    # Create pool if multiprocessing is enabled
    pool = None
    results = []
    if test_cfg.enable_multiprocessing:
        pool = mp.Pool(mp.cpu_count())

    # Pre-process items if required: the first condition/folder set is
    # consumed by the pre-processing pass and dropped afterwards.
    if test_cfg.preproc_input:
        condition = test_cfg.list_of_conditions[0]
        out_folder = test_cfg.output_folders[0]
        tmp_folder = test_cfg.tmp_folders[0]

        logger.info("  Condition: " + condition["id"])
        _dispatch_items(
            test_cfg, pool, results, out_folder, tmp_folder, condition["proc_chain"]
        )

        if test_cfg.enable_multiprocessing:
            # Wait for pre-processing to finish, then start a fresh pool for
            # the main processing loop below.
            _drain_pool(pool, results)
            pool = mp.Pool(mp.cpu_count())
            results = []

        # The pre-processed outputs become the new input item list
        test_cfg.items_list = utils.list_audio(out_folder, absolute=True)

        test_cfg.list_of_conditions = test_cfg.list_of_conditions[1:]
        test_cfg.output_folders = test_cfg.output_folders[1:]
        test_cfg.tmp_folders = test_cfg.tmp_folders[1:]

    # Concatenate items if required (a single item is merely padded with
    # silence). Both cases use the same concatenation call; only the output
    # filename differs.
    if test_cfg.concatenate_input:
        if len(test_cfg.items_list) > 1:
            out_name = os.path.basename(test_cfg.input_path) + "_concat.wav"
        else:
            out_name = (
                os.path.splitext(os.path.basename(test_cfg.input_path))[0]
                + "_padded.wav"
            )
        concat_filename = os.path.join(test_cfg.output_path, out_name)
        audiofile.concatenatefiles(
            test_cfg.items_list,
            concat_filename,
            test_cfg.concat_silence_pre,
            test_cfg.concat_silence_post,
            test_cfg.in_fs,
        )
        test_cfg.items_list = [concat_filename]

    # Main processing loop: every remaining condition over every item
    for condition, out_folder, tmp_folder in zip(
        test_cfg.list_of_conditions, test_cfg.output_folders, test_cfg.tmp_folders
    ):
        logger.info(f"  Condition: {condition['id']} in {out_folder}")
        _dispatch_items(
            test_cfg, pool, results, out_folder, tmp_folder, condition["proc_chain"]
        )

    if test_cfg.enable_multiprocessing:
        _drain_pool(pool, results)

    # Copy over JSON to main output directory
    output_json = os.path.join(test_cfg.output_path, test_cfg.name + ".json")
    with open(output_json, "w") as fp:
        fp.write(test_cfg.json_out)

    # Remove concatenated file
    if (
        test_cfg.delete_tmp
        and test_cfg.concatenate_input
        and os.path.exists(concat_filename)
    ):
        os.remove(concat_filename)


if __name__ == "__main__":
    # Parse command line
    parser = argparse.ArgumentParser(
        description="Generate test items. Refer to README.md for detailed usage instructions."
    )
    parser.add_argument(
        "-i",
        "--infile",
        required=True,
        nargs="+",
        help="Configuration file(s): FILE1.json FILE2.json ...",
    )
    args = parser.parse_args()

    # Collect all test configuration files to process; a directory argument
    # expands to every JSON file directly inside it.
    tests_list = []
    for infile in args.infile:
        if os.path.isdir(infile):
            tests_list.extend(
                os.path.join(infile, f)
                for f in os.listdir(infile)
                if f.endswith(".json")
            )
        else:
            tests_list.append(infile)

    # Read configuration file and run each test
    for test in tests_list:
        test_cfg = processing_configs.test_config(test)

        # context manager to create output folders and clean up temporary folders
        delete_folders = test_cfg.tmp_folders if test_cfg.delete_tmp else []
        with utils.DirManager(
            test_cfg.output_folders + test_cfg.tmp_folders, delete_folders
        ):
            # Set up logging handlers: console gets bare messages, the
            # per-test log file gets the full timestamped format.
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            console_handler.setFormatter(logging.Formatter("%(message)s"))

            file_handler = logging.FileHandler(
                os.path.join(test_cfg.output_path, LOGGER_MAIN_LOG_FILENAME), mode="w"
            )
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(
                logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT)
            )

            # Configure the root logger. force=True (Python 3.8+) replaces any
            # handlers installed by a previous loop iteration; without it
            # basicConfig is a no-op after the first test, so subsequent tests
            # would keep writing into the first test's log file.
            logging.basicConfig(
                format=LOGGER_FORMAT,
                datefmt=LOGGER_DATEFMT,
                level=logging.INFO,
                handlers=[console_handler, file_handler],
                force=True,
            )

            # Log some info
            logger.info(f"===Generate test: {test}===")
            logger.info(f"Test name: {test_cfg.name}")
            logger.info(f"Input path: {test_cfg.input_path}")
            logger.info(f"Output path: {test_cfg.output_path}")

            main(test_cfg)