Commit 924dfb4d authored by Jan Kiene's avatar Jan Kiene
Browse files

adapt renderer_short usage in other scripts

parent 8c999f15
Loading
Loading
Loading
Loading
+6 −10
Original line number Diff line number Diff line
@@ -42,7 +42,6 @@ It might be required to set Clang-18 as the default clang on the machine
  sudo apt install python3.13 python3.13-venv
  ```


## Run CUT tests on Target platform

Running the conformance tests requires around 30 GB of disk space and around 6 GB of RAM.
@@ -105,7 +104,6 @@ CUT_OUTPUTS

If CUT test execution is done on a different platform, the scripts/CUT_OUTPUTS must be copied and provided in the reference platform's scripts/CUT_OUTPUTS. Then the BE analysis or non-BE analysis procedure below should be followed. It is recommended to first perform the analysis with BE comparison, and then the analysis with non-BE comparison if non-BE outputs were found. Note that non-BE conformance currently applies only if the metadata file output is BE and non-BE results occur only in the wave-file output.


### Perform the BE comparison on the CUT outputs on reference platform

The BE comparison is performed on the CUT outputs using the command below. Encoded outputs will be decoded using the reference decoder executables as part of the process. The BE comparison is then performed between the CUT and reference decoded outputs. This includes comparison of ".wav"-files, and ".csv" and ".met" metadata files. If any non-BE results are observed, this is reported on the command line and a link to an analysis ".csv" file is given. The analysis file shows which exact files were non-BE. An example passing output is shown below. If all test sets print `PASSED BE TEST`, then CUT outputs are BE-conformant.
@@ -142,7 +140,6 @@ Analysing tests for ISAR (1252 tests)
</code></pre>
</details>


### Perform the MLD based non-BE analysis on the CUT outputs on reference platform (Ubuntu 24.04)

The MLD-based non-BE analysis is performed on the CUT outputs with the command below. Encoded outputs will be decoded using the reference decoder executables as part of the process. The MLD analysis is then performed between the CUT and reference decoded outputs (only ".wav" files are compared). Comparison against the MLD corridor is also done as part of this process. An example passing output is shown below. If all test sets print `MLD Corridor passed for...` and there were no non-BE metadata comparisons in the BE test, then CUT outputs are non-BE conformant.
@@ -244,7 +241,6 @@ MLD Corridor passed for ISAR with max MLD diff of 0.0
</code></pre>
</details>


## Executing specific tests only

All CUT tests can be run specifically for IVAS Encoder, IVAS Decoder, IVAS Renderer, ISAR Encoder and ISAR Decoder only. The command line allows for ```-test-mode=<PARAM>``` for this functionality, examples:
+5 −5
Original line number Diff line number Diff line
@@ -8,7 +8,7 @@ cp IVAS_cod IVAS_cod_ref
cp IVAS_dec IVAS_dec_ref
cp IVAS_rend IVAS_rend_ref
cp ISAR_post_rend ISAR_post_rend_ref
python3 -m pytest -q tests/codec_be_on_mr_nonselection tests/renderer_short/test_renderer.py tests/split_rendering/test_split_rendering.py -v -n auto --update_ref 1 --create_ref --keep_files --html=report_cmd.html --self-contained-html
python3 -m pytest -q tests/codec_be_on_mr_nonselection tests/renderer/test_renderer_short.py tests/split_rendering/test_split_rendering.py -v -n auto --update_ref 1 --create_ref --keep_files --html=report_cmd.html --self-contained-html
python3 scripts/parse_commands.py report_cmd.html Readme_IVAS.txt
rm -rf testvec
mkdir testvec
@@ -24,7 +24,7 @@ cp -r scripts/switchPaths testvec
cp -r scripts/trajectories testvec
cp -r scripts/binauralRenderer_interface/binaural_renderers_hrtf_data testvec/binauralRenderer_interface
cp -r tests/ref testvec/testv/ref
cp -r tests/renderer_short/ref testvec/testv/renderer_short/ref
cp -r tests/renderer/ref testvec/testv/renderer_short/ref
cp -r tests/split_rendering/ref testvec/testv/split_rendering/ref
cp -r tests/split_rendering/renderer_configs testvec/testv/split_rendering/renderer_configs
cp -r tests/split_rendering/error_patterns testvec/testv/split_rendering/error_patterns
+118 −44
Original line number Diff line number Diff line
@@ -211,15 +211,16 @@ def validate_build_binaries(parser, build_path: str, build_label: str) -> None:
    for tag, binary in IVAS_Bins.items():
        candidate = os.path.join(abs_build_path, binary)
        candidate_exe = f"{candidate}.exe"
        exists = os.path.isfile(candidate) or (is_windows and os.path.isfile(candidate_exe))
        exists = os.path.isfile(candidate) or (
            is_windows and os.path.isfile(candidate_exe)
        )
        if not exists:
            shown = candidate_exe if is_windows else candidate
            missing.append(f"{tag}: {shown}")

    if missing:
        parser.error(
            f"Missing {build_label} binaries:\n  - " + "\n  - ".join(missing)
        )
        parser.error(f"Missing {build_label} binaries:\n  - " + "\n  - ".join(missing))


ReferenceMldFiles = {
    "ENC": "mld_ref_ENC.csv",
@@ -253,7 +254,9 @@ class MLDConformance:
        with open(self.failedCmdsFile, "r") as f:
            return sum(1 for line in f if line.strip())

    def appendRunlog(self, command: str = "", output: str = "", context: str = "") -> None:
    def appendRunlog(
        self, command: str = "", output: str = "", context: str = ""
    ) -> None:
        if not getattr(self, "logFile", None):
            return
        with open(self.logFile, "a") as fd:
@@ -266,7 +269,9 @@ class MLDConformance:
                if not output.endswith("\n"):
                    fd.write("\n")

    def appendFailed(self, command: str = "", output: str = "", context: str = "") -> None:
    def appendFailed(
        self, command: str = "", output: str = "", context: str = ""
    ) -> None:
        if not getattr(self, "failedCmdsFile", None):
            return
        with open(self.failedCmdsFile, "a") as fd:
@@ -297,7 +302,7 @@ class MLDConformance:
        if self.args.clean_output_dir and os.path.exists(self.outputDir):
            shutil.rmtree(self.outputDir, ignore_errors=False)
        os.makedirs(self.outputDir, exist_ok=True)
        subdirs = ["enc", "dec", "renderer_short", "split_rendering"]
        subdirs = ["enc", "dec", "renderer", "split_rendering"]
        for odir in subdirs:
            os.makedirs(os.path.join(self.testvDir, odir), exist_ok=True)
            os.makedirs(os.path.join(self.outputDir, odir), exist_ok=True)
@@ -643,14 +648,16 @@ class MLDConformance:
            )
            return (non_be, None, None, None)
        else:
            if not os.path.exists(testDesc.refOutput) or not os.path.exists(testDesc.dutOutput):
                msg = (
                    f"Missing file for compare: ref={testDesc.refOutput}, dut={testDesc.dutOutput}"
                )
            if not os.path.exists(testDesc.refOutput) or not os.path.exists(
                testDesc.dutOutput
            ):
                msg = f"Missing file for compare: ref={testDesc.refOutput}, dut={testDesc.dutOutput}"
                self.appendFailed(context=f"[{tag}:{dutPytestTag}] {msg}")
                return (None, None, (msg, ""), None)

            validate_err = self.validateAudioPairHeader(testDesc.refOutput, testDesc.dutOutput)
            validate_err = self.validateAudioPairHeader(
                testDesc.refOutput, testDesc.dutOutput
            )
            if validate_err:
                self.appendFailed(context=f"[{tag}:{dutPytestTag}] {validate_err}")
                return (None, None, (validate_err, ""), None)
@@ -705,17 +712,23 @@ class MLDConformance:
            )
            if rc != 0:
                return (None, None, (dutDecCmd, err_output), dutDecCmd)
            if not os.path.exists(refDecOutputFile) or not os.path.exists(dutDecOutputFile):
            if not os.path.exists(refDecOutputFile) or not os.path.exists(
                dutDecOutputFile
            ):
                msg = f"Missing file for compare: ref={refDecOutputFile}, dut={dutDecOutputFile}"
                self.appendFailed(context=f"[{tag}:{encPytestTag}] {msg}")
                return (None, None, (msg, ""), dutDecCmd)

            validate_err = self.validateAudioPairHeader(refDecOutputFile, dutDecOutputFile)
            validate_err = self.validateAudioPairHeader(
                refDecOutputFile, dutDecOutputFile
            )
            if validate_err:
                self.appendFailed(context=f"[{tag}:{encPytestTag}] {validate_err}")
                return (None, None, (validate_err, ""), dutDecCmd)

            non_be = int(not filecmp.cmp(refDecOutputFile, dutDecOutputFile, shallow=False))
            non_be = int(
                not filecmp.cmp(refDecOutputFile, dutDecOutputFile, shallow=False)
            )
            max_mld, mld_error = self.mld(
                tag, encPytestTag, refFile=refDecOutputFile, dutFile=dutDecOutputFile
            )
@@ -736,7 +749,9 @@ class MLDConformance:
            return (non_be, None, None, None)
        else:
            refDecOutputFile = testDesc.refOutput.replace(".splt.bit", ".wav")
            dutDecOutputFile = testDesc.dutOutput.replace(".splt.bit", "_CUT_REFDECODED.wav")
            dutDecOutputFile = testDesc.dutOutput.replace(
                ".splt.bit", "_CUT_REFDECODED.wav"
            )
            # Decode the encoded output with Reference ISAR decoder
            dutDecCmd = testDesc.refDecCmdline.split()
            for idx, cmd in enumerate(dutDecCmd):
@@ -754,17 +769,23 @@ class MLDConformance:
            )
            if rc != 0:
                return (None, None, (dutDecCmd, err_output), dutDecCmd)
            if not os.path.exists(refDecOutputFile) or not os.path.exists(dutDecOutputFile):
            if not os.path.exists(refDecOutputFile) or not os.path.exists(
                dutDecOutputFile
            ):
                msg = f"Missing file for compare: ref={refDecOutputFile}, dut={dutDecOutputFile}"
                self.appendFailed(context=f"[{tag}:{pytestTag}] {msg}")
                return (None, None, (msg, ""), dutDecCmd)

            validate_err = self.validateAudioPairHeader(refDecOutputFile, dutDecOutputFile)
            validate_err = self.validateAudioPairHeader(
                refDecOutputFile, dutDecOutputFile
            )
            if validate_err:
                self.appendFailed(context=f"[{tag}:{pytestTag}] {validate_err}")
                return (None, None, (validate_err, ""), dutDecCmd)

            non_be = int(not filecmp.cmp(refDecOutputFile, dutDecOutputFile, shallow=False))
            non_be = int(
                not filecmp.cmp(refDecOutputFile, dutDecOutputFile, shallow=False)
            )
            max_mld, mld_error = self.mld(
                tag, pytestTag, refFile=refDecOutputFile, dutFile=dutDecOutputFile
            )
@@ -857,7 +878,9 @@ class MLDConformance:
    ):
        # Run CUT Cmdline
        testPrefix = f"[{tag} {testIndex}/{totalTests}]"
        self.appendRunlog(context=self.formatTestHeader(testPrefix, "Running test", pyTestsTag))
        self.appendRunlog(
            context=self.formatTestHeader(testPrefix, "Running test", pyTestsTag)
        )
        testDesc = self.TestDesc[tag][pyTestsTag]
        rc, err_output = self.process(
            command=testDesc.dutCmdline,
@@ -884,28 +907,49 @@ class MLDConformance:
        errorDetails = None
        executedCommand = None
        if tag == "ENC":
            non_be, max_mld, errorDetails, executedCommand = self.analyseOneEncoderTest(tag, pyTestsTag)
            non_be, max_mld, errorDetails, executedCommand = self.analyseOneEncoderTest(
                tag, pyTestsTag
            )
        elif tag == "DEC":
            non_be, max_mld, errorDetails, executedCommand = self.analyseWavOutputTest(tag, pyTestsTag)
            non_be, max_mld, errorDetails, executedCommand = self.analyseWavOutputTest(
                tag, pyTestsTag
            )
        elif tag == "REND":
            non_be, max_mld, errorDetails, executedCommand = self.analyseWavOutputTest(tag, pyTestsTag)
            non_be, max_mld, errorDetails, executedCommand = self.analyseWavOutputTest(
                tag, pyTestsTag
            )
        elif tag == "ISAR_ENC":
            non_be, max_mld, errorDetails, executedCommand = self.analyseOneIsarEncoderTest(tag, pyTestsTag)
            non_be, max_mld, errorDetails, executedCommand = (
                self.analyseOneIsarEncoderTest(tag, pyTestsTag)
            )
        elif tag == "ISAR":
            non_be, max_mld, errorDetails, executedCommand = self.analyseWavOutputTest(tag, pyTestsTag)
            non_be, max_mld, errorDetails, executedCommand = self.analyseWavOutputTest(
                tag, pyTestsTag
            )
        else:
            assert False, f"Un-implemented Tag {tag}"

        if errorDetails is not None:
            if errorDetails:
                cmd, err_output = errorDetails
                self.appendFailed(context=header, command=cmd, output=(err_output or ""))
                self.appendFailed(
                    context=header, command=cmd, output=(err_output or "")
                )
            elif executedCommand:
                self.appendFailed(context=header, command=executedCommand)
            else:
                self.appendFailed(context=header)
            self.stats()
            return (testPrefix, pyTestsTag, "ERROR", None, errorDetails, executedCommand, None, None)
            return (
                testPrefix,
                pyTestsTag,
                "ERROR",
                None,
                errorDetails,
                executedCommand,
                None,
                None,
            )

        if self.args.be_test:
            verdict = "NON-BE" if non_be else "BE"
@@ -918,7 +962,16 @@ class MLDConformance:
                result_text = f"{verdict}, MLD_MAX={max_mld}"

        self.stats()
        return (testPrefix, pyTestsTag, "OK", result_text, None, executedCommand, verdict, max_mld)
        return (
            testPrefix,
            pyTestsTag,
            "OK",
            result_text,
            None,
            executedCommand,
            verdict,
            max_mld,
        )

    def analyseOneCommandFromTuple(self, args):
        return self.analyseOneCommand(*args)
@@ -944,9 +997,13 @@ class MLDConformance:
                    (tag, pyTestsTag, idx, self.totalTests)
                    for idx, pyTestsTag in enumerate(selectedTests, start=1)
                ]
                for testPrefix, pyTestsTag, rc, command, err_output in pool.imap_unordered(
                    self.runOneCommandFromTuple, args
                ):
                for (
                    testPrefix,
                    pyTestsTag,
                    rc,
                    command,
                    err_output,
                ) in pool.imap_unordered(self.runOneCommandFromTuple, args):
                    status = "OK" if rc == 0 else "ERROR"
                    print(
                        f"{testPrefix} Running test: {pyTestsTag} ... {status}",
@@ -1037,7 +1094,12 @@ class MLDConformance:
            verdict,
            test_max_mld,
        ):
            nonlocal command_fail_count, be_count, non_be_count, failure_count, worst_failure
            nonlocal \
                command_fail_count, \
                be_count, \
                non_be_count, \
                failure_count, \
                worst_failure

            if runStatus != "OK":
                command_fail_count += 1
@@ -1047,7 +1109,9 @@ class MLDConformance:
                non_be_count += 1
                if test_max_mld is not None and test_max_mld > corridor_threshold:
                    failure_count += 1
                    fail_header = self.formatTestHeader(testPrefix, "Analyzing test", pyTestsTag)
                    fail_header = self.formatTestHeader(
                        testPrefix, "Analyzing test", pyTestsTag
                    )
                    self.appendFailed(
                        context=(
                            fail_header
@@ -1099,7 +1163,9 @@ class MLDConformance:
                    (tag, pyTestsTag, idx, self.totalTests)
                    for idx, pyTestsTag in enumerate(selectedTests, start=1)
                ]
                for result in pool.imap_unordered(self.analyseOneCommandFromTuple, args):
                for result in pool.imap_unordered(
                    self.analyseOneCommandFromTuple, args
                ):
                    handle_test_result(*result)
        else:
            for idx, pyTestsTag in enumerate(selectedTests, start=1):
@@ -1190,7 +1256,11 @@ class MLDConformance:
            if emitConsole:
                print(f"{prefix}Failed command: {command}", flush=True)
                if c.stdout:
                    print(c.stdout, end="" if c.stdout.endswith("\n") else "\n", flush=True)
                    print(
                        c.stdout,
                        end="" if c.stdout.endswith("\n") else "\n",
                        flush=True,
                    )

        if returnOutput:
            return c.returncode, (c.stdout or "")
@@ -1320,7 +1390,10 @@ class MLDConformance:
                wavdiff_log_lines = []
                wavdiff_rows_omitted = 0
                for line in wavdiff_output.splitlines():
                    if re.match(r"^\s*[-+]?\d+(?:\.\d+)?;[-+]?\d+(?:\.\d+)?;[-+]?\d+(?:\.\d+)?\s*$", line):
                    if re.match(
                        r"^\s*[-+]?\d+(?:\.\d+)?;[-+]?\d+(?:\.\d+)?;[-+]?\d+(?:\.\d+)?\s*$",
                        line,
                    ):
                        wavdiff_rows_omitted += 1
                    else:
                        wavdiff_log_lines.append(line)
@@ -1372,7 +1445,9 @@ class MLDConformance:
            mldWithTags = np.column_stack(
                (
                    mldThisFile,
                    np.array([f"{pytestTag}-FRM{x:05d}" for x in range(mldThisFile.size)]),
                    np.array(
                        [f"{pytestTag}-FRM{x:05d}" for x in range(mldThisFile.size)]
                    ),
                )
            )
            with open(self.mldcsv[tag], "ab") as f:
@@ -1463,9 +1538,7 @@ class MLDConformance:
            maxDiff = float(diff.max()) if diff.size else 0.0
            corridor_failed = maxDiff > threshold
            if corridor_failed:
                msg = (
                    f"[{tag}] MLD corridor failed: max(dut-ref)={maxDiff} exceeds threshold={threshold}"
                )
                msg = f"[{tag}] MLD corridor failed: max(dut-ref)={maxDiff} exceeds threshold={threshold}"
                self.appendRunlog(context=msg)
                self.appendFailed(context=msg)
        else:
@@ -1577,7 +1650,9 @@ class MLDConformance:
                        all_ok = all_ok and corridor_ok
                        corridor_fail_count += int(not corridor_ok)
                    else:
                        missing_msg = f"Missing reference MLD file for {tag} : {refMldFile}"
                        missing_msg = (
                            f"Missing reference MLD file for {tag} : {refMldFile}"
                        )
                        print(f"\033[91m{missing_msg} \033[00m")
                        self.appendRunlog(context=missing_msg)
                        self.appendFailed(context=missing_msg)
@@ -1727,4 +1802,3 @@ if __name__ == "__main__":
        for tag in testTags:
            tag_status = "OK" if tag_results.get(tag, False) else "FAILED"
            print(f"[{tag}] {tag_status}")
+182 −123

File changed.

Preview size limit exceeded, changes collapsed.