Commit 6a9d926a authored by Vladimir Malenovsky's avatar Vladimir Malenovsky
Browse files

introduce --regenerate-mld-ref

parent 3793bbd7
Loading
Loading
Loading
Loading
Loading
+64 −40
Original line number Diff line number Diff line
@@ -394,18 +394,26 @@ class MLDConformance:
            .replace(".fer", "")
            .replace("_cut", "")
        )
        return decInput.split(".")[-2]
        # Remove xxxxx.wav_ from the beginning
        if ".wav_" in decInput:
            decInput = decInput.split(".wav_", 1)[1]
        # Split by last dot
        return decInput.rsplit(".", 1)[0]

    def getIsarDecPytestTag(self, command: str) -> str:
        """Return the pytest tag for an ISAR decoder command.

        The tag is the third-from-last dot-separated component of the
        basename of the file passed via the ``-i`` option.

        :param command: full command line string containing a ``-i <file>`` option
        :raises AssertionError: if the command has no ``-i`` option
        """
        tokens = command.split()
        if "-i" not in tokens:
            # Raise explicitly so the check survives running under `python -O`.
            raise AssertionError(f"No match found for {command}")
        input_file = tokens[tokens.index("-i") + 1]
        return os.path.basename(input_file).split(".")[-3]

    def getEncPytestTag(self, command: str) -> str:
        """Return the pytest tag for an encoder command.

        The tag is derived from the basename of the command's last token:
        any leading ``xxxxx.wav_`` prefix is stripped, then the final
        extension (text after the last dot) is removed.

        :param command: full encoder command line; last token is the output file
        """
        basename = os.path.basename(command.split()[-1])
        # Remove xxxxx.wav_ from the beginning
        if ".wav_" in basename:
            basename = basename.split(".wav_", 1)[1]
        # Split by last dot
        return basename.rsplit(".", 1)[0]

    def getIsarEncPytestTag(self, command: str) -> str:
        """Return the pytest tag for an ISAR encoder command.

        The tag is the third-from-last dot-separated component of the
        basename of the command's last token.
        """
        last_token = command.split()[-1]
        name_parts = os.path.basename(last_token).split(".")
        return name_parts[-3]
@@ -1021,7 +1029,7 @@ class MLDConformance:
                    if runStatus == "OK":
                        if not analysisResult:
                            print(
                                f"{testPrefix} Analyzing test: {pyTestsTag} ... Command executed successfully but MLD analysis failed!",
                                f"{testPrefix} Analyzing test: {pyTestsTag} ... MLD analysis failed!",
                                flush=True,
                            )
                        else:
@@ -1054,7 +1062,7 @@ class MLDConformance:
            max_mld_value = None
        else:
            analysis_ok, corridor_fail_count, max_mld_value = self.doAnalysis(
                selectTag=tag, corridor=True
                selectTag=tag
            )

        failed_after = self.getFailedCommandCount()
@@ -1062,6 +1070,9 @@ class MLDConformance:
        max_mld_text = (
            f", MAX_MLD={max_mld_value}" if max_mld_value is not None else ""
        )
        if self.args.regenerate_mld_ref:
            return failed_delta == 0 and analysis_ok

        if failed_delta == 0 and non_be_count == 0 and analysis_ok:
            print(f"[{tag}] OK{max_mld_text}")
            return True
@@ -1232,7 +1243,7 @@ class MLDConformance:
            mldWithTags = np.column_stack(
                (
                    mldThisFile,
                    np.array([f"{pytestTag}-FRM{x}" for x in range(mldThisFile.size)]),
                    np.array([f"{pytestTag}-FRM{x:05d}" for x in range(mldThisFile.size)]),
                )
            )
            with open(self.mldcsv[tag], "ab") as f:
@@ -1322,29 +1333,20 @@ class MLDConformance:
            diff = dutMLD - refMLD
            maxDiff = float(diff.max()) if diff.size else 0.0
            corridor_failed = maxDiff > threshold
            maxMLD = np.maximum(dutMLD, refMLD)
            mldWithTags = np.column_stack((maxMLD, refTags))
        else:
            # For any mismatch, emit mld_ref2 directly from DUT values/tags.
            maxDiff = float("nan")
            mldWithTags = np.column_stack((dutMLD, dutTags))
            corridor_failed = True

        if corridor_failed:
            new_mld_dir = os.path.join(
                self.testvDir, "mld_ref2"
            ref_count = refMLD.shape[0]
            dut_count = dutMLD.shape[0]
            ref_preview = ", ".join(refTags[:3]) if ref_count else "<empty>"
            dut_preview = ", ".join(dutTags[:3]) if dut_count else "<empty>"
            print(
                f"\033[93mWarning: {tag} corridor comparison skipped because reference and DUT frame tags do not match "
                f"(ref_count={ref_count}, dut_count={dut_count}, ref_first=[{ref_preview}], dut_first=[{dut_preview}]).\033[00m"
            )
            if not os.path.exists(new_mld_dir):
                os.makedirs(new_mld_dir, exist_ok=True)
            corridor_failed = True

            refMldFile2 = os.path.join(
                self.testvDir, "mld_ref2", ReferenceMldFiles[tag]
            )
            with open(refMldFile2, "w") as f:
                np.savetxt(f, mldWithTags, fmt="%s", delimiter=",")
        return not corridor_failed

    def doAnalysis(self, selectTag="all", corridor=False):
    def doAnalysis(self, selectTag="all"):
        all_ok = True
        corridor_fail_count = 0
        max_mld_value = None
@@ -1402,7 +1404,17 @@ class MLDConformance:
                )
                print("##########################################################\n")

                if corridor:
                if self.args.regenerate_mld_ref:
                    # Directly write DUT MLD values to mld_ref2 without reference comparison
                    new_mld_dir = os.path.join(self.testvDir, "mld_ref2")
                    if not os.path.exists(new_mld_dir):
                        os.makedirs(new_mld_dir, exist_ok=True)
                    refMldFile2 = os.path.join(
                        self.testvDir, "mld_ref2", ReferenceMldFiles[tag]
                    )
                    with open(refMldFile2, "w") as f:
                        np.savetxt(f, mdlCutWithTags, fmt="%s", delimiter=",")
                else:
                    refMldFile = os.path.join(
                        self.testvDir, "mld_ref", ReferenceMldFiles[tag]
                    )
@@ -1484,8 +1496,9 @@ if __name__ == "__main__":
    parser.add_argument(
        "--test-mode",
        type=str,
        default="ALL",
        help='Choose tests to run ["ENC", "DEC", "REND", "ISAR", "ISAR_ENC", "ALL"]',
        default=None,
        choices=["ENC", "DEC", "REND", "ISAR", "ISAR_ENC"],
        help='Choose one test group to run ["ENC", "DEC", "REND", "ISAR", "ISAR_ENC"]. If omitted, all are run.',
    )
    parser.add_argument(
        "--be-test",
@@ -1518,6 +1531,12 @@ if __name__ == "__main__":
        action="store_true",
        help="Do not run DUT, use existing mld and bitdiff stats files to generate analysis only",
    )
    parser.add_argument(
        "--regenerate-mld-ref",
        default=False,
        action="store_true",
        help="Run analysis and unconditionally regenerate mld_ref2 files for all tags",
    )
    args = parser.parse_args()

    if not os.path.isdir(args.testvecDir):
@@ -1525,9 +1544,9 @@ if __name__ == "__main__":
            f"--testvecDir does not exist or is not a directory: {os.path.abspath(args.testvecDir)}"
        )

    requires_cut_bins = not args.analyse and not args.report_only
    requires_cut_bins = not args.analyse and not args.report_only and not args.regenerate_mld_ref
    requires_ref_bins = (
        (args.analyse and not args.be_test) or args.regenerate_enc_refs
        (args.analyse and not args.be_test) or args.regenerate_enc_refs or args.regenerate_mld_ref
    ) and not args.report_only

    if requires_cut_bins:
@@ -1552,11 +1571,15 @@ if __name__ == "__main__":
        conformance.runReferenceGeneration(encTag="ENC")
        sys.exit(0)

    testTags = IVAS_Bins.keys() if args.test_mode == "ALL" else [args.test_mode]
    # If --regenerate-mld-ref is set, treat as --analyse
    if args.regenerate_mld_ref:
        args.analyse = True

    testTags = IVAS_Bins.keys() if args.test_mode is None else [args.test_mode]
    tag_results = {}
    for tag in testTags:
        if args.report_only:
            tag_ok, _, _ = conformance.doAnalysis(selectTag=tag, corridor=True)
            tag_ok, _, _ = conformance.doAnalysis(selectTag=tag)
        elif not args.analyse:
            tag_ok = conformance.runTag(tag)
        else:
@@ -1564,6 +1587,7 @@ if __name__ == "__main__":

        tag_results[tag] = tag_ok

    if not args.regenerate_mld_ref:
        print("Summary of results:")
        for tag in testTags:
            tag_status = "OK" if tag_results.get(tag, False) else "FAILED"