diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index bdb3b55b174a6b8447c0f2f3522a7b9b3b6f2ea7..1dabff9278a8d080b52bbbc5aa12c14adb9b4f0f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -2289,7 +2289,7 @@ coverage-test-on-main-scheduled:
&complexity-measurements-setup # create necessary environment
- mkdir -p wmops/logs
- - job_id=$(python3 ci/get_id_of_last_job_occurence.py $CI_COMMIT_REF_NAME $CI_JOB_NAME)
+ - job_id=$(python3 ci/get_id_of_last_job_occurence.py $CI_COMMIT_REF_NAME $CI_JOB_NAME $CI_PROJECT_ID --success_only)
- echo $job_id
- curl --request GET "https://forge.3gpp.org/rep/api/v4/projects/$CI_PROJECT_ID/jobs/$job_id/artifacts" --output artifacts.zip
- unzip artifacts.zip || true # this may fail on first run, when there are no artifacts there and the zip file is actually just "404"-html
diff --git a/ci/basop-pages/basop_index.html b/ci/basop-pages/basop_index.html
new file mode 100644
index 0000000000000000000000000000000000000000..4c5202ad352c5821de0e106eee7100ac95d8fcd5
--- /dev/null
+++ b/ci/basop-pages/basop_index.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+</head>
+<body>
+    <h1>Ivas BASOP code Development</h1>
+    <h2>Daily long testvector tests</h2>
+    <h2>Test Coverage</h2>
+    <p>tbd...</p>
+</body>
+</html>
diff --git a/ci/basop-pages/create_report_pages.py b/ci/basop-pages/create_report_pages.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca14442edb6f41fa6297d2ef310da0a7361e5a25
--- /dev/null
+++ b/ci/basop-pages/create_report_pages.py
@@ -0,0 +1,219 @@
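+"""
+Create an HTML report subpage that compares the MLD results of the current run
+of a CI job against the previous run, and write out the merged results as csv.
+"""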
+import csv
+import pathlib
+import argparse
+
+
+CSV_DELIM = ";"
+SUBPAGE_TMPL_CSS = """
+<style>
+    table, th, td {
+        border: 1px solid black;
+        border-collapse: collapse;
+        padding: 4px;
+    }
+</style>
+"""
+
+SUBPAGE_TMPL_HTML = """
+<body>
+<h1>Report for job {job_name}</h1>
+<p>Current run - id: {id_current}</p>
+<p>Previous run - id: {id_previous}</p>
+<p>Merged csv data</p>
+
+<p>Table is sorted by Difference in MLD.</p>
+
+<table>
+    <tr>
+        <th rowspan="2">Testcase</th>
+        <th colspan="2">MLD</th>
+        <th colspan="2">Max Abs Diff</th>
+    </tr>
+    <tr>
+        <th>{id_previous}</th>
+        <th>{id_current}</th>
+        <th>{id_previous}</th>
+        <th>{id_current}</th>
+    </tr>
+{table_body}
+</table>
+</body>
+"""
+TD_TMPL_NORMAL = "<td>{}</td>"
+TD_TMPL_INCREASE = "<td style='background-color: #ffcccc'>{}</td>"  # light red
+TD_TMPL_REDUCE = "<td style='background-color: #ccffcc'>{}</td>"  # light green
+TR_TMPL = "<tr>{}</tr>"
+
+# expected columns; the actual columns are filtered from the incoming data later.
+# This mainly controls the column order in the output table.
+COLUMNS = ["testcase", "Result", "MLD", "MAXIMUM ABS DIFF"]
+COLUMNS_GLOBAL = COLUMNS[:1]
+COLUMNS_DIFFERENTIAL = COLUMNS[1:]
+
+
+def create_subpage(
+ html_out,
+ csv_out,
+ csv_current: str,
+ csv_previous: str,
+ id_current: int,
+ id_previous: int,
+ job_name: str,
+):
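+    """
+    Create one HTML report subpage comparing the current and the previous run
+    of a job and write out the merged csv data it is based on.
+    """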
+ merged_reports = merge_and_cleanup_mld_reports(
+ csv_current, csv_previous, id_current, id_previous
+ )
+ write_out_csv(merged_reports, merged_reports[0].keys(), csv_out)
+ table_body = "\n".join(
+ tr_from_row(row, id_current, id_previous) for row in merged_reports
+ )
+ new_subpage = SUBPAGE_TMPL_CSS + SUBPAGE_TMPL_HTML.format(
+ id_current=id_current,
+ id_previous=id_previous,
+ table_body=table_body,
+ job_name=job_name,
+ )
+ with open(html_out, "w") as f:
+ f.write(new_subpage)
+
+
+def write_out_csv(data, col_names, outfile):
+    with open(outfile, "w", newline="") as f:
+        writer = csv.DictWriter(f, col_names, delimiter=CSV_DELIM)
+ writer.writeheader()
+ for row in data:
+ writer.writerow(row)
+
+
+def tr_from_row(row, id_current, id_previous):
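+    """Build one HTML table row comparing the previous and current values for a testcase."""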
+ tr = list()
+
+    # pre-filter columns to handle the case where new columns are added:
+    # only include columns that are present in both datasets
+ columns_global = [c for c in COLUMNS_GLOBAL if c in row]
+ diff_col_tmpl = "{}-{}"
+ incoming_cols = row.keys()
+ columns_differential = [
+ c
+ for c in COLUMNS_DIFFERENTIAL
+ if diff_col_tmpl.format(c, id_current) in incoming_cols
+ and diff_col_tmpl.format(c, id_previous) in incoming_cols
+ ]
+
+ for c in columns_global:
+        # currently only the "testcase" column - no comparison here, just a single value is used
+ tr.append(TD_TMPL_NORMAL.format(row[c]))
+ for c in columns_differential:
+ # this is for all columns where we compare between current and previous run
+ prev = row[f"{c}-{id_previous}"]
+ curr = row[f"{c}-{id_current}"]
+
+        # use red background if increase, green if decrease, white if same
+        td_tmpl = TD_TMPL_NORMAL
+        try:
+            if float(curr) > float(prev):
+                td_tmpl = TD_TMPL_INCREASE
+            elif float(curr) < float(prev):
+                td_tmpl = TD_TMPL_REDUCE
+        except ValueError:
+            # if we land here, one of the cells is not a number; this indicates a crash
+            # or some error in the scripts, so mark it red as well
+            td_tmpl = TD_TMPL_INCREASE
+
+ tr.append(td_tmpl.format(row[f"{c}-{id_previous}"]))
+ tr.append(td_tmpl.format(row[f"{c}-{id_current}"]))
+
+ return TR_TMPL.format("\n".join(tr))
+
+
+def merge_and_cleanup_mld_reports(
+ csv_current: str, csv_previous: str, id_current: int, id_previous: int
+):
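+    """
+    Merge the current and the previous csv report per testcase, sort by the
+    difference in MLD and strip the directory part from the testcase names.
+    """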
+ with open(csv_current) as f:
+ current_reader = csv.DictReader(f, delimiter=CSV_DELIM)
+ current = list(current_reader)
+ with open(csv_previous) as f:
+ previous = list(csv.DictReader(f, delimiter=CSV_DELIM))
+
+    # TODO: handle newly added testcases - for now assume that both runs contain the same testcases and columns
+ merge_key = "testcase"
+ other_keys = [k for k in current_reader.fieldnames if k != merge_key]
+ merged = merge_tables(
+ current, previous, id_current, id_previous, merge_key, other_keys
+ )
+
+ # TODO: sort on result as well
+ mld_col_curr = f"MLD-{id_current}"
+ mld_col_prev = f"MLD-{id_previous}"
+
+ # sort based on difference in MLD between current and previous run
+ # put cases with "None" at the top of the list
+ def sort_func(x):
+ vals_missing = ["None", ""]
+
+ if x[mld_col_curr] in vals_missing or x[mld_col_prev] in vals_missing:
+ return float("inf")
+
+ return float(x[mld_col_curr]) - float(x[mld_col_prev])
+
+ merged = sorted(merged, key=sort_func, reverse=True)
+
+    # remove the unnecessary full path from the testcase names, keep only the file name
+ for row in merged:
+ row["testcase"] = pathlib.Path(row["testcase"]).name
+
+ return merged
+
+
+def merge_tables(tbl1, tbl2, suffix1, suffix2, merge_key, other_keys):
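+    """
+    Merge two tables row-wise on merge_key; every other column name gets the
+    respective suffix appended.
+    """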
+ merged = list()
+
+ for row1 in tbl1:
+ new_row = dict()
+ for key in other_keys:
+ new_row[f"{key}-{suffix1}"] = row1[key]
+
+ for row2 in tbl2:
+ if row1[merge_key] == row2[merge_key]:
+ new_row[merge_key] = row1[merge_key]
+ for key in other_keys:
+ new_row[f"{key}-{suffix2}"] = row2[key]
+ break
+
+ merged.append(new_row)
+
+ return merged
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("html_out")
+ parser.add_argument("csv_out")
+ parser.add_argument("csv_current")
+ parser.add_argument("csv_previous")
+ parser.add_argument("id_current", type=int)
+ parser.add_argument("id_previous", type=int)
+ parser.add_argument("job_name")
+ args = parser.parse_args()
+
+ create_subpage(
+ args.html_out,
+ args.csv_out,
+ args.csv_current,
+ args.csv_previous,
+ args.id_current,
+ args.id_previous,
+ args.job_name,
+ )
diff --git a/ci/get_id_of_last_job_occurence.py b/ci/get_id_of_last_job_occurence.py
index 449902f50a12919897ef0c0aaac41c197cd59f05..50beac5bb7c96d9b657604fb562e870bade65d68 100755
--- a/ci/get_id_of_last_job_occurence.py
+++ b/ci/get_id_of_last_job_occurence.py
@@ -1,49 +1,50 @@
#!/usr/bin/env python3
"""
- (C) 2022-2024 IVAS codec Public Collaboration with portions copyright Dolby International AB, Ericsson AB,
- Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
- Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
- Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
- contributors to this repository. All Rights Reserved.
-
- This software is protected by copyright law and by international treaties.
- The IVAS codec Public Collaboration consisting of Dolby International AB, Ericsson AB,
- Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
- Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
- Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
- contributors to this repository retain full ownership rights in their respective contributions in
- the software. This notice grants no license of any kind, including but not limited to patent
- license, nor is any license granted by implication, estoppel or otherwise.
-
- Contributors are required to enter into the IVAS codec Public Collaboration agreement before making
- contributions.
-
- This software is provided "AS IS", without any express or implied warranties. The software is in the
- development stage. It is intended exclusively for experts who have experience with such software and
- solely for the purpose of inspection. All implied warranties of non-infringement, merchantability
- and fitness for a particular purpose are hereby disclaimed and excluded.
-
- Any dispute, controversy or claim arising under or in relation to providing this software shall be
- submitted to and settled by the final, binding jurisdiction of the courts of Munich, Germany in
- accordance with the laws of the Federal Republic of Germany excluding its conflict of law rules and
- the United Nations Convention on Contracts on the International Sales of Goods.
+(C) 2022-2024 IVAS codec Public Collaboration with portions copyright Dolby International AB, Ericsson AB,
+Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
+Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
+Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
+contributors to this repository. All Rights Reserved.
+
+This software is protected by copyright law and by international treaties.
+The IVAS codec Public Collaboration consisting of Dolby International AB, Ericsson AB,
+Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V., Huawei Technologies Co. LTD.,
+Koninklijke Philips N.V., Nippon Telegraph and Telephone Corporation, Nokia Technologies Oy, Orange,
+Panasonic Holdings Corporation, Qualcomm Technologies, Inc., VoiceAge Corporation, and other
+contributors to this repository retain full ownership rights in their respective contributions in
+the software. This notice grants no license of any kind, including but not limited to patent
+license, nor is any license granted by implication, estoppel or otherwise.
+
+Contributors are required to enter into the IVAS codec Public Collaboration agreement before making
+contributions.
+
+This software is provided "AS IS", without any express or implied warranties. The software is in the
+development stage. It is intended exclusively for experts who have experience with such software and
+solely for the purpose of inspection. All implied warranties of non-infringement, merchantability
+and fitness for a particular purpose are hereby disclaimed and excluded.
+
+Any dispute, controversy or claim arising under or in relation to providing this software shall be
+submitted to and settled by the final, binding jurisdiction of the courts of Munich, Germany in
+accordance with the laws of the Federal Republic of Germany excluding its conflict of law rules and
+the United Nations Convention on Contracts on the International Sales of Goods.
"""
import argparse
import requests
+
PER_PAGE_SUFFIX = "?per_page=50"
PAGE_SUFFIX = "&page={}"
-API_BASE_URL = "https://forge.3gpp.org/rep/api/v4/projects/49"
+API_URL_TMPL = "https://forge.3gpp.org/rep/api/v4/projects/{}/pipelines"
-def get_job_id(branch_name, job_name):
+def get_job_id(branch_name, job_name, project_id, success_only):
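+    """
+    Return the id of the most recent occurrence of job_name on branch_name, or -1 if none is found.
+    If success_only is set, only jobs with status 'success' are considered.
+    """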
job_id = -1
# check last 500 pipelines max
for page in range(100):
- url_pls = API_BASE_URL + "/pipelines"
+ url_pls = API_URL_TMPL.format(project_id)
# need both suffixes here to descend through the pages and get also older pipelines
suffix = PER_PAGE_SUFFIX + PAGE_SUFFIX.format(page)
@@ -61,7 +62,8 @@ def get_job_id(branch_name, job_name):
# find actual job by name
for job in resp_jobs.json():
- if job["name"] == job_name and job["status"] == "success":
+ include_job = not success_only or job["status"] == "success"
+ if include_job and job["name"] == job_name:
job_id = job["id"]
break
if job_id >= 0:
@@ -75,10 +77,12 @@ def get_job_id(branch_name, job_name):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
- parser.add_argument("branch_name")
- parser.add_argument("job_name")
+ parser.add_argument("branch_name", help="Name of the branch to search on")
+ parser.add_argument("job_name", help="Name of the job to get the id of")
+ parser.add_argument("project_id", help="ID of project to search in", type=int)
+ parser.add_argument("--success_only", help="Only include jobs with status 'success'", action="store_true")
args = parser.parse_args()
- job_id = get_job_id(args.branch_name, args.job_name)
+ job_id = get_job_id(args.branch_name, args.job_name, args.project_id, args.success_only)
print(job_id)
diff --git a/ci/setup_pages.py b/ci/setup_pages.py
index 10a2e9e84ecf2f5ad1a351ca2cb040e0f5a1b569..4754d09f5c8d834e9f664aa63a73377b60473668 100755
--- a/ci/setup_pages.py
+++ b/ci/setup_pages.py
@@ -3,10 +3,16 @@ import os
import pathlib
import subprocess
import sys
+import shutil
+from tempfile import TemporaryDirectory
from get_id_of_last_job_occurence import get_job_id
-JOBS = [
+PROJECT_ID_FLOAT_REPO = 49
+PROJECT_ID_BASOP_REPO = 77
+
+
+JOBS_FLOAT_REPO = [
"complexity-stereo-in-stereo-out",
"complexity-ism-in-binaural-out",
"complexity-sba-hoa3-in-hoa3-out",
@@ -15,44 +21,81 @@ JOBS = [
"complexity-StereoDmxEVS-stereo-in-mono-out",
"coverage-test-on-main-scheduled",
]
+JOBS_BASOP_REPO = [
+ "ivas-pytest-mld-long-dec",
+]
+
+JOBS_FOR_PROJECT_ID = {
+ PROJECT_ID_FLOAT_REPO: JOBS_FLOAT_REPO,
+ PROJECT_ID_BASOP_REPO: JOBS_BASOP_REPO,
+}
+
ARTIFACTS = "artifacts.zip"
API_URL_BASE = "https://forge.3gpp.org/rep/api/v4/projects/{}/jobs"
-PUBLIC = "./public"
+PUBLIC_FOLDER = pathlib.Path("./public").absolute()
def main():
-    public_folder = pathlib.Path(PUBLIC)
-    public_folder.mkdir()
+    PUBLIC_FOLDER.mkdir()
+    project_id = int(os.environ["CI_PROJECT_ID"])
+ jobs = JOBS_FOR_PROJECT_ID[project_id]
+ success_only = project_id == PROJECT_ID_FLOAT_REPO
+ failed_count = get_artifacts_for_jobs_and_return_num_failed(
+ jobs, project_id, success_only
+ )
+ if failed_count == len(jobs):
+ print("Artifact collection failed for all jobs to check.")
+ sys.exit(1)
+
+ index_html = PUBLIC_FOLDER.joinpath("index.html")
+ if project_id == PROJECT_ID_FLOAT_REPO:
+ src = pathlib.Path("ci/index-pages.html").absolute()
+ shutil.move(src, index_html)
+ elif project_id == PROJECT_ID_BASOP_REPO:
+ src = pathlib.Path("ci/basop-pages/basop_index.html").absolute()
+ shutil.move(src, index_html)
+
+ sys.exit(0)
+
+
+def get_artifacts_for_jobs_and_return_num_failed(
+ jobs: list, project_id: int, success_only: bool
+) -> int:
+    """
+    Download the artifacts for all given jobs and move their contents into the public folder.
+
+    jobs: list of the job names to fetch artifacts for
+    project_id: id of the project to search the jobs in
+    success_only: only consider job runs that finished with status 'success'
+
+    Returns the number of jobs for which artifact collection failed.
+    """
failed_count = 0
- for job in JOBS:
- job_id = get_job_id(os.environ["CI_COMMIT_REF_NAME"], job)
+
+ for job in jobs:
+        job_id = get_job_id(os.environ["CI_DEFAULT_BRANCH"], job, project_id, success_only)
+
print(f"{job_id} - {job}")
try:
-            curl_for_artifacts(job_id)
-            job_public = job + "-public"
-            if job == "coverage-test-on-main-scheduled":
-                job_public = "coverage"
-                pathlib.Path("coverage_stv").rename(
-                    public_folder.joinpath("coverage_stv")
-                )
-            pathlib.Path(job_public).rename(public_folder.joinpath(job_public))
+            with TemporaryDirectory() as tmp_dir:
+                curl_for_artifacts(job_id, tmp_dir)
+                tmp_dir = pathlib.Path(tmp_dir)
+                for artifact in tmp_dir.iterdir():
+                    src = tmp_dir.joinpath(artifact).absolute()
+                    dst = PUBLIC_FOLDER.joinpath(artifact.name)
+                    print(f"{src} -> {dst}")
+                    shutil.move(src, dst)
except subprocess.CalledProcessError:
print(f"Could not get artifacts for {job}")
failed_count += 1
- if failed_count == len(JOBS):
- sys.exit(1)
-
- pathlib.Path("ci/index-pages.html").rename(public_folder.joinpath("index.html"))
- sys.exit(0)
+ return failed_count
-def curl_for_artifacts(job_id):
+def curl_for_artifacts(job_id: int, exdir: str):
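+    """Download the artifacts archive of the given job and unzip it into exdir."""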
cmd = [
"curl",
"--request",
@@ -61,6 +104,7 @@ def curl_for_artifacts(job_id):
"--output",
ARTIFACTS,
]
+ print(cmd)
subprocess.run(cmd, check=True)
# check for valid archive (if not, it is likely a 404 page, then display that)
@@ -73,7 +117,7 @@ def curl_for_artifacts(job_id):
raise subprocess.CalledProcessError(-1, "Unzip check failed")
# do the actual unzipping
- cmd = ["unzip", ARTIFACTS]
+ cmd = ["unzip", ARTIFACTS, "-d", exdir]
subprocess.run(cmd, check=True)