diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 53cdd2871b48d1461c4bbbb5fdd558b6e5f6301d..55108e47c3d063b3cb85b92808267b12abd7c434 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1147,6 +1147,14 @@ pages:
         - *unzip-or-cat
         - mv complexity-StereoDmxEVS-stereo-in-mono-out-public ./public/
 
+        ### collect artifacts from coverage job
+        - job_id=$(python3 ci/get_id_of_last_job_occurence.py $branch coverage-test-on-main-scheduled)
+        - echo $job_id
+        - echo "$API_URL_BASE/$job_id/artifacts"
+        - curl --request GET "$API_URL_BASE/$job_id/artifacts" --output $ARTIFACTS
+        - *unzip-or-cat
+        - mv coverage ./public
+
         - cp ci/index-pages.html public/index.html
     artifacts:
         paths:
diff --git a/ci/get_id_of_last_job_occurence.py b/ci/get_id_of_last_job_occurence.py
index f6223d998063d6d92db1e0ea9e376607a1767658..ca6671104120a632dd11889ac66380c027070887 100755
--- a/ci/get_id_of_last_job_occurence.py
+++ b/ci/get_id_of_last_job_occurence.py
@@ -38,44 +38,45 @@ PAGE_SUFFIX = "&page={}"
 API_BASE_URL = "https://forge.3gpp.org/rep/api/v4/projects/49"
 
-parser = argparse.ArgumentParser()
-parser.add_argument("branch_name")
-parser.add_argument("job_name")
-
-args = parser.parse_args()
-
-branch_name = args.branch_name
-job_name = args.job_name
-
-
-job_id = -1
-# check last 500 pipelines max
-for page in range(100):
-    url_pls = API_BASE_URL + "/pipelines"
-
-    # need both suffixes here to descend through the pages and get also older pipelines
-    suffix = PER_PAGE_SUFFIX + PAGE_SUFFIX.format(page)
-    resp_pls = requests.get(url_pls + suffix)
-    for pl in resp_pls.json():
-        if pl["ref"] == branch_name:
-            url_jobs = url_pls + f"/{pl['id']}/jobs"
-
-            # only one of the suffixes here - this assumes only max of 50 jobs per pipeline
-            # so only one page needed
-            resp_jobs = requests.get(url_jobs + PER_PAGE_SUFFIX)
-
-            if job_name not in resp_jobs.text:
-                continue
-
-            # find actual job by name
-            for job in resp_jobs.json():
-                if job["name"] == job_name and job["status"] == "success":
-                    job_id = job["id"]
+def get_job_id(branch_name, job_name):
+    job_id = -1
+    # check last 500 pipelines max
+    for page in range(100):
+        url_pls = API_BASE_URL + "/pipelines"
+
+        # need both suffixes here to descend through the pages and get also older pipelines
+        suffix = PER_PAGE_SUFFIX + PAGE_SUFFIX.format(page)
+        resp_pls = requests.get(url_pls + suffix)
+        for pl in resp_pls.json():
+            if pl["ref"] == branch_name:
+                url_jobs = url_pls + f"/{pl['id']}/jobs"
+
+                # only one of the suffixes here - this assumes only max of 50 jobs per pipeline
+                # so only one page needed
+                resp_jobs = requests.get(url_jobs + PER_PAGE_SUFFIX)
+
+                if job_name not in resp_jobs.text:
+                    continue
+
+                # find actual job by name
+                for job in resp_jobs.json():
+                    if job["name"] == job_name and job["status"] == "success":
+                        job_id = job["id"]
+                        break
+            if job_id >= 0:
                 break
-            if job_id >= 0:
-                break
-    if job_id >= 0:
-        break
+        if job_id >= 0:
+            break
 
-print(job_id)
+    return job_id
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("branch_name")
+    parser.add_argument("job_name")
+
+    args = parser.parse_args()
+
+    job_id = get_job_id(args.branch_name, args.job_name)
+    print(job_id)
\ No newline at end of file
diff --git a/ci/index-pages.html b/ci/index-pages.html
index 0a2e73e78e5833b618991e1c189d79e54aa2b35b..9d60155e8fa9399e35cd4ff862cb99b780909309 100644
--- a/ci/index-pages.html
+++ b/ci/index-pages.html
@@ -16,4 +16,10 @@
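
For context, the new pages lines follow the pattern already used above them: look up the id of the latest successful coverage-test-on-main-scheduled run, download that job's artifacts archive through the GitLab API, unpack it, and move the coverage folder into public/. Below is a minimal Python sketch of the same download step; it assumes API_URL_BASE expands to the project's jobs endpoint (the actual value is defined elsewhere in the CI config), and it relies on the project being public, matching the unauthenticated curl call.

import io
import zipfile

import requests

# Assumption: API_URL_BASE expands to the project's jobs endpoint,
# e.g. "https://forge.3gpp.org/rep/api/v4/projects/49/jobs".
API_URL_BASE = "https://forge.3gpp.org/rep/api/v4/projects/49/jobs"

def download_job_artifacts(job_id, dest="."):
    # GET /projects/:id/jobs/:job_id/artifacts returns the artifacts as a
    # zip archive; no token is passed, which works only for public projects.
    resp = requests.get(f"{API_URL_BASE}/{job_id}/artifacts", timeout=60)
    resp.raise_for_status()
    with zipfile.ZipFile(io.BytesIO(resp.content)) as archive:
        archive.extractall(dest)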
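
Moving the lookup into get_job_id() behind an if __name__ == '__main__' guard also makes the script importable, so other CI tooling can reuse the lookup without spawning a subprocess. A sketch of such a call site, assuming it runs from the repository root; the sys.path tweak and the "main" branch name are illustrative, not part of the change.

import sys

sys.path.insert(0, "ci")  # make ci/get_id_of_last_job_occurence.py importable

from get_id_of_last_job_occurence import get_job_id

job_id = get_job_id("main", "coverage-test-on-main-scheduled")
if job_id < 0:
    sys.exit("no successful run of the job found on this branch")
print(job_id)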
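
One note on the pagination comments inside get_job_id: PER_PAGE_SUFFIX fixes the page size while PAGE_SUFFIX.format(page) walks back through pipeline history, and the jobs request deliberately fetches a single page on the assumption of at most 50 jobs per pipeline. The "last 500 pipelines" comment only matches range(100) for a page size of 5, while the jobs comment implies 50, so one of the two comments is likely stale. Also, GitLab's page parameter is 1-based, so starting the loop at 0 appears to fetch the newest page twice. A sketch of the same loop using requests' params argument, which sidesteps the suffix strings; the per_page value of 50 is an assumption, since the real value is defined above the hunk.

import requests

API_BASE_URL = "https://forge.3gpp.org/rep/api/v4/projects/49"

def iter_pipelines(max_pages=100, per_page=50):
    # yield pipelines newest-first, page by page
    for page in range(1, max_pages + 1):  # GitLab pages are 1-based
        resp = requests.get(
            f"{API_BASE_URL}/pipelines",
            params={"per_page": per_page, "page": page},
            timeout=60,
        )
        resp.raise_for_status()
        batch = resp.json()
        if not batch:  # ran out of pipelines before hitting max_pages
            return
        yield from batch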