diff --git a/ci/get_id_of_last_job_occurence.py b/ci/get_id_of_last_job_occurence.py
index 50beac5bb7c96d9b657604fb562e870bade65d68..389c6ab7346387fd5397ec080b46b7c0c355518d 100755
--- a/ci/get_id_of_last_job_occurence.py
+++ b/ci/get_id_of_last_job_occurence.py
@@ -38,6 +38,8 @@ import requests
 PER_PAGE_SUFFIX = "?per_page=50"
 PAGE_SUFFIX = "&page={}"
 API_URL_TMPL = "https://forge.3gpp.org/rep/api/v4/projects/{}/pipelines"
+SCOPE_FAILED = "scope[]=failed"
+SCOPE_SUCCESS = "scope[]=success"
 
 
 def get_job_id(branch_name, job_name, project_id, success_only):
@@ -51,7 +53,11 @@ def get_job_id(branch_name, job_name, project_id, success_only):
     resp_pls = requests.get(url_pls + suffix)
     for pl in resp_pls.json():
         if pl["ref"] == branch_name:
-            url_jobs = url_pls + f"/{pl['id']}/jobs"
+            scope = f"?{SCOPE_SUCCESS}"
+            if not success_only:
+                scope += f"&{SCOPE_FAILED}"
+
+            url_jobs = url_pls + f"/{pl['id']}/jobs{scope}"
 
             # only one of the suffixes here - this assumes only max of 50 jobs per pipeline
             # so only one page needed
@@ -62,8 +68,7 @@ def get_job_id(branch_name, job_name, project_id, success_only):
 
             # find actual job by name
             for job in resp_jobs.json():
-                include_job = not success_only or job["status"] == "success"
-                if include_job and job["name"] == job_name:
+                if job["name"] == job_name:
                     job_id = job["id"]
                     break
             if job_id >= 0:
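
Below is a minimal standalone sketch of the query-string construction this change introduces, for anyone who wants to see the resulting URLs in isolation. The build_jobs_url helper and the example IDs are hypothetical and not part of the script; only the endpoint template and the scope[] constants come from the diff. It assumes the GitLab v4 jobs endpoint (GET /projects/:id/pipelines/:pipeline_id/jobs) accepts repeated scope[] parameters to filter jobs by status server-side, which is why the client-side status check could be dropped.

    # Hypothetical helper mirroring the URL construction in the diff above.
    API_URL_TMPL = "https://forge.3gpp.org/rep/api/v4/projects/{}/pipelines"
    SCOPE_FAILED = "scope[]=failed"
    SCOPE_SUCCESS = "scope[]=success"


    def build_jobs_url(project_id, pipeline_id, success_only):
        # Successful jobs are always requested; failed jobs are added to the
        # scope filter only when the caller did not ask for successes only.
        scope = f"?{SCOPE_SUCCESS}"
        if not success_only:
            scope += f"&{SCOPE_FAILED}"
        return API_URL_TMPL.format(project_id) + f"/{pipeline_id}/jobs{scope}"


    # Example output with made-up IDs (project 1234, pipeline 5678):
    #   .../projects/1234/pipelines/5678/jobs?scope[]=success                 (success_only=True)
    #   .../projects/1234/pipelines/5678/jobs?scope[]=success&scope[]=failed  (success_only=False)
    print(build_jobs_url(1234, 5678, success_only=True))
    print(build_jobs_url(1234, 5678, success_only=False))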