Loading .gitlab-ci.yml +3 −4 Original line number Diff line number Diff line Loading @@ -734,10 +734,9 @@ coverage-test-on-main-scheduled: # create necessary environment - mkdir -p wmops/logs # get latest artifacts with previously generated javascript files - api_url=$CI_API_V4_URL/projects/$CI_PROJECT_ID/jobs/artifacts/$CI_COMMIT_REF_NAME/download?job=$CI_JOB_NAME - echo $api_url - 'curl --output artifacts.zip --header "JOB-TOKEN: $CI_JOB_TOKEN" "$api_url"' - job_id=$(python3 ci/get_id_of_last_job_occurence.py $CI_COMMIT_REF_NAME $CI_JOB_NAME) - echo $job_id - curl --request GET "https://forge.3gpp.org/rep/api/v4/projects/$CI_PROJECT_ID/jobs/$job_id/artifacts" --output artifacts.zip - cat artifacts.zip - unzip artifacts.zip || true # this may fail on first run, when there are no artifacts there and the zip file is actually just "404"-html - ls Loading ci/get_id_of_last_job_occurence.py 0 → 100755 +41 −0 Original line number Diff line number Diff line import argparse import requests PER_PAGE_SUFFIX = "?per_page=50" API_BASE_URL = "https://forge.3gpp.org/rep/api/v4/projects/49" parser = argparse.ArgumentParser() parser.add_argument("branch_name") parser.add_argument("job_name") args = parser.parse_args() branch_name = args.branch_name job_name = args.job_name job_id = -1 # check last 500 pipelines max for page in range(100): url_pls = API_BASE_URL + "/pipelines" resp_pls = requests.get(url_pls + PER_PAGE_SUFFIX) for pl in resp_pls.json(): if pl["ref"] == branch_name: url_jobs = url_pls + f"/{pl['id']}/jobs" resp_jobs = requests.get(url_jobs + PER_PAGE_SUFFIX) if job_name not in resp_jobs.text: continue # find actual job by name for job in resp_jobs.json(): if job["name"] == job_name: job_id = job["id"] break break if job_id >= 0: break print(job_id) Loading
.gitlab-ci.yml +3 −4 Original line number Diff line number Diff line Loading @@ -734,10 +734,9 @@ coverage-test-on-main-scheduled: # create necessary environment - mkdir -p wmops/logs # get latest artifacts with previously generated javascript files - api_url=$CI_API_V4_URL/projects/$CI_PROJECT_ID/jobs/artifacts/$CI_COMMIT_REF_NAME/download?job=$CI_JOB_NAME - echo $api_url - 'curl --output artifacts.zip --header "JOB-TOKEN: $CI_JOB_TOKEN" "$api_url"' - job_id=$(python3 ci/get_id_of_last_job_occurence.py $CI_COMMIT_REF_NAME $CI_JOB_NAME) - echo $job_id - curl --request GET "https://forge.3gpp.org/rep/api/v4/projects/$CI_PROJECT_ID/jobs/$job_id/artifacts" --output artifacts.zip - cat artifacts.zip - unzip artifacts.zip || true # this may fail on first run, when there are no artifacts there and the zip file is actually just "404"-html - ls Loading
"""Print the ID of the most recent occurrence of a CI job on a branch.

Walks the project's pipelines (newest first) through the GitLab REST API,
finds the newest pipeline on *branch_name* containing a job named
*job_name*, and prints that job's ID to stdout. Prints -1 when no such
job exists within the scanned pipelines.

Usage: get_id_of_last_job_occurence.py <branch_name> <job_name>
"""
import argparse

import requests

PER_PAGE = 50
# Scan at most PER_PAGE * MAX_PAGES = 5000 pipelines before giving up.
MAX_PAGES = 100
# Seconds; keeps a stuck API endpoint from hanging the CI job forever.
REQUEST_TIMEOUT = 30
API_BASE_URL = "https://forge.3gpp.org/rep/api/v4/projects/49"


def find_last_job_id(branch_name: str, job_name: str) -> int:
    """Return the ID of the newest job named *job_name* on *branch_name*.

    Returns -1 if the job is not found in any scanned pipeline.
    """
    url_pls = API_BASE_URL + "/pipelines"
    # GitLab pagination is 1-based. BUG FIX: the original loop never sent
    # the page number, so it re-fetched page 1 on every iteration.
    for page in range(1, MAX_PAGES + 1):
        resp_pls = requests.get(
            url_pls,
            params={"per_page": PER_PAGE, "page": page},
            timeout=REQUEST_TIMEOUT,
        )
        pipelines = resp_pls.json()
        if not pipelines:
            # Ran out of pipelines entirely — the job cannot be found.
            return -1
        for pl in pipelines:
            if pl["ref"] != branch_name:
                continue
            resp_jobs = requests.get(
                f"{url_pls}/{pl['id']}/jobs",
                params={"per_page": PER_PAGE},
                timeout=REQUEST_TIMEOUT,
            )
            for job in resp_jobs.json():
                if job["name"] == job_name:
                    return job["id"]
            # Pipeline on our branch but without the job (e.g. a partial
            # run) — keep scanning older pipelines instead of giving up.
    return -1


def main() -> None:
    """CLI entry point: parse args and print the found job ID (or -1)."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("branch_name")
    parser.add_argument("job_name")
    args = parser.parse_args()
    print(find_last_job_id(args.branch_name, args.job_name))


if __name__ == "__main__":
    main()