diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a3fa4217b7aefb21f1be5bf9789acd8038432e70..09b796c4f3ebad9a8b6b67515334a964784bc642 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,8 +28,8 @@ variables: MERGE_TARGET_ENCODER_PATH_FOR_BUILD_DO_NOT_MODIFY: "./IVAS_cod_merge_target" MERGE_TARGET_DECODER_PATH_FOR_BUILD_DO_NOT_MODIFY: "./IVAS_dec_merge_target" LEVEL_SCALING: "1.0" - IVAS_PIPELINE_NAME: '' - BASOP_CI_BRANCH_PC_REPO: "basop-ci-branch" + IVAS_PIPELINE_NAME: "" + BASOP_CI_BRANCH_PC_REPO: "kiene/tmp-branch-for-ltv-split-testing" PRM_FILES: "scripts/config/self_test.prm scripts/config/self_test_ltv.prm" TESTCASE_TIMEOUT_STV: 900 TESTCASE_TIMEOUT_LTV: 2400 @@ -44,6 +44,7 @@ variables: ENCODER_TEST: "" DELTA_ODG: "" COMPARE_DMX: "" + SPLIT_COMPARISON: "" SKIP_REGRESSION_CHECK: "" FAILED_TESTCASES_LIST: "failed-testcases.txt" ERRORS_TESTCASES_LIST: "errors-testcases.txt" @@ -51,81 +52,80 @@ variables: FLOAT_REF_COMMIT_FILE: "float-ref-git-sha" CUT_COMMIT_FILE: "CuT-git-sha" MERGE_TARGET_COMMIT_FILE: "merge-target-git-sha" + MEASURES_FOR_REPORT: "MLD MAX_ABS_DIFF MIN_SSNR MIN_ODG" MANUAL_PIPELINE_TYPE: description: "Type for the manual pipeline run. Use 'pytest-compare' to run comparison test against reference float codec." - value: 'default' + value: "default" options: - - 'default' - - 'pytest-compare' - - 'pytest-compare-enc-dmx' - - 'pytest-compare-long' - - 'pytest-compare-to-input' - - 'pytest-saturation-smoke-test' - - 'evs-26444' - - 'sanitizer' - - 'pytest-renderer' - - 'complexity' - - 'coverage' - - 'voip-be-test' - - 'peaq-enc-passthrough' - + - "default" + - "pytest-compare" + - "pytest-compare-enc-dmx" + - "pytest-compare-long" + - "pytest-compare-to-input" + - "pytest-saturation-smoke-test" + - "evs-26444" + - "sanitizer" + - "pytest-renderer" + - "complexity" + - "coverage" + - "voip-be-test" + - "peaq-enc-passthrough" default: interruptible: true # Make all jobs by default interruptible workflow: - name: '$IVAS_PIPELINE_NAME' + name: "$IVAS_PIPELINE_NAME" rules: # see https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push" when: never - if: $CI_PIPELINE_SOURCE == 'merge_request_event' variables: - IVAS_PIPELINE_NAME: 'MR pipeline: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME' + IVAS_PIPELINE_NAME: "MR pipeline: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME" ### disabled for now because pipeline cd is redundant with MR pipeline with current workflow # - if: $CI_PIPELINE_SOURCE == 'push' && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH # Pushes to main # variables: # IVAS_PIPELINE_NAME: 'Push pipeline: $CI_COMMIT_BRANCH' - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'default' # for testing variables: - IVAS_PIPELINE_NAME: 'Web run pipeline: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Web run pipeline: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'pytest-compare' variables: - IVAS_PIPELINE_NAME: 'Run comparison tools against float ref: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Run comparison tools against float ref: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'pytest-compare-enc-dmx' variables: - IVAS_PIPELINE_NAME: 'Run encoder dmx comparison against float ref: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Run encoder dmx comparison against float ref: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'pytest-compare-long' variables: - 
IVAS_PIPELINE_NAME: 'Run comparison tools against float ref (long test vectors): $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Run comparison tools against float ref (long test vectors): $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'pytest-compare-to-input' variables: - IVAS_PIPELINE_NAME: 'Run comparison tools against input (pass-through only): $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Run comparison tools against input (pass-through only): $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'pytest-saturation-smoke-test' variables: - IVAS_PIPELINE_NAME: 'Run saturation smoke-test: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Run saturation smoke-test: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'evs-26444' variables: - IVAS_PIPELINE_NAME: 'EVS 26.444 test: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "EVS 26.444 test: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'sanitizer' variables: - IVAS_PIPELINE_NAME: 'Short testvectors sanitizers' + IVAS_PIPELINE_NAME: "Short testvectors sanitizers" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'pytest-renderer' variables: - IVAS_PIPELINE_NAME: 'Renderer test: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Renderer test: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'complexity' variables: - IVAS_PIPELINE_NAME: 'Complexity Measurement on $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Complexity Measurement on $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'voip-be-test' variables: - IVAS_PIPELINE_NAME: 'Voip BE test on $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "Voip BE test on $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'web' && $MANUAL_PIPELINE_TYPE == 'peaq-enc-passthrough' variables: - IVAS_PIPELINE_NAME: 'PEAQ encoder pass-through test: $CI_COMMIT_BRANCH' + IVAS_PIPELINE_NAME: "PEAQ encoder pass-through test: $CI_COMMIT_BRANCH" - if: $CI_PIPELINE_SOURCE == 'schedule' # Scheduled in any branch variables: - IVAS_PIPELINE_NAME: 'Scheduled pipeline: $CI_COMMIT_BRANCH' - + IVAS_PIPELINE_NAME: "Scheduled pipeline: $CI_COMMIT_BRANCH" stages: - .pre @@ -159,8 +159,8 @@ stages: .activate-debug-mode-info-if-set: &activate-debug-mode-info-if-set - if [ "$BUILD_WITH_DEBUG_MODE_INFO" = "true" ]; then - - sed -i.bak -e "s/\/\*\ *\(#define\ *DEBUGGING\ *\)\*\//\1/g" lib_com/options.h - - sed -i.bak -e "s/\/\*\ *\(#define\ *DEBUG_MODE_INFO\ *\)\*\//\1/g" lib_com/options.h + - sed -i.bak -e "s/\/\*\ *\(#define\ *DEBUGGING\ *\)\*\//\1/g" lib_com/options.h + - sed -i.bak -e "s/\/\*\ *\(#define\ *DEBUG_MODE_INFO\ *\)\*\//\1/g" lib_com/options.h - fi .build-float-ref-binaries: &build-float-ref-binaries @@ -196,10 +196,10 @@ stages: - git rev-parse HEAD > $MERGE_TARGET_COMMIT_FILE - git checkout $current_commit_sha -.build-float-ref-and-dut-binaries: &build-float-ref-and-dut-binaries -### build reference binaries +.build-float-ref-and-dut-binaries: + &build-float-ref-and-dut-binaries ### build reference binaries - *build-float-ref-binaries -### build dut binaries + ### build dut binaries - *activate-debug-mode-info-if-set - make clean - make -j @@ -215,12 +215,12 @@ stages: - enc_stats_arg="" - if [ "$ENCODER_TEST" = "true" ]; then - - enc_stats_arg="--enc_stats" + - enc_stats_arg="--enc_stats" - fi - enc_dmx_arg="" - if [ "$COMPARE_DMX" = "true" ]; then - - enc_dmx_arg="--compare_enc_dmx" + - enc_dmx_arg="--compare_enc_dmx" - fi - python3 -m pytest 
$TEST_SUITE -v --update_ref 1 $enc_stats_arg $enc_dmx_arg --create_ref -n auto --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH || exit_code=$? @@ -252,11 +252,11 @@ stages: - echo $CI_COMMIT_SHA - echo $CI_MERGE_REQUEST_TARGET_BRANCH_NAME - commits_behind_count=$(git rev-list --count $CI_COMMIT_SHA..origin/$CI_MERGE_REQUEST_TARGET_BRANCH_NAME) - + .copy-ltv-files-to-testv-dir: ©-ltv-files-to-testv-dir - - cp "$LTV_DIR"/*.wav scripts/testv/ - - cp "$LTV_DIR"/*.met scripts/testv/ - - cp "$LTV_DIR"/*.csv scripts/testv/ + - cp "$LTV_DIR"/*.wav scripts/testv/ + - cp "$LTV_DIR"/*.met scripts/testv/ + - cp "$LTV_DIR"/*.csv scripts/testv/ .activate-Werror-linux: &activate-Werror-linux - sed -i.bak "s/^# \(CFLAGS += -Werror\)/\1/" Makefile @@ -351,9 +351,9 @@ stages: - ivas-windows .print-results-banner: &print-results-banner - - set +x - - echo "" - - echo -e "==================================================================================================================\n================================================== TEST RESULTS ==================================================\n==================================================================================================================\n" + - set +x + - echo "" + - echo -e "==================================================================================================================\n================================================== TEST RESULTS ==================================================\n==================================================================================================================\n" # template for test jobs on linux that need the TESTV_DIR .test-job-linux-needs-testv-dir: @@ -370,42 +370,47 @@ stages: variables: # keep "mld" in artifact name for backwards compatibility reasons CSV_ARTIFACT_NAME: "mld--$CI_JOB_NAME-$CI_JOB_ID--sha-$CI_COMMIT_SHORT_SHA.csv" + CSV_ARTIFACT_SPLIT: "mld--split--$CI_JOB_NAME-$CI_JOB_ID--sha-$CI_COMMIT_SHORT_SHA.csv" MERGED_CSV_ARTIFACT_NAME: "$CI_JOB_NAME--merged_csv--$CI_JOB_ID.csv" PAGES_HTML_ARTIFACT_NAME: "$CI_JOB_NAME-index.html" SUMMARY_HTML_ARTIFACT_NAME: "summary_$CI_JOB_NAME.html" + SUMMARY_HTML_ARTIFACT_SPLIT: "summary_split_$CI_JOB_NAME.html" IMAGES_ARTIFACT_NAME: "images_$CI_JOB_NAME" + IMAGES_ARTIFACT_SPLIT: "images_split_$CI_JOB_NAME" script: - set -euxo pipefail - *print-common-info - *update-scripts-repo - if [ $USE_LTV -eq 1 ]; then - - *update-ltv-repo - - *copy-ltv-files-to-testv-dir - - testcase_timeout=$TESTCASE_TIMEOUT_LTV + - *update-ltv-repo + - *copy-ltv-files-to-testv-dir + - testcase_timeout=$TESTCASE_TIMEOUT_LTV - else - - testcase_timeout=$TESTCASE_TIMEOUT_STV + - testcase_timeout=$TESTCASE_TIMEOUT_STV - fi - python3 ci/remove_unsupported_testcases.py $PRM_FILES - if [ $LEVEL_SCALING != "1.0" ]; then - - *apply-testv-scaling + - *apply-testv-scaling - fi - if [ "$COMPARE_DMX" = "true" ] || [ "$ENCODER_TEST" = "true" ]; then - - BUILD_WITH_DEBUG_MODE_INFO="true" + - BUILD_WITH_DEBUG_MODE_INFO="true" - fi - - *build-and-create-float-ref-outputs + + - comp_args="--mld --ssnr --odg" - INV_LEVEL_SCALING=$(awk "BEGIN {print 1.0 / $LEVEL_SCALING}") - comp_args="--mld --ssnr --odg --scalefac $INV_LEVEL_SCALING" - - summary_args="MLD DIFF SSNR ODG" - - REPORT_ARG="" - if [ "$ENCODER_TEST" = "true" ]; then comp_args="${comp_args} --enc_stats"; fi - - if [ "$DELTA_ODG" = "true" ]; then comp_args="${comp_args} --odg_bin"; summary_args="${summary_args} 
DELTA_ODG"; REPORT_ARG="--delta_odg"; fi + - if [ "$DELTA_ODG" = "true" ]; then comp_args="${comp_args} --odg_bin"; MEASURES_FOR_REPORT="$MEASURES_FOR_REPORT DELTA_ODG"; fi + - if [ "$SPLIT_COMPARISON" = "true" ]; then comp_args="${comp_args} --split-comparison"; fi + + - *build-and-create-float-ref-outputs # DMX comparison only in manual job with no other metrics - if [ "$COMPARE_DMX" = "true" ]; then - - comp_args="--compare_enc_dmx" + - comp_args="--compare_enc_dmx" - fi - echo "$comp_args" @@ -415,33 +420,43 @@ stages: - python3 -m pytest --tb=no $TEST_SUITE -v --create_cut --html=report.html --self-contained-html --junit-xml=report-junit.xml $comp_args -n auto --testcase_timeout $testcase_timeout --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH || exit_code=$? - zero_errors=$(cat report-junit.xml | grep -c 'errors="0"') || true - - python3 scripts/parse_xml_report.py report-junit.xml $CSV_ARTIFACT_NAME $REPORT_ARG - - mkdir $IMAGES_ARTIFACT_NAME - - for MEASURE in $summary_args;do python3 scripts/create_histogram_summary.py $CSV_ARTIFACT_NAME $IMAGES_ARTIFACT_NAME/summary_"$MEASURE".csv $IMAGES_ARTIFACT_NAME/summary_"$MEASURE".png --measure $MEASURE; done - - python3 ci/basop-pages/create_summary_page.py $SUMMARY_HTML_ARTIFACT_NAME $CI_JOB_ID $CI_JOB_NAME --measures $summary_args + ### create histograms + - python3 scripts/parse_xml_report.py report-junit.xml $CSV_ARTIFACT_NAME --split-csv-file $CSV_ARTIFACT_SPLIT + + # first for "whole" files comparison + - python3 scripts/create_histograms.py $CSV_ARTIFACT_NAME $IMAGES_ARTIFACT_NAME --measures $MEASURES_FOR_REPORT --write-out-histograms + - python3 ci/basop-pages/create_summary_page.py $SUMMARY_HTML_ARTIFACT_NAME $CI_JOB_ID $CI_JOB_NAME $IMAGES_ARTIFACT_NAME --measures $MEASURES_FOR_REPORT + + - if [ "$SPLIT_COMPARISON" = "true" ]; then + - python3 scripts/create_histograms.py $CSV_ARTIFACT_SPLIT $IMAGES_ARTIFACT_SPLIT --measures $MEASURES_FOR_REPORT --write-out-histograms + - python3 ci/basop-pages/create_summary_page.py $SUMMARY_HTML_ARTIFACT_SPLIT $CI_JOB_ID $CI_JOB_NAME $IMAGES_ARTIFACT_SPLIT --measures $MEASURES_FOR_REPORT + - else + # touch files to suppress warning for missing artifacts + - touch $CSV_ARTIFACT_SPLIT $IMAGES_ARTIFACT_SPLIT + - fi - if [ $USE_LTV -eq 1 ] && [ "$CI_COMMIT_BRANCH" == "$CI_DEFAULT_BRANCH" ]; then - - id_previous=$(python3 ci/get_id_of_last_job_occurence.py $CI_DEFAULT_BRANCH $CI_JOB_NAME $CI_PROJECT_ID) - - echo "Job ID from variables - $CI_JOB_ID, Job ID from script - $id_previous" - - curl --request GET "https://forge.3gpp.org/rep/api/v4/projects/$CI_PROJECT_ID/jobs/$id_previous/artifacts" --output artifacts.zip - - unzip artifacts.zip -d previous_artifacts + - id_previous=$(python3 ci/get_id_of_last_job_occurence.py $CI_DEFAULT_BRANCH $CI_JOB_NAME $CI_PROJECT_ID) + - echo "Job ID from variables - $CI_JOB_ID, Job ID from script - $id_previous" + - curl --request GET "https://forge.3gpp.org/rep/api/v4/projects/$CI_PROJECT_ID/jobs/$id_previous/artifacts" --output artifacts.zip + - unzip artifacts.zip -d previous_artifacts # This wildcard thingy relies on only one csv file being present per job - - file_previous="previous_artifacts/mld--$CI_JOB_NAME-$id_previous--sha-*.csv" - - python3 ci/basop-pages/create_report_pages.py $PAGES_HTML_ARTIFACT_NAME $MERGED_CSV_ARTIFACT_NAME $CSV_ARTIFACT_NAME $file_previous $CI_JOB_ID $id_previous $CI_JOB_NAME + - 
file_previous="previous_artifacts/mld--$CI_JOB_NAME-$id_previous--sha-*.csv" + - python3 ci/basop-pages/create_report_pages.py $PAGES_HTML_ARTIFACT_NAME $MERGED_CSV_ARTIFACT_NAME $CSV_ARTIFACT_NAME $file_previous $CI_JOB_ID $id_previous $CI_JOB_NAME - else # create empty file for artifacts to avoid errors - - touch $PAGES_HTML_ARTIFACT_NAME - - touch $MERGED_CSV_ARTIFACT_NAME + - touch $PAGES_HTML_ARTIFACT_NAME + - touch $MERGED_CSV_ARTIFACT_NAME - fi - if [ $zero_errors != 1 ]; then - - echo "Run errors encountered!" + - echo "Run errors encountered!" # TODO: temporary only to not fail MR pipelines on crashes - - if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then - - exit $EXIT_CODE_NON_BE - - else - - exit $EXIT_CODE_FAIL - - fi + - if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then + - exit $EXIT_CODE_NON_BE + - else + - exit $EXIT_CODE_FAIL + - fi - fi - if [ $exit_code -eq 1 ]; then echo "Differences encountered"; exit $EXIT_CODE_NON_BE; fi - exit 0 @@ -458,22 +473,25 @@ stages: - report.html - $PAGES_HTML_ARTIFACT_NAME - $CSV_ARTIFACT_NAME + - $CSV_ARTIFACT_SPLIT - $MERGED_CSV_ARTIFACT_NAME - $SUMMARY_HTML_ARTIFACT_NAME + - $SUMMARY_HTML_ARTIFACT_SPLIT - $IMAGES_ARTIFACT_NAME + - $IMAGES_ARTIFACT_SPLIT expose_as: "pytest compare results" reports: junit: - report-junit.xml .check-up-to-date-in-comparison-jobs: &check-up-to-date-in-comparison-jobs - - *get-commits-behind-count - - | - if [ $commits_behind_count -ne 0 ]; then - set +x - echo -e "Your branch is $commits_behind_count commits behind the target branch, possibly main changed during your pipeline run. Checking bitexactness or testing for regressions now can result in meaningless results. Run\n\t git pull origin $CI_MERGE_REQUEST_TARGET_BRANCH_NAME\nto update." - exit 1 - fi + - *get-commits-behind-count + - | + if [ $commits_behind_count -ne 0 ]; then + set +x + echo -e "Your branch is $commits_behind_count commits behind the target branch, possibly main changed during your pipeline run. Checking bitexactness or testing for regressions now can result in meaningless results. Run\n\t git pull origin $CI_MERGE_REQUEST_TARGET_BRANCH_NAME\nto update." + exit 1 + fi .check-be-to-target-anchor: &check-be-to-target-anchor stage: check-be @@ -492,18 +510,18 @@ stages: - python3 tests/create_short_testvectors.py - if [ $USE_LTV -eq 1 ]; then - - *update-ltv-repo - - *copy-ltv-files-to-testv-dir - - testcase_timeout=$TESTCASE_TIMEOUT_LTV + - *update-ltv-repo + - *copy-ltv-files-to-testv-dir + - testcase_timeout=$TESTCASE_TIMEOUT_LTV - else - - testcase_timeout=$TESTCASE_TIMEOUT_STV + - testcase_timeout=$TESTCASE_TIMEOUT_STV - fi - python3 ci/remove_unsupported_testcases.py $PRM_FILES - python3 scripts/prepare_combined_format_inputs.py - if [ $LEVEL_SCALING != "1.0" ];then - - *apply-testv-scaling + - *apply-testv-scaling - fi - *build-float-ref-binaries @@ -520,39 +538,39 @@ stages: - python3 -m pytest --tb=no -q $TEST_SUITE -v --keep_files --create_cut --html=$HTML_REPORT --self-contained-html --junit-xml=$XML_REPORT --ref_encoder_path $MERGE_TARGET_ENCODER_PATH --ref_decoder_path $MERGE_TARGET_DECODER_PATH --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH -n auto --testcase_timeout $testcase_timeout > pytest_log.txt || exit_code=$? 
- if [ $exit_code -ne 0 ]; then - - exit_code=$EXIT_CODE_NON_BE - - zip -r $PYTEST_CACHE_ARTIFACT .pytest_cache - - - grep "^FAILED" pytest_log.txt | sed "s/^FAILED /'/" | sed "s/] - .*/]'/" | tr "\n" " " > $FAILED_TESTCASES_LIST || true - - grep "^FAILED" pytest_log.txt | sed "s/^FAILED //" | sed "s/] - .*/]/" > failed_testcases_for_printing.txt || true - - num_failures=$(wc -l < failed_testcases_for_printing.txt) - - - grep "^ERROR" pytest_log.txt | sed "s/^ERROR /'/" | sed "s/] - .*/]'/" | tr "\n" " " > $ERRORS_TESTCASES_LIST || true - - grep "^ERROR" pytest_log.txt | sed "s/^ERROR //" | sed "s/] - .*/]/" > errors_testcases_for_printing.txt || true - - num_errors=$(wc -l < errors_testcases_for_printing.txt) - - - *print-results-banner - - echo "Found these $num_failures non-bitexact testcases:" - - cat failed_testcases_for_printing.txt - - - echo "Reproduce locally with:" - - echo -e "1. Create references with target branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:\n\t- git checkout $(cat $FLOAT_REF_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- mv IVAS_cod IVAS_cod_ref\n\t- mv IVAS_dec IVAS_dec_ref\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --update_ref 1 --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH" - - echo -e "2. Run test with source branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:\n\t- git checkout $(cat $CUT_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH" - - echo "The individual command lines can be found in the html report in the job artifacts." - - if [ $num_errors -ne 0 ]; then - - exit_code=1 - - echo "There were errors present in the following testcases:" - - cat errors_testcases_for_printing.txt - - fi - - - exit $exit_code + - exit_code=$EXIT_CODE_NON_BE + - zip -r $PYTEST_CACHE_ARTIFACT .pytest_cache + + - grep "^FAILED" pytest_log.txt | sed "s/^FAILED /'/" | sed "s/] - .*/]'/" | tr "\n" " " > $FAILED_TESTCASES_LIST || true + - grep "^FAILED" pytest_log.txt | sed "s/^FAILED //" | sed "s/] - .*/]/" > failed_testcases_for_printing.txt || true + - num_failures=$(wc -l < failed_testcases_for_printing.txt) + + - grep "^ERROR" pytest_log.txt | sed "s/^ERROR /'/" | sed "s/] - .*/]'/" | tr "\n" " " > $ERRORS_TESTCASES_LIST || true + - grep "^ERROR" pytest_log.txt | sed "s/^ERROR //" | sed "s/] - .*/]/" > errors_testcases_for_printing.txt || true + - num_errors=$(wc -l < errors_testcases_for_printing.txt) + + - *print-results-banner + - echo "Found these $num_failures non-bitexact testcases:" + - cat failed_testcases_for_printing.txt + + - echo "Reproduce locally with:" + - echo -e "1. Create references with target branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:\n\t- git checkout $(cat $FLOAT_REF_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- mv IVAS_cod IVAS_cod_ref\n\t- mv IVAS_dec IVAS_dec_ref\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --update_ref 1 --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH" + - echo -e "2. Run test with source branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:\n\t- git checkout $(cat $CUT_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH" + - echo "The individual command lines can be found in the html report in the job artifacts." 
+ - if [ $num_errors -ne 0 ]; then + - exit_code=1 + - echo "There were errors present in the following testcases:" + - cat errors_testcases_for_printing.txt + - fi + + - exit $exit_code - else # create empty files to not have errors at artifact stage - - touch $FAILED_TESTCASES_LIST - - touch $ERRORS_TESTCASES_LIST - - touch $PYTEST_CACHE_ARTIFACT - - *print-results-banner - - echo "All testcases are bitexact." + - touch $FAILED_TESTCASES_LIST + - touch $ERRORS_TESTCASES_LIST + - touch $PYTEST_CACHE_ARTIFACT + - *print-results-banner + - echo "All testcases are bitexact." - fi - exit $exit_code @@ -582,8 +600,8 @@ stages: .overwrite-pytest-cache-with-artifact: &overwrite-pytest-cache-with-artifact - if [ -f $PYTEST_CACHE_ARTIFACT ]; then - - rm -rf .pytest_cache || true - - unzip $PYTEST_CACHE_ARTIFACT + - rm -rf .pytest_cache || true + - unzip $PYTEST_CACHE_ARTIFACT - fi .check-regressions-pytest-anchor: &check-regressions-pytest-anchor @@ -608,28 +626,28 @@ stages: - set -euxo pipefail - if [ -s $FAILED_TESTCASES_LIST ]; then - - *overwrite-pytest-cache-with-artifact - - export PYTEST_ADDOPTS=--last-failed + - *overwrite-pytest-cache-with-artifact + - export PYTEST_ADDOPTS=--last-failed - else # turn off echoing back of commands for result printout - - *print-results-banner - - echo -e "All tested cases were bit-exact between $CI_MERGE_REQUEST_TARGET_BRANCH_NAME and $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME.\nNo need to check for regressions. All is fine." - - exit 0 + - *print-results-banner + - echo -e "All tested cases were bit-exact between $CI_MERGE_REQUEST_TARGET_BRANCH_NAME and $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME.\nNo need to check for regressions. All is fine." + - exit 0 - fi - *update-scripts-repo - if [ $USE_LTV -eq 1 ]; then - - *update-ltv-repo - - *copy-ltv-files-to-testv-dir - - testcase_timeout=$TESTCASE_TIMEOUT_LTV + - *update-ltv-repo + - *copy-ltv-files-to-testv-dir + - testcase_timeout=$TESTCASE_TIMEOUT_LTV - else - - testcase_timeout=$TESTCASE_TIMEOUT_STV + - testcase_timeout=$TESTCASE_TIMEOUT_STV - fi - python3 ci/remove_unsupported_testcases.py $PRM_FILES - if [ $LEVEL_SCALING != "1.0" ];then - - *apply-testv-scaling + - *apply-testv-scaling - fi # check MR title for flag that allows regressions to be mergable @@ -652,9 +670,9 @@ stages: # Store branch outputs for comparison - mv tests/dut tests/dut_branch - # create the summary based on the branch - - for MEASURE in MLD DIFF SSNR ODG;do python3 scripts/create_histogram_summary.py $CSV_BRANCH $IMAGES_ARTIFACT_NAME/summary_"$MEASURE".csv $IMAGES_ARTIFACT_NAME/summary_"$MEASURE".png --measure $MEASURE; done - - python3 ci/basop-pages/create_summary_page.py $SUMMARY_HTML_ARTIFACT_NAME $CI_JOB_ID $CI_JOB_NAME + # create the summary based on the branch only + - python3 scripts/create_histograms.py $CSV_BRANCH $IMAGES_ARTIFACT_NAME --measures $MEASURES_FOR_REPORT + - python3 ci/basop-pages/create_summary_page.py $SUMMARY_HTML_ARTIFACT_NAME $CI_JOB_ID $CI_JOB_NAME $IMAGES_ARTIFACT_NAME --measures $MEASURES_FOR_REPORT ### run main now - git checkout $CI_MERGE_REQUEST_TARGET_BRANCH_NAME @@ -673,25 +691,25 @@ stages: - exit_code=0 - *print-results-banner - if [ $zero_errors_branch != 1 ]; then - - echo "Run errors encountered!" - - exit_code=$EXIT_CODE_FAIL - - echo "Reproduce locally with:" - - echo -e "1. 
Create references with target branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:\n\t- git checkout $(cat $FLOAT_REF_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- mv IVAS_cod IVAS_cod_ref\n\t- mv IVAS_dec IVAS_dec_ref\n\t- python3 -m pytest $(cat $ERRORS_TESTCASES_LIST) --update_ref 1 --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH" - - echo -e "2. Run test with source branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:\n\t- git checkout $(cat $CUT_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- python3 -m pytest $(cat $ERRORS_TESTCASES_LIST) --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH" - - echo "The individual command lines can be found in the regressions_crashes.csv files in the job artifacts." + - echo "Run errors encountered!" + - exit_code=$EXIT_CODE_FAIL + - echo "Reproduce locally with:" + - echo -e "1. Create references with target branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:\n\t- git checkout $(cat $FLOAT_REF_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- mv IVAS_cod IVAS_cod_ref\n\t- mv IVAS_dec IVAS_dec_ref\n\t- python3 -m pytest $(cat $ERRORS_TESTCASES_LIST) --update_ref 1 --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH" + - echo -e "2. Run test with source branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:\n\t- git checkout $(cat $CUT_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- python3 -m pytest $(cat $ERRORS_TESTCASES_LIST) --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH" + - echo "The individual command lines can be found in the regressions_crashes.csv files in the job artifacts." - elif [ $regressions_found != 0 ] && [ "$SKIP_REGRESSION_CHECK" != "true" ]; then - - cat regression_log.txt - - if [ $allow_regressions_flag == 0 ]; then - - echo "Detected regression wrt to $CI_MERGE_REQUEST_TARGET_BRANCH_NAME, [allow regression] not set!" - - exit_code=$EXIT_CODE_FAIL; - - else - - echo "Detected regression wrt to $CI_MERGE_REQUEST_TARGET_BRANCH_NAME, [allow regression] set." - - exit_code=$EXIT_CODE_NON_BE; - - fi - - echo "Reproduce locally with:" - - echo -e "1. Create references with target branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:\n\t- git checkout $(cat $FLOAT_REF_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- mv IVAS_cod IVAS_cod_ref\n\t- mv IVAS_dec IVAS_dec_ref\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --update_ref 1 --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH" - - echo -e "2. Run test with source branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:\n\t- git checkout $(cat $CUT_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH" - - echo "The individual command lines can be found in the regressions_*.csv files in the job artifacts." + - cat regression_log.txt + - if [ $allow_regressions_flag == 0 ]; then + - echo "Detected regression wrt to $CI_MERGE_REQUEST_TARGET_BRANCH_NAME, [allow regression] not set!" + - exit_code=$EXIT_CODE_FAIL; + - else + - echo "Detected regression wrt to $CI_MERGE_REQUEST_TARGET_BRANCH_NAME, [allow regression] set." + - exit_code=$EXIT_CODE_NON_BE; + - fi + - echo "Reproduce locally with:" + - echo -e "1. 
Create references with target branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:\n\t- git checkout $(cat $FLOAT_REF_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- mv IVAS_cod IVAS_cod_ref\n\t- mv IVAS_dec IVAS_dec_ref\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --update_ref 1 --ref_encoder_path $REF_ENCODER_PATH --ref_decoder_path $REF_DECODER_PATH" + - echo -e "2. Run test with source branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:\n\t- git checkout $(cat $CUT_COMMIT_FILE)\n\t- make clean\n\t- make -j\n\t- python3 -m pytest $(cat $FAILED_TESTCASES_LIST) --dut_encoder_path $DUT_ENCODER_PATH --dut_decoder_path $DUT_DECODER_PATH" + - echo "The individual command lines can be found in the regressions_*.csv files in the job artifacts." - fi - exit $exit_code @@ -732,7 +750,6 @@ stages: - $XML_REPORT_BRANCH - $XML_REPORT_MAIN - .ivas-pytest-sanitizers-anchor: &ivas-pytest-sanitizers-anchor stage: test needs: ["build-codec-linux-make"] @@ -754,16 +771,16 @@ stages: - set -euxo pipefail - make_args="CLANG=$CLANG_NUM" - if [[ $CLANG_NUM == 3 ]]; then - - export UBSAN_OPTIONS="suppressions=scripts/ubsan_basop.supp,report_error_type=1,print_stacktrace=1" - - python3 scripts/basop_create_ignorelist_for_ubsan.py - - make_args="$make_args IGNORELIST=1" + - export UBSAN_OPTIONS="suppressions=scripts/ubsan_basop.supp,report_error_type=1,print_stacktrace=1" + - python3 scripts/basop_create_ignorelist_for_ubsan.py + - make_args="$make_args IGNORELIST=1" - fi - make clean - make -j $make_args - testcase_timeout_arg="--testcase_timeout $TESTCASE_TIMEOUT_LTV_SANITIZERS" # disable per-testcase timeout for msan to evaluate what is going on that it takes so long - if [[ $CLANG_NUM = 1 ]]; then - - testcase_timeout_arg="" + - testcase_timeout_arg="" - fi - python3 -m pytest $TEST_SUITE -v --tb=no --update_ref 1 --html=report.html --self-contained-html --junit-xml=report-junit.xml $testcase_timeout_arg --ref_encoder_path $DUT_ENCODER_PATH --ref_decoder_path $DUT_DECODER_PATH artifacts: @@ -788,16 +805,16 @@ stages: - *print-common-info - *update-scripts-repo - if [ $USE_LTV -eq 1 ]; then - - *update-ltv-repo - - *copy-ltv-files-to-testv-dir - - testcase_timeout=$TESTCASE_TIMEOUT_LTV + - *update-ltv-repo + - *copy-ltv-files-to-testv-dir + - testcase_timeout=$TESTCASE_TIMEOUT_LTV - else - - testcase_timeout=$TESTCASE_TIMEOUT_STV + - testcase_timeout=$TESTCASE_TIMEOUT_STV - fi - python3 ci/remove_unsupported_testcases.py $PRM_FILES - if [ $LEVEL_SCALING != "1.0" ];then - - *apply-testv-scaling + - *apply-testv-scaling - fi - *build-float-ref-and-dut-binaries @@ -1318,34 +1335,34 @@ ivas-pytest-enc-usan: - DUT_DECODER_PATH=./$REF_DECODER_PATH - TEST_SUITE=$LONG_TEST_SUITE_ENCODER <<: *ivas-pytest-sanitizers-anchor - + ### jobs that test flt encoder -> fx decoder ivas-pytest-compare_to_ref-short-dec: extends: - .rules-pytest-to-ref-short - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=0 - DUT_ENCODER_PATH=./$REF_ENCODER_PATH - TEST_SUITE="$SHORT_TEST_SUITE" - LEVEL_SCALING=1.0 <<: *ivas-pytest-anchor - + ivas-pytest-compare_to_ref-short-dec-lev-10: extends: - .rules-pytest-to-ref-short - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=0 - DUT_ENCODER_PATH=./$REF_ENCODER_PATH - TEST_SUITE="$SHORT_TEST_SUITE" - LEVEL_SCALING=0.3162 <<: *ivas-pytest-anchor - + ivas-pytest-compare_to_ref-short-dec-lev+10: extends: - .rules-pytest-to-ref-short - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=0 - DUT_ENCODER_PATH=./$REF_ENCODER_PATH @@ -1410,73 +1427,79 @@ ivas-pytest-dec-usan: 
<<: *ivas-pytest-sanitizers-anchor # --------------------------------------------------------------- -# Long test jobs +# Long test jobs # --------------------------------------------------------------- ivas-pytest-compare_ref-long-enc: extends: - .rules-pytest-long - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=1 - DUT_DECODER_PATH=./$REF_DECODER_PATH - TEST_SUITE="$LONG_TEST_SUITE_ENCODER" - LEVEL_SCALING=1.0 + - SPLIT_COMPARISON="true" <<: *ivas-pytest-anchor - + ivas-pytest-compare_ref-long-dec: extends: - .rules-pytest-long - - .test-job-linux + - .test-job-linux before_script: - - USE_LTV=1 + - USE_LTV=1 - DUT_ENCODER_PATH=./$REF_ENCODER_PATH - TEST_SUITE="$LONG_TEST_SUITE" - LEVEL_SCALING=1.0 + - SPLIT_COMPARISON="true" <<: *ivas-pytest-anchor - + ivas-pytest-compare_ref-long-enc-lev-10: extends: - .rules-pytest-long - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=1 - DUT_DECODER_PATH=./$REF_DECODER_PATH - TEST_SUITE="$LONG_TEST_SUITE_ENCODER" - LEVEL_SCALING=0.3162 + - SPLIT_COMPARISON="true" <<: *ivas-pytest-anchor - + ivas-pytest-compare_ref-long-dec-lev-10: extends: - .rules-pytest-long - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=1 - DUT_ENCODER_PATH=./$REF_ENCODER_PATH - TEST_SUITE="$LONG_TEST_SUITE" - LEVEL_SCALING=0.3162 + - SPLIT_COMPARISON="true" <<: *ivas-pytest-anchor ivas-pytest-compare_ref-long-enc-lev+10: extends: - .rules-pytest-long - - .test-job-linux + - .test-job-linux before_script: - USE_LTV=1 - DUT_DECODER_PATH=./$REF_DECODER_PATH - TEST_SUITE="$LONG_TEST_SUITE_ENCODER" - LEVEL_SCALING=3.162 + - SPLIT_COMPARISON="true" <<: *ivas-pytest-anchor - + ivas-pytest-compare_ref-long-dec-lev+10: extends: - .rules-pytest-long - - .test-job-linux + - .test-job-linux before_script: - - USE_LTV=1 + - USE_LTV=1 - DUT_ENCODER_PATH=./$REF_ENCODER_PATH - TEST_SUITE="$LONG_TEST_SUITE" - LEVEL_SCALING=3.162 + - SPLIT_COMPARISON="true" <<: *ivas-pytest-anchor ivas-smoke-test-saturation: @@ -1490,11 +1513,11 @@ ivas-smoke-test-saturation: - *print-common-info - *update-scripts-repo - if [ $USE_LTV -eq 1 ]; then - - *update-ltv-repo - - *copy-ltv-files-to-testv-dir + - *update-ltv-repo + - *copy-ltv-files-to-testv-dir - fi - if [ $LEVEL_SCALING != "1.0" ];then - - *apply-testv-scaling + - *apply-testv-scaling - fi - cp -r scripts/testv/* $TESTV_DIR/ @@ -1521,7 +1544,6 @@ ivas-smoke-test-saturation: - smoke_test_output_hrtf.txt expose_as: "saturation smoke test results" - # GCOV/LCOV coverage analysis of self_test suite coverage-test-on-main-scheduled: extends: @@ -1531,7 +1553,7 @@ coverage-test-on-main-scheduled: timeout: 3 hours script: - *print-common-info - - *update-scripts-repo + - *update-scripts-repo - *update-ltv-repo - *copy-ltv-files-to-testv-dir - *build-float-ref-binaries @@ -1604,7 +1626,7 @@ be-2-evs-26444: - python3 -m pytest tests/test_26444.py -v --html=report.html --self-contained-html --junit-xml=report-junit.xml -n auto || exit_code=$? 
- if [ $exit_code -eq 1 ]; then echo "Differences encountered"; exit $EXIT_CODE_FAIL; fi - exit 0 - + artifacts: name: "$CI_JOB_NAME--sha-$CI_COMMIT_SHORT_SHA--results" expire_in: 1 week @@ -1615,7 +1637,7 @@ be-2-evs-26444: expose_as: "EVS 26444 result" reports: junit: - - report-junit.xml + - report-junit.xml ivas-pytest-renderer: extends: @@ -1661,13 +1683,12 @@ voip-be-on-merge-request: - make -j - python3 -m pytest tests/test_be_for_jbm_neutral_dly_profile.py - # --------------------------------------------------------------- # Complexity measurement jobs # --------------------------------------------------------------- -.complexity-measurements-setup: - &complexity-measurements-setup # create necessary environment +.complexity-measurements-setup: &complexity-measurements-setup + # create necessary environment ### 1. part: mainly same as in float repo - this is boilerplate code to make the gitlab pages presentation work - mkdir -p wmops/logs @@ -1685,20 +1706,20 @@ voip-be-on-merge-request: # 1. check for public_dir being there as this might not be the case when artifact download failed # 2. check for public dir not being empty - handle job failures in prev job that happen after the dir is created. In that case, the empty dir is in the artifacts - if [ -d $public_dir ] && [ ! -z "$( ls -A $public_dir )" ]; then - - mv $public_dir/* wmops/ + - mv $public_dir/* wmops/ # check here if we have the split-by-levels files present - if not, fake them up with the existing global one # this is needed for the first run with split graphs on a branch where the global version did run previously # NOTE: checking only for level_1 file here as this should already be sufficient # NOTE2: also not chechking for RAM for same reason - - wmops_all_global="wmops/log_wmops_all.txt" - - ram_all_global="wmops/log_ram_all.txt" - - if [ -f "${wmops_all_global}" ] && [ ! -f "wmops/log_wmops_all_level_1.txt" ]; then - - declare -a suffixes=("level_1" "level_2" "level_3" "rate_sw") - - for suffix in "${suffixes[@]}"; do - - cp ${wmops_all_global} wmops/log_wmops_all_${suffix}.txt - - cp ${ram_all_global} wmops/log_ram_all_${suffix}.txt - - done - - fi + - wmops_all_global="wmops/log_wmops_all.txt" + - ram_all_global="wmops/log_ram_all.txt" + - if [ -f "${wmops_all_global}" ] && [ ! -f "wmops/log_wmops_all_level_1.txt" ]; then + - declare -a suffixes=("level_1" "level_2" "level_3" "rate_sw") + - for suffix in "${suffixes[@]}"; do + - cp ${wmops_all_global} wmops/log_wmops_all_${suffix}.txt + - cp ${ram_all_global} wmops/log_ram_all_${suffix}.txt + - done + - fi - fi - ls wmops @@ -1986,8 +2007,8 @@ complexity-masa-in-hoa3-out: # - if: $MEASURE_COMPLEXITY_LINUX # when: delayed # start_in: 13 hours - # variables: - # JOB_ID_INJECT: "" +# variables: +# JOB_ID_INJECT: "" # script: # - in_format=OMASA # - out_format=EXT
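A minimal local sketch of the split-comparison report flow that the patch adds to the pytest compare jobs, assuming a `report-junit.xml` from a comparison run and using placeholder output names (`results.csv`, `images/`, a dummy job id and job name) in place of the CI variables; the script paths and flags are the ones introduced in the patch, everything else is illustrative:

    # Split the junit report into a whole-file CSV and a per-split CSV
    # (the split CSV is only populated if pytest ran with --split-comparison,
    # which the patch enables via the new SPLIT_COMPARISON job variable).
    python3 scripts/parse_xml_report.py report-junit.xml results.csv --split-csv-file results_split.csv

    # Histograms and summary page for the whole-file comparison,
    # using the new MEASURES_FOR_REPORT default measure list.
    python3 scripts/create_histograms.py results.csv images --measures MLD MAX_ABS_DIFF MIN_SSNR MIN_ODG --write-out-histograms
    python3 ci/basop-pages/create_summary_page.py summary.html 0 local-run images --measures MLD MAX_ABS_DIFF MIN_SSNR MIN_ODG

    # Same steps again on the split CSV when split comparison is enabled.
    python3 scripts/create_histograms.py results_split.csv images_split --measures MLD MAX_ABS_DIFF MIN_SSNR MIN_ODG --write-out-histograms
    python3 ci/basop-pages/create_summary_page.py summary_split.html 0 local-run images_split --measures MLD MAX_ABS_DIFF MIN_SSNR MIN_ODG

In the CI jobs themselves the patch drives this through the new variables (`CSV_ARTIFACT_SPLIT`, `IMAGES_ARTIFACT_SPLIT`, `SUMMARY_HTML_ARTIFACT_SPLIT`, `MEASURES_FOR_REPORT`) and only runs the second pass when `SPLIT_COMPARISON="true"`, touching empty placeholder artifacts otherwise so the artifact upload does not warn about missing files.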