scripts/find_regressions_from_logs.py
0 → 100644
#!/usr/bin/env python3
import argparse
from pathlib import Path

import pandas as pd

REPRODUCE_REGRESSION_SCRIPT_TMPL = """#!/bin/bash -x
SCRIPTS_DIR=/usr/local/scripts
LTV_DIR=/usr/local/ltv
MIN_DATE={min_date}
MIN_SHA={min_sha}
LEVEL_SCALING={level_scaling}
TESTCASE="{testcase}"
REF_ENC1={REF_ENC1}
REF_DEC1={REF_DEC1}
DUT_ENC1={DUT_ENC1}
DUT_DEC1={DUT_DEC1}
REF_ENC2={REF_ENC2}
REF_DEC2={REF_DEC2}
DUT_ENC2={DUT_ENC2}
DUT_DEC2={DUT_DEC2}
INV_LEVEL_SCALING=$(awk "BEGIN {{print 1.0 / $LEVEL_SCALING}}")

# Obtain executables from past reference
git checkout `git rev-list -1 --before="$MIN_DATE 22:00:00" ivas-float-update`
echo "ivas_float_update, min version: `git rev-parse HEAD`" > versions.txt
make clean
make -j
mv IVAS_cod IVAS_cod_ref_1
mv IVAS_dec IVAS_dec_ref_1
mv IVAS_rend IVAS_rend_ref_1

git checkout $MIN_SHA
echo "main, min version: `git rev-parse HEAD`" >> versions.txt
make clean
make -j
mv IVAS_cod IVAS_cod_1
mv IVAS_dec IVAS_dec_1
mv IVAS_rend IVAS_rend_1

# Obtain latest executables
git checkout ivas-float-update
git pull
echo "ivas-float-update, current version: `git rev-parse HEAD`" >> versions.txt
make clean
make -j
mv IVAS_cod IVAS_cod_ref_2
mv IVAS_dec IVAS_dec_ref_2
mv IVAS_rend IVAS_rend_ref_2

git checkout main
git pull
echo "main, current version: `git rev-parse HEAD`" >> versions.txt
make clean
make -j
mv IVAS_cod IVAS_cod_2
mv IVAS_dec IVAS_dec_2
mv IVAS_rend IVAS_rend_2

# Get fresh copy of scripts, tests and ci
cp -r $SCRIPTS_DIR/{{scripts,tests,ci,pytest.ini}} .
rm -rf tests/ref tests/dut tests/renderer/ref tests/renderer/cut
python3 ci/remove_unsupported_testcases.py scripts/config/self_test.prm scripts/config/self_test_ltv.prm

# Get LTVs
cp $LTV_DIR/* scripts/testv

# Apply level scaling
tests/scale_pcm.py ./scripts/testv/ "$LEVEL_SCALING"

# Run tests
cp IVAS_rend_ref_1 IVAS_rend_ref
cp IVAS_rend_1 IVAS_rend
python3 -m pytest "$TESTCASE" -n 1 --update_ref 1 --create_ref --param_file scripts/config/self_test_ltv.prm --use_ltv --ref_encoder_path $REF_ENC1 --ref_decoder_path $REF_DEC1
python3 -m pytest "$TESTCASE" -n 1 --create_cut --param_file scripts/config/self_test_ltv.prm --use_ltv --dut_encoder_path $DUT_ENC1 --dut_decoder_path $DUT_DEC1 --mld --ssnr --odg --scalefac $INV_LEVEL_SCALING --junit-xml=report1.xml --html=report1.html --self-contained-html
python3 scripts/parse_xml_report.py report1.xml report1.csv

# Store results from first run
mkdir -p tests1/renderer
cp -r tests/ref tests/dut tests1
cp -r tests/renderer/ref tests1/renderer
cp -r tests/renderer/cut tests1/renderer

cp IVAS_rend_ref_2 IVAS_rend_ref
cp IVAS_rend_2 IVAS_rend
python3 -m pytest "$TESTCASE" -n 1 --update_ref 1 --create_ref --param_file scripts/config/self_test_ltv.prm --use_ltv --ref_encoder_path $REF_ENC2 --ref_decoder_path $REF_DEC2
python3 -m pytest "$TESTCASE" -n 1 --create_cut --param_file scripts/config/self_test_ltv.prm --use_ltv --dut_encoder_path $DUT_ENC2 --dut_decoder_path $DUT_DEC2 --mld --ssnr --odg --scalefac $INV_LEVEL_SCALING --junit-xml=report2.xml --html=report2.html --self-contained-html
python3 scripts/parse_xml_report.py report2.xml report2.csv
"""


def main(logs_dir, output_filename, measure):
    input_path = Path(logs_dir)
    logs = [f for f in input_path.iterdir() if f.is_dir()]
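    # Each subdirectory of logs_dir is taken to be one nightly run, named by
    # its date; the directory name becomes the per-date key used below.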

    # Build dict of scores
    formatdict = {}
    sha = {}
    logdict = {}
    for log in logs:
        date = log.name
        logdict[date] = {}
        formatdict[date] = {}
        for logfile in log.glob("*.csv"):
            tmp = logfile.name.split("-")
            job = "-".join(tmp[3:-4])
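            # The CSV name is assumed to be "-"-separated: tmp[3:-4] spans the
            # job name and the last field (minus ".csv") holds the commit SHA.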
            sha[date] = tmp[-1].split(".")[0]
            data = pd.read_csv(logfile, usecols=["testcase", measure, "format"])
            logdict[date][job] = {}
            formatdict[date][job] = {}
            for testcase, value, format in zip(
                data["testcase"], data[measure], data["format"]
            ):
                formatdict[date][job][testcase] = format
                logdict[date][job][testcase] = value

    # Restructure dict
    csv_rows = []
    formats = []
    for date, jobs in logdict.items():
        for job, testcases in jobs.items():
            for testcase, value in testcases.items():
                csv_rows.append((job, testcase, date, value))
                formats.append((job, testcase, date, formatdict[date][job][testcase]))
    result = pd.DataFrame(csv_rows, columns=["job", "testcase", "date", "value"])
    result = result.pivot(
        index=["job", "testcase"], columns="date", values="value"
    ).reset_index()
    f = pd.DataFrame(formats, columns=["job", "testcase", "date", "format"])
    f = f.pivot(
        index=["job", "testcase"], columns="date", values="format"
    ).reset_index()
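    # After the pivot each row is one (job, testcase) pair and each remaining
    # column is one date, so a row traces that testcase's measure over time.
    # pivot sorts the date columns, so the last column is the most recent run
    # (assuming the directory names are ISO-style dates).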
    values = result.iloc[:, 2:]
    last_date = values.columns[-1]
    result.insert(2, "format", f[last_date])
    result.insert(3, "min_date", values.idxmin(axis=1))
    result.insert(4, "min_sha", result["min_date"].map(sha))
    result.insert(5, "curr_value", values[last_date])
    result.insert(6, "min_value", values.min(axis=1))
    result.insert(7, "diff", result["curr_value"] - result["min_value"])
    result.insert(8, "ratio", result["curr_value"] / result["min_value"])
    result.loc[result["min_value"] == 0, "ratio"] = 1  # Set ratio to 1 for denominator 0
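    # Prefix the SHA with an apostrophe so spreadsheet tools keep it as text
    # (a SHA like "123e45" would otherwise be parsed as a number).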
    result["min_sha"] = "'" + result["min_sha"]
    result.to_csv(output_filename, sep=";", index=False)

    critical = result.iloc[:, 0:9]
    formats = list(set(critical["format"]))
    formats.sort()
    critical3 = pd.DataFrame()
    for format in formats:
        top3 = (
            critical[critical["format"] == format]
            .sort_values(by="ratio", ascending=False)
            .head(3)
        )
        critical3 = pd.concat([critical3, top3], ignore_index=True)
    critical3.to_csv("critical3.csv", sep=";", index=False)
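    # critical3 now holds, per format, the three testcases whose current value
    # has degraded most relative to their historical best; one reproduction
    # script is generated for each of these rows below.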

    for row_counter, row in critical3.iterrows():
        # Find level
        level_scaling = 1.0
        if "lev+10" in row["job"]:
            level_scaling = 3.162
        if "lev-10" in row["job"]:
            level_scaling = 0.3162
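        # 3.162 ≈ 10^(10/20), i.e. a +10 dB amplitude gain; 0.3162 is -10 dB.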

        # Find executables setup
        REF_ENC1 = "IVAS_cod_ref_1"
        REF_DEC1 = "IVAS_dec_ref_1"
        DUT_ENC1 = "IVAS_cod_1"
        DUT_DEC1 = "IVAS_dec_1"
        REF_ENC2 = "IVAS_cod_ref_2"
        REF_DEC2 = "IVAS_dec_ref_2"
        DUT_ENC2 = "IVAS_cod_2"
        DUT_DEC2 = "IVAS_dec_2"
        if "dec" in row["job"]:
            DUT_ENC1 = "IVAS_cod_ref_1"
            DUT_ENC2 = "IVAS_cod_ref_2"
        if "enc" in row["job"]:
            DUT_DEC1 = "IVAS_dec_ref_1"
            DUT_DEC2 = "IVAS_dec_ref_2"

        script_content = REPRODUCE_REGRESSION_SCRIPT_TMPL.format(
            min_date=row["min_date"],
            min_sha=row["min_sha"][1:],  # drop the leading apostrophe added for the CSV
            level_scaling=level_scaling,
            testcase=row["testcase"],
            REF_ENC1=REF_ENC1,
            REF_DEC1=REF_DEC1,
            DUT_ENC1=DUT_ENC1,
            DUT_DEC1=DUT_DEC1,
            REF_ENC2=REF_ENC2,
            REF_DEC2=REF_DEC2,
            DUT_ENC2=DUT_ENC2,
            DUT_DEC2=DUT_DEC2,
        )
        # row_counter + 2 presumably matches the row numbering of critical3.csv
        # when opened in a spreadsheet (header row plus 1-based rows).
        script_filename = f"regression_{row_counter + 2:03d}.bash"
        with open(script_filename, "w") as script_file:
            script_file.write(script_content)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Summarize nightly regression logs and generate reproduction scripts"
    )
    parser.add_argument(
        "logs_dir",
        type=str,
        help="Logs dir, e.g. logs",
    )
    parser.add_argument(
        "output_filename",
        type=str,
        help="Filename of the combined CSV file, e.g. mld.csv",
    )
    parser.add_argument(
        "--measure",
        type=str,
        help="Measure for summary, one of MLD, MIN_SSNR, MAX_ABS_DIFF, MIN_ODG (default: MLD)",
        default="MLD",
    )
    args = parser.parse_args()
    main(args.logs_dir, args.output_filename, args.measure)
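
# Example invocation (illustrative paths):
#   python3 scripts/find_regressions_from_logs.py logs mld.csv --measure MIN_ODG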
tests/test_be_for_jbm_neutral_dly_profile.py
@@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
 from pathlib import Path
 from .constants import TESTV_DIR, SCRIPTS_DIR
-from .split_rendering.constants import HR_TRAJECTORY_DIR
+from .split_rendering.constants import HR_TRAJECTORY_DIR, RENDER_CFG_DIR
 sys.path.append(str(SCRIPTS_DIR))
 from pyaudio3dtools import audiofile, audioarray
@@ -90,14 +90,13 @@ TESTCASES_NO_DTX = [
     # BINAURAL_SPLIT_CODED with LCLD
     ["HOA3", 128000, "BINAURAL_SPLIT_CODED"],
     ["OSBA_ISM4_FOA", 128000, "BINAURAL_SPLIT_CODED"],
 ]

 DLY_PROFILE = SCRIPTS_DIR.joinpath("dly_error_profiles/dly_error_profile_0.dat")
 JBM_NEUTRAL_DELAY_MS = 60


 def is_split_rend(format) -> bool:
-    return format in ["BINAURAL_SPLIT_CODED", "BINAURAL_SPLIT_PCM"]
+    return format.upper() in ["BINAURAL_SPLIT_CODED", "BINAURAL_SPLIT_PCM"]


 def get_options_cod(in_format, dtx):
@@ -151,7 +150,10 @@ def get_options_dec(
 ):
     options = []
-    if output_format == "BINAURAL_SPLIT_PCM":
+    if "BINAURAL_SPLIT" in output_format.upper():
+        options.extend(["-render_config", str(RENDER_CFG_DIR / "split_renderer_config_3dof_512k_default.txt")])
+    if output_format.upper() == "BINAURAL_SPLIT_PCM":
         options.extend(["-om", str(output_file.with_suffix(".isarmd"))])
     if is_split_rend(output_format):