MPI-BugBench (High Performance Computing - Public)

Commit 4118322f, authored 1 month ago by Simon Schwitanski

Merge remote-tracking branch 'origin/toolcoverage' into must-toolcoverage

Parents: 6d8bcb4d, 2bc41aea
Showing 4 changed files with 177 additions and 84 deletions:

    MBB.py                          11 additions, 3 deletions
    scripts/MBButils.py             32 additions, 1 deletion
    scripts/tools/mpi_checker.py     9 additions, 9 deletions
    scripts/tools/parcoach.py      125 additions, 71 deletions
MBB.py (+11, −3)

@@ -91,7 +91,7 @@ def extract_all_todo(batch):
     if os.path.exists(f"/MBB/scripts/{args.gencodes}/"):  # Docker run
         filenames = glob.glob(f"/MBB/scripts/{args.gencodes}/**/*.c")
     elif os.path.exists(f"{args.gencodes}/"):  # Gitlab-ci run
-        filenames = glob.glob(f"{args.gencodes}/**/*.c")  # our code expects absolute paths
+        filenames = glob.glob(f"{args.gencodes}/**/*.c", recursive=True)  # our code expects absolute paths
     elif os.path.exists(f"../../{args.gencodes}/"):  # Local runs
         filenames = glob.glob(f"{os.getcwd()}/../../{args.gencodes}/**/*.c")  # our code expects absolute paths
     else:

@@ -115,7 +115,7 @@ def extract_all_todo(batch):
     global todo
     filename = sorted(filenames)
     for filename in filenames[min_rank:max_rank]:
-        todo = todo + parse_one_code(filename)
+        todo = todo + parse_one_code(filename, args.foreign)
     if pos == runner_count and pos != 1:
         # The last runner starts from the end of the array to ease dynamically splitting
         todo = list(reversed(todo))

@@ -131,7 +131,7 @@ def extract_all_todo_from_logdir(tool, logdir):
     global todo
     filename = sorted(filenames)
     for filename in filenames:
-        todo = todo + parse_one_code(filename)
+        todo = todo + parse_one_code(filename, args.foreign)

 ########################

@@ -180,6 +180,7 @@ def cmd_run(rootdir, toolname, batchinfo):
     tools[toolname].build(rootdir=rootdir)

     # build list of test executions for run function
+    print(f"Found {len(todo)} TODO items")
     work_items = []
     for number, test in enumerate(todo):
         binary = re.sub(r'\.c', '', os.path.basename(test['filename']))

@@ -1632,6 +1633,13 @@ parser.add_argument('-f', metavar='format', default='pdf',
 parser.add_argument('-v', '--verbose', action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.INFO)
+parser.add_argument('--foreign', action='store_true', help='Run with foreign tests without MBB header')

 args = parser.parse_args()
 rootdir = os.path.dirname(os.path.abspath(__file__))
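The Gitlab-ci branch of the lookup needed recursive=True: in Python's glob module the "**" pattern only descends into subdirectories when recursive=True is passed, so nested generated test cases were silently skipped before this change. A minimal sketch of the difference (the directory name is hypothetical):

    import glob

    # Without recursive=True, "**" behaves like a single "*" and matches only
    # one directory level, so .c files in nested category folders are missed.
    shallow = glob.glob("gencodes/**/*.c")
    deep = glob.glob("gencodes/**/*.c", recursive=True)
    print(f"shallow match: {len(shallow)} files, recursive match: {len(deep)} files")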
scripts/MBButils.py (+32, −1)

@@ -289,11 +289,14 @@ displayed_name = {
 }

-def parse_one_code(filename):
+def parse_one_code(filename, foreign=False):
     """
     Reads the header of the provided filename, and extract a list of todo item, each of them being a (cmd, expect, test_num) tupple.
     The test_num is useful to build a log file containing both the binary and the test_num, when there is more than one test in the same binary.
     """
+    if foreign:
+        return parse_one_code_noheader(filename)
     res = []
     error_lines = []

@@ -364,6 +367,34 @@ def parse_one_code(filename):
         res.append(test)
     return res

+def parse_one_code_noheader(filename):
+    res = []
+    error_lines = []
+
+    def check_filepath_for_folder(filepath, folder_name="correct"):
+        normalized_filepath = os.path.normpath(filepath)
+        path_components = normalized_filepath.split(os.sep)
+        if folder_name in path_components:
+            return "OK"
+        else:
+            return "ERROR"
+
+    def get_path_up_to_folder(filepath, target_folder="correct"):
+        parts = filepath.split(os.sep)
+        if target_folder in parts:
+            last_correct_index = len(parts) - 1 - parts[::-1].index(target_folder)
+            return os.sep.join(parts[:last_correct_index + 1])
+        else:
+            return None
+
+    include_path = f"-I{include_path}/include" if (include_path := get_path_up_to_folder(filename)) else ""
+
+    test = {'filename': filename, 'id': 0, 'category': "P2P", 'cmd': f"{include_path}",
+            'expect': check_filepath_for_folder(filename), 'detail': "", 'can_deadlock': "",
+            'error_lines': ""}
+    res.append(test)
+    return res

 cache_categorize = {}
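For foreign tests without an MBB header, both the expected outcome and the compile flags are derived purely from the file's location: any path containing a "correct" component yields expect = "OK", everything else yields "ERROR", and -I<prefix>/include is built from the path prefix up to that folder. A small standalone sketch of that classification with hypothetical paths:

    import os

    def classify(filepath, folder_name="correct"):
        # "OK" if any path component equals folder_name, otherwise "ERROR"
        parts = os.path.normpath(filepath).split(os.sep)
        return "OK" if folder_name in parts else "ERROR"

    ok_sample = os.path.join("suites", "foreign", "correct", "p2p", "send_recv.c")
    bad_sample = os.path.join("suites", "foreign", "deadlock", "recv_recv.c")
    print(classify(ok_sample))   # -> OK
    print(classify(bad_sample))  # -> ERROR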
scripts/tools/mpi_checker.py (+9, −9)

@@ -84,13 +84,13 @@ class Tool(AbstractTool):
         finally:
             os.chdir(here)

-    def merge_coverage_single(self, filename, profile):
+    def merge_coverage_single(self, filename, profile, number):
         here = os.getcwd()
         os.chdir(self.llvm_profile_folder)
-        print(f"We are here: {profile}")
+        # print(f"We are here: {profile}")
         try:
             if os.path.isfile(profile):
-                command = f"-o {os.path.basename(filename)}.pro"
+                command = f"-o {os.path.basename(filename)}-mbb{number}.pro"
                 subprocess.run(f"llvm-profdata merge -sparse {profile} {command}",
                     shell=True,

@@ -144,11 +144,14 @@ class Tool(AbstractTool):
         with tempfile.TemporaryDirectory() as temp_dir:
             compile_commands_path = os.path.join(temp_dir, "compile_commands.json")
             with open(compile_commands_path, "w") as out:
+                command_suffix = f"{execcmd}" if execcmd.startswith("-I") else ""
+                command_ = f"mpicc {filename} {self.mpicc_compile_flags}{command_suffix}"
                 out.write(
-                    f'[{{"directory": "{here}", "command": "mpicc {filename} {self.mpicc_compile_flags}", "file": "{filename}"}}]'
+                    f'[{{"directory": "{here}", "command": "{command_}", "file": "{filename}"}}]'
                 )
             current_env = os.environ.copy()
-            current_env["LLVM_PROFILE_FILE"] = f"{self.llvm_profile_folder}/{base_file}.profraw"
+            current_env["LLVM_PROFILE_FILE"] = f"{self.llvm_profile_folder}/{base_file}-mbb{number}.profraw"
             ran = self.run_cmd(
                 buildcmd=None,
                 execcmd=f"clang-tidy -p {temp_dir} -checks='-*,mpi-type-mismatch,mpi-buffer-deref,clang-analyzer-optin.mpi.MPI-Checker' {filename}",

@@ -161,10 +164,7 @@ class Tool(AbstractTool):
                 loglevel=loglevel,
                 current_env=current_env,
             )
-            self.merge_coverage_single(filename=filename, profile=f"{self.llvm_profile_folder}/{base_file}.profraw")
-            # subprocess.run(f"chmod -R +r /MBI/logs/mpi-checker/{cachefile}", shell=True, check=True)
-            # subprocess.run("rm compile_commands.json", shell=True, check=True)
+            self.merge_coverage_single(filename=filename, profile=f"{self.llvm_profile_folder}/{base_file}-mbb{number}.profraw", number=number)

     def parse(self, cachefile):
         if os.path.exists(f"{cachefile}.timeout") or os.path.exists(
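The common thread in these changes is per-test coverage bookkeeping: each run writes its own raw profile named <source>-mbb<number>.profraw and a matching indexed .pro file, so repeated runs of the same source file no longer overwrite each other's profiles. A rough sketch of the underlying LLVM coverage workflow, assuming the analyzed tool was built with clang's -fprofile-instr-generate and that llvm-profdata is on PATH (the helper name is hypothetical):

    import os
    import subprocess

    def run_with_profile(cmd, profile_dir, base_file, number):
        # Point the instrumented process at a per-test raw profile path.
        env = os.environ.copy()
        raw = os.path.join(profile_dir, f"{base_file}-mbb{number}.profraw")
        env["LLVM_PROFILE_FILE"] = raw
        subprocess.run(cmd, shell=True, env=env, check=False)
        # Index the raw profile into a compact per-test profile for later merging.
        pro = os.path.join(profile_dir, f"{base_file}-mbb{number}.pro")
        subprocess.run(f"llvm-profdata merge -sparse {raw} -o {pro}", shell=True, check=True)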
scripts/tools/parcoach.py (+125, −71)

 import re
 import os

 from MBButils import *
+from pathlib import Path


 class Tool(AbstractTool):
     def identify(self):
         return "PARCOACH wrapper"

     def ensure_image(self):
-        AbstractTool.ensure_image(self, "-x parcoach")
+        #AbstractTool.ensure_image(self, "-x parcoach")
+        pass

     def build(self, rootdir, cached=True):
-        if cached and os.path.exists(f"/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/bin/parcoachcc"):
-            print("No need to rebuild PARCOACH.")
-            os.environ['PATH'] = os.environ['PATH'] + f":/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/bin/"
-            os.environ['OMPI_CC'] = "clang-15"
-            return
-        here = os.getcwd()  # Save where we were
-        os.chdir(rootdir)
-        subprocess.run(f"wget https://gitlab.inria.fr/api/v4/projects/12320/packages/generic/parcoach/2.4.0/parcoach-2.4.0-shared-Linux.tar.gz", shell=True, check=True)
-        subprocess.run(f"tar xfz parcoach-*.tar.gz", shell=True, check=True)
-        if not os.path.exists("/usr/lib/llvm-15/bin/clang"):
-            subprocess.run("ln -s $(which clang) /usr/lib/llvm-15/bin/clang", shell=True, check=True)
-        # Go to where we want to install it, and build it out-of-tree (we're in the docker)
-        subprocess.run(f"rm -rf /tmp/build-parcoach/parcoach-2.4.0-shared-Linux/ && mkdir -p /tmp/build-parcoach/", shell=True, check=True)
-        subprocess.run(f"mv parcoach-*/ /tmp/build-parcoach/", shell=True, check=True)
-        os.environ['PATH'] = os.environ['PATH'] + f":/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/bin/"
-        os.environ['OMPI_CC'] = "clang-15"
-        # Back to our previous directory
-        os.chdir(here)
+        # nothing to do
+        pass

     def setup(self):
-        os.environ['PATH'] = os.environ['PATH'] + f":/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/bin"
+        # os.environ['PATH'] = os.environ['PATH'] + f":/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/bin"
         os.environ['OMPI_CC'] = "clang-15"
+        result_parcoach = subprocess.run("which parcoach", shell=True, check=True, capture_output=True, text=True)
+        self.parcoach_exe = result_parcoach.stdout.strip()
+        clang_19_bin = os.environ["LLVM_19_BIN"]
+        self.llvm_cov = clang_19_bin + "/llvm-cov"
+        self.llvm_profdata = clang_19_bin + "/llvm-profdata"
+        assert os.path.isfile(self.llvm_cov)
+        assert os.path.isfile(self.llvm_profdata)
+        self.llvm_profile_folder = os.environ["LLVM_PROFILE_FOLDER"]
+        assert os.path.isdir(self.llvm_profile_folder)
+        self.coverage_profile = "parcoach_profile.pro"
+        self.coverage_report_html = "parcoach_coverage_html"
+        self.coverage_source = os.path.dirname(self.parcoach_exe) + "/../src/"
+        self.so_files = [str(p) for p in list(Path(os.path.dirname(self.parcoach_exe)).rglob("*.so"))]

     def run(self, execcmd, filename, binary, id, number, timeout, batchinfo, loglevel=logging.INFO):
         self.setup()
-        os.environ['PATH'] = os.environ['PATH'] + f":/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/bin"
-        os.environ['OMPI_CC'] = "clang-15"
         os.environ['OMPI_ALLOW_RUN_AS_ROOT'] = "1"
         os.environ['OMPI_ALLOW_RUN_AS_ROOT_CONFIRM'] = "1"

         cachefile = f'{binary}_{id}'

+        current_env = os.environ.copy()
+        base_file = os.path.basename(filename)
+        current_env["LLVM_PROFILE_FILE"] = f"{self.llvm_profile_folder}/{base_file}-mbb{number}.profraw"

         execcmd = re.sub('\${EXE}', f'./{binary}', execcmd)

         if filename.find('lock') >= 0 or filename.find('fence') >= 0:
             self.run_cmd(
                 # buildcmd=f"parcoachcc -check=rma --args mpicc {filename} -c -o {binary}.o",
-                buildcmd=f"parcoachcc -check=rma --args mpicc {filename} -c -o {binary}.o \n mpicc {binary}.o -o {binary} -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/ -lParcoachRMADynamic_MPI_C",
-                execcmd=f"mpicc {binary}.o -lParcoachRMADynamic_MPI_C -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/",
+                # buildcmd=f"parcoachcc -check=rma --args mpicc {filename} -c -o {binary}.o \n mpicc {binary}.o -o {binary} -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/ -lParcoachRMADynamic_MPI_C",
+                # execcmd=f"mpicc {binary}.o -lParcoachRMADynamic_MPI_C -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/",
+                buildcmd=f"mpicc {filename} -S -emit-llvm -o {binary}.ir",
+                execcmd=f"{self.parcoach_exe} --check=rma {binary}.ir",
                 # execcmd=execcmd,
                 cachefile=cachefile,
                 filename=filename,
@@ -56,12 +66,15 @@ class Tool(AbstractTool):
                 binary=binary,
                 timeout=timeout,
                 batchinfo=batchinfo,
-                loglevel=loglevel)
+                loglevel=loglevel,
+                current_env=current_env,
+            )
         else:
             self.run_cmd(
                 # buildcmd=f"parcoachcc -instrum-inter --args mpicc {filename} -c -o {binary}.o",
-                buildcmd=f"parcoachcc -instrum-inter --args mpicc {filename} -c -o {binary}.o \n mpicc {binary}.o -o {binary} -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/ -lParcoachCollDynamic_MPI_C",
-                execcmd=f"mpicc {binary}.o -lParcoachCollDynamic_MPI_C -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/",
+                # buildcmd=f"parcoachcc -instrum-inter --args mpicc {filename} -c -o {binary}.o \n mpicc {binary}.o -o {binary} -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/ -lParcoachCollDynamic_MPI_C",
+                buildcmd=f"mpicc {filename} -S -emit-llvm -o {binary}.ir",
+                execcmd=f"{self.parcoach_exe} --check=mpi {binary}.ir",
+                # execcmd=f"mpicc {binary}.o -lParcoachCollDynamic_MPI_C -L/tmp/build-parcoach/parcoach-2.4.0-shared-Linux/lib/",
                 # execcmd=execcmd,
                 cachefile=cachefile,
                 filename=filename,
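In both branches the dynamic parcoachcc wrapper (which links the ParcoachRMADynamic_MPI_C / ParcoachCollDynamic_MPI_C runtime libraries) is replaced by PARCOACH's static mode: the test is first lowered to LLVM IR with mpicc, then the parcoach binary analyzes the .ir file with --check=rma or --check=mpi. A simplified standalone sketch of that two-step flow (the helper name and defaults are illustrative; OMPI_CC must point at a clang that understands -S -emit-llvm):

    import subprocess

    def parcoach_static_check(source_c, binary, check="mpi", parcoach_exe="parcoach"):
        # 1) Lower the MPI test case to textual LLVM IR via the mpicc wrapper.
        subprocess.run(f"mpicc {source_c} -S -emit-llvm -o {binary}.ir",
                       shell=True, check=True)
        # 2) Run PARCOACH's static analysis (collective/MPI or RMA checks) on the IR.
        return subprocess.run(f"{parcoach_exe} --check={check} {binary}.ir",
                              shell=True, capture_output=True, text=True)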
@@ -69,18 +82,61 @@ class Tool(AbstractTool):
                 binary=binary,
                 timeout=timeout,
                 batchinfo=batchinfo,
-                loglevel=loglevel)
+                loglevel=loglevel,
+                current_env=current_env,
+            )

+        self.merge_coverage_single(filename=filename, profile=f"{self.llvm_profile_folder}/{base_file}-mbb{number}.profraw", number=number)
         subprocess.run("rm -f *.bc core", shell=True, check=True)

+    def merge_coverage_single(self, filename, profile, number):
+        here = os.getcwd()
+        os.chdir(self.llvm_profile_folder)
+        # print(f"We are here: {profile}")
+        try:
+            if os.path.isfile(profile):
+                command = f"-o {os.path.basename(filename)}-mbb{number}.pro"
+                subprocess.run(
+                    f"{self.llvm_profdata} merge -sparse {profile} {command}",
+                    shell=True,
+                    check=True,
+                )
+                subprocess.run(
+                    f"rm {profile}",
+                    shell=True,
+                    check=False,
+                )
+        except Exception:
+            pass
+        finally:
+            os.chdir(here)

+    def teardown(self):
+        self.setup()
+        here = os.getcwd()
+        os.chdir(self.llvm_profile_folder)
+        subprocess.run(
+            f"{self.llvm_profdata} merge -sparse *.pro -o {self.coverage_profile}",
+            shell=True,
+            check=True,
+        )
+        if os.path.exists(self.coverage_profile):
+            subprocess.run(
+                f"{self.llvm_cov} show -format=html {' '.join(self.so_files)} {self.parcoach_exe} "
+                f"--instr-profile={self.coverage_profile} "
+                f"--show-directory-coverage --output-dir {self.coverage_report_html} "
+                f"--sources {self.coverage_source}",
+                shell=True,
+                check=True,
+            )
+        os.chdir(here)

     def get_mbb_error_label(self, error_message):
         mbb_error_dict = {'LocalConcurrency': "LocalConcurrency", 'Call Ordering': "CallOrdering"}
         for k, v in mbb_error_dict.items():
             if error_message.startswith(k):
                 return v
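The new teardown() turns the per-test profiles into a single report: all .pro files are merged with llvm-profdata, and llvm-cov then renders an HTML coverage report against the parcoach binary and its shared objects. A condensed sketch of that final step, assuming llvm-profdata and llvm-cov come from the same LLVM version that produced the profiles (the helper name is hypothetical):

    import subprocess

    def write_coverage_report(profile_dir, binary, merged="parcoach_profile.pro",
                              html_dir="parcoach_coverage_html"):
        # Combine every per-test profile in the folder into one indexed profile.
        subprocess.run(f"cd {profile_dir} && llvm-profdata merge -sparse *.pro -o {merged}",
                       shell=True, check=True)
        # Render an HTML report for the instrumented binary; further shared objects
        # can be included with additional -object arguments.
        subprocess.run(f"cd {profile_dir} && llvm-cov show {binary} -format=html "
                       f"-instr-profile={merged} -output-dir={html_dir}",
                       shell=True, check=True)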
@@ -88,18 +144,16 @@ class Tool(AbstractTool):
         assert False and "ERROR MESSAGE NOT KNOWN PARSING DOES NOT WORK CORRECLTY"
         return "UNKNOWN"

     def parse(self, cachefile, logs_dir):
         if os.path.exists(f'{cachefile}.timeout') or os.path.exists(f'{logs_dir}/parcoach/{cachefile}.timeout'):
             outcome = 'timeout'
         if not (os.path.exists(f'{cachefile}.txt') or os.path.exists(f'{logs_dir}/parcoach/{cachefile}.txt')):
             return 'failure'

         with open(f'{cachefile}.txt' if os.path.exists(f'{cachefile}.txt') else f'{logs_dir}/parcoach/{cachefile}.txt', 'r') as infile:
             output = infile.read()

         if re.search('Compilation of .*? raised an error \(retcode: ', output):
             output = {}
             output["status"] = "UNIMPLEMENTED"
@@ -125,7 +179,8 @@ class Tool(AbstractTool):
         for line in lines:
             # get the error/warning blocks:
             if re.match(COerror_line_prefix, line) or re.match(LC1error_line_prefix, line) or re.match(LC2error_line_prefix, line) or re.match(LCinfo_line_prefix, line):
+                current_report.append(line)
                 reports.append(current_report)
                 # print("--- current_report = ", current_report, "\n")
@@ -186,7 +241,6 @@ class Tool(AbstractTool):
             parsed_report['ranks'] = list(set(parsed_report['ranks']))
             parsed_reports.append(parsed_report)
-        # parsed_reports = list(set(parsed_reports))
         output["messages"] = parsed_reports
         # output["messages"] = list(set(output["messages"]))