diff --git a/.deactivated.gitlab-ci.yml b/.deactivated.gitlab-ci.yml
index c967c5c54a7c0a137663d05f44b91ab98a714e78..026c6c3fcbf88b431099a1a4d8923d62fa3dcbfe 100755
--- a/.deactivated.gitlab-ci.yml
+++ b/.deactivated.gitlab-ci.yml
@@ -19,7 +19,7 @@ build-must:
     stage: build
     needs: []
     script:
-        - scripts/ensure_python3 ./MBI.py -x must -c build
+        - scripts/ensure_python3 ./MBB.py -x must -c build
     artifacts:
         untracked: false
         paths:
@@ -33,8 +33,8 @@ test-must-1:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 1/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 1/10
     artifacts:
         untracked: false
         when: always
@@ -49,8 +49,8 @@ test-must-2:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 2/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 2/10
     artifacts:
         untracked: false
         when: always
@@ -65,8 +65,8 @@ test-must-3:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 3/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 3/10
     artifacts:
         untracked: false
         when: always
@@ -81,8 +81,8 @@ test-must-4:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 4/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 4/10
     artifacts:
         untracked: false
         when: always
@@ -97,8 +97,8 @@ test-must-5:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 5/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 5/10
     artifacts:
         untracked: false
         when: always
@@ -113,8 +113,8 @@ test-must-6:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 6/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 6/10
     artifacts:
         untracked: false
         when: always
@@ -129,8 +129,8 @@ test-must-7:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 7/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 7/10
     artifacts:
         untracked: false
         when: always
@@ -145,8 +145,8 @@ test-must-8:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 8/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 8/10
     artifacts:
         untracked: false
         when: always
@@ -161,8 +161,8 @@ test-must-9:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 9/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 9/10
     artifacts:
         untracked: false
         when: always
@@ -177,8 +177,8 @@ test-must-10:
     before_script:
         - apt-get install -y psmisc
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must -c run -b 10/10
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must -c run -b 10/10
     artifacts:
         untracked: false
         when: always
@@ -211,8 +211,8 @@ test-must-all:
         - job: test-must-10
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x must
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x must
     artifacts:
         untracked: false
         when: always
@@ -224,7 +224,7 @@ build-aislinn:
     needs: []
     image: ubuntu:18.04
     script:
-        - scripts/ensure_python3 ./MBI.py -x aislinn -c build
+        - scripts/ensure_python3 ./MBB.py -x aislinn -c build
     artifacts:
         untracked: false
         when: always
@@ -238,8 +238,8 @@ test-aislinn:
         - job: build-aislinn
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x aislinn
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x aislinn
     artifacts:
         untracked: false
         when: always
@@ -255,8 +255,8 @@ test-aislinn:
 #        - baremetal
 #    image: registry.hub.docker.com/mquinson/mbi
 #    script:
-#        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-#        - scripts/ensure_python3 ./MBI.py -x mpisv
+#        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+#        - scripts/ensure_python3 ./MBB.py -x mpisv
 #    artifacts:
 #        untracked: false
 #        when: always
@@ -267,8 +267,8 @@ test-civl-1:
     stage: test
     needs: []
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x civl -c run -b 1/4
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x civl -c run -b 1/4
     artifacts:
         untracked: false
         when: always
@@ -278,8 +278,8 @@ test-civl-2:
     stage: test
     needs: []
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x civl -c run -b 2/4
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x civl -c run -b 2/4
     artifacts:
         untracked: false
         when: always
@@ -289,8 +289,8 @@ test-civl-3:
     stage: test
     needs: []
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x civl -c run -b 3/4
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x civl -c run -b 3/4
     artifacts:
         untracked: false
         when: always
@@ -300,8 +300,8 @@ test-civl-4:
     stage: test
     needs: []
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x civl -c run -b 4/4
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x civl -c run -b 4/4
     artifacts:
         untracked: false
         when: always
@@ -320,8 +320,8 @@ test-civl-all:
         - job: test-civl-4
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x civl
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x civl
     artifacts:
         untracked: false
         when: always
@@ -332,7 +332,7 @@ build-parcoach:
     stage: build
     needs: []
     script:
-        - scripts/ensure_python3 ./MBI.py -x parcoach -c build
+        - scripts/ensure_python3 ./MBB.py -x parcoach -c build
     artifacts:
         untracked: false
         when: always
@@ -345,8 +345,8 @@ test-parcoach:
         - job: build-parcoach
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x parcoach
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x parcoach
     artifacts:
         untracked: false
         when: always
@@ -357,7 +357,7 @@ build-simgrid:
     stage: build
     needs: []
     script:
-        - scripts/ensure_python3 ./MBI.py -x simgrid -c build
+        - scripts/ensure_python3 ./MBB.py -x simgrid -c build
     cache:
         paths:
             - SimGrid/*
@@ -373,8 +373,8 @@ test-simgrid:
         - job: build-simgrid
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x simgrid
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x simgrid
     artifacts:
         untracked: false
         when: always
@@ -386,8 +386,8 @@ test-smpi:
         - job: build-simgrid
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x smpi
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x smpi
     artifacts:
         untracked: false
         when: always
@@ -399,8 +399,8 @@ test-smpivg-1:
         - job: build-simgrid
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x smpivg -b 1/3
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x smpivg -b 1/3
     artifacts:
         untracked: false
         when: always
@@ -412,8 +412,8 @@ test-smpivg-2:
         - job: build-simgrid
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x smpivg -b 2/3
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x smpivg -b 2/3
     artifacts:
         untracked: false
         when: always
@@ -425,8 +425,8 @@ test-smpivg-3:
         - job: build-simgrid
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x smpivg -b 3/3
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x smpivg -b 3/3
     artifacts:
         untracked: false
         when: always
@@ -442,8 +442,8 @@ test-smpivg-all:
         - job: test-smpivg-3
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x smpivg
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x smpivg
     artifacts:
         untracked: false
         when: always
@@ -457,8 +457,8 @@ test-smpivg-all:
 #        - baremetal
 #    image: registry.hub.docker.com/mquinson/mbi
 #    script:
-#        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-#        - scripts/ensure_python3 ./MBI.py -x itac -c run
+#        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+#        - scripts/ensure_python3 ./MBB.py -x itac -c run
 #    artifacts:
 #        untracked: false
 #        when: always
@@ -469,7 +469,7 @@ build-isp:
     stage: build
     needs: []
     script:
-        - scripts/ensure_python3 ./MBI.py -x isp -c build
+        - scripts/ensure_python3 ./MBB.py -x isp -c build
     artifacts:
         untracked: false
         when: always
@@ -482,8 +482,8 @@ test-isp-1:
         - job: build-isp
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x isp -c run -b 1/2
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x isp -c run -b 1/2
     artifacts:
         untracked: false
         when: always
@@ -496,8 +496,8 @@ test-isp-2:
         - job: build-isp
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x isp -c run -b 2/2
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x isp -c run -b 2/2
     artifacts:
         untracked: false
         when: always
@@ -512,8 +512,8 @@ test-isp-all:
         - job: test-isp-2
           artifacts: true
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -x isp
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -x isp
     artifacts:
         untracked: false
         when: always
@@ -526,8 +526,8 @@ latex:
     when: always
     script:
         - apt update ; apt -y install texlive-latex-base texlive-latex-extra texlive-pictures
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -c latex
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -c latex
         - cd script ; ./tools/gen_latex.py ; cd ..
         - cd latex ; pdflatex -batch dashboard.tex ; cd ..
     artifacts:
@@ -540,8 +540,8 @@ pages:
     stage: deploy
     when: always
     script:
-        - rm -rf /MBI/*; scripts/ensure_python3 ./MBI.py -c generate
-        - scripts/ensure_python3 ./MBI.py -c html
+        - rm -rf /MBI/*; scripts/ensure_python3 ./MBB.py -c generate
+        - scripts/ensure_python3 ./MBB.py -c html
         - pwd ; ls
         - mkdir public
         - cp -r *.html gencodes img logs public/
diff --git a/MBI.py b/MBB.py
similarity index 93%
rename from MBI.py
rename to MBB.py
index df907a7ec50426e9b03726279019f728980833bd..f67624ae2ee9f8a4a1f8f07f836c4c9da9ac5f3b 100755
--- a/MBI.py
+++ b/MBB.py
@@ -1,6 +1,6 @@
 #! /usr/bin/python3
 
-# autopep8 -i --max-line-length 130 MBI.py
+# autopep8 -i --max-line-length 130 MBB.py
 
 import shutil
 import os
@@ -23,6 +23,8 @@ import matplotlib as mpl
 import matplotlib.pyplot as plt
 import matplotlib.patches as mpatches
 
+from scripts.MBIutils import categorize
+
 mpl.rcParams['hatch.linewidth'] = 4.5  # hatch linewidth
 
 # Add our lib directory to the PYTHONPATH, and load our utility libraries
@@ -595,40 +597,15 @@ def aggregate_metrics_per_category(df_in):
 
     return df[["CE", "RE", "TP", "TN", "FP", "FN", "coverage", "conclusiveness", "specificity", "recall", "precision", "f1", "overallaccuracy"]]
 
+def read_tool_reports(rootdir, toolname):
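+    """Load the cached results of one tool into a pandas DataFrame.
+
+    Raises an exception if the tool name is unknown or if no logs exist for it.
+    The returned frame holds one row per generated test, extended with the
+    derived TP_* columns used by the CSV output and the helpfulness plot.
+    """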
+    if toolname not in tools:
+        raise Exception(f"Tool {toolname} does not seem to be a valid name.")
 
-def cmd_csv(rootdir, toolnames):
-    here = os.getcwd()
-    os.chdir(rootdir)
-    results = {}
-    total_elapsed = {}
-    used_toolnames = []
-    outpath = f'{rootdir}/csv/'
-
-    # Create directory for output if not present
-    pathlib.Path(outpath).mkdir(parents=True, exist_ok=True)
-
-    # select the tools for which we have some results
-    print("Produce the stats for:", end='')
-    for toolname in toolnames:
-        if not toolname in tools:
-            raise Exception(f"Tool {toolname} does not seem to be a valid name.")
-
-        if os.path.exists(f'{args.logs_dir}/{toolname}'):
-            used_toolnames.append(toolname)
-            print(f' {toolname}', end="")
-
-            # To compute timing statistics
-            total_elapsed[toolname] = 0
-    print(".")
+    if not os.path.exists(f'{args.logs_dir}/{toolname}'):
+        raise Exception(f"Not found Logs for {toolname}.")
 
-    test_categories = ['COLL', 'P2P', 'RMA', 'other']
+    results = []
 
-    # Initialize the data structure to gather all results
-    results = {}
-    for toolname in used_toolnames:
-        results[toolname] = []
-
-    # Get all data from the caches
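+    # Read the cached categorization of every generated test for this tool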
     for test in todo:
         binary = re.sub(r'\.c', '', os.path.basename(test['filename']))
         ID = test['id']
@@ -636,29 +613,45 @@ def cmd_csv(rootdir, toolnames):
         test_id = f"{binary}_{ID}"
         expected = test['expect']
 
-        for toolname in used_toolnames:
-            resulting_categorization = categorize(tool=tools[toolname], toolname=toolname,
+        resulting_categorization = categorize(tool=tools[toolname], toolname=toolname,
                                                   test=test, test_id=test_id, logs_dir=args.logs_dir,
                                                   )
-            resulting_categorization["test_id"] = test_id
-            resulting_categorization["category"] = test["category"]
-            results[toolname].append(resulting_categorization)
-
-    pd.set_option('display.max_columns',14)
-    for toolname in ["itac", "must", "parcoach"]:
-        df = pd.DataFrame(results[toolname])
-
-        df["TP_base"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & (df["CE"] == False) & (df["RE"] == False) 
-        df["TP_class"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df["correct_class_reported"] & (df["CE"] == False) & (df["RE"] == False)
-        df["TP_line"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df["correct_line_reported"] & (df["CE"] == False) & (df["RE"] == False) 
-        df["TP_class_line"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df["correct_class_reported"] & df[
-            "correct_line_reported"] & (df["CE"] == False) & (df["RE"] == False)
-        df["TP_class_line_no_class_noise"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df[
-            "correct_class_reported"] & df["correct_line_reported"] & (~df["contains_noise_class"]) & (df["CE"] == False) & (df["RE"] == False)
-        df["TP_class_line_no_line_noise"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df[
-            "correct_class_reported"] & df["correct_line_reported"] & (~df["contains_noise_line"]) & (df["CE"] == False) & (df["RE"] == False) 
+        resulting_categorization["test_id"] = test_id
+        resulting_categorization["category"] = test["category"]
+        results.append(resulting_categorization)
+
+    df = pd.DataFrame(results)
+
+    df["TP_base"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & (df["CE"] == False) & (df["RE"] == False)
+    df["TP_class"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df["correct_class_reported"] & (df["CE"] == False) & (df["RE"] == False)
+    df["TP_line"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df["correct_line_reported"] & (df["CE"] == False) & (df["RE"] == False)
+    df["TP_class_line"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df["correct_class_reported"] & df[
+        "correct_line_reported"] & (df["CE"] == False) & (df["RE"] == False)
+    df["TP_class_line_no_class_noise"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df[
+        "correct_class_reported"] & df["correct_line_reported"] & (~df["contains_noise_class"]) & (df["CE"] == False) & (df["RE"] == False)
+    df["TP_class_line_no_line_noise"] = df["ERROR_EXPECTED"] & df["any_error_reported"] & df[
+        "correct_class_reported"] & df["correct_line_reported"] & (~df["contains_noise_line"]) & (df["CE"] == False) & (df["RE"] == False)
+
+    return df
+
+def cmd_csv(rootdir, toolnames, print_to_console=False):
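+    """Write the per-tool CSV summaries under {rootdir}/csv/.
+
+    For every tool in toolnames this dumps the raw categorization, one aggregated
+    CSV per kind of true positive, the helpfulness plot and two noise-ratio
+    summaries. Set print_to_console=True to also echo the aggregated tables.
+    """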
+    here = os.getcwd()
+    os.chdir(rootdir)
+    outpath = f'{rootdir}/csv/'
+
+    # Create directory for output if not present
+    pathlib.Path(outpath).mkdir(parents=True, exist_ok=True)
+
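+    # Per-tool noise ratios aggregated per category, written as summary CSVs at the end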
+    df_noise_ratio = pd.DataFrame(columns=toolnames)
+    df_overall_noise_ratio = pd.DataFrame(columns=toolnames)
+
+    pd.set_option('display.max_columns', 14)
+
+    for toolname in toolnames:
+        df = read_tool_reports(rootdir, toolname)
         df.to_csv(f'{outpath}/{toolname}_raw.csv', index=False)
-        print(f"=== {toolname} ===")
+        if print_to_console:
+            print(f"=== {toolname} ===")
 
         # Output for each type of TP
         for (colname) in ["base", "class", "line", "class_line", "class_line_no_line_noise", "class_line_no_line_noise", "class_line_no_class_noise"]:
@@ -667,72 +660,80 @@ def cmd_csv(rootdir, toolnames):
             df_classified.to_csv(f'{outpath}/{toolname}_{colname}_full.csv', index=False)
             df_result = aggregate_metrics_per_category(df_classified)
             df_result.to_csv(f'{outpath}/{toolname}_{colname}.csv', index=True)
-            if colname == "base":
+            if print_to_console:
                 print(f"\n{colname}:")
                 print(df_result[["CE", "RE", "TP", "TN", "FP", "FN", "coverage", "conclusiveness", "specificity", "recall", "precision", "f1", "overallaccuracy"]])
                 df_result[["CE", "RE", "TP", "TN", "FP", "FN", "coverage", "conclusiveness", "specificity", "recall", "precision", "f1", "overallaccuracy"]].style.format(precision=2).to_latex(f'{outpath}/{toolname}_{colname}.tex')
 
-
-        # todo move this into plots cmd?
-        df_plot = df.groupby("category").sum()
-        df_plot.loc["ALL"] = df_plot.sum(axis=0)
-        df_plot.drop("other", axis=0, inplace=True)
-
-        df_plot["TP_class"] = df_plot["TP_class"] - df_plot["TP_class_line"]
-        df_plot["TP_line"] = df_plot["TP_line"] - df_plot["TP_class_line"]
-        df_plot["TP_base"] = df_plot["TP_base"] - df_plot["TP_class_line"] - df_plot["TP_class"] - df_plot["TP_line"]
-
-        colors = ['#66CCEE', '#66CCEE', '#228833', '#EE6677']
-        # colors = ['#66CCEE', 'yellow', '#228833', '#EE6677']
-
-        fig, ax = plt.subplots()
-
-        df_plot = df_plot[["TP_class", "TP_class_line", "TP_line", "TP_base"]]
-
-        df_plot[["TP_class", "TP_class_line", "TP_line", "TP_base"]].div(df_plot.sum(axis=1), axis=0).plot.barh(
-            stacked=True, color=colors, ax=ax, legend=False)
-
-        # Customize bars
-        bars = ax.patches
-
-        for i in [4, 5, 6, 7]:
-            bars[i].set_hatch("//")
-            bars[i].set_edgecolor(colors[2])
-
-        # Create custom legend handles
-        handles = [
-            mpatches.Patch(color=colors[0], label="correct error class"),
-            mpatches.Patch(facecolor=colors[1], edgecolor=colors[2], hatch='//', label='correct class and line'),
-            mpatches.Patch(color=colors[2], label='correct source line'),
-            mpatches.Patch(color=colors[3], label='not helpful report')
-        ]
-
-        # ax.legend(handles=handles, ncol=2, loc='center left', bbox_to_anchor=(0.1, -0.25))
-        # ax.set_title(f"Helpfulness of {toolname} Error Reports")
-        # ax.set_xlabel("percentage of error reports")
-        # ax.set_ylabel("MPI feature")
-        # plt.tight_layout()
-        # plt.savefig(f'{outpath}/{toolname}_plot.pdf')
-
-        # print()
-        # print(toolname)
-        # print()
-
-        # df_plot = df.groupby("category").sum()
-        # df_plot.loc["ALL"] = df_plot.sum(axis=0)
-        # df_plot.drop("other", axis=0, inplace=True)
-        # df_plot["noise_ratio"] = df_plot["num_noise_line"] / df_plot["num_error_reports"]
-        # print("overall_noise")
-        # print(df_plot["noise_ratio"])
-
-        # df_copy = df.copy()
-        # df_copy.loc[df_copy['ERROR_EXPECTED'] == False, ['num_noise_class_line', 'num_error_reports']] = 0
-        # df_plot = df_copy.groupby("category").sum()
-        # df_plot.loc["ALL"] = df_plot.sum(axis=0)
-        # df_plot.drop("other", axis=0, inplace=True)
-        # df_plot["noise_ratio"] = df_plot["num_noise_line"] / df_plot["num_error_reports"]
-        # print("noise_in_cases_where_errors_are_present")
-        # print(df_plot[["noise_ratio", "num_noise_class_line", "num_error_reports"]])
+        plot_helpfulness(df, outpath, toolname)
+
+        df_noise_per_tool = df.groupby("category").sum()
+        df_noise_per_tool.loc["ALL"] = df_noise_per_tool.sum(axis=0)
+        df_noise_per_tool.drop("other", axis=0, inplace=True)
+        df_noise_per_tool["noise_ratio"] = df_noise_per_tool["num_noise_line"] / df_noise_per_tool["num_error_reports"]
+        if print_to_console:
+            print("overall_noise")
+            print(df_noise_per_tool["noise_ratio"])
+        df_overall_noise_ratio[toolname] = df_noise_per_tool["noise_ratio"]
+
+        df_copy = df.copy()
+        df_copy.loc[df_copy['ERROR_EXPECTED'] == False, ['num_noise_class_line', 'num_error_reports']] = 0
+        df_noise_per_tool = df_copy.groupby("category").sum()
+        df_noise_per_tool.loc["ALL"] = df_noise_per_tool.sum(axis=0)
+        df_noise_per_tool.drop("other", axis=0, inplace=True)
+        df_noise_per_tool["noise_ratio"] = df_noise_per_tool["num_noise_line"] / df_noise_per_tool["num_error_reports"]
+        if print_to_console:
+            print("noise_in_cases_where_errors_are_present")
+            print(df_noise_per_tool[["noise_ratio", "num_noise_class_line", "num_error_reports"]])
+        df_noise_ratio[toolname] = df_noise_per_tool["noise_ratio"]
+
+    df_noise_ratio.to_csv(f'{outpath}/noise.csv')
+    df_overall_noise_ratio.to_csv(f'{outpath}/overall_noise_including_unexpected.csv')
+
+
+def plot_helpfulness(df, outpath, toolname):
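+    """Plot how helpful the error reports of one tool are, per MPI feature.
+
+    The stacked horizontal bars split the true positives into: correct error
+    class only, correct class and line, correct source line only, and reports
+    that are not helpful. The figure is saved as {outpath}/{toolname}_plot.pdf.
+    """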
+    SMALL_SIZE = 16
+    MEDIUM_SIZE = 16
+    BIGGER_SIZE = 16
+
+    plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
+    plt.rc('axes', titlesize=BIGGER_SIZE)  # fontsize of the axes title
+    plt.rc('axes', labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
+    plt.rc('xtick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
+    plt.rc('ytick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
+    plt.rc('legend', fontsize=SMALL_SIZE)  # legend fontsize
+    plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
+
+    df_plot = df.groupby("category").sum()
+    df_plot.loc["ALL"] = df_plot.sum(axis=0)
+    df_plot.drop("other", axis=0, inplace=True)
+    df_plot["TP_class"] = df_plot["TP_class"] - df_plot["TP_class_line"]
+    df_plot["TP_line"] = df_plot["TP_line"] - df_plot["TP_class_line"]
+    df_plot["TP_base"] = df_plot["TP_base"] - df_plot["TP_class_line"] - df_plot["TP_class"] - df_plot["TP_line"]
+    colors = ['#88CCEE', '#88CCEE', '#44AA99', '#EE6677']
+    # colors = ['#66CCEE', 'yellow', '#228833', '#EE6677']
+    fig, ax = plt.subplots(1, 1, figsize=(9, 6))
+    df_plot = df_plot[["TP_class", "TP_class_line", "TP_line", "TP_base"]]
+    df_plot[["TP_class", "TP_class_line", "TP_line", "TP_base"]].div(df_plot.sum(axis=1), axis=0).plot.barh(
+        stacked=True, color=colors, ax=ax, legend=False)
+    # Customize bars
+    bars = ax.patches
+    for i in [4, 5, 6, 7]:
+        bars[i].set_hatch("//")
+        bars[i].set_edgecolor(colors[2])
+    # Create custom legend handles
+    handles = [
+        mpatches.Patch(color=colors[0], label="correct error class"),
+        mpatches.Patch(facecolor=colors[1], edgecolor=colors[2], hatch='//', label='correct class and line'),
+        mpatches.Patch(color=colors[2], label='correct source line'),
+        mpatches.Patch(color=colors[3], label='not helpful report')
+    ]
+    ax.legend(handles=handles, ncol=2, loc='center left', bbox_to_anchor=(0.05, -0.3))
+    #ax.set_title(f"Helpfulness of {toolname} error reports")
+    ax.set_xlabel("Percentage of error reports")
+    ax.set_ylabel("MPI feature")
+    plt.tight_layout()
+    plt.savefig(f'{outpath}/{toolname}_plot.pdf')
 
 
 def cmd_latex(rootdir, toolnames):
@@ -1959,15 +1960,18 @@ elif args.c == 'run':
     for t in arg_tools:
         cmd_run(rootdir=rootdir, toolname=t, batchinfo=args.b)
 elif args.c == 'latex':
-    extract_all_todo_from_logdir(args.x, args.logs_dir)
+    extract_all_todo_from_logdir(arg_tools[0], args.logs_dir)
     # 'smpi','smpivg' are not shown in the paper
     # cmd_latex(rootdir, toolnames=['aislinn', 'civl', 'isp','itac', 'simgrid', 'mpisv', 'must', 'hermes', 'parcoach', 'mpi-checker'])
     cmd_latex(rootdir, toolnames=['itac', 'must', 'parcoach'])
 elif args.c == 'csv':
-    extract_all_todo_from_logdir(args.x, args.logs_dir)
-    cmd_csv(rootdir, toolnames=['itac', 'must', 'parcoach'])
+    extract_all_todo_from_logdir(arg_tools[0] if arg_tools else args.x, args.logs_dir)
+    if arg_tools:
+        cmd_csv(rootdir, toolnames=arg_tools)
+    else:
+        cmd_csv(rootdir, toolnames=['itac', 'must', 'parcoach'])
 elif args.c == 'html':
-    extract_all_todo_from_logdir(args.x, args.logs_dir)
+    extract_all_todo_from_logdir(arg_tools[0], args.logs_dir)
     if args.x == 'mpirun':
         # toolnames=['itac', 'simgrid','must', 'smpi', 'smpivg', 'aislinn', 'civl', 'isp', 'mpisv', 'parcoach', 'hermes', 'mpi-checker']
         toolnames = ['itac', 'must', 'parcoach']
diff --git a/gitlab-simgrid.yml b/gitlab-simgrid.yml
index 6d40ad739946354feaec8a4d497bb360b1fa0804..3b4e500f2af40c02da7423b025e6f16ab3d2072f 100755
--- a/gitlab-simgrid.yml
+++ b/gitlab-simgrid.yml
@@ -15,9 +15,9 @@ pages:
         - 7zr x -so logs-*.7z | tar xf -
         - mkdir -p logs/simgrid
         - touch logs/simgrid/trust_the_installation trust_the_installation # Silence the checks for the right docker image
-        - python3 ./MBI.py -c generate
-#        - python3 ./MBI.py -x simgrid | grep Test
-        - python3 ./MBI.py -c html -x itac,simgrid,must,parcoach,isp,aislinn,mpisv,civl,smpi,smpivg
+        - python3 ./MBB.py -c generate
+#        - python3 ./MBB.py -x simgrid | grep Test
+        - python3 ./MBB.py -c html -x itac,simgrid,must,parcoach,isp,aislinn,mpisv,civl,smpi,smpivg
         - pwd ; ls ; echo logs:; ls logs
         - mkdir public
         - cp -r *.html gencodes img logs public/
diff --git a/scripts/result_plot.py b/scripts/result_plot.py
index 22185325ab92d8ae7056035c0e36c6e6d2592540..b62b5ae835dba524d862eecc5ef9e8001285faf3 100644
--- a/scripts/result_plot.py
+++ b/scripts/result_plot.py
@@ -52,10 +52,14 @@ plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
 
 fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(16, 9))  #
 
-colors = ['#228833', '#66ccee', '#ee6677', '#aa3377', '#ccbb44', '#bbbbbb']
+#colors = ['#228833', '#66ccee', '#ee6677', '#aa3377', '#ccbb44', '#bbbbbb']
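+# Colour-blind-friendly palettes (the hex values appear to be from Paul Tol's colour schemes)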
+colors = ['#6699CC', '#EECC66', '#004488', '#997700', '#BBBBBB', '#000000']
+colors_2 = {'TP': '#117733', 'TN': '#88CCEE', 'FP': '#CC6677', 'FN': '#AA4499', 'RE': '#DDCC77', 'CE': '#DDDDDD'}
+
+hatches= ["","",".",".","",""]
 
 ((ax1, ax2), (ax3, ax4)) = axs
-df_p2p[["TP", "TN", "FP", "FN", "RE", "CE"]].plot.barh(stacked=True, ax=ax1, legend=False, color=colors)
+df_p2p[["TP", "TN", "FP", "FN", "RE", "CE"]].plot.barh(stacked=True, ax=ax1, legend=False,color=colors)
 ax1.set_title('P2P')
 handles, labels = ax1.get_legend_handles_labels()
 
@@ -80,4 +84,5 @@ for ax in [ax1, ax2, ax3, ax4]:
 fig.legend(handles, labels, loc='upper center', ncols=6, bbox_to_anchor=(0.5, 1.05), )
 
 plt.tight_layout()
+
 plt.savefig(os.path.join(plot_path, "results_per_cat.pdf"), bbox_inches="tight")
diff --git a/scripts/tools/aislinn.py b/scripts/tools/aislinn.py
index 5f45b992c8d2e39b70c8b66e377d2bda582e53d3..ae76700a850156d4cceac743d3c71c1130eddd33 100644
--- a/scripts/tools/aislinn.py
+++ b/scripts/tools/aislinn.py
@@ -16,7 +16,7 @@ class Tool(AbstractTool):
             print(f"id: '{id.stdout}'; version: '{ver.stdout}'")
             print("Please run this script in a ubuntu:18.04 image. Run these commands:")
             print("  docker image pull ubuntu:18.04")
-            print("  docker run -it --rm --name MIB --volume $(pwd):/MBI ubuntu:18.04 /MBI/MBI.py -x aislinn")
+            print("  docker run -it --rm --name MIB --volume $(pwd):/MBI ubuntu:18.04 /MBI/MBB.py -x aislinn")
             sys.exit(1)
 
     def build(self, rootdir, cached=True):
diff --git a/scripts/tools/mpisv.py b/scripts/tools/mpisv.py
index 5ace8ccf739c922cdd42bddbad3a6f56a6a9cb44..c00103e47e095b7578995c05f34af46e04611c90 100644
--- a/scripts/tools/mpisv.py
+++ b/scripts/tools/mpisv.py
@@ -13,7 +13,7 @@ class Tool(AbstractTool):
         else:
             print("Please run this script in a mpisv/mpi-sv image. Run these commands:")
             print("  docker image pull mpisv/mpi-sv")
-            print("  docker run -it --rm --name MIB --shm-size=512m --volume $(pwd):/MBI mpisv/mpi-sv  /MBI/scripts/ensure_python3 /MBI/MBI.py -x mpisv")
+            print("  docker run -it --rm --name MIB --shm-size=512m --volume $(pwd):/MBI mpisv/mpi-sv  /MBI/scripts/ensure_python3 /MBI/MBB.py -x mpisv")
             sys.exit(1)
 
     def run(self, execcmd, filename, binary, id, timeout, batchinfo):
diff --git a/scripts/tools/must.py b/scripts/tools/must.py
index e7a9c50c7672611906f645262de2e16a229c9a57..fe89640841dee2993c94bd33a0a38387410ed21a 100644
--- a/scripts/tools/must.py
+++ b/scripts/tools/must.py
@@ -126,18 +126,18 @@ class V18(AbstractTool):
             "MUST_ERROR_INTEGER_NOT_WITHIN_ZERO_TAG_UB": ["InvalidParam"],
             "MUST_ERROR_INTEGER_NOT_WITHIN_ZERO_TAG_UB_ANY_TAG": ["InvalidParam"],
             "MUST_ERROR_INTEGER_GREATER_COMM_SIZE": ["InvalidParam"],
-            "MUST_ERROR_INTEGER_GREATER_EQUAL_COMM_SIZE": ["InvalidParam"],
+            "MUST_ERROR_INTEGER_GREATER_EQUAL_COMM_SIZE": ["InvalidParam", "GlobalParameterMissmatch"],
             "MUST_ERROR_INTEGER_PRODUCT_GREATER_COMM_SIZE": ["InvalidParam"],
             "MUST_ERROR_GROUP_RANGE_RANK": ["InvalidParam"],
             "MUST_ERROR_GROUP_RANGE_STRIDE": ["InvalidParam"],
-            "MUST_ERROR_REQUEST_ACTIVE": ["DEADLOCK", "RequestLifeCycle"],
+            "MUST_ERROR_REQUEST_ACTIVE": ["DEADLOCK", "RequestLifeCycle", "InvalidParam"],
             "MUST_ERROR_REQUEST_ACTIVE_ARRAY": ["DEADLOCK", "RequestLifeCycle"],
             "MUST_ERROR_REQUEST_PARTITION_ACTIVE": ["DEADLOCK", "RequestLifeCycle"],
             "MUST_ERROR_REQUEST_NOT_PARTITIONED_SEND": ["DEADLOCK", "RequestLifeCycle"],
             "MUST_ERROR_REQUEST_NOT_PARTITIONED_RECV": ["DEADLOCK", "RequestLifeCycle"],
-            "MUST_ERROR_REQUEST_NOT_KNOWN": ["DEADLOCK", "RequestLifeCycle"],
-            "MUST_ERROR_REQUEST_NOT_KNOWN_ARRAY": ["DEADLOCK", "RequestLifeCycle"],
-            "MUST_ERROR_REQUEST_NULL": ["DEADLOCK", "RequestLifeCycle"],
+            "MUST_ERROR_REQUEST_NOT_KNOWN": ["InvalidParam", "DEADLOCK", "RequestLifeCycle"],
+            "MUST_ERROR_REQUEST_NOT_KNOWN_ARRAY": ["InvalidParam", "DEADLOCK", "RequestLifeCycle"],
+            "MUST_ERROR_REQUEST_NULL": ["InvalidParam", "DEADLOCK", "RequestLifeCycle", "CallOrdering"],
             "MUST_ERROR_REQUEST_NULL_ARRAY": ["DEADLOCK", "RequestLifeCycle"],
             "MUST_ERROR_REQUEST_PERSISTENT_BUT_INACTIVE": ["DEADLOCK", "RequestLifeCycle"],
             "MUST_ERROR_COMM_UNKNWOWN": ["InvalidParam"],
@@ -176,24 +176,24 @@ class V18(AbstractTool):
             "MUST_ERROR_POINTER_NULL_COMM_SIZE_ARRAY": ["InvalidParam"],
             "MUST_ERROR_POINTER_NULL_COMM_SIZE_ARRAY_AT_INDEX": ["InvalidParam"],
             "MUST_ERROR_MPI_IN_PLACE_USED": ["InvalidParam"],
-            "MUST_ERROR_SELFOVERLAPPED": ["LocalParameterMissmatch"],
-            "MUST_ERROR_OVERLAPPED_SEND": ["LocalParameterMissmatch"],
-            "MUST_ERROR_OVERLAPPED_RECV": ["LocalParameterMissmatch"],
+            "MUST_ERROR_SELFOVERLAPPED": ["LocalParameterMissmatch", "LocalConcurrency", "GlobalParameterMissmatch"],
+            "MUST_ERROR_OVERLAPPED_SEND": ["LocalParameterMissmatch", "LocalConcurrency"],
+            "MUST_ERROR_OVERLAPPED_RECV": ["LocalParameterMissmatch", "LocalConcurrency"],
             "MUST_ERROR_POINTER_NULL_STATUS_IGNORE": ["InvalidParam"],
             "MUST_ERROR_TYPEMATCH_INTERNAL_NOTYPE": ["InvalidParam", "LocalParameterMissmatch"],
             "MUST_ERROR_TYPEMATCH_INTERNAL_TYPESIG": ["InvalidParam", "LocalParameterMissmatch"],
-            "MUST_ERROR_TYPEMATCH_MISMATCH": ["GlobalParameterMissmatch"],
+            "MUST_ERROR_TYPEMATCH_MISMATCH": ["GlobalParameterMissmatch", "LocalParameterMissmatch"],
             "MUST_ERROR_TYPEMATCH_MISMATCH_BYTE": ["GlobalParameterMissmatch"],
-            "MUST_ERROR_TYPEMATCH_LENGTH": ["InvalidParam", "LocalParameterMissmatch"],
+            "MUST_ERROR_TYPEMATCH_LENGTH": ["InvalidParam", "LocalParameterMissmatch", "GlobalParameterMissmatch"],
             "MUST_ERROR_TYPEMATCH_ALIGNMENT": ["InvalidParam", "LocalParameterMissmatch"],
             "MUST_ERROR_MESSAGE_LOST": ["DEADLOCK", "CallOrdering"],
             "MUST_ERROR_COLLECTIVE_CALL_MISMATCH": ["DEADLOCK", "CallOrdering"],
             "MUST_ERROR_COLLECTIVE_OP_MISMATCH": ["GlobalParameterMissmatch"],
             "MUST_ERROR_COLLECTIVE_ROOT_MISMATCH": ["GlobalParameterMissmatch"],
-            "MUST_ERROR_COLLECTIVE_BLOCKING_NONBLOCKING_MISMATCH": ["GlobalParameterMissmatch"],
-            "MUST_ERROR_DEADLOCK": ["DEADLOCK"],
+            "MUST_ERROR_COLLECTIVE_BLOCKING_NONBLOCKING_MISMATCH": ["GlobalParameterMissmatch", "CallOrdering"],
+            "MUST_ERROR_DEADLOCK": ["DEADLOCK", "CallOrdering", "GlobalParameterMissmatch", "MissingCall", "InvalidParam"],
             "MUST_ERROR_BUFFER_REATTACH": ["TODO"],
-            "MUST_ERROR_BUFFER_NOATTACHED": ["TODO"],
+            "MUST_ERROR_BUFFER_NOATTACHED": ["InvalidParam"],
             "MUST_ERROR_COUNTS_ARRAYS_DIFFER": ["TODO"],
             "MUST_ERROR_MPI_MULTIPLE_THREADS": ["TODO"],
             "MUST_ERROR_UNSUPPORTED": ["TODO"],