diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7ed3340852a932b53d7e99f220ba18fec263b83a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,18 @@
+# Operating system files
+.DS_Store
+Thumbs.db
+#backend
+#src/backend/__pycache__/
+#src/backend/api/
+#src/backend/backend_utilities/
+#src/backend/cache/
+#src/backend/endpoints/
+#src/backend/middleware/
+#src/backend/core/
+
+#frontend
+# node_modules becomes very big after npm install
+#src/frontend/node_modules
+
+# after npm start
+.angular/
\ No newline at end of file
diff --git a/src/backend/.DS_Store b/src/backend/.DS_Store
index 10b404014fd30fd8b8d4dc68d6ab2da8d4b403a1..b7e5e937cec369f3515e3247f37d2a726044cea2 100644
Binary files a/src/backend/.DS_Store and b/src/backend/.DS_Store differ
diff --git a/src/backend/api/routes/input_output/importing.py b/src/backend/api/routes/input_output/importing.py
index 178a958c1d3dae08a18a635be58a5b290ddf5480..f21641e14f12f4ab64ffb555ec3d8b7be59be3a0 100644
--- a/src/backend/api/routes/input_output/importing.py
+++ b/src/backend/api/routes/input_output/importing.py
@@ -12,6 +12,10 @@ from endpoints.load_event_log import calculate_event_log_properties
 from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
 from pydantic import BaseModel
 
+from backend_utilities import mine_log as ml
+import tempfile
+import os
+
 router = APIRouter(tags=["importing"], prefix="/importing")
 
 
@@ -39,6 +43,44 @@ class FilePathInput(BaseModel):
     file_path: str
 
 
+@router.post("/loadOCELFromFile")
+async def load_ocel_from_file(
+    file: UploadFile = File(...),
+    config_repo: ConfigurationRepository = Depends(get_config_repo),
+):
+    cache.pcache = {}
+
+    # Save the uploaded file to a temporary location
+    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+        tmp_file.write(await file.read())
+        tmp_file_path = tmp_file.name
+
+    content = "".join([line.decode("UTF-8") for line in file.file])
+
+    try:
+        print(f"FILE: {tmp_file_path}")
+        ocel = await ml.process_ocel(filename=tmp_file_path)
+    except FileNotFoundError as e:
+        raise HTTPException(
+            status_code=404, detail=f"Event log not found ({tmp_file_path})"
+        )
+    finally:
+        # Clean up the temporary file
+        os.remove(tmp_file_path)
+
+    #event_log = xes_importer.deserialize(content)
+    #use_mp = (
+    #    len(event_log) > config_repo.get_configuration().min_traces_variant_detection_mp
+    #)
+    #info = calculate_event_log_properties(event_log, use_mp=use_mp)
+    print(f"OCEL: {ocel}")
+    print("\n\n\n")
+    return ocel
+
+
+# NOTE: FilePathInput is already defined earlier in this module (see the
+# existing "class FilePathInput" above); the duplicate re-definition was removed.
+
 @router.post("/loadEventLogFromFilePath")
 async def load_event_log_from_file_path(
     d: FilePathInput, config_repo: ConfigurationRepository = Depends(get_config_repo)
diff --git a/src/backend/api/websocket/main.py b/src/backend/api/websocket/main.py
index 394356f5bf9bcd03cdc81451ad8839a870ef7bc6..e8a47280fcbb8e78d8b7996b423737eec9734276 100644
--- a/src/backend/api/websocket/main.py
+++ b/src/backend/api/websocket/main.py
@@ -2,15 +2,18 @@ from fastapi.routing import APIRouter
 from typing import Callable
 from starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect
 
+
 from api.routes.conformance.variantConformance import (
     calculate_alignment_intern_with_timeout,
     get_alignment_callback,
 )
+
 from api.routes.variants.subvariantMining import (
     mine_repetition_patterns_with_timeout,
     RepetitionsMiningConfig,
     get_repetition_mining_callback,
 )
+
 from backend_utilities.configuration.repository import ConfigurationRepositoryFactory
 from backend_utilities.multiprocessing.pool_factory import PoolFactory
 from cache import cache
diff --git a/src/backend/mine_log.py b/src/backend/backend_utilities/mine_log.py
similarity index 89%
rename from src/backend/mine_log.py
rename to src/backend/backend_utilities/mine_log.py
index d7ff26c3aa0bae0c1522959264b466aedafd9e41..39df4a1bca1183ffcf7e8e3ff64187f1f5f1bf2d 100644
--- a/src/backend/mine_log.py
+++ b/src/backend/backend_utilities/mine_log.py
@@ -20,7 +20,7 @@ import ocsv.Super_Variant_Hierarchy as SVH
 import time 
 import numpy as np
 
-app = FastAPI()
+#app = FastAPI()
 
 class Parameters(BaseModel):
     execution_extraction: Optional[str] = "leading_type"
@@ -28,10 +28,10 @@ class Parameters(BaseModel):
     max_levels: Optional[int] = 4
     frequency_distribution_type: Optional[str] = "NORMAL"
 
-
-@app.post("/process_ocel/")
+#@app.get("/")
+#@app.post("/process_ocel/")
 #async def process_ocel(file: UploadFile = File(...), parameters: Parameters = None):
-async def process_ocel(parameters: Parameters = Parameters()):
+async def process_ocel(parameters: Parameters = Parameters(), filename: str = "/Users/swolf/Desktop/mining/ocsv/ocsv/EventLogs/BPI2017-Top10.jsonocel"):
     '''
     # Save the uploaded file
     file_location = f"/tmp/{file.filename}"
@@ -39,8 +39,13 @@ async def process_ocel(parameters: Parameters = Parameters()):
         file_object.write(file.file.read())
     '''
     # Predefined filename
-    filename = "../ocsv/ocsv/EventLogs/BPI2017-Top10.jsonocel"
-    #parameters = {"execution_extraction": "leading_type",
+    print("Processing OCEL file...\n\n")
+    print(filename)
+    #filename = "../ocsv/ocsv/EventLogs/BPI2017-Top10.jsonocel"
+    #filename = "/Users/swolf/Desktop/mining/ocsv/ocsv/EventLogs/BPI2017-Top10.jsonocel"
+    #filename = "/Users/swolf/Desktop/mining/ocsv/ocsv/EventLogs/OCEL_example.jsonocel"
+    parameters = Parameters()
+    #{"execution_extraction": "leading_type",
               #"leading_type": "application"}
     # Load the OCEL file
     ocel = ocel_import_factory.apply(file_path=filename, parameters=parameters)
@@ -123,6 +128,8 @@ async def process_ocel(parameters: Parameters = Parameters()):
         "hierarchy_info_list": hierarchy_info_list
     }
 
+'''
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=8000)
 
+'''
diff --git a/src/evaluation/__init__.py b/src/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..789b957e90125941ebfb3d7b19d1ccd532ca4ca4
--- /dev/null
+++ b/src/evaluation/__init__.py
@@ -0,0 +1,26 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation import generalization, precision, replay_fitness, simplicity, evaluator, wf_net
+import pkgutil
+
+if pkgutil.find_loader("pyemd"):
+    # import the EMD only if the pyemd package is installed
+    from evaluation import earth_mover_distance
+
+if pkgutil.find_loader("networkx") and pkgutil.find_loader("sympy"):
+    # import the Woflan package only if NetworkX and sympy are installed
+    from evaluation import soundness
diff --git a/src/evaluation/__pycache__/__init__.cpython-310.pyc b/src/evaluation/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55963c030775480230afbc23cea3e705eb17672b
Binary files /dev/null and b/src/evaluation/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..144fed0f31b32085fd8f16ba37e162e693c48748
Binary files /dev/null and b/src/evaluation/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/earth_mover_distance/__init__.py b/src/evaluation/earth_mover_distance/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b686dd1d4b74a705cc3463a5a2432d0392d060d1
--- /dev/null
+++ b/src/evaluation/earth_mover_distance/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.evaluation.earth_mover_distance import evaluator, variants
diff --git a/src/evaluation/earth_mover_distance/__pycache__/__init__.cpython-310.pyc b/src/evaluation/earth_mover_distance/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..069ee8fa5be8d80d2b9bd05e376ea18811c2acbc
Binary files /dev/null and b/src/evaluation/earth_mover_distance/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/earth_mover_distance/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/earth_mover_distance/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6c586d2d84d1a08ec1a29c60f92e349e47b10e5
Binary files /dev/null and b/src/evaluation/earth_mover_distance/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/earth_mover_distance/evaluator.py b/src/evaluation/earth_mover_distance/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..528aeea3e390d155d9426ce97739515e62c1ca2b
--- /dev/null
+++ b/src/evaluation/earth_mover_distance/evaluator.py
@@ -0,0 +1,59 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.evaluation.earth_mover_distance.variants import pyemd
+from enum import Enum
+from pm4py.util import exec_utils
+import deprecation
+from pm4py.meta import VERSION
+import warnings
+
+
+class Variants(Enum):
+    PYEMD = pyemd
+
+
+DEFAULT_VARIANT = Variants.PYEMD
+
+
+@deprecation.deprecated(deprecated_in="2.2.5", removed_in="3.0",
+                        current_version=VERSION,
+                        details="Use the pm4py.algo.evaluation.earth_mover_distance package")
+def apply(lang1, lang2, variant=Variants.PYEMD, parameters=None):
+    """
+    Gets the EMD language between the two languages
+
+    Parameters
+    -------------
+    lang1
+        First language
+    lang2
+        Second language
+    variant
+        Variants of the algorithm
+    parameters
+        Parameters
+    variants
+        Variants of the algorithm, including:
+            - Variants.PYEMD: pyemd based distance
+
+    Returns
+    -------------
+    dist
+        EMD distance
+    """
+    warnings.warn("Use the pm4py.algo.evaluation.earth_mover_distance package")
+    return exec_utils.get_variant(variant).apply(lang1, lang2, parameters=parameters)
diff --git a/src/evaluation/earth_mover_distance/variants/__init__.py b/src/evaluation/earth_mover_distance/variants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..18335b3f28c32421f2f009571ad9987d876a3a41
--- /dev/null
+++ b/src/evaluation/earth_mover_distance/variants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.evaluation.earth_mover_distance.variants import pyemd
diff --git a/src/evaluation/earth_mover_distance/variants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/earth_mover_distance/variants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcc433767db38236cdcbadcc1fa3e8966a71da9a
Binary files /dev/null and b/src/evaluation/earth_mover_distance/variants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/earth_mover_distance/variants/__pycache__/pyemd.cpython-310.pyc b/src/evaluation/earth_mover_distance/variants/__pycache__/pyemd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70fc6d67e009093eb97479dc5ddd1ddb6dec2b70
Binary files /dev/null and b/src/evaluation/earth_mover_distance/variants/__pycache__/pyemd.cpython-310.pyc differ
diff --git a/src/evaluation/earth_mover_distance/variants/pyemd.py b/src/evaluation/earth_mover_distance/variants/pyemd.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9b338615cdc6ec49f607fd6ab13f28fe4d1df06
--- /dev/null
+++ b/src/evaluation/earth_mover_distance/variants/pyemd.py
@@ -0,0 +1,175 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.util.regex import SharedObj, get_new_char
+from pm4py.util import string_distance
+import numpy as np
+from pyemd import emd
+from pm4py.util import exec_utils
+
+
+class Parameters:
+    STRING_DISTANCE = "string_distance"
+
+
+def normalized_levensthein(s1, s2):
+    """
+    Normalized Levensthein distance
+
+    Parameters
+    -------------
+    s1
+        First string
+    s2
+        Second string
+
+    Returns
+    --------------
+    dist
+        Distance
+    """
+    return float(string_distance.levenshtein(s1, s2)) / float(max(len(s1), len(s2)))
+
+
+def get_act_correspondence(activities, parameters=None):
+    """
+    Gets an encoding for each activity
+
+    Parameters
+    --------------
+    activities
+        Activities of the two languages
+    parameters
+        Parameters
+
+    Returns
+    -------------
+    encoding
+        Encoding into hex characters
+    """
+    if parameters is None:
+        parameters = {}
+
+    shared_obj = SharedObj()
+    ret = {}
+    for act in activities:
+        get_new_char(act, shared_obj)
+        ret[act] = shared_obj.mapping_dictio[act]
+
+    return ret
+
+
+def encode_two_languages(lang1, lang2, parameters=None):
+    """
+    Encode the two languages into hexadecimal strings
+
+    Parameters
+    --------------
+    lang1
+        Language 1
+    lang2
+        Language 2
+    parameters
+        Parameters of the algorithm
+
+    Returns
+    --------------
+    enc1
+        Encoding of the first language
+    enc2
+        Encoding of the second language
+    """
+    if parameters is None:
+        parameters = {}
+
+    all_activities = sorted(list(set(y for x in lang1 for y in x).union(set(y for x in lang2 for y in x))))
+    acts_corresp = get_act_correspondence(all_activities, parameters=parameters)
+
+    enc1 = {}
+    enc2 = {}
+
+    for k in lang1:
+        new_key = "".join(acts_corresp[i] for i in k)
+        enc1[new_key] = lang1[k]
+
+    for k in lang2:
+        new_key = "".join(acts_corresp[i] for i in k)
+        enc2[new_key] = lang2[k]
+
+    # each language should have the same keys, even if not present
+    for x in enc1:
+        if x not in enc2:
+            enc2[x] = 0.0
+
+    for x in enc2:
+        if x not in enc1:
+            enc1[x] = 0.0
+
+    enc1 = [(x, y) for x, y in enc1.items()]
+    enc2 = [(x, y) for x, y in enc2.items()]
+
+    # sort the keys in a decreasing way
+    enc1 = sorted(enc1, reverse=True, key=lambda x: x[0])
+    enc2 = sorted(enc2, reverse=True, key=lambda x: x[0])
+
+    return enc1, enc2
+
+
+def apply(lang1, lang2, parameters=None):
+    """
+    Calculates the EMD distance between the two stochastic languages
+
+    Parameters
+    -------------
+    lang1
+        First language
+    lang2
+        Second language
+    parameters
+        Parameters of the algorithm, including:
+            - Parameters.STRING_DISTANCE: function that accepts two strings and returns a distance
+
+    Returns
+    ---------------
+    emd_dist
+        EMD distance
+    """
+    if parameters is None:
+        parameters = {}
+
+    distance_function = exec_utils.get_param_value(Parameters.STRING_DISTANCE, parameters, normalized_levensthein)
+
+    enc1, enc2 = encode_two_languages(lang1, lang2, parameters=parameters)
+
+    # transform everything into a numpy array
+    first_histogram = np.array([x[1] for x in enc1])
+    second_histogram = np.array([x[1] for x in enc2])
+
+    # including a distance matrix that includes the distance between
+    # the traces
+    distance_matrix = []
+    for x in enc1:
+        distance_matrix.append([])
+        for y in enc2:
+            # calculates the (normalized) distance between the strings
+            dist = distance_function(x[0], y[0])
+            distance_matrix[-1].append(float(dist))
+
+    distance_matrix = np.array(distance_matrix)
+
+    ret = emd(first_histogram, second_histogram, distance_matrix)
+
+    return ret
diff --git a/src/evaluation/evaluator.py b/src/evaluation/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..b75264bf8fc12d5bb9f8d78ec3215c5430fb0554
--- /dev/null
+++ b/src/evaluation/evaluator.py
@@ -0,0 +1,114 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py import util as pmutil
+from pm4py.algo.conformance.tokenreplay.variants import token_replay
+from evaluation.generalization.variants import token_based as generalization_token_based
+from evaluation.precision.variants import etconformance_token as precision_token_based
+from evaluation.replay_fitness.variants import token_replay as fitness_token_based
+from evaluation.simplicity.variants import arc_degree as simplicity_arc_degree
+from pm4py.objects import log as log_lib
+from pm4py.objects.conversion.log import converter as log_conversion
+from pm4py.util import xes_constants as xes_util
+from pm4py.util import constants
+from enum import Enum
+from pm4py.util import exec_utils
+import deprecation
+from pm4py.meta import VERSION
+import warnings
+
+
+class Parameters(Enum):
+    ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
+    PARAM_FITNESS_WEIGHT = 'fitness_weight'
+    PARAM_PRECISION_WEIGHT = 'precision_weight'
+    PARAM_SIMPLICITY_WEIGHT = 'simplicity_weight'
+    PARAM_GENERALIZATION_WEIGHT = 'generalization_weight'
+
+
+@deprecation.deprecated(deprecated_in="2.2.5", removed_in="3.0",
+                        current_version=VERSION,
+                        details="Use the pm4py.algo.evaluation.evaluator class")
+def apply(log, net, initial_marking, final_marking, parameters=None):
+    """
+    Calculates all metrics based on token-based replay and returns a unified dictionary
+
+    Parameters
+    -----------
+    log
+        Log
+    net
+        Petri net
+    initial_marking
+        Initial marking
+    final_marking
+        Final marking
+    parameters
+        Parameters
+
+    Returns
+    -----------
+    dictionary
+        Dictionary containing fitness, precision, generalization and simplicity; along with the average weight of
+        these metrics
+    """
+    warnings.warn("Use the pm4py.algo.evaluation.evaluator class")
+    if parameters is None:
+        parameters = {}
+    log = log_conversion.apply(log, parameters, log_conversion.TO_EVENT_LOG)
+
+    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)
+    fitness_weight = exec_utils.get_param_value(Parameters.PARAM_FITNESS_WEIGHT, parameters, 0.25)
+    precision_weight = exec_utils.get_param_value(Parameters.PARAM_PRECISION_WEIGHT, parameters, 0.25)
+    simplicity_weight = exec_utils.get_param_value(Parameters.PARAM_SIMPLICITY_WEIGHT, parameters, 0.25)
+    generalization_weight = exec_utils.get_param_value(Parameters.PARAM_GENERALIZATION_WEIGHT, parameters, 0.25)
+
+    sum_of_weights = (fitness_weight + precision_weight + simplicity_weight + generalization_weight)
+    fitness_weight = fitness_weight / sum_of_weights
+    precision_weight = precision_weight / sum_of_weights
+    simplicity_weight = simplicity_weight / sum_of_weights
+    generalization_weight = generalization_weight / sum_of_weights
+
+    parameters_tr = {token_replay.Parameters.ACTIVITY_KEY: activity_key}
+
+    aligned_traces = token_replay.apply(log, net, initial_marking, final_marking, parameters=parameters_tr)
+
+    parameters = {
+        token_replay.Parameters.ACTIVITY_KEY: activity_key
+    }
+
+    fitness = fitness_token_based.evaluate(aligned_traces)
+    precision = precision_token_based.apply(log, net, initial_marking, final_marking, parameters=parameters)
+    generalization = generalization_token_based.get_generalization(net, aligned_traces)
+    simplicity = simplicity_arc_degree.apply(net)
+
+    metrics_average_weight = fitness_weight * fitness["log_fitness"] + precision_weight * precision \
+                             + generalization_weight * generalization + simplicity_weight * simplicity
+
+    fscore = 0.0
+    if (fitness['log_fitness'] + precision) > 0:
+        fscore = (2*fitness['log_fitness']*precision)/(fitness['log_fitness']+precision)
+    dictionary = {
+        "fitness": fitness,
+        "precision": precision,
+        "generalization": generalization,
+        "simplicity": simplicity,
+        "metricsAverageWeight": metrics_average_weight,
+        "fscore": fscore
+    }
+
+    return dictionary
+
diff --git a/src/evaluation/generalization/__init__.py b/src/evaluation/generalization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9972d59dab7c64bce2e6e3470f82a732ca27e751
--- /dev/null
+++ b/src/evaluation/generalization/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.generalization import evaluator, variants
diff --git a/src/evaluation/generalization/__pycache__/__init__.cpython-310.pyc b/src/evaluation/generalization/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed7ee7c7324415daeac154af07fa7e1f6c8b6949
Binary files /dev/null and b/src/evaluation/generalization/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/generalization/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/generalization/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb0b604a58bc366ccb62df039bb9564813e4a89c
Binary files /dev/null and b/src/evaluation/generalization/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/generalization/__pycache__/parameters.cpython-310.pyc b/src/evaluation/generalization/__pycache__/parameters.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c70a616dcfe5f26650d3e56dc5030e06ccd18456
Binary files /dev/null and b/src/evaluation/generalization/__pycache__/parameters.cpython-310.pyc differ
diff --git a/src/evaluation/generalization/evaluator.py b/src/evaluation/generalization/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1834204d77d0afdb31ffd60ea01adb3c6a0ec2e
--- /dev/null
+++ b/src/evaluation/generalization/evaluator.py
@@ -0,0 +1,43 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.generalization.variants import token_based
+from pm4py.objects.conversion.log import converter as log_conversion
+from enum import Enum
+from pm4py.util import exec_utils
+import deprecation
+from pm4py.meta import VERSION
+import warnings
+
+class Variants(Enum):
+    GENERALIZATION_TOKEN = token_based
+
+
+GENERALIZATION_TOKEN = Variants.GENERALIZATION_TOKEN
+VERSIONS = {GENERALIZATION_TOKEN}
+
+
+@deprecation.deprecated(deprecated_in="2.2.5", removed_in="3.0",
+                        current_version=VERSION,
+                        details="Use the pm4py.algo.evaluation.generalization package")
+def apply(log, petri_net, initial_marking, final_marking, parameters=None, variant=GENERALIZATION_TOKEN):
+    warnings.warn("Use the pm4py.algo.evaluation.generalization package")
+    if parameters is None:
+        parameters = {}
+
+    return exec_utils.get_variant(variant).apply(log_conversion.apply(log, parameters, log_conversion.TO_EVENT_LOG),
+                                                 petri_net,
+                                                 initial_marking, final_marking, parameters=parameters)
diff --git a/src/evaluation/generalization/parameters.py b/src/evaluation/generalization/parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..6475c53a803d95bfb48ec0dfe8a110c4d184d6aa
--- /dev/null
+++ b/src/evaluation/generalization/parameters.py
@@ -0,0 +1,22 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from enum import Enum
+from pm4py.util import constants
+
+
+class Parameters(Enum):
+    ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
diff --git a/src/evaluation/generalization/variants/__init__.py b/src/evaluation/generalization/variants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e206b4eefe0006635f56ac311dab1542d7d2178
--- /dev/null
+++ b/src/evaluation/generalization/variants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.generalization.variants import token_based
diff --git a/src/evaluation/generalization/variants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/generalization/variants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..435bab56c97036f7ddcb37d1dc3d184ee6f81bc5
Binary files /dev/null and b/src/evaluation/generalization/variants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/generalization/variants/__pycache__/token_based.cpython-310.pyc b/src/evaluation/generalization/variants/__pycache__/token_based.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..033d3985221fb278a3ef089c3d878b816d9b370e
Binary files /dev/null and b/src/evaluation/generalization/variants/__pycache__/token_based.cpython-310.pyc differ
diff --git a/src/evaluation/generalization/variants/token_based.py b/src/evaluation/generalization/variants/token_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..83f098cea9df1423557b4e7e8a38a23e879419a0
--- /dev/null
+++ b/src/evaluation/generalization/variants/token_based.py
@@ -0,0 +1,115 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from collections import Counter
+from math import sqrt
+
+from pm4py import util as pmutil
+from pm4py.algo.conformance.tokenreplay import algorithm as token_replay
+from evaluation.generalization.parameters import Parameters
+from pm4py.util import exec_utils
+
+
+def get_generalization(petri_net, aligned_traces):
+    """
+    Gets the generalization from the Petri net and the list of activated transitions
+    during the replay
+
+    The approach has been suggested by the paper
+    Buijs, Joos CAM, Boudewijn F. van Dongen, and Wil MP van der Aalst. "Quality dimensions in process discovery:
+    The importance of fitness, precision, generalization and simplicity."
+    International Journal of Cooperative Information Systems 23.01 (2014): 1440001.
+
+    A token replay is applied and, for each transition, we can measure the number of occurrences
+    in the replay. The following formula is applied for generalization
+
+           \sum_{t \in transitions} math.sqrt(1.0/(n_occ_replay(t)))
+    1 -    ----------------------------------------------------------
+                             # transitions
+
+    Parameters
+    -----------
+    petri_net
+        Petri net
+    aligned_traces
+        Result of the token-replay
+
+    Returns
+    -----------
+    generalization
+        Generalization measure
+    """
+
+    trans_occ_map = Counter()
+    for trace in aligned_traces:
+        for trans in trace["activated_transitions"]:
+            trans_occ_map[trans] += 1
+    inv_sq_occ_sum = 0.0
+    for trans in trans_occ_map:
+        this_term = 1.0 / sqrt(trans_occ_map[trans])
+        inv_sq_occ_sum = inv_sq_occ_sum + this_term
+    for trans in petri_net.transitions:
+        if trans not in trans_occ_map:
+            inv_sq_occ_sum = inv_sq_occ_sum + 1
+    generalization = 1.0
+    if len(petri_net.transitions) > 0:
+        generalization = 1.0 - inv_sq_occ_sum / float(len(petri_net.transitions))
+    return generalization
+
+
+def apply(log, petri_net, initial_marking, final_marking, parameters=None):
+    """
+    Calculates generalization on the provided log and Petri net.
+
+    The approach has been suggested by the paper
+    Buijs, Joos CAM, Boudewijn F. van Dongen, and Wil MP van der Aalst. "Quality dimensions in process discovery:
+    The importance of fitness, precision, generalization and simplicity."
+    International Journal of Cooperative Information Systems 23.01 (2014): 1440001.
+
+    A token replay is applied and, for each transition, we can measure the number of occurrences
+    in the replay. The following formula is applied for generalization
+
+           \sum_{t \in transitions} math.sqrt(1.0/(n_occ_replay(t)))
+    1 -    ----------------------------------------------------------
+                             # transitions
+
+    Parameters
+    -----------
+    log
+        Trace log
+    petri_net
+        Petri net
+    initial_marking
+        Initial marking
+    final_marking
+        Final marking
+    parameters
+        Algorithm parameters
+
+    Returns
+    -----------
+    generalization
+        Generalization measure
+    """
+    if parameters is None:
+        parameters = {}
+    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, pmutil.xes_constants.DEFAULT_NAME_KEY)
+
+    parameters_tr = {Parameters.ACTIVITY_KEY: activity_key}
+
+    aligned_traces = token_replay.apply(log, petri_net, initial_marking, final_marking, parameters=parameters_tr)
+
+    return get_generalization(petri_net, aligned_traces)
diff --git a/src/evaluation/precision/__init__.py b/src/evaluation/precision/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..57ae4270978f6cc191e9db3159bdf922cc0326de
--- /dev/null
+++ b/src/evaluation/precision/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.precision import evaluator, variants
diff --git a/src/evaluation/precision/__pycache__/__init__.cpython-310.pyc b/src/evaluation/precision/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b3121b84a71b90a3a92e6be05b94631703bc53f
Binary files /dev/null and b/src/evaluation/precision/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/precision/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/precision/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..325520fd35997d4eb08db90fcce60568eeeacea2
Binary files /dev/null and b/src/evaluation/precision/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/precision/__pycache__/parameters.cpython-310.pyc b/src/evaluation/precision/__pycache__/parameters.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a26c6ebefedde9cfb6659519a1ae09993a423893
Binary files /dev/null and b/src/evaluation/precision/__pycache__/parameters.cpython-310.pyc differ
diff --git a/src/evaluation/precision/__pycache__/utils.cpython-310.pyc b/src/evaluation/precision/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0becb8d68747d016c028d4a88181f84341f3af3e
Binary files /dev/null and b/src/evaluation/precision/__pycache__/utils.cpython-310.pyc differ
diff --git a/src/evaluation/precision/evaluator.py b/src/evaluation/precision/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3b64092c070e00fa401d906d0fa51819b419d39
--- /dev/null
+++ b/src/evaluation/precision/evaluator.py
@@ -0,0 +1,82 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.precision.variants import etconformance_token, align_etconformance
+from pm4py.objects.conversion.log import converter as log_conversion
+from pm4py.objects.petri_net.utils.check_soundness import check_easy_soundness_net_in_fin_marking
+from enum import Enum
+from pm4py.util import exec_utils
+import deprecation
+from pm4py.meta import VERSION
+import warnings
+
+
+class Variants(Enum):
+    ETCONFORMANCE_TOKEN = etconformance_token
+    ALIGN_ETCONFORMANCE = align_etconformance
+
+
+ETCONFORMANCE_TOKEN = Variants.ETCONFORMANCE_TOKEN
+ALIGN_ETCONFORMANCE = Variants.ALIGN_ETCONFORMANCE
+
+VERSIONS = {ETCONFORMANCE_TOKEN, ALIGN_ETCONFORMANCE}
+
+@deprecation.deprecated(deprecated_in="2.2.5", removed_in="3.0",
+                        current_version=VERSION,
+                        details="Use the pm4py.algo.evaluation.precision package")
+def apply(log, net, marking, final_marking, parameters=None, variant=None):
+    """
+    Method to apply ET Conformance
+
+    Parameters
+    -----------
+    log
+        Trace log
+    net
+        Petri net
+    marking
+        Initial marking
+    final_marking
+        Final marking
+    parameters
+        Parameters of the algorithm, including:
+            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key
+    variant
+        Variant of the algorithm that should be applied:
+            - Variants.ETCONFORMANCE_TOKEN
+            - Variants.ALIGN_ETCONFORMANCE
+    """
+    warnings.warn("Use the pm4py.algo.evaluation.precision package")
+
+    if parameters is None:
+        parameters = {}
+
+    log = log_conversion.apply(log, parameters, log_conversion.TO_EVENT_LOG)
+
+    # execute the following part of code when the variant is not specified by the user
+    if variant is None:
+        if not (check_easy_soundness_net_in_fin_marking(
+                net,
+                marking,
+                final_marking)):
+            # in the case the net is not an easy sound workflow net, we must apply token-based replay
+            variant = ETCONFORMANCE_TOKEN
+        else:
+            # otherwise, use the align-etconformance approach (safer, in the case the model contains duplicates)
+            variant = ALIGN_ETCONFORMANCE
+
+    return exec_utils.get_variant(variant).apply(log, net, marking,
+                             final_marking, parameters=parameters)
diff --git a/src/evaluation/precision/parameters.py b/src/evaluation/precision/parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcdbd4c8ed057763d81cd99c5cd2c41a1db09a9b
--- /dev/null
+++ b/src/evaluation/precision/parameters.py
@@ -0,0 +1,26 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from enum import Enum
+from pm4py.util import constants
+from pm4py.algo.conformance.tokenreplay import algorithm
+
+
+class Parameters(Enum):
+    ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
+    TOKEN_REPLAY_VARIANT = "token_replay_variant"
+    CLEANING_TOKEN_FLOOD = "cleaning_token_flood"
+    SHOW_PROGRESS_BAR = "show_progress_bar"
diff --git a/src/evaluation/precision/utils.py b/src/evaluation/precision/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c08cc20a9d9c4af8ed5cc60bb34ca2bfb790282e
--- /dev/null
+++ b/src/evaluation/precision/utils.py
@@ -0,0 +1,148 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from collections import Counter
+from pm4py.objects.log.obj import EventLog, Event, Trace
+from pm4py.util import xes_constants as xes_util, constants
+import heapq
+from pm4py.objects.petri_net.utils.petri_utils import decorate_places_preset_trans, decorate_transitions_prepostset
+from pm4py.objects.petri_net.utils import align_utils as utils
+from pm4py.objects.petri_net.utils.incidence_matrix import construct
+
+def __search(sync_net, ini, fin, stop, cost_function, skip):
+    decorate_transitions_prepostset(sync_net)
+    decorate_places_preset_trans(sync_net)
+
+    incidence_matrix = construct(sync_net)
+    ini_vec, fin_vec, cost_vec = utils.__vectorize_initial_final_cost(incidence_matrix, ini, fin, cost_function)
+
+    closed = set()
+
+    ini_state = utils.SearchTuple(0, 0, 0, ini, None, None, None, True)
+    open_set = [ini_state]
+    heapq.heapify(open_set)
+    visited = 0
+    queued = 0
+    traversed = 0
+
+    # return all the prefix markings of the optimal alignments as set
+    ret_markings = None
+    # keep track of the optimal cost of an alignment (to trim search when needed)
+    optimal_cost = None
+
+    while not len(open_set) == 0:
+        curr = heapq.heappop(open_set)
+
+        current_marking = curr.m
+
+        # trim alignments when we already reached an optimal alignment and the
+        # current cost is greater than the optimal cost
+        if optimal_cost is not None and curr.f > optimal_cost:
+            break
+
+        already_closed = current_marking in closed
+        if already_closed:
+            continue
+
+        if stop <= current_marking:
+            # add the current marking to the set
+            # of returned markings
+            if ret_markings is None:
+                ret_markings = set()
+            ret_markings.add(current_marking)
+            # close the marking
+            closed.add(current_marking)
+            # set the optimal cost
+            optimal_cost = curr.f
+
+            continue
+
+        closed.add(current_marking)
+        visited += 1
+
+        enabled_trans = set()
+        for p in current_marking:
+            for t in p.ass_trans:
+                if t.sub_marking <= current_marking:
+                    enabled_trans.add(t)
+
+        trans_to_visit_with_cost = [(t, cost_function[t]) for t in enabled_trans if
+                                    not (t is None or utils.__is_log_move(t, skip) or (
+                                            utils.__is_model_move(t, skip) and not t.label[1] is None))]
+
+        for t, cost in trans_to_visit_with_cost:
+            traversed += 1
+            new_marking = utils.add_markings(current_marking, t.add_marking)
+
+            if new_marking in closed:
+                continue
+            g = curr.g + cost
+
+            queued += 1
+            new_f = g
+
+            tp = utils.SearchTuple(new_f, g, 0, new_marking, curr, t, None, True)
+            heapq.heappush(open_set, tp)
+
+    return ret_markings
+
+
+def get_log_prefixes(log, activity_key=xes_util.DEFAULT_NAME_KEY):
+    """
+    Get log prefixes
+
+    Parameters
+    ----------
+    log
+        Trace log
+    activity_key
+        Activity key (must be provided if different from concept:name)
+    """
+    prefixes = {}
+    prefix_count = Counter()
+    for trace in log:
+        for i in range(1, len(trace)):
+            red_trace = trace[0:i]
+            prefix = constants.DEFAULT_VARIANT_SEP.join([x[activity_key] for x in red_trace])
+            next_activity = trace[i][activity_key]
+            if prefix not in prefixes:
+                prefixes[prefix] = set()
+            prefixes[prefix].add(next_activity)
+            prefix_count[prefix] += 1
+    return prefixes, prefix_count
+
+
+def form_fake_log(prefixes_keys, activity_key=xes_util.DEFAULT_NAME_KEY):
+    """
+    Form fake log for replay (putting each prefix as separate trace to align)
+
+    Parameters
+    ----------
+    prefixes_keys
+        Keys of the prefixes (to form a log with a given order)
+    activity_key
+        Activity key (must be provided if different from concept:name)
+    """
+    fake_log = EventLog()
+    for prefix in prefixes_keys:
+        trace = Trace()
+        prefix_activities = prefix.split(constants.DEFAULT_VARIANT_SEP)
+        for activity in prefix_activities:
+            event = Event()
+            event[activity_key] = activity
+            trace.append(event)
+        fake_log.append(trace)
+    return fake_log
diff --git a/src/evaluation/precision/variants/__init__.py b/src/evaluation/precision/variants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b89f02bb3846816c6e99af062295f59a8cc51501
--- /dev/null
+++ b/src/evaluation/precision/variants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.precision.variants import etconformance_token, align_etconformance
diff --git a/src/evaluation/precision/variants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/precision/variants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f9b915c37691a80a420d566ad6dac8b6a5082f1
Binary files /dev/null and b/src/evaluation/precision/variants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/precision/variants/__pycache__/align_etconformance.cpython-310.pyc b/src/evaluation/precision/variants/__pycache__/align_etconformance.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c0eb16b9bd78d4a434e6edbffd14de1d9db639c
Binary files /dev/null and b/src/evaluation/precision/variants/__pycache__/align_etconformance.cpython-310.pyc differ
diff --git a/src/evaluation/precision/variants/__pycache__/etconformance_token.cpython-310.pyc b/src/evaluation/precision/variants/__pycache__/etconformance_token.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57bf8524fdc6a84f3d0311b3c316e8a7550c7016
Binary files /dev/null and b/src/evaluation/precision/variants/__pycache__/etconformance_token.cpython-310.pyc differ
diff --git a/src/evaluation/precision/variants/align_etconformance.py b/src/evaluation/precision/variants/align_etconformance.py
new file mode 100644
index 0000000000000000000000000000000000000000..6412d37064e93fbdc6ed0a71745d43d0c33051fd
--- /dev/null
+++ b/src/evaluation/precision/variants/align_etconformance.py
@@ -0,0 +1,274 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.objects import log as log_lib
+from evaluation.precision import utils as precision_utils
+from pm4py.objects.petri_net.utils import align_utils as utils, check_soundness
+from pm4py.objects.petri_net.obj import Marking
+from pm4py.objects.petri_net.utils.petri_utils import construct_trace_net
+from pm4py.objects.petri_net.utils.synchronous_product import construct
+from pm4py.statistics.start_activities.log.get import get_start_activities
+from pm4py.objects.petri_net.utils.align_utils import get_visible_transitions_eventually_enabled_by_marking
+from evaluation.precision.parameters import Parameters
+from pm4py.util import exec_utils
+from pm4py.util import xes_constants
+import pkgutil
+
+
+def apply(log, net, marking, final_marking, parameters=None):
+    """
+    Get Align-ET Conformance precision
+
+    Parameters
+    ----------
+    log
+        Trace log
+    net
+        Petri net
+    marking
+        Initial marking
+    final_marking
+        Final marking
+    parameters
+        Parameters of the algorithm, including:
+            Parameters.ACTIVITY_KEY -> Activity key
+    """
+
+    if parameters is None:
+        parameters = {}
+
+    debug_level = parameters["debug_level"] if "debug_level" in parameters else 0
+
+    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)
+
+    # default value for precision, when no activated transitions (not even by looking at the initial marking) are found
+    precision = 1.0
+    sum_ee = 0
+    sum_at = 0
+    unfit = 0
+
+    if not check_soundness.check_easy_soundness_net_in_fin_marking(net, marking, final_marking):
+        raise Exception("trying to apply Align-ETConformance on a Petri net that is not a easy sound net!!")
+
+    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
+    prefixes_keys = list(prefixes.keys())
+    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)
+
+    align_stop_marking = align_fake_log_stop_marking(fake_log, net, marking, final_marking, parameters=parameters)
+    all_markings = transform_markings_from_sync_to_original_net(align_stop_marking, net, parameters=parameters)
+
+    for i in range(len(prefixes)):
+        markings = all_markings[i]
+
+        if markings is not None:
+            log_transitions = set(prefixes[prefixes_keys[i]])
+            activated_transitions_labels = set()
+            for m in markings:
+                # add to the set of activated transitions in the model the activated transitions
+                # for each prefix
+                activated_transitions_labels = activated_transitions_labels.union(
+                    x.label for x in utils.get_visible_transitions_eventually_enabled_by_marking(net, m) if
+                    x.label is not None)
+            escaping_edges = activated_transitions_labels.difference(log_transitions)
+
+            sum_at += len(activated_transitions_labels) * prefix_count[prefixes_keys[i]]
+            sum_ee += len(escaping_edges) * prefix_count[prefixes_keys[i]]
+
+            if debug_level > 1:
+                print("")
+                print("prefix=", prefixes_keys[i])
+                print("log_transitions=", log_transitions)
+                print("activated_transitions=", activated_transitions_labels)
+                print("escaping_edges=", escaping_edges)
+        else:
+            unfit += prefix_count[prefixes_keys[i]]
+
+    if debug_level > 0:
+        print("\n")
+        print("overall unfit", unfit)
+        print("overall activated transitions", sum_at)
+        print("overall escaping edges", sum_ee)
+
+    # fix: also the empty prefix should be counted!
+    start_activities = set(get_start_activities(log, parameters=parameters))
+    trans_en_ini_marking = set([x.label for x in get_visible_transitions_eventually_enabled_by_marking(net, marking)])
+    diff = trans_en_ini_marking.difference(start_activities)
+    sum_at += len(log) * len(trans_en_ini_marking)
+    sum_ee += len(log) * len(diff)
+    # end fix
+
+    if sum_at > 0:
+        precision = 1 - float(sum_ee) / float(sum_at)
+
+    return precision
+
+
+def transform_markings_from_sync_to_original_net(markings0, net, parameters=None):
+    """
+    Transform the markings of the sync net (in which alignment stops) into markings of the original net
+    (in order to measure the precision)
+
+    Parameters
+    -------------
+    markings0
+        Markings on the sync net (expressed as place name with count)
+    net
+        Petri net
+    parameters
+        Parameters of the algorithm
+
+    Returns
+    -------------
+    markings
+        Markings of the original model (expressed as place with count)
+    """
+    if parameters is None:
+        parameters = {}
+
+    places_corr = {p.name: p for p in net.places}
+
+    markings = []
+
+    for i in range(len(markings0)):
+        res_list = markings0[i]
+
+        # res_list shall be a list of markings.
+        # If it is None, then there are no corresponding markings
+        # in the original Petri net
+        if res_list is not None:
+            # saves all the markings reached by the optimal alignment
+            # as markings of the original net
+            markings.append([])
+
+            for j in range(len(res_list)):
+                res = res_list[j]
+
+                atm = Marking()
+                for pl, count in res.items():
+                    if pl[0] == utils.SKIP:
+                        atm[places_corr[pl[1]]] = count
+                markings[-1].append(atm)
+        else:
+            markings.append(None)
+
+    return markings
+
+
+def align_fake_log_stop_marking(fake_log, net, marking, final_marking, parameters=None):
+    """
+    Align the 'fake' log with all the prefixes in order to get the markings in which
+    the alignment stops
+
+    Parameters
+    -------------
+    fake_log
+        Fake log
+    net
+        Petri net
+    marking
+        Marking
+    final_marking
+        Final marking
+    parameters
+        Parameters of the algorithm
+
+    Returns
+    -------------
+    alignment
+        For each trace in the log, return the marking in which the alignment stops (expressed as place name with count)
+    """
+    if parameters is None:
+        parameters = {}
+
+    show_progress_bar = exec_utils.get_param_value(Parameters.SHOW_PROGRESS_BAR, parameters, True)
+
+    align_result = []
+
+    progress = None
+    if pkgutil.find_loader("tqdm") and show_progress_bar and len(fake_log) > 1:
+        from tqdm.auto import tqdm
+        progress = tqdm(total=len(fake_log), desc="computing precision with alignments, completed variants :: ")
+
+    for i in range(len(fake_log)):
+        trace = fake_log[i]
+        sync_net, sync_initial_marking, sync_final_marking = build_sync_net(trace, net, marking, final_marking,
+                                                                            parameters=parameters)
+        stop_marking = Marking()
+        for pl, count in sync_final_marking.items():
+            if pl.name[1] == utils.SKIP:
+                stop_marking[pl] = count
+        cost_function = utils.construct_standard_cost_function(sync_net, utils.SKIP)
+
+        # perform the alignment of the prefix
+        res = precision_utils.__search(sync_net, sync_initial_marking, sync_final_marking, stop_marking, cost_function,
+                                       utils.SKIP)
+
+        if res is not None:
+            align_result.append([])
+            for mark in res:
+                res2 = {}
+                for pl in mark:
+                    # transforms the markings for easier correspondence at the end
+                    # (distributed engine friendly!)
+                    res2[(pl.name[0], pl.name[1])] = mark[pl]
+
+                align_result[-1].append(res2)
+        else:
+            # if there is no path from the initial marking
+            # replaying the given prefix, then add None
+            align_result.append(None)
+        if progress is not None:
+            progress.update()
+
+    # gracefully close progress bar
+    if progress is not None:
+        progress.close()
+    del progress
+
+    return align_result
+
+
+def build_sync_net(trace, petri_net, initial_marking, final_marking, parameters=None):
+    """
+    Build the sync product net between the Petri net and the trace prefix
+
+    Parameters
+    ---------------
+    trace
+        Trace prefix
+    petri_net
+        Petri net
+    initial_marking
+        Initial marking
+    final_marking
+        Final marking
+    parameters
+        Possible parameters of the algorithm
+    """
+    if parameters is None:
+        parameters = {}
+
+    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes_constants.DEFAULT_NAME_KEY)
+
+    trace_net, trace_im, trace_fm = construct_trace_net(trace, activity_key=activity_key)
+
+    sync_prod, sync_initial_marking, sync_final_marking = construct(trace_net, trace_im,
+                                                                                              trace_fm, petri_net,
+                                                                                              initial_marking,
+                                                                                              final_marking,
+                                                                                              utils.SKIP)
+
+    return sync_prod, sync_initial_marking, sync_final_marking
diff --git a/src/evaluation/precision/variants/etconformance_token.py b/src/evaluation/precision/variants/etconformance_token.py
new file mode 100644
index 0000000000000000000000000000000000000000..35ed6a48f13c184be829eb01487c69e9bcb7f3a5
--- /dev/null
+++ b/src/evaluation/precision/variants/etconformance_token.py
@@ -0,0 +1,113 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.algo.conformance.tokenreplay.variants import token_replay
+from pm4py.algo.conformance.tokenreplay import algorithm as executor
+
+from pm4py.objects import log as log_lib
+from evaluation.precision import utils as precision_utils
+from pm4py.statistics.start_activities.log.get import get_start_activities
+from pm4py.objects.petri_net.utils.align_utils import get_visible_transitions_eventually_enabled_by_marking
+from evaluation.precision.parameters import Parameters
+from pm4py.util import exec_utils
+
+"""
+Implementation of the approach described in paper
+
+Muñoz-Gama, Jorge, and Josep Carmona. "A fresh look at precision in process conformance." International Conference
+on Business Process Management. Springer, Berlin, Heidelberg, 2010.
+
+for measuring precision.
+
+For each prefix in the log, the reflected tasks are calculated (outgoing attributes from the prefix)
+Then, a token replay is done on the prefix in order to get activated transitions
+Escaping edges are the set difference between activated transitions and reflected tasks
+
+Then, precision is calculated by the formula used in the paper
+
+At the moment, the precision value is different from the one provided by the ProM plug-in,
+although the implementation seems to follow the paper concept
+"""
+
+
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> Activity key

    Returns
    ----------
    precision
        ET Conformance precision value (1.0 when no transitions are activated)
    """
    parameters = {} if parameters is None else parameters

    cleaning_token_flood = exec_utils.get_param_value(Parameters.CLEANING_TOKEN_FLOOD, parameters, False)
    token_replay_variant = exec_utils.get_param_value(Parameters.TOKEN_REPLAY_VARIANT, parameters,
                                                      executor.Variants.TOKEN_REPLAY)
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)

    # accumulators for activated transitions and escaping edges
    activated_total = 0
    escaping_total = 0

    # replay configuration: stop as soon as a prefix turns out to be unfit
    replay_params = {
        token_replay.Parameters.CONSIDER_REMAINING_IN_FITNESS: False,
        token_replay.Parameters.TRY_TO_REACH_FINAL_MARKING_THROUGH_HIDDEN: False,
        token_replay.Parameters.STOP_IMMEDIATELY_UNFIT: True,
        token_replay.Parameters.WALK_THROUGH_HIDDEN_TRANS: True,
        token_replay.Parameters.CLEANING_TOKEN_FLOOD: cleaning_token_flood,
        token_replay.Parameters.ACTIVITY_KEY: activity_key
    }

    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefix_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefix_keys, activity_key=activity_key)

    replay_results = executor.apply(fake_log, net, marking, final_marking, variant=token_replay_variant,
                                    parameters=replay_params)

    # fix: also the empty prefix should be counted!
    start_activities = set(get_start_activities(log, parameters=parameters))
    enabled_at_start = set(
        t.label for t in get_visible_transitions_eventually_enabled_by_marking(net, marking))
    activated_total += len(log) * len(enabled_at_start)
    escaping_total += len(log) * len(enabled_at_start.difference(start_activities))
    # end fix

    for key, replay_res in zip(prefix_keys, replay_results):
        if not replay_res["trace_is_fit"]:
            continue
        observed_tasks = set(prefixes[key])
        enabled_labels = set(
            t.label for t in replay_res["enabled_transitions_in_marking"] if t.label is not None)
        activated_total += len(enabled_labels) * prefix_count[key]
        escaping_total += len(enabled_labels.difference(observed_tasks)) * prefix_count[key]

    if activated_total > 0:
        return 1 - float(escaping_total) / float(activated_total)
    # default precision when no activated transitions were found at all
    return 1.0
diff --git a/src/evaluation/replay_fitness/__init__.py b/src/evaluation/replay_fitness/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e16070ee0d011ab08e42d624436b38b26a7c1db
--- /dev/null
+++ b/src/evaluation/replay_fitness/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.replay_fitness import evaluator, variants
diff --git a/src/evaluation/replay_fitness/__pycache__/__init__.cpython-310.pyc b/src/evaluation/replay_fitness/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1813c392983bd8d398f50e70da01728aeeb752b
Binary files /dev/null and b/src/evaluation/replay_fitness/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/replay_fitness/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/replay_fitness/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6dc8ce5267bfefeb7ed21a3157a8b9563ac7dbf6
Binary files /dev/null and b/src/evaluation/replay_fitness/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/replay_fitness/__pycache__/parameters.cpython-310.pyc b/src/evaluation/replay_fitness/__pycache__/parameters.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72bdd18a01e2f6aa7b4afa08548a80f2feeb2408
Binary files /dev/null and b/src/evaluation/replay_fitness/__pycache__/parameters.cpython-310.pyc differ
diff --git a/src/evaluation/replay_fitness/evaluator.py b/src/evaluation/replay_fitness/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..160f6105953954ab4a1fbdca13b98d91d61590b2
--- /dev/null
+++ b/src/evaluation/replay_fitness/evaluator.py
@@ -0,0 +1,122 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.replay_fitness.variants import alignment_based, token_replay
+from pm4py.algo.conformance import alignments
+from pm4py.objects.conversion.log import converter as log_conversion
+from pm4py.util import exec_utils
+from pm4py.objects.petri_net.utils.check_soundness import check_easy_soundness_net_in_fin_marking
+from enum import Enum
+import deprecation
+from pm4py.meta import VERSION
+import warnings
+
+
+class Variants(Enum):
+    # available replay-fitness variants: alignment-based and token-based replay
+    ALIGNMENT_BASED = alignment_based
+    TOKEN_BASED = token_replay
+
+
+class Parameters(Enum):
+    # parameter key used to select which alignments variant the aligner runs
+    ALIGN_VARIANT = "align_variant"
+
+
+# convenience module-level aliases for the variant enum members
+ALIGNMENT_BASED = Variants.ALIGNMENT_BASED
+TOKEN_BASED = Variants.TOKEN_BASED
+
+# set of all supported variants
+VERSIONS = {ALIGNMENT_BASED, TOKEN_BASED}
+
+
@deprecation.deprecated(deprecated_in="2.2.5", removed_in="3.0",
                        current_version=VERSION,
                        details="Use the pm4py.algo.evaluation.replay_fitness package")
def apply(log, petri_net, initial_marking, final_marking, parameters=None, variant=None):
    """
    Apply fitness evaluation starting from an event log and a marked Petri net,
    by using one of the replay techniques provided by PM4Py

    Parameters
    -----------
    log
        Trace log object
    petri_net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters related to the replay algorithm
    variant
        Chosen variant:
            - Variants.ALIGNMENT_BASED
            - Variants.TOKEN_BASED

    Returns
    ----------
    fitness_eval
        Fitness evaluation
    """
    warnings.warn("Use the pm4py.algo.evaluation.replay_fitness package")

    parameters = {} if parameters is None else parameters

    if variant is None:
        # pick a variant automatically: alignments need an easy-sound workflow net,
        # otherwise fall back to token-based replay
        easy_sound = check_easy_soundness_net_in_fin_marking(petri_net, initial_marking, final_marking)
        variant = ALIGNMENT_BASED if easy_sound else TOKEN_BASED

    converted_log = log_conversion.apply(log, parameters, log_conversion.TO_EVENT_LOG)

    if variant == TOKEN_BASED:
        # token-based replay variant
        return exec_utils.get_variant(variant).apply(converted_log, petri_net,
                                                     initial_marking, final_marking,
                                                     parameters=parameters)

    # alignment-based variant, with the specification of the alignments variant
    align_variant = exec_utils.get_param_value(Parameters.ALIGN_VARIANT, parameters,
                                               alignments.algorithm.DEFAULT_VARIANT)
    return exec_utils.get_variant(variant).apply(converted_log, petri_net,
                                                 initial_marking, final_marking,
                                                 align_variant=align_variant,
                                                 parameters=parameters)
+
+
def evaluate(results, parameters=None, variant=TOKEN_BASED):
    """
    Evaluate replay results when the replay algorithm has already been applied

    Parameters
    -----------
    results
        Results of the replay algorithm
    parameters
        Possible parameters passed to the evaluation
    variant
        Indicates which evaluator is called

    Returns
    -----------
    fitness_eval
        Fitness evaluation
    """
    # dispatch to the evaluator of the chosen variant
    evaluator_module = exec_utils.get_variant(variant)
    return evaluator_module.evaluate(results, parameters=parameters)
diff --git a/src/evaluation/replay_fitness/parameters.py b/src/evaluation/replay_fitness/parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f077222b1d8a621f17e6947d7c69fb6b6643848
--- /dev/null
+++ b/src/evaluation/replay_fitness/parameters.py
@@ -0,0 +1,26 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from enum import Enum
+from pm4py.util import constants
+from pm4py.algo.conformance.tokenreplay import algorithm
+
+
+class Parameters(Enum):
+    # key identifying the activity attribute of events
+    ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
+    # key identifying a generic event attribute
+    ATTRIBUTE_KEY = constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY
+    # which token-replay implementation to use
+    TOKEN_REPLAY_VARIANT = "token_replay_variant"
+    # enables cleaning of token flood during replay
+    CLEANING_TOKEN_FLOOD = "cleaning_token_flood"
diff --git a/src/evaluation/replay_fitness/variants/__init__.py b/src/evaluation/replay_fitness/variants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab66e85249d2d4a0a2b225a68ff724d1c97a8e50
--- /dev/null
+++ b/src/evaluation/replay_fitness/variants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.replay_fitness.variants import alignment_based, token_replay
diff --git a/src/evaluation/replay_fitness/variants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/replay_fitness/variants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d7342cb4e61bd9e1d41984297e91ab3f8dbebf2
Binary files /dev/null and b/src/evaluation/replay_fitness/variants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/replay_fitness/variants/__pycache__/alignment_based.cpython-310.pyc b/src/evaluation/replay_fitness/variants/__pycache__/alignment_based.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b95eec483b619446e06ca18fcdd770390cd13eb
Binary files /dev/null and b/src/evaluation/replay_fitness/variants/__pycache__/alignment_based.cpython-310.pyc differ
diff --git a/src/evaluation/replay_fitness/variants/__pycache__/token_replay.cpython-310.pyc b/src/evaluation/replay_fitness/variants/__pycache__/token_replay.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f885b14e679afa5940eb4685676df516b7a943b
Binary files /dev/null and b/src/evaluation/replay_fitness/variants/__pycache__/token_replay.cpython-310.pyc differ
diff --git a/src/evaluation/replay_fitness/variants/alignment_based.py b/src/evaluation/replay_fitness/variants/alignment_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8db21f3e4cfc33a01574e63c47bc3a90974d85b
--- /dev/null
+++ b/src/evaluation/replay_fitness/variants/alignment_based.py
@@ -0,0 +1,126 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.algo.conformance.alignments.petri_net import algorithm as alignments
+from pm4py.algo.conformance.alignments.decomposed import algorithm as decomp_alignments
+from evaluation.replay_fitness.parameters import Parameters
+
+
def evaluate(aligned_traces, parameters=None):
    """
    Transforms the alignment result to a simple dictionary
    including the percentage of fit traces and the average fitness

    Parameters
    ----------
    aligned_traces
        Alignments calculated for the traces in the log; entries may be None
        when no alignment could be computed for a trace
    parameters
        Possible parameters of the evaluation (currently unused)

    Returns
    ----------
    dictionary
        Containing the keys percFitTraces/percentage_of_fitting_traces and
        averageFitness/average_trace_fitness
    """
    if parameters is None:
        parameters = {}

    # None entries (traces without a computed alignment) are excluded
    # from all the statistics
    valid_traces = [tr for tr in aligned_traces if tr is not None]
    no_traces = len(valid_traces)
    # a trace is considered fit only when its fitness is exactly 1.0
    no_fit_traces = sum(1 for tr in valid_traces if tr["fitness"] == 1.0)
    sum_fitness = sum(tr["fitness"] for tr in valid_traces)

    perc_fit_traces = 0.0
    average_fitness = 0.0

    if no_traces > 0:
        perc_fit_traces = (100.0 * float(no_fit_traces)) / float(no_traces)
        average_fitness = float(sum_fitness) / float(no_traces)

    return {"percFitTraces": perc_fit_traces, "averageFitness": average_fitness,
            "percentage_of_fitting_traces": perc_fit_traces,
            "average_trace_fitness": average_fitness}
+
+
def apply(log, petri_net, initial_marking, final_marking, align_variant=alignments.DEFAULT_VARIANT, parameters=None):
    """
    Evaluate fitness based on alignments

    Parameters
    ----------------
    log
        Event log
    petri_net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    align_variant
        Variants of the alignments to apply
    parameters
        Parameters of the algorithm

    Returns
    ---------------
    dictionary
        Containing two keys (percFitTraces and averageFitness)
    """
    # the recomposition-based variant lives in the decomposed-alignments
    # package; every other variant is handled by the standard aligner
    if align_variant == decomp_alignments.Variants.RECOMPOS_MAXIMAL.value:
        aligner = decomp_alignments
    else:
        aligner = alignments
    alignment_result = aligner.apply(log, petri_net, initial_marking, final_marking,
                                     variant=align_variant, parameters=parameters)
    return evaluate(alignment_result)
+
+
def apply_trace(trace, petri_net, initial_marking, final_marking, best_worst, activity_key):
    """
    Performs the basic alignment search for a single trace and derives its fitness.

    The fitness is computed as 1 - (alignment costs / best-worst costs), where the
    best-worst cost is the cost of aligning the empty trace against the model
    (so a cost of 0 yields fitness 1).

    Parameters
    ----------
    trace: :class:`list` input trace, assumed to be a list of events (i.e. the code will use the activity key to
    get the attributes)
    petri_net: :class:`pm4py.objects.petri.net.PetriNet` the Petri net to use in the alignment
    initial_marking: :class:`pm4py.objects.petri.net.Marking` initial marking in the Petri net
    final_marking: :class:`pm4py.objects.petri.net.Marking` final marking in the Petri net
    best_worst: cost of the best worst alignment of a trace (empty trace aligned to the model)
    activity_key: :class:`str` (optional) key to use to identify the activity described by the events

    Returns
    -------
    dictionary: `dict` with keys **alignment**, **cost**, **visited_states**, **queued_states** and **traversed_arcs**
    """
    alignment = alignments.apply_trace(trace, petri_net, initial_marking, final_marking,
                                       {Parameters.ACTIVITY_KEY: activity_key})
    # normalize the raw cost by the standard model/log move cost
    fixed_costs = alignment['cost'] // alignments.utils.STD_MODEL_LOG_MOVE_COST
    fitness = 1 - (fixed_costs / best_worst) if best_worst > 0 else 1
    return {'trace': trace, 'alignment': alignment['alignment'], 'cost': fixed_costs, 'fitness': fitness,
            'visited_states': alignment['visited_states'], 'queued_states': alignment['queued_states'],
            'traversed_arcs': alignment['traversed_arcs']}
diff --git a/src/evaluation/replay_fitness/variants/token_replay.py b/src/evaluation/replay_fitness/variants/token_replay.py
new file mode 100644
index 0000000000000000000000000000000000000000..84c5692da3a9aa21943d2ebb50bde0ed9cfd85e4
--- /dev/null
+++ b/src/evaluation/replay_fitness/variants/token_replay.py
@@ -0,0 +1,100 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from pm4py.algo.conformance.tokenreplay import algorithm as executor
+from pm4py.algo.conformance.tokenreplay.variants import token_replay
+from evaluation.replay_fitness.parameters import Parameters
+from pm4py.util import exec_utils
+from pm4py.util.xes_constants import DEFAULT_NAME_KEY
+
+
def evaluate(aligned_traces, parameters=None):
    """
    Gets a dictionary expressing fitness in a synthetic way from the list of boolean values
    saying if a trace in the log is fit, and the float values of fitness associated to each trace

    Parameters
    ------------
    aligned_traces
        Result of the token-based replayer
    parameters
        Possible parameters of the evaluation

    Returns
    -----------
    dictionary
        Containing the keys perc_fit_traces, average_trace_fitness, log_fitness
        and percentage_of_fitting_traces
    """
    if parameters is None:
        parameters = {}

    trace_count = len(aligned_traces)
    fit_count = sum(1 for res in aligned_traces if res["trace_is_fit"])
    fitness_sum = sum(res["trace_fitness"] for res in aligned_traces)
    # token statistics aggregated over all traces
    missing = sum(res["missing_tokens"] for res in aligned_traces)
    consumed = sum(res["consumed_tokens"] for res in aligned_traces)
    remaining = sum(res["remaining_tokens"] for res in aligned_traces)
    produced = sum(res["produced_tokens"] for res in aligned_traces)

    perc_fit_traces = 0.0
    average_fitness = 0.0
    log_fitness = 0
    if trace_count > 0:
        perc_fit_traces = float(100.0 * fit_count) / float(trace_count)
        average_fitness = float(fitness_sum) / float(trace_count)
        # token-based log fitness: average of missing/consumed and
        # remaining/produced ratios (guarded against division by zero)
        if consumed > 0 and produced > 0:
            log_fitness = 0.5 * (1 - missing / consumed) + 0.5 * (1 - remaining / produced)

    return {"perc_fit_traces": perc_fit_traces, "average_trace_fitness": average_fitness,
            "log_fitness": log_fitness, "percentage_of_fitting_traces": perc_fit_traces}
+
+
def apply(log, petri_net, initial_marking, final_marking, parameters=None):
    """
    Apply token replay fitness evaluation

    Parameters
    -----------
    log
        Trace log
    petri_net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters

    Returns
    -----------
    dictionary
        Containing two keys (percFitTraces and averageFitness)
    """
    parameters = {} if parameters is None else parameters

    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, DEFAULT_NAME_KEY)
    replay_variant = exec_utils.get_param_value(Parameters.TOKEN_REPLAY_VARIANT, parameters,
                                                executor.Variants.TOKEN_REPLAY)
    cleaning_token_flood = exec_utils.get_param_value(Parameters.CLEANING_TOKEN_FLOOD, parameters, False)
    consider_remaining = exec_utils.get_param_value(token_replay.Parameters.CONSIDER_REMAINING_IN_FITNESS,
                                                    parameters, True)

    # forward only the replay-relevant settings to the token replayer
    replay_parameters = {
        token_replay.Parameters.ACTIVITY_KEY: activity_key,
        token_replay.Parameters.CONSIDER_REMAINING_IN_FITNESS: consider_remaining,
        token_replay.Parameters.CLEANING_TOKEN_FLOOD: cleaning_token_flood,
    }

    replay_results = executor.apply(log, petri_net, initial_marking, final_marking,
                                    variant=replay_variant, parameters=replay_parameters)
    return evaluate(replay_results)
diff --git a/src/evaluation/simplicity/__init__.py b/src/evaluation/simplicity/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef0d4efb18c805f3b7d600b599520ca17e174107
--- /dev/null
+++ b/src/evaluation/simplicity/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.simplicity import evaluator, variants
diff --git a/src/evaluation/simplicity/__pycache__/__init__.cpython-310.pyc b/src/evaluation/simplicity/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35c404eb3a5ab3c97869cb2417a7a7ce4f61964a
Binary files /dev/null and b/src/evaluation/simplicity/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/simplicity/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/simplicity/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19b414499b921880733c921dc9134ad1b6649a99
Binary files /dev/null and b/src/evaluation/simplicity/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/simplicity/evaluator.py b/src/evaluation/simplicity/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..439848c46c1dc03efd2b430eb186cdcdc73abe0a
--- /dev/null
+++ b/src/evaluation/simplicity/evaluator.py
@@ -0,0 +1,39 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.simplicity.variants import arc_degree
+from enum import Enum
+from pm4py.util import exec_utils
+import deprecation
+from pm4py.meta import VERSION
+import warnings
+
+
+class Variants(Enum):
+    # the only available simplicity variant: arc-degree based simplicity
+    SIMPLICITY_ARC_DEGREE = arc_degree
+
+
+# module-level alias for the single available variant
+SIMPLICITY_ARC_DEGREE = Variants.SIMPLICITY_ARC_DEGREE
+
+# set of all supported variants
+VERSIONS = {SIMPLICITY_ARC_DEGREE}
+
+
@deprecation.deprecated(deprecated_in="2.2.5", removed_in="3.0",
                        current_version=VERSION,
                        details="Use the pm4py.algo.evaluation.simplicity package")
def apply(petri_net, parameters=None, variant=SIMPLICITY_ARC_DEGREE):
    """
    Compute the simplicity of a Petri net using the chosen variant.

    Parameters
    -----------
    petri_net
        Petri net
    parameters
        Possible parameters of the chosen variant
    variant
        Variant to use (default: SIMPLICITY_ARC_DEGREE)

    Returns
    -----------
    simplicity
        Simplicity value computed by the chosen variant
    """
    warnings.warn("Use the pm4py.algo.evaluation.simplicity package")
    chosen_variant = exec_utils.get_variant(variant)
    return chosen_variant.apply(petri_net, parameters=parameters)
diff --git a/src/evaluation/simplicity/variants/__init__.py b/src/evaluation/simplicity/variants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ca201752ad2d79063e2a14412e11ddc5373ed18
--- /dev/null
+++ b/src/evaluation/simplicity/variants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.simplicity.variants import arc_degree
diff --git a/src/evaluation/simplicity/variants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/simplicity/variants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79bce5a0bf4c6ab34b32a6eb4c80d85591765cd5
Binary files /dev/null and b/src/evaluation/simplicity/variants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/simplicity/variants/__pycache__/arc_degree.cpython-310.pyc b/src/evaluation/simplicity/variants/__pycache__/arc_degree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e926c1d78c5f68701dcc52538bc816ede860d607
Binary files /dev/null and b/src/evaluation/simplicity/variants/__pycache__/arc_degree.cpython-310.pyc differ
diff --git a/src/evaluation/simplicity/variants/arc_degree.py b/src/evaluation/simplicity/variants/arc_degree.py
new file mode 100644
index 0000000000000000000000000000000000000000..33819ec99bcaa07f75f77332eeada74a4094a988
--- /dev/null
+++ b/src/evaluation/simplicity/variants/arc_degree.py
@@ -0,0 +1,70 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from statistics import mean
+from enum import Enum
+from pm4py.util import exec_utils
+
+
+class Parameters(Enum):
+    K = "k"
+
+
+def apply(petri_net, parameters=None):
+    """
+    Gets simplicity from a Petri net
+
+    Vázquez-Barreiros, Borja, Manuel Mucientes, and Manuel Lama. "ProDiGen: Mining complete, precise and minimal
+    structure process models with a genetic algorithm." Information Sciences 294 (2015): 315-333.
+
+    Parameters
+    -----------
+    petri_net
+        Petri net
+    parameters
+        Possible parameters of the algorithm:
+            - K: defines the value to be substracted in the formula: the lower is the value,
+            the lower is the simplicity value. k is the baseline arc degree (that is subtracted from the others)
+
+    Returns
+    -----------
+    simplicity
+        Simplicity measure associated to the Petri net
+    """
+    if parameters is None:
+        parameters = {}
+
+    # original model: we have plenty of choices there.
+    # one choice is about taking a model containing the most frequent variant,
+    # along with a short circuit between the final and the initial marking.
+    # in that case, the average arc degree of the "original model" is 2
+
+    # keep the default to 2
+    k = exec_utils.get_param_value(Parameters.K, parameters, 2)
+
+    # TODO: verify the real provenence of the approach before!
+
+    all_arc_degrees = []
+    for place in petri_net.places:
+        all_arc_degrees.append(len(place.in_arcs) + len(place.out_arcs))
+    for trans in petri_net.transitions:
+        all_arc_degrees.append(len(trans.in_arcs) + len(trans.out_arcs))
+
+    mean_degree = mean(all_arc_degrees) if all_arc_degrees else 0.0
+
+    simplicity = 1.0 / (1.0 + max(mean_degree - k, 0))
+
+    return simplicity
diff --git a/src/evaluation/soundness/__init__.py b/src/evaluation/soundness/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6afd88a8ec95be122b2b783af99e0b654c3315eb
--- /dev/null
+++ b/src/evaluation/soundness/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness import woflan
diff --git a/src/evaluation/soundness/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d11df19de565faefadc6ec9de3a49a014ff76b9b
Binary files /dev/null and b/src/evaluation/soundness/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/__init__.py b/src/evaluation/soundness/woflan/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a77c8712d5d52cf1d8becf5116cd5bb58eddf853
--- /dev/null
+++ b/src/evaluation/soundness/woflan/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan import algorithm, graphs, not_well_handled_pairs, place_invariants
diff --git a/src/evaluation/soundness/woflan/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f72bf4fca8b2e01d810ec574bcec811ac06597db
Binary files /dev/null and b/src/evaluation/soundness/woflan/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/__pycache__/algorithm.cpython-310.pyc b/src/evaluation/soundness/woflan/__pycache__/algorithm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ed770e1faef9fdd363c56cf7cf76ea405d925a3
Binary files /dev/null and b/src/evaluation/soundness/woflan/__pycache__/algorithm.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/algorithm.py b/src/evaluation/soundness/woflan/algorithm.py
new file mode 100644
index 0000000000000000000000000000000000000000..88097d45ba3d7d8d33cafe5ac6b7731b32cbd520
--- /dev/null
+++ b/src/evaluation/soundness/woflan/algorithm.py
@@ -0,0 +1,654 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import copy
+import warnings
+from enum import Enum
+
+import deprecation
+import networkx as nx
+import numpy as np
+
+# Minimal Coverability Graph
+from evaluation.soundness.woflan.graphs.minimal_coverability_graph.minimal_coverability_graph import \
+    apply as minimal_coverability_graph
+# reachability Graph Creation
+from evaluation.soundness.woflan.graphs.reachability_graph.reachability_graph import apply as reachability_graph
+# Restricted coverability graph
+from evaluation.soundness.woflan.graphs.restricted_coverability_graph.restricted_coverability_graph import \
+    construct_tree as restricted_coverability_tree
+from evaluation.soundness.woflan.graphs.utility import check_for_dead_tasks
+from evaluation.soundness.woflan.graphs.utility import check_for_improper_conditions
+from evaluation.soundness.woflan.graphs.utility import check_for_substates
+from evaluation.soundness.woflan.graphs.utility import convert_marking
+# Importing to discover not-well handled pairs
+from evaluation.soundness.woflan.not_well_handled_pairs.not_well_handled_pairs import \
+    apply as compute_not_well_handled_pairs
+# Importing for place invariants related stuff (s-components, uniform and weighted place invariants)
+from evaluation.soundness.woflan.place_invariants.place_invariants import compute_place_invariants
+from evaluation.soundness.woflan.place_invariants.s_component import compute_s_components
+from evaluation.soundness.woflan.place_invariants.s_component import compute_uncovered_places_in_component
+from evaluation.soundness.woflan.place_invariants.utility import \
+    compute_uncovered_places as compute_uncovered_place_in_invariants
+from evaluation.soundness.woflan.place_invariants.utility import transform_basis
+from pm4py.objects.petri_net.utils import petri_utils
+from pm4py.objects.petri_net.obj import PetriNet
+from pm4py.util import exec_utils
+
+
+class Parameters(Enum):
+    RETURN_ASAP_WHEN_NOT_SOUND = "return_asap_when_not_sound"
+    PRINT_DIAGNOSTICS = "print_diagnostics"
+    RETURN_DIAGNOSTICS = "return_diagnostics"
+
+
+class Outputs(Enum):
+    S_C_NET = "s_c_net"
+    PLACE_INVARIANTS = "place_invariants"
+    UNIFORM_PLACE_INVARIANTS = "uniform_place_invariants"
+    S_COMPONENTS = "s_components"
+    UNCOVERED_PLACES_S_COMPONENT = "uncovered_places_s_component"
+    NOT_WELL_HANDLED_PAIRS = "not_well_handled_pairs"
+    LEFT = "left"
+    UNCOVERED_PLACES_UNIFORM = "uncovered_places_uniform"
+    WEIGHTED_PLACE_INVARIANTS = "weighted_place_invariants"
+    UNCOVERED_PLACES_WEIGHTED = "uncovered_places_weighted"
+    MCG = "mcg"
+    DEAD_TASKS = "dead_tasks"
+    R_G_S_C = "r_g_s_c"
+    R_G = "r_g"
+    LOCKING_SCENARIOS = "locking_scenarios"
+    RESTRICTED_COVERABILITY_TREE = "restricted_coverability_tree"
+
+
+class woflan:
+    def __init__(self, net, initial_marking, final_marking, print_diagnostics=False):
+        self.net = net
+        self.initial_marking = initial_marking
+        self.final_marking = final_marking
+        self.print_diagnostics = print_diagnostics
+        self.s_c_net = None
+        self.place_invariants = None
+        self.uniform_place_invariants = None
+        self.s_components = None
+        self.uncovered_places_s_component = None
+        self.not_well_handled_pairs = None
+        self.left = None
+        self.uncovered_places_uniform = None
+        self.weighted_place_invariants = None
+        self.uncovered_places_weighted = None
+        self.mcg = None
+        self.dead_tasks = None
+        self.r_g_s_c = None
+        self.r_g = None
+        self.locking_scenarios = None
+        self.restricted_coverability_tree = None
+
+    def set_s_c_net(self, s_c_net):
+        self.s_c_net = s_c_net
+
+    def set_place_invariants(self, invariants):
+        self.place_invariants = invariants
+
+    def set_uniform_place_invariants(self, invariants):
+        self.uniform_place_invariants = invariants
+
+    def set_s_components(self, s_components):
+        self.s_components = s_components
+
+    def set_uncovered_places_s_component(self, uncovered_places):
+        self.uncovered_places_s_component = uncovered_places
+
+    def set_not_well_handled_pairs(self, not_well_handled_pairs):
+        self.not_well_handled_pairs = not_well_handled_pairs
+
+    def set_left(self, left):
+        self.left = left
+
+    def set_uncovered_places_uniform(self, places):
+        self.uncovered_places_uniform = places
+
+    def set_weighted_place_invariants(self, invariants):
+        self.weighted_place_invariants = invariants
+
+    def set_uncovered_places_weighted(self, places):
+        self.uncovered_places_weighted = places
+
+    def set_mcg(self, mcg):
+        self.mcg = mcg
+
+    def set_dead_tasks(self, dead_tasks):
+        self.dead_tasks = dead_tasks
+
+    def set_r_g_s_c(self, r_g):
+        self.r_g_s_c = r_g
+
+    def set_r_g(self, r_g):
+        self.r_g = r_g
+
+    def set_locking_scenarios(self, scenarios):
+        self.locking_scenarios = scenarios
+
+    def set_restricted_coverability_tree(self, graph):
+        self.restricted_coverability_tree = graph
+
+    def get_net(self):
+        return self.net
+
+    def get_initial_marking(self):
+        return self.initial_marking
+
+    def get_final_marking(self):
+        return self.final_marking
+
+    def get_s_c_net(self):
+        return self.s_c_net
+
+    def get_place_invariants(self):
+        return self.place_invariants
+
+    def get_uniform_place_invariants(self):
+        return self.uniform_place_invariants
+
+    def get_s_components(self):
+        return self.s_components
+
+    def get_uncovered_places_s_component(self):
+        return self.uncovered_places_s_component
+
+    def get_not_well_handled_pairs(self):
+        return self.not_well_handled_pairs
+
+    def get_left(self):
+        return self.left
+
+    def get_uncovered_places_uniform(self):
+        return self.uncovered_places_uniform
+
+    def get_weighted_place_invariants(self):
+        return self.weighted_place_invariants
+
+    def get_uncovered_places_weighted(self):
+        return self.uncovered_places_weighted
+
+    def get_mcg(self):
+        return self.mcg
+
+    def get_dead_tasks(self):
+        return self.dead_tasks
+
+    def get_r_g_s_c(self):
+        return self.r_g_s_c
+
+    def get_r_g(self):
+        return self.r_g
+
+    def get_locking_scenarios(self):
+        return self.locking_scenarios
+
+    def get_restricted_coverability_tree(self):
+        return self.restricted_coverability_tree
+
+    def get_output(self):
+        """
+        Returns a dictionary representation of the
+        entities that are calculated during WOFLAN
+        """
+        ret = {}
+        if self.s_c_net is not None:
+            ret[Outputs.S_C_NET.value] = self.s_c_net
+        if self.place_invariants is not None:
+            ret[Outputs.PLACE_INVARIANTS.value] = self.place_invariants
+        if self.uniform_place_invariants is not None:
+            ret[Outputs.UNIFORM_PLACE_INVARIANTS.value] = self.uniform_place_invariants
+        if self.s_components is not None:
+            ret[Outputs.S_COMPONENTS.value] = self.s_components
+        if self.uncovered_places_s_component is not None:
+            ret[Outputs.UNCOVERED_PLACES_S_COMPONENT.value] = self.uncovered_places_s_component
+        if self.not_well_handled_pairs is not None:
+            ret[Outputs.NOT_WELL_HANDLED_PAIRS.value] = self.not_well_handled_pairs
+        if self.left is not None:
+            ret[Outputs.LEFT.value] = self.left
+        if self.uncovered_places_uniform is not None:
+            ret[Outputs.UNCOVERED_PLACES_UNIFORM.value] = self.uncovered_places_uniform
+        if self.weighted_place_invariants is not None:
+            ret[Outputs.WEIGHTED_PLACE_INVARIANTS.value] = self.weighted_place_invariants
+        if self.uncovered_places_weighted is not None:
+            ret[Outputs.UNCOVERED_PLACES_WEIGHTED.value] = self.uncovered_places_weighted
+        if self.mcg is not None:
+            ret[Outputs.MCG.value] = self.mcg
+        if self.dead_tasks is not None:
+            ret[Outputs.DEAD_TASKS.value] = self.dead_tasks
+        if self.r_g_s_c is not None:
+            ret[Outputs.R_G_S_C.value] = self.r_g_s_c
+        if self.r_g is not None:
+            ret[Outputs.R_G.value] = self.r_g
+        if self.locking_scenarios is not None:
+            ret[Outputs.LOCKING_SCENARIOS.value] = self.locking_scenarios
+        if self.restricted_coverability_tree is not None:
+            ret[Outputs.RESTRICTED_COVERABILITY_TREE.value] = self.restricted_coverability_tree
+        return ret
+
+
+def short_circuit_petri_net(net, print_diagnostics=False):
+    """
+    First, the sink and source place are identified. Then, a transition from the sink back to the source is
+    added to short-circuit the given Petri net. If there is no unique source and sink place, None is returned.
+    :param net: Petri net that is going to be short circuited
+    :return: the short-circuited Petri net, or None when the net has no unique source/sink place
+    """
+    s_c_net = copy.deepcopy(net)
+    no_source_places = 0
+    no_sink_places = 0
+    sink = None
+    source = None
+    for place in s_c_net.places:
+        if len(place.in_arcs) == 0:
+            source = place
+            no_source_places += 1
+        if len(place.out_arcs) == 0:
+            sink = place
+            no_sink_places += 1
+    if (sink is not None) and (source is not None) and no_source_places == 1 and no_sink_places == 1:
+        # If there is one unique source and sink place, short circuit Petri Net is constructed
+        t_1 = PetriNet.Transition("short_circuited_transition", "short_circuited_transition")
+        s_c_net.transitions.add(t_1)
+        # add arcs in short-circuited net
+        petri_utils.add_arc_from_to(sink, t_1, s_c_net)
+        petri_utils.add_arc_from_to(t_1, source, s_c_net)
+        return s_c_net
+    else:
+        if sink is None:
+            if print_diagnostics:
+                print("There is no sink place.")
+            return None
+        elif source is None:
+            if print_diagnostics:
+                print("There is no source place.")
+            return None
+        elif no_source_places > 1:
+            if print_diagnostics:
+                print("There is more than one source place.")
+            return None
+        elif no_sink_places > 1:
+            if print_diagnostics:
+                print("There is more than one sink place.")
+            return None
+
+
+def step_1(woflan_object, return_asap_when_unsound=False):
+    """
+    In the first step, we check if the input is given correct. We check if net is an PM4Py Petri Net representation
+    and if the exist a correct entry for the initial and final marking.
+    :param woflan_object: Object that contains all necessary information
+    :return: Proceed with step 2 if ok; else False
+    """
+
+    def check_if_marking_in_net(marking, net):
+        """
+        Checks if the marked place exists in the Petri Net and if there is only one i_m and f_m
+        :param marking: Marking of Petri Net
+        :param net: PM4Py representation of Petri Net
+        :return: Boolean. True if marking can exists; False if not.
+        """
+        for place in marking:
+            if place in net.places:
+                return True
+        return False
+
+    if isinstance(woflan_object.get_net(), PetriNet):
+        if len(woflan_object.get_initial_marking()) != 1 or len(woflan_object.get_final_marking()) != 1:
+            if woflan_object.print_diagnostics:
+                print('There is more than one initial or final marking.')
+            return False
+        if check_if_marking_in_net(woflan_object.get_initial_marking(), woflan_object.get_net()):
+            if check_if_marking_in_net(woflan_object.get_final_marking(), woflan_object.get_net()):
+                if woflan_object.print_diagnostics:
+                    print("Input is ok.")
+                return step_2(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    if woflan_object.print_diagnostics:
+        print('The Petri Net is not PM4Py Petri Net represenatation.')
+    return False
+
+
+def step_2(woflan_object, return_asap_when_unsound=False):
+    """
+    This method checks if a given Petri net is a workflow net. First, the Petri Net gets short-circuited
+    (connect start and end place with a tau-transition. Second, the Petri Net gets converted into a networkx graph.
+    Finally, it is tested if the resulting graph is a strongly connected component.
+    :param woflan_object: Woflan objet containing all information
+    :return: Bool=True if net is a WF-Net
+    """
+
+    def transform_petri_net_into_regular_graph(still_need_to_discover):
+        """
+        Ths method transforms a list of places and transitions into a networkx graph
+        :param still_need_to_discover: set of places and transition that are not fully added to graph
+        :return:
+        """
+        G = nx.DiGraph()
+        while len(still_need_to_discover) > 0:
+            element = still_need_to_discover.pop()
+            G.add_node(element.name)
+            for in_arc in element.in_arcs:
+                G.add_node(in_arc.source.name)
+                G.add_edge(in_arc.source.name, element.name)
+            for out_arc in element.out_arcs:
+                G.add_node(out_arc.target.name)
+                G.add_edge(element.name, out_arc.target.name)
+        return G
+
+    woflan_object.set_s_c_net(short_circuit_petri_net(woflan_object.get_net(),
+                                                      print_diagnostics=woflan_object.print_diagnostics))
+    if woflan_object.get_s_c_net() is None:
+        return False
+    to_discover = woflan_object.get_s_c_net().places | woflan_object.get_s_c_net().transitions
+    graph = transform_petri_net_into_regular_graph(to_discover)
+    if not nx.algorithms.components.is_strongly_connected(graph):
+        if woflan_object.print_diagnostics:
+            print('Petri Net is a not a worflow net.')
+        return False
+    else:
+        if woflan_object.print_diagnostics:
+            print("Petri Net is a workflow net.")
+        return step_3(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_3(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_place_invariants(compute_place_invariants(woflan_object.get_s_c_net()))
+    woflan_object.set_uniform_place_invariants(transform_basis(woflan_object.get_place_invariants(), style='uniform'))
+    woflan_object.set_s_components(
+        compute_s_components(woflan_object.get_s_c_net(), woflan_object.get_uniform_place_invariants()))
+    woflan_object.set_uncovered_places_s_component(
+        compute_uncovered_places_in_component(woflan_object.get_s_components(), woflan_object.get_s_c_net()))
+    if len(woflan_object.get_uncovered_places_s_component()) == 0:
+        woflan_object.set_left(True)
+        if woflan_object.print_diagnostics:
+            print('Every place is covered by s-components.')
+        return step_10(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        if woflan_object.print_diagnostics:
+            print('The following places are not covered by an s-component: {}.'.format(
+                woflan_object.get_uncovered_places_s_component()))
+        if return_asap_when_unsound:
+            return False
+        return step_4(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_4(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_not_well_handled_pairs(compute_not_well_handled_pairs(woflan_object.get_s_c_net()))
+    if len(woflan_object.get_not_well_handled_pairs()) == 0:
+        if woflan_object.print_diagnostics:
+            print('Petri Net is unsound')
+        woflan_object.set_left(False)
+        if return_asap_when_unsound:
+            return False
+        return step_5(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        if woflan_object.print_diagnostics:
+            print('Not well-handled pairs are: {}.'.format(woflan_object.get_not_well_handled_pairs()))
+        woflan_object.set_left(True)
+        return step_5(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_5(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_uncovered_places_uniform(
+        compute_uncovered_place_in_invariants(woflan_object.get_uniform_place_invariants(),
+                                              woflan_object.get_s_c_net()))
+    if len(woflan_object.get_uncovered_places_uniform()) == 0:
+        if woflan_object.print_diagnostics:
+            print('There are no uncovered places in uniform invariants.')
+        return step_10(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        if woflan_object.print_diagnostics:
+            print('The following places are uncovered in uniform invariants: {}'.format(
+                woflan_object.get_uncovered_places_uniform()))
+        return step_6(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_6(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_weighted_place_invariants(transform_basis(woflan_object.get_place_invariants(), style='weighted'))
+    woflan_object.set_uncovered_places_weighted(
+        compute_uncovered_place_in_invariants(woflan_object.get_weighted_place_invariants(),
+                                              woflan_object.get_s_c_net()))
+    if len(woflan_object.get_uncovered_places_weighted()) == 0:
+        if woflan_object.print_diagnostics:
+            print('There are no uncovered places in weighted invariants.')
+        return step_10(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        if woflan_object.print_diagnostics:
+            print('The following places are uncovered in weighted invariants: {}'.format(
+                woflan_object.get_uncovered_places_weighted()))
+        return step_7(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_7(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_mcg(minimal_coverability_graph(woflan_object.get_s_c_net(), woflan_object.get_initial_marking(),
+                                                     woflan_object.get_net()))
+    if len(check_for_improper_conditions(woflan_object.get_mcg())) == 0:
+        if woflan_object.print_diagnostics:
+            print('No improper coditions.')
+        if woflan_object.get_left() == True:
+            return step_8(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+        else:
+            return step_10(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        if woflan_object.print_diagnostics:
+            print('Improper WPD. The following are the improper conditions: {}.'.format(
+                check_for_improper_conditions(woflan_object.get_mcg())))
+        if return_asap_when_unsound:
+            return False
+        return step_9(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_8(woflan_object, return_asap_when_unsound=False):
+    if check_for_substates(woflan_object.get_mcg()):
+        return step_10(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        return step_10(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_9(woflan_object, return_asap_when_unsound=False):
+    if woflan_object.print_diagnostics:
+        print('The following sequences are unbounded: {}'.format(compute_unbounded_sequences(woflan_object)))
+    return False
+
+
+def step_10(woflan_object, return_asap_when_unsound=False):
+    if woflan_object.get_mcg() is None:
+        woflan_object.set_mcg(
+            minimal_coverability_graph(woflan_object.get_s_c_net(), woflan_object.get_initial_marking(),
+                                       woflan_object.get_net()))
+    woflan_object.set_dead_tasks(check_for_dead_tasks(woflan_object.get_s_c_net(), woflan_object.get_mcg()))
+    if len(woflan_object.get_dead_tasks()) == 0:
+        if woflan_object.print_diagnostics:
+            print('There are no dead tasks.')
+        if woflan_object.get_left() == True:
+            return step_11(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+        else:
+            if return_asap_when_unsound:
+                return False
+            return step_12(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+    else:
+        if woflan_object.print_diagnostics:
+            print('The following tasks are dead: {}'.format(woflan_object.get_dead_tasks()))
+        return False
+
+
+def step_11(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_r_g_s_c(
+        reachability_graph(woflan_object.get_s_c_net(), woflan_object.get_initial_marking(), woflan_object.get_net()))
+    if nx.is_strongly_connected(woflan_object.get_r_g_s_c()):
+        if woflan_object.print_diagnostics:
+            print('All tasks are live.')
+        return True
+    else:
+        if return_asap_when_unsound:
+            return False
+        return step_13(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_12(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_r_g_s_c(
+        reachability_graph(woflan_object.get_s_c_net(), woflan_object.get_initial_marking(), woflan_object.get_net()))
+    if woflan_object.print_diagnostics:
+        print('There are non-live tasks.')
+    if return_asap_when_unsound:
+        return False
+    return step_13(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+
+def step_13(woflan_object, return_asap_when_unsound=False):
+    woflan_object.set_locking_scenarios(compute_non_live_sequences(woflan_object))
+    if woflan_object.print_diagnostics:
+        print('The following sequences lead to deadlocks: {}.'.format(woflan_object.get_locking_scenarios()))
+    return False
+
+
+@deprecation.deprecated('2.2.2', removed_in='3.0.0',
+                        details='deprecated version of WOFLAN; use pm4py.algo.analysis.woflan')
+def apply(net, i_m, f_m, parameters=None):
+    """
+    Apply the Woflan Soundness check. Trough this process, different steps are executed.
+    :param net: Petri Net representation of PM4Py
+    :param i_m: initial marking of given Net. Marking object of PM4Py
+    :param f_m: final marking of given Net. Marking object of PM4Py
+    :return: True, if net is sound; False otherwise.
+    """
+    warnings.warn('deprecated version of WOFLAN; use pm4py.algo.analysis.woflan',
+                  DeprecationWarning)
+    if parameters is None:
+        parameters = {}
+    return_asap_when_unsound = exec_utils.get_param_value(Parameters.RETURN_ASAP_WHEN_NOT_SOUND, parameters, False)
+    print_diagnostics = exec_utils.get_param_value(Parameters.PRINT_DIAGNOSTICS, parameters, True)
+    return_diagnostics = exec_utils.get_param_value(Parameters.RETURN_DIAGNOSTICS, parameters, False)
+
+    woflan_object = woflan(net, i_m, f_m, print_diagnostics=print_diagnostics)
+    step_1_res = step_1(woflan_object, return_asap_when_unsound=return_asap_when_unsound)
+
+    if return_diagnostics:
+        return step_1_res, woflan_object.get_output()
+
+    return step_1_res
+
+
+def compute_non_live_sequences(woflan_object):
+    """
+    We want to compute the sequences of transitions which lead to deadlocks.
+    To do this, we first compute a reachbility graph (possible, since we know that the Petri Net is bounded) and then we
+    convert it to a spanning tree. Afterwards, we compute the paths which lead to nodes from which the final marking cannot
+    be reached. Note: We are searching for the shortest sequence. After the first red node, all successors are also red.
+    Therefore, we do not have to consider them.
+    :param woflan_object: Object that contains the necessary information
+    :return: List of sequence of transitions, each sequence is a list
+    """
+    woflan_object.set_r_g(reachability_graph(woflan_object.get_net(), woflan_object.get_initial_marking()))
+    f_m = convert_marking(woflan_object.get_net(), woflan_object.get_final_marking())
+    sucessfull_terminate_state = None
+    for node in woflan_object.get_r_g().nodes:
+        if all(np.equal(woflan_object.get_r_g().nodes[node]['marking'], f_m)):
+            sucessfull_terminate_state = node
+            break
+    # red nodes are those from which the final marking is not reachable
+    red_nodes = []
+    for node in woflan_object.get_r_g().nodes:
+        if not nx.has_path(woflan_object.get_r_g(), node, sucessfull_terminate_state):
+            red_nodes.append(node)
+    # Compute directed spanning tree
+    spanning_tree = nx.algorithms.tree.Edmonds(woflan_object.get_r_g()).find_optimum()
+    queue = set()
+    paths = {}
+    # root node
+    queue.add(0)
+    paths[0] = []
+    processed_nodes = set()
+    red_paths = []
+    while len(queue) > 0:
+        v = queue.pop()
+        for node in spanning_tree.neighbors(v):
+            if node not in paths and node not in processed_nodes:
+                paths[node] = paths[v].copy()
+                # we can use directly 0 here, since we are working on a spanning tree and there should be no more edges to a node
+                paths[node].append(woflan_object.get_r_g().get_edge_data(v, node)[0]['transition'])
+                if node not in red_nodes:
+                    queue.add(node)
+                else:
+                    red_paths.append(paths[node])
+        processed_nodes.add(v)
+    return red_paths
+
+
+def compute_unbounded_sequences(woflan_object):
+    """
+    We compute the sequences which lead to an infinite amount of tokens. To do this, we compute a restricted coverability tree.
+    The tree works similar to the graph, despite we consider tree characteristics during the construction.
+    :param woflan_object: Woflan object that contains all needed information.
+    :return: List of unbounded sequences, each sequence is a list of transitions
+    """
+
+    def check_for_markings_larger_than_final_marking(graph, f_m):
+        markings = []
+        for node in graph.nodes:
+            if all(np.greater_equal(graph.nodes[node]['marking'], f_m)):
+                markings.append(node)
+        return markings
+
+    woflan_object.set_restricted_coverability_tree(
+        restricted_coverability_tree(woflan_object.get_net(), woflan_object.get_initial_marking()))
+    f_m = convert_marking(woflan_object.get_net(), woflan_object.get_final_marking())
+    infinite_markings = []
+    for node in woflan_object.get_restricted_coverability_tree().nodes:
+        if np.inf in woflan_object.get_restricted_coverability_tree().nodes[node]['marking']:
+            infinite_markings.append(node)
+    larger_markings = check_for_markings_larger_than_final_marking(woflan_object.get_restricted_coverability_tree(),
+                                                                   f_m)
+    green_markings = []
+    for node in woflan_object.get_restricted_coverability_tree().nodes:
+        add_to_green = True
+        for marking in infinite_markings:
+            if nx.has_path(woflan_object.get_restricted_coverability_tree(), node, marking):
+                add_to_green = False
+        for marking in larger_markings:
+            if nx.has_path(woflan_object.get_restricted_coverability_tree(), node, marking):
+                add_to_green = False
+        if add_to_green:
+            green_markings.append(node)
+    red_markings = []
+    for node in woflan_object.get_restricted_coverability_tree().nodes:
+        add_to_red = True
+        for node_green in green_markings:
+            if nx.has_path(woflan_object.get_restricted_coverability_tree(), node, node_green):
+                add_to_red = False
+                break
+        if add_to_red:
+            red_markings.append(node)
+    # Make the path as short as possible. If we reach a red state, we stop and do not go further in the "red zone".
+    queue = set()
+    queue.add(0)
+    paths = {}
+    paths[0] = []
+    paths_to_red = []
+    while len(queue) > 0:
+        v = queue.pop()
+        successors = woflan_object.get_restricted_coverability_tree().successors(v)
+        for suc in successors:
+            paths[suc] = paths[v].copy()
+            paths[suc].append(woflan_object.get_restricted_coverability_tree().get_edge_data(v, suc)['transition'])
+            if suc in red_markings:
+                paths_to_red.append(paths[suc])
+            else:
+                queue.add(suc)
+    return paths_to_red
diff --git a/src/evaluation/soundness/woflan/graphs/__init__.py b/src/evaluation/soundness/woflan/graphs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4855257f6c1a0ef60304c907320a51a49e9c27cd
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.graphs import utility, minimal_coverability_graph, reachability_graph, restricted_coverability_graph
\ No newline at end of file
diff --git a/src/evaluation/soundness/woflan/graphs/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74a70e1f9f35c8fe049f475dc789ce98a56de058
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/__pycache__/utility.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/__pycache__/utility.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1530b9a7d2962d50aef93f7f617d52411e6e553
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/__pycache__/utility.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__init__.py b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7dc4771d053309373da5bbbbce1409bae5a6f31
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.graphs.minimal_coverability_graph import minimal_coverability_graph
diff --git a/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..610b0d1035c2be275c910f7099fa5bae47ffc542
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__pycache__/minimal_coverability_graph.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__pycache__/minimal_coverability_graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cadd5961672256893ba7c48e3ba32a774f20bf32
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/__pycache__/minimal_coverability_graph.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/minimal_coverability_graph.py b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/minimal_coverability_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..e149db5e79ef1cdeaab9b8650fc297e9b689aeec
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/minimal_coverability_graph/minimal_coverability_graph.py
@@ -0,0 +1,186 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+"""
+This module is based on:
+The minimal coverability graph for Petri nets
+from Alain Finkel
+"""
+import numpy as np
+import networkx as nx
+from evaluation.soundness.woflan.graphs import utility as helper
+from copy import copy
+
+
def minimal_coverability_tree(net, initial_marking, original_net=None):
    """
    This method computes the minimal coverability tree. It is part of a method to obtain a minimal coverability graph
    :param net: Petri Net
    :param initial_marking: Initial Marking of the Petri Net
    :param original_net: Petri Net without short-circuited transition
    :return: Minimal coverability tree
    """

    def check_if_marking_already_in_processed_nodes(n, processed_nodes):
        # True iff some already processed node carries exactly the same marking vector as n.
        for node in processed_nodes:
            if np.array_equal(G.nodes[node]['marking'], G.nodes[n]['marking']):
                return True
        return False

    def is_m_smaller_than_other(m, processed_nodes):
        # True iff m is componentwise <= the marking of some node in processed_nodes.
        for node in processed_nodes:
            if all(np.less_equal(m, G.nodes[node]['marking'])):
                return True
        return False

    def is_m_greater_than_other(m, processed_nodes):
        # True iff m is componentwise >= the marking of some node in processed_nodes.
        for node in processed_nodes:
            if all(np.greater_equal(m, G.nodes[node]['marking'])):
                return True
        return False

    def get_first_smaller_marking_on_path(n, m2):
        # Walk the tree path from the root (node 0) to n and return the first
        # node whose marking is componentwise <= m2; None if there is none.
        path = nx.shortest_path(G, source=0, target=n)
        for node in path:
            if all(np.less_equal(G.nodes[node]['marking'], m2)):
                return node
        return None

    def remove_subtree(tree, n):
        # Delete all edges and nodes of the subtree rooted at n, keeping n itself.
        bfs_tree = nx.bfs_tree(tree, n)
        for edge in bfs_tree.edges:
            tree.remove_edge(edge[0], edge[1])
        for node in bfs_tree.nodes:
            if node != n:
                tree.remove_node(node)
        return tree

    G = nx.MultiDiGraph()

    incidence_matrix = helper.compute_incidence_matrix(net)
    firing_dict = helper.split_incidence_matrix(incidence_matrix, net)
    req_dict = helper.compute_firing_requirement(net)

    initial_mark = helper.convert_marking(net, initial_marking, original_net)
    j = 0
    unprocessed_nodes = set()
    G.add_node(j, marking=initial_mark)
    unprocessed_nodes.add(j)
    j += 1

    processed_nodes = set()

    while len(unprocessed_nodes) > 0:
        n = unprocessed_nodes.pop()
        if check_if_marking_already_in_processed_nodes(n, processed_nodes):
            # duplicate marking: keep the node but do not expand it again
            processed_nodes.add(n)
        elif is_m_smaller_than_other(G.nodes[n]['marking'], processed_nodes):
            # dominated marking: the node adds no information, prune it
            G.remove_edge(next(G.predecessors(n)), n)
            G.remove_node(n)
        elif is_m_greater_than_other(G.nodes[n]['marking'], processed_nodes):
            # n strictly covers an earlier marking: pump the places that strictly
            # grew with respect to covering ancestors to infinity (omega abstraction)
            m2 = G.nodes[n]['marking'].copy()
            ancestor_bool = False
            for ancestor in nx.ancestors(G, n):
                if is_m_greater_than_other(G.nodes[n]['marking'], [ancestor]):
                    i = 0
                    while i < len(G.nodes[n]['marking']):
                        if G.nodes[ancestor]['marking'][i] < G.nodes[n]['marking'][i]:
                            m2[i] = np.inf
                        i += 1
            n1 = None
            for ancestor in nx.ancestors(G, n):
                if all(np.less_equal(G.nodes[ancestor]['marking'], m2)):
                    n1 = get_first_smaller_marking_on_path(n, m2)
                    break
            if n1 != None:
                # an ancestor on the root path is dominated by the pumped marking:
                # replace its marking by m2 and rebuild its subtree from scratch
                ancestor_bool = True
                G.nodes[n1]['marking'] = m2.copy()
                subtree = nx.bfs_tree(G, n1)
                for node in subtree:
                    if node in processed_nodes:
                        processed_nodes.remove(node)
                    if node in unprocessed_nodes:
                        unprocessed_nodes.remove(node)
                G = remove_subtree(G, n1)
                unprocessed_nodes.add(n1)
            # processed nodes dominated by m2 are now redundant: drop their subtrees
            processed_nodes_copy = copy(processed_nodes)
            for node in processed_nodes_copy:
                if node in G.nodes:
                    if all(np.less_equal(G.nodes[node]['marking'], m2)):
                        subtree = nx.bfs_tree(G, node)
                        for node in subtree:
                            if node in processed_nodes:
                                processed_nodes.remove(node)
                            if node in unprocessed_nodes:
                                unprocessed_nodes.remove(node)
                        remove_subtree(G, node)
                        G.remove_node(node)
            if not ancestor_bool:
                unprocessed_nodes.add(n)
        else:
            # fresh marking: expand it with every enabled transition
            for el in helper.enabled_markings(firing_dict, req_dict, G.nodes[n]['marking']):
                G.add_node(j, marking=el[0])
                G.add_edge(n, j, transition=el[1])
                unprocessed_nodes.add(j)
                j += 1
            processed_nodes.add(n)
    return (G, firing_dict, req_dict)
+
+
def apply(net, initial_marking, original_net=None):
    """
    Apply method from the "outside": compute the minimal coverability graph.
    :param net: Petri Net object
    :param initial_marking: Initial marking of the Petri Net object
    :param original_net: Petri Net object without short-circuited transition. For better usability, initial set to None
    :return: MultiDiGraph networkx object
    """

    def group_nodes_by_marking(graph):
        # Bucket the tree nodes by the string form of their marking vector.
        buckets = {}
        for node in graph.nodes:
            key = np.array2string(graph.nodes[node]['marking'])
            buckets.setdefault(key, []).append(node)
        return buckets

    def collapse_equal_markings(graph, buckets):
        # Contract every group of equally-labelled nodes into its first member.
        for bucket in buckets.values():
            keep = bucket[0]
            for duplicate in bucket[1:]:
                graph = nx.contracted_nodes(graph, keep, duplicate)
        return graph

    tree, firing_dict, req_dict = minimal_coverability_tree(net, initial_marking, original_net)
    mcg = collapse_equal_markings(tree, group_nodes_by_marking(tree))

    # Drop edges whose target marking cannot actually be produced by firing an
    # enabled transition from the source marking (artefacts of node contraction).
    obsolete_edges = []
    for edge in mcg.edges:
        candidates = helper.enabled_markings(firing_dict, req_dict, mcg.nodes[edge[0]]['marking'])
        reachable = False
        for candidate in candidates:
            if np.array_equal(candidate[0], mcg.nodes[edge[1]]['marking']):
                reachable = True
                break
        if not reachable:
            obsolete_edges.append(edge)
    for edge in obsolete_edges:
        mcg.remove_edge(edge[0], edge[1])
    return mcg
diff --git a/src/evaluation/soundness/woflan/graphs/reachability_graph/__init__.py b/src/evaluation/soundness/woflan/graphs/reachability_graph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb23bccf3b62a59b7c4470409b8d453f74a34e26
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/reachability_graph/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.graphs.reachability_graph import reachability_graph
diff --git a/src/evaluation/soundness/woflan/graphs/reachability_graph/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/reachability_graph/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..356ec3c9aaa77f275ac426ee0e6f1038c5c79b5c
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/reachability_graph/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/reachability_graph/__pycache__/reachability_graph.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/reachability_graph/__pycache__/reachability_graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..029dc4aa5044bd9ea3d7c1b19c816dd79f30cc94
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/reachability_graph/__pycache__/reachability_graph.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/reachability_graph/reachability_graph.py b/src/evaluation/soundness/woflan/graphs/reachability_graph/reachability_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..6700e6bf188656849a243880d546f7ecc932f9ee
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/reachability_graph/reachability_graph.py
@@ -0,0 +1,56 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import networkx as nx
+import numpy as np
+from evaluation.soundness.woflan.graphs import utility as helper
+
+
def apply(net, initial_marking, original_net=None):
    """
    Method that computes a reachability graph as networkx object
    :param net: Petri Net
    :param initial_marking: Initial Marking of the Petri Net
    :param original_net: Petri Net without short-circuited transition
    :return: Networkx Graph that represents the reachability graph of the Petri Net
    """
    start = helper.convert_marking(net, initial_marking, original_net)
    firing_dict = helper.split_incidence_matrix(helper.compute_incidence_matrix(net), net)
    req_dict = helper.compute_firing_requirement(net)

    graph = nx.MultiDiGraph()
    graph.add_node(0, marking=start)
    # maps the string form of a marking to the node id that carries it
    node_of = {np.array2string(start): 0}
    next_index = 1
    frontier = {0}

    while frontier:
        current = frontier.pop()
        for marking, transition in helper.enabled_markings(firing_dict, req_dict,
                                                           graph.nodes[current]['marking']):
            key = np.array2string(marking)
            if key in node_of:
                # marking already discovered: only add the connecting edge
                graph.add_edge(current, node_of[key], transition=transition)
            else:
                node_of[key] = next_index
                graph.add_node(next_index, marking=marking)
                frontier.add(next_index)
                graph.add_edge(current, next_index, transition=transition)
                next_index += 1
    return graph
diff --git a/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__init__.py b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b54423ac9310af7004ab3b2ba60ab591c1e4c44
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.graphs.restricted_coverability_graph import restricted_coverability_graph
diff --git a/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0540b59c9cbccd0fdf1a420b4c396ce2521a4e7
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__pycache__/restricted_coverability_graph.cpython-310.pyc b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__pycache__/restricted_coverability_graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..357971bbf8304707ad42eafbf85db13656fa6522
Binary files /dev/null and b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/__pycache__/restricted_coverability_graph.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/restricted_coverability_graph.py b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/restricted_coverability_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0019e11e51547acde047a55443f984d6ce67d47
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/restricted_coverability_graph/restricted_coverability_graph.py
@@ -0,0 +1,89 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import numpy as np
+import networkx as nx
+from evaluation.soundness.woflan.graphs import utility as helper
+
+
def construct_tree(net, initial_marking):
    """
    Construct a restricted coverability marking.
    For more information, see the thesis "Verification of WF-nets", 4.3.
    :param net: Petri Net
    :param initial_marking: initial marking of the Petri Net
    :return: networkx DiGraph representing the restricted coverability tree
    """
    initial_marking = helper.convert_marking(net, initial_marking)
    firing_dict = helper.split_incidence_matrix(helper.compute_incidence_matrix(net), net)
    req_dict = helper.compute_firing_requirement(net)
    # maps the string form of a marking to the node id that carries it
    look_up_indices = {}
    j = 0
    coverability_graph = nx.DiGraph()
    coverability_graph.add_node(j, marking=initial_marking)
    look_up_indices[np.array2string(initial_marking)] = j

    j += 1
    new_arc = True
    # Fixpoint loop: repeat until no node can fire a transition that yields a
    # marking not already present in the tree.
    while new_arc:
        new_arc = False
        nodes = list(coverability_graph.nodes).copy()
        while len(nodes) > 0:
            m = nodes.pop()
            # nodes whose marking contains an omega (inf) entry are not expanded further
            if not np.inf in coverability_graph.nodes[m]['marking']:
                possible_markings = helper.enabled_markings(firing_dict, req_dict,
                                                            coverability_graph.nodes[m]['marking'])
                m2 = None
                if len(possible_markings) > 0:
                    for marking in possible_markings:
                        # check for m1 + since we want to construct a tree, we do not want that a marking is already in a graph since it is going to have an arc
                        if np.array2string(marking[0]) not in look_up_indices:
                            if check_if_transition_unique(m, coverability_graph, marking[1]):
                                m2 = marking
                                new_arc = True
                                break
                if new_arc:
                    break
        if new_arc:
            # omega abstraction: any place that strictly grew with respect to a
            # smaller marking on a path to m is set to infinity before insertion
            m3 = np.zeros(len(list(net.places)))
            for place in list(net.places):
                if check_for_smaller_marking(m2, coverability_graph, list(net.places).index(place), m, look_up_indices):
                    m3[list(net.places).index(place)] = np.inf
                else:
                    m3[list(net.places).index(place)] = m2[0][list(net.places).index(place)]
            coverability_graph.add_node(j, marking=m3)
            coverability_graph.add_edge(m, j, transition=m2[1])
            look_up_indices[np.array2string(m3)] = j
            j += 1
    return coverability_graph
+
+
def check_if_transition_unique(marking, graph, transition):
    """
    Check whether no outgoing edge of the given node is already labelled with
    the given transition.
    :param marking: node id in the coverability graph
    :param graph: networkx graph whose edges carry a 'transition' attribute
    :param transition: transition to look for
    :return: True if the transition does not yet label an outgoing edge, False otherwise
    """
    return all(graph[source][target]['transition'] != transition
               for source, target in graph.out_edges(marking))
+
+
def check_for_smaller_marking(marking, coverability_graph, index, current_node, look_up_indices):
    """
    Decide whether the place at `index` must be pumped to infinity: search for an
    already discovered marking that is componentwise <= the candidate marking,
    strictly smaller at `index`, and that lies on a path to the current node.
    :param marking: (marking vector, transition) pair of the candidate
    :param coverability_graph: networkx graph built so far
    :param index: index of the place under consideration
    :param current_node: node from which the candidate marking was produced
    :param look_up_indices: maps marking strings to node ids
    :return: True if such a smaller marking exists, False otherwise
    """
    candidate = marking[0]
    for node in coverability_graph.nodes:
        discovered = coverability_graph.nodes[node]['marking']
        if not all(np.less_equal(discovered, candidate)):
            continue
        if discovered[index] >= candidate[index]:
            continue
        origin = look_up_indices[np.array2string(discovered)]
        if nx.has_path(coverability_graph, origin, current_node):
            return True
    return False
diff --git a/src/evaluation/soundness/woflan/graphs/utility.py b/src/evaluation/soundness/woflan/graphs/utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef646a425527299b9302dd4e26347d7ce8ad8a73
--- /dev/null
+++ b/src/evaluation/soundness/woflan/graphs/utility.py
@@ -0,0 +1,139 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import numpy as np
+import networkx as nx
+
def compute_incidence_matrix(net):
    """
    Given a Petri Net, the incidence matrix is computed. An incidence matrix has m rows (places)
    and n columns (transitions); entry (p, t) is the net token change of place p when
    transition t fires (outgoing arc weights minus ingoing arc weights).
    :param net: Petri Net object
    :return: Incidence matrix as a numpy array of shape (len(places), len(transitions))
    """
    transition_list = list(net.transitions)
    place_list = list(net.places)
    C = np.zeros((len(place_list), len(transition_list)))
    for col, transition in enumerate(transition_list):
        for in_arc in transition.in_arcs:
            # arcs that go to the transition consume tokens from their source place
            C[place_list.index(in_arc.source), col] -= in_arc.weight
        for out_arc in transition.out_arcs:
            # arcs that lead away from the transition produce tokens in their target place
            C[place_list.index(out_arc.target), col] += out_arc.weight
    return C
+
+
def split_incidence_matrix(matrix, net):
    """
    We split the incidence matrix columnwise to get the firing information for each transition.
    :param matrix: incidence matrix (places x transitions)
    :param net: Petri Net
    :return: Dictionary mapping each transition to the numpy vector of token changes it
             causes, i.e. its column of the incidence matrix
    """
    # The original np.hsplit(transposed, 1)[0] was a no-op wrapper: splitting an
    # array into one part returns the array itself, so index the rows directly.
    transposed = np.transpose(matrix)
    return {transition: transposed[i]
            for i, transition in enumerate(list(net.transitions))}
+
def compute_firing_requirement(net):
    """
    For every transition compute the tokens it requires to fire, encoded as a
    vector with one (non-positive) entry per place: entry p is minus the total
    weight of arcs from p into the transition.
    :param net: Petri Net
    :return: dict mapping each transition to its requirement vector (numpy array)
    """
    places = list(net.places)
    requirements = {}
    for transition in net.transitions:
        vector = np.zeros(len(places))
        for arc in transition.in_arcs:
            vector[places.index(arc.source)] -= arc.weight
        requirements[transition] = vector
    return requirements
+
def enabled_markings(firing_dict, req_dict, marking):
    """
    Determine every marking reachable from `marking` by firing one enabled transition.
    :param firing_dict: maps each transition to its token-change vector
    :param req_dict: maps each transition to its (non-positive) requirement vector
    :param marking: current marking as a numpy array
    :return: list of (new_marking, transition) pairs
    """
    results = []
    for transition, requirement in req_dict.items():
        # a transition is enabled iff the marking covers its token requirement
        if all(np.greater_equal(marking, -requirement)):
            results.append((marking + firing_dict[transition], transition))
    return results
+
def convert_marking(net, marking, original_net=None):
    """
    Takes a marking as input and converts it into a numpy array indexed like net.places.
    Every place whose name appears in the marking gets the value 1; token counts above 1
    are not preserved (open TODO carried over from the original author).
    :param net: PM4Py Petri Net object
    :param marking: Marking that should be converted
    :param original_net: PM4Py Petri Net object without short-circuited transition
    :return: Numpy array representation
    """
    marked_names = set(el.name for el in marking.keys())
    vector = np.zeros(len(list(net.places)))
    for position, place in enumerate(net.places):
        if place.name in marked_names:
            # NOTE(review): stores 1 regardless of the actual token count
            vector[position] = 1
    return vector
+
def check_for_dead_tasks(net, graph):
    """
    We compute a list of dead tasks. A dead task is a task which does not appear in the
    Minimal Coverability Graph, i.e. a labelled transition that labels no edge.
    :param net: Petri Net representation of PM4Py
    :param graph: Minimal coverability graph. NetworkX MultiDiGraph object.
    :return: list of dead tasks
    """
    # every labelled (visible) transition starts out as a candidate
    tasks = [transition for transition in list(net.transitions) if transition.label != None]
    # The original implementation walked the private attribute graph.edges()._adjdict;
    # the public edges(data=True) view yields the same (u, v, data) information.
    for _, _, data in graph.edges(data=True):
        if data['transition'] in tasks:
            tasks.remove(data['transition'])
    return tasks
+
def check_for_improper_conditions(mcg):
    """
    Collect the improper conditions of a minimal coverability graph, i.e. the states
    in which some place may hold a possibly infinite amount of tokens.
    :param mcg: networkx object (minimal coverability graph)
    :return: list of nodes whose marking contains an infinite entry (empty list if none)
    """
    return [node for node in mcg.nodes if np.inf in mcg.nodes[node]['marking']]
+
def check_for_substates(mcg):
    """
    Checks if a substate exists in a given mcg: a substate is a state from which a
    strictly larger marking (componentwise) is reachable.
    :param mcg: Minimal coverability graph (networkx object)
    :return: True, if there exists no substate; False otherwise
    """
    for node in mcg.nodes:
        own_marking = mcg.nodes[node]['marking']
        for descendant in nx.descendants(mcg, node):
            if all(np.less(own_marking, mcg.nodes[descendant]['marking'])):
                return False
    return True
diff --git a/src/evaluation/soundness/woflan/not_well_handled_pairs/__init__.py b/src/evaluation/soundness/woflan/not_well_handled_pairs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..05f5f47bdeeeb28f974fe73104632dd673302075
--- /dev/null
+++ b/src/evaluation/soundness/woflan/not_well_handled_pairs/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.not_well_handled_pairs import not_well_handled_pairs
diff --git a/src/evaluation/soundness/woflan/not_well_handled_pairs/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/not_well_handled_pairs/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4473ff44e33d890a9527d36972ce51162493e36
Binary files /dev/null and b/src/evaluation/soundness/woflan/not_well_handled_pairs/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/not_well_handled_pairs/__pycache__/not_well_handled_pairs.cpython-310.pyc b/src/evaluation/soundness/woflan/not_well_handled_pairs/__pycache__/not_well_handled_pairs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..622581dfc87812eca74c98dd856d4429946ff1cb
Binary files /dev/null and b/src/evaluation/soundness/woflan/not_well_handled_pairs/__pycache__/not_well_handled_pairs.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/not_well_handled_pairs/not_well_handled_pairs.py b/src/evaluation/soundness/woflan/not_well_handled_pairs/not_well_handled_pairs.py
new file mode 100644
index 0000000000000000000000000000000000000000..e025631501c50c5229ac308a1b72722c8e6ff34b
--- /dev/null
+++ b/src/evaluation/soundness/woflan/not_well_handled_pairs/not_well_handled_pairs.py
@@ -0,0 +1,64 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import networkx as nx
+
+def create_network_graph(net):
+    """
+    Transform a given Petri Net into a network graph. Each place and transition is a node and gets duplicated.
+    The even numbers handle the inputs of a node, the odds the output.
+    :param net: PM4Py Petri Net representation
+    :return: networkx.DiGraph(), bookkeeping dictionary
+    """
+    graph = nx.DiGraph()
+    places = list(net.places)
+    transitions = list(net.transitions)
+    nodes=set(places) | set(transitions)
+    bookkeeping={}
+    for index,el in enumerate(nodes):
+        bookkeeping[el]=index*2
+    for node in nodes:
+        graph.add_node(bookkeeping[node])
+        graph.add_node(bookkeeping[node]+1)
+        graph.add_edge(bookkeeping[node], bookkeeping[node]+1, capacity=1)
+    #add edges for outgoing arcs in former Petri Net
+    for element in nodes:
+        for arc in element.out_arcs:
+            graph.add_edge(bookkeeping[element]+1, bookkeeping[arc.target], capacity=1)
+    #add edges for ingoing arcs in former Petri Net
+    for element in nodes:
+        for arc in element.in_arcs:
+            graph.add_edge(bookkeeping[arc.source]+1, bookkeeping[element], capacity=1)
+    return graph,bookkeeping
+
+def apply(net):
+    """
+    Using the max-flow min-cut theorem, we compute a list of not well handled TP and PT pairs
+    (T=transition, P=place)
+    :param net: Petri Net
+    :return: List
+    """
+    graph,booking=create_network_graph(net)
+    pairs=[]
+    for place in net.places:
+        for transition in net.transitions:
+            p=booking[place]
+            t=booking[transition]
+            if nx.maximum_flow_value(graph, p+1, t)>1:
+                pairs.append((p+1,t))
+            if nx.maximum_flow_value(graph, t+1, p)>1:
+                pairs.append((t+1,p))
+    return pairs
diff --git a/src/evaluation/soundness/woflan/place_invariants/__init__.py b/src/evaluation/soundness/woflan/place_invariants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c856d15cc3a37448e2c0744d01d18f769486815
--- /dev/null
+++ b/src/evaluation/soundness/woflan/place_invariants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.place_invariants import place_invariants, s_component, uniform_invariant, utility
diff --git a/src/evaluation/soundness/woflan/place_invariants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/soundness/woflan/place_invariants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..842536a4fe3905874110b6b9b4ad256f5c2bf694
Binary files /dev/null and b/src/evaluation/soundness/woflan/place_invariants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/place_invariants/__pycache__/place_invariants.cpython-310.pyc b/src/evaluation/soundness/woflan/place_invariants/__pycache__/place_invariants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3c4ef8ebc11e41f3bf6cfb1a7a8fb015dea5484
Binary files /dev/null and b/src/evaluation/soundness/woflan/place_invariants/__pycache__/place_invariants.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/place_invariants/__pycache__/s_component.cpython-310.pyc b/src/evaluation/soundness/woflan/place_invariants/__pycache__/s_component.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b1b3d43cb875ab94d4cf042390f6da3addce3a7
Binary files /dev/null and b/src/evaluation/soundness/woflan/place_invariants/__pycache__/s_component.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/place_invariants/__pycache__/uniform_invariant.cpython-310.pyc b/src/evaluation/soundness/woflan/place_invariants/__pycache__/uniform_invariant.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e4eece20dcf20ef62a30b39f141e42c92a12f5b
Binary files /dev/null and b/src/evaluation/soundness/woflan/place_invariants/__pycache__/uniform_invariant.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/place_invariants/__pycache__/utility.cpython-310.pyc b/src/evaluation/soundness/woflan/place_invariants/__pycache__/utility.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c67d4152c917af1c7b3f51e3af9046d5769ed240
Binary files /dev/null and b/src/evaluation/soundness/woflan/place_invariants/__pycache__/utility.cpython-310.pyc differ
diff --git a/src/evaluation/soundness/woflan/place_invariants/place_invariants.py b/src/evaluation/soundness/woflan/place_invariants/place_invariants.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dcd43c240a52a36db153d4ee74ec12c366a464a
--- /dev/null
+++ b/src/evaluation/soundness/woflan/place_invariants/place_invariants.py
@@ -0,0 +1,66 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import numpy as np
+import sympy
+
+def compute_place_invariants(net):
+    """
+    We compute the nullspace of the incidence matrix and obtain the place-invariants.
+    :param net: Petri Net of which we want to know the place invariants.
+    :return: Set of place invariants of the given Petri Net.
+    """
+
+    def compute_incidence_matrix(net):
+        """
+        Given a Petri Net, the incidence matrix is computed. An incidence matrix has m rows (places) and n columns
+        (transitions).
+        :param net: Petri Net object
+        :return: Incidence matrix
+        """
+        n = len(net.transitions)
+        m = len(net.places)
+        C = np.zeros((m, n))
+        i = 0
+        transition_list = list(net.transitions)
+        place_list = list(net.places)
+        while i < n:
+            t = transition_list[i]
+            for in_arc in t.in_arcs:
+                # arcs that go to transition
+                C[place_list.index(in_arc.source), i] -= 1
+            for out_arc in t.out_arcs:
+                # arcs that lead away from transition
+                C[place_list.index(out_arc.target), i] += 1
+            i += 1
+        return C
+
+    def extract_basis_vectors(incidence_matrix):
+        """
+        The name of the method describes what we want to achieve. We calculate the nullspace of the transposed incidence matrix.
+        :param incidence_matrix: Numpy Array
+        :return: a collection of numpy arrays that form a base of transposed A
+        """
+        # To have the same dimension as described as in https://www7.in.tum.de/~esparza/fcbook-middle.pdf and to get the correct nullspace, we have to transpose
+        A = np.transpose(incidence_matrix)
+        # exp from book https://www7.in.tum.de/~esparza/fcbook-middle.pdf
+        x = sympy.Matrix(A).nullspace()
+        # TODO: Question here: Will x be always rational? Depends on sympy implementation. Normally, yes, we will have rational results
+        x = np.array(x).astype(np.float64)
+        return x
+
+    A = compute_incidence_matrix(net)
+    return extract_basis_vectors(A)
diff --git a/src/evaluation/soundness/woflan/place_invariants/s_component.py b/src/evaluation/soundness/woflan/place_invariants/s_component.py
new file mode 100644
index 0000000000000000000000000000000000000000..73b7f1a83ca206e5f96ad9250d327dac3af4774c
--- /dev/null
+++ b/src/evaluation/soundness/woflan/place_invariants/s_component.py
@@ -0,0 +1,90 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.place_invariants.uniform_invariant import apply as compute_uniform_invariants
+
+def apply(net):
+    """
+    General method to obtain a list of S-components
+    :param net: Petri Net for which S-components should be computed
+    :return: A list of S-components
+    """
+    uniform_invariants=compute_uniform_invariants(net)
+    return compute_s_components(net, uniform_invariants)
+
+
+def compute_s_components(net, p_invariants):
+    """
+    We perform the hint in 5.4.4 of https://pure.tue.nl/ws/portalfiles/portal/1596223/9715985.pdf
+    :param p_invariants: Semi-positive basis we calculate previously
+    :return: A list of S-Components. An s-component consists of a set which includes all related transitions and places
+    """
+
+    def compare_lists(list1, list2):
+        """
+        :param list1: a list
+        :param list2: a list
+        :return: the number of times an item from list1 appears in list2
+        """
+        counter = 0
+        for el in list1:
+            if el in list2:
+                counter += 1
+        return counter
+
+    s_components = []
+    place_list = list(net.places)
+    for invariant in p_invariants:
+        i = 0
+        s_component = []
+        for el in invariant:
+            if el > 0:
+                place = place_list[i]
+                s_component.append(place)
+                for in_arc in place.in_arcs:
+                    s_component.append(in_arc.source)
+                for out_arc in place.out_arcs:
+                    s_component.append(out_arc.target)
+            i += 1
+        if len(s_component) != 0:
+            is_s_component = True
+            for el in s_component:
+                if el in net.transitions:
+                    places_before = [arc.source for arc in el.in_arcs]
+                    if compare_lists(s_component, places_before) != 1:
+                        is_s_component = False
+                        break
+                    places_after = [arc.target for arc in el.out_arcs]
+                    if compare_lists(s_component, places_after) != 1:
+                        is_s_component = False
+                        break
+            if is_s_component:
+                s_components.append(set(s_component))
+    return s_components
+
+def compute_uncovered_places_in_component(s_components, net):
+    """
+    We check for uncovered places
+    :param s_components: List of s_components
+    :param net: Petri Net representation of PM4Py
+    :return: List of uncovered places
+    """
+    place_list=list(net.places)
+    for component in s_components:
+        for el in component:
+            if el in place_list:
+                place_list.remove(el)
+    return place_list
diff --git a/src/evaluation/soundness/woflan/place_invariants/uniform_invariant.py b/src/evaluation/soundness/woflan/place_invariants/uniform_invariant.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6c85008511bfcf3b8301e807325a8df7a327015
--- /dev/null
+++ b/src/evaluation/soundness/woflan/place_invariants/uniform_invariant.py
@@ -0,0 +1,24 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.soundness.woflan.place_invariants.place_invariants import compute_place_invariants
+from evaluation.soundness.woflan.place_invariants.utility import transform_basis
+
+def apply(net):
+    place_invariants= compute_place_invariants(net)
+    modified_invariants=transform_basis(place_invariants, style='uniform')
+    return modified_invariants
+
diff --git a/src/evaluation/soundness/woflan/place_invariants/utility.py b/src/evaluation/soundness/woflan/place_invariants/utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf2ec9b393e2e0d4bde1e5ec71c80c0e121f2a8a
--- /dev/null
+++ b/src/evaluation/soundness/woflan/place_invariants/utility.py
@@ -0,0 +1,118 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import numpy as np
+
+
+def removearray(L, arr):
+    """
+    Remove the first occurrence of an array from a given list in place.
+    :param L: list object
+    :param arr: array that has to be removed
+    :raises ValueError: if the array is not found in the list
+    """
+    ind = 0
+    size = len(L)
+    while ind != size and not np.array_equal(L[ind], arr):
+        ind += 1
+    if ind != size:
+        L.pop(ind)
+    else:
+        raise ValueError('array not found in list.')
+
+def transform_basis(basis, style=None):
+    """
+    We construct a (I)LP to transform our basis into a set of vectors by using linear combination to fit certain styles/
+    properties
+    :param basis: list of p-invariants. Commonly computed by the method 'compute_place_invariants' in
+    place_invariants.py
+    :param style: String that is used to construct certain constraints
+    At the moment, 'uniform' (all weights have value 0 or 1), and 'weighted' (all weights are >=0) are supported
+    :return: List of p-invariants that fits the style
+    """
+    import pulp
+
+    if style==None:
+        style='weighted'
+
+    # First, we want to check if a vector of the basis only contains non-positive entries. If so, then we multiply the
+    # vector by -1.
+    modified_base = []
+    for vector in basis:
+        all_non_positiv = True
+        for entry in vector:
+            if entry > 0:
+                all_non_positiv = False
+        if all_non_positiv:
+            modified_base.append(-1 * vector)
+        else:
+            modified_base.append(vector)
+    #For uniform invariants, it is necessary that the weight for a place is either 0 or 1. We collect the invariants for
+    #which this condition does not hold. We also collect, for the weighted style, the invariants in which an entry is <0.
+    to_modify = []
+    for vector in modified_base:
+        for entry in vector:
+            if ((entry < 0 or entry > 1) and style=='uniform') or ( entry < 0 and style=='weighted'):
+                to_modify.append(vector)
+                break
+    # if we have nothing to modify, we are done
+    if len(to_modify) > 0:
+        for vector in to_modify:
+            removearray(modified_base, vector)
+            set_B = range(0, len(modified_base))
+            prob = pulp.LpProblem("linear_combination", pulp.LpMinimize)
+            X = pulp.LpVariable.dicts("x", set_B, cat='Integer')
+            y = pulp.LpVariable("y", cat='Integer', lowBound=1)
+            # add objective
+            prob += pulp.lpSum(X[i] for i in set_B)
+            if style=='uniform':
+                # variables for uniform. Therefore, the resulting weight can either be 0 or 1
+                z = pulp.LpVariable.dicts("z", range(0, len(vector)), lowBound=0, upBound=1, cat='Integer')
+                # add constraints
+                for i in range(len(vector)):
+                    prob += pulp.lpSum(X[j]*modified_base[j][i] for j in range(len(modified_base)))+y*vector[i]== z[i]
+            elif style=='weighted':
+                for i in range(len(vector)):
+                    prob += pulp.lpSum(X[j]*modified_base[j][i] for j in range(len(modified_base)))+y*vector[i] >= 0
+            prob.solve()
+            new_vector = np.zeros(len(vector))
+            if style=='weighted':
+                for i in range(len(new_vector)):
+                    new_vector[i] = y.varValue * vector[i]
+                    for j in range(len(modified_base)):
+                        new_vector[i] = new_vector[i] + modified_base[j][i] * X[j].varValue
+            elif style=='uniform':
+                for i in range(len(new_vector)):
+                    new_vector[i] = z[i].varValue
+            modified_base.append(new_vector)
+    return modified_base
+
+def compute_uncovered_places(invariants, net):
+    """
+    Compute a list of uncovered places for invariants of a given Petri Net. Note that there exists a separate algorithm
+    for s-components
+    :param invariants: list of invariants. Each invariants is a numpy-Array representation
+    :param net: Petri Net object of PM4Py
+    :return: List of uncovered place over all invariants
+    """
+    place_list=list(net.places)
+    unncovered_list=place_list.copy()
+    for invariant in invariants:
+        for index, value in enumerate(invariant):
+            if value != 0:
+                if place_list[index] in unncovered_list:
+                    unncovered_list.remove(place_list[index])
+    return unncovered_list
diff --git a/src/evaluation/wf_net/__init__.py b/src/evaluation/wf_net/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8ddfdce3d48745752ebe58dd7c850e0a1d73994
--- /dev/null
+++ b/src/evaluation/wf_net/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.wf_net import evaluator, variants
diff --git a/src/evaluation/wf_net/__pycache__/__init__.cpython-310.pyc b/src/evaluation/wf_net/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e65adaa65b90217a1def9be58b8827b53ddf6c1
Binary files /dev/null and b/src/evaluation/wf_net/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/wf_net/__pycache__/evaluator.cpython-310.pyc b/src/evaluation/wf_net/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d3139da6d04c932419207fe0dc2780ce71c0706
Binary files /dev/null and b/src/evaluation/wf_net/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/src/evaluation/wf_net/evaluator.py b/src/evaluation/wf_net/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..c270f087d0336d921467b94e659947907cd86cfb
--- /dev/null
+++ b/src/evaluation/wf_net/evaluator.py
@@ -0,0 +1,52 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import warnings
+from enum import Enum
+
+import deprecation
+
+from evaluation.wf_net.variants import petri_net
+from pm4py.util import exec_utils
+
+
+class Variants(Enum):
+    PETRI_NET = petri_net
+
+
+@deprecation.deprecated(deprecated_in='2.2.2', removed_in='3.0.0',
+                        details='this wf-net check is moved to the pm4py.algo.analysis package')
+def apply(net, parameters=None, variant=Variants.PETRI_NET):
+    warnings.warn('this wf-net check is moved to the pm4py.algo.analysis package', DeprecationWarning)
+    """
+    Checks if a Petri net is a workflow net
+
+    Parameters
+    ---------------
+    net
+        Petri net
+    parameters
+        Parameters of the algorithm
+    variant
+        Variant of the algorithm, possibe values:
+        - Variants.PETRI_NET
+
+    Returns
+    ---------------
+    boolean
+        Boolean value
+    """
+    return exec_utils.get_variant(variant).apply(net, parameters=parameters)
diff --git a/src/evaluation/wf_net/variants/__init__.py b/src/evaluation/wf_net/variants/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..00ebe893482e3e3a0a8d02eaac7d3163f7c9164e
--- /dev/null
+++ b/src/evaluation/wf_net/variants/__init__.py
@@ -0,0 +1,17 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+from evaluation.wf_net.variants import petri_net
diff --git a/src/evaluation/wf_net/variants/__pycache__/__init__.cpython-310.pyc b/src/evaluation/wf_net/variants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c60fae0298498dca73b0eee057fbcb1a322423b8
Binary files /dev/null and b/src/evaluation/wf_net/variants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/src/evaluation/wf_net/variants/__pycache__/petri_net.cpython-310.pyc b/src/evaluation/wf_net/variants/__pycache__/petri_net.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..963b1854ad3fc02917178a81b5cdb3666ea9d2bd
Binary files /dev/null and b/src/evaluation/wf_net/variants/__pycache__/petri_net.cpython-310.pyc differ
diff --git a/src/evaluation/wf_net/variants/petri_net.py b/src/evaluation/wf_net/variants/petri_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..39dfcafbbb0acd03046d58d1346cbf46eae74793
--- /dev/null
+++ b/src/evaluation/wf_net/variants/petri_net.py
@@ -0,0 +1,101 @@
+'''
+    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
+
+    PM4Py is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    PM4Py is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
+'''
+import copy
+
+from pm4py.objects.petri_net.utils import petri_utils as pn_utils
+from pm4py.objects.petri_net.obj import PetriNet
+
+
+def _short_circuit_petri_net(net):
+    """
+    Creates a short-circuited Petri net,
+    provided a unique source place and a unique sink place exist,
+    by connecting the sink with the source
+
+    Parameters
+    ---------------
+    net
+        Petri net
+
+    Returns
+    ---------------
+    PetriNet
+        The short-circuited Petri net, or None if the net has no unique source and sink place
+    """
+    s_c_net = copy.deepcopy(net)
+    no_source_places = 0
+    no_sink_places = 0
+    sink = None
+    source = None
+    for place in s_c_net.places:
+        if len(place.in_arcs) == 0:
+            source = place
+            no_source_places += 1
+        if len(place.out_arcs) == 0:
+            sink = place
+            no_sink_places += 1
+    if (sink is not None) and (source is not None) and no_source_places == 1 and no_sink_places == 1:
+        # If there is one unique source and sink place, short circuit Petri Net is constructed
+        t_1 = PetriNet.Transition("short_circuited_transition", "short_circuited_transition")
+        s_c_net.transitions.add(t_1)
+        # add arcs in short-circuited net
+        pn_utils.add_arc_from_to(sink, t_1, s_c_net)
+        pn_utils.add_arc_from_to(t_1, source, s_c_net)
+        return s_c_net
+    else:
+        return None
+
+
+def apply(net, parameters=None):
+    """
+    Checks if a Petri net is a workflow net
+
+    Parameters
+    ---------------
+    net
+        Petri net
+    parameters
+        Parameters of the algorithm
+
+    Returns
+    ---------------
+    boolean
+        Boolean value
+    """
+    if parameters is None:
+        parameters = {}
+
+    import networkx as nx
+
+    scnet = _short_circuit_petri_net(net)
+    if scnet is None:
+        return False
+    nodes = scnet.transitions | scnet.places
+    graph = nx.DiGraph()
+    while len(nodes) > 0:
+        element = nodes.pop()
+        graph.add_node(element.name)
+        for in_arc in element.in_arcs:
+            graph.add_node(in_arc.source.name)
+            graph.add_edge(in_arc.source.name, element.name)
+        for out_arc in element.out_arcs:
+            graph.add_node(out_arc.target.name)
+            graph.add_edge(element.name, out_arc.target.name)
+    if nx.algorithms.components.is_strongly_connected(graph):
+        return True
+    else:
+        return False
diff --git a/src/frontend/src/app/components/header-bar/header-bar.component.ts b/src/frontend/src/app/components/header-bar/header-bar.component.ts
index da5d3c481a868ac62feb437fda85b6eb2bd73cbb..cc2272408818e521a690a683955d006cb32469df 100644
--- a/src/frontend/src/app/components/header-bar/header-bar.component.ts
+++ b/src/frontend/src/app/components/header-bar/header-bar.component.ts
@@ -77,7 +77,7 @@ export class HeaderBarComponent implements OnDestroy {
 
   //TODO: remove comments
   importOCEL(): void {
-    console.log("We try to upload an OCEL, but nothing is yet implemented")
+    //console.log("We try to upload an OCEL, but nothing is yet implemented")
     this.fileUploadOCEL.nativeElement.click();
   }
 
@@ -190,7 +190,8 @@ export class HeaderBarComponent implements OnDestroy {
       let backendCall;
       if (!environment.electron) {
         console.log('Debug: Electron is not the detected environment, using uploadEventLog.');
-        backendCall = this.backendService.uploadEventLog(fileList[0]);
+        //backendCall = this.backendService.uploadEventLog(fileList[0]);
+        backendCall = this.backendService.uploadOCEL(fileList[0]);
       } else {
         console.log('Debug: Electron environment detected, using loadEventLogFromFilePath.');
         backendCall = this.backendService.loadEventLogFromFilePath(fileList[0]['path']);
@@ -198,9 +199,22 @@ export class HeaderBarComponent implements OnDestroy {
       this.loadingOverlayService.showLoader(
         'Importing OCEL. For large logs this can take up to several minutes'
       );
-      backendCall.subscribe(() => {
-        this.loadingOverlayService.hideLoader();
+      backendCall.subscribe({
+        next: () => {
+          this.loadingOverlayService.hideLoader();
+        },
+        error: (err) => {
+          console.error('Error importing OCEL:', err);
+          this.loadingOverlayService.hideLoader();
+        },
+        complete: () => {
+          this.loadingOverlayService.hideLoader();
+        }
       });
+
+      //backendCall.subscribe(() => {
+        //this.loadingOverlayService.hideLoader();
+      //});
     }
 
     // reset form
diff --git a/src/frontend/src/app/services/backendService/backend.service.ts b/src/frontend/src/app/services/backendService/backend.service.ts
index 2c4c378e0de3e98cb206096f11af7544d15a1428..c65e150ae4392b1b704ccccc9f6fc993409f9829 100644
--- a/src/frontend/src/app/services/backendService/backend.service.ts
+++ b/src/frontend/src/app/services/backendService/backend.service.ts
@@ -92,6 +92,25 @@ export class BackendService {
       );
   }
 
+  uploadOCEL(file: File) {
+    let formData = new FormData();
+    formData.append('file', file);
+
+    return this.httpClient
+      .post(
+        ROUTES.HTTP_BASE_URL + ROUTES.IMPORT + 'loadOCELFromFile',
+        formData
+      )
+      .pipe(
+        mapVariants(),
+        tap((res) => {
+          this.logService.processOCEL(res, file['path']);
+        })
+      );
+  }
+
+
+
   loadProcessTreeFromFilePath(filePath: string): void {
     this.httpClient
       .post(
diff --git a/src/frontend/src/app/services/logService/log.service.ts b/src/frontend/src/app/services/logService/log.service.ts
index f3a832285259651c39c423f5e0b4f8e0630a3cc8..8cba08e1fddc2d8c20f17672767e3fbb462be000 100644
--- a/src/frontend/src/app/services/logService/log.service.ts
+++ b/src/frontend/src/app/services/logService/log.service.ts
@@ -250,6 +250,9 @@ export class LogService {
     this.logGranularity = res['timeGranularity'];
     this.logModifications = [];
   }
+
+  public processOCEL(res, filePath = null) {
+  }
 }
 
 export class LogStats {