diff --git a/src/assessment/__init__.py b/src/assessment/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/assessment/framework.py b/src/assessment/framework.py
new file mode 100644
index 0000000000000000000000000000000000000000..5327a8adee58b8561d98cfeb9d3662459463baf7
--- /dev/null
+++ b/src/assessment/framework.py
@@ -0,0 +1,223 @@
+import abc
+import json
+import os
+
+import pandas
+import plotly.express
+import requests
+from requests.auth import HTTPBasicAuth
+from tqdm import tqdm
+
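+# Color palette for plots (RWTH Aachen corporate colors).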
+colors = ['#00549f', '#006165', '#0098A1', '#57AB27', '#BDCD00', '#CC071E', '#A11035', '#612158', '#7A6FAC']
+
+
+class AssessmentError(Exception):
+
+    def __init__(self, reason: str):
+        super().__init__(reason)
+        self._reason = reason
+
+    def __str__(self):
+        return f'AssessmentError: {self._reason}'
+
+
+class Framework(abc.ABC):
+
+    def __init__(self, prefix_url: str):
+        self._prefix_url = prefix_url
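+        # Long-format score table: one row per (framework, dummy, principle) score.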
+        self._all_scores = pandas.DataFrame(
+            columns=["framework", "dummy", "principle", "value"])
+
+    @property
+    @abc.abstractmethod
+    def name(self) -> str:
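+        """Short, machine-readable name of the assessment framework."""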
+        ...
+
+    @abc.abstractmethod
+    def assess(self, identifier: str) -> dict:
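+        """Assess a single record and return the framework's raw JSON result."""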
+        ...
+
+    @abc.abstractmethod
+    def evaluate(self, assessment_folder: str, dummy: str) -> pandas.DataFrame:
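+        """Aggregate stored assessment files into averaged per-principle scores."""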
+        ...
+
+    @staticmethod
+    def visualize(scores: pandas.DataFrame, filename: str):
+        fig = plotly.express.line_polar(scores, r='value', theta='principle', line_close=True, color='framework')
+        fig.update_traces(fill='toself', textfont=dict(color='black'))
+
+        # Customize the layout
+        fig.update_layout(
+            polar=dict(
+                bgcolor='rgba(0,0,0,0)',  # Set radar plot background color (transparent in this case)
+                radialaxis=dict(
+                    visible=True,
+                    range=[0, 100],  # Set the range of the radial axis
+                    tickcolor='black',  # Set the color of the tick marks
+                    tickvals=[0, 25, 50, 75, 100],  # Tick positions on the radial axis
+                    tickfont=dict(
+                        size=10,  # Set the font size of the tick labels
+                        color='black',
+                    ),
+                    showline=False,  # Hide the radial axis line
+                    gridcolor='black',  # Set color of radial grid lines
+                    griddash='dash',
+                ),
+                angularaxis=dict(
+                    showline=True,  # Show angular axis line
+                    linecolor='black',  # Set color of angular axis line
+                    # rotation=45,
+                    tickcolor='black',
+                    tickfont=dict(
+                        size=10,  # Set the font size of the tick labels
+                        color='black',
+                    ),
+                ),
+            ),
+            paper_bgcolor='white',  # Set background color outside the radar plot
+        )
+
+        fig.write_image(filename)
+        fig.show()
+
+
+class FUJI(Framework):
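+    # Default HTTP basic-auth credentials of a locally hosted F-UJI instance.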
+    _username = 'marvel'
+    _password = 'wonderwoman'
+
+    _base_url = "https://purl.org/fair-sensor-services/"
+    _request_url = 'http://localhost:1071/fuji/api/v1/evaluate'
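+    # Payload template for F-UJI's evaluate endpoint; assess() fills in 'object_identifier'.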
+    _request_body = {
+        "object_identifier": None,
+        "test_debug": True,
+        "metadata_service_endpoint": "http://ws.pangaea.de/oai/provider",
+        "metadata_service_type": "oai_pmh",
+        "use_datacite": True,
+        "use_github": False,
+        "metric_version": "metrics_v0.5"
+    }
+
+    def __init__(self, prefix: str):
+        super().__init__(f'{self._base_url}{prefix}/')
+
+    @property
+    def name(self) -> str:
+        return 'f-uji'
+
+    def assess(self, identifier: str) -> dict:
+        # Copy the payload template so repeated calls do not mutate shared class state.
+        request_body = {**self._request_body,
+                        "object_identifier": f"{self._prefix_url}{identifier}/"}
+        response = requests.post(self._request_url, json=request_body,
+                                 auth=HTTPBasicAuth(self._username, self._password))
+
+        if response.status_code != 200:
+            raise AssessmentError(response.reason)
+
+        return response.json()
+
+    def evaluate(self, assessment_folder: str, dummy: str) -> pandas.DataFrame:
+        for assessment_filename in tqdm(os.listdir(assessment_folder)):
+            with open(os.path.join(assessment_folder, assessment_filename)) as assessment_file:
+                assessment = json.load(assessment_file)
+
+            # F-UJI already reports a percentage per (sub-)principle.
+            for key, value in assessment['summary']['score_percent'].items():
+                self._all_scores.loc[len(self._all_scores.index)] = [self.name, dummy, key, value]
+
+        # Average the top-level F, A, I and R scores over all assessed records.
+        average_scores = self._all_scores[
+            self._all_scores['principle'].isin(['F', 'A', 'I', 'R'])
+        ].groupby('principle', as_index=False).agg({
+            'framework': 'first',
+            'dummy': 'first',
+            'value': 'mean',
+        })
+
+        return average_scores
+
+
+class FAIRChecker(Framework):
+    _base_url = 'https://iot.wzl-mq.rwth-aachen.de/soil-dummies/'
+    _request_url = 'https://fair-checker.france-bioinformatique.fr/api/check/metrics_all'
+
+    # Maps each FAIR-Checker metric to the (sub-)principles it contributes to,
+    # paired with the number of metrics that share that principle.
+    _mapping = {
+        'F1A': (('F1', 2), ('F', 4)),
+        'F1B': (('F1', 2), ('F', 4)),
+        'F2A': (('F2', 2), ('F', 4)),
+        'F2B': (('F2', 2), ('F', 4)),
+        'A1.1': (('A1', 2), ('A', 2)),
+        'A1.2': (('A1', 2), ('A', 2)),
+        'I1': (('I1', 1), ('I', 3)),
+        'I2': (('I2', 1), ('I', 3)),
+        'I3': (('I3', 1), ('I', 3)),
+        'R1.1': (('R1.1', 1), ('R', 3)),
+        'R1.2': (('R1.2', 1), ('R', 3)),
+        'R1.3': (('R1.3', 1), ('R', 3)),
+    }
+
+    def __init__(self, prefix: str, dummy: str):
+        super().__init__(f'{self._base_url}{dummy}/{prefix}/')
+
+    @property
+    def name(self) -> str:
+        return 'fair-checker'
+
+    def assess(self, identifier: str) -> dict:
+        response = requests.get(f"{self._request_url}?url={self._prefix_url}{identifier}/")
+
+        if response.status_code != 200:
+            raise AssessmentError(response.reason)
+
+        return response.json()
+
+    def evaluate(self, assessment_folder: str, dummy: str) -> pandas.DataFrame:
+        for assessment_filename in tqdm(os.listdir(assessment_folder)):
+            with open(os.path.join(assessment_folder, assessment_filename)) as assessment_file:
+                assessment = json.load(assessment_file)
+
+            # Collect this record's scores in F-UJI's layout so both frameworks
+            # can be plotted together.
+            fuji_mapping = {'A': 0, 'F': 0, 'I': 0, 'R': 0, 'A1': 0, 'F1': 0, 'F2': 0, 'F3': 0, 'F4': 0,
+                            'I1': 0, 'I2': 0, 'I3': 0, 'R1': 0, 'R1.1': 0, 'R1.2': 0, 'R1.3': 0,
+                            'FAIR': 5}
+
+            for entry in assessment:
+                # FAIR-Checker reports each of its 12 metric scores as a numeric
+                # string with a maximum of 2; parse it instead of eval-ing remote data.
+                score = float(entry['score'])
+                fuji_mapping['FAIR'] += score / 12 / 2 * 100
+                for principle, metric_count in self._mapping.get(entry['metric'], ()):
+                    fuji_mapping[principle] += score / metric_count / 2 * 100
+
+            for key, value in fuji_mapping.items():
+                self._all_scores.loc[len(self._all_scores.index)] = [self.name, dummy, key, value]
+
+        # Average the top-level F, A, I and R scores over all assessed records.
+        average_scores = self._all_scores[
+            self._all_scores['principle'].isin(['F', 'A', 'I', 'R'])
+        ].groupby('principle', as_index=False).agg({
+            'framework': 'first',
+            'dummy': 'first',
+            'value': 'mean',
+        })
+
+        return average_scores
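+
+
+# Illustrative usage sketch: 'soil', the folder name, and the identifier below are
+# placeholders, and a local F-UJI instance must be listening on port 1071.
+#
+#     fuji = FUJI('soil')
+#     result = fuji.assess('temperature-sensor')
+#     scores = fuji.evaluate('assessments/fuji', 'metadata')
+#     Framework.visualize(scores, 'fuji_radar.svg')  # write_image needs e.g. kaleido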