diff --git a/docs/source/index.rst b/docs/source/index.rst
index 25295b8d61eff151a3c3cb24297b61fff0eaaee8..0b9ed0802dfaf733d8aef8d28473638b1b7a8a1b 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -10,12 +10,3 @@ Susceptibility and Hazard mappIng fRamEwork SHIRE
    
    example
    example-plain
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/src/gui_version/create_training_data_gui.py b/src/gui_version/create_training_data_gui.py
index a83ff3c79ee1fb8aaf9cb4ec115f74e0c79ffa86..2c36e2d678cea10b4dab0ca81d0e611c319a7e55 100644
--- a/src/gui_version/create_training_data_gui.py
+++ b/src/gui_version/create_training_data_gui.py
@@ -265,16 +265,17 @@ class create_training_data:
         """
 
         if self.properties_train['nonls_path'].split('.')[-1] == 'csv':
-            self.absence = pd.read_csv(self.properties_train['nonls_path'])
+            self.df_absence = pd.read_csv(self.properties_train['nonls_path'])
             
-            self.absence = self.absence.rename(
-            columns={
-                self.properties_train['x_nonls']: self.properties_train['x'],
-                self.properties_train['y_nonls']: self.properties_train['y']})
+            self.df_absence = self.df_absence.rename(
+                columns={
+                    self.properties_train['x_nonls']: self.properties_train['x'],
+                    self.properties_train['y_nonls']: self.properties_train['y']})
 
             nonls_id = [
-                'nonls_event_' + str(i) for i in range(len(self.absence))]
-            self.absence.insert(0, self.properties_train['id'], nonls_id)
+                'nonls_event_' + str(i) for i in range(len(self.df_absence))]
+            if self.properties_train['id'] not in self.df_absence.columns:
+                self.df_absence.insert(0, self.properties_train['id'], nonls_id)
 
             self.logger.info('Absence locations added')
 
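The CSV branch added above can be exercised on its own. Below is a minimal sketch, not part of the patch; the column names 'x_nonls', 'y_nonls', 'x', 'y' and 'ID' are placeholders for whatever is configured in properties_train.

    import io

    import pandas as pd

    # Hypothetical absence-location CSV; the column names stand in for the
    # user-configured entries in properties_train.
    csv_text = "x_nonls,y_nonls\n10.1,47.2\n10.3,47.5\n"
    df_absence = pd.read_csv(io.StringIO(csv_text))

    # Rename the coordinate columns to match the landslide (presence) database.
    df_absence = df_absence.rename(columns={'x_nonls': 'x', 'y_nonls': 'y'})

    # Generate synthetic IDs, but only insert them if the CSV does not already
    # provide an ID column, mirroring the guard in the patch.
    nonls_id = ['nonls_event_' + str(i) for i in range(len(df_absence))]
    if 'ID' not in df_absence.columns:
        df_absence.insert(0, 'ID', nonls_id)

    print(df_absence.columns.tolist())  # ['ID', 'x', 'y']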
diff --git a/src/gui_version/utilities/properties_user_input.csv b/src/gui_version/utilities/properties_user_input.csv
index 80f4fefdc4f9b10559fc53344533c68bf772a46f..f7f1babe55f0bceaf96e3deda316e71938aea9e3 100644
--- a/src/gui_version/utilities/properties_user_input.csv
+++ b/src/gui_version/utilities/properties_user_input.csv
@@ -1,6 +1,6 @@
 key,type,range,extension,path
 ls_path,str,None,csv,1
-nonls_path,str,None,nc,1
+nonls_path,str,None,"nc,csv",1
 train_path,str,None,csv,1
 geo_path,str,None,csv,1
 feat_path,str,None,csv,1
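The extension field for nonls_path now holds a comma-separated list instead of a single suffix. How the input check consumes this list is not shown in the patch; purely as an illustration, matching a user-supplied path against such a field could look like the sketch below.

    # Illustrative only; not the project's actual validation code.
    def has_allowed_extension(path, extension_field):
        allowed = [ext.strip() for ext in extension_field.split(',')]
        return path.split('.')[-1] in allowed

    print(has_allowed_extension('absence_points.csv', 'nc,csv'))  # True
    print(has_allowed_extension('absence_points.nc', 'nc,csv'))   # True
    print(has_allowed_extension('absence_points.tif', 'nc,csv'))  # False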
diff --git a/src/plain_scripts/create_training_data.py b/src/plain_scripts/create_training_data.py
index 87233f0989068b6ec71a095957c71ea463b6f920..03f251cb1e996c98ed236e1e60448d4668dec250 100644
--- a/src/plain_scripts/create_training_data.py
+++ b/src/plain_scripts/create_training_data.py
@@ -199,19 +199,37 @@ class create_training_data:
             Supplement presence data with absence data. It needs to be
             pre-generated.
         """
+
+        if settings.path_nonls_locations.split('.')[-1] == 'csv':
+            self.df_absence = pd.read_csv(settings.path_nonls_locations)
+
+            self.df_absence = self.df_absence.rename(
+                columns={
+                    settings.nonls_database_x: settings.landslide_database_x,
+                    settings.nonls_database_y: settings.landslide_database_y})
 
-        ds = nc.Dataset(settings.path_nonls_locations)
+            nonls_id = [
+                'nonls_event_' + str(i) for i in range(len(self.df_absence))]
+            if settings.ID not in self.df_absence.columns:
+                self.df_absence.insert(0, settings.ID, nonls_id)
 
-        x = ds[settings.nonls_database_x][:].data
-        y = ds[settings.nonls_database_y][:].data
-        
-        self.df_absence = pd.DataFrame(index=range(len(x)),
-                                       columns=list(self.df_train.columns))
+            self.logger.info('Absence locations added')
+
+        elif settings.path_nonls_locations.split('.')[-1] == 'nc':
+            ds = nc.Dataset(settings.path_nonls_locations)
+
+            x = ds[settings.nonls_database_x][:].data
+            y = ds[settings.nonls_database_y][:].data
 
-        self.df_absence[settings.ID] = ['nonls_event_' + str(i)
-                                        for i in range(len(x))]
-        self.df_absence[settings.landslide_database_x] = list(x)
-        self.df_absence[settings.landslide_database_y] = list(y)
+            self.df_absence = pd.DataFrame(index=range(len(x)),
+                                           columns=list(self.df_train.columns))
+
+            self.df_absence[settings.ID] = [
+                'nonls_event_' + str(i) for i in range(len(x))]
+            self.df_absence[settings.landslide_database_x] = list(x)
+            self.df_absence[settings.landslide_database_y] = list(y)
+
+            self.logger.info('Absence locations added')
 
     def label_training_data(self):
 
@@ -576,6 +594,9 @@ class create_training_data:
         """
             Determine if the extent of one or several
-            clusters are too large for local interpolation
+            clusters is too large for local interpolation
+
+            Input:
+                -
 
             Output:
                 num_bb: list, names of clusters that need reclustering
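The netCDF read path in the first hunk above is unchanged in substance; it is only moved behind the extension check. The sketch below runs it end to end against a throwaway file; the variable names 'Longitude' and 'Latitude' are placeholders for settings.nonls_database_x and settings.nonls_database_y, and the alignment with df_train's columns is left out.

    import netCDF4 as nc
    import numpy as np
    import pandas as pd

    # Write a small throwaway netCDF file; in SHIRE this file would be
    # pre-generated by the user.
    with nc.Dataset('nonls_demo.nc', 'w') as ds:
        ds.createDimension('points', 3)
        lon = ds.createVariable('Longitude', 'f4', ('points',))
        lat = ds.createVariable('Latitude', 'f4', ('points',))
        lon[:] = np.array([10.1, 10.3, 10.8])
        lat[:] = np.array([47.2, 47.5, 47.9])

    # Mirrors the 'nc' branch: pull the coordinate arrays and assemble an
    # absence DataFrame with generated IDs.
    ds = nc.Dataset('nonls_demo.nc')
    x = ds['Longitude'][:].data
    y = ds['Latitude'][:].data
    ds.close()

    df_absence = pd.DataFrame(index=range(len(x)))
    df_absence['ID'] = ['nonls_event_' + str(i) for i in range(len(x))]
    df_absence['Longitude'] = list(x)
    df_absence['Latitude'] = list(y)
    print(df_absence)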
diff --git a/src/plain_scripts/utilities/properties_user_input.csv b/src/plain_scripts/utilities/properties_user_input.csv
index 95a1a6cc7299f2a9984c0ab2a1eb3a3f9c0e525d..7e159fce4636b3bf53e7718e4810cec025f5012e 100644
--- a/src/plain_scripts/utilities/properties_user_input.csv
+++ b/src/plain_scripts/utilities/properties_user_input.csv
@@ -1,6 +1,6 @@
 key,type,range,extension,path
 ls_path,str,None,csv,1
-nonls_path,str,None,nc,1
+nonls_path,str,None,"nc,csv",1
 train_path,str,None,csv,1
 geo_path,str,None,csv,1
 feat_path,str,None,csv,1
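With the plain-script property table updated as well, the pre-generated absence locations can be supplied as a CSV instead of a netCDF file. A minimal way to produce such a file is sketched below; the column names are placeholders and must match nonls_database_x and nonls_database_y in the user settings.

    import pandas as pd

    # Placeholder column names; they have to match nonls_database_x and
    # nonls_database_y in the user settings.
    nonls = pd.DataFrame({'Longitude': [10.1, 10.3, 10.8],
                          'Latitude': [47.2, 47.5, 47.9]})
    nonls.to_csv('absence_points.csv', index=False)
    # path_nonls_locations would then point to 'absence_points.csv'.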