diff --git a/binary_fill_holes_benchmark.py b/binary_fill_holes_benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee1649218ffd4257b1f71d71c8a495e4c2dd803
--- /dev/null
+++ b/binary_fill_holes_benchmark.py
@@ -0,0 +1,35 @@
+from scipy import ndimage
+import numpy as np
+import cupy as cp
+from cupyx.scipy.ndimage import binary_fill_holes
+
+from timeit import default_timer as timer
+
+a = np.zeros((5000, 5000), dtype=int)
+
+a[1000:4000, 1000:4000] = 1
+a[1200:3800, 1200:3800] = 0  # punch a hole so binary_fill_holes actually has work to do
+
+np_start = timer()
+
+b = ndimage.binary_fill_holes(a).astype(int)
+
+np_end = timer()
+print('np_time: ', np_end - np_start)
+
+print(b)
+
+with cp.cuda.Device(1):
+    c_a = cp.array(a)
+
+    cp_start = timer()
+
+    c_b = binary_fill_holes(c_a).astype(int)
+
+    cp.cuda.Stream.null.synchronize()  # CuPy kernels launch asynchronously; sync before stopping the timer
+    cp_end = timer()
+
+    print('cp_time: ', cp_end - cp_start)
+
+    print(c_b)
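+    # optional sanity check: the GPU result should match the SciPy reference computed above
+    assert np.array_equal(b, cp.asnumpy(c_b)), 'CPU and GPU hole filling disagree'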
\ No newline at end of file
diff --git a/environment.yml b/environment.yml
index f2993dddaacd6d24c9f6832b4600c26f0ba77566..dc797de6ff29f25f88ad3de3849bdca33743aa9d 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,4 +1,4 @@
-name: python37
+name: flash
 channels:
   - pytorch
   - defaults
diff --git a/extractFGpatches.py b/extractFGpatches.py
new file mode 100755
index 0000000000000000000000000000000000000000..a19e23fa9ae4e6dc8ac444f3c8dff9f50b14b1c2
--- /dev/null
+++ b/extractFGpatches.py
@@ -0,0 +1,333 @@
+import numpy as np
+import os
+import sys
+import logging as log
+from PIL import Image
+from scipy.ndimage import label  # scipy.ndimage.measurements is deprecated
+from skimage.color import rgb2gray
+import openslide as osl
+from openslide.lowlevel import OpenSlideError
+from skimage.transform import rescale, resize
+from scipy.ndimage import zoom
+import matplotlib.pyplot as plt
+from skimage.segmentation import flood, flood_fill
+from skimage import filters
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+from skimage.measure import regionprops
+from tifffile import imread
+import shutil
+import cv2
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionResultsWithoutDilation, saveImage, savePredictionResults, saveRGBPredictionOverlayResults, convert_labelmap_to_rgb_with_instance_first_class
+
+from joblib import Parallel, delayed
+import multiprocessing
+from ome_types import from_xml
+
+
+# Writes out every patch of row x whose foreground coverage exceeds minFGproportion.
+# Relies on the module-level globals defined below (extractedPatches, extractedSegm, resultsPath, ...).
+def writeOutPatchesRowwise(x):
+    for y in range(extractedPatches.shape[1]):
+        if extractedSegm[x, y].sum() > (minFGproportion * imageResolution * imageResolution):
+            Image.fromarray(extractedPatches[x, y, 0]).save(
+                resultsPath + '/' + WSIpath[:suffixCut] + '_' + str(x) + '_' + str(y) + '_' + str(i + 1) + '.png')
+    return
+
+
+stain = 'NGAL' #PAS, aSMA, Col III, NGAL, Fibronectin, Meca-32, CD44, F4-80, CD31, AFOG
+# WSIfolder = '/work/scratch/bouteldja/Data_ActivePAS'
+# WSIfolder = '/images/ACTIVE/2015-04_Boor/Nassim_2019/kidney histology'
+# WSIfolder = '/images/ACTIVE/2015-04_Boor/Nassim_2019/kidney IHC'
+WSIfolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/'  # 'SPECIFIED WSI FOLDER'
+
+imageSizeUM = 216
+imageResolution = 640
+strideProportion = 1.0
+minFGproportion = 0.5
+
+
+# resultsPath = '/work/scratch/bouteldja/Data_StainTranslation/'+stain.replace(" ", "")
+resultsPath = WSIfolder
+resultsForegroundSegmentation = resultsPath + '/FGsegm'
+
+if os.path.exists(resultsForegroundSegmentation):
+    shutil.rmtree(resultsForegroundSegmentation)
+
+onlyPerformForegroundSegmentation = True
+
+saveWSICoarseForegroundSegmResults = True
+saveWSICoarseForegroundSegmResults_RegionSeparate = False
+saveWSICroppedForegroundSegmResults = False
+alpha = 0.3
+figHeight = 20
+
+targetSpacing = imageSizeUM / imageResolution
+shiftUM = imageSizeUM // 3
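+# e.g. 216 um / 640 px = 0.3375 um/px target spacing; patch shift = 216 // 3 = 72 um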
+
+struc3 = generate_ball(1)
+struc5 = generate_ball(2)
+struc7 = generate_ball(3)
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+if not os.path.exists(resultsForegroundSegmentation):
+    os.makedirs(resultsForegroundSegmentation)
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsForegroundSegmentation + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+print(os.listdir(WSIfolder))
+
+files = sorted(list(filter(lambda x: ('.ndpi' in x or '.svs' in x or '.tif' in x) and stain in x, os.listdir(WSIfolder))))
+files = ['p21_kidleftp456_B10_PAS_3.ome.tif']  # NOTE: debug override - shadows the filtered list above and restricts the run to a single slide
+# files = sorted(list(filter(lambda x: '.ndpi' in x or '.svs' in x, os.listdir(WSIfolder))))
+logger.info('Number of WSIs in folder: ' + str(len(files)))
+
+num_cores = multiprocessing.cpu_count()
+
+detectedRegions = []
+
+try:
+
+    for no, WSIpath in enumerate(files):
+        # Load slide
+        ## .ome.tiff
+        # imgRGB = imread(os.path.join(WSIfolder, WSIpath))
+        # spacings = np.array([0.2197, 0.2197])
+
+        # resize_x = 500 / imgRGB.shape[1]
+
+        # resize_y = 
+
+        # imgRGB = np.array()
+        slide = osl.OpenSlide(os.path.join(WSIfolder, WSIpath))
+        if '.tif' in WSIpath:
+        # slide = osl.OpenSlide(os.path.join(WSIfolder, WSIpath))
+            pixels = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels
+            spacings = np.array([float(pixels.physical_size_x), float(pixels.physical_size_y)])
+            suffixCut = -8
+        else:
+        # # Extract/print relevant parameters
+            spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+            if WSIpath.split('.')[-1] == 'ndpi':
+                suffixCut = -5
+            else:
+                suffixCut = -4
+        levelDims = np.array(slide.level_dimensions)
+        amountLevels = len(levelDims)
+        levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+        logger.info(str(no + 1) + ':  WSI:\t' + WSIpath + ', Spacing:\t' + str(spacings) + ', levels:\t' + str(amountLevels))
+
+        
+
+        # # specify used wsi level for foreground segmentation: min. 500x500 pixels
+        usedLevel = np.argwhere(np.all(levelDims > [500, 500], 1)).max()
+        logger.info('Used level for foreground segmentation: ' + str(usedLevel))
+
+        # extract image from that level
+        imgRGB = np.array(slide.read_region(location=np.array([0, 0]), level=usedLevel, size=levelDims[usedLevel]))[:, :,:3]
+        img = rgb2gray(imgRGB)
+
+        # foreground segmentation
+        otsu_threshold = filters.threshold_otsu(img)
+
+        divideSizeFactor = 30
+
+        if stain == 'PAS':
+            if '17.40.53' in WSIpath:
+                otsu_threshold -= 0.065
+
+        elif stain == 'aSMA':
+            if '22.15.27' in WSIpath or '22.28.25' in WSIpath:
+                otsu_threshold -= 0.07
+            if '20.19.57' in WSIpath or '16.59.43' in WSIpath:
+                otsu_threshold += 0.024
+
+        elif stain == 'CD31':
+            if '18.51.39' in WSIpath:
+                otsu_threshold += 0.02
+
+        elif stain == 'Col III':
+            if '21.25.37' in WSIpath or '21.42.09' in WSIpath:
+                otsu_threshold -= 0.1
+            elif '2172_13' in WSIpath:
+                otsu_threshold += 0.05
+            else:
+                otsu_threshold += 0.055
+
+        elif stain == 'NGAL':
+            if '21.40.59' in WSIpath or '21.35.56' in WSIpath:
+                otsu_threshold += 0.005
+            elif '18.04.45' in WSIpath:
+                otsu_threshold += 0.07
+            elif '21.46.20' in WSIpath:
+                otsu_threshold += 0.01
+            else:
+                otsu_threshold += 0.05
+
+        elif stain == 'Fibronectin':
+            if '23.03.22' in WSIpath:
+                otsu_threshold -= 0.08
+            elif '00.58.23' in WSIpath:
+                otsu_threshold += 0.02
+            else:
+                otsu_threshold += 0.05
+
+        elif stain == 'Meca-32':
+            divideSizeFactor = 50
+
+            if '1150-12' in WSIpath:
+                otsu_threshold -= 0.097
+            elif '22.36.35' in WSIpath:
+                otsu_threshold -= 0.065
+            elif '10.23.46' in WSIpath:
+                otsu_threshold += 0.05
+            else:
+                otsu_threshold += 0.02
+
+        elif stain == 'CD44':
+            if '11.22.14' in WSIpath or '11.28.21' in WSIpath:
+                otsu_threshold += 0.085
+            elif '11.41.12' in WSIpath:
+                otsu_threshold -= 0.06
+            else:
+                otsu_threshold += 0.015
+
+
+
+
+        img_mask = img < otsu_threshold
+        # img_mask = img < 0.78
+        logger.info('Utilized threshold: ' + str(otsu_threshold))
+
+        if stain == 'NGAL' and '18.58.25' in WSIpath:
+            img_mask[395:405,440:530] = 0
+
+        # extract connected regions covering at least 1/divideSizeFactor of the WSI area (1/30 by default)
+        labeledRegions, numberRegions = label(img_mask, struc3)
+        minRequiredSize = (img_mask.shape[0] * img_mask.shape[1]) // divideSizeFactor
+
+        argRegions = []
+        for i in range(1, numberRegions + 1):
+            if (labeledRegions == i).sum() > minRequiredSize:
+                argRegions.append(i)
+
+        finalWSI_FG = np.zeros(img_mask.shape, dtype=bool)
+
+        # process these regions
+        for i, arg in enumerate(argRegions):
+            logger.info('Extract foreground region ' + str(i + 1) + '...')
+            detectedRegion = labeledRegions == arg
+            detectedRegion = binary_fill_holes(detectedRegion)
+            detectedRegion = binary_opening(detectedRegion, structure=struc7)
+
+            # extract biggest component
+            labeledRegions2, numberLabeledRegions = label(detectedRegion, struc3)
+            if numberLabeledRegions > 1:
+                argMax = np.array([region.area for region in regionprops(labeledRegions2)]).argmax() + 1
+                detectedRegion = labeledRegions2 == argMax
+
+            # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+            # Save foreground segmentation (on chosen coarse resolution level) as overlay
+            if saveWSICoarseForegroundSegmResults_RegionSeparate:
+                saveOverlayResults(imgRGB, detectedRegion, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg.png')
+
+            finalWSI_FG = np.logical_or(finalWSI_FG, detectedRegion)
+
+            logger.info('Foreground segmentation done on coarse level...')
+
+            if onlyPerformForegroundSegmentation:
+                continue
+
+            # enlargement of foreground in order to fully cover border structures
+            # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+            # compute bounding box
+            temp = np.where(detectedRegion == 1)
+
+            bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+            # compute how much to enlarge bbox to consider wider context utilization (especially for patchify)
+            downsampleFactor = int(levelDownsamples[usedLevel])
+            # downsampleFactor = int(resize_x)
+            shift = round((shiftUM / spacings[0]) / downsampleFactor)
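+            # e.g. with shiftUM = 72 um, 0.2197 um/px slide spacing and a 16x-downsampled level: shift = round(72 / 0.2197 / 16) = 20 coarse px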
+
+            # enlarge bounding box due to wider context consideration
+            bbox[0] = max(bbox[0] - shift, 0)
+            bbox[1] = max(bbox[1] - shift, 0)
+            bbox[2] = min(bbox[2] + shift, detectedRegion.shape[0] - 1)
+            bbox[3] = min(bbox[3] + shift, detectedRegion.shape[1] - 1)
+
+
+            bbox_WSI = np.asarray(bbox * downsampleFactor, int)
+            logger.info('High res bounding box coordinates: ' + str(bbox_WSI))
+
+            logger.info('Extract high res patch and segm map...')
+            try:
+                img_WSI = np.array(slide.read_region(location=np.array([bbox_WSI[1], bbox_WSI[0]]), level=0, size=np.array([bbox_WSI[3] - bbox_WSI[1] + downsampleFactor, bbox_WSI[2] - bbox_WSI[0] + downsampleFactor])))[:, :, :3]
+            except OpenSlideError:
+                logger.info('############ FILE CORRUPTED - IGNORED ############')
+                continue
+
+            segMap = zoom(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0)
+            # segMap = rescale(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0, preserve_range=True, multichannel=False)
+            assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale led to unequal resolutions..."
+            logger.info('Done - size of extracted high res patch: ' + str(img_WSI.shape))
+
+            downsamplingFactor = spacings[0] / targetSpacing  # Rescaling very slow!
+            logger.info('Spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+            img_WSI = np.asarray(np.round(rescale(img_WSI, downsamplingFactor, order=1, preserve_range=True, multichannel=True)), np.uint8)
+            segMap = np.asarray(zoom(segMap, downsamplingFactor, order=0), bool)
+            # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+            assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale led to unequal resolutions..."
+            logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+            # Save cropped foreground segmentation result as overlay
+            if saveWSICroppedForegroundSegmResults:
+                saveOverlayResults(img_WSI, segMap, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg2.png')
+
+
+            ##### FOREGROUND SEGMENTATION DONE - NOW PATCH EXTRACTION USING PATCHIFY #####
+            logger.info('Perform patch extraction...')
+
+            extractedPatches = patchify(img_WSI.copy(), patch_size=(imageResolution, imageResolution, 3), step=int(imageResolution*strideProportion))  # shape: (5, 7, 1, 640, 640, 3)
+            extractedSegm = patchify(segMap.copy(), patch_size=(imageResolution, imageResolution), step=int(imageResolution*strideProportion))  # shape: (5, 7, 640, 640)
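+            # patch grid size per axis follows (dim - patch) // step + 1; with strideProportion = 1.0 the patches tile the image without overlap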
+
+            Parallel(n_jobs=num_cores)(delayed(writeOutPatchesRowwise)(x) for x in range(extractedPatches.shape[0]))  # called for its file-writing side effect; the return values are all None
+
+            # for x in range(extractedPatches.shape[0]):
+            #     for y in range(extractedPatches.shape[1]):
+            #         if extractedSegm[x,y].sum() > (minFGproportion * imageResolution * imageResolution):
+            #             Image.fromarray(extractedPatches[x,y,0]).save(resultsPath + '/' + WSIpath[:suffixCut] +'_'+str(x)+'_'+str(y)+'_'+str(i + 1)+'.png')
+
+            logger.info('Done.')
+
+        if saveWSICoarseForegroundSegmResults:
+            saveOverlayResults(imgRGB, finalWSI_FG, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] + '_fgSeg_WSI.png')
+
+        detectedRegions.append(len(argRegions))
+        logger.info('####################')
+
+    logger.info('Detected regions of all processed slides:')
+    logger.info(detectedRegions)
+
+
+
+except:
+    logger.exception('! Exception !')
+    raise
+
+log.info('%%%% Ended regularly ! %%%%')
+
diff --git a/model.py b/model.py
index 0688d44b309ed350d7121401a23e24b135c5afda..5bf7d0f9cdedfc2cf3efc5fe5071c6d40fc424e6 100644
--- a/model.py
+++ b/model.py
@@ -32,8 +32,9 @@ nonlinearity = partial(F.relu, inplace=True)
 ####################################################################################################
 # Custom represents our utilized and developed deep learning model. It is based on the U-Net architecture:
 # ----- Custom Unet 2D/3D - Pooling-Encoder + (Transposed/Upsampling)-Decoder + DoubleConvs ----- #
+# modelDim = input image dimension (2D or 3D image)
 class Custom(nn.Module):
     def __init__(self, input_ch=3, output_ch=1, modelDim=2):
         super(Custom, self).__init__()
         assert modelDim == 2 or modelDim == 3, "Wrong unet-model dimension: " + str(modelDim)
 
@@ -391,3 +392,10 @@ class outconv(nn.Module):
 
 
 
+if __name__ == '__main__':
+    # quick smoke test: push a dummy batch through the 2D model and check the output shape
+    x = torch.rand((1, 3, 640, 640))
+    print(x.shape)
+    model = Custom(input_ch=3, output_ch=8, modelDim=2)
+    output = model(x)
+    print(output.shape)
\ No newline at end of file
diff --git a/segment_WSI.py b/segment_WSI.py
index b27507ef74b2c8c4466c68d6f2e3f9848c7ea905..5504bbe0a0a5b899f1b72b07d02dfcfb7bdab82d 100644
--- a/segment_WSI.py
+++ b/segment_WSI.py
@@ -30,12 +30,17 @@ from skimage.morphology import remove_small_objects
 from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
 from model import Custom
 
+from pathlib import Path
 
 
 
-WSIrootFolder = 'SPECIFIED WSI FOLDER'
-modelpath = 'STRUCTURE SEGMENTATION MODEL PATH'
-resultsPath = 'RESULTS PATH'
+# WSIrootFolder = '/homeStor1/datasets/MarkusRinschen/'#'SPECIFIED WSI FOLDER'
+WSIrootFolder = '/homeStor1/ylan/data/Saskia_3D/debug/'  # 'SPECIFIED WSI FOLDER'
+
+# modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+modelpath = '/homeStor1/nbouteldja/Project_Human_Bios/Results/custom_train_val_test_e500_b10_r0.001_w1e-05_1024_1024_32_RAdam_instance_1deeper_+-1range_noArterialWeights_miceSpacing_InclJASNhumandata_weight10_3/Model/finalModel.pt'
+resultsPath = WSIrootFolder  # 'RESULTS PATH'
+
 
 if not os.path.exists(resultsPath):
     os.makedirs(resultsPath)
@@ -52,7 +57,7 @@ regionMinSizeUM = 3E5
 
 alpha = 0.3
 strideProportion = 0.5
-figHeight = 15
+figHeight = 26
 minibatchSize = 2
 minibatchSize_FG = 1
 useAllGPUs = False
diff --git a/segment_WSI_clean.py b/segment_WSI_clean.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c16888691fdc14e042d9d92d68910ade37d712d
--- /dev/null
+++ b/segment_WSI_clean.py
@@ -0,0 +1,474 @@
+import numpy as np
+import os
+import sys
+import cv2
+import torch
+import math
+import logging as log
+
+from openslide.lowlevel import OpenSlideError
+import openslide as osl
+from PIL import Image
+import matplotlib.pyplot as plt
+from scipy.ndimage import label, zoom  # the measurements/morphology submodules are deprecated
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+import scipy as sp
+from skimage.transform import rescale, resize
+from skimage.measure import regionprops
+from skimage.segmentation import flood, flood_fill
+from skimage.color import rgb2gray
+from skimage.segmentation import clear_border
+from skimage import filters
+from skimage.morphology import remove_small_objects
+
+from utils_Clean import savePredictionOverlayResults_Fast, getFGsegmParam, generate_ball, patchify, unpatchify, saveOverlayResults, save_WSI_image, savePredictionOverlayResults, savePredictionResultsWithoutDilation, saveImage, savePredictionResults, saveRGBPredictionOverlayResults, convert_labelmap_to_rgb_with_instance_first_class
+from model import Custom, CustomContext3, CustomContext, UNetVanilla, CE_Net_2D, CE_Net_Inception_Variants_2D, StackedUNet
+from ome_types import from_xml
+
+
+
+
+# WSIrootFolder = '/homeStor1/nbouteldja/Bertram_McCullen/WSIs'
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/'  # 'SPECIFIED WSI FOLDER'
+
+
+
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b30_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_+-1range_CycleData/Model/finalModel.pt'
+
+# resultsPath = '/homeStor1/nbouteldja/Bertram_McCullen/Segmentations'
+resultsPath = WSIrootFolder
+
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+
+
+patchSegmSize = 516
+patchImgSize = 640
+patchLengthUM = 174.
+
+alpha = 0.3
+strideProportion = 0.5
+figHeight = 26
+minibatchSize = 52
+useAllGPUs = False
+GPUno = 0
+device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu")
+
+tubuliInstanceID_StartsWith = 10  # => tubuli instances start with id 10
+
+ftChannelsOutput = 8
+applyTestTimeAugmentation = True
+centerWeighting = False
+centerWeight = 3.
+
+makeBGwhite = True
+
+saveWSICoarseForegroundSegmResults = True
+saveCroppedForegroundSegmResults = False
+saveMedullaCortexBGSegmResults = True
+saveWSIandPredNumpy = True
+
+
+
+TUBULI_MIN_SIZE = 500
+GLOM_MIN_SIZE = 1800
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 35
+
+# parameter for FGsegm
+regionMinSizeUM = 1E7
+
+labelBG = 8
+
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model.train(False)
+model.eval()
+
+if useAllGPUs:
+    model = torch.nn.DataParallel(model)  # multi-GPUs
+model = model.to(device)
+
+segmentationPatchStride = int(patchSegmSize * strideProportion)
+targetSpacing = patchLengthUM / patchSegmSize
+
+shiftMinUM = (patchImgSize - patchSegmSize) // 2 * targetSpacing + 2 # + 2 just for sufficient margin reasons
+shiftMaxUM = ((patchImgSize - patchSegmSize) // 2 + patchSegmSize * strideProportion) * targetSpacing
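+# e.g. (640 - 516) // 2 = 62 border px and targetSpacing = 174 / 516 ~ 0.337 um/px,
+# so shiftMinUM ~ 62 * 0.337 + 2 ~ 22.9 um and shiftMaxUM ~ (62 + 258) * 0.337 ~ 107.9 um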
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsPath + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+struc3 = generate_ball(1)
+
+try:
+    for dirName, subdirList, fileList in os.walk(WSIrootFolder):
+
+        fileListWSI = sorted([fname for fname in fileList if (fname.endswith('.svs') or fname.endswith('.ndpi') or fname.endswith('.tif'))])
+
+        if len(fileListWSI) != 0:
+            logger.info(str(len(fileListWSI)) + ' WSIs to be analyzed in directory: ' + dirName)
+
+            resultsDir = resultsPath + dirName[len(WSIrootFolder):]
+            resultsDirNPYfiles = resultsDir + '/npyFiles'
+            if not os.path.exists(resultsDirNPYfiles):
+                os.makedirs(resultsDirNPYfiles)
+
+            for no, fname in enumerate(fileListWSI):
+
+                # Load slide
+                slide = osl.OpenSlide(os.path.join(dirName, fname))
+                if fname.endswith('.tif'):
+                    pixels = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels
+                    spacings = np.array([float(pixels.physical_size_x), float(pixels.physical_size_y)])
+                else:
+                    # Extract/print relevant parameters
+                    spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+                levelDims = np.array(slide.level_dimensions)
+                amountLevels = len(levelDims)
+                levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+                logger.info(str(no + 1) + ':  WSI:\t' + fname + ', Spacing:\t' + str(spacings) + ', levels:\t' + str(amountLevels))
+                logger.info('Spacings: ' + str(spacings))
+                logger.info('Level Dimensions: ' + str(levelDims))
+                logger.info('Level Downsamples: '+str(levelDownsamples))
+
+                suffixCut = -5 if fname.split('.')[-1] == 'ndpi' else -4
+                if fname.endswith('.ome.tif'):
+                    suffixCut = -8  # keep the full basename for OME-TIFFs, as done in extractFGpatches.py and segment_WSI_cupy.py
+
+                # specify used wsi level for foreground segmentation: min. 500x500 pixels
+                usedLevel = np.argwhere(np.all(levelDims > [500, 500], 1)).max()
+                logger.info('Used level for foreground segmentation: ' + str(usedLevel))
+
+                opening_radius, moreThresh = getFGsegmParam(fname, dirName)
+                logger.info('Initial opening_radius: {}, moreThresh: {}'.format(opening_radius, moreThresh))
+
+                # extract image from that level
+                imgRGB = np.array(slide.read_region(location=np.array([0, 0]), level=usedLevel, size=levelDims[usedLevel]))[:, :,:3]
+                img = rgb2gray(imgRGB)
+
+                # foreground segmentation
+                otsu_threshold = filters.threshold_otsu(img)
+                img_mask = img < otsu_threshold + moreThresh
+                logger.info('Utilized threshold: ' + str(otsu_threshold))
+
+
+                regionMinPixels = regionMinSizeUM / (spacings[0] * spacings[1] * levelDownsamples[usedLevel] * levelDownsamples[usedLevel])
+
+                labeledRegions, _ = label(img_mask)
+                labeledRegions, numberFGRegions = label(remove_small_objects(labeledRegions, min_size=regionMinPixels))
+
+
+                while numberFGRegions == 0:
+                    otsu_threshold += 0.02
+                    img_mask = img < otsu_threshold + moreThresh
+                    logger.info('No region detected, utilized threshold now: ' + str(otsu_threshold + moreThresh))
+                    labeledRegions, _ = label(img_mask)
+                    labeledRegions, numberFGRegions = label(remove_small_objects(labeledRegions, min_size=regionMinPixels))
+
+                while (labeledRegions == 1).sum() > 0.98 * img.shape[0] * img.shape[1]:
+                    otsu_threshold -= 0.04
+                    img_mask = img < otsu_threshold + moreThresh
+                    logger.info('Whole image region detected, utilized threshold now: ' + str(otsu_threshold + moreThresh))
+                    labeledRegions, _ = label(img_mask)
+                    labeledRegions, numberFGRegions = label(remove_small_objects(labeledRegions, min_size=regionMinPixels))
+
+                logger.info('In total -> '+str(numberFGRegions)+' <- regions on WSI detected!')
+
+                # process these regions
+                for regionID in range(1, numberFGRegions+1):
+                    logger.info('#######\n Extract foreground region ' + str(regionID) + '...')
+                    detectedRegion = labeledRegions == regionID
+
+                    detectedRegion = binary_fill_holes(detectedRegion)
+                    detectedRegion = binary_opening(detectedRegion, structure=generate_ball(opening_radius))
+
+                    # extract biggest component
+                    labeledRegions2, numberLabeledRegions = label(detectedRegion, struc3)
+                    if numberLabeledRegions > 1:
+                        argMax = np.array([region.area for region in regionprops(labeledRegions2)]).argmax() + 1
+                        detectedRegion = labeledRegions2 == argMax
+                        if detectedRegion.sum() < regionMinPixels:
+                            logger.info('Region has gotten too small after opening!')
+                            continue
+                    elif numberLabeledRegions == 0:
+                        logger.info('Region vanished after opening!')
+                        continue
+
+                    logger.info('Foreground segmentation done on coarse level...')
+
+                    # compute bounding box and how much to enlarge bbox to consider wider context utilization (especially for patchify)
+                    temp = np.where(detectedRegion == 1)
+                    bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+                    downsampleFactor = int(levelDownsamples[usedLevel])
+                    shiftMin = round((shiftMinUM / spacings[0]) / downsampleFactor)
+                    shiftMax = round((shiftMaxUM / spacings[0]) / downsampleFactor)
+
+                    # enlarge bounding box due to wider context consideration
+                    bbox[0] = max(bbox[0] - shiftMin, 0)
+                    bbox[1] = max(bbox[1] - shiftMin, 0)
+                    bbox[2] = min(bbox[2] + shiftMax, detectedRegion.shape[0] - 1) + 1
+                    bbox[3] = min(bbox[3] + shiftMax, detectedRegion.shape[1] - 1) + 1
+
+                    bbox_WSI = np.asarray(bbox * downsampleFactor, int)
+                    logger.info('High res bounding box coordinates: ' + str(bbox_WSI))
+
+                    # Save foreground segmentation (on chosen coarse resolution level) as overlay
+                    if saveWSICoarseForegroundSegmResults:
+                        logger.info('Saving coarse foreground segmentation results...')
+                        saveOverlayResults(imgRGB, detectedRegion, alpha=alpha, figHeight=figHeight, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_fgSeg_({}_{}_{}).png'.format(bbox_WSI[0], bbox_WSI[1], spacings[0]))
+
+
+                    logger.info('Extract high res patch and segm map...')
+                    try:
+                        img_WSI = np.asarray(np.array(slide.read_region(location=np.array([bbox_WSI[1], bbox_WSI[0]]), level=0, size=np.array([bbox_WSI[3] - bbox_WSI[1], bbox_WSI[2] - bbox_WSI[0]])))[:, :, :3], np.uint8)
+                    except OpenSlideError:
+                        logger.info('############ FILE CORRUPTED - IGNORED ############')
+                        continue
+
+                    segMap = zoom(detectedRegion[bbox[0]:bbox[2], bbox[1]:bbox[3]], downsampleFactor, order=0)
+                    # segMap = rescale(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0, preserve_range=True, multichannel=False)
+                    assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling led to unequal resolutions..."
+                    logger.info('Done - size of extracted high res patch: ' + str(img_WSI.shape))
+
+                    downsamplingFactor = spacings[0] / targetSpacing  # Rescaling would be very slow using 'rescale' method!
+                    logger.info('Utilized spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+                    segMap = np.asarray(zoom(segMap, downsamplingFactor, order=0), bool)
+                    img_WSI = cv2.resize(img_WSI, dsize=tuple(np.flip(segMap.shape)), interpolation=cv2.INTER_LINEAR)
+                    # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+                    assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via zoom/resize led to unequal resolutions..."
+                    logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+                    # Save cropped foreground segmentation result as overlay
+                    if saveCroppedForegroundSegmResults:
+                        logger.info('Saving initial cropped foreground segmentation results...')
+                        # saveOverlayResults(img_WSI, segMap, alpha=alpha, figHeight=figHeight, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_fgSeg_tissue.png')
+                        saveOverlayResults(img_WSI, segMap, alpha=0.1, figHeight=figHeight, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_fgSeg_tissue.png')
+
+                    ##### PREPROCESSING DONE - NOW: NETWORK SEGMENTATION PART #####
+                    logger.info('Start segmentation process...')
+
+                    # preprocess img
+                    img_WSI_prep = (np.array((img_WSI / 255. - 0.5) / 0.5, np.float32)).transpose(2, 0, 1)
+
+                    # patchify
+                    smallOverlappingPatches = patchify(img_WSI_prep.copy(), patch_size=(3, patchImgSize, patchImgSize), step=segmentationPatchStride)  # shape: (1, 5, 7, 3, 640, 640)
+
+                    # smallOverlappingPatches = torch.from_numpy(smallOverlappingPatches).to(device)
+                    smallOverlappingPatches = torch.from_numpy(smallOverlappingPatches)
+
+                    # calculate the segmentation patch size, since patchify cuts off the last patch if it does not fit exactly into the window
+                    startX = (patchImgSize - patchSegmSize) // 2; startY = startX
+                    endX = segmentationPatchStride * (smallOverlappingPatches.size(1)-1) + patchSegmSize + startX
+                    endY = segmentationPatchStride * (smallOverlappingPatches.size(2)-1) + patchSegmSize + startY
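+                    # e.g. for a (1, 5, 7, 3, 640, 640) patch grid: endX = 258 * 4 + 516 + 62 = 1610, endY = 258 * 6 + 516 + 62 = 2126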
+
+                    bigPatchResults = torch.zeros(device="cpu", size=(ftChannelsOutput, endX - startX, endY - startY))
+
+                    amountOfRowPatches = smallOverlappingPatches.size(2)
+                    gpuAmountRowPatchSplits = math.ceil(amountOfRowPatches / minibatchSize)
+                    gpuIDXsplits = np.array_split(np.arange(amountOfRowPatches), gpuAmountRowPatchSplits)
+
+                    for x in range(smallOverlappingPatches.size(1)):
+                        for k in range(gpuAmountRowPatchSplits):
+                            imgBatch = smallOverlappingPatches[0, x, gpuIDXsplits[k], :, :, :].to(device)
+
+                            with torch.no_grad():
+                                rowPrediction = torch.softmax(model(imgBatch), dim=1)  # shape: (7, 8, 516, 516)
+
+                                if applyTestTimeAugmentation:
+                                    imgBatch = imgBatch.flip(2)
+                                    rowPrediction += torch.softmax(model(imgBatch), 1).flip(2)
+
+                                    imgBatch = imgBatch.flip(3)
+                                    rowPrediction += torch.softmax(model(imgBatch), 1).flip(3).flip(2)
+
+                                    imgBatch = imgBatch.flip(2)
+                                    rowPrediction += torch.softmax(model(imgBatch), 1).flip(3)
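+                                    # four orientations accumulated in total (identity, flip dim 2, flip dims 2+3, flip dim 3), each un-flipped before summing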
+
+                                if centerWeighting:
+                                    rowPrediction[:, :, patchSegmSize // 4: patchSegmSize // 4 * 3, patchSegmSize // 4: patchSegmSize // 4 * 3] *= centerWeight
+
+                                rowPrediction = rowPrediction.to("cpu")
+
+                                for idx, y in enumerate(gpuIDXsplits[k]):
+                                    bigPatchResults[:, segmentationPatchStride * x:patchSegmSize + segmentationPatchStride * x,segmentationPatchStride * y:patchSegmSize + segmentationPatchStride * y] += rowPrediction[idx, :, :, :]
+
+                    bigPatchResults = torch.argmax(bigPatchResults, 0).byte().numpy() # shape: (1536, 2048)
+
+                    logger.info('Predictions generated. Final shape: '+str(bigPatchResults.shape))
+
+                    # Context margin + border patches not fully inside img removed
+                    img_WSI = img_WSI[startX:endX, startY:endY, :]
+                    segMap = segMap[startX:endX, startY:endY]
+
+                    # correct foreground segmentation including all touching vein prediction instances
+                    bgMap = np.logical_not(segMap)
+                    bigPatchResults[bgMap] = 4 #vein class assignment of bg
+                    temp = bigPatchResults == 4
+                    bgMap = np.logical_xor(clear_border(temp), temp)
+                    segMap = np.logical_not(bgMap)
+                    segMap = binary_fill_holes(segMap)
+                    bgMap = np.logical_not(segMap)
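+                    # clear_border drops vein components touching the image border, so the XOR keeps exactly those;
+                    # border-touching vein predictions are thereby merged into the background before hole filling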
+
+                    # extract biggest fg component
+                    temp, numberLabeledRegions = label(segMap)
+                    if numberLabeledRegions > 1:
+                        argMax = np.array([region.area for region in regionprops(temp)]).argmax() + 1
+                        segMap = temp == argMax
+                        bgMap = np.logical_not(segMap)
+
+                    bigPatchResults[bgMap] = labelBG # color of label 'labelBG' => Purple represents BG just for visualization purposes
+
+                    if makeBGwhite:
+                        img_WSI[bgMap] = 255
+
+                    logger.info('Saving prediction and background overlay results...')
+                    save_WSI_image(img_WSI, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_imageWSI.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight))
+                    savePredictionOverlayResults(img_WSI, bigPatchResults, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+                    if saveWSIandPredNumpy:
+                        logger.info('Saving numpy img...')
+                        np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] + '_'+str(regionID)+'_resultWSI.npy', img_WSI)
+
+                    logger.info('Start postprocessing...')
+
+                    # remove border class
+                    bigPatchResults[bigPatchResults == 7] = 0
+
+                    # Delete BG to reduce postprocessing overhead
+                    bigPatchResults[bgMap] = 0
+
+                    ################# HOLE FILLING ################
+                    bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+                    bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+                    temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+                    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+                    bigPatchResults[temp] = 3  # tuft
+                    temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+                    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+                    bigPatchResults[temp] = 6  # artery_lumen
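+                    # order matters: tuft holes are recorded first, the glom+tuft union is filled as glom (2),
+                    # then the tuft (3) is painted back on top; the artery wall/lumen pair is handled the same way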
+
+                    ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+                    temp, _ = label(bigPatchResults == 1)
+                    finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+                    ############ PERFORM TUBULE DILATION ############
+                    finalResults_Instance, numberTubuli = label(finalResults_Instance)
+                    assert numberTubuli < 65500, 'ERROR: too many tubuli detected - a uint16 label map holds at most 2^16 = 65536 ids!'
+                    finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
+                    # finalResults_Instance = np.asarray(finalResults_Instance, np.uint16)
+                    # finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(1), np.uint8), iterations=1) #RESULT TYPE: UINT16
+                    finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+
+                    temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+                    finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+                    temp, _ = label(bigPatchResults == 3)
+                    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+                    temp, _ = label(bigPatchResults == 4)
+                    finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+                    temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+                    finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+                    temp, _ = label(bigPatchResults == 6)
+                    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+                    finalResults_Instance = finalResults_Instance * segMap
+
+                    logger.info('Done - Save final instance overlay results...')
+                    savePredictionOverlayResults(img_WSI, finalResults_Instance, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+                    logger.info('Done - Save final non-instance overlay results...')
+                    finalResults = finalResults_Instance.copy()
+                    finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+                    savePredictionOverlayResults(img_WSI, finalResults, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+                    finalResults_Instance[bgMap] = labelBG
+
+                    if saveWSIandPredNumpy:
+                        logger.info('Saving numpy final instance prediction results...')
+                        np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] + '_'+str(regionID)+'_finalInstancePrediction.npy', finalResults_Instance)
+
+                    ################################################################################################################
+
+                    logger.info('Medulla/Cortex-Segmentation starts...')
+
+                    downscaleFactor = 25.
+                    MEDULLA_TRESH_UP = 1200000
+                    MEDULLA_TRESH_DOWN = MEDULLA_TRESH_UP // int(downscaleFactor * downscaleFactor)
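+                    # i.e. 1200000 // (25 * 25) = 1920 px minimum medulla size on the 25x-downscaled map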
+                    cortexRad = 75
+                    medullaRad = 58
+                    smoothMed = 35
+
+                    finalResults_down = np.asarray(zoom(finalResults, 1 / downscaleFactor, order=0), np.uint8)
+                    bgMap_down = np.asarray(zoom(bgMap, 1 / downscaleFactor, order=0), bool)
+
+                    finalResults_down = np.logical_or(finalResults_down == 2, finalResults_down == 3)
+                    finalResults_down = binary_dilation(finalResults_down, structure=generate_ball(cortexRad))
+                    finalResults_down = np.logical_or(finalResults_down, bgMap_down)
+
+                    medulla = np.logical_not(finalResults_down)
+
+                    temp, numberRegions = label(medulla, generate_ball(1))
+                    if numberRegions > 1:
+                        for i in range(1, numberRegions + 1):
+                            medullaMask = temp == i
+                            if medullaMask.sum() < MEDULLA_TRESH_DOWN:
+                                temp[medullaMask] = 0
+                        medulla = temp > 0
+                        # argMax = np.array([region.area for region in regionprops(temp)]).argmax() + 1
+                        # medulla = temp == argMax
+
+                    medulla = binary_dilation(medulla, structure=generate_ball(medullaRad))
+
+                    # smooth medulla on coarse level
+                    medulla = sp.ndimage.gaussian_filter(np.asarray(medulla, np.float32), smoothMed, mode='nearest') > 0.5  # scipy.ndimage.filters is deprecated
+
+                    medulla = np.asarray(cv2.resize(np.asarray(medulla, np.uint8), dsize=(img_WSI.shape[1], img_WSI.shape[0]), interpolation=cv2.INTER_NEAREST), bool)
+
+                    medulla = np.logical_and(medulla, segMap)
+
+                    temp, numberRegions = label(medulla, generate_ball(1))
+                    if numberRegions > 1:
+                        for i in range(1, numberRegions + 1):
+                            medullaMask = temp == i
+                            if medullaMask.sum() < MEDULLA_TRESH_UP:
+                                temp[medullaMask] = 0
+                        medulla = temp > 0
+                    #     argMax = np.array([region.area for region in regionprops(temp)]).argmax() + 1
+                    #     medulla = temp == argMax
+
+                    kortex = np.asarray(np.logical_and(np.logical_not(medulla), segMap), np.uint8)
+                    kortex[medulla] = 2
+
+                    if saveWSIandPredNumpy:
+                        logger.info('Saving numpy kortex/medulla...')
+                        np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] + '_'+str(regionID)+'_resultKortexMedulla.npy', kortex)
+
+                    if saveMedullaCortexBGSegmResults:
+                        logger.info('Saving kortex/medulla/bg segmentation results...')
+                        kortex[kortex == 0] = labelBG
+                        savePredictionOverlayResults_Fast(img_WSI, kortex, resultsDir + '/' + fname[:suffixCut] + '_' + str(regionID) + '_medullaKortexBG.png', figSize=(bigPatchResults.shape[1] / bigPatchResults.shape[0] * figHeight, figHeight), alpha=alpha)
+
+                logger.info('####################')
+
+except:
+    logger.exception('! Exception !')
+    raise
+
+log.info('%%%% Ended regularly ! %%%%')
diff --git a/segment_WSI_cupy.py b/segment_WSI_cupy.py
new file mode 100644
index 0000000000000000000000000000000000000000..231059e375f19de50ad98222d1d0aa313fb104d0
--- /dev/null
+++ b/segment_WSI_cupy.py
@@ -0,0 +1,541 @@
+# This script recursively walks a specified folder and applies the tissue segmentation and structure segmentation CNNs to every WSI it finds.
+
+import numpy as np
+import cupy as cp
+import cupyx.scipy as sp
+from cupyx.scipy.ndimage import label
+from cupyx.scipy.ndimage import zoom
+from cupyx.scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+from cucim.skimage.morphology import remove_small_objects
+
+import os
+import sys
+import cv2
+import torch
+import math
+import logging as log
+from tqdm import tqdm, trange
+import re
+
+# from openslide.lowlevel import OpenSlideError
+import openslide as osl
+from ome_types import from_xml
+from PIL import Image
+import matplotlib.pyplot as plt
+# from scipy.ndimage import label
+# from scipy.ndimage import zoom
+# from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+# import scipy.ndimage
+# import scipy as sp
+# from skimage.transform import rescale, resize
+# from skimage.measure import regionprops
+# from skimage.segmentation import flood, flood_fill
+# from skimage.color import rgb2gray
+# from skimage.segmentation import clear_border
+# from skimage import filters
+# from skimage.morphology import remove_small_objects
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
+from model import Custom
+
+from pathlib import Path
+
+import nnunetv2
+from batchgenerators.dataloading.data_loader import DataLoader
+from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
+from batchgenerators.transforms.utility_transforms import NumpyToTensor
+from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \
+    save_json
+from nnunetv2.configuration import default_num_processes
+from nnunetv2.inference.export_prediction import export_prediction_from_softmax
+from nnunetv2.inference.sliding_window_prediction import predict_sliding_window_return_logits, compute_gaussian
+from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
+from nnunetv2.utilities.file_path_utilities import get_output_folder, should_i_save_to_file, check_workers_busy
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.json_export import recursive_fix_for_json_export
+from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels, convert_labelmap_to_one_hot
+from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
+from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
+
+from tifffile import imread
+# from aicsimageio import AICSImage
+import gc
+
+
+def load_what_we_need(model_training_output_dir, use_folds, checkpoint_name):
+    # we could also load plans and dataset_json from the init arguments in the checkpoint. Not quite sure what is the
+    # best method so we leave things as they are for the moment.
+    dataset_json = load_json(join(model_training_output_dir, 'dataset.json'))
+    plans = load_json(join(model_training_output_dir, 'plans.json'))
+    plans_manager = PlansManager(plans)
+
+    if isinstance(use_folds, str):
+        use_folds = [use_folds]
+
+    parameters = []
+    for i, f in enumerate(use_folds):
+        f = int(f) if f != 'all' else f
+        checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name),
+                                map_location=torch.device('cpu'))
+        if i == 0:
+            trainer_name = checkpoint['trainer_name']
+            configuration_name = checkpoint['init_args']['configuration']
+            inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \
+                'inference_allowed_mirroring_axes' in checkpoint.keys() else None
+
+        parameters.append(checkpoint['network_weights'])
+
+    configuration_manager = plans_manager.get_configuration(configuration_name)
+    # restore network
+    num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json)
+    trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"),
+                                                trainer_name, 'nnunetv2.training.nnUNetTrainer')
+    network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager,
+                                                    num_input_channels, enable_deep_supervision=False)
+    return parameters, configuration_manager, inference_allowed_mirroring_axes, plans_manager, dataset_json, network, trainer_name
+
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/p21_kidrightp789_B10_PAS'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/datasets/Cooperations/MarkusRinschen/'#'SPECIFIED WSI FOLDER'
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/ometif/'  # 'SPECIFIED WSI FOLDER'
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+resultsPath = WSIrootFolder  # 'RESULTS PATH'
+
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+
+# model_FG_path = 'TISSUE SEGMENTATION MODEL PATH'
+
+patchSegmSize = 516
+patchImgSize = 640
+patchLengthUM = 174.
+
+patchImgSize_FG = 512
+patchLengthUM_FG = 1500 #2500
+regionMinSizeUM = 3E5
+
+alpha = 0.3
+strideProportion = 0.5
+figHeight = 15
+minibatchSize = 2
+minibatchSize_FG = 1
+useAllGPUs = False
+GPUno = 0
+device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu")
+
+tubuliInstanceID_StartsWith = 10  # => tubuli instances start with id 10
+
+ftChannelsOutput = 8
+applyTestTimeAugmentation = True
+centerWeighting = False
+centerWeight = 3.
+
+saveWSICoarseForegroundSegmResults = True
+# saveCroppedForegroundSegmResults = False
+saveCroppedWSIimg = True
+saveWSIandPredNumpy = True
+
+TUBULI_MIN_SIZE = 400
+GLOM_MIN_SIZE = 1500
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 20
+
+labelBG = 8
+
+# LOAD STRUCTURE SEGMENTATION MODEL
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model.train(False)
+model.eval()
+
+if useAllGPUs:
+    model = torch.nn.DataParallel(model)  # multi-GPUs
+model = model.to(device)
+
+# LOAD TISSUE SEGMENTATION MODEL
+use_folds = [0]
+model_training_output_dir = f'/homeStor1/ylan/data/nnUNet/nnUNet_results/Dataset555_TissueDetection/nnUNetTrainer__nnUNetPlans__2d'
+checkpoint_name = 'checkpoint_best.pth'
+
+parameters, configuration_manager, inference_allowed_mirroring_axes, \
+        plans_manager, dataset_json, network, trainer_name = load_what_we_need(model_training_output_dir, use_folds, checkpoint_name)
+# model = network
+model_FG = network
+network.load_state_dict(parameters[0])
+
+
+# model_FG = torch.load(model_FG_path)
+model_FG = model_FG.to(device)
+model_FG.eval()
+
+segmentationPatchStride = int(patchSegmSize * strideProportion)
+targetSpacing = patchLengthUM / patchSegmSize
+
+targetSpacing_FG = patchLengthUM_FG / patchImgSize_FG
+
+shiftMinUM = (patchImgSize - patchSegmSize) // 2 * targetSpacing + 2 # + 2 just for sufficient margin reasons
+shiftMaxUM = ((patchImgSize - patchSegmSize) // 2 + patchSegmSize * strideProportion) * targetSpacing
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsPath + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+struc3 = generate_ball(1)
+
+# walk through the directory and its subdirectories recursively
+for dirName, subdirList, fileList in os.walk(WSIrootFolder):
+
+    # print(fileList)
+    # filter WSIs in current directory
+    fileListWSI = sorted([fname for fname in fileList if (fname.endswith('.svs') or fname.endswith('.ndpi') or fname.endswith('.scn') or fname.endswith('.tif') or fname.endswith('.ome.tif')) and 'PAS' in fname])
+    print(fileListWSI)
+    if len(fileListWSI) != 0:
+        logger.info(str(len(fileListWSI)) + ' WSIs to be analyzed in directory: ' + dirName)
+
+        resultsDir = resultsPath + dirName[len(WSIrootFolder):]
+        resultsDirNPYfiles = resultsDir + '/npyFiles'
+        if not os.path.exists(resultsDirNPYfiles):
+            os.makedirs(resultsDirNPYfiles)
+
+        # traverse through all found WSIs
+        for no, fname in tqdm(enumerate(fileListWSI)):
+
+            # Extract/print relevant parameters
+            # img_WSI = imread(os.path.join(dirName, fname))
+            # spacings = np.array([0.2197, 0.2197])
+
+            try:
+                slide = osl.OpenSlide(os.path.join(dirName, fname))
+                print(slide.properties)
+                logger.info(str(no + 1) + ':  WSI:\t' + fname)
+                if 'openslide.mpp-x' in slide.properties.keys():
+                    spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+                else: 
+                    pixels = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels
+                    spacings = np.array([float(pixels.physical_size_x), float(pixels.physical_size_y)])
+            except:
+                logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+                continue
+            levelDims = np.array(slide.level_dimensions)
+            amountLevels = len(levelDims)
+            levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+            logger.info('Spacings: ' + str(spacings))
+            logger.info('Level Dimensions: ' + str(levelDims))
+            logger.info('Level Downsamples: '+str(levelDownsamples))
+
+            suffixCut = -5 if fname.split('.')[-1] == 'ndpi' else -4
+            if fname.split('.')[-1] == 'tif':
+                suffixCut = -8
+
+
+
+            # NOTE: the original plan of picking the WSI level closest to the tissue network's target spacing
+            # is commented out below; instead the slide is read at full resolution (level 0) and resized
+            # directly to the structure network's target spacing
+            spacingFactorX = spacings[0] / targetSpacing
+            spacingFactorY = spacings[1] / targetSpacing
+            # x_scaled = round(levelDims[0][0] * spacingFactorX)
+            # y_scaled = round(levelDims[0][1] * spacingFactorY)
+
+            # usedLevel = np.argwhere(np.all(levelDims > [x_scaled, y_scaled], 1) == True).max()
+
+            # # resample to target spacing
+            # logger.info('Image size resampled to FG spacing would be {}, {}, thus level {} with resolution {} chosen as resampling point!'.format(x_scaled, y_scaled, usedLevel, levelDims[usedLevel]))
+            # spacingOnUsedLevelX = spacings[0] * levelDownsamples[usedLevel]
+            # spacingOnUsedLevelY = spacings[1] * levelDownsamples[usedLevel]
+            # downsamplingFactorX = spacingOnUsedLevelX / targetSpacing_FG
+            # downsamplingFactorY = spacingOnUsedLevelY / targetSpacing_FG
+            np_slide = np.array(slide.read_region(location=np.array([0, 0]), level=0, size=levelDims[0]))[:, :, :3]
+
+            # d1 = int(round(imgWSI.shape[1] * downsamplingFactorX))
+            # d2 = int(round(imgWSI.shape[0] * downsamplingFactorY))
+
+            # img_shape = slide.pages[0].shape
+            # print(spacingFactorX, spacingFactorY)
+            # print(targetSpacing_FG)
+            # print(slide.shape)
+            # print(slide.shape[0])
+            d1 = int(round(np_slide.shape[1] * spacingFactorX)) 
+            d2 = int(round(np_slide.shape[0] * spacingFactorY))
+            # # print(d1, d2)
+
+            imgWSI = cv2.resize(np_slide, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR) #dtype: uint8, size: d2 x d1 x 3
+            del np_slide
+            gc.collect()
+
+            print(imgWSI.shape)
+            # imgWSIzeropadded = np.zeros(shape=(d2+patchImgSize_FG-1, d1+patchImgSize_FG-1, 3), dtype=np.float32)
+            # imgWSIzeropadded[:d2,:d1,:] = imgWSI
+
+            # tesselate resampled image
+            # smallOverlappingPatches = patchify(imgWSIzeropadded, patch_size=(patchImgSize_FG, patchImgSize_FG, 3), step=patchImgSize_FG) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+
+            # tileDataset = []
+            # # with tqdm(total=smallOverlappingPatches.shape[0] * smallOverlappingPatches.shape[1]) as pbar:
+            # for i in range(smallOverlappingPatches.shape[0]):
+            #     for j in range(smallOverlappingPatches.shape[1]):
+            #         tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+            #             # pbar.update(1)
+
+            # img_mask = np.zeros(shape=(imgWSIzeropadded.shape[0],imgWSIzeropadded.shape[1]), dtype=bool)
+
+            # create dataloader for concurrent tile processing and prediction computation
+            # dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize_FG, shuffle=False)
+            # with torch.no_grad():
+            #     for i, data in enumerate(dataloader, 0):
+            #         imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+            #         prediction = model_FG(imgBatch)  # prediction should have shape (1,2,512,512)
+            #         prediction = (prediction[:,1,:,:] > prediction[:,0,:,:]).to("cpu").numpy()
+
+            #         for n, d in zip(data['name'], prediction):
+            #             x = int(n.split('-')[0])
+            #             y = int(n.split('-')[1])
+            #             img_mask[x * patchImgSize_FG: (x+1) * patchImgSize_FG, y * patchImgSize_FG: (y+1) * patchImgSize_FG] = d
+
+
+            
+
+            # img_mask = img_mask[:d2,:d1]
+
+            # print('img_mask_shape: ', img_mask.shape)
+            # ###########################################################
+
+            # # postprocessing
+            # img_mask = binary_fill_holes(img_mask)
+
+            # # remove connected regions if too small
+            # regionMinPixels = regionMinSizeUM / (targetSpacing_FG * targetSpacing_FG)
+            # img_mask, _ = label(img_mask)
+            # labeledRegions, numberFGRegions = label(remove_small_objects(img_mask, min_size=regionMinPixels))
+            # if numberFGRegions < 256:
+            #     labeledRegions = np.asarray(labeledRegions, np.uint8)
+
+            # if saveWSICoarseForegroundSegmResults:
+            #     logger.info('Saving WSI-level coarse FG segmentation results...')
+            #     savePredictionOverlayResults(imgWSI, labeledRegions, alpha=alpha, figSize=(labeledRegions.shape[1]/labeledRegions.shape[0]*figHeight, figHeight), fullResultPath=resultsDir + '/' + fname[:suffixCut] + '_0_fgSeg.png')
+
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(slide.shape[0],slide.shape[1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(levelDims[0][0],levelDims[0][1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+
+            # logger.info('In total -> '+str(numberFGRegions)+' <- regions on WSI detected!')
+
+            # process all detected tissue regions separately 
+            # for regionID in range(1, numberFGRegions+1):
+                # logger.info('#######\n Extract foreground region ' + str(regionID) + '...')
+                # detectedRegion = labeledRegions == regionID
+
+                # # compute bounding box and how much to enlarge bbox to consider wider context utilization (especially for patchify)
+                # temp = np.where(detectedRegion == 1)
+                # bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+                # shiftMin = round(shiftMinUM / spacings[0])
+                # shiftMax = round(shiftMaxUM / spacings[0])
+
+                # # enlarge bounding box due to wider context consideration
+                # bbox[0] = max(bbox[0] - shiftMin, 0)
+                # bbox[1] = max(bbox[1] - shiftMin, 0)
+                # bbox[2] = min(bbox[2] + shiftMax, detectedRegion.shape[0] - 1) + 1
+                # bbox[3] = min(bbox[3] + shiftMax, detectedRegion.shape[1] - 1) + 1
+                # print(bbox)
+
+                # logger.info('Extract high res patch and segm map...')
+                # img_WSI = 
+                # try:
+                #     img_WSI = np.asarray(np.array(slide.read_region(location=np.array([bbox[1], bbox[0]]), level=0, size=np.array([bbox[3] - bbox[1], bbox[2] - bbox[0]])))[:, :, :3], np.uint8)
+                # except OpenSlideError:
+                #     logger.info('#################################### FILE CORRUPTED - IGNORED ####################################')
+                #     continue
+                # detectedRegion = detectedRegion[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+
+                # extract image and resample into target spacing of the structure segmentation network
+                # downsamplingFactor = spacings[0] / targetSpacing  # Rescaling would be very slow using 'rescale' method!
+                # logger.info('Utilized spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+                # segMap = np.asarray(zoom(detectedRegion, downsamplingFactor, order=0), np.bool)
+                # img_WSI = cv2.resize(img_WSI, dsize=tuple(np.flip(segMap.shape)), interpolation=cv2.INTER_LINEAR)
+                # # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+                # assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via zoom/resize led to unequal resolutions..."
+                # logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+                # if np.min(segMap.shape) < patchImgSize:
+                #     logger.info('Detected region smaller than window, thus skipped...')
+                #     continue
+
+                ##### PREPROCESSING DONE - NOW: NETWORK SEGMENTATION PART #####
+                # logger.info('Start segmentation process...')
+
+                # preprocess img
+            imgWSI = np.array((imgWSI / 255. - 0.5) / 0.5, np.float32)  # normalize to [-1, 1]; kept as numpy so patchify() and the slicing below work
+            
+
+            # tessellate the image and tissue prediction results
+            print('Patchify!')
+            smallOverlappingPatches = patchify(imgWSI.copy(), patch_size=(patchImgSize, patchImgSize, 3), step=segmentationPatchStride) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+            # smallOverlappingPatches_FG = patchify(segMap.copy(), patch_size=(patchImgSize, patchImgSize), step=segmentationPatchStride)
+
+            tileDataset = []
+            for i in range(smallOverlappingPatches.shape[0]):
+                for j in range(smallOverlappingPatches.shape[1]):
+                    # if smallOverlappingPatches_FG[i,j,:,:].any():
+                    tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+
+            print('tileDataset: ', len(tileDataset))
+            # calculate the stitched segmentation extent, since patchify cuts off the last patch if it does not fit exactly into the window
+            startX = (patchImgSize - patchSegmSize) // 2; startY = startX
+            endX = segmentationPatchStride * (smallOverlappingPatches.shape[0]-1) + patchSegmSize + startX
+            endY = segmentationPatchStride * (smallOverlappingPatches.shape[1]-1) + patchSegmSize + startY
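+            # window arithmetic sketch, assuming the defaults used in this repo (patchImgSize = 640,
+            # patchSegmSize = 516, strideProportion = 0.5): startX = (640 - 516) // 2 = 62, stride = 258,
+            # so with n patches per axis the stitched map covers endX - startX = 258 * (n - 1) + 516 pixels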
+
+            bigPatchResults = torch.zeros(device=device, size=(ftChannelsOutput, endX - startX, endY - startY))
+
+            # create dataloader for concurrent prediction computation
+            print('Create Dataloader.')
+            dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize, shuffle=False)
+            print('Run Inference!')
+            with torch.no_grad():
+                for i, data in enumerate(dataloader, 0):
+                    imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+                    prediction = torch.softmax(model(imgBatch), dim=1)  # shape: (minibatchSize, 8, 516, 516)
+                    if applyTestTimeAugmentation:
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(2)
+
+                        imgBatch = imgBatch.flip(3)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3).flip(2)
+
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3)
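+                        # the augmentation above sums softmax scores over four orientations: identity, flip along
+                        # dim 2, flip along dims 2+3, and flip along dim 3; each prediction is flipped back to the
+                        # original orientation before accumulation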
+
+                    if centerWeighting:
+                        prediction[:, :, patchSegmSize // 4: patchSegmSize // 4 * 3, patchSegmSize // 4: patchSegmSize // 4 * 3] *= centerWeight
+
+                    # prediction = prediction.to("cpu")
+                    # print('Add prediction results to bigPatchResults.')
+                    for n, d in zip(data['name'], prediction):
+                        x = int(n.split('-')[0])
+                        y = int(n.split('-')[1])
+                        bigPatchResults[:, x * segmentationPatchStride: x * segmentationPatchStride + patchSegmSize, y * segmentationPatchStride: y * segmentationPatchStride + patchSegmSize] = d
+
+            bigPatchResults = torch.argmax(bigPatchResults, 0).byte().cpu().numpy()  # scipy/skimage postprocessing below needs a numpy array, not a CUDA tensor
+            print(bigPatchResults.shape)
+            logger.info('Predictions generated. Final shape: '+str(bigPatchResults.shape))
+
+            # Context margin and border patches not fully inside the image are removed
+            imgWSI = imgWSI[startX:endX, startY:endY, :]
+                # segMap = segMap[startX:endX, startY:endY]
+                # bgMap = np.logical_not(segMap)
+
+                        # Save cropped foreground segmentation result as overlay
+                        # if saveCroppedWSIimg:
+                        #     logger.info('Saving cropped segmented WSI image...')
+                        #     saveImage(img_WSI, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+ i +'_'+str(regionID)+'_fgWSI_({}_{}_{}).png'.format(bbox[0], bbox[1], spacings[0]), figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight))
+
+                        # # correct foreground segmentation including all touching vein prediction instances
+                        # bigPatchResults[bgMap] = 4 #vein class assignment of bg
+                        # temp = bigPatchResults == 4
+                        # bgMap = np.logical_xor(clear_border(temp), temp)
+                        # segMap = np.logical_not(bgMap)
+                        # segMap = binary_fill_holes(segMap)
+                        # bgMap = np.logical_not(segMap)
+
+                        # remove small fg components
+                        # temp, numberLabeledRegions = label(segMap, struc3)
+                        # if numberLabeledRegions > 1:
+                        #     regionMinPixels = regionMinSizeUM / (targetSpacing * targetSpacing)
+                        #     regionIDs = np.where(np.array([region.area for region in regionprops(temp)]) > regionMinSizeUM)[0] + 1
+                        #     segMap = np.isin(temp, regionIDs)
+                        #     bgMap = np.logical_not(segMap)
+
+                        # bigPatchResults[bgMap] = labelBG # color of label 'labelBG' => Purple represents BG just for visualization purposes
+
+            # logger.info('Saving prediction and background overlay results...')
+            savePredictionOverlayResults(imgWSI, bigPatchResults, resultsDir + '/' + fname[:suffixCut] +'_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy img...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_resultWSI.npy', imgWSI)
+
+            logger.info('Start postprocessing...')
+
+            # remove border class
+            bigPatchResults[bigPatchResults == 7] = 0
+
+            # Delete BG to reduce postprocessing overhead
+            # bigPatchResults[bgMap] = 0
+
+            ################# HOLE FILLING ################
+            bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+            bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+            temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+            bigPatchResults[temp] = 3  # tuft
+            temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+            bigPatchResults[temp] = 6  # artery_lumen
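+            # the fill order matters: tuft (3) holes are saved in 'temp' before glom+tuft (2/3) are filled as one
+            # region labelled 2, then the tuft pixels are restored; the same buffering keeps artery lumen (6)
+            # intact inside the filled full artery (5)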
+
+            ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+            temp, _ = label(bigPatchResults == 1)
+            finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+            ############ PERFORM TUBULE DILATION ############
+            finalResults_Instance, numberTubuli = label(finalResults_Instance) #dtype: int32
+            finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
+            if numberTubuli < 65500:
+                finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+            else:
+                finalResults_Instance = np.asarray(cv2.dilate(np.asarray(finalResults_Instance, np.float64), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1), np.int32)
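+            # cv2.dilate does not accept int32 label images: below 65500 instances the labels (plus ID offset)
+            # still fit into uint16, otherwise they are dilated as float64 and cast back to int32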
+
+            temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+            finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+            temp, _ = label(bigPatchResults == 3)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+            temp, _ = label(bigPatchResults == 4)
+            finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+            temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+            finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+            temp, _ = label(bigPatchResults == 6)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+            # finalResults_Instance = finalResults_Instance * segMap    
+
+            logger.info('Done - Save final instance overlay results...')
+            savePredictionOverlayResults(imgWSI, finalResults_Instance, resultsDir + '/' + fname[:suffixCut] +'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            logger.info('Done - Save final non-instance overlay results...')
+            finalResults = finalResults_Instance.copy()
+            finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+            savePredictionOverlayResults(imgWSI, finalResults, resultsDir + '/' + fname[:suffixCut]+'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            # finalResults_Instance[bgMap] = labelBG
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy final instance prediction results...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_finalInstancePrediction.npy', finalResults_Instance)
+
+            del imgWSI
+            gc.collect()
+        logger.info('####################')
+
+    # break
+
+# except:
+#     logger.exception('! Exception !')
+#     raise
+
+log.info('%%%% Ended regularly ! %%%%')
diff --git a/segment_WSI_nnunetv2.py b/segment_WSI_nnunetv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..88a708d3d1efbbc35f6eabceffab7a50efbfb9ce
--- /dev/null
+++ b/segment_WSI_nnunetv2.py
@@ -0,0 +1,546 @@
+# This file recursively performs automated segmentation of WSIs, applying the tissue segmentation and structure segmentation CNNs to all WSIs contained in a specified folder.
+
+import numpy as np
+import os
+import sys
+import cv2
+import torch
+import math
+import logging as log
+from tqdm import tqdm, trange
+import re
+
+# from openslide.lowlevel import OpenSlideError
+import openslide as osl
+from ome_types import from_xml
+from PIL import Image
+import matplotlib.pyplot as plt
+from scipy.ndimage import label
+from scipy.ndimage import zoom
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+import scipy as sp
+from skimage.transform import rescale, resize
+from skimage.measure import regionprops
+from skimage.segmentation import flood, flood_fill
+from skimage.color import rgb2gray
+from skimage.segmentation import clear_border
+from skimage import filters
+from skimage.morphology import remove_small_objects
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
+from model import Custom
+
+from pathlib import Path
+
+import nnunetv2
+from batchgenerators.dataloading.data_loader import DataLoader
+from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
+from batchgenerators.transforms.utility_transforms import NumpyToTensor
+from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \
+    save_json
+from nnunetv2.configuration import default_num_processes
+from nnunetv2.inference.export_prediction import export_prediction_from_softmax
+from nnunetv2.inference.sliding_window_prediction import predict_sliding_window_return_logits, compute_gaussian
+from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
+from nnunetv2.utilities.file_path_utilities import get_output_folder, should_i_save_to_file, check_workers_busy
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.json_export import recursive_fix_for_json_export
+from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels, convert_labelmap_to_one_hot
+from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
+from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
+
+from tifffile import imread
+# from aicsimageio import AICSImage
+import gc
+
+
+def load_what_we_need(model_training_output_dir, use_folds, checkpoint_name):
+    # we could also load plans and dataset_json from the init arguments in the checkpoint. Not quite sure what is the
+    # best method so we leave things as they are for the moment.
+    dataset_json = load_json(join(model_training_output_dir, 'dataset.json'))
+    plans = load_json(join(model_training_output_dir, 'plans.json'))
+    plans_manager = PlansManager(plans)
+
+    if isinstance(use_folds, str):
+        use_folds = [use_folds]
+
+    parameters = []
+    for i, f in enumerate(use_folds):
+        f = int(f) if f != 'all' else f
+        checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name),
+                                map_location=torch.device('cpu'))
+        if i == 0:
+            trainer_name = checkpoint['trainer_name']
+            configuration_name = checkpoint['init_args']['configuration']
+            inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \
+                'inference_allowed_mirroring_axes' in checkpoint.keys() else None
+
+        parameters.append(checkpoint['network_weights'])
+
+    configuration_manager = plans_manager.get_configuration(configuration_name)
+    # restore network
+    num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json)
+    trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"),
+                                                trainer_name, 'nnunetv2.training.nnUNetTrainer')
+    network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager,
+                                                       num_input_channels, enable_deep_supervision=False)
+    return parameters, configuration_manager, inference_allowed_mirroring_axes, plans_manager, dataset_json, network, trainer_name
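+# NOTE: one state dict per requested fold is returned in 'parameters'; this script later loads only
+# parameters[0], so multi-fold ensembling (averaging predictions over folds) is not performed here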
+
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/p21_kidrightp789_B10_PAS'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/datasets/Cooperations/MarkusRinschen/'#'SPECIFIED WSI FOLDER'
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/datasets/DeepGraft/Aachen_Biopsy_Slides_Extended'#'SPECIFIED WSI FOLDER'
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+resultsPath = '/homeStor1/ylan/data/MarkusRinschen/debug/' #'RESULTS PATH'
+# resultsPath = WSIrootFolder
+
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+
+# model_FG_path = 'TISSUE SEGMENTATION MODEL PATH'
+
+patchSegmSize = 516
+patchImgSize = 640
+patchLengthUM = 174.
+
+patchImgSize_FG = 512
+patchLengthUM_FG = 1500 #2500
+regionMinSizeUM = 3E5
+
+alpha = 0.3
+strideProportion = 0.5
+figHeight = 15
+minibatchSize = 2
+minibatchSize_FG = 1
+useAllGPUs = False
+GPUno = 0
+device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu")
+
+tubuliInstanceID_StartsWith = 10  # => tubuli instances start with id 10
+
+ftChannelsOutput = 8
+applyTestTimeAugmentation = True
+centerWeighting = False
+centerWeight = 3.
+
+saveWSICoarseForegroundSegmResults = True
+# saveCroppedForegroundSegmResults = False
+saveCroppedWSIimg = True
+saveWSIandPredNumpy = True
+
+TUBULI_MIN_SIZE = 400
+GLOM_MIN_SIZE = 1500
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 20
+
+labelBG = 8
+
+# LOAD STRUCTURE SEGMENTATION MODEL
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model.train(False)
+model.eval()
+
+if useAllGPUs:
+    model = torch.nn.DataParallel(model)  # multi-GPUs
+model = model.to(device)
+
+# LOAD TISSUE SEGMENTATION MODEL
+use_folds = [0]
+model_training_output_dir = '/homeStor1/ylan/data/nnUNet/nnUNet_results/Dataset555_TissueDetection/nnUNetTrainer__nnUNetPlans__2d'
+checkpoint_name = 'checkpoint_best.pth'
+
+parameters, configuration_manager, inference_allowed_mirroring_axes, \
+        plans_manager, dataset_json, network, trainer_name = load_what_we_need(model_training_output_dir, use_folds, checkpoint_name)
+# model = network
+model_FG = network
+network.load_state_dict(parameters[0])
+
+
+# model_FG = torch.load(model_FG_path)
+model_FG = model_FG.to(device)
+model_FG.eval()
+
+segmentationPatchStride = int(patchSegmSize * strideProportion)
+targetSpacing = patchLengthUM / patchSegmSize
+
+targetSpacing_FG = patchLengthUM_FG / patchImgSize_FG
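+# worked example with the constants above: targetSpacing = 174. / 516 ≈ 0.337 µm/px for the structure network,
+# and targetSpacing_FG = 1500 / 512 ≈ 2.93 µm/px for the much coarser tissue/foreground network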
+
+shiftMinUM = (patchImgSize - patchSegmSize) // 2 * targetSpacing + 2 # + 2 just for sufficient margin reasons
+shiftMaxUM = ((patchImgSize - patchSegmSize) // 2 + patchSegmSize * strideProportion) * targetSpacing
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsPath + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+struc3 = generate_ball(1)
+
+# walk through the directory and its subdirectories recursively
+for dirName, subdirList, fileList in os.walk(WSIrootFolder):
+
+    # print(fileList)
+    # filter WSIs in current directory
+    fileListWSI = sorted([fname for fname in fileList if (fname.endswith('.svs') or fname.endswith('.ndpi') or fname.endswith('.scn') or fname.endswith('.tif') or fname.endswith('.ome.tif')) and 'PAS' in fname])
+    # fileListWSI = ['Aachen_KiBiDatabase_KiBiAcADEZ140_01_001_PAS.svs']
+    if len(fileListWSI) != 0:
+        logger.info(str(len(fileListWSI)) + ' WSIs to be analyzed in directory: ' + dirName)
+
+        resultsDir = resultsPath + dirName[len(WSIrootFolder):]
+        resultsDirNPYfiles = resultsDir + '/npyFiles'
+        if not os.path.exists(resultsDirNPYfiles):
+            os.makedirs(resultsDirNPYfiles)
+
+        # traverse through all found WSIs
+        for no, fname in tqdm(enumerate(fileListWSI)):
+            print(fname)
+            # Extract/print relevant parameters
+                # try:
+                # except:
+                #     logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+
+            # img_WSI = imread(os.path.join(dirName,fname))
+            # spacings = np.array([0.2197, 0.2197])
+            # spacings = 
+            try:
+                slide = osl.OpenSlide(os.path.join(dirName, fname))
+                logger.info(str(no + 1) + ':  WSI:\t' + fname)
+                if 'openslide.mpp-x' in slide.properties.keys():
+                    spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+                else: 
+                    x_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_x
+                    y_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_y
+                    spacings = np.array([float(x_spacing), float(y_spacing)])
+            except Exception:
+                logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+                continue
+            levelDims = np.array(slide.level_dimensions)
+            amountLevels = len(levelDims)
+            levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+            logger.info('Spacings: ' + str(spacings))
+            logger.info('Level Dimensions: ' + str(levelDims))
+            logger.info('Level Downsamples: '+str(levelDownsamples))
+
+            suffixCut = -5 if fname.split('.')[-1] == 'ndpi' else -4
+            if fname.split('.')[-1] == 'tif':
+                suffixCut = -8
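+            # suffixCut strips the extension from fname: 5 characters for '.ndpi', 8 for '.ome.tif' (the 'tif'
+            # branch), 4 otherwise; note that a plain '.tif' file would also lose 8 characters here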
+
+
+
+            # NOTE: the original plan of picking the WSI level closest to the tissue network's target spacing
+            # is commented out below; instead the slide is read at full resolution (level 0) and resized
+            # directly to the structure network's target spacing
+            spacingFactorX = spacings[0] / targetSpacing
+            spacingFactorY = spacings[1] / targetSpacing
+            # x_scaled = round(levelDims[0][0] * spacingFactorX)
+            # y_scaled = round(levelDims[0][1] * spacingFactorY)
+
+            # usedLevel = np.argwhere(np.all(levelDims > [x_scaled, y_scaled], 1) == True).max()
+
+            # # resample to target spacing
+            # logger.info('Image size resampled to FG spacing would be {}, {}, thus level {} with resolution {} chosen as resampling point!'.format(x_scaled, y_scaled, usedLevel, levelDims[usedLevel]))
+            # spacingOnUsedLevelX = spacings[0] * levelDownsamples[usedLevel]
+            # spacingOnUsedLevelY = spacings[1] * levelDownsamples[usedLevel]
+            # downsamplingFactorX = spacingOnUsedLevelX / targetSpacing_FG
+            # downsamplingFactorY = spacingOnUsedLevelY / targetSpacing_FG
+            np_slide = np.array(slide.read_region(location=np.array([0, 0]), level=0, size=levelDims[0]))[:, :, :3]
+
+            # d1 = int(round(imgWSI.shape[1] * downsamplingFactorX))
+            # d2 = int(round(imgWSI.shape[0] * downsamplingFactorY))
+
+            # img_shape = slide.pages[0].shape
+            # print(spacingFactorX, spacingFactorY)
+            # print(targetSpacing_FG)
+            # print(slide.shape)
+            # print(slide.shape[0])
+            d1 = int(round(np_slide.shape[1] * spacingFactorX)) 
+            d2 = int(round(np_slide.shape[0] * spacingFactorY))
+            # # print(d1, d2)
+
+            imgWSI = cv2.resize(np_slide, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR) #dtype: uint8, size: d2 x d1 x 3
+            del np_slide
+            gc.collect()
+
+            print(imgWSI.shape)
+            # imgWSIzeropadded = np.zeros(shape=(d2+patchImgSize_FG-1, d1+patchImgSize_FG-1, 3), dtype=np.float32)
+            # imgWSIzeropadded[:d2,:d1,:] = imgWSI
+
+            # tesselate resampled image
+            # smallOverlappingPatches = patchify(imgWSIzeropadded, patch_size=(patchImgSize_FG, patchImgSize_FG, 3), step=patchImgSize_FG) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+
+            # tileDataset = []
+            # # with tqdm(total=smallOverlappingPatches.shape[0] * smallOverlappingPatches.shape[1]) as pbar:
+            # for i in range(smallOverlappingPatches.shape[0]):
+            #     for j in range(smallOverlappingPatches.shape[1]):
+            #         tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+            #             # pbar.update(1)
+
+            # img_mask = np.zeros(shape=(imgWSIzeropadded.shape[0],imgWSIzeropadded.shape[1]), dtype=bool)
+
+            # create dataloader for concurrent tile processing and prediction computation
+            # dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize_FG, shuffle=False)
+            # with torch.no_grad():
+            #     for i, data in enumerate(dataloader, 0):
+            #         imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+            #         prediction = model_FG(imgBatch)  # prediction should have shape (1,2,512,512)
+            #         prediction = (prediction[:,1,:,:] > prediction[:,0,:,:]).to("cpu").numpy()
+
+            #         for n, d in zip(data['name'], prediction):
+            #             x = int(n.split('-')[0])
+            #             y = int(n.split('-')[1])
+            #             img_mask[x * patchImgSize_FG: (x+1) * patchImgSize_FG, y * patchImgSize_FG: (y+1) * patchImgSize_FG] = d
+
+
+            
+
+            # img_mask = img_mask[:d2,:d1]
+
+            # print('img_mask_shape: ', img_mask.shape)
+            # ###########################################################
+
+            # # postprocessing
+            # img_mask = binary_fill_holes(img_mask)
+
+            # # remove connected regions if too small
+            # regionMinPixels = regionMinSizeUM / (targetSpacing_FG * targetSpacing_FG)
+            # img_mask, _ = label(img_mask)
+            # labeledRegions, numberFGRegions = label(remove_small_objects(img_mask, min_size=regionMinPixels))
+            # if numberFGRegions < 256:
+            #     labeledRegions = np.asarray(labeledRegions, np.uint8)
+
+            # if saveWSICoarseForegroundSegmResults:
+            #     logger.info('Saving WSI-level coarse FG segmentation results...')
+            #     savePredictionOverlayResults(imgWSI, labeledRegions, alpha=alpha, figSize=(labeledRegions.shape[1]/labeledRegions.shape[0]*figHeight, figHeight), fullResultPath=resultsDir + '/' + fname[:suffixCut] + '_0_fgSeg.png')
+
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(slide.shape[0],slide.shape[1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(levelDims[0][0],levelDims[0][1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+
+            # logger.info('In total -> '+str(numberFGRegions)+' <- regions on WSI detected!')
+
+            # process all detected tissue regions separately 
+            # for regionID in range(1, numberFGRegions+1):
+                # logger.info('#######\n Extract foreground region ' + str(regionID) + '...')
+                # detectedRegion = labeledRegions == regionID
+
+                # # compute bounding box and how much to enlarge bbox to consider wider context utilization (especially for patchify)
+                # temp = np.where(detectedRegion == 1)
+                # bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+                # shiftMin = round(shiftMinUM / spacings[0])
+                # shiftMax = round(shiftMaxUM / spacings[0])
+
+                # # enlarge bounding box due to wider context consideration
+                # bbox[0] = max(bbox[0] - shiftMin, 0)
+                # bbox[1] = max(bbox[1] - shiftMin, 0)
+                # bbox[2] = min(bbox[2] + shiftMax, detectedRegion.shape[0] - 1) + 1
+                # bbox[3] = min(bbox[3] + shiftMax, detectedRegion.shape[1] - 1) + 1
+                # print(bbox)
+
+                # logger.info('Extract high res patch and segm map...')
+                # img_WSI = 
+                # try:
+                #     img_WSI = np.asarray(np.array(slide.read_region(location=np.array([bbox[1], bbox[0]]), level=0, size=np.array([bbox[3] - bbox[1], bbox[2] - bbox[0]])))[:, :, :3], np.uint8)
+                # except OpenSlideError:
+                #     logger.info('#################################### FILE CORRUPTED - IGNORED ####################################')
+                #     continue
+                # detectedRegion = detectedRegion[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+
+                # extract image and resample into target spacing of the structure segmentation network
+                # downsamplingFactor = spacings[0] / targetSpacing  # Rescaling would be very slow using 'rescale' method!
+                # logger.info('Utilized spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+                # segMap = np.asarray(zoom(detectedRegion, downsamplingFactor, order=0), np.bool)
+                # img_WSI = cv2.resize(img_WSI, dsize=tuple(np.flip(segMap.shape)), interpolation=cv2.INTER_LINEAR)
+                # # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+                # assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via zoom/resize led to unequal resolutions..."
+                # logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+                # if np.min(segMap.shape) < patchImgSize:
+                #     logger.info('Detected region smaller than window, thus skipped...')
+                #     continue
+
+                ##### PREPROCESSING DONE - NOW: NETWORK SEGMENTATION PART #####
+                # logger.info('Start segmentation process...')
+
+                # preprocess img
+            imgWSI = np.array((imgWSI / 255. - 0.5) / 0.5, np.float32)
+            
+
+            # tessellate the image and tissue prediction results
+            print('Patchify!')
+            smallOverlappingPatches = patchify(imgWSI, patch_size=(patchImgSize, patchImgSize, 3), step=segmentationPatchStride) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+            # smallOverlappingPatches_FG = patchify(segMap.copy(), patch_size=(patchImgSize, patchImgSize), step=segmentationPatchStride)
+
+            tileDataset = []
+            for i in range(smallOverlappingPatches.shape[0]):
+                for j in range(smallOverlappingPatches.shape[1]):
+                    # if smallOverlappingPatches_FG[i,j,:,:].any():
+                    tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+
+            print('tileDataset: ', len(tileDataset))
+            # calculate the stitched segmentation extent, since patchify cuts off the last patch if it does not fit exactly into the window
+            startX = (patchImgSize - patchSegmSize) // 2; startY = startX
+            endX = segmentationPatchStride * (smallOverlappingPatches.shape[0]-1) + patchSegmSize + startX
+            endY = segmentationPatchStride * (smallOverlappingPatches.shape[1]-1) + patchSegmSize + startY
+
+            bigPatchResults = torch.zeros(device='cpu', size=(ftChannelsOutput, endX - startX, endY - startY))
+
+            # create dataloader for concurrent prediction computation
+            print('Create Dataloader.')
+            logger.info('Create Dataloader.')
+
+            dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize, shuffle=False)
+            print('Run Inference!')
+            logger.info('Run Inference!')
+
+            with torch.no_grad():
+                for i, data in enumerate(dataloader, 0):
+                    imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+
+                    prediction = torch.softmax(model(imgBatch), dim=1)  # shape: (minibatchSize, 8, 516, 516)
+                    if applyTestTimeAugmentation:
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(2)
+
+                        imgBatch = imgBatch.flip(3)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3).flip(2)
+
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3)
+
+                    if centerWeighting:
+                        prediction[:, :, patchSegmSize // 4: patchSegmSize // 4 * 3, patchSegmSize // 4: patchSegmSize // 4 * 3] *= centerWeight
+
+                    prediction = prediction.to("cpu")
+                    # print('Add prediction results to bigPatchResults.')
+                    for n, d in zip(data['name'], prediction):
+                        x = int(n.split('-')[0])
+                        y = int(n.split('-')[1])
+                        bigPatchResults[:, x * segmentationPatchStride: x * segmentationPatchStride + patchSegmSize, y * segmentationPatchStride: y * segmentationPatchStride + patchSegmSize] = d
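+                        # NOTE: with strideProportion = 0.5 neighbouring windows overlap by half, and this assignment
+                        # overwrites (rather than averages) the previously written window's scores in the overlap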
+
+
+
+            bigPatchResults = torch.argmax(bigPatchResults, 0).byte().cpu().numpy()  # convert to numpy for the scipy/skimage postprocessing below
+            print(bigPatchResults.shape)
+            logger.info('Predictions generated. Final shape: '+str(bigPatchResults.shape))
+
+            # Context margin and border patches not fully inside the image are removed
+            imgWSI = imgWSI[startX:endX, startY:endY, :]
+                # segMap = segMap[startX:endX, startY:endY]
+                # bgMap = np.logical_not(segMap)
+
+                        # Save cropped foreground segmentation result as overlay
+                        # if saveCroppedWSIimg:
+                        #     logger.info('Saving cropped segmented WSI image...')
+                        #     saveImage(img_WSI, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+ i +'_'+str(regionID)+'_fgWSI_({}_{}_{}).png'.format(bbox[0], bbox[1], spacings[0]), figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight))
+
+                        # # correct foreground segmentation including all touching vein prediction instances
+                        # bigPatchResults[bgMap] = 4 #vein class assignment of bg
+                        # temp = bigPatchResults == 4
+                        # bgMap = np.logical_xor(clear_border(temp), temp)
+                        # segMap = np.logical_not(bgMap)
+                        # segMap = binary_fill_holes(segMap)
+                        # bgMap = np.logical_not(segMap)
+
+                        # remove small fg components
+                        # temp, numberLabeledRegions = label(segMap, struc3)
+                        # if numberLabeledRegions > 1:
+                        #     regionMinPixels = regionMinSizeUM / (targetSpacing * targetSpacing)
+                        #     regionIDs = np.where(np.array([region.area for region in regionprops(temp)]) > regionMinSizeUM)[0] + 1
+                        #     segMap = np.isin(temp, regionIDs)
+                        #     bgMap = np.logical_not(segMap)
+
+                        # bigPatchResults[bgMap] = labelBG # color of label 'labelBG' => Purple represents BG just for visualization purposes
+
+            # logger.info('Saving prediction and background overlay results...')
+            savePredictionOverlayResults(imgWSI, bigPatchResults, resultsDir + '/' + fname[:suffixCut] +'_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy img...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_resultWSI.npy', imgWSI)
+
+            logger.info('Start postprocessing...')
+
+            # remove border class
+            bigPatchResults[bigPatchResults == 7] = 0
+
+            # Delete BG to reduce postprocessing overhead
+            # bigPatchResults[bgMap] = 0
+
+            ################# HOLE FILLING ################
+            bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+            bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+            temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+            bigPatchResults[temp] = 3  # tuft
+            temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+            bigPatchResults[temp] = 6  # artery_lumen
+
+            ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+            temp, _ = label(bigPatchResults == 1)
+            finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+            ############ PERFORM TUBULE DILATION ############
+            finalResults_Instance, numberTubuli = label(finalResults_Instance) #dtype: int32
+            finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
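+            # instance IDs are offset so tubule instances start at tubuliInstanceID_StartsWith (10) and cannot
+            # collide with the semantic class labels 0-8 used elsewhere in the result map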
+            if numberTubuli < 65500:
+                finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+            else:
+                finalResults_Instance = np.asarray(cv2.dilate(np.asarray(finalResults_Instance, np.float64), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1), np.int32)
+
+            temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+            finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+            temp, _ = label(bigPatchResults == 3)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+            temp, _ = label(bigPatchResults == 4)
+            finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+            temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+            finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+            temp, _ = label(bigPatchResults == 6)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+            # finalResults_Instance = finalResults_Instance * segMap    
+
+            logger.info('Done - Save final instance overlay results...')
+            savePredictionOverlayResults(imgWSI, finalResults_Instance, resultsDir + '/' + fname[:suffixCut] +'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy final instance prediction results...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_finalInstancePrediction.npy', finalResults_Instance)
+
+            logger.info('Done - Save final non-instance overlay results...')
+            finalResults = finalResults_Instance.copy()
+
+            finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+            savePredictionOverlayResults(imgWSI, finalResults, resultsDir + '/' + fname[:suffixCut]+'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            # finalResults_Instance[bgMap] = labelBG
+
+            
+
+            del imgWSI
+            gc.collect()
+        logger.info('####################')
+
+    # break  # debug leftover: an active break here would stop the recursive walk after the first directory
+
+# except:
+#     logger.exception('! Exception !')
+#     raise
+
+log.info('%%%% Ended regularly ! %%%%')
diff --git a/segment_WSI_noTD.py b/segment_WSI_noTD.py
new file mode 100644
index 0000000000000000000000000000000000000000..08718b2c43d978e1169fa7b66b869031b1e09d91
--- /dev/null
+++ b/segment_WSI_noTD.py
@@ -0,0 +1,569 @@
+import numpy as np
+import os
+import sys
+import logging as log
+from PIL import Image
+from scipy.ndimage import label
+from skimage.color import rgb2gray
+import openslide as osl
+from openslide.lowlevel import OpenSlideError
+from skimage.transform import rescale, resize
+from scipy.ndimage import zoom
+import matplotlib.pyplot as plt
+from skimage.segmentation import flood, flood_fill
+from skimage import filters
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+from skimage.measure import regionprops
+from skimage.morphology import remove_small_objects
+import cv2
+
+
+import shutil
+from pathlib import Path
+import torch
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, savePredictionResultsWithoutDilation, savePredictionResults, saveRGBPredictionOverlayResults, convert_labelmap_to_rgb_with_instance_first_class, saveImage
+from model import Custom
+from tqdm import tqdm
+
+from torchvision.transforms import Resize
+
+from joblib import Parallel, delayed
+import multiprocessing
+
+
+def writeOutPatchesRowwise(x):
+    for y in range(extractedPatches.shape[1]):
+        if extractedSegm[x, y].sum() > (minFGproportion * imageResolution * imageResolution):
+            Image.fromarray(extractedPatches[x, y, 0]).save(
+                resultsPath + '/' + WSIpath[:suffixCut] + '_' + str(x) + '_' + str(y) + '_' + str(i + 1) + '.png')
+    return
+
+
+TUBULI_MIN_SIZE = 400
+GLOM_MIN_SIZE = 1500
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 20
+
+tubuliInstanceID_StartsWith = 10
+
+stain = 'NGAL' #PAS, aSMA, Col III, NGAL, Fibronectin, Meca-32, CD44, F4-80, CD31, AFOG
+# WSIfolder = '/work/scratch/bouteldja/Data_ActivePAS'
+# WSIfolder = '/images/ACTIVE/2015-04_Boor/Nassim_2019/kidney histology'
+WSIfolder = '/homeStor1/ylan/data/MarkusRinschen/640_174uM_annotated/pig/BLOCKS'
+# WSIfolder = '/homeStor1/ylan/data/MarkusRinschen/640_174uM_annotated_0.5/pig/TEST'
+
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+
+# Patchfolder = '/'
+imageSizeUM = 174
+imageResolution = 640
+strideProportion = 1.0
+minFGproportion = 0.5
+
+
+
+
+# resultsPath = '/work/scratch/bouteldja/Data_StainTranslation/'+stain.replace(" ", "")
+resultsPath = os.path.join(WSIfolder, 'flash') 
+# resultsPath = str(Path(WSIfolder).parent / 'flash')
+resultsForegroundSegmentation = resultsPath + '/FGsegm'
+resultsDirNPYfiles = resultsPath + '/npy'
+
+if os.path.exists(resultsForegroundSegmentation):
+    shutil.rmtree(resultsForegroundSegmentation)
+# if os.path.exists(resultsDirNPYfiles):
+#     shutil.rmtree(resultsDirNPYfiles)
+
+onlyPerformForegroundSegmentation = True
+
+saveWSICoarseForegroundSegmResults = True
+saveWSICoarseForegroundSegmResults_RegionSeparate = False
+saveWSICroppedForegroundSegmResults = False
+alpha = 0.3
+figHeight = 40
+
+targetSpacing = imageSizeUM / imageResolution
+shiftUM = imageSizeUM // 3
+
+struc3 = generate_ball(1)
+struc5 = generate_ball(2)
+struc7 = generate_ball(3)
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+if not os.path.exists(resultsForegroundSegmentation):
+    os.makedirs(resultsForegroundSegmentation)
+if not os.path.exists(resultsDirNPYfiles):
+    os.makedirs(resultsDirNPYfiles)
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsForegroundSegmentation + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+
+files = sorted(list(filter(lambda x: ('.ndpi' in x or '.svs' in x) and stain in x, os.listdir(WSIfolder))))
+# files = sorted(list(filter(lambda x: '.ndpi' in x or '.svs' in x, os.listdir(WSIfolder))))
+logger.info('Amount of WSIs in folder: ' + str(len(files)))
+
+num_cores = multiprocessing.cpu_count()
+
+detectedRegions = []
+
+minibatchSize = 64
+ftChannelsOutput = 8
+segSize = 516
+device = torch.device("cuda:0"  if torch.cuda.is_available() else "cpu")
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model = model.to(device)
+model.train(False)
+model.eval()
+
+directories = list(Path(WSIfolder).iterdir())
+# print(directories)
+
+for slide in directories:
+    # print(slide.stem)
+    
+    slide_name = slide.stem + f'_scene_{str(slide)[-1]}'
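+    # NOTE: debug filter below restricts this run to a single hard-coded slide; drop the check to process all slides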
+    if slide_name != 'p21_kidrightp131415_B10_PAS_scene_2':
+        continue
+    print(slide_name)
+    resultsDir = os.path.join(resultsPath, slide_name)
+    tempNPYfiles = os.path.join(resultsDirNPYfiles, slide_name)
+
+
+    tileDataset = []
+    x_max = 0
+    y_max = 0
+    x_min = len(list(slide.iterdir()))
+    y_min = x_min
+
+
+    ########################################################
+
+    for patch_path in tqdm(list(slide.iterdir())):
+        name = patch_path.stem
+        # print(name)
+        if name != 'p21_kidrightp131415_B10_PAS.czi - Scene #2_(6-53)':
+            continue
+        coords = np.array(name.split('_')[-1][1:-1].split('-'), int)
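+        # patch filenames are assumed to end in '_(a-b)': e.g. '(6-53)' parses to coords = [6, 53], where
+        # coords[0] serves as the y tile index and coords[1] as the x tile index below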
+
+        # find max:
+        # x_max = coords[0] if coords[0] > x_max
+
+        if coords[0] > y_max:
+            y_max = coords[0]
+        if coords[1] > x_max:
+            x_max = coords[1]
+        if coords[0] < y_min:
+            y_min = coords[0]
+        if coords[1] < x_min:
+            x_min = coords[1]
+
+
+
+        patch = np.asarray(Image.open(patch_path)).astype(np.float32)
+        # print(patch.shape)
+        patch = torch.from_numpy(patch)
+        tileDataset.append({'data': patch, 'coords': coords})
+    ########################################################
+
+    # print(x_min, x_max)
+    # print(y_min, y_max)
+    # print((x_max - x_min)*imageResolution)
+    # print((y_max - y_min)*imageResolution)
+    batch_size = 50
+    dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=batch_size, shuffle=False)
+    
+
+    bigPatchResults = torch.zeros(device='cpu', size=(ftChannelsOutput, (x_max+1 - x_min)*segSize, (y_max+1 - y_min)*segSize))
+    imgWSI = torch.zeros(device='cpu', size=(3, (x_max+1 - x_min)*segSize, (y_max+1 - y_min)*segSize))
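+    # the stitched canvases span the full tile grid: (x_max+1 - x_min) x (y_max+1 - y_min) tiles of segSize
+    # (516 px) each, since every 640-px input patch is resized to the 516-px network output size further down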
+
+    print(bigPatchResults.shape)
+    print(imgWSI.shape)
+
+
+    with torch.no_grad():
+        for i, data in tqdm(enumerate(dataloader, 0)):
+            imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+            prediction = torch.softmax(model(imgBatch), dim=1)
+            # print(prediction.shape)
+            target_size = (prediction.shape[2], prediction.shape[3])
+            imgBatch = Resize(size=target_size)(imgBatch)
+            
+            # for i in range(imgBatch.shape[0]):
+            #     img = imgBatch[i]
+            #     pred = prediction[i]
+
+                # d1 = int(img.shape[2] * (segSize / imageResolution))
+                # d2 = int(img.shape[1] * (segSize / imageResolution))
+
+                # img = Resize(size=(d1,d2))(img)
+                # print(img)
+
+                # print(img.shape)
+                # img = img.transpose(1,2,0)
+
+                # img = cv2.resize(img, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR).astype(np.uint8)
+                # print(img.shape)
+                # print(pred.shape)
+                # savePredictionOverlayResults(img.cpu().numpy().astype(np.uint8).transpose(1,2,0), torch.argmax(pred, 0).cpu().byte().numpy(), resultsDir + f'_resultOverlay_{i}.png', figSize=(pred.shape[1]/pred.shape[0]*figHeight, figHeight), alpha=alpha)
+                
+            for b in range(imgBatch.shape[0]):  # per-sample index within the minibatch ('i' above indexes dataloader batches)
+                n = data['coords'][b]
+                d = prediction[b]
+                img = imgBatch[b]
+                x = (n[1]-x_min)
+                y = (n[0]-y_min)
+                bigPatchResults[:, int(x*segSize*strideProportion): int((x*strideProportion +1)*segSize), int(y*segSize*strideProportion) : int((y*strideProportion+1)*segSize)] = d.cpu()
+                imgWSI[:, int(x*segSize*strideProportion): int((x*strideProportion +1)*segSize), int(y*segSize*strideProportion) : int((y*strideProportion+1)*segSize)] = img.cpu()
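+                # with strideProportion = 1.0 the tiles abut exactly: tile (x, y) fills rows
+                # [x*segSize, (x+1)*segSize) and columns [y*segSize, (y+1)*segSize) of both canvases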
+                # imgWSI[:, int(x*imageResolution*strideProportion):int(x*strideProportion*imageResolution + imageResolution), int(y*imageResolution*strideProportion):int(y*strideProportion*imageResolution+imageResolution)] = img.cpu()
+            # break
+    # break
+    # Collapse class probabilities to a hard label map and move everything to numpy
+    bigPatchResults = torch.argmax(bigPatchResults, 0).byte().cpu().numpy()
+    imgWSI = imgWSI.cpu().numpy().astype(np.uint8)
+
+    imgWSI = imgWSI.transpose(1, 2, 0)  # CHW -> HWC for saving/plotting
+
+    print(imgWSI.shape)
+    print(bigPatchResults.shape)
+
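+    # figSize preserves the canvas aspect ratio (width/height = shape[1]/shape[0]) at a fixed figHeight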
+    savePredictionOverlayResults(imgWSI, bigPatchResults, resultsDir + '_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+    # NOTE: the saveWSIandPredNumpy guard is disabled here, so the stitched image is always saved
+    logger.info('Saving numpy img...')
+    np.save(tempNPYfiles + '_resultWSI.npy', imgWSI)
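+    # The saved .npy canvases can be inspected offline (cf. show_npy.py / show_npy.ipynb)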
+
+    logger.info('Start postprocessing...')
+
+    # remove border class
+    bigPatchResults[bigPatchResults == 7] = 0
+
+    # Delete BG to reduce postprocessing overhead
+    # bigPatchResults[bgMap] = 0
+
+    ################# HOLE FILLING ################
+    bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+    bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+    temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+    bigPatchResults[temp] = 3  # tuft
+    temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+    bigPatchResults[temp] = 6  # artery_lumen
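+    # Nested structures are filled inner-first: the inner mask (tuft/lumen) is saved,
+    # the outer class is filled over the union, then the inner class is written back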
+
+    ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+    temp, _ = label(bigPatchResults == 1)
+    finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+    ############ PERFORM TUBULE DILATION ############
+    finalResults_Instance, numberTubuli = label(finalResults_Instance) #dtype: int32
+    finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
+    if numberTubuli < 65500:
+        finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+    else:
+        finalResults_Instance = np.asarray(cv2.dilate(np.asarray(finalResults_Instance, np.float64), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1), np.int32)
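+    # cv2.dilate supports uint16 but not int32 labels; close to the uint16 limit
+    # (65535) the labels are therefore dilated as float64 and cast back to int32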
+
+    temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+    finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+    temp, _ = label(bigPatchResults == 3)
+    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+    temp, _ = label(bigPatchResults == 4)
+    finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+    temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+    finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+    temp, _ = label(bigPatchResults == 6)
+    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
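+    # Tuft and artery lumen are only written where they lie inside their parent
+    # structure (glomerulus == 2, artery == 5), hence the logical_and guards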
+
+
+    # finalResults_Instance = finalResults_Instance * segMap    
+
+    logger.info('Done - Save final instance overlay results...')
+    savePredictionOverlayResults(imgWSI, finalResults_Instance, resultsDir +'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+    # if saveWSIandPredNumpy:
+    #     logger.info('Saving numpy final instance prediction results...')
+    #     np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_finalInstancePrediction.npy', finalResults_Instance)
+
+    logger.info('Done - Save final non-instance overlay results...')
+    finalResults = finalResults_Instance.copy()
+
+    # Collapse tubule instance IDs (which start at tubuliInstanceID_StartsWith) back to class 1
+    finalResults[finalResults >= tubuliInstanceID_StartsWith] = 1
+    savePredictionOverlayResults(imgWSI, finalResults, resultsDir +'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+    # Free the large canvases before the next slide
+    del finalResults
+    del finalResults_Instance
+    del imgWSI
+    del bigPatchResults
+
+# try:
+
+#     for no, WSIpath in enumerate(files):
+#         # Load slide
+#         slide = osl.OpenSlide(os.path.join(WSIfolder, WSIpath))
+
+#         # Extract/print relevant parameters
+#         spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+#         levelDims = np.array(slide.level_dimensions)
+#         amountLevels = len(levelDims)
+#         levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+#         logger.info(str(no + 1) + ':  WSI:\t' + WSIpath + ', Spacing:\t' + str(spacings) + ', levels:\t' + str(amountLevels))
+
+#         if WSIpath.split('.')[-1] == 'ndpi':
+#             suffixCut = -5
+#         else:
+#             suffixCut = -4
+
+#         # specify used wsi level for foreground segmentation: min. 500x500 pixels
+#         usedLevel = np.argwhere(np.all(levelDims > [500, 500], 1)).max()
+#         logger.info('Used level for foreground segmentation: ' + str(usedLevel))
+
+#         # extract image from that level
+#         imgRGB = np.array(slide.read_region(location=np.array([0, 0]), level=usedLevel, size=levelDims[usedLevel]))[:, :,:3]
+#         img = rgb2gray(imgRGB)
+
+#         # foreground segmentation
+#         otsu_threshold = filters.threshold_otsu(img)
+
+#         divideSizeFactor = 30
+
+#         if stain == 'PAS':
+#             if '17.40.53' in WSIpath:
+#                 otsu_threshold -= 0.065
+
+#         elif stain == 'aSMA':
+#             if '22.15.27' in WSIpath or '22.28.25' in WSIpath:
+#                 otsu_threshold -= 0.07
+#             if '20.19.57' in WSIpath or '16.59.43' in WSIpath:
+#                 otsu_threshold += 0.024
+
+#         elif stain == 'CD31':
+#             if '18.51.39' in WSIpath:
+#                 otsu_threshold += 0.02
+
+#         elif stain == 'Col III':
+#             if '21.25.37' in WSIpath or '21.42.09' in WSIpath:
+#                 otsu_threshold -= 0.1
+#             elif '2172_13' in WSIpath:
+#                 otsu_threshold += 0.05
+#             else:
+#                 otsu_threshold += 0.055
+
+#         elif stain == 'NGAL':
+#             if '21.40.59' in WSIpath or '21.35.56' in WSIpath:
+#                 otsu_threshold += 0.005
+#             elif '18.04.45' in WSIpath:
+#                 otsu_threshold += 0.07
+#             elif '21.46.20' in WSIpath:
+#                 otsu_threshold += 0.01
+#             else:
+#                 otsu_threshold += 0.05
+
+#         elif stain == 'Fibronectin':
+#             if '23.03.22' in WSIpath:
+#                 otsu_threshold -= 0.08
+#             elif '00.58.23' in WSIpath:
+#                 otsu_threshold += 0.02
+#             else:
+#                 otsu_threshold += 0.05
+
+#         elif stain == 'Meca-32':
+#             divideSizeFactor = 50
+
+#             if '1150-12' in WSIpath:
+#                 otsu_threshold -= 0.097
+#             elif '22.36.35' in WSIpath:
+#                 otsu_threshold -= 0.065
+#             elif '10.23.46' in WSIpath:
+#                 otsu_threshold += 0.05
+#             else:
+#                 otsu_threshold += 0.02
+
+#         elif stain == 'CD44':
+#             if '11.22.14' in WSIpath or '11.28.21' in WSIpath:
+#                 otsu_threshold += 0.085
+#             elif '11.41.12' in WSIpath:
+#                 otsu_threshold -= 0.06
+#             else:
+#                 otsu_threshold += 0.015
+
+
+
+
+#         img_mask = img < otsu_threshold
+#         # img_mask = img < 0.78
+#         logger.info('Utilized threshold: ' + str(otsu_threshold))
+
+#         if stain == 'NGAL' and '18.58.25' in WSIpath:
+#             img_mask[395:405,440:530] = 0
+
+#         # extract connected regions only with at least 1/25 size of WSI
+#         labeledRegions, numberRegions = label(img_mask, struc3)
+#         minRequiredSize = (img_mask.shape[0] * img_mask.shape[1]) // divideSizeFactor
+
+#         argRegions = []
+#         for i in range(1, numberRegions + 1):
+#             if (labeledRegions == i).sum() > minRequiredSize:
+#                 argRegions.append(i)
+
+#         finalWSI_FG = np.zeros(img_mask.shape, dtype=bool)
+
+#         # process these regions
+#         for i, arg in enumerate(argRegions):
+#             logger.info('Extract foreground region ' + str(i + 1) + '...')
+#             detectedRegion = labeledRegions == arg
+#             detectedRegion = binary_fill_holes(detectedRegion)
+#             detectedRegion = binary_opening(detectedRegion, structure=struc7)
+
+#             # extract biggest component
+#             labeledRegions2, numberLabeledRegions = label(detectedRegion, struc3)
+#             if numberLabeledRegions > 1:
+#                 argMax = np.array([region.area for region in regionprops(labeledRegions2)]).argmax() + 1
+#                 detectedRegion = labeledRegions2 == argMax
+
+#             # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+#             # Save foreground segmentation (on chosen coarse resolution level) as overlay
+#             if saveWSICoarseForegroundSegmResults_RegionSeparate:
+#                 saveOverlayResults(imgRGB, detectedRegion, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg.png')
+
+#             finalWSI_FG = np.logical_or(finalWSI_FG, detectedRegion)
+
+#             logger.info('Foreground segmentation done on coarse level...')
+
+#             if onlyPerformForegroundSegmentation:
+#                 continue
+
+#             # enlargement of foreground in order to fully cover border structures
+#             # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+#             # compute bounding box
+#             temp = np.where(detectedRegion == 1)
+
+#             bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+#             # compute how much to enlarge bbox to consider wider context utilization (especially for patchify)
+#             downsampleFactor = int(levelDownsamples[usedLevel])
+#             shift = round((shiftUM / spacings[0]) / downsampleFactor)
+
+#             # enlarge bounding box due to wider context consideration
+#             bbox[0] = max(bbox[0] - shift, 0)
+#             bbox[1] = max(bbox[1] - shift, 0)
+#             bbox[2] = min(bbox[2] + shift, detectedRegion.shape[0] - 1)
+#             bbox[3] = min(bbox[3] + shift, detectedRegion.shape[1] - 1)
+
+
+#             bbox_WSI = np.asarray(bbox * downsampleFactor, int)
+#             logger.info('High res bounding box coordinates: ' + str(bbox_WSI))
+
+#             logger.info('Extract high res patch and segm map...')
+#             try:
+#                 img_WSI = np.array(slide.read_region(location=np.array([bbox_WSI[1], bbox_WSI[0]]), level=0, size=np.array([bbox_WSI[3] - bbox_WSI[1] + downsampleFactor, bbox_WSI[2] - bbox_WSI[0] + downsampleFactor])))[:, :, :3]
+#             except OpenSlideError:
+#                 logger.info('############ FILE CORRUPTED - IGNORED ############')
+#                 continue
+
+#             segMap = zoom(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0)
+#             # segMap = rescale(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0, preserve_range=True, multichannel=False)
+#             assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale led to unequal resolutions..."
+#             logger.info('Done - size of extracted high res patch: ' + str(img_WSI.shape))
+
+#             downsamplingFactor = spacings[0] / targetSpacing  # Rescaling very slow!
+#             logger.info('Spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+#             img_WSI = np.asarray(np.round(rescale(img_WSI, downsamplingFactor, order=1, preserve_range=True, multichannel=True)), np.uint8)
+#             segMap = np.asarray(zoom(segMap, downsamplingFactor, order=0), bool)
+#             # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+#             assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale led to unequal resolutions..."
+#             logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+#             # Save cropped foreground segmentation result as overlay
+#             if saveWSICroppedForegroundSegmResults:
+#                 saveOverlayResults(img_WSI, segMap, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg2.png')
+
+
+#             ##### FOREGROUND SEGMENTATION DONE - NOW PATCH EXTRACTION USING PATCHIFY #####
+#             logger.info('Perform patch extraction...')
+
+#             extractedPatches = patchify(img_WSI.copy(), patch_size=(imageResolution, imageResolution, 3), step=int(imageResolution*strideProportion))  # shape: (5, 7, 1, 640, 640, 3)
+#             extractedSegm = patchify(segMap.copy(), patch_size=(imageResolution, imageResolution), step=int(imageResolution*strideProportion))  # shape: (5, 7, 640, 640)
+
+#             resultsLabel2 = Parallel(n_jobs=num_cores)(delayed(writeOutPatchesRowwise)(x) for x in range(extractedPatches.shape[0]))
+
+#             # for x in range(extractedPatches.shape[0]):
+#             #     for y in range(extractedPatches.shape[1]):
+#             #         if extractedSegm[x,y].sum() > (minFGproportion * imageResolution * imageResolution):
+#             #             Image.fromarray(extractedPatches[x,y,0]).save(resultsPath + '/' + WSIpath[:suffixCut] +'_'+str(x)+'_'+str(y)+'_'+str(i + 1)+'.png')
+
+#             logger.info('Done.')
+
+#         if saveWSICoarseForegroundSegmResults:
+#             saveOverlayResults(imgRGB, finalWSI_FG, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] + '_fgSeg_WSI.png')
+
+#         detectedRegions.append(len(argRegions))
+#         logger.info('####################')
+
+#     logger.info('Detected regions of all processed slides:')
+#     logger.info(detectedRegions)
+
+
+
+# except:
+#     logger.exception('! Exception !')
+#     raise
+
+# log.info('%%%% Ended regularly ! %%%%')
+
diff --git a/show_npy.ipynb b/show_npy.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..3566da25862b36f906fb9a88b18469d899fa74c2
--- /dev/null
+++ b/show_npy.ipynb
@@ -0,0 +1,176 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import os\n",
+    "from PIL import Image\n",
+    "import matplotlib.pylab as plt\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(34314, 31734)\n"
+     ]
+    }
+   ],
+   "source": [
+    "WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/npyFiles/'#'SPECIFIED WSI FOLDER'\n",
+    "# print(os.listdir(WSIrootFolder))\n",
+    "finalInstance = np.load(WSIrootFolder+f'p21_kidleftp456_B10_PAS_3_finalInstancePrediction.npy')\n",
+    "print(finalInstance.shape)\n",
+    "# resultWSI = np.load(WSIrootFolder+f'p21_kidleftp456_B10_PAS_3_resultWSI.npy')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "51649\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(finalInstance.max())\n",
+    "c = np.unique(finalInstance==3, return_index=True)\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(array([False,  True]), array([       0, 49018927]))\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(c)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[[False False False ... False False False]\n",
+      " [False False False ... False False False]\n",
+      " [False False False ... False False False]\n",
+      " ...\n",
+      " [False False False ... False False False]\n",
+      " [False False False ... False False False]\n",
+      " [False False False ... False False False]]\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "\n",
+    "# resized = np.resize(finalInstance, (1000, 1000))\n",
+    "# print(finalInstance==3)\n",
+    "channel = finalInstance == (1)\n",
+    "resized = np.resize(channel, (1000, 1000))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<matplotlib.image.AxesImage at 0x7f4fc41bab50>"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaIAAAGiCAYAAAClC8JvAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy89olMNAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAsQklEQVR4nO3de3TUdX7/8VcSmAGESbglIUtAEAUjAcotTFUqJSVAtKB4CkjdgIiFJhwhiiErC2h7Tix0u+Jyq8dTY89Z5NIjsIKEpsGEKuEWiRCQVNhosDABwWQgQq6f3x/+8i2zRJdA4EOW5+Oc79nM9/P+fr+f91zy2sl8BoOMMUYAAFgSbHsCAIC7G0EEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCqxQfRqlWrdO+996pNmzaKi4vT/v37bU8JANAELTqINmzYoNTUVC1ZskSfffaZBg4cqISEBJ09e9b21AAA1ymoJf+jp3FxcRo2bJhWrlwpSaqvr1d0dLTmzp2rhQsXWp4dAOB6tLI9gRtVXV2tgoICpaenO/uCg4MVHx+v/Pz8Ro+pqqpSVVWVc7u+vl4XLlxQ586dFRQUdMvnDAB/yowxunjxoqKiohQcfP1/cGuxQfTtt9+qrq5OERERAfsjIiJ0/PjxRo/JyMjQa6+9djumBwB3rVOnTql79+7XXd9ig+hGpKenKzU11bldUVGhHj166BGNVyu1tjgzAGj5alWjT/SROnTo0KTjWmwQdenSRSEhISorKwvYX1ZWpsjIyEaPcbvdcrvd1+xvpdZqFUQQAcBN+f8rDpr6UUeLXTXncrk0ZMgQ5eTkOPvq6+uVk5Mjr9drcWYAgKZose+IJCk1NVVJSUkaOnSohg8frjfffFOVlZWaMWOG7akBAK5Tiw6iyZMn69y5c1q8eLF8Pp8GDRqkrKysaxYwAADuXC36e0Q3y+/3KzQ0VI9pAp8RAcBNqjU1ytVWVVRUyOPxXPdxLfYzIgDAnwaCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCq2YNo6dKlCgoKCtj69evnjF+5ckXJycnq3Lmz2rdvr0mTJqmsrCzgHKWlpUpMTFS7du0UHh6uBQsWqLa2NqAmNzdXgwcPltvtVp8+fZSZmdncrQAAboNb8o7ooYce0pkzZ5ztk08+ccbmz5+vDz/8UJs2bVJeXp5Onz6tp556yhmvq6tTYmKiqqurtWfPHr333nvKzMzU4sWLnZqSkhIlJiZq1KhRKiws1Lx58/T8889r586dt6IdAMAtFGSMMc15wqVLl2rLli0qLCy8ZqyiokJdu3bVunXr9PTTT0uSjh8/rgcffFD5+fkaMWKEduzYoccff1ynT59WRESEJGnt2rVKS0vTuXPn5HK5lJaWpu3bt6uoqMg595QpU1ReXq6srKzrnqvf71doaKge0wS1Cmp9c40DwF2u1tQoV1tVUVEhj8dz3cfdkndEX375paKiotS7d29NmzZNpaWlkqSCggLV1NQoPj7eqe3Xr5969Oih/Px8SVJ+fr5iY2OdEJKkhIQE+f1+HT161Km5+hwNNQ3n+DFVVVXy+/0BGwDArmYPori4OGVmZiorK0tr1qxRSUmJHn30UV28eFE+n08ul0thYWEBx0RERMjn80mSfD5fQAg1jDeM/VSN3+/X5cuXf3RuGRkZCg0Ndbbo6OibbRcAcJNaNfcJx40b5/w8YMAAxcXFqWfPntq4caPatm3b3JdrkvT0dKWmpjq3/X4/YQQAlt3y5dthYWF64IEHdOLECUVGRqq6ulrl5eUBNWVlZYqMjJQkRUZGXrOKruH2H6vxeDw/GXZut1sejydgAwDYdcuD6NKlSzp58qS6deumIUOGqHXr1srJyXHGi4uLVVpaKq/XK0nyer06cuSIzp4969RkZ2fL4/EoJibGqbn6HA01DecAALQczR5EL7/8svLy8vTVV19pz549evLJJxUSEqKpU6cqNDRUM2fOVGpqqj7++GMVFBRoxowZ8nq9GjFihCRpzJgxiomJ0bPPPqvPP/9cO3fu1KJFi5ScnCy32y1Jmj17tn7/+9/rlVde0fHjx7V69Wpt3LhR8+fPb+52AAC3WLN/RvTNN99o6tSpOn/+vLp27apHHnlEe/fuVdeuXSVJv/71rxUcHKxJkyapqqpKCQkJWr16tXN8SEiItm3bpjlz5sjr9eqee+5RUlKSXn/9daemV69e2r59u+bPn68VK1aoe/fueuedd5SQkNDc7QAAbrFm/x5RS8L3iACg+dxR3yMCAOB6EUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVU0Oot27d+uJJ55QVFSUgoKCtGXLloBxY4wWL16sbt26qW3btoqPj9eXX34ZUHPhwgVNmzZNHo9HYWFhmjlzpi5duhRQc/jwYT366KNq06aNoqOjtWzZsmvmsmnTJvXr109t2rRRbGysPvroo6a2AwCwrMlBVFlZqYEDB2rVqlWNji9btkxvvfWW1q5dq3379umee+5RQkKCrly54tRMmzZNR48eVXZ2trZt26bdu3frhRdecMb9fr/GjBmjnj17qqCgQMuXL9fSpUv19ttvOzV79uzR1KlTNXPmTB06dEgTJ07UxIkTVVRU1NSWAAAWBRljzA0fHBSkzZs3a+LEiZJ+eDcUFRWll156SS+//LIkqaKiQhEREcrMzNSUKVP0xRdfKCYmRgcOHNDQoUMlSVlZWRo/fry++eYbRUVFac2aNXr11Vfl8/nkcrkkSQsXLtSWLVt0/PhxSdLkyZNVWVmpbdu2OfMZMWKEBg0apLVr1zY636qqKlVVVTm3/X6/oqOj9ZgmqFVQ6xu9GwAAkmpNjXK1VRUVFfJ4PNd9XLN+RlRSUiKfz6f4+HhnX2hoqOLi4pSfny9Jys/PV1hYmBNCkhQfH6/g4GDt27fPqRk5cqQTQpKUkJCg4uJifffdd07N1ddpqGm4TmMyMjIUGhrqbNHR0TffNADgpjRrEPl8PklSREREwP6IiAhnzOfzKTw8PGC8Va
tW6tSpU0BNY+e4+ho/VtMw3pj09HRVVFQ426lTp5raIgCgmbWyPYHbye12y+12254GAOAqzfqOKDIyUpJUVlYWsL+srMwZi4yM1NmzZwPGa2trdeHChYCaxs5x9TV+rKZhHADQMjRrEPXq1UuRkZHKyclx9vn9fu3bt09er1eS5PV6VV5eroKCAqdm165dqq+vV1xcnFOze/du1dTUODXZ2dnq27evOnbs6NRcfZ2GmobrAABahiYH0aVLl1RYWKjCwkJJPyxQKCwsVGlpqYKCgjRv3jz94z/+o373u9/pyJEj+vnPf66oqChnZd2DDz6osWPHatasWdq/f78+/fRTpaSkaMqUKYqKipIkPfPMM3K5XJo5c6aOHj2qDRs2aMWKFUpNTXXm8eKLLyorK0u/+tWvdPz4cS1dulQHDx5USkrKzd8rAIDbpsnLt3NzczVq1Khr9iclJSkzM1PGGC1ZskRvv/22ysvL9cgjj2j16tV64IEHnNoLFy4oJSVFH374oYKDgzVp0iS99dZbat++vVNz+PBhJScn68CBA+rSpYvmzp2rtLS0gGtu2rRJixYt0ldffaX7779fy5Yt0/jx46+7F7/fr9DQUJZvA0AzuNHl2zf1PaKWjiACgOZzR3yPCACApiKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKuaHES7d+/WE088oaioKAUFBWnLli0B49OnT1dQUFDANnbs2ICaCxcuaNq0afJ4PAoLC9PMmTN16dKlgJrDhw/r0UcfVZs2bRQdHa1ly5ZdM5dNmzapX79+atOmjWJjY/XRRx81tR0AgGVNDqLKykoNHDhQq1at+tGasWPH6syZM872/vvvB4xPmzZNR48eVXZ2trZt26bdu3frhRdecMb9fr/GjBmjnj17qqCgQMuXL9fSpUv19ttvOzV79uzR1KlTNXPmTB06dEgTJ07UxIkTVVRU1NSWAAAWBRljzA0fHBSkzZs3a+LEic6+6dOnq7y8/Jp3Sg2++OILxcTE6MCBAxo6dKgkKSsrS+PHj9c333yjqKgorVmzRq+++qp8Pp9cLpckaeHChdqyZYuOHz8uSZo8ebIqKyu1bds259wjRozQoEGDtHbt2kavXVVVpaqqKue23+9XdHS0HtMEtQpqfaN3AwBAUq2pUa62qqKiQh6P57qPuyWfEeXm5io8PFx9+/bVnDlzdP78eWcsPz9fYWFhTghJUnx8vIKDg7Vv3z6nZuTIkU4ISVJCQoKKi4v13XffOTXx8fEB101ISFB+fv6PzisjI0OhoaHOFh0d3Sz9AgBuXLMH0dixY/Xv//7vysnJ0T/90z8pLy9P48aNU11dnSTJ5/MpPDw84JhWrVqpU6dO8vl8Tk1ERERATcPtP1bTMN6Y9PR0VVRUONupU6durlkAwE1r1dwnnDJlivNzbGysBgwYoPvuu0+5ubkaPXp0c1+uSdxut9xut9U5AAAC3fLl271791aXLl104sQJSVJkZKTOnj0bUFNbW6sLFy4oMjLSqSkrKwuoabj9x2oaxgEALcMtD6JvvvlG58+fV7du3SRJXq9X5eXlKigocGp27dql+vp6xcXFOTW7d+9WTU2NU5Odna2+ffuqY8eOTk1OTk7AtbKzs+X1em91SwCAZtTkILp06ZIKCwtVWFgoSSopKVFhYaFKS0t16dIlLViwQHv37tVXX32lnJwcTZgwQX369FFCQoIk6cEHH9TYsWM1a9Ys7d+/X59++qlSUlI0ZcoURUVFSZKeeeYZuVwuzZw5U0ePHtWGDRu0YsUKpaamOvN48cUXlZWVpV/96lc6fvy4li5dqoMHDyolJaUZ7hYAwO3S5OXbubm5GjVq1DX7k5KStGbNGk2cOFGHDh1SeXm5oqKiNGbMGP3DP/xDwMKCCxcuKCUlRR9++KGCg4M1adIkvfXWW2rfvr1Tc/jwYSUnJ+vAgQPq0qWL5s6dq7S0tIBrbtq0SYsWLdJXX32l+++/X8uWLdP48eOvuxe/36/Q0FCWbwNAM7jR5ds39T2ilo4gAoDmc0d9jwgAgOtFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArGpSEGVkZGjYsGHq0KGDwsPDNXHiRBUXFwfUXLlyRcnJyercubPat2+vSZMmqaysLKCmtLRUiYmJateuncLDw7VgwQLV1tYG1OTm5mrw4MFyu93q06ePMjMzr5nPqlWrdO+996pNmzaKi4vT/v37m9IOAOAO0KQgysvLU3Jysvbu3avs7GzV1NRozJgxqqysdGrmz5+vDz/8UJs2bVJeXp5Onz6tp556yhmvq6tTYmKiqqurtWfPHr333nvKzMzU4sWLnZqSkhIlJiZq1KhRKiws1Lx58/T8889r586dTs2GDRuUmpqqJUuW6LPPPtPAgQOVkJCgs2fP3sz9AQC4zYKMMeZGDz537pzCw8OVl5enkSNHqqKiQl27dtW6dev09NNPS5KOHz+uBx98UPn5+RoxYoR27Nihxx9/XKdPn1ZERIQkae3atUpLS9O5c+fkcrmUlpam7du3q6ioyLnWlClTVF5erqysLElSXFychg0bppUrV0qS6uvrFR0drblz52rhwoXXNX+/36/Q0FA9pglqFdT6Ru8GAICkWlOjXG1VRUWFPB7PdR93U58RVVRUSJI6deokSSooKFBNTY3i4+Odmn79+qlHjx7Kz8+XJOXn5ys2NtYJIUlKSEiQ3+/X0aNHnZqrz9FQ03CO6upqFRQUBNQEBwcrPj7eqWlMVVWV/H5/wAYAsOuGg6i+vl7z5s3Tww8/rP79+0uSfD6fXC6XwsLCAmojIiLk8/mcmqtDqGG8Yeynavx+vy5fvqxvv/1WdXV1jdY0nKMxGRkZCg0Ndbbo6OimNw4AaFY3HETJyckqKirS+vXrm3M+t1R6eroqKiqc7dSpU7anBAB3vVY3clBKSoq2bdum3bt3q3v37s7+yMhIVVdXq7y8POBdUVlZmSIjI52aP1zd1rCq7uqaP1xpV1ZWJo/Ho7Zt2yokJEQhISGN1jScozFut1tut7vpDQMAbpkmvSMyxiglJUWbN2/Wrl271KtXr
4DxIUOGqHXr1srJyXH2FRcXq7S0VF6vV5Lk9Xp15MiRgNVt2dnZ8ng8iomJcWquPkdDTcM5XC6XhgwZElBTX1+vnJwcpwYA0DI06R1RcnKy1q1bp61bt6pDhw7O5zGhoaFq27atQkNDNXPmTKWmpqpTp07yeDyaO3euvF6vRowYIUkaM2aMYmJi9Oyzz2rZsmXy+XxatGiRkpOTnXcrs2fP1sqVK/XKK6/oueee065du7Rx40Zt377dmUtqaqqSkpI0dOhQDR8+XG+++aYqKys1Y8aM5rpvAAC3QZOWbwcFBTW6/91339X06dMl/fCF1pdeeknvv/++qqqqlJCQoNWrVwf8yezrr7/WnDlzlJubq3vuuUdJSUl644031KrV/+Vibm6u5s+fr2PHjql79+765S9/6VyjwcqVK7V8+XL5fD4NGjRIb731luLi4q67eZZvA0DzudHl2zf1PaKWjiACgOZj5XtEAADcLIIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsKpJQZSRkaFhw4apQ4cOCg8P18SJE1VcXBxQ89hjjykoKChgmz17dkBNaWmpEhMT1a5dO4WHh2vBggWqra0NqMnNzdXgwYPldrvVp08fZWZmXjOfVatW6d5771WbNm0UFxen/fv3N6UdAMAdoElBlJeXp+TkZO3du1fZ2dmqqanRmDFjVFlZGVA3a9YsnTlzxtmWLVvmjNXV1SkxMVHV1dXas2eP3nvvPWVmZmrx4sVOTUlJiRITEzVq1CgVFhZq3rx5ev7557Vz506nZsOGDUpNTdWSJUv02WefaeDAgUpISNDZs2dv9L4AAFgQZIwxN3rwuXPnFB4erry8PI0cOVLSD++IBg0apDfffLPRY3bs2KHHH39cp0+fVkREhCRp7dq1SktL07lz5+RyuZSWlqbt27erqKjIOW7KlCkqLy9XVlaWJCkuLk7Dhg3TypUrJUn19fWKjo7W3LlztXDhwuuav9/vV2hoqB7TBLUKan2jdwMAQFKtqVGutqqiokIej+e6j7upz4gqKiokSZ06dQrY/9vf/lZdunRR//79lZ6eru+//94Zy8/PV2xsrBNCkpSQkCC/36+jR486NfHx8QHnTEhIUH5+viSpurpaBQUFATXBwcGKj493ahpTVVUlv98fsAEA7Gp1owfW19dr3rx5evjhh9W/f39n/zPPPKOePXsqKipKhw8fVlpamoqLi/XBBx9Iknw+X0AISXJu+3y+n6zx+/26fPmyvvvuO9XV1TVac/z48R+dc0ZGhl577bUbbRkAcAvccBAlJyerqKhIn3zyScD+F154wfk5NjZW3bp10+jRo3Xy5Endd999Nz7TZpCenq7U1FTntt/vV3R0tMUZAQBuKIhSUlK0bds27d69W927d//J2ri4OEnSiRMndN999ykyMvKa1W1lZWWSpMjISOd/G/ZdXePxeNS2bVuFhIQoJCSk0ZqGczTG7XbL7XZfX5MAgNuiSZ8RGWOUkpKizZs3a9euXerVq9cfPaawsFCS1K1bN0mS1+vVkSNHAla3ZWdny+PxKCYmxqnJyckJOE92dra8Xq8kyeVyaciQIQE19fX1ysnJcWoAAC1Dk94RJScna926ddq6das6dOjgfKYTGhqqtm3b6uTJk1q3bp3Gjx+vzp076/Dhw5o/f75GjhypAQMGSJLGjBmjmJgYPfvss1q2bJl8Pp8WLVqk5ORk593K7NmztXLlSr3yyit67rnntGvXLm3cuFHbt2935pKamqqkpCQNHTpUw4cP15tvvqnKykrNmDGjue4bAMBt0KTl20FBQY3uf/fddzV9+nSdOnVKf/u3f6uioiJVVlYqOjpaTz75pBYtWhSwlO/rr7/WnDlzlJubq3vuuUdJSUl644031KrV/+Vibm6u5s+fr2PHjql79+765S9/qenTpwdcd+XKlVq+fLl8Pp8GDRqkt956y/lT4PVg+TYANJ8bXb59U98jaukIIgBoPla+RwQAwM0iiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrmhREa9as0YABA+TxeOTxeOT1erVjxw5n/MqVK0pOTlbnzp3Vvn17TZo0SWVlZQHnKC0tVWJiotq1a6fw8HAtWLBAtbW1ATW5ubkaPHiw3G63+vTpo8zMzGvmsmrVKt17771q06aN4uLitH///qa0AgC4QzQpiLp376433nhDBQUFOnjwoP7yL/9SEyZM0NGjRyVJ8+fP14cffqhNmzYpLy9Pp0+f1lNPPeUcX1dXp8TERFVXV2vPnj167733lJmZqcWLFzs1JSUlSkxM1KhRo1RYWKh58+bp+eef186dO52aDRs2KDU1VUuWLNFnn32mgQMHKiEhQWfPnr3Z+wMAcJsFGWPMzZygU6dOWr58uZ5++ml17dpV69at09NPPy1JOn78uB588EHl5+drxIgR2rFjhx5//HGdPn1aERERkqS1a9cqLS1N586dk8vlUlpamrZv366ioiLnGlOmTFF5ebmysrIkSXFxcRo2bJhWrlwpSaqvr1d0dLTmzp2rhQsXXvfc/X6/QkND9ZgmqFVQ65u5GwDgrldrapSrraqoqJDH47nu4274M6K6ujqtX79elZWV8nq9KigoUE1NjeLj452afv36qUePHsrPz5ck5efnKzY21gkhSUpISJDf73feVeXn5weco6Gm4RzV1dUqKCgIqAkODlZ8fLxT82Oqqqrk9/sDNgCAXU0OoiNHjqh9+/Zyu92aPXu2Nm/erJiYGPl8PrlcLoWFhQXUR0REyOfzSZJ8Pl9ACDWMN4z9VI3f79fly5f17bffqq6urtGahnP8mIyMDIWGhjpbdHR0U9sHADSzJgdR3759VVhYqH379mnOnDlKSkrSsWPHbsXcml16eroqKiqc7dSpU7anBAB3vVZNPcDlcqlPnz6SpCFDhujAgQNasWKFJk+erOrqapWXlwe8KyorK1NkZKQkKTIy8prVbQ2r6q6u+cOVdmVlZfJ4PGrbtq1CQkIUEhLSaE3DOX6M2+2W2+1uassAgFvopr9HVF9fr6qqKg0ZMkStW7dWTk6OM1ZcXKzS
0lJ5vV5Jktfr1ZEjRwJWt2VnZ8vj8SgmJsapufocDTUN53C5XBoyZEhATX19vXJycpwaAEDL0aR3ROnp6Ro3bpx69Oihixcvat26dcrNzdXOnTsVGhqqmTNnKjU1VZ06dZLH49HcuXPl9Xo1YsQISdKYMWMUExOjZ599VsuWLZPP59OiRYuUnJzsvFOZPXu2Vq5cqVdeeUXPPfecdu3apY0bN2r79u3OPFJTU5WUlKShQ4dq+PDhevPNN1VZWakZM2Y0410DALgdmhREZ8+e1c9//nOdOXNGoaGhGjBggHbu3Km/+qu/kiT9+te/VnBwsCZNmqSqqiolJCRo9erVzvEhISHatm2b5syZI6/Xq3vuuUdJSUl6/fXXnZpevXpp+/btmj9/vlasWKHu3bvrnXfeUUJCglMzefJknTt3TosXL5bP59OgQYOUlZV1zQIGAMCd76a/R9SS8T0iAGg+t/17RAAANAeCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFa1sj0Bm4wxkqRa1UjG8mQAoIWrVY2k//vder3u6iA6f/68JOkTfWR5JgDwp+PixYsKDQ297vq7Oog6deokSSotLW3SnXYn8/v9io6O1qlTp+TxeGxPp9nQV8tCXy1Lc/VljNHFixcVFRXVpOPu6iAKDv7hI7LQ0NA/qSeVJHk8nj+5niT6amnoq2Vpjr5u5P/Us1gBAGAVQQQAsOquDiK3260lS5bI7Xbbnkqz+VPsSaKvloa+WhbbfQWZpq6zAwCgGd3V74gAAPYRRAAAqwgiAIBVBBEAwCqCCABg1V0bRKtWrdK9996rNm3aKC4uTvv377c9JcfSpUsVFBQUsPXr188Zv3LlipKTk9W5c2e1b99ekyZNUllZWcA5SktLlZiYqHbt2ik8PFwLFixQbW1tQE1ubq4GDx4st9utPn36KDMzs1n72L17t5544glFRUUpKChIW7ZsCRg3xmjx4sXq1q2b2rZtq/j4eH355ZcBNRcuXNC0adPk8XgUFhammTNn6tKlSwE1hw8f1qOPPqo2bdooOjpay5Ytu2YumzZtUr9+/dSmTRvFxsbqo49u/N8X/GN9TZ8+/ZrHb+zYsXd0XxkZGRo2bJg6dOig8PBwTZw4UcXFxQE1t/N511yvz+vp67HHHrvm8Zo9e/Yd3deaNWs0YMAA519C8Hq92rFjhzPe4h4rcxdav369cblc5t/+7d/M0aNHzaxZs0xYWJgpKyuzPTVjjDFLliwxDz30kDlz5oyznTt3zhmfPXu2iY6ONjk5OebgwYNmxIgR5s///M+d8draWtO/f38THx9vDh06ZD766CPTpUsXk56e7tT8/ve/N+3atTOpqanm2LFj5je/+Y0JCQkxWVlZzdbHRx99ZF599VXzwQcfGElm8+bNAeNvvPGGCQ0NNVu2bDGff/65+eu//mvTq1cvc/nyZadm7NixZuDAgWbv3r3mv//7v02fPn3M1KlTnfGKigoTERFhpk2bZoqKisz7779v2rZta/71X//Vqfn0009NSEiIWbZsmTl27JhZtGiRad26tTly5Mgt6SspKcmMHTs24PG7cOFCQM2d1ldCQoJ59913TVFRkSksLDTjx483PXr0MJcuXXJqbtfzrjlfn9fT11/8xV+YWbNmBTxeFRUVd3Rfv/vd78z27dvN//zP/5ji4mLzi1/8wrRu3doUFRUZY1reY3VXBtHw4cNNcnKyc7uurs5ERUWZjIwMi7P6P0uWLDEDBw5sdKy8vNy0bt3abNq0ydn3xRdfGEkmPz/fGPPDL8rg4GDj8/mcmjVr1hiPx2OqqqqMMca88sor5qGHHgo49+TJk01CQkIzd/ODP/yFXV9fbyIjI83y5cudfeXl5cbtdpv333/fGGPMsWPHjCRz4MABp2bHjh0mKCjI/O///q8xxpjVq1ebjh07On0ZY0xaWprp27evc/tv/uZvTGJiYsB84uLizN/93d81e1/G/BBEEyZM+NFjWkJfZ8+eNZJMXl6eMeb2Pu9u5evzD/sy5ocgevHFF3/0mJbQlzHGdOzY0bzzzjst8rG66/40V11drYKCAsXHxzv7goODFR8fr/z8fIszC/Tll18qKipKvXv31rRp01RaWipJKigoUE1NTcD8+/Xrpx49ejjzz8/PV2xsrCIiIpyahIQE+f1+HT161Km5+hwNNbfrPigpKZHP5wuYQ2hoqOLi4gL6CAsL09ChQ52a+Ph4BQcHa9++fU7NyJEj5XK5AvooLi7Wd99959Tc7l5zc3MVHh6uvn37as6cOc5/cqSl9FVRUSHp//6F+tv1vLvVr88/7KvBb3/7W3Xp0kX9+/dXenq6vv/+e2fsTu+rrq5O69evV2Vlpbxeb4t8rO66f33722+/VV1dXcADIEkRERE6fvy4pVkFiouLU2Zmpvr27aszZ87otdde06OPPqqioiL5fD65XC6FhYUFHBMRESGfzydJ8vl8jfbXMPZTNX6/X5cvX1bbtm1vUXcKmEdjc7h6juHh4QHjrVq1UqdOnQJqevXqdc05GsY6duz4o702nKO5jR07Vk899ZR69eqlkydP6he/+IXGjRun/Px8hYSE3PF91dfXa968eXr44YfVv39/55q343n33Xff3bLXZ2N9SdIzzzyjnj17KioqSocPH1ZaWpqKi4v1wQcf3NF9HTlyRF6vV1euXFH79u21efNmxcTEqLCwsMU9VnddELUE48aNc34eMGCA4uLi1LNnT23cuPGWBwRu3pQpU5yfY2NjNWDAAN13333Kzc3V6NGjLc7s+iQnJ6uoqEiffPKJ7ak0qx/r64UXXnB+jo2NVbdu3TR69GidPHlS99133+2e5nXr27evCgsLVVFRof/4j/9QUlKS8vLybE/rhtx1f5rr0qWLQkJCrllBUlZWpsjISEuz+mlhYWF64IEHdOLECUVGRqq6ulrl5eUBNVfPPzIystH+GsZ+qsbj8dyWsGuYx089DpGRkTp79mzAeG1trS5cuNAsvd6ux7t3797q0qWLTpw44cznTu0rJSVF27Zt08cff6zu3bs7+2/X8+5WvT5/rK/GxMXFSVLA43Un9uVyudSnTx8NGTJEGRkZGjhwoFasWNEiH6u7LohcLpeGDBminJwcZ199fb1ycnLk9XotzuzHXbp0SSdPnlS3bt00ZMgQtW7dOmD+xcXFKi0tdebv9Xp15MiRgF922dnZ8ng8iomJcWquPkdDze26D3r16qXIyMiAOfj9fu3bty+gj/LychUUFDg1u3btUn19vfPLwuv1avfu3aqpqQnoo2/fvurYsaNTY7PXb775RufPn1e3bt2c+dxpfRljlJKSos2bN2v
Xrl3X/Fnwdj3vmvv1+cf6akxhYaEkBTxed1pfjamvr1dVVVXLfKyatLThT8T69euN2+02mZmZ5tixY+aFF14wYWFhAStIbHrppZdMbm6uKSkpMZ9++qmJj483Xbp0MWfPnjXG/LA0s0ePHmbXrl3m4MGDxuv1Gq/X6xzfsDRzzJgxprCw0GRlZZmuXbs2ujRzwYIF5osvvjCrVq1q9uXbFy9eNIcOHTKHDh0yksy//Mu/mEOHDpmvv/7aGPPD8u2wsDCzdetWc/jwYTNhwoRGl2//2Z/9mdm3b5/55JNPzP333x+wzLm8vNxERESYZ5991hQVFZn169ebdu3aXbPMuVWrVuaf//mfzRdffGGWLFlyU8u3f6qvixcvmpdfftnk5+ebkpIS81//9V9m8ODB5v777zdXrly5Y/uaM2eOCQ0NNbm5uQHLmL///nun5nY975rz9fnH+jpx4oR5/fXXzcGDB01JSYnZunWr6d27txk5cuQd3dfChQtNXl6eKSkpMYcPHzYLFy40QUFB5j//8z+NMS3vsborg8gYY37zm9+YHj16GJfLZYYPH2727t1re0qOyZMnm27duhmXy2V+9rOfmcmTJ5sTJ04445cvXzZ///d/bzp27GjatWtnnnzySXPmzJmAc3z11Vdm3Lhxpm3btqZLly7mpZdeMjU1NQE1H3/8sRk0aJBxuVymd+/e5t13323WPj7++GMj6ZotKSnJGPPDEu5f/vKXJiIiwrjdbjN69GhTXFwccI7z58+bqVOnmvbt2xuPx2NmzJhhLl68GFDz+eefm0ceecS43W7zs5/9zLzxxhvXzGXjxo3mgQceMC6Xyzz00ENm+/btt6Sv77//3owZM8Z07drVtG7d2vTs2dPMmjXrmhfmndZXY/1ICnhO3M7nXXO9Pv9YX6WlpWbkyJGmU6dOxu12mz59+pgFCxYEfI/oTuzrueeeMz179jQul8t07drVjB492gkhY1reY8V/jwgAYNVd9xkRAODOQhABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVv0/Rgd5CariuE8AAAAASUVORK5CYII=",
+      "text/plain": [
+       "<Figure size 640x480 with 1 Axes>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "import matplotlib.pylab as plt\n",
+    "\n",
+    "plt.imshow(channel)\n",
+    "# print(resized)\n",
+    "# plt.imshow([finalInstance==3])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pylab as plt\n",
+    "\n",
+    "plt.imshow(finalInstance==4)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "torch",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.13"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/show_npy.py b/show_npy.py
new file mode 100644
index 0000000000000000000000000000000000000000..055fa5c9ecf9126a82b8ba4b9f265d2673a09038
--- /dev/null
+++ b/show_npy.py
@@ -0,0 +1,14 @@
+import numpy as np
+import os
+from PIL import Image
+import matplotlib.pylab as plt
+
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/npyFiles/'  # set to your npyFiles folder
+# print(os.listdir(WSIrootFolder))
+finalInstance = np.load(WSIrootFolder + 'p21_kidleftp456_B10_PAS_3_finalInstancePrediction.npy')
+# resultWSI = np.load(WSIrootFolder + 'p21_kidleftp456_B10_PAS_3_resultWSI.npy')
+
+# Binary mask of the tuft class (label 3)
+channel = finalInstance == 3
+# NOTE: np.resize repeats/truncates the flattened array rather than spatially
+# downsampling; use slicing or scipy.ndimage.zoom for a genuine thumbnail
+resized = np.resize(channel, (1000, 1000))
\ No newline at end of file
diff --git a/training.py b/training.py
index ccb6d82b42d0307c5bf8fdf2537ac775e33f3e5e..149c934ab1e94bd8e8e6a5efd4fe155d5e678e9f 100644
--- a/training.py
+++ b/training.py
@@ -332,7 +332,7 @@ def set_up_training(modelString, setting, epochs, batchSize, lrate, weightDecay,
     tbWriter = SummaryWriter(log_dir=tensorboardPath)
 
     if modelString == 'custom':
-        model = Custom(input_ch=3, output_ch=8, modelDim=2)
+        model = Custom(input_ch=3, output_ch=8, modelDim=2) # one channel per class
     elif modelString == 'nnunet':
         model = Generic_UNet(input_channels=3, num_classes=8, base_num_features=30, num_pool=7, final_nonlin = None, deep_supervision=False, dropout_op_kwargs = {'p': 0.0, 'inplace': True})
     else:
diff --git a/utils.py b/utils.py
index 7ba199684fd6a1f300e10be3d175c659874013aa..53e69720c473d459286449c19c2682e0d0a29b90 100644
--- a/utils.py
+++ b/utils.py
@@ -18,6 +18,8 @@ import torch.nn as nn
 from scipy.ndimage.measurements import label
 from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
 
+from PIL import Image
+
 
 colors = torch.tensor([[  0,   0,   0], # Black
                        [255,   0,   0], # Red
@@ -42,7 +44,7 @@ def getBoundingBox(img):
 
 # Generates a 2d ball of a specified radius representing a structuring element for morphological operations
 def generate_ball(radius):
-    structure = np.zeros((3, 3), dtype=np.int)
+    structure = np.zeros((3, 3), dtype=int)
     structure[1, :] = 1
     structure[:, 1] = 1
 
@@ -50,7 +52,7 @@ def generate_ball(radius):
     ball[radius, radius] = 1
     for i in range(radius):
         ball = binary_dilation(ball, structure=structure)
-    return np.asarray(ball, dtype=np.int)
+    return np.asarray(ball, dtype=int)
 
 
 def convert_labelmap_to_rgb(labelmap):
@@ -187,6 +189,7 @@ def savePredictionResultsWithoutDilation(prediction, fullResultPath, figSize):
 
 # Visualizes prediction and image overlay after dilating tubules
 def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha=0.4):
     predictionMask = np.ma.masked_where(prediction == 0, prediction)
 
     colorMap = np.array([[0, 0, 0],  # Black
@@ -200,7 +203,38 @@ def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha
                          [128, 0, 128],  # Purple
                          [255, 140, 0],  # Orange
                          [255, 255, 255]], dtype=np.uint8)  # White
 
     newRandomColors = np.random.randint(low=0, high=256, dtype=np.uint8, size=(prediction.max(), 3))
     colorMap = np.concatenate((colorMap, newRandomColors))
     colorMap = colorMap / 255.
@@ -209,7 +243,10 @@ def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha
     assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!'
     customColorMap = mpl.colors.ListedColormap(colorMap)
 
     fig = plt.figure(figsize=figSize)
     ax = plt.Axes(fig, [0., 0., 1., 1., ])
     ax.set_axis_off()
     fig.add_axes(ax)
@@ -255,6 +292,8 @@ def savePredictionOverlayResults_Fast(img, prediction, fullResultPath, figHeight
     assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!'
     customColorMap = mpl.colors.ListedColormap(colorMap)
 
     fig = plt.figure(figsize=(figHeight*prediction.shape[1]/prediction.shape[0], figHeight))
     ax = plt.Axes(fig, [0., 0., 1., 1., ])
     ax.set_axis_off()