diff --git a/UNet/NormalizeTrainingdata_32.ipynb b/UNet/NormalizeTrainingdata_32.ipynb
index 507145720e2b0586e42a31811f166d19c6ec202f..09ba0845689c733809e59eb53ab75365e62b3506 100644
--- a/UNet/NormalizeTrainingdata_32.ipynb
+++ b/UNet/NormalizeTrainingdata_32.ipynb
@@ -20,7 +20,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 4,
+      "execution_count": 2,
       "metadata": {
         "id": "OzNQI96lq3Pi"
       },
@@ -35,7 +35,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": null,
+      "execution_count": 13,
       "metadata": {
         "id": "lhj_0D1F0dWN"
       },
@@ -59,7 +59,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 6,
+      "execution_count": 3,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -82,7 +82,6 @@
         "#training_label = training_label[:,np.newaxis,...]\n",
         "phase= data[:,4,:,:,:].reshape(1987, 1,32,32,32)\n",
         "new_phase = np.ones(phase.shape) - phase #input[4]: martinsite, input[5]:ferrit\n",
-        "#new_training_data = np.append(data,new_channel,axis=1)\n",
         "#input = np.append(angles,phase,axis=1)\n",
         "#input = np.append(input,new_phase,axis=1)\n",
         "input = np.append(phase,new_phase,axis=1)\n",
@@ -101,18 +100,18 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 7,
+      "execution_count": 4,
       "metadata": {
         "id": "-Rbt8Brb9mM_"
       },
       "outputs": [],
       "source": [
-        "min_label = training_label.min()\n",
-        "max_label = training_label.max()\n",
-        "s_batch, width, height, depth = label.size()\n",
         "label_normalized = label.view(label.size(0), -1)\n",
-        "label_normalized -= label_normalized.min(1, keepdim=True)[0]\n",
-        "label_normalized /= label_normalized.max(1, keepdim=True)[0]\n",
+        "min_label = label_normalized.min()\n",
+        "max_label = label_normalized.max()\n",
+        "s_batch, width, height, depth = label.size()\n",
+        "label_normalized -= min_label\n",
+        "label_normalized /= max_label\n",
         "label_normalized = label_normalized.view(s_batch, width, height, depth)\n",
         "label_normalized = label_normalized[:,np.newaxis,...]"
       ]
@@ -142,7 +141,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 11,
+      "execution_count": 5,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -153,10 +152,39 @@
       "outputs": [],
       "source": [
         "dataset = TensorDataset(input,label_normalized) # create the pytorch dataset \n",
-        "#np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_V2.npy',[min_label, max_label,angles_min_max])\n",
-        "np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_V2.npy',[min_label, max_label])\n",
+        "#np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_angles.npy',[min_label, max_label,angles_min_max])\n",
+        "np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_phase_only.npy',[min_label, max_label])\n",
         "\n",
-        "torch.save(dataset,'E:/Data/damask3/UNet/Input/Training_Dataset_normalized__32_V2.pt')\n"
+        "torch.save(dataset,'E:/Data/damask3/UNet/Input/TD_norm_32_phase_only.pt')\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 13,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "diff = training_label-rescaled.reshape(1987,32,32,32).numpy()"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 14,
+      "metadata": {},
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "2.384185791015625e-07"
+            ]
+          },
+          "execution_count": 14,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "np.max(abs(diff))\n"
       ]
     }
   ],
diff --git a/UNet/NormalizeTrainingdata_64.ipynb b/UNet/NormalizeTrainingdata_64.ipynb
index aa94092841742216ddf4781a0907ed5b4b1c4568..c961007d988338972f0abed78b6163247fdfc972 100644
--- a/UNet/NormalizeTrainingdata_64.ipynb
+++ b/UNet/NormalizeTrainingdata_64.ipynb
@@ -2,7 +2,7 @@
   "cells": [
     {
       "cell_type": "code",
-      "execution_count": 9,
+      "execution_count": 1,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -21,7 +21,16 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 10,
+      "execution_count": 2,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "min_label, max_label,angles_min_max = np.load('E:/Data/damask3/UNet/Input/Norm_min_max_64.npy', allow_pickle= True)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 7,
       "metadata": {
         "id": "OzNQI96lq3Pi"
       },
@@ -29,6 +38,9 @@
       "source": [
         "training_data = np.load('E:/Data/damask3/UNet/Input/Training_data_64.npy')\n",
         "training_label =  np.load('E:/Data/damask3/UNet/Input/Training_labels_64.npy')\n",
+        "training_data = training_data[0:10]\n",
+        "training_label = training_label[0:10]\n",
+        "\n",
         "if training_data.shape[0] != training_label.shape[0]:\n",
         "    print('label and data have not the same size')\n",
         "    #Desired input shape: (N,C,D,H,W) N=Batchsize, C=Channel, D=Depth, H=Height, W = Width\n",
@@ -38,7 +50,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 11,
+      "execution_count": 8,
       "metadata": {
         "id": "lUnBE7T4q3Pi"
       },
@@ -46,24 +58,26 @@
       "source": [
         "angles = data[:,0:4,...]\n",
         "angles_min_max= np.zeros((2,angles.shape[1]))\n",
+        "\n",
         "for i in range(angles.shape[1]):\n",
+        "  min = angles_min_max[0,i]\n",
+        "  max = angles_min_max[1,i]\n",
+        "\n",
         "  s_batch,_,width, height, depth = angles.shape\n",
         "\n",
         "  column= angles[:,i,...]\n",
         "  angles_min_max[0,i] = column.min()\n",
         "  angles_min_max[1,i] = column.max()\n",
-        "  #column_normalized = column.view(angles.shape[0], -1)\n",
-        "  column -= column.min()\n",
-        "  column /= (column.max() - column.min())\n",
+        "  column -= min\n",
+        "  column /= (max - min)\n",
         "  column = column.reshape(s_batch,width, height, depth)\n",
-        "  #column = column[:,np.newaxis,...]\n",
         "  angles[:,i,...] = column\n",
         "\n"
       ]
     },
     {
       "cell_type": "code",
-      "execution_count": 12,
+      "execution_count": 9,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -76,8 +90,8 @@
           "name": "stdout",
           "output_type": "stream",
           "text": [
-            "size of input is torch.Size([458, 6, 64, 64, 64])\n",
-            "size of label is torch.Size([458, 64, 64, 64])\n"
+            "size of input is torch.Size([10, 6, 64, 64, 64])\n",
+            "size of label is torch.Size([10, 64, 64, 64])\n"
           ]
         }
       ],
@@ -103,33 +117,33 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 13,
+      "execution_count": 11,
       "metadata": {
         "id": "Kgd1WhOODim3"
       },
       "outputs": [],
       "source": [
-        "min_label = training_label.min()\n",
-        "max_label = training_label.max()\n",
+        "min_label = training_label.min(1, keepdim=True)\n",
+        "max_label = training_label.max(1, keepdim=True)\n",
         "s_batch, width, height, depth = label.size()\n",
         "label_normalized = label.view(label.size(0), -1)\n",
-        "label_normalized -= label_normalized.min(1, keepdim=True)[0]\n",
-        "label_normalized /= label_normalized.max(1, keepdim=True)[0]\n",
+        "label_normalized -= min_label\n",
+        "label_normalized /= max_label\n",
         "label_normalized = label_normalized.view(s_batch, width, height, depth)\n",
         "label_normalized = label_normalized[:,np.newaxis,...]"
       ]
     },
     {
       "cell_type": "code",
-      "execution_count": 15,
+      "execution_count": 12,
       "metadata": {
         "id": "-Rbt8Brb9mM_"
       },
       "outputs": [],
       "source": [
-        "#dataset = TensorDataset(input,label_normalized) # create the pytorch dataset \n",
+        "dataset = TensorDataset(input,label_normalized) # create the pytorch dataset \n",
         "#np.save('E:/Data/damask3/UNet/Input/Norm_min_max_64.npy',[min_label, max_label,angles_min_max])\n",
-        "torch.save(dataset,'E:/Data/damask3/UNet/Input/Training_Dataset_normalized_64.pt')\n"
+        "torch.save(dataset,'E:/Data/damask3/UNet/Input/Training_Dataset_normalized_64_sample.pt')\n"
       ]
     }
   ],
diff --git a/UNet/UNet_V10.py b/UNet/UNet_V10.py
index 01254e9916e121d4545d359395330b862c5e0553..5efc2f75d0d3e1feaf208d306a42e6d369268a97 100644
--- a/UNet/UNet_V10.py
+++ b/UNet/UNet_V10.py
@@ -50,10 +50,10 @@ class head_layer(nn.Module):
         #return self.sig(self.pointwise(self.depthwise(x))) #convolution
 
 class Encoder(nn.Module):
-    def __init__(self,kernel_size, chs, padding=(0,"same","same",)):
+    def __init__(self,kernel_size, chs, padding=(0,"same","same")):
       super().__init__()
       self.channels = chs
-      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i], chs[i+1],kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs)-1)])
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1],kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
       self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
       #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
       self.periodic_upsample = nn.ReflectionPad3d(int((kernel_size-1)/2))
@@ -74,13 +74,13 @@ class Encoder(nn.Module):
       return ftrs
 
 class Decoder(nn.Module):
-    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=("same","same","same",)):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=("same","same","same")):
         super().__init__()
         assert len(chs_conv) == len(chs_upsampling)
         self.chs         = chs_upsampling
         self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
         self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1],kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
-        self.head = head_layer(chs_conv[-1][2])
+        self.head = head_layer(chs_conv[-1][1])
     def forward(self, x, encoder_features):
         for i in range(len(self.chs)):
             x        = self.upconvs[i](x)
@@ -126,18 +126,19 @@ class UNetBase(nn.Module):
         print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
             epoch, result['train_loss'], result['val_loss'], result['val_acc']))
         
-def accuracy(outputs, labels, threshold = 0.05):
-    error = (abs(outputs - labels)/outputs)
+def accuracy(outputs, labels,normalization, threshold = 0.05):
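+    # relative error in un-normalized units: for labels scaled as (x - min)/max, |out - label|/(out + min/max) equals the relative error of the rescaled stresses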
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
     right_predic = torch.sum(error < threshold)
     percentage = ((right_predic/torch.numel(error))*100.)
     return percentage
     
+    
 class UNet(UNetBase):
-    def __init__(self,kernel_size = 5, enc_chs=((2,32), (32,64), (64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192, 128),(160,64),(66,32))):
+    def __init__(self,kernel_size = 5, enc_chs=((2,32), (32,64), (64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192, 128),(160,64),(66,32)),normalization=np.array([0,1])):
         super().__init__()
         self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
         self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
-        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
 
 
     def forward(self, x):
@@ -170,8 +171,8 @@ def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.
         result['train_loss'] = torch.stack(train_losses).mean().item()
         model.epoch_end(epoch, result)
         history.append(result)
-    torch.save(model.state_dict(),f'{path}/Unet_dict_V9.pth')
-    torch.save(history,f'{path}/history_V9.pt')
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V10.pth')
+    torch.save(history,f'{path}/history_V10.pt')
     return history
 
 def get_default_device():
@@ -220,10 +221,11 @@ def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
 
 if __name__ == '__main__':
     #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
-    use_seeds = False
-    seed = 373686838
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
+    seed = 2193910023
     num_epochs = 1300
-    b_size = 8
+    b_size = 32
     opt_func = torch.optim.Adam
     lr = 0.00001
     kernel = 5
@@ -238,9 +240,10 @@ if __name__ == '__main__':
     random.seed(seed)
     np.random.seed(seed)
     device = get_default_device()
-    train_dl, valid_dl = Create_Dataloader('/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
     train_dl = DeviceDataLoader(train_dl, device)
     valid_dl = DeviceDataLoader(valid_dl, device)
 
-    model = to_device(UNet(kernel_size=kernel).double(), device)
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,'/home/yk138599/Hiwi/damask3/UNet/output', opt_func)
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func) 
diff --git a/UNet/UNet_V9.py b/UNet/UNet_V9_1.py
similarity index 90%
rename from UNet/UNet_V9.py
rename to UNet/UNet_V9_1.py
index db51335bef9054feefc0ec97c268c9a182781f7e..67edfbcdb7440ef6cbe86c974746ccfb49fe932a 100644
--- a/UNet/UNet_V9.py
+++ b/UNet/UNet_V9_1.py
@@ -116,7 +116,7 @@ class UNetBase(nn.Module):
         input, labels = batch 
         out = self(input)                    # Generate predictions
         loss = F.l1_loss(out, labels)   # Calculate loss
-        acc = accuracy(out.detach(), labels.detach())         # Calculate accuracy
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
         return {'val_loss': loss.detach(), 'val_acc': acc}
         
     def validation_epoch_end(self, outputs):
@@ -130,18 +130,19 @@ class UNetBase(nn.Module):
         print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
             epoch, result['train_loss'], result['val_loss'], result['val_acc']))
         
-def accuracy(outputs, labels, threshold = 0.05):
-    error = (abs(outputs - labels)/outputs)
+def accuracy(outputs, labels,normalization, threshold = 0.05):
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
     right_predic = torch.sum(error < threshold)
     percentage = ((right_predic/torch.numel(error))*100.)
     return percentage
     
 class UNet(UNetBase):
-    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32))):
+    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
         super().__init__()
         self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
         self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
         #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
 
 
     def forward(self, x):
@@ -174,8 +175,8 @@ def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.
         result['train_loss'] = torch.stack(train_losses).mean().item()
         model.epoch_end(epoch, result)
         history.append(result)
-    torch.save(model.state_dict(),f'{path}/Unet_dict_V9.pth')
-    torch.save(history,f'{path}/history_V9.pt')
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V9_1.pth')
+    torch.save(history,f'{path}/history_V9_1.pt')
     return history
 
 def get_default_device():
@@ -224,13 +225,14 @@ def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
 
 if __name__ == '__main__':
     #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
-    use_seeds = False
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
     seed = 373686838
     num_epochs = 1300
-    b_size = 8
+    b_size = 32
     opt_func = torch.optim.Adam
     lr = 0.00001
-    kernel = 9
+    kernel = 5
     print(f'number auf epochs: {num_epochs}')
     print(f'batchsize: {b_size}')
     print(f'learning rate: {lr}')
@@ -242,9 +244,10 @@ if __name__ == '__main__':
     random.seed(seed)
     np.random.seed(seed)
     device = get_default_device()
-    train_dl, valid_dl = Create_Dataloader('/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
     train_dl = DeviceDataLoader(train_dl, device)
     valid_dl = DeviceDataLoader(valid_dl, device)
 
-    model = to_device(UNet(kernel_size=kernel).double(), device)
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,'/home/yk138599/Hiwi/damask3/UNet/output', opt_func)
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/UNet_V9_2.py b/UNet/UNet_V9_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..7337baa0bf57519d089d9edffc07139c2a2e0a1e
--- /dev/null
+++ b/UNet/UNet_V9_2.py
@@ -0,0 +1,253 @@
+# like V6_2, but with only the two phase channels as input
+"""UNet_V6.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import random
+from torch.utils.data.sampler import SubsetRandomSampler
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import TensorDataset
+import torch.nn.functional as F
+from torch.utils.data import random_split
+from torch.nn.modules.activation import ReLU
+
+class depthwise_separable_conv(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(depthwise_separable_conv, self).__init__()
+        self.depthwise_1 = nn.Conv3d(in_c, in_c, kernel_size= kernel_size, padding=padding[0], groups=in_c, bias=True)
+        self.pointwise_1 = nn.Conv3d(in_c, out_1_c, kernel_size=1, bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.depthwise_2 = nn.Conv3d(out_1_c, out_1_c, kernel_size= kernel_size, padding=padding[1], groups=out_1_c, bias=True)
+        self.pointwise_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size=1, bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.pointwise_1(self.depthwise_1(x))))
+        return self.batch_norm_2(self.relu(self.pointwise_2(self.depthwise_2(x))))
+
+class convolution_Layer(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(convolution_Layer, self).__init__()
+        self.conv_1 = nn.Conv3d(in_c, out_1_c, kernel_size= kernel_size, padding=padding[0], bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.conv_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size= kernel_size, padding=padding[1], bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.conv_1(x)))
+        return self.batch_norm_2(self.relu(self.conv_2(x)))
+
+class head_layer(nn.Module):
+    def __init__(self, in_c, out_c = 1, padding = "same"):
+        super(head_layer, self).__init__()
+        self.conv =  nn.Conv3d(in_c, out_c, kernel_size=1, bias=True)
+        self.sig = nn.Sigmoid()
+    def forward(self, x):
+        return self.sig(self.conv(x)) #convolution
+        #return self.sig(self.pointwise(self.depthwise(x))) #convolution
+
+class Encoder(nn.Module):
+    def __init__(self,kernel_size, chs, padding=((0,"same"),("same","same"),("same","same"))):
+      super().__init__()
+      self.channels = chs
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1], chs[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
+      self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
+      #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
+      self.periodic_upsample = nn.ReflectionPad3d(int((kernel_size-1)/2))
+
+    
+    def forward(self, x):
+      ftrs = []
+      x = self.periodic_upsample(x)
+      for i in range(len(self.channels)):
+        ftrs.append(x)
+        x =self.enc_blocks[i](x)
+        #print(f'size of ftrs: {ftrs[i].size()}')
+        x = self.pool(x)
+        #print(f'size of x after pooling{x.size()}')
+      ftrs.append(x)
+      #print(f'size of ftrs: {ftrs[3].size()}')
+      #print(f'length of ftrs: {len(ftrs)}')
+      return ftrs
+
+class Decoder(nn.Module):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=(("same","same"),("same","same"),("same","same"))):
+        super().__init__()
+        assert len(chs_conv) == len(chs_upsampling)
+        self.chs         = chs_upsampling
+        self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
+        self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1], chs_conv[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
+        self.head = head_layer(chs_conv[-1][2])
+    def forward(self, x, encoder_features):
+        for i in range(len(self.chs)):
+            x        = self.upconvs[i](x)
+            #print(f'size after upsampling: {x.size()}')
+            enc_ftrs = self.crop(encoder_features[i], x)
+            x        = torch.cat([x, enc_ftrs], dim=1)
+            #print(f'size after cropping&cat: {x.size()}')
+
+            x        = self.dec_blocks[i](x)
+            #print(f'size after convolution: {x.size()}')
+        x = self.head(x)    
+        return x
+    
+    def crop(self, tensor, target_tensor):
+        target_size = target_tensor.size()[2]
+        tensor_size = tensor.size()[2]
+        delta = tensor_size - target_size
+        delta = delta // 2
+        return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta,delta:tensor_size-delta]
+
+class UNetBase(nn.Module):
+    def training_step(self, batch):
+        input, labels = batch 
+        out = self(input)                  # Generate predictions
+        loss = F.l1_loss(out, labels) # Calculate loss
+        return loss
+    
+    def validation_step(self, batch):
+        input, labels = batch 
+        out = self(input)                    # Generate predictions
+        loss = F.l1_loss(out, labels)   # Calculate loss
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
+        return {'val_loss': loss.detach(), 'val_acc': acc}
+        
+    def validation_epoch_end(self, outputs):
+        batch_losses = [x['val_loss'] for x in outputs]
+        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
+        batch_accs = [x['val_acc'] for x in outputs]
+        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
+        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
+    
+    def epoch_end(self, epoch, result):
+        print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
+            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
+        
+def accuracy(outputs, labels,normalization, threshold = 0.05):
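+    # relative error in un-normalized units: for labels scaled as (x - min)/max, |out - label|/(out + min/max) equals the relative error of the rescaled stresses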
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
+    right_predic = torch.sum(error < threshold)
+    percentage = ((right_predic/torch.numel(error))*100.)
+    return percentage
+    
+class UNet(UNetBase):
+    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
+        super().__init__()
+        self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
+        self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
+        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
+
+
+    def forward(self, x):
+        enc_ftrs = self.encoder(x)
+        out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
+        #out      = self.head(out)
+        return out
+
+@torch.no_grad()
+def evaluate(model, val_loader):
+    model.eval()
+    outputs = [model.validation_step(batch) for batch in val_loader]
+    return model.validation_epoch_end(outputs)
+
+def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.Adam):
+    history = []
+    optimizer = opt_func(model.parameters(), lr, eps=1e-07)
+    for epoch in range(epochs):
+        # Training Phase 
+        model.train()
+        train_losses = []
+        for batch in train_loader:
+            loss = model.training_step(batch)
+            train_losses.append(loss)
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+        # Validation phase
+        result = evaluate(model, val_loader)
+        result['train_loss'] = torch.stack(train_losses).mean().item()
+        model.epoch_end(epoch, result)
+        history.append(result)
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V9_2.pth')
+    torch.save(history,f'{path}/history_V9_2.pt')
+    return history
+
+def get_default_device():
+    """Pick GPU if available, else CPU"""
+    if torch.cuda.is_available():
+        return torch.device('cuda')
+    else:
+      print('no GPU found')
+      return torch.device('cpu')
+      
+def to_device(data, device):
+    """Move tensor(s) to chosen device"""
+    if isinstance(data, (list,tuple)):
+        return [to_device(x, device) for x in data]
+    return data.to(device, non_blocking=True)
+
+class DeviceDataLoader():
+    """Wrap a dataloader to move data to a device"""
+    def __init__(self, dl, device):
+        self.dl = dl
+        self.device = device
+        
+    def __iter__(self):
+        """Yield a batch of data after moving it to device"""
+        for b in self.dl: 
+            yield to_device(b, self.device)
+
+    def __len__(self):
+        """Number of batches"""
+        return len(self.dl)
+
+def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
+    dataset = torch.load(path) # load the pytorch dataset
+    #size_data = 500 #shrink dataset for colab
+    #rest = len(dataset) -size_data
+    #dataset,_ = torch.utils.data.random_split(dataset, [size_data, rest])
+    val_size = int(len(dataset) * percent_val)
+    train_size = len(dataset) - val_size
+
+    train_ds, val_ds = random_split(dataset, [train_size, val_size])
+    # Create DataLoader
+    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=1, pin_memory=True)
+    valid_dl = DataLoader(val_ds, batch_size, num_workers=1, pin_memory=True)
+    
+    return train_dl, valid_dl
+
+if __name__ == '__main__':
+    #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
+    seed = 373686838
+    num_epochs = 1300
+    b_size = 32
+    opt_func = torch.optim.Adam
+    lr = 0.00001
+    kernel = 7
+    print(f'number of epochs: {num_epochs}')
+    print(f'batchsize: {b_size}')
+    print(f'learning rate: {lr}')
+    print(f'kernel size is: {kernel}')
+    if not use_seeds:
+      seed = random.randrange(2**32 - 1)
+    print(f' seed is: {seed}')
+    torch.manual_seed(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    device = get_default_device()
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    train_dl = DeviceDataLoader(train_dl, device)
+    valid_dl = DeviceDataLoader(valid_dl, device)
+
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/UNet_V9_3.py b/UNet/UNet_V9_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..997dcfec17c601400f5116467ba2563f18cfed06
--- /dev/null
+++ b/UNet/UNet_V9_3.py
@@ -0,0 +1,253 @@
+# like V6_2, but with only the two phase channels as input
+"""UNet_V6.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import random
+from torch.utils.data.sampler import SubsetRandomSampler
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import TensorDataset
+import torch.nn.functional as F
+from torch.utils.data import random_split
+from torch.nn.modules.activation import ReLU
+
+class depthwise_separable_conv(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(depthwise_separable_conv, self).__init__()
+        self.depthwise_1 = nn.Conv3d(in_c, in_c, kernel_size= kernel_size, padding=padding[0], groups=in_c, bias=True)
+        self.pointwise_1 = nn.Conv3d(in_c, out_1_c, kernel_size=1, bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.depthwise_2 = nn.Conv3d(out_1_c, out_1_c, kernel_size= kernel_size, padding=padding[1], groups=out_1_c, bias=True)
+        self.pointwise_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size=1, bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.pointwise_1(self.depthwise_1(x))))
+        return self.batch_norm_2(self.relu(self.pointwise_2(self.depthwise_2(x))))
+
+class convolution_Layer(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(convolution_Layer, self).__init__()
+        self.conv_1 = nn.Conv3d(in_c, out_1_c, kernel_size= kernel_size, padding=padding[0], bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.conv_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size= kernel_size, padding=padding[1], bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.conv_1(x)))
+        return self.batch_norm_2(self.relu(self.conv_2(x)))
+
+class head_layer(nn.Module):
+    def __init__(self, in_c, out_c = 1, padding = "same"):
+        super(head_layer, self).__init__()
+        self.conv =  nn.Conv3d(in_c, out_c, kernel_size=1, bias=True)
+        self.sig = nn.Sigmoid()
+    def forward(self, x):
+        return self.sig(self.conv(x)) #convolution
+        #return self.sig(self.pointwise(self.depthwise(x))) #convolution
+
+class Encoder(nn.Module):
+    def __init__(self,kernel_size, chs, padding=((0,"same"),("same","same"),("same","same"))):
+      super().__init__()
+      self.channels = chs
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1], chs[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
+      self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
+      #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
+      self.periodic_upsample = nn.ReflectionPad3d(int((kernel_size-1)/2))
+
+    
+    def forward(self, x):
+      ftrs = []
+      x = self.periodic_upsample(x)
+      for i in range(len(self.channels)):
+        ftrs.append(x)
+        x =self.enc_blocks[i](x)
+        #print(f'size of ftrs: {ftrs[i].size()}')
+        x = self.pool(x)
+        #print(f'size of x after pooling{x.size()}')
+      ftrs.append(x)
+      #print(f'size of ftrs: {ftrs[3].size()}')
+      #print(f'length of ftrs: {len(ftrs)}')
+      return ftrs
+
+class Decoder(nn.Module):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=(("same","same"),("same","same"),("same","same"))):
+        super().__init__()
+        assert len(chs_conv) == len(chs_upsampling)
+        self.chs         = chs_upsampling
+        self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
+        self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1], chs_conv[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
+        self.head = head_layer(chs_conv[-1][2])
+    def forward(self, x, encoder_features):
+        for i in range(len(self.chs)):
+            x        = self.upconvs[i](x)
+            #print(f'size after upsampling: {x.size()}')
+            enc_ftrs = self.crop(encoder_features[i], x)
+            x        = torch.cat([x, enc_ftrs], dim=1)
+            #print(f'size after cropping&cat: {x.size()}')
+
+            x        = self.dec_blocks[i](x)
+            #print(f'size after convolution: {x.size()}')
+        x = self.head(x)    
+        return x
+    
+    def crop(self, tensor, target_tensor):
+        target_size = target_tensor.size()[2]
+        tensor_size = tensor.size()[2]
+        delta = tensor_size - target_size
+        delta = delta // 2
+        return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta,delta:tensor_size-delta]
+
+class UNetBase(nn.Module):
+    def training_step(self, batch):
+        input, labels = batch 
+        out = self(input)                  # Generate predictions
+        loss = F.l1_loss(out, labels) # Calculate loss
+        return loss
+    
+    def validation_step(self, batch):
+        input, labels = batch 
+        out = self(input)                    # Generate predictions
+        loss = F.l1_loss(out, labels)   # Calculate loss
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
+        return {'val_loss': loss.detach(), 'val_acc': acc}
+        
+    def validation_epoch_end(self, outputs):
+        batch_losses = [x['val_loss'] for x in outputs]
+        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
+        batch_accs = [x['val_acc'] for x in outputs]
+        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
+        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
+    
+    def epoch_end(self, epoch, result):
+        print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
+            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
+        
+def accuracy(outputs, labels,normalization, threshold = 0.05):
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
+    right_predic = torch.sum(error < threshold)
+    percentage = ((right_predic/torch.numel(error))*100.)
+    return percentage
+    
+class UNet(UNetBase):
+    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
+        super().__init__()
+        self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
+        self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
+        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
+
+
+    def forward(self, x):
+        enc_ftrs = self.encoder(x)
+        out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
+        #out      = self.head(out)
+        return out
+
+@torch.no_grad()
+def evaluate(model, val_loader):
+    model.eval()
+    outputs = [model.validation_step(batch) for batch in val_loader]
+    return model.validation_epoch_end(outputs)
+
+def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.Adam):
+    history = []
+    optimizer = opt_func(model.parameters(), lr, eps=1e-07)
+    for epoch in range(epochs):
+        # Training Phase 
+        model.train()
+        train_losses = []
+        for batch in train_loader:
+            loss = model.training_step(batch)
+            train_losses.append(loss)
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+        # Validation phase
+        result = evaluate(model, val_loader)
+        result['train_loss'] = torch.stack(train_losses).mean().item()
+        model.epoch_end(epoch, result)
+        history.append(result)
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V9_3.pth')
+    torch.save(history,f'{path}/history_V9_3.pt')
+    return history
+
+def get_default_device():
+    """Pick GPU if available, else CPU"""
+    if torch.cuda.is_available():
+        return torch.device('cuda')
+    else:
+      print('no GPU found')
+      return torch.device('cpu')
+      
+def to_device(data, device):
+    """Move tensor(s) to chosen device"""
+    if isinstance(data, (list,tuple)):
+        return [to_device(x, device) for x in data]
+    return data.to(device, non_blocking=True)
+
+class DeviceDataLoader():
+    """Wrap a dataloader to move data to a device"""
+    def __init__(self, dl, device):
+        self.dl = dl
+        self.device = device
+        
+    def __iter__(self):
+        """Yield a batch of data after moving it to device"""
+        for b in self.dl: 
+            yield to_device(b, self.device)
+
+    def __len__(self):
+        """Number of batches"""
+        return len(self.dl)
+
+def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
+    dataset = torch.load(path) # load the pytorch dataset
+    #size_data = 500 #shrink dataset for colab
+    #rest = len(dataset) -size_data
+    #dataset,_ = torch.utils.data.random_split(dataset, [size_data, rest])
+    val_size = int(len(dataset) * percent_val)
+    train_size = len(dataset) - val_size
+
+    train_ds, val_ds = random_split(dataset, [train_size, val_size])
+    # Create DataLoader
+    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=1, pin_memory=True)
+    valid_dl = DataLoader(val_ds, batch_size, num_workers=1, pin_memory=True)
+    
+    return train_dl, valid_dl
+
+if __name__ == '__main__':
+    #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
+    seed = 373686838
+    num_epochs = 1300
+    b_size = 32
+    opt_func = torch.optim.Adam
+    lr = 0.00001
+    kernel = 3
+    print(f'number of epochs: {num_epochs}')
+    print(f'batchsize: {b_size}')
+    print(f'learning rate: {lr}')
+    print(f'kernel size is: {kernel}')
+    if not use_seeds:
+      seed = random.randrange(2**32 - 1)
+    print(f' seed is: {seed}')
+    torch.manual_seed(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    device = get_default_device()
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    train_dl = DeviceDataLoader(train_dl, device)
+    valid_dl = DeviceDataLoader(valid_dl, device)
+
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/postprocessing.py b/UNet/postprocessing.py
deleted file mode 100644
index 7a836dc785b537de259b9f33582439a16621438f..0000000000000000000000000000000000000000
--- a/UNet/postprocessing.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import torch
-import numpy as np
-import matplotlib
-import matplotlib.pyplot as plt
-import UNet_V4 as UNet
-import pyvista as pv
-from matplotlib.colors import ListedColormap
-import copy
-
-
-
-def predict_stress(image_id, normalization, model, dataset, threshold = 0.05):
-    input, output = dataset[image_id]
-    input = copy.deepcopy(input)
-    output = copy.deepcopy(output)
-    input = torch.unsqueeze(input,0)
-    output = torch.unsqueeze(output,0)
-    xb = UNet.to_device(input, device)
-    model.eval()
-    prediction = model(xb)
-    input = input.detach().numpy()
-    prediction = prediction.detach().numpy()
-    output = output.detach().numpy()
-    prediction = rescale(prediction, normalization)
-    output = rescale(output, normalization)
-    error = (abs(output - prediction)/output)
-    print(f'Maximum error is : {error.max()*100.:.4} %')
-    print(f'average error is : {error.mean()*100.:.4} %')
-    right_predic = (error < threshold).sum()
-    print(f'{(right_predic/error.size)*100.:.4}% of voxels have a diviation less than {threshold*100.}%')
-    grains = grain_matrix(input)
-    plot_difference(error,grains,output, threshold)
-
-def rescale(output, normalization):
-    output_rescale = output.reshape(32,32,32)
-    min_label, max_label,_ = normalization
-    output_rescale *= max_label
-    output_rescale += min_label
-    return output_rescale
-
-def get_colormap(mesh, threshold):    
-    black = np.array([11/256, 11/256, 11/256, 1])
-    yellow = np.array([255/256, 237/256, 0/256, 1])
-    orange = np.array([245/256, 167/256, 0/256, 1])
-    red = np.array([203/256, 6/256, 29/256, 1])
-    bordeaux = np.array([160/256, 15/256, 53/256, 1])
-
-    mapping = np.linspace(mesh['error'].min(), mesh['error'].max(),256)
-    newcolors = np.empty((256,4))
-    newcolors[mapping >=0.75] = bordeaux
-    newcolors[mapping <0.75] = red
-    newcolors[mapping <0.5] = orange
-    newcolors[mapping <0.25] = yellow
-    newcolors[mapping <threshold] = black
-    return ListedColormap(newcolors)
-
-
-def plot_losses(history):   
-    losses = [x['val_loss'] for x in history[50:]]
-    plt.plot(losses, '-x')
-    plt.xlabel('epoch')
-    plt.ylabel('loss')
-    plt.title('Loss vs. No. of epochs')
-
-def grain_matrix(input):    
-    matrix = input[0,0,:,:,:]
-    unique_angles = np.unique(matrix)
-    for index, angle in enumerate(unique_angles):
-        matrix[matrix == angle] = index
-    return matrix
-def plot_difference(error, grains,stress, threshold):    
-    #opacity = np.where(error < 0.1, 1, 0.)
-    grid_1 = pv.UniformGrid()
-    grid_1.dimensions = np.array(error.shape) +1
-    grid_1.spacing = (1,1,1)
-    grid_1.cell_data["error"] = error.flatten(order = "F")
-    grid_2 = pv.UniformGrid()
-    grid_2.dimensions = np.array(grains.shape) +1
-    grid_2.spacing = (1,1,1)
-    grid_2.cell_data["grains"] = grains.flatten(order = "F")
-    grid_3 = pv.UniformGrid()
-    grid_3.dimensions = np.array(stress.shape) +1
-    grid_3.spacing = (1,1,1)
-    grid_3.cell_data["stress"] = stress.flatten(order = "F")
-    my_colormap = get_colormap(grid_1, threshold)
-    #grid.cell_data["opacity"] = opacity.flatten(order = "F")
-    plotter = pv.Plotter(shape=(3,1))
-    #plotter.add_mesh(grid.copy(), scalars='error', opacity='opacity', use_transparency=True, show_edges=True)
-    plotter.subplot(0,0)
-    plotter.add_mesh_clip_plane(grid_2,scalars = 'grains', show_edges=True)
-    plotter.subplot(2,0)
-    plotter.add_mesh_clip_plane(grid_1,scalars = 'error', show_edges=True, cmap = my_colormap)
-    plotter.subplot(1,0)
-    plotter.add_mesh_clip_plane(grid_3,scalars = 'stress', show_edges=True)   
-    plotter.link_views()
-    plotter.show()
-if __name__ == '__main__':
-    dataset = torch.load('E:/Data/damask3/UNet/Input/Training_Dataset_normalized_V2.pt')
-    #history = torch.load('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/output/V4/history_4.pt')
-    #history_2 = torch.load('E:/Data/damask3/UNet/output/history_test.pt')
-    normalization = np.load('E:/Data/damask3/UNet/Input/Norm_min_max_V2.npy', allow_pickle=True)
-    model = UNet.UNet()
-    model.load_state_dict(torch.load('E:/Data/damask3/UNet/output/V4/Unet_dict_V4.pth',map_location=torch.device('cpu')))
-    device = UNet.get_default_device()
-    model = UNet.to_device(model.double(), device)
-    predict_stress(34,normalization = normalization, model=model,dataset=dataset)
\ No newline at end of file
diff --git a/UNet/postprocessing_new.ipynb b/UNet/postprocessing_new.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..9924d35415f954b4e1be40117b4901090354da38
--- /dev/null
+++ b/UNet/postprocessing_new.ipynb
@@ -0,0 +1,279 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import UNet_V9 as UNet\n",
+    "import pyvista as pv\n",
+    "from matplotlib.colors import ListedColormap\n",
+    "import copy\n",
+    "import scipy.stats as stats\n",
+    "import pylab as pl"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 36,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def predict_stress(image_id, normalization, model, dataset,grain_data, threshold = 0.1):\n",
+    "    input, output = dataset[image_id]\n",
+    "    grain,_ = grain_data[image_id]\n",
+    "    grain = copy.deepcopy(grain)\n",
+    "    grain = torch.unsqueeze(grain,0)\n",
+    "    grain = grain.detach().numpy()\n",
+    "    input = copy.deepcopy(input)\n",
+    "    output = copy.deepcopy(output)\n",
+    "    input = torch.unsqueeze(input,0)\n",
+    "    output = torch.unsqueeze(output,0)\n",
+    "    xb = UNet.to_device(input, device)\n",
+    "    model.eval()\n",
+    "    prediction = model(xb)\n",
+    "    input = input.detach().numpy()\n",
+    "    prediction = prediction.detach().numpy()\n",
+    "    output = output.detach().numpy()\n",
+    "    prediction = rescale(prediction, normalization)\n",
+    "    output = rescale(output, normalization)\n",
+    "    error = (abs(output - prediction)/output)\n",
+    "    print(f'Maximum error is : {error.max()*100.:.4} %')\n",
+    "    print(f'average error is : {error.mean()*100.:.4} %')\n",
+    "    right_predic = (error < threshold).sum()\n",
+    "    print(f'{(right_predic/error.size)*100.:.4}% of voxels have a diviation less than {threshold*100.}%')\n",
+    "    grains = grain_matrix_colormap(grain)\n",
+    "    plot_difference(error,grains,output, threshold)\n",
+    "\n",
+    "def rescale(output, normalization):\n",
+    "    output_rescale = output.reshape(output.shape[2],output.shape[3],output.shape[4])\n",
+    "    if normalization is not None: \n",
+    "        min_label, max_label = normalization\n",
+    "        output_rescale *= max_label\n",
+    "        output_rescale += min_label\n",
+    "    return output_rescale\n",
+    "\n",
+    "\n",
+    "def get_colormap(mesh, threshold):    \n",
+    "    black = np.array([11/256, 11/256, 11/256, 1])\n",
+    "    yellow = np.array([255/256, 237/256, 0/256, 1])\n",
+    "    orange = np.array([245/256, 167/256, 0/256, 1])\n",
+    "    red = np.array([203/256, 6/256, 29/256, 1])\n",
+    "    bordeaux = np.array([160/256, 15/256, 53/256, 1])\n",
+    "\n",
+    "    mapping = np.linspace(mesh['error'].min(), mesh['error'].max(),256)\n",
+    "    newcolors = np.empty((256,4))\n",
+    "    newcolors[mapping >=0.5] = bordeaux\n",
+    "    newcolors[mapping <0.5] = red\n",
+    "    newcolors[mapping <0.3] = orange\n",
+    "    newcolors[mapping <0.1] = yellow\n",
+    "    newcolors[mapping <0.05] = black\n",
+    "    return ListedColormap(newcolors)\n",
+    "\n",
+    "\n",
+    "def plot_losses(history):   \n",
+    "    train_losses = [x['train_loss'] for x in history[50:]]\n",
+    "    val_acc = [x['val_acc'] for x in history[50:]]\n",
+    "    val_loss = [x['val_loss'] for x in history[50:]]\n",
+    "\n",
+    "    pl.plot(train_losses, '-x',)\n",
+    "    pl.plot(val_acc, '-x',)\n",
+    "    pl.plot(val_loss, '-x',)\n",
+    "\n",
+    "    pl.xlabel('epoch')\n",
+    "    pl.ylabel('loss')\n",
+    "    pl.title('Loss vs. No. of epochs')\n",
+    "\n",
+    "def grain_matrix_colormap(input):    \n",
+    "    matrix_grains = input[0,0,:,:,:]\n",
+    "    matrix_ferrit = input[0,5,:,:,:] #matrix with elements = 1 if the phase is ferrit else 0\n",
+    "    #unique_angles = np.unique(matrix_grains)\n",
+    "    matrix_ferrit_grains = np.multiply(matrix_grains, matrix_ferrit)# matrix where only the ferrit grains are nonzero\n",
+    "    index_ferrit_angles = np.unique(matrix_ferrit_grains[matrix_ferrit_grains != 0])\n",
+    "    index_martensite_angles = np.setdiff1d(np.unique(matrix_grains),index_ferrit_angles)\n",
+    "    for index, angle in enumerate(index_ferrit_angles):\n",
+    "        matrix_grains[matrix_grains == angle] = (index) # matrix with id for each grain add 1 to perfome the elementwise multiplication to get the index of phase grains\n",
+    "    for index, angle in enumerate(index_martensite_angles):\n",
+    "        matrix_grains[matrix_grains == angle] = (index + len(index_ferrit_angles) +100) # matrix with id for each grain add 1 to perfome the elementwise multiplication to get the index of phase grains\n",
+    "\n",
+    "    return matrix_grains"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "def plot_difference(error, grains, stress, threshold):   \n",
+    "    grid_1 = pv.UniformGrid()\n",
+    "    grid_1.dimensions = np.array(error.shape) +1\n",
+    "    grid_1.spacing = (1,1,1)\n",
+    "    grid_1.cell_data[\"error\"] = error.flatten(order = \"F\")\n",
+    "    grid_2 = pv.UniformGrid()\n",
+    "    grid_2.dimensions = np.array(grains.shape) +1\n",
+    "    grid_2.spacing = (1,1,1)\n",
+    "    grid_2.cell_data[\"grain\"] = grains.flatten(order = \"F\")\n",
+    "    grid_3 = pv.UniformGrid()\n",
+    "    grid_3.dimensions = np.array(stress.shape) +1\n",
+    "    grid_3.spacing = (1,1,1)\n",
+    "    grid_3.cell_data[\"stress\"] = stress.flatten(order = \"F\")\n",
+    "    colormap_error = get_colormap(grid_1, threshold)\n",
+    "    p = pv.Plotter(notebook=False,shape=(3,1))\n",
+    "    sargs_grain = dict(height=0.75, vertical=True, position_x=0.1, position_y=0.05, n_labels=0)\n",
+    "    sargs = dict(height=0.75, vertical=True, position_x=0.1, position_y=0.05)\n",
+    "\n",
+    "\n",
+    "\n",
+    "    def my_plane_func(normal, origin):\n",
+    "        slc_1 = grid_1.slice(normal=normal, origin=origin)\n",
+    "        slc_2 = grid_2.slice(normal=normal, origin=origin)\n",
+    "        slc_3 = grid_3.slice(normal=normal, origin=origin)\n",
+    "        p.subplot(0,0)\n",
+    "        p.add_mesh(slc_2, name=\"my_slice_2\", cmap = 'RdBu', annotations = annotations, scalar_bar_args=sargs_grain)\n",
+    "        p.subplot(2,0)\n",
+    "        p.add_mesh(slc_1, name=\"my_slice_1\",clim=[0.01, 1.0], below_color = 'blue', above_color = 'red', cmap = colormap_error, scalar_bar_args=sargs)\n",
+    "        p.subplot(1,0)\n",
+    "        p.add_mesh(slc_3, name=\"my_slice_3\", scalar_bar_args=sargs)\n",
+    "\n",
+    "    p.subplot(0,0)\n",
+    "    annotations = {\n",
+    "        0: 'Ferrite',\n",
+    "        grains.max(): 'Martensite',\n",
+    "    }\n",
+    "    p.add_title('Grains',font_size=10)\n",
+    "    p.add_mesh(grid_2 ,opacity=0, cmap = 'RdBu', annotations = annotations, scalar_bar_args=sargs_grain)\n",
+    "    p.add_plane_widget(my_plane_func)\n",
+    "    p.subplot(2,0)\n",
+    "    p.add_title('Error',font_size=10)\n",
+    "    p.add_mesh(grid_1,scalars = \"error\" ,clim=[0.01, 1.0], below_color = 'blue', above_color = 'red',opacity=0)\n",
+    "    p.add_plane_widget(my_plane_func)\n",
+    "\n",
+    "    p.subplot(1,0)\n",
+    "    p.add_title('Stress',font_size=10)\n",
+    "    p.add_mesh(grid_3,scalars = \"stress\" ,opacity=0)\n",
+    "    p.add_plane_widget(my_plane_func)\n",
+    "\n",
+    "    p.link_views()\n",
+    "    p.show()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "no GPU found\n"
+     ]
+    }
+   ],
+   "source": [
+    "Training_data = torch.load('E:/Data/damask3/UNet/Input/TD_norm_32_phase_only.pt')\n",
+    "grain_data = torch.load('E:/Data/damask3/UNet/Input/TD_norm_32_angles.pt')\n",
+    "history = torch.load('E:/Data/damask3/UNet/output/V6_64/history_V6_2_64.pt')\n",
+    "#history_2 = torch.load('E:/Data/damask3/UNet/output/history_test.pt')\n",
+    "normalization = np.load('E:/Data/damask3/UNet/Input/Norm_min_max_32_phase_only.npy', allow_pickle=True)\n",
+    "model = UNet.UNet()\n",
+    "model.load_state_dict(torch.load('E:/Data/damask3/UNet/output/V9/Unet_dict_V9_2.pth',map_location=torch.device('cpu')))\n",
+    "device = UNet.get_default_device()\n",
+    "model = UNet.to_device(model.double(), device)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_losses(history)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sample_index = np.random.randint(low=0, high=len(Training_data))\n",
+    "print(f'sample number: {sample_index}')\n",
+    "predict_stress(sample_index, normalization = normalization, model = model, dataset = Training_data,grain_data =grain_data)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mean_error, max_error, correct_per = dataset_evaluation( normalization = normalization, model = model, dataset = Training_data, threshold = 0.1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def dataset_evaluation( normalization = normalization, model = model, dataset = Training_data, threshold = 0.05):\n",
+    "    model.eval()\n",
+    "    mean_error = np.empty(len(dataset))\n",
+    "    max_error = np.empty(len(dataset))\n",
+    "    correct_per = np.empty(len(dataset)) #percentage of voxel that are guessed corrected, according to threshold\n",
+    "    for index in range(len(dataset)):\n",
+    "        input, output = dataset[index]\n",
+    "        input = copy.copy(input)\n",
+    "        output = copy.copy(output)\n",
+    "        input = torch.unsqueeze(input,0)\n",
+    "        output = torch.unsqueeze(output,0)\n",
+    "        xb = UNet.to_device(input, device)\n",
+    "        prediction = model(xb)\n",
+    "        input = input.detach().numpy()\n",
+    "        prediction = prediction.detach().numpy()\n",
+    "        output = output.detach().numpy()\n",
+    "        prediction = rescale(prediction, normalization)\n",
+    "        output = rescale(output, normalization)\n",
+    "        error = (abs(output - prediction)/output)\n",
+    "        right_predic = (error < threshold).sum()\n",
+    "        mean_error[index] = error.mean()*100.\n",
+    "        max_error[index] = error.max()*100.\n",
+    "        correct_per[index] = right_predic * 100.\n",
+    "    return mean_error, max_error, correct_per\n",
+    "        "
+   ]
+  }
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "97ae2fbf52e0575424be8b71df1b468d27bac9d21e20089d11e8b4b02c5eac36"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.9.5 64-bit ('base': conda)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.5"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/UNet/postprocessing.ipynb b/UNet/postprocessing_old.ipynb
similarity index 54%
rename from UNet/postprocessing.ipynb
rename to UNet/postprocessing_old.ipynb
index 47b9db34754b5dd12be84ee9b03c49d5df3604b4..1a173b1672e50689fb8a7d4b6af1111d00795d14 100644
--- a/UNet/postprocessing.ipynb
+++ b/UNet/postprocessing_old.ipynb
@@ -2,49 +2,24 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
     "import torch\n",
     "import numpy as np\n",
-    "import matplotlib\n",
     "import matplotlib.pyplot as plt\n",
-    "import UNet_V4 as UNet\n",
+    "import UNet_V9 as UNet\n",
     "import pyvista as pv\n",
     "from matplotlib.colors import ListedColormap\n",
     "import copy\n",
-    "from pyvista import examples\n"
+    "from pyvista import examples\n",
+    "from torch.utils.data import TensorDataset"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "ename": "NameError",
-     "evalue": "name 'dataset' is not defined",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
-      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_16728/3899246243.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[1;32mfor\u001b[0m \u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mb\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mdataset\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;36m1.0\u001b[0m \u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m         \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"dataset kaputt\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m     \u001b[1;31m#if torch.max(b) > 1.0 :\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m         \u001b[1;31m#print(\"dataset kaputt\")\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;31mNameError\u001b[0m: name 'dataset' is not defined"
-     ]
-    }
-   ],
-   "source": [
-    "for a, b in dataset:\n",
-    "    if torch.max(a) > 1.0 :\n",
-    "        print(\"dataset kaputt\")\n",
-    "    #if torch.max(b) > 1.0 :\n",
-    "        #print(\"dataset kaputt\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 56,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -67,17 +42,26 @@
     "    print(f'average error is : {error.mean()*100.:.4} %')\n",
     "    right_predic = (error < threshold).sum()\n",
     "    print(f'{(right_predic/error.size)*100.:.4}% of voxels have a diviation less than {threshold*100.}%')\n",
-    "    grains = grain_matrix(input)\n",
-    "    plot_difference(error,grains,output, threshold)\n",
+    "    grains,colormap = grain_matrix_colormap(input)\n",
+    "    plot_difference(error,grains,colormap,output, threshold)\n",
     "\n",
     "def rescale(output, normalization):\n",
-    "    output_rescale = output.reshape(32,32,32)\n",
+    "    output_rescale = output.reshape(output.shape[2],output.shape[3],output.shape[4])\n",
     "    if normalization is not None: \n",
     "        min_label, max_label,_ = normalization\n",
     "        output_rescale *= max_label\n",
     "        output_rescale += min_label\n",
     "    return output_rescale\n",
     "\n",
+    "def get_colormap_grains(grains,index_ferrit_grains,index_martensite_grains):\n",
+    "    black = np.array([11/256, 11/256, 11/256, 1])\n",
+    "    red = np.array([203/256, 6/256, 29/256, 1])\n",
+    "    new_color = np.empty(np.unique(grains),4)\n",
+    "    new_color[index_ferrit_grains] = black\n",
+    "    new_color[index_ferrit_grains] = red\n",
+    "    return ListedColormap(new_color)\n",
+    "\n",
+    "\n",
     "def get_colormap(mesh, threshold):    \n",
     "    black = np.array([11/256, 11/256, 11/256, 1])\n",
     "    yellow = np.array([255/256, 237/256, 0/256, 1])\n",
@@ -96,13 +80,19 @@
     "\n",
     "\n",
     "def plot_losses(history):   \n",
-    "    losses = [x['val_loss'] for x in history[50:]]\n",
-    "    plt.plot(losses, '-x')\n",
+    "    train_losses = [x['train_loss'] for x in history[50:]]\n",
+    "    val_acc = [x['val_acc'] for x in history[50:]]\n",
+    "    val_loss = [x['val_loss'] for x in history[50:]]\n",
+    "\n",
+    "    plt.plot(train_losses, '-x',)\n",
+    "    plt.plot(val_acc, '-x',)\n",
+    "    plt.plot(val_loss, '-x',)\n",
+    "\n",
     "    plt.xlabel('epoch')\n",
     "    plt.ylabel('loss')\n",
     "    plt.title('Loss vs. No. of epochs')\n",
     "\n",
-    "def grain_matrix(input):    \n",
+    "def grain_matrix_colormap(input):    \n",
     "    matrix_grains = input[0,0,:,:,:]\n",
     "    matrix_ferrit = input[0,5,:,:,:] #matrix with elements = 1 if the phase is ferrit else 0\n",
     "    unique_angles = np.unique(matrix_grains)\n",
@@ -111,29 +101,22 @@
     "    matrix_ferrit_grains = np.multiply(matrix_grains, matrix_ferrit)# matrix where only the ferrit grains are nonzero\n",
     "    index_ferrit_grains = np.unique(matrix_ferrit_grains[matrix_ferrit_grains != 0])\n",
     "    index_martensite_grains = np.setdiff1d(np.unique(matrix_grains),index_ferrit_grains)\n",
-    "    return matrix_grains,index_ferrit_grains,index_martensite_grains"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 57,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "input, label = dataset[0]\n",
-    "input = copy.deepcopy(input)\n",
-    "input= input[np.newaxis,...]\n",
-    "_,index_ferrit_grains,index_martensite_grains = grain_matrix(input)"
+    "    black = np.array([11/256, 11/256, 11/256, 1])\n",
+    "    red = np.array([203/256, 6/256, 29/256, 1])\n",
+    "    new_color = np.empty((np.unique(matrix_grains),4))\n",
+    "    new_color[index_ferrit_grains] = black\n",
+    "    new_color[index_martensite_grains] = red\n",
+    "    return matrix_grains,ListedColormap(new_color)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 35,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
     "\n",
-    "def plot_difference(error, grains,stress, threshold):   \n",
+    "def plot_difference(error, grains, colormap_grains, stress, threshold):   \n",
     "    grid_1 = pv.UniformGrid()\n",
     "    grid_1.dimensions = np.array(error.shape) +1\n",
     "    grid_1.spacing = (1,1,1)\n",
@@ -146,7 +129,7 @@
     "    grid_3.dimensions = np.array(stress.shape) +1\n",
     "    grid_3.spacing = (1,1,1)\n",
     "    grid_3.cell_data[\"stress\"] = stress.flatten(order = \"F\")\n",
-    "    my_colormap = get_colormap(grid_1, threshold)\n",
+    "    colormap_error = get_colormap(grid_1, threshold)\n",
     "    p = pv.Plotter(notebook=False,shape=(3,1))\n",
     "    def my_plane_func(normal, origin):\n",
     "        slc_1 = grid_1.slice(normal=normal, origin=origin)\n",
@@ -154,19 +137,19 @@
     "        slc_3 = grid_3.slice(normal=normal, origin=origin)\n",
     "\n",
     "        p.subplot(0,0)\n",
-    "        p.add_mesh(slc_2, name=\"my_slice_2\", cmap = 'tab20c')\n",
+    "        p.add_mesh(slc_2, name=\"my_slice_2\", cmap = colormap_grains)\n",
     "        p.subplot(2,0)\n",
-    "        p.add_mesh(slc_1, name=\"my_slice_1\", cmap=my_colormap)\n",
+    "        p.add_mesh(slc_1, name=\"my_slice_1\", cmap = colormap_error)\n",
     "        p.subplot(1,0)\n",
     "        p.add_mesh(slc_3, name=\"my_slice_3\")\n",
     "\n",
     "    p.subplot(0,0)\n",
     "    p.add_title('Grains',font_size=10)\n",
-    "    p.add_mesh(grid_2,scalars = \"grain\" ,opacity=0, cmap = 'tab20c')\n",
+    "    p.add_mesh(grid_2,scalars = \"grain\" ,opacity=0, cmap = colormap_grains)\n",
     "    p.add_plane_widget(my_plane_func)\n",
     "    p.subplot(2,0)\n",
     "    p.add_title('Error',font_size=10)\n",
-    "    p.add_mesh(grid_1,scalars = \"error\" ,opacity=0,cmap=my_colormap)\n",
+    "    p.add_mesh(grid_1,scalars = \"error\" ,opacity=0,cmap = colormap_error)\n",
     "    p.add_plane_widget(my_plane_func)\n",
     "\n",
     "    p.subplot(1,0)\n",
@@ -180,7 +163,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 50,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [
     {
@@ -192,52 +175,62 @@
     }
    ],
    "source": [
-    "dataset = torch.load('E:/Data/damask3/UNet/Input/Training_Dataset_normalized.pt')\n",
-    "#history = torch.load('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/output/V4/history_4.pt')\n",
+    "dataset = torch.load('E:/Data/damask3/UNet/Input/Training_Dataset_normalized__32_V2.pt')\n",
+    "#history = torch.load('E:/Data/damask3/UNet/output/V6_64/history_V6_2_64.pt')\n",
     "#history_2 = torch.load('E:/Data/damask3/UNet/output/history_test.pt')\n",
-    "normalization = np.load('E:/Data/damask3/UNet/Input/Norm_min_max.npy', allow_pickle=True)\n",
+    "normalization = np.load('E:/Data/damask3/UNet/Input/Norm_min_max_32_V2.npy', allow_pickle=True)\n",
     "model = UNet.UNet()\n",
-    "model.load_state_dict(torch.load('E:/Data/damask3/UNet/output/V4/Unet_dict.pth',map_location=torch.device('cpu')))\n",
+    "model.load_state_dict(torch.load('E:/Data/damask3/UNet/output/V9/Unet_dict_V9.pth',map_location=torch.device('cpu')))\n",
     "device = UNet.get_default_device()\n",
     "model = UNet.to_device(model.double(), device)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 46,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Maximum error is : 335.0 %\n",
-      "average error is : 22.09 %\n",
-      "17.76% of voxels have a diviation less than 5.0%\n"
+      "Maximum error is : 272.0 %\n",
+      "average error is : 14.51 %\n",
+      "24.97% of voxels have a diviation less than 5.0%\n"
      ]
     },
     {
      "ename": "IndexError",
-     "evalue": "index 5 is out of bounds for axis 0 with size 1",
+     "evalue": "index 5 is out of bounds for axis 1 with size 2",
      "output_type": "error",
      "traceback": [
       "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
       "\u001b[1;31mIndexError\u001b[0m                                Traceback (most recent call last)",
-      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_21620/3913701557.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpredict_stress\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mnormalization\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnormalization\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
-      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_21620/2230561446.py\u001b[0m in \u001b[0;36mpredict_stress\u001b[1;34m(image_id, normalization, model, dataset, threshold)\u001b[0m\n\u001b[0;32m     18\u001b[0m     \u001b[0mright_predic\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0merror\u001b[0m \u001b[1;33m<\u001b[0m \u001b[0mthreshold\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msum\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     19\u001b[0m     \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf'{(right_predic/error.size)*100.:.4}% of voxels have a diviation less than {threshold*100.}%'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 20\u001b[1;33m     \u001b[0mgrains\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgrain_matrix\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     21\u001b[0m     \u001b[0mplot_difference\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0merror\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mgrains\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0moutput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mthreshold\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     22\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_21620/2230561446.py\u001b[0m in \u001b[0;36mgrain_matrix\u001b[1;34m(input)\u001b[0m\n\u001b[0;32m     55\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mgrain_matrix\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     56\u001b[0m     \u001b[0mmatrix_grains\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 57\u001b[1;33m     \u001b[0mmatrix_ferrit\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m5\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;31m#matrix with elements = 1 if the phase is ferrit else 0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     58\u001b[0m     \u001b[0munique_angles\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0munique\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmatrix_grains\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     59\u001b[0m     \u001b[1;32mfor\u001b[0m \u001b[0mindex\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mangle\u001b[0m \u001b[1;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0munique_angles\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;31mIndexError\u001b[0m: index 5 is out of bounds for axis 0 with size 1"
+      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_24852/1619662343.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpredict_stress\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnormalization\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnormalization\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdataset\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdataset\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
+      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_24852/1249646142.py\u001b[0m in \u001b[0;36mpredict_stress\u001b[1;34m(image_id, normalization, model, dataset, threshold)\u001b[0m\n\u001b[0;32m     18\u001b[0m     \u001b[0mright_predic\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0merror\u001b[0m \u001b[1;33m<\u001b[0m \u001b[0mthreshold\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msum\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     19\u001b[0m     \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf'{(right_predic/error.size)*100.:.4}% of voxels have a diviation less than {threshold*100.}%'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 20\u001b[1;33m     \u001b[0mgrains\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mcolormap\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgrain_matrix_colormap\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     21\u001b[0m     \u001b[0mplot_difference\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0merror\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mgrains\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mcolormap\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0moutput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mthreshold\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     22\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_24852/1249646142.py\u001b[0m in \u001b[0;36mgrain_matrix_colormap\u001b[1;34m(input)\u001b[0m\n\u001b[0;32m     70\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mgrain_matrix_colormap\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     71\u001b[0m     \u001b[0mmatrix_grains\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 72\u001b[1;33m     \u001b[0mmatrix_ferrit\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m5\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;31m#matrix with elements = 1 if the phase is ferrit else 0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     73\u001b[0m     \u001b[0munique_angles\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0munique\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmatrix_grains\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     74\u001b[0m     \u001b[1;32mfor\u001b[0m \u001b[0mindex\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mangle\u001b[0m \u001b[1;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0munique_angles\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;31mIndexError\u001b[0m: index 5 is out of bounds for axis 1 with size 2"
      ]
     }
    ],
    "source": [
-    "\n",
-    "predict_stress(7,normalization = normalization, model=model,dataset=dataset)\n"
+    "predict_stress(7, normalization = normalization, model = model, dataset = dataset)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_losses = [x['train_loss'] for x in history[50:]]\n",
+    "plt.plot(train_losses)\n",
+    "plt.show()\n"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 31,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [