diff --git a/UNet/NormalizeTrainingdata_32.ipynb b/UNet/NormalizeTrainingdata_32.ipynb
index 507145720e2b0586e42a31811f166d19c6ec202f..09ba0845689c733809e59eb53ab75365e62b3506 100644
--- a/UNet/NormalizeTrainingdata_32.ipynb
+++ b/UNet/NormalizeTrainingdata_32.ipynb
@@ -20,7 +20,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 4,
+      "execution_count": 2,
       "metadata": {
         "id": "OzNQI96lq3Pi"
       },
@@ -35,7 +35,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": null,
+      "execution_count": 13,
       "metadata": {
         "id": "lhj_0D1F0dWN"
       },
@@ -59,7 +59,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 6,
+      "execution_count": 3,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -82,7 +82,6 @@
         "#training_label = training_label[:,np.newaxis,...]\n",
         "phase= data[:,4,:,:,:].reshape(1987, 1,32,32,32)\n",
         "new_phase = np.ones(phase.shape) - phase #input[4]: martinsite, input[5]:ferrit\n",
-        "#new_training_data = np.append(data,new_channel,axis=1)\n",
         "#input = np.append(angles,phase,axis=1)\n",
         "#input = np.append(input,new_phase,axis=1)\n",
         "input = np.append(phase,new_phase,axis=1)\n",
@@ -101,18 +100,19 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 7,
+      "execution_count": 4,
       "metadata": {
         "id": "-Rbt8Brb9mM_"
       },
       "outputs": [],
       "source": [
-        "min_label = training_label.min()\n",
-        "max_label = training_label.max()\n",
-        "s_batch, width, height, depth = label.size()\n",
         "label_normalized = label.view(label.size(0), -1)\n",
-        "label_normalized -= label_normalized.min(1, keepdim=True)[0]\n",
-        "label_normalized /= label_normalized.max(1, keepdim=True)[0]\n",
+        "min_label = label_normalized.min()\n",
+        "max_label = label_normalized.max()\n",
+        "s_batch, width, height, depth = label.size()\n",
+        "label_normalized -= min_label\n",
+        "label_normalized /= max_label\n",
         "label_normalized = label_normalized.view(s_batch, width, height, depth)\n",
         "label_normalized = label_normalized[:,np.newaxis,...]"
       ]
@@ -142,7 +141,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 11,
+      "execution_count": 5,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -153,10 +152,41 @@
       "outputs": [],
       "source": [
         "dataset = TensorDataset(input,label_normalized) # create the pytorch dataset \n",
-        "#np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_V2.npy',[min_label, max_label,angles_min_max])\n",
-        "np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_V2.npy',[min_label, max_label])\n",
+        "#np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_angles.npy',[min_label, max_label,angles_min_max])\n",
+        "np.save('E:/Data/damask3/UNet/Input/Norm_min_max_32_phase_only.npy',[min_label, max_label])\n",
         "\n",
-        "torch.save(dataset,'E:/Data/damask3/UNet/Input/Training_Dataset_normalized__32_V2.pt')\n"
+        "torch.save(dataset,'E:/Data/damask3/UNet/Input/TD_norm_32_phase_only.pt')\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 13,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "diff = training_label-rescaled.reshape(1987,32,32,32).numpy()"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 14,
+      "metadata": {},
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "2.384185791015625e-07"
+            ]
+          },
+          "execution_count": 14,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "np.max(abs(diff))\n"
       ]
     }
   ],
diff --git a/UNet/UNet_V10.py b/UNet/UNet_V10.py
index 11f2b7ae5ec6dd34828e2a5120b585b2dec895c2..5efc2f75d0d3e1feaf208d306a42e6d369268a97 100644
--- a/UNet/UNet_V10.py
+++ b/UNet/UNet_V10.py
@@ -126,18 +126,20 @@ class UNetBase(nn.Module):
         print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
             epoch, result['train_loss'], result['val_loss'], result['val_acc']))
         
-def accuracy(outputs, labels, threshold = 0.05):
-    error = (abs(outputs - labels)/outputs)
+def accuracy(outputs, labels, normalization, threshold=0.05):
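+    # normalization holds [min_label, max_label]; labels were scaled as (x - min) / max, so the min/max offset gives the relative error in un-normalized units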
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
     right_predic = torch.sum(error < threshold)
     percentage = ((right_predic/torch.numel(error))*100.)
     return percentage
     
+    
 class UNet(UNetBase):
-    def __init__(self,kernel_size = 5, enc_chs=((2,32), (32,64), (64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192, 128),(160,64),(66,32))):
+    def __init__(self,kernel_size = 5, enc_chs=((2,32), (32,64), (64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192, 128),(160,64),(66,32)),normalization=np.array([0,1])):
         super().__init__()
         self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
         self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
-        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
 
 
     def forward(self, x):
@@ -220,6 +221,7 @@ def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
 
 if __name__ == '__main__':
     #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
     use_seeds = True
     seed = 2193910023
     num_epochs = 1300
@@ -238,9 +240,10 @@ if __name__ == '__main__':
     random.seed(seed)
     np.random.seed(seed)
     device = get_default_device()
-    train_dl, valid_dl = Create_Dataloader('/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
     train_dl = DeviceDataLoader(train_dl, device)
     valid_dl = DeviceDataLoader(valid_dl, device)
 
-    model = to_device(UNet(kernel_size=kernel).double(), device)
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,'/home/yk138599/Hiwi/damask3/UNet/output', opt_func)
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl, f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/UNet_V9.py b/UNet/UNet_V9_1.py
similarity index 90%
rename from UNet/UNet_V9.py
rename to UNet/UNet_V9_1.py
index 65b19bd6eea2f01bba1f949843d092cc2c9c26df..67edfbcdb7440ef6cbe86c974746ccfb49fe932a 100644
--- a/UNet/UNet_V9.py
+++ b/UNet/UNet_V9_1.py
@@ -116,7 +116,7 @@ class UNetBase(nn.Module):
         input, labels = batch 
         out = self(input)                    # Generate predictions
         loss = F.l1_loss(out, labels)   # Calculate loss
-        acc = accuracy(out.detach(), labels.detach())         # Calculate accuracy
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
         return {'val_loss': loss.detach(), 'val_acc': acc}
         
     def validation_epoch_end(self, outputs):
@@ -130,18 +130,20 @@ class UNetBase(nn.Module):
         print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
             epoch, result['train_loss'], result['val_loss'], result['val_acc']))
         
-def accuracy(outputs, labels, threshold = 0.05):
-    error = (abs(outputs - labels)/outputs)
+def accuracy(outputs, labels, normalization, threshold=0.05):
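+    # normalization holds [min_label, max_label]; the min/max offset converts the error back to the scale of the raw labels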
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
     right_predic = torch.sum(error < threshold)
     percentage = ((right_predic/torch.numel(error))*100.)
     return percentage
     
 class UNet(UNetBase):
-    def __init__(self,kernel_size = 7, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32))):
+    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
         super().__init__()
         self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
         self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
         #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
 
 
     def forward(self, x):
@@ -174,8 +175,8 @@ def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.
         result['train_loss'] = torch.stack(train_losses).mean().item()
         model.epoch_end(epoch, result)
         history.append(result)
-    torch.save(model.state_dict(),f'{path}/Unet_dict_V9.pth')
-    torch.save(history,f'{path}/history_V9.pt')
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V9_1.pth')
+    torch.save(history,f'{path}/history_V9_1.pt')
     return history
 
 def get_default_device():
@@ -224,7 +225,8 @@ def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
 
 if __name__ == '__main__':
     #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
-    use_seeds = False
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
     seed = 373686838
     num_epochs = 1300
     b_size = 32
@@ -242,9 +244,10 @@ if __name__ == '__main__':
     random.seed(seed)
     np.random.seed(seed)
     device = get_default_device()
-    train_dl, valid_dl = Create_Dataloader('/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
     train_dl = DeviceDataLoader(train_dl, device)
     valid_dl = DeviceDataLoader(valid_dl, device)
 
-    model = to_device(UNet(kernel_size=kernel).double(), device)
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,'/home/yk138599/Hiwi/damask3/UNet/output', opt_func)
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/UNet_V9_2.py b/UNet/UNet_V9_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..7337baa0bf57519d089d9edffc07139c2a2e0a1e
--- /dev/null
+++ b/UNet/UNet_V9_2.py
@@ -0,0 +1,256 @@
+# Like V6_2, but with only the phase channels (martensite/ferrite) as input
+"""UNet_V6.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import random
+from torch.utils.data.sampler import SubsetRandomSampler
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import TensorDataset
+import torch.nn.functional as F
+from torch.utils.data import random_split
+from torch.nn.modules.activation import ReLU
+
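+# Depthwise-separable 3D convolution block: a per-channel (grouped) conv followed by a 1x1x1 pointwise conv, applied twice, each with ReLU and batch norm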
+class depthwise_separable_conv(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(depthwise_separable_conv, self).__init__()
+        self.depthwise_1 = nn.Conv3d(in_c, in_c, kernel_size= kernel_size, padding=padding[0], groups=in_c, bias=True)
+        self.pointwise_1 = nn.Conv3d(in_c, out_1_c, kernel_size=1, bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.depthwise_2 = nn.Conv3d(out_1_c, out_1_c, kernel_size= kernel_size, padding=padding[1], groups=out_1_c, bias=True)
+        self.pointwise_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size=1, bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.pointwise_1(self.depthwise_1(x))))
+        return self.batch_norm_2(self.relu(self.pointwise_2(self.depthwise_2(x))))
+
+class convolution_Layer(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(convolution_Layer, self).__init__()
+        self.conv_1 = nn.Conv3d(in_c, out_1_c, kernel_size= kernel_size, padding=padding[0], bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.conv_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size= kernel_size, padding=padding[1], bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.conv_1(x)))
+        return self.batch_norm_2(self.relu(self.conv_2(x)))
+
+class head_layer(nn.Module):
+    def __init__(self, in_c, out_c = 1, padding = "same"):
+        super(head_layer, self).__init__()
+        self.conv =  nn.Conv3d(in_c, out_c, kernel_size=1, bias=True)
+        self.sig = nn.Sigmoid()
+    def forward(self, x):
+        return self.sig(self.conv(x)) #convolution
+        #return self.sig(self.pointwise(self.depthwise(x))) #convolution
+
+class Encoder(nn.Module):
+    def __init__(self,kernel_size, chs, padding=((0,"same"),("same","same"),("same","same"))):
+      super().__init__()
+      self.channels = chs
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1], chs[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
+      self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
+      #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
+      self.periodic_upsample = nn.ReflectionPad3d(int((kernel_size-1)/2))
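+      # mirror-pad by (kernel_size-1)/2 so the first conv block (padding 0) preserves the spatial size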
+
+    
+    def forward(self, x):
+      ftrs = []
+      x = self.periodic_upsample(x)
+      for i in range(len(self.channels)):
+        ftrs.append(x)
+        x =self.enc_blocks[i](x)
+        #print(f'size of ftrs: {ftrs[i].size()}')
+        x = self.pool(x)
+        #print(f'size of x after pooling{x.size()}')
+      ftrs.append(x)
+      #print(f'size of ftrs: {ftrs[3].size()}')
+      #print(f'length of ftrs: {len(ftrs)}')
+      return ftrs
+
+class Decoder(nn.Module):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=(("same","same"),("same","same"),("same","same"))):
+        super().__init__()
+        assert len(chs_conv) == len(chs_upsampling)
+        self.chs         = chs_upsampling
+        self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
+        self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1], chs_conv[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
+        self.head = head_layer(chs_conv[-1][2])
+    def forward(self, x, encoder_features):
+        for i in range(len(self.chs)):
+            x        = self.upconvs[i](x)
+            #print(f'size after upsampling: {x.size()}')
+            enc_ftrs = self.crop(encoder_features[i], x)
+            x        = torch.cat([x, enc_ftrs], dim=1)
+            #print(f'size after cropping&cat: {x.size()}')
+
+            x        = self.dec_blocks[i](x)
+            #print(f'size after convolution: {x.size()}')
+        x = self.head(x)    
+        return x
+    
+    def crop(self, tensor, target_tensor):
+        target_size = target_tensor.size()[2]
+        tensor_size = tensor.size()[2]
+        delta = tensor_size - target_size
+        delta = delta // 2
+        return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta,delta:tensor_size-delta]
+
+class UNetBase(nn.Module):
+    def training_step(self, batch):
+        input, labels = batch 
+        out = self(input)                  # Generate predictions
+        loss = F.l1_loss(out, labels) # Calculate loss
+        return loss
+    
+    def validation_step(self, batch):
+        input, labels = batch 
+        out = self(input)                    # Generate predictions
+        loss = F.l1_loss(out, labels)   # Calculate loss
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
+        return {'val_loss': loss.detach(), 'val_acc': acc}
+        
+    def validation_epoch_end(self, outputs):
+        batch_losses = [x['val_loss'] for x in outputs]
+        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
+        batch_accs = [x['val_acc'] for x in outputs]
+        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
+        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
+    
+    def epoch_end(self, epoch, result):
+        print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
+            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
+        
+def accuracy(outputs, labels, normalization, threshold=0.05):
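+    # labels were scaled as (x - min) / max with normalization = [min, max]; the offset makes this a relative error in un-normalized units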
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
+    right_predic = torch.sum(error < threshold)
+    percentage = ((right_predic/torch.numel(error))*100.)
+    return percentage
+    
+class UNet(UNetBase):
+    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
+        super().__init__()
+        self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
+        self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
+        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
+
+
+    def forward(self, x):
+        enc_ftrs = self.encoder(x)
+        out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
+        #out      = self.head(out)
+        return out
+
+@torch.no_grad()
+def evaluate(model, val_loader):
+    model.eval()
+    outputs = [model.validation_step(batch) for batch in val_loader]
+    return model.validation_epoch_end(outputs)
+
+def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.Adam):
+    history = []
+    optimizer = opt_func(model.parameters(), lr, eps=1e-07)
+    for epoch in range(epochs):
+        # Training Phase 
+        model.train()
+        train_losses = []
+        for batch in train_loader:
+            loss = model.training_step(batch)
+            train_losses.append(loss)
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+        # Validation phase
+        result = evaluate(model, val_loader)
+        result['train_loss'] = torch.stack(train_losses).mean().item()
+        model.epoch_end(epoch, result)
+        history.append(result)
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V9_2.pth')
+    torch.save(history,f'{path}/history_V9_2.pt')
+    return history
+
+def get_default_device():
+    """Pick GPU if available, else CPU"""
+    if torch.cuda.is_available():
+        return torch.device('cuda')
+    else:
+        print('no GPU found')
+        return torch.device('cpu')
+      
+def to_device(data, device):
+    """Move tensor(s) to chosen device"""
+    if isinstance(data, (list,tuple)):
+        return [to_device(x, device) for x in data]
+    return data.to(device, non_blocking=True)
+
+class DeviceDataLoader():
+    """Wrap a dataloader to move data to a device"""
+    def __init__(self, dl, device):
+        self.dl = dl
+        self.device = device
+        
+    def __iter__(self):
+        """Yield a batch of data after moving it to device"""
+        for b in self.dl: 
+            yield to_device(b, self.device)
+
+    def __len__(self):
+        """Number of batches"""
+        return len(self.dl)
+
+def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
+    dataset = torch.load(path) # create the pytorch dataset 
+    #size_data = 500 #shrink dataset for colab
+    #rest = len(dataset) -size_data
+    #dataset,_ = torch.utils.data.random_split(dataset, [size_data, rest])
+    val_size = int(len(dataset) * percent_val)
+    train_size = len(dataset) - val_size
+
+    train_ds, val_ds = random_split(dataset, [train_size, val_size])
+    # Create DataLoader
+    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=1, pin_memory=True)
+    valid_dl = DataLoader(val_ds, batch_size, num_workers=1, pin_memory=True)
+    
+    return train_dl, valid_dl
+
+if __name__ == '__main__':
+    #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
+    seed = 373686838
+    num_epochs = 1300
+    b_size = 32
+    opt_func = torch.optim.Adam
+    lr = 0.00001
+    kernel = 7
+    print(f'number of epochs: {num_epochs}')
+    print(f'batchsize: {b_size}')
+    print(f'learning rate: {lr}')
+    print(f'kernel size is: {kernel}')
+    if not use_seeds:
+        seed = random.randrange(2**32 - 1)
+    print(f'seed is: {seed}')
+    torch.manual_seed(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    device = get_default_device()
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    train_dl = DeviceDataLoader(train_dl, device)
+    valid_dl = DeviceDataLoader(valid_dl, device)
+
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/UNet_V9_3.py b/UNet/UNet_V9_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..997dcfec17c601400f5116467ba2563f18cfed06
--- /dev/null
+++ b/UNet/UNet_V9_3.py
@@ -0,0 +1,256 @@
+# Like V6_2, but with only the phase channels (martensite/ferrite) as input
+"""UNet_V6.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import random
+from torch.utils.data.sampler import SubsetRandomSampler
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import TensorDataset
+import torch.nn.functional as F
+from torch.utils.data import random_split
+from torch.nn.modules.activation import ReLU
+
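+# Depthwise-separable 3D convolution block: a per-channel (grouped) conv followed by a 1x1x1 pointwise conv, applied twice, each with ReLU and batch norm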
+class depthwise_separable_conv(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(depthwise_separable_conv, self).__init__()
+        self.depthwise_1 = nn.Conv3d(in_c, in_c, kernel_size= kernel_size, padding=padding[0], groups=in_c, bias=True)
+        self.pointwise_1 = nn.Conv3d(in_c, out_1_c, kernel_size=1, bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.depthwise_2 = nn.Conv3d(out_1_c, out_1_c, kernel_size= kernel_size, padding=padding[1], groups=out_1_c, bias=True)
+        self.pointwise_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size=1, bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.pointwise_1(self.depthwise_1(x))))
+        return self.batch_norm_2(self.relu(self.pointwise_2(self.depthwise_2(x))))
+
+class convolution_Layer(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(convolution_Layer, self).__init__()
+        self.conv_1 = nn.Conv3d(in_c, out_1_c, kernel_size= kernel_size, padding=padding[0], bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.conv_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size= kernel_size, padding=padding[1], bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.conv_1(x)))
+        return self.batch_norm_2(self.relu(self.conv_2(x)))
+
+class head_layer(nn.Module):
+    def __init__(self, in_c, out_c = 1, padding = "same"):
+        super(head_layer, self).__init__()
+        self.conv =  nn.Conv3d(in_c, out_c, kernel_size=1, bias=True)
+        self.sig = nn.Sigmoid()
+    def forward(self, x):
+        return self.sig(self.conv(x)) #convolution
+        #return self.sig(self.pointwise(self.depthwise(x))) #convolution
+
+class Encoder(nn.Module):
+    def __init__(self,kernel_size, chs, padding=((0,"same"),("same","same"),("same","same"))):
+      super().__init__()
+      self.channels = chs
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1], chs[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
+      self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
+      #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
+      self.periodic_upsample = nn.ReflectionPad3d(int((kernel_size-1)/2))
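+      # mirror-pad by (kernel_size-1)/2 so the first conv block (padding 0) preserves the spatial size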
+
+    
+    def forward(self, x):
+      ftrs = []
+      x = self.periodic_upsample(x)
+      for i in range(len(self.channels)):
+        ftrs.append(x)
+        x =self.enc_blocks[i](x)
+        #print(f'size of ftrs: {ftrs[i].size()}')
+        x = self.pool(x)
+        #print(f'size of x after pooling{x.size()}')
+      ftrs.append(x)
+      #print(f'size of ftrs: {ftrs[3].size()}')
+      #print(f'length of ftrs: {len(ftrs)}')
+      return ftrs
+
+class Decoder(nn.Module):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=(("same","same"),("same","same"),("same","same"))):
+        super().__init__()
+        assert len(chs_conv) == len(chs_upsampling)
+        self.chs         = chs_upsampling
+        self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
+        self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1], chs_conv[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
+        self.head = head_layer(chs_conv[-1][2])
+    def forward(self, x, encoder_features):
+        for i in range(len(self.chs)):
+            x        = self.upconvs[i](x)
+            #print(f'size after upsampling: {x.size()}')
+            enc_ftrs = self.crop(encoder_features[i], x)
+            x        = torch.cat([x, enc_ftrs], dim=1)
+            #print(f'size after cropping&cat: {x.size()}')
+
+            x        = self.dec_blocks[i](x)
+            #print(f'size after convolution: {x.size()}')
+        x = self.head(x)    
+        return x
+    
+    def crop(self, tensor, target_tensor):
+        target_size = target_tensor.size()[2]
+        tensor_size = tensor.size()[2]
+        delta = tensor_size - target_size
+        delta = delta // 2
+        return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta,delta:tensor_size-delta]
+
+class UNetBase(nn.Module):
+    def training_step(self, batch):
+        input, labels = batch 
+        out = self(input)                  # Generate predictions
+        loss = F.l1_loss(out, labels) # Calculate loss
+        return loss
+    
+    def validation_step(self, batch):
+        input, labels = batch 
+        out = self(input)                    # Generate predictions
+        loss = F.l1_loss(out, labels)   # Calculate loss
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
+        return {'val_loss': loss.detach(), 'val_acc': acc}
+        
+    def validation_epoch_end(self, outputs):
+        batch_losses = [x['val_loss'] for x in outputs]
+        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
+        batch_accs = [x['val_acc'] for x in outputs]
+        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
+        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
+    
+    def epoch_end(self, epoch, result):
+        print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
+            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
+        
+def accuracy(outputs, labels, normalization, threshold=0.05):
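+    # labels were scaled as (x - min) / max with normalization = [min, max]; the offset makes this a relative error in un-normalized units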
+    error = abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
+    right_predic = torch.sum(error < threshold)
+    percentage = ((right_predic/torch.numel(error))*100.)
+    return percentage
+    
+class UNet(UNetBase):
+    def __init__(self,kernel_size = 5, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
+        super().__init__()
+        self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
+        self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
+        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
+
+
+    def forward(self, x):
+        enc_ftrs = self.encoder(x)
+        out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
+        #out      = self.head(out)
+        return out
+
+@torch.no_grad()
+def evaluate(model, val_loader):
+    model.eval()
+    outputs = [model.validation_step(batch) for batch in val_loader]
+    return model.validation_epoch_end(outputs)
+
+def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.Adam):
+    history = []
+    optimizer = opt_func(model.parameters(), lr, eps=1e-07)
+    for epoch in range(epochs):
+        # Training Phase 
+        model.train()
+        train_losses = []
+        for batch in train_loader:
+            loss = model.training_step(batch)
+            train_losses.append(loss)
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+        # Validation phase
+        result = evaluate(model, val_loader)
+        result['train_loss'] = torch.stack(train_losses).mean().item()
+        model.epoch_end(epoch, result)
+        history.append(result)
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V9_3.pth')
+    torch.save(history,f'{path}/history_V9_3.pt')
+    return history
+
+def get_default_device():
+    """Pick GPU if available, else CPU"""
+    if torch.cuda.is_available():
+        return torch.device('cuda')
+    else:
+        print('no GPU found')
+        return torch.device('cpu')
+      
+def to_device(data, device):
+    """Move tensor(s) to chosen device"""
+    if isinstance(data, (list,tuple)):
+        return [to_device(x, device) for x in data]
+    return data.to(device, non_blocking=True)
+
+class DeviceDataLoader():
+    """Wrap a dataloader to move data to a device"""
+    def __init__(self, dl, device):
+        self.dl = dl
+        self.device = device
+        
+    def __iter__(self):
+        """Yield a batch of data after moving it to device"""
+        for b in self.dl: 
+            yield to_device(b, self.device)
+
+    def __len__(self):
+        """Number of batches"""
+        return len(self.dl)
+
+def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
+    dataset = torch.load(path) # create the pytorch dataset 
+    #size_data = 500 #shrink dataset for colab
+    #rest = len(dataset) -size_data
+    #dataset,_ = torch.utils.data.random_split(dataset, [size_data, rest])
+    val_size = int(len(dataset) * percent_val)
+    train_size = len(dataset) - val_size
+
+    train_ds, val_ds = random_split(dataset, [train_size, val_size])
+    # Create DataLoader
+    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=1, pin_memory=True)
+    valid_dl = DataLoader(val_ds, batch_size, num_workers=1, pin_memory=True)
+    
+    return train_dl, valid_dl
+
+if __name__ == '__main__':
+    #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
+    seed = 373686838
+    num_epochs = 1300
+    b_size = 32
+    opt_func = torch.optim.Adam
+    lr = 0.00001
+    kernel = 3
+    print(f'number of epochs: {num_epochs}')
+    print(f'batchsize: {b_size}')
+    print(f'learning rate: {lr}')
+    print(f'kernel size is: {kernel}')
+    if not use_seeds:
+        seed = random.randrange(2**32 - 1)
+    print(f'seed is: {seed}')
+    torch.manual_seed(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    device = get_default_device()
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy')
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/Training_Dataset_normalized_32_V2.pt', batch_size= b_size )
+    train_dl = DeviceDataLoader(train_dl, device)
+    valid_dl = DeviceDataLoader(valid_dl, device)
+
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/postprocessing_new.ipynb b/UNet/postprocessing_new.ipynb
index 3b8455be4b92fc66c9d6f11c85d376176b6c7215..9924d35415f954b4e1be40117b4901090354da38 100644
--- a/UNet/postprocessing_new.ipynb
+++ b/UNet/postprocessing_new.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 51,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -13,13 +13,13 @@
     "import pyvista as pv\n",
     "from matplotlib.colors import ListedColormap\n",
     "import copy\n",
-    "from pyvista import examples\n",
-    "from torch.utils.data import TensorDataset"
+    "import scipy.stats as stats\n",
+    "import pylab as pl"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 52,
+   "execution_count": 36,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -80,13 +80,13 @@
     "    val_acc = [x['val_acc'] for x in history[50:]]\n",
     "    val_loss = [x['val_loss'] for x in history[50:]]\n",
     "\n",
-    "    plt.plot(train_losses, '-x',)\n",
-    "    plt.plot(val_acc, '-x',)\n",
-    "    plt.plot(val_loss, '-x',)\n",
+    "    pl.plot(train_losses, '-x',)\n",
+    "    pl.plot(val_acc, '-x',)\n",
+    "    pl.plot(val_loss, '-x',)\n",
     "\n",
-    "    plt.xlabel('epoch')\n",
-    "    plt.ylabel('loss')\n",
-    "    plt.title('Loss vs. No. of epochs')\n",
+    "    pl.xlabel('epoch')\n",
+    "    pl.ylabel('loss')\n",
+    "    pl.title('Loss vs. No. of epochs')\n",
     "\n",
     "def grain_matrix_colormap(input):    \n",
     "    matrix_grains = input[0,0,:,:,:]\n",
@@ -105,7 +105,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 53,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -125,18 +125,21 @@
     "    grid_3.cell_data[\"stress\"] = stress.flatten(order = \"F\")\n",
     "    colormap_error = get_colormap(grid_1, threshold)\n",
     "    p = pv.Plotter(notebook=False,shape=(3,1))\n",
+    "    sargs_grain = dict(height=0.75, vertical=True, position_x=0.1, position_y=0.05, n_labels=0)\n",
     "    sargs = dict(height=0.75, vertical=True, position_x=0.1, position_y=0.05)\n",
     "\n",
+    "\n",
+    "\n",
     "    def my_plane_func(normal, origin):\n",
     "        slc_1 = grid_1.slice(normal=normal, origin=origin)\n",
     "        slc_2 = grid_2.slice(normal=normal, origin=origin)\n",
     "        slc_3 = grid_3.slice(normal=normal, origin=origin)\n",
     "        p.subplot(0,0)\n",
-    "        p.add_mesh(slc_2, name=\"my_slice_2\", cmap = 'RdBu', annotations = annotations, scalar_bar_args=sargs)\n",
+    "        p.add_mesh(slc_2, name=\"my_slice_2\", cmap = 'RdBu', annotations = annotations, scalar_bar_args=sargs_grain)\n",
     "        p.subplot(2,0)\n",
-    "        p.add_mesh(slc_1, name=\"my_slice_1\", cmap = colormap_error)\n",
+    "        p.add_mesh(slc_1, name=\"my_slice_1\",clim=[0.01, 1.0], below_color = 'blue', above_color = 'red', cmap = colormap_error, scalar_bar_args=sargs)\n",
     "        p.subplot(1,0)\n",
-    "        p.add_mesh(slc_3, name=\"my_slice_3\")\n",
+    "        p.add_mesh(slc_3, name=\"my_slice_3\", scalar_bar_args=sargs)\n",
     "\n",
     "    p.subplot(0,0)\n",
     "    annotations = {\n",
@@ -144,11 +147,11 @@
     "        grains.max(): 'Martensite',\n",
     "    }\n",
     "    p.add_title('Grains',font_size=10)\n",
-    "    p.add_mesh(grid_2 ,opacity=0, cmap = 'RdBu', annotations = annotations, scalar_bar_args=sargs)\n",
+    "    p.add_mesh(grid_2 ,opacity=0, cmap = 'RdBu', annotations = annotations, scalar_bar_args=sargs_grain)\n",
     "    p.add_plane_widget(my_plane_func)\n",
     "    p.subplot(2,0)\n",
     "    p.add_title('Error',font_size=10)\n",
-    "    p.add_mesh(grid_1,scalars = \"error\" ,opacity=0,cmap = colormap_error)\n",
+    "    p.add_mesh(grid_1,scalars = \"error\" ,clim=[0.01, 1.0], below_color = 'blue', above_color = 'red',opacity=0)\n",
     "    p.add_plane_widget(my_plane_func)\n",
     "\n",
     "    p.subplot(1,0)\n",
@@ -162,7 +165,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 35,
    "metadata": {},
    "outputs": [
     {
@@ -175,59 +178,43 @@
    ],
    "source": [
     "Training_data = torch.load('E:/Data/damask3/UNet/Input/TD_norm_32_phase_only.pt')\n",
-    "grain_data = torch.load('E:/Data/damask3/UNet/Input/TD_norm_32.pt')\n",
-    "#history = torch.load('E:/Data/damask3/UNet/output/V6_64/history_V6_2_64.pt')\n",
+    "grain_data = torch.load('E:/Data/damask3/UNet/Input/TD_norm_32_angles.pt')\n",
+    "history = torch.load('E:/Data/damask3/UNet/output/V6_64/history_V6_2_64.pt')\n",
     "#history_2 = torch.load('E:/Data/damask3/UNet/output/history_test.pt')\n",
-    "normalization = np.load('E:/Data/damask3/UNet/Input/Norm_min_max_32_V2.npy', allow_pickle=True)\n",
+    "normalization = np.load('E:/Data/damask3/UNet/Input/Norm_min_max_32_phase_only.npy', allow_pickle=True)\n",
     "model = UNet.UNet()\n",
-    "model.load_state_dict(torch.load('E:/Data/damask3/UNet/output/V9/Unet_dict_V9.pth',map_location=torch.device('cpu')))\n",
+    "model.load_state_dict(torch.load('E:/Data/damask3/UNet/output/V9/Unet_dict_V9_2.pth',map_location=torch.device('cpu')))\n",
     "device = UNet.get_default_device()\n",
     "model = UNet.to_device(model.double(), device)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 54,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "sample number: 876\n",
-      "Maximum error is : 313.1 %\n",
-      "average error is : 11.93 %\n",
-      "53.96% of voxels have a diviation less than 10.0%\n"
-     ]
-    }
-   ],
+   "outputs": [],
+   "source": [
+    "plot_losses(history)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "sample_index = np.random.randint(low=0, high=len(Training_data))\n",
     "print(f'sample number: {sample_index}')\n",
-    "predict_stress(np.random.randint(low=0, high=len(Training_data)), normalization = normalization, model = model, dataset = Training_data,grain_data =grain_data)"
+    "predict_stress(sample_index, normalization = normalization, model = model, dataset = Training_data,grain_data =grain_data)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 38,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "ename": "NameError",
-     "evalue": "name 'history' is not defined",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
-      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_4608/2766589794.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mtrain_losses\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'train_loss'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mhistory\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m50\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_losses\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;31mNameError\u001b[0m: name 'history' is not defined"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "train_losses = [x['train_loss'] for x in history[50:]]\n",
-    "plt.plot(train_losses)\n",
-    "plt.show()\n"
+    "mean_error, max_error, correct_per = dataset_evaluation( normalization = normalization, model = model, dataset = Training_data, threshold = 0.1)"
    ]
   },
   {
@@ -236,7 +223,32 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "plot_difference_2(error, grains,output, threshold)  \n"
+    "def dataset_evaluation( normalization = normalization, model = model, dataset = Training_data, threshold = 0.05):\n",
+    "    model.eval()\n",
+    "    mean_error = np.empty(len(dataset))\n",
+    "    max_error = np.empty(len(dataset))\n",
+    "    correct_per = np.empty(len(dataset)) #percentage of voxel that are guessed corrected, according to threshold\n",
+    "    for index in range(len(dataset)):\n",
+    "        input, output = dataset[index]\n",
+    "        input = copy.copy(input)\n",
+    "        output = copy.copy(output)\n",
+    "        input = torch.unsqueeze(input,0)\n",
+    "        output = torch.unsqueeze(output,0)\n",
+    "        xb = UNet.to_device(input, device)\n",
+    "        prediction = model(xb)\n",
+    "        input = input.detach().numpy()\n",
+    "        prediction = prediction.detach().numpy()\n",
+    "        output = output.detach().numpy()\n",
+    "        prediction = rescale(prediction, normalization)\n",
+    "        output = rescale(output, normalization)\n",
+    "        error = (abs(output - prediction)/output)\n",
+    "        right_predic = (error < threshold).sum()\n",
+    "        mean_error[index] = error.mean()*100.\n",
+    "        max_error[index] = error.max()*100.\n",
+    "        correct_per[index] = right_predic * 100.\n",
+    "    return mean_error, max_error, correct_per\n",
+    "        "
    ]
   }
  ],