diff --git a/Bericht/Bilder/difference_phase_angles.png b/Bericht/Bilder/difference_phase_angles.png
index 0171e788cd45a26b9dfebb1050fd52e21b774fcd..f65221bcd6180470ea24caf81b069bdc59404612 100644
Binary files a/Bericht/Bilder/difference_phase_angles.png and b/Bericht/Bilder/difference_phase_angles.png differ
diff --git a/Bericht/Bilder/results_1637.png b/Bericht/Bilder/results_1637.png
index 2ac79b126b2c9781a92a08b68f87cf642b087f3e..073d072b64e1feb070702c6d86137cbb28dd4ae4 100644
Binary files a/Bericht/Bilder/results_1637.png and b/Bericht/Bilder/results_1637.png differ
diff --git a/Literatur/Korrekturen/Inga_1.pdf b/Literatur/Korrekturen/Inga_1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..9adc23867f5861b49b706dab82328f44592040fb
Binary files /dev/null and b/Literatur/Korrekturen/Inga_1.pdf differ
diff --git a/Literatur/Korrekturen/Niklas_1(fertig).pdf b/Literatur/Korrekturen/Niklas_1(fertig).pdf
new file mode 100644
index 0000000000000000000000000000000000000000..ba2cad769eb2ff251602eea75cb737213e91ace6
Binary files /dev/null and b/Literatur/Korrekturen/Niklas_1(fertig).pdf differ
diff --git a/Literatur/Papers/Crystal plasticity/Spectral methods for full-field micromechanical modelling of polycrystalline materials.pdf b/Literatur/Papers/Crystal plasticity/Spectral methods for full-field micromechanical modelling of polycrystalline materials.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..5d9a8f1832f788b28d5f6b026f955a612c42a272
Binary files /dev/null and b/Literatur/Papers/Crystal plasticity/Spectral methods for full-field micromechanical modelling of polycrystalline materials.pdf differ
diff --git a/UNet/Auswertung_64.py b/UNet/Auswertung_64.py
new file mode 100644
index 0000000000000000000000000000000000000000..03d0e11e351f907f0ba88aef208c59e8a3fc0b29
--- /dev/null
+++ b/UNet/Auswertung_64.py
@@ -0,0 +1,146 @@
+import torch
+import numpy as np
+import pyvista as pv  # needed by export_vtk
+import UNet_V16 as UNet
+import copy
+
+
+def rescale(output, normalization):
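+    # Collapse the (singleton) batch and channel dimensions and undo the
+    # min-max normalization of the labels.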
+    output_rescale = output.reshape(output.shape[2], output.shape[3], output.shape[4])
+    if normalization is not None:
+        if normalization.shape[0] == 2:
+            min_label, max_label = normalization
+        else:
+            min_label, max_label, _ = normalization
+        output_rescale *= max_label
+        output_rescale += min_label
+    return output_rescale
+
+def dataset_evaluation(normalization, model, dataset, device, threshold=0.05):
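+    # Run the model over every sample of the dataset and collect per-RVE
+    # metrics: mean/max relative error, the share of voxels within the
+    # threshold, and the absolute and relative deviation of the means.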
+    model.eval()
+    mean_error = np.empty(len(dataset))
+    max_error = np.empty(len(dataset))
+    correct_per = np.empty(len(dataset)) # percentage of voxels predicted correctly, according to threshold
+    mean_deviation_abs = np.empty(len(dataset)) # absolute mean deviation between prediction and label per RVE
+    mean_deviation_per = np.empty(len(dataset)) # relative mean deviation between prediction and label per RVE
+
+    for index in range(len(dataset)):
+        input, output = dataset[index]
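+        # copy so the cached dataset tensors are not modified in place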
+        input = copy.copy(input)
+        output = copy.copy(output)
+        input = torch.unsqueeze(input,0)
+        output = torch.unsqueeze(output,0)
+        xb = UNet.to_device(input, device)
+        prediction = model(xb)
+        input = input.detach().numpy()
+        prediction = prediction.cpu().detach().numpy()
+        output = output.detach().numpy()
+        prediction = rescale(prediction, normalization)
+        output = rescale(output, normalization)
+        error = (abs(output - prediction)/output)
+        right_predic = (error < threshold).sum()
+        mean_error[index] = error.mean()*100.
+        max_error[index] = error.max()*100.
+        mean_deviation_abs[index] = prediction.mean() - output.mean()
+        mean_deviation_per[index] = (prediction.mean() - output.mean())/output.mean()
+
+        correct_per[index] = (right_predic/error.size)*100.
+    return [mean_error, max_error, correct_per, mean_deviation_abs, mean_deviation_per]
+
+def best_sample_id(result):
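+  # result[0] holds the per-sample mean errors; return the index of the smallest one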
+  index_min = min(range(len(result[0])), key=result[0].__getitem__)
+  print(f'sample with index {index_min} has the best mean error with {result[0][index_min]:.4}%')
+  return index_min
+
+
+def predict_stress(image_id, normalization, model, dataset, grain_data, UNet, device, threshold=0.15):
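+    # Predict the stress field of a single sample, print its error metrics and
+    # return everything needed for the VTK export.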
+    input, output = dataset[image_id]
+    grain,_ = grain_data[image_id]
+    grain = copy.deepcopy(grain)
+    grain = torch.unsqueeze(grain,0)
+    grain = grain.detach().numpy()
+    input = copy.deepcopy(input)
+    output = copy.deepcopy(output)
+    input = torch.unsqueeze(input,0)
+    output = torch.unsqueeze(output,0)
+    xb = UNet.to_device(input, device)
+    model.eval()
+    prediction = model(xb)
+    input = input.detach().numpy()
+    prediction = prediction.cpu().detach().numpy()
+    output = output.detach().numpy()
+    prediction = rescale(prediction, normalization)
+    output = rescale(output, normalization)
+    error = (abs(output - prediction)/output)
+    print(f'Maximum error: {error.max()*100.:.4} %')
+    print(f'Average error: {error.mean()*100.:.4} %')
+    right_predic = (error < threshold).sum()
+    print(f'{(right_predic/error.size)*100.:.4}% of voxels have a deviation of less than {threshold*100.}%')
+    grains = grain_matrix(grain)
+    return error, grains, prediction, output
+
+
+def grain_matrix(input):
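+    # Build a matrix of grain ids from the input channels: channel 0 holds a
+    # per-grain value (an orientation angle), channel 5 is a binary ferrite
+    # mask. Ferrite grains are numbered first, martensite grains after them.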
+    matrix_grains = input[0,0,:,:,:]
+    matrix_ferrit = input[0,5,:,:,:] # elements are 1 where the phase is ferrite, 0 otherwise
+    matrix_ferrit_grains = np.multiply(matrix_grains, matrix_ferrit) # only the ferrite grains stay nonzero
+    index_ferrit_angles = np.unique(matrix_ferrit_grains[matrix_ferrit_grains != 0])
+    index_martensite_angles = np.setdiff1d(np.unique(matrix_grains), index_ferrit_angles)
+    for index, angle in enumerate(index_ferrit_angles):
+        matrix_grains[matrix_grains == angle] = index # ferrite grains get the ids 0 .. n_ferrite-1
+    for index, angle in enumerate(index_martensite_angles):
+        matrix_grains[matrix_grains == angle] = index + len(index_ferrit_angles) + 100 # martensite ids are offset by 100 to keep the phases apart
+
+    return matrix_grains
+
+def export_vtk(error, grains, stress, label, path):
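+    # Write error, grain ids, predicted stress and label into one VTK file
+    # (cell data, Fortran order) so they can be compared in e.g. ParaView.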
+    grid = pv.UniformGrid()
+    grid.dimensions = np.array(error.shape) +1
+    grid.spacing = (1,1,1)
+
+    grid.cell_data["error"] = error.flatten(order = "F")
+    grid.cell_data["grain"] = grains.flatten(order = "F")
+    grid.cell_data["stress"] = stress.flatten(order = "F")
+    grid.cell_data["label"] = label.flatten(order = "F")
+
+    grid.save(f'{path}.vtk')
+
+
+    
+if __name__ == '__main__':
+    export_path = '/home/yk138599/Hiwi/damask3/UNet/output/result_14'
+    Training_data = torch.load(f'/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/TD_norm_64_angles.pt.pt')
+    #Training_data = torch.load(f'/content/drive/MyDrive/Bachlorarbeit/Input/TD_norm_32_phase.pt')
+
+    normalization = np.load(f'/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/Norm_min_max_64_angles.npy', allow_pickle=True)
+    #normalization = np.load(f'/content/drive/MyDrive/Bachlorarbeit/Input/Norm_min_max_32_phase.npy', allow_pickle=True)
+    model = UNet.UNet()
+    device = UNet.get_default_device()
+    model = UNet.to_device(model.double(), device)
+
+    model.load_state_dict(torch.load(f'/home/yk138599/Hiwi/damask3/UNet/output/V14/Unet_dict_V14.pth',map_location=torch.device('cuda')))
+    result = dataset_evaluation(normalization=normalization, model=model, dataset=copy.copy(Training_data), device=device, threshold=0.05)
+    print(f'\t mean error over whole set: {result[0].mean():.4}%')
+    print(f'\t average max error: {result[1].mean():.4}% and overall maximum: {result[1].max():.4}%')
+    print(f'\t average percentage of correctly predicted voxels over whole set: {result[2].mean():.4}%')
+    print(f'\t average deviation per RVE over whole set: {result[3].mean():.4} Pa')
+    print(f'\t average deviation in percent per RVE over whole set: {result[4].mean()*100.:.4} %')
+    np.save('/home/yk138599/Hiwi/damask3/UNet/output/V14/evaluation',result)
+    sample_index = best_sample_id(result)
+    print(f'best sample is: {sample_index}')
+    error, grains, prediction, label = predict_stress(sample_index, normalization=normalization, model=model, device=device, dataset=Training_data, grain_data=Training_data, UNet=UNet, threshold=0.15)
+    export_vtk(error, grains, prediction, label, export_path)
\ No newline at end of file
diff --git a/UNet/NormalizeTrainingdata_64.ipynb b/UNet/NormalizeTrainingdata_64.ipynb
index fbc2f92345358e96ea4a804f95d9849fda5d9744..7bcd331f5ee652cc9f2e368b7c28d9ef2329a770 100644
--- a/UNet/NormalizeTrainingdata_64.ipynb
+++ b/UNet/NormalizeTrainingdata_64.ipynb
@@ -2,7 +2,7 @@
   "cells": [
     {
       "cell_type": "code",
-      "execution_count": 1,
+      "execution_count": 2,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -28,7 +28,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 2,
+      "execution_count": 3,
       "metadata": {
         "id": "OzNQI96lq3Pi"
       },
@@ -36,8 +36,7 @@
       "source": [
         "training_data = np.load('E:/Data/damask3/UNet/Input/Training_data_64.npy')\n",
         "training_label =  np.load('E:/Data/damask3/UNet/Input/Training_labels_64.npy')\n",
-        "training_data = training_data[0:50]\n",
-        "training_label = training_label[0:50]\n",
+        "\n",
         "\n",
         "if training_data.shape[0] != training_label.shape[0]:\n",
         "    print('label and data have not the same size')\n",
@@ -48,7 +47,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 7,
+      "execution_count": 4,
       "metadata": {
         "id": "lUnBE7T4q3Pi"
       },
@@ -72,7 +71,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 3,
+      "execution_count": 5,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -85,8 +84,8 @@
           "name": "stdout",
           "output_type": "stream",
           "text": [
-            "size of input is torch.Size([50, 2, 64, 64, 64])\n",
-            "size of label is torch.Size([50, 64, 64, 64])\n"
+            "size of input is torch.Size([791, 2, 64, 64, 64])\n",
+            "size of label is torch.Size([791, 64, 64, 64])\n"
           ]
         }
       ],
diff --git a/UNet/grain_ numbers.ipynb b/UNet/grain_ numbers.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..8a8966577c3eacca09c06ba6a7f3bdfe5c063ef4
--- /dev/null
+++ b/UNet/grain_ numbers.ipynb	
@@ -0,0 +1,55 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pyvista as pv\n",
+    "import numpy as np\n",
+    "import os"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "path_to_folder ='E:/Data/Simulation_Output/OutputData_64'\n",
+    "number_grains=np.empty(len(os.listdir(path_to_folder)))\n",
+    "for folder_id, folder in enumerate(os.listdir(path_to_folder)):\n",
+    "    grid = pv.read(f'{path_to_folder}/{folder}/grid.vti')\n",
+    "    number_grains[folder_id] = grid['material'].max() +1\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "np.save('E:/Data/damask3/UNet/Input/grain_numbers_64.npy',number_grains)"
+   ]
+  }
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "97ae2fbf52e0575424be8b71df1b468d27bac9d21e20089d11e8b4b02c5eac36"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.9.5 ('base')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python",
+   "version": "3.9.5"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/UNet/visualize_vtk.ipynb b/UNet/visualize_vtk.ipynb
index 56f982c29e43a169520bea48ead4242c6255ac41..92f7e731d46a08773c842a8708ca44136ffdabd0 100644
--- a/UNet/visualize_vtk.ipynb
+++ b/UNet/visualize_vtk.ipynb
@@ -46,7 +46,7 @@
     "    right_predic = (error < threshold).sum()\n",
     "    print(f'{(right_predic/error.size)*100.:.4}% of voxels have a diviation less than {threshold*100.}%')\n",
     "    grains = grain_matrix(grain)\n",
-    "    return error,grains,prediction\n",
+    "    return error,grains,prediction, output\n",
     "\n",
     "\n",
     "def grain_matrix(input):    \n",
@@ -70,7 +70,7 @@
     "        output_rescale *= max_label\n",
     "        output_rescale += min_label\n",
     "    return output_rescale\n",
-    "def export_vtk(error, grains, stress,path):   \n",
+    "def export_vtk(error, grains, stress, label,path):   \n",
     "    grid = pv.UniformGrid()\n",
     "    grid.dimensions = np.array(error.shape) +1\n",
     "    grid.spacing = (1,1,1)\n",
@@ -78,6 +78,8 @@
     "    grid.cell_data[\"error\"] = error.flatten(order = \"F\")\n",
     "    grid.cell_data[\"grain\"] = grains.flatten(order = \"F\")\n",
     "    grid.cell_data[\"stress\"] = stress.flatten(order = \"F\")\n",
+    "    grid.cell_data[\"label\"] = label.flatten(order = \"F\")\n",
+    "\n",
     "    grid.save(f'{path}.vtk')\n",
     "\n",
     "\n",
@@ -131,8 +133,8 @@
     "#sample_index = np.random.randint(low=0, high=len(Training_data_32))\n",
     "sample_index = 1288\n",
     "print(f'sample number: {sample_index}')\n",
-    "error,grains,output = predict_stress(sample_index, normalization = normalization_32, model = model, device=device, dataset = Training_data_32,grain_data =grain_data_32,UNet=UNet, threshold=0.15)\n",
-    "export_vtk(error,grains,output,export_path)"
+    "error,grains,prediction,label= predict_stress(sample_index, normalization = normalization_32, model = model, device=device, dataset = Training_data_32,grain_data =grain_data_32,UNet=UNet, threshold=0.15)\n",
+    "export_vtk(error,grains,prediction,label,export_path)"
    ]
   },
   {