Commit bceae19f authored by Brian Christopher Wasels

new difference figure

parent 617e44ad
Several binary/large files in this commit have no preview (suppressed by a .gitattributes entry, unsupported encoding, or file size limit).
 import torch
 import numpy as np
-import UNet_V16 as UNet
+import torch.nn as nn
+import UNet_V14 as UNet
 import copy

 def rescale(output, normalization):
     output_rescale = output.reshape(output.shape[2],output.shape[3],output.shape[4])
     if normalization is not None:
@@ -15,22 +15,21 @@ def rescale(output, normalization):
         output_rescale += min_label
     return output_rescale

-def dataset_evaluation( normalization, model, dataset, threshold = 0.05):
+def dataset_evaluation( normalization, model, dataloader, threshold = 0.05):
     model.eval()
-    mean_error = np.empty(len(dataset))
-    max_error = np.empty(len(dataset))
-    correct_per = np.empty(len(dataset)) # percentage of voxels predicted correctly, according to threshold
-    mean_deviation_abs = np.empty(len(dataset)) # absolute mean deviation between prediction and label per RVE
-    mean_deviation_per = np.empty(len(dataset)) # relative mean deviation between prediction and label per RVE
-    for index in range(len(dataset)):
-        input, output = dataset[index]
+    mean_error = []
+    max_error = []
+    correct_per = [] # percentage of voxels predicted correctly, according to threshold
+    mean_deviation_abs = [] # absolute mean deviation between prediction and label per RVE
+    mean_deviation_per = [] # relative mean deviation between prediction and label per RVE
+    for batch in dataloader:
+        input, output = batch
         input = copy.copy(input)
         output = copy.copy(output)
         input = torch.unsqueeze(input,0)
         output = torch.unsqueeze(output,0)
-        xb = UNet.to_device(input, device)
-        prediction = model(xb)
+        prediction = model(input)
         input = input.detach().numpy()
         prediction = prediction.cpu().detach().numpy()
         output = output.detach().numpy()
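Note: the explicit UNet.to_device(input, device) call is dropped in this hunk because the loader built in __main__ is wrapped in UNet.DeviceDataLoader, which presumably moves each batch to the target device before it reaches the loop. The wrapper's implementation is not part of this diff; the following is only a minimal sketch of how such a wrapper is commonly written, not the code in UNet_V14. If batches really do arrive on a CUDA device, input.detach().numpy() would additionally need a .cpu() call, as prediction already gets.

    def to_device(data, device):
        # Recursively move a tensor (or a list/tuple of tensors) to the target device
        if isinstance(data, (list, tuple)):
            return [to_device(x, device) for x in data]
        return data.to(device, non_blocking=True)

    class DeviceDataLoader:
        # Hypothetical wrapper: yields batches from an existing DataLoader, moved to the given device
        def __init__(self, dataloader, device):
            self.dataloader = dataloader
            self.device = device

        def __iter__(self):
            for batch in self.dataloader:
                yield to_device(batch, self.device)

        def __len__(self):
            return len(self.dataloader)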
@@ -38,12 +37,12 @@ def dataset_evaluation( normalization, model, dataset, threshold = 0.05):
         output = rescale(output, normalization)
         error = (abs(output - prediction)/output)
         right_predic = (error < threshold).sum()
-        mean_error[index] = error.mean()*100.
-        max_error[index] = error.max()*100.
-        mean_deviation_abs[index] = prediction.mean() - output.mean()
-        mean_deviation_per[index] = (prediction.mean() - output.mean())/output.mean()
-        correct_per[index] = (right_predic/error.size)*100.
+        mean_error.append(error.mean()*100.)
+        max_error.append(error.max()*100.)
+        mean_deviation_abs.append(prediction.mean() - output.mean())
+        mean_deviation_per.append((prediction.mean() - output.mean())/output.mean())
+        correct_per.append((right_predic/error.size)*100.)
     return [mean_error, max_error, correct_per, mean_deviation_abs, mean_deviation_per]

 def best_sample_id(result):
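A side effect of switching the accumulators from preallocated numpy arrays to plain Python lists: the summary statistics printed in __main__ (result[0].mean(), result[1].max(), ...) no longer work as written, because Python lists have no .mean() or .max() methods. A minimal sketch of the presumably intended behaviour, converting the returned lists to arrays before summarizing (the helper name summarize is hypothetical):

    import numpy as np

    def summarize(result):
        # result: list of per-RVE metric lists returned by dataset_evaluation
        mean_error, max_error, correct_per, mean_dev_abs, mean_dev_per = (np.asarray(r) for r in result)
        print(f'\t mean error over whole set: {mean_error.mean():.4}%')
        print(f'\t max error average: {max_error.mean():.4}% and maximum {max_error.max():.4}%')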
@@ -109,7 +108,7 @@ def export_vtk(error, grains, stress, label, path):
 if __name__ == '__main__':
     export_path = '/home/yk138599/Hiwi/damask3/UNet/output/result_14'
-    Training_data = torch.load(f'/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/TD_norm_64_angles.pt.pt')
+    Training_data = torch.load(f'/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/TD_norm_64_angles.pt')
     #Training_data = torch.load(f'/content/drive/MyDrive/Bachlorarbeit/Input/TD_norm_32_phase.pt')
     normalization = np.load(f'/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/Norm_min_max_64_angles.npy', allow_pickle=True)
@@ -117,8 +116,10 @@ if __name__ == '__main__':
     model = UNet.UNet()
     device = UNet.get_default_device()
     model = UNet.to_device(model.double(), device)
+    dataloader = UNet.Create_Dataloader(f'/home/yk138599/Hiwi/damask3/UNet/Trainingsdata/TD_norm_64_angles.pt', batch_size=1)
+    dataloader = UNet.DeviceDataLoader(dataloader, device)
     model.load_state_dict(torch.load(f'/home/yk138599/Hiwi/damask3/UNet/output/V14/Unet_dict_V14.pth', map_location=torch.device('cuda')))
     result = dataset_evaluation(normalization = normalization, model = model, dataset = copy.copy(Training_data), threshold = 0.05)
     print(f'\t mean error over whole set: {result[0].mean():.4}%')
     print(f'\t max error average: {result[1].mean():.4}% and maximum {result[1].max():.4}%')
...
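With the new signature dataset_evaluation(normalization, model, dataloader, threshold=0.05), the unchanged call above still passes dataset = copy.copy(Training_data) and would raise a TypeError for the unexpected keyword. Presumably the intention is to feed the DeviceDataLoader created two lines earlier; a sketch of the adjusted call, under that assumption:

    # Assumes 'dataloader' is the DeviceDataLoader built above and that
    # dataset_evaluation now iterates over it directly
    result = dataset_evaluation(normalization=normalization, model=model,
                                dataloader=dataloader, threshold=0.05)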