diff --git a/UNet/.#Train_model.sh b/UNet/.#Train_model.sh
deleted file mode 120000
index d0dfaea869caedd8207c2c126db76e1ea917a51f..0000000000000000000000000000000000000000
--- a/UNet/.#Train_model.sh
+++ /dev/null
@@ -1 +0,0 @@
-yk138599@login18-x-1.hpc.itc.rwth-aachen.de.71560:1644816141
\ No newline at end of file
diff --git a/UNet/Train_model.sh b/UNet/1_Train_model.sh
similarity index 78%
rename from UNet/Train_model.sh
rename to UNet/1_Train_model.sh
index c609f15161b36418fd589aa0d4b368f191fb9641..2f301b8dc63ddee6955fb50361df3b3e7b67c0a3 100644
--- a/UNet/Train_model.sh
+++ b/UNet/1_Train_model.sh
@@ -6,7 +6,7 @@
 #SBATCH --partition=c18g
 
 #SBATCH -J training_model
-#SBATCH -o Sim_logs/UNet_64_V14_%J.log
+#SBATCH -o Sim_logs/UNet_V12_%J.log
  
 #SBATCH --gres=gpu:1
 #SBATCH --time=90:00:00
@@ -16,8 +16,7 @@
 module load cuda
 module load python/3.7.11
 pip3 install --user -Iv -q torch==1.10.1
-#time python3 ./UNet_V12.py
-#time python3 ./UNet_V13.py
-time python3 ./UNet_V14.py
+
+time python3 ./UNet_V12.py
 #print GPU Information
 #$CUDA_ROOT/extras/demo_suite/deviceQuery -noprompt
diff --git a/UNet/2_Train_model.sh b/UNet/2_Train_model.sh
index 2bd0b5d45b92e05ea5f349bd249a9ae810bd0a19..178eef405a78faeee70e67eb4fa8279d5fff9d52 100644
--- a/UNet/2_Train_model.sh
+++ b/UNet/2_Train_model.sh
@@ -6,17 +6,18 @@
 #SBATCH --partition=c18g
 
 #SBATCH -J training_model
-#SBATCH -o Sim_logs/UNet_V10_%J.log
+#SBATCH -o Sim_logs/UNet_V17_%J.log
  
 #SBATCH --gres=gpu:1
-#SBATCH --time=50:00:00
+#SBATCH --time=90:00:00
 ### Request memory you need for your job in MB
-#SBATCH --mem-per-cpu=10000
+#SBATCH --mem-per-cpu=20000
 #SBATCH --mem-per-gpu=16000
 module load cuda
 module load python/3.7.11
-echo "9.1 k=7 lr=1e-06"
 pip3 install --user -Iv -q torch==1.10.1
-time python3 ./UNet_V10.py
+#time python3 ./UNet_V12.py
+time python3 ./UNet_V17.py
+#time python3 ./UNet_V14.py
 #print GPU Information
 #$CUDA_ROOT/extras/demo_suite/deviceQuery -noprompt
diff --git a/UNet/3_Train_model.sh b/UNet/3_Train_model.sh
new file mode 100644
index 0000000000000000000000000000000000000000..61264b9b8cca3ac59df5aec868408b231fa88d20
--- /dev/null
+++ b/UNet/3_Train_model.sh
@@ -0,0 +1,22 @@
+#!/usr/local_rwth/bin/zsh
+### Project account
+#SBATCH --account=rwth0744
+
+### Cluster Partition
+#SBATCH --partition=c18g
+
+#SBATCH -J training_model
+#SBATCH -o Sim_logs/UNet_V15_%J.log
+ 
+#SBATCH --gres=gpu:1
+#SBATCH --time=90:00:00
+### Request memory you need for your job in MB
+#SBATCH --mem-per-cpu=20000
+#SBATCH --mem-per-gpu=16000
+module load cuda
+module load python/3.7.11
+pip3 install --user -Iv -q torch==1.10.1
+
+time python3 ./UNet_V15.py
+#print GPU Information
+#$CUDA_ROOT/extras/demo_suite/deviceQuery -noprompt
diff --git a/UNet/Train_model2.sh b/UNet/4_Train_model.sh
similarity index 83%
rename from UNet/Train_model2.sh
rename to UNet/4_Train_model.sh
index 8588d548a921811924c8208a82e9868ed73f146f..2d13029d3835869f1d2b73a30296dc68f8d05556 100644
--- a/UNet/Train_model2.sh
+++ b/UNet/4_Train_model.sh
@@ -6,7 +6,7 @@
 #SBATCH --partition=c18g
 
 #SBATCH -J training_model
-#SBATCH -o Sim_logs/UNet_64_V16_%J.log
+#SBATCH -o Sim_logs/UNet_V16_%J.log
  
 #SBATCH --gres=gpu:1
 #SBATCH --time=90:00:00
@@ -16,8 +16,7 @@
 module load cuda
 module load python/3.7.11
 pip3 install --user -Iv -q torch==1.10.1
+
 time python3 ./UNet_V16.py
-#time python3 ./UNet_V13.py
-#time python3 ./UNet_V14.py
 #print GPU Information
 #$CUDA_ROOT/extras/demo_suite/deviceQuery -noprompt
diff --git a/UNet/5_Train_model.sh b/UNet/5_Train_model.sh
new file mode 100644
index 0000000000000000000000000000000000000000..747df366e4d5eebb83ffed3365d94016f292b6d4
--- /dev/null
+++ b/UNet/5_Train_model.sh
@@ -0,0 +1,22 @@
+#!/usr/local_rwth/bin/zsh
+### Project account
+#SBATCH --account=rwth0744
+
+### Cluster Partition
+#SBATCH --partition=c18g
+
+#SBATCH -J training_model
+#SBATCH -o Sim_logs/UNet_V16_K3_%J.log
+ 
+#SBATCH --gres=gpu:1
+#SBATCH --time=90:00:00
+### Request memory you need for your job in MB
+#SBATCH --mem-per-cpu=20000
+#SBATCH --mem-per-gpu=16000
+module load cuda
+module load python/3.7.11
+pip3 install --user -Iv -q torch==1.10.1
+
+time python3 ./UNet_V16.py
+#print GPU Information
+#$CUDA_ROOT/extras/demo_suite/deviceQuery -noprompt
diff --git a/UNet/Sim_logs/UNet_64_V12_25614663.log b/UNet/Sim_logs/UNet_64_V12_25614663.log
deleted file mode 100644
index 29d23bf339e625e0ae5adb38d1fe8741ae15fac3..0000000000000000000000000000000000000000
--- a/UNet/Sim_logs/UNet_64_V12_25614663.log
+++ /dev/null
@@ -1,46 +0,0 @@
-(OK) Loading cuda 10.2.89
-(OK) Loading python 3.7.11
-(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
- Built with GCC compilers.
-Collecting torch==1.10.1
-  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
-Collecting typing-extensions
-  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
-Installing collected packages: typing-extensions, torch
-  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
-  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
-Successfully installed torch-1.10.1 typing-extensions-4.1.1
-WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
-You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
-number of epochs: 500
-batchsize: 32
-learning rate: 3e-05
-kernel size is: 9
- seed is: 2518441936
-Traceback (most recent call last):
-  File "./UNet_V12.py", line 250, in <module>
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
-  File "./UNet_V12.py", line 165, in fit
-    loss = model.training_step(batch)
-  File "./UNet_V12.py", line 108, in training_step
-    out = self(input)                  # Generate predictions
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
-    return forward_call(*input, **kwargs)
-  File "./UNet_V12.py", line 147, in forward
-    out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
-    return forward_call(*input, **kwargs)
-  File "./UNet_V12.py", line 93, in forward
-    x        = self.dec_blocks[i](x)
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
-    return forward_call(*input, **kwargs)
-  File "./UNet_V12.py", line 29, in forward
-    x = self.batch_norm_1(self.relu(self.pointwise_1(self.depthwise_1(x))))
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
-    return forward_call(*input, **kwargs)
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/nn/modules/conv.py", line 590, in forward
-    return self._conv_forward(input, self.weight, self.bias)
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/nn/modules/conv.py", line 586, in _conv_forward
-    input, weight, bias, self.stride, self.padding, self.dilation, self.groups
-RuntimeError: CUDA out of memory. Tried to allocate 512.00 MiB (GPU 0; 15.78 GiB total capacity; 14.15 GiB already allocated; 280.50 MiB free; 14.16 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
-python3 ./UNet_V12.py  4.92s user 5.82s system 16% cpu 1:06.40 total
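The deleted log above ends in a CUDA out-of-memory failure at batchsize 32, and the error text itself points at the allocator's max_split_size_mb knob via PYTORCH_CUDA_ALLOC_CONF; the UNet_64_V13 log below shows the same configuration surviving at batchsize 16. A minimal sketch of both levers, assuming a PyTorch 1.10 environment like the one these scripts install (nothing here is taken from UNet_V12.py itself):

    import os

    # The allocator hint must be set before the first CUDA allocation; the OOM
    # message recommends it when reserved memory far exceeds allocated memory.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

    import torch

    # The other lever is simply a smaller batch: the V12 run above failed at 32,
    # while the V13 run below trained at 16. The value here is illustrative.
    batch_size = 16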
diff --git a/UNet/Sim_logs/UNet_64_V13_25614318.log b/UNet/Sim_logs/UNet_64_V13_25614318.log
deleted file mode 100644
index e8371e6132b07b777ef4e56fa76ca95625257f57..0000000000000000000000000000000000000000
--- a/UNet/Sim_logs/UNet_64_V13_25614318.log
+++ /dev/null
@@ -1,38 +0,0 @@
-(OK) Loading cuda 10.2.89
-(OK) Loading python 3.7.11
-(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
- Built with GCC compilers.
-Collecting torch==1.10.1
-  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
-Collecting typing-extensions
-  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
-Installing collected packages: typing-extensions, torch
-  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
-  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
-Successfully installed torch-1.10.1 typing-extensions-4.1.1
-WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
-You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
-number of epochs: 500
-batchsize: 16
-learning rate: 3e-05
-kernel size is: 9
- seed is: 2628832979
-Epoch [0], train_loss: 0.271159, val_loss: 0.268970, val_acc: 0.014685
-Epoch [1], train_loss: 0.269396, val_loss: 0.270057, val_acc: 0.073417
-Epoch [2], train_loss: 0.268085, val_loss: 0.279366, val_acc: 0.120659
-Epoch [3], train_loss: 0.266099, val_loss: 0.254583, val_acc: 0.435022
-Epoch [4], train_loss: 0.263552, val_loss: 0.256586, val_acc: 0.376657
-Epoch [5], train_loss: 0.261619, val_loss: 0.242178, val_acc: 0.313965
-Epoch [6], train_loss: 0.260539, val_loss: 0.247519, val_acc: 0.305485
-Epoch [7], train_loss: 0.259419, val_loss: 0.248480, val_acc: 0.254837
-Epoch [8], train_loss: 0.258631, val_loss: 0.247978, val_acc: 0.210317
-Epoch [9], train_loss: 0.257922, val_loss: 0.255808, val_acc: 0.172337
-Epoch [10], train_loss: 0.257285, val_loss: 0.252549, val_acc: 0.182081
-Epoch [11], train_loss: 0.256655, val_loss: 0.258195, val_acc: 0.166881
-Epoch [12], train_loss: 0.256037, val_loss: 0.265417, val_acc: 0.211055
-Epoch [13], train_loss: 0.255511, val_loss: 0.254048, val_acc: 0.176106
-Epoch [14], train_loss: 0.254910, val_loss: 0.249992, val_acc: 0.237055
-Epoch [15], train_loss: 0.254372, val_loss: 0.251587, val_acc: 0.127559
-Epoch [16], train_loss: 0.253764, val_loss: 0.260919, val_acc: 0.167581
-Epoch [17], train_loss: 0.253268, val_loss: 0.259768, val_acc: 0.206201
-python3 ./UNet_V13.py  1570.35s user 1560.30s system 96% cpu 53:54.66 total
diff --git a/UNet/Sim_logs/UNet_64_V14_25617675.log b/UNet/Sim_logs/UNet_64_V14_25617675.log
deleted file mode 100644
index ac6eb10c86cae42afacf8afec9d347cc803cf6d8..0000000000000000000000000000000000000000
--- a/UNet/Sim_logs/UNet_64_V14_25617675.log
+++ /dev/null
@@ -1,47 +0,0 @@
-(OK) Loading cuda 10.2.89
-(OK) Loading python 3.7.11
-(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
- Built with GCC compilers.
-Collecting torch==1.10.1
-  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
-Collecting typing-extensions
-  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
-Installing collected packages: typing-extensions, torch
-  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
-  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
-Successfully installed torch-1.10.1 typing-extensions-4.1.1
-WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
-You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
-number of epochs: 500
-batchsize: 32
-learning rate: 3e-05
-kernel size is: 9
- seed is: 1197567716
-Traceback (most recent call last):
-  File "./UNet_V14.py", line 249, in <module>
-Traceback (most recent call last):
-  File "/rwthfs/rz/SW/UTIL.common/Python/3.7.11/x86_64/lib/python3.7/multiprocessing/queues.py", line 242, in _feed
-    send_bytes(obj)
-  File "/rwthfs/rz/SW/UTIL.common/Python/3.7.11/x86_64/lib/python3.7/multiprocessing/connection.py", line 200, in send_bytes
-    self._send_bytes(m[offset:offset + size])
-  File "/rwthfs/rz/SW/UTIL.common/Python/3.7.11/x86_64/lib/python3.7/multiprocessing/connection.py", line 404, in _send_bytes
-    self._send(header + buf)
-  File "/rwthfs/rz/SW/UTIL.common/Python/3.7.11/x86_64/lib/python3.7/multiprocessing/connection.py", line 368, in _send
-    n = write(self._handle, buf)
-BrokenPipeError: [Errno 32] Broken pipe
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
-  File "./UNet_V14.py", line 163, in fit
-    for batch in train_loader:
-  File "./UNet_V14.py", line 201, in __iter__
-    yield to_device(b, self.device)
-  File "./UNet_V14.py", line 189, in to_device
-    return [to_device(x, device) for x in data]
-  File "./UNet_V14.py", line 189, in <listcomp>
-    return [to_device(x, device) for x in data]
-  File "./UNet_V14.py", line 190, in to_device
-    return data.to(device, non_blocking=True)
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
-    _error_if_any_worker_fails()
-RuntimeError: DataLoader worker (pid 53817) is killed by signal: Killed. 
-python3 ./UNet_V14.py  6.29s user 14.51s system 17% cpu 2:00.50 total
-slurmstepd: error: Detected 1 oom-kill event(s) in step 25617675.batch cgroup. Some of your processes may have been killed by the cgroup out-of-memory handler.
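Unlike the GPU OOM above, this deleted log records a host-side failure: the cgroup OOM killer terminated a DataLoader worker (the slurmstepd line), the kind of failure that raising --mem-per-cpu (10000 to 20000 in 2_Train_model.sh) guards against. A complementary, purely illustrative mitigation is to fork fewer workers, since each worker keeps its own prefetched batches in host RAM; the constructor arguments below are assumptions, not values from UNet_V14.py:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # Placeholder data standing in for the real training set.
    dataset = TensorDataset(torch.zeros(64, 1, 64, 64), torch.zeros(64, 1, 64, 64))

    # Fewer forked workers -> fewer resident copies of prefetched batches,
    # which keeps the job under its SLURM memory cgroup.
    loader = DataLoader(dataset, batch_size=32, num_workers=2, pin_memory=True)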
diff --git a/UNet/Sim_logs/UNet_64_V14_25621929.log b/UNet/Sim_logs/UNet_64_V14_25621929.log
deleted file mode 100644
index b0e5b7de331116b7ff4290d8c850441576782811..0000000000000000000000000000000000000000
--- a/UNet/Sim_logs/UNet_64_V14_25621929.log
+++ /dev/null
@@ -1,35 +0,0 @@
-(OK) Loading cuda 10.2.89
-(OK) Loading python 3.7.11
-(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
- Built with GCC compilers.
-Collecting torch==1.10.1
-  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
-Collecting typing-extensions
-  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
-Installing collected packages: typing-extensions, torch
-  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
-  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
-Successfully installed torch-1.10.1 typing-extensions-4.1.1
-WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
-You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
-number of epochs: 500
-batchsize: 32
-learning rate: 3e-05
-kernel size is: 9
- seed is: 1383180841
-Traceback (most recent call last):
-  File "./UNet_V14.py", line 249, in <module>
-    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
-  File "./UNet_V14.py", line 170, in fit
-    result = evaluate(model, val_loader)
-  File "/home/yk138599/.local/lib/python3.7/site-packages/torch/autograd/grad_mode.py", line 28, in decorate_context
-    return func(*args, **kwargs)
-  File "./UNet_V14.py", line 153, in evaluate
-    outputs = [model.validation_step(batch) for batch in val_loader]
-  File "./UNet_V14.py", line 153, in <listcomp>
-    outputs = [model.validation_step(batch) for batch in val_loader]
-  File "./UNet_V14.py", line 115, in validation_step
-    acc = accuracy(out.detach(), labels.detach())         # Calculate accuracy
-TypeError: accuracy() missing 1 required positional argument: 'normalization'
-terminate called without an active exception
-python3 ./UNet_V14.py  42.18s user 50.52s system 45% cpu 3:24.39 total
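This deleted log fails in validation_step: the traceback shows accuracy(out.detach(), labels.detach()) being called while the function requires a third positional argument, normalization. Only that parameter name is known from the traceback; the body below is a hypothetical reconstruction for illustration, not the definition in UNet_V14.py:

    import torch

    def accuracy(outputs, labels, normalization):
        # Hypothetical: scale the mean absolute error by a caller-supplied
        # normalization constant; the real metric may differ.
        return torch.mean(torch.abs(outputs - labels)) / normalization

    # The failing call would then pass all three arguments:
    # acc = accuracy(out.detach(), labels.detach(), normalization)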
diff --git a/UNet/Sim_logs/UNet_64_V16_25621936.log b/UNet/Sim_logs/UNet_64_V16_25621936.log
index 36a8f64b40745f648c944d41e272fd4fa20a96e0..fc5b97b098c8be78f1d7414d5fd25654ffc1b5f0 100644
--- a/UNet/Sim_logs/UNet_64_V16_25621936.log
+++ b/UNet/Sim_logs/UNet_64_V16_25621936.log
@@ -2193,3 +2193,1338 @@ Epoch [2172], train_loss: 0.060548, val_loss: 0.057796, val_acc: 25.732431
 Epoch [2173], train_loss: 0.060332, val_loss: 0.057812, val_acc: 25.721066
 Epoch [2174], train_loss: 0.060685, val_loss: 0.057682, val_acc: 25.839436
 Epoch [2175], train_loss: 0.060615, val_loss: 0.057744, val_acc: 25.759651
+Epoch [2176], train_loss: 0.060611, val_loss: 0.057793, val_acc: 25.731363
+Epoch [2177], train_loss: 0.060592, val_loss: 0.057739, val_acc: 25.850721
+Epoch [2178], train_loss: 0.060436, val_loss: 0.057720, val_acc: 25.831568
+Epoch [2179], train_loss: 0.060434, val_loss: 0.057680, val_acc: 25.817488
+Epoch [2180], train_loss: 0.060296, val_loss: 0.057705, val_acc: 25.866602
+Epoch [2181], train_loss: 0.060522, val_loss: 0.057759, val_acc: 25.746832
+Epoch [2182], train_loss: 0.060539, val_loss: 0.057798, val_acc: 25.746128
+Epoch [2183], train_loss: 0.060488, val_loss: 0.057764, val_acc: 25.724018
+Epoch [2184], train_loss: 0.060404, val_loss: 0.057831, val_acc: 25.723972
+Epoch [2185], train_loss: 0.060425, val_loss: 0.057743, val_acc: 25.771969
+Epoch [2186], train_loss: 0.060743, val_loss: 0.057749, val_acc: 25.758217
+Epoch [2187], train_loss: 0.060448, val_loss: 0.057732, val_acc: 25.819386
+Epoch [2188], train_loss: 0.060696, val_loss: 0.057737, val_acc: 25.808725
+Epoch [2189], train_loss: 0.060466, val_loss: 0.057750, val_acc: 25.774410
+Epoch [2190], train_loss: 0.060455, val_loss: 0.057782, val_acc: 25.709166
+Epoch [2191], train_loss: 0.060451, val_loss: 0.057749, val_acc: 25.787664
+Epoch [2192], train_loss: 0.060394, val_loss: 0.057737, val_acc: 25.764484
+Epoch [2193], train_loss: 0.060442, val_loss: 0.057791, val_acc: 25.790199
+Epoch [2194], train_loss: 0.060630, val_loss: 0.057744, val_acc: 25.815210
+Epoch [2195], train_loss: 0.060379, val_loss: 0.057792, val_acc: 25.752935
+Epoch [2196], train_loss: 0.060604, val_loss: 0.057822, val_acc: 25.696817
+Epoch [2197], train_loss: 0.060290, val_loss: 0.057852, val_acc: 25.667650
+Epoch [2198], train_loss: 0.060410, val_loss: 0.057716, val_acc: 25.817989
+Epoch [2199], train_loss: 0.060327, val_loss: 0.057700, val_acc: 25.826756
+Epoch [2200], train_loss: 0.060500, val_loss: 0.057822, val_acc: 25.760221
+Epoch [2201], train_loss: 0.060468, val_loss: 0.057766, val_acc: 25.813177
+Epoch [2202], train_loss: 0.060478, val_loss: 0.057719, val_acc: 25.854160
+Epoch [2203], train_loss: 0.060318, val_loss: 0.057775, val_acc: 25.785646
+Epoch [2204], train_loss: 0.060448, val_loss: 0.057757, val_acc: 25.773241
+Epoch [2205], train_loss: 0.060418, val_loss: 0.057762, val_acc: 25.694445
+Epoch [2206], train_loss: 0.060368, val_loss: 0.057751, val_acc: 25.759914
+Epoch [2207], train_loss: 0.060456, val_loss: 0.057768, val_acc: 25.781172
+Epoch [2208], train_loss: 0.060502, val_loss: 0.057709, val_acc: 25.850376
+Epoch [2209], train_loss: 0.060595, val_loss: 0.057733, val_acc: 25.835716
+Epoch [2210], train_loss: 0.060523, val_loss: 0.057758, val_acc: 25.758085
+Epoch [2211], train_loss: 0.060444, val_loss: 0.057814, val_acc: 25.641556
+Epoch [2212], train_loss: 0.060374, val_loss: 0.057790, val_acc: 25.772602
+Epoch [2213], train_loss: 0.060601, val_loss: 0.057685, val_acc: 25.879204
+Epoch [2214], train_loss: 0.060434, val_loss: 0.057753, val_acc: 25.767296
+Epoch [2215], train_loss: 0.060546, val_loss: 0.057772, val_acc: 25.789049
+Epoch [2216], train_loss: 0.060527, val_loss: 0.057704, val_acc: 25.842173
+Epoch [2217], train_loss: 0.060368, val_loss: 0.057767, val_acc: 25.738672
+Epoch [2218], train_loss: 0.060468, val_loss: 0.057780, val_acc: 25.772261
+Epoch [2219], train_loss: 0.060444, val_loss: 0.057705, val_acc: 25.771921
+Epoch [2220], train_loss: 0.060443, val_loss: 0.057725, val_acc: 25.841135
+Epoch [2221], train_loss: 0.060431, val_loss: 0.057721, val_acc: 25.801538
+Epoch [2222], train_loss: 0.060419, val_loss: 0.057776, val_acc: 25.765121
+Epoch [2223], train_loss: 0.060621, val_loss: 0.057714, val_acc: 25.757565
+Epoch [2224], train_loss: 0.060412, val_loss: 0.057749, val_acc: 25.782127
+Epoch [2225], train_loss: 0.060403, val_loss: 0.057791, val_acc: 25.768785
+Epoch [2226], train_loss: 0.060341, val_loss: 0.057715, val_acc: 25.848467
+Epoch [2227], train_loss: 0.060451, val_loss: 0.057687, val_acc: 25.832636
+Epoch [2228], train_loss: 0.060666, val_loss: 0.057739, val_acc: 25.791044
+Epoch [2229], train_loss: 0.060517, val_loss: 0.057711, val_acc: 25.802126
+Epoch [2230], train_loss: 0.060547, val_loss: 0.057808, val_acc: 25.731644
+Epoch [2231], train_loss: 0.060617, val_loss: 0.057789, val_acc: 25.734324
+Epoch [2232], train_loss: 0.060693, val_loss: 0.057678, val_acc: 25.858536
+Epoch [2233], train_loss: 0.060356, val_loss: 0.057689, val_acc: 25.832647
+Epoch [2234], train_loss: 0.060578, val_loss: 0.057760, val_acc: 25.806698
+Epoch [2235], train_loss: 0.060512, val_loss: 0.057670, val_acc: 25.860470
+Epoch [2236], train_loss: 0.060306, val_loss: 0.057745, val_acc: 25.751888
+Epoch [2237], train_loss: 0.060551, val_loss: 0.057817, val_acc: 25.722898
+Epoch [2238], train_loss: 0.060523, val_loss: 0.057746, val_acc: 25.741154
+Epoch [2239], train_loss: 0.060546, val_loss: 0.057717, val_acc: 25.759691
+Epoch [2240], train_loss: 0.060551, val_loss: 0.057712, val_acc: 25.808327
+Epoch [2241], train_loss: 0.060611, val_loss: 0.057679, val_acc: 25.874208
+Epoch [2242], train_loss: 0.060566, val_loss: 0.057657, val_acc: 25.869831
+Epoch [2243], train_loss: 0.060531, val_loss: 0.057669, val_acc: 25.866463
+Epoch [2244], train_loss: 0.060555, val_loss: 0.057686, val_acc: 25.828321
+Epoch [2245], train_loss: 0.060478, val_loss: 0.057694, val_acc: 25.839281
+Epoch [2246], train_loss: 0.060566, val_loss: 0.057718, val_acc: 25.782473
+Epoch [2247], train_loss: 0.060439, val_loss: 0.057665, val_acc: 25.889170
+Epoch [2248], train_loss: 0.060596, val_loss: 0.057698, val_acc: 25.800360
+Epoch [2249], train_loss: 0.060528, val_loss: 0.057758, val_acc: 25.783882
+Epoch [2250], train_loss: 0.060595, val_loss: 0.057659, val_acc: 25.864922
+Epoch [2251], train_loss: 0.060389, val_loss: 0.057770, val_acc: 25.751453
+Epoch [2252], train_loss: 0.060439, val_loss: 0.057765, val_acc: 25.727221
+Epoch [2253], train_loss: 0.060434, val_loss: 0.057771, val_acc: 25.762266
+Epoch [2254], train_loss: 0.060392, val_loss: 0.057654, val_acc: 25.833237
+Epoch [2255], train_loss: 0.060442, val_loss: 0.057696, val_acc: 25.881895
+Epoch [2256], train_loss: 0.060201, val_loss: 0.057652, val_acc: 25.832014
+Epoch [2257], train_loss: 0.060565, val_loss: 0.057738, val_acc: 25.799576
+Epoch [2258], train_loss: 0.060503, val_loss: 0.057713, val_acc: 25.801365
+Epoch [2259], train_loss: 0.060388, val_loss: 0.057695, val_acc: 25.776922
+Epoch [2260], train_loss: 0.060468, val_loss: 0.057782, val_acc: 25.788883
+Epoch [2261], train_loss: 0.060459, val_loss: 0.057651, val_acc: 25.884781
+Epoch [2262], train_loss: 0.060406, val_loss: 0.057771, val_acc: 25.756977
+Epoch [2263], train_loss: 0.060503, val_loss: 0.057671, val_acc: 25.813347
+Epoch [2264], train_loss: 0.060507, val_loss: 0.057767, val_acc: 25.738756
+Epoch [2265], train_loss: 0.060517, val_loss: 0.057745, val_acc: 25.701756
+Epoch [2266], train_loss: 0.060506, val_loss: 0.057778, val_acc: 25.756189
+Epoch [2267], train_loss: 0.060666, val_loss: 0.057722, val_acc: 25.793472
+Epoch [2268], train_loss: 0.060327, val_loss: 0.057670, val_acc: 25.863176
+Epoch [2269], train_loss: 0.060372, val_loss: 0.057745, val_acc: 25.766289
+Epoch [2270], train_loss: 0.060563, val_loss: 0.057854, val_acc: 25.700743
+Epoch [2271], train_loss: 0.060402, val_loss: 0.057728, val_acc: 25.751146
+Epoch [2272], train_loss: 0.060368, val_loss: 0.057777, val_acc: 25.723698
+Epoch [2273], train_loss: 0.060484, val_loss: 0.057750, val_acc: 25.765871
+Epoch [2274], train_loss: 0.060436, val_loss: 0.057781, val_acc: 25.718958
+Epoch [2275], train_loss: 0.060189, val_loss: 0.057737, val_acc: 25.766733
+Epoch [2276], train_loss: 0.060332, val_loss: 0.057757, val_acc: 25.785681
+Epoch [2277], train_loss: 0.060608, val_loss: 0.057693, val_acc: 25.837849
+Epoch [2278], train_loss: 0.060549, val_loss: 0.057759, val_acc: 25.771421
+Epoch [2279], train_loss: 0.060385, val_loss: 0.057787, val_acc: 25.850931
+Epoch [2280], train_loss: 0.060316, val_loss: 0.057785, val_acc: 25.769699
+Epoch [2281], train_loss: 0.060431, val_loss: 0.057772, val_acc: 25.780359
+Epoch [2282], train_loss: 0.060413, val_loss: 0.057791, val_acc: 25.749384
+Epoch [2283], train_loss: 0.060295, val_loss: 0.057773, val_acc: 25.756769
+Epoch [2284], train_loss: 0.060511, val_loss: 0.057747, val_acc: 25.777809
+Epoch [2285], train_loss: 0.060231, val_loss: 0.057763, val_acc: 25.785608
+Epoch [2286], train_loss: 0.060634, val_loss: 0.057798, val_acc: 25.710896
+Epoch [2287], train_loss: 0.060508, val_loss: 0.057709, val_acc: 25.810839
+Epoch [2288], train_loss: 0.060485, val_loss: 0.057698, val_acc: 25.848362
+Epoch [2289], train_loss: 0.060521, val_loss: 0.057768, val_acc: 25.761320
+Epoch [2290], train_loss: 0.060425, val_loss: 0.057752, val_acc: 25.757967
+Epoch [2291], train_loss: 0.060443, val_loss: 0.057770, val_acc: 25.776522
+Epoch [2292], train_loss: 0.060336, val_loss: 0.057709, val_acc: 25.817623
+Epoch [2293], train_loss: 0.060297, val_loss: 0.057677, val_acc: 25.812698
+Epoch [2294], train_loss: 0.060347, val_loss: 0.057734, val_acc: 25.772928
+Epoch [2295], train_loss: 0.060384, val_loss: 0.057649, val_acc: 25.857811
+Epoch [2296], train_loss: 0.060538, val_loss: 0.057686, val_acc: 25.831953
+Epoch [2297], train_loss: 0.060525, val_loss: 0.057735, val_acc: 25.762400
+Epoch [2298], train_loss: 0.060272, val_loss: 0.057733, val_acc: 25.775373
+Epoch [2299], train_loss: 0.060513, val_loss: 0.057768, val_acc: 25.762644
+Epoch [2300], train_loss: 0.060309, val_loss: 0.057721, val_acc: 25.819778
+Epoch [2301], train_loss: 0.060518, val_loss: 0.057730, val_acc: 25.802290
+Epoch [2302], train_loss: 0.060626, val_loss: 0.057716, val_acc: 25.807055
+Epoch [2303], train_loss: 0.060316, val_loss: 0.057742, val_acc: 25.688393
+Epoch [2304], train_loss: 0.060404, val_loss: 0.057769, val_acc: 25.792820
+Epoch [2305], train_loss: 0.060342, val_loss: 0.057672, val_acc: 25.816435
+Epoch [2306], train_loss: 0.060532, val_loss: 0.057787, val_acc: 25.795994
+Epoch [2307], train_loss: 0.060414, val_loss: 0.057708, val_acc: 25.785358
+Epoch [2308], train_loss: 0.060439, val_loss: 0.057693, val_acc: 25.835417
+Epoch [2309], train_loss: 0.060305, val_loss: 0.057853, val_acc: 25.674574
+Epoch [2310], train_loss: 0.060325, val_loss: 0.057695, val_acc: 25.869837
+Epoch [2311], train_loss: 0.060477, val_loss: 0.057676, val_acc: 25.863216
+Epoch [2312], train_loss: 0.060403, val_loss: 0.057760, val_acc: 25.746294
+Epoch [2313], train_loss: 0.060510, val_loss: 0.057723, val_acc: 25.835260
+Epoch [2314], train_loss: 0.060269, val_loss: 0.057772, val_acc: 25.767481
+Epoch [2315], train_loss: 0.060343, val_loss: 0.057623, val_acc: 25.907377
+Epoch [2316], train_loss: 0.060531, val_loss: 0.057677, val_acc: 25.807919
+Epoch [2317], train_loss: 0.060555, val_loss: 0.057764, val_acc: 25.782345
+Epoch [2318], train_loss: 0.060432, val_loss: 0.057848, val_acc: 25.697060
+Epoch [2319], train_loss: 0.060387, val_loss: 0.057740, val_acc: 25.807673
+Epoch [2320], train_loss: 0.060556, val_loss: 0.057696, val_acc: 25.828379
+Epoch [2321], train_loss: 0.060606, val_loss: 0.057693, val_acc: 25.844820
+Epoch [2322], train_loss: 0.060465, val_loss: 0.057766, val_acc: 25.702715
+Epoch [2323], train_loss: 0.060402, val_loss: 0.057663, val_acc: 25.879646
+Epoch [2324], train_loss: 0.060456, val_loss: 0.057690, val_acc: 25.842573
+Epoch [2325], train_loss: 0.060471, val_loss: 0.057712, val_acc: 25.825689
+Epoch [2326], train_loss: 0.060316, val_loss: 0.057694, val_acc: 25.815388
+Epoch [2327], train_loss: 0.060569, val_loss: 0.057772, val_acc: 25.747602
+Epoch [2328], train_loss: 0.060555, val_loss: 0.057742, val_acc: 25.773405
+Epoch [2329], train_loss: 0.060388, val_loss: 0.057788, val_acc: 25.722101
+Epoch [2330], train_loss: 0.060507, val_loss: 0.057672, val_acc: 25.834211
+Epoch [2331], train_loss: 0.060297, val_loss: 0.057838, val_acc: 25.652922
+Epoch [2332], train_loss: 0.060502, val_loss: 0.057757, val_acc: 25.807400
+Epoch [2333], train_loss: 0.060542, val_loss: 0.057754, val_acc: 25.806952
+Epoch [2334], train_loss: 0.060351, val_loss: 0.057705, val_acc: 25.860958
+Epoch [2335], train_loss: 0.060395, val_loss: 0.057749, val_acc: 25.796251
+Epoch [2336], train_loss: 0.060554, val_loss: 0.057760, val_acc: 25.770847
+Epoch [2337], train_loss: 0.060493, val_loss: 0.057700, val_acc: 25.792994
+Epoch [2338], train_loss: 0.060306, val_loss: 0.057681, val_acc: 25.803392
+Epoch [2339], train_loss: 0.060502, val_loss: 0.057694, val_acc: 25.877287
+Epoch [2340], train_loss: 0.060413, val_loss: 0.057764, val_acc: 25.744783
+Epoch [2341], train_loss: 0.060661, val_loss: 0.057757, val_acc: 25.797989
+Epoch [2342], train_loss: 0.060458, val_loss: 0.057683, val_acc: 25.855156
+Epoch [2343], train_loss: 0.060511, val_loss: 0.057760, val_acc: 25.805962
+Epoch [2344], train_loss: 0.060242, val_loss: 0.057771, val_acc: 25.763943
+Epoch [2345], train_loss: 0.060548, val_loss: 0.057736, val_acc: 25.753424
+Epoch [2346], train_loss: 0.060441, val_loss: 0.057726, val_acc: 25.811256
+Epoch [2347], train_loss: 0.060240, val_loss: 0.057766, val_acc: 25.764999
+Epoch [2348], train_loss: 0.060462, val_loss: 0.057760, val_acc: 25.733618
+Epoch [2349], train_loss: 0.060344, val_loss: 0.057698, val_acc: 25.754078
+Epoch [2350], train_loss: 0.060545, val_loss: 0.057722, val_acc: 25.811905
+Epoch [2351], train_loss: 0.060389, val_loss: 0.057642, val_acc: 25.866827
+Epoch [2352], train_loss: 0.060383, val_loss: 0.057756, val_acc: 25.725126
+Epoch [2353], train_loss: 0.060302, val_loss: 0.057753, val_acc: 25.728703
+Epoch [2354], train_loss: 0.060375, val_loss: 0.057741, val_acc: 25.729942
+Epoch [2355], train_loss: 0.060419, val_loss: 0.057744, val_acc: 25.768532
+Epoch [2356], train_loss: 0.060482, val_loss: 0.057663, val_acc: 25.828943
+Epoch [2357], train_loss: 0.060303, val_loss: 0.057718, val_acc: 25.838272
+Epoch [2358], train_loss: 0.060499, val_loss: 0.057712, val_acc: 25.830561
+Epoch [2359], train_loss: 0.060248, val_loss: 0.057738, val_acc: 25.710234
+Epoch [2360], train_loss: 0.060549, val_loss: 0.057687, val_acc: 25.840845
+Epoch [2361], train_loss: 0.060567, val_loss: 0.057788, val_acc: 25.707705
+Epoch [2362], train_loss: 0.060577, val_loss: 0.057776, val_acc: 25.808596
+Epoch [2363], train_loss: 0.060494, val_loss: 0.057705, val_acc: 25.798435
+Epoch [2364], train_loss: 0.060539, val_loss: 0.057748, val_acc: 25.780937
+Epoch [2365], train_loss: 0.060258, val_loss: 0.057687, val_acc: 25.820450
+Epoch [2366], train_loss: 0.060472, val_loss: 0.057771, val_acc: 25.744291
+Epoch [2367], train_loss: 0.060592, val_loss: 0.057731, val_acc: 25.751637
+Epoch [2368], train_loss: 0.060199, val_loss: 0.057767, val_acc: 25.765478
+Epoch [2369], train_loss: 0.060519, val_loss: 0.057707, val_acc: 25.831560
+Epoch [2370], train_loss: 0.060590, val_loss: 0.057704, val_acc: 25.825935
+Epoch [2371], train_loss: 0.060312, val_loss: 0.057634, val_acc: 25.857615
+Epoch [2372], train_loss: 0.060432, val_loss: 0.057670, val_acc: 25.854832
+Epoch [2373], train_loss: 0.060474, val_loss: 0.057649, val_acc: 25.851704
+Epoch [2374], train_loss: 0.060528, val_loss: 0.057594, val_acc: 25.887753
+Epoch [2375], train_loss: 0.060378, val_loss: 0.057608, val_acc: 25.824764
+Epoch [2376], train_loss: 0.060342, val_loss: 0.057728, val_acc: 25.717169
+Epoch [2377], train_loss: 0.060572, val_loss: 0.057694, val_acc: 25.821024
+Epoch [2378], train_loss: 0.060341, val_loss: 0.057783, val_acc: 25.704891
+Epoch [2379], train_loss: 0.060476, val_loss: 0.057714, val_acc: 25.779932
+Epoch [2380], train_loss: 0.060556, val_loss: 0.057672, val_acc: 25.791439
+Epoch [2381], train_loss: 0.060409, val_loss: 0.057700, val_acc: 25.768406
+Epoch [2382], train_loss: 0.060316, val_loss: 0.057704, val_acc: 25.820158
+Epoch [2383], train_loss: 0.060443, val_loss: 0.057673, val_acc: 25.790964
+Epoch [2384], train_loss: 0.060510, val_loss: 0.057688, val_acc: 25.850451
+Epoch [2385], train_loss: 0.060426, val_loss: 0.057720, val_acc: 25.709927
+Epoch [2386], train_loss: 0.060289, val_loss: 0.057696, val_acc: 25.807819
+Epoch [2387], train_loss: 0.060245, val_loss: 0.057709, val_acc: 25.749619
+Epoch [2388], train_loss: 0.060357, val_loss: 0.057666, val_acc: 25.764381
+Epoch [2389], train_loss: 0.060311, val_loss: 0.057701, val_acc: 25.771265
+Epoch [2390], train_loss: 0.060324, val_loss: 0.057741, val_acc: 25.774147
+Epoch [2391], train_loss: 0.060509, val_loss: 0.057661, val_acc: 25.859011
+Epoch [2392], train_loss: 0.060457, val_loss: 0.057755, val_acc: 25.703068
+Epoch [2393], train_loss: 0.060223, val_loss: 0.057728, val_acc: 25.768833
+Epoch [2394], train_loss: 0.060483, val_loss: 0.057686, val_acc: 25.772802
+Epoch [2395], train_loss: 0.060572, val_loss: 0.057680, val_acc: 25.757343
+Epoch [2396], train_loss: 0.060536, val_loss: 0.057782, val_acc: 25.773054
+Epoch [2397], train_loss: 0.060271, val_loss: 0.057741, val_acc: 25.797962
+Epoch [2398], train_loss: 0.060331, val_loss: 0.057688, val_acc: 25.771355
+Epoch [2399], train_loss: 0.060374, val_loss: 0.057708, val_acc: 25.780838
+Epoch [2400], train_loss: 0.060412, val_loss: 0.057629, val_acc: 25.853294
+Epoch [2401], train_loss: 0.060472, val_loss: 0.057737, val_acc: 25.771622
+Epoch [2402], train_loss: 0.060622, val_loss: 0.057690, val_acc: 25.786005
+Epoch [2403], train_loss: 0.060510, val_loss: 0.057647, val_acc: 25.800749
+Epoch [2404], train_loss: 0.060200, val_loss: 0.057627, val_acc: 25.850847
+Epoch [2405], train_loss: 0.060437, val_loss: 0.057699, val_acc: 25.765152
+Epoch [2406], train_loss: 0.060367, val_loss: 0.057724, val_acc: 25.773758
+Epoch [2407], train_loss: 0.060472, val_loss: 0.057643, val_acc: 25.833738
+Epoch [2408], train_loss: 0.060154, val_loss: 0.057697, val_acc: 25.786976
+Epoch [2409], train_loss: 0.060471, val_loss: 0.057713, val_acc: 25.817942
+Epoch [2410], train_loss: 0.060346, val_loss: 0.057661, val_acc: 25.833502
+Epoch [2411], train_loss: 0.060397, val_loss: 0.057723, val_acc: 25.720018
+Epoch [2412], train_loss: 0.060474, val_loss: 0.057730, val_acc: 25.788185
+Epoch [2413], train_loss: 0.060342, val_loss: 0.057707, val_acc: 25.802059
+Epoch [2414], train_loss: 0.060389, val_loss: 0.057659, val_acc: 25.863611
+Epoch [2415], train_loss: 0.060578, val_loss: 0.057657, val_acc: 25.807173
+Epoch [2416], train_loss: 0.060226, val_loss: 0.057623, val_acc: 25.816687
+Epoch [2417], train_loss: 0.060539, val_loss: 0.057795, val_acc: 25.701048
+Epoch [2418], train_loss: 0.060399, val_loss: 0.057619, val_acc: 25.853409
+Epoch [2419], train_loss: 0.060432, val_loss: 0.057784, val_acc: 25.695900
+Epoch [2420], train_loss: 0.060329, val_loss: 0.057630, val_acc: 25.837141
+Epoch [2421], train_loss: 0.060356, val_loss: 0.057676, val_acc: 25.829201
+Epoch [2422], train_loss: 0.060189, val_loss: 0.057743, val_acc: 25.754601
+Epoch [2423], train_loss: 0.060387, val_loss: 0.057687, val_acc: 25.782608
+Epoch [2424], train_loss: 0.060196, val_loss: 0.057650, val_acc: 25.818378
+Epoch [2425], train_loss: 0.060298, val_loss: 0.057626, val_acc: 25.863249
+Epoch [2426], train_loss: 0.060382, val_loss: 0.057655, val_acc: 25.865240
+Epoch [2427], train_loss: 0.060366, val_loss: 0.057646, val_acc: 25.834703
+Epoch [2428], train_loss: 0.060250, val_loss: 0.057678, val_acc: 25.796074
+Epoch [2429], train_loss: 0.060130, val_loss: 0.057714, val_acc: 25.769707
+Epoch [2430], train_loss: 0.060428, val_loss: 0.057705, val_acc: 25.800591
+Epoch [2431], train_loss: 0.060545, val_loss: 0.057726, val_acc: 25.808168
+Epoch [2432], train_loss: 0.060497, val_loss: 0.057649, val_acc: 25.812544
+Epoch [2433], train_loss: 0.060250, val_loss: 0.057651, val_acc: 25.796120
+Epoch [2434], train_loss: 0.060448, val_loss: 0.057726, val_acc: 25.746616
+Epoch [2435], train_loss: 0.060437, val_loss: 0.057734, val_acc: 25.734470
+Epoch [2436], train_loss: 0.060344, val_loss: 0.057704, val_acc: 25.784967
+Epoch [2437], train_loss: 0.060507, val_loss: 0.057768, val_acc: 25.741991
+Epoch [2438], train_loss: 0.060398, val_loss: 0.057712, val_acc: 25.745380
+Epoch [2439], train_loss: 0.060493, val_loss: 0.057624, val_acc: 25.854685
+Epoch [2440], train_loss: 0.060437, val_loss: 0.057596, val_acc: 25.859146
+Epoch [2441], train_loss: 0.060416, val_loss: 0.057655, val_acc: 25.830259
+Epoch [2442], train_loss: 0.060241, val_loss: 0.057750, val_acc: 25.788610
+Epoch [2443], train_loss: 0.060401, val_loss: 0.057632, val_acc: 25.867136
+Epoch [2444], train_loss: 0.060631, val_loss: 0.057806, val_acc: 25.721800
+Epoch [2445], train_loss: 0.060395, val_loss: 0.057622, val_acc: 25.904385
+Epoch [2446], train_loss: 0.060388, val_loss: 0.057783, val_acc: 25.768480
+Epoch [2447], train_loss: 0.060420, val_loss: 0.057741, val_acc: 25.795557
+Epoch [2448], train_loss: 0.060454, val_loss: 0.057690, val_acc: 25.789230
+Epoch [2449], train_loss: 0.060390, val_loss: 0.057688, val_acc: 25.817247
+Epoch [2450], train_loss: 0.060595, val_loss: 0.057711, val_acc: 25.764851
+Epoch [2451], train_loss: 0.060275, val_loss: 0.057670, val_acc: 25.809145
+Epoch [2452], train_loss: 0.060293, val_loss: 0.057695, val_acc: 25.816948
+Epoch [2453], train_loss: 0.060344, val_loss: 0.057642, val_acc: 25.815132
+Epoch [2454], train_loss: 0.060387, val_loss: 0.057714, val_acc: 25.782608
+Epoch [2455], train_loss: 0.060325, val_loss: 0.057771, val_acc: 25.698103
+Epoch [2456], train_loss: 0.060407, val_loss: 0.057715, val_acc: 25.797068
+Epoch [2457], train_loss: 0.060491, val_loss: 0.057651, val_acc: 25.796762
+Epoch [2458], train_loss: 0.060226, val_loss: 0.057643, val_acc: 25.842619
+Epoch [2459], train_loss: 0.060231, val_loss: 0.057664, val_acc: 25.842268
+Epoch [2460], train_loss: 0.060413, val_loss: 0.057715, val_acc: 25.769999
+Epoch [2461], train_loss: 0.060467, val_loss: 0.057693, val_acc: 25.836718
+Epoch [2462], train_loss: 0.060496, val_loss: 0.057663, val_acc: 25.828520
+Epoch [2463], train_loss: 0.060186, val_loss: 0.057710, val_acc: 25.802383
+Epoch [2464], train_loss: 0.060236, val_loss: 0.057690, val_acc: 25.823971
+Epoch [2465], train_loss: 0.060358, val_loss: 0.057633, val_acc: 25.875994
+Epoch [2466], train_loss: 0.060451, val_loss: 0.057650, val_acc: 25.890287
+Epoch [2467], train_loss: 0.060430, val_loss: 0.057675, val_acc: 25.798214
+Epoch [2468], train_loss: 0.060290, val_loss: 0.057732, val_acc: 25.748436
+Epoch [2469], train_loss: 0.060425, val_loss: 0.057673, val_acc: 25.834295
+Epoch [2470], train_loss: 0.060342, val_loss: 0.057675, val_acc: 25.824558
+Epoch [2471], train_loss: 0.060391, val_loss: 0.057653, val_acc: 25.834496
+Epoch [2472], train_loss: 0.060153, val_loss: 0.057748, val_acc: 25.730509
+Epoch [2473], train_loss: 0.060381, val_loss: 0.057701, val_acc: 25.801142
+Epoch [2474], train_loss: 0.060228, val_loss: 0.057674, val_acc: 25.785894
+Epoch [2475], train_loss: 0.060403, val_loss: 0.057661, val_acc: 25.783913
+Epoch [2476], train_loss: 0.060440, val_loss: 0.057665, val_acc: 25.784021
+Epoch [2477], train_loss: 0.060382, val_loss: 0.057636, val_acc: 25.846621
+Epoch [2478], train_loss: 0.060254, val_loss: 0.057681, val_acc: 25.796150
+Epoch [2479], train_loss: 0.060358, val_loss: 0.057743, val_acc: 25.792673
+Epoch [2480], train_loss: 0.060570, val_loss: 0.057643, val_acc: 25.844248
+Epoch [2481], train_loss: 0.060342, val_loss: 0.057673, val_acc: 25.827284
+Epoch [2482], train_loss: 0.060332, val_loss: 0.057730, val_acc: 25.766788
+Epoch [2483], train_loss: 0.060348, val_loss: 0.057707, val_acc: 25.793003
+Epoch [2484], train_loss: 0.060400, val_loss: 0.057675, val_acc: 25.846964
+Epoch [2485], train_loss: 0.060355, val_loss: 0.057764, val_acc: 25.700815
+Epoch [2486], train_loss: 0.060335, val_loss: 0.057698, val_acc: 25.778185
+Epoch [2487], train_loss: 0.060405, val_loss: 0.057592, val_acc: 25.869677
+Epoch [2488], train_loss: 0.060121, val_loss: 0.057670, val_acc: 25.808949
+Epoch [2489], train_loss: 0.060294, val_loss: 0.057745, val_acc: 25.721159
+Epoch [2490], train_loss: 0.060256, val_loss: 0.057601, val_acc: 25.847319
+Epoch [2491], train_loss: 0.060246, val_loss: 0.057702, val_acc: 25.727404
+Epoch [2492], train_loss: 0.060489, val_loss: 0.057679, val_acc: 25.802628
+Epoch [2493], train_loss: 0.060395, val_loss: 0.057674, val_acc: 25.789749
+Epoch [2494], train_loss: 0.060427, val_loss: 0.057692, val_acc: 25.783449
+Epoch [2495], train_loss: 0.060310, val_loss: 0.057638, val_acc: 25.834669
+Epoch [2496], train_loss: 0.060439, val_loss: 0.057678, val_acc: 25.821690
+Epoch [2497], train_loss: 0.060342, val_loss: 0.057722, val_acc: 25.734074
+Epoch [2498], train_loss: 0.060246, val_loss: 0.057705, val_acc: 25.754930
+Epoch [2499], train_loss: 0.060437, val_loss: 0.057653, val_acc: 25.766607
+Epoch [2500], train_loss: 0.060294, val_loss: 0.057740, val_acc: 25.772913
+Epoch [2501], train_loss: 0.060221, val_loss: 0.057711, val_acc: 25.781612
+Epoch [2502], train_loss: 0.060244, val_loss: 0.057725, val_acc: 25.816341
+Epoch [2503], train_loss: 0.060338, val_loss: 0.057647, val_acc: 25.845608
+Epoch [2504], train_loss: 0.060227, val_loss: 0.057714, val_acc: 25.809135
+Epoch [2505], train_loss: 0.060379, val_loss: 0.057736, val_acc: 25.742212
+Epoch [2506], train_loss: 0.060176, val_loss: 0.057684, val_acc: 25.771687
+Epoch [2507], train_loss: 0.060365, val_loss: 0.057732, val_acc: 25.719160
+Epoch [2508], train_loss: 0.060374, val_loss: 0.057652, val_acc: 25.824255
+Epoch [2509], train_loss: 0.060401, val_loss: 0.057686, val_acc: 25.777624
+Epoch [2510], train_loss: 0.060500, val_loss: 0.057655, val_acc: 25.817949
+Epoch [2511], train_loss: 0.060110, val_loss: 0.057596, val_acc: 25.852907
+Epoch [2512], train_loss: 0.060392, val_loss: 0.057624, val_acc: 25.846895
+Epoch [2513], train_loss: 0.060596, val_loss: 0.057716, val_acc: 25.725859
+Epoch [2514], train_loss: 0.060458, val_loss: 0.057617, val_acc: 25.849962
+Epoch [2515], train_loss: 0.060285, val_loss: 0.057671, val_acc: 25.829557
+Epoch [2516], train_loss: 0.060420, val_loss: 0.057614, val_acc: 25.831135
+Epoch [2517], train_loss: 0.060324, val_loss: 0.057652, val_acc: 25.813055
+Epoch [2518], train_loss: 0.060392, val_loss: 0.057759, val_acc: 25.667982
+Epoch [2519], train_loss: 0.060430, val_loss: 0.057708, val_acc: 25.713600
+Epoch [2520], train_loss: 0.060407, val_loss: 0.057732, val_acc: 25.772537
+Epoch [2521], train_loss: 0.060377, val_loss: 0.057716, val_acc: 25.770927
+Epoch [2522], train_loss: 0.060315, val_loss: 0.057672, val_acc: 25.781473
+Epoch [2523], train_loss: 0.060243, val_loss: 0.057654, val_acc: 25.837883
+Epoch [2524], train_loss: 0.060326, val_loss: 0.057669, val_acc: 25.799862
+Epoch [2525], train_loss: 0.060338, val_loss: 0.057622, val_acc: 25.826717
+Epoch [2526], train_loss: 0.060450, val_loss: 0.057678, val_acc: 25.794975
+Epoch [2527], train_loss: 0.060460, val_loss: 0.057775, val_acc: 25.702280
+Epoch [2528], train_loss: 0.060301, val_loss: 0.057588, val_acc: 25.888716
+Epoch [2529], train_loss: 0.060526, val_loss: 0.057711, val_acc: 25.756676
+Epoch [2530], train_loss: 0.060357, val_loss: 0.057643, val_acc: 25.869198
+Epoch [2531], train_loss: 0.060348, val_loss: 0.057711, val_acc: 25.784462
+Epoch [2532], train_loss: 0.060396, val_loss: 0.057694, val_acc: 25.797184
+Epoch [2533], train_loss: 0.060327, val_loss: 0.057675, val_acc: 25.787889
+Epoch [2534], train_loss: 0.060340, val_loss: 0.057696, val_acc: 25.725990
+Epoch [2535], train_loss: 0.060246, val_loss: 0.057619, val_acc: 25.842955
+Epoch [2536], train_loss: 0.060437, val_loss: 0.057676, val_acc: 25.809258
+Epoch [2537], train_loss: 0.060292, val_loss: 0.057646, val_acc: 25.794695
+Epoch [2538], train_loss: 0.060417, val_loss: 0.057636, val_acc: 25.820829
+Epoch [2539], train_loss: 0.060477, val_loss: 0.057605, val_acc: 25.815153
+Epoch [2540], train_loss: 0.060537, val_loss: 0.057720, val_acc: 25.768425
+Epoch [2541], train_loss: 0.060314, val_loss: 0.057642, val_acc: 25.884607
+Epoch [2542], train_loss: 0.060421, val_loss: 0.057670, val_acc: 25.795937
+Epoch [2543], train_loss: 0.060253, val_loss: 0.057701, val_acc: 25.787951
+Epoch [2544], train_loss: 0.060363, val_loss: 0.057640, val_acc: 25.815018
+Epoch [2545], train_loss: 0.060441, val_loss: 0.057732, val_acc: 25.743456
+Epoch [2546], train_loss: 0.060320, val_loss: 0.057712, val_acc: 25.800238
+Epoch [2547], train_loss: 0.060407, val_loss: 0.057661, val_acc: 25.777006
+Epoch [2548], train_loss: 0.060327, val_loss: 0.057653, val_acc: 25.809839
+Epoch [2549], train_loss: 0.060258, val_loss: 0.057686, val_acc: 25.778643
+Epoch [2550], train_loss: 0.060556, val_loss: 0.057761, val_acc: 25.777311
+Epoch [2551], train_loss: 0.060373, val_loss: 0.057658, val_acc: 25.817579
+Epoch [2552], train_loss: 0.060435, val_loss: 0.057674, val_acc: 25.806717
+Epoch [2553], train_loss: 0.060337, val_loss: 0.057625, val_acc: 25.829929
+Epoch [2554], train_loss: 0.060449, val_loss: 0.057770, val_acc: 25.725822
+Epoch [2555], train_loss: 0.060275, val_loss: 0.057738, val_acc: 25.739683
+Epoch [2556], train_loss: 0.060418, val_loss: 0.057730, val_acc: 25.786932
+Epoch [2557], train_loss: 0.060301, val_loss: 0.057667, val_acc: 25.738541
+Epoch [2558], train_loss: 0.060530, val_loss: 0.057706, val_acc: 25.762974
+Epoch [2559], train_loss: 0.060600, val_loss: 0.057695, val_acc: 25.787312
+Epoch [2560], train_loss: 0.060341, val_loss: 0.057687, val_acc: 25.759150
+Epoch [2561], train_loss: 0.060299, val_loss: 0.057641, val_acc: 25.804146
+Epoch [2562], train_loss: 0.060384, val_loss: 0.057721, val_acc: 25.714869
+Epoch [2563], train_loss: 0.060237, val_loss: 0.057675, val_acc: 25.799637
+Epoch [2564], train_loss: 0.060343, val_loss: 0.057726, val_acc: 25.784153
+Epoch [2565], train_loss: 0.060373, val_loss: 0.057699, val_acc: 25.761660
+Epoch [2566], train_loss: 0.060279, val_loss: 0.057709, val_acc: 25.763321
+Epoch [2567], train_loss: 0.060284, val_loss: 0.057753, val_acc: 25.721907
+Epoch [2568], train_loss: 0.060378, val_loss: 0.057658, val_acc: 25.813074
+Epoch [2569], train_loss: 0.060478, val_loss: 0.057766, val_acc: 25.712862
+Epoch [2570], train_loss: 0.060437, val_loss: 0.057677, val_acc: 25.790171
+Epoch [2571], train_loss: 0.060286, val_loss: 0.057727, val_acc: 25.738300
+Epoch [2572], train_loss: 0.060558, val_loss: 0.057637, val_acc: 25.810093
+Epoch [2573], train_loss: 0.060201, val_loss: 0.057757, val_acc: 25.723274
+Epoch [2574], train_loss: 0.060276, val_loss: 0.057719, val_acc: 25.743174
+Epoch [2575], train_loss: 0.060477, val_loss: 0.057656, val_acc: 25.880278
+Epoch [2576], train_loss: 0.060459, val_loss: 0.057667, val_acc: 25.811562
+Epoch [2577], train_loss: 0.060275, val_loss: 0.057760, val_acc: 25.689520
+Epoch [2578], train_loss: 0.060392, val_loss: 0.057665, val_acc: 25.824074
+Epoch [2579], train_loss: 0.060369, val_loss: 0.057709, val_acc: 25.760838
+Epoch [2580], train_loss: 0.060194, val_loss: 0.057754, val_acc: 25.673983
+Epoch [2581], train_loss: 0.060582, val_loss: 0.057665, val_acc: 25.812553
+Epoch [2582], train_loss: 0.060493, val_loss: 0.057666, val_acc: 25.818670
+Epoch [2583], train_loss: 0.060274, val_loss: 0.057647, val_acc: 25.738663
+Epoch [2584], train_loss: 0.060322, val_loss: 0.057637, val_acc: 25.825342
+Epoch [2585], train_loss: 0.060417, val_loss: 0.057637, val_acc: 25.853174
+Epoch [2586], train_loss: 0.060330, val_loss: 0.057643, val_acc: 25.782749
+Epoch [2587], train_loss: 0.060389, val_loss: 0.057721, val_acc: 25.730986
+Epoch [2588], train_loss: 0.060253, val_loss: 0.057664, val_acc: 25.750740
+Epoch [2589], train_loss: 0.060398, val_loss: 0.057652, val_acc: 25.770412
+Epoch [2590], train_loss: 0.060209, val_loss: 0.057697, val_acc: 25.762152
+Epoch [2591], train_loss: 0.060396, val_loss: 0.057691, val_acc: 25.770636
+Epoch [2592], train_loss: 0.060540, val_loss: 0.057810, val_acc: 25.704466
+Epoch [2593], train_loss: 0.060379, val_loss: 0.057614, val_acc: 25.793085
+Epoch [2594], train_loss: 0.060190, val_loss: 0.057691, val_acc: 25.758018
+Epoch [2595], train_loss: 0.060393, val_loss: 0.057660, val_acc: 25.794044
+Epoch [2596], train_loss: 0.060285, val_loss: 0.057794, val_acc: 25.706484
+Epoch [2597], train_loss: 0.060308, val_loss: 0.057685, val_acc: 25.796698
+Epoch [2598], train_loss: 0.060386, val_loss: 0.057609, val_acc: 25.807787
+Epoch [2599], train_loss: 0.060329, val_loss: 0.057739, val_acc: 25.716206
+Epoch [2600], train_loss: 0.060380, val_loss: 0.057642, val_acc: 25.797754
+Epoch [2601], train_loss: 0.060389, val_loss: 0.057630, val_acc: 25.861917
+Epoch [2602], train_loss: 0.060243, val_loss: 0.057675, val_acc: 25.724117
+Epoch [2603], train_loss: 0.060203, val_loss: 0.057694, val_acc: 25.735188
+Epoch [2604], train_loss: 0.060348, val_loss: 0.057617, val_acc: 25.805956
+Epoch [2605], train_loss: 0.060122, val_loss: 0.057707, val_acc: 25.733097
+Epoch [2606], train_loss: 0.060315, val_loss: 0.057709, val_acc: 25.740648
+Epoch [2607], train_loss: 0.060211, val_loss: 0.057632, val_acc: 25.829628
+Epoch [2608], train_loss: 0.060389, val_loss: 0.057621, val_acc: 25.826515
+Epoch [2609], train_loss: 0.060312, val_loss: 0.057690, val_acc: 25.788092
+Epoch [2610], train_loss: 0.060329, val_loss: 0.057714, val_acc: 25.713194
+Epoch [2611], train_loss: 0.060361, val_loss: 0.057745, val_acc: 25.715286
+Epoch [2612], train_loss: 0.060413, val_loss: 0.057683, val_acc: 25.749687
+Epoch [2613], train_loss: 0.060148, val_loss: 0.057691, val_acc: 25.722649
+Epoch [2614], train_loss: 0.060520, val_loss: 0.057623, val_acc: 25.779669
+Epoch [2615], train_loss: 0.060228, val_loss: 0.057693, val_acc: 25.729319
+Epoch [2616], train_loss: 0.060230, val_loss: 0.057711, val_acc: 25.677341
+Epoch [2617], train_loss: 0.060226, val_loss: 0.057693, val_acc: 25.760033
+Epoch [2618], train_loss: 0.060396, val_loss: 0.057723, val_acc: 25.735111
+Epoch [2619], train_loss: 0.060340, val_loss: 0.057681, val_acc: 25.746607
+Epoch [2620], train_loss: 0.060434, val_loss: 0.057649, val_acc: 25.787003
+Epoch [2621], train_loss: 0.060367, val_loss: 0.057720, val_acc: 25.733727
+Epoch [2622], train_loss: 0.060246, val_loss: 0.057650, val_acc: 25.758156
+Epoch [2623], train_loss: 0.060364, val_loss: 0.057705, val_acc: 25.760969
+Epoch [2624], train_loss: 0.060333, val_loss: 0.057721, val_acc: 25.722794
+Epoch [2625], train_loss: 0.060214, val_loss: 0.057669, val_acc: 25.779360
+Epoch [2626], train_loss: 0.060111, val_loss: 0.057718, val_acc: 25.737268
+Epoch [2627], train_loss: 0.060209, val_loss: 0.057767, val_acc: 25.709042
+Epoch [2628], train_loss: 0.060241, val_loss: 0.057615, val_acc: 25.820662
+Epoch [2629], train_loss: 0.060303, val_loss: 0.057717, val_acc: 25.731306
+Epoch [2630], train_loss: 0.060452, val_loss: 0.057694, val_acc: 25.773636
+Epoch [2631], train_loss: 0.060372, val_loss: 0.057663, val_acc: 25.823257
+Epoch [2632], train_loss: 0.060249, val_loss: 0.057730, val_acc: 25.655687
+Epoch [2633], train_loss: 0.060165, val_loss: 0.057605, val_acc: 25.798641
+Epoch [2634], train_loss: 0.060360, val_loss: 0.057737, val_acc: 25.726835
+Epoch [2635], train_loss: 0.060342, val_loss: 0.057658, val_acc: 25.779329
+Epoch [2636], train_loss: 0.060273, val_loss: 0.057651, val_acc: 25.781492
+Epoch [2637], train_loss: 0.060426, val_loss: 0.057641, val_acc: 25.749031
+Epoch [2638], train_loss: 0.060376, val_loss: 0.057708, val_acc: 25.756376
+Epoch [2639], train_loss: 0.060533, val_loss: 0.057614, val_acc: 25.813950
+Epoch [2640], train_loss: 0.060377, val_loss: 0.057640, val_acc: 25.781538
+Epoch [2641], train_loss: 0.060397, val_loss: 0.057675, val_acc: 25.775476
+Epoch [2642], train_loss: 0.060209, val_loss: 0.057618, val_acc: 25.854078
+Epoch [2643], train_loss: 0.060169, val_loss: 0.057579, val_acc: 25.847134
+Epoch [2644], train_loss: 0.060523, val_loss: 0.057661, val_acc: 25.808325
+Epoch [2645], train_loss: 0.060131, val_loss: 0.057616, val_acc: 25.758661
+Epoch [2646], train_loss: 0.060317, val_loss: 0.057658, val_acc: 25.769922
+Epoch [2647], train_loss: 0.060285, val_loss: 0.057748, val_acc: 25.738039
+Epoch [2648], train_loss: 0.060409, val_loss: 0.057703, val_acc: 25.749010
+Epoch [2649], train_loss: 0.060179, val_loss: 0.057612, val_acc: 25.744486
+Epoch [2650], train_loss: 0.060476, val_loss: 0.057716, val_acc: 25.695705
+Epoch [2651], train_loss: 0.060158, val_loss: 0.057719, val_acc: 25.697411
+Epoch [2652], train_loss: 0.060296, val_loss: 0.057740, val_acc: 25.702703
+Epoch [2653], train_loss: 0.060269, val_loss: 0.057654, val_acc: 25.758480
+Epoch [2654], train_loss: 0.060233, val_loss: 0.057650, val_acc: 25.788359
+Epoch [2655], train_loss: 0.060363, val_loss: 0.057675, val_acc: 25.742994
+Epoch [2656], train_loss: 0.060234, val_loss: 0.057697, val_acc: 25.745518
+Epoch [2657], train_loss: 0.060229, val_loss: 0.057652, val_acc: 25.806780
+Epoch [2658], train_loss: 0.060206, val_loss: 0.057699, val_acc: 25.727442
+Epoch [2659], train_loss: 0.060356, val_loss: 0.057666, val_acc: 25.773701
+Epoch [2660], train_loss: 0.060325, val_loss: 0.057748, val_acc: 25.685635
+Epoch [2661], train_loss: 0.060373, val_loss: 0.057696, val_acc: 25.745955
+Epoch [2662], train_loss: 0.060151, val_loss: 0.057627, val_acc: 25.743778
+Epoch [2663], train_loss: 0.060388, val_loss: 0.057678, val_acc: 25.726709
+Epoch [2664], train_loss: 0.060192, val_loss: 0.057689, val_acc: 25.685825
+Epoch [2665], train_loss: 0.060304, val_loss: 0.057731, val_acc: 25.686123
+Epoch [2666], train_loss: 0.060163, val_loss: 0.057588, val_acc: 25.788692
+Epoch [2667], train_loss: 0.060324, val_loss: 0.057750, val_acc: 25.680696
+Epoch [2668], train_loss: 0.060307, val_loss: 0.057731, val_acc: 25.717075
+Epoch [2669], train_loss: 0.060282, val_loss: 0.057669, val_acc: 25.780678
+Epoch [2670], train_loss: 0.060276, val_loss: 0.057701, val_acc: 25.713564
+Epoch [2671], train_loss: 0.060266, val_loss: 0.057670, val_acc: 25.691652
+Epoch [2672], train_loss: 0.060521, val_loss: 0.057616, val_acc: 25.842585
+Epoch [2673], train_loss: 0.060162, val_loss: 0.057625, val_acc: 25.771908
+Epoch [2674], train_loss: 0.060202, val_loss: 0.057663, val_acc: 25.742302
+Epoch [2675], train_loss: 0.060400, val_loss: 0.057705, val_acc: 25.704617
+Epoch [2676], train_loss: 0.060226, val_loss: 0.057716, val_acc: 25.729588
+Epoch [2677], train_loss: 0.060079, val_loss: 0.057682, val_acc: 25.726387
+Epoch [2678], train_loss: 0.060192, val_loss: 0.057659, val_acc: 25.747442
+Epoch [2679], train_loss: 0.060334, val_loss: 0.057689, val_acc: 25.763908
+Epoch [2680], train_loss: 0.060266, val_loss: 0.057643, val_acc: 25.774225
+Epoch [2681], train_loss: 0.060274, val_loss: 0.057620, val_acc: 25.747797
+Epoch [2682], train_loss: 0.060223, val_loss: 0.057730, val_acc: 25.743773
+Epoch [2683], train_loss: 0.060233, val_loss: 0.057710, val_acc: 25.714075
+Epoch [2684], train_loss: 0.060324, val_loss: 0.057634, val_acc: 25.789370
+Epoch [2685], train_loss: 0.060119, val_loss: 0.057670, val_acc: 25.720627
+Epoch [2686], train_loss: 0.060148, val_loss: 0.057737, val_acc: 25.664621
+Epoch [2687], train_loss: 0.060267, val_loss: 0.057677, val_acc: 25.780666
+Epoch [2688], train_loss: 0.060162, val_loss: 0.057650, val_acc: 25.773706
+Epoch [2689], train_loss: 0.060368, val_loss: 0.057612, val_acc: 25.738499
+Epoch [2690], train_loss: 0.060240, val_loss: 0.057611, val_acc: 25.801874
+Epoch [2691], train_loss: 0.060130, val_loss: 0.057646, val_acc: 25.759405
+Epoch [2692], train_loss: 0.060304, val_loss: 0.057660, val_acc: 25.758730
+Epoch [2693], train_loss: 0.060232, val_loss: 0.057663, val_acc: 25.765940
+Epoch [2694], train_loss: 0.060334, val_loss: 0.057658, val_acc: 25.767433
+Epoch [2695], train_loss: 0.060305, val_loss: 0.057660, val_acc: 25.744637
+Epoch [2696], train_loss: 0.060455, val_loss: 0.057656, val_acc: 25.764145
+Epoch [2697], train_loss: 0.060108, val_loss: 0.057652, val_acc: 25.713619
+Epoch [2698], train_loss: 0.060237, val_loss: 0.057698, val_acc: 25.746771
+Epoch [2699], train_loss: 0.060321, val_loss: 0.057635, val_acc: 25.799335
+Epoch [2700], train_loss: 0.060223, val_loss: 0.057673, val_acc: 25.776445
+Epoch [2701], train_loss: 0.060189, val_loss: 0.057674, val_acc: 25.687889
+Epoch [2702], train_loss: 0.060011, val_loss: 0.057716, val_acc: 25.690254
+Epoch [2703], train_loss: 0.060379, val_loss: 0.057756, val_acc: 25.648659
+Epoch [2704], train_loss: 0.060148, val_loss: 0.057659, val_acc: 25.736155
+Epoch [2705], train_loss: 0.060392, val_loss: 0.057645, val_acc: 25.767103
+Epoch [2706], train_loss: 0.060396, val_loss: 0.057675, val_acc: 25.773851
+Epoch [2707], train_loss: 0.060303, val_loss: 0.057586, val_acc: 25.823418
+Epoch [2708], train_loss: 0.060163, val_loss: 0.057672, val_acc: 25.676838
+Epoch [2709], train_loss: 0.060248, val_loss: 0.057650, val_acc: 25.727699
+Epoch [2710], train_loss: 0.060231, val_loss: 0.057683, val_acc: 25.728247
+Epoch [2711], train_loss: 0.060340, val_loss: 0.057720, val_acc: 25.644743
+Epoch [2712], train_loss: 0.060263, val_loss: 0.057689, val_acc: 25.737322
+Epoch [2713], train_loss: 0.059967, val_loss: 0.057700, val_acc: 25.703728
+Epoch [2714], train_loss: 0.060078, val_loss: 0.057731, val_acc: 25.709888
+Epoch [2715], train_loss: 0.060427, val_loss: 0.057687, val_acc: 25.737089
+Epoch [2716], train_loss: 0.060259, val_loss: 0.057669, val_acc: 25.743095
+Epoch [2717], train_loss: 0.060352, val_loss: 0.057749, val_acc: 25.643183
+Epoch [2718], train_loss: 0.060201, val_loss: 0.057764, val_acc: 25.632372
+Epoch [2719], train_loss: 0.060277, val_loss: 0.057638, val_acc: 25.701950
+Epoch [2720], train_loss: 0.060475, val_loss: 0.057760, val_acc: 25.635212
+Epoch [2721], train_loss: 0.060152, val_loss: 0.057659, val_acc: 25.747150
+Epoch [2722], train_loss: 0.060329, val_loss: 0.057695, val_acc: 25.720781
+Epoch [2723], train_loss: 0.060414, val_loss: 0.057638, val_acc: 25.779970
+Epoch [2724], train_loss: 0.060150, val_loss: 0.057679, val_acc: 25.721498
+Epoch [2725], train_loss: 0.060273, val_loss: 0.057659, val_acc: 25.702703
+Epoch [2726], train_loss: 0.060189, val_loss: 0.057675, val_acc: 25.710487
+Epoch [2727], train_loss: 0.060391, val_loss: 0.057635, val_acc: 25.788692
+Epoch [2728], train_loss: 0.060277, val_loss: 0.057630, val_acc: 25.742115
+Epoch [2729], train_loss: 0.060148, val_loss: 0.057712, val_acc: 25.657701
+Epoch [2730], train_loss: 0.060200, val_loss: 0.057711, val_acc: 25.633276
+Epoch [2731], train_loss: 0.060451, val_loss: 0.057707, val_acc: 25.678276
+Epoch [2732], train_loss: 0.060155, val_loss: 0.057698, val_acc: 25.683060
+Epoch [2733], train_loss: 0.060185, val_loss: 0.057660, val_acc: 25.736395
+Epoch [2734], train_loss: 0.060329, val_loss: 0.057686, val_acc: 25.703890
+Epoch [2735], train_loss: 0.060117, val_loss: 0.057688, val_acc: 25.722183
+Epoch [2736], train_loss: 0.060197, val_loss: 0.057670, val_acc: 25.679867
+Epoch [2737], train_loss: 0.060140, val_loss: 0.057690, val_acc: 25.755306
+Epoch [2738], train_loss: 0.060187, val_loss: 0.057698, val_acc: 25.689432
+Epoch [2739], train_loss: 0.060245, val_loss: 0.057699, val_acc: 25.701683
+Epoch [2740], train_loss: 0.060306, val_loss: 0.057657, val_acc: 25.749851
+Epoch [2741], train_loss: 0.060099, val_loss: 0.057684, val_acc: 25.747963
+Epoch [2742], train_loss: 0.060246, val_loss: 0.057669, val_acc: 25.721859
+Epoch [2743], train_loss: 0.060257, val_loss: 0.057672, val_acc: 25.681707
+Epoch [2744], train_loss: 0.060344, val_loss: 0.057673, val_acc: 25.733686
+Epoch [2745], train_loss: 0.060308, val_loss: 0.057648, val_acc: 25.723597
+Epoch [2746], train_loss: 0.060100, val_loss: 0.057645, val_acc: 25.759661
+Epoch [2747], train_loss: 0.060312, val_loss: 0.057716, val_acc: 25.622776
+Epoch [2748], train_loss: 0.060367, val_loss: 0.057657, val_acc: 25.705141
+Epoch [2749], train_loss: 0.060349, val_loss: 0.057704, val_acc: 25.740480
+Epoch [2750], train_loss: 0.060180, val_loss: 0.057708, val_acc: 25.725330
+Epoch [2751], train_loss: 0.060208, val_loss: 0.057616, val_acc: 25.785423
+Epoch [2752], train_loss: 0.060293, val_loss: 0.057747, val_acc: 25.603098
+Epoch [2753], train_loss: 0.060236, val_loss: 0.057800, val_acc: 25.541290
+Epoch [2754], train_loss: 0.060210, val_loss: 0.057741, val_acc: 25.619799
+Epoch [2755], train_loss: 0.060293, val_loss: 0.057710, val_acc: 25.675585
+Epoch [2756], train_loss: 0.060063, val_loss: 0.057715, val_acc: 25.679539
+Epoch [2757], train_loss: 0.060123, val_loss: 0.057676, val_acc: 25.709745
+Epoch [2758], train_loss: 0.060360, val_loss: 0.057705, val_acc: 25.714359
+Epoch [2759], train_loss: 0.060100, val_loss: 0.057732, val_acc: 25.660166
+Epoch [2760], train_loss: 0.060352, val_loss: 0.057716, val_acc: 25.652225
+Epoch [2761], train_loss: 0.060163, val_loss: 0.057711, val_acc: 25.682247
+Epoch [2762], train_loss: 0.060150, val_loss: 0.057679, val_acc: 25.708134
+Epoch [2763], train_loss: 0.060200, val_loss: 0.057779, val_acc: 25.647449
+Epoch [2764], train_loss: 0.060099, val_loss: 0.057653, val_acc: 25.728460
+Epoch [2765], train_loss: 0.060339, val_loss: 0.057708, val_acc: 25.708216
+Epoch [2766], train_loss: 0.060351, val_loss: 0.057702, val_acc: 25.713821
+Epoch [2767], train_loss: 0.060178, val_loss: 0.057706, val_acc: 25.689247
+Epoch [2768], train_loss: 0.059891, val_loss: 0.057616, val_acc: 25.778694
+Epoch [2769], train_loss: 0.060292, val_loss: 0.057682, val_acc: 25.642511
+Epoch [2770], train_loss: 0.060225, val_loss: 0.057708, val_acc: 25.685286
+Epoch [2771], train_loss: 0.060194, val_loss: 0.057670, val_acc: 25.720285
+Epoch [2772], train_loss: 0.060207, val_loss: 0.057620, val_acc: 25.809380
+Epoch [2773], train_loss: 0.060254, val_loss: 0.057696, val_acc: 25.714380
+Epoch [2774], train_loss: 0.060276, val_loss: 0.057695, val_acc: 25.690159
+Epoch [2775], train_loss: 0.060240, val_loss: 0.057726, val_acc: 25.667791
+Epoch [2776], train_loss: 0.060101, val_loss: 0.057672, val_acc: 25.697794
+Epoch [2777], train_loss: 0.060039, val_loss: 0.057700, val_acc: 25.636095
+Epoch [2778], train_loss: 0.060339, val_loss: 0.057753, val_acc: 25.634951
+Epoch [2779], train_loss: 0.060357, val_loss: 0.057731, val_acc: 25.681993
+Epoch [2780], train_loss: 0.060122, val_loss: 0.057771, val_acc: 25.622166
+Epoch [2781], train_loss: 0.060164, val_loss: 0.057732, val_acc: 25.602180
+Epoch [2782], train_loss: 0.060142, val_loss: 0.057659, val_acc: 25.737780
+Epoch [2783], train_loss: 0.060137, val_loss: 0.057687, val_acc: 25.687128
+Epoch [2784], train_loss: 0.060193, val_loss: 0.057626, val_acc: 25.714594
+Epoch [2785], train_loss: 0.060207, val_loss: 0.057722, val_acc: 25.637766
+Epoch [2786], train_loss: 0.060319, val_loss: 0.057725, val_acc: 25.645908
+Epoch [2787], train_loss: 0.060157, val_loss: 0.057727, val_acc: 25.623259
+Epoch [2788], train_loss: 0.060175, val_loss: 0.057632, val_acc: 25.794920
+Epoch [2789], train_loss: 0.060111, val_loss: 0.057703, val_acc: 25.693121
+Epoch [2790], train_loss: 0.060295, val_loss: 0.057668, val_acc: 25.729925
+Epoch [2791], train_loss: 0.059995, val_loss: 0.057731, val_acc: 25.631924
+Epoch [2792], train_loss: 0.060145, val_loss: 0.057774, val_acc: 25.589334
+Epoch [2793], train_loss: 0.060267, val_loss: 0.057680, val_acc: 25.740406
+Epoch [2794], train_loss: 0.060138, val_loss: 0.057683, val_acc: 25.680775
+Epoch [2795], train_loss: 0.060167, val_loss: 0.057728, val_acc: 25.626865
+Epoch [2796], train_loss: 0.060131, val_loss: 0.057725, val_acc: 25.602087
+Epoch [2797], train_loss: 0.060248, val_loss: 0.057741, val_acc: 25.667265
+Epoch [2798], train_loss: 0.060063, val_loss: 0.057704, val_acc: 25.687813
+Epoch [2799], train_loss: 0.060140, val_loss: 0.057742, val_acc: 25.672251
+Epoch [2800], train_loss: 0.060279, val_loss: 0.057719, val_acc: 25.663008
+Epoch [2801], train_loss: 0.060150, val_loss: 0.057664, val_acc: 25.705324
+Epoch [2802], train_loss: 0.060155, val_loss: 0.057741, val_acc: 25.614235
+Epoch [2803], train_loss: 0.060207, val_loss: 0.057784, val_acc: 25.619051
+Epoch [2804], train_loss: 0.060201, val_loss: 0.057769, val_acc: 25.641466
+Epoch [2805], train_loss: 0.060219, val_loss: 0.057712, val_acc: 25.739740
+Epoch [2806], train_loss: 0.060083, val_loss: 0.057724, val_acc: 25.625729
+Epoch [2807], train_loss: 0.060036, val_loss: 0.057726, val_acc: 25.566486
+Epoch [2808], train_loss: 0.059945, val_loss: 0.057707, val_acc: 25.679758
+Epoch [2809], train_loss: 0.060210, val_loss: 0.057688, val_acc: 25.749550
+Epoch [2810], train_loss: 0.060353, val_loss: 0.057796, val_acc: 25.582453
+Epoch [2811], train_loss: 0.060208, val_loss: 0.057731, val_acc: 25.649748
+Epoch [2812], train_loss: 0.060364, val_loss: 0.057770, val_acc: 25.632103
+Epoch [2813], train_loss: 0.060113, val_loss: 0.057715, val_acc: 25.670059
+Epoch [2814], train_loss: 0.060321, val_loss: 0.057736, val_acc: 25.657892
+Epoch [2815], train_loss: 0.060150, val_loss: 0.057734, val_acc: 25.608204
+Epoch [2816], train_loss: 0.060441, val_loss: 0.057845, val_acc: 25.583685
+Epoch [2817], train_loss: 0.060249, val_loss: 0.057666, val_acc: 25.640579
+Epoch [2818], train_loss: 0.060239, val_loss: 0.057769, val_acc: 25.558201
+Epoch [2819], train_loss: 0.060306, val_loss: 0.057703, val_acc: 25.665577
+Epoch [2820], train_loss: 0.060268, val_loss: 0.057846, val_acc: 25.490192
+Epoch [2821], train_loss: 0.060208, val_loss: 0.057746, val_acc: 25.635578
+Epoch [2822], train_loss: 0.060234, val_loss: 0.057667, val_acc: 25.747898
+Epoch [2823], train_loss: 0.060029, val_loss: 0.057709, val_acc: 25.629555
+Epoch [2824], train_loss: 0.060212, val_loss: 0.057729, val_acc: 25.650644
+Epoch [2825], train_loss: 0.060161, val_loss: 0.057721, val_acc: 25.657103
+Epoch [2826], train_loss: 0.060122, val_loss: 0.057682, val_acc: 25.679916
+Epoch [2827], train_loss: 0.060014, val_loss: 0.057729, val_acc: 25.644579
+Epoch [2828], train_loss: 0.060458, val_loss: 0.057661, val_acc: 25.668137
+Epoch [2829], train_loss: 0.060353, val_loss: 0.057746, val_acc: 25.644028
+Epoch [2830], train_loss: 0.060102, val_loss: 0.057693, val_acc: 25.710325
+Epoch [2831], train_loss: 0.060236, val_loss: 0.057713, val_acc: 25.595474
+Epoch [2832], train_loss: 0.060232, val_loss: 0.057769, val_acc: 25.615440
+Epoch [2833], train_loss: 0.060054, val_loss: 0.057752, val_acc: 25.673044
+Epoch [2834], train_loss: 0.060267, val_loss: 0.057785, val_acc: 25.612637
+Epoch [2835], train_loss: 0.060160, val_loss: 0.057753, val_acc: 25.638035
+Epoch [2836], train_loss: 0.060131, val_loss: 0.057745, val_acc: 25.626348
+Epoch [2837], train_loss: 0.060283, val_loss: 0.057727, val_acc: 25.617716
+Epoch [2838], train_loss: 0.060156, val_loss: 0.057718, val_acc: 25.608089
+Epoch [2839], train_loss: 0.060040, val_loss: 0.057804, val_acc: 25.540998
+Epoch [2840], train_loss: 0.060065, val_loss: 0.057817, val_acc: 25.566544
+Epoch [2841], train_loss: 0.060285, val_loss: 0.057730, val_acc: 25.678947
+Epoch [2842], train_loss: 0.060208, val_loss: 0.057696, val_acc: 25.622126
+Epoch [2843], train_loss: 0.060124, val_loss: 0.057837, val_acc: 25.593061
+Epoch [2844], train_loss: 0.060294, val_loss: 0.057795, val_acc: 25.584513
+Epoch [2845], train_loss: 0.060273, val_loss: 0.057792, val_acc: 25.611017
+Epoch [2846], train_loss: 0.060255, val_loss: 0.057725, val_acc: 25.641420
+Epoch [2847], train_loss: 0.060173, val_loss: 0.057700, val_acc: 25.646982
+Epoch [2848], train_loss: 0.060153, val_loss: 0.057813, val_acc: 25.545786
+Epoch [2849], train_loss: 0.060110, val_loss: 0.057747, val_acc: 25.629734
+Epoch [2850], train_loss: 0.060092, val_loss: 0.057722, val_acc: 25.620827
+Epoch [2851], train_loss: 0.059968, val_loss: 0.057827, val_acc: 25.508318
+Epoch [2852], train_loss: 0.060172, val_loss: 0.057759, val_acc: 25.580866
+Epoch [2853], train_loss: 0.060333, val_loss: 0.057766, val_acc: 25.617142
+Epoch [2854], train_loss: 0.060131, val_loss: 0.057717, val_acc: 25.590626
+Epoch [2855], train_loss: 0.060221, val_loss: 0.057758, val_acc: 25.563484
+Epoch [2856], train_loss: 0.060148, val_loss: 0.057734, val_acc: 25.585140
+Epoch [2857], train_loss: 0.060006, val_loss: 0.057700, val_acc: 25.610817
+Epoch [2858], train_loss: 0.060281, val_loss: 0.057731, val_acc: 25.614588
+Epoch [2859], train_loss: 0.060123, val_loss: 0.057698, val_acc: 25.667017
+Epoch [2860], train_loss: 0.060054, val_loss: 0.057764, val_acc: 25.577887
+Epoch [2861], train_loss: 0.060172, val_loss: 0.057693, val_acc: 25.593756
+Epoch [2862], train_loss: 0.060119, val_loss: 0.057763, val_acc: 25.556684
+Epoch [2863], train_loss: 0.060122, val_loss: 0.057724, val_acc: 25.608156
+Epoch [2864], train_loss: 0.060128, val_loss: 0.057769, val_acc: 25.581501
+Epoch [2865], train_loss: 0.060213, val_loss: 0.057757, val_acc: 25.626320
+Epoch [2866], train_loss: 0.060212, val_loss: 0.057718, val_acc: 25.637312
+Epoch [2867], train_loss: 0.059985, val_loss: 0.057792, val_acc: 25.559471
+Epoch [2868], train_loss: 0.060434, val_loss: 0.057714, val_acc: 25.546669
+Epoch [2869], train_loss: 0.060100, val_loss: 0.057780, val_acc: 25.613056
+Epoch [2870], train_loss: 0.060214, val_loss: 0.057662, val_acc: 25.589960
+Epoch [2871], train_loss: 0.060070, val_loss: 0.057804, val_acc: 25.543882
+Epoch [2872], train_loss: 0.060199, val_loss: 0.057756, val_acc: 25.550167
+Epoch [2873], train_loss: 0.060251, val_loss: 0.057809, val_acc: 25.579075
+Epoch [2874], train_loss: 0.060110, val_loss: 0.057789, val_acc: 25.551304
+Epoch [2875], train_loss: 0.060304, val_loss: 0.057755, val_acc: 25.609047
+Epoch [2876], train_loss: 0.060155, val_loss: 0.057651, val_acc: 25.660282
+Epoch [2877], train_loss: 0.060192, val_loss: 0.057778, val_acc: 25.551613
+Epoch [2878], train_loss: 0.060141, val_loss: 0.057785, val_acc: 25.596991
+Epoch [2879], train_loss: 0.060085, val_loss: 0.057788, val_acc: 25.557444
+Epoch [2880], train_loss: 0.060282, val_loss: 0.057789, val_acc: 25.546864
+Epoch [2881], train_loss: 0.060149, val_loss: 0.057800, val_acc: 25.551771
+Epoch [2882], train_loss: 0.060328, val_loss: 0.057720, val_acc: 25.622761
+Epoch [2883], train_loss: 0.060214, val_loss: 0.057745, val_acc: 25.638241
+Epoch [2884], train_loss: 0.060181, val_loss: 0.057763, val_acc: 25.554724
+Epoch [2885], train_loss: 0.060040, val_loss: 0.057727, val_acc: 25.601999
+Epoch [2886], train_loss: 0.060066, val_loss: 0.057702, val_acc: 25.618578
+Epoch [2887], train_loss: 0.060053, val_loss: 0.057787, val_acc: 25.615311
+Epoch [2888], train_loss: 0.060284, val_loss: 0.057754, val_acc: 25.582676
+Epoch [2889], train_loss: 0.060233, val_loss: 0.057745, val_acc: 25.585892
+Epoch [2890], train_loss: 0.060116, val_loss: 0.057728, val_acc: 25.675072
+Epoch [2891], train_loss: 0.060050, val_loss: 0.057688, val_acc: 25.636332
+Epoch [2892], train_loss: 0.060067, val_loss: 0.057771, val_acc: 25.532837
+Epoch [2893], train_loss: 0.059935, val_loss: 0.057740, val_acc: 25.564249
+Epoch [2894], train_loss: 0.060349, val_loss: 0.057843, val_acc: 25.483564
+Epoch [2895], train_loss: 0.060098, val_loss: 0.057766, val_acc: 25.582253
+Epoch [2896], train_loss: 0.060172, val_loss: 0.057704, val_acc: 25.627983
+Epoch [2897], train_loss: 0.060073, val_loss: 0.057763, val_acc: 25.568054
+Epoch [2898], train_loss: 0.060251, val_loss: 0.057709, val_acc: 25.621668
+Epoch [2899], train_loss: 0.060234, val_loss: 0.057657, val_acc: 25.716864
+Epoch [2900], train_loss: 0.060127, val_loss: 0.057837, val_acc: 25.492197
+Epoch [2901], train_loss: 0.060334, val_loss: 0.057803, val_acc: 25.573500
+Epoch [2902], train_loss: 0.059984, val_loss: 0.057770, val_acc: 25.590925
+Epoch [2903], train_loss: 0.060321, val_loss: 0.057739, val_acc: 25.646294
+Epoch [2904], train_loss: 0.060208, val_loss: 0.057715, val_acc: 25.635386
+Epoch [2905], train_loss: 0.060059, val_loss: 0.057760, val_acc: 25.583139
+Epoch [2906], train_loss: 0.060071, val_loss: 0.057700, val_acc: 25.635700
+Epoch [2907], train_loss: 0.060062, val_loss: 0.057823, val_acc: 25.540586
+Epoch [2908], train_loss: 0.060025, val_loss: 0.057721, val_acc: 25.608118
+Epoch [2909], train_loss: 0.060202, val_loss: 0.057707, val_acc: 25.644840
+Epoch [2910], train_loss: 0.060113, val_loss: 0.057781, val_acc: 25.514584
+Epoch [2911], train_loss: 0.060068, val_loss: 0.057718, val_acc: 25.630163
+Epoch [2912], train_loss: 0.060194, val_loss: 0.057824, val_acc: 25.499977
+Epoch [2913], train_loss: 0.060216, val_loss: 0.057855, val_acc: 25.473236
+Epoch [2914], train_loss: 0.060165, val_loss: 0.057804, val_acc: 25.510328
+Epoch [2915], train_loss: 0.060122, val_loss: 0.057792, val_acc: 25.522762
+Epoch [2916], train_loss: 0.060186, val_loss: 0.057761, val_acc: 25.553928
+Epoch [2917], train_loss: 0.060108, val_loss: 0.057776, val_acc: 25.527037
+Epoch [2918], train_loss: 0.060149, val_loss: 0.057744, val_acc: 25.633669
+Epoch [2919], train_loss: 0.060222, val_loss: 0.057748, val_acc: 25.599648
+Epoch [2920], train_loss: 0.060281, val_loss: 0.057800, val_acc: 25.480347
+Epoch [2921], train_loss: 0.060004, val_loss: 0.057750, val_acc: 25.577440
+Epoch [2922], train_loss: 0.060258, val_loss: 0.057861, val_acc: 25.518791
+Epoch [2923], train_loss: 0.059997, val_loss: 0.057787, val_acc: 25.593760
+Epoch [2924], train_loss: 0.060076, val_loss: 0.057847, val_acc: 25.449705
+Epoch [2925], train_loss: 0.060160, val_loss: 0.057749, val_acc: 25.609852
+Epoch [2926], train_loss: 0.060135, val_loss: 0.057825, val_acc: 25.529495
+Epoch [2927], train_loss: 0.059839, val_loss: 0.057735, val_acc: 25.578691
+Epoch [2928], train_loss: 0.060265, val_loss: 0.057824, val_acc: 25.508114
+Epoch [2929], train_loss: 0.060113, val_loss: 0.057732, val_acc: 25.559057
+Epoch [2930], train_loss: 0.060294, val_loss: 0.057698, val_acc: 25.715561
+Epoch [2931], train_loss: 0.060032, val_loss: 0.057776, val_acc: 25.556946
+Epoch [2932], train_loss: 0.060372, val_loss: 0.057765, val_acc: 25.572718
+Epoch [2933], train_loss: 0.060132, val_loss: 0.057726, val_acc: 25.655693
+Epoch [2934], train_loss: 0.060109, val_loss: 0.057751, val_acc: 25.568651
+Epoch [2935], train_loss: 0.060150, val_loss: 0.057789, val_acc: 25.477427
+Epoch [2936], train_loss: 0.060283, val_loss: 0.057750, val_acc: 25.578037
+Epoch [2937], train_loss: 0.059954, val_loss: 0.057719, val_acc: 25.576750
+Epoch [2938], train_loss: 0.060114, val_loss: 0.057727, val_acc: 25.557695
+Epoch [2939], train_loss: 0.060428, val_loss: 0.057840, val_acc: 25.506184
+Epoch [2940], train_loss: 0.060268, val_loss: 0.057763, val_acc: 25.573826
+Epoch [2941], train_loss: 0.060206, val_loss: 0.057825, val_acc: 25.544510
+Epoch [2942], train_loss: 0.060191, val_loss: 0.057776, val_acc: 25.536880
+Epoch [2943], train_loss: 0.060255, val_loss: 0.057768, val_acc: 25.546036
+Epoch [2944], train_loss: 0.060267, val_loss: 0.057749, val_acc: 25.558128
+Epoch [2945], train_loss: 0.060199, val_loss: 0.057734, val_acc: 25.650949
+Epoch [2946], train_loss: 0.060191, val_loss: 0.057704, val_acc: 25.637043
+Epoch [2947], train_loss: 0.060148, val_loss: 0.057758, val_acc: 25.568012
+Epoch [2948], train_loss: 0.060068, val_loss: 0.057819, val_acc: 25.472511
+Epoch [2949], train_loss: 0.060113, val_loss: 0.057820, val_acc: 25.487329
+Epoch [2950], train_loss: 0.060035, val_loss: 0.057704, val_acc: 25.602335
+Epoch [2951], train_loss: 0.060026, val_loss: 0.057775, val_acc: 25.497766
+Epoch [2952], train_loss: 0.060150, val_loss: 0.057724, val_acc: 25.545979
+Epoch [2953], train_loss: 0.059977, val_loss: 0.057825, val_acc: 25.411970
+Epoch [2954], train_loss: 0.059991, val_loss: 0.057763, val_acc: 25.491987
+Epoch [2955], train_loss: 0.060004, val_loss: 0.057724, val_acc: 25.568241
+Epoch [2956], train_loss: 0.060278, val_loss: 0.057816, val_acc: 25.500664
+Epoch [2957], train_loss: 0.060084, val_loss: 0.057751, val_acc: 25.474094
+Epoch [2958], train_loss: 0.060219, val_loss: 0.057755, val_acc: 25.565018
+Epoch [2959], train_loss: 0.060188, val_loss: 0.057819, val_acc: 25.538313
+Epoch [2960], train_loss: 0.060081, val_loss: 0.057788, val_acc: 25.533195
+Epoch [2961], train_loss: 0.060201, val_loss: 0.057828, val_acc: 25.533209
+Epoch [2962], train_loss: 0.060233, val_loss: 0.057690, val_acc: 25.624100
+Epoch [2963], train_loss: 0.059958, val_loss: 0.057761, val_acc: 25.521820
+Epoch [2964], train_loss: 0.059927, val_loss: 0.057810, val_acc: 25.476112
+Epoch [2965], train_loss: 0.060093, val_loss: 0.057779, val_acc: 25.567793
+Epoch [2966], train_loss: 0.060255, val_loss: 0.057755, val_acc: 25.540054
+Epoch [2967], train_loss: 0.060138, val_loss: 0.057773, val_acc: 25.568684
+Epoch [2968], train_loss: 0.060323, val_loss: 0.057756, val_acc: 25.557825
+Epoch [2969], train_loss: 0.060191, val_loss: 0.057721, val_acc: 25.612728
+Epoch [2970], train_loss: 0.059996, val_loss: 0.057799, val_acc: 25.534430
+Epoch [2971], train_loss: 0.060037, val_loss: 0.057760, val_acc: 25.558359
+Epoch [2972], train_loss: 0.059975, val_loss: 0.057831, val_acc: 25.509562
+Epoch [2973], train_loss: 0.060134, val_loss: 0.057792, val_acc: 25.457529
+Epoch [2974], train_loss: 0.060006, val_loss: 0.057838, val_acc: 25.488564
+Epoch [2975], train_loss: 0.060055, val_loss: 0.057747, val_acc: 25.558672
+Epoch [2976], train_loss: 0.060017, val_loss: 0.057687, val_acc: 25.632008
+Epoch [2977], train_loss: 0.060110, val_loss: 0.057728, val_acc: 25.575996
+Epoch [2978], train_loss: 0.060107, val_loss: 0.057904, val_acc: 25.490532
+Epoch [2979], train_loss: 0.060114, val_loss: 0.057793, val_acc: 25.480141
+Epoch [2980], train_loss: 0.060109, val_loss: 0.057810, val_acc: 25.508667
+Epoch [2981], train_loss: 0.060010, val_loss: 0.057786, val_acc: 25.525206
+Epoch [2982], train_loss: 0.059978, val_loss: 0.057707, val_acc: 25.584896
+Epoch [2983], train_loss: 0.059886, val_loss: 0.057806, val_acc: 25.506781
+Epoch [2984], train_loss: 0.059945, val_loss: 0.057740, val_acc: 25.612816
+Epoch [2985], train_loss: 0.060061, val_loss: 0.057777, val_acc: 25.584293
+Epoch [2986], train_loss: 0.060193, val_loss: 0.057792, val_acc: 25.504007
+Epoch [2987], train_loss: 0.059972, val_loss: 0.057818, val_acc: 25.513229
+Epoch [2988], train_loss: 0.060122, val_loss: 0.057759, val_acc: 25.609461
+Epoch [2989], train_loss: 0.060223, val_loss: 0.057762, val_acc: 25.587471
+Epoch [2990], train_loss: 0.059858, val_loss: 0.057769, val_acc: 25.579329
+Epoch [2991], train_loss: 0.060122, val_loss: 0.057763, val_acc: 25.530405
+Epoch [2992], train_loss: 0.060075, val_loss: 0.057702, val_acc: 25.617348
+Epoch [2993], train_loss: 0.060046, val_loss: 0.057835, val_acc: 25.521246
+Epoch [2994], train_loss: 0.060216, val_loss: 0.057800, val_acc: 25.523542
+Epoch [2995], train_loss: 0.059985, val_loss: 0.057749, val_acc: 25.609501
+Epoch [2996], train_loss: 0.059991, val_loss: 0.057775, val_acc: 25.551847
+Epoch [2997], train_loss: 0.060130, val_loss: 0.057806, val_acc: 25.526068
+Epoch [2998], train_loss: 0.059914, val_loss: 0.057760, val_acc: 25.600491
+Epoch [2999], train_loss: 0.060107, val_loss: 0.057829, val_acc: 25.508734
+Epoch [3000], train_loss: 0.060140, val_loss: 0.057862, val_acc: 25.409458
+Epoch [3001], train_loss: 0.059861, val_loss: 0.057834, val_acc: 25.486103
+Epoch [3002], train_loss: 0.059941, val_loss: 0.057817, val_acc: 25.503193
+Epoch [3003], train_loss: 0.060167, val_loss: 0.057830, val_acc: 25.508787
+Epoch [3004], train_loss: 0.060041, val_loss: 0.057775, val_acc: 25.530910
+Epoch [3005], train_loss: 0.060021, val_loss: 0.057751, val_acc: 25.552830
+Epoch [3006], train_loss: 0.060059, val_loss: 0.057779, val_acc: 25.568491
+Epoch [3007], train_loss: 0.059875, val_loss: 0.057917, val_acc: 25.427402
+Epoch [3008], train_loss: 0.060088, val_loss: 0.057823, val_acc: 25.488447
+Epoch [3009], train_loss: 0.059976, val_loss: 0.057760, val_acc: 25.527134
+Epoch [3010], train_loss: 0.059960, val_loss: 0.057796, val_acc: 25.570663
+Epoch [3011], train_loss: 0.059975, val_loss: 0.057750, val_acc: 25.567297
+Epoch [3012], train_loss: 0.060118, val_loss: 0.057837, val_acc: 25.487619
+Epoch [3013], train_loss: 0.060149, val_loss: 0.057773, val_acc: 25.564360
+Epoch [3014], train_loss: 0.059984, val_loss: 0.057831, val_acc: 25.435514
+Epoch [3015], train_loss: 0.060057, val_loss: 0.057721, val_acc: 25.599461
+Epoch [3016], train_loss: 0.060101, val_loss: 0.057839, val_acc: 25.475317
+Epoch [3017], train_loss: 0.059939, val_loss: 0.057815, val_acc: 25.421604
+Epoch [3018], train_loss: 0.059981, val_loss: 0.057779, val_acc: 25.549477
+Epoch [3019], train_loss: 0.059913, val_loss: 0.057856, val_acc: 25.485765
+Epoch [3020], train_loss: 0.059991, val_loss: 0.057780, val_acc: 25.502184
+Epoch [3021], train_loss: 0.059891, val_loss: 0.057812, val_acc: 25.529911
+Epoch [3022], train_loss: 0.060068, val_loss: 0.057812, val_acc: 25.512064
+Epoch [3023], train_loss: 0.060051, val_loss: 0.057882, val_acc: 25.413574
+Epoch [3024], train_loss: 0.060057, val_loss: 0.057816, val_acc: 25.465790
+Epoch [3025], train_loss: 0.060130, val_loss: 0.057855, val_acc: 25.434757
+Epoch [3026], train_loss: 0.060010, val_loss: 0.058044, val_acc: 25.337334
+Epoch [3027], train_loss: 0.060049, val_loss: 0.057796, val_acc: 25.521189
+Epoch [3028], train_loss: 0.060238, val_loss: 0.057801, val_acc: 25.558237
+Epoch [3029], train_loss: 0.059960, val_loss: 0.057816, val_acc: 25.472008
+Epoch [3030], train_loss: 0.060024, val_loss: 0.057751, val_acc: 25.539862
+Epoch [3031], train_loss: 0.060078, val_loss: 0.057840, val_acc: 25.435259
+Epoch [3032], train_loss: 0.059980, val_loss: 0.057783, val_acc: 25.545664
+Epoch [3033], train_loss: 0.059962, val_loss: 0.057858, val_acc: 25.502502
+Epoch [3034], train_loss: 0.059977, val_loss: 0.057797, val_acc: 25.529743
+Epoch [3035], train_loss: 0.060112, val_loss: 0.057794, val_acc: 25.540089
+Epoch [3036], train_loss: 0.059971, val_loss: 0.057824, val_acc: 25.508936
+Epoch [3037], train_loss: 0.060027, val_loss: 0.057777, val_acc: 25.503880
+Epoch [3038], train_loss: 0.060194, val_loss: 0.057872, val_acc: 25.476019
+Epoch [3039], train_loss: 0.060225, val_loss: 0.057856, val_acc: 25.514277
+Epoch [3040], train_loss: 0.059941, val_loss: 0.057800, val_acc: 25.479080
+Epoch [3041], train_loss: 0.059904, val_loss: 0.057898, val_acc: 25.398706
+Epoch [3042], train_loss: 0.059996, val_loss: 0.057875, val_acc: 25.466955
+Epoch [3043], train_loss: 0.060036, val_loss: 0.057808, val_acc: 25.510935
+Epoch [3044], train_loss: 0.060012, val_loss: 0.057814, val_acc: 25.536274
+Epoch [3045], train_loss: 0.060072, val_loss: 0.057804, val_acc: 25.541885
+Epoch [3046], train_loss: 0.060083, val_loss: 0.057732, val_acc: 25.562248
+Epoch [3047], train_loss: 0.059860, val_loss: 0.057851, val_acc: 25.478010
+Epoch [3048], train_loss: 0.060134, val_loss: 0.057793, val_acc: 25.512234
+Epoch [3049], train_loss: 0.060211, val_loss: 0.057880, val_acc: 25.460629
+Epoch [3050], train_loss: 0.060090, val_loss: 0.057842, val_acc: 25.512815
+Epoch [3051], train_loss: 0.060016, val_loss: 0.057814, val_acc: 25.479706
+Epoch [3052], train_loss: 0.059955, val_loss: 0.057791, val_acc: 25.502851
+Epoch [3053], train_loss: 0.059995, val_loss: 0.057861, val_acc: 25.444332
+Epoch [3054], train_loss: 0.059985, val_loss: 0.057861, val_acc: 25.420588
+Epoch [3055], train_loss: 0.060030, val_loss: 0.057876, val_acc: 25.420427
+Epoch [3056], train_loss: 0.060145, val_loss: 0.057780, val_acc: 25.568943
+Epoch [3057], train_loss: 0.060063, val_loss: 0.057835, val_acc: 25.462267
+Epoch [3058], train_loss: 0.060114, val_loss: 0.057774, val_acc: 25.507046
+Epoch [3059], train_loss: 0.059839, val_loss: 0.057765, val_acc: 25.524969
+Epoch [3060], train_loss: 0.059981, val_loss: 0.057789, val_acc: 25.454853
+Epoch [3061], train_loss: 0.060154, val_loss: 0.057774, val_acc: 25.494429
+Epoch [3062], train_loss: 0.060093, val_loss: 0.057848, val_acc: 25.507380
+Epoch [3063], train_loss: 0.060118, val_loss: 0.057891, val_acc: 25.457920
+Epoch [3064], train_loss: 0.059916, val_loss: 0.057869, val_acc: 25.442511
+Epoch [3065], train_loss: 0.060020, val_loss: 0.057837, val_acc: 25.480864
+Epoch [3066], train_loss: 0.060103, val_loss: 0.057895, val_acc: 25.490761
+Epoch [3067], train_loss: 0.059998, val_loss: 0.057783, val_acc: 25.541126
+Epoch [3068], train_loss: 0.060033, val_loss: 0.057834, val_acc: 25.482012
+Epoch [3069], train_loss: 0.059988, val_loss: 0.057750, val_acc: 25.550224
+Epoch [3070], train_loss: 0.060158, val_loss: 0.057725, val_acc: 25.600817
+Epoch [3071], train_loss: 0.059910, val_loss: 0.057843, val_acc: 25.456964
+Epoch [3072], train_loss: 0.059888, val_loss: 0.057843, val_acc: 25.522339
+Epoch [3073], train_loss: 0.060081, val_loss: 0.057804, val_acc: 25.466099
+Epoch [3074], train_loss: 0.060075, val_loss: 0.057843, val_acc: 25.458511
+Epoch [3075], train_loss: 0.059916, val_loss: 0.057896, val_acc: 25.362522
+Epoch [3076], train_loss: 0.059956, val_loss: 0.057812, val_acc: 25.485479
+Epoch [3077], train_loss: 0.060048, val_loss: 0.057848, val_acc: 25.435125
+Epoch [3078], train_loss: 0.059919, val_loss: 0.057792, val_acc: 25.507423
+Epoch [3079], train_loss: 0.059973, val_loss: 0.057784, val_acc: 25.523472
+Epoch [3080], train_loss: 0.059812, val_loss: 0.057888, val_acc: 25.397633
+Epoch [3081], train_loss: 0.060048, val_loss: 0.057841, val_acc: 25.474087
+Epoch [3082], train_loss: 0.060081, val_loss: 0.057881, val_acc: 25.383043
+Epoch [3083], train_loss: 0.060092, val_loss: 0.057764, val_acc: 25.525589
+Epoch [3084], train_loss: 0.059904, val_loss: 0.057870, val_acc: 25.437788
+Epoch [3085], train_loss: 0.060082, val_loss: 0.057788, val_acc: 25.517805
+Epoch [3086], train_loss: 0.060070, val_loss: 0.057845, val_acc: 25.465437
+Epoch [3087], train_loss: 0.059950, val_loss: 0.057820, val_acc: 25.432447
+Epoch [3088], train_loss: 0.060122, val_loss: 0.057743, val_acc: 25.506241
+Epoch [3089], train_loss: 0.059843, val_loss: 0.057801, val_acc: 25.496601
+Epoch [3090], train_loss: 0.059966, val_loss: 0.057750, val_acc: 25.530165
+Epoch [3091], train_loss: 0.059956, val_loss: 0.057801, val_acc: 25.464340
+Epoch [3092], train_loss: 0.060206, val_loss: 0.057840, val_acc: 25.515144
+Epoch [3093], train_loss: 0.059966, val_loss: 0.057783, val_acc: 25.516546
+Epoch [3094], train_loss: 0.060058, val_loss: 0.057784, val_acc: 25.513336
+Epoch [3095], train_loss: 0.059884, val_loss: 0.057935, val_acc: 25.371824
+Epoch [3096], train_loss: 0.059969, val_loss: 0.057869, val_acc: 25.411146
+Epoch [3097], train_loss: 0.059974, val_loss: 0.057910, val_acc: 25.385233
+Epoch [3098], train_loss: 0.060086, val_loss: 0.057919, val_acc: 25.400745
+Epoch [3099], train_loss: 0.060017, val_loss: 0.057934, val_acc: 25.383020
+Epoch [3100], train_loss: 0.060002, val_loss: 0.057844, val_acc: 25.420845
+Epoch [3101], train_loss: 0.060096, val_loss: 0.057743, val_acc: 25.560167
+Epoch [3102], train_loss: 0.059999, val_loss: 0.057831, val_acc: 25.517515
+Epoch [3103], train_loss: 0.060168, val_loss: 0.057772, val_acc: 25.555914
+Epoch [3104], train_loss: 0.059968, val_loss: 0.057834, val_acc: 25.452074
+Epoch [3105], train_loss: 0.059903, val_loss: 0.057928, val_acc: 25.374765
+Epoch [3106], train_loss: 0.059890, val_loss: 0.057903, val_acc: 25.366318
+Epoch [3107], train_loss: 0.059959, val_loss: 0.057822, val_acc: 25.487383
+Epoch [3108], train_loss: 0.060043, val_loss: 0.057884, val_acc: 25.420738
+Epoch [3109], train_loss: 0.059896, val_loss: 0.057835, val_acc: 25.354752
+Epoch [3110], train_loss: 0.060058, val_loss: 0.057804, val_acc: 25.489714
+Epoch [3111], train_loss: 0.059941, val_loss: 0.057956, val_acc: 25.378435
+Epoch [3112], train_loss: 0.060052, val_loss: 0.057820, val_acc: 25.421087
+Epoch [3113], train_loss: 0.060054, val_loss: 0.057736, val_acc: 25.577980
+Epoch [3114], train_loss: 0.059955, val_loss: 0.057754, val_acc: 25.504089
+Epoch [3115], train_loss: 0.060034, val_loss: 0.057815, val_acc: 25.523849
+Epoch [3116], train_loss: 0.059987, val_loss: 0.057762, val_acc: 25.532162
+Epoch [3117], train_loss: 0.060058, val_loss: 0.057809, val_acc: 25.465183
+Epoch [3118], train_loss: 0.059901, val_loss: 0.057852, val_acc: 25.407286
+Epoch [3119], train_loss: 0.059841, val_loss: 0.057783, val_acc: 25.496799
+Epoch [3120], train_loss: 0.059751, val_loss: 0.057902, val_acc: 25.378532
+Epoch [3121], train_loss: 0.060278, val_loss: 0.057819, val_acc: 25.441908
+Epoch [3122], train_loss: 0.059899, val_loss: 0.057706, val_acc: 25.592848
+Epoch [3123], train_loss: 0.060031, val_loss: 0.057738, val_acc: 25.588936
+Epoch [3124], train_loss: 0.059908, val_loss: 0.057770, val_acc: 25.461809
+Epoch [3125], train_loss: 0.059912, val_loss: 0.057883, val_acc: 25.444487
+Epoch [3126], train_loss: 0.059994, val_loss: 0.057880, val_acc: 25.433586
+Epoch [3127], train_loss: 0.060035, val_loss: 0.057907, val_acc: 25.374498
+Epoch [3128], train_loss: 0.059920, val_loss: 0.057815, val_acc: 25.494612
+Epoch [3129], train_loss: 0.059876, val_loss: 0.057753, val_acc: 25.512926
+Epoch [3130], train_loss: 0.059942, val_loss: 0.057776, val_acc: 25.505920
+Epoch [3131], train_loss: 0.060031, val_loss: 0.057856, val_acc: 25.436893
+Epoch [3132], train_loss: 0.060059, val_loss: 0.057867, val_acc: 25.414448
+Epoch [3133], train_loss: 0.059970, val_loss: 0.057901, val_acc: 25.443853
+Epoch [3134], train_loss: 0.059763, val_loss: 0.057832, val_acc: 25.477526
+Epoch [3135], train_loss: 0.059941, val_loss: 0.057913, val_acc: 25.376961
+Epoch [3136], train_loss: 0.059876, val_loss: 0.057828, val_acc: 25.459547
+Epoch [3137], train_loss: 0.059949, val_loss: 0.057747, val_acc: 25.550524
+Epoch [3138], train_loss: 0.059937, val_loss: 0.057869, val_acc: 25.462845
+Epoch [3139], train_loss: 0.059993, val_loss: 0.057867, val_acc: 25.382502
+Epoch [3140], train_loss: 0.059962, val_loss: 0.057828, val_acc: 25.463211
+Epoch [3141], train_loss: 0.060004, val_loss: 0.057959, val_acc: 25.355478
+Epoch [3142], train_loss: 0.059885, val_loss: 0.057838, val_acc: 25.414875
+Epoch [3143], train_loss: 0.059725, val_loss: 0.057924, val_acc: 25.350952
+Epoch [3144], train_loss: 0.059839, val_loss: 0.057882, val_acc: 25.396612
+Epoch [3145], train_loss: 0.059873, val_loss: 0.057982, val_acc: 25.288340
+Epoch [3146], train_loss: 0.060083, val_loss: 0.057822, val_acc: 25.451006
+Epoch [3147], train_loss: 0.059973, val_loss: 0.057888, val_acc: 25.395414
+Epoch [3148], train_loss: 0.059914, val_loss: 0.057813, val_acc: 25.485052
+Epoch [3149], train_loss: 0.059901, val_loss: 0.057909, val_acc: 25.398842
+Epoch [3150], train_loss: 0.060081, val_loss: 0.057936, val_acc: 25.366882
+Epoch [3151], train_loss: 0.059957, val_loss: 0.057808, val_acc: 25.509293
+Epoch [3152], train_loss: 0.059936, val_loss: 0.057854, val_acc: 25.383959
+Epoch [3153], train_loss: 0.059953, val_loss: 0.057831, val_acc: 25.469208
+Epoch [3154], train_loss: 0.059922, val_loss: 0.057878, val_acc: 25.414825
+Epoch [3155], train_loss: 0.059649, val_loss: 0.057759, val_acc: 25.511908
+Epoch [3156], train_loss: 0.059911, val_loss: 0.057922, val_acc: 25.382870
+Epoch [3157], train_loss: 0.059846, val_loss: 0.057871, val_acc: 25.434013
+Epoch [3158], train_loss: 0.060047, val_loss: 0.057900, val_acc: 25.426580
+Epoch [3159], train_loss: 0.059992, val_loss: 0.057801, val_acc: 25.544878
+Epoch [3160], train_loss: 0.059866, val_loss: 0.057926, val_acc: 25.409998
+Epoch [3161], train_loss: 0.059845, val_loss: 0.057892, val_acc: 25.409782
+Epoch [3162], train_loss: 0.059961, val_loss: 0.057894, val_acc: 25.392265
+Epoch [3163], train_loss: 0.060074, val_loss: 0.057961, val_acc: 25.336626
+Epoch [3164], train_loss: 0.060140, val_loss: 0.057959, val_acc: 25.377659
+Epoch [3165], train_loss: 0.059742, val_loss: 0.057892, val_acc: 25.414972
+Epoch [3166], train_loss: 0.059851, val_loss: 0.057845, val_acc: 25.441624
+Epoch [3167], train_loss: 0.059927, val_loss: 0.057874, val_acc: 25.419060
+Epoch [3168], train_loss: 0.059965, val_loss: 0.057900, val_acc: 25.410477
+Epoch [3169], train_loss: 0.060062, val_loss: 0.057896, val_acc: 25.399429
+Epoch [3170], train_loss: 0.060005, val_loss: 0.057921, val_acc: 25.350849
+Epoch [3171], train_loss: 0.060044, val_loss: 0.057936, val_acc: 25.360649
+Epoch [3172], train_loss: 0.060016, val_loss: 0.057851, val_acc: 25.464233
+Epoch [3173], train_loss: 0.059902, val_loss: 0.057933, val_acc: 25.336948
+Epoch [3174], train_loss: 0.059763, val_loss: 0.057869, val_acc: 25.374207
+Epoch [3175], train_loss: 0.060109, val_loss: 0.057876, val_acc: 25.375742
+Epoch [3176], train_loss: 0.059966, val_loss: 0.057795, val_acc: 25.567398
+Epoch [3177], train_loss: 0.059879, val_loss: 0.057813, val_acc: 25.523163
+Epoch [3178], train_loss: 0.060050, val_loss: 0.057868, val_acc: 25.412786
+Epoch [3179], train_loss: 0.059960, val_loss: 0.057847, val_acc: 25.451145
+Epoch [3180], train_loss: 0.059884, val_loss: 0.057863, val_acc: 25.410690
+Epoch [3181], train_loss: 0.059909, val_loss: 0.057915, val_acc: 25.412992
+Epoch [3182], train_loss: 0.059854, val_loss: 0.057844, val_acc: 25.477951
+Epoch [3183], train_loss: 0.059869, val_loss: 0.057936, val_acc: 25.379215
+Epoch [3184], train_loss: 0.059871, val_loss: 0.057845, val_acc: 25.437195
+Epoch [3185], train_loss: 0.059871, val_loss: 0.057852, val_acc: 25.414322
+Epoch [3186], train_loss: 0.059952, val_loss: 0.057849, val_acc: 25.452074
+Epoch [3187], train_loss: 0.059912, val_loss: 0.057825, val_acc: 25.428242
+Epoch [3188], train_loss: 0.059842, val_loss: 0.057874, val_acc: 25.458845
+Epoch [3189], train_loss: 0.059816, val_loss: 0.057835, val_acc: 25.469751
+Epoch [3190], train_loss: 0.060093, val_loss: 0.057860, val_acc: 25.508236
+Epoch [3191], train_loss: 0.059962, val_loss: 0.057976, val_acc: 25.355137
+Epoch [3192], train_loss: 0.059796, val_loss: 0.057889, val_acc: 25.374134
+Epoch [3193], train_loss: 0.059942, val_loss: 0.057855, val_acc: 25.378334
+Epoch [3194], train_loss: 0.059889, val_loss: 0.057881, val_acc: 25.371801
+Epoch [3195], train_loss: 0.059838, val_loss: 0.057789, val_acc: 25.479742
+Epoch [3196], train_loss: 0.059967, val_loss: 0.057788, val_acc: 25.458649
+Epoch [3197], train_loss: 0.060089, val_loss: 0.057821, val_acc: 25.449572
+Epoch [3198], train_loss: 0.059915, val_loss: 0.057795, val_acc: 25.470655
+Epoch [3199], train_loss: 0.060045, val_loss: 0.057861, val_acc: 25.419077
+Epoch [3200], train_loss: 0.059732, val_loss: 0.057982, val_acc: 25.304361
+Epoch [3201], train_loss: 0.059940, val_loss: 0.057732, val_acc: 25.545895
+Epoch [3202], train_loss: 0.059621, val_loss: 0.057766, val_acc: 25.480242
+Epoch [3203], train_loss: 0.059771, val_loss: 0.057810, val_acc: 25.463018
+Epoch [3204], train_loss: 0.059922, val_loss: 0.057816, val_acc: 25.462278
+Epoch [3205], train_loss: 0.059883, val_loss: 0.057900, val_acc: 25.397636
+Epoch [3206], train_loss: 0.059937, val_loss: 0.057828, val_acc: 25.388428
+Epoch [3207], train_loss: 0.059958, val_loss: 0.057864, val_acc: 25.450548
+Epoch [3208], train_loss: 0.059925, val_loss: 0.057990, val_acc: 25.277227
+Epoch [3209], train_loss: 0.060072, val_loss: 0.057894, val_acc: 25.431931
+Epoch [3210], train_loss: 0.059934, val_loss: 0.057778, val_acc: 25.475666
+Epoch [3211], train_loss: 0.059945, val_loss: 0.057871, val_acc: 25.361080
+Epoch [3212], train_loss: 0.059981, val_loss: 0.057900, val_acc: 25.336426
+Epoch [3213], train_loss: 0.059945, val_loss: 0.057808, val_acc: 25.524698
+Epoch [3214], train_loss: 0.060060, val_loss: 0.057830, val_acc: 25.451567
+Epoch [3215], train_loss: 0.059898, val_loss: 0.057848, val_acc: 25.416435
+Epoch [3216], train_loss: 0.060021, val_loss: 0.057876, val_acc: 25.466232
+Epoch [3217], train_loss: 0.059954, val_loss: 0.057844, val_acc: 25.450800
+Epoch [3218], train_loss: 0.059998, val_loss: 0.057894, val_acc: 25.433041
+Epoch [3219], train_loss: 0.059969, val_loss: 0.057946, val_acc: 25.323757
+Epoch [3220], train_loss: 0.059969, val_loss: 0.057859, val_acc: 25.426092
+Epoch [3221], train_loss: 0.059807, val_loss: 0.057964, val_acc: 25.317966
+Epoch [3222], train_loss: 0.059918, val_loss: 0.057836, val_acc: 25.429691
+Epoch [3223], train_loss: 0.059989, val_loss: 0.057767, val_acc: 25.579859
+Epoch [3224], train_loss: 0.059928, val_loss: 0.057887, val_acc: 25.362280
+Epoch [3225], train_loss: 0.059964, val_loss: 0.057819, val_acc: 25.403379
+Epoch [3226], train_loss: 0.059858, val_loss: 0.057848, val_acc: 25.483978
+Epoch [3227], train_loss: 0.060038, val_loss: 0.057821, val_acc: 25.469852
+Epoch [3228], train_loss: 0.059862, val_loss: 0.057877, val_acc: 25.414963
+Epoch [3229], train_loss: 0.060079, val_loss: 0.057912, val_acc: 25.358921
+Epoch [3230], train_loss: 0.059834, val_loss: 0.057818, val_acc: 25.483170
+Epoch [3231], train_loss: 0.059933, val_loss: 0.057829, val_acc: 25.413507
+Epoch [3232], train_loss: 0.059954, val_loss: 0.057867, val_acc: 25.421503
+Epoch [3233], train_loss: 0.059992, val_loss: 0.057790, val_acc: 25.500736
+Epoch [3234], train_loss: 0.060125, val_loss: 0.057904, val_acc: 25.363728
+Epoch [3235], train_loss: 0.059951, val_loss: 0.057940, val_acc: 25.429462
+Epoch [3236], train_loss: 0.059936, val_loss: 0.057896, val_acc: 25.436777
+Epoch [3237], train_loss: 0.059961, val_loss: 0.057962, val_acc: 25.414120
+Epoch [3238], train_loss: 0.060084, val_loss: 0.057836, val_acc: 25.434248
+Epoch [3239], train_loss: 0.060009, val_loss: 0.058044, val_acc: 25.232817
+Epoch [3240], train_loss: 0.059946, val_loss: 0.057855, val_acc: 25.415073
+Epoch [3241], train_loss: 0.059850, val_loss: 0.057860, val_acc: 25.395411
+Epoch [3242], train_loss: 0.059847, val_loss: 0.057875, val_acc: 25.369917
+Epoch [3243], train_loss: 0.059697, val_loss: 0.057845, val_acc: 25.424728
+Epoch [3244], train_loss: 0.059928, val_loss: 0.057866, val_acc: 25.400040
+Epoch [3245], train_loss: 0.059994, val_loss: 0.057882, val_acc: 25.423754
+Epoch [3246], train_loss: 0.059874, val_loss: 0.057943, val_acc: 25.375475
+Epoch [3247], train_loss: 0.059691, val_loss: 0.057875, val_acc: 25.378130
+Epoch [3248], train_loss: 0.059765, val_loss: 0.057852, val_acc: 25.434916
+Epoch [3249], train_loss: 0.059750, val_loss: 0.058012, val_acc: 25.256296
+Epoch [3250], train_loss: 0.059840, val_loss: 0.057907, val_acc: 25.398951
+Epoch [3251], train_loss: 0.059858, val_loss: 0.057950, val_acc: 25.241371
+Epoch [3252], train_loss: 0.059929, val_loss: 0.057993, val_acc: 25.284845
+Epoch [3253], train_loss: 0.059878, val_loss: 0.057930, val_acc: 25.350071
+Epoch [3254], train_loss: 0.059817, val_loss: 0.057869, val_acc: 25.432087
+Epoch [3255], train_loss: 0.059999, val_loss: 0.057870, val_acc: 25.351679
+Epoch [3256], train_loss: 0.059964, val_loss: 0.057877, val_acc: 25.365362
+Epoch [3257], train_loss: 0.059818, val_loss: 0.058009, val_acc: 25.295017
+Epoch [3258], train_loss: 0.059861, val_loss: 0.057932, val_acc: 25.347090
+Epoch [3259], train_loss: 0.059910, val_loss: 0.057855, val_acc: 25.412516
+Epoch [3260], train_loss: 0.059908, val_loss: 0.057847, val_acc: 25.432999
+Epoch [3261], train_loss: 0.059865, val_loss: 0.058012, val_acc: 25.277166
+Epoch [3262], train_loss: 0.059914, val_loss: 0.057951, val_acc: 25.276688
+Epoch [3263], train_loss: 0.059907, val_loss: 0.057832, val_acc: 25.435242
+Epoch [3264], train_loss: 0.059840, val_loss: 0.057897, val_acc: 25.380756
+Epoch [3265], train_loss: 0.059747, val_loss: 0.057935, val_acc: 25.355034
+Epoch [3266], train_loss: 0.059711, val_loss: 0.057898, val_acc: 25.346483
+Epoch [3267], train_loss: 0.059737, val_loss: 0.057891, val_acc: 25.364725
+Epoch [3268], train_loss: 0.060035, val_loss: 0.057924, val_acc: 25.358002
+Epoch [3269], train_loss: 0.059737, val_loss: 0.057950, val_acc: 25.389235
+Epoch [3270], train_loss: 0.059818, val_loss: 0.057892, val_acc: 25.415764
+Epoch [3271], train_loss: 0.059984, val_loss: 0.057849, val_acc: 25.406767
+Epoch [3272], train_loss: 0.059785, val_loss: 0.057937, val_acc: 25.274298
+Epoch [3273], train_loss: 0.059815, val_loss: 0.057892, val_acc: 25.328617
+Epoch [3274], train_loss: 0.059796, val_loss: 0.057878, val_acc: 25.401550
+Epoch [3275], train_loss: 0.059868, val_loss: 0.057938, val_acc: 25.376654
+Epoch [3276], train_loss: 0.059819, val_loss: 0.058012, val_acc: 25.238907
+Epoch [3277], train_loss: 0.059888, val_loss: 0.058062, val_acc: 25.232550
+Epoch [3278], train_loss: 0.059898, val_loss: 0.058020, val_acc: 25.260914
+Epoch [3279], train_loss: 0.059895, val_loss: 0.057992, val_acc: 25.295200
+Epoch [3280], train_loss: 0.059752, val_loss: 0.057859, val_acc: 25.438889
+Epoch [3281], train_loss: 0.059901, val_loss: 0.057962, val_acc: 25.337982
+Epoch [3282], train_loss: 0.059868, val_loss: 0.058023, val_acc: 25.308313
+Epoch [3283], train_loss: 0.059815, val_loss: 0.057983, val_acc: 25.361465
+Epoch [3284], train_loss: 0.059757, val_loss: 0.057914, val_acc: 25.343285
+Epoch [3285], train_loss: 0.059644, val_loss: 0.057840, val_acc: 25.426247
+Epoch [3286], train_loss: 0.059885, val_loss: 0.057863, val_acc: 25.422434
+Epoch [3287], train_loss: 0.059719, val_loss: 0.057969, val_acc: 25.290216
+Epoch [3288], train_loss: 0.059807, val_loss: 0.057920, val_acc: 25.414089
+Epoch [3289], train_loss: 0.059918, val_loss: 0.057942, val_acc: 25.320684
+Epoch [3290], train_loss: 0.059762, val_loss: 0.057929, val_acc: 25.356033
+Epoch [3291], train_loss: 0.059929, val_loss: 0.057857, val_acc: 25.435883
+Epoch [3292], train_loss: 0.059808, val_loss: 0.057883, val_acc: 25.390329
+Epoch [3293], train_loss: 0.059953, val_loss: 0.057856, val_acc: 25.454300
+Epoch [3294], train_loss: 0.059843, val_loss: 0.057949, val_acc: 25.379307
+Epoch [3295], train_loss: 0.059852, val_loss: 0.058005, val_acc: 25.318043
+Epoch [3296], train_loss: 0.059679, val_loss: 0.057872, val_acc: 25.379265
+Epoch [3297], train_loss: 0.059839, val_loss: 0.057953, val_acc: 25.327610
+Epoch [3298], train_loss: 0.059817, val_loss: 0.057943, val_acc: 25.359009
+Epoch [3299], train_loss: 0.059764, val_loss: 0.057941, val_acc: 25.321011
+Epoch [3300], train_loss: 0.059967, val_loss: 0.057942, val_acc: 25.346066
+Epoch [3301], train_loss: 0.059722, val_loss: 0.057819, val_acc: 25.464647
+Epoch [3302], train_loss: 0.059860, val_loss: 0.057976, val_acc: 25.326130
+Epoch [3303], train_loss: 0.059797, val_loss: 0.057952, val_acc: 25.350843
+Epoch [3304], train_loss: 0.059781, val_loss: 0.057880, val_acc: 25.365396
+Epoch [3305], train_loss: 0.060030, val_loss: 0.058079, val_acc: 25.260105
+Epoch [3306], train_loss: 0.059808, val_loss: 0.057958, val_acc: 25.387232
+Epoch [3307], train_loss: 0.059794, val_loss: 0.057840, val_acc: 25.444313
+Epoch [3308], train_loss: 0.059712, val_loss: 0.057862, val_acc: 25.381113
+Epoch [3309], train_loss: 0.060017, val_loss: 0.057868, val_acc: 25.417320
+Epoch [3310], train_loss: 0.059920, val_loss: 0.057972, val_acc: 25.289913
+Epoch [3311], train_loss: 0.059875, val_loss: 0.058053, val_acc: 25.257080
+Epoch [3312], train_loss: 0.059872, val_loss: 0.057891, val_acc: 25.399555
+Epoch [3313], train_loss: 0.059793, val_loss: 0.057961, val_acc: 25.378729
+Epoch [3314], train_loss: 0.059696, val_loss: 0.057927, val_acc: 25.345779
+Epoch [3315], train_loss: 0.059881, val_loss: 0.057855, val_acc: 25.394127
+Epoch [3316], train_loss: 0.059856, val_loss: 0.057928, val_acc: 25.340288
+Epoch [3317], train_loss: 0.060153, val_loss: 0.057941, val_acc: 25.346079
+Epoch [3318], train_loss: 0.059780, val_loss: 0.057923, val_acc: 25.337442
+Epoch [3319], train_loss: 0.059862, val_loss: 0.058007, val_acc: 25.266800
+Epoch [3320], train_loss: 0.059984, val_loss: 0.057917, val_acc: 25.338833
+Epoch [3321], train_loss: 0.059854, val_loss: 0.057920, val_acc: 25.324747
+Epoch [3322], train_loss: 0.059728, val_loss: 0.057784, val_acc: 25.505503
+Epoch [3323], train_loss: 0.059862, val_loss: 0.057918, val_acc: 25.359373
+Epoch [3324], train_loss: 0.059736, val_loss: 0.057943, val_acc: 25.346603
+Epoch [3325], train_loss: 0.059771, val_loss: 0.057971, val_acc: 25.300577
+Epoch [3326], train_loss: 0.059880, val_loss: 0.057908, val_acc: 25.395420
+Epoch [3327], train_loss: 0.059993, val_loss: 0.058010, val_acc: 25.316156
+Epoch [3328], train_loss: 0.059962, val_loss: 0.058022, val_acc: 25.327698
+Epoch [3329], train_loss: 0.059814, val_loss: 0.057924, val_acc: 25.363888
+Epoch [3330], train_loss: 0.059827, val_loss: 0.057926, val_acc: 25.351067
+Epoch [3331], train_loss: 0.059863, val_loss: 0.057971, val_acc: 25.368181
+Epoch [3332], train_loss: 0.059881, val_loss: 0.058068, val_acc: 25.214838
+Epoch [3333], train_loss: 0.059786, val_loss: 0.057926, val_acc: 25.401386
+Epoch [3334], train_loss: 0.059724, val_loss: 0.057921, val_acc: 25.319954
+Epoch [3335], train_loss: 0.059719, val_loss: 0.057871, val_acc: 25.372887
+Epoch [3336], train_loss: 0.059754, val_loss: 0.057909, val_acc: 25.335445
+Epoch [3337], train_loss: 0.059688, val_loss: 0.057978, val_acc: 25.286921
+Epoch [3338], train_loss: 0.059925, val_loss: 0.057906, val_acc: 25.331079
+Epoch [3339], train_loss: 0.059910, val_loss: 0.057953, val_acc: 25.292002
+Epoch [3340], train_loss: 0.059735, val_loss: 0.057844, val_acc: 25.388727
+Epoch [3341], train_loss: 0.059846, val_loss: 0.057876, val_acc: 25.373154
+Epoch [3342], train_loss: 0.059762, val_loss: 0.057877, val_acc: 25.374903
+Epoch [3343], train_loss: 0.059745, val_loss: 0.057988, val_acc: 25.345352
+Epoch [3344], train_loss: 0.059795, val_loss: 0.057999, val_acc: 25.295673
+Epoch [3345], train_loss: 0.059764, val_loss: 0.057904, val_acc: 25.374409
+Epoch [3346], train_loss: 0.059688, val_loss: 0.057853, val_acc: 25.411186
+Epoch [3347], train_loss: 0.059829, val_loss: 0.057817, val_acc: 25.419153
+Epoch [3348], train_loss: 0.059674, val_loss: 0.058013, val_acc: 25.259453
+Epoch [3349], train_loss: 0.059889, val_loss: 0.057899, val_acc: 25.382898
+Epoch [3350], train_loss: 0.059852, val_loss: 0.057979, val_acc: 25.267097
+Epoch [3351], train_loss: 0.059904, val_loss: 0.057918, val_acc: 25.307661
+Epoch [3352], train_loss: 0.059567, val_loss: 0.057929, val_acc: 25.385044
+Epoch [3353], train_loss: 0.059819, val_loss: 0.057879, val_acc: 25.363066
+Epoch [3354], train_loss: 0.059879, val_loss: 0.057881, val_acc: 25.373154
+Epoch [3355], train_loss: 0.059853, val_loss: 0.057958, val_acc: 25.311485
+Epoch [3356], train_loss: 0.059918, val_loss: 0.057974, val_acc: 25.327990
+Epoch [3357], train_loss: 0.059853, val_loss: 0.058094, val_acc: 25.176746
+Epoch [3358], train_loss: 0.059739, val_loss: 0.058010, val_acc: 25.197174
+Epoch [3359], train_loss: 0.059981, val_loss: 0.058009, val_acc: 25.320133
+Epoch [3360], train_loss: 0.059920, val_loss: 0.057993, val_acc: 25.287086
+Epoch [3361], train_loss: 0.059663, val_loss: 0.057872, val_acc: 25.390436
+Epoch [3362], train_loss: 0.060004, val_loss: 0.057958, val_acc: 25.350996
+Epoch [3363], train_loss: 0.059834, val_loss: 0.057932, val_acc: 25.348049
+Epoch [3364], train_loss: 0.059717, val_loss: 0.057945, val_acc: 25.315601
+Epoch [3365], train_loss: 0.059719, val_loss: 0.058002, val_acc: 25.191538
+Epoch [3366], train_loss: 0.059858, val_loss: 0.057903, val_acc: 25.361549
+Epoch [3367], train_loss: 0.059705, val_loss: 0.057947, val_acc: 25.288612
+Epoch [3368], train_loss: 0.059762, val_loss: 0.057999, val_acc: 25.262276
+Epoch [3369], train_loss: 0.059753, val_loss: 0.057915, val_acc: 25.403482
+Epoch [3370], train_loss: 0.059599, val_loss: 0.057876, val_acc: 25.393766
+Epoch [3371], train_loss: 0.059862, val_loss: 0.057923, val_acc: 25.348768
+Epoch [3372], train_loss: 0.059699, val_loss: 0.058009, val_acc: 25.205776
+Epoch [3373], train_loss: 0.059627, val_loss: 0.057920, val_acc: 25.375353
+Epoch [3374], train_loss: 0.059837, val_loss: 0.057938, val_acc: 25.370140
+Epoch [3375], train_loss: 0.059884, val_loss: 0.058029, val_acc: 25.276482
+Epoch [3376], train_loss: 0.059753, val_loss: 0.057916, val_acc: 25.394722
+Epoch [3377], train_loss: 0.059722, val_loss: 0.057925, val_acc: 25.308750
+Epoch [3378], train_loss: 0.059963, val_loss: 0.057939, val_acc: 25.303606
+Epoch [3379], train_loss: 0.059796, val_loss: 0.058023, val_acc: 25.298853
+Epoch [3380], train_loss: 0.059600, val_loss: 0.058011, val_acc: 25.306477
+Epoch [3381], train_loss: 0.059780, val_loss: 0.057921, val_acc: 25.344145
+Epoch [3382], train_loss: 0.059837, val_loss: 0.057945, val_acc: 25.319313
+Epoch [3383], train_loss: 0.060159, val_loss: 0.057905, val_acc: 25.340103
+Epoch [3384], train_loss: 0.059808, val_loss: 0.057968, val_acc: 25.284449
+Epoch [3385], train_loss: 0.059787, val_loss: 0.057824, val_acc: 25.473854
+Epoch [3386], train_loss: 0.059814, val_loss: 0.057999, val_acc: 25.245762
+Epoch [3387], train_loss: 0.059794, val_loss: 0.058009, val_acc: 25.247416
+Epoch [3388], train_loss: 0.059827, val_loss: 0.057910, val_acc: 25.314659
+Epoch [3389], train_loss: 0.059691, val_loss: 0.057865, val_acc: 25.378130
+Epoch [3390], train_loss: 0.059767, val_loss: 0.057951, val_acc: 25.324318
+Epoch [3391], train_loss: 0.059766, val_loss: 0.057917, val_acc: 25.389534
+Epoch [3392], train_loss: 0.059907, val_loss: 0.057858, val_acc: 25.453712
+Epoch [3393], train_loss: 0.059917, val_loss: 0.057896, val_acc: 25.420799
+Epoch [3394], train_loss: 0.059643, val_loss: 0.058002, val_acc: 25.240686
+Epoch [3395], train_loss: 0.059518, val_loss: 0.058040, val_acc: 25.238997
+Epoch [3396], train_loss: 0.059718, val_loss: 0.058015, val_acc: 25.235016
+Epoch [3397], train_loss: 0.059718, val_loss: 0.057965, val_acc: 25.311142
+Epoch [3398], train_loss: 0.059866, val_loss: 0.057924, val_acc: 25.396959
+Epoch [3399], train_loss: 0.059821, val_loss: 0.057891, val_acc: 25.364567
+Epoch [3400], train_loss: 0.059696, val_loss: 0.057914, val_acc: 25.391739
+Epoch [3401], train_loss: 0.059809, val_loss: 0.057947, val_acc: 25.273323
+Epoch [3402], train_loss: 0.059858, val_loss: 0.058009, val_acc: 25.218412
+Epoch [3403], train_loss: 0.059567, val_loss: 0.058093, val_acc: 25.189331
+Epoch [3404], train_loss: 0.059797, val_loss: 0.058018, val_acc: 25.245874
+Epoch [3405], train_loss: 0.059973, val_loss: 0.057946, val_acc: 25.364630
+Epoch [3406], train_loss: 0.059657, val_loss: 0.058018, val_acc: 25.231136
+Epoch [3407], train_loss: 0.059721, val_loss: 0.057926, val_acc: 25.323502
+Epoch [3408], train_loss: 0.059756, val_loss: 0.057928, val_acc: 25.345818
+Epoch [3409], train_loss: 0.059953, val_loss: 0.057979, val_acc: 25.328627
+Epoch [3410], train_loss: 0.059825, val_loss: 0.057968, val_acc: 25.341415
+Epoch [3411], train_loss: 0.059828, val_loss: 0.057910, val_acc: 25.367384
+Epoch [3412], train_loss: 0.059873, val_loss: 0.058074, val_acc: 25.207670
+Epoch [3413], train_loss: 0.059785, val_loss: 0.057952, val_acc: 25.269194
+Epoch [3414], train_loss: 0.059822, val_loss: 0.058043, val_acc: 25.294096
+Epoch [3415], train_loss: 0.059813, val_loss: 0.057875, val_acc: 25.389311
+Epoch [3416], train_loss: 0.059657, val_loss: 0.057920, val_acc: 25.282415
+Epoch [3417], train_loss: 0.059717, val_loss: 0.057967, val_acc: 25.264057
+Epoch [3418], train_loss: 0.059875, val_loss: 0.058001, val_acc: 25.313370
+Epoch [3419], train_loss: 0.060008, val_loss: 0.057840, val_acc: 25.441730
+Epoch [3420], train_loss: 0.059695, val_loss: 0.057989, val_acc: 25.238108
+Epoch [3421], train_loss: 0.059742, val_loss: 0.057979, val_acc: 25.296881
+Epoch [3422], train_loss: 0.059901, val_loss: 0.058032, val_acc: 25.222080
+Epoch [3423], train_loss: 0.059621, val_loss: 0.057896, val_acc: 25.362354
+Epoch [3424], train_loss: 0.059827, val_loss: 0.058009, val_acc: 25.325417
+Epoch [3425], train_loss: 0.059703, val_loss: 0.057912, val_acc: 25.403856
+Epoch [3426], train_loss: 0.059675, val_loss: 0.057944, val_acc: 25.307407
+Epoch [3427], train_loss: 0.059808, val_loss: 0.057980, val_acc: 25.301136
+Epoch [3428], train_loss: 0.059955, val_loss: 0.057881, val_acc: 25.400848
+Epoch [3429], train_loss: 0.059891, val_loss: 0.057957, val_acc: 25.324530
+Epoch [3430], train_loss: 0.059724, val_loss: 0.058029, val_acc: 25.279505
+Epoch [3431], train_loss: 0.059567, val_loss: 0.058097, val_acc: 25.225849
+Epoch [3432], train_loss: 0.059824, val_loss: 0.057856, val_acc: 25.436440
+Epoch [3433], train_loss: 0.059894, val_loss: 0.057905, val_acc: 25.359781
+Epoch [3434], train_loss: 0.059538, val_loss: 0.057933, val_acc: 25.393082
+Epoch [3435], train_loss: 0.059887, val_loss: 0.058054, val_acc: 25.207609
+Epoch [3436], train_loss: 0.059761, val_loss: 0.057985, val_acc: 25.291481
+Epoch [3437], train_loss: 0.059664, val_loss: 0.057954, val_acc: 25.307114
+Epoch [3438], train_loss: 0.059687, val_loss: 0.057976, val_acc: 25.240448
+Epoch [3439], train_loss: 0.059827, val_loss: 0.057997, val_acc: 25.283279
+Epoch [3440], train_loss: 0.059686, val_loss: 0.058081, val_acc: 25.176958
+Epoch [3441], train_loss: 0.059790, val_loss: 0.057957, val_acc: 25.299976
+Epoch [3442], train_loss: 0.060081, val_loss: 0.058045, val_acc: 25.251596
+Epoch [3443], train_loss: 0.059694, val_loss: 0.058088, val_acc: 25.183012
+Epoch [3444], train_loss: 0.059769, val_loss: 0.057929, val_acc: 25.320732
+Epoch [3445], train_loss: 0.059729, val_loss: 0.058025, val_acc: 25.291103
+Epoch [3446], train_loss: 0.059751, val_loss: 0.057971, val_acc: 25.307318
+Epoch [3447], train_loss: 0.059770, val_loss: 0.057825, val_acc: 25.457272
+Epoch [3448], train_loss: 0.059728, val_loss: 0.057981, val_acc: 25.313049
+Epoch [3449], train_loss: 0.059826, val_loss: 0.057980, val_acc: 25.335924
+Epoch [3450], train_loss: 0.059660, val_loss: 0.057956, val_acc: 25.321186
+Epoch [3451], train_loss: 0.059617, val_loss: 0.057997, val_acc: 25.285282
+Epoch [3452], train_loss: 0.059649, val_loss: 0.057950, val_acc: 25.321638
+Epoch [3453], train_loss: 0.059589, val_loss: 0.057919, val_acc: 25.304466
+Epoch [3454], train_loss: 0.059830, val_loss: 0.057918, val_acc: 25.388208
+Epoch [3455], train_loss: 0.059665, val_loss: 0.058075, val_acc: 25.152382
+Epoch [3456], train_loss: 0.059815, val_loss: 0.057950, val_acc: 25.327549
+Epoch [3457], train_loss: 0.059793, val_loss: 0.058028, val_acc: 25.259945
+Epoch [3458], train_loss: 0.059813, val_loss: 0.057966, val_acc: 25.351715
+Epoch [3459], train_loss: 0.059694, val_loss: 0.057864, val_acc: 25.401190
+Epoch [3460], train_loss: 0.059712, val_loss: 0.057953, val_acc: 25.307463
+Epoch [3461], train_loss: 0.059759, val_loss: 0.057940, val_acc: 25.332911
+Epoch [3462], train_loss: 0.059910, val_loss: 0.058022, val_acc: 25.246050
+Epoch [3463], train_loss: 0.059572, val_loss: 0.057938, val_acc: 25.333450
+Epoch [3464], train_loss: 0.059568, val_loss: 0.057996, val_acc: 25.207188
+Epoch [3465], train_loss: 0.059496, val_loss: 0.058058, val_acc: 25.224277
+Epoch [3466], train_loss: 0.059698, val_loss: 0.058155, val_acc: 25.106544
+Epoch [3467], train_loss: 0.059811, val_loss: 0.058067, val_acc: 25.183998
+Epoch [3468], train_loss: 0.059772, val_loss: 0.057909, val_acc: 25.391626
+Epoch [3469], train_loss: 0.059726, val_loss: 0.058052, val_acc: 25.233337
+Epoch [3470], train_loss: 0.059676, val_loss: 0.058041, val_acc: 25.177378
+Epoch [3471], train_loss: 0.060028, val_loss: 0.058034, val_acc: 25.201147
+Epoch [3472], train_loss: 0.059690, val_loss: 0.058165, val_acc: 25.187798
+Epoch [3473], train_loss: 0.059629, val_loss: 0.058033, val_acc: 25.230202
+Epoch [3474], train_loss: 0.059681, val_loss: 0.058039, val_acc: 25.230268
+Epoch [3475], train_loss: 0.059792, val_loss: 0.058096, val_acc: 25.185112
+Epoch [3476], train_loss: 0.059715, val_loss: 0.058060, val_acc: 25.190365
+Epoch [3477], train_loss: 0.059674, val_loss: 0.057908, val_acc: 25.363152
+Epoch [3478], train_loss: 0.059788, val_loss: 0.057950, val_acc: 25.350687
+Epoch [3479], train_loss: 0.059825, val_loss: 0.057975, val_acc: 25.261393
+Epoch [3480], train_loss: 0.059761, val_loss: 0.058102, val_acc: 25.122639
+Epoch [3481], train_loss: 0.059642, val_loss: 0.058017, val_acc: 25.242146
+Epoch [3482], train_loss: 0.059887, val_loss: 0.057964, val_acc: 25.347702
+Epoch [3483], train_loss: 0.059727, val_loss: 0.057980, val_acc: 25.279123
+Epoch [3484], train_loss: 0.059675, val_loss: 0.057995, val_acc: 25.247789
+Epoch [3485], train_loss: 0.059790, val_loss: 0.057882, val_acc: 25.343575
+Epoch [3486], train_loss: 0.059606, val_loss: 0.057975, val_acc: 25.326866
+Epoch [3487], train_loss: 0.059748, val_loss: 0.058162, val_acc: 25.107479
+Epoch [3488], train_loss: 0.059825, val_loss: 0.058071, val_acc: 25.203529
+Epoch [3489], train_loss: 0.059785, val_loss: 0.057968, val_acc: 25.318497
+Epoch [3490], train_loss: 0.059807, val_loss: 0.057945, val_acc: 25.341328
+Epoch [3491], train_loss: 0.059851, val_loss: 0.057944, val_acc: 25.315495
+Epoch [3492], train_loss: 0.059709, val_loss: 0.057990, val_acc: 25.233231
+Epoch [3493], train_loss: 0.059638, val_loss: 0.057995, val_acc: 25.289009
+Epoch [3494], train_loss: 0.059724, val_loss: 0.057970, val_acc: 25.252089
+Epoch [3495], train_loss: 0.059616, val_loss: 0.058026, val_acc: 25.236710
+Epoch [3496], train_loss: 0.059670, val_loss: 0.058119, val_acc: 25.178843
+Epoch [3497], train_loss: 0.059937, val_loss: 0.057982, val_acc: 25.371769
+Epoch [3498], train_loss: 0.059843, val_loss: 0.058035, val_acc: 25.239124
+Epoch [3499], train_loss: 0.059642, val_loss: 0.057994, val_acc: 25.300665
+Epoch [3500], train_loss: 0.059871, val_loss: 0.058105, val_acc: 25.143959
+Epoch [3501], train_loss: 0.059685, val_loss: 0.058053, val_acc: 25.268234
+Epoch [3502], train_loss: 0.059736, val_loss: 0.058006, val_acc: 25.239958
+Epoch [3503], train_loss: 0.059721, val_loss: 0.058031, val_acc: 25.279373
+Epoch [3504], train_loss: 0.059755, val_loss: 0.058122, val_acc: 25.221838
+Epoch [3505], train_loss: 0.059577, val_loss: 0.057962, val_acc: 25.330328
+Epoch [3506], train_loss: 0.059604, val_loss: 0.057978, val_acc: 25.246592
+Epoch [3507], train_loss: 0.059818, val_loss: 0.057979, val_acc: 25.261814
+Epoch [3508], train_loss: 0.059727, val_loss: 0.058036, val_acc: 25.205030
+Epoch [3509], train_loss: 0.059806, val_loss: 0.057998, val_acc: 25.300102
+slurmstepd: error: *** JOB 25621936 ON nrg05 CANCELLED AT 2022-02-21T11:02:17 ***
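This run had plateaued long before the cancellation: from about epoch 3400 onward train_loss hovers near 0.0597, val_loss near 0.0580, and val_acc oscillates around 25.3 with no sustained improvement. A minimal early-stopping sketch that would end such a run automatically (hypothetical, not part of the UNet_V*.py scripts; the patience and min_delta values are assumptions):

# Hedged sketch: stop training once val_loss stops improving.
# patience and min_delta are assumed values, not taken from this repo.
class EarlyStopping:
    def __init__(self, patience=50, min_delta=1e-4):
        self.patience = patience
        self.min_delta = min_delta
        self.best = float("inf")
        self.bad_epochs = 0

    def step(self, val_loss):
        # Returns True when training should stop.
        if val_loss < self.best - self.min_delta:
            self.best = val_loss
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience

# Usage inside the epoch loop: if stopper.step(val_loss): break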
diff --git a/UNet/Sim_logs/UNet_V12_25655856.log b/UNet/Sim_logs/UNet_V12_25655856.log
new file mode 100644
index 0000000000000000000000000000000000000000..27b0a6f536a1eb1520346c9f455169840e0dae34
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V12_25655856.log
@@ -0,0 +1,14 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+ERROR: Could not install packages due to an OSError: [Errno 116] Stale file handle
+
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+python3 ./UNet_V12.py  0.44s user 0.19s system 15% cpu 4.079 total
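The stale-file-handle OSError above left torch uninstalled, so the job spent only about four seconds of its 90-hour allocation before python3 exited on the failed import. A defensive sketch (an assumption, not something the current scripts do) is to check the import explicitly so a broken environment fails with a clear message:

# Hedged sketch: abort up front if the torch install did not survive.
import importlib
import sys

try:
    torch = importlib.import_module("torch")
except ImportError as err:  # e.g. after a failed pip install
    sys.exit(f"torch unavailable ({err}); aborting before the training loop")
print(f"using torch {torch.__version__}")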
diff --git a/UNet/Sim_logs/UNet_V12_25657700.log b/UNet/Sim_logs/UNet_V12_25657700.log
new file mode 100644
index 0000000000000000000000000000000000000000..605ec8f4f3455c410a6813694084812af19d2a5d
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V12_25657700.log
@@ -0,0 +1,320 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 300
+batchsize: 12
+learning rate: 3e-05
+kernel size is: 9
+seed is: 4219779809
+Epoch [0], train_loss: 0.330932, val_loss: 0.333908, val_acc: 0.000079
+Epoch [1], train_loss: 0.326367, val_loss: 0.298662, val_acc: 0.002234
+Epoch [2], train_loss: 0.322735, val_loss: 0.271929, val_acc: 0.011306
+Epoch [3], train_loss: 0.317418, val_loss: 0.269272, val_acc: 0.021464
+Epoch [4], train_loss: 0.311764, val_loss: 0.237774, val_acc: 0.118737
+Epoch [5], train_loss: 0.307452, val_loss: 0.224266, val_acc: 0.245894
+Epoch [6], train_loss: 0.301535, val_loss: 0.203132, val_acc: 0.662440
+Epoch [7], train_loss: 0.298223, val_loss: 0.205178, val_acc: 0.616535
+Epoch [8], train_loss: 0.292946, val_loss: 0.201957, val_acc: 0.668476
+Epoch [9], train_loss: 0.289787, val_loss: 0.198624, val_acc: 0.813807
+Epoch [10], train_loss: 0.287935, val_loss: 0.198760, val_acc: 0.863931
+Epoch [11], train_loss: 0.283918, val_loss: 0.187972, val_acc: 1.185447
+Epoch [12], train_loss: 0.279013, val_loss: 0.190985, val_acc: 1.207216
+Epoch [13], train_loss: 0.277025, val_loss: 0.190413, val_acc: 1.086987
+Epoch [14], train_loss: 0.272979, val_loss: 0.172012, val_acc: 1.718978
+Epoch [15], train_loss: 0.269731, val_loss: 0.185849, val_acc: 1.012337
+Epoch [16], train_loss: 0.265869, val_loss: 0.178654, val_acc: 1.319245
+Epoch [17], train_loss: 0.263417, val_loss: 0.176885, val_acc: 1.360124
+Epoch [18], train_loss: 0.259055, val_loss: 0.165301, val_acc: 1.719582
+Epoch [19], train_loss: 0.255452, val_loss: 0.174970, val_acc: 1.327765
+Epoch [20], train_loss: 0.253257, val_loss: 0.176343, val_acc: 1.182486
+Epoch [21], train_loss: 0.249228, val_loss: 0.160585, val_acc: 2.034065
+Epoch [22], train_loss: 0.247194, val_loss: 0.159162, val_acc: 1.855866
+Epoch [23], train_loss: 0.241735, val_loss: 0.153731, val_acc: 2.196544
+Epoch [24], train_loss: 0.239423, val_loss: 0.146373, val_acc: 2.455339
+Epoch [25], train_loss: 0.235884, val_loss: 0.144393, val_acc: 2.595568
+Epoch [26], train_loss: 0.234214, val_loss: 0.151597, val_acc: 2.237917
+Epoch [27], train_loss: 0.231163, val_loss: 0.134683, val_acc: 2.582559
+Epoch [28], train_loss: 0.228238, val_loss: 0.137483, val_acc: 2.816873
+Epoch [29], train_loss: 0.223509, val_loss: 0.138756, val_acc: 2.355864
+Epoch [30], train_loss: 0.222387, val_loss: 0.139537, val_acc: 2.667137
+Epoch [31], train_loss: 0.220239, val_loss: 0.134597, val_acc: 2.668186
+Epoch [32], train_loss: 0.216011, val_loss: 0.124042, val_acc: 3.022071
+Epoch [33], train_loss: 0.212218, val_loss: 0.115443, val_acc: 3.173354
+Epoch [34], train_loss: 0.211256, val_loss: 0.130666, val_acc: 2.820698
+Epoch [35], train_loss: 0.209040, val_loss: 0.135068, val_acc: 2.422821
+Epoch [36], train_loss: 0.204512, val_loss: 0.118570, val_acc: 3.175327
+Epoch [37], train_loss: 0.201221, val_loss: 0.128671, val_acc: 2.704814
+Epoch [38], train_loss: 0.198098, val_loss: 0.120194, val_acc: 2.968834
+Epoch [39], train_loss: 0.196409, val_loss: 0.122263, val_acc: 2.977932
+Epoch [40], train_loss: 0.194247, val_loss: 0.112889, val_acc: 3.273737
+Epoch [41], train_loss: 0.189588, val_loss: 0.111694, val_acc: 3.201276
+Epoch [42], train_loss: 0.189154, val_loss: 0.118303, val_acc: 2.956765
+Epoch [43], train_loss: 0.184660, val_loss: 0.114128, val_acc: 3.022040
+Epoch [44], train_loss: 0.182272, val_loss: 0.107020, val_acc: 3.435851
+Epoch [45], train_loss: 0.180298, val_loss: 0.110453, val_acc: 3.212457
+Epoch [46], train_loss: 0.177246, val_loss: 0.110458, val_acc: 3.285410
+Epoch [47], train_loss: 0.175345, val_loss: 0.117425, val_acc: 2.662162
+Epoch [48], train_loss: 0.171582, val_loss: 0.099856, val_acc: 3.417045
+Epoch [49], train_loss: 0.170042, val_loss: 0.104306, val_acc: 3.357560
+Epoch [50], train_loss: 0.167819, val_loss: 0.095344, val_acc: 3.898432
+Epoch [51], train_loss: 0.165388, val_loss: 0.104266, val_acc: 3.386984
+Epoch [52], train_loss: 0.162697, val_loss: 0.092880, val_acc: 3.967252
+Epoch [53], train_loss: 0.160542, val_loss: 0.095196, val_acc: 3.769666
+Epoch [54], train_loss: 0.156323, val_loss: 0.093582, val_acc: 3.865036
+Epoch [55], train_loss: 0.154797, val_loss: 0.097115, val_acc: 3.768576
+Epoch [56], train_loss: 0.151841, val_loss: 0.086748, val_acc: 4.480707
+Epoch [57], train_loss: 0.149365, val_loss: 0.077681, val_acc: 5.819269
+Epoch [58], train_loss: 0.148627, val_loss: 0.089356, val_acc: 4.467890
+Epoch [59], train_loss: 0.145796, val_loss: 0.087287, val_acc: 4.462812
+Epoch [60], train_loss: 0.143554, val_loss: 0.087449, val_acc: 4.362958
+Epoch [61], train_loss: 0.141680, val_loss: 0.086598, val_acc: 4.560365
+Epoch [62], train_loss: 0.140820, val_loss: 0.093668, val_acc: 3.916125
+Epoch [63], train_loss: 0.138518, val_loss: 0.096764, val_acc: 3.759207
+Epoch [64], train_loss: 0.135202, val_loss: 0.084972, val_acc: 4.720425
+Epoch [65], train_loss: 0.131120, val_loss: 0.082765, val_acc: 4.993444
+Epoch [66], train_loss: 0.130263, val_loss: 0.084944, val_acc: 4.687880
+Epoch [67], train_loss: 0.128340, val_loss: 0.077728, val_acc: 5.733890
+Epoch [68], train_loss: 0.126514, val_loss: 0.091514, val_acc: 4.317702
+Epoch [69], train_loss: 0.124356, val_loss: 0.083951, val_acc: 4.904956
+Epoch [70], train_loss: 0.121970, val_loss: 0.076883, val_acc: 6.161818
+Epoch [71], train_loss: 0.121167, val_loss: 0.075282, val_acc: 6.162612
+Epoch [72], train_loss: 0.118357, val_loss: 0.081848, val_acc: 5.203506
+Epoch [73], train_loss: 0.116403, val_loss: 0.072103, val_acc: 6.797554
+Epoch [74], train_loss: 0.115092, val_loss: 0.073429, val_acc: 6.377663
+Epoch [75], train_loss: 0.113431, val_loss: 0.076082, val_acc: 6.389636
+Epoch [76], train_loss: 0.108592, val_loss: 0.077715, val_acc: 5.728654
+Epoch [77], train_loss: 0.109136, val_loss: 0.083360, val_acc: 5.176933
+Epoch [78], train_loss: 0.106650, val_loss: 0.075688, val_acc: 6.085006
+Epoch [79], train_loss: 0.104850, val_loss: 0.075703, val_acc: 6.622899
+Epoch [80], train_loss: 0.103149, val_loss: 0.070089, val_acc: 7.100959
+Epoch [81], train_loss: 0.098914, val_loss: 0.067992, val_acc: 7.965209
+Epoch [82], train_loss: 0.099671, val_loss: 0.068102, val_acc: 7.380318
+Epoch [83], train_loss: 0.097020, val_loss: 0.067948, val_acc: 8.361097
+Epoch [84], train_loss: 0.096013, val_loss: 0.069431, val_acc: 8.938758
+Epoch [85], train_loss: 0.093117, val_loss: 0.070982, val_acc: 8.344923
+Epoch [86], train_loss: 0.091744, val_loss: 0.060632, val_acc: 10.024256
+Epoch [87], train_loss: 0.090020, val_loss: 0.060416, val_acc: 9.948620
+Epoch [88], train_loss: 0.088730, val_loss: 0.060369, val_acc: 10.208632
+Epoch [89], train_loss: 0.087630, val_loss: 0.054752, val_acc: 12.275188
+Epoch [90], train_loss: 0.085918, val_loss: 0.058509, val_acc: 10.788905
+Epoch [91], train_loss: 0.083586, val_loss: 0.057303, val_acc: 11.429493
+Epoch [92], train_loss: 0.081877, val_loss: 0.057905, val_acc: 11.238928
+Epoch [93], train_loss: 0.079967, val_loss: 0.053880, val_acc: 12.298821
+Epoch [94], train_loss: 0.078955, val_loss: 0.052080, val_acc: 12.871546
+Epoch [95], train_loss: 0.077945, val_loss: 0.055148, val_acc: 11.921134
+Epoch [96], train_loss: 0.076258, val_loss: 0.051707, val_acc: 13.099904
+Epoch [97], train_loss: 0.075198, val_loss: 0.053121, val_acc: 12.995317
+Epoch [98], train_loss: 0.072808, val_loss: 0.052737, val_acc: 12.941602
+Epoch [99], train_loss: 0.072506, val_loss: 0.051848, val_acc: 13.005103
+Epoch [100], train_loss: 0.070697, val_loss: 0.050875, val_acc: 13.091144
+Epoch [101], train_loss: 0.068954, val_loss: 0.047773, val_acc: 14.077797
+Epoch [102], train_loss: 0.067856, val_loss: 0.047796, val_acc: 14.020807
+Epoch [103], train_loss: 0.067277, val_loss: 0.047484, val_acc: 14.092660
+Epoch [104], train_loss: 0.065078, val_loss: 0.048702, val_acc: 13.452213
+Epoch [105], train_loss: 0.063880, val_loss: 0.047979, val_acc: 13.946779
+Epoch [106], train_loss: 0.062933, val_loss: 0.045329, val_acc: 14.382503
+Epoch [107], train_loss: 0.062097, val_loss: 0.044581, val_acc: 14.692544
+Epoch [108], train_loss: 0.059715, val_loss: 0.045984, val_acc: 14.149676
+Epoch [109], train_loss: 0.059357, val_loss: 0.045581, val_acc: 14.480347
+Epoch [110], train_loss: 0.058289, val_loss: 0.041901, val_acc: 15.219129
+Epoch [111], train_loss: 0.057351, val_loss: 0.044165, val_acc: 14.692810
+Epoch [112], train_loss: 0.056865, val_loss: 0.041789, val_acc: 15.050001
+Epoch [113], train_loss: 0.056389, val_loss: 0.039803, val_acc: 16.413157
+Epoch [114], train_loss: 0.054279, val_loss: 0.041715, val_acc: 15.648774
+Epoch [115], train_loss: 0.052641, val_loss: 0.041312, val_acc: 15.357080
+Epoch [116], train_loss: 0.051913, val_loss: 0.039806, val_acc: 16.508070
+Epoch [117], train_loss: 0.051776, val_loss: 0.041246, val_acc: 16.545189
+Epoch [118], train_loss: 0.050722, val_loss: 0.039352, val_acc: 16.969158
+Epoch [119], train_loss: 0.050137, val_loss: 0.039767, val_acc: 16.568089
+Epoch [120], train_loss: 0.049011, val_loss: 0.039279, val_acc: 17.686954
+Epoch [121], train_loss: 0.048122, val_loss: 0.039168, val_acc: 17.295559
+Epoch [122], train_loss: 0.047595, val_loss: 0.039003, val_acc: 17.673378
+Epoch [123], train_loss: 0.047206, val_loss: 0.038182, val_acc: 18.208027
+Epoch [124], train_loss: 0.047462, val_loss: 0.038081, val_acc: 19.202049
+Epoch [125], train_loss: 0.046766, val_loss: 0.037916, val_acc: 18.842037
+Epoch [126], train_loss: 0.046549, val_loss: 0.037296, val_acc: 19.525326
+Epoch [127], train_loss: 0.046119, val_loss: 0.037547, val_acc: 19.612654
+Epoch [128], train_loss: 0.044818, val_loss: 0.037488, val_acc: 20.283020
+Epoch [129], train_loss: 0.045145, val_loss: 0.037154, val_acc: 19.891037
+Epoch [130], train_loss: 0.044425, val_loss: 0.036449, val_acc: 21.353188
+Epoch [131], train_loss: 0.044426, val_loss: 0.037429, val_acc: 20.075893
+Epoch [132], train_loss: 0.044464, val_loss: 0.036686, val_acc: 20.678669
+Epoch [133], train_loss: 0.043490, val_loss: 0.036976, val_acc: 21.383036
+Epoch [134], train_loss: 0.043925, val_loss: 0.037306, val_acc: 21.430660
+Epoch [135], train_loss: 0.043271, val_loss: 0.037566, val_acc: 21.239231
+Epoch [136], train_loss: 0.042751, val_loss: 0.036352, val_acc: 21.908257
+Epoch [137], train_loss: 0.042788, val_loss: 0.035632, val_acc: 22.820667
+Epoch [138], train_loss: 0.043218, val_loss: 0.036288, val_acc: 22.643335
+Epoch [139], train_loss: 0.042925, val_loss: 0.036858, val_acc: 21.479206
+Epoch [140], train_loss: 0.042788, val_loss: 0.036201, val_acc: 22.981276
+Epoch [141], train_loss: 0.042249, val_loss: 0.036397, val_acc: 21.859480
+Epoch [142], train_loss: 0.042385, val_loss: 0.036429, val_acc: 22.866312
+Epoch [143], train_loss: 0.041660, val_loss: 0.035811, val_acc: 22.886631
+Epoch [144], train_loss: 0.041826, val_loss: 0.036749, val_acc: 22.163021
+Epoch [145], train_loss: 0.042171, val_loss: 0.035593, val_acc: 23.413296
+Epoch [146], train_loss: 0.042649, val_loss: 0.035336, val_acc: 23.473188
+Epoch [147], train_loss: 0.041424, val_loss: 0.035379, val_acc: 23.291012
+Epoch [148], train_loss: 0.041121, val_loss: 0.035905, val_acc: 23.378353
+Epoch [149], train_loss: 0.041450, val_loss: 0.035856, val_acc: 23.402935
+Epoch [150], train_loss: 0.041455, val_loss: 0.035896, val_acc: 23.266470
+Epoch [151], train_loss: 0.042443, val_loss: 0.036178, val_acc: 23.030109
+Epoch [152], train_loss: 0.042072, val_loss: 0.035429, val_acc: 23.700045
+Epoch [153], train_loss: 0.041407, val_loss: 0.035655, val_acc: 23.288815
+Epoch [154], train_loss: 0.041409, val_loss: 0.036043, val_acc: 22.924623
+Epoch [155], train_loss: 0.040374, val_loss: 0.034996, val_acc: 23.503851
+Epoch [156], train_loss: 0.040767, val_loss: 0.035214, val_acc: 23.645016
+Epoch [157], train_loss: 0.040616, val_loss: 0.035498, val_acc: 23.884483
+Epoch [158], train_loss: 0.040749, val_loss: 0.035638, val_acc: 23.437801
+Epoch [159], train_loss: 0.040498, val_loss: 0.035148, val_acc: 23.621138
+Epoch [160], train_loss: 0.040402, val_loss: 0.035531, val_acc: 23.408960
+Epoch [161], train_loss: 0.040273, val_loss: 0.034853, val_acc: 23.856819
+Epoch [162], train_loss: 0.040445, val_loss: 0.034955, val_acc: 24.082684
+Epoch [163], train_loss: 0.040153, val_loss: 0.035271, val_acc: 23.800920
+Epoch [164], train_loss: 0.040043, val_loss: 0.034128, val_acc: 24.205929
+Epoch [165], train_loss: 0.040324, val_loss: 0.035199, val_acc: 23.833691
+Epoch [166], train_loss: 0.040708, val_loss: 0.034773, val_acc: 24.128401
+Epoch [167], train_loss: 0.040256, val_loss: 0.035201, val_acc: 23.925947
+Epoch [168], train_loss: 0.039837, val_loss: 0.035064, val_acc: 23.911997
+Epoch [169], train_loss: 0.040059, val_loss: 0.034638, val_acc: 24.182192
+Epoch [170], train_loss: 0.039366, val_loss: 0.034986, val_acc: 24.047337
+Epoch [171], train_loss: 0.039361, val_loss: 0.034532, val_acc: 24.094643
+Epoch [172], train_loss: 0.039471, val_loss: 0.034924, val_acc: 23.921383
+Epoch [173], train_loss: 0.039938, val_loss: 0.034866, val_acc: 24.063986
+Epoch [174], train_loss: 0.039132, val_loss: 0.034507, val_acc: 24.204281
+Epoch [175], train_loss: 0.039309, val_loss: 0.034070, val_acc: 24.343960
+Epoch [176], train_loss: 0.039127, val_loss: 0.034897, val_acc: 24.051970
+Epoch [177], train_loss: 0.039419, val_loss: 0.035162, val_acc: 23.899141
+Epoch [178], train_loss: 0.039658, val_loss: 0.034556, val_acc: 24.445030
+Epoch [179], train_loss: 0.039258, val_loss: 0.034770, val_acc: 23.768597
+Epoch [180], train_loss: 0.039566, val_loss: 0.034871, val_acc: 23.959322
+Epoch [181], train_loss: 0.038523, val_loss: 0.034196, val_acc: 24.343197
+Epoch [182], train_loss: 0.038964, val_loss: 0.035054, val_acc: 23.886755
+Epoch [183], train_loss: 0.039052, val_loss: 0.034299, val_acc: 24.593691
+Epoch [184], train_loss: 0.038854, val_loss: 0.034092, val_acc: 24.725517
+Epoch [185], train_loss: 0.038436, val_loss: 0.034496, val_acc: 24.345980
+Epoch [186], train_loss: 0.038562, val_loss: 0.033564, val_acc: 24.741579
+Epoch [187], train_loss: 0.038796, val_loss: 0.034359, val_acc: 24.119211
+Epoch [188], train_loss: 0.038681, val_loss: 0.033935, val_acc: 24.618692
+Epoch [189], train_loss: 0.038690, val_loss: 0.034429, val_acc: 24.333197
+Epoch [190], train_loss: 0.038519, val_loss: 0.034131, val_acc: 24.448185
+Epoch [191], train_loss: 0.038545, val_loss: 0.034177, val_acc: 24.591734
+Epoch [192], train_loss: 0.038372, val_loss: 0.034516, val_acc: 24.201221
+Epoch [193], train_loss: 0.038440, val_loss: 0.033833, val_acc: 24.720915
+Epoch [194], train_loss: 0.038325, val_loss: 0.034234, val_acc: 24.181847
+Epoch [195], train_loss: 0.038361, val_loss: 0.034016, val_acc: 24.440741
+Epoch [196], train_loss: 0.038143, val_loss: 0.033818, val_acc: 24.717918
+Epoch [197], train_loss: 0.037874, val_loss: 0.034109, val_acc: 24.479546
+Epoch [198], train_loss: 0.038581, val_loss: 0.033707, val_acc: 24.584669
+Epoch [199], train_loss: 0.038350, val_loss: 0.035012, val_acc: 24.049282
+Epoch [200], train_loss: 0.037887, val_loss: 0.033940, val_acc: 24.642500
+Epoch [201], train_loss: 0.037502, val_loss: 0.033480, val_acc: 24.653925
+Epoch [202], train_loss: 0.037965, val_loss: 0.033619, val_acc: 24.758905
+Epoch [203], train_loss: 0.038016, val_loss: 0.033458, val_acc: 24.493906
+Epoch [204], train_loss: 0.038090, val_loss: 0.034434, val_acc: 24.392431
+Epoch [205], train_loss: 0.038159, val_loss: 0.033409, val_acc: 24.546669
+Epoch [206], train_loss: 0.037514, val_loss: 0.033463, val_acc: 24.699381
+Epoch [207], train_loss: 0.037927, val_loss: 0.033748, val_acc: 24.519545
+Epoch [208], train_loss: 0.037796, val_loss: 0.033257, val_acc: 24.681526
+Epoch [209], train_loss: 0.037889, val_loss: 0.033901, val_acc: 24.535894
+Epoch [210], train_loss: 0.038053, val_loss: 0.034457, val_acc: 24.255098
+Epoch [211], train_loss: 0.037852, val_loss: 0.033525, val_acc: 24.697960
+Epoch [212], train_loss: 0.037701, val_loss: 0.033219, val_acc: 24.727812
+Epoch [213], train_loss: 0.037195, val_loss: 0.033079, val_acc: 24.843166
+Epoch [214], train_loss: 0.037678, val_loss: 0.033712, val_acc: 24.551096
+Epoch [215], train_loss: 0.037775, val_loss: 0.033296, val_acc: 24.755516
+Epoch [216], train_loss: 0.037194, val_loss: 0.033020, val_acc: 24.803595
+Epoch [217], train_loss: 0.037821, val_loss: 0.033999, val_acc: 24.769567
+Epoch [218], train_loss: 0.037511, val_loss: 0.034653, val_acc: 24.605206
+Epoch [219], train_loss: 0.037332, val_loss: 0.033486, val_acc: 24.609325
+Epoch [220], train_loss: 0.037339, val_loss: 0.032950, val_acc: 24.905024
+Epoch [221], train_loss: 0.037468, val_loss: 0.032985, val_acc: 24.933559
+Epoch [222], train_loss: 0.037656, val_loss: 0.033603, val_acc: 24.902418
+Epoch [223], train_loss: 0.037632, val_loss: 0.033585, val_acc: 24.481031
+Epoch [224], train_loss: 0.037330, val_loss: 0.033751, val_acc: 24.783964
+Epoch [225], train_loss: 0.036755, val_loss: 0.032837, val_acc: 24.743954
+Epoch [226], train_loss: 0.037807, val_loss: 0.033143, val_acc: 24.839422
+Epoch [227], train_loss: 0.036577, val_loss: 0.033573, val_acc: 24.640791
+Epoch [228], train_loss: 0.037649, val_loss: 0.033804, val_acc: 24.425098
+Epoch [229], train_loss: 0.037191, val_loss: 0.033629, val_acc: 24.616665
+Epoch [230], train_loss: 0.037408, val_loss: 0.033175, val_acc: 24.737001
+Epoch [231], train_loss: 0.036969, val_loss: 0.033340, val_acc: 24.935492
+Epoch [232], train_loss: 0.037673, val_loss: 0.033390, val_acc: 24.755821
+Epoch [233], train_loss: 0.036895, val_loss: 0.033261, val_acc: 24.819834
+Epoch [234], train_loss: 0.036949, val_loss: 0.032941, val_acc: 24.897337
+Epoch [235], train_loss: 0.036304, val_loss: 0.032821, val_acc: 24.992735
+Epoch [236], train_loss: 0.036954, val_loss: 0.032833, val_acc: 24.966488
+Epoch [237], train_loss: 0.036929, val_loss: 0.032484, val_acc: 24.990423
+Epoch [238], train_loss: 0.036468, val_loss: 0.033028, val_acc: 24.982964
+Epoch [239], train_loss: 0.036784, val_loss: 0.033362, val_acc: 25.127249
+Epoch [240], train_loss: 0.036711, val_loss: 0.033313, val_acc: 24.640331
+Epoch [241], train_loss: 0.036988, val_loss: 0.033128, val_acc: 24.968746
+Epoch [242], train_loss: 0.036938, val_loss: 0.032501, val_acc: 24.859625
+Epoch [243], train_loss: 0.036927, val_loss: 0.033366, val_acc: 24.789770
+Epoch [244], train_loss: 0.036418, val_loss: 0.033090, val_acc: 24.880058
+Epoch [245], train_loss: 0.037121, val_loss: 0.032742, val_acc: 24.891336
+Epoch [246], train_loss: 0.036638, val_loss: 0.033638, val_acc: 24.720098
+Epoch [247], train_loss: 0.036487, val_loss: 0.032908, val_acc: 25.141048
+Epoch [248], train_loss: 0.036577, val_loss: 0.033265, val_acc: 24.649408
+Epoch [249], train_loss: 0.036935, val_loss: 0.033175, val_acc: 24.869314
+Epoch [250], train_loss: 0.036512, val_loss: 0.032712, val_acc: 24.983177
+Epoch [251], train_loss: 0.036147, val_loss: 0.032690, val_acc: 25.089064
+Epoch [252], train_loss: 0.036788, val_loss: 0.032996, val_acc: 25.032606
+Epoch [253], train_loss: 0.036499, val_loss: 0.032553, val_acc: 25.081976
+Epoch [254], train_loss: 0.036900, val_loss: 0.032840, val_acc: 25.129053
+Epoch [255], train_loss: 0.036549, val_loss: 0.033194, val_acc: 24.795155
+Epoch [256], train_loss: 0.036325, val_loss: 0.032767, val_acc: 25.022554
+Epoch [257], train_loss: 0.036797, val_loss: 0.032942, val_acc: 24.712595
+Epoch [258], train_loss: 0.036790, val_loss: 0.032518, val_acc: 25.115345
+Epoch [259], train_loss: 0.036268, val_loss: 0.032287, val_acc: 25.192682
+Epoch [260], train_loss: 0.036530, val_loss: 0.032475, val_acc: 24.956091
+Epoch [261], train_loss: 0.035585, val_loss: 0.032887, val_acc: 25.007650
+Epoch [262], train_loss: 0.036116, val_loss: 0.032643, val_acc: 25.052174
+Epoch [263], train_loss: 0.036488, val_loss: 0.033617, val_acc: 24.740458
+Epoch [264], train_loss: 0.036693, val_loss: 0.033207, val_acc: 25.126368
+Epoch [265], train_loss: 0.036630, val_loss: 0.033515, val_acc: 24.555309
+Epoch [266], train_loss: 0.036635, val_loss: 0.032624, val_acc: 25.150656
+Epoch [267], train_loss: 0.036110, val_loss: 0.032706, val_acc: 25.204714
+Epoch [268], train_loss: 0.035867, val_loss: 0.032187, val_acc: 25.276003
+Epoch [269], train_loss: 0.035860, val_loss: 0.032671, val_acc: 24.946751
+Epoch [270], train_loss: 0.036218, val_loss: 0.032459, val_acc: 24.929037
+Epoch [271], train_loss: 0.036119, val_loss: 0.032071, val_acc: 25.101917
+Epoch [272], train_loss: 0.036473, val_loss: 0.033474, val_acc: 25.086012
+Epoch [273], train_loss: 0.036044, val_loss: 0.032007, val_acc: 25.159664
+Epoch [274], train_loss: 0.035925, val_loss: 0.032099, val_acc: 25.052015
+Epoch [275], train_loss: 0.036379, val_loss: 0.032056, val_acc: 25.175943
+Epoch [276], train_loss: 0.036185, val_loss: 0.031987, val_acc: 25.179211
+Epoch [277], train_loss: 0.035944, val_loss: 0.032325, val_acc: 25.194370
+Epoch [278], train_loss: 0.035777, val_loss: 0.032955, val_acc: 25.034834
+Epoch [279], train_loss: 0.036075, val_loss: 0.032918, val_acc: 25.029425
+Epoch [280], train_loss: 0.036176, val_loss: 0.032247, val_acc: 24.961863
+Epoch [281], train_loss: 0.035823, val_loss: 0.032517, val_acc: 24.841063
+Epoch [282], train_loss: 0.035886, val_loss: 0.032249, val_acc: 25.195751
+Epoch [283], train_loss: 0.035490, val_loss: 0.032022, val_acc: 25.163242
+Epoch [284], train_loss: 0.036145, val_loss: 0.032215, val_acc: 25.233896
+Epoch [285], train_loss: 0.035476, val_loss: 0.032426, val_acc: 24.995411
+Epoch [286], train_loss: 0.035604, val_loss: 0.032744, val_acc: 24.929636
+Epoch [287], train_loss: 0.036543, val_loss: 0.032553, val_acc: 25.068060
+Epoch [288], train_loss: 0.035707, val_loss: 0.032896, val_acc: 25.078625
+Epoch [289], train_loss: 0.035712, val_loss: 0.032501, val_acc: 25.049051
+Epoch [290], train_loss: 0.035766, val_loss: 0.032976, val_acc: 25.072905
+Epoch [291], train_loss: 0.036003, val_loss: 0.032285, val_acc: 25.081154
+Epoch [292], train_loss: 0.035908, val_loss: 0.032722, val_acc: 24.871487
+Epoch [293], train_loss: 0.035887, val_loss: 0.032475, val_acc: 25.035276
+Epoch [294], train_loss: 0.035564, val_loss: 0.032550, val_acc: 25.166437
+Epoch [295], train_loss: 0.035674, val_loss: 0.032304, val_acc: 25.295708
+Epoch [296], train_loss: 0.035665, val_loss: 0.032232, val_acc: 25.198397
+Epoch [297], train_loss: 0.035727, val_loss: 0.032301, val_acc: 25.134348
+Epoch [298], train_loss: 0.035753, val_loss: 0.032482, val_acc: 25.167299
+Epoch [299], train_loss: 0.035616, val_loss: 0.032304, val_acc: 25.094013
+python3 ./UNet_V12.py  44015.83s user 43330.11s system 99% cpu 24:16:51.30 total
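Every epoch line in these Sim_logs files follows one fixed format, which makes run-to-run comparison easy to script. A small parser sketch (only the line format is taken from the logs above; the path in the usage note is a placeholder):

# Hedged sketch: extract (epoch, train_loss, val_loss, val_acc) from a Sim_logs file.
import re

EPOCH_RE = re.compile(
    r"Epoch \[(\d+)\], train_loss: ([\d.]+), "
    r"val_loss: ([\d.]+), val_acc: ([\d.]+)"
)

def parse_log(path):
    with open(path) as fh:
        for line in fh:
            m = EPOCH_RE.search(line)
            if m:
                e, tr, vl, va = m.groups()
                yield int(e), float(tr), float(vl), float(va)

# Usage: rows = list(parse_log("Sim_logs/UNet_V12_25657700.log"))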
diff --git a/UNet/Sim_logs/UNet_V13_25657882.log b/UNet/Sim_logs/UNet_V13_25657882.log
new file mode 100644
index 0000000000000000000000000000000000000000..a7c9e838d23e1da4300bdc0ad79acc7f622b2445
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V13_25657882.log
@@ -0,0 +1,320 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 300
+batchsize: 10
+learning rate: 3e-05
+kernel size is: 7
+seed is: 4259142679
+Epoch [0], train_loss: 0.454919, val_loss: 0.472424, val_acc: 0.000000
+Epoch [1], train_loss: 0.450413, val_loss: 0.468140, val_acc: 0.000005
+Epoch [2], train_loss: 0.447051, val_loss: 0.465443, val_acc: 0.000005
+Epoch [3], train_loss: 0.445552, val_loss: 0.462944, val_acc: 0.000005
+Epoch [4], train_loss: 0.445250, val_loss: 0.440080, val_acc: 0.000010
+Epoch [5], train_loss: 0.444165, val_loss: 0.435054, val_acc: 0.000010
+Epoch [6], train_loss: 0.442540, val_loss: 0.459324, val_acc: 0.000005
+Epoch [7], train_loss: 0.442120, val_loss: 0.453200, val_acc: 0.000005
+Epoch [8], train_loss: 0.441151, val_loss: 0.468032, val_acc: 0.000002
+Epoch [9], train_loss: 0.440469, val_loss: 0.485783, val_acc: 0.000000
+Epoch [10], train_loss: 0.439867, val_loss: 0.459353, val_acc: 0.000002
+Epoch [11], train_loss: 0.438918, val_loss: 0.466173, val_acc: 0.000000
+Epoch [12], train_loss: 0.438403, val_loss: 0.474290, val_acc: 0.000000
+Epoch [13], train_loss: 0.437321, val_loss: 0.467174, val_acc: 0.000000
+Epoch [14], train_loss: 0.437156, val_loss: 0.464499, val_acc: 0.000000
+Epoch [15], train_loss: 0.436918, val_loss: 0.476911, val_acc: 0.000000
+Epoch [16], train_loss: 0.435666, val_loss: 0.463752, val_acc: 0.000000
+Epoch [17], train_loss: 0.435774, val_loss: 0.467874, val_acc: 0.000000
+Epoch [18], train_loss: 0.434227, val_loss: 0.475646, val_acc: 0.000000
+Epoch [19], train_loss: 0.432749, val_loss: 0.474636, val_acc: 0.000000
+Epoch [20], train_loss: 0.433510, val_loss: 0.452345, val_acc: 0.000002
+Epoch [21], train_loss: 0.432112, val_loss: 0.490219, val_acc: 0.000000
+Epoch [22], train_loss: 0.431034, val_loss: 0.470604, val_acc: 0.000000
+Epoch [23], train_loss: 0.431205, val_loss: 0.455704, val_acc: 0.000000
+Epoch [24], train_loss: 0.430384, val_loss: 0.485743, val_acc: 0.000000
+Epoch [25], train_loss: 0.429517, val_loss: 0.475590, val_acc: 0.000000
+Epoch [26], train_loss: 0.428127, val_loss: 0.471683, val_acc: 0.000000
+Epoch [27], train_loss: 0.428085, val_loss: 0.463437, val_acc: 0.000000
+Epoch [28], train_loss: 0.426906, val_loss: 0.473415, val_acc: 0.000000
+Epoch [29], train_loss: 0.426836, val_loss: 0.476842, val_acc: 0.000000
+Epoch [30], train_loss: 0.425959, val_loss: 0.476416, val_acc: 0.000000
+Epoch [31], train_loss: 0.425423, val_loss: 0.474347, val_acc: 0.000000
+Epoch [32], train_loss: 0.425463, val_loss: 0.474484, val_acc: 0.000000
+Epoch [33], train_loss: 0.424158, val_loss: 0.462367, val_acc: 0.000000
+Epoch [34], train_loss: 0.423691, val_loss: 0.445456, val_acc: 0.000000
+Epoch [35], train_loss: 0.423309, val_loss: 0.435730, val_acc: 0.000002
+Epoch [36], train_loss: 0.422103, val_loss: 0.469645, val_acc: 0.000000
+Epoch [37], train_loss: 0.420998, val_loss: 0.459317, val_acc: 0.000000
+Epoch [38], train_loss: 0.420460, val_loss: 0.462440, val_acc: 0.000000
+Epoch [39], train_loss: 0.419403, val_loss: 0.455368, val_acc: 0.000000
+Epoch [40], train_loss: 0.418943, val_loss: 0.439703, val_acc: 0.000000
+Epoch [41], train_loss: 0.418217, val_loss: 0.439900, val_acc: 0.000000
+Epoch [42], train_loss: 0.417106, val_loss: 0.401938, val_acc: 0.000010
+Epoch [43], train_loss: 0.413960, val_loss: 0.414473, val_acc: 0.000010
+Epoch [44], train_loss: 0.412456, val_loss: 0.409499, val_acc: 0.000012
+Epoch [45], train_loss: 0.411529, val_loss: 0.396481, val_acc: 0.000012
+Epoch [46], train_loss: 0.410884, val_loss: 0.395699, val_acc: 0.000012
+Epoch [47], train_loss: 0.410078, val_loss: 0.394477, val_acc: 0.000012
+Epoch [48], train_loss: 0.408807, val_loss: 0.421912, val_acc: 0.000012
+Epoch [49], train_loss: 0.407664, val_loss: 0.408937, val_acc: 0.000012
+Epoch [50], train_loss: 0.406258, val_loss: 0.435875, val_acc: 0.000007
+Epoch [51], train_loss: 0.406395, val_loss: 0.401617, val_acc: 0.000012
+Epoch [52], train_loss: 0.405098, val_loss: 0.402642, val_acc: 0.000012
+Epoch [53], train_loss: 0.404149, val_loss: 0.389720, val_acc: 0.000012
+Epoch [54], train_loss: 0.403790, val_loss: 0.424951, val_acc: 0.000012
+Epoch [55], train_loss: 0.402866, val_loss: 0.405548, val_acc: 0.000012
+Epoch [56], train_loss: 0.402321, val_loss: 0.424365, val_acc: 0.000012
+Epoch [57], train_loss: 0.401078, val_loss: 0.398302, val_acc: 0.000012
+Epoch [58], train_loss: 0.400731, val_loss: 0.407022, val_acc: 0.000012
+Epoch [59], train_loss: 0.399539, val_loss: 0.417790, val_acc: 0.000014
+Epoch [60], train_loss: 0.398399, val_loss: 0.412996, val_acc: 0.000012
+Epoch [61], train_loss: 0.397972, val_loss: 0.401770, val_acc: 0.000012
+Epoch [62], train_loss: 0.396672, val_loss: 0.412115, val_acc: 0.000012
+Epoch [63], train_loss: 0.395674, val_loss: 0.418653, val_acc: 0.000012
+Epoch [64], train_loss: 0.395152, val_loss: 0.387932, val_acc: 0.000012
+Epoch [65], train_loss: 0.394075, val_loss: 0.405583, val_acc: 0.000012
+Epoch [66], train_loss: 0.393237, val_loss: 0.399943, val_acc: 0.000012
+Epoch [67], train_loss: 0.392937, val_loss: 0.377755, val_acc: 0.000012
+Epoch [68], train_loss: 0.391516, val_loss: 0.423917, val_acc: 0.000010
+Epoch [69], train_loss: 0.391563, val_loss: 0.395655, val_acc: 0.000012
+Epoch [70], train_loss: 0.390100, val_loss: 0.374335, val_acc: 0.000010
+Epoch [71], train_loss: 0.389510, val_loss: 0.414012, val_acc: 0.000014
+Epoch [72], train_loss: 0.388749, val_loss: 0.395169, val_acc: 0.000012
+Epoch [73], train_loss: 0.387852, val_loss: 0.377387, val_acc: 0.000010
+Epoch [74], train_loss: 0.387689, val_loss: 0.372207, val_acc: 0.000010
+Epoch [75], train_loss: 0.385784, val_loss: 0.401280, val_acc: 0.000012
+Epoch [76], train_loss: 0.385510, val_loss: 0.384863, val_acc: 0.000012
+Epoch [77], train_loss: 0.383984, val_loss: 0.403879, val_acc: 0.000012
+Epoch [78], train_loss: 0.383226, val_loss: 0.384920, val_acc: 0.000012
+Epoch [79], train_loss: 0.382886, val_loss: 0.387366, val_acc: 0.000012
+Epoch [80], train_loss: 0.380838, val_loss: 0.414086, val_acc: 0.000014
+Epoch [81], train_loss: 0.381342, val_loss: 0.374765, val_acc: 0.000010
+Epoch [82], train_loss: 0.380346, val_loss: 0.369848, val_acc: 0.000010
+Epoch [83], train_loss: 0.379326, val_loss: 0.386882, val_acc: 0.000012
+Epoch [84], train_loss: 0.378519, val_loss: 0.369920, val_acc: 0.000010
+Epoch [85], train_loss: 0.377660, val_loss: 0.390009, val_acc: 0.000012
+Epoch [86], train_loss: 0.377764, val_loss: 0.351680, val_acc: 0.000010
+Epoch [87], train_loss: 0.375657, val_loss: 0.355543, val_acc: 0.000117
+Epoch [88], train_loss: 0.375479, val_loss: 0.371712, val_acc: 0.000038
+Epoch [89], train_loss: 0.374485, val_loss: 0.375923, val_acc: 0.000036
+Epoch [90], train_loss: 0.373703, val_loss: 0.384659, val_acc: 0.000019
+Epoch [91], train_loss: 0.372796, val_loss: 0.390946, val_acc: 0.000026
+Epoch [92], train_loss: 0.371697, val_loss: 0.369039, val_acc: 0.000064
+Epoch [93], train_loss: 0.369837, val_loss: 0.354095, val_acc: 0.000143
+Epoch [94], train_loss: 0.370542, val_loss: 0.370132, val_acc: 0.000107
+Epoch [95], train_loss: 0.368574, val_loss: 0.371889, val_acc: 0.000191
+Epoch [96], train_loss: 0.368936, val_loss: 0.361940, val_acc: 0.000148
+Epoch [97], train_loss: 0.366453, val_loss: 0.378749, val_acc: 0.000129
+Epoch [98], train_loss: 0.367373, val_loss: 0.348691, val_acc: 0.000203
+Epoch [99], train_loss: 0.366531, val_loss: 0.348766, val_acc: 0.000126
+Epoch [100], train_loss: 0.364904, val_loss: 0.365184, val_acc: 0.000112
+Epoch [101], train_loss: 0.364516, val_loss: 0.366472, val_acc: 0.000062
+Epoch [102], train_loss: 0.363404, val_loss: 0.380847, val_acc: 0.000103
+Epoch [103], train_loss: 0.363296, val_loss: 0.350778, val_acc: 0.000098
+Epoch [104], train_loss: 0.360924, val_loss: 0.369595, val_acc: 0.000117
+Epoch [105], train_loss: 0.360851, val_loss: 0.353101, val_acc: 0.000057
+Epoch [106], train_loss: 0.361195, val_loss: 0.349997, val_acc: 0.000081
+Epoch [107], train_loss: 0.358195, val_loss: 0.368572, val_acc: 0.000076
+Epoch [108], train_loss: 0.357887, val_loss: 0.348768, val_acc: 0.000074
+Epoch [109], train_loss: 0.358209, val_loss: 0.342000, val_acc: 0.000101
+Epoch [110], train_loss: 0.358056, val_loss: 0.342260, val_acc: 0.000113
+Epoch [111], train_loss: 0.354882, val_loss: 0.386255, val_acc: 0.000064
+Epoch [112], train_loss: 0.356210, val_loss: 0.356048, val_acc: 0.000091
+Epoch [113], train_loss: 0.354304, val_loss: 0.373762, val_acc: 0.000048
+Epoch [114], train_loss: 0.353945, val_loss: 0.325477, val_acc: 0.000067
+Epoch [115], train_loss: 0.353338, val_loss: 0.333392, val_acc: 0.000032
+Epoch [116], train_loss: 0.353895, val_loss: 0.333212, val_acc: 0.000036
+Epoch [117], train_loss: 0.351959, val_loss: 0.350092, val_acc: 0.000007
+Epoch [118], train_loss: 0.351144, val_loss: 0.328471, val_acc: 0.000029
+Epoch [119], train_loss: 0.349751, val_loss: 0.322472, val_acc: 0.000032
+Epoch [120], train_loss: 0.350229, val_loss: 0.364284, val_acc: 0.000010
+Epoch [121], train_loss: 0.348323, val_loss: 0.377481, val_acc: 0.000010
+Epoch [122], train_loss: 0.347721, val_loss: 0.344489, val_acc: 0.000015
+Epoch [123], train_loss: 0.346970, val_loss: 0.342917, val_acc: 0.000015
+Epoch [124], train_loss: 0.345425, val_loss: 0.319098, val_acc: 0.000036
+Epoch [125], train_loss: 0.343942, val_loss: 0.344905, val_acc: 0.000012
+Epoch [126], train_loss: 0.343661, val_loss: 0.336993, val_acc: 0.000022
+Epoch [127], train_loss: 0.343870, val_loss: 0.333127, val_acc: 0.000024
+Epoch [128], train_loss: 0.344610, val_loss: 0.310766, val_acc: 0.000036
+Epoch [129], train_loss: 0.339901, val_loss: 0.369065, val_acc: 0.000012
+Epoch [130], train_loss: 0.341783, val_loss: 0.327196, val_acc: 0.000027
+Epoch [131], train_loss: 0.340387, val_loss: 0.352543, val_acc: 0.000005
+Epoch [132], train_loss: 0.338168, val_loss: 0.329162, val_acc: 0.000027
+Epoch [133], train_loss: 0.339784, val_loss: 0.333817, val_acc: 0.000024
+Epoch [134], train_loss: 0.338126, val_loss: 0.337254, val_acc: 0.000022
+Epoch [135], train_loss: 0.336698, val_loss: 0.334278, val_acc: 0.000024
+Epoch [136], train_loss: 0.335599, val_loss: 0.371823, val_acc: 0.000010
+Epoch [137], train_loss: 0.335258, val_loss: 0.311989, val_acc: 0.000036
+Epoch [138], train_loss: 0.333377, val_loss: 0.354898, val_acc: 0.000005
+Epoch [139], train_loss: 0.333675, val_loss: 0.331807, val_acc: 0.000024
+Epoch [140], train_loss: 0.333112, val_loss: 0.312823, val_acc: 0.000036
+Epoch [141], train_loss: 0.332754, val_loss: 0.333642, val_acc: 0.000024
+Epoch [142], train_loss: 0.332645, val_loss: 0.343598, val_acc: 0.000010
+Epoch [143], train_loss: 0.330290, val_loss: 0.336461, val_acc: 0.000022
+Epoch [144], train_loss: 0.327972, val_loss: 0.326111, val_acc: 0.000029
+Epoch [145], train_loss: 0.329810, val_loss: 0.326205, val_acc: 0.000024
+Epoch [146], train_loss: 0.328654, val_loss: 0.343274, val_acc: 0.000007
+Epoch [147], train_loss: 0.327846, val_loss: 0.296456, val_acc: 0.000108
+Epoch [148], train_loss: 0.327343, val_loss: 0.332193, val_acc: 0.000024
+Epoch [149], train_loss: 0.327543, val_loss: 0.337736, val_acc: 0.000017
+Epoch [150], train_loss: 0.325246, val_loss: 0.325093, val_acc: 0.000029
+Epoch [151], train_loss: 0.323796, val_loss: 0.308851, val_acc: 0.000036
+Epoch [152], train_loss: 0.322870, val_loss: 0.333243, val_acc: 0.000024
+Epoch [153], train_loss: 0.323560, val_loss: 0.343801, val_acc: 0.000007
+Epoch [154], train_loss: 0.321265, val_loss: 0.339004, val_acc: 0.000015
+Epoch [155], train_loss: 0.321042, val_loss: 0.317127, val_acc: 0.000036
+Epoch [156], train_loss: 0.319355, val_loss: 0.331125, val_acc: 0.000024
+Epoch [157], train_loss: 0.318003, val_loss: 0.346468, val_acc: 0.000005
+Epoch [158], train_loss: 0.319286, val_loss: 0.333113, val_acc: 0.000022
+Epoch [159], train_loss: 0.322551, val_loss: 0.275597, val_acc: 0.001040
+Epoch [160], train_loss: 0.315855, val_loss: 0.331252, val_acc: 0.000024
+Epoch [161], train_loss: 0.317473, val_loss: 0.296471, val_acc: 0.000082
+Epoch [162], train_loss: 0.315018, val_loss: 0.306466, val_acc: 0.000046
+Epoch [163], train_loss: 0.316667, val_loss: 0.328403, val_acc: 0.000024
+Epoch [164], train_loss: 0.315086, val_loss: 0.307568, val_acc: 0.000039
+Epoch [165], train_loss: 0.312605, val_loss: 0.324641, val_acc: 0.000029
+Epoch [166], train_loss: 0.311513, val_loss: 0.280240, val_acc: 0.000461
+Epoch [167], train_loss: 0.314532, val_loss: 0.283550, val_acc: 0.000245
+Epoch [168], train_loss: 0.310864, val_loss: 0.305887, val_acc: 0.000039
+Epoch [169], train_loss: 0.310986, val_loss: 0.292331, val_acc: 0.000117
+Epoch [170], train_loss: 0.306978, val_loss: 0.320912, val_acc: 0.000034
+Epoch [171], train_loss: 0.307536, val_loss: 0.322411, val_acc: 0.000032
+Epoch [172], train_loss: 0.307957, val_loss: 0.320405, val_acc: 0.000034
+Epoch [173], train_loss: 0.308291, val_loss: 0.281726, val_acc: 0.000300
+Epoch [174], train_loss: 0.308095, val_loss: 0.310066, val_acc: 0.000036
+Epoch [175], train_loss: 0.305718, val_loss: 0.320184, val_acc: 0.000036
+Epoch [176], train_loss: 0.307222, val_loss: 0.297588, val_acc: 0.000067
+Epoch [177], train_loss: 0.302904, val_loss: 0.302352, val_acc: 0.000048
+Epoch [178], train_loss: 0.305689, val_loss: 0.288641, val_acc: 0.000147
+Epoch [179], train_loss: 0.301028, val_loss: 0.301472, val_acc: 0.000046
+Epoch [180], train_loss: 0.300278, val_loss: 0.328107, val_acc: 0.000027
+Epoch [181], train_loss: 0.300005, val_loss: 0.324268, val_acc: 0.000027
+Epoch [182], train_loss: 0.301793, val_loss: 0.270243, val_acc: 0.001066
+Epoch [183], train_loss: 0.297957, val_loss: 0.320275, val_acc: 0.000036
+Epoch [184], train_loss: 0.296969, val_loss: 0.275148, val_acc: 0.000775
+Epoch [185], train_loss: 0.299225, val_loss: 0.268231, val_acc: 0.001543
+Epoch [186], train_loss: 0.295942, val_loss: 0.299514, val_acc: 0.000058
+Epoch [187], train_loss: 0.295432, val_loss: 0.303705, val_acc: 0.000044
+Epoch [188], train_loss: 0.297181, val_loss: 0.291693, val_acc: 0.000128
+Epoch [189], train_loss: 0.294458, val_loss: 0.260538, val_acc: 0.002829
+Epoch [190], train_loss: 0.291946, val_loss: 0.345700, val_acc: 0.000005
+Epoch [191], train_loss: 0.294209, val_loss: 0.286959, val_acc: 0.000159
+Epoch [192], train_loss: 0.293780, val_loss: 0.251052, val_acc: 0.006578
+Epoch [193], train_loss: 0.292864, val_loss: 0.305454, val_acc: 0.000041
+Epoch [194], train_loss: 0.292983, val_loss: 0.268957, val_acc: 0.001125
+Epoch [195], train_loss: 0.289026, val_loss: 0.268505, val_acc: 0.001287
+Epoch [196], train_loss: 0.289578, val_loss: 0.267335, val_acc: 0.001276
+Epoch [197], train_loss: 0.288411, val_loss: 0.301857, val_acc: 0.000041
+Epoch [198], train_loss: 0.287057, val_loss: 0.255880, val_acc: 0.003799
+Epoch [199], train_loss: 0.288318, val_loss: 0.276759, val_acc: 0.000407
+Epoch [200], train_loss: 0.287482, val_loss: 0.268818, val_acc: 0.000976
+Epoch [201], train_loss: 0.283233, val_loss: 0.296437, val_acc: 0.000072
+Epoch [202], train_loss: 0.287845, val_loss: 0.255371, val_acc: 0.004052
+Epoch [203], train_loss: 0.283348, val_loss: 0.257393, val_acc: 0.003151
+Epoch [204], train_loss: 0.285648, val_loss: 0.266121, val_acc: 0.001200
+Epoch [205], train_loss: 0.283281, val_loss: 0.286759, val_acc: 0.000130
+Epoch [206], train_loss: 0.283972, val_loss: 0.271909, val_acc: 0.000656
+Epoch [207], train_loss: 0.282500, val_loss: 0.233109, val_acc: 0.020171
+Epoch [208], train_loss: 0.282720, val_loss: 0.262163, val_acc: 0.001543
+Epoch [209], train_loss: 0.277050, val_loss: 0.299145, val_acc: 0.000051
+Epoch [210], train_loss: 0.281467, val_loss: 0.233053, val_acc: 0.017195
+Epoch [211], train_loss: 0.278121, val_loss: 0.269144, val_acc: 0.000786
+Epoch [212], train_loss: 0.278222, val_loss: 0.289072, val_acc: 0.000084
+Epoch [213], train_loss: 0.277197, val_loss: 0.284032, val_acc: 0.000170
+Epoch [214], train_loss: 0.274931, val_loss: 0.291498, val_acc: 0.000089
+Epoch [215], train_loss: 0.277378, val_loss: 0.267780, val_acc: 0.000852
+Epoch [216], train_loss: 0.272868, val_loss: 0.275565, val_acc: 0.000357
+Epoch [217], train_loss: 0.273930, val_loss: 0.230588, val_acc: 0.022493
+Epoch [218], train_loss: 0.271846, val_loss: 0.271974, val_acc: 0.000615
+Epoch [219], train_loss: 0.274978, val_loss: 0.241848, val_acc: 0.008314
+Epoch [220], train_loss: 0.272323, val_loss: 0.282296, val_acc: 0.000177
+Epoch [221], train_loss: 0.272894, val_loss: 0.247279, val_acc: 0.005080
+Epoch [222], train_loss: 0.271805, val_loss: 0.242228, val_acc: 0.007766
+Epoch [223], train_loss: 0.271825, val_loss: 0.238265, val_acc: 0.010445
+Epoch [224], train_loss: 0.271229, val_loss: 0.230287, val_acc: 0.017403
+Epoch [225], train_loss: 0.268233, val_loss: 0.242053, val_acc: 0.007758
+Epoch [226], train_loss: 0.268914, val_loss: 0.226194, val_acc: 0.023229
+Epoch [227], train_loss: 0.269525, val_loss: 0.243659, val_acc: 0.006373
+Epoch [228], train_loss: 0.263583, val_loss: 0.287187, val_acc: 0.000104
+Epoch [229], train_loss: 0.264938, val_loss: 0.245980, val_acc: 0.006113
+Epoch [230], train_loss: 0.266971, val_loss: 0.230229, val_acc: 0.017276
+Epoch [231], train_loss: 0.262807, val_loss: 0.252697, val_acc: 0.003237
+Epoch [232], train_loss: 0.267467, val_loss: 0.221651, val_acc: 0.023276
+Epoch [233], train_loss: 0.262371, val_loss: 0.238721, val_acc: 0.011578
+Epoch [234], train_loss: 0.263330, val_loss: 0.239321, val_acc: 0.008778
+Epoch [235], train_loss: 0.262060, val_loss: 0.240598, val_acc: 0.008402
+Epoch [236], train_loss: 0.259924, val_loss: 0.258223, val_acc: 0.001644
+Epoch [237], train_loss: 0.259622, val_loss: 0.282539, val_acc: 0.000136
+Epoch [238], train_loss: 0.261466, val_loss: 0.248414, val_acc: 0.004917
+Epoch [239], train_loss: 0.255411, val_loss: 0.270137, val_acc: 0.000463
+Epoch [240], train_loss: 0.260107, val_loss: 0.224135, val_acc: 0.018092
+Epoch [241], train_loss: 0.255563, val_loss: 0.238766, val_acc: 0.009017
+Epoch [242], train_loss: 0.255764, val_loss: 0.230584, val_acc: 0.014638
+Epoch [243], train_loss: 0.255663, val_loss: 0.223004, val_acc: 0.025461
+Epoch [244], train_loss: 0.256631, val_loss: 0.231526, val_acc: 0.014364
+Epoch [245], train_loss: 0.253259, val_loss: 0.224054, val_acc: 0.021905
+Epoch [246], train_loss: 0.250185, val_loss: 0.264855, val_acc: 0.000837
+Epoch [247], train_loss: 0.254089, val_loss: 0.203218, val_acc: 0.092602
+Epoch [248], train_loss: 0.250198, val_loss: 0.235429, val_acc: 0.011591
+Epoch [249], train_loss: 0.249036, val_loss: 0.248433, val_acc: 0.003759
+Epoch [250], train_loss: 0.249461, val_loss: 0.210154, val_acc: 0.049614
+Epoch [251], train_loss: 0.245883, val_loss: 0.243078, val_acc: 0.006274
+Epoch [252], train_loss: 0.249595, val_loss: 0.207478, val_acc: 0.055078
+Epoch [253], train_loss: 0.247055, val_loss: 0.235127, val_acc: 0.011520
+Epoch [254], train_loss: 0.245523, val_loss: 0.221096, val_acc: 0.027158
+Epoch [255], train_loss: 0.243874, val_loss: 0.246183, val_acc: 0.005038
+Epoch [256], train_loss: 0.243338, val_loss: 0.239214, val_acc: 0.007786
+Epoch [257], train_loss: 0.244525, val_loss: 0.259906, val_acc: 0.001096
+Epoch [258], train_loss: 0.243617, val_loss: 0.184497, val_acc: 0.145584
+Epoch [259], train_loss: 0.244508, val_loss: 0.177855, val_acc: 0.194426
+Epoch [260], train_loss: 0.239620, val_loss: 0.249579, val_acc: 0.002916
+Epoch [261], train_loss: 0.243098, val_loss: 0.200525, val_acc: 0.067570
+Epoch [262], train_loss: 0.240118, val_loss: 0.206844, val_acc: 0.063521
+Epoch [263], train_loss: 0.239903, val_loss: 0.208243, val_acc: 0.061687
+Epoch [264], train_loss: 0.237859, val_loss: 0.214119, val_acc: 0.030712
+Epoch [265], train_loss: 0.241269, val_loss: 0.203171, val_acc: 0.068511
+Epoch [266], train_loss: 0.237549, val_loss: 0.212152, val_acc: 0.040143
+Epoch [267], train_loss: 0.236793, val_loss: 0.209513, val_acc: 0.046735
+Epoch [268], train_loss: 0.239011, val_loss: 0.209624, val_acc: 0.039298
+Epoch [269], train_loss: 0.235313, val_loss: 0.181438, val_acc: 0.185899
+Epoch [270], train_loss: 0.232315, val_loss: 0.201151, val_acc: 0.084142
+Epoch [271], train_loss: 0.232258, val_loss: 0.184103, val_acc: 0.168284
+Epoch [272], train_loss: 0.231371, val_loss: 0.207695, val_acc: 0.043995
+Epoch [273], train_loss: 0.233028, val_loss: 0.173565, val_acc: 0.236666
+Epoch [274], train_loss: 0.232298, val_loss: 0.224142, val_acc: 0.014578
+Epoch [275], train_loss: 0.226169, val_loss: 0.239993, val_acc: 0.006212
+Epoch [276], train_loss: 0.226931, val_loss: 0.212679, val_acc: 0.048620
+Epoch [277], train_loss: 0.227392, val_loss: 0.219627, val_acc: 0.023510
+Epoch [278], train_loss: 0.229497, val_loss: 0.189389, val_acc: 0.133109
+Epoch [279], train_loss: 0.227280, val_loss: 0.170183, val_acc: 0.242807
+Epoch [280], train_loss: 0.231963, val_loss: 0.196284, val_acc: 0.067596
+Epoch [281], train_loss: 0.228038, val_loss: 0.186897, val_acc: 0.131167
+Epoch [282], train_loss: 0.227864, val_loss: 0.193010, val_acc: 0.076409
+Epoch [283], train_loss: 0.222711, val_loss: 0.189435, val_acc: 0.134893
+Epoch [284], train_loss: 0.226858, val_loss: 0.215869, val_acc: 0.021843
+Epoch [285], train_loss: 0.224417, val_loss: 0.205037, val_acc: 0.046986
+Epoch [286], train_loss: 0.223633, val_loss: 0.172832, val_acc: 0.223340
+Epoch [287], train_loss: 0.224504, val_loss: 0.162333, val_acc: 0.295237
+Epoch [288], train_loss: 0.220279, val_loss: 0.181393, val_acc: 0.158548
+Epoch [289], train_loss: 0.221887, val_loss: 0.206802, val_acc: 0.043995
+Epoch [290], train_loss: 0.220029, val_loss: 0.171529, val_acc: 0.204438
+Epoch [291], train_loss: 0.218107, val_loss: 0.189974, val_acc: 0.113968
+Epoch [292], train_loss: 0.217622, val_loss: 0.187795, val_acc: 0.121540
+Epoch [293], train_loss: 0.215712, val_loss: 0.209523, val_acc: 0.042450
+Epoch [294], train_loss: 0.217258, val_loss: 0.194868, val_acc: 0.090092
+Epoch [295], train_loss: 0.212126, val_loss: 0.214594, val_acc: 0.028653
+Epoch [296], train_loss: 0.215778, val_loss: 0.155335, val_acc: 0.461018
+Epoch [297], train_loss: 0.211999, val_loss: 0.225239, val_acc: 0.012230
+Epoch [298], train_loss: 0.216607, val_loss: 0.148585, val_acc: 0.502829
+Epoch [299], train_loss: 0.213307, val_loss: 0.178628, val_acc: 0.166517
+python3 ./UNet_V13.py  16279.97s user 16601.72s system 99% cpu 9:09:52.80 total
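Taken together, the two completed 300-epoch runs differ sharply: UNet_V12 (batch size 12, kernel 9) ends at val_acc of roughly 25.1 after 24 h 17 m, while UNet_V13 (batch size 10, kernel 7) peaks around val_acc 0.50 in 9 h 10 m. The per-epoch cost follows directly from the zsh time footers; a quick arithmetic check (plain arithmetic, no repo code involved):

# Hedged check of the per-epoch wall time implied by the `time` footers.
def seconds(hms: str) -> float:
    h, m, s = hms.split(":")
    return int(h) * 3600 + int(m) * 60 + float(s)

for run, total in [("UNet_V12", "24:16:51.30"), ("UNet_V13", "9:09:52.80")]:
    print(run, round(seconds(total) / 300, 1), "s/epoch")
# Prints roughly 291.4 s/epoch for V12 and 110.0 s/epoch for V13.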
diff --git a/UNet/Sim_logs/UNet_V15_25648398.log b/UNet/Sim_logs/UNet_V15_25648398.log
new file mode 100644
index 0000000000000000000000000000000000000000..9a11e64dd29df5af54c8bb108e693127efa6054e
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V15_25648398.log
@@ -0,0 +1,320 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 300
+batchsize: 32
+learning rate: 3e-05
+kernel size is: 7
+seed is: 621886705
+Epoch [0], train_loss: 0.214512, val_loss: 0.187477, val_acc: 4.682247
+Epoch [1], train_loss: 0.204507, val_loss: 0.153932, val_acc: 6.012501
+Epoch [2], train_loss: 0.193085, val_loss: 0.143270, val_acc: 8.210194
+Epoch [3], train_loss: 0.184067, val_loss: 0.143671, val_acc: 8.454381
+Epoch [4], train_loss: 0.178286, val_loss: 0.141907, val_acc: 8.545613
+Epoch [5], train_loss: 0.173388, val_loss: 0.141169, val_acc: 8.836565
+Epoch [6], train_loss: 0.169127, val_loss: 0.142926, val_acc: 8.846289
+Epoch [7], train_loss: 0.165882, val_loss: 0.138909, val_acc: 9.176203
+Epoch [8], train_loss: 0.162155, val_loss: 0.141454, val_acc: 9.260369
+Epoch [9], train_loss: 0.159240, val_loss: 0.140589, val_acc: 9.296802
+Epoch [10], train_loss: 0.155881, val_loss: 0.134206, val_acc: 9.778922
+Epoch [11], train_loss: 0.152649, val_loss: 0.133877, val_acc: 10.068307
+Epoch [12], train_loss: 0.150261, val_loss: 0.128972, val_acc: 10.252433
+Epoch [13], train_loss: 0.148163, val_loss: 0.123321, val_acc: 11.076769
+Epoch [14], train_loss: 0.145055, val_loss: 0.122155, val_acc: 11.043660
+Epoch [15], train_loss: 0.142461, val_loss: 0.123448, val_acc: 11.264566
+Epoch [16], train_loss: 0.140686, val_loss: 0.113968, val_acc: 12.126838
+Epoch [17], train_loss: 0.138693, val_loss: 0.120889, val_acc: 11.691317
+Epoch [18], train_loss: 0.136051, val_loss: 0.114922, val_acc: 12.182749
+Epoch [19], train_loss: 0.133650, val_loss: 0.110509, val_acc: 12.420129
+Epoch [20], train_loss: 0.132031, val_loss: 0.107905, val_acc: 13.009701
+Epoch [21], train_loss: 0.130601, val_loss: 0.107272, val_acc: 12.979741
+Epoch [22], train_loss: 0.128446, val_loss: 0.107584, val_acc: 12.975901
+Epoch [23], train_loss: 0.126468, val_loss: 0.105430, val_acc: 13.341612
+Epoch [24], train_loss: 0.124915, val_loss: 0.104719, val_acc: 13.519083
+Epoch [25], train_loss: 0.123083, val_loss: 0.102020, val_acc: 13.954125
+Epoch [26], train_loss: 0.121050, val_loss: 0.095102, val_acc: 14.733166
+Epoch [27], train_loss: 0.119698, val_loss: 0.097351, val_acc: 14.489087
+Epoch [28], train_loss: 0.118147, val_loss: 0.096059, val_acc: 14.938239
+Epoch [29], train_loss: 0.116212, val_loss: 0.103350, val_acc: 14.036307
+Epoch [30], train_loss: 0.114370, val_loss: 0.097027, val_acc: 14.703565
+Epoch [31], train_loss: 0.113229, val_loss: 0.093954, val_acc: 15.464749
+Epoch [32], train_loss: 0.111924, val_loss: 0.094672, val_acc: 15.416807
+Epoch [33], train_loss: 0.110348, val_loss: 0.098113, val_acc: 15.063859
+Epoch [34], train_loss: 0.108991, val_loss: 0.092326, val_acc: 15.768640
+Epoch [35], train_loss: 0.107507, val_loss: 0.092365, val_acc: 15.814393
+Epoch [36], train_loss: 0.106085, val_loss: 0.091486, val_acc: 15.722871
+Epoch [37], train_loss: 0.104517, val_loss: 0.093251, val_acc: 16.071335
+Epoch [38], train_loss: 0.103661, val_loss: 0.089934, val_acc: 16.573162
+Epoch [39], train_loss: 0.101771, val_loss: 0.089592, val_acc: 16.452057
+Epoch [40], train_loss: 0.100930, val_loss: 0.083913, val_acc: 17.320667
+Epoch [41], train_loss: 0.099614, val_loss: 0.086105, val_acc: 17.020794
+Epoch [42], train_loss: 0.098540, val_loss: 0.085785, val_acc: 17.148422
+Epoch [43], train_loss: 0.097228, val_loss: 0.084713, val_acc: 17.485941
+Epoch [44], train_loss: 0.096072, val_loss: 0.081368, val_acc: 18.275349
+Epoch [45], train_loss: 0.095216, val_loss: 0.079673, val_acc: 18.201534
+Epoch [46], train_loss: 0.093760, val_loss: 0.080263, val_acc: 18.435171
+Epoch [47], train_loss: 0.092640, val_loss: 0.080428, val_acc: 18.522026
+Epoch [48], train_loss: 0.091624, val_loss: 0.079682, val_acc: 18.683863
+Epoch [49], train_loss: 0.090305, val_loss: 0.077123, val_acc: 19.072182
+Epoch [50], train_loss: 0.089380, val_loss: 0.078907, val_acc: 18.913008
+Epoch [51], train_loss: 0.088431, val_loss: 0.078002, val_acc: 19.322641
+Epoch [52], train_loss: 0.087466, val_loss: 0.079245, val_acc: 18.847107
+Epoch [53], train_loss: 0.086739, val_loss: 0.077221, val_acc: 19.452036
+Epoch [54], train_loss: 0.085531, val_loss: 0.076482, val_acc: 20.313416
+Epoch [55], train_loss: 0.084759, val_loss: 0.074654, val_acc: 20.544968
+Epoch [56], train_loss: 0.083788, val_loss: 0.075438, val_acc: 19.714024
+Epoch [57], train_loss: 0.083031, val_loss: 0.073995, val_acc: 20.516907
+Epoch [58], train_loss: 0.082167, val_loss: 0.072212, val_acc: 20.939703
+Epoch [59], train_loss: 0.081460, val_loss: 0.072117, val_acc: 21.047981
+Epoch [60], train_loss: 0.080650, val_loss: 0.069975, val_acc: 21.552031
+Epoch [61], train_loss: 0.079910, val_loss: 0.070914, val_acc: 21.229048
+Epoch [62], train_loss: 0.078887, val_loss: 0.069491, val_acc: 21.556219
+Epoch [63], train_loss: 0.078404, val_loss: 0.068550, val_acc: 21.912312
+Epoch [64], train_loss: 0.077728, val_loss: 0.068308, val_acc: 21.953703
+Epoch [65], train_loss: 0.076785, val_loss: 0.067860, val_acc: 22.317198
+Epoch [66], train_loss: 0.076315, val_loss: 0.069624, val_acc: 22.163050
+Epoch [67], train_loss: 0.075711, val_loss: 0.069359, val_acc: 21.818130
+Epoch [68], train_loss: 0.075001, val_loss: 0.067147, val_acc: 22.915466
+Epoch [69], train_loss: 0.074369, val_loss: 0.067768, val_acc: 22.701344
+Epoch [70], train_loss: 0.073909, val_loss: 0.065976, val_acc: 23.256077
+Epoch [71], train_loss: 0.073220, val_loss: 0.066364, val_acc: 23.107592
+Epoch [72], train_loss: 0.072950, val_loss: 0.064924, val_acc: 23.292017
+Epoch [73], train_loss: 0.072155, val_loss: 0.065207, val_acc: 23.400295
+Epoch [74], train_loss: 0.071628, val_loss: 0.064773, val_acc: 23.566074
+Epoch [75], train_loss: 0.071394, val_loss: 0.066221, val_acc: 23.472216
+Epoch [76], train_loss: 0.070842, val_loss: 0.064734, val_acc: 23.627943
+Epoch [77], train_loss: 0.070595, val_loss: 0.064910, val_acc: 23.667610
+Epoch [78], train_loss: 0.069781, val_loss: 0.064783, val_acc: 23.737663
+Epoch [79], train_loss: 0.069656, val_loss: 0.065103, val_acc: 23.931768
+Epoch [80], train_loss: 0.069182, val_loss: 0.063441, val_acc: 24.137806
+Epoch [81], train_loss: 0.068896, val_loss: 0.064502, val_acc: 23.689814
+Epoch [82], train_loss: 0.068506, val_loss: 0.063261, val_acc: 24.243189
+Epoch [83], train_loss: 0.068320, val_loss: 0.064813, val_acc: 24.107489
+Epoch [84], train_loss: 0.067872, val_loss: 0.065226, val_acc: 24.054050
+Epoch [85], train_loss: 0.067872, val_loss: 0.063931, val_acc: 24.353127
+Epoch [86], train_loss: 0.067528, val_loss: 0.064289, val_acc: 24.303619
+Epoch [87], train_loss: 0.067099, val_loss: 0.064410, val_acc: 24.386433
+Epoch [88], train_loss: 0.066637, val_loss: 0.063106, val_acc: 24.505985
+Epoch [89], train_loss: 0.066688, val_loss: 0.062766, val_acc: 24.914490
+Epoch [90], train_loss: 0.066467, val_loss: 0.063061, val_acc: 24.743313
+Epoch [91], train_loss: 0.066101, val_loss: 0.063759, val_acc: 24.588673
+Epoch [92], train_loss: 0.065937, val_loss: 0.063012, val_acc: 24.854162
+Epoch [93], train_loss: 0.065927, val_loss: 0.062529, val_acc: 24.921272
+Epoch [94], train_loss: 0.065925, val_loss: 0.063009, val_acc: 24.800829
+Epoch [95], train_loss: 0.065594, val_loss: 0.063396, val_acc: 24.783348
+Epoch [96], train_loss: 0.065328, val_loss: 0.062568, val_acc: 24.883804
+Epoch [97], train_loss: 0.064983, val_loss: 0.062625, val_acc: 24.957716
+Epoch [98], train_loss: 0.065223, val_loss: 0.063292, val_acc: 24.964661
+Epoch [99], train_loss: 0.064856, val_loss: 0.064463, val_acc: 24.929462
+Epoch [100], train_loss: 0.065166, val_loss: 0.063620, val_acc: 25.017401
+Epoch [101], train_loss: 0.064611, val_loss: 0.062821, val_acc: 25.045458
+Epoch [102], train_loss: 0.064676, val_loss: 0.062472, val_acc: 24.971319
+Epoch [103], train_loss: 0.064550, val_loss: 0.062883, val_acc: 25.025282
+Epoch [104], train_loss: 0.064405, val_loss: 0.062506, val_acc: 25.063635
+Epoch [105], train_loss: 0.064335, val_loss: 0.062646, val_acc: 25.118507
+Epoch [106], train_loss: 0.064405, val_loss: 0.063563, val_acc: 25.028439
+Epoch [107], train_loss: 0.064433, val_loss: 0.061967, val_acc: 25.204155
+Epoch [108], train_loss: 0.064234, val_loss: 0.062799, val_acc: 25.178654
+Epoch [109], train_loss: 0.064251, val_loss: 0.063172, val_acc: 25.139015
+Epoch [110], train_loss: 0.064161, val_loss: 0.062377, val_acc: 25.180946
+Epoch [111], train_loss: 0.064224, val_loss: 0.062469, val_acc: 25.192411
+Epoch [112], train_loss: 0.064006, val_loss: 0.062025, val_acc: 25.215410
+Epoch [113], train_loss: 0.064135, val_loss: 0.062421, val_acc: 25.209976
+Epoch [114], train_loss: 0.064045, val_loss: 0.062301, val_acc: 25.184544
+Epoch [115], train_loss: 0.063855, val_loss: 0.063516, val_acc: 25.099146
+Epoch [116], train_loss: 0.063908, val_loss: 0.063784, val_acc: 25.143581
+Epoch [117], train_loss: 0.063722, val_loss: 0.062831, val_acc: 25.213774
+Epoch [118], train_loss: 0.064097, val_loss: 0.062273, val_acc: 25.233570
+Epoch [119], train_loss: 0.063907, val_loss: 0.062984, val_acc: 25.174665
+Epoch [120], train_loss: 0.063798, val_loss: 0.062997, val_acc: 25.194090
+Epoch [121], train_loss: 0.063638, val_loss: 0.062734, val_acc: 25.251001
+Epoch [122], train_loss: 0.063730, val_loss: 0.063243, val_acc: 25.208796
+Epoch [123], train_loss: 0.063718, val_loss: 0.063005, val_acc: 25.217896
+Epoch [124], train_loss: 0.063675, val_loss: 0.062495, val_acc: 25.258898
+Epoch [125], train_loss: 0.063403, val_loss: 0.062449, val_acc: 25.270401
+Epoch [126], train_loss: 0.063438, val_loss: 0.062345, val_acc: 25.272223
+Epoch [127], train_loss: 0.063454, val_loss: 0.062109, val_acc: 25.314283
+Epoch [128], train_loss: 0.063445, val_loss: 0.062560, val_acc: 25.272043
+Epoch [129], train_loss: 0.063238, val_loss: 0.061976, val_acc: 25.315069
+Epoch [130], train_loss: 0.063067, val_loss: 0.062129, val_acc: 25.284983
+Epoch [131], train_loss: 0.064603, val_loss: 0.071687, val_acc: 24.362026
+Epoch [132], train_loss: 0.065197, val_loss: 0.076895, val_acc: 23.551723
+Epoch [133], train_loss: 0.064853, val_loss: 0.076706, val_acc: 23.761330
+Epoch [134], train_loss: 0.065793, val_loss: 0.068231, val_acc: 24.608536
+Epoch [135], train_loss: 0.064721, val_loss: 0.061469, val_acc: 25.264048
+Epoch [136], train_loss: 0.064262, val_loss: 0.060808, val_acc: 25.415501
+Epoch [137], train_loss: 0.064409, val_loss: 0.062518, val_acc: 25.144537
+Epoch [138], train_loss: 0.064450, val_loss: 0.061293, val_acc: 25.408251
+Epoch [139], train_loss: 0.064702, val_loss: 0.062446, val_acc: 25.291451
+Epoch [140], train_loss: 0.064036, val_loss: 0.060889, val_acc: 25.425533
+Epoch [141], train_loss: 0.064154, val_loss: 0.061527, val_acc: 25.382561
+Epoch [142], train_loss: 0.064163, val_loss: 0.061123, val_acc: 25.370550
+Epoch [143], train_loss: 0.064212, val_loss: 0.061449, val_acc: 25.388645
+Epoch [144], train_loss: 0.063534, val_loss: 0.060731, val_acc: 25.421282
+Epoch [145], train_loss: 0.063639, val_loss: 0.060984, val_acc: 25.436773
+Epoch [146], train_loss: 0.063400, val_loss: 0.060812, val_acc: 25.437506
+Epoch [147], train_loss: 0.063539, val_loss: 0.060534, val_acc: 25.442270
+Epoch [148], train_loss: 0.063512, val_loss: 0.060906, val_acc: 25.436571
+Epoch [149], train_loss: 0.063249, val_loss: 0.060824, val_acc: 25.442419
+Epoch [150], train_loss: 0.063101, val_loss: 0.060581, val_acc: 25.458778
+Epoch [151], train_loss: 0.063167, val_loss: 0.061031, val_acc: 25.440580
+Epoch [152], train_loss: 0.063082, val_loss: 0.060647, val_acc: 25.406298
+Epoch [153], train_loss: 0.062933, val_loss: 0.060888, val_acc: 25.438669
+Epoch [154], train_loss: 0.062942, val_loss: 0.060559, val_acc: 25.440842
+Epoch [155], train_loss: 0.063098, val_loss: 0.060854, val_acc: 25.452734
+Epoch [156], train_loss: 0.062875, val_loss: 0.060934, val_acc: 25.457329
+Epoch [157], train_loss: 0.063028, val_loss: 0.061058, val_acc: 25.438961
+Epoch [158], train_loss: 0.062711, val_loss: 0.060958, val_acc: 25.439878
+Epoch [159], train_loss: 0.062772, val_loss: 0.060743, val_acc: 25.458431
+Epoch [160], train_loss: 0.063081, val_loss: 0.063209, val_acc: 25.408518
+Epoch [161], train_loss: 0.062949, val_loss: 0.062384, val_acc: 25.320555
+Epoch [162], train_loss: 0.062800, val_loss: 0.061946, val_acc: 25.386189
+Epoch [163], train_loss: 0.062526, val_loss: 0.062165, val_acc: 25.414232
+Epoch [164], train_loss: 0.062386, val_loss: 0.061838, val_acc: 25.397297
+Epoch [165], train_loss: 0.062267, val_loss: 0.061754, val_acc: 25.358530
+Epoch [166], train_loss: 0.062688, val_loss: 0.061436, val_acc: 25.451172
+Epoch [167], train_loss: 0.062636, val_loss: 0.061528, val_acc: 25.429850
+Epoch [168], train_loss: 0.062432, val_loss: 0.062332, val_acc: 25.431797
+Epoch [169], train_loss: 0.062405, val_loss: 0.061518, val_acc: 25.418449
+Epoch [170], train_loss: 0.062616, val_loss: 0.062688, val_acc: 25.369709
+Epoch [171], train_loss: 0.062213, val_loss: 0.061660, val_acc: 25.436274
+Epoch [172], train_loss: 0.062650, val_loss: 0.061646, val_acc: 25.392746
+Epoch [173], train_loss: 0.062164, val_loss: 0.061970, val_acc: 25.447544
+Epoch [174], train_loss: 0.062382, val_loss: 0.061702, val_acc: 25.389153
+Epoch [175], train_loss: 0.062302, val_loss: 0.061477, val_acc: 25.407866
+Epoch [176], train_loss: 0.062217, val_loss: 0.061659, val_acc: 25.384409
+Epoch [177], train_loss: 0.062359, val_loss: 0.062446, val_acc: 25.414927
+Epoch [178], train_loss: 0.062186, val_loss: 0.061812, val_acc: 25.383636
+Epoch [179], train_loss: 0.062233, val_loss: 0.061767, val_acc: 25.414467
+Epoch [180], train_loss: 0.062351, val_loss: 0.062186, val_acc: 25.425909
+Epoch [181], train_loss: 0.062050, val_loss: 0.061731, val_acc: 25.384783
+Epoch [182], train_loss: 0.062062, val_loss: 0.061314, val_acc: 25.447243
+Epoch [183], train_loss: 0.062272, val_loss: 0.061633, val_acc: 25.414482
+Epoch [184], train_loss: 0.062306, val_loss: 0.061876, val_acc: 25.422178
+Epoch [185], train_loss: 0.062241, val_loss: 0.061590, val_acc: 25.413662
+Epoch [186], train_loss: 0.062045, val_loss: 0.061218, val_acc: 25.391821
+Epoch [187], train_loss: 0.062078, val_loss: 0.061464, val_acc: 25.396624
+Epoch [188], train_loss: 0.062023, val_loss: 0.061455, val_acc: 25.427750
+Epoch [189], train_loss: 0.062468, val_loss: 0.061996, val_acc: 25.406429
+Epoch [190], train_loss: 0.061928, val_loss: 0.061572, val_acc: 25.407293
+Epoch [191], train_loss: 0.062195, val_loss: 0.061691, val_acc: 25.419250
+Epoch [192], train_loss: 0.061968, val_loss: 0.061490, val_acc: 25.355995
+Epoch [193], train_loss: 0.062111, val_loss: 0.061658, val_acc: 25.429514
+Epoch [194], train_loss: 0.061990, val_loss: 0.061596, val_acc: 25.427971
+Epoch [195], train_loss: 0.062015, val_loss: 0.061605, val_acc: 25.427586
+Epoch [196], train_loss: 0.061952, val_loss: 0.061606, val_acc: 25.439156
+Epoch [197], train_loss: 0.062019, val_loss: 0.061664, val_acc: 25.330345
+Epoch [198], train_loss: 0.061960, val_loss: 0.061721, val_acc: 25.405354
+Epoch [199], train_loss: 0.061987, val_loss: 0.061467, val_acc: 25.407782
+Epoch [200], train_loss: 0.061963, val_loss: 0.061375, val_acc: 25.451382
+Epoch [201], train_loss: 0.061797, val_loss: 0.061202, val_acc: 25.415634
+Epoch [202], train_loss: 0.061995, val_loss: 0.061598, val_acc: 25.446730
+Epoch [203], train_loss: 0.061731, val_loss: 0.061373, val_acc: 25.424488
+Epoch [204], train_loss: 0.061843, val_loss: 0.061524, val_acc: 25.463943
+Epoch [205], train_loss: 0.061844, val_loss: 0.061347, val_acc: 25.445183
+Epoch [206], train_loss: 0.061854, val_loss: 0.062010, val_acc: 25.476536
+Epoch [207], train_loss: 0.061724, val_loss: 0.061378, val_acc: 25.437059
+Epoch [208], train_loss: 0.061814, val_loss: 0.061275, val_acc: 25.461065
+Epoch [209], train_loss: 0.061722, val_loss: 0.061437, val_acc: 25.441647
+Epoch [210], train_loss: 0.061830, val_loss: 0.061549, val_acc: 25.379433
+Epoch [211], train_loss: 0.061828, val_loss: 0.061388, val_acc: 25.475929
+Epoch [212], train_loss: 0.061689, val_loss: 0.061371, val_acc: 25.464521
+Epoch [213], train_loss: 0.061418, val_loss: 0.061429, val_acc: 25.459610
+Epoch [214], train_loss: 0.061982, val_loss: 0.062605, val_acc: 25.411688
+Epoch [215], train_loss: 0.061688, val_loss: 0.061645, val_acc: 25.397480
+Epoch [216], train_loss: 0.061695, val_loss: 0.061368, val_acc: 25.451176
+Epoch [217], train_loss: 0.061556, val_loss: 0.061366, val_acc: 25.434586
+Epoch [218], train_loss: 0.061748, val_loss: 0.061297, val_acc: 25.440933
+Epoch [219], train_loss: 0.061651, val_loss: 0.061356, val_acc: 25.432768
+Epoch [220], train_loss: 0.061943, val_loss: 0.062157, val_acc: 25.453321
+Epoch [221], train_loss: 0.061629, val_loss: 0.061915, val_acc: 25.406897
+Epoch [222], train_loss: 0.061574, val_loss: 0.061483, val_acc: 25.427181
+Epoch [223], train_loss: 0.061783, val_loss: 0.061763, val_acc: 25.426538
+Epoch [224], train_loss: 0.061755, val_loss: 0.061633, val_acc: 25.446236
+Epoch [225], train_loss: 0.061501, val_loss: 0.061349, val_acc: 25.392248
+Epoch [226], train_loss: 0.061587, val_loss: 0.061415, val_acc: 25.413420
+Epoch [227], train_loss: 0.061484, val_loss: 0.061568, val_acc: 25.412910
+Epoch [228], train_loss: 0.061569, val_loss: 0.061670, val_acc: 25.433643
+Epoch [229], train_loss: 0.061609, val_loss: 0.061431, val_acc: 25.438883
+Epoch [230], train_loss: 0.061716, val_loss: 0.061635, val_acc: 25.448380
+Epoch [231], train_loss: 0.061574, val_loss: 0.061499, val_acc: 25.402882
+Epoch [232], train_loss: 0.061364, val_loss: 0.061249, val_acc: 25.401731
+Epoch [233], train_loss: 0.061633, val_loss: 0.061814, val_acc: 25.433445
+Epoch [234], train_loss: 0.061409, val_loss: 0.061677, val_acc: 25.411070
+Epoch [235], train_loss: 0.061505, val_loss: 0.061288, val_acc: 25.404964
+Epoch [236], train_loss: 0.061908, val_loss: 0.062048, val_acc: 25.480164
+Epoch [237], train_loss: 0.061393, val_loss: 0.061329, val_acc: 25.466105
+Epoch [238], train_loss: 0.061374, val_loss: 0.061556, val_acc: 25.446487
+Epoch [239], train_loss: 0.061558, val_loss: 0.061444, val_acc: 25.456516
+Epoch [240], train_loss: 0.061464, val_loss: 0.061209, val_acc: 25.426186
+Epoch [241], train_loss: 0.061401, val_loss: 0.061355, val_acc: 25.416681
+Epoch [242], train_loss: 0.061459, val_loss: 0.061113, val_acc: 25.385975
+Epoch [243], train_loss: 0.061519, val_loss: 0.061268, val_acc: 25.478928
+Epoch [244], train_loss: 0.061388, val_loss: 0.061523, val_acc: 25.423872
+Epoch [245], train_loss: 0.061323, val_loss: 0.061540, val_acc: 25.455898
+Epoch [246], train_loss: 0.061273, val_loss: 0.061378, val_acc: 25.482986
+Epoch [247], train_loss: 0.061555, val_loss: 0.061106, val_acc: 25.463915
+Epoch [248], train_loss: 0.061552, val_loss: 0.061336, val_acc: 25.467468
+Epoch [249], train_loss: 0.061353, val_loss: 0.061260, val_acc: 25.476038
+Epoch [250], train_loss: 0.061173, val_loss: 0.061442, val_acc: 25.456488
+Epoch [251], train_loss: 0.061183, val_loss: 0.061065, val_acc: 25.448872
+Epoch [252], train_loss: 0.061521, val_loss: 0.061317, val_acc: 25.429003
+Epoch [253], train_loss: 0.061214, val_loss: 0.061381, val_acc: 25.445488
+Epoch [254], train_loss: 0.061438, val_loss: 0.061522, val_acc: 25.417393
+Epoch [255], train_loss: 0.061314, val_loss: 0.061642, val_acc: 25.465427
+Epoch [256], train_loss: 0.061272, val_loss: 0.061480, val_acc: 25.456659
+Epoch [257], train_loss: 0.061207, val_loss: 0.060900, val_acc: 25.438862
+Epoch [258], train_loss: 0.061287, val_loss: 0.060973, val_acc: 25.430468
+Epoch [259], train_loss: 0.061114, val_loss: 0.061612, val_acc: 25.470102
+Epoch [260], train_loss: 0.061120, val_loss: 0.061037, val_acc: 25.458408
+Epoch [261], train_loss: 0.061063, val_loss: 0.060805, val_acc: 25.452116
+Epoch [262], train_loss: 0.061215, val_loss: 0.061344, val_acc: 25.455034
+Epoch [263], train_loss: 0.061098, val_loss: 0.060875, val_acc: 25.380371
+Epoch [264], train_loss: 0.061275, val_loss: 0.061548, val_acc: 25.479805
+Epoch [265], train_loss: 0.061052, val_loss: 0.060830, val_acc: 25.439276
+Epoch [266], train_loss: 0.061290, val_loss: 0.060837, val_acc: 25.486549
+Epoch [267], train_loss: 0.061198, val_loss: 0.061182, val_acc: 25.497404
+Epoch [268], train_loss: 0.061352, val_loss: 0.061203, val_acc: 25.463902
+Epoch [269], train_loss: 0.061155, val_loss: 0.061199, val_acc: 25.418228
+Epoch [270], train_loss: 0.060998, val_loss: 0.061086, val_acc: 25.414341
+Epoch [271], train_loss: 0.061335, val_loss: 0.061143, val_acc: 25.454334
+Epoch [272], train_loss: 0.061179, val_loss: 0.061088, val_acc: 25.465967
+Epoch [273], train_loss: 0.061281, val_loss: 0.060651, val_acc: 25.429815
+Epoch [274], train_loss: 0.061347, val_loss: 0.060645, val_acc: 25.476549
+Epoch [275], train_loss: 0.061356, val_loss: 0.060771, val_acc: 25.451300
+Epoch [276], train_loss: 0.061140, val_loss: 0.060674, val_acc: 25.398293
+Epoch [277], train_loss: 0.060924, val_loss: 0.061454, val_acc: 25.389860
+Epoch [278], train_loss: 0.060956, val_loss: 0.060980, val_acc: 25.445610
+Epoch [279], train_loss: 0.061062, val_loss: 0.060846, val_acc: 25.433872
+Epoch [280], train_loss: 0.060958, val_loss: 0.060915, val_acc: 25.410597
+Epoch [281], train_loss: 0.061139, val_loss: 0.060933, val_acc: 25.412109
+Epoch [282], train_loss: 0.061128, val_loss: 0.060882, val_acc: 25.484734
+Epoch [283], train_loss: 0.061080, val_loss: 0.061314, val_acc: 25.495663
+Epoch [284], train_loss: 0.061162, val_loss: 0.060748, val_acc: 25.483822
+Epoch [285], train_loss: 0.061028, val_loss: 0.060831, val_acc: 25.449846
+Epoch [286], train_loss: 0.061208, val_loss: 0.060600, val_acc: 25.464073
+Epoch [287], train_loss: 0.061087, val_loss: 0.060491, val_acc: 25.435928
+Epoch [288], train_loss: 0.060819, val_loss: 0.060781, val_acc: 25.467493
+Epoch [289], train_loss: 0.060988, val_loss: 0.060861, val_acc: 25.505167
+Epoch [290], train_loss: 0.061061, val_loss: 0.060650, val_acc: 25.496855
+Epoch [291], train_loss: 0.060969, val_loss: 0.060807, val_acc: 25.479935
+Epoch [292], train_loss: 0.060969, val_loss: 0.061019, val_acc: 25.487928
+Epoch [293], train_loss: 0.060855, val_loss: 0.060699, val_acc: 25.471088
+Epoch [294], train_loss: 0.060975, val_loss: 0.060898, val_acc: 25.427866
+Epoch [295], train_loss: 0.060806, val_loss: 0.060593, val_acc: 25.489983
+Epoch [296], train_loss: 0.060983, val_loss: 0.060515, val_acc: 25.458511
+Epoch [297], train_loss: 0.060922, val_loss: 0.060884, val_acc: 25.485563
+Epoch [298], train_loss: 0.060943, val_loss: 0.060637, val_acc: 25.479918
+Epoch [299], train_loss: 0.060787, val_loss: 0.060686, val_acc: 25.427811
+python3 ./UNet_V15.py  6875.45s user 6615.49s system 99% cpu 3:45:19.31 total
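Since every Sim_logs/*.log added above emits the same fixed per-epoch line format, the training curves can be recovered mechanically. Below is a minimal, hypothetical helper sketch (not part of this repository; the Sim_logs/ path and UNet_V*.log glob pattern are assumptions based on the file names in this diff) for extracting the metrics:

import re
from pathlib import Path

# Matches the per-epoch lines emitted by the UNet_V*.py scripts, e.g.
# "Epoch [42], train_loss: 0.098540, val_loss: 0.085785, val_acc: 17.148422"
EPOCH_RE = re.compile(
    r"Epoch \[(\d+)\], train_loss: ([\d.]+), "
    r"val_loss: ([\d.]+), val_acc: ([\d.]+)"
)

def parse_log(path):
    """Return (epoch, train_loss, val_loss, val_acc) tuples from one log."""
    rows = []
    for line in Path(path).read_text().splitlines():
        m = EPOCH_RE.match(line)
        if m:
            e, tl, vl, va = m.groups()
            rows.append((int(e), float(tl), float(vl), float(va)))
    return rows

if __name__ == "__main__":
    # Summarise each training log: epoch count and best validation accuracy.
    for log in sorted(Path("Sim_logs").glob("UNet_V*.log")):
        rows = parse_log(log)
        if rows:
            best = max(rows, key=lambda r: r[3])
            print(f"{log.name}: {len(rows)} epochs, "
                  f"best val_acc {best[3]:.3f} @ epoch {best[0]}")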
diff --git a/UNet/Sim_logs/UNet_V15_25678820.log b/UNet/Sim_logs/UNet_V15_25678820.log
new file mode 100644
index 0000000000000000000000000000000000000000..80812ecb0f0d5b5d95445b8681f2315e3971bbdf
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V15_25678820.log
@@ -0,0 +1,320 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 300
+batchsize: 32
+learning rate: 3e-05
+kernel size is: 7
+seed is: 2199910834
+Epoch [0], train_loss: 0.157380, val_loss: 0.117250, val_acc: 10.432413
+Epoch [1], train_loss: 0.146083, val_loss: 0.102137, val_acc: 12.537807
+Epoch [2], train_loss: 0.135836, val_loss: 0.095803, val_acc: 13.580077
+Epoch [3], train_loss: 0.128564, val_loss: 0.098410, val_acc: 13.126359
+Epoch [4], train_loss: 0.124251, val_loss: 0.097964, val_acc: 13.146672
+Epoch [5], train_loss: 0.121025, val_loss: 0.097823, val_acc: 13.161149
+Epoch [6], train_loss: 0.118360, val_loss: 0.096139, val_acc: 13.381820
+Epoch [7], train_loss: 0.116058, val_loss: 0.096515, val_acc: 13.325438
+Epoch [8], train_loss: 0.113608, val_loss: 0.096194, val_acc: 13.403194
+Epoch [9], train_loss: 0.111248, val_loss: 0.093872, val_acc: 13.753829
+Epoch [10], train_loss: 0.109318, val_loss: 0.094674, val_acc: 13.626279
+Epoch [11], train_loss: 0.106907, val_loss: 0.093169, val_acc: 13.903856
+Epoch [12], train_loss: 0.105061, val_loss: 0.091782, val_acc: 14.173604
+Epoch [13], train_loss: 0.103131, val_loss: 0.089959, val_acc: 14.685189
+Epoch [14], train_loss: 0.101286, val_loss: 0.090499, val_acc: 14.560270
+Epoch [15], train_loss: 0.099405, val_loss: 0.088619, val_acc: 15.032706
+Epoch [16], train_loss: 0.097705, val_loss: 0.088927, val_acc: 15.239760
+Epoch [17], train_loss: 0.096150, val_loss: 0.084933, val_acc: 16.063675
+Epoch [18], train_loss: 0.094500, val_loss: 0.085695, val_acc: 16.142694
+Epoch [19], train_loss: 0.093010, val_loss: 0.081338, val_acc: 16.902172
+Epoch [20], train_loss: 0.091669, val_loss: 0.082908, val_acc: 16.637043
+Epoch [21], train_loss: 0.090194, val_loss: 0.080893, val_acc: 17.364742
+Epoch [22], train_loss: 0.088788, val_loss: 0.079887, val_acc: 17.626421
+Epoch [23], train_loss: 0.087467, val_loss: 0.079770, val_acc: 17.907120
+Epoch [24], train_loss: 0.086331, val_loss: 0.075875, val_acc: 18.616716
+Epoch [25], train_loss: 0.084972, val_loss: 0.076136, val_acc: 18.924250
+Epoch [26], train_loss: 0.084035, val_loss: 0.078194, val_acc: 18.814463
+Epoch [27], train_loss: 0.082867, val_loss: 0.072495, val_acc: 19.781271
+Epoch [28], train_loss: 0.081970, val_loss: 0.072911, val_acc: 20.423105
+Epoch [29], train_loss: 0.081166, val_loss: 0.073880, val_acc: 20.139250
+Epoch [30], train_loss: 0.080075, val_loss: 0.072874, val_acc: 20.394693
+Epoch [31], train_loss: 0.079399, val_loss: 0.071856, val_acc: 20.781494
+Epoch [32], train_loss: 0.078588, val_loss: 0.070808, val_acc: 20.841953
+Epoch [33], train_loss: 0.078323, val_loss: 0.070966, val_acc: 21.132822
+Epoch [34], train_loss: 0.077376, val_loss: 0.069703, val_acc: 21.523916
+Epoch [35], train_loss: 0.076474, val_loss: 0.069282, val_acc: 22.102262
+Epoch [36], train_loss: 0.075983, val_loss: 0.070646, val_acc: 21.989868
+Epoch [37], train_loss: 0.075119, val_loss: 0.069266, val_acc: 22.797651
+Epoch [38], train_loss: 0.074667, val_loss: 0.067347, val_acc: 22.635202
+Epoch [39], train_loss: 0.074291, val_loss: 0.068050, val_acc: 22.712029
+Epoch [40], train_loss: 0.073598, val_loss: 0.068213, val_acc: 22.735540
+Epoch [41], train_loss: 0.073009, val_loss: 0.066605, val_acc: 23.253105
+Epoch [42], train_loss: 0.072455, val_loss: 0.066668, val_acc: 23.503990
+Epoch [43], train_loss: 0.072150, val_loss: 0.068468, val_acc: 23.303886
+Epoch [44], train_loss: 0.071795, val_loss: 0.066722, val_acc: 23.569872
+Epoch [45], train_loss: 0.071305, val_loss: 0.065428, val_acc: 24.086452
+Epoch [46], train_loss: 0.070964, val_loss: 0.065313, val_acc: 24.134108
+Epoch [47], train_loss: 0.070322, val_loss: 0.065029, val_acc: 24.371841
+Epoch [48], train_loss: 0.069981, val_loss: 0.064892, val_acc: 24.341751
+Epoch [49], train_loss: 0.069801, val_loss: 0.065424, val_acc: 24.133171
+Epoch [50], train_loss: 0.069606, val_loss: 0.064016, val_acc: 24.337107
+Epoch [51], train_loss: 0.069425, val_loss: 0.064643, val_acc: 24.488834
+Epoch [52], train_loss: 0.068862, val_loss: 0.064705, val_acc: 24.634754
+Epoch [53], train_loss: 0.068628, val_loss: 0.064004, val_acc: 24.901066
+Epoch [54], train_loss: 0.068314, val_loss: 0.065282, val_acc: 24.613474
+Epoch [55], train_loss: 0.068757, val_loss: 0.063983, val_acc: 24.938080
+Epoch [56], train_loss: 0.068014, val_loss: 0.063511, val_acc: 25.079432
+Epoch [57], train_loss: 0.067706, val_loss: 0.063596, val_acc: 25.216507
+Epoch [58], train_loss: 0.067030, val_loss: 0.063488, val_acc: 25.244486
+Epoch [59], train_loss: 0.066801, val_loss: 0.063879, val_acc: 24.798466
+Epoch [60], train_loss: 0.066842, val_loss: 0.063318, val_acc: 25.194384
+Epoch [61], train_loss: 0.066737, val_loss: 0.063627, val_acc: 25.162683
+Epoch [62], train_loss: 0.066456, val_loss: 0.062469, val_acc: 25.493280
+Epoch [63], train_loss: 0.066205, val_loss: 0.062259, val_acc: 25.441355
+Epoch [64], train_loss: 0.066239, val_loss: 0.062323, val_acc: 25.320858
+Epoch [65], train_loss: 0.065989, val_loss: 0.062266, val_acc: 25.407030
+Epoch [66], train_loss: 0.065977, val_loss: 0.061794, val_acc: 25.493366
+Epoch [67], train_loss: 0.065868, val_loss: 0.061924, val_acc: 25.529289
+Epoch [68], train_loss: 0.065375, val_loss: 0.061947, val_acc: 25.579130
+Epoch [69], train_loss: 0.065740, val_loss: 0.063017, val_acc: 25.474098
+Epoch [70], train_loss: 0.065551, val_loss: 0.063464, val_acc: 25.541735
+Epoch [71], train_loss: 0.065301, val_loss: 0.061892, val_acc: 25.692558
+Epoch [72], train_loss: 0.065708, val_loss: 0.062659, val_acc: 25.605089
+Epoch [73], train_loss: 0.065092, val_loss: 0.061786, val_acc: 25.752781
+Epoch [74], train_loss: 0.065025, val_loss: 0.062828, val_acc: 25.563627
+Epoch [75], train_loss: 0.065310, val_loss: 0.062373, val_acc: 25.578400
+Epoch [76], train_loss: 0.064985, val_loss: 0.063019, val_acc: 25.644169
+Epoch [77], train_loss: 0.064892, val_loss: 0.062396, val_acc: 25.707342
+Epoch [78], train_loss: 0.064716, val_loss: 0.062148, val_acc: 25.721197
+Epoch [79], train_loss: 0.064727, val_loss: 0.062423, val_acc: 25.704599
+Epoch [80], train_loss: 0.064678, val_loss: 0.062474, val_acc: 25.706469
+Epoch [81], train_loss: 0.064764, val_loss: 0.062597, val_acc: 25.735456
+Epoch [82], train_loss: 0.064438, val_loss: 0.061794, val_acc: 25.749170
+Epoch [83], train_loss: 0.064571, val_loss: 0.062237, val_acc: 25.712233
+Epoch [84], train_loss: 0.064202, val_loss: 0.061173, val_acc: 25.810139
+Epoch [85], train_loss: 0.064175, val_loss: 0.061562, val_acc: 25.811954
+Epoch [86], train_loss: 0.064412, val_loss: 0.061539, val_acc: 25.810654
+Epoch [87], train_loss: 0.064217, val_loss: 0.061524, val_acc: 25.793770
+Epoch [88], train_loss: 0.063974, val_loss: 0.061660, val_acc: 25.807642
+Epoch [89], train_loss: 0.063695, val_loss: 0.061392, val_acc: 25.839903
+Epoch [90], train_loss: 0.064171, val_loss: 0.062011, val_acc: 25.694704
+Epoch [91], train_loss: 0.063579, val_loss: 0.061124, val_acc: 25.864183
+Epoch [92], train_loss: 0.063832, val_loss: 0.061631, val_acc: 25.807083
+Epoch [93], train_loss: 0.063958, val_loss: 0.061011, val_acc: 25.868889
+Epoch [94], train_loss: 0.063873, val_loss: 0.061374, val_acc: 25.826662
+Epoch [95], train_loss: 0.063492, val_loss: 0.061120, val_acc: 25.846340
+Epoch [96], train_loss: 0.063694, val_loss: 0.061591, val_acc: 25.826962
+Epoch [97], train_loss: 0.063572, val_loss: 0.060891, val_acc: 25.892015
+Epoch [98], train_loss: 0.063733, val_loss: 0.062548, val_acc: 25.677332
+Epoch [99], train_loss: 0.063305, val_loss: 0.061365, val_acc: 25.850439
+Epoch [100], train_loss: 0.063439, val_loss: 0.062072, val_acc: 25.766123
+Epoch [101], train_loss: 0.063214, val_loss: 0.061247, val_acc: 25.870298
+Epoch [102], train_loss: 0.063301, val_loss: 0.061171, val_acc: 25.879204
+Epoch [103], train_loss: 0.063259, val_loss: 0.061589, val_acc: 25.845522
+Epoch [104], train_loss: 0.063377, val_loss: 0.060757, val_acc: 25.882006
+Epoch [105], train_loss: 0.063358, val_loss: 0.060710, val_acc: 25.927376
+Epoch [106], train_loss: 0.063167, val_loss: 0.060505, val_acc: 25.939100
+Epoch [107], train_loss: 0.063059, val_loss: 0.061115, val_acc: 25.874729
+Epoch [108], train_loss: 0.063165, val_loss: 0.060405, val_acc: 25.955454
+Epoch [109], train_loss: 0.063174, val_loss: 0.060533, val_acc: 25.923393
+Epoch [110], train_loss: 0.062965, val_loss: 0.060436, val_acc: 25.915901
+Epoch [111], train_loss: 0.062872, val_loss: 0.060511, val_acc: 25.921656
+Epoch [112], train_loss: 0.062950, val_loss: 0.060247, val_acc: 25.956305
+Epoch [113], train_loss: 0.062948, val_loss: 0.060505, val_acc: 25.942123
+Epoch [114], train_loss: 0.062926, val_loss: 0.060405, val_acc: 25.944590
+Epoch [115], train_loss: 0.062799, val_loss: 0.060408, val_acc: 25.951038
+Epoch [116], train_loss: 0.062829, val_loss: 0.060256, val_acc: 25.956650
+Epoch [117], train_loss: 0.062925, val_loss: 0.060922, val_acc: 25.879738
+Epoch [118], train_loss: 0.062607, val_loss: 0.060164, val_acc: 25.911777
+Epoch [119], train_loss: 0.063164, val_loss: 0.060252, val_acc: 25.948145
+Epoch [120], train_loss: 0.062684, val_loss: 0.060109, val_acc: 25.953802
+Epoch [121], train_loss: 0.062665, val_loss: 0.060343, val_acc: 25.953333
+Epoch [122], train_loss: 0.062913, val_loss: 0.060066, val_acc: 25.940706
+Epoch [123], train_loss: 0.062616, val_loss: 0.060167, val_acc: 25.955675
+Epoch [124], train_loss: 0.062626, val_loss: 0.059898, val_acc: 26.018400
+Epoch [125], train_loss: 0.062743, val_loss: 0.060147, val_acc: 25.993614
+Epoch [126], train_loss: 0.062598, val_loss: 0.059864, val_acc: 26.000212
+Epoch [127], train_loss: 0.062453, val_loss: 0.059921, val_acc: 26.021612
+Epoch [128], train_loss: 0.062282, val_loss: 0.060225, val_acc: 25.971550
+Epoch [129], train_loss: 0.062494, val_loss: 0.060071, val_acc: 25.986273
+Epoch [130], train_loss: 0.062420, val_loss: 0.059968, val_acc: 26.003193
+Epoch [131], train_loss: 0.062452, val_loss: 0.059944, val_acc: 25.991583
+Epoch [132], train_loss: 0.062457, val_loss: 0.059803, val_acc: 26.012323
+Epoch [133], train_loss: 0.062549, val_loss: 0.060904, val_acc: 25.920750
+Epoch [134], train_loss: 0.062731, val_loss: 0.060496, val_acc: 25.958662
+Epoch [135], train_loss: 0.062181, val_loss: 0.060240, val_acc: 25.971834
+Epoch [136], train_loss: 0.062254, val_loss: 0.060023, val_acc: 25.969713
+Epoch [137], train_loss: 0.062720, val_loss: 0.060350, val_acc: 25.969784
+Epoch [138], train_loss: 0.062718, val_loss: 0.060540, val_acc: 25.977648
+Epoch [139], train_loss: 0.062624, val_loss: 0.060962, val_acc: 25.893267
+Epoch [140], train_loss: 0.062376, val_loss: 0.060260, val_acc: 25.991743
+Epoch [141], train_loss: 0.062169, val_loss: 0.060293, val_acc: 25.985497
+Epoch [142], train_loss: 0.062299, val_loss: 0.060264, val_acc: 25.956593
+Epoch [143], train_loss: 0.062272, val_loss: 0.060052, val_acc: 26.022915
+Epoch [144], train_loss: 0.062268, val_loss: 0.060115, val_acc: 26.018883
+Epoch [145], train_loss: 0.062176, val_loss: 0.060274, val_acc: 25.957609
+Epoch [146], train_loss: 0.062398, val_loss: 0.060188, val_acc: 25.964291
+Epoch [147], train_loss: 0.062200, val_loss: 0.060592, val_acc: 25.946192
+Epoch [148], train_loss: 0.061998, val_loss: 0.059957, val_acc: 25.985971
+Epoch [149], train_loss: 0.062340, val_loss: 0.060372, val_acc: 25.967810
+Epoch [150], train_loss: 0.062173, val_loss: 0.060026, val_acc: 25.997412
+Epoch [151], train_loss: 0.061867, val_loss: 0.060066, val_acc: 25.963873
+Epoch [152], train_loss: 0.062153, val_loss: 0.060182, val_acc: 25.978817
+Epoch [153], train_loss: 0.062261, val_loss: 0.061093, val_acc: 25.878489
+Epoch [154], train_loss: 0.061987, val_loss: 0.060429, val_acc: 25.963266
+Epoch [155], train_loss: 0.061973, val_loss: 0.059892, val_acc: 25.999470
+Epoch [156], train_loss: 0.062001, val_loss: 0.059876, val_acc: 25.999460
+Epoch [157], train_loss: 0.061748, val_loss: 0.059876, val_acc: 26.005753
+Epoch [158], train_loss: 0.062543, val_loss: 0.059803, val_acc: 25.984608
+Epoch [159], train_loss: 0.062034, val_loss: 0.060041, val_acc: 25.995100
+Epoch [160], train_loss: 0.061774, val_loss: 0.059676, val_acc: 26.007435
+Epoch [161], train_loss: 0.061771, val_loss: 0.059710, val_acc: 26.021397
+Epoch [162], train_loss: 0.061717, val_loss: 0.059598, val_acc: 25.975269
+Epoch [163], train_loss: 0.061664, val_loss: 0.059741, val_acc: 25.969244
+Epoch [164], train_loss: 0.061852, val_loss: 0.059318, val_acc: 26.055590
+Epoch [165], train_loss: 0.061784, val_loss: 0.059708, val_acc: 26.000534
+Epoch [166], train_loss: 0.061816, val_loss: 0.059662, val_acc: 26.024637
+Epoch [167], train_loss: 0.061665, val_loss: 0.059335, val_acc: 26.043877
+Epoch [168], train_loss: 0.061569, val_loss: 0.059464, val_acc: 26.026781
+Epoch [169], train_loss: 0.061710, val_loss: 0.059395, val_acc: 26.047794
+Epoch [170], train_loss: 0.061643, val_loss: 0.059613, val_acc: 26.030350
+Epoch [171], train_loss: 0.061632, val_loss: 0.059859, val_acc: 26.016088
+Epoch [172], train_loss: 0.061584, val_loss: 0.059551, val_acc: 26.021042
+Epoch [173], train_loss: 0.061690, val_loss: 0.059293, val_acc: 26.074032
+Epoch [174], train_loss: 0.061908, val_loss: 0.059708, val_acc: 26.021519
+Epoch [175], train_loss: 0.061589, val_loss: 0.059219, val_acc: 26.066294
+Epoch [176], train_loss: 0.061570, val_loss: 0.059353, val_acc: 26.061235
+Epoch [177], train_loss: 0.061510, val_loss: 0.059259, val_acc: 26.075563
+Epoch [178], train_loss: 0.061600, val_loss: 0.059366, val_acc: 26.036573
+Epoch [179], train_loss: 0.061829, val_loss: 0.059502, val_acc: 26.032328
+Epoch [180], train_loss: 0.061449, val_loss: 0.059352, val_acc: 26.030367
+Epoch [181], train_loss: 0.061673, val_loss: 0.059409, val_acc: 26.049753
+Epoch [182], train_loss: 0.061450, val_loss: 0.059430, val_acc: 26.050547
+Epoch [183], train_loss: 0.061537, val_loss: 0.059459, val_acc: 26.045412
+Epoch [184], train_loss: 0.061446, val_loss: 0.059267, val_acc: 26.043846
+Epoch [185], train_loss: 0.061455, val_loss: 0.059273, val_acc: 26.090521
+Epoch [186], train_loss: 0.061540, val_loss: 0.059391, val_acc: 26.053461
+Epoch [187], train_loss: 0.061564, val_loss: 0.059056, val_acc: 26.031898
+Epoch [188], train_loss: 0.061534, val_loss: 0.059200, val_acc: 26.072617
+Epoch [189], train_loss: 0.061390, val_loss: 0.059160, val_acc: 26.041533
+Epoch [190], train_loss: 0.061299, val_loss: 0.059212, val_acc: 26.069656
+Epoch [191], train_loss: 0.061531, val_loss: 0.059191, val_acc: 26.088581
+Epoch [192], train_loss: 0.061354, val_loss: 0.059272, val_acc: 26.038612
+Epoch [193], train_loss: 0.061426, val_loss: 0.059246, val_acc: 26.067976
+Epoch [194], train_loss: 0.061390, val_loss: 0.059171, val_acc: 26.085213
+Epoch [195], train_loss: 0.061366, val_loss: 0.059130, val_acc: 26.084753
+Epoch [196], train_loss: 0.061368, val_loss: 0.059137, val_acc: 26.091408
+Epoch [197], train_loss: 0.061446, val_loss: 0.059162, val_acc: 26.005461
+Epoch [198], train_loss: 0.061400, val_loss: 0.059223, val_acc: 26.063356
+Epoch [199], train_loss: 0.061396, val_loss: 0.059244, val_acc: 26.020042
+Epoch [200], train_loss: 0.061272, val_loss: 0.059299, val_acc: 26.021336
+Epoch [201], train_loss: 0.061308, val_loss: 0.059071, val_acc: 26.031279
+Epoch [202], train_loss: 0.061191, val_loss: 0.059077, val_acc: 26.095921
+Epoch [203], train_loss: 0.061469, val_loss: 0.059214, val_acc: 26.078180
+Epoch [204], train_loss: 0.061230, val_loss: 0.059013, val_acc: 26.113670
+Epoch [205], train_loss: 0.061398, val_loss: 0.059177, val_acc: 26.049238
+Epoch [206], train_loss: 0.061294, val_loss: 0.059443, val_acc: 26.042669
+Epoch [207], train_loss: 0.061171, val_loss: 0.059311, val_acc: 26.058189
+Epoch [208], train_loss: 0.061159, val_loss: 0.059178, val_acc: 26.072527
+Epoch [209], train_loss: 0.061144, val_loss: 0.059307, val_acc: 26.059299
+Epoch [210], train_loss: 0.061250, val_loss: 0.059174, val_acc: 26.050510
+Epoch [211], train_loss: 0.061197, val_loss: 0.059135, val_acc: 26.087519
+Epoch [212], train_loss: 0.061177, val_loss: 0.059432, val_acc: 26.030069
+Epoch [213], train_loss: 0.061205, val_loss: 0.059262, val_acc: 25.998846
+Epoch [214], train_loss: 0.060992, val_loss: 0.059136, val_acc: 26.086220
+Epoch [215], train_loss: 0.061125, val_loss: 0.059231, val_acc: 26.067858
+Epoch [216], train_loss: 0.061042, val_loss: 0.059097, val_acc: 26.102047
+Epoch [217], train_loss: 0.061027, val_loss: 0.059050, val_acc: 26.097326
+Epoch [218], train_loss: 0.061123, val_loss: 0.059234, val_acc: 26.077600
+Epoch [219], train_loss: 0.061071, val_loss: 0.059090, val_acc: 26.083717
+Epoch [220], train_loss: 0.061010, val_loss: 0.058995, val_acc: 26.069033
+Epoch [221], train_loss: 0.060988, val_loss: 0.059327, val_acc: 26.041716
+Epoch [222], train_loss: 0.061107, val_loss: 0.059004, val_acc: 26.124763
+Epoch [223], train_loss: 0.061156, val_loss: 0.059023, val_acc: 26.069595
+Epoch [224], train_loss: 0.061047, val_loss: 0.058954, val_acc: 26.040382
+Epoch [225], train_loss: 0.061115, val_loss: 0.058889, val_acc: 26.087154
+Epoch [226], train_loss: 0.061012, val_loss: 0.058846, val_acc: 26.075975
+Epoch [227], train_loss: 0.061068, val_loss: 0.058995, val_acc: 26.087965
+Epoch [228], train_loss: 0.060842, val_loss: 0.058856, val_acc: 26.118254
+Epoch [229], train_loss: 0.060982, val_loss: 0.059052, val_acc: 26.057440
+Epoch [230], train_loss: 0.061100, val_loss: 0.058903, val_acc: 26.140438
+Epoch [231], train_loss: 0.060899, val_loss: 0.058948, val_acc: 26.078693
+Epoch [232], train_loss: 0.060856, val_loss: 0.058954, val_acc: 26.044487
+Epoch [233], train_loss: 0.060768, val_loss: 0.058906, val_acc: 26.118345
+Epoch [234], train_loss: 0.060855, val_loss: 0.058977, val_acc: 26.093098
+Epoch [235], train_loss: 0.060890, val_loss: 0.058714, val_acc: 26.108763
+Epoch [236], train_loss: 0.061212, val_loss: 0.059140, val_acc: 26.059669
+Epoch [237], train_loss: 0.060920, val_loss: 0.058749, val_acc: 26.111774
+Epoch [238], train_loss: 0.060937, val_loss: 0.058883, val_acc: 26.087370
+Epoch [239], train_loss: 0.060946, val_loss: 0.058788, val_acc: 26.110954
+Epoch [240], train_loss: 0.060867, val_loss: 0.058907, val_acc: 26.070318
+Epoch [241], train_loss: 0.060758, val_loss: 0.058598, val_acc: 26.138950
+Epoch [242], train_loss: 0.060799, val_loss: 0.058767, val_acc: 26.128546
+Epoch [243], train_loss: 0.060860, val_loss: 0.058881, val_acc: 26.053812
+Epoch [244], train_loss: 0.060879, val_loss: 0.058798, val_acc: 26.122761
+Epoch [245], train_loss: 0.060764, val_loss: 0.059065, val_acc: 26.029280
+Epoch [246], train_loss: 0.060986, val_loss: 0.059148, val_acc: 26.078863
+Epoch [247], train_loss: 0.060759, val_loss: 0.058862, val_acc: 26.116337
+Epoch [248], train_loss: 0.060736, val_loss: 0.058976, val_acc: 26.070549
+Epoch [249], train_loss: 0.060895, val_loss: 0.058978, val_acc: 26.151186
+Epoch [250], train_loss: 0.060828, val_loss: 0.058915, val_acc: 26.139668
+Epoch [251], train_loss: 0.060627, val_loss: 0.058790, val_acc: 26.101185
+Epoch [252], train_loss: 0.060720, val_loss: 0.059050, val_acc: 26.102467
+Epoch [253], train_loss: 0.060753, val_loss: 0.058828, val_acc: 26.114029
+Epoch [254], train_loss: 0.060804, val_loss: 0.059027, val_acc: 26.118883
+Epoch [255], train_loss: 0.060814, val_loss: 0.058975, val_acc: 26.126846
+Epoch [256], train_loss: 0.060765, val_loss: 0.058991, val_acc: 26.067196
+Epoch [257], train_loss: 0.060681, val_loss: 0.059106, val_acc: 26.046139
+Epoch [258], train_loss: 0.060769, val_loss: 0.059060, val_acc: 26.087196
+Epoch [259], train_loss: 0.060699, val_loss: 0.058990, val_acc: 26.098282
+Epoch [260], train_loss: 0.060724, val_loss: 0.058898, val_acc: 26.155052
+Epoch [261], train_loss: 0.060753, val_loss: 0.059152, val_acc: 26.115892
+Epoch [262], train_loss: 0.060738, val_loss: 0.058942, val_acc: 26.080402
+Epoch [263], train_loss: 0.060757, val_loss: 0.058960, val_acc: 26.120367
+Epoch [264], train_loss: 0.060651, val_loss: 0.058838, val_acc: 26.168789
+Epoch [265], train_loss: 0.060650, val_loss: 0.058785, val_acc: 26.121895
+Epoch [266], train_loss: 0.060539, val_loss: 0.058783, val_acc: 26.110737
+Epoch [267], train_loss: 0.060644, val_loss: 0.058738, val_acc: 26.127699
+Epoch [268], train_loss: 0.060532, val_loss: 0.058778, val_acc: 26.114973
+Epoch [269], train_loss: 0.060597, val_loss: 0.058871, val_acc: 26.098886
+Epoch [270], train_loss: 0.060589, val_loss: 0.058907, val_acc: 26.100807
+Epoch [271], train_loss: 0.060910, val_loss: 0.058868, val_acc: 26.143995
+Epoch [272], train_loss: 0.060703, val_loss: 0.059159, val_acc: 26.053267
+Epoch [273], train_loss: 0.060725, val_loss: 0.059011, val_acc: 26.119574
+Epoch [274], train_loss: 0.060767, val_loss: 0.059027, val_acc: 26.115953
+Epoch [275], train_loss: 0.060594, val_loss: 0.059098, val_acc: 26.084379
+Epoch [276], train_loss: 0.060678, val_loss: 0.058731, val_acc: 26.199896
+Epoch [277], train_loss: 0.060650, val_loss: 0.059121, val_acc: 26.036444
+Epoch [278], train_loss: 0.060595, val_loss: 0.058958, val_acc: 26.124540
+Epoch [279], train_loss: 0.060581, val_loss: 0.058908, val_acc: 25.997028
+Epoch [280], train_loss: 0.060622, val_loss: 0.058881, val_acc: 26.132822
+Epoch [281], train_loss: 0.060708, val_loss: 0.058939, val_acc: 26.107489
+Epoch [282], train_loss: 0.060559, val_loss: 0.058712, val_acc: 26.132875
+Epoch [283], train_loss: 0.060481, val_loss: 0.058762, val_acc: 26.128878
+Epoch [284], train_loss: 0.060666, val_loss: 0.059102, val_acc: 26.111902
+Epoch [285], train_loss: 0.060538, val_loss: 0.058681, val_acc: 26.121132
+Epoch [286], train_loss: 0.060556, val_loss: 0.059141, val_acc: 26.053810
+Epoch [287], train_loss: 0.060492, val_loss: 0.058868, val_acc: 26.115608
+Epoch [288], train_loss: 0.060576, val_loss: 0.058977, val_acc: 26.102871
+Epoch [289], train_loss: 0.060535, val_loss: 0.058805, val_acc: 26.133907
+Epoch [290], train_loss: 0.060497, val_loss: 0.058674, val_acc: 26.043528
+Epoch [291], train_loss: 0.060459, val_loss: 0.058617, val_acc: 26.139551
+Epoch [292], train_loss: 0.060536, val_loss: 0.058604, val_acc: 26.122805
+Epoch [293], train_loss: 0.060508, val_loss: 0.058664, val_acc: 26.146891
+Epoch [294], train_loss: 0.060477, val_loss: 0.058656, val_acc: 26.140783
+Epoch [295], train_loss: 0.060419, val_loss: 0.058822, val_acc: 26.071680
+Epoch [296], train_loss: 0.060575, val_loss: 0.058718, val_acc: 26.113245
+Epoch [297], train_loss: 0.060506, val_loss: 0.058674, val_acc: 26.137980
+Epoch [298], train_loss: 0.060383, val_loss: 0.058514, val_acc: 26.173740
+Epoch [299], train_loss: 0.060478, val_loss: 0.058672, val_acc: 26.130503
+python3 ./UNet_V15.py  6922.92s user 6582.53s system 99% cpu 3:45:22.68 total
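To compare runs visually (e.g., the two UNet_V15 seeds above, which plateau near val_acc ≈ 25.5 and ≈ 26.1 respectively), the parsed curves can be plotted. Again a hypothetical sketch, reusing parse_log() from the helper above and assuming matplotlib is installed:

import matplotlib.pyplot as plt
from pathlib import Path

def plot_val_acc(paths):
    # One validation-accuracy curve per log file, labelled by file name.
    for p in paths:
        rows = parse_log(p)  # parse_log() from the sketch above
        plt.plot([r[0] for r in rows], [r[3] for r in rows],
                 label=Path(p).name)
    plt.xlabel("epoch")
    plt.ylabel("val_acc")
    plt.legend()
    plt.savefig("val_acc_comparison.png")

plot_val_acc(sorted(Path("Sim_logs").glob("UNet_V15_*.log")))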
diff --git a/UNet/Sim_logs/UNet_V16_25648002.log b/UNet/Sim_logs/UNet_V16_25648002.log
new file mode 100644
index 0000000000000000000000000000000000000000..b0a1465948a34d270bbfa4b4f0eebcf832e3cf08
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V16_25648002.log
@@ -0,0 +1,320 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 300
+batchsize: 32
+learning rate: 3e-05
+kernel size is: 7
+seed is: 373686838
+Epoch [0], train_loss: 0.168821, val_loss: 0.174704, val_acc: 4.989962
+Epoch [1], train_loss: 0.160454, val_loss: 0.158591, val_acc: 5.769836
+Epoch [2], train_loss: 0.153920, val_loss: 0.147498, val_acc: 6.909436
+Epoch [3], train_loss: 0.149922, val_loss: 0.153916, val_acc: 6.221963
+Epoch [4], train_loss: 0.145867, val_loss: 0.148497, val_acc: 6.651636
+Epoch [5], train_loss: 0.142693, val_loss: 0.145023, val_acc: 6.854781
+Epoch [6], train_loss: 0.139995, val_loss: 0.142181, val_acc: 7.193930
+Epoch [7], train_loss: 0.137684, val_loss: 0.144121, val_acc: 6.859946
+Epoch [8], train_loss: 0.135692, val_loss: 0.136729, val_acc: 7.739902
+Epoch [9], train_loss: 0.133862, val_loss: 0.133845, val_acc: 8.140974
+Epoch [10], train_loss: 0.131268, val_loss: 0.130523, val_acc: 8.555520
+Epoch [11], train_loss: 0.129388, val_loss: 0.134083, val_acc: 8.112595
+Epoch [12], train_loss: 0.127728, val_loss: 0.126883, val_acc: 9.172525
+Epoch [13], train_loss: 0.125326, val_loss: 0.125998, val_acc: 9.264442
+Epoch [14], train_loss: 0.123780, val_loss: 0.123789, val_acc: 9.599032
+Epoch [15], train_loss: 0.123305, val_loss: 0.119215, val_acc: 10.346504
+Epoch [16], train_loss: 0.120687, val_loss: 0.120844, val_acc: 10.055427
+Epoch [17], train_loss: 0.119345, val_loss: 0.117644, val_acc: 10.718880
+Epoch [18], train_loss: 0.117936, val_loss: 0.113276, val_acc: 11.338315
+Epoch [19], train_loss: 0.115483, val_loss: 0.111815, val_acc: 11.604565
+Epoch [20], train_loss: 0.114546, val_loss: 0.110413, val_acc: 11.962605
+Epoch [21], train_loss: 0.112715, val_loss: 0.108394, val_acc: 12.249233
+Epoch [22], train_loss: 0.111860, val_loss: 0.108207, val_acc: 12.429058
+Epoch [23], train_loss: 0.109636, val_loss: 0.105046, val_acc: 12.789742
+Epoch [24], train_loss: 0.108303, val_loss: 0.103570, val_acc: 13.003980
+Epoch [25], train_loss: 0.107655, val_loss: 0.103866, val_acc: 12.997822
+Epoch [26], train_loss: 0.105731, val_loss: 0.097607, val_acc: 13.659499
+Epoch [27], train_loss: 0.104038, val_loss: 0.098877, val_acc: 13.489855
+Epoch [28], train_loss: 0.103345, val_loss: 0.096622, val_acc: 13.817748
+Epoch [29], train_loss: 0.101943, val_loss: 0.094036, val_acc: 13.993798
+Epoch [30], train_loss: 0.100592, val_loss: 0.094521, val_acc: 14.059101
+Epoch [31], train_loss: 0.099488, val_loss: 0.093171, val_acc: 14.177934
+Epoch [32], train_loss: 0.097841, val_loss: 0.091475, val_acc: 14.338976
+Epoch [33], train_loss: 0.096693, val_loss: 0.090372, val_acc: 14.469481
+Epoch [34], train_loss: 0.095624, val_loss: 0.090416, val_acc: 14.549758
+Epoch [35], train_loss: 0.094536, val_loss: 0.087994, val_acc: 14.714739
+Epoch [36], train_loss: 0.093431, val_loss: 0.086937, val_acc: 14.858049
+Epoch [37], train_loss: 0.092893, val_loss: 0.085335, val_acc: 15.050459
+Epoch [38], train_loss: 0.091669, val_loss: 0.086047, val_acc: 15.033724
+Epoch [39], train_loss: 0.090921, val_loss: 0.085171, val_acc: 15.250104
+Epoch [40], train_loss: 0.089613, val_loss: 0.083821, val_acc: 15.519266
+Epoch [41], train_loss: 0.088809, val_loss: 0.083268, val_acc: 15.647236
+Epoch [42], train_loss: 0.088315, val_loss: 0.081977, val_acc: 15.792148
+Epoch [43], train_loss: 0.087249, val_loss: 0.080491, val_acc: 16.124668
+Epoch [44], train_loss: 0.086283, val_loss: 0.080950, val_acc: 16.305861
+Epoch [45], train_loss: 0.085849, val_loss: 0.079654, val_acc: 16.453831
+Epoch [46], train_loss: 0.084655, val_loss: 0.078529, val_acc: 16.689724
+Epoch [47], train_loss: 0.083988, val_loss: 0.077988, val_acc: 16.841690
+Epoch [48], train_loss: 0.083336, val_loss: 0.078015, val_acc: 17.159775
+Epoch [49], train_loss: 0.082384, val_loss: 0.077525, val_acc: 17.335773
+Epoch [50], train_loss: 0.081736, val_loss: 0.074964, val_acc: 17.917822
+Epoch [51], train_loss: 0.081284, val_loss: 0.076614, val_acc: 17.829664
+Epoch [52], train_loss: 0.080833, val_loss: 0.074578, val_acc: 18.319691
+Epoch [53], train_loss: 0.079860, val_loss: 0.074074, val_acc: 18.793287
+Epoch [54], train_loss: 0.079926, val_loss: 0.075460, val_acc: 18.836342
+Epoch [55], train_loss: 0.078791, val_loss: 0.072506, val_acc: 19.724186
+Epoch [56], train_loss: 0.078337, val_loss: 0.073373, val_acc: 19.211843
+Epoch [57], train_loss: 0.078788, val_loss: 0.075447, val_acc: 19.202269
+Epoch [58], train_loss: 0.077742, val_loss: 0.073045, val_acc: 19.587805
+Epoch [59], train_loss: 0.077421, val_loss: 0.073346, val_acc: 20.111938
+Epoch [60], train_loss: 0.077061, val_loss: 0.072214, val_acc: 20.341171
+Epoch [61], train_loss: 0.076476, val_loss: 0.071202, val_acc: 20.455523
+Epoch [62], train_loss: 0.075931, val_loss: 0.071295, val_acc: 20.863672
+Epoch [63], train_loss: 0.075667, val_loss: 0.070474, val_acc: 21.079798
+Epoch [64], train_loss: 0.075526, val_loss: 0.071675, val_acc: 20.863516
+Epoch [65], train_loss: 0.074995, val_loss: 0.070227, val_acc: 21.815720
+Epoch [66], train_loss: 0.074890, val_loss: 0.069810, val_acc: 21.893724
+Epoch [67], train_loss: 0.074530, val_loss: 0.070197, val_acc: 21.901255
+Epoch [68], train_loss: 0.074304, val_loss: 0.070097, val_acc: 22.216478
+Epoch [69], train_loss: 0.074150, val_loss: 0.070101, val_acc: 22.043175
+Epoch [70], train_loss: 0.073500, val_loss: 0.068183, val_acc: 22.761692
+Epoch [71], train_loss: 0.073638, val_loss: 0.069424, val_acc: 22.370634
+Epoch [72], train_loss: 0.073200, val_loss: 0.068987, val_acc: 22.582407
+Epoch [73], train_loss: 0.072886, val_loss: 0.067955, val_acc: 23.218803
+Epoch [74], train_loss: 0.073076, val_loss: 0.069416, val_acc: 22.755173
+Epoch [75], train_loss: 0.072929, val_loss: 0.068764, val_acc: 22.986229
+Epoch [76], train_loss: 0.072747, val_loss: 0.070415, val_acc: 22.883764
+Epoch [77], train_loss: 0.072071, val_loss: 0.068316, val_acc: 23.391951
+Epoch [78], train_loss: 0.072359, val_loss: 0.067557, val_acc: 23.309893
+Epoch [79], train_loss: 0.072292, val_loss: 0.068684, val_acc: 23.207310
+Epoch [80], train_loss: 0.071525, val_loss: 0.066652, val_acc: 23.566259
+Epoch [81], train_loss: 0.072114, val_loss: 0.067119, val_acc: 23.800896
+Epoch [82], train_loss: 0.072118, val_loss: 0.066901, val_acc: 23.797825
+Epoch [83], train_loss: 0.071233, val_loss: 0.067233, val_acc: 23.749273
+Epoch [84], train_loss: 0.071772, val_loss: 0.066766, val_acc: 23.821022
+Epoch [85], train_loss: 0.070956, val_loss: 0.066404, val_acc: 23.900364
+Epoch [86], train_loss: 0.071104, val_loss: 0.065803, val_acc: 23.972105
+Epoch [87], train_loss: 0.071448, val_loss: 0.066678, val_acc: 23.796980
+Epoch [88], train_loss: 0.070757, val_loss: 0.066577, val_acc: 24.050838
+Epoch [89], train_loss: 0.071529, val_loss: 0.066542, val_acc: 23.950994
+Epoch [90], train_loss: 0.071300, val_loss: 0.066701, val_acc: 24.033707
+Epoch [91], train_loss: 0.070459, val_loss: 0.066382, val_acc: 24.112017
+Epoch [92], train_loss: 0.070764, val_loss: 0.066920, val_acc: 24.219307
+Epoch [93], train_loss: 0.070931, val_loss: 0.065583, val_acc: 24.273502
+Epoch [94], train_loss: 0.070581, val_loss: 0.066261, val_acc: 24.285542
+Epoch [95], train_loss: 0.070467, val_loss: 0.065994, val_acc: 24.323364
+Epoch [96], train_loss: 0.070341, val_loss: 0.065554, val_acc: 24.324341
+Epoch [97], train_loss: 0.070247, val_loss: 0.065408, val_acc: 24.372330
+Epoch [98], train_loss: 0.070378, val_loss: 0.066362, val_acc: 24.135532
+Epoch [99], train_loss: 0.070011, val_loss: 0.066023, val_acc: 24.215134
+Epoch [100], train_loss: 0.070389, val_loss: 0.065461, val_acc: 24.474741
+Epoch [101], train_loss: 0.070273, val_loss: 0.065556, val_acc: 24.333271
+Epoch [102], train_loss: 0.069809, val_loss: 0.065897, val_acc: 24.388474
+Epoch [103], train_loss: 0.069862, val_loss: 0.065712, val_acc: 24.387058
+Epoch [104], train_loss: 0.069956, val_loss: 0.065244, val_acc: 24.464169
+Epoch [105], train_loss: 0.069922, val_loss: 0.065046, val_acc: 24.496244
+Epoch [106], train_loss: 0.069882, val_loss: 0.065413, val_acc: 24.476435
+Epoch [107], train_loss: 0.069511, val_loss: 0.064574, val_acc: 24.606596
+Epoch [108], train_loss: 0.069569, val_loss: 0.066102, val_acc: 24.432573
+Epoch [109], train_loss: 0.069328, val_loss: 0.064749, val_acc: 24.596062
+Epoch [110], train_loss: 0.069290, val_loss: 0.064924, val_acc: 24.595415
+Epoch [111], train_loss: 0.069125, val_loss: 0.064480, val_acc: 24.586441
+Epoch [112], train_loss: 0.069058, val_loss: 0.064701, val_acc: 24.576965
+Epoch [113], train_loss: 0.069148, val_loss: 0.064769, val_acc: 24.597813
+Epoch [114], train_loss: 0.069602, val_loss: 0.065523, val_acc: 24.356266
+Epoch [115], train_loss: 0.069278, val_loss: 0.064888, val_acc: 24.564903
+Epoch [116], train_loss: 0.069306, val_loss: 0.065511, val_acc: 24.454792
+Epoch [117], train_loss: 0.068914, val_loss: 0.064612, val_acc: 24.673323
+Epoch [118], train_loss: 0.068608, val_loss: 0.064089, val_acc: 24.695139
+Epoch [119], train_loss: 0.068903, val_loss: 0.064967, val_acc: 24.714537
+Epoch [120], train_loss: 0.068520, val_loss: 0.064310, val_acc: 24.801317
+Epoch [121], train_loss: 0.068818, val_loss: 0.064212, val_acc: 24.655928
+Epoch [122], train_loss: 0.068365, val_loss: 0.064407, val_acc: 24.645523
+Epoch [123], train_loss: 0.068785, val_loss: 0.064688, val_acc: 24.612242
+Epoch [124], train_loss: 0.068527, val_loss: 0.063888, val_acc: 24.752796
+Epoch [125], train_loss: 0.068444, val_loss: 0.064210, val_acc: 24.708994
+Epoch [126], train_loss: 0.068313, val_loss: 0.064003, val_acc: 24.802580
+Epoch [127], train_loss: 0.068446, val_loss: 0.064500, val_acc: 24.688297
+Epoch [128], train_loss: 0.068302, val_loss: 0.064115, val_acc: 24.774767
+Epoch [129], train_loss: 0.068606, val_loss: 0.064065, val_acc: 24.802086
+Epoch [130], train_loss: 0.068433, val_loss: 0.064297, val_acc: 24.789152
+Epoch [131], train_loss: 0.068090, val_loss: 0.064151, val_acc: 24.699011
+Epoch [132], train_loss: 0.068281, val_loss: 0.063741, val_acc: 24.721733
+Epoch [133], train_loss: 0.067756, val_loss: 0.063922, val_acc: 24.788536
+Epoch [134], train_loss: 0.068055, val_loss: 0.063812, val_acc: 24.842268
+Epoch [135], train_loss: 0.067794, val_loss: 0.064098, val_acc: 24.805460
+Epoch [136], train_loss: 0.067903, val_loss: 0.063508, val_acc: 24.886969
+Epoch [137], train_loss: 0.067908, val_loss: 0.063889, val_acc: 24.750406
+Epoch [138], train_loss: 0.067837, val_loss: 0.063684, val_acc: 24.871977
+Epoch [139], train_loss: 0.068127, val_loss: 0.063683, val_acc: 24.818779
+Epoch [140], train_loss: 0.067923, val_loss: 0.063233, val_acc: 24.965101
+Epoch [141], train_loss: 0.067505, val_loss: 0.063499, val_acc: 24.899918
+Epoch [142], train_loss: 0.067578, val_loss: 0.063169, val_acc: 24.946678
+Epoch [143], train_loss: 0.067557, val_loss: 0.063610, val_acc: 24.863077
+Epoch [144], train_loss: 0.067402, val_loss: 0.063601, val_acc: 24.869188
+Epoch [145], train_loss: 0.067525, val_loss: 0.063399, val_acc: 24.851610
+Epoch [146], train_loss: 0.067296, val_loss: 0.063458, val_acc: 24.879656
+Epoch [147], train_loss: 0.067581, val_loss: 0.063026, val_acc: 24.925795
+Epoch [148], train_loss: 0.067697, val_loss: 0.063350, val_acc: 24.897598
+Epoch [149], train_loss: 0.066974, val_loss: 0.062864, val_acc: 24.959099
+Epoch [150], train_loss: 0.067291, val_loss: 0.062758, val_acc: 25.013077
+Epoch [151], train_loss: 0.067107, val_loss: 0.063040, val_acc: 24.976074
+Epoch [152], train_loss: 0.067553, val_loss: 0.062783, val_acc: 24.975346
+Epoch [153], train_loss: 0.067169, val_loss: 0.062938, val_acc: 24.980742
+Epoch [154], train_loss: 0.067367, val_loss: 0.062644, val_acc: 25.060879
+Epoch [155], train_loss: 0.066767, val_loss: 0.062951, val_acc: 25.043200
+Epoch [156], train_loss: 0.067326, val_loss: 0.062979, val_acc: 24.984934
+Epoch [157], train_loss: 0.067141, val_loss: 0.062485, val_acc: 25.101660
+Epoch [158], train_loss: 0.066882, val_loss: 0.062735, val_acc: 25.084642
+Epoch [159], train_loss: 0.067101, val_loss: 0.062502, val_acc: 25.040747
+Epoch [160], train_loss: 0.066755, val_loss: 0.062726, val_acc: 25.090197
+Epoch [161], train_loss: 0.066896, val_loss: 0.062390, val_acc: 24.967295
+Epoch [162], train_loss: 0.066757, val_loss: 0.063023, val_acc: 25.040974
+Epoch [163], train_loss: 0.067068, val_loss: 0.062671, val_acc: 25.053768
+Epoch [164], train_loss: 0.066502, val_loss: 0.062439, val_acc: 25.123419
+Epoch [165], train_loss: 0.067134, val_loss: 0.062964, val_acc: 24.977575
+Epoch [166], train_loss: 0.066725, val_loss: 0.062475, val_acc: 25.096312
+Epoch [167], train_loss: 0.066408, val_loss: 0.062237, val_acc: 25.115797
+Epoch [168], train_loss: 0.066972, val_loss: 0.062476, val_acc: 25.065439
+Epoch [169], train_loss: 0.066844, val_loss: 0.062982, val_acc: 24.992649
+Epoch [170], train_loss: 0.066691, val_loss: 0.063069, val_acc: 25.023138
+Epoch [171], train_loss: 0.066490, val_loss: 0.062400, val_acc: 25.070658
+Epoch [172], train_loss: 0.066596, val_loss: 0.062089, val_acc: 25.159948
+Epoch [173], train_loss: 0.066668, val_loss: 0.062536, val_acc: 25.041462
+Epoch [174], train_loss: 0.066485, val_loss: 0.062121, val_acc: 25.158598
+Epoch [175], train_loss: 0.066123, val_loss: 0.062513, val_acc: 25.114441
+Epoch [176], train_loss: 0.066541, val_loss: 0.062082, val_acc: 25.131683
+Epoch [177], train_loss: 0.066293, val_loss: 0.062245, val_acc: 25.121414
+Epoch [178], train_loss: 0.066118, val_loss: 0.062028, val_acc: 25.144640
+Epoch [179], train_loss: 0.066394, val_loss: 0.062061, val_acc: 25.140141
+Epoch [180], train_loss: 0.066130, val_loss: 0.062296, val_acc: 25.097042
+Epoch [181], train_loss: 0.066145, val_loss: 0.062107, val_acc: 25.150711
+Epoch [182], train_loss: 0.065977, val_loss: 0.061798, val_acc: 25.215389
+Epoch [183], train_loss: 0.066369, val_loss: 0.061884, val_acc: 25.214371
+Epoch [184], train_loss: 0.066362, val_loss: 0.061823, val_acc: 25.179178
+Epoch [185], train_loss: 0.066081, val_loss: 0.061870, val_acc: 25.128424
+Epoch [186], train_loss: 0.065878, val_loss: 0.061828, val_acc: 25.202023
+Epoch [187], train_loss: 0.065906, val_loss: 0.061965, val_acc: 25.182520
+Epoch [188], train_loss: 0.065959, val_loss: 0.062103, val_acc: 25.179377
+Epoch [189], train_loss: 0.066002, val_loss: 0.061768, val_acc: 25.187653
+Epoch [190], train_loss: 0.065659, val_loss: 0.061565, val_acc: 25.267128
+Epoch [191], train_loss: 0.065774, val_loss: 0.061653, val_acc: 25.251127
+Epoch [192], train_loss: 0.065785, val_loss: 0.061715, val_acc: 25.238987
+Epoch [193], train_loss: 0.065704, val_loss: 0.061591, val_acc: 25.245132
+Epoch [194], train_loss: 0.065899, val_loss: 0.062169, val_acc: 25.205021
+Epoch [195], train_loss: 0.065792, val_loss: 0.061860, val_acc: 25.226282
+Epoch [196], train_loss: 0.065747, val_loss: 0.061634, val_acc: 25.257288
+Epoch [197], train_loss: 0.065715, val_loss: 0.061686, val_acc: 25.236841
+Epoch [198], train_loss: 0.065949, val_loss: 0.062220, val_acc: 25.181610
+Epoch [199], train_loss: 0.065566, val_loss: 0.061722, val_acc: 25.186630
+Epoch [200], train_loss: 0.065520, val_loss: 0.061613, val_acc: 25.258274
+Epoch [201], train_loss: 0.065291, val_loss: 0.061589, val_acc: 25.237555
+Epoch [202], train_loss: 0.065396, val_loss: 0.061620, val_acc: 25.241150
+Epoch [203], train_loss: 0.065412, val_loss: 0.061848, val_acc: 25.241516
+Epoch [204], train_loss: 0.065496, val_loss: 0.061451, val_acc: 25.308750
+Epoch [205], train_loss: 0.065333, val_loss: 0.061535, val_acc: 25.294907
+Epoch [206], train_loss: 0.065191, val_loss: 0.060932, val_acc: 25.350859
+Epoch [207], train_loss: 0.065352, val_loss: 0.061653, val_acc: 25.241371
+Epoch [208], train_loss: 0.065326, val_loss: 0.061089, val_acc: 25.326258
+Epoch [209], train_loss: 0.065335, val_loss: 0.061257, val_acc: 25.319139
+Epoch [210], train_loss: 0.065326, val_loss: 0.061477, val_acc: 25.253340
+Epoch [211], train_loss: 0.065627, val_loss: 0.061380, val_acc: 25.261879
+Epoch [212], train_loss: 0.065260, val_loss: 0.061373, val_acc: 25.307621
+Epoch [213], train_loss: 0.065511, val_loss: 0.061600, val_acc: 25.267424
+Epoch [214], train_loss: 0.065190, val_loss: 0.061127, val_acc: 25.319830
+Epoch [215], train_loss: 0.065168, val_loss: 0.061111, val_acc: 25.304289
+Epoch [216], train_loss: 0.065204, val_loss: 0.061234, val_acc: 25.273411
+Epoch [217], train_loss: 0.065082, val_loss: 0.061146, val_acc: 25.274296
+Epoch [218], train_loss: 0.065319, val_loss: 0.061031, val_acc: 25.287146
+Epoch [219], train_loss: 0.065349, val_loss: 0.061312, val_acc: 25.312191
+Epoch [220], train_loss: 0.065251, val_loss: 0.061157, val_acc: 25.274046
+Epoch [221], train_loss: 0.064902, val_loss: 0.061295, val_acc: 25.290777
+Epoch [222], train_loss: 0.065206, val_loss: 0.060876, val_acc: 25.336880
+Epoch [223], train_loss: 0.065145, val_loss: 0.060873, val_acc: 25.361237
+Epoch [224], train_loss: 0.065304, val_loss: 0.060913, val_acc: 25.318602
+Epoch [225], train_loss: 0.064987, val_loss: 0.060856, val_acc: 25.317549
+Epoch [226], train_loss: 0.065068, val_loss: 0.060717, val_acc: 25.356104
+Epoch [227], train_loss: 0.065213, val_loss: 0.060798, val_acc: 25.359606
+Epoch [228], train_loss: 0.064946, val_loss: 0.060878, val_acc: 25.330372
+Epoch [229], train_loss: 0.065037, val_loss: 0.061378, val_acc: 25.260857
+Epoch [230], train_loss: 0.065020, val_loss: 0.060667, val_acc: 25.384377
+Epoch [231], train_loss: 0.064869, val_loss: 0.060609, val_acc: 25.394993
+Epoch [232], train_loss: 0.064937, val_loss: 0.061359, val_acc: 25.237644
+Epoch [233], train_loss: 0.064644, val_loss: 0.061020, val_acc: 25.286518
+Epoch [234], train_loss: 0.064921, val_loss: 0.060757, val_acc: 25.322016
+Epoch [235], train_loss: 0.064590, val_loss: 0.060516, val_acc: 25.410088
+Epoch [236], train_loss: 0.064900, val_loss: 0.060686, val_acc: 25.356890
+Epoch [237], train_loss: 0.064663, val_loss: 0.060746, val_acc: 25.360853
+Epoch [238], train_loss: 0.064633, val_loss: 0.060577, val_acc: 25.380682
+Epoch [239], train_loss: 0.064826, val_loss: 0.060492, val_acc: 25.388208
+Epoch [240], train_loss: 0.064667, val_loss: 0.060874, val_acc: 25.317463
+Epoch [241], train_loss: 0.064756, val_loss: 0.060819, val_acc: 25.344551
+Epoch [242], train_loss: 0.064870, val_loss: 0.060559, val_acc: 25.394030
+Epoch [243], train_loss: 0.064533, val_loss: 0.060451, val_acc: 25.389988
+Epoch [244], train_loss: 0.064530, val_loss: 0.060455, val_acc: 25.387156
+Epoch [245], train_loss: 0.065050, val_loss: 0.060669, val_acc: 25.396732
+Epoch [246], train_loss: 0.064394, val_loss: 0.060446, val_acc: 25.418114
+Epoch [247], train_loss: 0.064780, val_loss: 0.060578, val_acc: 25.394337
+Epoch [248], train_loss: 0.064451, val_loss: 0.060430, val_acc: 25.385979
+Epoch [249], train_loss: 0.064673, val_loss: 0.060416, val_acc: 25.355883
+Epoch [250], train_loss: 0.064522, val_loss: 0.060624, val_acc: 25.357508
+Epoch [251], train_loss: 0.064468, val_loss: 0.060325, val_acc: 25.435438
+Epoch [252], train_loss: 0.064520, val_loss: 0.060151, val_acc: 25.443539
+Epoch [253], train_loss: 0.064583, val_loss: 0.060585, val_acc: 25.371460
+Epoch [254], train_loss: 0.064504, val_loss: 0.060198, val_acc: 25.449791
+Epoch [255], train_loss: 0.064599, val_loss: 0.060497, val_acc: 25.390217
+Epoch [256], train_loss: 0.064359, val_loss: 0.060221, val_acc: 25.447971
+Epoch [257], train_loss: 0.064182, val_loss: 0.060349, val_acc: 25.412462
+Epoch [258], train_loss: 0.064389, val_loss: 0.060205, val_acc: 25.458727
+Epoch [259], train_loss: 0.064276, val_loss: 0.060482, val_acc: 25.391733
+Epoch [260], train_loss: 0.064183, val_loss: 0.060399, val_acc: 25.397600
+Epoch [261], train_loss: 0.064377, val_loss: 0.060266, val_acc: 25.409285
+Epoch [262], train_loss: 0.064348, val_loss: 0.060606, val_acc: 25.377424
+Epoch [263], train_loss: 0.064337, val_loss: 0.060368, val_acc: 25.405046
+Epoch [264], train_loss: 0.064238, val_loss: 0.060386, val_acc: 25.412012
+Epoch [265], train_loss: 0.064366, val_loss: 0.060509, val_acc: 25.358137
+Epoch [266], train_loss: 0.063969, val_loss: 0.060359, val_acc: 25.430693
+Epoch [267], train_loss: 0.064424, val_loss: 0.060252, val_acc: 25.424616
+Epoch [268], train_loss: 0.064205, val_loss: 0.060130, val_acc: 25.438032
+Epoch [269], train_loss: 0.064358, val_loss: 0.060445, val_acc: 25.400906
+Epoch [270], train_loss: 0.064050, val_loss: 0.059847, val_acc: 25.502495
+Epoch [271], train_loss: 0.064141, val_loss: 0.060351, val_acc: 25.398273
+Epoch [272], train_loss: 0.064142, val_loss: 0.060393, val_acc: 25.402483
+Epoch [273], train_loss: 0.064259, val_loss: 0.060184, val_acc: 25.437586
+Epoch [274], train_loss: 0.064490, val_loss: 0.060471, val_acc: 25.379112
+Epoch [275], train_loss: 0.064115, val_loss: 0.060193, val_acc: 25.403997
+Epoch [276], train_loss: 0.063887, val_loss: 0.060201, val_acc: 25.444384
+Epoch [277], train_loss: 0.064079, val_loss: 0.060059, val_acc: 25.441257
+Epoch [278], train_loss: 0.064208, val_loss: 0.060014, val_acc: 25.451544
+Epoch [279], train_loss: 0.064129, val_loss: 0.059921, val_acc: 25.470760
+Epoch [280], train_loss: 0.064252, val_loss: 0.060240, val_acc: 25.379601
+Epoch [281], train_loss: 0.063983, val_loss: 0.059875, val_acc: 25.465803
+Epoch [282], train_loss: 0.063952, val_loss: 0.060024, val_acc: 25.423491
+Epoch [283], train_loss: 0.064055, val_loss: 0.060194, val_acc: 25.393248
+Epoch [284], train_loss: 0.064056, val_loss: 0.060180, val_acc: 25.437065
+Epoch [285], train_loss: 0.064013, val_loss: 0.060569, val_acc: 25.335697
+Epoch [286], train_loss: 0.063916, val_loss: 0.059950, val_acc: 25.444969
+Epoch [287], train_loss: 0.063968, val_loss: 0.059924, val_acc: 25.458305
+Epoch [288], train_loss: 0.063837, val_loss: 0.060214, val_acc: 25.410999
+Epoch [289], train_loss: 0.064011, val_loss: 0.060013, val_acc: 25.444223
+Epoch [290], train_loss: 0.064261, val_loss: 0.059973, val_acc: 25.474503
+Epoch [291], train_loss: 0.064108, val_loss: 0.060264, val_acc: 25.424423
+Epoch [292], train_loss: 0.063828, val_loss: 0.060403, val_acc: 25.379494
+Epoch [293], train_loss: 0.063724, val_loss: 0.059903, val_acc: 25.473366
+Epoch [294], train_loss: 0.064165, val_loss: 0.060285, val_acc: 25.409777
+Epoch [295], train_loss: 0.063999, val_loss: 0.060109, val_acc: 25.438858
+Epoch [296], train_loss: 0.064037, val_loss: 0.060426, val_acc: 25.373396
+Epoch [297], train_loss: 0.063992, val_loss: 0.060135, val_acc: 25.423183
+Epoch [298], train_loss: 0.063646, val_loss: 0.059934, val_acc: 25.459135
+Epoch [299], train_loss: 0.064073, val_loss: 0.059902, val_acc: 25.476109
+python3 ./UNet_V16.py  7087.32s user 6906.94s system 99% cpu 3:53:16.94 total
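Note on the log format: every run above prints one fixed-format line per epoch, so these Sim_logs are easy to post-process. A minimal sketch (hypothetical helper, not part of this repo) that parses a log into (epoch, train_loss, val_loss, val_acc) records and reports the best-val_loss epoch:

    import re

    # Matches lines like: Epoch [12], train_loss: 0.164499, val_loss: 0.138280, val_acc: 8.448054
    EPOCH_RE = re.compile(
        r"Epoch \[(\d+)\], train_loss: ([\d.]+), val_loss: ([\d.]+), val_acc: ([\d.]+)"
    )

    def parse_log(path):
        """Return (epoch, train_loss, val_loss, val_acc) tuples from one Sim_logs file."""
        records = []
        with open(path) as fh:
            for line in fh:
                m = EPOCH_RE.search(line)
                if m:
                    records.append((int(m.group(1)), float(m.group(2)),
                                    float(m.group(3)), float(m.group(4))))
        return records

    if __name__ == "__main__":
        recs = parse_log("Sim_logs/UNet_V16_25678821.log")
        best = min(recs, key=lambda r: r[2])  # epoch with the lowest val_loss
        print(f"best epoch {best[0]}: val_loss={best[2]:.6f}, val_acc={best[3]:.6f}")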
diff --git a/UNet/Sim_logs/UNet_64_V13_25614634.log b/UNet/Sim_logs/UNet_V16_25678821.log
similarity index 67%
rename from UNet/Sim_logs/UNet_64_V13_25614634.log
rename to UNet/Sim_logs/UNet_V16_25678821.log
index 67b01f6da2c580256eb6c6b3ad669a9f695ed1c4..0f5a58df55e362f43d23d62f5d9fab11525df848 100644
--- a/UNet/Sim_logs/UNet_64_V13_25614634.log
+++ b/UNet/Sim_logs/UNet_V16_25678821.log
@@ -7,15 +7,14 @@ Collecting torch==1.10.1
 Collecting typing-extensions
   Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
 Installing collected packages: typing-extensions, torch
-  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
-  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
-Successfully installed torch-1.10.1 typing-extensions-4.1.1
+ERROR: Could not install packages due to an OSError: [Errno 116] Stale file handle
+
 WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
 You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
 Traceback (most recent call last):
-  File "./UNet_V14.py", line 10, in <module>
+  File "./UNet_V16.py", line 10, in <module>
     import torch
   File "/home/yk138599/.local/lib/python3.7/site-packages/torch/__init__.py", line 197, in <module>
     from torch._C import *  # noqa: F403
 ImportError: /home/yk138599/.local/lib/python3.7/site-packages/torch/lib/libtorch_cuda.so: cannot read file data
-python3 ./UNet_V14.py  0.14s user 0.06s system 47% cpu 0.420 total
+python3 ./UNet_V16.py  0.13s user 0.04s system 31% cpu 0.537 total
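Note on the failure above: pip hit a stale NFS file handle (OSError, Errno 116) mid-install, leaving a partially written torch tree in ~/.local, and the job then died at import torch because libtorch_cuda.so could not be read, so the allocation was wasted before training started. A minimal pre-flight sketch (hypothetical, not part of this repo; assumes the same pinned torch==1.10.1 wheel the scripts install) that probes the import in a subprocess and retries the install once:

    import subprocess
    import sys

    def torch_importable():
        # Probe in a child process so a corrupted libtorch .so cannot take
        # down the caller; returncode 0 means the import succeeded.
        cmd = [sys.executable, "-c", "import torch; print(torch.__version__)"]
        return subprocess.run(cmd).returncode == 0

    if not torch_importable():
        # One repair attempt: force-reinstall the pinned wheel, then re-probe.
        subprocess.run([sys.executable, "-m", "pip", "install", "--user",
                        "--force-reinstall", "-q", "torch==1.10.1"], check=False)
        if not torch_importable():
            sys.exit("torch install is still unreadable; aborting before training")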
diff --git a/UNet/Sim_logs/UNet_V16_25679419.log b/UNet/Sim_logs/UNet_V16_25679419.log
new file mode 100644
index 0000000000000000000000000000000000000000..09c433f764c3cc6c82033a6ceed031b07828d5ce
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V16_25679419.log
@@ -0,0 +1,220 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 200
+batchsize: 32
+learning rate: 3e-05
+kernel size is: 7
+ seed is: 2199910834
+Epoch [0], train_loss: 0.201452, val_loss: 0.207882, val_acc: 5.004320
+Epoch [1], train_loss: 0.197311, val_loss: 0.187029, val_acc: 4.885676
+Epoch [2], train_loss: 0.193774, val_loss: 0.181770, val_acc: 4.980228
+Epoch [3], train_loss: 0.190197, val_loss: 0.174787, val_acc: 5.175769
+Epoch [4], train_loss: 0.186220, val_loss: 0.165585, val_acc: 5.567457
+Epoch [5], train_loss: 0.182818, val_loss: 0.159423, val_acc: 6.092173
+Epoch [6], train_loss: 0.179174, val_loss: 0.161556, val_acc: 5.895570
+Epoch [7], train_loss: 0.175798, val_loss: 0.151565, val_acc: 6.866718
+Epoch [8], train_loss: 0.174171, val_loss: 0.145591, val_acc: 7.494323
+Epoch [9], train_loss: 0.170896, val_loss: 0.145541, val_acc: 7.530961
+Epoch [10], train_loss: 0.168893, val_loss: 0.143673, val_acc: 7.741639
+Epoch [11], train_loss: 0.166415, val_loss: 0.141870, val_acc: 7.931571
+Epoch [12], train_loss: 0.164499, val_loss: 0.138280, val_acc: 8.448054
+Epoch [13], train_loss: 0.162407, val_loss: 0.137628, val_acc: 8.482207
+Epoch [14], train_loss: 0.159827, val_loss: 0.136999, val_acc: 8.443478
+Epoch [15], train_loss: 0.157580, val_loss: 0.136904, val_acc: 8.450577
+Epoch [16], train_loss: 0.155677, val_loss: 0.132332, val_acc: 9.024961
+Epoch [17], train_loss: 0.152960, val_loss: 0.131082, val_acc: 9.220119
+Epoch [18], train_loss: 0.150790, val_loss: 0.130016, val_acc: 9.335469
+Epoch [19], train_loss: 0.149126, val_loss: 0.130554, val_acc: 9.292521
+Epoch [20], train_loss: 0.147120, val_loss: 0.125617, val_acc: 9.885945
+Epoch [21], train_loss: 0.145060, val_loss: 0.123776, val_acc: 10.169753
+Epoch [22], train_loss: 0.143313, val_loss: 0.122246, val_acc: 10.326592
+Epoch [23], train_loss: 0.141130, val_loss: 0.122216, val_acc: 10.311440
+Epoch [24], train_loss: 0.139267, val_loss: 0.119107, val_acc: 10.719450
+Epoch [25], train_loss: 0.137401, val_loss: 0.118162, val_acc: 10.859447
+Epoch [26], train_loss: 0.135684, val_loss: 0.114451, val_acc: 11.300768
+Epoch [27], train_loss: 0.133590, val_loss: 0.113761, val_acc: 11.350463
+Epoch [28], train_loss: 0.131884, val_loss: 0.110582, val_acc: 11.776168
+Epoch [29], train_loss: 0.129901, val_loss: 0.106443, val_acc: 12.216680
+Epoch [30], train_loss: 0.127914, val_loss: 0.107979, val_acc: 12.084242
+Epoch [31], train_loss: 0.126482, val_loss: 0.105635, val_acc: 12.369968
+Epoch [32], train_loss: 0.124902, val_loss: 0.106134, val_acc: 12.345869
+Epoch [33], train_loss: 0.122651, val_loss: 0.103781, val_acc: 12.715107
+Epoch [34], train_loss: 0.121143, val_loss: 0.098726, val_acc: 13.280902
+Epoch [35], train_loss: 0.119795, val_loss: 0.098030, val_acc: 13.342083
+Epoch [36], train_loss: 0.117546, val_loss: 0.095330, val_acc: 13.777961
+Epoch [37], train_loss: 0.116091, val_loss: 0.097672, val_acc: 13.558163
+Epoch [38], train_loss: 0.114863, val_loss: 0.096475, val_acc: 13.491291
+Epoch [39], train_loss: 0.113118, val_loss: 0.096027, val_acc: 13.596957
+Epoch [40], train_loss: 0.111738, val_loss: 0.092828, val_acc: 14.097707
+Epoch [41], train_loss: 0.110338, val_loss: 0.090343, val_acc: 14.440719
+Epoch [42], train_loss: 0.108486, val_loss: 0.089104, val_acc: 14.622812
+Epoch [43], train_loss: 0.107810, val_loss: 0.090015, val_acc: 14.567845
+Epoch [44], train_loss: 0.106041, val_loss: 0.085877, val_acc: 15.184800
+Epoch [45], train_loss: 0.104489, val_loss: 0.086740, val_acc: 15.001059
+Epoch [46], train_loss: 0.103760, val_loss: 0.084775, val_acc: 15.290279
+Epoch [47], train_loss: 0.102179, val_loss: 0.083312, val_acc: 15.581590
+Epoch [48], train_loss: 0.100747, val_loss: 0.084066, val_acc: 15.566797
+Epoch [49], train_loss: 0.100052, val_loss: 0.084379, val_acc: 15.455223
+Epoch [50], train_loss: 0.098835, val_loss: 0.080944, val_acc: 16.179691
+Epoch [51], train_loss: 0.097629, val_loss: 0.081550, val_acc: 16.207285
+Epoch [52], train_loss: 0.096373, val_loss: 0.081217, val_acc: 15.860223
+Epoch [53], train_loss: 0.095271, val_loss: 0.079131, val_acc: 16.408823
+Epoch [54], train_loss: 0.094355, val_loss: 0.079842, val_acc: 16.350599
+Epoch [55], train_loss: 0.093032, val_loss: 0.078103, val_acc: 16.880590
+Epoch [56], train_loss: 0.092284, val_loss: 0.077721, val_acc: 16.848852
+Epoch [57], train_loss: 0.091336, val_loss: 0.076820, val_acc: 17.265930
+Epoch [58], train_loss: 0.090480, val_loss: 0.076184, val_acc: 17.471977
+Epoch [59], train_loss: 0.089595, val_loss: 0.075887, val_acc: 17.491354
+Epoch [60], train_loss: 0.088827, val_loss: 0.074515, val_acc: 18.066059
+Epoch [61], train_loss: 0.087503, val_loss: 0.075623, val_acc: 17.770304
+Epoch [62], train_loss: 0.086985, val_loss: 0.073307, val_acc: 18.450212
+Epoch [63], train_loss: 0.086098, val_loss: 0.073805, val_acc: 18.118103
+Epoch [64], train_loss: 0.085274, val_loss: 0.072592, val_acc: 19.154591
+Epoch [65], train_loss: 0.084477, val_loss: 0.072760, val_acc: 18.673069
+Epoch [66], train_loss: 0.083744, val_loss: 0.072367, val_acc: 19.196085
+Epoch [67], train_loss: 0.082907, val_loss: 0.071817, val_acc: 19.290882
+Epoch [68], train_loss: 0.082619, val_loss: 0.070938, val_acc: 20.006514
+Epoch [69], train_loss: 0.082079, val_loss: 0.071417, val_acc: 19.574812
+Epoch [70], train_loss: 0.080725, val_loss: 0.071185, val_acc: 19.577452
+Epoch [71], train_loss: 0.080417, val_loss: 0.070770, val_acc: 20.084049
+Epoch [72], train_loss: 0.079932, val_loss: 0.070192, val_acc: 20.394215
+Epoch [73], train_loss: 0.079362, val_loss: 0.070052, val_acc: 20.516485
+Epoch [74], train_loss: 0.078911, val_loss: 0.069091, val_acc: 20.726458
+Epoch [75], train_loss: 0.078147, val_loss: 0.068699, val_acc: 21.245630
+Epoch [76], train_loss: 0.078018, val_loss: 0.068805, val_acc: 21.370071
+Epoch [77], train_loss: 0.077694, val_loss: 0.068462, val_acc: 21.353788
+Epoch [78], train_loss: 0.077103, val_loss: 0.067826, val_acc: 21.869888
+Epoch [79], train_loss: 0.076592, val_loss: 0.068379, val_acc: 21.869455
+Epoch [80], train_loss: 0.076340, val_loss: 0.068374, val_acc: 21.857979
+Epoch [81], train_loss: 0.075504, val_loss: 0.067543, val_acc: 22.198469
+Epoch [82], train_loss: 0.075428, val_loss: 0.067793, val_acc: 21.811359
+Epoch [83], train_loss: 0.074990, val_loss: 0.066695, val_acc: 22.913658
+Epoch [84], train_loss: 0.074914, val_loss: 0.066829, val_acc: 22.822201
+Epoch [85], train_loss: 0.074127, val_loss: 0.066166, val_acc: 23.263536
+Epoch [86], train_loss: 0.074101, val_loss: 0.066818, val_acc: 22.885071
+Epoch [87], train_loss: 0.074012, val_loss: 0.066972, val_acc: 22.834167
+Epoch [88], train_loss: 0.073981, val_loss: 0.066261, val_acc: 23.247446
+Epoch [89], train_loss: 0.073412, val_loss: 0.066515, val_acc: 23.572893
+Epoch [90], train_loss: 0.073054, val_loss: 0.065779, val_acc: 23.888954
+Epoch [91], train_loss: 0.073178, val_loss: 0.066692, val_acc: 23.406059
+Epoch [92], train_loss: 0.072752, val_loss: 0.065459, val_acc: 24.168819
+Epoch [93], train_loss: 0.072623, val_loss: 0.065709, val_acc: 24.043087
+Epoch [94], train_loss: 0.072140, val_loss: 0.065444, val_acc: 24.056919
+Epoch [95], train_loss: 0.071828, val_loss: 0.065036, val_acc: 24.258673
+Epoch [96], train_loss: 0.071817, val_loss: 0.065730, val_acc: 24.198473
+Epoch [97], train_loss: 0.071912, val_loss: 0.065116, val_acc: 24.443230
+Epoch [98], train_loss: 0.071635, val_loss: 0.065364, val_acc: 24.498846
+Epoch [99], train_loss: 0.071389, val_loss: 0.065743, val_acc: 24.299046
+Epoch [100], train_loss: 0.071417, val_loss: 0.064991, val_acc: 24.580116
+Epoch [101], train_loss: 0.071147, val_loss: 0.065606, val_acc: 24.414068
+Epoch [102], train_loss: 0.071400, val_loss: 0.065048, val_acc: 24.754095
+Epoch [103], train_loss: 0.070931, val_loss: 0.065513, val_acc: 24.481409
+Epoch [104], train_loss: 0.071255, val_loss: 0.065245, val_acc: 24.599297
+Epoch [105], train_loss: 0.070692, val_loss: 0.065089, val_acc: 24.640785
+Epoch [106], train_loss: 0.070404, val_loss: 0.064824, val_acc: 24.776489
+Epoch [107], train_loss: 0.070588, val_loss: 0.065048, val_acc: 24.627594
+Epoch [108], train_loss: 0.070236, val_loss: 0.064686, val_acc: 24.860704
+Epoch [109], train_loss: 0.070019, val_loss: 0.064019, val_acc: 25.008987
+Epoch [110], train_loss: 0.070184, val_loss: 0.064535, val_acc: 24.765692
+Epoch [111], train_loss: 0.070054, val_loss: 0.064309, val_acc: 24.971581
+Epoch [112], train_loss: 0.069905, val_loss: 0.064166, val_acc: 24.972164
+Epoch [113], train_loss: 0.069670, val_loss: 0.064095, val_acc: 24.931513
+Epoch [114], train_loss: 0.069519, val_loss: 0.064418, val_acc: 24.981255
+Epoch [115], train_loss: 0.069909, val_loss: 0.063844, val_acc: 25.120024
+Epoch [116], train_loss: 0.069630, val_loss: 0.064002, val_acc: 25.165428
+Epoch [117], train_loss: 0.069846, val_loss: 0.064722, val_acc: 24.985840
+Epoch [118], train_loss: 0.069270, val_loss: 0.064985, val_acc: 25.023045
+Epoch [119], train_loss: 0.069343, val_loss: 0.064139, val_acc: 25.170860
+Epoch [120], train_loss: 0.069156, val_loss: 0.063598, val_acc: 25.215479
+Epoch [121], train_loss: 0.069048, val_loss: 0.064491, val_acc: 24.880329
+Epoch [122], train_loss: 0.069219, val_loss: 0.064149, val_acc: 25.126692
+Epoch [123], train_loss: 0.068941, val_loss: 0.063542, val_acc: 25.252489
+Epoch [124], train_loss: 0.068982, val_loss: 0.063973, val_acc: 25.197271
+Epoch [125], train_loss: 0.068591, val_loss: 0.063415, val_acc: 25.190750
+Epoch [126], train_loss: 0.069164, val_loss: 0.063791, val_acc: 25.089645
+Epoch [127], train_loss: 0.068505, val_loss: 0.063125, val_acc: 25.256884
+Epoch [128], train_loss: 0.068586, val_loss: 0.063713, val_acc: 25.267048
+Epoch [129], train_loss: 0.068742, val_loss: 0.063514, val_acc: 25.282600
+Epoch [130], train_loss: 0.068482, val_loss: 0.063603, val_acc: 25.293758
+Epoch [131], train_loss: 0.068881, val_loss: 0.063934, val_acc: 25.176140
+Epoch [132], train_loss: 0.068881, val_loss: 0.063339, val_acc: 25.264725
+Epoch [133], train_loss: 0.068415, val_loss: 0.063460, val_acc: 25.214863
+Epoch [134], train_loss: 0.068710, val_loss: 0.063622, val_acc: 25.237671
+Epoch [135], train_loss: 0.068388, val_loss: 0.063306, val_acc: 25.343506
+Epoch [136], train_loss: 0.068340, val_loss: 0.063120, val_acc: 25.277481
+Epoch [137], train_loss: 0.068416, val_loss: 0.063146, val_acc: 25.297064
+Epoch [138], train_loss: 0.068039, val_loss: 0.063029, val_acc: 25.405222
+Epoch [139], train_loss: 0.068323, val_loss: 0.063213, val_acc: 25.323675
+Epoch [140], train_loss: 0.068314, val_loss: 0.063063, val_acc: 25.346455
+Epoch [141], train_loss: 0.068030, val_loss: 0.063005, val_acc: 25.389124
+Epoch [142], train_loss: 0.067780, val_loss: 0.062767, val_acc: 25.443098
+Epoch [143], train_loss: 0.068214, val_loss: 0.062839, val_acc: 25.388569
+Epoch [144], train_loss: 0.067869, val_loss: 0.062553, val_acc: 25.462845
+Epoch [145], train_loss: 0.067768, val_loss: 0.062466, val_acc: 25.418514
+Epoch [146], train_loss: 0.067488, val_loss: 0.062456, val_acc: 25.497931
+Epoch [147], train_loss: 0.068172, val_loss: 0.062734, val_acc: 25.391335
+Epoch [148], train_loss: 0.067574, val_loss: 0.062512, val_acc: 25.463182
+Epoch [149], train_loss: 0.067615, val_loss: 0.062574, val_acc: 25.477964
+Epoch [150], train_loss: 0.067665, val_loss: 0.062648, val_acc: 25.407703
+Epoch [151], train_loss: 0.068019, val_loss: 0.062751, val_acc: 25.401464
+Epoch [152], train_loss: 0.067687, val_loss: 0.062952, val_acc: 25.480812
+Epoch [153], train_loss: 0.067575, val_loss: 0.063189, val_acc: 25.396423
+Epoch [154], train_loss: 0.067199, val_loss: 0.062443, val_acc: 25.508734
+Epoch [155], train_loss: 0.067536, val_loss: 0.062295, val_acc: 25.442200
+Epoch [156], train_loss: 0.067394, val_loss: 0.062106, val_acc: 25.539413
+Epoch [157], train_loss: 0.067590, val_loss: 0.062537, val_acc: 25.378576
+Epoch [158], train_loss: 0.067315, val_loss: 0.062551, val_acc: 25.463324
+Epoch [159], train_loss: 0.067221, val_loss: 0.062390, val_acc: 25.519466
+Epoch [160], train_loss: 0.066923, val_loss: 0.061963, val_acc: 25.591795
+Epoch [161], train_loss: 0.067012, val_loss: 0.061556, val_acc: 25.607359
+Epoch [162], train_loss: 0.066932, val_loss: 0.061794, val_acc: 25.590452
+Epoch [163], train_loss: 0.067325, val_loss: 0.062354, val_acc: 25.444351
+Epoch [164], train_loss: 0.067542, val_loss: 0.062152, val_acc: 25.546915
+Epoch [165], train_loss: 0.067159, val_loss: 0.062512, val_acc: 25.408287
+Epoch [166], train_loss: 0.067117, val_loss: 0.061764, val_acc: 25.580322
+Epoch [167], train_loss: 0.066831, val_loss: 0.061865, val_acc: 25.581944
+Epoch [168], train_loss: 0.067213, val_loss: 0.061427, val_acc: 25.661528
+Epoch [169], train_loss: 0.067174, val_loss: 0.062030, val_acc: 25.547386
+Epoch [170], train_loss: 0.066712, val_loss: 0.061724, val_acc: 25.635157
+Epoch [171], train_loss: 0.066949, val_loss: 0.061574, val_acc: 25.636808
+Epoch [172], train_loss: 0.066975, val_loss: 0.061833, val_acc: 25.534424
+Epoch [173], train_loss: 0.066941, val_loss: 0.061751, val_acc: 25.626888
+Epoch [174], train_loss: 0.066776, val_loss: 0.061870, val_acc: 25.611372
+Epoch [175], train_loss: 0.066851, val_loss: 0.061688, val_acc: 25.638208
+Epoch [176], train_loss: 0.066265, val_loss: 0.061474, val_acc: 25.638803
+Epoch [177], train_loss: 0.066642, val_loss: 0.061475, val_acc: 25.661186
+Epoch [178], train_loss: 0.066756, val_loss: 0.061780, val_acc: 25.591413
+Epoch [179], train_loss: 0.066581, val_loss: 0.061603, val_acc: 25.585327
+Epoch [180], train_loss: 0.066459, val_loss: 0.061501, val_acc: 25.591532
+Epoch [181], train_loss: 0.066599, val_loss: 0.061897, val_acc: 25.545984
+Epoch [182], train_loss: 0.066528, val_loss: 0.061496, val_acc: 25.646666
+Epoch [183], train_loss: 0.066506, val_loss: 0.061726, val_acc: 25.619368
+Epoch [184], train_loss: 0.066406, val_loss: 0.061662, val_acc: 25.697960
+Epoch [185], train_loss: 0.066300, val_loss: 0.061861, val_acc: 25.606236
+Epoch [186], train_loss: 0.066147, val_loss: 0.061305, val_acc: 25.699120
+Epoch [187], train_loss: 0.066299, val_loss: 0.060911, val_acc: 25.777912
+Epoch [188], train_loss: 0.066625, val_loss: 0.061082, val_acc: 25.740036
+Epoch [189], train_loss: 0.066425, val_loss: 0.062014, val_acc: 25.598404
+Epoch [190], train_loss: 0.066129, val_loss: 0.060947, val_acc: 25.758345
+Epoch [191], train_loss: 0.066396, val_loss: 0.060975, val_acc: 25.776411
+Epoch [192], train_loss: 0.066184, val_loss: 0.061004, val_acc: 25.741278
+Epoch [193], train_loss: 0.066094, val_loss: 0.061508, val_acc: 25.644258
+Epoch [194], train_loss: 0.066284, val_loss: 0.061427, val_acc: 25.666714
+Epoch [195], train_loss: 0.066280, val_loss: 0.061017, val_acc: 25.732939
+Epoch [196], train_loss: 0.066121, val_loss: 0.061231, val_acc: 25.722908
+Epoch [197], train_loss: 0.066081, val_loss: 0.061664, val_acc: 25.599962
+Epoch [198], train_loss: 0.066063, val_loss: 0.061053, val_acc: 25.761885
+Epoch [199], train_loss: 0.065832, val_loss: 0.061401, val_acc: 25.674393
+python3 ./UNet_V16.py  4773.91s user 4514.67s system 99% cpu 2:35:24.38 total
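Note on the run above: val_loss is still edging down at epoch 199 (0.061401, vs about 0.0636 near epoch 120), so 200 epochs has not clearly plateaued for k=7 at lr=3e-05. A minimal plateau check over the parsed series, assuming the hypothetical parse_log records sketched earlier (window and tolerance are illustrative choices):

    def has_plateaued(records, window=20, tol=1e-3):
        """True if the mean val_loss of the last `window` epochs improved on
        the previous window by less than `tol` (relative)."""
        val_loss = [r[2] for r in records]
        if len(val_loss) < 2 * window:
            return False
        prev = sum(val_loss[-2 * window:-window]) / window
        last = sum(val_loss[-window:]) / window
        return (prev - last) / prev < tol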
diff --git a/UNet/Sim_logs/UNet_V16_K3_25673552.log b/UNet/Sim_logs/UNet_V16_K3_25673552.log
new file mode 100644
index 0000000000000000000000000000000000000000..00fb10b9595e25d5d0fc8665dfd395cda70263eb
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V16_K3_25673552.log
@@ -0,0 +1,220 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 200
+batchsize: 32
+learning rate: 3e-05
+kernel size is: 3
+ seed is: 2199910834
+Epoch [0], train_loss: 0.182880, val_loss: 0.165453, val_acc: 4.491613
+Epoch [1], train_loss: 0.178339, val_loss: 0.156712, val_acc: 4.921211
+Epoch [2], train_loss: 0.174052, val_loss: 0.150529, val_acc: 5.342243
+Epoch [3], train_loss: 0.170415, val_loss: 0.147087, val_acc: 5.685145
+Epoch [4], train_loss: 0.167229, val_loss: 0.145197, val_acc: 5.986865
+Epoch [5], train_loss: 0.163707, val_loss: 0.136032, val_acc: 7.208575
+Epoch [6], train_loss: 0.160407, val_loss: 0.134954, val_acc: 7.421455
+Epoch [7], train_loss: 0.156993, val_loss: 0.129813, val_acc: 8.460843
+Epoch [8], train_loss: 0.153804, val_loss: 0.124668, val_acc: 9.456677
+Epoch [9], train_loss: 0.151464, val_loss: 0.123621, val_acc: 9.746360
+Epoch [10], train_loss: 0.148680, val_loss: 0.123747, val_acc: 9.831948
+Epoch [11], train_loss: 0.146294, val_loss: 0.120030, val_acc: 10.528720
+Epoch [12], train_loss: 0.143901, val_loss: 0.118593, val_acc: 10.798935
+Epoch [13], train_loss: 0.141213, val_loss: 0.114498, val_acc: 11.484556
+Epoch [14], train_loss: 0.139067, val_loss: 0.112907, val_acc: 11.713828
+Epoch [15], train_loss: 0.137628, val_loss: 0.116325, val_acc: 11.274628
+Epoch [16], train_loss: 0.135926, val_loss: 0.112677, val_acc: 11.797544
+Epoch [17], train_loss: 0.133383, val_loss: 0.109489, val_acc: 12.165887
+Epoch [18], train_loss: 0.131395, val_loss: 0.112037, val_acc: 11.888839
+Epoch [19], train_loss: 0.129845, val_loss: 0.107282, val_acc: 12.411127
+Epoch [20], train_loss: 0.128060, val_loss: 0.107235, val_acc: 12.426538
+Epoch [21], train_loss: 0.126276, val_loss: 0.106807, val_acc: 12.503109
+Epoch [22], train_loss: 0.124634, val_loss: 0.104482, val_acc: 12.693846
+Epoch [23], train_loss: 0.123001, val_loss: 0.104760, val_acc: 12.700095
+Epoch [24], train_loss: 0.121311, val_loss: 0.101049, val_acc: 13.025831
+Epoch [25], train_loss: 0.119479, val_loss: 0.099286, val_acc: 13.235432
+Epoch [26], train_loss: 0.117750, val_loss: 0.098936, val_acc: 13.239571
+Epoch [27], train_loss: 0.115837, val_loss: 0.097873, val_acc: 13.355766
+Epoch [28], train_loss: 0.114608, val_loss: 0.094942, val_acc: 13.621919
+Epoch [29], train_loss: 0.113355, val_loss: 0.094661, val_acc: 13.664462
+Epoch [30], train_loss: 0.111523, val_loss: 0.091708, val_acc: 13.939689
+Epoch [31], train_loss: 0.109834, val_loss: 0.091671, val_acc: 13.941781
+Epoch [32], train_loss: 0.108359, val_loss: 0.088328, val_acc: 14.314763
+Epoch [33], train_loss: 0.106843, val_loss: 0.088421, val_acc: 14.332198
+Epoch [34], train_loss: 0.105453, val_loss: 0.089528, val_acc: 14.170455
+Epoch [35], train_loss: 0.103826, val_loss: 0.087086, val_acc: 14.456539
+Epoch [36], train_loss: 0.102540, val_loss: 0.084084, val_acc: 14.943543
+Epoch [37], train_loss: 0.101301, val_loss: 0.084733, val_acc: 14.747085
+Epoch [38], train_loss: 0.099780, val_loss: 0.084124, val_acc: 14.856912
+Epoch [39], train_loss: 0.098675, val_loss: 0.084586, val_acc: 14.787952
+Epoch [40], train_loss: 0.097081, val_loss: 0.081848, val_acc: 15.315651
+Epoch [41], train_loss: 0.095643, val_loss: 0.080517, val_acc: 15.443006
+Epoch [42], train_loss: 0.094784, val_loss: 0.078537, val_acc: 15.877364
+Epoch [43], train_loss: 0.093828, val_loss: 0.079141, val_acc: 15.770455
+Epoch [44], train_loss: 0.092374, val_loss: 0.078159, val_acc: 16.044653
+Epoch [45], train_loss: 0.091172, val_loss: 0.075539, val_acc: 16.749769
+Epoch [46], train_loss: 0.090349, val_loss: 0.075775, val_acc: 16.803091
+Epoch [47], train_loss: 0.089699, val_loss: 0.075135, val_acc: 16.848600
+Epoch [48], train_loss: 0.088644, val_loss: 0.074968, val_acc: 16.900824
+Epoch [49], train_loss: 0.087435, val_loss: 0.073597, val_acc: 17.555283
+Epoch [50], train_loss: 0.086715, val_loss: 0.073315, val_acc: 17.672327
+Epoch [51], train_loss: 0.085827, val_loss: 0.071352, val_acc: 18.711348
+Epoch [52], train_loss: 0.084604, val_loss: 0.072702, val_acc: 17.968601
+Epoch [53], train_loss: 0.084107, val_loss: 0.072994, val_acc: 17.879379
+Epoch [54], train_loss: 0.083250, val_loss: 0.072169, val_acc: 18.187979
+Epoch [55], train_loss: 0.082423, val_loss: 0.069905, val_acc: 19.633823
+Epoch [56], train_loss: 0.081644, val_loss: 0.070550, val_acc: 19.229185
+Epoch [57], train_loss: 0.081243, val_loss: 0.069727, val_acc: 19.645554
+Epoch [58], train_loss: 0.080261, val_loss: 0.069314, val_acc: 19.955225
+Epoch [59], train_loss: 0.079626, val_loss: 0.069575, val_acc: 19.705708
+Epoch [60], train_loss: 0.078832, val_loss: 0.069074, val_acc: 20.185289
+Epoch [61], train_loss: 0.078708, val_loss: 0.069079, val_acc: 20.065243
+Epoch [62], train_loss: 0.077929, val_loss: 0.067969, val_acc: 21.189041
+Epoch [63], train_loss: 0.077350, val_loss: 0.067735, val_acc: 21.273672
+Epoch [64], train_loss: 0.077176, val_loss: 0.067819, val_acc: 21.320707
+Epoch [65], train_loss: 0.076107, val_loss: 0.067374, val_acc: 21.800583
+Epoch [66], train_loss: 0.075980, val_loss: 0.066973, val_acc: 22.264250
+Epoch [67], train_loss: 0.075557, val_loss: 0.067129, val_acc: 22.323717
+Epoch [68], train_loss: 0.075204, val_loss: 0.066277, val_acc: 22.727797
+Epoch [69], train_loss: 0.074940, val_loss: 0.066590, val_acc: 22.619764
+Epoch [70], train_loss: 0.074360, val_loss: 0.066615, val_acc: 22.613945
+Epoch [71], train_loss: 0.074302, val_loss: 0.066275, val_acc: 22.837629
+Epoch [72], train_loss: 0.073772, val_loss: 0.066574, val_acc: 22.969719
+Epoch [73], train_loss: 0.073582, val_loss: 0.065626, val_acc: 23.585529
+Epoch [74], train_loss: 0.072997, val_loss: 0.065882, val_acc: 23.361164
+Epoch [75], train_loss: 0.073319, val_loss: 0.065970, val_acc: 23.650684
+Epoch [76], train_loss: 0.073094, val_loss: 0.066055, val_acc: 23.608261
+Epoch [77], train_loss: 0.072766, val_loss: 0.065533, val_acc: 23.958328
+Epoch [78], train_loss: 0.072602, val_loss: 0.065719, val_acc: 23.883707
+Epoch [79], train_loss: 0.071757, val_loss: 0.066037, val_acc: 24.027109
+Epoch [80], train_loss: 0.072094, val_loss: 0.065671, val_acc: 24.088985
+Epoch [81], train_loss: 0.071968, val_loss: 0.065496, val_acc: 24.348417
+Epoch [82], train_loss: 0.071580, val_loss: 0.064975, val_acc: 24.440689
+Epoch [83], train_loss: 0.071420, val_loss: 0.065692, val_acc: 24.223001
+Epoch [84], train_loss: 0.071460, val_loss: 0.064955, val_acc: 24.614624
+Epoch [85], train_loss: 0.071565, val_loss: 0.066824, val_acc: 24.313698
+Epoch [86], train_loss: 0.071117, val_loss: 0.065339, val_acc: 24.473072
+Epoch [87], train_loss: 0.071033, val_loss: 0.065937, val_acc: 24.378109
+Epoch [88], train_loss: 0.071190, val_loss: 0.064750, val_acc: 24.754419
+Epoch [89], train_loss: 0.071269, val_loss: 0.065611, val_acc: 24.617151
+Epoch [90], train_loss: 0.070745, val_loss: 0.065717, val_acc: 24.848305
+Epoch [91], train_loss: 0.070540, val_loss: 0.065011, val_acc: 25.024311
+Epoch [92], train_loss: 0.070608, val_loss: 0.065445, val_acc: 24.866245
+Epoch [93], train_loss: 0.070017, val_loss: 0.065511, val_acc: 24.757442
+Epoch [94], train_loss: 0.070223, val_loss: 0.065269, val_acc: 25.013844
+Epoch [95], train_loss: 0.070883, val_loss: 0.065620, val_acc: 24.999701
+Epoch [96], train_loss: 0.070381, val_loss: 0.066136, val_acc: 24.933899
+Epoch [97], train_loss: 0.070260, val_loss: 0.065622, val_acc: 25.007727
+Epoch [98], train_loss: 0.069840, val_loss: 0.065227, val_acc: 25.110502
+Epoch [99], train_loss: 0.070176, val_loss: 0.065405, val_acc: 24.986322
+Epoch [100], train_loss: 0.070223, val_loss: 0.065424, val_acc: 25.037689
+Epoch [101], train_loss: 0.069760, val_loss: 0.065513, val_acc: 25.107347
+Epoch [102], train_loss: 0.069678, val_loss: 0.064927, val_acc: 25.114353
+Epoch [103], train_loss: 0.069779, val_loss: 0.065444, val_acc: 24.982958
+Epoch [104], train_loss: 0.069904, val_loss: 0.065167, val_acc: 25.235592
+Epoch [105], train_loss: 0.069581, val_loss: 0.065430, val_acc: 25.002729
+Epoch [106], train_loss: 0.069462, val_loss: 0.065056, val_acc: 25.072231
+Epoch [107], train_loss: 0.069978, val_loss: 0.065441, val_acc: 25.211206
+Epoch [108], train_loss: 0.069933, val_loss: 0.065074, val_acc: 25.153706
+Epoch [109], train_loss: 0.069688, val_loss: 0.065701, val_acc: 25.074369
+Epoch [110], train_loss: 0.069317, val_loss: 0.064691, val_acc: 25.281208
+Epoch [111], train_loss: 0.069568, val_loss: 0.065414, val_acc: 25.222319
+Epoch [112], train_loss: 0.069415, val_loss: 0.064758, val_acc: 25.309275
+Epoch [113], train_loss: 0.068835, val_loss: 0.064812, val_acc: 25.254728
+Epoch [114], train_loss: 0.068752, val_loss: 0.064541, val_acc: 25.406357
+Epoch [115], train_loss: 0.069192, val_loss: 0.065206, val_acc: 25.223680
+Epoch [116], train_loss: 0.068925, val_loss: 0.065075, val_acc: 25.189890
+Epoch [117], train_loss: 0.068708, val_loss: 0.064735, val_acc: 25.363987
+Epoch [118], train_loss: 0.069036, val_loss: 0.065129, val_acc: 25.268377
+Epoch [119], train_loss: 0.069090, val_loss: 0.064875, val_acc: 25.226374
+Epoch [120], train_loss: 0.069389, val_loss: 0.065104, val_acc: 25.183491
+Epoch [121], train_loss: 0.068434, val_loss: 0.064664, val_acc: 25.404833
+Epoch [122], train_loss: 0.069510, val_loss: 0.065150, val_acc: 25.172905
+Epoch [123], train_loss: 0.068115, val_loss: 0.064816, val_acc: 25.424482
+Epoch [124], train_loss: 0.068578, val_loss: 0.064679, val_acc: 25.419846
+Epoch [125], train_loss: 0.068929, val_loss: 0.064099, val_acc: 25.502287
+Epoch [126], train_loss: 0.068887, val_loss: 0.064255, val_acc: 25.444502
+Epoch [127], train_loss: 0.068297, val_loss: 0.063999, val_acc: 25.442703
+Epoch [128], train_loss: 0.068143, val_loss: 0.064383, val_acc: 25.353046
+Epoch [129], train_loss: 0.068705, val_loss: 0.064843, val_acc: 25.288448
+Epoch [130], train_loss: 0.068502, val_loss: 0.064606, val_acc: 25.401680
+Epoch [131], train_loss: 0.068081, val_loss: 0.064606, val_acc: 25.382147
+Epoch [132], train_loss: 0.068256, val_loss: 0.064917, val_acc: 25.369390
+Epoch [133], train_loss: 0.068114, val_loss: 0.064732, val_acc: 25.361006
+Epoch [134], train_loss: 0.067967, val_loss: 0.064697, val_acc: 25.317535
+Epoch [135], train_loss: 0.068291, val_loss: 0.064175, val_acc: 25.388960
+Epoch [136], train_loss: 0.068107, val_loss: 0.063800, val_acc: 25.490244
+Epoch [137], train_loss: 0.068146, val_loss: 0.064294, val_acc: 25.465004
+Epoch [138], train_loss: 0.068074, val_loss: 0.064418, val_acc: 25.505686
+Epoch [139], train_loss: 0.068055, val_loss: 0.064524, val_acc: 25.434137
+Epoch [140], train_loss: 0.068176, val_loss: 0.064087, val_acc: 25.554359
+Epoch [141], train_loss: 0.068099, val_loss: 0.064099, val_acc: 25.509546
+Epoch [142], train_loss: 0.067927, val_loss: 0.063968, val_acc: 25.367878
+Epoch [143], train_loss: 0.067816, val_loss: 0.063985, val_acc: 25.585520
+Epoch [144], train_loss: 0.067932, val_loss: 0.063743, val_acc: 25.565334
+Epoch [145], train_loss: 0.067973, val_loss: 0.063777, val_acc: 25.437910
+Epoch [146], train_loss: 0.067067, val_loss: 0.063448, val_acc: 25.552097
+Epoch [147], train_loss: 0.068291, val_loss: 0.064000, val_acc: 25.452480
+Epoch [148], train_loss: 0.067968, val_loss: 0.064678, val_acc: 25.323200
+Epoch [149], train_loss: 0.067695, val_loss: 0.063768, val_acc: 25.485405
+Epoch [150], train_loss: 0.067345, val_loss: 0.063385, val_acc: 25.511879
+Epoch [151], train_loss: 0.067403, val_loss: 0.063420, val_acc: 25.621832
+Epoch [152], train_loss: 0.067529, val_loss: 0.063863, val_acc: 25.604113
+Epoch [153], train_loss: 0.067694, val_loss: 0.063918, val_acc: 25.540131
+Epoch [154], train_loss: 0.067366, val_loss: 0.063540, val_acc: 25.583210
+Epoch [155], train_loss: 0.067298, val_loss: 0.063704, val_acc: 25.601765
+Epoch [156], train_loss: 0.067540, val_loss: 0.063696, val_acc: 25.492268
+Epoch [157], train_loss: 0.067282, val_loss: 0.063804, val_acc: 25.437174
+Epoch [158], train_loss: 0.067153, val_loss: 0.063092, val_acc: 25.512878
+Epoch [159], train_loss: 0.067343, val_loss: 0.063940, val_acc: 25.671282
+Epoch [160], train_loss: 0.067430, val_loss: 0.063729, val_acc: 25.624382
+Epoch [161], train_loss: 0.067225, val_loss: 0.063919, val_acc: 25.575089
+Epoch [162], train_loss: 0.067396, val_loss: 0.063143, val_acc: 25.582422
+Epoch [163], train_loss: 0.067408, val_loss: 0.063991, val_acc: 25.467690
+Epoch [164], train_loss: 0.066958, val_loss: 0.062727, val_acc: 25.687160
+Epoch [165], train_loss: 0.066939, val_loss: 0.063421, val_acc: 25.628954
+Epoch [166], train_loss: 0.067123, val_loss: 0.063465, val_acc: 25.600430
+Epoch [167], train_loss: 0.067100, val_loss: 0.063612, val_acc: 25.643641
+Epoch [168], train_loss: 0.067123, val_loss: 0.063314, val_acc: 25.691729
+Epoch [169], train_loss: 0.066875, val_loss: 0.063310, val_acc: 25.617189
+Epoch [170], train_loss: 0.067034, val_loss: 0.063394, val_acc: 25.596483
+Epoch [171], train_loss: 0.066988, val_loss: 0.063278, val_acc: 25.624846
+Epoch [172], train_loss: 0.067033, val_loss: 0.062953, val_acc: 25.607973
+Epoch [173], train_loss: 0.066712, val_loss: 0.063813, val_acc: 25.616770
+Epoch [174], train_loss: 0.066852, val_loss: 0.063104, val_acc: 25.591417
+Epoch [175], train_loss: 0.066932, val_loss: 0.063592, val_acc: 25.630791
+Epoch [176], train_loss: 0.066741, val_loss: 0.063458, val_acc: 25.628233
+Epoch [177], train_loss: 0.067048, val_loss: 0.063651, val_acc: 25.713282
+Epoch [178], train_loss: 0.066933, val_loss: 0.063314, val_acc: 25.657255
+Epoch [179], train_loss: 0.066489, val_loss: 0.062297, val_acc: 25.752775
+Epoch [180], train_loss: 0.066544, val_loss: 0.063330, val_acc: 25.736423
+Epoch [181], train_loss: 0.066641, val_loss: 0.062822, val_acc: 25.609354
+Epoch [182], train_loss: 0.066571, val_loss: 0.062753, val_acc: 25.705515
+Epoch [183], train_loss: 0.066877, val_loss: 0.062869, val_acc: 25.682652
+Epoch [184], train_loss: 0.066417, val_loss: 0.063120, val_acc: 25.734293
+Epoch [185], train_loss: 0.066620, val_loss: 0.063086, val_acc: 25.689774
+Epoch [186], train_loss: 0.066678, val_loss: 0.063452, val_acc: 25.709080
+Epoch [187], train_loss: 0.066301, val_loss: 0.063087, val_acc: 25.638849
+Epoch [188], train_loss: 0.066336, val_loss: 0.062714, val_acc: 25.731310
+Epoch [189], train_loss: 0.066309, val_loss: 0.062837, val_acc: 25.761599
+Epoch [190], train_loss: 0.066594, val_loss: 0.062685, val_acc: 25.656879
+Epoch [191], train_loss: 0.066317, val_loss: 0.062754, val_acc: 25.766451
+Epoch [192], train_loss: 0.066547, val_loss: 0.062388, val_acc: 25.751625
+Epoch [193], train_loss: 0.066164, val_loss: 0.062372, val_acc: 25.764336
+Epoch [194], train_loss: 0.066453, val_loss: 0.063042, val_acc: 25.725258
+Epoch [195], train_loss: 0.066663, val_loss: 0.062859, val_acc: 25.750107
+Epoch [196], train_loss: 0.066212, val_loss: 0.062879, val_acc: 25.733913
+Epoch [197], train_loss: 0.066236, val_loss: 0.063044, val_acc: 25.693302
+Epoch [198], train_loss: 0.066521, val_loss: 0.063279, val_acc: 25.631691
+Epoch [199], train_loss: 0.066226, val_loss: 0.062632, val_acc: 25.726093
+python3 ./UNet_V16.py  1486.49s user 1405.16s system 99% cpu 48:13.34 total
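Note comparing the two completed V16 runs above: with k=7 the 200 epochs took 2:35:24.38 of wall time and ended at val_acc 25.674393; with k=3 they took 48:13.34 and ended at val_acc 25.726093, i.e. roughly a 3x speedup for slightly better accuracy at this learning rate and epoch budget. The per-epoch arithmetic, using the zsh time totals copied from the two logs:

    # Wall-clock totals from the `time` summary line of each log above.
    runs = {
        "k=7": (2 * 3600 + 35 * 60 + 24.38, 200),  # 2:35:24.38 total
        "k=3": (48 * 60 + 13.34, 200),             # 48:13.34 total
    }
    for name, (wall_s, epochs) in runs.items():
        print(f"{name}: {wall_s / epochs:.1f} s/epoch")
    # -> about 46.6 s/epoch for k=7 vs 14.5 s/epoch for k=3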
diff --git a/UNet/Sim_logs/UNet_V17_25655858.log b/UNet/Sim_logs/UNet_V17_25655858.log
new file mode 100644
index 0000000000000000000000000000000000000000..8839e4123744fd83971cb9be0ab0caf1af7fedfc
--- /dev/null
+++ b/UNet/Sim_logs/UNet_V17_25655858.log
@@ -0,0 +1,720 @@
+(OK) Loading cuda 10.2.89
+(OK) Loading python 3.7.11
+(!!) The SciPy Stack is available: http://www.scipy.org/stackspec.html
+ Built with GCC compilers.
+Collecting torch==1.10.1
+  Using cached torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl (881.9 MB)
+Collecting typing-extensions
+  Using cached typing_extensions-4.1.1-py3-none-any.whl (26 kB)
+Installing collected packages: typing-extensions, torch
+  WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/yk138599/.local/bin' which is not on PATH.
+  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
+Successfully installed torch-1.10.1 typing-extensions-4.1.1
+WARNING: You are using pip version 21.2.4; however, version 22.0.3 is available.
+You should consider upgrading via the '/usr/local_rwth/sw/python/3.7.11/x86_64/bin/python3.7 -m pip install --upgrade pip' command.
+number of epochs: 700
+batchsize: 32
+learning rate: 3e-05
+kernel size is: 7
+ seed is: 373686838
+Epoch [0], train_loss: 0.225521, val_loss: 0.200477, val_acc: 4.888072
+Epoch [1], train_loss: 0.223437, val_loss: 0.212603, val_acc: 3.751951
+Epoch [2], train_loss: 0.221676, val_loss: 0.216037, val_acc: 3.269665
+Epoch [3], train_loss: 0.220573, val_loss: 0.216764, val_acc: 3.025600
+Epoch [4], train_loss: 0.219644, val_loss: 0.219904, val_acc: 2.834784
+Epoch [5], train_loss: 0.218642, val_loss: 0.216500, val_acc: 2.784642
+Epoch [6], train_loss: 0.218039, val_loss: 0.220258, val_acc: 2.612371
+Epoch [7], train_loss: 0.217145, val_loss: 0.221617, val_acc: 2.504683
+Epoch [8], train_loss: 0.216369, val_loss: 0.216452, val_acc: 2.604869
+Epoch [9], train_loss: 0.215530, val_loss: 0.214720, val_acc: 2.600123
+Epoch [10], train_loss: 0.215003, val_loss: 0.218270, val_acc: 2.448672
+Epoch [11], train_loss: 0.214278, val_loss: 0.216819, val_acc: 2.413450
+Epoch [12], train_loss: 0.213464, val_loss: 0.210153, val_acc: 2.595991
+Epoch [13], train_loss: 0.213043, val_loss: 0.219902, val_acc: 2.210801
+Epoch [14], train_loss: 0.212122, val_loss: 0.217774, val_acc: 2.231015
+Epoch [15], train_loss: 0.211479, val_loss: 0.212241, val_acc: 2.401925
+Epoch [16], train_loss: 0.210914, val_loss: 0.211274, val_acc: 2.384103
+Epoch [17], train_loss: 0.210247, val_loss: 0.214867, val_acc: 2.172542
+Epoch [18], train_loss: 0.209444, val_loss: 0.207535, val_acc: 2.373839
+Epoch [19], train_loss: 0.208590, val_loss: 0.209135, val_acc: 2.259588
+Epoch [20], train_loss: 0.207839, val_loss: 0.209158, val_acc: 2.230937
+Epoch [21], train_loss: 0.207303, val_loss: 0.207009, val_acc: 2.253109
+Epoch [22], train_loss: 0.206551, val_loss: 0.212044, val_acc: 2.063432
+Epoch [23], train_loss: 0.205845, val_loss: 0.207884, val_acc: 2.135679
+Epoch [24], train_loss: 0.204992, val_loss: 0.208809, val_acc: 2.055373
+Epoch [25], train_loss: 0.204498, val_loss: 0.208098, val_acc: 2.085763
+Epoch [26], train_loss: 0.203687, val_loss: 0.204685, val_acc: 2.127319
+Epoch [27], train_loss: 0.203231, val_loss: 0.208503, val_acc: 1.965854
+Epoch [28], train_loss: 0.202283, val_loss: 0.203037, val_acc: 2.105308
+Epoch [29], train_loss: 0.201684, val_loss: 0.200806, val_acc: 2.149603
+Epoch [30], train_loss: 0.200802, val_loss: 0.199929, val_acc: 2.112038
+Epoch [31], train_loss: 0.200237, val_loss: 0.202042, val_acc: 1.971708
+Epoch [32], train_loss: 0.199749, val_loss: 0.203477, val_acc: 1.957753
+Epoch [33], train_loss: 0.198904, val_loss: 0.203583, val_acc: 1.912094
+Epoch [34], train_loss: 0.198237, val_loss: 0.200761, val_acc: 1.965483
+Epoch [35], train_loss: 0.197921, val_loss: 0.199030, val_acc: 2.022635
+Epoch [36], train_loss: 0.197005, val_loss: 0.200322, val_acc: 1.930785
+Epoch [37], train_loss: 0.196380, val_loss: 0.199688, val_acc: 1.908981
+Epoch [38], train_loss: 0.195934, val_loss: 0.199648, val_acc: 1.859464
+Epoch [39], train_loss: 0.195092, val_loss: 0.192257, val_acc: 2.110854
+Epoch [40], train_loss: 0.194285, val_loss: 0.197222, val_acc: 1.901038
+Epoch [41], train_loss: 0.193439, val_loss: 0.193753, val_acc: 1.988905
+Epoch [42], train_loss: 0.192813, val_loss: 0.192422, val_acc: 2.006047
+Epoch [43], train_loss: 0.192063, val_loss: 0.196039, val_acc: 1.844386
+Epoch [44], train_loss: 0.191772, val_loss: 0.195996, val_acc: 1.845894
+Epoch [45], train_loss: 0.190709, val_loss: 0.188302, val_acc: 2.079348
+Epoch [46], train_loss: 0.189601, val_loss: 0.187847, val_acc: 2.010082
+Epoch [47], train_loss: 0.189493, val_loss: 0.189414, val_acc: 1.993049
+Epoch [48], train_loss: 0.188764, val_loss: 0.188920, val_acc: 2.003197
+Epoch [49], train_loss: 0.187950, val_loss: 0.190054, val_acc: 1.898002
+Epoch [50], train_loss: 0.187607, val_loss: 0.187015, val_acc: 1.995214
+Epoch [51], train_loss: 0.186898, val_loss: 0.191318, val_acc: 1.803796
+Epoch [52], train_loss: 0.185720, val_loss: 0.183704, val_acc: 2.053640
+Epoch [53], train_loss: 0.185539, val_loss: 0.185741, val_acc: 1.949381
+Epoch [54], train_loss: 0.184571, val_loss: 0.184574, val_acc: 1.948169
+Epoch [55], train_loss: 0.183670, val_loss: 0.181589, val_acc: 2.069604
+Epoch [56], train_loss: 0.183501, val_loss: 0.182628, val_acc: 2.022862
+Epoch [57], train_loss: 0.183036, val_loss: 0.183164, val_acc: 1.976804
+Epoch [58], train_loss: 0.182050, val_loss: 0.182462, val_acc: 1.970344
+Epoch [59], train_loss: 0.181497, val_loss: 0.176426, val_acc: 2.251236
+Epoch [60], train_loss: 0.180410, val_loss: 0.180928, val_acc: 2.020009
+Epoch [61], train_loss: 0.180090, val_loss: 0.181890, val_acc: 1.942317
+Epoch [62], train_loss: 0.179403, val_loss: 0.178498, val_acc: 2.103807
+Epoch [63], train_loss: 0.178963, val_loss: 0.178221, val_acc: 2.100923
+Epoch [64], train_loss: 0.177697, val_loss: 0.173459, val_acc: 2.409476
+Epoch [65], train_loss: 0.177235, val_loss: 0.173084, val_acc: 2.405458
+Epoch [66], train_loss: 0.176889, val_loss: 0.174664, val_acc: 2.319426
+Epoch [67], train_loss: 0.176452, val_loss: 0.179245, val_acc: 2.006182
+Epoch [68], train_loss: 0.175181, val_loss: 0.175894, val_acc: 2.215741
+Epoch [69], train_loss: 0.174318, val_loss: 0.173317, val_acc: 2.417897
+Epoch [70], train_loss: 0.174044, val_loss: 0.173060, val_acc: 2.441489
+Epoch [71], train_loss: 0.173476, val_loss: 0.178449, val_acc: 2.054877
+Epoch [72], train_loss: 0.172940, val_loss: 0.173776, val_acc: 2.403948
+Epoch [73], train_loss: 0.171794, val_loss: 0.171679, val_acc: 2.613915
+Epoch [74], train_loss: 0.171058, val_loss: 0.174150, val_acc: 2.452019
+Epoch [75], train_loss: 0.170533, val_loss: 0.171868, val_acc: 2.627979
+Epoch [76], train_loss: 0.169479, val_loss: 0.162778, val_acc: 3.637827
+Epoch [77], train_loss: 0.169241, val_loss: 0.170368, val_acc: 2.836561
+Epoch [78], train_loss: 0.168585, val_loss: 0.166053, val_acc: 3.225826
+Epoch [79], train_loss: 0.167631, val_loss: 0.167515, val_acc: 3.166553
+Epoch [80], train_loss: 0.167185, val_loss: 0.163044, val_acc: 3.667021
+Epoch [81], train_loss: 0.166521, val_loss: 0.166188, val_acc: 3.424889
+Epoch [82], train_loss: 0.165326, val_loss: 0.161534, val_acc: 4.113596
+Epoch [83], train_loss: 0.164786, val_loss: 0.163717, val_acc: 3.757610
+Epoch [84], train_loss: 0.164258, val_loss: 0.162451, val_acc: 3.905890
+Epoch [85], train_loss: 0.163815, val_loss: 0.159850, val_acc: 4.328480
+Epoch [86], train_loss: 0.162843, val_loss: 0.161515, val_acc: 4.226298
+Epoch [87], train_loss: 0.162450, val_loss: 0.162682, val_acc: 4.104952
+Epoch [88], train_loss: 0.162045, val_loss: 0.165397, val_acc: 3.897516
+Epoch [89], train_loss: 0.160993, val_loss: 0.163109, val_acc: 4.163593
+Epoch [90], train_loss: 0.160066, val_loss: 0.161320, val_acc: 4.436359
+Epoch [91], train_loss: 0.159892, val_loss: 0.156152, val_acc: 5.238584
+Epoch [92], train_loss: 0.158465, val_loss: 0.152244, val_acc: 5.791491
+Epoch [93], train_loss: 0.158460, val_loss: 0.156020, val_acc: 5.265517
+Epoch [94], train_loss: 0.157796, val_loss: 0.153379, val_acc: 5.780321
+Epoch [95], train_loss: 0.157271, val_loss: 0.156148, val_acc: 5.500933
+Epoch [96], train_loss: 0.156256, val_loss: 0.155614, val_acc: 5.586423
+Epoch [97], train_loss: 0.155665, val_loss: 0.153655, val_acc: 5.918782
+Epoch [98], train_loss: 0.154542, val_loss: 0.147252, val_acc: 6.802880
+Epoch [99], train_loss: 0.154238, val_loss: 0.150119, val_acc: 6.538148
+Epoch [100], train_loss: 0.153681, val_loss: 0.154423, val_acc: 6.096446
+Epoch [101], train_loss: 0.153417, val_loss: 0.151217, val_acc: 6.523224
+Epoch [102], train_loss: 0.152812, val_loss: 0.151885, val_acc: 6.316592
+Epoch [103], train_loss: 0.151498, val_loss: 0.147914, val_acc: 7.193071
+Epoch [104], train_loss: 0.151063, val_loss: 0.150618, val_acc: 6.842686
+Epoch [105], train_loss: 0.151157, val_loss: 0.151850, val_acc: 6.607367
+Epoch [106], train_loss: 0.150052, val_loss: 0.147296, val_acc: 7.369088
+Epoch [107], train_loss: 0.149502, val_loss: 0.149026, val_acc: 7.226380
+Epoch [108], train_loss: 0.148539, val_loss: 0.144379, val_acc: 7.964418
+Epoch [109], train_loss: 0.147944, val_loss: 0.147380, val_acc: 7.551367
+Epoch [110], train_loss: 0.147486, val_loss: 0.144187, val_acc: 8.050684
+Epoch [111], train_loss: 0.146470, val_loss: 0.145695, val_acc: 7.988792
+Epoch [112], train_loss: 0.145735, val_loss: 0.142288, val_acc: 8.519160
+Epoch [113], train_loss: 0.145768, val_loss: 0.140672, val_acc: 8.745532
+Epoch [114], train_loss: 0.145100, val_loss: 0.143965, val_acc: 8.269713
+Epoch [115], train_loss: 0.144308, val_loss: 0.140864, val_acc: 8.790869
+Epoch [116], train_loss: 0.143532, val_loss: 0.137265, val_acc: 9.177047
+Epoch [117], train_loss: 0.143284, val_loss: 0.141854, val_acc: 8.776222
+Epoch [118], train_loss: 0.142530, val_loss: 0.141324, val_acc: 8.926067
+Epoch [119], train_loss: 0.141904, val_loss: 0.140133, val_acc: 9.100611
+Epoch [120], train_loss: 0.141310, val_loss: 0.141725, val_acc: 8.985974
+Epoch [121], train_loss: 0.140627, val_loss: 0.132521, val_acc: 10.135745
+Epoch [122], train_loss: 0.140012, val_loss: 0.135146, val_acc: 9.781301
+Epoch [123], train_loss: 0.139661, val_loss: 0.135700, val_acc: 9.807665
+Epoch [124], train_loss: 0.138585, val_loss: 0.137136, val_acc: 9.886886
+Epoch [125], train_loss: 0.138363, val_loss: 0.134100, val_acc: 10.182715
+Epoch [126], train_loss: 0.137922, val_loss: 0.135531, val_acc: 10.033261
+Epoch [127], train_loss: 0.137016, val_loss: 0.131368, val_acc: 10.597752
+Epoch [128], train_loss: 0.136381, val_loss: 0.133935, val_acc: 10.372334
+Epoch [129], train_loss: 0.136135, val_loss: 0.131750, val_acc: 10.586087
+Epoch [130], train_loss: 0.135502, val_loss: 0.135654, val_acc: 10.307796
+Epoch [131], train_loss: 0.134683, val_loss: 0.130956, val_acc: 10.816577
+Epoch [132], train_loss: 0.134997, val_loss: 0.133410, val_acc: 10.495415
+Epoch [133], train_loss: 0.133752, val_loss: 0.131398, val_acc: 10.742564
+Epoch [134], train_loss: 0.133456, val_loss: 0.133012, val_acc: 10.619922
+Epoch [135], train_loss: 0.133211, val_loss: 0.130391, val_acc: 10.927408
+Epoch [136], train_loss: 0.131687, val_loss: 0.128874, val_acc: 11.184031
+Epoch [137], train_loss: 0.131366, val_loss: 0.131545, val_acc: 10.900283
+Epoch [138], train_loss: 0.130764, val_loss: 0.127685, val_acc: 11.313000
+Epoch [139], train_loss: 0.130742, val_loss: 0.131472, val_acc: 10.977825
+Epoch [140], train_loss: 0.128705, val_loss: 0.125636, val_acc: 11.542446
+Epoch [141], train_loss: 0.129333, val_loss: 0.130201, val_acc: 11.186658
+Epoch [142], train_loss: 0.128703, val_loss: 0.120777, val_acc: 11.951021
+Epoch [143], train_loss: 0.128413, val_loss: 0.125293, val_acc: 11.643603
+Epoch [144], train_loss: 0.127017, val_loss: 0.122922, val_acc: 11.888933
+Epoch [145], train_loss: 0.127071, val_loss: 0.121679, val_acc: 11.946896
+Epoch [146], train_loss: 0.126299, val_loss: 0.116835, val_acc: 12.316205
+Epoch [147], train_loss: 0.126205, val_loss: 0.122429, val_acc: 11.947044
+Epoch [148], train_loss: 0.125822, val_loss: 0.126783, val_acc: 11.623916
+Epoch [149], train_loss: 0.125377, val_loss: 0.124064, val_acc: 11.818813
+Epoch [150], train_loss: 0.124973, val_loss: 0.124129, val_acc: 11.851917
+Epoch [151], train_loss: 0.124109, val_loss: 0.123868, val_acc: 11.905753
+Epoch [152], train_loss: 0.123394, val_loss: 0.122260, val_acc: 12.033355
+Epoch [153], train_loss: 0.122888, val_loss: 0.120340, val_acc: 12.177044
+Epoch [154], train_loss: 0.122067, val_loss: 0.118461, val_acc: 12.285703
+Epoch [155], train_loss: 0.121631, val_loss: 0.115779, val_acc: 12.450863
+Epoch [156], train_loss: 0.121688, val_loss: 0.122534, val_acc: 12.041314
+Epoch [157], train_loss: 0.121518, val_loss: 0.119965, val_acc: 12.216788
+Epoch [158], train_loss: 0.120548, val_loss: 0.119485, val_acc: 12.253298
+Epoch [159], train_loss: 0.119763, val_loss: 0.116346, val_acc: 12.440938
+Epoch [160], train_loss: 0.119031, val_loss: 0.113353, val_acc: 12.622397
+Epoch [161], train_loss: 0.119153, val_loss: 0.115852, val_acc: 12.473422
+Epoch [162], train_loss: 0.118980, val_loss: 0.114015, val_acc: 12.579429
+Epoch [163], train_loss: 0.117501, val_loss: 0.113918, val_acc: 12.598813
+Epoch [164], train_loss: 0.117555, val_loss: 0.114576, val_acc: 12.571602
+Epoch [165], train_loss: 0.116716, val_loss: 0.112354, val_acc: 12.686863
+Epoch [166], train_loss: 0.116522, val_loss: 0.111913, val_acc: 12.732050
+Epoch [167], train_loss: 0.115713, val_loss: 0.113874, val_acc: 12.619524
+Epoch [168], train_loss: 0.115294, val_loss: 0.114549, val_acc: 12.598673
+Epoch [169], train_loss: 0.115488, val_loss: 0.108887, val_acc: 12.875306
+Epoch [170], train_loss: 0.115510, val_loss: 0.112050, val_acc: 12.714781
+Epoch [171], train_loss: 0.114274, val_loss: 0.108512, val_acc: 12.897728
+Epoch [172], train_loss: 0.113924, val_loss: 0.112165, val_acc: 12.714762
+Epoch [173], train_loss: 0.113229, val_loss: 0.107493, val_acc: 12.980722
+Epoch [174], train_loss: 0.113190, val_loss: 0.114251, val_acc: 12.643078
+Epoch [175], train_loss: 0.112213, val_loss: 0.109611, val_acc: 12.877857
+Epoch [176], train_loss: 0.111712, val_loss: 0.110335, val_acc: 12.843200
+Epoch [177], train_loss: 0.111917, val_loss: 0.111173, val_acc: 12.793277
+Epoch [178], train_loss: 0.111648, val_loss: 0.106399, val_acc: 13.061252
+Epoch [179], train_loss: 0.111139, val_loss: 0.109732, val_acc: 12.877392
+Epoch [180], train_loss: 0.110259, val_loss: 0.108615, val_acc: 12.951241
+Epoch [181], train_loss: 0.109542, val_loss: 0.107080, val_acc: 13.051385
+Epoch [182], train_loss: 0.109460, val_loss: 0.108189, val_acc: 12.991185
+Epoch [183], train_loss: 0.108984, val_loss: 0.106416, val_acc: 13.132536
+Epoch [184], train_loss: 0.108142, val_loss: 0.106298, val_acc: 13.131763
+Epoch [185], train_loss: 0.108212, val_loss: 0.105941, val_acc: 13.146985
+Epoch [186], train_loss: 0.108120, val_loss: 0.103066, val_acc: 13.334968
+Epoch [187], train_loss: 0.107920, val_loss: 0.103902, val_acc: 13.314610
+Epoch [188], train_loss: 0.106895, val_loss: 0.100835, val_acc: 13.518340
+Epoch [189], train_loss: 0.106600, val_loss: 0.107190, val_acc: 13.098133
+Epoch [190], train_loss: 0.105943, val_loss: 0.102746, val_acc: 13.390917
+Epoch [191], train_loss: 0.105103, val_loss: 0.105061, val_acc: 13.305601
+Epoch [192], train_loss: 0.105119, val_loss: 0.104230, val_acc: 13.358141
+Epoch [193], train_loss: 0.105331, val_loss: 0.103973, val_acc: 13.350530
+Epoch [194], train_loss: 0.104381, val_loss: 0.102944, val_acc: 13.431730
+Epoch [195], train_loss: 0.104179, val_loss: 0.099643, val_acc: 13.685327
+Epoch [196], train_loss: 0.104083, val_loss: 0.106469, val_acc: 13.221363
+Epoch [197], train_loss: 0.103320, val_loss: 0.102523, val_acc: 13.540021
+Epoch [198], train_loss: 0.102630, val_loss: 0.103219, val_acc: 13.558242
+Epoch [199], train_loss: 0.102809, val_loss: 0.104471, val_acc: 13.398151
+Epoch [200], train_loss: 0.102305, val_loss: 0.100552, val_acc: 13.729774
+Epoch [201], train_loss: 0.101473, val_loss: 0.100068, val_acc: 13.868263
+Epoch [202], train_loss: 0.101490, val_loss: 0.101447, val_acc: 13.678704
+Epoch [203], train_loss: 0.101638, val_loss: 0.102626, val_acc: 13.627562
+Epoch [204], train_loss: 0.100968, val_loss: 0.102584, val_acc: 13.594855
+Epoch [205], train_loss: 0.100455, val_loss: 0.100870, val_acc: 13.855977
+Epoch [206], train_loss: 0.100002, val_loss: 0.100003, val_acc: 13.902484
+Epoch [207], train_loss: 0.099239, val_loss: 0.095693, val_acc: 14.355294
+Epoch [208], train_loss: 0.099848, val_loss: 0.101714, val_acc: 13.840943
+Epoch [209], train_loss: 0.098578, val_loss: 0.102176, val_acc: 13.788995
+Epoch [210], train_loss: 0.098694, val_loss: 0.098604, val_acc: 14.149654
+Epoch [211], train_loss: 0.097349, val_loss: 0.098924, val_acc: 14.280993
+Epoch [212], train_loss: 0.097884, val_loss: 0.099260, val_acc: 14.135083
+Epoch [213], train_loss: 0.097660, val_loss: 0.096434, val_acc: 14.562165
+Epoch [214], train_loss: 0.097362, val_loss: 0.098613, val_acc: 14.289005
+Epoch [215], train_loss: 0.097417, val_loss: 0.099874, val_acc: 14.322172
+Epoch [216], train_loss: 0.097042, val_loss: 0.102028, val_acc: 13.938865
+Epoch [217], train_loss: 0.096212, val_loss: 0.100676, val_acc: 14.155949
+Epoch [218], train_loss: 0.095484, val_loss: 0.095439, val_acc: 14.743315
+Epoch [219], train_loss: 0.095789, val_loss: 0.098693, val_acc: 14.447923
+Epoch [220], train_loss: 0.095131, val_loss: 0.096241, val_acc: 14.712017
+Epoch [221], train_loss: 0.095276, val_loss: 0.094719, val_acc: 14.917500
+Epoch [222], train_loss: 0.094695, val_loss: 0.096323, val_acc: 14.744534
+Epoch [223], train_loss: 0.094084, val_loss: 0.094041, val_acc: 15.149668
+Epoch [224], train_loss: 0.093505, val_loss: 0.096626, val_acc: 14.820982
+Epoch [225], train_loss: 0.094246, val_loss: 0.095864, val_acc: 14.851582
+Epoch [226], train_loss: 0.092550, val_loss: 0.094130, val_acc: 15.274075
+Epoch [227], train_loss: 0.092968, val_loss: 0.095432, val_acc: 15.203457
+Epoch [228], train_loss: 0.092531, val_loss: 0.093024, val_acc: 15.414883
+Epoch [229], train_loss: 0.092402, val_loss: 0.096094, val_acc: 15.059305
+Epoch [230], train_loss: 0.091977, val_loss: 0.093956, val_acc: 15.447883
+Epoch [231], train_loss: 0.092282, val_loss: 0.095054, val_acc: 15.315081
+Epoch [232], train_loss: 0.091179, val_loss: 0.091454, val_acc: 15.747750
+Epoch [233], train_loss: 0.090675, val_loss: 0.094361, val_acc: 15.756608
+Epoch [234], train_loss: 0.090947, val_loss: 0.095411, val_acc: 15.518085
+Epoch [235], train_loss: 0.090493, val_loss: 0.093424, val_acc: 15.828175
+Epoch [236], train_loss: 0.090772, val_loss: 0.097338, val_acc: 15.216061
+Epoch [237], train_loss: 0.090088, val_loss: 0.092929, val_acc: 15.897971
+Epoch [238], train_loss: 0.089947, val_loss: 0.095261, val_acc: 15.667640
+Epoch [239], train_loss: 0.089216, val_loss: 0.093184, val_acc: 15.935421
+Epoch [240], train_loss: 0.089996, val_loss: 0.090074, val_acc: 16.350595
+Epoch [241], train_loss: 0.089256, val_loss: 0.092822, val_acc: 16.145176
+Epoch [242], train_loss: 0.088422, val_loss: 0.088942, val_acc: 16.669399
+Epoch [243], train_loss: 0.088515, val_loss: 0.096400, val_acc: 16.334715
+Epoch [244], train_loss: 0.088500, val_loss: 0.088868, val_acc: 16.484138
+Epoch [245], train_loss: 0.087992, val_loss: 0.091142, val_acc: 16.699986
+Epoch [246], train_loss: 0.087290, val_loss: 0.091868, val_acc: 16.728291
+Epoch [247], train_loss: 0.088021, val_loss: 0.093184, val_acc: 16.704596
+Epoch [248], train_loss: 0.087184, val_loss: 0.095372, val_acc: 16.562603
+Epoch [249], train_loss: 0.086391, val_loss: 0.090152, val_acc: 16.863943
+Epoch [250], train_loss: 0.086826, val_loss: 0.090776, val_acc: 17.232859
+Epoch [251], train_loss: 0.087188, val_loss: 0.088603, val_acc: 17.504694
+Epoch [252], train_loss: 0.086086, val_loss: 0.088973, val_acc: 17.624674
+Epoch [253], train_loss: 0.086872, val_loss: 0.090786, val_acc: 17.243994
+Epoch [254], train_loss: 0.086320, val_loss: 0.091760, val_acc: 17.463425
+Epoch [255], train_loss: 0.085822, val_loss: 0.088585, val_acc: 18.080057
+Epoch [256], train_loss: 0.086199, val_loss: 0.093042, val_acc: 17.269024
+Epoch [257], train_loss: 0.084955, val_loss: 0.085786, val_acc: 19.054554
+Epoch [258], train_loss: 0.084757, val_loss: 0.083849, val_acc: 18.762280
+Epoch [259], train_loss: 0.084803, val_loss: 0.087536, val_acc: 18.502058
+Epoch [260], train_loss: 0.084945, val_loss: 0.086727, val_acc: 18.074024
+Epoch [261], train_loss: 0.084606, val_loss: 0.086579, val_acc: 18.099579
+Epoch [262], train_loss: 0.083866, val_loss: 0.085451, val_acc: 18.604822
+Epoch [263], train_loss: 0.083266, val_loss: 0.083919, val_acc: 18.967142
+Epoch [264], train_loss: 0.083759, val_loss: 0.085106, val_acc: 18.983711
+Epoch [265], train_loss: 0.083785, val_loss: 0.085792, val_acc: 18.737696
+Epoch [266], train_loss: 0.083815, val_loss: 0.089189, val_acc: 18.712194
+Epoch [267], train_loss: 0.083525, val_loss: 0.086734, val_acc: 18.707359
+Epoch [268], train_loss: 0.083297, val_loss: 0.082766, val_acc: 19.255033
+Epoch [269], train_loss: 0.083014, val_loss: 0.083638, val_acc: 19.110022
+Epoch [270], train_loss: 0.082221, val_loss: 0.085026, val_acc: 19.229252
+Epoch [271], train_loss: 0.082102, val_loss: 0.086176, val_acc: 19.213478
+Epoch [272], train_loss: 0.082207, val_loss: 0.082825, val_acc: 19.920815
+Epoch [273], train_loss: 0.082071, val_loss: 0.084074, val_acc: 19.629463
+Epoch [274], train_loss: 0.081864, val_loss: 0.084171, val_acc: 19.546871
+Epoch [275], train_loss: 0.081854, val_loss: 0.084368, val_acc: 19.903879
+Epoch [276], train_loss: 0.081781, val_loss: 0.081870, val_acc: 20.218567
+Epoch [277], train_loss: 0.080908, val_loss: 0.079374, val_acc: 20.779495
+Epoch [278], train_loss: 0.081535, val_loss: 0.082000, val_acc: 20.494452
+Epoch [279], train_loss: 0.081019, val_loss: 0.082427, val_acc: 20.457493
+Epoch [280], train_loss: 0.081284, val_loss: 0.083263, val_acc: 20.260782
+Epoch [281], train_loss: 0.081374, val_loss: 0.081873, val_acc: 20.379150
+Epoch [282], train_loss: 0.080730, val_loss: 0.081622, val_acc: 20.324417
+Epoch [283], train_loss: 0.080986, val_loss: 0.082876, val_acc: 20.562754
+Epoch [284], train_loss: 0.080567, val_loss: 0.080701, val_acc: 20.725595
+Epoch [285], train_loss: 0.079967, val_loss: 0.080232, val_acc: 20.963263
+Epoch [286], train_loss: 0.080428, val_loss: 0.080660, val_acc: 21.264912
+Epoch [287], train_loss: 0.079508, val_loss: 0.078157, val_acc: 21.301420
+Epoch [288], train_loss: 0.079719, val_loss: 0.077887, val_acc: 21.246996
+Epoch [289], train_loss: 0.080088, val_loss: 0.079419, val_acc: 20.679556
+Epoch [290], train_loss: 0.079218, val_loss: 0.076994, val_acc: 21.542936
+Epoch [291], train_loss: 0.079526, val_loss: 0.076356, val_acc: 22.095728
+Epoch [292], train_loss: 0.079349, val_loss: 0.075007, val_acc: 21.814531
+Epoch [293], train_loss: 0.078963, val_loss: 0.073357, val_acc: 22.246147
+Epoch [294], train_loss: 0.079363, val_loss: 0.075666, val_acc: 22.104734
+Epoch [295], train_loss: 0.078546, val_loss: 0.072020, val_acc: 22.473478
+Epoch [296], train_loss: 0.079115, val_loss: 0.072522, val_acc: 22.343428
+Epoch [297], train_loss: 0.078842, val_loss: 0.073411, val_acc: 22.142817
+Epoch [298], train_loss: 0.078640, val_loss: 0.072111, val_acc: 22.642056
+Epoch [299], train_loss: 0.078745, val_loss: 0.071351, val_acc: 22.611338
+Epoch [300], train_loss: 0.077469, val_loss: 0.071391, val_acc: 23.257679
+Epoch [301], train_loss: 0.077894, val_loss: 0.071810, val_acc: 22.794500
+Epoch [302], train_loss: 0.078097, val_loss: 0.071907, val_acc: 22.899117
+Epoch [303], train_loss: 0.077950, val_loss: 0.071763, val_acc: 22.850365
+Epoch [304], train_loss: 0.078285, val_loss: 0.072697, val_acc: 22.783119
+Epoch [305], train_loss: 0.077943, val_loss: 0.071184, val_acc: 23.245777
+Epoch [306], train_loss: 0.077249, val_loss: 0.071407, val_acc: 23.100761
+Epoch [307], train_loss: 0.077674, val_loss: 0.071822, val_acc: 23.184509
+Epoch [308], train_loss: 0.077445, val_loss: 0.071279, val_acc: 23.158426
+Epoch [309], train_loss: 0.077963, val_loss: 0.071372, val_acc: 23.373583
+Epoch [310], train_loss: 0.077794, val_loss: 0.071320, val_acc: 23.346653
+Epoch [311], train_loss: 0.077380, val_loss: 0.070997, val_acc: 23.347015
+Epoch [312], train_loss: 0.077436, val_loss: 0.070727, val_acc: 23.638199
+Epoch [313], train_loss: 0.076850, val_loss: 0.070921, val_acc: 23.673035
+Epoch [314], train_loss: 0.077175, val_loss: 0.070903, val_acc: 23.503597
+Epoch [315], train_loss: 0.076916, val_loss: 0.070642, val_acc: 23.612350
+Epoch [316], train_loss: 0.077250, val_loss: 0.070465, val_acc: 23.999195
+Epoch [317], train_loss: 0.076930, val_loss: 0.071309, val_acc: 23.627354
+Epoch [318], train_loss: 0.077032, val_loss: 0.070931, val_acc: 23.763678
+Epoch [319], train_loss: 0.077192, val_loss: 0.070647, val_acc: 23.885544
+Epoch [320], train_loss: 0.077381, val_loss: 0.071194, val_acc: 23.958788
+Epoch [321], train_loss: 0.076792, val_loss: 0.071211, val_acc: 23.894426
+Epoch [322], train_loss: 0.076573, val_loss: 0.070681, val_acc: 24.229385
+Epoch [323], train_loss: 0.076114, val_loss: 0.070673, val_acc: 24.154079
+Epoch [324], train_loss: 0.077811, val_loss: 0.070284, val_acc: 23.940901
+Epoch [325], train_loss: 0.077451, val_loss: 0.070346, val_acc: 24.166716
+Epoch [326], train_loss: 0.076599, val_loss: 0.070785, val_acc: 24.275305
+Epoch [327], train_loss: 0.076792, val_loss: 0.071639, val_acc: 23.789049
+Epoch [328], train_loss: 0.076512, val_loss: 0.070211, val_acc: 24.421040
+Epoch [329], train_loss: 0.076367, val_loss: 0.070596, val_acc: 24.349596
+Epoch [330], train_loss: 0.076600, val_loss: 0.070568, val_acc: 24.171778
+Epoch [331], train_loss: 0.076445, val_loss: 0.069939, val_acc: 24.371338
+Epoch [332], train_loss: 0.076460, val_loss: 0.070825, val_acc: 24.195522
+Epoch [333], train_loss: 0.076461, val_loss: 0.070659, val_acc: 24.157898
+Epoch [334], train_loss: 0.076592, val_loss: 0.070626, val_acc: 24.275621
+Epoch [335], train_loss: 0.076725, val_loss: 0.071014, val_acc: 24.235291
+Epoch [336], train_loss: 0.075901, val_loss: 0.071201, val_acc: 24.229855
+Epoch [337], train_loss: 0.076553, val_loss: 0.070656, val_acc: 24.285795
+Epoch [338], train_loss: 0.075796, val_loss: 0.070846, val_acc: 24.405552
+Epoch [339], train_loss: 0.076086, val_loss: 0.070114, val_acc: 24.305590
+Epoch [340], train_loss: 0.076300, val_loss: 0.070924, val_acc: 24.202412
+Epoch [341], train_loss: 0.076395, val_loss: 0.071000, val_acc: 24.362644
+Epoch [342], train_loss: 0.075953, val_loss: 0.070552, val_acc: 24.492447
+Epoch [343], train_loss: 0.076550, val_loss: 0.069794, val_acc: 24.352814
+Epoch [344], train_loss: 0.076183, val_loss: 0.069851, val_acc: 24.637802
+Epoch [345], train_loss: 0.076731, val_loss: 0.070019, val_acc: 24.491779
+Epoch [346], train_loss: 0.075769, val_loss: 0.071203, val_acc: 24.418249
+Epoch [347], train_loss: 0.076908, val_loss: 0.069863, val_acc: 24.506372
+Epoch [348], train_loss: 0.076843, val_loss: 0.070526, val_acc: 24.400377
+Epoch [349], train_loss: 0.076256, val_loss: 0.070385, val_acc: 24.551140
+Epoch [350], train_loss: 0.076875, val_loss: 0.071945, val_acc: 24.121635
+Epoch [351], train_loss: 0.075900, val_loss: 0.070210, val_acc: 24.639420
+Epoch [352], train_loss: 0.076817, val_loss: 0.069980, val_acc: 24.498306
+Epoch [353], train_loss: 0.075689, val_loss: 0.070502, val_acc: 24.608931
+Epoch [354], train_loss: 0.075927, val_loss: 0.070882, val_acc: 24.521439
+Epoch [355], train_loss: 0.076493, val_loss: 0.071985, val_acc: 24.149048
+Epoch [356], train_loss: 0.076528, val_loss: 0.071591, val_acc: 24.267262
+Epoch [357], train_loss: 0.076092, val_loss: 0.070699, val_acc: 24.647509
+Epoch [358], train_loss: 0.077009, val_loss: 0.070816, val_acc: 24.318102
+Epoch [359], train_loss: 0.076190, val_loss: 0.070307, val_acc: 24.652073
+Epoch [360], train_loss: 0.075739, val_loss: 0.070277, val_acc: 24.686193
+Epoch [361], train_loss: 0.076121, val_loss: 0.071700, val_acc: 24.248198
+Epoch [362], train_loss: 0.076022, val_loss: 0.069841, val_acc: 24.661228
+Epoch [363], train_loss: 0.076420, val_loss: 0.070763, val_acc: 24.432346
+Epoch [364], train_loss: 0.075518, val_loss: 0.070006, val_acc: 24.774664
+Epoch [365], train_loss: 0.076572, val_loss: 0.070109, val_acc: 24.513390
+Epoch [366], train_loss: 0.075918, val_loss: 0.069809, val_acc: 24.523125
+Epoch [367], train_loss: 0.076111, val_loss: 0.070411, val_acc: 24.540627
+Epoch [368], train_loss: 0.076221, val_loss: 0.070471, val_acc: 24.594376
+Epoch [369], train_loss: 0.076994, val_loss: 0.070309, val_acc: 24.487837
+Epoch [370], train_loss: 0.076219, val_loss: 0.070056, val_acc: 24.653826
+Epoch [371], train_loss: 0.075976, val_loss: 0.070755, val_acc: 24.621819
+Epoch [372], train_loss: 0.075992, val_loss: 0.070668, val_acc: 24.492039
+Epoch [373], train_loss: 0.075819, val_loss: 0.070284, val_acc: 24.634964
+Epoch [374], train_loss: 0.075758, val_loss: 0.070988, val_acc: 24.467863
+Epoch [375], train_loss: 0.076888, val_loss: 0.071378, val_acc: 24.036341
+Epoch [376], train_loss: 0.075787, val_loss: 0.069171, val_acc: 24.856047
+Epoch [377], train_loss: 0.075944, val_loss: 0.071084, val_acc: 24.425425
+Epoch [378], train_loss: 0.076033, val_loss: 0.071073, val_acc: 24.418659
+Epoch [379], train_loss: 0.075786, val_loss: 0.070141, val_acc: 24.627804
+Epoch [380], train_loss: 0.076283, val_loss: 0.070269, val_acc: 24.683105
+Epoch [381], train_loss: 0.076576, val_loss: 0.070056, val_acc: 24.609512
+Epoch [382], train_loss: 0.076171, val_loss: 0.070844, val_acc: 24.416599
+Epoch [383], train_loss: 0.075550, val_loss: 0.069197, val_acc: 24.799341
+Epoch [384], train_loss: 0.076540, val_loss: 0.069586, val_acc: 24.687487
+Epoch [385], train_loss: 0.075773, val_loss: 0.070591, val_acc: 24.570108
+Epoch [386], train_loss: 0.075704, val_loss: 0.070582, val_acc: 24.575327
+Epoch [387], train_loss: 0.075778, val_loss: 0.069339, val_acc: 24.834801
+Epoch [388], train_loss: 0.076309, val_loss: 0.070982, val_acc: 24.342163
+Epoch [389], train_loss: 0.075955, val_loss: 0.070816, val_acc: 24.452393
+Epoch [390], train_loss: 0.076022, val_loss: 0.069590, val_acc: 24.614897
+Epoch [391], train_loss: 0.075872, val_loss: 0.070596, val_acc: 24.543787
+Epoch [392], train_loss: 0.075869, val_loss: 0.069139, val_acc: 24.860853
+Epoch [393], train_loss: 0.076229, val_loss: 0.070044, val_acc: 24.567900
+Epoch [394], train_loss: 0.074681, val_loss: 0.070291, val_acc: 24.658741
+Epoch [395], train_loss: 0.076294, val_loss: 0.070943, val_acc: 24.472326
+Epoch [396], train_loss: 0.075509, val_loss: 0.070833, val_acc: 24.443398
+Epoch [397], train_loss: 0.075259, val_loss: 0.069952, val_acc: 24.728539
+Epoch [398], train_loss: 0.075828, val_loss: 0.069431, val_acc: 24.794260
+Epoch [399], train_loss: 0.075480, val_loss: 0.069052, val_acc: 24.851952
+Epoch [400], train_loss: 0.075205, val_loss: 0.069296, val_acc: 24.907017
+Epoch [401], train_loss: 0.075738, val_loss: 0.069560, val_acc: 24.729822
+Epoch [402], train_loss: 0.075118, val_loss: 0.070161, val_acc: 24.701050
+Epoch [403], train_loss: 0.076127, val_loss: 0.070196, val_acc: 24.463913
+Epoch [404], train_loss: 0.075473, val_loss: 0.070310, val_acc: 24.572536
+Epoch [405], train_loss: 0.075703, val_loss: 0.069495, val_acc: 24.606087
+Epoch [406], train_loss: 0.075740, val_loss: 0.070584, val_acc: 24.566422
+Epoch [407], train_loss: 0.075880, val_loss: 0.069874, val_acc: 24.619169
+Epoch [408], train_loss: 0.076534, val_loss: 0.069611, val_acc: 24.679876
+Epoch [409], train_loss: 0.076260, val_loss: 0.069685, val_acc: 24.643616
+Epoch [410], train_loss: 0.076362, val_loss: 0.070039, val_acc: 24.547279
+Epoch [411], train_loss: 0.075981, val_loss: 0.069543, val_acc: 24.688694
+Epoch [412], train_loss: 0.075681, val_loss: 0.069544, val_acc: 24.704983
+Epoch [413], train_loss: 0.076230, val_loss: 0.070611, val_acc: 24.388157
+Epoch [414], train_loss: 0.075668, val_loss: 0.069763, val_acc: 24.702723
+Epoch [415], train_loss: 0.075534, val_loss: 0.069744, val_acc: 24.624735
+Epoch [416], train_loss: 0.075828, val_loss: 0.071714, val_acc: 24.226368
+Epoch [417], train_loss: 0.075831, val_loss: 0.070439, val_acc: 24.446308
+Epoch [418], train_loss: 0.076149, val_loss: 0.070533, val_acc: 24.448210
+Epoch [419], train_loss: 0.075708, val_loss: 0.070327, val_acc: 24.506060
+Epoch [420], train_loss: 0.075799, val_loss: 0.069293, val_acc: 24.794205
+Epoch [421], train_loss: 0.075730, val_loss: 0.069076, val_acc: 24.781359
+Epoch [422], train_loss: 0.075473, val_loss: 0.070758, val_acc: 24.528984
+Epoch [423], train_loss: 0.075295, val_loss: 0.069702, val_acc: 24.748390
+Epoch [424], train_loss: 0.075646, val_loss: 0.069860, val_acc: 24.727348
+Epoch [425], train_loss: 0.075397, val_loss: 0.070253, val_acc: 24.626167
+Epoch [426], train_loss: 0.075474, val_loss: 0.069833, val_acc: 24.547180
+Epoch [427], train_loss: 0.076276, val_loss: 0.069298, val_acc: 24.647209
+Epoch [428], train_loss: 0.074779, val_loss: 0.068806, val_acc: 24.915800
+Epoch [429], train_loss: 0.075058, val_loss: 0.069331, val_acc: 24.833012
+Epoch [430], train_loss: 0.075870, val_loss: 0.069760, val_acc: 24.604601
+Epoch [431], train_loss: 0.075903, val_loss: 0.070406, val_acc: 24.410114
+Epoch [432], train_loss: 0.075639, val_loss: 0.070535, val_acc: 24.391472
+Epoch [433], train_loss: 0.075156, val_loss: 0.069880, val_acc: 24.650904
+Epoch [434], train_loss: 0.075551, val_loss: 0.070164, val_acc: 24.482533
+Epoch [435], train_loss: 0.075335, val_loss: 0.069585, val_acc: 24.753885
+Epoch [436], train_loss: 0.075749, val_loss: 0.068875, val_acc: 24.874670
+Epoch [437], train_loss: 0.075607, val_loss: 0.069567, val_acc: 24.597744
+Epoch [438], train_loss: 0.075939, val_loss: 0.068797, val_acc: 24.953398
+Epoch [439], train_loss: 0.075799, val_loss: 0.069555, val_acc: 24.600672
+Epoch [440], train_loss: 0.075367, val_loss: 0.071246, val_acc: 24.335981
+Epoch [441], train_loss: 0.075713, val_loss: 0.069576, val_acc: 24.729164
+Epoch [442], train_loss: 0.075635, val_loss: 0.068757, val_acc: 24.750288
+Epoch [443], train_loss: 0.075772, val_loss: 0.069497, val_acc: 24.704136
+Epoch [444], train_loss: 0.074888, val_loss: 0.070202, val_acc: 24.535603
+Epoch [445], train_loss: 0.075142, val_loss: 0.069257, val_acc: 24.819935
+Epoch [446], train_loss: 0.075606, val_loss: 0.069292, val_acc: 24.825394
+Epoch [447], train_loss: 0.075658, val_loss: 0.068641, val_acc: 24.882851
+Epoch [448], train_loss: 0.075554, val_loss: 0.069729, val_acc: 24.653234
+Epoch [449], train_loss: 0.075352, val_loss: 0.069391, val_acc: 24.677191
+Epoch [450], train_loss: 0.075566, val_loss: 0.069983, val_acc: 24.601891
+Epoch [451], train_loss: 0.075196, val_loss: 0.069134, val_acc: 24.838860
+Epoch [452], train_loss: 0.075236, val_loss: 0.068846, val_acc: 24.895107
+Epoch [453], train_loss: 0.075443, val_loss: 0.070017, val_acc: 24.702642
+Epoch [454], train_loss: 0.075058, val_loss: 0.069538, val_acc: 24.794815
+Epoch [455], train_loss: 0.075199, val_loss: 0.069934, val_acc: 24.673288
+Epoch [456], train_loss: 0.074784, val_loss: 0.068960, val_acc: 24.902525
+Epoch [457], train_loss: 0.075582, val_loss: 0.069139, val_acc: 24.844568
+Epoch [458], train_loss: 0.075285, val_loss: 0.070254, val_acc: 24.638706
+Epoch [459], train_loss: 0.076327, val_loss: 0.069986, val_acc: 24.604424
+Epoch [460], train_loss: 0.075063, val_loss: 0.069422, val_acc: 24.716663
+Epoch [461], train_loss: 0.075672, val_loss: 0.069439, val_acc: 24.687065
+Epoch [462], train_loss: 0.075690, val_loss: 0.070258, val_acc: 24.517735
+Epoch [463], train_loss: 0.075633, val_loss: 0.069533, val_acc: 24.722071
+Epoch [464], train_loss: 0.075632, val_loss: 0.069226, val_acc: 24.661505
+Epoch [465], train_loss: 0.075929, val_loss: 0.069213, val_acc: 24.730049
+Epoch [466], train_loss: 0.075279, val_loss: 0.069667, val_acc: 24.662964
+Epoch [467], train_loss: 0.075123, val_loss: 0.069849, val_acc: 24.687363
+Epoch [468], train_loss: 0.075754, val_loss: 0.068497, val_acc: 24.886225
+Epoch [469], train_loss: 0.074969, val_loss: 0.069625, val_acc: 24.762264
+Epoch [470], train_loss: 0.075372, val_loss: 0.070367, val_acc: 24.650772
+Epoch [471], train_loss: 0.075647, val_loss: 0.069651, val_acc: 24.696733
+Epoch [472], train_loss: 0.074954, val_loss: 0.068321, val_acc: 24.982605
+Epoch [473], train_loss: 0.075921, val_loss: 0.069465, val_acc: 24.690783
+Epoch [474], train_loss: 0.075232, val_loss: 0.069537, val_acc: 24.728056
+Epoch [475], train_loss: 0.075665, val_loss: 0.070222, val_acc: 24.481684
+Epoch [476], train_loss: 0.075110, val_loss: 0.070096, val_acc: 24.682806
+Epoch [477], train_loss: 0.075165, val_loss: 0.069448, val_acc: 24.820549
+Epoch [478], train_loss: 0.075472, val_loss: 0.069102, val_acc: 24.836391
+Epoch [479], train_loss: 0.075177, val_loss: 0.068925, val_acc: 24.960016
+Epoch [480], train_loss: 0.075031, val_loss: 0.069066, val_acc: 24.819004
+Epoch [481], train_loss: 0.075384, val_loss: 0.070368, val_acc: 24.507629
+Epoch [482], train_loss: 0.075838, val_loss: 0.068943, val_acc: 24.785515
+Epoch [483], train_loss: 0.075531, val_loss: 0.068551, val_acc: 24.899302
+Epoch [484], train_loss: 0.075732, val_loss: 0.069292, val_acc: 24.744791
+Epoch [485], train_loss: 0.075800, val_loss: 0.069766, val_acc: 24.635136
+Epoch [486], train_loss: 0.075437, val_loss: 0.068514, val_acc: 24.987902
+Epoch [487], train_loss: 0.075757, val_loss: 0.069560, val_acc: 24.659050
+Epoch [488], train_loss: 0.075939, val_loss: 0.070316, val_acc: 24.445646
+Epoch [489], train_loss: 0.075479, val_loss: 0.068595, val_acc: 24.900023
+Epoch [490], train_loss: 0.075005, val_loss: 0.068878, val_acc: 24.814182
+Epoch [491], train_loss: 0.074993, val_loss: 0.068967, val_acc: 24.860813
+Epoch [492], train_loss: 0.075465, val_loss: 0.070180, val_acc: 24.553217
+Epoch [493], train_loss: 0.075592, val_loss: 0.069969, val_acc: 24.586128
+Epoch [494], train_loss: 0.075850, val_loss: 0.070261, val_acc: 24.415331
+Epoch [495], train_loss: 0.075239, val_loss: 0.069424, val_acc: 24.698418
+Epoch [496], train_loss: 0.075793, val_loss: 0.069795, val_acc: 24.664711
+Epoch [497], train_loss: 0.075208, val_loss: 0.069631, val_acc: 24.760466
+Epoch [498], train_loss: 0.075909, val_loss: 0.070081, val_acc: 24.588179
+Epoch [499], train_loss: 0.075257, val_loss: 0.069079, val_acc: 24.835526
+Epoch [500], train_loss: 0.075346, val_loss: 0.072060, val_acc: 24.084436
+Epoch [501], train_loss: 0.075656, val_loss: 0.069481, val_acc: 24.717051
+Epoch [502], train_loss: 0.075281, val_loss: 0.069539, val_acc: 24.776688
+Epoch [503], train_loss: 0.075493, val_loss: 0.069336, val_acc: 24.794392
+Epoch [504], train_loss: 0.074972, val_loss: 0.070126, val_acc: 24.682827
+Epoch [505], train_loss: 0.075493, val_loss: 0.069357, val_acc: 24.843922
+Epoch [506], train_loss: 0.075280, val_loss: 0.068822, val_acc: 24.927219
+Epoch [507], train_loss: 0.076115, val_loss: 0.069635, val_acc: 24.732645
+Epoch [508], train_loss: 0.075710, val_loss: 0.069450, val_acc: 24.821960
+Epoch [509], train_loss: 0.075303, val_loss: 0.069189, val_acc: 24.674152
+Epoch [510], train_loss: 0.075036, val_loss: 0.069191, val_acc: 24.849209
+Epoch [511], train_loss: 0.074854, val_loss: 0.069456, val_acc: 24.783627
+Epoch [512], train_loss: 0.074962, val_loss: 0.069889, val_acc: 24.655197
+Epoch [513], train_loss: 0.074911, val_loss: 0.069636, val_acc: 24.619469
+Epoch [514], train_loss: 0.075367, val_loss: 0.069588, val_acc: 24.531120
+Epoch [515], train_loss: 0.075201, val_loss: 0.069502, val_acc: 24.710800
+Epoch [516], train_loss: 0.075227, val_loss: 0.068764, val_acc: 24.822914
+Epoch [517], train_loss: 0.074832, val_loss: 0.068272, val_acc: 25.024515
+Epoch [518], train_loss: 0.075435, val_loss: 0.069635, val_acc: 24.615059
+Epoch [519], train_loss: 0.075034, val_loss: 0.068797, val_acc: 24.848316
+Epoch [520], train_loss: 0.075345, val_loss: 0.069660, val_acc: 24.653772
+Epoch [521], train_loss: 0.074867, val_loss: 0.068381, val_acc: 25.050434
+Epoch [522], train_loss: 0.075306, val_loss: 0.069674, val_acc: 24.641880
+Epoch [523], train_loss: 0.075333, val_loss: 0.069727, val_acc: 24.677141
+Epoch [524], train_loss: 0.074751, val_loss: 0.070019, val_acc: 24.712175
+Epoch [525], train_loss: 0.075322, val_loss: 0.069267, val_acc: 24.739244
+Epoch [526], train_loss: 0.075262, val_loss: 0.068973, val_acc: 24.753132
+Epoch [527], train_loss: 0.075038, val_loss: 0.068477, val_acc: 24.993690
+Epoch [528], train_loss: 0.075256, val_loss: 0.069692, val_acc: 24.560959
+Epoch [529], train_loss: 0.075315, val_loss: 0.069380, val_acc: 24.708576
+Epoch [530], train_loss: 0.075124, val_loss: 0.068954, val_acc: 24.839018
+Epoch [531], train_loss: 0.073914, val_loss: 0.068629, val_acc: 24.966030
+Epoch [532], train_loss: 0.075253, val_loss: 0.068621, val_acc: 24.906809
+Epoch [533], train_loss: 0.075323, val_loss: 0.069054, val_acc: 24.779993
+Epoch [534], train_loss: 0.075214, val_loss: 0.068212, val_acc: 25.051649
+Epoch [535], train_loss: 0.075172, val_loss: 0.069551, val_acc: 24.773720
+Epoch [536], train_loss: 0.075257, val_loss: 0.068984, val_acc: 24.964739
+Epoch [537], train_loss: 0.075827, val_loss: 0.069202, val_acc: 24.824223
+Epoch [538], train_loss: 0.074814, val_loss: 0.069611, val_acc: 24.727116
+Epoch [539], train_loss: 0.075373, val_loss: 0.069792, val_acc: 24.659153
+Epoch [540], train_loss: 0.074898, val_loss: 0.069054, val_acc: 24.893875
+Epoch [541], train_loss: 0.075289, val_loss: 0.068801, val_acc: 24.830488
+Epoch [542], train_loss: 0.075108, val_loss: 0.068246, val_acc: 24.940794
+Epoch [543], train_loss: 0.075382, val_loss: 0.069037, val_acc: 24.802637
+Epoch [544], train_loss: 0.075082, val_loss: 0.068290, val_acc: 24.956266
+Epoch [545], train_loss: 0.075196, val_loss: 0.068349, val_acc: 24.771391
+Epoch [546], train_loss: 0.075475, val_loss: 0.069675, val_acc: 24.675653
+Epoch [547], train_loss: 0.075243, val_loss: 0.069578, val_acc: 24.748230
+Epoch [548], train_loss: 0.075063, val_loss: 0.069427, val_acc: 24.774281
+Epoch [549], train_loss: 0.074966, val_loss: 0.068434, val_acc: 24.969971
+Epoch [550], train_loss: 0.074609, val_loss: 0.068973, val_acc: 24.767759
+Epoch [551], train_loss: 0.074673, val_loss: 0.069550, val_acc: 24.636799
+Epoch [552], train_loss: 0.074488, val_loss: 0.069054, val_acc: 24.704512
+Epoch [553], train_loss: 0.074670, val_loss: 0.068670, val_acc: 24.768234
+Epoch [554], train_loss: 0.075285, val_loss: 0.068059, val_acc: 25.007502
+Epoch [555], train_loss: 0.075014, val_loss: 0.068869, val_acc: 24.793320
+Epoch [556], train_loss: 0.074835, val_loss: 0.068155, val_acc: 24.990009
+Epoch [557], train_loss: 0.075061, val_loss: 0.069079, val_acc: 24.761229
+Epoch [558], train_loss: 0.075229, val_loss: 0.069245, val_acc: 24.587582
+Epoch [559], train_loss: 0.074635, val_loss: 0.069113, val_acc: 24.781860
+Epoch [560], train_loss: 0.074783, val_loss: 0.068120, val_acc: 25.044172
+Epoch [561], train_loss: 0.075327, val_loss: 0.068665, val_acc: 24.770386
+Epoch [562], train_loss: 0.074694, val_loss: 0.068362, val_acc: 24.953886
+Epoch [563], train_loss: 0.074385, val_loss: 0.068349, val_acc: 24.921904
+Epoch [564], train_loss: 0.074645, val_loss: 0.068726, val_acc: 24.895700
+Epoch [565], train_loss: 0.074727, val_loss: 0.068630, val_acc: 24.939793
+Epoch [566], train_loss: 0.074843, val_loss: 0.068434, val_acc: 24.927223
+Epoch [567], train_loss: 0.075309, val_loss: 0.068837, val_acc: 24.831358
+Epoch [568], train_loss: 0.074780, val_loss: 0.067314, val_acc: 25.192219
+Epoch [569], train_loss: 0.074329, val_loss: 0.068813, val_acc: 24.850687
+Epoch [570], train_loss: 0.074611, val_loss: 0.068706, val_acc: 24.855169
+Epoch [571], train_loss: 0.074745, val_loss: 0.068999, val_acc: 24.807993
+Epoch [572], train_loss: 0.074838, val_loss: 0.069011, val_acc: 24.790350
+Epoch [573], train_loss: 0.075072, val_loss: 0.068634, val_acc: 24.836487
+Epoch [574], train_loss: 0.075305, val_loss: 0.068401, val_acc: 24.927874
+Epoch [575], train_loss: 0.075343, val_loss: 0.068350, val_acc: 24.798958
+Epoch [576], train_loss: 0.074699, val_loss: 0.068970, val_acc: 24.816547
+Epoch [577], train_loss: 0.074540, val_loss: 0.068429, val_acc: 24.848282
+Epoch [578], train_loss: 0.074865, val_loss: 0.069067, val_acc: 24.662241
+Epoch [579], train_loss: 0.074980, val_loss: 0.068448, val_acc: 24.838507
+Epoch [580], train_loss: 0.073783, val_loss: 0.068143, val_acc: 25.076097
+Epoch [581], train_loss: 0.075198, val_loss: 0.068561, val_acc: 24.910004
+Epoch [582], train_loss: 0.075389, val_loss: 0.068277, val_acc: 24.783466
+Epoch [583], train_loss: 0.074489, val_loss: 0.068947, val_acc: 24.862541
+Epoch [584], train_loss: 0.074387, val_loss: 0.068138, val_acc: 25.038843
+Epoch [585], train_loss: 0.074737, val_loss: 0.068949, val_acc: 24.814539
+Epoch [586], train_loss: 0.074974, val_loss: 0.068700, val_acc: 24.816605
+Epoch [587], train_loss: 0.074426, val_loss: 0.068415, val_acc: 25.003311
+Epoch [588], train_loss: 0.075568, val_loss: 0.068205, val_acc: 24.969633
+Epoch [589], train_loss: 0.075004, val_loss: 0.069413, val_acc: 24.673611
+Epoch [590], train_loss: 0.074715, val_loss: 0.069193, val_acc: 24.730394
+Epoch [591], train_loss: 0.075603, val_loss: 0.068557, val_acc: 24.766607
+Epoch [592], train_loss: 0.075109, val_loss: 0.068439, val_acc: 24.998987
+Epoch [593], train_loss: 0.074550, val_loss: 0.068471, val_acc: 24.893696
+Epoch [594], train_loss: 0.074652, val_loss: 0.068662, val_acc: 24.823364
+Epoch [595], train_loss: 0.074957, val_loss: 0.068606, val_acc: 24.900343
+Epoch [596], train_loss: 0.075399, val_loss: 0.069157, val_acc: 24.688379
+Epoch [597], train_loss: 0.075147, val_loss: 0.068337, val_acc: 24.885517
+Epoch [598], train_loss: 0.075097, val_loss: 0.068971, val_acc: 24.714645
+Epoch [599], train_loss: 0.074503, val_loss: 0.067846, val_acc: 25.054575
+Epoch [600], train_loss: 0.074538, val_loss: 0.068943, val_acc: 24.752800
+Epoch [601], train_loss: 0.074580, val_loss: 0.068749, val_acc: 24.781712
+Epoch [602], train_loss: 0.074126, val_loss: 0.068244, val_acc: 24.984310
+Epoch [603], train_loss: 0.074955, val_loss: 0.069210, val_acc: 24.751987
+Epoch [604], train_loss: 0.074158, val_loss: 0.068123, val_acc: 25.006414
+Epoch [605], train_loss: 0.075011, val_loss: 0.068103, val_acc: 24.926304
+Epoch [606], train_loss: 0.074638, val_loss: 0.067905, val_acc: 24.954475
+Epoch [607], train_loss: 0.075289, val_loss: 0.069395, val_acc: 24.613358
+Epoch [608], train_loss: 0.075118, val_loss: 0.068721, val_acc: 24.810282
+Epoch [609], train_loss: 0.074551, val_loss: 0.068072, val_acc: 25.057022
+Epoch [610], train_loss: 0.074714, val_loss: 0.068647, val_acc: 24.845844
+Epoch [611], train_loss: 0.074271, val_loss: 0.068109, val_acc: 25.007954
+Epoch [612], train_loss: 0.075398, val_loss: 0.067911, val_acc: 24.940088
+Epoch [613], train_loss: 0.074235, val_loss: 0.069033, val_acc: 24.827866
+Epoch [614], train_loss: 0.074364, val_loss: 0.068743, val_acc: 24.876907
+Epoch [615], train_loss: 0.074515, val_loss: 0.068088, val_acc: 24.930677
+Epoch [616], train_loss: 0.074859, val_loss: 0.067888, val_acc: 24.968676
+Epoch [617], train_loss: 0.074857, val_loss: 0.068815, val_acc: 24.797096
+Epoch [618], train_loss: 0.074727, val_loss: 0.068320, val_acc: 24.912947
+Epoch [619], train_loss: 0.074599, val_loss: 0.068062, val_acc: 25.017290
+Epoch [620], train_loss: 0.075068, val_loss: 0.067308, val_acc: 25.093145
+Epoch [621], train_loss: 0.075025, val_loss: 0.067892, val_acc: 24.969398
+Epoch [622], train_loss: 0.075477, val_loss: 0.068156, val_acc: 24.870407
+Epoch [623], train_loss: 0.074823, val_loss: 0.069082, val_acc: 24.706327
+Epoch [624], train_loss: 0.074195, val_loss: 0.067909, val_acc: 25.046415
+Epoch [625], train_loss: 0.074959, val_loss: 0.068345, val_acc: 24.786222
+Epoch [626], train_loss: 0.075530, val_loss: 0.068473, val_acc: 24.699867
+Epoch [627], train_loss: 0.074772, val_loss: 0.068930, val_acc: 24.747780
+Epoch [628], train_loss: 0.074747, val_loss: 0.067511, val_acc: 25.002884
+Epoch [629], train_loss: 0.074847, val_loss: 0.068065, val_acc: 24.938913
+Epoch [630], train_loss: 0.074008, val_loss: 0.068650, val_acc: 24.909143
+Epoch [631], train_loss: 0.074405, val_loss: 0.067962, val_acc: 25.048475
+Epoch [632], train_loss: 0.074067, val_loss: 0.068786, val_acc: 24.859756
+Epoch [633], train_loss: 0.074968, val_loss: 0.068439, val_acc: 24.836016
+Epoch [634], train_loss: 0.074616, val_loss: 0.068428, val_acc: 24.917618
+Epoch [635], train_loss: 0.075107, val_loss: 0.068252, val_acc: 24.853422
+Epoch [636], train_loss: 0.074603, val_loss: 0.067756, val_acc: 24.998810
+Epoch [637], train_loss: 0.075634, val_loss: 0.069030, val_acc: 24.576197
+Epoch [638], train_loss: 0.075328, val_loss: 0.068161, val_acc: 24.943935
+Epoch [639], train_loss: 0.075150, val_loss: 0.068077, val_acc: 24.998676
+Epoch [640], train_loss: 0.074627, val_loss: 0.068745, val_acc: 24.761885
+Epoch [641], train_loss: 0.074682, val_loss: 0.067883, val_acc: 25.014423
+Epoch [642], train_loss: 0.075182, val_loss: 0.068059, val_acc: 24.882029
+Epoch [643], train_loss: 0.074982, val_loss: 0.068218, val_acc: 24.923063
+Epoch [644], train_loss: 0.075402, val_loss: 0.067828, val_acc: 24.816097
+Epoch [645], train_loss: 0.074609, val_loss: 0.068069, val_acc: 24.873640
+Epoch [646], train_loss: 0.074543, val_loss: 0.068001, val_acc: 25.019173
+Epoch [647], train_loss: 0.074708, val_loss: 0.069713, val_acc: 24.689978
+Epoch [648], train_loss: 0.074056, val_loss: 0.068306, val_acc: 25.027992
+Epoch [649], train_loss: 0.073990, val_loss: 0.068179, val_acc: 24.996017
+Epoch [650], train_loss: 0.074155, val_loss: 0.067907, val_acc: 25.046970
+Epoch [651], train_loss: 0.074276, val_loss: 0.069186, val_acc: 24.768217
+Epoch [652], train_loss: 0.074933, val_loss: 0.068604, val_acc: 24.844414
+Epoch [653], train_loss: 0.075439, val_loss: 0.067916, val_acc: 24.953001
+Epoch [654], train_loss: 0.074529, val_loss: 0.068296, val_acc: 24.917536
+Epoch [655], train_loss: 0.075189, val_loss: 0.068854, val_acc: 24.736181
+Epoch [656], train_loss: 0.074342, val_loss: 0.067480, val_acc: 25.129534
+Epoch [657], train_loss: 0.074470, val_loss: 0.068564, val_acc: 24.930815
+Epoch [658], train_loss: 0.074777, val_loss: 0.068188, val_acc: 24.869452
+Epoch [659], train_loss: 0.074472, val_loss: 0.068053, val_acc: 25.013138
+Epoch [660], train_loss: 0.074881, val_loss: 0.067877, val_acc: 24.922300
+Epoch [661], train_loss: 0.074650, val_loss: 0.068423, val_acc: 24.801237
+Epoch [662], train_loss: 0.074530, val_loss: 0.068733, val_acc: 24.812737
+Epoch [663], train_loss: 0.074289, val_loss: 0.067885, val_acc: 25.096079
+Epoch [664], train_loss: 0.074888, val_loss: 0.068474, val_acc: 24.934235
+Epoch [665], train_loss: 0.075460, val_loss: 0.068200, val_acc: 24.853336
+Epoch [666], train_loss: 0.074617, val_loss: 0.068451, val_acc: 24.968113
+Epoch [667], train_loss: 0.075117, val_loss: 0.067765, val_acc: 24.950068
+Epoch [668], train_loss: 0.074601, val_loss: 0.068543, val_acc: 24.826843
+Epoch [669], train_loss: 0.074514, val_loss: 0.068832, val_acc: 24.791267
+Epoch [670], train_loss: 0.074450, val_loss: 0.068615, val_acc: 24.930687
+Epoch [671], train_loss: 0.075111, val_loss: 0.068456, val_acc: 24.820807
+Epoch [672], train_loss: 0.074161, val_loss: 0.067959, val_acc: 25.038225
+Epoch [673], train_loss: 0.074599, val_loss: 0.067776, val_acc: 25.042049
+Epoch [674], train_loss: 0.074061, val_loss: 0.068251, val_acc: 24.936707
+Epoch [675], train_loss: 0.074857, val_loss: 0.068578, val_acc: 24.833534
+Epoch [676], train_loss: 0.074961, val_loss: 0.068126, val_acc: 24.960783
+Epoch [677], train_loss: 0.075499, val_loss: 0.068434, val_acc: 24.791071
+Epoch [678], train_loss: 0.074568, val_loss: 0.068627, val_acc: 24.864019
+Epoch [679], train_loss: 0.074648, val_loss: 0.068431, val_acc: 24.885237
+Epoch [680], train_loss: 0.074752, val_loss: 0.068410, val_acc: 24.903778
+Epoch [681], train_loss: 0.074513, val_loss: 0.068787, val_acc: 24.882734
+Epoch [682], train_loss: 0.075408, val_loss: 0.069118, val_acc: 24.719629
+Epoch [683], train_loss: 0.074314, val_loss: 0.067902, val_acc: 25.005547
+Epoch [684], train_loss: 0.074992, val_loss: 0.067907, val_acc: 24.948713
+Epoch [685], train_loss: 0.074947, val_loss: 0.068390, val_acc: 24.710794
+Epoch [686], train_loss: 0.074227, val_loss: 0.068195, val_acc: 25.015814
+Epoch [687], train_loss: 0.074576, val_loss: 0.068009, val_acc: 24.954607
+Epoch [688], train_loss: 0.075115, val_loss: 0.067717, val_acc: 25.037401
+Epoch [689], train_loss: 0.075075, val_loss: 0.067606, val_acc: 25.000908
+Epoch [690], train_loss: 0.074454, val_loss: 0.068005, val_acc: 25.022781
+Epoch [691], train_loss: 0.074501, val_loss: 0.068406, val_acc: 24.928032
+Epoch [692], train_loss: 0.074477, val_loss: 0.068702, val_acc: 24.785856
+Epoch [693], train_loss: 0.074611, val_loss: 0.068290, val_acc: 24.932909
+Epoch [694], train_loss: 0.074336, val_loss: 0.069080, val_acc: 24.739576
+Epoch [695], train_loss: 0.074024, val_loss: 0.068879, val_acc: 24.792883
+Epoch [696], train_loss: 0.074144, val_loss: 0.067501, val_acc: 25.141935
+Epoch [697], train_loss: 0.074393, val_loss: 0.068306, val_acc: 24.885071
+Epoch [698], train_loss: 0.074951, val_loss: 0.068308, val_acc: 24.839592
+Epoch [699], train_loss: 0.074679, val_loss: 0.068845, val_acc: 24.902161
+python3 ./UNet_V17.py  16197.25s user 15814.35s system 99% cpu 8:54:09.95 total
diff --git a/UNet/UNet_V12.py b/UNet/UNet_V12.py
index cb3602201b4b8f5bba15f5eb0632d4f039784ff4..4082a1d4a8932c7c03f375a294f14204e05c13e3 100644
--- a/UNet/UNet_V12.py
+++ b/UNet/UNet_V12.py
@@ -1,11 +1,4 @@
-#like V6_2 but only the different phases as input
-"""UNet_V6.ipynb
 
-Automatically generated by Colaboratory.
-
-Original file is located at
-    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
-"""
 
 import torch
 import torch.nn as nn
@@ -226,7 +219,7 @@ if __name__ == '__main__':
     use_seeds = False
     seed = 373686838
     num_epochs = 300
-    b_size = 32
+    b_size = 12
     opt_func = torch.optim.Adam
     lr = 0.00003
     kernel = 9
diff --git a/UNet/UNet_V13.py b/UNet/UNet_V13.py
index 57e0d16715646ea800201ae976e09a2f9160aa59..2242a6041faa0f2794e6bbfa00500a103eec5487 100644
--- a/UNet/UNet_V13.py
+++ b/UNet/UNet_V13.py
@@ -172,8 +172,8 @@ def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.
         result['train_loss'] = torch.stack(train_losses).mean().item()
         model.epoch_end(epoch, result)
         history.append(result)
-    torch.save(model.state_dict(),f'{path}/Unet_dict_V13_2.pth')
-    torch.save(history,f'{path}/history_V13_2.pt')
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V13.pth')
+    torch.save(history,f'{path}/history_V13.pt')
     return history
 
 def get_default_device():
@@ -226,7 +226,7 @@ if __name__ == '__main__':
     use_seeds = False
     seed = 373686838
     num_epochs = 300
-    b_size = 32
+    b_size = 10
     opt_func = torch.optim.Adam
     lr = 0.00003
     kernel = 7
diff --git a/UNet/UNet_V15.py b/UNet/UNet_V15.py
index 11072152928258221d80449e4adf0878849b1120..7d35a871d3450ebc4ca3c04a9d74c48a6d2fc82f 100644
--- a/UNet/UNet_V15.py
+++ b/UNet/UNet_V15.py
@@ -227,8 +227,8 @@ def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
 if __name__ == '__main__':
     #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
     path_to_rep = '/home/yk138599/Hiwi/damask3'
-    use_seeds = False
-    seed = 373686838
+    use_seeds = True
+    seed = 2199910834
     num_epochs = 300
     b_size = 32
     opt_func = torch.optim.Adam
diff --git a/UNet/UNet_V15.py~c0cc9d273fc96dfe1568dcc5cddd577a337efb3f b/UNet/UNet_V15.py~c0cc9d273fc96dfe1568dcc5cddd577a337efb3f
new file mode 100644
index 0000000000000000000000000000000000000000..b20f5ecfa19e3d9908cf4edafcd2e37f47e3d027
--- /dev/null
+++ b/UNet/UNet_V15.py~c0cc9d273fc96dfe1568dcc5cddd577a337efb3f
@@ -0,0 +1,252 @@
+#like V6_2 but only the different phases as input
+"""UNet_V6.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import random
+from torch.utils.data.sampler import SubsetRandomSampler
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import TensorDataset
+import torch.nn.functional as F
+from torch.utils.data import random_split
+from torch.nn.modules.activation import ReLU
+
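+# Depthwise separable 3D convolution block: each depthwise conv filters its
+# input channels independently, and the following 1x1 pointwise conv mixes
+# them. Two such stages run back to back, each followed by dropout, ReLU and
+# batch norm.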
+class depthwise_separable_conv(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(depthwise_separable_conv, self).__init__()
+        self.depthwise_1 = nn.Conv3d(in_c, in_c, kernel_size= kernel_size, padding=padding[0], groups=in_c, bias=True)
+        self.pointwise_1 = nn.Conv3d(in_c, out_1_c, kernel_size=1, bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.dropout = nn.Dropout3d(p=0.25)
+        self.depthwise_2 = nn.Conv3d(out_1_c, out_1_c, kernel_size= kernel_size, padding=padding[1], groups=out_1_c, bias=True)
+        self.pointwise_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size=1, bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.dropout(self.pointwise_1(self.depthwise_1(x)))))
+        return self.batch_norm_2(self.relu(self.dropout(self.pointwise_2(self.depthwise_2(x)))))
+
+class convolution_Layer(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(convolution_Layer, self).__init__()
+        self.conv_1 = nn.Conv3d(in_c, out_1_c, kernel_size= kernel_size, padding=padding[0], bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.conv_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size= kernel_size, padding=padding[1], bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.conv_1(x)))
+        return self.batch_norm_2(self.relu(self.conv_2(x)))
+
+class head_layer(nn.Module):
+    def __init__(self, in_c, out_c = 1, padding = "same"):
+        super(head_layer, self).__init__()
+        self.conv =  nn.Conv3d(in_c, out_c, kernel_size=1, bias=True)
+        self.sig = nn.Sigmoid()
+    def forward(self, x):
+        return self.sig(self.conv(x)) #convolution
+        #return self.sig(self.pointwise(self.depthwise(x))) #convolution
+
+class Encoder(nn.Module):
+    def __init__(self,kernel_size, chs, padding=(("same","same"),("same","same"),("same","same"))):
+      super().__init__()
+      self.channels = chs
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1], chs[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
+      self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
+      #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
+
+    
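+    # Returns a list of feature maps: the input to every encoder block (kept
+    # as skip connections for the decoder) plus the final pooled bottleneck.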
+    def forward(self, x):
+      ftrs = []
+      for i in range(len(self.channels)):
+        ftrs.append(x)
+        x =self.enc_blocks[i](x)
+        #print(f'size of ftrs: {ftrs[i].size()}')
+        x = self.pool(x)
+        #print(f'size of x after pooling{x.size()}')
+      ftrs.append(x)
+      #print(f'size of ftrs: {ftrs[3].size()}')
+      #print(f'length of ftrs: {len(ftrs)}')
+      return ftrs
+
+class Decoder(nn.Module):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=(("same","same"),("same","same"),("same","same"))):
+        super().__init__()
+        assert len(chs_conv) == len(chs_upsampling)
+        self.chs         = chs_upsampling
+        self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
+        self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1], chs_conv[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
+        self.head = head_layer(chs_conv[-1][2])
+    def forward(self, x, encoder_features):
+        for i in range(len(self.chs)):
+            x        = self.upconvs[i](x)
+            #print(f'size after upsampling: {x.size()}')
+            enc_ftrs = self.crop(encoder_features[i], x)
+            x        = torch.cat([x, enc_ftrs], dim=1)
+            #print(f'size after cropping&cat: {x.size()}')
+
+            x        = self.dec_blocks[i](x)
+            #print(f'size after convolution: {x.size()}')
+        x = self.head(x)    
+        return x
+    
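+    # Center-crop the encoder feature map to the spatial size of the
+    # upsampled tensor so the two can be concatenated along the channel axis.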
+    def crop(self, tensor, target_tensor):
+        target_size = target_tensor.size()[2]
+        tensor_size = tensor.size()[2]
+        delta = tensor_size - target_size
+        delta = delta // 2
+        return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta,delta:tensor_size-delta]
+
+class UNetBase(nn.Module):
+    def training_step(self, batch):
+        input, labels = batch 
+        out = self(input)                  # Generate predictions
+        loss = F.l1_loss(out, labels) # Calculate loss
+        return loss
+    
+    def validation_step(self, batch):
+        input, labels = batch 
+        out = self(input)                    # Generate predictions
+        loss = F.l1_loss(out, labels)   # Calculate loss
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
+        return {'val_loss': loss.detach(), 'val_acc': acc}
+        
+    def validation_epoch_end(self, outputs):
+        batch_losses = [x['val_loss'] for x in outputs]
+        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
+        batch_accs = [x['val_acc'] for x in outputs]
+        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
+        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
+    
+    def epoch_end(self, epoch, result):
+        print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
+            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
+        
+def accuracy(outputs, labels,normalization, threshold = 0.05):
+    error = (abs((outputs) - (labels)))/(outputs+normalization[0]/normalization[1])
+    right_predic = torch.sum(error < threshold)
+    percentage = ((right_predic/torch.numel(error))*100.)
+    return percentage
+    
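+# Full U-Net: encoder plus decoder; `normalization` (presumably the dataset's
+# min/max values) is only consumed by the accuracy metric.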
+class UNet(UNetBase):
+    def __init__(self,kernel_size = 7, enc_chs=((2,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(66,32,32)),normalization=np.array([0,1])):
+        super().__init__()
+        self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
+        self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
+        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
+
+
+    def forward(self, x):
+        enc_ftrs = self.encoder(x)
+        out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
+        #out      = self.head(out)
+        return out
+
+@torch.no_grad()
+def evaluate(model, val_loader):
+    model.eval()
+    outputs = [model.validation_step(batch) for batch in val_loader]
+    return model.validation_epoch_end(outputs)
+
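+# Standard training loop: one optimizer step per batch, a validation pass at
+# the end of every epoch, and the final state dict plus loss/accuracy history
+# saved to disk.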
+def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.Adam):
+    history = []
+    optimizer = opt_func(model.parameters(), lr, eps=1e-07)
+    for epoch in range(epochs):
+        # Training Phase 
+        model.train()
+        train_losses = []
+        for batch in train_loader:
+            loss = model.training_step(batch)
+            train_losses.append(loss)
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+        # Validation phase
+        result = evaluate(model, val_loader)
+        result['train_loss'] = torch.stack(train_losses).mean().item()
+        model.epoch_end(epoch, result)
+        history.append(result)
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V15.pth')
+    torch.save(history,f'{path}/history_V15.pt')
+    return history
+
+def get_default_device():
+    """Pick GPU if available, else CPU"""
+    if torch.cuda.is_available():
+        return torch.device('cuda')
+    else:
+        print('no GPU found')
+        return torch.device('cpu')
+      
+def to_device(data, device):
+    """Move tensor(s) to chosen device"""
+    if isinstance(data, (list,tuple)):
+        return [to_device(x, device) for x in data]
+    return data.to(device, non_blocking=True)
+
+class DeviceDataLoader():
+    """Wrap a dataloader to move data to a device"""
+    def __init__(self, dl, device):
+        self.dl = dl
+        self.device = device
+        
+    def __iter__(self):
+        """Yield a batch of data after moving it to device"""
+        for b in self.dl: 
+            yield to_device(b, self.device)
+
+    def __len__(self):
+        """Number of batches"""
+        return len(self.dl)
+
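+# Load the serialized dataset, split off `percent_val` of it for validation,
+# and wrap both splits in DataLoaders.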
+def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
+    dataset = torch.load(path) # create the pytorch dataset 
+    #size_data = 500 #shrink dataset for colab
+    #rest = len(dataset) -size_data
+    #dataset,_ = torch.utils.data.random_split(dataset, [size_data, rest])
+    val_size = int(len(dataset) * percent_val)
+    train_size = len(dataset) - val_size
+
+    train_ds, val_ds = random_split(dataset, [train_size, val_size])
+    # Create DataLoader
+    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=1, pin_memory=True)
+    valid_dl = DataLoader(val_ds, batch_size, num_workers=1, pin_memory=True)
+    
+    return train_dl, valid_dl
+
+if __name__ == '__main__':
+    #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = False
+    seed = 373686838
+    num_epochs = 1000
+    b_size = 32
+    opt_func = torch.optim.Adam
+    lr = 0.00003
+    kernel = 7
+    print(f'number of epochs: {num_epochs}')
+    print(f'batchsize: {b_size}')
+    print(f'learning rate: {lr}')
+    print(f'kernel size is: {kernel}')
+    if not use_seeds:
+      seed = random.randrange(2**32 - 1)
+    print(f'seed is: {seed}')
+    torch.manual_seed(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    device = get_default_device()
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_phase_only.npy', allow_pickle = True)
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/TD_norm_32_phase_only.pt', batch_size= b_size )
+    train_dl = DeviceDataLoader(train_dl, device)
+    valid_dl = DeviceDataLoader(valid_dl, device)
+
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/UNet_V16.py b/UNet/UNet_V16.py
index ad3fa4c20d0d5961309c1997e7459cd69e520bae..3621c74541a8a9011babd85a5056d4150eb021a6 100644
--- a/UNet/UNet_V16.py
+++ b/UNet/UNet_V16.py
@@ -175,8 +175,8 @@ def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.
         result['train_loss'] = torch.stack(train_losses).mean().item()
         model.epoch_end(epoch, result)
         history.append(result)
-    torch.save(model.state_dict(),f'{path}/Unet_dict_V11.pth')
-    torch.save(history,f'{path}/history_V11.pt')
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V16.pth')
+    torch.save(history,f'{path}/history_V16.pt')
     return history
 
 def get_default_device():
@@ -227,8 +227,8 @@ if __name__ == '__main__':
     #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
     path_to_rep = '/home/yk138599/Hiwi/damask3'
     use_seeds = True
-    seed = 373686838
-    num_epochs = 300
+    seed = 2199910834
+    num_epochs = 200
     b_size = 32
     opt_func = torch.optim.Adam
     lr = 0.00003
diff --git a/UNet/UNet_V17.py b/UNet/UNet_V17.py
new file mode 100644
index 0000000000000000000000000000000000000000..d855c4cc19bf7ec0ca1150bf75818bc0de768a70
--- /dev/null
+++ b/UNet/UNet_V17.py
@@ -0,0 +1,253 @@
+
+"""UNet 17 head layer 70 32 1 else like 16
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1yvtk3lFo_x0ZiqtFdnR8jgcjPKy3nZA4
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import random
+from torch.utils.data.sampler import SubsetRandomSampler
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import TensorDataset
+import torch.nn.functional as F
+from torch.utils.data import random_split
+from torch.nn.modules.activation import ReLU
+
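+# Same depthwise separable block as in UNet_V15, but with a stronger dropout
+# rate (p=0.5 instead of 0.25).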
+class depthwise_separable_conv(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(depthwise_separable_conv, self).__init__()
+        self.depthwise_1 = nn.Conv3d(in_c, in_c, kernel_size= kernel_size, padding=padding[0], groups=in_c, bias=True)
+        self.pointwise_1 = nn.Conv3d(in_c, out_1_c, kernel_size=1, bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.dropout = nn.Dropout3d(p=0.5)
+
+        self.depthwise_2 = nn.Conv3d(out_1_c, out_1_c, kernel_size= kernel_size, padding=padding[1], groups=out_1_c, bias=True)
+        self.pointwise_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size=1, bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.dropout(self.pointwise_1(self.depthwise_1(x)))))
+        return self.batch_norm_2(self.relu(self.dropout(self.pointwise_2(self.depthwise_2(x)))))
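+# The separable factorisation above replaces one full 3D convolution per stage
+# (in_c*out_c*k^3 weights) with a per-channel spatial convolution plus a 1x1x1
+# pointwise mix (in_c*k^3 + in_c*out_c weights); for in_c=32, out_c=64, k=7
+# that is roughly 702k weights versus 13k.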
+
+class convolution_Layer(nn.Module):
+    def __init__(self, in_c, out_1_c, out_2_c, padding, kernel_size):
+        super(convolution_Layer, self).__init__()
+        self.conv_1 = nn.Conv3d(in_c, out_1_c, kernel_size= kernel_size, padding=padding[0], bias=True)
+        self.batch_norm_1 = nn.BatchNorm3d(out_1_c)
+        self.relu = nn.ReLU()
+        self.conv_2 = nn.Conv3d(out_1_c, out_2_c, kernel_size= kernel_size, padding=padding[1], bias=True)
+        self.batch_norm_2 = nn.BatchNorm3d(out_2_c)
+    def forward(self, x):
+        x = self.batch_norm_1(self.relu(self.conv_1(x)))
+        return self.batch_norm_2(self.relu(self.conv_2(x)))
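+# convolution_Layer is not referenced elsewhere in this file; it appears to be
+# kept as a plain (non-separable) drop-in alternative to depthwise_separable_conv.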
+
+class head_layer(nn.Module):
+    def __init__(self, in_c, out_c = 1, padding = "same"):
+        super(head_layer, self).__init__()
+        self.conv =  nn.Conv3d(in_c, out_c, kernel_size=1, bias=True)
+        self.sig = nn.Sigmoid()
+    def forward(self, x):
+        return self.sig(self.conv(x)) #convolution
+        #return self.sig(self.pointwise(self.depthwise(x))) #convolution
+
+class Encoder(nn.Module):
+    def __init__(self,kernel_size, chs, padding=(("same","same"),("same","same"),("same","same"))):
+      super().__init__()
+      self.channels = chs
+      self.enc_blocks = nn.ModuleList([depthwise_separable_conv(chs[i][0], chs[i][1], chs[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs))])
+      self.pool       = nn.MaxPool3d(kernel_size=2, stride=2)
+      #self.batch_norm = nn.ModuleList([nn.BatchNorm3d( chs[i][2]) for i in range(len(chs))])
+
+    
+    def forward(self, x):
+      ftrs = []
+      for i in range(len(self.channels)):
+        ftrs.append(x)
+        x =self.enc_blocks[i](x)
+        #print(f'size of ftrs: {ftrs[i].size()}')
+        x = self.pool(x)
+        #print(f'size of x after pooling{x.size()}')
+      ftrs.append(x)
+      #print(f'size of ftrs: {ftrs[3].size()}')
+      #print(f'length of ftrs: {len(ftrs)}')
+      return ftrs
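+# Encoder.forward collects the *input* of every block (the skip tensors for
+# the decoder) and finally the pooled output, so three blocks yield a list of
+# four tensors; UNet.forward reverses this list before handing it on.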
+
+class Decoder(nn.Module):
+    def __init__(self,kernel_size, chs_upsampling, chs_conv, padding=(("same","same"),("same","same"),("same","same"))):
+        super().__init__()
+        assert len(chs_conv) == len(chs_upsampling)
+        self.chs         = chs_upsampling
+        self.upconvs    = nn.ModuleList([nn.ConvTranspose3d(chs_upsampling[i], chs_upsampling[i], 2, 2) for i in range(len(chs_upsampling))])
+        self.dec_blocks = nn.ModuleList([depthwise_separable_conv(chs_conv[i][0], chs_conv[i][1], chs_conv[i][2], kernel_size=kernel_size, padding=padding[i]) for i in range(len(chs_conv))])
+        self.head = head_layer(chs_conv[-1][2])
+    def forward(self, x, encoder_features):
+        for i in range(len(self.chs)):
+            x        = self.upconvs[i](x)
+            #print(f'size after upsampling: {x.size()}')
+            enc_ftrs = self.crop(encoder_features[i], x)
+            x        = torch.cat([x, enc_ftrs], dim=1)
+            #print(f'size after cropping&cat: {x.size()}')
+
+            x        = self.dec_blocks[i](x)
+            #print(f'size after convolution: {x.size()}')
+        x = self.head(x)    
+        return x
+    
+    def crop(self, tensor, target_tensor):
+        target_size = target_tensor.size()[2]
+        tensor_size = tensor.size()[2]
+        delta = tensor_size - target_size
+        delta = delta // 2
+        return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta,delta:tensor_size-delta]
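+# crop() centre-crops a cubic skip tensor to the upsampled target size. With
+# the "same" padding used throughout this network both sizes already match and
+# delta is 0, so the crop only becomes active if unpadded convolutions are
+# substituted.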
+
+class UNetBase(nn.Module):
+    def training_step(self, batch):
+        inputs, labels = batch
+        out = self(inputs)                 # Generate predictions
+        loss = F.l1_loss(out, labels) # Calculate loss
+        return loss
+    
+    def validation_step(self, batch):
+        inputs, labels = batch
+        out = self(inputs)                   # Generate predictions
+        loss = F.l1_loss(out, labels)   # Calculate loss
+        acc = accuracy(out.detach(), labels.detach(),normalization=self.normalization)         # Calculate accuracy
+        return {'val_loss': loss.detach(), 'val_acc': acc}
+        
+    def validation_epoch_end(self, outputs):
+        batch_losses = [x['val_loss'] for x in outputs]
+        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
+        batch_accs = [x['val_acc'] for x in outputs]
+        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
+        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
+    
+    def epoch_end(self, epoch, result):
+        print("Epoch [{}], train_loss: {:.6f}, val_loss: {:.6f}, val_acc: {:.6f}".format(
+            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
+        
+def accuracy(outputs, labels,normalization, threshold = 0.05):
+    error = torch.abs(outputs - labels) / (outputs + normalization[0] / normalization[1])
+    right_predic = torch.sum(error < threshold)
+    percentage = ((right_predic/torch.numel(error))*100.)
+    return percentage
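+# accuracy() counts the voxels whose relative error
+# |out - label| / (out + normalization[0]/normalization[1]) stays below the
+# threshold (default 5%) and returns that share as a percentage; normalization
+# holds the min/max constants saved alongside the training data.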
+    
+class UNet(UNetBase):
+    def __init__(self,kernel_size = 7, enc_chs=((6,16,32), (32,32,64), (64,64,128)), dec_chs_up=(128, 128, 64), dec_chs_conv=((192,128, 128),(160,64,64),(70,32,1)),normalization=np.array([0,1])):
+        super().__init__()
+        self.encoder     = Encoder(kernel_size = kernel_size, chs = enc_chs)
+        self.decoder     = Decoder(kernel_size = kernel_size, chs_upsampling = dec_chs_up, chs_conv = dec_chs_conv)
+        #self.head        = depthwise_separable_conv(1, 1, padding = "same", kernel_size=1)
+        self.normalization = normalization
+
+
+    def forward(self, x):
+        enc_ftrs = self.encoder(x)
+        out      = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])
+        #out      = self.head(out)
+        return out
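+# Shape walk-through for a (N, 6, 32, 32, 32) input (the 32^3 volumes loaded
+# below): the encoder halves the grid three times (32 -> 16 -> 8 -> 4) while
+# the channels grow 6 -> 32 -> 64 -> 128; the decoder mirrors this and, after
+# concatenating the 64-, 32- and 6-channel skip tensors, its blocks see 192,
+# 160 and 70 input channels before the sigmoid head emits (N, 1, 32, 32, 32).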
+
+@torch.no_grad()
+def evaluate(model, val_loader):
+    model.eval()
+    outputs = [model.validation_step(batch) for batch in val_loader]
+    return model.validation_epoch_end(outputs)
+
+def fit(epochs, lr, model, train_loader, val_loader, path, opt_func=torch.optim.Adam):
+    history = []
+    optimizer = opt_func(model.parameters(), lr, eps=1e-07)
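+    # eps=1e-07 matches the Keras Adam default; PyTorch's own default is 1e-08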
+    for epoch in range(epochs):
+        # Training Phase 
+        model.train()
+        train_losses = []
+        for batch in train_loader:
+            loss = model.training_step(batch)
+            train_losses.append(loss.detach())  # detach so each step's graph can be freed
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+        # Validation phase
+        result = evaluate(model, val_loader)
+        result['train_loss'] = torch.stack(train_losses).mean().item()
+        model.epoch_end(epoch, result)
+        history.append(result)
+    torch.save(model.state_dict(),f'{path}/Unet_dict_V17.pth')
+    torch.save(history,f'{path}/history_V17.pt')
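+    # Weights and history are only written once, after the final epoch; an
+    # interrupted job therefore leaves no checkpoint behind.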
+    return history
+
+def get_default_device():
+    """Pick GPU if available, else CPU"""
+    if torch.cuda.is_available():
+        return torch.device('cuda')
+    else:
+        print('no GPU found')
+        return torch.device('cpu')
+      
+def to_device(data, device):
+    """Move tensor(s) to chosen device"""
+    if isinstance(data, (list,tuple)):
+        return [to_device(x, device) for x in data]
+    return data.to(device, non_blocking=True)
+
+class DeviceDataLoader():
+    """Wrap a dataloader to move data to a device"""
+    def __init__(self, dl, device):
+        self.dl = dl
+        self.device = device
+        
+    def __iter__(self):
+        """Yield a batch of data after moving it to device"""
+        for b in self.dl: 
+            yield to_device(b, self.device)
+
+    def __len__(self):
+        """Number of batches"""
+        return len(self.dl)
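+# Usage sketch (all names as defined in this file): wrapping keeps the
+# host-to-device copy out of the training loop, e.g.
+#     dl = DeviceDataLoader(DataLoader(ds, batch_size=32), get_default_device())
+#     for xb, yb in dl:  # xb and yb already live on the chosen device
+#         ...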
+
+def Create_Dataloader(path, batch_size = 100, percent_val = 0.2):
+    dataset = torch.load(path)  # load the pre-built PyTorch dataset
+    #size_data = 500 #shrink dataset for colab
+    #rest = len(dataset) -size_data
+    #dataset,_ = torch.utils.data.random_split(dataset, [size_data, rest])
+    val_size = int(len(dataset) * percent_val)
+    train_size = len(dataset) - val_size
+
+    train_ds, val_ds = random_split(dataset, [train_size, val_size])
+    # Create DataLoader
+    train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=1, pin_memory=True)
+    valid_dl = DataLoader(val_ds, batch_size, num_workers=1, pin_memory=True)
+    
+    return train_dl, valid_dl
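+# Create_Dataloader splits the saved dataset 80/20 by default; because
+# torch.manual_seed() runs in __main__ before this call, the random_split is
+# reproducible whenever a fixed seed is used.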
+
+if __name__ == '__main__':
+    #os.chdir('F:/RWTH/HiWi_IEHK/DAMASK3/UNet/Trainingsdata')
+    path_to_rep = '/home/yk138599/Hiwi/damask3'
+    use_seeds = True
+    seed = 373686838
+    num_epochs = 700
+    b_size = 32
+    opt_func = torch.optim.Adam
+    lr = 0.00003
+    kernel = 7
+    print(f'number of epochs: {num_epochs}')
+    print(f'batch size: {b_size}')
+    print(f'learning rate: {lr}')
+    print(f'kernel size is: {kernel}')
+    if not use_seeds:
+        seed = random.randrange(2**32 - 1)
+    print(f'seed is: {seed}')
+    torch.manual_seed(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    device = get_default_device()
+    normalization = np.load(f'{path_to_rep}/UNet/Trainingsdata/Norm_min_max_32_angles.npy', allow_pickle = True)
+    train_dl, valid_dl = Create_Dataloader(f'{path_to_rep}/UNet/Trainingsdata/TD_norm_32_angles.pt', batch_size=b_size)
+    train_dl = DeviceDataLoader(train_dl, device)
+    valid_dl = DeviceDataLoader(valid_dl, device)
+
+    model = to_device(UNet(kernel_size=kernel,normalization=normalization).double(), device)
+    history = fit(num_epochs, lr, model, train_dl, valid_dl,f'{path_to_rep}/UNet/output', opt_func)
diff --git a/UNet/core.ncg02.hpc.itc.rwth-aachen.de.47730.7 b/UNet/core.ncg02.hpc.itc.rwth-aachen.de.47730.7
new file mode 100644
index 0000000000000000000000000000000000000000..e8f36ddbe5566960261cc6f04d6fd354f7d6a04b
Binary files /dev/null and b/UNet/core.ncg02.hpc.itc.rwth-aachen.de.47730.7 differ
diff --git a/UNet/core.ncg28.hpc.itc.rwth-aachen.de.45496.7 b/UNet/core.ncg28.hpc.itc.rwth-aachen.de.45496.7
new file mode 100644
index 0000000000000000000000000000000000000000..b4c72ebbd821051de2f5f4b760735552817d6bb4
Binary files /dev/null and b/UNet/core.ncg28.hpc.itc.rwth-aachen.de.45496.7 differ