From aa7e943bcb69bda937717d3abf6c39f3f14e46d8 Mon Sep 17 00:00:00 2001
From: Ycblue <yuchialan@gmail.com>
Date: Tue, 10 Jan 2023 16:49:26 +0100
Subject: [PATCH] fixed graphs

---
 .gitignore                                    |    4 +-
 DeepGraft/Inception_norm_rest.yaml            |   55 +
 DeepGraft/Resnet18_img_norm_rest.yaml         |   55 +
 DeepGraft/Resnet50.yaml                       |   49 -
 DeepGraft/TransMIL_feat_norm_rej_rest.yaml    |   23 +-
 DeepGraft/TransMIL_feat_norm_rest.yaml        |   19 +-
 DeepGraft/TransMIL_feat_rej_rest.yaml         |   27 +-
 DeepGraft/TransMIL_retccl_norm_rest.yaml      |   10 +-
 DeepGraft/TransformerMIL_feat_norm_rest.yaml  |   10 +-
 DeepGraft/Vit_norm_rest.yaml                  |   55 +
 DeepGraft_Project_Plan.pdf                    |  Bin 41190 -> 0 bytes
 .../__pycache__/loss_factory.cpython-39.pyc   |  Bin 2605 -> 2590 bytes
 code/MyLoss/loss_factory.py                   |    4 +-
 .../__pycache__/test_visualize.cpython-39.pyc |  Bin 14897 -> 16159 bytes
 code/cufile.log                               |    6 +
 code/datasets/__init__.py                     |    4 +-
 .../__pycache__/__init__.cpython-39.pyc       |  Bin 334 -> 322 bytes
 .../classic_jpg_dataloader.cpython-39.pyc     |  Bin 0 -> 6368 bytes
 .../__pycache__/data_interface.cpython-39.pyc |  Bin 8118 -> 9452 bytes
 .../feature_dataloader.cpython-39.pyc         |  Bin 7019 -> 11196 bytes
 .../__pycache__/jpg_dataloader.cpython-39.pyc |  Bin 0 -> 6399 bytes
 .../__pycache__/myTransforms.cpython-39.pyc   |  Bin 0 -> 58251 bytes
 .../simple_jpg_dataloader.cpython-39.pyc      |  Bin 8079 -> 7203 bytes
 code/datasets/classic_jpg_dataloader.py       |  336 ++++
 code/datasets/custom_jpg_dataloader.py        |   34 +-
 code/datasets/data_interface.py               |  105 +-
 code/datasets/feature_dataloader.py           |  386 ++++-
 code/datasets/jpg_dataloader.py               |  434 +++++
 code/datasets/monai_loader.py                 |    5 +-
 code/datasets/simple_jpg_dataloader.py        |  323 ----
 code/datasets/utils/__init__.py               |    0
 .../utils/__pycache__/__init__.cpython-39.pyc |  Bin 0 -> 162 bytes
 .../__pycache__/myTransforms.cpython-39.pyc   |  Bin 0 -> 58339 bytes
 code/datasets/utils/myTransforms.py           | 1426 +++++++++++++++++
 code/models/TransMIL.py                       |   52 +-
 code/models/TransformerMIL.py                 |   32 +-
 .../__pycache__/TransMIL.cpython-39.pyc       |  Bin 3811 -> 3888 bytes
 .../__pycache__/TransformerMIL.cpython-39.pyc |  Bin 3393 -> 3740 bytes
 .../model_interface.cpython-39.pyc            |  Bin 17801 -> 21335 bytes
 .../model_interface_classic.cpython-39.pyc    |  Bin 0 -> 18846 bytes
 code/models/model_interface.py                |  579 ++++---
 code/models/model_interface_classic.py        |  757 +++++++++
 code/test_visualize.py                        |  243 ++-
 code/train.py                                 |   59 +-
 code/utils/__pycache__/utils.cpython-39.pyc   |  Bin 4382 -> 4526 bytes
 code/utils/utils.py                           |   21 +-
 project_plan.md                               |  104 +-
 47 files changed, 4357 insertions(+), 860 deletions(-)
 create mode 100644 DeepGraft/Inception_norm_rest.yaml
 create mode 100644 DeepGraft/Resnet18_img_norm_rest.yaml
 delete mode 100644 DeepGraft/Resnet50.yaml
 create mode 100644 DeepGraft/Vit_norm_rest.yaml
 delete mode 100644 DeepGraft_Project_Plan.pdf
 create mode 100644 code/cufile.log
 create mode 100644 code/datasets/__pycache__/classic_jpg_dataloader.cpython-39.pyc
 create mode 100644 code/datasets/__pycache__/jpg_dataloader.cpython-39.pyc
 create mode 100644 code/datasets/__pycache__/myTransforms.cpython-39.pyc
 create mode 100644 code/datasets/classic_jpg_dataloader.py
 create mode 100644 code/datasets/jpg_dataloader.py
 delete mode 100644 code/datasets/simple_jpg_dataloader.py
 create mode 100644 code/datasets/utils/__init__.py
 create mode 100644 code/datasets/utils/__pycache__/__init__.cpython-39.pyc
 create mode 100644 code/datasets/utils/__pycache__/myTransforms.cpython-39.pyc
 create mode 100644 code/datasets/utils/myTransforms.py
 create mode 100644 code/models/__pycache__/model_interface_classic.cpython-39.pyc
 create mode 100644 code/models/model_interface_classic.py
 mode change 100755 => 100644 code/utils/utils.py

diff --git a/.gitignore b/.gitignore
index c9e4fda..104287f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
 logs/*
 lightning_logs/*
-test/*
\ No newline at end of file
+test/*
+DeepGraft_Project_Plan_12.7.22.pdf
+monai_test.json
diff --git a/DeepGraft/Inception_norm_rest.yaml b/DeepGraft/Inception_norm_rest.yaml
new file mode 100644
index 0000000..48cd4eb
--- /dev/null
+++ b/DeepGraft/Inception_norm_rest.yaml
@@ -0,0 +1,55 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16
+    multi_gpu_mode: ddp
+    gpus: [0, 1]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: train #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    mixup: False
+    aug: True
+    cache: False
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/data/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_val_1.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 500 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: inception
+    n_classes: 2
+    backbone: inception
+    in_features: 2048
+    out_features: 1024
+
+
+Optimizer:
+    opt: adam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/Resnet18_img_norm_rest.yaml b/DeepGraft/Resnet18_img_norm_rest.yaml
new file mode 100644
index 0000000..d2183e4
--- /dev/null
+++ b/DeepGraft/Resnet18_img_norm_rest.yaml
@@ -0,0 +1,55 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16
+    multi_gpu_mode: ddp
+    gpus: [0, 1]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: train #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    mixup: True
+    aug: True
+    cache: False
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/data/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_val_1.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1000 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: resnet18
+    n_classes: 2
+    backbone: resnet18
+    in_features: 2048
+    out_features: 1024
+
+
+Optimizer:
+    opt: Adam
+    lr: 0.0001
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/Resnet50.yaml b/DeepGraft/Resnet50.yaml
deleted file mode 100644
index e6b780b..0000000
--- a/DeepGraft/Resnet50.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-General:
-    comment: 
-    seed: 2021
-    fp16: True
-    amp_level: O2
-    precision: 16 
-    multi_gpu_mode: dp
-    gpus: [0]
-    epochs: &epoch 200 
-    grad_acc: 2
-    frozen_bn: False
-    patience: 200
-    server: test #train #test
-    log_path: logs/
-
-Data:
-    dataset_name: custom
-    data_shuffle: False
-    data_dir: '/home/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
-    label_file: '/home/ylan/DeepGraft/training_tables/split_PAS_bin.json'
-    fold: 0
-    nfold: 4
-
-    train_dataloader:
-        batch_size: 1 
-        num_workers: 8
-
-    test_dataloader:
-        batch_size: 1
-        num_workers: 8
-
-        
-
-Model:
-    name: resnet50
-    n_classes: 2
-
-
-Optimizer:
-    opt: lookahead_radam
-    lr: 0.0002
-    opt_eps: null 
-    opt_betas: null
-    momentum: null 
-    weight_decay: 0.00001
-
-Loss:
-    base_loss: CrossEntropyLoss
-
diff --git a/DeepGraft/TransMIL_feat_norm_rej_rest.yaml b/DeepGraft/TransMIL_feat_norm_rej_rest.yaml
index 3dcc817..3a9886f 100644
--- a/DeepGraft/TransMIL_feat_norm_rej_rest.yaml
+++ b/DeepGraft/TransMIL_feat_norm_rej_rest.yaml
@@ -3,27 +3,30 @@ General:
     seed: 2021
     fp16: True
     amp_level: O2
-    precision: 16 
-    multi_gpu_mode: dp
-    gpus: [0]
+    precision: 32
+    multi_gpu_mode: ddp
+    gpus: [0, 1]
     epochs: &epoch 1000 
     grad_acc: 2
     frozen_bn: False
-    patience: 100
-    server: test #train #test
+    patience: 300
+    server: train #train #test
     log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
 
 Data:
     dataset_name: custom
     data_shuffle: False
-    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
-    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rej_rest.json'
+    mixup: True
+    aug: True
+    cache: False
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/data/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rej_rest_val_1.json'
     fold: 1
     nfold: 3
     cross_val: False
 
     train_dataloader:
-        batch_size: 1 
+        batch_size: 100
         num_workers: 4
 
     test_dataloader:
@@ -34,13 +37,13 @@ Model:
     name: TransMIL
     n_classes: 3
     backbone: features
-    in_features: 512
+    in_features: 2048
     out_features: 512
 
 
 Optimizer:
     opt: lookahead_radam
-    lr: 0.0002
+    lr: 0.002
     opt_eps: null 
     opt_betas: null
     momentum: null 
diff --git a/DeepGraft/TransMIL_feat_norm_rest.yaml b/DeepGraft/TransMIL_feat_norm_rest.yaml
index ea452a3..1651aa4 100644
--- a/DeepGraft/TransMIL_feat_norm_rest.yaml
+++ b/DeepGraft/TransMIL_feat_norm_rest.yaml
@@ -4,9 +4,9 @@ General:
     fp16: True
     amp_level: O2
     precision: 16
-    multi_gpu_mode: dp
+    multi_gpu_mode: ddp
     gpus: [0, 1]
-    epochs: &epoch 500 
+    epochs: &epoch 1000 
     grad_acc: 2
     frozen_bn: False
     patience: 50
@@ -16,16 +16,17 @@ General:
 Data:
     dataset_name: custom
     data_shuffle: False
-    mixup: False
+    mixup: True
     aug: True
-    data_dir: '/home/ylan/data/DeepGraft/224_128uM_annotated/'
-    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
+    cache: False
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/data/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_val_1.json'
     fold: 1
     nfold: 3
     cross_val: False
 
     train_dataloader:
-        batch_size: 1 
+        batch_size: 100
         num_workers: 4
 
     test_dataloader:
@@ -37,12 +38,12 @@ Model:
     n_classes: 2
     backbone: features
     in_features: 2048
-    out_features: 1024
+    out_features: 512
 
 
 Optimizer:
-    opt: Adam
-    lr: 0.0001
+    opt: lookahead_radam
+    lr: 0.002
     opt_eps: null 
     opt_betas: null
     momentum: null 
diff --git a/DeepGraft/TransMIL_feat_rej_rest.yaml b/DeepGraft/TransMIL_feat_rej_rest.yaml
index ca9c0e4..4a054f2 100644
--- a/DeepGraft/TransMIL_feat_rej_rest.yaml
+++ b/DeepGraft/TransMIL_feat_rej_rest.yaml
@@ -3,44 +3,47 @@ General:
     seed: 2021
     fp16: True
     amp_level: O2
-    precision: 16 
-    multi_gpu_mode: dp
-    gpus: [0]
-    epochs: &epoch 500 
+    precision: 16
+    multi_gpu_mode: ddp
+    gpus: [0, 1]
+    epochs: &epoch 1000 
     grad_acc: 2
     frozen_bn: False
     patience: 50
-    server: test #train #test
+    server: train #train #test
     log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
 
 Data:
     dataset_name: custom
     data_shuffle: False
-    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
-    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_rej_rest.json'
+    mixup: True
+    aug: True
+    cache: False
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/data/DeepGraft/training_tables/dg_split_PAS_HE_Jones_rej_rest_val_1.json'
     fold: 1
     nfold: 3
     cross_val: False
 
     train_dataloader:
-        batch_size: 1 
-        num_workers: 8
+        batch_size: 100
+        num_workers: 4
 
     test_dataloader:
         batch_size: 1
-        num_workers: 8
+        num_workers: 4
 
 Model:
     name: TransMIL
     n_classes: 2
     backbone: features
-    in_features: 1024
+    in_features: 2048
     out_features: 512
 
 
 Optimizer:
     opt: lookahead_radam
-    lr: 0.0002
+    lr: 0.002
     opt_eps: null 
     opt_betas: null
     momentum: null 
diff --git a/DeepGraft/TransMIL_retccl_norm_rest.yaml b/DeepGraft/TransMIL_retccl_norm_rest.yaml
index fa9988d..9b04677 100644
--- a/DeepGraft/TransMIL_retccl_norm_rest.yaml
+++ b/DeepGraft/TransMIL_retccl_norm_rest.yaml
@@ -16,14 +16,14 @@ General:
 Data:
     dataset_name: custom
     data_shuffle: False
-    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
-    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/data/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_img_val_1.json'
     fold: 1
     nfold: 3
     cross_val: False
 
     train_dataloader:
-        batch_size: 1 
+        batch_size: 5 
         num_workers: 4
 
     test_dataloader:
@@ -34,8 +34,8 @@ Model:
     name: TransMIL
     n_classes: 2
     backbone: retccl
-    in_features: 512
-    out_features: 1024
+    in_features: 2048
+    out_features: 512
 
 
 Optimizer:
diff --git a/DeepGraft/TransformerMIL_feat_norm_rest.yaml b/DeepGraft/TransformerMIL_feat_norm_rest.yaml
index 7c90fbf..2f86ef1 100644
--- a/DeepGraft/TransformerMIL_feat_norm_rest.yaml
+++ b/DeepGraft/TransformerMIL_feat_norm_rest.yaml
@@ -10,14 +10,14 @@ General:
     grad_acc: 2
     frozen_bn: False
     patience: 100
-    server: test #train #test
+    server: train #train #test
     log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
 
 Data:
     dataset_name: custom
     data_shuffle: False
-    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
-    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
+    data_dir: '/home/ylan/data/DeepGraft/224_256uM_annotated/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_val_1.json'
     fold: 1
     nfold: 3
     cross_val: False
@@ -34,8 +34,8 @@ Model:
     name: TransformerMIL
     n_classes: 2
     backbone: features
-    in_features: 512
-    out_features: 1024
+    in_features: 2048
+    out_features: 512
 
 
 Optimizer:
diff --git a/DeepGraft/Vit_norm_rest.yaml b/DeepGraft/Vit_norm_rest.yaml
new file mode 100644
index 0000000..ae4a0e5
--- /dev/null
+++ b/DeepGraft/Vit_norm_rest.yaml
@@ -0,0 +1,55 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16
+    multi_gpu_mode: ddp
+    gpus: [0, 1]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: train #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    mixup: True
+    aug: True
+    cache: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128uM_annotated/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_val_1.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 500 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: vit
+    n_classes: 2
+    backbone: vit
+    in_features: 2048
+    out_features: 1024
+
+
+Optimizer:
+    opt: Adam
+    lr: 0.0001
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft_Project_Plan.pdf b/DeepGraft_Project_Plan.pdf
deleted file mode 100644
index 1b5a0c95fca1a588080f74941d9141b5b45bb604..0000000000000000000000000000000000000000
Binary files a/DeepGraft_Project_Plan.pdf and /dev/null differ
zExT6zrR1_F3mqqW3NP+o)wfHaRCkqbSMcN&D`rSU2O1n(@KKD_(nQ3k*(uriQ+MO;
zO#KXnvo*n1E;dS!IcM6gopRSR&eHg_k;<2x^Toa)s*Y?z3PEq1S!J06wSRkTQUCw&
zRG`iM^_dT!t^Kj~^yJrRVJrJej~CywyS(-5`={CMb7xqdmtP~6#WOkUna{nweErXE
zw2NKk(>Hyj=RSMMJ6GqQ$Kxc#Qf7WBJlqy~$@qP{@$<sx2QF2-i0+s0Ti(1RxckxG
zX)6{jxR`W7Y3kG+4+>i3y616zx>wEg?u7JA75xYS{izX)T|eBF&Q0H3S#MQWtfFkN
z{xf?-*%jVBPfiGT&x_wx@pqD?&z>(+zxWncO;xK_uXg47R{iGfwPNw}HGgi-KmTv0
z*LSA_9~>WenXD3eyoKWv+h*3tIfkZbJ!ja8ckVnElb>h!*-NR~XS?yb136_DJg$!I
zy(_w!^b+@Nh*D_cU>0~G^`xfqOYE~HQ|{Itz4GqgN`6l1S>KqyY<c~lCqpqU@87>7
zpYDZq&z_lP*M0w?^_cVRn#;v`TNTy#UKjkiw{=(GWWV}9H-DZCHhH?AJIQ=$q4RC?
z9FYq**d|@|?%DczqsQ{gpR4l4H;1;1yj!#&DpBF0tH9(fmo0o=@M*O#b@fY8_`-ap
z#&gP<GJTbj8%5^x)sv0lW)**9iL-kCl&8(|!27hH*UqQge!kPCdR~9#-(O3Nat`yU
z{l2*I^~clO&suN(TB`Zu<N1GI4w~27{^fh1nZCjyllwwd#|*BzfYaQ8X`2_Qo|9l_
zYO)GpYu>7nVX(p@@B>qQZ~Ci)J3eU5&N=ITUwY#G^mED~M`Yi>?g`_W^e(aBUbnVd
zUeCQbRWF~OzI1-NUah{W&F0QzuA~qB`|H(zcx<oiE>tp0G%a&nQ+crQ{{7vI;sGZ!
zCM2&Y7I6M~BtzOonP+itSD0jX(-oznoWKjmF3vAM!6e=H#p=eL#4XBxN4+Z79^NRi
zrs5wXhxiY3JN=5S*>@IhS#?tKhtSR1bc>sNwO;+nTl*-|@ABTPEm@)gIa@<c9oyKt
zz&bNy#f(EjlX7~tYfsf%tZjRE<#rVj*0*BKdZGrGA}0ojlx%*f^Y`Q$;XlPk{+8ac
zDz@%X^i5pJY?4;_{u1vD{*`<B*<T;n{`^fzQ2ZI6JB517Zd$zEIDbaZ_4NIvFTU)|
zTwkWl_09C3^Z8w_r=R_jTdxr*Fu_Y&%te&%DF5797LvjfIMl=ZS9L_MJ$bM6`C9$`
zYVY@be0h@l^>?wUvWraYc&04bl<oL((cIKCY29AAfzi2QQ$sI^vqm0dn5Sr7)K$H<
zY3E%57snn!w<|AfYXUsgYP*li%n7m1Kh5}Vj>-3hZJ#eKx@Pk1!lk3^=G)dS37NWi
za<K<z*^2Eig0d9?S?h`fs#bM1XDwlH()!Y&dLS|BEyv^cC$}7y66Sok*~T-{PW!XI
z&3nJ_KV4tg_I><zdv(}arNY(s@-r7oSvgBNeqvn2cy6ZdF$rC<7Lz^4vWz9Wo7I+F
z7iIlgwTPwQoX4F?|2LkAg`c9QEm^bSdS8RePx1S*o7UdmrT$8KiKoxQ+xf?3YVB|B
zP^$Z=^Qqk+$ESX!-u$rMUmY7GmQI<hywqrptYH%ucf>;W>#S;0NxSZRGk-WYzQ6Tz
zzRroAg1!e&7GEmv=9f7jU$4M>;Qa&X<fU2f?#$(K(_XrDox4mJ^O@r>^lv{n;>IEN
za0llcQ$<5fL(hLwhtjs+oczW!a*2K4q%hytiOQE){|J^v6st-3t$!G{ahk&&<DB=Q
z+imaWq@~`R8d~~A<#EDwuMP5^C-jfqo-|1yXje>ORjZTaa-X^br)M?BUo_i$bHT;k
zQ$7_wu-SL+z5jdPd$zJc$!{h;*&JQ_N!z~qhvN6PcFAHl`@$1`|0?Hou9u3jI&?-Q
zeEOL=8<ZEv-%~#GTHog4rnO&PgKGA?{_}-%@o8=K{yTru1<YUE-^hG~_rpE^RGAND
z3rsHNv6M`7e%aB*rpeHv+pyNi>GyAez4Fc-9<?`(vX%si<=2U)aX72hZch*2T@h@v
z<H{Vx=l%1te3?%!zL(2zVA=iY4$Fl9EjuE(l;v)a6vvIIS3>`6FYo-l_PA|a*@3s(
z|Nm_KI^TS~T>Q4g<mBYX(?Wgbl<-zuXPq-yw&Lg7FK6TJ|J-JOU;jt^)$gT(Idah~
z6ZT&De*EI%%m3%Sn3Pc3^nUxHlcIjdn4`}9F~9$w@p}Gi1;-iQ-xp0)Kl^WCOEUkb
zYQq^%&-D85D}8n1j_d3BmCt@Ie{S?~ZTf7{*{U1{WvQF@74~_iHhi2Ozvtt}bosrd
zVmm(D798gi*|dn=rJHY)!NIJ}ug|2VMO?@UcwL!w|NT=Qg{udr9#N5>DsgOjX8Kyw
zsrvg$4@iEE+SGJfCq5?3v~i7beSJvM%*W05B(Dbq8*85Sc=|B(fv5O}SDhIfZiGIF
zefC0wx63JQp08I~X3h~Y?sZnX)Mf=vevvV|kxfz{;>NA`Bll~k|9x-So1?tKx4ikV
z-z%s4$IG6We00zi?@PX+_rL1--2GMCX3UT2FHJt~ZNFCP%fp!$eqYpInSOp(=${*F
zrib$%j?{k>yW!5)7Za`@Q4IXc7AC&F^6AT;)0!GzSBu|>IoX_kA$U&4Zoj%`ZOnH!
zxvDP@F%s*(mu8;Tu-S~`^uf8sGX&Nw&ycP^>o{w3P1>(phm@BqJ@ha=U6B;N{-%YB
z=+qFK&!5hP$5nCeQR<tvFY;=Xc}l>aP47-$Gm1{x&uCJzac@J8;M_xe?MAmUw?sKE
zUt_j5l=0Z}@KUbw6uY8^!&}&oyV<0^3Ff{g+oKVBfBN$`x6ggclHHc+m!Tb8o$ep8
zKFiH~Zt{~I7x{}evls+i{9`*wM9}SM#uYEO`xC_?-+Ol}bq1>F7d?#Osk}JfZc^Kv
zqf*KI`9j;~FfK?6``k10jAla;tH#++mhN454UJxwwmiS0wn(i>*-O!d^IXQF%PU(~
zXtX<Bn0>|givMa!Q}!PxCe%Kh!f}apmO_csqkUUCR5*)*QZ}W1H@SRgg?l`k)|2VQ
ztBWU<{dsv|-^F<<pC_4%?Rv}A+t$PXaR0p6d%tuOel7YEe1GlDiguHvOOFecPs{Zt
zonm-x*zi^4K$P!aZ42qC4Uy9nOwSkcD1KQxr|amNrpuEzJY5_arQXkcqxI~3q3Wqu
z?gvcY>}#bdYu~!vV{849l{;Qbt`y5Ze0)ib+SH4li$8q3Hhb;+;C)XTuSTl<m$P>-
znKi}AZMl)eRjsKPvo32+o#VLFUFJpjk&D0Hse1d(u(Ysylc8CzP}zD%^o{g8BLm}&
zzb8C>skSnkJFG`Y?!Zm;CG5@WO9fx4{yD=cnG^LoSp4xaq04W#>&<2GxyB$G#3-zK
zN@y`_lPFI|i`Q9Uu|?5g{j5?O6<_~-&sluy=A+YxOkTH7DCFI|xztYb$}f%2?~Eqx
zsJS_JzyAOFJEuQHa$#H0NA!)shN#QNVuK)uiG0>Bdc!B`*-$!JAjmtq^RS)CHhG55
zmrA_?tl+~$)=U-Sys-Ixby?1%O;%yoe01u*H?K-en!EAVVT+i(OxF$_z2V`ptanye
zOa4`Jy{_gR^Nw@=I#uR8<$2Fhc|~i#y0qDQ<)SJ&9+|H{C|#X6d;KQ99s8`lGuz(i
z{J276Mw_0+UXyoQUp5}qxE*Ho<g7`4^Rsh5=Exd!Mg*8=T`<_?)|mf>b<v#m;0=Fd
z`~(}X{{O_e@|It%x!g_9gkrtQjS89qs_HH)U8YRDysGkF@eXkth2H5;zO+QuIjNio
zkTcL&F~cHg>ee)`DYA7zj*g$EJefIZxy|Ihm`g&<o^A(iziO>4{&c3}r^h@=L*rHZ
zCcj?$Lhkg6X;bfgTd?k4yXb>^h2KtJ>21Be{vq4;71N%JZ&O|@fh{784UpFJVO?1n
zQdy9ypzoiQ6<m_brSGR;3SQ|~R9vE9Y+=Nu4_O1KU}$b)in6}%Xn1V?Z40s5@9TFQ
zu*>OoRGBn=e)l4WB=yZR)#fgEnNXg-k$>IVw-ss=xgyza)$ea#bV);cWzYqN$v2j{
ze7aX%@s8covq?nredkFp$5}erMk&q8-g4*rnhpzlYR+WxOchOBbR{V0k3@TU&74F0
zlfUTkO%l44IAO|`X-ia(zL+v^%C-$1{k)9|i<+F&7k{0xq;u=kB|IAqE4Clu`!+@S
zo77|mW0zD$T>&EoZ;5ozC{52b9>zUJi+KgEE#Y<8q?nsHMK@FZg4GtjV>X%cALKmP
zA6#teDG->JvgG(~CFX<xx3-ObyjdnYF17VE*5;m!XfJm^#wzhSrKzFD;kBa539e>G
z&Z33aH8!1c);y86=Ka;xuKU(>mj++`lJqk=_U=2r?fT+BLt}IKXD?b7p1wRi-HBoI
z2B~=MmZ*iXkEB0@Rve%4sp8o6m+!jmjyruYJa+EV^<65$$8YakHrd{_d#BmC$fY}P
zE&sm0Z1LL5OPqZla<1R)`gT>();zANjlX6K74SPb8A<Eb2|QZ=BVpn6t7R3Z*6WL}
zkJneSf4{#*OJjdjy^h$=-AVDXuY5P=&t&$V{d&>D`s9jIx1YZs6+8d@sk>}m$=glY
z=U>`)|M^n<`EjZ6S=X2Hl8X8-WamBD@r_gL^X64Kw>}#Gu;`s1xx}fe$fG?kZB9*O
zPWkJ}uZ$Hq{>)ymIf04keD+LMNdc=3)iKjTHy@XgPo3!eF}&-_i7P9v6l9fT`D8^s
z4!kIIqf2O2iqph+^RJ1QW7fXj7Tz%Fo8hz2cB6GEo7Hxn@YmC<kG{J7_3YjMo)*6G
zUo3uFqwZty@8A75iYpzZ#QHeRx)eV?nQ=mVnXH}3+w3<jU6a4=%b0nl|Mq@)y)yZ(
zUA&#IOLw;GpYq*$cdGou`MLt{XX(4w$3Iz}v|sbo+g&+}3YPek*(82_zdHVu?2@jC
zu3xWbvp-4UIC0UlbaO42lx@o~8`-m;{$Gp<OZ_UMP{-j>!Lx*SpWLMG*q`i7mcPC&
zS+#wIp=O!um9Oh=94XnPXMD<-YYDg3WsmA9dAdeob_>2#J~YyeT%o~~6LD_&u3xg7
z+~?<AdwO_-%Yx(k-?NJ{cy&LR(BzjAX5DP-`TI=7(pL57zG=Md``EXxtXusm?Z0^H
zj|z?{f9e8)7^fyaR<%s{xbN=LEoYY`<mjC%VmmcwKYQra-Tk|ZYKvKxnYk8k(VrFA
z@T!vW(Z$^fkImi~hcUVy&t$!@?7UQ$M$+0FeTyEYe_hHD&Lz!Pv(i|f^$?$js@2-}
z3wOCT-0#^PWUsFFy7ZXXZ-G}EPYS$MdUsuM!R4Ftv}b?)*I(%IEOKu8+EZ%+KKeY>
znX{a=z^dIYzp8SF<rTq$8XCX%EC>)5_;A9mb4rRTFJB+ql$Q+pb*x|SI~T9$4)l2!
zS*z~c7Oc%TB}nlEE4$#MiDENaUjEb*TNUW?&)>ef=6UDie+vZK__&ImEcmco_xo?L
zl}|1xU45eTJ@7cAaK*&^H*+8FyCr-0(z8|X`sC(5iFo->Yx@7s@%ta;O_Yr3D#$*#
zZAxncw^P^Ee{z;;P0`L`{?F!m)U!)(eI&7aYtdYf^#OfS6K8U??VMZ@@Zoq$K!MTA
zEtfr)O~15J<ws(IR<WPt)wN5*XSt=WnKIMysHx|(eTVwIckH;iOTl{5`%hIN4O>r&
zlpWYIiQRD`(~H_zk@u@2n(rOy&XA7Mob>2K^o6g>eh4K$+b5i=+*(?<@72Axk7szl
z^3U&zKBDz>byo2X-A9HWlX`{kcQ3N}_0%fv-y_o<q2JW1l;{5Qjq3JQ%$(exW7T`h
z<mfU@J>QQnwH{uUJ+%AElHKke)B7eYYcWw>qN#XCR_A>9mqjinonMy6)fJoO7cR)F
zzQsD@9@Cu;r+?cHb%#xStNBT}>r2a`&ta2aXL@C)ST4HYqmfg-WHnzv(8<oP)idY&
z*B!cWUcPR5-nFalyH-0htKAL0!TY`Jy{g{yTIGsXR>Pb#=M(qs-yxE>D?4b9gz=%c
z8ISp<*?!gJt8`f?^7+oeRm+3=cPXDKXs_|wV0^x`UB=8zT~JO(@2Pcv%Bne2`R{lK
z%6SG_8Vl~2zVwazO#kqony=<h;(YTgXnk9+Z|WiCRa+z$W@){8zv9)?b<-Y)oRkdF
z{K>Lv%M5Q(e(BjBoyRlRR?g5e6@4!ox=Z|1mCgL6vz)KGsMs&bDsy_BnEdTz$kLu`
z%GPsco#<TO^OgB(uko>BiP;=4+Iw#9?3wA9Y!muOey!eByTxssk2}wJ$X(d0d#>k+
zsekKV*{^%|nB6~-ZQEQmPvZgq3iHjMZ}CabUiGnmrPRsnd0fByv@V~{{@lCAtaybN
z>!14H(`UZLT6YpW5e-k>X=#Y4I}I!iQ0mURk>~qkOnLU5kAJ8<N3C$qjftz|q9#P$
zxFq^CtgvyN!2WF<Q<?wU)=j?tG&SDlqqcdNa;n<n={5h3{d<#Q?AS0zv5jk|!xWa8
zjH~N}7AQ>6o}eJIb(LV(J}yVc2@(rb1TOlsY<;z1O^^$(g4?x;ii_;l1#i5<veYSM
z?vn+#H}nZ6T|Fzo=q&VrMIxC+&A~->bKfpK@dHb`UnqO*TDQ;O^{yC3?;sV%37L*8
z8ivh_Tqkn9>iV@%*^z74juemX`XGgcNn&jwV*V^b%b9+82x$c{FX>{u63eCO<hnpY
z@Cs{`M4-o(fFR?5YbT=`x|-Il@p;+4>PTp5(L)1`6%tc+uZTIi;Eih3J&lEtvHgh)
zRaq-OJt_J3?!EoH2Y;R&o?rj>$HUY2-__aAy?@~6?C;-mpOx%uo%7}RuWu#)r0(U-
ztN0qTJg=T9Cw^bmzfar`E?-{$;qS{e`S!c(|G(3V`4{vz=DGi~G;{swu~(k9$C{{o
z`CF&THF;{?zW-mFEb^~CIvw@j?(?sk;lHlFman_`BO{GJuIGYq-2KhVS5>T>^fB$_
zuQdHfo4E7tu4W4@VcAxZc-`<#P3`8}$5(ip1WadM5h^ohMRG)ff6k*Drt|(N?f9%K
zx$mUl{R?lU`wIOhy<>e_@Z9j7jpr^o^PPpu72nD5zqxiWo%!FyKf3S!^giKO`DTMA
zpLE%cj>F5&<zASmB_V6}VuPVmf?Ct>3#Kjx@`48+i8(GYeAmf!&!+9HFYC>&JDn2S
zcAb<KP08V&wDEz<X+5K}v#rjwO?le6qh!yf=u-y#mAac(?QhQ2h@6>m+Uteep&J=4
zrvw8ZzENAy!#Shnqk@3m#ywwFB>$LKb@xzF?0Jt3Mx86>vDY{CN}n`Zxxl<XvHN10
zNVvO}v=i%b?T-^5X<YO@u*3F==Z&|s1r3)ps7u{`Tqe&@a7X@=pzON`(m#(+W_b57
z{_K7Y=|65CU+45Zx_(pQ==#l`eA0_gzR2lOTfEtDR?oR(8Aevp$~x+;Spw6N{;YHV
z;&S8fZmC4ei<4^?2l6^TRb~xbC0%a4<IT3o%BwCvG*9vAE4ll1RnONSwkL&bZwlGI
zOfr4CccOaYqa8x0eHy+CCUb0%5qwY;+ED%A@g>dqs}5WZNU&UV;OhDV*DRNr8}S+K
zWvf0Sb9{N7<(iDE0VbA<GOn)Au(11fdZKrm?!TwI3f>$L%v@cU>Fn>cSbgEe$roN|
zHojQd_@Xo5i%Z5QmyAyy8AYZta>pjOe|(Z?Qz_vOWzMnbncQCZ$-r-&-1);6l@jXw
zkDnyYS#x`dLH;yr>DS9LvXk$M<mMhTcisBcG2e8u-lM5e8-0DdSI+&n!Z&Dl>d9QA
z?I*v^XMU~vt4jNCVa9a%*IQpy?fntozU^Yw`c3!r*0jI=+VQg>soLUi&8G_)5!KhW
ze*MhDdob$fap&k?$rpd@@8Nz~JiDIZe)#fCtc8^ksSSN2cthXN+ze4znHd<N6jo<v
z_~zes6KH$?OO*SI&_aQufA6t2F?<ToHt}q>$lz<^?>eH9lM=XKh0?$8we3?}E-7pK
z>8)jm+<o}&-0w4=7PL=WDY$aNN>QUr>kqFF%BV2msmc`5;tlec$+a}q!I?X>f1+E)
zOM~F5g?m-J1+1ic0#_PbN}e|9P=b_{mxSzwRhJG$xbiCZ9F||X#^7GiVNPKl-fxGN
zt#ni7@SeA{>yXxtil0RW|2}LucjU+2EoZ{6wi}&hd%XSj@7^729aaAA6OT|1E$xmd
zN%nrp{5IF0>+a5F4i~>E80S`BxFyMXaNS$Wj@;@A_ar(0o;|Vq;OcPZp#N6;&ua($
zP6|D<dCSigiQA<wD<12=@ArPQGP9sw)Q0S<8?4eyH&mr<-N2Ui^>A&%u4@J&R;kxG
zR@dZAy|6}fv(tOMBeB|FvJS7guv&6<OWfHTDqpgWsd?%rPhK*8qo(DZ{DSLTd)qc@
zzA)|ab)1=)bL!x!8zNt%jxAeo#=xxCL+aQvPyOV}OQvtkbh%_?#=Cdg#>^L6dwi8A
zK^SKp)~DvMozh7*?pTtV)Aq&4%(+T(c4OS}8$6F*OU!0$P0nEx>Dw3?U~w!>u=My1
zof|UAn-hwA&NiJjG~+e{3D_iWPWaq&w&{?u89Uec8!{R;$;Ask^qh5EpPs{4Wt&{Q
zq-Ue0%UxqL_Pvc8D_`*ToOO;&$dUU9V&o^}$W<{Xf9?P=L>f0%2Cy7^*4ea?HNfSV
zn84Q)H<)fHC37cq_vkk08f|0PhDG3o&LPih_J{wScwS-<c=N($bM?<Zx6IP(KFZ+!
zg^7K}s!aCB-8s$0{x6q(Pm8MC8&|7mrT**Qm6$Wn7HMsl)V1B`m>o9%>-+9SLZ?i!
z|Lj~l?I)MsihBy){>MT$u6j@$_U!d44ffLceIbVPuFQ{*e?EUxQPk6;uhMqiN;puo
z>jsChguj{b6tNpYvo+TDuH1Tk*~CdkotKvMxukxtf4Fj9i$uUJo&$UT*`5}U*HYQz
zpTxm+Ec=MkeBZF{RJBWPM{Y)b<h>{zxp;Dh&bdny{Yn>2Ippm6Dn#vGRf_8u!PO6L
zx*cWk-E(TX<W=K`7H=L17RBzl+GsU9&uG^-m0xGS1z&x0_bV&!64$E@Rq|c;*XiH$
zxt>&G>(27&)k3rTHaQLTHx7KisdPW&X>*0)O!4`f6u&K-lb*USszdyDrE}5dIQ`Al
zT`5udZ;tQE+I_UR!%X1l%<rBTxQ<?*>rh$Qoy=&s=)<7~rl;G?nMxVE7@o%nEuJ*-
zT%FF+5T&{6-~akHX|m?BL{%SWhm9LbJGXG9dR(wtShGCjYx~vPD_;t8Z|!^c!sNi?
zJ+YUCMZ9kGt<2rZ;l5sOT5j}~pxfE3_b(ncF1dct`MsFv^*RUJk29=q)aF<Ix_7a+
z?Bm9pv!AP6y18+8W=WXNbNiXTa!h-9?)|GUo7u2+i`!9yOA*H`_IL%D&2%~TY${uz
zqsWI}m;BXN&YSHQWbd~;nDK}Gvyiz?yNdtM@tA47s%4_~TiN`K59gPimYwRjZ?>RD
zR-QHUE$+<Yx3)#~C6||ETieY}c`0ykrq}hE*Vz`^Z<GCaII{5Hk7X6XYr1NkZa6zR
z<R+zV*)>_f%2#+#jQf6nj@xs%uYDCd)OJKM|H)?Ir2RUl9Hv@W#RV`d@!VpU`trfN
zxqSZ$t!y0s-A`kBrN#cwq{?P#f8Nu5eGEx)d8>Zy7c;&2I&*t@T+YTVHfPRiv;2K}
z_32g7y><aNyNtwh%@;OF#``|ijQf1)*)HqlK{EGGhd1Bdug^F2^pD(cYaeH=cm7&(
zV_n}f%dQ3MPd|E}6V`j}^vhQ(%odmLa{BCT<vG1Q)_+%2ihuXJ>_S`JeXCvHZaveb
z?;4p~7Z~Q>m%Z~wrr~ahx%KhwnXlaq_qnI;vUzx5mfDFeE3AM2d$f&TvTDLdjTF<n
zHMUC^Y`^MXQ6HZ6-^Oh*uZ`f&yXSU#=*R88e|qlh&(HLXWw)1n4{YHtPgs6``jRTf
zkFlPvGxa}REwOCc5*gNO7Fp_~xK>Vz|LeWDvTr{%*Y9@Nu`ZnBWZ37Nx<jH%r`^4K
zYE|j+hd%szWp~ePQ{}aKc=YX_8P}vMtzIRv3+<A#%U_*4Q&#2=_mTuPHmsGEF;Zm}
zoLQBsU}$W@r5}`^U!q`WV6I@G5X7bLnU|KY0BV8<;n;c#X?;forKZ7aFCz<6F8y$Y
zXaz$BBLyRcSdb|xsTB&*3I-r66k<`THq{H3L5Cd#94^XL#&4~ho?AJw{IuEh1v}V}
z$TW#@y1F?X6YQ{HF#P^NBt-J>tS47}*n=;}yQ~X$pXYL*ftgjqiStxIqt=xLe7qS>
z+_xS`eSA|rkF)O0N%N2EZCM4J*mqrMw&O|QQ0_?J5uV}1V?5(9)3gZ(xUOwz)P0k{
z&u-zwrEYPUHBTncZe>e=lZN6H0WS{K79l6jg-F~kZn+-<4ji&CocMnkG${5cG#UCN
za2cLS;Ny;PV$zN{%(iVqv#rbn4$V0ac*IYbpTA*~{JH0>vF41F{B&21nYaA~i>Kd;
zl+=3mZ^C)GeQ(xqnSTD1zP>cWK{)CHo4Cyf#fA%e?4JAYt$n`7{#lKS(*MI>LMLH~
awc?V*qLPZD)HE&=19M|TE>%@me>VUL%!{u8

diff --git a/code/MyLoss/__pycache__/loss_factory.cpython-39.pyc b/code/MyLoss/__pycache__/loss_factory.cpython-39.pyc
index 5d4cc8bb3818896e5944e5eb2f6b551388b67e48..7273dc294c45323d033079fc36e6b2181013059d 100644
GIT binary patch
delta 517
[... base85-encoded binary data omitted ...]

delta 534
[... base85-encoded binary data omitted ...]

diff --git a/code/MyLoss/loss_factory.py b/code/MyLoss/loss_factory.py
index f17f698..9125333 100755
--- a/code/MyLoss/loss_factory.py
+++ b/code/MyLoss/loss_factory.py
@@ -27,10 +27,10 @@ def create_loss(args, n_classes, w1=1.0, w2=0.5):
     ### MulticlassJaccardLoss(classes=np.arange(11)
     # mode = args.base_loss #BINARY_MODE \MULTICLASS_MODE \MULTILABEL_MODE 
     loss = None
-    print(conf_loss)
+    # print(conf_loss)
     if hasattr(nn, conf_loss): 
         loss = getattr(nn, conf_loss)()
-        # loss = getattr(nn, conf_loss)(label_smoothing=0.5) 
+        # loss = getattr(nn, conf_loss)(label_smoothing=0.1) 
     #binary loss
     elif conf_loss == "focal":
         loss = FocalLoss_Ori(n_classes)
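
Note on the hunk above: create_loss resolves the configured loss by name, preferring classes exposed by torch.nn and only then falling back to project-specific losses such as FocalLoss_Ori; the commented-out line records that label smoothing was tried at 0.5 and is now noted at 0.1. A minimal sketch of the torch.nn dispatch path, assuming only standard PyTorch (the function name resolve_loss is illustrative, not from the repo):

    import torch.nn as nn

    def resolve_loss(conf_loss: str):
        # torch.nn losses (e.g. "CrossEntropyLoss") are looked up by class name
        if hasattr(nn, conf_loss):
            return getattr(nn, conf_loss)()
        raise ValueError(f"no torch.nn loss named {conf_loss!r}")

    criterion = resolve_loss("CrossEntropyLoss")
    # Label smoothing, when re-enabled, is a constructor argument:
    # nn.CrossEntropyLoss(label_smoothing=0.1)
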
diff --git a/code/__pycache__/test_visualize.cpython-39.pyc b/code/__pycache__/test_visualize.cpython-39.pyc
index c3a94d6f97af78fa95e1263a76b8e60928a923d5..e06f73a4c5069d526f1bbf4449d7223a191e5e73 100644
GIT binary patch
delta 8479
[... base85-encoded binary data omitted ...]

delta 6997
[... base85-encoded binary data omitted ...]

diff --git a/code/cufile.log b/code/cufile.log
new file mode 100644
index 0000000..d9aee1a
--- /dev/null
+++ b/code/cufile.log
@@ -0,0 +1,6 @@
+ 21-12-2022 16:48:14:373 [pid=1690629 tid=1690629] NOTICE  cufio-drv:625 running in compatible mode
+ 22-12-2022 10:31:41:400 [pid=1904890 tid=1904890] NOTICE  cufio-drv:625 running in compatible mode
+ 22-12-2022 10:52:13:216 [pid=1909914 tid=1909914] NOTICE  cufio-drv:625 running in compatible mode
+ 22-12-2022 11:02:15:996 [pid=1912278 tid=1912278] NOTICE  cufio-drv:625 running in compatible mode
+ 22-12-2022 11:15:17:212 [pid=1915495 tid=1915495] NOTICE  cufio-drv:625 running in compatible mode
+ 02-01-2023 00:11:43:868 [pid=931838 tid=931838] NOTICE  cufio-drv:625 running in compatible mode
diff --git a/code/datasets/__init__.py b/code/datasets/__init__.py
index 2989858..4f19064 100644
--- a/code/datasets/__init__.py
+++ b/code/datasets/__init__.py
@@ -1,4 +1,4 @@
-
-from .custom_jpg_dataloader import JPGMILDataloader
+# from .custom_jpg_dataloader import JPGMILDataloader
+from .jpg_dataloader import JPGMILDataloader
 from .data_interface import MILDataModule
 from .fast_tensor_dl import FastTensorDataLoader
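
The hunk above swaps the module backing JPGMILDataloader from custom_jpg_dataloader to the new jpg_dataloader while keeping the package-level name stable. A minimal sketch of why call sites are unaffected, assuming a consumer script inside this repo (the comment is illustrative):

    # Consumers import from the package, not the module, so the swap in
    # datasets/__init__.py is invisible to them:
    from datasets import JPGMILDataloader, MILDataModule

    # Existing construction code keeps working unchanged as long as
    # jpg_dataloader.JPGMILDataloader preserves the old constructor signature.
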
diff --git a/code/datasets/__pycache__/__init__.cpython-39.pyc b/code/datasets/__pycache__/__init__.cpython-39.pyc
index d67531a6e6443d08e896d436a6b2ba379fe43528..72bc845bf4c597bb8c39e8bd4a53b0601a654ebc 100644
GIT binary patch
delta 36
[... base85-encoded binary data omitted ...]

delta 48
[... base85-encoded binary data omitted ...]

diff --git a/code/datasets/__pycache__/classic_jpg_dataloader.cpython-39.pyc b/code/datasets/__pycache__/classic_jpg_dataloader.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56a80385988c3216122973acc207f2a9ffeb76e6
GIT binary patch
literal 6368
[... base85-encoded binary data omitted ...]

literal 0
[... base85-encoded binary data omitted ...]

diff --git a/code/datasets/__pycache__/data_interface.cpython-39.pyc b/code/datasets/__pycache__/data_interface.cpython-39.pyc
index e1151f291a6f1bd56821ff01a34f3019adc21b35..798fdf96f92b70a082697e2b35225649b461b9e4 100644
GIT binary patch
delta 4924
[... base85-encoded binary data omitted ...]

delta 3420
[... base85-encoded binary data omitted ...]

diff --git a/code/datasets/__pycache__/feature_dataloader.cpython-39.pyc b/code/datasets/__pycache__/feature_dataloader.cpython-39.pyc
index 10319c23951eae75c10e1a3f2836050359059030..60ef98c89f08368cb1252c836eccbf99d987cfbf 100644
GIT binary patch
literal 11196
[... base85-encoded binary data omitted ...]

delta 3575
[... base85-encoded binary data omitted ...]

diff --git a/code/datasets/__pycache__/jpg_dataloader.cpython-39.pyc b/code/datasets/__pycache__/jpg_dataloader.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32985c0a0392e944a17373474d4eb59dea3f7a71
GIT binary patch
literal 6399
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/code/datasets/__pycache__/myTransforms.cpython-39.pyc b/code/datasets/__pycache__/myTransforms.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76f721cf88d4e4a13955f2aacc4747eed8fbd2fc
GIT binary patch
literal 58251
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/code/datasets/__pycache__/simple_jpg_dataloader.cpython-39.pyc b/code/datasets/__pycache__/simple_jpg_dataloader.cpython-39.pyc
index 782f8d99cad131fbd9c433fc963873773108d79b..6cc398072d0abb4066cf4db1cd155d247b46928f 100644
GIT binary patch
delta 4120
[base85-encoded binary data omitted]

delta 5029
[base85-encoded binary data omitted]

diff --git a/code/datasets/classic_jpg_dataloader.py b/code/datasets/classic_jpg_dataloader.py
new file mode 100644
index 0000000..8c98e52
--- /dev/null
+++ b/code/datasets/classic_jpg_dataloader.py
@@ -0,0 +1,336 @@
+# import pandas as pd
+
+import numpy as np
+import torch
+from torch import Tensor
+from torch.utils import data
+from torch.utils.data import random_split, DataLoader
+from torch.autograd import Variable
+from torch.nn.functional import one_hot
+import torch.utils.data as data_utils
+import torchvision.transforms as transforms
+import pandas as pd
+from sklearn.utils import shuffle
+from pathlib import Path
+from tqdm import tqdm
+from PIL import Image
+import cv2
+import json
+from imgaug import augmenters as iaa
+from torchsampler import ImbalancedDatasetSampler
+from .utils import myTransforms
+
+
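+# Tile-level ("classic") dataset: each item is a single JPG tile image with
+# its slide-level label and (slide, tile, patient) identifiers.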
+class JPGBagLoader(data_utils.Dataset):
+    def __init__(self, file_path, label_path, mode, n_classes, data_cache_size=100, max_bag_size=1000, cache=False, mixup=False, aug=False, model='inception'):
+        super().__init__()
+
+        self.data_info = []
+        self.data_cache = {}
+        self.slideLabelDict = {}
+        self.files = []
+        self.data_cache_size = data_cache_size
+        self.mode = mode
+        self.file_path = file_path
+        # self.csv_path = csv_path
+        self.label_path = label_path
+        self.n_classes = n_classes
+        self.max_bag_size = max_bag_size
+        self.min_bag_size = 50
+        self.empty_slides = []
+        self.corrupt_slides = []
+        # NOTE: caching is forced off here; the cached attributes read in
+        # __getitem__ (self.features, self.wsi_names, ...) are never populated.
+        self.cache = False
+        self.labels = []
+        if model == 'inception':
+            size = 299
+        elif model == 'vit':
+            size = 384
+        else:
+            size = 224
+
+        # read slide paths and labels from the JSON split file
+        with open(self.label_path, 'r') as f:
+            json_dict = json.load(f)
+            temp_slide_label_dict = json_dict[self.mode]
+            # print(len(temp_slide_label_dict))
+            for (x,y) in temp_slide_label_dict:
+                x = x.replace('FEATURES_RETCCL_2048', 'BLOCKS')
+                # print(x)
+                x_name = Path(x).stem
+                x_path_list = [Path(self.file_path)/x]
+                for x_path in x_path_list:
+                    if x_path.exists():
+                        # print(len(list(x_path.glob('*'))))
+
+                        self.slideLabelDict[x_name] = y
+                        self.labels += [int(y)]*len(list(x_path.glob('*')))
+                        # self.labels.append(int(y))
+                        for patch in x_path.iterdir():
+                            self.files.append((patch, x_name, y))
+
+        # with open(self.label_path, 'r') as f:
+        #     temp_slide_label_dict = json.load(f)[mode]
+        #     print(len(temp_slide_label_dict))
+        #     for (x, y) in temp_slide_label_dict:
+        #         x = Path(x).stem 
+        #         # x_complete_path = Path(self.file_path)/Path(x)
+        #         for cohort in Path(self.file_path).iterdir():
+        #             x_complete_path = Path(self.file_path) / cohort / 'BLOCKS' / Path(x)
+        #             if x_complete_path.is_dir():
+        #                 if len(list(x_complete_path.iterdir())) > self.min_bag_size:
+        #                 # print(x_complete_path)
+        #                     self.slideLabelDict[x] = y
+        #                     self.files.append(x_complete_path)
+        #                 else: self.empty_slides.append(x_complete_path)
+        
+        home = Path.cwd().parts[1]
+        self.slide_patient_dict_path = Path(self.label_path).parent / 'slide_patient_dict_an.json'
+        # self.slide_patient_dict_path = f'/{home}/ylan/data/DeepGraft/training_tables/slide_patient_dict_an.json'
+        with open(self.slide_patient_dict_path, 'r') as f:
+            self.slide_patient_dict = json.load(f)
+
+
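+        # Stain/color augmentation: brightness/contrast jitter plus HED jitter
+        # (perturbation in haematoxylin-eosin-DAB stain color space).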
+        self.color_transforms = myTransforms.Compose([
+            myTransforms.ColorJitter(
+                brightness = (0.65, 1.35), 
+                contrast = (0.5, 1.5),
+                # saturation=(0, 2), 
+                # hue=0.3,
+                ),
+            # myTransforms.RandomChoice([myTransforms.ColorJitter(saturation=(0, 2), hue=0.3),
+            #                             myTransforms.HEDJitter(theta=0.05)]),
+            myTransforms.HEDJitter(theta=0.005),
+            
+        ])
+        # NOTE: this reassignment overrides the ColorJitter/HEDJitter pipeline
+        # above, so only the grayscale conversion is active.
+        self.color_transforms = myTransforms.Compose([
+            myTransforms.Grayscale(num_output_channels=3)
+        ])
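+        # Spatial augmentation: random flip/rotation, Gaussian blur, random
+        # affine and elastic deformation.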
+        self.train_transforms = myTransforms.Compose([
+            myTransforms.RandomChoice([myTransforms.RandomHorizontalFlip(p=0.5),
+                                        myTransforms.RandomVerticalFlip(p=0.5),
+                                        myTransforms.AutoRandomRotation()]),
+        
+            myTransforms.RandomGaussBlur(radius=[0.5, 1.5]),
+            myTransforms.RandomAffineCV2(alpha=0.1),
+            myTransforms.RandomElastic(alpha=2, sigma=0.06),
+        ])
+
+        # resize to the model-specific input size (299 Inception, 384 ViT, 224 otherwise)
+        self.resize_transforms = transforms.Resize((size, size), transforms.InterpolationMode.BICUBIC)
+
+        # sometimes = lambda aug: iaa.Sometimes(0.5, aug, name="Random1")
+        # sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name="Random2")
+        # sometimes3 = lambda aug: iaa.Sometimes(0.9, aug, name="Random3")
+        # sometimes4 = lambda aug: iaa.Sometimes(0.9, aug, name="Random4")
+        # sometimes5 = lambda aug: iaa.Sometimes(0.9, aug, name="Random5")
+
+        # self.resize_transforms = iaa.Sequential([
+        #     iaa.Resize({'height': size, 'width': size}),
+        #     # iaa.Resize({'height': 299, 'width': 299}),
+        # ], name='resizeAug')
+        # # self.resize_transforms = transforms.Resize(size=(299,299))
+
+        # self.train_transforms = iaa.Sequential([
+        #     iaa.AddToHueAndSaturation(value=(-30, 30), name="MyHSV"), #13
+        #     sometimes2(iaa.GammaContrast(gamma=(0.85, 1.15), name="MyGamma")),
+        #     iaa.Fliplr(0.5, name="MyFlipLR"),
+        #     iaa.Flipud(0.5, name="MyFlipUD"),
+        #     sometimes(iaa.Rot90(k=1, keep_size=True, name="MyRot90")),
+        #     # iaa.OneOf([
+        #     #     sometimes3(iaa.PiecewiseAffine(scale=(0.015, 0.02), cval=0, name="MyPiece")),
+        #     #     sometimes4(iaa.ElasticTransformation(alpha=(100, 200), sigma=20, cval=0, name="MyElastic")),
+        #     #     sometimes5(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, rotate=(-45, 45), shear=(-4, 4), cval=0, name="MyAffine"))
+        #     # ], name="MyOneOf")
+
+        # ], name="MyAug")
+        self.val_transforms = transforms.Compose([
+            # 
+            transforms.ToTensor(),
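+            # standard ImageNet channel statistics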
+            transforms.Normalize(
+                mean=[0.485, 0.456, 0.406],
+                std=[0.229, 0.224, 0.225],
+            ),
+            # RangeNormalization(),
+        ])
+
+
+
+
+        
+
+    def get_data(self, query):
+        
+        patch_path, wsi_name, label = query
+
+        # img = np.asarray(Image.open(patch_path)).astype(np.uint8)
+        img = Image.open(patch_path)
+        # img = np.moveaxis(img, 2, 0)
+        # print(img.shape)
+        # img = torch.from_numpy(img)
+        tile_name = Path(patch_path).stem
+        # patient = tile_name.rsplit('_', 1)[0]
+        patient = self.slide_patient_dict[wsi_name]
+
+        # for tile_path in Path(file_path).iterdir():
+        #     img = np.asarray(Image.open(tile_path)).astype(np.uint8)
+        #     img = np.moveaxis(img, 2, 0)
+        #     # print(img.shape)
+        #     img = torch.from_numpy(img)
+        #     wsi_batch.append(img)
+        #     name_batch.append(tile_path.stem)
+
+        # wsi_batch = torch.stack(wsi_batch)
+        return img, label, (wsi_name, tile_name, patient)
+    
+    def get_labels(self, indices):
+        return [self.labels[i] for i in indices]
+
+
+    def to_fixed_size_bag(self, bag, bag_size: int = 512):
+
+        # subsample up to bag_size instances; zero-pad below if the bag is smaller
+
+        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
+        bag_samples = bag[bag_idxs]
+        # name_samples = [names[i] for i in bag_idxs]
+
+        # bag_sample_names = [bag_names[i] for i in bag_idxs]
+        # q, r  = divmod(bag_size, bag_samples.shape[0])
+        # if q > 0:
+        #     bag_samples = torch.cat([bag_samples]*q, 0)
+
+        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
+
+        # zero-pad if we don't have enough samples
+        zero_padded = torch.cat((bag_samples,
+                                torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
+
+        return zero_padded, min(bag_size, len(bag))
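+    # Illustrative sketch (assumed shapes): for a bag of 3 RGB tiles padded to
+    # bag_size=5,
+    #   bag = torch.rand(3, 3, 224, 224)
+    #   padded, n_real = self.to_fixed_size_bag(bag, bag_size=5)
+    #   padded.shape   # torch.Size([5, 3, 224, 224]); n_real == 3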
+
+    def data_dropout(self, bag, drop_rate):
+        bag_size = bag.shape[0]
+        bag_idxs = torch.randperm(bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_samples = bag[bag_idxs]
+        # name_samples = [batch_names[i] for i in bag_idxs]
+
+        return bag_samples
+
+    def __len__(self):
+        return len(self.files)
+
+    def __getitem__(self, index):
+
+        if self.cache:
+            label = self.labels[index]
+            wsi = self.features[index]
+            label = int(label)
+            wsi_name = self.wsi_names[index]
+            tile_name = self.name_batches[index]
+            patient = self.patients[index]
+            # feats = Variable(Tensor(feats))
+            return wsi, label, (wsi_name, tile_name, patient)
+        else:
+            t = self.files[index]
+            # label = self.labels[index]
+            if self.mode=='train':
+                # t = self.files[index]
+                # label = self.labels[index]
+                img, label, (wsi_name, tile_name, patient) = self.get_data(t)
+                # save_img(img, f'{tile_name}_original')  # debug output
+                img = self.resize_transforms(img)
+                img = self.color_transforms(img)
+                img = self.train_transforms(img)
+
+                # save_img(img, f'{tile_name}')
+
+                img = self.val_transforms(img.copy())
+
+                
+                # ft = ft.view(-1, 512)
+                
+            else:
+                img, label, (wsi_name, tile_name, patient) = self.get_data(t)
+                # label = Variable(Tensor(label))
+                # seq_img_d = self.train_transforms.to_deterministic()
+                # seq_img_resize = self.resize_transforms.to_deterministic()
+                # img = img.numpy().astype(np.uint8)
+                img = self.resize_transforms(img)
+                # img = np.moveaxis(img, 0, 2)
+                img = self.val_transforms(img)
+
+            return img, label, (wsi_name, tile_name, patient)
+
+def save_img(img, comment):
+    home = Path.cwd().parts[1]
+    outputPath = f'/{home}/ylan/data/DeepGraft/224_128uM_annotated/debug/augments_2'
+    img = img.convert('RGB')
+    img.save(f'{outputPath}/{comment}.jpg')
+
+if __name__ == '__main__':
+    
+    from pathlib import Path
+    import os
+    import time
+    from fast_tensor_dl import FastTensorDataLoader
+    from custom_resnet50 import resnet50_baseline
+    
+    
+
+    home = Path.cwd().parts[1]
+    # train_csv = f'/{home}/ylan/DeepGraft_project/code/debug_train.csv'
+    data_root = f'/{home}/ylan/data/DeepGraft/224_256uM_annotated'
+    # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
+    # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
+    # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
+    label_path = f'/{home}/ylan/data/DeepGraft/training_tables/dg_limit_5_split_PAS_HE_Jones_norm_rest_test.json'
+    # output_dir = f'/{data_root}/debug/augments'
+    # os.makedirs(output_dir, exist_ok=True)
+
+    n_classes = 2
+
+    dataset = JPGBagLoader(data_root, label_path=label_path, mode='train', n_classes=n_classes, cache=False)
+
+    # print(dataset.get_labels(0))
+    # a = int(len(dataset)* 0.8)
+    # b = int(len(dataset) - a)
+    # train_data, valid_data = random_split(dataset, [a, b])
+    # print(dataset.dataset)
+    # a = int(len(dataset)* 0.8)
+    # b = int(len(dataset) - a)
+    # train_ds, val_ds = torch.utils.data.random_split(dataset, [a, b])
+    # dl = FastTensorDataLoader(dataset, batch_size=1, shuffle=False)
+    dl = DataLoader(dataset, batch_size=5, num_workers=8, pin_memory=True)
+    # print(len(dl))
+    # dl = DataLoader(dataset, batch_size=1, sampler=ImbalancedDatasetSampler(dataset), num_workers=5)
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    scaler = torch.cuda.amp.GradScaler()
+
+    model_ft = resnet50_baseline(pretrained=True)
+    for param in model_ft.parameters():
+        param.requires_grad = False
+    model_ft.to(device)
+    
+    c = 0
+    label_count = [0] *n_classes
+    # print(len(dl))
+    start = time.time()
+    for item in tqdm(dl): 
+
+        if c >= 1000:
+            break
+        bag, label, (name, batch_names, patient) = item
+        print(bag.shape)
+        # print(name)
+        # print(batch_names)
+        # print(patient)
+        # print(len(batch_names))
+
+        # bag = bag.squeeze(0).float().to(device)
+        # label = label.to(device)
+        # with torch.cuda.amp.autocast():
+        #     output = model_ft(bag)
+        c += 1
+    end = time.time()
+
+    print('Bag Time: ', end-start)
\ No newline at end of file
diff --git a/code/datasets/custom_jpg_dataloader.py b/code/datasets/custom_jpg_dataloader.py
index 95d5adb..8e1ce3d 100644
--- a/code/datasets/custom_jpg_dataloader.py
+++ b/code/datasets/custom_jpg_dataloader.py
@@ -19,6 +19,7 @@ from albumentations.pytorch import ToTensorV2
 from imgaug import augmenters as iaa
 import imgaug as ia
 from torchsampler import ImbalancedDatasetSampler
+from .utils import myTransforms
 
 
 
@@ -173,19 +174,48 @@ class JPGMILDataloader(data.Dataset):
 
     def __getitem__(self, index):
         # get data
+
+        color_transforms = myTransforms.Compose([
+            myTransforms.ColorJitter(
+                brightness = (0.65, 1.35), 
+                contrast = (0.5, 1.5),
+                # saturation=(0, 2), 
+                # hue=0.3,
+                ),
+            # myTransforms.RandomChoice([myTransforms.ColorJitter(saturation=(0, 2), hue=0.3),
+            #                             myTransforms.HEDJitter(theta=0.05)]),
+            myTransforms.HEDJitter(theta=0.005),
+            
+        ])
+        train_transforms = myTransforms.Compose([
+            myTransforms.RandomChoice([myTransforms.RandomHorizontalFlip(p=0.5),
+                                        myTransforms.RandomVerticalFlip(p=0.5),
+                                        myTransforms.AutoRandomRotation()]),
+        
+            myTransforms.RandomGaussBlur(radius=[0.5, 1.5]),
+            myTransforms.RandomAffineCV2(alpha=0.1),
+            myTransforms.RandomElastic(alpha=2, sigma=0.06),
+        ])
+
+
         (batch, batch_names), label, name, patient = self.get_data(index)
         out_batch = []
-        seq_img_d = self.train_transforms.to_deterministic()
+        # seq_img_d = self.train_transforms.to_deterministic()
         
         if self.mode == 'train':
             # print(img)
             # print(.shape)
             for img in batch: # expects numpy 
                 img = img.numpy().astype(np.uint8)
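+                # NOTE: ColorJitter/HEDJitter expect PIL Images; converting with
+                # Image.fromarray(img) may be needed before applying them to an
+                # ndarray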
+
+
+                img = color_transforms(img)
+                img = train_transforms(img)
                 # img = self.albu_transforms(image=img)
                 # print(img)
                 # print(img.shape)
-                img = seq_img_d.augment_image(img)
+                # img = seq_img_d.augment_image(img)
+
                 img = self.val_transforms(img.copy())
                 # print(img)
                 out_batch.append(img)
diff --git a/code/datasets/data_interface.py b/code/datasets/data_interface.py
index 7049b22..065e844 100644
--- a/code/datasets/data_interface.py
+++ b/code/datasets/data_interface.py
@@ -12,8 +12,8 @@ from torchvision.datasets import MNIST
 from torchvision import transforms
 # from .camel_dataloader import FeatureBagLoader
 from .custom_dataloader import HDF5MILDataloader
-# from .custom_jpg_dataloader import JPGMILDataloader
-from .simple_jpg_dataloader import JPGBagLoader
+from .jpg_dataloader import JPGMILDataloader
+from .classic_jpg_dataloader import JPGBagLoader
 from .zarr_feature_dataloader_simple import ZarrFeatureBagLoader
 from .feature_dataloader import FeatureBagLoader
 from pathlib import Path
@@ -124,7 +124,7 @@ import torch
 
 class MILDataModule(pl.LightningDataModule):
 
-    def __init__(self, data_root: str, label_path: str, batch_size: int=1, num_workers: int=8, n_classes=2, cache: bool=True, use_features=False, mixup=False, aug=False, *args, **kwargs):
+    def __init__(self, data_root: str, label_path: str, model_name: str, batch_size: int=1, num_workers: int=8, n_classes=2, cache: bool=True, use_features=False, train_classic=False, mixup=False, aug=False, *args, **kwargs):
         super().__init__()
         self.data_root = data_root
         self.label_path = label_path
@@ -140,34 +140,43 @@ class MILDataModule(pl.LightningDataModule):
         self.seed = 1
         self.mixup = mixup
         self.aug = aug
+        self.train_classic = train_classic
+        self.max_bag_size = 1000
+        self.model_name = model_name
 
 
         self.class_weight = []
         self.cache = cache
         self.fe_transform = None
-        if not use_features: 
+        # print('use_features: ', use_features)
+        if self.train_classic: 
             self.base_dataloader = JPGBagLoader
+        elif not use_features: 
+            self.base_dataloader = JPGMILDataloader
         else: 
             self.base_dataloader = FeatureBagLoader
-            self.cache = True
+            # self.cache = True
 
     def setup(self, stage: Optional[str] = None) -> None:
         home = Path.cwd().parts[1]
 
         if stage in (None, 'fit'):
-            dataset = self.base_dataloader(self.data_root, label_path=self.label_path, mode='train', n_classes=self.n_classes, cache=self.cache, mixup=self.mixup, aug=self.aug)
+            self.train_data = self.base_dataloader(self.data_root, label_path=self.label_path, mode='train', n_classes=self.n_classes, cache=self.cache, mixup=self.mixup, aug=self.aug, model=self.model_name)
+            self.valid_data = self.base_dataloader(self.data_root, label_path=self.label_path, mode='val', n_classes=self.n_classes, cache=self.cache, model=self.model_name)
+
             # dataset = JPGMILDataloader(self.data_root, label_path=self.label_path, mode='train', n_classes=self.n_classes)
-            print(len(dataset))
-            a = int(len(dataset)* 0.8)
-            b = int(len(dataset) - a)
-            self.train_data, self.valid_data = random_split(dataset, [a, b])
+            print('Train Data: ', len(self.train_data))
+            print('Val Data: ', len(self.valid_data))
+            # a = int(len(dataset)* 0.8)
+            # b = int(len(dataset) - a)
+            # self.train_data, self.valid_data = random_split(dataset, [a, b])
 
             # self.weights = self.get_weights(dataset)
 
 
 
         if stage in (None, 'test'):
-            self.test_data = self.base_dataloader(self.data_root, label_path=self.label_path, mode='test', n_classes=self.n_classes, cache=False)
+            self.test_data = self.base_dataloader(self.data_root, label_path=self.label_path, mode='test', n_classes=self.n_classes, cache=False, model=self.model_name, mixup=False, aug=False)
             print(len(self.test_data))
 
         return super().setup(stage=stage)
@@ -177,13 +186,17 @@ class MILDataModule(pl.LightningDataModule):
     def train_dataloader(self) -> DataLoader:
         # return DataLoader(self.train_data,  batch_size = self.batch_size, num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
         # return DataLoader(self.train_data,  batch_size = self.batch_size, sampler = WeightedRandomSampler(self.weights, len(self.weights)), num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
-        return DataLoader(self.train_data,  batch_size = self.batch_size, sampler=ImbalancedDatasetSampler(self.train_data), num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
+        if self.train_classic:
+            return DataLoader(self.train_data, batch_size = self.batch_size, num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
+        else:
+            return DataLoader(self.train_data,  batch_size = self.batch_size, sampler=ImbalancedDatasetSampler(self.train_data), num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
+            # return DataLoader(self.train_data,  batch_size = self.batch_size, sampler=ImbalancedDatasetSampler(self.train_data), num_workers=self.num_workers, collate_fn=self.custom_collate) #batch_transforms=self.transform, pseudo_batch_dim=True, 
         #sampler=ImbalancedDatasetSampler(self.train_data)
     def val_dataloader(self) -> DataLoader:
         return DataLoader(self.valid_data, batch_size = self.batch_size, num_workers=self.num_workers)
     
     def test_dataloader(self) -> DataLoader:
-        return DataLoader(self.test_data, batch_size = self.batch_size, num_workers=self.num_workers)
+        return DataLoader(self.test_data, batch_size = 1, num_workers=self.num_workers)
 
     def get_weights(self, dataset):
 
@@ -201,6 +214,69 @@ class MILDataModule(pl.LightningDataModule):
 
         return torch.DoubleTensor(weights)
     
+    def custom_collate(self, batch):
+        # debug stub: inspects incoming bag shapes and returns the batch unchanged
+        for i in batch:
+            
+            bag, label, (wsi_name, patient) = i
+            print(bag.shape)
+        
+        # print(bag.shape)
+
+        # bag_size = bag.shape[0]
+        # bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
+        # # bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
+        # out_bag = bag[bag_idxs, :]
+        # if self.mixup:
+        #     out_bag = self.get_mixup_bag(out_bag)
+        #     # batch_coords = 
+        # if out_bag.shape[0] < self.max_bag_size:
+        #     out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))
+
+        # # shuffle again
+        # out_bag_idxs = torch.randperm(out_bag.shape[0])
+        # out_bag = out_bag[out_bag_idxs]
+        # batch_coords = batch_coords[bag_idxs]
+
+        
+        # return out_bag, label, (wsi_name, batch_coords, patient)
+        return batch
+
+        
+    def get_mixup_bag(self, bag):
+
+        bag_size = bag.shape[0]
+
+        a = torch.rand([bag_size])
+        b = 0.6
+        rand_x = torch.randint(0, bag_size, [bag_size,])
+        rand_y = torch.randint(0, bag_size, [bag_size,])
+
+        bag_x = bag[rand_x, :]
+        bag_y = bag[rand_y, :]
+
+        temp_bag = (bag_x.t()*a).t() + (bag_y.t()*(1.0-a)).t()
+        # print('temp_bag: ', temp_bag.shape)
+
+        if bag_size < self.max_bag_size:
+            diff = self.max_bag_size - bag_size
+            bag_idxs = torch.randperm(bag_size)[:diff]
+            
+            # print('bag: ', bag.shape)
+            # print('bag_idxs: ', bag_idxs.shape)
+            mixup_bag = torch.cat((bag, temp_bag[bag_idxs, :]))
+            # print('mixup_bag: ', mixup_bag.shape)
+        else:
+            random_sample_list = torch.rand(bag_size)
+            mixup_bag = [bag[i] if random_sample_list[i] > b else temp_bag[i] for i in range(bag_size)] #make pytorch native?!
+            mixup_bag = torch.stack(mixup_bag)
+            # print('else')
+            # print(mixup_bag.shape)
+
+        return mixup_bag
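+    # Worked example (illustrative values): for a single instance pair with
+    # mixing weight a = 0.3,
+    #   x = torch.tensor([2.0, 4.0]); y = torch.tensor([0.0, 2.0])
+    #   0.3 * x + 0.7 * y   # tensor([0.6000, 2.6000])
+    # (bag_x.t()*a).t() above broadcasts one weight per instance across features.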
+
+
 
 class DataModule(pl.LightningDataModule):
 
@@ -240,7 +316,7 @@ class DataModule(pl.LightningDataModule):
         return super().setup(stage=stage)
 
     def train_dataloader(self) -> DataLoader:
-        return DataLoader(self.train_data,  self.batch_size, shuffle=False,) #batch_transforms=self.transform, pseudo_batch_dim=True, 
+        return DataLoader(self.train_data, self.batch_size, sampler=ImbalancedDatasetSampler(self.train_data), shuffle=False) #batch_transforms=self.transform, pseudo_batch_dim=True, 
         #sampler=ImbalancedDatasetSampler(self.train_data),
     def val_dataloader(self) -> DataLoader:
         return DataLoader(self.valid_data, batch_size = self.batch_size)
@@ -325,3 +401,4 @@ class CrossVal_MILDataModule(BaseKFoldDataModule):
 
 
 
+# if __name__ == '__main__':
diff --git a/code/datasets/feature_dataloader.py b/code/datasets/feature_dataloader.py
index 3e9dbb5..21b8081 100644
--- a/code/datasets/feature_dataloader.py
+++ b/code/datasets/feature_dataloader.py
@@ -23,13 +23,14 @@ import h5py
 
 
 class FeatureBagLoader(data.Dataset):
-    def __init__(self, file_path, label_path, mode, n_classes, cache=False, mixup=False, aug=False, data_cache_size=5000, max_bag_size=1000):
+    def __init__(self, file_path, label_path, mode, model, n_classes, cache=False, mixup=False, aug=False, data_cache_size=5000, max_bag_size=1000):
         super().__init__()
 
         self.data_info = []
         self.data_cache = {}
         self.slideLabelDict = {}
         self.files = []
+        self.labels = []
         self.data_cache_size = data_cache_size
         self.mode = mode
         self.file_path = file_path
@@ -48,7 +49,7 @@ class FeatureBagLoader(data.Dataset):
         self.missing = []
 
         home = Path.cwd().parts[1]
-        self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict_an.json'
+        self.slide_patient_dict_path = f'/{home}/ylan/data/DeepGraft/training_tables/slide_patient_dict_an.json'
         with open(self.slide_patient_dict_path, 'r') as f:
             self.slide_patient_dict = json.load(f)
 
@@ -59,25 +60,28 @@ class FeatureBagLoader(data.Dataset):
             # print(len(temp_slide_label_dict))
             for (x,y) in temp_slide_label_dict:
                 
+                x = x.replace('FEATURES_RETCCL_2048', 'FEATURES_RETCCL_2048_HED')
                 x_name = Path(x).stem
-                x_path_list = [Path(self.file_path)/x]
-                # x_name = x.stem
-                # x_path_list = [Path(self.file_path)/ x for (x,y) in temp_slide_label_dict]
-                if self.aug:
-                    for i in range(5):
-                        aug_path = Path(self.file_path)/f'{x}_aug{i}'
-                        x_path_list.append(aug_path)
-
-                for x_path in x_path_list: 
-                    
-                    if x_path.exists():
-                        self.slideLabelDict[x_name] = y
-                        self.files.append(x_path)
-                    elif Path(str(x_path) + '.zarr').exists():
-                        self.slideLabelDict[x] = y
-                        self.files.append(str(x_path)+'.zarr')
-                    else:
-                        self.missing.append(x)
+                if x_name in self.slide_patient_dict.keys():
+                    x_path_list = [Path(self.file_path)/x]
+                    # x_name = x.stem
+                    # x_path_list = [Path(self.file_path)/ x for (x,y) in temp_slide_label_dict]
+                    if self.aug:
+                        for i in range(10):
+                            aug_path = Path(self.file_path)/f'{x}_aug{i}'
+                            x_path_list.append(aug_path)
+
+                    for x_path in x_path_list: 
+                        
+                        if x_path.exists():
+                            self.slideLabelDict[x_name] = y
+                            self.labels.append(int(y))
+                            self.files.append(x_path)
+                        elif Path(str(x_path) + '.zarr').exists():
+                            self.slideLabelDict[x_name] = y
+                            self.labels.append(int(y))
+                            self.files.append(str(x_path)+'.zarr')
+                        else:
+                            self.missing.append(x)
                 # print(x, y)
                 # x_complete_path = Path(self.file_path)/Path(x)
                 # for cohort in Path(self.file_path).iterdir():
@@ -129,22 +133,24 @@ class FeatureBagLoader(data.Dataset):
         
 
         self.feature_bags = []
-        self.labels = []
+        
         self.wsi_names = []
         self.coords = []
         self.patients = []
         if self.cache:
             for t in tqdm(self.files):
                 # zarr_t = str(t) + '.zarr'
-                batch, label, (wsi_name, batch_coords, patient) = self.get_data(t)
+                batch, (wsi_name, batch_coords, patient) = self.get_data(t)
 
                 # print(label)
-                self.labels.append(label)
+                # self.labels.append(label)
                 self.feature_bags.append(batch)
                 self.wsi_names.append(wsi_name)
                 self.coords.append(batch_coords)
                 self.patients.append(patient)
-        
+        # else: 
+        #     for t in tqdm(self.files):
+        #         self.labels = 
 
     def get_data(self, file_path):
         
@@ -154,7 +160,7 @@ class FeatureBagLoader(data.Dataset):
         if wsi_name.split('_')[-1][:3] == 'aug':
             wsi_name = '_'.join(wsi_name.split('_')[:-1])
         # if wsi_name in self.slideLabelDict:
-        label = self.slideLabelDict[wsi_name]
+        # label = self.slideLabelDict[wsi_name]
         patient = self.slide_patient_dict[wsi_name]
 
         if Path(file_path).suffix == '.zarr':
@@ -171,11 +177,11 @@ class FeatureBagLoader(data.Dataset):
         # np_bag = np.array(z['data'][:])
         # np_bag = np.array(zarr.open(file_path, 'r')).astype(np.uint8)
         # label = torch.as_tensor(label)
-        label = int(label)
+        # label = int(label)
         wsi_bag = torch.from_numpy(np_bag)
         batch_coords = torch.from_numpy(coords)
 
-        return wsi_bag, label, (wsi_name, batch_coords, patient)
+        return wsi_bag, (wsi_name, batch_coords, patient)
     
     def get_labels(self, indices):
         # for i in indices: 
@@ -224,10 +230,6 @@ class FeatureBagLoader(data.Dataset):
         bag_x = bag[rand_x, :]
         bag_y = bag[rand_y, :]
 
-        # print('bag_x: ', bag_x.shape)
-        # print('bag_y: ', bag_y.shape)
-        # print('a*bag_x: ', (a*bag_x).shape)
-        # print('(1.0-a)*bag_y: ', ((1.0-a)*bag_y).shape)
 
         temp_bag = (bag_x.t()*a).t() + (bag_y.t()*(1.0-a)).t()
         # print('temp_bag: ', temp_bag.shape)
@@ -275,7 +277,9 @@ class FeatureBagLoader(data.Dataset):
             # return wsi, label, (wsi_name, batch_coords, patient)
         else:
             t = self.files[index]
-            bag, label, (wsi_name, batch_coords, patient) = self.get_data(t)
+            label = self.labels[index]
+            bag, (wsi_name, batch_coords, patient) = self.get_data(t)
+            # print(bag.shape)
             # label = torch.as_tensor(label)
             # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
                 # self.labels.append(label)
@@ -283,34 +287,297 @@ class FeatureBagLoader(data.Dataset):
                 # self.wsi_names.append(wsi_name)
                 # self.name_batches.append(name_batch)
                 # self.patients.append(patient)
-        if self.mode == 'train':
-            bag_size = bag.shape[0]
+            if self.mode == 'train':
+                bag_size = bag.shape[0]
+
+                bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
+                # bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
+                out_bag = bag[bag_idxs, :]
+                if self.mixup:
+                    out_bag = self.get_mixup_bag(out_bag)
+                    # batch_coords = 
+                if out_bag.shape[0] < self.max_bag_size:
+                    out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))
+
+                # shuffle again
+                out_bag_idxs = torch.randperm(out_bag.shape[0])
+                out_bag = out_bag[out_bag_idxs]
+
+
+                # batch_coords only useful for test
+                batch_coords = batch_coords[bag_idxs]
+                # out_bag = bag
+
+            # train pipeline: random subsample -> optional mixup -> zero-pad to
+            # max_bag_size -> shuffle (mixup takes a per-instance linear
+            # combination of two feature vectors; noise injection is a possible
+            # extension)
+
+
+            else: 
+                bag_size = bag.shape[0]
+                bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
+                out_bag = bag[bag_idxs, :]
+                if out_bag.shape[0] < self.max_bag_size:
+                    out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))
+                
+
+        return out_bag, label, (wsi_name, patient)
+        # return out_bag, label, (wsi_name, batch_coords, patient)
+
+class FeatureBagLoader_Mixed(data.Dataset):
+    def __init__(self, file_path, label_path, mode, n_classes, cache=False, mixup=False, aug=False, data_cache_size=5000, max_bag_size=1000):
+        super().__init__()
+
+        self.data_info = []
+        self.data_cache = {}
+        self.slideLabelDict = {}
+        self.files = []
+        self.labels = []
+        self.data_cache_size = data_cache_size
+        self.mode = mode
+        self.file_path = file_path
+        # self.csv_path = csv_path
+        self.label_path = label_path
+        self.n_classes = n_classes
+        self.max_bag_size = max_bag_size
+        self.drop_rate = 0.2
+        # self.min_bag_size = 120
+        self.empty_slides = []
+        self.corrupt_slides = []
+        self.cache = cache
+        self.mixup = mixup
+        self.aug = aug
+        
+        self.missing = []
+
+        home = Path.cwd().parts[1]
+        self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict_an.json'
+        with open(self.slide_patient_dict_path, 'r') as f:
+            self.slide_patient_dict = json.load(f)
+
+        # read labels and slide_path from csv
+        with open(self.label_path, 'r') as f:
+            json_dict = json.load(f)
+            temp_slide_label_dict = json_dict[self.mode]
+            # print(len(temp_slide_label_dict))
+            for (x,y) in temp_slide_label_dict:
+                
+                x_name = Path(x).stem
+                x_path_list = [Path(self.file_path)/x]
+                # x_name = x.stem
+                # x_path_list = [Path(self.file_path)/ x for (x,y) in temp_slide_label_dict]
+                if self.aug:
+                    for i in range(5):
+                        aug_path = Path(self.file_path)/f'{x}_aug{i}'
+                        x_path_list.append(aug_path)
+
+                for x_path in x_path_list: 
+                    
+                    if x_path.exists():
+                        self.slideLabelDict[x_name] = y
+                        self.labels.append(int(y))
+                        self.files.append(x_path)
+                        for patch_path in x_path.iterdir():
+                            self.files.append((patch_path, x_name, y))
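+                        # NOTE: self.files now holds both the slide directory and
+                        # per-patch (path, name, label) tuples; get_data below only
+                        # handles plain paths, so one of these appends is likely
+                        # unintended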
+
+
+        self.feature_bags = []
+        
+        self.wsi_names = []
+        self.coords = []
+        self.patients = []
+
+        if self.cache:
+            for t in tqdm(self.files):
+                # zarr_t = str(t) + '.zarr'
+                batch, (wsi_name, batch_coords, patient) = self.get_data(t)
+
+                # print(label)
+                # self.labels.append(label)
+                self.feature_bags.append(batch)
+                self.wsi_names.append(wsi_name)
+                self.coords.append(batch_coords)
+                self.patients.append(patient)
+        # else: 
+        #     for t in tqdm(self.files):
+        #         self.labels = 
+
+    # def create_bag(self):
+
+
+
+    def get_data(self, file_path):
+        
+        batch_names = []  # TODO: add a name_batch read-out
+
+        wsi_name = Path(file_path).stem
+        if wsi_name.split('_')[-1][:3] == 'aug':
+            wsi_name = '_'.join(wsi_name.split('_')[:-1])
+        # if wsi_name in self.slideLabelDict:
+        # label = self.slideLabelDict[wsi_name]
+        patient = self.slide_patient_dict[wsi_name]
+
+        if Path(file_path).suffix == '.zarr':
+            z = zarr.open(file_path, 'r')
+            np_bag = np.array(z['data'][:])
+            coords = np.array(z['coords'][:])
+        else:
+            with h5py.File(file_path, 'r') as hdf5_file:
+                np_bag = hdf5_file['features'][:]
+                coords = hdf5_file['coords'][:]
+
+        # np_bag = torch.load(file_path)
+        # z = zarr.open(file_path, 'r')
+        # np_bag = np.array(z['data'][:])
+        # np_bag = np.array(zarr.open(file_path, 'r')).astype(np.uint8)
+        # label = torch.as_tensor(label)
+        # label = int(label)
+        wsi_bag = torch.from_numpy(np_bag)
+        batch_coords = torch.from_numpy(coords)
+
+        return wsi_bag, (wsi_name, batch_coords, patient)
+    
+    def get_labels(self, indices):
+        # for i in indices: 
+        #     print(self.labels[i])
+        return [self.labels[i] for i in indices]
+
+
+    def to_fixed_size_bag(self, bag, names, bag_size: int = 512):
+
+        # subsample up to bag_size instances; unlike the other loaders, this
+        # variant returns the sampled names and does not zero-pad
+
+        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
+        bag_samples = bag[bag_idxs]
+        name_samples = [names[i] for i in bag_idxs]
+        # bag_sample_names = [bag_names[i] for i in bag_idxs]
+        # q, r  = divmod(bag_size, bag_samples.shape[0])
+        # if q > 0:
+        #     bag_samples = torch.cat([bag_samples]*q, 0)
+
+        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
+
+        # zero-pad if we don't have enough samples
+        # zero_padded = torch.cat((bag_samples,
+        #                         torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
+
+        return bag_samples, name_samples, min(bag_size, len(bag))
+
+    def data_dropout(self, bag, batch_names, drop_rate):
+        # bag_size = self.max_bag_size
+        bag_size = bag.shape[0]
+        bag_idxs = torch.randperm(bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_samples = bag[bag_idxs]
+        name_samples = [batch_names[i] for i in bag_idxs]
 
-            bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
-            # bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
-            out_bag = bag[bag_idxs, :]
-            if self.mixup:
-                out_bag = self.get_mixup_bag(out_bag)
-                # batch_coords = 
-            if out_bag.shape[0] < self.max_bag_size:
-                out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))
+        return bag_samples, name_samples
 
-            # shuffle again
-            out_bag_idxs = torch.randperm(out_bag.shape[0])
-            out_bag = out_bag[out_bag_idxs]
+    def get_mixup_bag(self, bag):
 
+        bag_size = bag.shape[0]
 
-            # batch_coords only useful for test
-            batch_coords = batch_coords[bag_idxs]
+        a = torch.rand([bag_size])
+        b = 0.6
+        rand_x = torch.randint(0, bag_size, [bag_size,])
+        rand_y = torch.randint(0, bag_size, [bag_size,])
+
+        bag_x = bag[rand_x, :]
+        bag_y = bag[rand_y, :]
+
+
+        temp_bag = (bag_x.t()*a).t() + (bag_y.t()*(1.0-a)).t()
+        # print('temp_bag: ', temp_bag.shape)
+
+        if bag_size < self.max_bag_size:
+            diff = self.max_bag_size - bag_size
+            bag_idxs = torch.randperm(bag_size)[:diff]
+            
+            # print('bag: ', bag.shape)
+            # print('bag_idxs: ', bag_idxs.shape)
+            mixup_bag = torch.cat((bag, temp_bag[bag_idxs, :]))
+            # print('mixup_bag: ', mixup_bag.shape)
+        else:
+            random_sample_list = torch.rand(bag_size)
+            mixup_bag = [bag[i] if random_sample_list[i] > b else temp_bag[i] for i in range(bag_size)] #make pytorch native?!
+            mixup_bag = torch.stack(mixup_bag)
+            # print('else')
+            # print(mixup_bag.shape)
+
+        return mixup_bag
+
+    def __len__(self):
+        return len(self.files)
+
+    def __getitem__(self, index):
+
+        if self.cache:
+            label = self.labels[index]
+            bag = self.feature_bags[index]
             
+        
+            
+            # label = Variable(Tensor(label))
+            # label = torch.as_tensor(label)
+            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+            wsi_name = self.wsi_names[index]
+            batch_coords = self.coords[index]
+            patient = self.patients[index]
+
+            
+            #random dropout
+            #shuffle
+
+            # feats = Variable(Tensor(feats))
+            # return wsi, label, (wsi_name, batch_coords, patient)
+            out_bag = bag  # cached bags are returned as-is (no subsampling)
+        else:
+            t = self.files[index]
+            label = self.labels[index]
+            bag, (wsi_name, batch_coords, patient) = self.get_data(t)
+            # print(bag.shape)
+            # label = torch.as_tensor(label)
+            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+                # self.labels.append(label)
+                # self.feature_bags.append(batch)
+                # self.wsi_names.append(wsi_name)
+                # self.name_batches.append(name_batch)
+                # self.patients.append(patient)
+            if self.mode == 'train':
+                bag_size = bag.shape[0]
+
+                bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
+                # bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
+                out_bag = bag[bag_idxs, :]
+                if self.mixup:
+                    out_bag = self.get_mixup_bag(out_bag)
+                    # batch_coords = 
+                if out_bag.shape[0] < self.max_bag_size:
+                    out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))
+
+                # shuffle again
+                out_bag_idxs = torch.randperm(out_bag.shape[0])
+                out_bag = out_bag[out_bag_idxs]
+
+
+                # batch_coords only useful for test
+                batch_coords = batch_coords[bag_idxs]
+                # out_bag = bag
+
+            # train pipeline: random subsample -> optional mixup -> zero-pad to
+            # max_bag_size -> shuffle (mixup takes a per-instance linear
+            # combination of two feature vectors; noise injection is a possible
+            # extension)
+
 
-        # mixup? Linear combination of 2 vectors
-        # add noise
+            # elif self.mode == 'val': 
+            #     bag_size = bag.shape[0]
+            #     bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
+            #     out_bag = bag[bag_idxs, :]
+            #     if out_bag.shape[0] < self.max_bag_size:
+            #         out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))
+            else:
+                # bag_size = bag.shape[0]
+                out_bag = bag
 
 
-        else: out_bag = bag
+        return out_bag, label, (wsi_name, patient)
 
-        return out_bag, label, (wsi_name, batch_coords, patient)
 
 if __name__ == '__main__':
     
@@ -328,23 +595,24 @@ if __name__ == '__main__':
     # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
     # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
     # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
-    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_test.json'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_limit_5_split_PAS_HE_Jones_norm_rest_test.json'
     output_dir = f'/{data_root}/debug/augments'
     os.makedirs(output_dir, exist_ok=True)
 
     n_classes = 2
 
-    dataset = FeatureBagLoader(data_root, label_path=label_path, mode='train', cache=False, mixup=True, aug=True, n_classes=n_classes)
+    train_dataset = FeatureBagLoader(data_root, label_path=label_path, mode='train', cache=False, mixup=True, aug=True, n_classes=n_classes)
+    val_dataset = FeatureBagLoader(data_root, label_path=label_path, mode='val', cache=False, mixup=False, aug=False, n_classes=n_classes)
 
     test_dataset = FeatureBagLoader(data_root, label_path=label_path, mode='test', cache=False, n_classes=n_classes)
 
     # print(dataset.get_labels(0))
-    a = int(len(dataset)* 0.8)
-    b = int(len(dataset) - a)
-    train_data, valid_data = random_split(dataset, [a, b])
+    # a = int(len(dataset)* 0.8)
+    # b = int(len(dataset) - a)
+    # train_data, valid_data = random_split(dataset, [a, b])
 
-    train_dl = DataLoader(train_data, batch_size=1, num_workers=5)
-    valid_dl = DataLoader(valid_data, batch_size=1, num_workers=5)
+    train_dl = DataLoader(train_dataset, batch_size=1, sampler=ImbalancedDatasetSampler(train_dataset), num_workers=5)
+    valid_dl = DataLoader(val_dataset, batch_size=1, num_workers=5)
     test_dl = DataLoader(test_dataset)
 
     print('train_dl: ', len(train_dl))
@@ -371,7 +639,7 @@ if __name__ == '__main__':
     # start = time.time()
     for i in range(epochs):
         start = time.time()
-        for item in tqdm(train_dl): 
+        for item in tqdm(valid_dl): 
 
             # if c >= 10:
             #     break
diff --git a/code/datasets/jpg_dataloader.py b/code/datasets/jpg_dataloader.py
new file mode 100644
index 0000000..c983d55
--- /dev/null
+++ b/code/datasets/jpg_dataloader.py
@@ -0,0 +1,434 @@
+# import pandas as pd
+
+import numpy as np
+import torch
+from torch import Tensor
+from torch.utils import data
+from torch.utils.data import random_split, DataLoader
+from torch.autograd import Variable
+from torch.nn.functional import one_hot
+import torch.utils.data as data_utils
+import torchvision.transforms as transforms
+import pandas as pd
+from sklearn.utils import shuffle
+from pathlib import Path
+from tqdm import tqdm
+from PIL import Image
+import cv2
+import json
+from imgaug import augmenters as iaa
+from torchsampler import ImbalancedDatasetSampler
+from .utils import myTransforms
+
+
+class JPGMILDataloader(data_utils.Dataset):
+    def __init__(self, file_path, label_path, mode, model, n_classes, data_cache_size=100, max_bag_size=1000, cache=False, mixup=False, aug=False):
+        super().__init__()
+
+        self.data_info = []
+        self.data_cache = {}
+        self.slideLabelDict = {}
+        self.files = []
+        self.data_cache_size = data_cache_size
+        self.mode = mode
+        self.file_path = file_path
+        # self.csv_path = csv_path
+        self.label_path = label_path
+        self.n_classes = n_classes
+        self.max_bag_size = max_bag_size
+        self.min_bag_size = 50
+        self.empty_slides = []
+        self.corrupt_slides = []
+        self.cache = False  # forced off: the cache-filling code below is commented out
+        self.labels = []
+
+        # self.features = []
+        # self.labels = []
+        # self.wsi_names = []
+        # self.name_batches = []
+        # self.patients = []
+        
+        # read labels and slide_path from csv
+        with open(self.label_path, 'r') as f:
+            json_dict = json.load(f)
+            temp_slide_label_dict = json_dict[self.mode]
+            # print(len(temp_slide_label_dict))
+            for (x,y) in temp_slide_label_dict:
+                x = x.replace('FEATURES_RETCCL_2048', 'BLOCKS')
+                # print(x)
+                x_name = Path(x).stem
+                x_path_list = [Path(self.file_path)/x]
+                for x_path in x_path_list:
+                    if x_path.exists():
+                        # print(len(list(x_path.glob('*'))))
+                        self.slideLabelDict[x_name] = y
+                        # one label per slide bag (self.files holds slide directories)
+                        self.labels.append(int(y))
+                        self.files.append(x_path)
+
+        # with open(self.label_path, 'r') as f:
+        #     temp_slide_label_dict = json.load(f)[mode]
+        #     print(len(temp_slide_label_dict))
+        #     for (x, y) in temp_slide_label_dict:
+        #         x = Path(x).stem 
+        #         # x_complete_path = Path(self.file_path)/Path(x)
+        #         for cohort in Path(self.file_path).iterdir():
+        #             x_complete_path = Path(self.file_path) / cohort / 'BLOCKS' / Path(x)
+        #             if x_complete_path.is_dir():
+        #                 if len(list(x_complete_path.iterdir())) > self.min_bag_size:
+        #                 # print(x_complete_path)
+        #                     self.slideLabelDict[x] = y
+        #                     self.files.append(x_complete_path)
+        #                 else: self.empty_slides.append(x_complete_path)
+        
+        home = Path.cwd().parts[1]
+        # self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict.json'
+        self.slide_patient_dict_path = Path(self.label_path).parent / 'slide_patient_dict_an.json'
+        with open(self.slide_patient_dict_path, 'r') as f:
+            self.slide_patient_dict = json.load(f)
+
+        # def get_transforms_2():
+        
+        self.color_transforms = myTransforms.Compose([
+            myTransforms.ColorJitter(
+                brightness = (0.65, 1.35), 
+                contrast = (0.5, 1.5),
+                ),
+            myTransforms.HEDJitter(theta=0.005),
+            
+        ])
+        self.train_transforms = myTransforms.Compose([
+            myTransforms.RandomChoice([myTransforms.RandomHorizontalFlip(p=0.5),
+                                        myTransforms.RandomVerticalFlip(p=0.5),
+                                        myTransforms.AutoRandomRotation()]),
+        
+            myTransforms.RandomGaussBlur(radius=[0.5, 1.5]),
+            myTransforms.RandomAffineCV2(alpha=0.1),
+            myTransforms.RandomElastic(alpha=2, sigma=0.06),
+        ])
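+        # color_transforms jitters brightness/contrast and perturbs the image in
+        # HED (haematoxylin-eosin-DAB) stain space; train_transforms adds random
+        # flips/rotations, Gaussian blur, and affine/elastic deformation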
+
+
+
+        # sometimes = lambda aug: iaa.Sometimes(0.5, aug, name="Random1")
+        # sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name="Random2")
+        # sometimes3 = lambda aug: iaa.Sometimes(0.9, aug, name="Random3")
+        # sometimes4 = lambda aug: iaa.Sometimes(0.9, aug, name="Random4")
+        # sometimes5 = lambda aug: iaa.Sometimes(0.9, aug, name="Random5")
+
+        # self.resize_transforms = iaa.Sequential([
+        #     myTransforms.Resize(size=(299,299)),
+        # ], name='resizeAug')
+
+        # self.train_transforms = iaa.Sequential([
+        #     iaa.AddToHueAndSaturation(value=(-30, 30), name="MyHSV"), #13
+        #     sometimes2(iaa.GammaContrast(gamma=(0.85, 1.15), name="MyGamma")),
+        #     iaa.Fliplr(0.5, name="MyFlipLR"),
+        #     iaa.Flipud(0.5, name="MyFlipUD"),
+        #     sometimes(iaa.Rot90(k=1, keep_size=True, name="MyRot90")),
+        #     # iaa.OneOf([
+        #     #     sometimes3(iaa.PiecewiseAffine(scale=(0.015, 0.02), cval=0, name="MyPiece")),
+        #     #     sometimes4(iaa.ElasticTransformation(alpha=(100, 200), sigma=20, cval=0, name="MyElastic")),
+        #     #     sometimes5(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, rotate=(-45, 45), shear=(-4, 4), cval=0, name="MyAffine"))
+        #     # ], name="MyOneOf")
+
+        # ], name="MyAug")
+        self.val_transforms = transforms.Compose([
+            # 
+            transforms.ToTensor(),
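+            # standard ImageNet channel statistics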
+            transforms.Normalize(
+                mean=[0.485, 0.456, 0.406],
+                std=[0.229, 0.224, 0.225],
+            ),
+            # RangeNormalization(),
+        ])
+
+
+
+
+        
+        # if self.cache:
+        #     if mode=='train':
+        #         seq_img_d = self.train_transforms.to_deterministic()
+                
+        #         # with tqdm(total=len(self.files)) as pbar:
+
+        #         for t in tqdm(self.files):
+        #             batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+        #             # print('label: ', label)
+        #             out_batch = []
+        #             for img in batch: 
+        #                 img = img.numpy().astype(np.uint8)
+        #                 img = seq_img_d.augment_image(img)
+        #                 img = self.val_transforms(img.copy())
+        #                 out_batch.append(img)
+        #             # ft = ft.view(-1, 512)
+                    
+        #             out_batch = torch.stack(out_batch)
+        #             self.labels.append(label)
+        #             self.features.append(out_batch)
+        #             self.wsi_names.append(wsi_name)
+        #             self.name_batches.append(name_batch)
+        #             self.patients.append(patient)
+        #                 # pbar.update()
+        #     else: 
+        #         # with tqdm(total=len(self.file_path)) as pbar:
+        #         for t in tqdm(self.file_path):
+        #             batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+        #             out_batch = []
+        #             for img in batch: 
+        #                 img = img.numpy().astype(np.uint8)
+        #                 img = self.val_transforms(img.copy())
+        #                 out_batch.append(img)
+        #             # ft = ft.view(-1, 512)
+        #             out_batch = torch.stack(out_batch)
+        #             self.labels.append(label)
+        #             self.features.append(out_batch)
+        #             self.wsi_names.append(wsi_name)
+        #             self.name_batches.append(name_batch)
+        #             self.patients.append(patient)
+                        # pbar.update()
+        # print(self.get_bag_feats(self.train_path))
+        # self.r = np.random.RandomState(seed)
+
+        # self.num_in_train = 60000
+        # self.num_in_test = 10000
+
+        # if self.train:
+        #     self.train_bags_list, self.train_labels_list = self._create_bags()
+        # else:
+        #     self.test_bags_list, self.test_labels_list = self._create_bags()
+
+    def get_data(self, file_path):
+        
+
+        wsi_batch=[]
+        name_batch=[]
+        
+        for tile_path in Path(file_path).iterdir():
+            img = Image.open(tile_path)
+            if self.mode == 'train':
+            
+                img = self.color_transforms(img)
+                img = self.train_transforms(img)
+            img = self.val_transforms(img)
+            # img = np.asarray(Image.open(tile_path)).astype(np.uint8)
+            # img = np.moveaxis(img, 2, 0)
+            # print(img.shape)
+            # img = torch.from_numpy(img)
+            wsi_batch.append(img)
+            name_batch.append(tile_path.stem)
+
+        wsi_batch = torch.stack(wsi_batch)
+
+        # if wsi_batch.size(0) > self.max_bag_size:
+        
+
+        wsi_name = Path(file_path).stem
+        try:
+            label = self.slideLabelDict[wsi_name]
+        except KeyError:
+            print(f'{wsi_name} is not included in label file {self.label_path}')
+            raise
+
+        try:
+            patient = self.slide_patient_dict[wsi_name]
+        except KeyError:
+            print(f'{wsi_name} is not included in slide-patient dict {self.slide_patient_dict_path}')
+            raise
+
+        return wsi_batch, label, (wsi_name, name_batch, patient)
+    
+    def get_labels(self, indices):
+        return [self.labels[i] for i in indices]
+
+
+    def to_fixed_size_bag(self, bag, bag_size: int = 512):
+
+        # subsample up to bag_size instances; zero-pad below if the bag is smaller
+
+        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
+        bag_samples = bag[bag_idxs]
+        # name_samples = [names[i] for i in bag_idxs]
+
+        # bag_sample_names = [bag_names[i] for i in bag_idxs]
+        # q, r  = divmod(bag_size, bag_samples.shape[0])
+        # if q > 0:
+        #     bag_samples = torch.cat([bag_samples]*q, 0)
+
+        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
+
+        # zero-pad if we don't have enough samples
+        zero_padded = torch.cat((bag_samples,
+                                torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
+
+        return zero_padded, min(bag_size, len(bag))
+
+    def data_dropout(self, bag, drop_rate):
+        bag_size = bag.shape[0]
+        bag_idxs = torch.randperm(bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_samples = bag[bag_idxs]
+        # name_samples = [batch_names[i] for i in bag_idxs]
+
+        return bag_samples
+
+    def __len__(self):
+        return len(self.files)
+
+    def __getitem__(self, index):
+
+        if self.cache:
+            label = self.labels[index]
+            wsi = self.features[index]
+            label = int(label)
+            wsi_name = self.wsi_names[index]
+            name_batch = self.name_batches[index]
+            patient = self.patients[index]
+            # feats = Variable(Tensor(feats))
+            return wsi, label, (wsi_name, name_batch, patient)
+        else:
+            t = self.files[index]
+            # label = self.labels[index]
+            if self.mode=='train':
+
+                batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+                batch, _ = self.to_fixed_size_bag(batch, self.max_bag_size)
+                batch = self.data_dropout(batch, drop_rate=0.1)
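+                # NOTE: dropout runs after zero-padding, so it can drop real
+                # tiles while keeping padded zeros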
+                # print(batch.shape)
+                # # label = Variable(Tensor(label))
+
+                # # wsi = Variable(Tensor(wsi_batch))
+                # out_batch = []
+
+                # # seq_img_d = self.train_transforms.to_deterministic()
+                # # seq_img_resize = self.resize_transforms.to_deterministic()
+                # for img in batch: 
+                #     # img = img.numpy().astype(np.uint8)
+                #     # print(img.shape)
+                #     img = self.resize_transforms(img)
+                #     # print(img)
+                #     # print(img.shape)
+                #     # img = torch.moveaxis(img, 0, 2) # with HEDJitter wants [W,H,3], ColorJitter wants [3,W,H]
+                #     # print(img.shape)
+                #     img = self.color_transforms(img)
+                #     print(img.shape)
+                #     img = self.train_transforms(img)
+                    
+                #     # img = seq_img_d.augment_image(img)
+                #     img = self.val_transforms(img.copy())
+                #     out_batch.append(img)
+                # out_batch = torch.stack(out_batch)
+                out_batch = batch
+                
+                # ft = ft.view(-1, 512)
+                
+            else:
+                batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+                batch, _ = self.to_fixed_size_bag(batch, self.max_bag_size)
+                # label = Variable(Tensor(label))
+                # out_batch = []
+                # # seq_img_d = self.train_transforms.to_deterministic()
+                # # seq_img_resize = self.resize_transforms.to_deterministic()
+                # for img in batch: 
+                #     # img = img.numpy().astype(np.uint8)
+                #     # img = seq_img_resize(images=img)
+                #     img = self.resize_transforms(img)
+                #     img = np.moveaxis(img, 0, 2)
+                #     # img = img.numpy().astype(np.uint8)
+                #     # print(img.shape)
+                #     img = self.val_transforms(img)
+                #     out_batch.append(img)
+                # out_batch = torch.stack(out_batch)
+                out_batch = batch
+
+            return out_batch, label, (wsi_name , patient)
+
+if __name__ == '__main__':
+    
+    from pathlib import Path
+    import os
+    import time
+    from fast_tensor_dl import FastTensorDataLoader
+    from custom_resnet50 import resnet50_baseline
+    
+    
+
+    home = Path.cwd().parts[1]
+    # train_csv = f'/{home}/ylan/DeepGraft_project/code/debug_train.csv'
+    data_root = f'/{home}/ylan/data/DeepGraft/224_256uM_annotated'
+    # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
+    # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
+    # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_limit_5_split_PAS_HE_Jones_norm_rest_test.json'
+    # output_dir = f'/{data_root}/debug/augments'
+    # os.makedirs(output_dir, exist_ok=True)
+
+    n_classes = 2
+
+    dataset = JPGMILDataloader(data_root, label_path=label_path, mode='train', model=None, n_classes=n_classes, cache=False)  # this file defines JPGMILDataloader; the model arg is unused
+
+    # print(dataset.get_labels(0))
+    # a = int(len(dataset)* 0.8)
+    # b = int(len(dataset) - a)
+    # train_data, valid_data = random_split(dataset, [a, b])
+    # print(dataset.dataset)
+    # a = int(len(dataset)* 0.8)
+    # b = int(len(dataset) - a)
+    # train_ds, val_ds = torch.utils.data.random_split(dataset, [a, b])
+    # dl = FastTensorDataLoader(dataset, batch_size=1, shuffle=False)
+    dl = DataLoader(dataset, batch_size=2, num_workers=8, pin_memory=True)
+    # print(len(dl))
+    # dl = DataLoader(dataset, batch_size=1, sampler=ImbalancedDatasetSampler(dataset), num_workers=5)
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    scaler = torch.cuda.amp.GradScaler()
+
+    model_ft = resnet50_baseline(pretrained=True)
+    for param in model_ft.parameters():
+        param.requires_grad = False
+    model_ft.to(device)
+    
+    c = 0
+    label_count = [0] *n_classes
+    # print(len(dl))
+    start = time.time()
+    for item in tqdm(dl): 
+
+        # if c >= 10:
+        #     break
+        bag, label, (name, batch_names, patient) = item
+        print(bag.shape)
+        # print(name)
+        # print(batch_names)
+        # print(patient)
+        # print(len(batch_names))
+
+        print(label.shape)
+        # bag = bag.squeeze(0).float().to(device)
+        # label = label.to(device)
+        # with torch.cuda.amp.autocast():
+        #     output = model_ft(bag)
+        c += 1
+    end = time.time()
+
+    print('Bag Time: ', end-start)
\ No newline at end of file
diff --git a/code/datasets/monai_loader.py b/code/datasets/monai_loader.py
index 0bbe803..4a3240e 100644
--- a/code/datasets/monai_loader.py
+++ b/code/datasets/monai_loader.py
@@ -111,10 +111,11 @@ if __name__ == '__main__':
         data_list_key="training",
         base_dir=data_root,
     )
+
 
     train_transform = Compose(
         [
-            LoadImaged(keys=["image"], reader=WSIReader, backend="cucim", dtype=np.uint8, level=0, image_only=True, num_workers=8),
+            LoadImaged(keys=["image"], reader=WSIReader, backend="cucim", dtype=np.uint8, level=1, image_only=True, num_workers=8),
             LabelEncodeIntegerGraded(keys=["label"], num_classes=num_classes),
             RandGridPatchd(
                 keys=["image"],
@@ -133,7 +134,7 @@ if __name__ == '__main__':
             ToTensord(keys=["image", "label"]),
         ]
     )
-    train_data_list = data['training']
+    training_list = data['training']
     # dataset_train = Dataset(data=training_list)
     dataset_train = Dataset(data=training_list, transform=train_transform)
     # persistent_dataset = PersistentDataset(data=training_list, transform=train_transform, cache_dir='/home/ylan/workspace/test')
diff --git a/code/datasets/simple_jpg_dataloader.py b/code/datasets/simple_jpg_dataloader.py
deleted file mode 100644
index c5e349f..0000000
--- a/code/datasets/simple_jpg_dataloader.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# import pandas as pd
-
-import numpy as np
-import torch
-from torch import Tensor
-from torch.utils import data
-from torch.utils.data import random_split, DataLoader
-from torch.autograd import Variable
-from torch.nn.functional import one_hot
-import torch.utils.data as data_utils
-import torchvision.transforms as transforms
-import pandas as pd
-from sklearn.utils import shuffle
-from pathlib import Path
-from tqdm import tqdm
-from PIL import Image
-import cv2
-import json
-from imgaug import augmenters as iaa
-from torchsampler import ImbalancedDatasetSampler
-
-
-class JPGBagLoader(data_utils.Dataset):
-    def __init__(self, file_path, label_path, mode, n_classes, load_data=False, data_cache_size=100, max_bag_size=1000, cache=False):
-        super().__init__()
-
-        self.data_info = []
-        self.data_cache = {}
-        self.slideLabelDict = {}
-        self.files = []
-        self.data_cache_size = data_cache_size
-        self.mode = mode
-        self.file_path = file_path
-        # self.csv_path = csv_path
-        self.label_path = label_path
-        self.n_classes = n_classes
-        self.max_bag_size = max_bag_size
-        self.min_bag_size = 50
-        self.empty_slides = []
-        self.corrupt_slides = []
-        self.cache = True
-        
-        # read labels and slide_path from csv
-        with open(self.label_path, 'r') as f:
-            temp_slide_label_dict = json.load(f)[mode]
-            print(len(temp_slide_label_dict))
-            for (x, y) in temp_slide_label_dict:
-                x = Path(x).stem 
-                # x_complete_path = Path(self.file_path)/Path(x)
-                for cohort in Path(self.file_path).iterdir():
-                    x_complete_path = Path(self.file_path) / cohort / 'BLOCKS' / Path(x)
-                    if x_complete_path.is_dir():
-                        if len(list(x_complete_path.iterdir())) > self.min_bag_size:
-                        # print(x_complete_path)
-                            self.slideLabelDict[x] = y
-                            self.files.append(x_complete_path)
-                        else: self.empty_slides.append(x_complete_path)
-        
-        home = Path.cwd().parts[1]
-        self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict.json'
-        with open(self.slide_patient_dict_path, 'r') as f:
-            self.slide_patient_dict = json.load(f)
-
-        sometimes = lambda aug: iaa.Sometimes(0.5, aug, name="Random1")
-        sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name="Random2")
-        sometimes3 = lambda aug: iaa.Sometimes(0.9, aug, name="Random3")
-        sometimes4 = lambda aug: iaa.Sometimes(0.9, aug, name="Random4")
-        sometimes5 = lambda aug: iaa.Sometimes(0.9, aug, name="Random5")
-
-        self.train_transforms = iaa.Sequential([
-            iaa.AddToHueAndSaturation(value=(-30, 30), name="MyHSV"), #13
-            sometimes2(iaa.GammaContrast(gamma=(0.85, 1.15), name="MyGamma")),
-            iaa.Fliplr(0.5, name="MyFlipLR"),
-            iaa.Flipud(0.5, name="MyFlipUD"),
-            sometimes(iaa.Rot90(k=1, keep_size=True, name="MyRot90")),
-            # iaa.OneOf([
-            #     sometimes3(iaa.PiecewiseAffine(scale=(0.015, 0.02), cval=0, name="MyPiece")),
-            #     sometimes4(iaa.ElasticTransformation(alpha=(100, 200), sigma=20, cval=0, name="MyElastic")),
-            #     sometimes5(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, rotate=(-45, 45), shear=(-4, 4), cval=0, name="MyAffine"))
-            # ], name="MyOneOf")
-
-        ], name="MyAug")
-        self.val_transforms = transforms.Compose([
-            # 
-            transforms.ToTensor(),
-            transforms.Normalize(
-                mean=[0.485, 0.456, 0.406],
-                std=[0.229, 0.224, 0.225],
-            ),
-            # RangeNormalization(),
-        ])
-
-
-
-
-        self.features = []
-        self.labels = []
-        self.wsi_names = []
-        self.name_batches = []
-        self.patients = []
-        if self.cache:
-            if mode=='train':
-                seq_img_d = self.train_transforms.to_deterministic()
-                
-                # with tqdm(total=len(self.files)) as pbar:
-
-                for t in tqdm(self.files):
-                    batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
-                    # print('label: ', label)
-                    out_batch = []
-                    for img in batch: 
-                        img = img.numpy().astype(np.uint8)
-                        img = seq_img_d.augment_image(img)
-                        img = self.val_transforms(img.copy())
-                        out_batch.append(img)
-                    # ft = ft.view(-1, 512)
-                    
-                    out_batch = torch.stack(out_batch)
-                    self.labels.append(label)
-                    self.features.append(out_batch)
-                    self.wsi_names.append(wsi_name)
-                    self.name_batches.append(name_batch)
-                    self.patients.append(patient)
-                        # pbar.update()
-            else: 
-                # with tqdm(total=len(self.file_path)) as pbar:
-                for t in tqdm(self.file_path):
-                    batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
-                    out_batch = []
-                    for img in batch: 
-                        img = img.numpy().astype(np.uint8)
-                        img = self.val_transforms(img.copy())
-                        out_batch.append(img)
-                    # ft = ft.view(-1, 512)
-                    out_batch = torch.stack(out_batch)
-                    self.labels.append(label)
-                    self.features.append(out_batch)
-                    self.wsi_names.append(wsi_name)
-                    self.name_batches.append(name_batch)
-                    self.patients.append(patient)
-                        # pbar.update()
-        # print(self.get_bag_feats(self.train_path))
-        # self.r = np.random.RandomState(seed)
-
-        # self.num_in_train = 60000
-        # self.num_in_test = 10000
-
-        # if self.train:
-        #     self.train_bags_list, self.train_labels_list = self._create_bags()
-        # else:
-        #     self.test_bags_list, self.test_labels_list = self._create_bags()
-
-    def get_data(self, file_path):
-        
-        wsi_batch=[]
-        name_batch=[]
-        
-        for tile_path in Path(file_path).iterdir():
-            img = np.asarray(Image.open(tile_path)).astype(np.uint8)
-            img = torch.from_numpy(img)
-            wsi_batch.append(img)
-            name_batch.append(tile_path.stem)
-
-        wsi_batch = torch.stack(wsi_batch)
-
-        if wsi_batch.size(0) > self.max_bag_size:
-            wsi_batch, name_batch, _ = self.to_fixed_size_bag(wsi_batch, name_batch, self.max_bag_size)
-
-
-        wsi_batch, name_batch = self.data_dropout(wsi_batch, name_batch, drop_rate=0.1)
-
-        wsi_name = Path(file_path).stem
-        try:
-            label = self.slideLabelDict[wsi_name]
-        except KeyError:
-            print(f'{wsi_name} is not included in label file {self.label_path}')
-
-        try:
-            patient = self.slide_patient_dict[wsi_name]
-        except KeyError:
-            print(f'{wsi_name} is not included in label file {self.slide_patient_dict_path}')
-
-        return wsi_batch, label, (wsi_name, name_batch, patient)
-    
-    def get_labels(self, indices):
-        return [self.labels[i] for i in indices]
-
-
-    def to_fixed_size_bag(self, bag, names, bag_size: int = 512):
-
-        #duplicate bag instances unitl 
-
-        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
-        bag_samples = bag[bag_idxs]
-        name_samples = [names[i] for i in bag_idxs]
-        # bag_sample_names = [bag_names[i] for i in bag_idxs]
-        # q, r  = divmod(bag_size, bag_samples.shape[0])
-        # if q > 0:
-        #     bag_samples = torch.cat([bag_samples]*q, 0)
-
-        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
-
-        # zero-pad if we don't have enough samples
-        # zero_padded = torch.cat((bag_samples,
-        #                         torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
-
-        return bag_samples, name_samples, min(bag_size, len(bag))
-
-    def data_dropout(self, bag, batch_names, drop_rate):
-        bag_size = bag.shape[0]
-        bag_idxs = torch.randperm(bag_size)[:int(bag_size*(1-drop_rate))]
-        bag_samples = bag[bag_idxs]
-        name_samples = [batch_names[i] for i in bag_idxs]
-
-        return bag_samples, name_samples
-
-    def __len__(self):
-        return len(self.files)
-
-    def __getitem__(self, index):
-
-        if self.cache:
-            label = self.labels[index]
-            wsi = self.features[index]
-            label = int(label)
-            wsi_name = self.wsi_names[index]
-            name_batch = self.name_batches[index]
-            patient = self.patients[index]
-            # feats = Variable(Tensor(feats))
-            return wsi, label, (wsi_name, name_batch, patient)
-        else:
-            if self.mode=='train':
-                batch, label, (wsi_name, name_batch, patient) = self.get_data(self.files[index])
-                # label = Variable(Tensor(label))
-
-                # wsi = Variable(Tensor(wsi_batch))
-                out_batch = []
-                seq_img_d = self.train_transforms.to_deterministic()
-                for img in batch: 
-                    img = img.numpy().astype(np.uint8)
-                    # img = seq_img_d.augment_image(img)
-                    img = self.val_transforms(img.copy())
-                    out_batch.append(img)
-                out_batch = torch.stack(out_batch)
-                # ft = ft.view(-1, 512)
-                
-            else:
-                batch, label, (wsi_name, name_batch, patient) = self.get_data(self.files[index])
-                label = Variable(Tensor(label))
-                out_batch = []
-                seq_img_d = self.train_transforms.to_deterministic()
-                for img in batch: 
-                    img = img.numpy().astype(np.uint8)
-                    img = self.val_transforms(img.copy())
-                    out_batch.append(img)
-                out_batch = torch.stack(out_batch)
-
-            return out_batch, label, (wsi_name, name_batch, patient)
-
-if __name__ == '__main__':
-    
-    from pathlib import Path
-    import os
-    import time
-    from fast_tensor_dl import FastTensorDataLoader
-    from custom_resnet50 import resnet50_baseline
-    
-    
-
-    home = Path.cwd().parts[1]
-    train_csv = f'/{home}/ylan/DeepGraft_project/code/debug_train.csv'
-    data_root = f'/{home}/ylan/data/DeepGraft/224_128um_v2'
-    # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
-    # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
-    label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
-    # label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
-    output_dir = f'/{data_root}/debug/augments'
-    os.makedirs(output_dir, exist_ok=True)
-
-    n_classes = 2
-
-    dataset = JPGBagLoader(data_root, label_path=label_path, mode='train', load_data=False, n_classes=n_classes)
-
-    # print(dataset.get_labels(0))
-    a = int(len(dataset)* 0.8)
-    b = int(len(dataset) - a)
-    train_data, valid_data = random_split(dataset, [a, b])
-    # print(dataset.dataset)
-    # a = int(len(dataset)* 0.8)
-    # b = int(len(dataset) - a)
-    # train_ds, val_ds = torch.utils.data.random_split(dataset, [a, b])
-    # dl = FastTensorDataLoader(dataset, batch_size=1, shuffle=False)
-    dl = DataLoader(train_data, batch_size=1, num_workers=8, sampler=ImbalancedDatasetSampler(train_data), pin_memory=True)
-    # print(len(dl))
-    # dl = DataLoader(dataset, batch_size=1, sampler=ImbalancedDatasetSampler(dataset), num_workers=5)
-    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
-    scaler = torch.cuda.amp.GradScaler()
-
-    model_ft = resnet50_baseline(pretrained=True)
-    for param in model_ft.parameters():
-        param.requires_grad = False
-    model_ft.to(device)
-    
-    c = 0
-    label_count = [0] *n_classes
-    # print(len(dl))
-    start = time.time()
-    for item in tqdm(dl): 
-
-        # if c >= 10:
-        #     break
-        bag, label, (name, batch_names, patient) = item
-        # print(bag.shape)
-        # print(len(batch_names))
-        print(label)
-        bag = bag.squeeze(0).float().to(device)
-        label = label.to(device)
-        with torch.cuda.amp.autocast():
-            output = model_ft(bag)
-        c += 1
-    end = time.time()
-
-    print('Bag Time: ', end-start)
\ No newline at end of file
diff --git a/code/datasets/utils/__init__.py b/code/datasets/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/code/datasets/utils/__pycache__/__init__.cpython-39.pyc b/code/datasets/utils/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19391f5fd1eeca742d1e73b5108a9cf52b2c4e60
GIT binary patch (162-byte literal omitted: compiled __init__.cpython-39.pyc)

diff --git a/code/datasets/utils/__pycache__/myTransforms.cpython-39.pyc b/code/datasets/utils/__pycache__/myTransforms.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1a33858df412d4143659f5a98c36a3b322b6180
GIT binary patch (58339-byte literal omitted: compiled myTransforms.cpython-39.pyc)

diff --git a/code/datasets/utils/myTransforms.py b/code/datasets/utils/myTransforms.py
new file mode 100644
index 0000000..3ac760a
--- /dev/null
+++ b/code/datasets/utils/myTransforms.py
@@ -0,0 +1,1426 @@
+from __future__ import division
+import torch
+import math
+import sys
+import random
+try:
+    import accimage
+except ImportError:
+    accimage = None
+import numpy as np
+import numbers
+import types
+import collections
+import warnings
+
+from PIL import Image, ImageFilter
+from skimage import color
+import cv2
+from scipy.ndimage.interpolation import map_coordinates
+from scipy.ndimage.filters import gaussian_filter
+from torchvision.transforms import functional as F
+import torchvision.transforms as T
+
+if sys.version_info < (3, 3):
+    Sequence = collections.Sequence
+    Iterable = collections.Iterable
+else:
+    Sequence = collections.abc.Sequence
+    Iterable = collections.abc.Iterable
+
+
+# 2020.4.19 Chaoyang
+# add new augmentation class *HEDJitter* for HED color-space perturbation.
+# add new rotation class *AutoRandomRotation*, restricted to 0/90/180/270 degree rotations.
+# delete the Scale class because it is no longer applicable.
+# 2020.4.20 Chaoyang
+# add usage annotations for class *RandomAffine* (line 1040 -- 1046).
+# add new augmentation class *RandomGaussBlur* for Gaussian blurring.
+# add new augmentation class *RandomAffineCV2* for affine transformation via cv2, which can
+# set BORDER_REFLECT for the area outside the transform in the output image.
+# add new augmentation class *RandomElastic* for elastic transformation via cv2.
+__all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "Resize", "CenterCrop", "Pad",
+           "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop", "RandomHorizontalFlip",
+           "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop", "LinearTransformation",
+           "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
+           "RandomPerspective", "RandomErasing",
+           "HEDJitter", "AutoRandomRotation", "RandomGaussBlur", "RandomAffineCV2", "RandomElastic"]
+
+_pil_interpolation_to_str = {
+    Image.NEAREST: 'T.InterpolationMode.NEAREST',
+    Image.BILINEAR: 'T.InterpolationMode.BILINEAR',
+    Image.BICUBIC: 'T.InterpolationMode.BICUBIC',
+    Image.LANCZOS: 'T.InterpolationMode.LANCZOS',
+    Image.HAMMING: 'T.InterpolationMode.HAMMING',
+    Image.BOX: 'T.InterpolationMode.BOX',
+}
+
+
+def _get_image_size(img):
+    if F._is_pil_image(img):
+        return img.size
+    elif isinstance(img, torch.Tensor) and img.dim() > 2:
+        return img.shape[-2:][::-1]
+    else:
+        raise TypeError("Unexpected type {}".format(type(img)))
+
+
+class Compose(object):
+    """Composes several transforms together.
+    Args:
+        transforms (list of ``Transform`` objects): list of transforms to compose.
+    Example:
+        # >>> transforms.Compose([
+        # >>>     transforms.CenterCrop(10),
+        # >>>     transforms.ToTensor(),
+        # >>> ])
+    """
+
+    def __init__(self, transforms):
+        self.transforms = transforms
+
+    def __call__(self, img):
+        for t in self.transforms:
+            img = t(img)
+        return img
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        for t in self.transforms:
+            format_string += '\n'
+            format_string += '    {0}'.format(t)
+        format_string += '\n)'
+        return format_string
+
+
+class ToTensor(object):
+    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
+    Converts a PIL Image or numpy.ndarray (H x W x C) in the range
+    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
+    if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
+    or if the numpy.ndarray has dtype = np.uint8
+    In the other cases, tensors are returned without scaling.
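+    Example (illustrative):
+        # >>> ToTensor()(Image.new('RGB', (32, 32)))  # -> FloatTensor of shape (3, 32, 32) in [0.0, 1.0]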
+    """
+
+    def __call__(self, pic):
+        """
+        Args:
+            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
+        Returns:
+            Tensor: Converted image.
+        """
+        return F.to_tensor(pic)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '()'
+
+
+class ToPILImage(object):
+    """Convert a tensor or an ndarray to PIL Image.
+    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
+    H x W x C to a PIL Image while preserving the value range.
+    Args:
+        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
+            If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
+             - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
+             - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
+             - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
+             - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
+               ``short``).
+    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
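+    Example (illustrative):
+        # >>> ToPILImage()(torch.rand(3, 64, 64))  # 3 channels -> mode assumed to be 'RGB'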
+    """
+    def __init__(self, mode=None):
+        self.mode = mode
+
+    def __call__(self, pic):
+        """
+        Args:
+            pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
+        Returns:
+            PIL Image: Image converted to PIL Image.
+        """
+        return F.to_pil_image(pic, self.mode)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        if self.mode is not None:
+            format_string += 'mode={0}'.format(self.mode)
+        format_string += ')'
+        return format_string
+
+
+class Normalize(object):
+    """Normalize a tensor image with mean and standard deviation.
+    Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
+    will normalize each channel of the input ``torch.*Tensor`` i.e.
+    ``input[channel] = (input[channel] - mean[channel]) / std[channel]``
+    .. note::
+        This transform acts out of place, i.e., it does not mutate the input tensor.
+    Args:
+        mean (sequence): Sequence of means for each channel.
+        std (sequence): Sequence of standard deviations for each channel.
+        inplace (bool, optional): Bool to make this operation in-place.
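+    Example (illustrative ImageNet statistics):
+        # >>> norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        # >>> out = norm(torch.rand(3, 224, 224))  # per-channel (input - mean) / std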
+    """
+
+    def __init__(self, mean, std, inplace=False):
+        self.mean = mean
+        self.std = std
+        self.inplace = inplace
+
+    def __call__(self, tensor):
+        """
+        Args:
+            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
+        Returns:
+            Tensor: Normalized Tensor image.
+        """
+        return F.normalize(tensor, self.mean, self.std, self.inplace)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+
+
+class Resize(object):
+    """Resize the input PIL Image to the given size.
+    Args:
+        size (sequence or int): Desired output size. If size is a sequence like
+            (h, w), output size will be matched to this. If size is an int,
+            smaller edge of the image will be matched to this number.
+            i.e., if height > width, then the image will be rescaled to
+            (size * height / width, size)
+        interpolation (int, optional): Desired interpolation. Default is
+            ``PIL.Image.BILINEAR``
+    """
+
+    def __init__(self, size, interpolation=Image.BILINEAR):
+        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
+        self.size = size
+        self.interpolation = interpolation
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be scaled.
+        Returns:
+            PIL Image: Rescaled image.
+        """
+        return F.resize(img, self.size, self.interpolation)
+
+    def __repr__(self):
+        interpolate_str = _pil_interpolation_to_str[self.interpolation]
+        return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
+
+
+class CenterCrop(object):
+    """Crops the given PIL Image at the center.
+    Args:
+        size (sequence or int): Desired output size of the crop. If size is an
+            int instead of sequence like (h, w), a square crop (size, size) is
+            made.
+    """
+
+    def __init__(self, size):
+        if isinstance(size, numbers.Number):
+            self.size = (int(size), int(size))
+        else:
+            self.size = size
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be cropped.
+        Returns:
+            PIL Image: Cropped image.
+        """
+        return F.center_crop(img, self.size)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(size={0})'.format(self.size)
+
+
+class Pad(object):
+    """Pad the given PIL Image on all sides with the given "pad" value.
+    Args:
+        padding (int or tuple): Padding on each border. If a single int is provided this
+            is used to pad all borders. If tuple of length 2 is provided this is the padding
+            on left/right and top/bottom respectively. If a tuple of length 4 is provided
+            this is the padding for the left, top, right and bottom borders
+            respectively.
+        fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
+            length 3, it is used to fill R, G, B channels respectively.
+            This value is only used when the padding_mode is constant
+        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
+            Default is constant.
+            - constant: pads with a constant value, this value is specified with fill
+            - edge: pads with the last value at the edge of the image
+            - reflect: pads with reflection of image without repeating the last value on the edge
+                For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
+                will result in [3, 2, 1, 2, 3, 4, 3, 2]
+            - symmetric: pads with reflection of image repeating the last value on the edge
+                For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
+                will result in [2, 1, 1, 2, 3, 4, 4, 3]
+    """
+
+    def __init__(self, padding, fill=0, padding_mode='constant'):
+        assert isinstance(padding, (numbers.Number, tuple))
+        assert isinstance(fill, (numbers.Number, str, tuple))
+        assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
+        if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
+            raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
+                             "{} element tuple".format(len(padding)))
+
+        self.padding = padding
+        self.fill = fill
+        self.padding_mode = padding_mode
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be padded.
+        Returns:
+            PIL Image: Padded image.
+        """
+        return F.pad(img, self.padding, self.fill, self.padding_mode)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
+            format(self.padding, self.fill, self.padding_mode)
+
+
+class Lambda(object):
+    """Apply a user-defined lambda as a transform.
+    Args:
+        lambd (function): Lambda/function to be used for transform.
+    """
+
+    def __init__(self, lambd):
+        assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
+        self.lambd = lambd
+
+    def __call__(self, img):
+        return self.lambd(img)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '()'
+
+
+class RandomTransforms(object):
+    """Base class for a list of transformations with randomness
+    Args:
+        transforms (list or tuple): list of transformations
+    """
+
+    def __init__(self, transforms):
+        assert isinstance(transforms, (list, tuple))
+        self.transforms = transforms
+
+    def __call__(self, *args, **kwargs):
+        raise NotImplementedError()
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        for t in self.transforms:
+            format_string += '\n'
+            format_string += '    {0}'.format(t)
+        format_string += '\n)'
+        return format_string
+
+
+class RandomApply(RandomTransforms):
+    """Apply randomly a list of transformations with a given probability
+    Args:
+        transforms (list or tuple): list of transformations
+        p (float): probability
+    """
+
+    def __init__(self, transforms, p=0.5):
+        super(RandomApply, self).__init__(transforms)
+        self.p = p
+
+    def __call__(self, img):
+        if self.p < random.random():
+            return img
+        for t in self.transforms:
+            img = t(img)
+        return img
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        format_string += '\n    p={}'.format(self.p)
+        for t in self.transforms:
+            format_string += '\n'
+            format_string += '    {0}'.format(t)
+        format_string += '\n)'
+        return format_string
+
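+# Usage sketch (illustrative; the ColorJitter parameters are assumed, not
+# tuned): RandomApply runs the whole sub-list as a unit with probability p,
+# unlike RandomChoice/RandomOrder below, which pick or reorder transforms.
+#
+#   aug = RandomApply([ColorJitter(0.4, 0.4, 0.4, 0.1), RandomGrayscale(p=1.0)], p=0.8)
+#   img = aug(img)   # with probability 0.8, applies both transforms in order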
+
+class RandomOrder(RandomTransforms):
+    """Apply a list of transformations in a random order
+    """
+    def __call__(self, img):
+        order = list(range(len(self.transforms)))
+        random.shuffle(order)
+        for i in order:
+            img = self.transforms[i](img)
+        return img
+
+
+class RandomChoice(RandomTransforms):
+    """Apply single transformation randomly picked from a list
+    """
+    def __call__(self, img):
+        t = random.choice(self.transforms)
+        return t(img)
+
+
+class RandomCrop(object):
+    """Crop the given PIL Image at a random location.
+    Args:
+        size (sequence or int): Desired output size of the crop. If size is an
+            int instead of sequence like (h, w), a square crop (size, size) is
+            made.
+        padding (int or sequence, optional): Optional padding on each border
+            of the image. Default is None, i.e no padding. If a sequence of length
+            4 is provided, it is used to pad left, top, right, bottom borders
+            respectively. If a sequence of length 2 is provided, it is used to
+            pad left/right, top/bottom borders, respectively.
+        pad_if_needed (boolean): It will pad the image if smaller than the
+            desired size to avoid raising an exception. Since cropping is done
+            after padding, the padding seems to be done at a random offset.
+        fill: Pixel fill value for constant fill. Default is 0. If a tuple of
+            length 3, it is used to fill R, G, B channels respectively.
+            This value is only used when the padding_mode is constant
+        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
+             - constant: pads with a constant value, this value is specified with fill
+             - edge: pads with the last value on the edge of the image
+             - reflect: pads with reflection of image (without repeating the last value on the edge)
+                padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
+                will result in [3, 2, 1, 2, 3, 4, 3, 2]
+             - symmetric: pads with reflection of image (repeating the last value on the edge)
+                padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
+                will result in [2, 1, 1, 2, 3, 4, 4, 3]
+    """
+
+    def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):
+        if isinstance(size, numbers.Number):
+            self.size = (int(size), int(size))
+        else:
+            self.size = size
+        self.padding = padding
+        self.pad_if_needed = pad_if_needed
+        self.fill = fill
+        self.padding_mode = padding_mode
+
+    @staticmethod
+    def get_params(img, output_size):
+        """Get parameters for ``crop`` for a random crop.
+        Args:
+            img (PIL Image): Image to be cropped.
+            output_size (tuple): Expected output size of the crop.
+        Returns:
+            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
+        """
+        w, h = _get_image_size(img)
+        th, tw = output_size
+        if w == tw and h == th:
+            return 0, 0, h, w
+
+        i = random.randint(0, h - th)
+        j = random.randint(0, w - tw)
+        return i, j, th, tw
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be cropped.
+        Returns:
+            PIL Image: Cropped image.
+        """
+        if self.padding is not None:
+            img = F.pad(img, self.padding, self.fill, self.padding_mode)
+
+        # pad the width if needed
+        if self.pad_if_needed and img.size[0] < self.size[1]:
+            img = F.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
+        # pad the height if needed
+        if self.pad_if_needed and img.size[1] < self.size[0]:
+            img = F.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
+
+        i, j, h, w = self.get_params(img, self.size)
+
+        return F.crop(img, i, j, h, w)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
+
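+# Usage sketch (illustrative size): with pad_if_needed=True, images smaller
+# than the crop are first padded (here by reflection), so the crop below
+# never raises on undersized tiles.
+#
+#   crop = RandomCrop(224, pad_if_needed=True, padding_mode='reflect')
+#   tile = crop(img)   # always returns a 224 x 224 PIL Image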
+
+class RandomHorizontalFlip(object):
+    """Horizontally flip the given PIL Image randomly with a given probability.
+    Args:
+        p (float): probability of the image being flipped. Default value is 0.5
+    """
+
+    def __init__(self, p=0.5):
+        self.p = p
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be flipped.
+        Returns:
+            PIL Image: Randomly flipped image.
+        """
+        if random.random() < self.p:
+            return F.hflip(img)
+        return img
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(p={})'.format(self.p)
+
+
+class RandomVerticalFlip(object):
+    """Vertically flip the given PIL Image randomly with a given probability.
+    Args:
+        p (float): probability of the image being flipped. Default value is 0.5
+    """
+
+    def __init__(self, p=0.5):
+        self.p = p
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be flipped.
+        Returns:
+            PIL Image: Randomly flipped image.
+        """
+        if random.random() < self.p:
+            return F.vflip(img)
+        return img
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(p={})'.format(self.p)
+
+
+class RandomPerspective(object):
+    """Performs Perspective transformation of the given PIL Image randomly with a given probability.
+    Args:
+        interpolation : Default- Image.BICUBIC
+        p (float): probability of the image being perspectively transformed. Default value is 0.5
+        distortion_scale(float): it controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.
+    """
+
+    def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC):
+        self.p = p
+        self.interpolation = interpolation
+        self.distortion_scale = distortion_scale
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be Perspectively transformed.
+        Returns:
+            PIL Image: Randomly perspective-transformed image.
+        """
+        if not F._is_pil_image(img):
+            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
+
+        if random.random() < self.p:
+            width, height = img.size
+            startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
+            return F.perspective(img, startpoints, endpoints, self.interpolation)
+        return img
+
+    @staticmethod
+    def get_params(width, height, distortion_scale):
+        """Get parameters for ``perspective`` for a random perspective transform.
+        Args:
+            width : width of the image.
+            height : height of the image.
+            distortion_scale : degree of distortion, ranging from 0 to 1.
+        Returns:
+            List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
+            List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
+        """
+        half_height = int(height / 2)
+        half_width = int(width / 2)
+        topleft = (random.randint(0, int(distortion_scale * half_width)),
+                   random.randint(0, int(distortion_scale * half_height)))
+        topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
+                    random.randint(0, int(distortion_scale * half_height)))
+        botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
+                    random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
+        botleft = (random.randint(0, int(distortion_scale * half_width)),
+                   random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
+        startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
+        endpoints = [topleft, topright, botright, botleft]
+        return startpoints, endpoints
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(p={})'.format(self.p)
+
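+# Usage sketch (illustrative parameters): distortion_scale bounds how far each
+# corner may move -- at most distortion_scale * half the image width/height.
+#
+#   warp = RandomPerspective(distortion_scale=0.3, p=1.0)
+#   warped = warp(img)   # img must be a PIL Image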
+
+class RandomResizedCrop(object):
+    """Crop the given PIL Image to random size and aspect ratio.
+    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
+    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
+    is finally resized to given size.
+    This is popularly used to train the Inception networks.
+    Args:
+        size: expected output size of each edge
+        scale: range of size of the origin size cropped
+        ratio: range of aspect ratio of the origin aspect ratio cropped
+        interpolation: Default: PIL.Image.BILINEAR
+    """
+
+    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
+        if isinstance(size, tuple):
+            self.size = size
+        else:
+            self.size = (size, size)
+        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
+            warnings.warn("range should be of kind (min, max)")
+
+        self.interpolation = interpolation
+        self.scale = scale
+        self.ratio = ratio
+
+    @staticmethod
+    def get_params(img, scale, ratio):
+        """Get parameters for ``crop`` for a random sized crop.
+        Args:
+            img (PIL Image): Image to be cropped.
+            scale (tuple): range of size of the origin size cropped
+            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
+        Returns:
+            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
+                sized crop.
+        """
+        width, height = _get_image_size(img)
+        area = height * width
+
+        for attempt in range(10):
+            target_area = random.uniform(*scale) * area
+            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
+            aspect_ratio = math.exp(random.uniform(*log_ratio))
+
+            w = int(round(math.sqrt(target_area * aspect_ratio)))
+            h = int(round(math.sqrt(target_area / aspect_ratio)))
+
+            if 0 < w <= width and 0 < h <= height:
+                i = random.randint(0, height - h)
+                j = random.randint(0, width - w)
+                return i, j, h, w
+
+        # Fallback to central crop
+        in_ratio = float(width) / float(height)
+        if (in_ratio < min(ratio)):
+            w = width
+            h = int(round(w / min(ratio)))
+        elif (in_ratio > max(ratio)):
+            h = height
+            w = int(round(h * max(ratio)))
+        else:  # whole image
+            w = width
+            h = height
+        i = (height - h) // 2
+        j = (width - w) // 2
+        return i, j, h, w
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be cropped and resized.
+        Returns:
+            PIL Image: Randomly cropped and resized image.
+        """
+        i, j, h, w = self.get_params(img, self.scale, self.ratio)
+        return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
+
+    def __repr__(self):
+        interpolate_str = _pil_interpolation_to_str[self.interpolation]
+        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
+        format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
+        format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
+        format_string += ', interpolation={0})'.format(interpolate_str)
+        return format_string
+
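+# Usage sketch (illustrative, Inception-style defaults): a random area fraction
+# from ``scale`` and an aspect ratio from ``ratio`` are sampled, then the crop
+# is resized to the target size.
+#
+#   rrc = RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
+#   patch = rrc(img)   # 224 x 224 regardless of the input size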
+
+class RandomSizedCrop(RandomResizedCrop):
+    """
+    Note: This transform is deprecated in favor of RandomResizedCrop.
+    """
+    def __init__(self, *args, **kwargs):
+        warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
+                      "please use transforms.RandomResizedCrop instead.")
+        super(RandomSizedCrop, self).__init__(*args, **kwargs)
+
+
+class FiveCrop(object):
+    """Crop the given PIL Image into four corners and the central crop
+    .. Note::
+         This transform returns a tuple of images and there may be a mismatch in the number of
+         inputs and targets your Dataset returns. See below for an example of how to deal with
+         this.
+    Args:
+         size (sequence or int): Desired output size of the crop. If size is an ``int``
+            instead of sequence like (h, w), a square crop of size (size, size) is made.
+    Example:
+         >>> transform = Compose([
+         >>>    FiveCrop(size), # this is a list of PIL Images
+         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
+         >>> ])
+         >>> # In your test loop you can do the following:
+         >>> input, target = batch # input is a 5d tensor, target is 2d
+         >>> bs, ncrops, c, h, w = input.size()
+         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
+         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
+    """
+
+    def __init__(self, size):
+        self.size = size
+        if isinstance(size, numbers.Number):
+            self.size = (int(size), int(size))
+        else:
+            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
+            self.size = size
+
+    def __call__(self, img):
+        return F.five_crop(img, self.size)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(size={0})'.format(self.size)
+
+
+class TenCrop(object):
+    """Crop the given PIL Image into four corners and the central crop plus the flipped version of
+    these (horizontal flipping is used by default)
+    .. Note::
+         This transform returns a tuple of images and there may be a mismatch in the number of
+         inputs and targets your Dataset returns. See below for an example of how to deal with
+         this.
+    Args:
+        size (sequence or int): Desired output size of the crop. If size is an
+            int instead of sequence like (h, w), a square crop (size, size) is
+            made.
+        vertical_flip (bool): Use vertical flipping instead of horizontal
+    Example:
+         >>> transform = Compose([
+         >>>    TenCrop(size), # this is a list of PIL Images
+         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
+         >>> ])
+         >>> # In your test loop you can do the following:
+         >>> input, target = batch # input is a 5d tensor, target is 2d
+         >>> bs, ncrops, c, h, w = input.size()
+         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
+         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
+    """
+
+    def __init__(self, size, vertical_flip=False):
+        self.size = size
+        if isinstance(size, numbers.Number):
+            self.size = (int(size), int(size))
+        else:
+            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
+            self.size = size
+        self.vertical_flip = vertical_flip
+
+    def __call__(self, img):
+        return F.ten_crop(img, self.size, self.vertical_flip)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
+
+
+class LinearTransformation(object):
+    """Transform a tensor image with a square transformation matrix and a mean_vector computed
+    offline.
+    Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
+    subtract mean_vector from it which is then followed by computing the dot
+    product with the transformation matrix and then reshaping the tensor to its
+    original shape.
+    Applications:
+        whitening transformation: Suppose X is a column vector zero-centered data.
+        Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
+        perform SVD on this matrix and pass it as transformation_matrix.
+    Args:
+        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
+        mean_vector (Tensor): tensor [D], D = C x H x W
+    """
+
+    def __init__(self, transformation_matrix, mean_vector):
+        if transformation_matrix.size(0) != transformation_matrix.size(1):
+            raise ValueError("transformation_matrix should be square. Got " +
+                             "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
+
+        if mean_vector.size(0) != transformation_matrix.size(0):
+            raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
+                             " as any one of the dimensions of the transformation_matrix [{} x {}]"
+                             .format(transformation_matrix.size()))
+
+        self.transformation_matrix = transformation_matrix
+        self.mean_vector = mean_vector
+
+    def __call__(self, tensor):
+        """
+        Args:
+            tensor (Tensor): Tensor image of size (C, H, W) to be whitened.
+        Returns:
+            Tensor: Transformed image.
+        """
+        if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
+            raise ValueError("tensor and transformation matrix have incompatible shape." +
+                             "[{} x {} x {}] != ".format(*tensor.size()) +
+                             "{}".format(self.transformation_matrix.size(0)))
+        flat_tensor = tensor.view(1, -1) - self.mean_vector
+        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
+        tensor = transformed_tensor.view(tensor.size())
+        return tensor
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '(transformation_matrix='
+        format_string += str(self.transformation_matrix.tolist())
+        format_string += (', mean_vector=' + str(self.mean_vector.tolist()) + ')')
+        return format_string
+
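+# Offline ZCA whitening sketch (assumed workflow, not from the original file):
+# X is an [N, D] tensor of flattened, zero-centered training images, D = C*H*W.
+#
+#   mean_vector = X.mean(dim=0)
+#   Xc = X - mean_vector
+#   cov = torch.mm(Xc.t(), Xc) / (Xc.size(0) - 1)          # [D, D] covariance
+#   U, S, _ = torch.svd(cov)
+#   zca = torch.mm(U, torch.mm(torch.diag(1.0 / torch.sqrt(S + 1e-5)), U.t()))
+#   whiten = LinearTransformation(zca, mean_vector)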
+
+class ColorJitter(object):
+    """Randomly change the brightness, contrast and saturation of an image.
+    Args:
+        brightness (float or tuple of float (min, max)): How much to jitter brightness.
+            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
+            or the given [min, max]. Should be non-negative numbers.
+        contrast (float or tuple of float (min, max)): How much to jitter contrast.
+            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
+            or the given [min, max]. Should be non-negative numbers.
+        saturation (float or tuple of float (min, max)): How much to jitter saturation.
+            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
+            or the given [min, max]. Should be non-negative numbers.
+        hue (float or tuple of float (min, max)): How much to jitter hue.
+            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
+            Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
+    """
+    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
+        self.brightness = self._check_input(brightness, 'brightness')
+        self.contrast = self._check_input(contrast, 'contrast')
+        self.saturation = self._check_input(saturation, 'saturation')
+        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
+                                     clip_first_on_zero=False)
+
+    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
+        if isinstance(value, numbers.Number):
+            if value < 0:
+                raise ValueError("If {} is a single number, it must be non negative.".format(name))
+            value = [center - value, center + value]
+            if clip_first_on_zero:
+                value[0] = max(value[0], 0)
+        elif isinstance(value, (tuple, list)) and len(value) == 2:
+            if not bound[0] <= value[0] <= value[1] <= bound[1]:
+                raise ValueError("{} values should be between {}".format(name, bound))
+        else:
+            raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
+
+        # if value is 0 or (1., 1.) for brightness/contrast/saturation
+        # or (0., 0.) for hue, do nothing
+        if value[0] == value[1] == center:
+            value = None
+        return value
+
+    @staticmethod
+    def get_params(brightness, contrast, saturation, hue):
+        """Get a randomized transform to be applied on image.
+        Arguments are same as that of __init__.
+        Returns:
+            Transform which randomly adjusts brightness, contrast and
+            saturation in a random order.
+        """
+        transforms = []
+
+        if brightness is not None:
+            brightness_factor = random.uniform(brightness[0], brightness[1])
+            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
+
+        if contrast is not None:
+            contrast_factor = random.uniform(contrast[0], contrast[1])
+            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
+
+        if saturation is not None:
+            saturation_factor = random.uniform(saturation[0], saturation[1])
+            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
+
+        if hue is not None:
+            hue_factor = random.uniform(hue[0], hue[1])
+            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
+
+        random.shuffle(transforms)
+        transform = Compose(transforms)
+
+        return transform
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Input image.
+        Returns:
+            PIL Image: Color jittered image.
+        """
+        transform = self.get_params(self.brightness, self.contrast,
+                                    self.saturation, self.hue)
+        return transform(img)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        format_string += 'brightness={0}'.format(self.brightness)
+        format_string += ', contrast={0}'.format(self.contrast)
+        format_string += ', saturation={0}'.format(self.saturation)
+        format_string += ', hue={0})'.format(self.hue)
+        return format_string
+
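+# Usage sketch (illustrative values): scalar arguments become symmetric ranges
+# around 1 (around 0 for hue), so brightness=0.3 samples from [0.7, 1.3],
+# while an explicit tuple is used as-is.
+#
+#   jitter = ColorJitter(brightness=0.3, contrast=(0.8, 1.2), saturation=0.2, hue=0.05)
+#   img = jitter(img)   # factors are re-sampled on every call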
+
+class RandomRotation(object):
+    """Rotate the image by angle.
+    Args:
+        degrees (sequence or float or int): Range of degrees to select from.
+            If degrees is a number instead of sequence like (min, max), the range of degrees
+            will be (-degrees, +degrees).
+        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
+            An optional resampling filter. See `filters`_ for more information.
+            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
+        expand (bool, optional): Optional expansion flag.
+            If true, expands the output to make it large enough to hold the entire rotated image.
+            If false or omitted, make the output image the same size as the input image.
+            Note that the expand flag assumes rotation around the center and no translation.
+        center (2-tuple, optional): Optional center of rotation.
+            Origin is the upper left corner.
+            Default is the center of the image.
+        fill (3-tuple or int): RGB pixel fill value for area outside the rotated image.
+            If int, the value is used for all channels.
+    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
+    """
+
+    def __init__(self, degrees, resample=False, expand=False, center=None, fill=0):
+        if isinstance(degrees, numbers.Number):
+            if degrees < 0:
+                raise ValueError("If degrees is a single number, it must be positive.")
+            self.degrees = (-degrees, degrees)
+        else:
+            if len(degrees) != 2:
+                raise ValueError("If degrees is a sequence, it must be of len 2.")
+            self.degrees = degrees
+
+        self.resample = resample
+        self.expand = expand
+        self.center = center
+        self.fill = fill
+
+    @staticmethod
+    def get_params(degrees):
+        """Get parameters for ``rotate`` for a random rotation.
+        Returns:
+            sequence: params to be passed to ``rotate`` for random rotation.
+        """
+        angle = random.uniform(degrees[0], degrees[1])
+
+        return angle
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be rotated.
+        Returns:
+            PIL Image: Rotated image.
+        """
+
+        angle = self.get_params(self.degrees)
+
+        return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
+        format_string += ', resample={0}'.format(self.resample)
+        format_string += ', expand={0}'.format(self.expand)
+        if self.center is not None:
+            format_string += ', center={0}'.format(self.center)
+        format_string += ')'
+        return format_string
+
+
+class RandomAffine(object):
+    """Random affine transformation of the image keeping center invariant
+    Args:
+        degrees (sequence or float or int): Range of degrees to select from.
+            If degrees is a number instead of sequence like (min, max), the range of degrees
+            will be (-degrees, +degrees). Set to 0 to deactivate rotations.
+        translate (tuple, optional): tuple of maximum absolute fraction for horizontal
+            and vertical translations. For example translate=(a, b), then horizontal shift
+            is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
+            randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
+        scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
+            randomly sampled from the range a <= scale <= b. Will keep original scale by default.
+        shear (sequence or float or int, optional): Range of degrees to select from.
+            If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
+            will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the
+            range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,
+            a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
+            Will not apply shear by default
+        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
+            An optional resampling filter. See `filters`_ for more information.
+            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
+        fillcolor (tuple or int): Optional fill color (Tuple for RGB Image And int for grayscale) for the area
+            outside the transform in the output image.(Pillow>=5.0.0)
+    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
+    """
+    # degrees: rotation range for the image, in [-180, 180]
+    # translate: maximum translation fractions for the image, in [0, 1]
+    # scale: scaling range about the image center, best kept in (0, 2]
+    # shear: shear range along x and/or y, in [-180, 180]
+    # eg.
+    # preprocess1 = myTransforms.RandomAffine(degrees=0, translate=[0, 0.2], scale=[0.8, 1.2],
+    #                                        shear=[-10, 10, -10, 10], fillcolor=(228, 218, 218))
+    def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):
+        if isinstance(degrees, numbers.Number):
+            if degrees < 0:
+                raise ValueError("If degrees is a single number, it must be positive.")
+            self.degrees = (-degrees, degrees)
+        else:
+            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
+                "degrees should be a list or tuple and it must be of length 2."
+            self.degrees = degrees
+
+        if translate is not None:
+            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
+                "translate should be a list or tuple and it must be of length 2."
+            for t in translate:
+                if not (0.0 <= t <= 1.0):
+                    raise ValueError("translation values should be between 0 and 1")
+        self.translate = translate
+
+        if scale is not None:
+            assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
+                "scale should be a list or tuple and it must be of length 2."
+            for s in scale:
+                if s <= 0:
+                    raise ValueError("scale values should be positive")
+        self.scale = scale
+
+        if shear is not None:
+            if isinstance(shear, numbers.Number):
+                if shear < 0:
+                    raise ValueError("If shear is a single number, it must be positive.")
+                self.shear = (-shear, shear)
+            else:
+                assert isinstance(shear, (tuple, list)) and \
+                    (len(shear) == 2 or len(shear) == 4), \
+                    "shear should be a list or tuple and it must be of length 2 or 4."
+                # X-Axis shear with [min, max]
+                if len(shear) == 2:
+                    self.shear = [shear[0], shear[1], 0., 0.]
+                elif len(shear) == 4:
+                    self.shear = [s for s in shear]
+        else:
+            self.shear = shear
+
+        self.resample = resample
+        self.fillcolor = fillcolor
+
+    @staticmethod
+    def get_params(degrees, translate, scale_ranges, shears, img_size):
+        """Get parameters for affine transformation
+        Returns:
+            sequence: params to be passed to the affine transformation
+        """
+        angle = random.uniform(degrees[0], degrees[1])
+        if translate is not None:
+            max_dx = translate[0] * img_size[0]
+            max_dy = translate[1] * img_size[1]
+            translations = (np.round(random.uniform(-max_dx, max_dx)),
+                            np.round(random.uniform(-max_dy, max_dy)))
+        else:
+            translations = (0, 0)
+
+        if scale_ranges is not None:
+            scale = random.uniform(scale_ranges[0], scale_ranges[1])
+        else:
+            scale = 1.0
+
+        if shears is not None:
+            if len(shears) == 2:
+                shear = [random.uniform(shears[0], shears[1]), 0.]
+            elif len(shears) == 4:
+                shear = [random.uniform(shears[0], shears[1]),
+                         random.uniform(shears[2], shears[3])]
+        else:
+            shear = 0.0
+
+        return angle, translations, scale, shear
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be transformed.
+        Returns:
+            PIL Image: Affine transformed image.
+        """
+        ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
+        return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)
+
+    def __repr__(self):
+        s = '{name}(degrees={degrees}'
+        if self.translate is not None:
+            s += ', translate={translate}'
+        if self.scale is not None:
+            s += ', scale={scale}'
+        if self.shear is not None:
+            s += ', shear={shear}'
+        if self.resample > 0:
+            s += ', resample={resample}'
+        if self.fillcolor != 0:
+            s += ', fillcolor={fillcolor}'
+        s += ')'
+        d = dict(self.__dict__)
+        d['resample'] = _pil_interpolation_to_str[d['resample']]
+        return s.format(name=self.__class__.__name__, **d)
+
+
+class Grayscale(object):
+    """Convert image to grayscale.
+    Args:
+        num_output_channels (int): (1 or 3) number of channels desired for output image
+    Returns:
+        PIL Image: Grayscale version of the input.
+        - If num_output_channels == 1 : returned image is single channel
+        - If num_output_channels == 3 : returned image is 3 channel with r == g == b
+    """
+
+    def __init__(self, num_output_channels=1):
+        self.num_output_channels = num_output_channels
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be converted to grayscale.
+        Returns:
+            PIL Image: Grayscale version of the input image.
+        """
+        return F.to_grayscale(img, num_output_channels=self.num_output_channels)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
+
+
+class RandomGrayscale(object):
+    """Randomly convert image to grayscale with a probability of p (default 0.1).
+    Args:
+        p (float): probability that image should be converted to grayscale.
+    Returns:
+        PIL Image: Grayscale version of the input image with probability p and unchanged
+        with probability (1-p).
+        - If input image is 1 channel: grayscale version is 1 channel
+        - If input image is 3 channel: grayscale version is 3 channel with r == g == b
+    """
+
+    def __init__(self, p=0.1):
+        self.p = p
+
+    def __call__(self, img):
+        """
+        Args:
+            img (PIL Image): Image to be converted to grayscale.
+        Returns:
+            PIL Image: Randomly grayscaled image.
+        """
+        num_output_channels = 1 if img.mode == 'L' else 3
+        if random.random() < self.p:
+            return F.to_grayscale(img, num_output_channels=num_output_channels)
+        return img
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(p={0})'.format(self.p)
+
+
+class RandomErasing(object):
+    """ Randomly selects a rectangle region in an image and erases its pixels.
+        'Random Erasing Data Augmentation' by Zhong et al.
+        See https://arxiv.org/pdf/1708.04896.pdf
+    Args:
+         p: probability that the random erasing operation will be performed.
+         scale: range of proportion of erased area against input image.
+         ratio: range of aspect ratio of erased area.
+         value: erasing value. Default is 0. If a single int, it is used to
+            erase all pixels. If a tuple of length 3, it is used to erase
+            R, G, B channels respectively.
+            If the str 'random', each pixel is erased with a random value.
+         inplace: boolean to make this transform inplace. Default set to False.
+    Returns:
+        Erased Image.
+    Examples:
+        >>> transform = transforms.Compose([
+        >>> transforms.RandomHorizontalFlip(),
+        >>> transforms.ToTensor(),
+        >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+        >>> transforms.RandomErasing(),
+        >>> ])
+    """
+
+    def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
+        assert isinstance(value, (numbers.Number, str, tuple, list))
+        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
+            warnings.warn("range should be of kind (min, max)")
+        if scale[0] < 0 or scale[1] > 1:
+            raise ValueError("range of scale should be between 0 and 1")
+        if p < 0 or p > 1:
+            raise ValueError("range of random erasing probability should be between 0 and 1")
+
+        self.p = p
+        self.scale = scale
+        self.ratio = ratio
+        self.value = value
+        self.inplace = inplace
+
+    @staticmethod
+    def get_params(img, scale, ratio, value=0):
+        """Get parameters for ``erase`` for a random erasing.
+        Args:
+            img (Tensor): Tensor image of size (C, H, W) to be erased.
+            scale: range of proportion of erased area against input image.
+            ratio: range of aspect ratio of erased area.
+        Returns:
+            tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
+        """
+        img_c, img_h, img_w = img.shape
+        area = img_h * img_w
+
+        for attempt in range(10):
+            erase_area = random.uniform(scale[0], scale[1]) * area
+            aspect_ratio = random.uniform(ratio[0], ratio[1])
+
+            h = int(round(math.sqrt(erase_area * aspect_ratio)))
+            w = int(round(math.sqrt(erase_area / aspect_ratio)))
+
+            if h < img_h and w < img_w:
+                i = random.randint(0, img_h - h)
+                j = random.randint(0, img_w - w)
+                if isinstance(value, numbers.Number):
+                    v = value
+                elif isinstance(value, torch._six.string_classes):
+                    v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
+                elif isinstance(value, (list, tuple)):
+                    v = torch.tensor(value, dtype=torch.float32).view(-1, 1, 1).expand(-1, h, w)
+                return i, j, h, w, v
+
+        # Return original image
+        return 0, 0, img_h, img_w, img
+
+    def __call__(self, img):
+        """
+        Args:
+            img (Tensor): Tensor image of size (C, H, W) to be erased.
+        Returns:
+            img (Tensor): Erased Tensor image.
+        """
+        if random.uniform(0, 1) < self.p:
+            x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=self.value)
+            return F.erase(img, x, y, h, w, v, self.inplace)
+        return img
+
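+# Usage sketch (illustrative pipeline, assuming the Compose/ToTensor defined
+# earlier in this file): RandomErasing operates on tensors, so it must come
+# after ToTensor (and typically after Normalize), unlike the PIL transforms.
+#
+#   pipeline = Compose([RandomHorizontalFlip(), ToTensor(), RandomErasing(p=0.5)])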
+
+class HEDJitter(object):
+    """Randomly perturbe the HED color space value an RGB image.
+    First, it disentangled the hematoxylin and eosin color channels by color deconvolution method using a fixed matrix.
+    Second, it perturbed the hematoxylin, eosin and DAB stains independently.
+    Third, it transformed the resulting stains into regular RGB color space.
+    Args:
+        theta (float): How much to jitter HED color space,
+         alpha is chosen from a uniform distribution [1-theta, 1+theta]
+         betti is chosen from a uniform distribution [-theta, theta]
+         the jitter formula is **s' = \alpha * s + \betti**
+    """
+    def __init__(self, theta=0.): # HED_light: theta=0.05; HED_strong: theta=0.2
+        assert isinstance(theta, numbers.Number), "theta should be a single number."
+        self.theta = theta
+        self.alpha = np.random.uniform(1-theta, 1+theta, (1, 3))
+        self.betti = np.random.uniform(-theta, theta, (1, 3))
+
+    @staticmethod
+    def adjust_HED(img, alpha, betti):
+        img = np.array(img)
+        s = np.reshape(color.rgb2hed(img), (-1, 3))
+        ns = alpha * s + betti  # perturbations on HED color space
+        nimg = color.hed2rgb(np.reshape(ns, img.shape))
+
+        imin = nimg.min()
+        imax = nimg.max()
+        rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8')  # rescale to [0,255]
+        # transfer to PIL image
+        return Image.fromarray(rsimg)
+
+    def __call__(self, img):
+        return self.adjust_HED(img, self.alpha, self.betti)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '('
+        format_string += 'theta={0}'.format(self.theta)
+        format_string += ', alpha={0}'.format(self.alpha)
+        format_string += ', betti={0})'.format(self.betti)
+        return format_string
+        return format_string
+
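+# Usage note (sketch; theta values follow the comment above): alpha and betti
+# are drawn once in __init__, so a given HEDJitter instance applies the same
+# stain perturbation to every image -- re-instantiate it to get fresh jitter.
+#
+#   light = HEDJitter(theta=0.05)   # HED_light
+#   img = light(img)                # same perturbation on every call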
+
+class AutoRandomRotation(object):
+    """auto randomly select angle 0, 90, 180 or 270 for rotating the image.
+    Args:
+        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
+            An optional resampling filter. See `filters`_ for more information.
+            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
+        expand (bool, optional): Optional expansion flag.
+            If true, expands the output to make it large enough to hold the entire rotated image.
+            If false or omitted, make the output image the same size as the input image.
+            Note that the expand flag assumes rotation around the center and no translation.
+        center (2-tuple, optional): Optional center of rotation.
+            Origin is the upper left corner.
+            Default is the center of the image.
+        fill (3-tuple or int): RGB pixel fill value for area outside the rotated image.
+            If int, the value is used for all channels.
+    """
+
+    def __init__(self, degree=None, resample=False, expand=True, center=None, fill=0):
+        if degree is None:
+            self.degrees = random.choice([0, 90, 180, 270])
+        else:
+            assert degree in [0, 90, 180, 270], 'degree must be in [0, 90, 180, 270]'
+            self.degrees = degree
+
+        self.resample = resample
+        self.expand = expand
+        self.center = center
+        self.fill = fill
+
+    def __call__(self, img):
+        return F.rotate(img, self.degrees, self.resample, self.expand, self.center, self.fill)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
+        format_string += ', resample={0}'.format(self.resample)
+        format_string += ', expand={0}'.format(self.expand)
+        if self.center is not None:
+            format_string += ', center={0}'.format(self.center)
+        format_string += ')'
+        return format_string
+
+
+class RandomGaussBlur(object):
+    """Random GaussBlurring on image by radius parameter.
+    Args:
+        radius (list, tuple): radius range for selecting from; you'd better set it < 2
+    """
+    def __init__(self, radius=None):
+        if radius is not None:
+            assert isinstance(radius, (tuple, list)) and len(radius) == 2, \
+                "radius should be a list or tuple and it must be of length 2."
+            self.radius = random.uniform(radius[0], radius[1])
+        else:
+            self.radius = 0.0
+
+    def __call__(self, img):
+        return img.filter(ImageFilter.GaussianBlur(radius=self.radius))
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(Gaussian Blur radius={0})'.format(self.radius)
+
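+# Usage note (sketch, illustrative radius range): like HEDJitter above, the
+# radius is sampled once in __init__, so the blur strength is fixed per
+# instance rather than re-drawn per image.
+#
+#   blur = RandomGaussBlur(radius=[0.5, 1.5])
+#   img = blur(img)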
+
+class RandomAffineCV2(object):
+    """Random Affine transformation by CV2 method on image by alpha parameter.
+    Args:
+        alpha (float): alpha value for affine transformation
+        mask (PIL Image) in __call__, if not assign, set None.
+    """
+    def __init__(self, alpha):
+        assert isinstance(alpha, numbers.Number), "alpha should be a single number."
+        assert 0. <= alpha <= 0.15, \
+            "In pathological image, alpha should be in (0,0.15), you can change in myTransform.py"
+        self.alpha = alpha
+
+    @staticmethod
+    def affineTransformCV2(img, alpha, mask=None):
+        alpha = img.shape[1] * alpha
+        if mask is not None:
+            mask = np.array(mask).astype(np.uint8)
+            img = np.concatenate((img, mask[..., None]), axis=2)
+
+        imgsize = img.shape[:2]
+        center = np.float32(imgsize) // 2
+        censize = min(imgsize) // 3
+        pts1 = np.float32([center+censize, [center[0]+censize, center[1]-censize], center-censize])  # raw point
+        pts2 = pts1 + np.random.uniform(-alpha, alpha, size=pts1.shape).astype(np.float32)  # output point
+        M = cv2.getAffineTransform(pts1, pts2)  # affine matrix
+        img = cv2.warpAffine(img, M, imgsize[::-1],
+                               flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT_101)
+        if mask is not None:
+            return Image.fromarray(img[..., :3]), Image.fromarray(img[..., 3])
+        else:
+            return Image.fromarray(img)
+
+    def __call__(self, img, mask=None):
+        return self.affineTransformCV2(np.array(img), self.alpha, mask)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(alpha value={0})'.format(self.alpha)
+
+
+class RandomElastic(object):
+    """Random Elastic transformation by CV2 method on image by alpha, sigma parameter.
+        # you can refer to:  https://blog.csdn.net/qq_27261889/article/details/80720359
+        # https://blog.csdn.net/maliang_1993/article/details/82020596
+        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage.map_coordinates
+    Args:
+        alpha (float): alpha value for Elastic transformation, factor
+        if alpha is 0, output is original whatever the sigma;
+        if alpha is 1, output only depends on sigma parameter;
+        if alpha < 1 or > 1, it zoom in or out the sigma's Relevant dx, dy.
+        sigma (float): sigma value for Elastic transformation, should be \ in (0.05,0.1)
+        mask (PIL Image) in __call__, if not assign, set None.
+    """
+    def __init__(self, alpha, sigma):
+        assert isinstance(alpha, numbers.Number) and isinstance(sigma, numbers.Number), \
+            "alpha and sigma should be a single number."
+        assert 0.05 <= sigma <= 0.1, \
+            "In pathological image, sigma should be in (0.05,0.1)"
+        self.alpha = alpha
+        self.sigma = sigma
+
+    @staticmethod
+    def RandomElasticCV2(img, alpha, sigma, mask=None):
+        alpha = img.shape[1] * alpha
+        sigma = img.shape[1] * sigma
+        if mask is not None:
+            mask = np.array(mask).astype(np.uint8)
+            img = np.concatenate((img, mask[..., None]), axis=2)
+
+        shape = img.shape
+
+        dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma) * alpha
+        dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma) * alpha
+        # dz = np.zeros_like(dx)
+
+        x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
+        indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
+
+        img = map_coordinates(img, indices, order=0, mode='reflect').reshape(shape)
+        if mask is not None:
+            return Image.fromarray(img[..., :3]), Image.fromarray(img[..., 3])
+        else:
+            return Image.fromarray(img)
+
+    def __call__(self, img, mask=None):
+        return self.RandomElasticCV2(np.array(img), self.alpha, self.sigma, mask)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + '(alpha={0}'.format(self.alpha)
+        format_string += ', sigma={0})'.format(self.sigma)
+        return format_string
\ No newline at end of file
diff --git a/code/models/TransMIL.py b/code/models/TransMIL.py
index 01a0fa7..d75eb5f 100755
--- a/code/models/TransMIL.py
+++ b/code/models/TransMIL.py
@@ -58,19 +58,45 @@ class PPEG(nn.Module):
 
 
 class TransMIL(nn.Module):
-    def __init__(self, n_classes):
+    def __init__(self, n_classes, in_features, out_features=512):
         super(TransMIL, self).__init__()
-        in_features = 2048
-        inter_features = 1024
-        out_features = 512
+        # in_features = 2048
+        # inter_features = 1024
+        # inter_features_2 = 512
+        # out_features = 1024 
+        # out_features = 512 
         if apex_available: 
             norm_layer = apex.normalization.FusedLayerNorm
         else:
             norm_layer = nn.LayerNorm
 
         self.pos_layer = PPEG(dim=out_features)
-        self._fc1 = nn.Sequential(nn.Linear(in_features, inter_features), nn.GELU(), nn.Dropout(p=0.5), norm_layer(inter_features)) 
-        self._fc1_2 = nn.Sequential(nn.Linear(inter_features, out_features), nn.GELU())
+        # self._fc1 = nn.Sequential(nn.Linear(in_features, int(in_features/2)), nn.GELU(), nn.Dropout(p=0.2), norm_layer(int(in_features/2))) # 2048 -> 1024
+        # self._fc1_1 = nn.Sequential(nn.Linear(int(in_features/2), int(in_features/2)), nn.GELU(), nn.Dropout(p=0.2), norm_layer(int(in_features/2))) # 2048 -> 1024
+        # self._fc1_2 = nn.Sequential(nn.Linear(int(in_features/2), int(in_features/2)), nn.GELU(), nn.Dropout(p=0.2), norm_layer(int(in_features/2))) # 2048 -> 1024
+        # self._fc2 = nn.Sequential(nn.Linear(int(in_features/2), int(in_features/4)), nn.GELU(), nn.Dropout(p=0.2), norm_layer(int(in_features/4))) # 1024 -> 512
+        # self._fc3 = nn.Sequential(nn.Linear(int(in_features/4), out_features), nn.GELU()) # 512 -> 256
+
+
+
+        if in_features == 2048:
+            self._fc1 = nn.Sequential(
+                nn.Linear(in_features, int(in_features/2)), nn.GELU(), nn.Dropout(p=0.6), norm_layer(int(in_features/2)),
+                nn.Linear(int(in_features/2), out_features), nn.GELU(),
+                ) 
+        elif in_features == 1024:
+            self._fc1 = nn.Sequential(
+                # nn.Linear(in_features, int(in_features/2)), nn.GELU(), nn.Dropout(p=0.2), norm_layer(out_features),
+                nn.Linear(in_features, out_features), nn.GELU(), nn.Dropout(p=0.6), norm_layer(out_features)
+                ) 
+        # out_features = 256 
+        # self._fc1 = nn.Sequential(
+        #     nn.Linear(in_features, out_features), nn.GELU(), nn.Dropout(p=0.2), norm_layer(out_features)
+        #     ) 
+        # self._fc1_2 = nn.Sequential(nn.Linear(inter_features, inter_features_2), nn.GELU(), nn.Dropout(p=0.5), norm_layer(inter_features_2)) 
+        # self._fc1_3 = nn.Sequential(nn.Linear(inter_features_2, out_features), nn.GELU())
+        # self._fc1 = nn.Sequential(nn.Linear(in_features, 256), nn.GELU())
+        # self._fc1_2 = nn.Sequential(nn.Linear(int(in_features/2), out_features), nn.GELU())
         # self._fc1 = nn.Sequential(nn.Linear(1024, 512), nn.ReLU())
         
         self.cls_token = nn.Parameter(torch.randn(1, 1, out_features))
@@ -79,7 +105,7 @@ class TransMIL(nn.Module):
         self.layer2 = TransLayer(norm_layer=norm_layer, dim=out_features)
         # self.norm = nn.LayerNorm(out_features)
         self.norm = norm_layer(out_features)
-        self._fc2 = nn.Linear(out_features, self.n_classes)
+        self._fc = nn.Linear(out_features, self.n_classes)
 
         # self.model_ft = ResNet.resnet50(num_classes=self.n_classes, mlp=False, two_branch=False, normlinear=True).to(self.device)
         # home = Path.cwd().parts[1]
@@ -94,11 +120,15 @@ class TransMIL(nn.Module):
     def forward(self, x): #, **kwargs
 
         # x = self.model_ft(x).unsqueeze(0)
-        h = x.squeeze(0).float() #[B, n, 1024]
+        if x.dim() > 3:
+            x = x.squeeze(0)
+        h = x.float() #[B, n, 1024]
         h = self._fc1(h) #[B, n, 512]
         # h = self.drop(h)
-        h = self._fc1_2(h) #[B, n, 512]
-        
+
         # print('Feature Representation: ', h.shape)
         #---->duplicate pad
         H = h.shape[1]
@@ -135,7 +165,7 @@ class TransMIL(nn.Module):
         h = self.norm(h)[:,0]
 
         #---->predict
-        logits = self._fc2(h) #[B, n_classes]
+        logits = self._fc(h) #[B, n_classes]
         # Y_hat = torch.argmax(logits, dim=1)
         # Y_prob = F.softmax(logits, dim = 1)
         # results_dict = {'logits': logits, 'Y_prob': Y_prob, 'Y_hat': Y_hat}
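
A minimal sketch of driving the reworked TransMIL head. The constructor
signature is assumed to mirror the TransformerMIL one shown below, and the
bag size of 6000 is illustrative:

    import torch
    from models.TransMIL import TransMIL  # import path assumed from this repo layout

    model = TransMIL(n_classes=2, in_features=2048, out_features=512)
    bag = torch.randn(1, 6000, 2048)  # [B, n, in_features]
    out = model(bag)                  # logits of shape [B, n_classes] per the hunk above
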
diff --git a/code/models/TransformerMIL.py b/code/models/TransformerMIL.py
index d5e6b89..c5b5951 100644
--- a/code/models/TransformerMIL.py
+++ b/code/models/TransformerMIL.py
@@ -4,6 +4,13 @@ import torch.nn.functional as F
 import numpy as np
 from nystrom_attention import NystromAttention
 
+try:
+    import apex
+    apex_available = True
+except ModuleNotFoundError:
+    apex_available = False
 
 class TransLayer(nn.Module):
 
@@ -46,12 +53,25 @@ class PPEG(nn.Module):
 
 
 class TransformerMIL(nn.Module):
-    def __init__(self, n_classes):
+    def __init__(self, n_classes, in_features, out_features=512):
         super(TransformerMIL, self).__init__()
-        in_features = 1024
-        out_features = 512
         # self.pos_layer = PPEG(dim=out_features)
-        self._fc1 = nn.Sequential(nn.Linear(in_features, out_features), nn.GELU())
+        if apex_available: 
+            norm_layer = apex.normalization.FusedLayerNorm
+        else:
+            norm_layer = nn.LayerNorm
+        if in_features == 2048:
+            self._fc1 = nn.Sequential(
+                nn.Linear(in_features, int(in_features/2)), nn.GELU(), nn.Dropout(p=0.6), norm_layer(int(in_features/2)),
+                nn.Linear(int(in_features/2), out_features), nn.GELU(),
+                ) 
+        elif in_features == 1024:
+            self._fc1 = nn.Sequential(
+                nn.Linear(in_features, out_features), nn.GELU(), nn.Dropout(p=0.6), norm_layer(out_features)
+                )
+        else:
+            raise ValueError(f'TransformerMIL: unsupported in_features {in_features}')
         # self._fc1 = nn.Sequential(nn.Linear(1024, 512), nn.ReLU())
         self.cls_token = nn.Parameter(torch.randn(1, 1, out_features))
         self.n_classes = n_classes
@@ -63,7 +83,7 @@ class TransformerMIL(nn.Module):
 
     def forward(self, x): #, **kwargs
 
-        h = x.float() #[B, n, 1024]
+        h = x.squeeze(0).float() #[B, n, in_features]
         h = self._fc1(h) #[B, n, 512]
         
         # print('Feature Representation: ', h.shape)
@@ -102,7 +122,7 @@ class TransformerMIL(nn.Module):
         #---->predict
         logits = self._fc2(h) #[B, n_classes]
         # return logits, attn2
-        return logits, attn1
+        return logits
 
 if __name__ == "__main__":
     data = torch.randn((1, 6000, 512)).cuda()
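
Since forward() now returns logits only, any caller that unpacked
(logits, attn1) needs a one-line change. A sketch, assuming the class is
importable as below and using the 1024-feature branch:

    import torch
    from models.TransformerMIL import TransformerMIL  # assumed import path

    model = TransformerMIL(n_classes=2, in_features=1024)
    data = torch.randn(1, 6000, 1024)
    logits = model(data)  # previously: logits, attn1 = model(data)
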
diff --git a/code/models/__pycache__/TransMIL.cpython-39.pyc b/code/models/__pycache__/TransMIL.cpython-39.pyc
index 21d7707719795a1335b1187bffed7fd0328ba3fb..0e5f4fc107123caf151ac1282261571966a5a167 100644

diff --git a/code/models/__pycache__/TransformerMIL.cpython-39.pyc b/code/models/__pycache__/TransformerMIL.cpython-39.pyc
index f2c5bda0715cd2d8601f4a33e2187a553a3f322a..8b1abd15afbfda834b941d0a5e2eff2d1aa7773d 100644

diff --git a/code/models/__pycache__/model_interface.cpython-39.pyc b/code/models/__pycache__/model_interface.cpython-39.pyc
index 0466ab737a0a0da0cfa6b04ea69f2aef82561a6d..3da7b2035d7f1297664e6c70bc48be9b1643e010 100644

diff --git a/code/models/__pycache__/model_interface_classic.cpython-39.pyc b/code/models/__pycache__/model_interface_classic.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97044fe05269d43ef2c97e75a0d3b37f58673a23

diff --git a/code/models/model_interface.py b/code/models/model_interface.py
index 0186c06..3d37368 100755
--- a/code/models/model_interface.py
+++ b/code/models/model_interface.py
@@ -8,6 +8,8 @@ import pandas as pd
 import seaborn as sns
 from pathlib import Path
 from matplotlib import pyplot as plt
+plt.style.use('tableau-colorblind10')
 import cv2
 from PIL import Image
 from pytorch_pretrained_vit import ViT
@@ -28,8 +30,9 @@ import torch.nn as nn
 import torch.nn.functional as F
 import torchmetrics
 from torchmetrics.functional import stat_scores
+from torchmetrics.functional.classification import binary_auroc, multiclass_auroc, binary_precision_recall_curve, multiclass_precision_recall_curve
 from torch import optim as optim
-from torch.optim.lr_scheduler import ReduceLROnPlateau
+from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingWarmRestarts
 
 from monai.config import KeysCollection
 from monai.data import Dataset, load_decathlon_datalist
@@ -37,6 +40,7 @@ from monai.data.wsi_reader import WSIReader
 from monai.metrics import Cumulative, CumulativeAverage
 from monai.networks.nets import milmodel
 
+
 # from sklearn.metrics import roc_curve, auc, roc_curve_score
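
The newly imported CosineAnnealingWarmRestarts is presumably intended for
configure_optimizers, which is not shown in this hunk. A hedged sketch of
one way to wire it up; T_0, T_mult and the learning rate are illustrative:

    # Sketch only; values are not taken from this patch.
    def configure_optimizers(self):
        optimizer = optim.Adam(self.model.parameters(), lr=2e-4)
        scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2)
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}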
 
 
@@ -81,24 +85,27 @@ class ModelInterface(pl.LightningModule):
         if model.name == 'AttTrans':
             self.model = milmodel.MILModel(num_classes=self.n_classes, pretrained=True, mil_mode='att_trans', backbone_num_features=1024)
         else: self.load_model()
-        # self.loss = create_loss(loss, model.n_classes)
-        # self.loss = 
         if self.n_classes>2:
-            self.aucm_loss = AUCM_MultiLabel(num_classes = model.n_classes, device=self.device)
+            # self.aucm_loss = AUCM_MultiLabel(num_classes = self.n_classes, device=self.device)
+            # self.loss = LabelSmoothingCrossEntropy(smoothing=0.1)
+            self.loss = create_loss(loss, model.n_classes)
         else:
-            self.aucm_loss = AUCMLoss()
+            # self.loss = CompositionalAUCLoss()
+            self.loss = create_loss(loss, model.n_classes)
         # self.asl = AsymmetricLossSingleLabel()
-        self.loss = LabelSmoothingCrossEntropy(smoothing=0.1)
+        self.lsce_loss = LabelSmoothingCrossEntropy(smoothing=0.2)
 
-        # self.loss = 
-        # print(self.model)
         self.model_name = model.name
         
         
-        # self.ecam = EigenGradCAM(model = self.model, target_layers = target_layers, use_cuda=True, reshape_transform=self.reshape_transform)
         self.optimizer = optimizer
         
         self.save_path = kargs['log']
+        
+        # self.in_features = kargs['in_features']
+        # self.out_features = kargs['out_features']
+        self.in_features = 2048
+        self.out_features = 512
         if Path(self.save_path).parts[3] == 'tcmr':
             temp = list(Path(self.save_path).parts)
             # print(temp)
@@ -112,40 +119,38 @@ class ModelInterface(pl.LightningModule):
 
         #---->acc
         self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
+        self.data_patient = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
         # print(self.experiment)
         #---->Metrics
         if self.n_classes > 2: 
-            self.AUROC = torchmetrics.AUROC(num_classes = self.n_classes, average='macro')
-            
-            metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(num_classes = self.n_classes,
+            self.AUROC = torchmetrics.AUROC(task='multiclass', num_classes = self.n_classes, average='weighted')
+            self.PRC = torchmetrics.PrecisionRecallCurve(task='multiclass', num_classes = self.n_classes)
+            self.ROC = torchmetrics.ROC(task='multiclass', num_classes=self.n_classes)
+            self.confusion_matrix = torchmetrics.ConfusionMatrix(task='multiclass', num_classes = self.n_classes) 
+            metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(task='multiclass', num_classes = self.n_classes,
                                                                            average='weighted'),
-                                                     torchmetrics.CohenKappa(num_classes = self.n_classes),
-                                                     torchmetrics.F1Score(num_classes = self.n_classes,
+                                                     torchmetrics.CohenKappa(task='multiclass', num_classes = self.n_classes),
+                                                     torchmetrics.F1Score(task='multiclass', num_classes = self.n_classes,
                                                                      average = 'macro'),
-                                                     torchmetrics.Recall(average = 'macro',
+                                                     torchmetrics.Recall(task='multiclass', average = 'macro',
                                                                          num_classes = self.n_classes),
-                                                     torchmetrics.Precision(average = 'macro',
+                                                     torchmetrics.Precision(task='multiclass', average = 'macro',
                                                                             num_classes = self.n_classes),
-                                                     torchmetrics.Specificity(average = 'macro',
+                                                     torchmetrics.Specificity(task='multiclass', average = 'macro',
                                                                             num_classes = self.n_classes)])
                                                                             
         else : 
-            self.AUROC = torchmetrics.AUROC(num_classes=self.n_classes, average='weighted')
+            self.AUROC = torchmetrics.AUROC(task='binary')
             # self.AUROC = torchmetrics.AUROC(num_classes=self.n_classes, average = 'weighted')
-
-            metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(num_classes = 2,
-                                                                           average = 'weighted'),
-                                                     torchmetrics.CohenKappa(num_classes = 2),
-                                                     torchmetrics.F1Score(num_classes = 2,
-                                                                     average = 'macro'),
-                                                     torchmetrics.Recall(average = 'macro',
-                                                                         num_classes = 2),
-                                                     torchmetrics.Precision(average = 'macro',
-                                                                            num_classes = 2)])
-        self.PRC = torchmetrics.PrecisionRecallCurve(num_classes = self.n_classes)
-        self.ROC = torchmetrics.ROC(num_classes=self.n_classes)
-        # self.pr_curve = torchmetrics.BinnedPrecisionRecallCurve(num_classes = self.n_classes, thresholds=10)
-        self.confusion_matrix = torchmetrics.ConfusionMatrix(num_classes = self.n_classes)                                                                    
+            self.PRC = torchmetrics.PrecisionRecallCurve(task='binary')
+            self.ROC = torchmetrics.ROC(task='binary')
+            metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(task='binary'),
+                                                     torchmetrics.CohenKappa(task='binary'),
+                                                     torchmetrics.F1Score(task='binary'),
+                                                     torchmetrics.Recall(task='binary'),
+                                                     torchmetrics.Precision(task='binary')
+                                                     ])
+            self.confusion_matrix = torchmetrics.ConfusionMatrix(task='binary')                                                                    
         self.valid_metrics = metrics.clone(prefix = 'val_')
         self.valid_patient_metrics = metrics.clone(prefix = 'val_patient_')
         self.test_metrics = metrics.clone(prefix = 'test_')
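
These constructors follow the task-based API introduced in torchmetrics
0.11, where task='binary' or task='multiclass' is mandatory. A standalone
check of the multiclass form used above:

    import torch
    import torchmetrics  # >= 0.11 for the task-based API

    auroc = torchmetrics.AUROC(task='multiclass', num_classes=3, average='weighted')
    preds = torch.softmax(torch.randn(8, 3), dim=1)  # per-class probabilities
    target = torch.randint(0, 3, (8,))
    print(auroc(preds, target))  # scalar tensor in [0, 1]
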
@@ -156,13 +161,23 @@ class ModelInterface(pl.LightningModule):
         self.count = 0
         self.backbone = kargs['backbone']
 
-        self.out_features = 1024
 
         if self.backbone == 'features':
             self.model_ft = None
+            
         elif self.backbone == 'dino':
             self.feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/dino-vitb16')
             self.model_ft = ViTModel.from_pretrained('facebook/dino-vitb16')
+        # TODO: an 'inception' backbone (torchvision inception_v3, aux_logits
+        # disabled, weights frozen) is planned but not wired up yet.
+
         elif self.backbone == 'resnet18':
             self.model_ft = models.resnet18(weights='IMAGENET1K_V1')
             # modules = list(resnet18.children())[:-1]
@@ -188,7 +203,7 @@ class ModelInterface(pl.LightningModule):
             # )
         elif self.backbone == 'retccl':
             # import models.ResNet as ResNet
-            self.model_ft = ResNet.resnet50(num_classes=self.n_classes, mlp=False, two_branch=False, normlinear=True)
+            self.model_ft = ResNet.resnet50(num_classes=128, mlp=False, two_branch=False, normlinear=True)
             home = Path.cwd().parts[1]
             # pre_model = 
             # self.model_ft.fc = nn.Identity()
@@ -196,8 +211,8 @@ class ModelInterface(pl.LightningModule):
             self.model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
             for param in self.model_ft.parameters():
                 param.requires_grad = False
-            self.model_ft.fc = nn.Linear(2048, self.out_features)
-            
+            self.model_ft.fc = torch.nn.Identity()
+            # self.model_ft.eval()
             # self.model_ft = FeatureExtractor('retccl', self.n_classes)
 
 
@@ -207,25 +222,6 @@ class ModelInterface(pl.LightningModule):
             for param in self.model_ft.parameters():
                 param.requires_grad = False
 
-            # self.model_ft = models.resnet50(pretrained=True)
-            # for param in self.model_ft.parameters():
-            #     param.requires_grad = False
-            # self.model_ft.fc = nn.Linear(2048, self.out_features)
-
-
-            # modules = list(resnet50.children())[:-3]
-            # res50 = nn.Sequential(
-            #     *modules,     
-            # )
-            
-            # self.model_ft = nn.Sequential(
-            #     res50,
-            #     nn.AdaptiveAvgPool2d(1),
-            #     View((-1, 1024)),
-            #     nn.Linear(1024, self.out_features),
-            #     # nn.GELU()
-            # )
-        # elif kargs
             
         elif self.backbone == 'efficientnet':
             efficientnet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_widese_b0', pretrained=True)
@@ -250,6 +246,10 @@ class ModelInterface(pl.LightningModule):
                 nn.Linear(53*53, self.out_features),
                 nn.ReLU(),
             )
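+        # Example input for Lightning's graph/summary logging: image bags of
+        # shape [B, N, C, H, W] when a CNN backbone is attached, otherwise
+        # pre-extracted feature bags of shape [B, N, in_features].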
+        if self.model_ft:
+            self.example_input_array = torch.rand([1,1,3,224,224])
+        else:
+            self.example_input_array = torch.rand([1,1000,self.in_features])
         # print(self.model_ft[0].features[-1])
         # print(self.model_ft)
 
@@ -260,17 +260,26 @@ class ModelInterface(pl.LightningModule):
         if self.model_name == 'AttTrans':
             return self.model(x)
         if self.model_ft:
-            x = x.squeeze(0)
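+            # flatten the bag so the feature extractor sees a plain
+            # [B*N, C, H, W] image batch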
+            batch_size = x.shape[0]
+            bag_size = x.shape[1]
+            x = x.view(batch_size*bag_size, x.shape[2], x.shape[3], x.shape[4])
             feats = self.model_ft(x).unsqueeze(0)
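+            # restore the bag structure: [B, N, feature_dim]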
+            feats = feats.view(batch_size, bag_size, -1)
         else: 
             feats = x.unsqueeze(0)
-        
+        del x
         return self.model(feats)
         # return self.model(x)
 
     def step(self, input):
 
         input = input.float()
         # logits, _ = self(input.contiguous()) 
         logits = self(input.contiguous())
         Y_hat = torch.argmax(logits, dim=1)
@@ -282,45 +291,36 @@ class ModelInterface(pl.LightningModule):
 
         return logits, Y_prob, Y_hat
 
-    def training_step(self, batch, batch_idx):
+    def training_step(self, batch):
 
         input, label, _ = batch
 
-        #random image dropout
 
-        # bag_size = input.squeeze().shape[0] * 0.7
-        # bag_idxs = torch.randperm(input.squeeze(0).shape[0])[:bag_size]
-        # input = input.squeeze(0)[bag_idxs].unsqueeze(0)
-
-        # label = label.float()
-        
         logits, Y_prob, Y_hat = self.step(input) 
 
         #---->loss
-        loss = self.loss(logits, label)
 
         one_hot_label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
-        # aucm_loss = self.aucm_loss(torch.sigmoid(logits), one_hot_label)
-        # total_loss = torch.mean(loss + aucm_loss)
-        Y = int(label)
-        # print(logits, label)
-        # loss = cross_entropy_torch(logits.squeeze(0), label)
-        # loss = self.asl(logits, label.squeeze())
-
-        #---->acc log
-        # print(label)
-        # Y_hat = int(Y_hat)
-        # if self.n_classes == 2:
-        #     Y = int(label[0][1])
-        # else: 
-        # Y = torch.argmax(label)
+        loss = self.loss(logits, one_hot_label.float())
+        if loss.ndim == 0:
+            loss = loss.unsqueeze(0)
+        # if self.n_classes > 2: 
+        #     aucm_loss = loss
         
-            # Y = int(label[0])
-        self.data[Y]["count"] += 1
-        self.data[Y]["correct"] += (int(Y_hat) == Y)
+
+        # total_loss = (aucm_loss + loss)/2
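+        # per-class running accuracy tallies, reported at epoch end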
+        for y, y_hat in zip(label, Y_hat):
+            y = int(y)
+            self.data[y]["count"] += 1
+            self.data[y]["correct"] += (int(y_hat) == y)
+
         # self.log('total_loss', total_loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
-        # self.log('aucm_loss', aucm_loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
-        self.log('lsce_loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        self.log('loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
 
         # if self.current_epoch % 10 == 0:
 
@@ -339,11 +339,23 @@ class ModelInterface(pl.LightningModule):
         return {'loss': loss, 'Y_prob': Y_prob, 'Y_hat': Y_hat, 'label': label} 
 
     def training_epoch_end(self, training_step_outputs):
-        # logits = torch.cat([x['logits'] for x in training_step_outputs], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in training_step_outputs])
-        max_probs = torch.stack([x['Y_hat'] for x in training_step_outputs])
-        # target = torch.stack([x['label'] for x in training_step_outputs], dim = 0)
-        target = torch.stack([x['label'] for x in training_step_outputs])
+        max_probs = torch.cat([x['Y_hat'] for x in training_step_outputs])
+        target = torch.cat([x['label'] for x in training_step_outputs], dim=0).int()
         # target = torch.argmax(target, dim=1)
 
         if self.current_epoch % 5 == 0:
@@ -361,31 +373,49 @@ class ModelInterface(pl.LightningModule):
         # print('probs: ', probs)
         if self.current_epoch % 10 == 0:
             self.log_confusion_matrix(max_probs, target, stage='train')
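+        # torchmetrics' binary AUROC expects positive-class probabilities;
+        # the multiclass variant takes the full [N, C] probability matrix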
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
 
-        self.log('Train/auc', self.AUROC(probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        self.log('Train/auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
     def validation_step(self, batch, batch_idx):
 
-        input, label, (wsi_name, batch_names, patient) = batch
+        input, label, (wsi_name, patient) = batch
         # label = label.float()
         
         logits, Y_prob, Y_hat = self.step(input) 
+        logits = logits.detach()
+        Y_prob = Y_prob.detach()
+        Y_hat = Y_hat.detach()
 
         #---->acc log
         # Y = int(label[0][1])
         # Y = torch.argmax(label)
-        loss = self.loss(logits, label)
+        loss = self.lsce_loss(logits, label)
+        # loss = cross_entropy_torch(logits, label)
+        # one_hot_label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+        # aucm_loss = self.aucm_loss(logits, one_hot_label.float())
+        # if aucm_loss.ndim == 0:
+        #     aucm_loss = aucm_loss.unsqueeze(0)
         # loss = self.loss(logits, label)
+        # total_loss = (aucm_loss + loss)/2
         # print(loss)
-        Y = int(label)
 
-        # print(Y_hat)
-        self.data[Y]["count"] += 1
-        self.data[Y]["correct"] += (int(Y_hat) == Y)
+        for y, y_hat in zip(label, Y_hat):
+            y = int(y)
+            self.data[y]["count"] += 1
+            self.data[y]["correct"] += (int(y_hat) == y)
         
         # self.data[Y]["correct"] += (Y_hat.item() == Y)
-
-        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': wsi_name, 'patient': patient, 'loss':loss}
+        self.log('val_loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        # self.log('val_aucm_loss', aucm_loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label.int(), 'name': wsi_name, 'patient': patient, 'loss':loss}
 
 
     def validation_epoch_end(self, val_step_outputs):
@@ -396,18 +426,23 @@ class ModelInterface(pl.LightningModule):
         
         logits = torch.cat([x['logits'] for x in val_step_outputs], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in val_step_outputs])
-        max_probs = torch.stack([x['Y_hat'] for x in val_step_outputs])
-        target = torch.stack([x['label'] for x in val_step_outputs], dim=0).int()
+        max_probs = torch.cat([x['Y_hat'] for x in val_step_outputs])
+        target = torch.cat([x['label'] for x in val_step_outputs])
         slide_names = [x['name'] for x in val_step_outputs]
         patients = [x['patient'] for x in val_step_outputs]
 
         loss = torch.stack([x['loss'] for x in val_step_outputs])
         # loss = torch.cat([x['loss'] for x in val_step_outputs])
         # print(loss.shape)
         
 
         # self.log('val_loss', cross_entropy_torch(logits.squeeze(), target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
-        self.log('val_loss', loss, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         
         # print(logits)
         # print(target)
@@ -415,8 +450,16 @@ class ModelInterface(pl.LightningModule):
                           on_epoch = True, logger = True, sync_dist=True)
         
 
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
+
+        # sanity check against the functional API (currently unused):
+        # bin_auroc = binary_auroc(out_probs, target.squeeze())
+
         if len(target.unique()) != 1:
-            self.log('val_auc', self.AUROC(probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            self.log('val_auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
             # self.log('val_patient_auc', self.AUROC(patient_score, patient_target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         else:    
             self.log('val_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
@@ -435,55 +478,77 @@ class ModelInterface(pl.LightningModule):
         patient_list = []            
         patient_score = []      
         patient_target = []
 
         for p, s, pr, t in zip(patients, slide_names, probs, target):
+            p = p[0]
             if p not in complete_patient_dict.keys():
-                complete_patient_dict[p] = [(s, pr)]
+                complete_patient_dict[p] = {'scores':[(s[0], pr)], 'patient_score': 0}
                 patient_target.append(t)
             else:
-                complete_patient_dict[p].append((s, pr))
+                complete_patient_dict[p]['scores'].append((s[0], pr))
 
-       
 
         for p in complete_patient_dict.keys():
             score = []
-            for (slide, probs) in complete_patient_dict[p]:
-                # max_probs = torch.argmax(probs)
-                # if self.n_classes == 2:
-                #     score.append(max_probs)
-                # else: score.append(probs)
+            for (slide, probs) in complete_patient_dict[p]['scores']:
                 score.append(probs)
-
-            # if self.n_classes == 2:
-                # score =
-            score = torch.mean(torch.stack(score), dim=0) #.cpu().detach().numpy()
-            # complete_patient_dict[p]['score'] = score
-            # print(p, score)
-            # patient_list.append(p)    
-            patient_score.append(score)    
+            score = torch.stack(score)
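+            # binary rule: a patient is scored by the mean over the slides
+            # predicted positive, if there are any (any-positive aggregation)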
+            if self.n_classes == 2:
+                positive_positions = (score.argmax(dim=1) == 1).nonzero().squeeze()
+                if positive_positions.numel() != 0:
+                    score = score[positive_positions]
+            if len(score.shape) > 1:
+                score = torch.mean(score, dim=0)
+
+            patient_score.append(score)  
+            complete_patient_dict[p]['patient_score'] = score
+        correct_patients = []
+        false_patients = []
+
+        for patient, label in zip(complete_patient_dict.keys(), patient_target):
+            if label == 0:
+                p_score =  complete_patient_dict[patient]['patient_score']
+                if torch.argmax(p_score) == label:
+                    correct_patients.append(patient)
+                else: 
+                    false_patients.append(patient)
 
         patient_score = torch.stack(patient_score)
-        # print(patient_target)
-        # print(torch.cat(patient_target))
-        # print(self.AUROC(patient_score.squeeze(), torch.cat(patient_target)))
-
         
-        patient_target = torch.cat(patient_target)
-
+        if self.n_classes <= 2:
+            patient_score = patient_score[:, 1]
+        patient_target = torch.stack(patient_target)
         # print(patient_score.shape)
         # print(patient_target.shape)
-        
+        if len(patient_target.shape) > 1:
+            patient_target = patient_target.squeeze()
+        self.log_roc_curve(patient_score, patient_target, stage='val')
         if len(patient_target.unique()) != 1:
-            self.log('val_patient_auc', self.AUROC(patient_score.squeeze(), patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            self.log('val_patient_auc', self.AUROC(patient_score, patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         else:    
             self.log('val_patient_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         
-        self.log_dict(self.valid_patient_metrics(patient_score, patient_target),
+        self.log_dict(self.valid_patient_metrics(patient_score, patient_target),
                           on_epoch = True, logger = True, sync_dist=True)
-        
-            
-
-        # precision, recall, thresholds = self.PRC(probs, target)
 
         
 
@@ -507,36 +572,51 @@ class ModelInterface(pl.LightningModule):
 
     def test_step(self, batch, batch_idx):
 
-        input, label, (wsi_name, batch_names, patient) = batch
-        label = label.float()
-        
+        input, label, (wsi_name, patient) = batch
         logits, Y_prob, Y_hat = self.step(input) 
-
+        loss = self.lsce_loss(logits, label)
         #---->acc log
-        Y = int(label)
+        for y, y_hat in zip(label, Y_hat):
+            y = int(y)
+            self.data[y]["count"] += 1
+            self.data[y]["correct"] += (int(y_hat) == y)
+
         # Y = torch.argmax(label)
 
-        # print(Y_hat)
-        self.data[Y]["count"] += 1
-        self.data[Y]["correct"] += (int(Y_hat) == Y)
         # self.data[Y]["correct"] += (Y_hat.item() == Y)
 
-        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': wsi_name, 'patient': patient}
+        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label.int(), 'loss': loss, 'name': wsi_name, 'patient': patient}
 
     def test_epoch_end(self, output_results):
         logits = torch.cat([x['logits'] for x in output_results], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in output_results])
-        max_probs = torch.stack([x['Y_hat'] for x in output_results])
-        target = torch.stack([x['label'] for x in output_results]).int()
+        max_probs = torch.cat([x['Y_hat'] for x in output_results])
+        target = torch.cat([x['label'] for x in output_results])
         slide_names = [x['name'] for x in output_results]
         patients = [x['patient'] for x in output_results]
+        loss = torch.stack([x['loss'] for x in output_results])
         
         self.log_dict(self.test_metrics(max_probs.squeeze(), target.squeeze()),
                           on_epoch = True, logger = True, sync_dist=True)
-        self.log('test_loss', cross_entropy_torch(logits.squeeze(), target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        self.log('test_loss', loss.mean(), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
 
         if len(target.unique()) != 1:
-            self.log('test_auc', self.AUROC(probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            self.log('test_auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
             # self.log('val_patient_auc', self.AUROC(patient_score, patient_target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         else:    
             self.log('test_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
@@ -554,63 +634,91 @@ class ModelInterface(pl.LightningModule):
         patient_class_score = 0
 
         for p, s, pr, t in zip(patients, slide_names, probs, target):
+            p = p[0]
             if p not in complete_patient_dict.keys():
-                complete_patient_dict[p] = [(s, pr)]
+                complete_patient_dict[p] = {'scores':[(s[0], pr)], 'patient_score': 0}
                 patient_target.append(t)
             else:
-                complete_patient_dict[p].append((s, pr))
+                complete_patient_dict[p]['scores'].append((s[0], pr))
 
-       
 
         for p in complete_patient_dict.keys():
             score = []
-            for (slide, probs) in complete_patient_dict[p]:
-                # if self.n_classes == 2:
-                #     if probs.argmax().item() == 1: # only if binary and if class 1 is more important!!! Normal vs Diseased or Rejection vs Other
-                #         score.append(probs)
-                    
-                # else: 
+            for (slide, probs) in complete_patient_dict[p]['scores']:
                 score.append(probs)
             # print(score)
             score = torch.stack(score)
             # print(score)
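+            # same any-positive patient aggregation as in validation_epoch_end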
             if self.n_classes == 2:
                 positive_positions = (score.argmax(dim=1) == 1).nonzero().squeeze()
                 if positive_positions.numel() != 0:
                     score = score[positive_positions]
-            else:
-            # score = torch.stack(torch.score)
-            ## get scores that predict class 1:
-            # positive_scores = score.argmax(dim=1)
-            # score = torch.sum(score.argmax(dim=1))
-
-            # if score.item() == 1:
-            #     patient_class_score = 1
-                score = torch.mean(score) #.cpu().detach().numpy()
-            # complete_patient_dict[p]['score'] = score
-            # print(p, score)
-            # patient_list.append(p)    
-            patient_score.append(score)    
-
-        print(patient_score)
+            if len(score.shape) > 1:
+                score = torch.mean(score, dim=0)
+
+            patient_score.append(score)  
+            complete_patient_dict[p]['patient_score'] = score
+        correct_patients = []
+        false_patients = []
+
+        for patient, label in zip(complete_patient_dict.keys(), patient_target):
+            if label == 0:
+                p_score =  complete_patient_dict[patient]['patient_score']
+                if torch.argmax(p_score) == label:
+                    correct_patients.append(patient)
+                else: 
+                    false_patients.append(patient)
 
         patient_score = torch.stack(patient_score)
-        # patient_target = torch.stack(patient_target)
-        patient_target = torch.cat(patient_target)
+
+        if self.n_classes <= 2:
+            patient_score = patient_score[:, 1]
+        patient_target = torch.stack(patient_target)
+        self.log_roc_curve(patient_score, patient_target.squeeze(), stage='test')
 
         
         if len(patient_target.unique()) != 1:
-            self.log('test_patient_auc', self.AUROC(patient_score.squeeze(), patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            self.log('test_patient_auc', self.AUROC(patient_score, patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         else:    
             self.log('test_patient_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         
         self.log_dict(self.test_patient_metrics(patient_score, patient_target),
                           on_epoch = True, logger = True, sync_dist=True)
         
-            
-
-        # precision, recall, thresholds = self.PRC(probs, target)
-
+
+        self.log_pr_curve(patient_score, patient_target.squeeze(), stage='test')
         
 
         #---->acc log
@@ -624,6 +732,10 @@ class ModelInterface(pl.LightningModule):
             print('test class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
         self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
         
         #---->random, if shuffle data, change seed
         if self.shuffle == True:
             self.count = self.count+1
@@ -631,12 +743,22 @@ class ModelInterface(pl.LightningModule):
 
     def configure_optimizers(self):
         # optimizer_ft = optim.Adam(self.model_ft.parameters(), lr=self.optimizer.lr*0.1)
-        optimizer = create_optimizer(self.optimizer, self.model)
-        # optimizer = PESG(self.model, loss_fn=self.aucm_loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
+        # Both class-count branches currently resolve to the standard optimizer
+        # factory; the AUCM-specific optimizers are kept below for reference.
+        # if self.n_classes > 2:
+        #     optimizer = PESG(self.model, loss_fn=self.aucm_loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
+        # else:
+        #     optimizer = PDSCA(self.model, loss_fn=self.loss, lr=0.005, margin=1.0, epoch_decay=2e-3, weight_decay=1e-4, beta0=0.9, beta1=0.9, device=self.device)
+        optimizer = create_optimizer(self.optimizer, self.model)
         # optimizer = PDSCA(self.model, loss_fn=self.loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
-        scheduler = {'scheduler': ReduceLROnPlateau(optimizer, mode='min', factor=0.5), 'monitor': 'val_loss', 'frequency': 5}
+        # scheduler = {'scheduler': CosineAnnealingLR(optimizer, T_max=...), 'monitor': 'val_loss'}
+        scheduler = {'scheduler': ReduceLROnPlateau(optimizer, mode='min', factor=0.1), 'monitor': 'val_loss', 'frequency': 10}
+        # scheduler_aucm = {'scheduler': CosineAnnealingWarmRestarts(optimizer_aucm, T_0=20)}
         
         return [optimizer], [scheduler]     
 
     # def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
     #     optimizer.zero_grad(set_to_none=True)
@@ -690,6 +812,19 @@ class ModelInterface(pl.LightningModule):
 
         pass
 
+    def init_backbone(self):
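+        # Attach a frozen RetCCL ResNet-50 feature extractor on demand,
+        # e.g. when a model trained on pre-extracted features is run on raw images.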
+        self.backbone = 'retccl'
+        # import models.ResNet as ResNet
+        self.model_ft = ResNet.resnet50(num_classes=128, mlp=False, two_branch=False, normlinear=True)
+        home = Path.cwd().parts[1]
+        self.model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
+        for param in self.model_ft.parameters():
+            param.requires_grad = False
+        self.model_ft.fc = torch.nn.Identity()
+        self.model_ft.to(self.device)
 
     def instancialize(self, Model, **other_args):
         """ Instancialize a model using the corresponding parameters
@@ -728,31 +863,97 @@ class ModelInterface(pl.LightningModule):
 
         fig_.clf()
 
-    def log_roc_curve(self, probs, target, stage):
+    def log_roc_curve(self, probs, target, stage, comment=''):
 
         fpr_list, tpr_list, thresholds = self.ROC(probs, target)
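+        # self.ROC returns one (fpr, tpr) curve per class for multiclass tasks
+        # and a single pair of arrays for binary tasks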
 
-        plt.figure(1)
+        fig, ax = plt.subplots(figsize=(6,6))
         if self.n_classes > 2:
+            auroc_score = multiclass_auroc(probs, target.squeeze(), num_classes=self.n_classes, average=None)
             for i in range(len(fpr_list)):
                 fpr = fpr_list[i].cpu().numpy()
                 tpr = tpr_list[i].cpu().numpy()
-                plt.plot(fpr, tpr, label=f'class_{i}')
+                ax.plot(fpr, tpr, label=f'class_{i}, AUROC={auroc_score[i]:.3f}')
         else: 
-            print(fpr_list)
+            auroc_score = binary_auroc(probs, target.squeeze())
+
             fpr = fpr_list.cpu().numpy()
             tpr = tpr_list.cpu().numpy()
-            plt.plot(fpr, tpr)
+            ax.plot(fpr, tpr, label=f'AUROC={auroc_score:.3f}')
+
+
+        ax.set_xlim([0,1])
+        ax.set_ylim([0,1])
+        ax.set_xlabel('False positive rate')
+        ax.set_ylabel('True positive rate')
+        ax.set_title('ROC curve')
+        ax.legend(loc='lower right')
+
+        if stage == 'train':
+            # add_figure expects a matplotlib Figure, not the pyplot module
+            self.loggers[0].experiment.add_figure(f'{stage}/ROC_{stage}', fig, self.current_epoch)
+        else:
+            fig.savefig(f'{self.loggers[0].log_dir}/roc_{stage}.jpg', dpi=400)
+
+    def log_pr_curve(self, probs, target, stage, comment=''):
+
+        fig, ax = plt.subplots(figsize=(6,6))
+        
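+        # the dashed baseline marks each class's prevalence, i.e. the precision
+        # of a classifier that predicts that class for every sample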
+        if self.n_classes > 2:
+
+            precision, recall, thresholds = multiclass_precision_recall_curve(probs, target, num_classes=self.n_classes)
+            for i in range(len(precision)):
+                pr = precision[i].cpu().numpy()
+                re = recall[i].cpu().numpy()
+                ax.plot(re, pr, label=f'class_{i}')
+                baseline = len(target[target==i]) / len(target)
+                ax.plot([0,1],[baseline, baseline], linestyle='--', label=f'Baseline_{i}')
+
+        else: 
+            precision, recall, thresholds = binary_precision_recall_curve(probs, target)
+            baseline = len(target[target==1]) / len(target)
+            pr = precision.cpu().numpy()
+            re = recall.cpu().numpy()
+            ax.plot(re, pr, label='PR curve')
         
-        plt.xlabel('False positive rate')
-        plt.ylabel('True positive rate')
-        plt.title('ROC curve')
-        plt.savefig(f'{self.loggers[0].log_dir}/roc.jpg')
+            ax.plot([0,1], [baseline, baseline], linestyle='--', label='Baseline')
+
+        ax.set_xlim([0,1])
+        ax.set_ylim([0,1])
+
+        ax.set_xlabel('Recall')
+        ax.set_ylabel('Precision')
+        ax.set_title('PR curve')
+        ax.legend(loc='lower right')
 
         if stage == 'train':
-            self.loggers[0].experiment.add_figure(f'{stage}/ROC', plt, self.current_epoch)
+            self.loggers[0].experiment.add_figure(f'{stage}/PR_{stage}', fig, self.current_epoch)
         else:
-            plt.savefig(f'{self.loggers[0].log_dir}/roc.jpg', dpi=400)
+            fig.savefig(f'{self.loggers[0].log_dir}/pr_{stage}.jpg', dpi=400)
 
     
 
diff --git a/code/models/model_interface_classic.py b/code/models/model_interface_classic.py
new file mode 100644
index 0000000..d3eda00
--- /dev/null
+++ b/code/models/model_interface_classic.py
@@ -0,0 +1,757 @@
+import sys
+import numpy as np
+import re
+import inspect
+import importlib
+import random
+import pandas as pd
+import seaborn as sns
+from pathlib import Path
+from matplotlib import pyplot as plt
+import cv2
+from PIL import Image
+from pytorch_pretrained_vit import ViT
+
+#---->
+from MyOptimizer import create_optimizer
+from MyLoss import create_loss
+from utils.utils import cross_entropy_torch
+from utils.custom_resnet50 import resnet50_baseline
+
+from timm.loss import AsymmetricLossSingleLabel
+from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
+from libauc.losses import AUCMLoss, AUCM_MultiLabel, CompositionalAUCLoss
+from libauc.optimizers import PESG, PDSCA
+#---->
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchmetrics
+from torchmetrics.functional import stat_scores
+from torch import optim as optim
+from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingWarmRestarts
+
+from monai.config import KeysCollection
+from monai.data import Dataset, load_decathlon_datalist
+from monai.data.wsi_reader import WSIReader
+from monai.metrics import Cumulative, CumulativeAverage
+from monai.networks.nets import milmodel
+
+# from sklearn.metrics import roc_curve, auc, roc_auc_score
+
+
+#---->
+import pytorch_lightning as pl
+from .vision_transformer import vit_small
+import torchvision
+from torchvision import models
+from torchvision.models import resnet
+from transformers import AutoFeatureExtractor, ViTModel, SwinModel
+
+from pytorch_grad_cam import GradCAM, EigenGradCAM
+from pytorch_grad_cam.utils.image import show_cam_on_image
+from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+
+from captum.attr import LayerGradCam
+import models.ResNet as ResNet
+
+class FeatureExtractor(nn.Module):
+    def __init__(self, model_name, n_classes):
+        super().__init__()
+        self.n_classes = n_classes
+        self.out_features = 1024
+        self.model_ft = ResNet.resnet50(num_classes=self.n_classes, mlp=False, two_branch=False, normlinear=True)
+        home = Path.cwd().parts[1]
+        self.model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
+        for param in self.model_ft.parameters():
+            param.requires_grad = False
+        self.model_ft.fc = nn.Linear(2048, self.out_features)
+
+    def forward(self,x):
+        return self.model_ft(x)
+
+class ModelInterface_Classic(pl.LightningModule):
+
+    #---->init
+    def __init__(self, model, loss, optimizer, **kargs):
+        super(ModelInterface_Classic, self).__init__()
+        self.save_hyperparameters()
+        self.n_classes = model.n_classes
+        
+        # if self.n_classes>2:
+        #     self.aucm_loss = AUCM_MultiLabel(num_classes = self.n_classes, device=self.device)
+        # else:
+        #     self.aucm_loss = CompositionalAUCLoss()
+        # self.asl = AsymmetricLossSingleLabel()
+        # self.loss = LabelSmoothingCrossEntropy(smoothing=0.1)
+        self.loss = create_loss(loss, model.n_classes)
+
+        # self.loss = 
+        # print(self.model)
+        self.model_name = model.name
+        
+        
+        # self.ecam = EigenGradCAM(model = self.model, target_layers = target_layers, use_cuda=True, reshape_transform=self.reshape_transform)
+        self.optimizer = optimizer
+        
+        self.save_path = kargs['log']
+        if Path(self.save_path).parts[3] == 'tcmr':
+            temp = list(Path(self.save_path).parts)
+            # print(temp)
+            temp[3] = 'tcmr_viral'
+            self.save_path = '/'.join(temp)
+
+        # if kargs['task']:
+        #     self.task = kargs['task']
+        self.task = Path(self.save_path).parts[3]
+
+
+        #---->acc
+        self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
+        self.data_patient = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
+        # print(self.experiment)
+        #---->Metrics
+        if self.n_classes > 2: 
+            self.AUROC = torchmetrics.AUROC(task='multiclass', num_classes = self.n_classes, average='macro')
+            self.PRC = torchmetrics.PrecisionRecallCurve(task='multiclass', num_classes = self.n_classes)
+            self.ROC = torchmetrics.ROC(task='multiclass', num_classes=self.n_classes)
+            self.confusion_matrix = torchmetrics.ConfusionMatrix(task='multiclass', num_classes = self.n_classes) 
+            metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(task='multiclass', num_classes = self.n_classes,
+                                                                           average='weighted'),
+                                                     torchmetrics.CohenKappa(task='multiclass', num_classes = self.n_classes),
+                                                     torchmetrics.F1Score(task='multiclass', num_classes = self.n_classes,
+                                                                     average = 'macro'),
+                                                     torchmetrics.Recall(task='multiclass', average = 'macro',
+                                                                         num_classes = self.n_classes),
+                                                     torchmetrics.Precision(task='multiclass', average = 'macro',
+                                                                            num_classes = self.n_classes),
+                                                     torchmetrics.Specificity(task='multiclass', average = 'macro',
+                                                                            num_classes = self.n_classes)])
+                                                                            
+        else : 
+            self.AUROC = torchmetrics.AUROC(task='binary')
+            # self.AUROC = torchmetrics.AUROC(num_classes=self.n_classes, average = 'weighted')
+            self.PRC = torchmetrics.PrecisionRecallCurve(task='binary')
+            self.ROC = torchmetrics.ROC(task='binary')
+            metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(task='binary'),
+                                                     torchmetrics.CohenKappa(task='binary'),
+                                                     torchmetrics.F1Score(task='binary'),
+                                                     torchmetrics.Recall(task='binary'),
+                                                     torchmetrics.Precision(task='binary')
+                                                     ])
+            self.confusion_matrix = torchmetrics.ConfusionMatrix(task='binary')                                                                    
+        self.valid_metrics = metrics.clone(prefix = 'val_')
+        self.valid_patient_metrics = metrics.clone(prefix = 'val_patient_')
+        self.test_metrics = metrics.clone(prefix = 'test_')
+        self.test_patient_metrics = metrics.clone(prefix = 'test_patient_')
+
+        #--->random
+        self.shuffle = kargs['data'].data_shuffle
+        self.count = 0
+        # self.model_name = kargs['backbone']
+
+
+        if self.model_name == 'features':
+            self.model = None
+        elif self.model_name == 'inception':
+            # self.model = torch.hub.load('pytorch/vision:v0.10.0', 'inception_v3', pretrained=True)
+            self.model = models.inception_v3(weights='IMAGENET1K_V1')
+            self.model.aux_logits = False
+            ct = 0
+            for child in self.model.children():
+                ct += 1
+                if ct < 15:
+                    for parameter in child.parameters():
+                        parameter.requires_grad=False
+
+            
+            # self.model.AuxLogits.fc = nn.Linear(768, self.n_classes)
+            self.model.fc = nn.Linear(self.model.fc.in_features, self.n_classes)
+        elif self.model_name == 'resnet18':
+            self.model = models.resnet18(weights='IMAGENET1K_V1')
+            ct = 0
+            for child in self.model.children():
+                ct += 1
+                if ct < 7:
+                    for parameter in child.parameters():
+                        parameter.requires_grad=False
+            self.model.fc = nn.Sequential(
+                nn.Linear(self.model.fc.in_features, self.n_classes),
+            )
+        elif self.model_name == 'retccl':
+            # import models.ResNet as ResNet
+            self.model = ResNet.resnet50(num_classes=self.n_classes, mlp=False, two_branch=False, normlinear=True)
+            home = Path.cwd().parts[1]
+            # pre_model = 
+            # self.model.fc = nn.Identity()
+            # self.model.load_from_checkpoint(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth', strict=False)
+            self.model.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
+            for param in self.model.parameters():
+                param.requires_grad = False
+            self.model.fc = nn.Sequential(
+                nn.Linear(2048, 1024),
+                nn.GELU(),
+                nn.LayerNorm(1024),
+                nn.Linear(1024, 512),
+                nn.GELU(),
+                nn.LayerNorm(512),
+                nn.Linear(512, self.n_classes)
+            )
+        elif self.model_name == 'vit':
+            self.model = ViT('B_32_imagenet1k', pretrained = True) #vis=vis
+            for param in self.model.parameters():
+                param.requires_grad = False
+            self.model.fc = nn.Linear(self.model.fc.in_features, self.n_classes)
+            # print(self.model)
+            # input_size = 384
+
+        elif self.model_name == 'resnet50':
+        
+            self.model = resnet50_baseline(pretrained=True)
+            ct = 0
+            for child in self.model.children():
+                ct += 1
+                if ct < len(list(self.model.children())) - 10:
+                    for parameter in child.parameters():
+                        parameter.requires_grad=False
+            self.model.fc = nn.Sequential(
+                nn.Linear(self.model.fc.in_features, self.n_classes),
+            )
+            
+        elif self.model_name == 'efficientnet':
+            efficientnet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_widese_b0', pretrained=True)
+            for param in efficientnet.parameters():
+                param.requires_grad = False
+            # efn = list(efficientnet.children())[:-1]
+            efficientnet.classifier.fc = nn.Linear(1280, self.n_classes)
+            self.model = nn.Sequential(
+                efficientnet,
+            )
+        elif self.model_name == 'simple': #mil-ab attention
+            self.model = nn.Sequential(
+                nn.Conv2d(3, 20, kernel_size=5),
+                nn.ReLU(),
+                nn.MaxPool2d(2, stride=2),
+                nn.Conv2d(20, 50, kernel_size=5),
+                nn.ReLU(),
+                nn.MaxPool2d(2, stride=2),
+                View((-1, 53*53)),
+                nn.Linear(53*53, self.n_classes),
+                nn.ReLU(),
+            )
+
+
+    def forward(self, x):
+        # drop the singleton batch dimension before passing tiles to the CNN
+        x = x.squeeze(0)
+        return self.model(x)
+
+    def step(self, input):
+
+        input = input.float()
+        # logits, _ = self(input.contiguous()) 
+        logits = self(input.contiguous())
+        Y_hat = torch.argmax(logits, dim=1)
+        Y_prob = F.softmax(logits, dim=1)
+
+        return logits, Y_prob, Y_hat
+
+    def training_step(self, batch, batch_idx):
+
+        input, label, _= batch
+
+        # label_filled = torch.full([input.shape[1]], label.item(), device=self.device)
+
+        logits, Y_prob, Y_hat = self.step(input) 
+
+        loss = self.loss(logits, label)
+
+        
+        for y, y_hat in zip(label, Y_hat):
+            y = int(y)
+            self.data[y]["count"] += 1
+            self.data[y]["correct"] += (int(y_hat) == y)
+
+        self.log('loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+
+        return {'loss': loss, 'Y_prob': Y_prob, 'Y_hat': Y_hat, 'label': label} 
+
+    def training_epoch_end(self, training_step_outputs):
+
+        # logits = torch.cat([x['logits'] for x in training_step_outputs], dim = 0)
+        probs = torch.cat([x['Y_prob'] for x in training_step_outputs])
+        max_probs = torch.cat([x['Y_hat'] for x in training_step_outputs])
+        target = torch.cat([x['label'] for x in training_step_outputs])
+
+        if self.current_epoch % 5 == 0:
+            for c in range(self.n_classes):
+                count = self.data[c]["count"]
+                correct = self.data[c]["correct"]
+                if count == 0: 
+                    acc = None
+                else:
+                    acc = float(correct) / count
+                print('class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
+        self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
+
+        if self.current_epoch % 10 == 0:
+            self.log_confusion_matrix(max_probs, target.squeeze(), stage='train')
+
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
+        self.log('Train/auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+
+    def validation_step(self, batch, batch_idx):
+
+        input, label, (wsi_name, tile_name, patient) = batch
+        # label_filled = torch.full([input.shape[1]], label.item(), device=self.device)
+        
+        logits, Y_prob, Y_hat = self.step(input) 
+        logits = logits.detach()
+        Y_prob = Y_prob.detach()
+        Y_hat = Y_hat.detach()
+
+        # loss = self.loss(logits, label)
+        loss = cross_entropy_torch(logits, label)
+
+        for y, y_hat in zip(label, Y_hat):
+            y = int(y)
+            self.data[y]["count"] += 1
+            self.data[y]["correct"] += (int(y_hat) == y)
+        
+        # self.data[Y]["correct"] += (Y_hat.item() == Y)
+        self.log('val_loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': wsi_name, 'patient': patient, 'tile_name': tile_name, 'loss': loss}
+
+
+    def validation_epoch_end(self, val_step_outputs):
+        
+        logits = torch.cat([x['logits'] for x in val_step_outputs], dim = 0)
+        probs = torch.cat([x['Y_prob'] for x in val_step_outputs])
+        max_probs = torch.cat([x['Y_hat'] for x in val_step_outputs])
+        target = torch.cat([x['label'] for x in val_step_outputs])
+        slide_names = []
+        for x in val_step_outputs:
+            slide_names += list(x['name'])
+        patients = []
+        for x in val_step_outputs:
+            patients += list(x['patient'])
+        tile_name = []
+        for x in val_step_outputs:
+            tile_name += list(x['tile_name'])
+
+        loss = torch.stack([x['loss'] for x in val_step_outputs])
+
+        self.log_dict(self.valid_metrics(max_probs.squeeze(), target.squeeze()),
+                          on_epoch = True, logger = True, sync_dist=True)
+
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
+        if len(target.unique()) != 1:
+            self.log('val_auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            # self.log('val_patient_auc', self.AUROC(patient_score, patient_target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('val_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+
+
+
+        self.log_confusion_matrix(max_probs, target.squeeze(), stage='val')
+
+        #----> log per patient metrics
+        complete_patient_dict = {}
+        patient_list = []            
+        patient_score = []      
+        patient_target = []
+
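+        # group tile-level probabilities per patient and slide:
+        # complete_patient_dict[patient][slide] = [tile_prob, ...]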
+        for p, s, pr, t in zip(patients, slide_names, probs, target):
+
+            if p not in complete_patient_dict.keys():
+                complete_patient_dict[p] = {s:[]}
+                patient_target.append(t)
+                
+            elif s not in complete_patient_dict[p].keys():
+                complete_patient_dict[p][s] = []
+            complete_patient_dict[p][s].append(pr)
+            
+
+        for p in complete_patient_dict.keys():
+            score = []
+            for slide in complete_patient_dict[p].keys():
+
+                slide_score = torch.stack(complete_patient_dict[p][slide])
+                if self.n_classes == 2:
+                    positive_positions = (slide_score.argmax(dim=1) == 1).nonzero().squeeze()
+                    if positive_positions.numel() != 0:
+                        slide_score = slide_score[positive_positions]
+                if len(slide_score.shape)>1:
+                    slide_score = torch.mean(slide_score, dim=0)
+
+                score.append(slide_score)
+            score = torch.stack(score)
+            if self.n_classes == 2:
+                positive_positions = (score.argmax(dim=1) == 1).nonzero().squeeze()
+                if positive_positions.numel() != 0:
+                    score = score[positive_positions]
+            if len(score.shape) > 1:
+                score = torch.mean(score, dim=0)
+            patient_score.append(score)    
+
+        patient_score = torch.stack(patient_score)
+        patient_target = torch.stack(patient_target)
+        if self.n_classes <=2:
+            patient_score = patient_score[:,1]
+        if len(patient_target.unique()) != 1:
+            self.log('val_patient_auc', self.AUROC(patient_score, patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('val_patient_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        
+        self.log_dict(self.valid_patient_metrics(patient_score, patient_target),
+                          on_epoch = True, logger = True, sync_dist=True)
+        
+            
+
+        # precision, recall, thresholds = self.PRC(probs, target)
+
+        
+
+        #---->acc log
+        for c in range(self.n_classes):
+            count = self.data[c]["count"]
+            correct = self.data[c]["correct"]
+            if count == 0: 
+                acc = None
+            else:
+                acc = float(correct) / count
+            print('val class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
+        self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
+        
+        #---->random, if shuffle data, change seed
+        # if self.shuffle == True:
+        #     self.count = self.count+1
+        #     random.seed(self.count*50)
+
+
+
+    def test_step(self, batch, batch_idx):
+
+        input, label, (wsi_name, tile_name, patient) = batch
+
+        logits, Y_prob, Y_hat = self.step(input) 
+
+        #---->acc log
+        for y, y_hat in zip(label, Y_hat):
+            y = int(y)
+            self.data[y]["count"] += 1
+            self.data[y]["correct"] += (int(y_hat) == y)
+
+        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': wsi_name, 'patient': patient, 'tile_name': tile_name}
+
+    def test_epoch_end(self, output_results):
+
+        logits = torch.cat([x['logits'] for x in output_results], dim = 0)
+        probs = torch.cat([x['Y_prob'] for x in output_results])
+        max_probs = torch.cat([x['Y_hat'] for x in output_results])
+        target = torch.cat([x['label'] for x in output_results])
+        slide_names = []
+        for x in output_results:
+            slide_names += list(x['name'])
+        patients = []
+        for x in output_results:
+            patients += list(x['patient'])
+        tile_name = []
+        for x in output_results:
+            tile_name += list(x['tile_name'])
+
+        self.log_dict(self.test_metrics(max_probs.squeeze(), target.squeeze()),
+                          on_epoch = True, logger = True, sync_dist=True)
+        self.log('test_loss', cross_entropy_torch(logits.squeeze(), target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
+
+        if len(target.unique()) != 1:
+            self.log('test_auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            # self.log('val_patient_auc', self.AUROC(patient_score, patient_target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('test_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+
+        #----> log confusion matrix
+        self.log_confusion_matrix(max_probs.squeeze(), target.squeeze(), stage='test')
+
+        #----> log per patient metrics
+        complete_patient_dict = {}
+        patient_list = []            
+        patient_score = []      
+        patient_target = []
+
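+        # Aggregate slide-level probabilities per patient: for binary tasks,
+        # only predictions whose argmax is the positive class are kept before
+        # averaging, biasing the patient score towards positive evidence.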
+        for p, s, pr, t in zip(patients, slide_names, probs, target):
+
+            if p not in complete_patient_dict.keys():
+                complete_patient_dict[p] = {s:[]}
+                patient_target.append(t)
+                
+            elif s not in complete_patient_dict[p].keys():
+                complete_patient_dict[p][s] = []
+            complete_patient_dict[p][s].append(pr)
+            
+
+        for p in complete_patient_dict.keys():
+            score = []
+            for slide in complete_patient_dict[p].keys():
+
+                slide_score = torch.stack(complete_patient_dict[p][slide])
+                if self.n_classes == 2:
+                    positive_positions = (slide_score.argmax(dim=1) == 1).nonzero().squeeze()
+                    if positive_positions.numel() != 0:
+                        slide_score = slide_score[positive_positions]
+                if len(slide_score.shape)>1:
+                    slide_score = torch.mean(slide_score, dim=0)
+
+                score.append(slide_score)
+            score = torch.stack(score)
+            if self.n_classes == 2:
+                positive_positions = (score.argmax(dim=1) == 1).nonzero().squeeze()
+                if positive_positions.numel() != 0:
+                    score = score[positive_positions]
+            if len(score.shape) > 1:
+                score = torch.mean(score, dim=0)
+            patient_score.append(score)    
+
+        patient_score = torch.stack(patient_score)
+        patient_target = torch.stack(patient_target)
+        if self.n_classes <=2:
+            patient_score = patient_score[:,1]
+        if len(patient_target.unique()) != 1:
+            self.log('test_patient_auc', self.AUROC(patient_score, patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('test_patient_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        
+        self.log_dict(self.valid_patient_metrics(patient_score, patient_target),
+                          on_epoch = True, logger = True, sync_dist=True)
+
+        # precision, recall, thresholds = self.PRC(probs, target)
+
+        #---->acc log
+        for c in range(self.n_classes):
+            count = self.data[c]["count"]
+            correct = self.data[c]["correct"]
+            if count == 0: 
+                acc = None
+            else:
+                acc = float(correct) / count
+            print('test class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
+        self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
+        
+
+        #---->random, if shuffle data, change seed
+        if self.shuffle == True:
+            self.count = self.count+1
+            random.seed(self.count*50)
+
+    def configure_optimizers(self):
+        # optimizer_ft = optim.Adam(self.model_ft.parameters(), lr=self.optimizer.lr*0.1)
+        optimizer = create_optimizer(self.optimizer, self.model)
+        # optimizer_aucm = PESG(self.model, loss_fn=self.aucm_loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
+        # optimizer_aucm = PDSCA(self.model, loss_fn=self.aucm_loss, lr=0.005, margin=1.0, epoch_decay=2e-3, weight_decay=1e-4, beta0=0.9, beta1=0.9, device=self.device)
+        # optimizer = PDSCA(self.model, loss_fn=self.loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
+        # scheduler = {'scheduler': CosineAnnealingLR(optimizer, mode='min', factor=0.5), 'monitor': 'val_loss', 'frequency': 5}
+        scheduler = {'scheduler': ReduceLROnPlateau(optimizer, mode='min', factor=0.1), 'monitor': 'val_loss', 'frequency': 5}
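+        # With the default interval of 'epoch', frequency=5 makes Lightning
+        # step this ReduceLROnPlateau against val_loss only every 5 epochs.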
+        # scheduler_aucm = {'scheduler': CosineAnnealingWarmRestarts(optimizer_aucm, T_0=20)}
+        
+        # return [optimizer_adam, optimizer_aucm], [scheduler_adam, scheduler_aucm]     
+        # return [optimizer_aucm], [scheduler_aucm]     
+        return [optimizer], [scheduler]
+
+    # def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
+    #     optimizer.zero_grad(set_to_none=True)
+
+    def reshape_transform(self, tensor):
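+        # Pad the [B, N, C] token sequence up to the next square number and
+        # reshape it to [B, C, H, W] so Grad-CAM can treat the tokens as a
+        # 2D feature map (e.g. N=10 tokens are padded to 16 and laid out 4x4).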
+        # print(tensor.shape)
+        H = tensor.shape[1]
+        _H, _W = int(np.ceil(np.sqrt(H))), int(np.ceil(np.sqrt(H)))
+        add_length = _H * _W - H
+        tensor = torch.cat([tensor, tensor[:,:add_length,:]],dim = 1)
+        result = tensor[:, :, :].reshape(tensor.size(0), _H, _W, tensor.size(2))
+        result = result.transpose(2,3).transpose(1,2)
+        # print(result.shape)
+        return result
+
+    def load_model(self):
+        name = self.hparams.model.name
+        # The model file name is converted to the corresponding class name:
+        # e.g. a file `trans_unet.py` is expected to define a class (or
+        # function) named `TransUnet`.
+        if name == 'ViT':
+            self.model = ViT
+
+        if '_' in name:
+            camel_name = ''.join([i.capitalize() for i in name.split('_')])
+        else:
+            camel_name = name
+        try:
+            Model = getattr(importlib.import_module(
+                f'models.{name}'), camel_name)
+        except (ImportError, AttributeError):
+            raise ValueError('Invalid Module File Name or Invalid Class Name!')
+        self.model = self.instancialize(Model)
+
+        # if backbone == 'retccl':
+
+        #     self.model_ft = ResNet.resnet50(num_classes=self.n_classes, mlp=False, two_branch=False, normlinear=True)
+        #     home = Path.cwd().parts[1]
+        #     # self.model_ft.fc = nn.Identity()
+        #     # self.model_ft.load_from_checkpoint(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth', strict=False)
+        #     self.model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
+        #     for param in self.model_ft.parameters():
+        #         param.requires_grad = False
+        #     self.model_ft.fc = nn.Linear(2048, self.out_features)
+        
+        # elif backbone == 'resnet50':
+        #     self.model_ft = resnet50_baseline(pretrained=True)
+        #     for param in self.model_ft.parameters():
+        #         param.requires_grad = False
+
+
+
+    def instancialize(self, Model, **other_args):
+        """ Instantiate a model using the corresponding parameters
+            from the self.hparams dictionary. Any keyword args passed in
+            overwrite the corresponding values in self.hparams.
+        """
+        # getargspec is deprecated (removed in Python 3.11); getfullargspec
+        # behaves the same here.
+        class_args = inspect.getfullargspec(Model.__init__).args[1:]
+        inkeys = self.hparams.model.keys()
+        args1 = {}
+        for arg in class_args:
+            if arg in inkeys:
+                args1[arg] = getattr(self.hparams.model, arg)
+        args1.update(other_args)
+
+
+        return Model(**args1)
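+
+    # Usage sketch: self.instancialize(TransMIL, n_classes=2) fills the
+    # remaining TransMIL.__init__ arguments (e.g. in_features) from
+    # self.hparams.model; explicitly passed kwargs take precedence.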
+
+    def log_image(self, tensor, stage, name):
+        # Normalize to [0, 255] uint8 and log the tile in HWC layout.
+        tile = tensor.cpu().numpy().transpose(1, 2, 0)
+        tile = (tile - tile.min()) / (tile.max() - tile.min()) * 255
+        tile = tile.astype(np.uint8)
+        self.loggers[0].experiment.add_image(f'{stage}/{name}', tile, self.current_epoch, dataformats='HWC')
+
+
+    def log_confusion_matrix(self, max_probs, target, stage):
+        confmat = self.confusion_matrix(max_probs, target)
+        print(confmat)
+        df_cm = pd.DataFrame(confmat.cpu().numpy(), index=range(self.n_classes), columns=range(self.n_classes))
+        fig_ = sns.heatmap(df_cm, annot=True, fmt='d', cmap='Spectral').get_figure()
+        if stage == 'train':
+            self.loggers[0].experiment.add_figure(f'{stage}/Confusion matrix', fig_, self.current_epoch)
+        else:
+            fig_.savefig(f'{self.loggers[0].log_dir}/cm_{stage}.png', dpi=400)
+
+        fig_.clf()
+
+    def log_roc_curve(self, probs, target, stage, comment=''):
+
+        fpr_list, tpr_list, thresholds = self.ROC(probs, target)
+        # print(fpr_list)
+        # print(tpr_list)
+
+        fig = plt.figure(1)
+        if self.n_classes > 2:
+            for i in range(len(fpr_list)):
+                fpr = fpr_list[i].cpu().numpy()
+                tpr = tpr_list[i].cpu().numpy()
+                plt.plot(fpr, tpr, label=f'class_{i}')
+        else: 
+            # print(fpr_list)
+            fpr = fpr_list[0].cpu().numpy()
+            tpr = tpr_list[0].cpu().numpy()
+            plt.plot(fpr, tpr)
+        
+        plt.xlabel('False positive rate')
+        plt.ylabel('True positive rate')
+        plt.title('ROC curve')
+
+        if stage == 'train':
+            self.loggers[0].experiment.add_figure(f'{stage}/ROC_{comment}', fig, self.current_epoch)
+        else:
+            fig.savefig(f'{self.loggers[0].log_dir}/roc_{comment}.jpg', dpi=400)
+
+    
+
+class View(nn.Module):
+    def __init__(self, shape):
+        super().__init__()
+        self.shape = shape
+
+    def forward(self, input):
+        '''
+        Reshapes the input according to the shape saved in the view data structure.
+        '''
+        out = input.view(*self.shape)
+        return out
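+
+# Usage sketch: nn.Sequential(View((-1, 512))) applies tensor.view, e.g.
+# mapping an input of shape (4, 2, 512) to (8, 512).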
+
diff --git a/code/test_visualize.py b/code/test_visualize.py
index 79f814c..623066e 100644
--- a/code/test_visualize.py
+++ b/code/test_visualize.py
@@ -51,26 +51,63 @@ class custom_test_module(ModelInterface):
     # self.task = kargs['task']    
     # self.task = 'tcmr_viral'
 
+    # def forward(self, x):
+    #     batch_size = x.shape[0]
+    #     bag_size = x.shape[1]
+    #     x = x.view(batch_size*bag_size, x.shape[2], x.shape[3], x.shape[4])
+    #     feats = self.model_ft(x).unsqueeze(0)
+    #     feats = feats.view(batch_size, bag_size, -1)
+    #     return self.model(feats)
+
+
+
+
     def test_step(self, batch, batch_idx):
 
-        torch.set_grad_enabled(True)
+        print('custom: ', self.backbone)
+        print(self.model_ft.device)
+        
 
-        input_data, label, (wsi_name, batch_names, patient) = batch
-        patient = patient[0]
-        wsi_name = wsi_name[0]
-        label = label.float()
-        # logits, Y_prob, Y_hat = self.step(data) 
-        # print(data.shape)
-        input_data = input_data.squeeze(0).float()
-        # print(self.model_ft)
-        # print(self.model)
-        logits, _ = self(input_data)
-        # attn = attn.detach()
-        # logits = logits.detach()
+        torch.set_grad_enabled(True)
 
-        Y = torch.argmax(label)
+        input, label, (wsi_name, patient) = batch
+        
+        print(input.device)
+        # input, label, (wsi_name, batch_names, patient) = batch
+        # label = label.float()
+        # 
+        # feature extraction
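+        # Flatten the bag into a plain image batch for the CNN backbone, then
+        # restore the (batch, bag, features) layout for the MIL aggregator.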
+        x = input
+        batch_size = x.shape[0]
+        bag_size = x.shape[1]
+        # new_shape = (batch_size*bag_size, x.shape[2], x.shape[3], x.shape[4])
+        # x = x.view(new_shape)
+        x = x.view(batch_size*bag_size, x.shape[2], x.shape[3], x.shape[4])
+        data_ft = self.model_ft(x).unsqueeze(0)
+        data_ft = data_ft.view(batch_size, bag_size, -1)
+
+        logits = self.model(data_ft) 
         Y_hat = torch.argmax(logits, dim=1)
-        Y_prob = F.softmax(logits, dim=1)
+        Y_prob = F.softmax(logits, dim = 1)
+        # logits, Y_prob, Y_hat = self.model(data_ft) 
+        loss = self.loss(logits, label)
+
+        # input_data, label, (wsi_name, batch_names, patient) = batch
+        # patient = patient[0]
+        # wsi_name = wsi_name[0]
+        # label = label.float()
+        # # logits, Y_prob, Y_hat = self.step(data) 
+        # # print(data.shape)
+        # input_data = input_data.squeeze(0).float()
+        # # print(self.model_ft)
+        # # print(self.model)
+        # logits, _ = self(input_data)
+        # # attn = attn.detach()
+        # # logits = logits.detach()
+
+        # Y = torch.argmax(label)
+        # Y_hat = torch.argmax(logits, dim=1)
+        # Y_prob = F.softmax(logits, dim=1)
 
         
 
@@ -92,13 +129,13 @@ class custom_test_module(ModelInterface):
             target_layers = [self.model.attention_weights]
             self.cam = GradCAM(model = self.model, target_layers = target_layers, use_cuda=True)
 
-        if self.model_ft:
-            data_ft = self.model_ft(input_data).unsqueeze(0).float()
-        else:
-            data_ft = input_data.unsqueeze(0).float()
-        instance_count = input_data.size(0)
+        # if self.model_ft:
+        #     data_ft = self.model_ft(input_data).unsqueeze(0).float()
+        # else:
+        #     data_ft = input_data.unsqueeze(0).float()
+        instance_count = input.size(0)
         # data_ft.requires_grad=True
-        
+        Y = torch.argmax(label)
         target = [ClassifierOutputTarget(Y)]
         # print(target)
         
@@ -111,13 +148,13 @@ class custom_test_module(ModelInterface):
         k = 10
         summed = torch.mean(grayscale_cam, dim=2)
         topk_tiles, topk_indices = torch.topk(summed.squeeze(0), k, dim=0)
-        topk_data = input_data[topk_indices].detach()
+        topk_data = input[topk_indices].detach()
         # print(topk_tiles)
         
         #----------------------------------------------------
         # Log Correct/Count
         #----------------------------------------------------
-        Y = torch.argmax(label)
+        # Y = torch.argmax(label)
         self.data[Y]["count"] += 1
         self.data[Y]["correct"] += (Y_hat.item() == Y)
 
@@ -143,58 +180,84 @@ class custom_test_module(ModelInterface):
 
         logits = torch.cat([x['logits'] for x in output_results], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in output_results])
-        max_probs = torch.stack([x['Y_hat'] for x in output_results])
-        # target = torch.stack([x['label'] for x in output_results], dim = 0)
-        target = torch.stack([x['label'] for x in output_results])
-        # target = torch.argmax(target, dim=1)
-        slide = [x['name'] for x in output_results]
+        # max_probs = torch.stack([x['Y_hat'] for x in output_results])
+        max_probs = torch.cat([x['Y_hat'] for x in output_results])
+        target = torch.cat([x['label'] for x in output_results])
+        slide_names = [x['name'] for x in output_results]
         patients = [x['patient'] for x in output_results]
-        topk_tiles = [x['topk_data'] for x in output_results]
+        loss = torch.stack([x['loss'] for x in output_results])
         #---->
 
-        if len(target.unique()) !=1:
-            auc = self.AUROC(probs, target)
-        else: auc = torch.tensor(0)
-        metrics = self.test_metrics(logits , target)
+        self.log_dict(self.test_metrics(max_probs.squeeze(), target.squeeze()),
+                          on_epoch = True, logger = True, sync_dist=True)
+        self.log('test_loss', loss.mean(), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
+        if self.n_classes <= 2:
+            out_probs = probs[:, 1]
+        else:
+            out_probs = probs
 
-        # metrics = self.test_metrics(max_probs.squeeze() , torch.argmax(target.squeeze(), dim=1))
-        metrics['test_auc'] = auc
+        if len(target.unique()) != 1:
+            self.log('test_auc', self.AUROC(out_probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:
+            self.log('test_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
-        # print(metrics)
-        np_metrics = {k: metrics[k].item() for k in metrics.keys()}
-        # print(np_metrics)
 
-        
+
+        #----> log confusion matrix
+        self.log_confusion_matrix(max_probs, target, stage='test')
+
+        #----> log per patient metrics
         complete_patient_dict = {}
-        '''
-        Patient
-        -> slides:
-            -> SlideName:
-                ->probs = [0.5, 0.5] 
-                ->topk = [10,3,224,224]
-        -> score = []
-        '''
+        patient_list = []            
+        patient_score = []      
+        patient_target = []
+        patient_class_score = 0
 
 
-        for p, s, l, topkt in zip(patients, slide, probs, topk_tiles):
+        for p, s, pr, t in zip(patients, slide_names, probs, target):
+            p = p[0]
+            # print(s[0])
+            # print(pr)
             if p not in complete_patient_dict.keys():
-                complete_patient_dict[p] = {'slides':{}}
-            complete_patient_dict[p]['slides'][s] = {'probs': l, 'topk':topkt}
+                complete_patient_dict[p] = {'scores':[(s[0], pr)], 'patient_score': 0}
+                # print((s,pr))
+                # complete_patient_dict[p]['scores'] = []
+                # print(t)
+                patient_target.append(t)
+            else:
+                complete_patient_dict[p]['scores'].append((s[0], pr))
 
-        patient_list = []            
-        patient_score = []            
         for p in complete_patient_dict.keys():
+            # complete_patient_dict[p] = 0
             score = []
-            
-            for s in complete_patient_dict[p]['slides'].keys():
-                score.append(complete_patient_dict[p]['slides'][s]['probs'])
-            score = torch.mean(torch.stack(score), dim=0) #.cpu().detach().numpy()
-            complete_patient_dict[p]['score'] = score
-            # print(p, score)
-            patient_list.append(p)    
-            patient_score.append(score)    
-
+            for (slide, probs) in complete_patient_dict[p]['scores']:
+                score.append(probs)
+            # print(score)
+            score = torch.stack(score)
+            # print(score)
+            if self.n_classes == 2:
+                positive_positions = (score.argmax(dim=1) == 1).nonzero().squeeze()
+                # print(positive_positions)
+                if positive_positions.numel() != 0:
+                    score = score[positive_positions]
+            if len(score.shape) > 1:
+                score = torch.mean(score, dim=0) #.cpu().detach().numpy()
+
+            patient_score.append(score)  
+            complete_patient_dict[p]['patient_score'] = score
+        correct_patients = []
+        false_patients = []
+
+        for patient, label in zip(complete_patient_dict.keys(), patient_target):
+            if label == 0:
+                p_score =  complete_patient_dict[patient]['patient_score']
+                # print(torch.argmax(patient_score))
+                if torch.argmax(p_score) == label:
+                    correct_patients.append(patient)
+                else: 
+                    false_patients.append(patient)
         # print(patient_list)
         #topk patients: 
 
@@ -231,7 +294,6 @@ class custom_test_module(ModelInterface):
 
             patient_top_slides = {} 
             for p in topk_patients:
-                # print(p)
                 output_dict[class_name][p] = {}
-                output_dict[class_name][p]['Patient_Score'] = complete_patient_dict[p]['score'].cpu().detach().numpy().tolist()
+                output_dict[class_name][p]['Patient_Score'] = complete_patient_dict[p]['patient_score'].cpu().detach().numpy().tolist()
 
@@ -303,7 +365,7 @@ class custom_test_module(ModelInterface):
         #     return coords
 
         home = Path.cwd().parts[1]
-        jpg_dir = f'/{home}/ylan/data/DeepGraft/224_128um_annotated/Aachen_Biopsy_Slides/BLOCKS'
+        jpg_dir = f'/{home}/ylan/data/DeepGraft/224_256uM_annotated/Aachen_Biopsy_Slides/BLOCKS'
 
         coords = batch_names.squeeze()
         data = []
@@ -477,23 +539,37 @@ def main(cfg):
     # cfg.Data.label_file = '/home/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
     # cfg.Data.patient_slide = '/homeStor1/ylan/DeepGraft/training_tables/cohort_stain_dict.json'
     # cfg.Data.data_dir = '/homeStor1/ylan/data/DeepGraft/224_128um_v2/'
+    train_classic = False
+    if cfg.Model.name in ['inception', 'resnet18', 'resnet50', 'vit']:
+        train_classic = True
+        use_features = False
     if cfg.Model.backbone == 'features':
-        use_features = True
-    else: use_features = False
+        use_features = False
+        cfg.Model.backbone = 'retccl'
+    else: 
+        use_features = False
+
+    print(cfg.Model.backbone)
+    # use_features = False
+
     DataInterface_dict = {
                 'data_root': cfg.Data.data_dir,
                 'label_path': cfg.Data.label_file,
                 'batch_size': cfg.Data.train_dataloader.batch_size,
                 'num_workers': cfg.Data.train_dataloader.num_workers,
                 'n_classes': cfg.Model.n_classes,
-                'backbone': cfg.Model.backbone,
                 'bag_size': cfg.Data.bag_size,
                 'use_features': use_features,
+                'mixup': cfg.Data.mixup,
+                'aug': cfg.Data.aug,
+                'cache': cfg.Data.cache,
+                'train_classic': train_classic,
+                'model_name': cfg.Model.name,
                 }
 
     dm = MILDataModule(**DataInterface_dict)
     
-
+    # print(cfg.Model.backbone)
     #---->Define Model
     ModelInterface_dict = {'model': cfg.Model,
                             'loss': cfg.Loss,
@@ -503,6 +579,7 @@ def main(cfg):
                             'backbone': cfg.Model.backbone,
                             'task': cfg.task,
                             }
+    # print(ModelInterface_dict)
     # model = ModelInterface(**ModelInterface_dict)
     model = custom_test_module(**ModelInterface_dict)
     # model._fc1 = nn.Sequential(nn.Linear(512, 512), nn.GELU())
@@ -551,8 +628,8 @@ def main(cfg):
     for path in model_paths:
         # with open(f'{log_path}/test_metrics.txt', 'w') as f:
         #     f.write(str(path) + '\n')
-        print(path)
         new_model = model.load_from_checkpoint(checkpoint_path=path, cfg=cfg)
+        new_model.init_backbone()
         new_model.save_path = Path(cfg.log_path) / 'visualization'
         trainer.test(model=new_model, datamodule=dm)
     
@@ -616,10 +693,12 @@ if __name__ == '__main__':
     from models import TransMIL
     from datasets.zarr_feature_dataloader_simple import ZarrFeatureBagLoader
     from datasets.feature_dataloader import FeatureBagLoader
+    from datasets.jpg_dataloader import JPGMILDataloader
     from torch.utils.data import random_split, DataLoader
     import time
     from tqdm import tqdm
     import torchmetrics
+    import models.ResNet as ResNet
 
     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
     print(device)
@@ -642,7 +721,19 @@ if __name__ == '__main__':
     n_classes = hyper_parameters['model']['n_classes']
 
     # model = TransMIL()
-    model = TransMIL(n_classes).to(device)
+
+    model_ft = ResNet.resnet50(num_classes=128, mlp=False, two_branch=False, normlinear=True)
+    home = Path.cwd().parts[1]
+    # pre_model = 
+    # self.model_ft.fc = nn.Identity()
+    # self.model_ft.load_from_checkpoint(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth', strict=False)
+    model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
+    for param in model_ft.parameters():
+        param.requires_grad = False
+    model_ft.fc = torch.nn.Identity()
+    model_ft.to(device)
+
+    model = TransMIL(n_classes=n_classes, in_features=2048).to(device)
     model_weights = checkpoint['state_dict']
 
     for key in list(model_weights):
@@ -667,9 +758,9 @@ if __name__ == '__main__':
     model.eval()
 
     home = Path.cwd().parts[1]
-    data_root = f'/{home}/ylan/data/DeepGraft/224_128uM_annotated'
+    data_root = f'/{home}/ylan/data/DeepGraft/224_256uM_annotated'
     label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
-    dataset = FeatureBagLoader(data_root, label_path=label_path, mode='test', cache=False, n_classes=n_classes)
+    dataset = JPGMILDataloader(data_root, label_path=label_path, mode='test', cache=False, n_classes=n_classes, model_name = 'TransMIL')
 
     dl = DataLoader(dataset, batch_size=1, num_workers=8)
 
@@ -693,7 +784,15 @@ if __name__ == '__main__':
         # print(bag.shape)
         bag = bag.unsqueeze(0)
         with torch.cuda.amp.autocast():
-            logits = model(bag)
+            batch_size = bag.shape[0]
+            bag_size = bag.shape[1]
+            bag = bag.view(batch_size*bag_size, bag.shape[2], bag.shape[3], bag.shape[4])
+            feats = model_ft(bag).unsqueeze(0)
+            # print(feats.shape)
+            # print(x.shape)
+            # if feats.dim() == 3:
+            feats = feats.view(batch_size, bag_size, -1)
+            logits = model(feats)
         Y_hat = torch.argmax(logits, dim=1)
         Y_prob = F.softmax(logits, dim = 1)
 
diff --git a/code/train.py b/code/train.py
index 53ab165..984a609 100644
--- a/code/train.py
+++ b/code/train.py
@@ -8,6 +8,7 @@ from sklearn.model_selection import KFold
 from datasets.data_interface import MILDataModule, CrossVal_MILDataModule
 # from datasets.data_interface import DataInterface, MILDataModule, CrossVal_MILDataModule
 from models.model_interface import ModelInterface
+from models.model_interface_classic import ModelInterface_Classic
 from models.model_interface_dtfd import ModelInterface_DTFD
 import models.vision_transformer as vits
 from utils.utils import *
@@ -70,9 +71,9 @@ def make_parse():
     parser.add_argument('--loss', default = 'CrossEntropyLoss', type=str)
     parser.add_argument('--fold', default = 0)
     parser.add_argument('--bag_size', default = 1024, type=int)
+    # parser.add_argument('--batch_size', default = 1, type=int)
     parser.add_argument('--resume_training', action='store_true')
     parser.add_argument('--label_file', type=str)
-    # parser.add_argument('--ckpt_path', default = , type=str)
     
 
     args = parser.parse_args()
@@ -103,6 +104,13 @@ def main(cfg):
     #             'dataset_cfg': cfg.Data,}
     # dm = DataInterface(**DataInterface_dict)
     home = Path.cwd().parts[1]
+
+    train_classic = False
+    if cfg.Model.name in ['inception', 'resnet18', 'resnet50', 'vit']:
+        train_classic = True
+        use_features = False
+
+
     if cfg.Model.backbone == 'features':
         use_features = True
     else: use_features = False
@@ -116,6 +124,9 @@ def main(cfg):
                 'use_features': use_features,
                 'mixup': cfg.Data.mixup,
                 'aug': cfg.Data.aug,
+                'cache': cfg.Data.cache,
+                'train_classic': train_classic,
+                'model_name': cfg.Model.name,
                 }
 
     if cfg.Data.cross_val:
@@ -124,6 +135,7 @@ def main(cfg):
     
 
     #---->Define Model
+    
     ModelInterface_dict = {'model': cfg.Model,
                             'loss': cfg.Loss,
                             'optimizer': cfg.Optimizer,
@@ -131,9 +143,14 @@ def main(cfg):
                             'log': cfg.log_path,
                             'backbone': cfg.Model.backbone,
                             'task': cfg.task,
+                            'in_features': cfg.Model.in_features,
+                            'out_features': cfg.Model.out_features,
                             }
-    if cfg.Model.name == 'DTFDMIL':
-        model = ModelInterface_DTFD(**ModelInterface_dict)
+
+    if train_classic:
+        model = ModelInterface_Classic(**ModelInterface_dict)
+    # elif cfg.Model.name == 'DTFDMIL':
+    #     model = ModelInterface_DTFD(**ModelInterface_dict)
     else:
         model = ModelInterface(**ModelInterface_dict)
     
@@ -169,7 +186,7 @@ def main(cfg):
             logger=cfg.load_loggers,
             callbacks=cfg.callbacks,
             max_epochs= cfg.General.epochs,
-            min_epochs = 100,
+            min_epochs = 150,
 
             # gpus=cfg.General.gpus,
             accelerator='gpu',
@@ -179,10 +196,12 @@ def main(cfg):
             precision=cfg.General.precision,  
             accumulate_grad_batches=cfg.General.grad_acc,
             gradient_clip_val=0.0,
+            log_every_n_steps=10,
             # fast_dev_run = True,
             # limit_train_batches=1,
             
             # deterministic=True,
+            # num_sanity_val_steps=0,
             check_val_every_n_epoch=1,
         )
     # print(cfg.log_path)
@@ -192,21 +211,16 @@ def main(cfg):
 
     # home = Path.cwd()[0]
 
-    copy_path = Path(trainer.loggers[0].log_dir) / 'code'
-    copy_path.mkdir(parents=True, exist_ok=True)
-    copy_origin = '/' / Path('/'.join(cfg.log_path.parts[1:5])) / 'code'
-    # print(copy_path)
-    # print(copy_origin)
-    shutil.copytree(copy_origin, copy_path, dirs_exist_ok=True)
+    if cfg.General.server == 'train':
 
-    
-    # print(trainer.loggers[0].log_dir)
+        copy_path = Path(trainer.loggers[0].log_dir) / 'code'
+        copy_path.mkdir(parents=True, exist_ok=True)
+        copy_origin = '/' / Path('/'.join(cfg.log_path.parts[1:5])) / 'code'
+        shutil.copytree(copy_origin, copy_path, dirs_exist_ok=True)
 
     #---->train or test
     if cfg.resume_training:
         last_ckpt = log_path = Path(cfg.log_path) / 'lightning_logs' / f'version_{cfg.version}' / 'last.ckpt'
-        # model = model.load_from_checkpoint(last_ckpt)
-        # trainer.fit(model, dm) #, datamodule = dm
         trainer.fit(model = model, ckpt_path=last_ckpt) #, datamodule = dm
 
     if cfg.General.server == 'train':
@@ -222,16 +236,9 @@ def main(cfg):
     else:
         log_path = Path(cfg.log_path) / 'lightning_logs' / f'version_{cfg.version}'/'checkpoints' 
 
-        print(log_path)
-        test_path = Path(log_path) / 'test'
-        # for n in range(cfg.Model.n_classes):
-        #     n_output_path = test_path / str(n)
-        #     n_output_path.mkdir(parents=True, exist_ok=True)
-        # print(cfg.log_path)
         model_paths = list(log_path.glob('*.ckpt'))
-        # print(model_paths)
-        # print(cfg.epoch)
-        # model_paths = [str(model_path) for model_path in model_paths if 'epoch' in str(model_path)]
+
+
         if cfg.epoch == 'last':
             model_paths = [str(model_path) for model_path in model_paths if f'last' in str(model_path)]
         elif int(cfg.epoch) < 10:
@@ -242,9 +249,9 @@ def main(cfg):
         # model_paths = [f'{log_path}/epoch=279-val_loss=0.4009.ckpt']
 
         for path in model_paths:
-            # print(path)
-            new_model = model.load_from_checkpoint(checkpoint_path=path, cfg=cfg)
-            trainer.test(model=new_model, datamodule=dm)
+            print(path)
+            model = model.load_from_checkpoint(checkpoint_path=path, cfg=cfg)
+            trainer.test(model=model, datamodule=dm)
 
 
 def check_home(cfg):
diff --git a/code/utils/__pycache__/utils.cpython-39.pyc b/code/utils/__pycache__/utils.cpython-39.pyc
index df4436cad807f2a148eddd27875e344eb4dcbe14..7323a9ff0b5a105e42a911fa4478357cbd7b97a1 100644
GIT binary patch
delta 1027
zcmbQIv`(2fk(ZZ?fq{WxOZ(pBYa4mvS?bvt7#N%x7#NCYFfcIGFk~@gF{UtPGZ&ea
zFl8~<Fl4c$Fr_f3u=Fz5GS)C=vDPw`FlDiU#A}#RSbLdjne)6#*g^be#u|ormJ*H>
zuxv9EBLmEY8ip*^6p$Lu*$i`;z~*uEveq(!Ows^p1uFm>R>G0O2@(M*Y-VI+s9~&O
zPG<^c(Bzu@o5duwiZ3}WT`wm;J-#5ZB*RJ}imfEIxFkMRlch+Lfq~%`dvR$}d`f0f
zkvandLy-mp149*`uCA`GolkzcLP}<lm4YVMEq3?Rywsw^oLg+gsYPX}MUx|0UD<E3
zrWWKUXH1^Q>Z+vz3L#D&CJrW+e{3vlOhPO?j66&nj4X^?j4XfISUDJ3{<E<NFc+Cj
z=4AU*&r-r!!`#f6!j}T_8-EI03R8+eFULZV^O+WaLPQX%LI|ot7(;~!RD~#}iW06A
zu@v!M##*)-rUfiDY#?(>xKkujBzqZa*=v~i7O>Q?g9K|p;UqPA61%E6M_FP{d`^CG
zaXQE=Abg8CH#2YYS@wfWRrZthIaMa_;}BD`(lbe`eaXbYz@W)dWDgF&l*}Sf(B9%m
z%gjm5OUz9za+>^`!-)G9JIKhy(qyo9OF1hAkltI&xrr6G_(4(yi6xn-c_r~6C3chb
zIW1YircbWow6L`V#fCPBa0FSxmRL}bnwL^!1`@FV5jr5k6eP!7TvAkI4&n+!oMk-u
z8>bzk`(#rtV@BV}nOv2ONs}LQePf(7`4)F8W7^~d9yz5f1_p-7AU)Y2f)S*V?G|fM
zVqSXcE#{omyrP`R3wWg1GC{KWlMnDnM1q-lU_OXY23akfT$Ep29G{w3Qj}j%8DElL
zl$<dUlruO$0mvxDsKg}3SX9Qq!0-}k1*5?&M#Ec-Mz<J^Z!wzOVl*xCpKQpxi+2%7
z7bsX6n2Opb|KL?)%$Y3D7r~gaxq`2aQA!tNr3r`tc^MK6ATQrCnj9z~GucEyoY7*k
zkAQ(5INXxJZUehUFE39otu!yWBr`uRG3ORjULM#vMJ*t++9$6P2$$6mVFV{uCk{p-
OMh;E^MmYS%#t8t)fB`lD

delta 912
zcmZ3dJWq)?k(ZZ?fq{YH>74dtr;WVvEcI**3=GZ;3=G8$3=9l43|R~*jM>aZCMAqn
zOf?Ky%qdJM%qc9r%(aX)j9DzTOeKt2tRV3krWDp*rdsAalM*%%znL+GQJSHKA)Xno
zlM$?!t(T>i5vD7J5u&SvJ%t^lYc_+xT&8A5Mur;38s>DSU<OT&$-=BA?3&C)nhXpK
zlk-`nIXG^y7MCQZr%s;3>N@!atE-v<$OD{QOdL!s|JYdAn1oma7&#c37(qM^MyCI4
zECS3$hLgkD{?zl8aMmz4Gp6vTFr_f12&4$6Fr^6fax4Vd$h3f^gd>FoszMm5f(=83
z2yPW6Tq&X{V!e#DY&A>^SZdfn=9X}$h^I*OGS;%!F!3#5sbL2R)_}YwIr#y*su+7&
zVorQwX>vNq7a)9#IXAIlvJA&To?9Fsxt#ps;wtOO_c^T8YhN-kFfiO=&dtoz<S4QM
zvDs5HiwY7;GH!9CW#**jCFZ6U*-y6TG-9f<ntY!_W%2?}QK8yf{2+r05=$~u^GZOL
zO%~*`a4`excTderElSKOvIFth5(^4a^HPe885kIfOu-_>sYPX}MMXv+4s&ryQIQEq
zPMCp#L6f7%U~(Im9iz+Sqg=*}UXy=vRWc?{F6REm*grXor<E~v@&_I{rA!6}h6x}&
zSs;QDq>=3wYf)ledg?9aoYcIc?8yeaQfwI@*}TaCyb_UMW-gcyB9uW^3nv%l7Z=B;
z=9LuX7gWZV<QFAp^nsEo2Pn)Kr5Kf%#2AZ885kH|LakslxW#CAi_z#7qwy_9lUt0Y
zMZS{{^6uiD57Gq+M+T;%w#glQYK+;F*Yia%CU558Z)23w23ctcB0ye-gapXTw~Qt~
z6p)#GL_nO;bn+bm13ggi6(xe*26l~JUY=fBX<l+kW`15`&Ml_AJg{?$nn7l@O|}vY
bmsJ&E<YDAt;&9?%6k_Dy6kvqIUu>KJH!R}I

diff --git a/code/utils/utils.py b/code/utils/utils.py
old mode 100755
new mode 100644
index 9a756d5..f7781e8
--- a/code/utils/utils.py
+++ b/code/utils/utils.py
@@ -39,7 +39,7 @@ def load_loggers(cfg):
     
     
     #---->TensorBoard
-    if cfg.stage != 'test':
+    if cfg.General.server != 'test':
         
         tb_logger = pl_loggers.TensorBoardLogger(cfg.log_path,
                                                   # version = f'fold{cfg.Data.fold}'
@@ -51,13 +51,16 @@ def load_loggers(cfg):
                                         ) # version = f'fold{cfg.Data.fold}', 
         # print(csv_logger.version)
     else:  
-        cfg.log_path = Path(cfg.log_path) / f'test'
+        cfg.log_path = Path(cfg.log_path)
+        print('cfg.log_path: ', cfg.log_path)
+
         tb_logger = pl_loggers.TensorBoardLogger(cfg.log_path,
-                                                version = f'test',
+                                                version = cfg.version,
+                                                sub_dir = f'test_e{cfg.epoch}',
                                                 log_graph = True, default_hp_metric = False)
         #---->CSV
         csv_logger = pl_loggers.CSVLogger(cfg.log_path,
-                                        version = f'test', )
+                                        version = cfg.version, )
                               
     
     print(f'---->Log dir: {cfg.log_path}')
@@ -79,11 +82,11 @@ def load_callbacks(cfg, save_path):
     output_path.mkdir(exist_ok=True, parents=True)
 
     early_stop_callback = EarlyStopping(
-        monitor='val_auc',
+        monitor='val_loss',
         min_delta=0.00,
         patience=cfg.General.patience,
         verbose=True,
-        mode='max'
+        mode='min'
     )
 
     Mycallbacks.append(early_stop_callback)
@@ -105,7 +108,7 @@ def load_callbacks(cfg, save_path):
         # save_path = Path(cfg.log_path) / 'lightning_logs' / f'version_{cfg.resume_version}' / last.ckpt
         Mycallbacks.append(ModelCheckpoint(monitor = 'val_loss',
                                          dirpath = str(output_path),
-                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc: .4f}-{val_patient_auc}',
+                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc:.4f}',
                                          verbose = True,
                                          save_last = True,
                                          save_top_k = 2,
@@ -113,7 +116,7 @@ def load_callbacks(cfg, save_path):
                                          save_weights_only = True))
         Mycallbacks.append(ModelCheckpoint(monitor = 'val_auc',
                                          dirpath = str(output_path),
-                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc}',
+                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc:.4f}',
                                          verbose = True,
                                          save_last = True,
                                          save_top_k = 2,
@@ -121,7 +124,7 @@ def load_callbacks(cfg, save_path):
                                          save_weights_only = True))
         Mycallbacks.append(ModelCheckpoint(monitor = 'val_patient_auc',
                                          dirpath = str(output_path),
-                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc}',
+                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc:.4f}',
                                          verbose = True,
                                          save_last = True,
                                          save_top_k = 2,
diff --git a/project_plan.md b/project_plan.md
index b0e8379..0fde7bc 100644
--- a/project_plan.md
+++ b/project_plan.md
@@ -7,11 +7,11 @@ With this project, we aim to esatablish a benchmark for weakly supervised deep l
 
 #### Original Lancet Set:
 
-    * Training:
-        * AMS: 1130 Biopsies (3390 WSI)
-        * Utrecht: 717 Biopsies (2151WSI)
-    * Testing:
-        * Aachen: 101 Biopsies (303 WSI)
+* Training:
+    * AMS: 1130 Biopsies (3390 WSI)
+    * Utrecht: 717 Biopsies (2151 WSI)
+* Testing:
+    * Aachen: 101 Biopsies (303 WSI)
 
 
 #### Extended:
@@ -23,80 +23,80 @@ With this project, we aim to esatablish a benchmark for weakly supervised deep l
 
 ## Models:
 
-    For our Benchmark, we chose the following models: 
+For our benchmark, we chose the following models:
 
-    - AttentionMIL
-    - Resnet18/50
-    - ViT
-    - CLAM
-    - TransMIL
-    - Monai MIL (optional)
+- AttentionMIL
+- Resnet18/50
+- ViT
+- CLAM
+- TransMIL
+- Monai MIL (optional)
 
-    Resnet18 and Resnet50 are basic CNNs that can be applied for a variety of tasks. Although domain or task specific architectures mostly outperform them, they remain a good baseline for comparison. 
+Resnet18 and Resnet50 are basic CNNs that can be applied to a variety of tasks. Although domain- or task-specific architectures usually outperform them, they remain a good baseline for comparison.
 
-    The vision transformer is the first transformer based model that was adapted to computer vision tasks. Benchmarking on ViT can provide more insight on the performance of generic transformer based models on multiple instance learning. 
+The vision transformer (ViT) was the first transformer-based model adapted to computer vision tasks. Benchmarking ViT provides insight into how generic transformer-based models perform on multiple instance learning.
 
-    The AttentionMIL was the first simple, yet relatively successful deep MIL model and should be used as a baseline for benchmarking MIL methods. 
+AttentionMIL was the first simple yet relatively successful deep MIL model and serves as a baseline for benchmarking MIL methods.
 
-    CLAM is a recent model proposed by Mahmood lab which was explicitely trained for histopathological whole slide images and should be used as a baseline for benchmarking MIL methods in histopathology. 
+CLAM is a recent model proposed by the Mahmood lab that was explicitly designed for histopathological whole slide images; it serves as a baseline for benchmarking MIL methods in histopathology.
 
-    TransMIL is another model proposed by Shao et al, which achieved SOTA on histopathological WSI classification tasks using MIL. It was benchmarked on TCGA and compared to CLAM and AttMIL. It utilizes the self-attention module from transformer models.
+TransMIL is another model, proposed by Shao et al., which achieved SOTA on histopathological WSI classification tasks using MIL. It was benchmarked on TCGA and compared to CLAM and AttentionMIL, and it utilizes the self-attention module from transformer models.
 
-    Monai MIL (not official name) is a MIL architecture proposed by Myronenk et al (Nvidia). It applies the self-attention mechanism as well. It is included because it shows promising results and it's included in MONAI. 
+Monai MIL (not its official name) is a MIL architecture proposed by Myronenko et al. (Nvidia). It also applies the self-attention mechanism. It is included because it shows promising results and ships with MONAI.
 
 ## Tasks:
 
-    The Original tasks mimic the ones published in the original DeepGraft Lancet paper. 
-    Before we go for more challenging tasks (future tasks), we want to establish that our models outperform the simpler approach from the previous paper and that going for MIL in this setting is indeed profitable. 
+The Original tasks mimic the ones published in the original DeepGraft Lancet paper.
+Before moving on to more challenging tasks (future tasks), we want to establish that our models outperform the simpler approach from the previous paper and that MIL is indeed worthwhile in this setting.
 
-    All available classes: 
-        * Normal
-        * TCMR
-        * ABMR
-        * Mixed
-        * Viral
-        * Other
+All available classes:
+
+* Normal
+* TCMR
+* ABMR
+* Mixed
+* Viral
+* Other
 
 #### Original:
 
-    The explicit classes are simplified/grouped together such as this: 
-    Diseased = all classes other than Normal 
-    Rejection = TCMR, ABMR, Mixed 
+The explicit classes are simplified/grouped as follows:
+Diseased = all classes other than Normal
+Rejection = TCMR, ABMR, Mixed
 
-    - (1) Normal vs Diseased (all other classes)
-    - (2) Rejection vs (Viral + Others)
-    - (3) Normal vs Rejection vs (Viral + Others)
+- (1) Normal vs Diseased (all other classes)
+- (2) Rejection vs (Viral + Others)
+- (3) Normal vs Rejection vs (Viral + Others)
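+
+As a sketch, the grouping for task (3) could be expressed as a simple label map (class names taken from the list above; the dict below is illustrative, not code from this repo):
+
+```python
+# Hypothetical grouping for task (3): Normal vs Rejection vs (Viral + Others)
+LABEL_MAP = {
+    'Normal': 0,
+    'TCMR': 1, 'ABMR': 1, 'Mixed': 1,   # Rejection
+    'Viral': 2, 'Other': 2,             # Viral + Others
+}
+```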
 
 #### Future:
 
-    After validating Original tasks, the next step is to challenge the models by attempting more complicated tasks. 
-    These experiments may vary depending on the results from previous experiments
+After validating the Original tasks, the next step is to challenge the models with more complicated tasks.
+These experiments may vary depending on the results of the previous experiments.
 
-    - (4) Normal vs TCMR vs Mixed vs ABMR vs Viral vs Others
-    - (5) TCMR vs Mixed vs ABMR
+- (4) Normal vs TCMR vs Mixed vs ABMR vs Viral vs Others
+- (5) TCMR vs Mixed vs ABMR
 
 ## Plan:
 
-    1. Train models for current tasks on AMS+Utrecht -> Validate on Aachen
+1. Train models for current tasks on AMS+Utrecht -> Validate on Aachen
 
-    2. Visualization, AUC Curves
+2. Visualization, AUC Curves
 
-    3. Train best model on extended training set (AMS+Utrecht+Leuven) (Tasks 1,2,3) -> Validate on Aachen_extended
-        - Investigate if a larger training cohort increases performance
-    4. Train best model on extended dataset on future tasks (Task 4, 5)
+3. Train best model on extended training set (AMS+Utrecht+Leuven) (Tasks 1,2,3) -> Validate on Aachen_extended
+    - Investigate if a larger training cohort increases performance
+4. Train best model on extended dataset on future tasks (Task 4, 5)
 
 
-    Notes: 
-        * Resnet18, ViT and CLAM are all trained on HIA (Training Framework from Kather / Narmin)
+Notes:
+
+* Resnet18, ViT and CLAM are all trained on HIA (Training Framework from Kather / Narmin)
     
 
 ## Status: 
 
-        - Resnet18: Trained on all tasks via HIA  
-        - Vit: Trained on all tasks via HIA 
-        - CLAM: Trained on (1) via HIA 
-        - TransMIL: Trained, but overfitting
-            - Check if the problems are not on model side by evaluating on RCC data. 
-            - (mixing in 10 slides from Aachen increases auc performance from 0.7 to 0.89)
-        - AttentionMIL: WIP
-        - Monai MIL: WIP
+- Resnet18: Trained on all tasks via HIA  
+- Vit: Trained on all tasks via HIA 
+- CLAM: Trained on (1) via HIA 
+- TransMIL: Trained, but overfitting
+    - Check that the problem is not on the model side by evaluating on RCC data.
+    - (mixing in 10 slides from Aachen increases AUC from 0.7 to 0.89)
+- AttentionMIL: WIP
+- Monai MIL: WIP
-- 
GitLab