From 70574bb76a6b0c325c5e48de906f8718e827c99e Mon Sep 17 00:00:00 2001
From: Ycblue <yuchialan@gmail.com>
Date: Fri, 2 Dec 2022 15:44:10 +0100
Subject: [PATCH] project plan, mixup, test eval

---
 DeepGraft/AttMIL_feat_norm_rej_rest.yaml      |  52 ++
 DeepGraft/AttMIL_feat_norm_rest.yaml          |  52 ++
 DeepGraft/AttMIL_feat_rej_rest.yaml           |  52 ++
 DeepGraft/AttTrans_resnet50_norm_rest.yaml    |  52 ++
 DeepGraft/TransMIL_feat_norm_rej_rest.yaml    |  52 ++
 DeepGraft/TransMIL_feat_norm_rest.yaml        |  14 +-
 DeepGraft/TransMIL_feat_rej_rest.yaml         |  52 ++
 DeepGraft/TransMIL_feat_rejections.yaml       |  52 ++
 .../TransMIL_resnet50_norm_rej_rest.yaml      |   6 +-
 DeepGraft/TransMIL_resnet50_norm_rest.yaml    |   2 +-
 DeepGraft/TransformerMIL_feat_norm_rest.yaml  |  52 ++
 DeepGraft_Project_Plan.pdf                    | Bin 0 -> 41190 bytes
 README.md                                     |   7 +-
 .../__pycache__/loss_factory.cpython-39.pyc   | Bin 2635 -> 2605 bytes
 code/MyLoss/loss_factory.py                   |   2 +-
 .../__pycache__/test_visualize.cpython-39.pyc | Bin 13391 -> 14897 bytes
 code/datasets/ResNet.py                       |  10 +-
 .../__pycache__/ResNet.cpython-39.pyc         | Bin 12596 -> 12695 bytes
 .../custom_jpg_dataloader.cpython-39.pyc      | Bin 12376 -> 12309 bytes
 .../__pycache__/data_interface.cpython-39.pyc | Bin 10399 -> 8118 bytes
 .../feature_dataloader.cpython-39.pyc         | Bin 0 -> 7019 bytes
 .../simple_jpg_dataloader.cpython-39.pyc      | Bin 0 -> 8079 bytes
 .../zarr_feature_dataloader.cpython-39.pyc    | Bin 5865 -> 6943 bytes
 ...r_feature_dataloader_simple.cpython-39.pyc | Bin 0 -> 5976 bytes
 code/datasets/custom_jpg_dataloader.py        |   2 +-
 code/datasets/data_interface.py               | 210 +++++----
 code/datasets/feature_dataloader.py           | 394 ++++++++++++++++
 code/datasets/feature_dataloader_deca.py      | 320 +++++++++++++
 code/datasets/feature_extractor.py            | 155 ++++--
 code/datasets/feature_extractor_2.py          | 186 --------
 .../feature_extractor_annotated.ipynb         | 223 +++++++++
 code/datasets/feature_extractor_annotated.py  | 267 +++++++++++
 code/datasets/feature_file_checker.py         |  82 ++++
 code/datasets/monai_loader.py                 | 179 +++++++
 code/datasets/simple_jpg_dataloader.py        |  17 +-
 code/datasets/test_normalization.ipynb        | 195 ++++++++
 code/datasets/zarr_feature_dataloader.py      | 196 +++++---
 .../zarr_feature_dataloader_simple.py         | 255 ++++++++++
 code/models/AttMIL.py                         |   2 +-
 code/models/TransMIL.py                       |  34 +-
 code/models/__pycache__/AttMIL.cpython-39.pyc | Bin 1560 -> 1560 bytes
 .../__pycache__/TransMIL.cpython-39.pyc       | Bin 3741 -> 3811 bytes
 .../__pycache__/TransformerMIL.cpython-39.pyc | Bin 3445 -> 3393 bytes
 .../model_interface.cpython-39.pyc            | Bin 17157 -> 17801 bytes
 code/models/model_interface.py                | 444 ++++++++++--------
 code/monai_test.json                          |   1 +
 code/test_visualize.py                        | 271 ++++++++---
 code/train.py                                 |  48 +-
 code/utils/__pycache__/utils.cpython-39.pyc   | Bin 4175 -> 4382 bytes
 code/utils/utils.py                           |  20 +-
 monai_test.json                               |   1 +
 paper_structure.md                            |  35 ++
 project_plan.md                               | 102 ++++
 53 files changed, 3380 insertions(+), 716 deletions(-)
 create mode 100644 DeepGraft/AttMIL_feat_norm_rej_rest.yaml
 create mode 100644 DeepGraft/AttMIL_feat_norm_rest.yaml
 create mode 100644 DeepGraft/AttMIL_feat_rej_rest.yaml
 create mode 100644 DeepGraft/AttTrans_resnet50_norm_rest.yaml
 create mode 100644 DeepGraft/TransMIL_feat_norm_rej_rest.yaml
 create mode 100644 DeepGraft/TransMIL_feat_rej_rest.yaml
 create mode 100644 DeepGraft/TransMIL_feat_rejections.yaml
 create mode 100644 DeepGraft/TransformerMIL_feat_norm_rest.yaml
 create mode 100644 DeepGraft_Project_Plan.pdf
 create mode 100644 code/datasets/__pycache__/feature_dataloader.cpython-39.pyc
 create mode 100644 code/datasets/__pycache__/simple_jpg_dataloader.cpython-39.pyc
 create mode 100644 code/datasets/__pycache__/zarr_feature_dataloader_simple.cpython-39.pyc
 create mode 100644 code/datasets/feature_dataloader.py
 create mode 100644 code/datasets/feature_dataloader_deca.py
 delete mode 100644 code/datasets/feature_extractor_2.py
 create mode 100644 code/datasets/feature_extractor_annotated.ipynb
 create mode 100644 code/datasets/feature_extractor_annotated.py
 create mode 100644 code/datasets/feature_file_checker.py
 create mode 100644 code/datasets/monai_loader.py
 create mode 100644 code/datasets/test_normalization.ipynb
 create mode 100644 code/datasets/zarr_feature_dataloader_simple.py
 create mode 100644 code/monai_test.json
 create mode 100644 monai_test.json
 create mode 100644 paper_structure.md
 create mode 100644 project_plan.md

diff --git a/DeepGraft/AttMIL_feat_norm_rej_rest.yaml b/DeepGraft/AttMIL_feat_norm_rej_rest.yaml
new file mode 100644
index 0000000..4eda25f
--- /dev/null
+++ b/DeepGraft/AttMIL_feat_norm_rej_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rej_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 8
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 8
+
+Model:
+    name: AttMIL
+    n_classes: 3
+    backbone: features
+    in_features: 1024
+    out_features: 512
+
+
+Optimizer:
+    opt: Adam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/AttMIL_feat_norm_rest.yaml b/DeepGraft/AttMIL_feat_norm_rest.yaml
new file mode 100644
index 0000000..fefbe5e
--- /dev/null
+++ b/DeepGraft/AttMIL_feat_norm_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 1000 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 100
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: AttMIL
+    n_classes: 2
+    backbone: features
+    in_features: 512
+    out_features: 1024
+
+
+Optimizer:
+    opt: Adam
+    lr: 0.0001
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/AttMIL_feat_rej_rest.yaml b/DeepGraft/AttMIL_feat_rej_rest.yaml
new file mode 100644
index 0000000..3d854be
--- /dev/null
+++ b/DeepGraft/AttMIL_feat_rej_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_rej_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 8
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 8
+
+Model:
+    name: AttMIL
+    n_classes: 2
+    backbone: features
+    in_features: 1024
+    out_features: 512
+
+
+Optimizer:
+    opt: Adam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/AttTrans_resnet50_norm_rest.yaml b/DeepGraft/AttTrans_resnet50_norm_rest.yaml
new file mode 100644
index 0000000..cfffa7d
--- /dev/null
+++ b/DeepGraft/AttTrans_resnet50_norm_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 32
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 1000 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 100
+    server: train #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128uM_annotated/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_limit_100_split_PAS_HE_Jones_norm_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: AttTrans
+    n_classes: 2
+    backbone: resnet50
+    in_features: 512
+    out_features: 1024
+
+
+Optimizer:
+    opt: Adam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/TransMIL_feat_norm_rej_rest.yaml b/DeepGraft/TransMIL_feat_norm_rej_rest.yaml
new file mode 100644
index 0000000..3dcc817
--- /dev/null
+++ b/DeepGraft/TransMIL_feat_norm_rej_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 1000 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 100
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rej_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: TransMIL
+    n_classes: 3
+    backbone: features
+    in_features: 512
+    out_features: 512
+
+
+Optimizer:
+    opt: lookahead_radam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/TransMIL_feat_norm_rest.yaml b/DeepGraft/TransMIL_feat_norm_rest.yaml
index dab6568..ea452a3 100644
--- a/DeepGraft/TransMIL_feat_norm_rest.yaml
+++ b/DeepGraft/TransMIL_feat_norm_rest.yaml
@@ -3,20 +3,22 @@ General:
     seed: 2021
     fp16: True
     amp_level: O2
-    precision: 16 
+    precision: 16
     multi_gpu_mode: dp
-    gpus: [0]
+    gpus: [0, 1]
     epochs: &epoch 500 
     grad_acc: 2
     frozen_bn: False
     patience: 50
-    server: test #train #test
+    server: train #train #test
     log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
 
 Data:
     dataset_name: custom
     data_shuffle: False
-    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    mixup: False
+    aug: True
+    data_dir: '/home/ylan/data/DeepGraft/224_128uM_annotated/'
     label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
     fold: 1
     nfold: 3
@@ -34,12 +36,12 @@ Model:
     name: TransMIL
     n_classes: 2
     backbone: features
-    in_features: 512
+    in_features: 2048
     out_features: 1024
 
 
 Optimizer:
-    opt: lookahead_radam
+    opt: Adam
     lr: 0.0001
     opt_eps: null 
     opt_betas: null
diff --git a/DeepGraft/TransMIL_feat_rej_rest.yaml b/DeepGraft/TransMIL_feat_rej_rest.yaml
new file mode 100644
index 0000000..ca9c0e4
--- /dev/null
+++ b/DeepGraft/TransMIL_feat_rej_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_rej_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 8
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 8
+
+Model:
+    name: TransMIL
+    n_classes: 2
+    backbone: features
+    in_features: 1024
+    out_features: 512
+
+
+Optimizer:
+    opt: lookahead_radam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/TransMIL_feat_rejections.yaml b/DeepGraft/TransMIL_feat_rejections.yaml
new file mode 100644
index 0000000..a1d2ae6
--- /dev/null
+++ b/DeepGraft/TransMIL_feat_rejections.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 500 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 50
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/split_PAS_HE_Jones_rejections.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 8
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 8
+
+Model:
+    name: TransMIL
+    n_classes: 3
+    backbone: features
+    in_features: 1024
+    out_features: 512
+
+
+Optimizer:
+    opt: lookahead_radam
+    lr: 0.0002
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft/TransMIL_resnet50_norm_rej_rest.yaml b/DeepGraft/TransMIL_resnet50_norm_rej_rest.yaml
index 9885743..1fa23b5 100644
--- a/DeepGraft/TransMIL_resnet50_norm_rej_rest.yaml
+++ b/DeepGraft/TransMIL_resnet50_norm_rej_rest.yaml
@@ -6,11 +6,11 @@ General:
     precision: 16 
     multi_gpu_mode: dp
     gpus: [0]
-    epochs: &epoch 500 
+    epochs: &epoch 1000 
     grad_acc: 2
     frozen_bn: False
-    patience: 50
-    server: test #train #test
+    patience: 100
+    server: train #train #test
     log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
 
 Data:
diff --git a/DeepGraft/TransMIL_resnet50_norm_rest.yaml b/DeepGraft/TransMIL_resnet50_norm_rest.yaml
index b58c6d8..0511d26 100644
--- a/DeepGraft/TransMIL_resnet50_norm_rest.yaml
+++ b/DeepGraft/TransMIL_resnet50_norm_rest.yaml
@@ -17,7 +17,7 @@ Data:
     dataset_name: custom
     data_shuffle: False
     data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
-    label_file: '/home/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_limit_100_split_PAS_HE_Jones_norm_rest_RA_RU.json'
     fold: 1
     nfold: 3
     cross_val: False
diff --git a/DeepGraft/TransformerMIL_feat_norm_rest.yaml b/DeepGraft/TransformerMIL_feat_norm_rest.yaml
new file mode 100644
index 0000000..7c90fbf
--- /dev/null
+++ b/DeepGraft/TransformerMIL_feat_norm_rest.yaml
@@ -0,0 +1,52 @@
+General:
+    comment: 
+    seed: 2021
+    fp16: True
+    amp_level: O2
+    precision: 16 
+    multi_gpu_mode: dp
+    gpus: [0]
+    epochs: &epoch 1000 
+    grad_acc: 2
+    frozen_bn: False
+    patience: 100
+    server: test #train #test
+    log_path: /home/ylan/workspace/TransMIL-DeepGraft/logs/
+
+Data:
+    dataset_name: custom
+    data_shuffle: False
+    data_dir: '/home/ylan/data/DeepGraft/224_128um_v2/'
+    label_file: '/home/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
+    fold: 1
+    nfold: 3
+    cross_val: False
+
+    train_dataloader:
+        batch_size: 1 
+        num_workers: 4
+
+    test_dataloader:
+        batch_size: 1
+        num_workers: 4
+
+Model:
+    name: TransformerMIL
+    n_classes: 2
+    backbone: features
+    in_features: 512
+    out_features: 1024
+
+
+Optimizer:
+    opt: lookahead_radam
+    lr: 0.0001
+    opt_eps: null 
+    opt_betas: null
+    momentum: null 
+    weight_decay: 0.01
+
+Loss:
+    base_loss: CrossEntropyLoss
+    
+
diff --git a/DeepGraft_Project_Plan.pdf b/DeepGraft_Project_Plan.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..1b5a0c95fca1a588080f74941d9141b5b45bb604
GIT binary patch
literal 41190
zcmY!laB<T$)HCN&ZES38<l-_kS1?e>Ps-x5vEkBp%giZBEmF{T%SkLrbxBRmPf6v{
z_esr5FUe3aF*h>cva{nVE-6Y)%;l<>^ET%8I@#?LYwyQjJkPl4?w-jFi4IoNnkzXB
zCUbl!UQj5+GHK#T6$iWeRVR|JCY5K;_nt7lQCEKT`B!R@*N-agez}`}?f=bhzdXNQ
z{$s0KdH%YsyJ~+G{hoh&-_$KXBW~S&I%k_e^Zw`0?ta@oKhOSO*6*2eoaN7p3lzVd
z`(U2sHFrnv|3B}{Z}0eVdw$;iKWaJkzwS%h$3@-Pqd)cc@5gWNIdMq;n?7r<<13|$
z9-6r`XYIZ?ZNd{~rSD-iw?6FOyIuK4^=|b^*K*^JrQW;!ZKl3*wCd&eU*{F~RzAKx
zbIk)Qu_R?hj?=l{Ki%<DE4r<o7aLo6Q#k6K>Jzb_C$D+k^*)|-?q=@Ei9h$gw-wu0
zGBtX4opqR#vANd7`8o?r8mjJTzr5|fdS9|ibM~Dp<vTxBOj`Q&kIL!zPY1S%Jb83^
z?UC4Z_MgAnvZybf<oV+JoNodPw2!SQe_6*DrIAzfZEI6S&r*RJHdzVnoh6Pww}m_>
z-`&komUt#K{$LGH>)xl^BxYTdDEjnZQs2baN;~>CKD>1?t>Vw;v-6hTcddOr+0e|f
z`J!a9u!no>DJL~KHiJ|NX*Pqcl8?1F&b;H-IJ@KQ66r%Lp66}OEIby`V(>?8Ut&m0
z=>O6j4bkuaq<>qn?)+1;xnqO7ZqXX=ZbO+Er;90DDokT_HaTW`Gj8r%+7)?m64#pC
zo9THQbxgl!+STv+JF{~^-sAXl|10lSY8dhVo%DBq%76Wv|Noo(uK(Y)$mx~PDwR8N
z(;q#0xBa8O`P}PQnz*JP5c^xC{q&x<#(4)`JxifZ1Eb`w5c9jSw-#&ngc>D3jd~?_
z@>X?u`p)z&<t*onll!h7^H<|iG0OKS|70>Xgr%}i`S^sGug7X;mndv8SG0d}kN5QS
zs_P+Af^N)dn?w_JSUA1ceim5EfA~?Edx>aG`?5;Uvlp7}0(Ct$Zobp|{nW<IB|`5z
zlhRM*A9m&HzVkMvskM6XhlLZESecH__-?muRndlNmxOM#JeqVUU7~VUAJgQ+$Je_p
zyO1Kp{erLl;WmRgUb-LFO>Ws3R}(&KRgA>>@}=3*k9|*StaHBkech{&1EouavtIoQ
z74Ti|D`UR$OlOwW_3CHG(o)P*Bzji8D~P>yPWY9X`n{6Fvma$`chrb5=9ZE<aOkwl
zMuU(}-MBzg?wqZSNB7S>WyIQ>WtsX+Up#Ns<<7lKA6(dNw9fv~n>z8Bw#&^w{~o`c
z{r}Fszu#uxxBGki`xc9z?{<H`Z+~~+PsLml|JpywYg2v|oy_eGlW(cnS?k@@R_otc
z_h;Yn_b-=U@?Un}v(vWc_eb_$oh>`xZ|kq$S^MMd<+q2Aub<Sl_~NGbYaY#6<<@Z|
zo!!0f=>=OiW5wo4DJ<V+q#sI+*wmBoO>*+AVE!i-msXSo%yCWFpLMBm$JB&8*U62=
zuj-kW*0ZUGa9n9gk66@|F!A<I!LoDZ1sNw4Hs-zRHHrGTug&zEiP3^zhgZzeNfpdb
z3F+xpN<8*-Rf+%QUF}C5l;^kKygFspuhloFs$OvYcWP#$#cb(^wVlatPV#;|ZM16k
ztNy^{EqMh;UnIQR#Ls=j_5EEV;oL3We5+Nz&wP>bsrJOgqfai|{$9YpeWCLAmk(oY
zY?Kr38+a^O%X-DAk5&FeN@CWeXcv92s4vZqI{pVrrUiG;)ct=z_35d~QmgN4b)M`y
z=cE_#Hm;?uMvSY<{@=SPE+SJn%J1**C=0qfOM1)XHlrhZTQ<xtld{$PYIc9`*5`bi
zI#=vcp0GExvMwQlWAD$0YyYiS63cx#lBcVm<4Wbc#JWT3ZAYW^Iv-0;z9;#5v60$k
zR`c`04o$8Z(@h>pi1>NRObq#3q|6(-$G*wx+S26{8CJ@=$qJX`bvGz4t%){~e7XI4
z^R>lRD^fRJ@W|3CJ2*x8zkir~jNzJfa-YKY@f^N>hvR)W<A&w6X{LR<&TmTYv0}ZG
z-paJ<ikY@#z-$N2DmV5drKZ-^2WGHvt!{kCsS?>VBR0n6!4tk>wW+*^Pw%a3(0E~P
z60qTIkETq@F*&crj$K9B4(qM5J}kD?@jg&AEwFp4?(YN2PfwMX{`$6d$z%BmemBh*
zxvi{i$#^9EcaL<vx_OrDy2>5@K1TiS+12q{d2R25@6#twDSmq*h^N;`?9G!u>w?!y
zP7RC|YG9Xmf4aTJ=joowrEC_{4r;GT`g0_dXWogfgWB7Y{`iE;y6!8wx4vbo&BB)(
zzAT$rED?P<M=->==%VN4?+J`&OHHP4I+PT-bV9TIlzg%EKegG}I~!VeOIS@7_dOu~
z=IDyE&lEHs=JZHS-2dxLosxmudWoKo#;t}`&kWp__C#l_5jR$sQ!d$cf9lke$8@`u
za@H$u=Ueu$b5f+rVllDrtIthS$x!sx*s^Tt{2Tu~vbvX_S{lCk*Wa~!#N<BDQa-w5
zR#H%veaA{+k;&6zj!&N~+WRoccHI*RqvJ1~)XcujzcROn@4s%gW!u^MS@VDU*sZRS
z+~TuUYi42Ib?<Q96_TOfGryjFakZ+iuyj6a^8F{m-@g|4=EYvp^>-<@{<2KF=kSeW
z-Ax@Uy{EaV#@?6V5ztw8HfmRn)9zP7*<Ty=j*GSb^nT7(_<z&3WRGB}D(x%|nOzRs
z7R+%i<__tK%w=R#xUxy?jO&jai??0@@(XqK6t2u*mT~)0In({cm+CXGo$~K&`f%`@
zSkCJVu~{0kTMe}m9rv%wk!)gXN?}TNdZxRq-G<eDs`v7+(%;dYT6S??Kgwu`-q1U}
z&(qI-Pu9ufi$0azX<Sh8x;JLqh2WKk^SgUFo>=~I@N1cOa^;qXn~(ald^^o-E_i48
zwANW1Z<uusIwh=6<vZe1uzXVM^jCKp9;}`^;g>FVPG;ORoe2x2J_(#isbswrEu=Rq
z>R3~#&d!5%$5=A5l6gOL?JtNI_`Z(CB_O=!;rvPMzg{n>d~%?%mg61g<_Z3ui#Z~$
z`Y8+YUvVyY;}`0@Y-9MoC>vf!zAerL%X?c(yUv-Z%0?`|-*jWcGXZxCkt1_DSud_U
z<gBoGp1_whVGsVzaw$SlQk%~030n6j{OZB{rL(^9T%DlYlI68r<e$fj$MyO5Pp#?;
zFOZMhefG)k-%nFnk6+-3TFv=4?nZXZ%xK1w3(g%2%QZR^7af*AFJ5Hwd6}?{JfBjd
z#PfN|w;#OKYxqw7R7TsB!ZlCQA|#HV`oy=XDro!f+IJ2fR~|7({Y?(v6%x^YwUm`(
zg|<M5%XID4mv(J)WPWMH;kz<>+0{3@4mgSb<c??i_;piQvImnywU_ag!Ye76jY7+2
zI<4@Q`LpSjtghijx4<HaT*>PHsavZ}k{slZdMdp*yP|KNbe9xoVf3P-v!W9odWbcu
ziL|Wp;t%ZYa$=his1SapW<q#I)(OXA@0~>nq5S)&Hhy{>I<uVb`;OIba@C_}?#=ss
z;<NWN=ASJkYV#-PpHP3|{fYDEo|C?xZ_T-V(05_XB!7vsLX-So{GHzBDAcC$^MGAq
z$KE$mr2;qAuQ2~Uz$|w2!qn7*Mr%@eJTg|Yu8NS-%ZfhL)Ty)c;0!mG?uWf52R#2B
zn(eq}?<Ic^@h`%USpT+Ec-kw*UGV29Ugy5|_)_Z&{u9nKi2dVC6qC;Qspw<-!Jk9?
zk8sm2E14haEv@xScE&qp_e=@@C%Ev@A$tWo7qeAdkqK#yHJ#!tTO_1E8N>+)?AWlX
z|Gt;QQ%R<--zp2#SVDR_X0*iZyY<OZCOn#_)cj%5>E{;Ax4T@}lK+e3MCKcwx!1Mh
z-lv`Q3d+`#F1uIOcne?qDt35f{bshU`pt9hu)a=Vo650fS=x@T{Kpo&d#CU>c!|OH
z$~^Wn6|L-Bb{$_=qPl_4dK!<$d_A22;n<cnQLmd=b(eTcZ&|9>9Jc%QB+l9=>-R_=
zxqJFmx8iks&zN0O@|%~G-uQU;bmIH=M8A2PbWip8XWm)0WAhXj<(;Kpk2a`S-nt|C
zc|rZ1C466&1goFlYuC%PU3X&AS;bWa8XXS8+(}pGhQ+U*H-RZl*h6fyoQhbK)T9gW
z6g}@(#n-3Cb!j3GLvNkdNS9snGGV6u)58aU*tmN9|D^8Qo-Zy}<K)U5^!nuLfHPmq
zTlRe2cbXw{jX~vue~HbLUzb1L`LBm#nqJvqiTCeSH^-dc=&<m{o5h@Ocb}?0!@p_u
zH<j;cf>KN5%|bRCKGSZ>Td^^vPe!G2|F@am&(}?<*i>HeVV#zU&%vLm?&8P##n-)j
z+4xny#_G^D^+(dz-iys|PQUmlGh*rgxsDynU$(L{|86flcJsrn{4hRd7KsOIBDFr=
z|5x{G|BLfiUuq=1{Ac*lf4ltl`SSDUE&8puwYq0r=%WKKI&+Lxeq&8^4=K;L`_$PY
z`fPjU9?xU<y;aZOQ!%*P-Tx%_U)#!-IA-&IE+zbPZyD9U_WRtMZgHnmVCCvF!pxdC
z9Ve}?csEt;x|s3N361mfOjcg#365d@V!U`yb(rvimehSk211EPcIfVK)o?m#<MT<Y
zuKk5p(>!74$sUjUX8pChzs7c<Nq}zd<wTB0*ZI!e)#6WVP1RZKJ#o$jt0gA9H!ocA
zeX;haSQ9Vz>boMU$7gn~wY50R#&mq4&w~`<dS1D}&qj+Xs(yWRP5-*jtpCLHkL@N~
zwu}6EmEn2#U*^hXiH656i_EFtU#)h%wEolg`oD+Ix9t1<<$L{~ho6tn|M#<8=~DUq
zi+A5@8-3v3wP(A-s+l^~8#d}4Kixm|)U}Jh4qpy>K3O9`-EgP)({{tkd+G_78P-JS
zuDf;LTl!z+rfm=JzPSH+&X)`>jeN7X6E(-$_G>#&oxd@*HfQ&*Id2ceM5}6BJ^Foe
z|ED+B8=vkDzh&sT+v;E4M&8@Eb0c=<y!#j)pXI#YeoNv_!<8HQDl?rbzw4`rY`iQt
z$6w+2X0D&dxqePp2}$SUY>U*;JE>Z~-)iHbAHPjKpB}!=+~glzV6)(w`{&xVTPnUD
zd>uY<XFq@Ts!OkC+x0GNmC~Kcr@lmD<>Ljj@;+a%+N8hi;~n<oK)+ir?+G?<ad@Jf
zx$DyEjr)&%D0tAKsIUIE<l~71DfPD{Q%^?7slP38JQXol{cXwHQyG(!8$TIFwcoO^
zJrO_S+oB_Ww+`gSIymcBm8DDi`~Pf6sgmF@xsdF<VwIe}(c~AJDjVj!GhEPFYR**i
zP`^g($kGbCF2S&>!*Rc6H{Y1{@R&E7<-;XSU%v}(&C{r_FPwO3QAYi7L%!Z7&tJDE
z)P-E~xgJ0Ji}KX8C;ysU`PR&`UX<gp;P#dIh0V%Avh4*IrU_U)R`t8}p=XN6<40`^
zR|GkqX{r;rlf~)5v{fR7`-a#jue|~1&aHaob?<<M!J%gJ!@2jfie7goZohx>oQUGb
zEoW-?y_mfC#-61?w==t^EEKHSU>$t>sF-H6(D@B755MTBxgobI_s#!@enQEUyf!>P
z^1{RF#@wWW6!kYLh8Dt-WqT#mRz5wg@%N7nr)AID&A0e|FaFVRRJ&wpb?NI3$-(?j
zFR!(~;<BAnd}*n>;F-nI{Tc43Gfj4;*6+R{<7&@V_+^vjset}7OV#xS_wc+jy&=xH
z_sP*kos(Ijrj*Uf?rxm3Wa-U|oKtn!%1^0w7weh1bB6HCo4Fs?<(S-kHc;}^^3vjv
zBT?q=fmI!uS40loU&GXy^sRZ0Q?&EHnenGunO9rQ@K1~vm+g=H%l1fOwN*msRed{~
z{YeL&|I+iHI(N63_H^f^mmbxv2$8T1)9PLIK=9_Vy*+2mS&m&=JOB6d>72)&_5T0Q
za5wVU@1C>$hjZEzO7@)IFtcT2^2S3qc19$A30Sj5;@FmB;t|WfNJ|~rta@&4anCjp
z4JF^!Uo0)xGOGjq)hi5I#F!Ec&VI?fwbY4G{s(tFYoThGMvsiv6tSrO2Mvb>-Mn^h
z3DM{&imO-=5Vc~>*A@B8R<+F47IEtr<w`!z)w;2`_0WrrPKrJ;H{)N;?sjhFVvUO0
z(qqUc+U2o1L_@)J&c)T@qFfP&lJ4uIMK*2QT4SU?W%K4$yFM*)s{HV1n${Gt%i;F1
z)fW#*c1;TVyip^u<8kiKuKicztbRZJD%70vPvrWlmWP-Amy0``UfZ|JR=L%<LeE>m
zcJH>-4<XYGJT@)aG)<Lf{;5fPPLY?6ZM2+y{sSX-&`U1$la{NRSFs$u>?tmKk+bSz
z<ntnv{&T-FJiMPo>75e#(!c*xia@x2DyQ@DO-rtp=y^-*(eReAo2t6@SaIZ`2d<Ha
zK15A3DAMwl*b>D!NBDf|$sc!WD=gNfp4`EjD0DuxljkX4{Fy}&0xLeoOj|R{>Cf@v
z$fj@2YrMi!Ie#sUvDmPxd4JqAo6lj}OxaUUUcDFcrReXY-bu|Y{qa|K#ePqTFJox<
zKII?(>f2`nxKi^{phHioc`2X)D=tGz1>%OTjEyWXhOYYV922aazy3inLvZbWh9-u$
zUPhl16gtH(Y+iVSkLjd`4Eu@a%U9)>R#|wz>(ja6;kNp&>iG$^mZ7Dqc6nx+noYOw
zUbKFFn1!$3`pD;Vrt#1B`){*S_1DoIGs0z?;@50W<GuOf>GSvg@BeT3<1+n5=<I~8
zrArNMexHhxfBp2zjJO}K_2c!wK6ozgufK2G>Hhy#A9D?FADuB{$6a~9?7n0AKPKnh
zZo1MEzB+!#@u$Jo*Sz%Qf8O}m!^E7$s~EZa18d{51=<<0+a?~IF!g1)R(fchT>-PF
zyX=`oq35!<C`#@#dp;v%%>rewKaWg5rt@q+r6BwI2yezen;l0A9{qNDl)boDbm22m
z)65;-lYLk-HFlNcS5(+-$<DlS?ekU>L2+kh@%-Nx+k;D1&98Yf`D)Z%>+jsm2eq>|
z+On<ezvubhdJ^}HgrGx(MlaTyGA=)yz_XFhW0{<+ub1T832JPmMwuUG#vbf=H-Y{8
z8?C2x>CNVc{q&}1-(13{dn#db<=(ZHhmQ-#_&F#Z*12HN?9$O`!NJxk@IETLbdTDO
zL+*R``}R0KW427_;g@dj)f2Y4zeFpxH-c^D*B2K!crJO=<R(b;BubU;-4eMqL1gKj
z6|0uNVY&D1aliTpv7J(XvP`)3I+S1R({g5=KFzc7_u)T^L2SVSK~9=2@|*Y<YE5fe
zAU3;;sm9`O?^b2quln9@_IgQ=BXaVZ3Wa-SC){CK%zbQoNKS+jZ~e5S$1+o@jtbXb
zd#xI9cF}t6h&^YW>tzbfUh)_2xhl_Q{_F4q*1v_<^1rq_8ntcCU|`dK(ZBhfPrby=
z7yQlQU)mq`{u9~o!rwOQSFpd@-<1M8Hg92H*I(;A<AjCcf)p!(YKu~i;Dw%#B7d}8
zaDV5hcEnFXBk7`=WzOe?_HTG5{}b47Ri3R=;BBD*kC0-C>Z^YBSsncypAO&H`Nv?}
zRC)h)>7R)O%Jw{Q6Xe^@KV6=nWM6aZ>2f*$Ps-uV(Oc7eoy2pT3*^2m@13f|7!#Go
z`i`H)VF}MFg(Xr;H~%yFcK++~$gFGf8&_%^Fy@*D^7~^o3G;(1SWgFc_Xch#U=G}=
zsCaNjq*K$yXwJj}WksI22@?{oggHsvSyL&oM?ul?xiM#Ain1clrGStbsx1$DTSPW^
zc-UqM1^KISt`yj@WDEPd?%L+nr!5pUQmh2*ElN2|7kWAc{b+gN`cCoEQNIP8Nf(z%
z<$P9De8V%jMIhjW18ZWBC`<bR3GNe1<O6>?vAtqXe*Wv=jWGK~Tu;@e@E@~KnOJws
zq9pxe!jxm#fi_&ne$PC5ujbm;#d7|)4E5N`rFxs5JAS|Gd|cyT|M8pyPZRG6$Tll-
zEIz2>A``%O(6Wa)(56R2@PCigV>JQu#vPnT{x5cj*~uxi`Dc%>K=KN{H&usae5)q+
z=-=xsIxO-2?ZZ2g!GB(=n4R1qB(uV_r*L849^<rEa!L^zlF?4Z9K6h)?LF7hdaes^
zK4Wm|u<KOKh`^auS3;J}^U(@c{BZZ8Ooe3$&qsqcjzZ&SjDl>&Z4okeJWlX&DQ;`b
z3200d3+PVVr>35Bf1U2p4=Z$e3|PgsEKNwi6=bk(eqc0*&-3o-O+Odq)IMTeblAX?
zd*RIYjH#LzW^NMcIc}io?Rb95(xg8>rsT$)o3&|6a){W@2pbP6(fGsfW_zE~IGTSv
z<;cOpYeHPja-9;1fl1sir)h{B_Sn+JsJdx>T*aN12Wl+lia+LyHRb!)FI(@ue)5N4
zNj;r?_Iu;&Zq?syvAcez^Rnf|664;RnO47A3KlV`FMBd0p=G6Pe$PXx$hD`W-P=9a
zTd9R>f4}M?Wx*<&fANS_q*6=TGam7koy`g^aq|NVT0`cje!Q)B%)ldWUhD;_1N`cX
z?yb{%#q+}5?XTD2PYpt1*93ao=5cgqpE_d7*A=j+eNJ>+{DO=*&6Q<5nwjR}AwPas
zO!{5B?10L0lhZGbrMv$-o-HKVe?F!)DmQC#?i0J<z&UfYl=F?hSgs2Z-d$Aw*vVj7
zg@C%jOg|4Xi8G3^jV}vA64GWKcw$$(NW8Xd?zg|2<MN}<2K_Hu_WF2`{(ajyaep!^
ze||b@{=L4&*?fOh>Gx;<zC4nB@1xIeKc}Qtv^zBK;;pu)4==x58WeR=-rK}tY0aAZ
zeHkBj$=<)5AAJ6$QR<63d#bnE{Q3Lk+w9+$Z?|vmD}Ddtc74rv;l}m$e?Gl`Z)aEY
ze}4S5>94+O|0=0WiLts|6qq`@pyHHQ`1D=I?4Q3p|GZ`2S^X=PQ<G;cU-t9$?&Hr>
z`K`KU_Z6w0KXzQYl)d*^?^LGE@oQ7BKIZ-XdhLOqk*tq5{#Mp*`CBQmSFzo;SNGwp
z<*myr!);z<Jic}(qdfjykM5<9i_cy9s?v7B)5Ln+pNLbte(|12_jHkqX=`QYe5|jp
z5OJ05p3RIaw_c^#?mzPLL-?WvQT;m4?_In9>ujH%#*4lwpY~Y0JH_lymjAx;SNr$P
zQfF=dywZ%f&cD85&b(l|mlLBW2Zo)uoL}HQv)NnY1Y66b2^U#|uc=Sb+U|2Q*Y9`K
z>@$&NUcXsBNe6B;vAHC^V8#!|V`um`s06(94SH*`Y2uVbu}OOp4^4XBDC_h8km|B}
zr=PO&qD@cg4;`9yayC!jsY<=rno1o`;ewb2yAn3CXeeCYBALa&uw04Z!%8j&nX}4&
z7aA58WK_*O`6jgXoOtb9?&(ZX@4J_EKl3rU8hf^VRrk_7rBi|{XHD3*dVZj5aMJ#i
z<#GBgQd^zne^jXRW)*L}G|Qtcb?($i8CC7t38!NBooHvbo29kW{B8M-YhKUq6iDw?
zc`<W+@rrxf%6zk*uUk1U|NH&-1<%(#>zj5bWLw*(NltY})phf2*XwT*pZDH(zxK4l
zS9Nx8+NGz%ZN7hdl*94AZ{7s2Px8sTY3VgBO>BQj+BwDb^N#$`F?OF+J@?Yhq&W%q
zRzF~s>?jUV`LOcEZjH*2)Z34AG~Gp4ZZ}X*ty?!~t$(ne+=D5T*sW5ZG&<Qy+d96Q
zZ0Ol1)7&0omBtxaE@1TZ(ZPFDx{FifJZzWFmN_dGJHx23?~LP|>5p1IdGFx7Y~Vbx
zzoE0Ob+d}i155UPu?rG=)lH;(IDKu}?05Rjf96{M>(Wj0x35z_e4G?_O#6$`wOcc1
zC%nB=vu>8&y{|nrip{~dkG<dWv16Ox^_VNyWpmGlXRj{bGjq-&cRBv;Jbd5WerUdA
z?N-@vV*R-<kKfFAZ62~BbJgj|+bovfkA7yDRxc^F_}%G%=)Y^<OV&K>Np|+zqO<7r
z@}A^1`qf|4y&aQd<9biB8S^z3_TICeqQ`Xj)!O3XQ;Ih(|Jt>u)ac6u2ls52*9K2H
zIyBt_d@P%~ioE?7eY99HktgNHB;%BMlVV$h&R8Cje<s5xTO>1E<+;F;@cGJp?FO6G
z#A?r2PP?zCz{@qEgi9>u*wXp3`D#9%?hiCydT=N3z2x$_?k@Uljk2)AcS+&BFM{JO
zUe9PY_2aT#b!y-8d+$z({(U7{cH5;mH-E<B4X<aMo;3H=&vWm5Gqv-+_j2)x*519l
z<bA;EvfHoE?o;A0-TlSvr`cjf=dEXs6c_B>Z}!c*cIs>96rH_`k6&NEooVOvQ`0t2
z{ZKz+E#I~1KNa6Bt?#Uv-5*_jD(t|egfDj+MWbsE%oi<tf6Huj^Vgd<_a1HXI~Hn}
z9rkX<wyWNj+?S`6U(b5>YwN83ElPc=$z1E(#jI{jdG_P2?%Dg#`?+56J<rdcTXep5
zwaI(M$5olLf4`e|ZEx0frR(pPIJdUj$VI5gM;wujOsP9pG&l5I`NcZzZ)<nWt36dd
zxAuF;x3>rDlKI*{ZunjDqVrnD#s_=eg!6ExM$WY8*t>D}k+*y9C4Tw!*K2v`ecn_4
z=NP{!%zpkwVsXEbOv_!v-_;*CPPDhmUjNkU+@!nzz2>}G^Y%^p8^2i&tTUdaTAzF0
zu<~z?=eByiU{$R`Tk}7Qe~Mr6RF%5@S9Q`xHqUg^$@%+kh(1q}HToWywK|UZ;JsfN
z;zsF`&2#_$sS#he{pRaMl`+;~cPsCCXKkEyhR<%|-pe=Yewk#Q_@}`Xmw&XN=Gda@
zdWDYp9}~<!**|+MYy9;|PRjSq69XUaX{x@rTVg@`dhN9G7iHJenVq{|20Z(h^0esp
z&I2plr@ns4|6#u6|ML~sj^zK%JkM``b+YKmx7ph-KDs9tmSn5F^{U9aj6c#57awe%
zQ2xkvPm)pXa?bD++s|(^w*BTg8}=tY&m!*bYxU#p))#VgKMM9;U&R-0Z(F_c?XS{c
z=jE&3OB6q@irAg^s&{jZfr+Kpzlv*B-*UgcyyV8zow-s)f9;|&-CfJCZ+YSwxiR>4
zcjDg6HxBDd53OY_JN4<sm%D!iK3tFAbUD8`<=EWF*!jXai{?D2h~zKczvt|)c)N3D
zYq(M|x<*EZM0Sk~4GhguXZ6Ekvt!H!YWMGVI?sA~lADxg0?)l(;a>t7vs-Q2Rpnbs
zGcJq#`dsaH>F&kS>zb{0qSfDjdAWbCOj|OQH{|yG!XGg=Gou!5zj{6D%*Vu^xrJ)#
z!b$6{8LK-_Y)V}-d;R<A+wEuQ3n;#3p1k}<&)Ln|=dMO-O*}Hg{L{~;Q(rc(m!JMR
zJjL$czK*DMDNZL`-_%9qoK>9p)BegozC$iWVjo+loOPD}Ep%f}ij1nVm_p<;C%0>o
zl0W(a*901@(_2&I7uLThw&t4*ORt%*_{24);?u3gW-ff|7|U@tX-eVcZ<|hS<nUT_
zRB5JW`GSgdYOIA~UN0n?GFCYr^0HA;jhX23!|?L~pGBo5F-0Y>pQs3XKM6m6lH<Th
zLEmRA&Fxo@7R(9!c3|NYO_zg0fe$qK4;np}J7f7+W@0b5o!=dfw#b<pA{{ZUFK6aB
zDi?AEM@qGQI=Zr^srOge(!d{=7hPiFV2#}sohuRXVe<dB##R%btqzQ0ii*-_mnSMD
zdhKzP__SbcFw2&%zCbnUE003YNvj!txz+p1MC9V6i$Y~SW{a-uIID8Y<@Jv(8R;uF
zlueMj8aMlH|FkO_Tl3GK%xFFz`8v$v@?vYZ$SZqIFR$Glz`b~PDEA8M-#cn1884ji
zZ|dR%+3n6MrxZWc2-wK8oM%7y|Gz@T=082>mv(bH$}pZd%JO~E`?#15?^-9n$Zpj3
zGu!RL>VBLhzB0T;vW!*4K1#uW#rE;d$mvczTMu3gb2?qk7AtvidE2J1nv<XK#7#(G
znVQQenYMwo@{N|BQ%QTa=2;7?eQw|49_pAZ;NwvEz3ZTVlX|zr<zqYn+vYUi|G_dV
zcjXiLRt}jt3m-49k(%b*Gy6_>iTBkF7Yb6pZCTNKK=N&e!P%SP50X|pvjj&dTJQA!
z#F4c`V3T|N^D8Ruo0_gp*?rK_=eU>kWi>fbH_xf1TMa@zVtv2PSv;$2J!g(ow(pmu
z?Vi^A|AvQWNc;RhmH)u%TKAWC8)E*a<mIi4e_vn2`1JPeyWM|_{tEGmoqfoh_u%a=
z{pa##Q_7!x*l@?<dB*RG>-Q7i#~e}#eXf+@)uW!wV{`9eXL8)Gn!~Rpj(@DPd~)f6
zX01;4qT3&ClpekQv)25}(ewk=*Y7LoSFPT!cKWcN<i3c<xpf|Yy=u~}^G@(fZ@e?>
z@bjntzSixuw^6#hK4ss}uXn{8%8%#Q@3EG>_y6bFJ3DqqubHL3BQ5F6=Hy#J>(=S*
z<o+LE9mQK&@UZs$WW5hJE4*(1GWEOjp!ACWt1BN2UUr`I%f3?)_4K;bla>5w?>@6u
zANm{N?V7#q+o8z+aoNwyY}Z$={kr0YykYgmjrF_AzRbVUH*ekCKT3aD7iMt$!BU18
z8dx9|B0k_%K!ys2#^&Z`_*V*9n46fPlqpNAIBSAKul=yi_rCP(X=<j}tZ6DsbQ8FA
zd7~Lm3g3!a!>7CDLc;|SE)nslC^kotQ;VDgqeTk?*nah{XyDKc5i?))s4wVy;h!nH
z&Mr!lw2xtXar*zB*;|$@s{6O^zxU^Rd!Ns#ez!Bd`uV*n42nk_q&U2tCNEEWdZ*~O
znZ4o%^_I40_aY)s+Z>GR{yTre{=9dWRNt-s7go|I@G#41#@El;;&*mz`ZS?aU~jX4
z@9m~jzoH%~SS|U>w*MHbxz7Ch56#ctsT?V4n!p(Ib<ZiSv+d=6PIZb$`q#KM?3npi
z;P>MV^TTQ%PfGpmwIs{w#zl_RyBDVAJq>)w%6zVr`_cS;8;c@!tDF8Z-4_4wIxeL0
z@t%L%pXjN!-0uEzJ3IT;beCHv#d4Of_;G7j<<`5u*B$w}<-OARPLAZxX$D7J6od|L
z+x0EdpmoZm?9PS)<&%fR63dKa9txZms&btdSo!Zz>toZ$Q|%{oa^^mlw=6F{R5~U3
zPpNLJ!QXBBa{ooGnQ*MuKPIZH!u8*I=2xHdmpXr+U%%w{y_VM|@(JNn^{O`BalYVF
z;q-&O;;PcQ7n5td&!nG;n&&BE;kI|j3K{YBiQztTJyIU2o&H%qMd(BJvBQl5jmgXF
zTx@$EhOeHbICE7<h<uC6QIm-++3SujXa2h2)k)u)6rPzLTqjJI%$PEf>xiEF-iZ>=
zxF-3pQ=5D;<VeV*T^=f*e(KliT|D`I@3BA6)UP}*7Wj2o>(7?M`}ON2I)93P)-O}3
zp8V@!M4tTjzf0xg`scH=3OF75EHyuE^R?x%t81Oj<G*_~O_?zvV(N`m<qy&){nOm5
zG)?-K_sUE5K0OX6q-~pwcJ8kae6ce0zkBV9&o*DPE$XBme%RezKQrX`uiZP&pIjF|
zW#POxLf>2po%B|QWIq0J=1`&Y#Pi?oD+`Cq|1>#PY*lxCq4lcsnOX8reS5dQZ<>CR
zt-|t?+9T<WcRN0>cmMHAqE0q;K9{nJ`0<?rWzySB|D2wfS99yC#{qBWv=#SGt+Y&&
zcT3&EyTm?HEZca-*>&ApBaP<t^laT}AvI5B@|1{Zg{*ZuX6dLDpRfsPpSOqQwrPss
zuTs@}fp%Q$`yKD9wDYJQ{xG{&Fu?zrQkmebkfnu_6@UCyje8fD7c8*3;m^c73yz)n
z?=-u80_%@ZLv!U&(MPK^QdPG-Q!;f^=?h)CO7N9hvw+o%Q!{#v(vn|$Em`IGhu7j>
z@5PwrFI&IV6&_pI;kjVP{b$pg*J>JCa=rZFljG~eS3HsZq5lW<6KiKAu9;DNr8?eU
zvRU$(+L>jqb}PPH=kwpT?%VJ68Fi|;Pb4?Z-}1$At-AKdecSopNK`i3xJC0%^4)*%
zo5oGy>|C|zld}80%4H+FYswz|5nnIhG<~xD!!zzD7SCA6-XNd8*RJhv$DUuO*E|TW
zn0lvqW2Nmq{>OhEdnSsWDSslD#ua&_=D<3&>=SG;kNF}zX1$0mRX?M$Sx{<k+TyiY
zb;q_$G@0Cfaki9R(~qkY|0_OfZPxh7_GB~b+x-%{nz1kBfA;Y_|L5oOG`6Uxbm5!d
zCxlB*yqdRhmeV(@X99;y?sw1KeKG%?+SG>pk~HD!qxl)1|Nj;WE554ISsnXz;R)+g
zT2uZiZ@DmeMepSBiVi*|7KQ0k4*s}N%yi;Kz1|L{`Fb^ds&x|0A1?lIv6<-DTzSOx
zy>-Ln?+(@GZ{D+A;^mxtzWKLJO}x#CudxReuJ7m8E1s+QVW-xvuu{XI>rd8oty{M4
zZ*0w@>G5C6YyU+ydjH~ia-J(^oxe4AjJmadX<6x}qUH4|j_HkuzrKCh@!I6i<~Oqs
ztv|i*lw#`tU(fAN{|odu_0LafUBAA4Xc^ayP5yf6r~l3F-Ld3*%zMKxYWtrRh|Q^y
zo0lp7ylS1r;jdpdH^)p|98k05o5x|nZyrlx`(zBCi|pWI=kvFVDXzHsP`3TM{51<c
z$G45sm>)8)DPfp3zu|+hf-Pgz%Y+9B*B!Sh-jPW=^zd7O`;4=<<ZqncVk1{KC$oOf
z<3hFU`~>~Yj}0^KU*6b0bI<Nf^T}Ixr+wdecjDbU+t@G19OZX9D`v5cb(_eNUGHw^
zlzYyOogJQ=cT48xne_X*_s^Q#;oa)X>+4f2yv4lK#BlQaH9Iyx^ZWL#sU!P%?uNX#
z2G=M3YFI5N9<(`QPIllyj&LK3Q@?)7rb^jIt39~n+_Cz=`>m{pV~o>9cBMVIJSkc4
zaiDDsUvwEWcU758wP`hTHTUWAsV+}e3AyP>8t!t?^OO%M*d(DQx+Avr#80n3@AMmj
zSz}p!oEdKvtw~jzaH_{LNRm0Do=I=I$ONq=p+f4Ok(W|dx@ArB^Az+Hc8HjM<eI<?
zp9KfmY-AdrUAwf>F8-pRZ;Dr7M6iu9&+Ef4gRkz6F;do9!#{bGr19x!#&=xzwq**>
zoDd~)Zf2Am-%_^jL!mb&Ty<kM>sh(h%t$d}|B||s^0lv6FS12G`h4~JYTbb9eHwk+
z4utn-s2p~@5&6kI=>6(=rmtVUJ^l){TtA$XbN%|{s+Q}_d<XOQ>L1%bUD(e+TJmA8
zXGzzuDw**9npeq}Ez^X5o|<Xg^fYy8@bW1l{{Ie}hKt9VuKaMS>Cc`{;dK+3AM4AT
zm-)PV5vc6WbNZZ=l}ho213h0%-ZS){o|t0f#b#*excn-I&GAKT#&;7rp0?jC*}ihY
zrP=S->c~ubd9FO<Iq%6c-^E%Ez4I!UkdJBGzF|Vso~eoDCuGkZa8H%75Sccy=-+vb
zry5J|cyqdaw=nXXd+0Ua{)x{^<r^hcpWe7P<vH)J!^<;&OWk^Al;M7)?|8HFijY-X
z-u)>!c<I%#KLV#X)^>6V?-hQo%5zd>EobKqpCe%v6Si*?W$R_XmB9A2KQpx>EM)o7
zsn5d0TD!eZef~U0&cDFp*^&(hXS)ZcD5Xk@skNC-Sz5hN%j)Kfyt6s^dp>teJj40(
zOX$5-r?O8U&Qa~lP5CS>{hniX&BKHD_I!9Sdw;=$Q_owC_g4N)oPTO-hSz%=SMlX7
z2J`d2?2_KEzC-5y%luzXTFC;6cYoLh&Q45Al2|sS`pW6ZhFy6%Iv;j%OF8#Wbywmo
z-jla@`^M1q`D@O)mrj0Elv1ej>4{tVjtA!wZb+<ZGdy_X61Rbtj;itL!#-7wyLSmG
z{t*9A<z{WWEN<4LcxT}dZY_4xHy>AI-KcO;E8ZZSr=i*Z?QTlnXNHNJYYrL+2N}M#
zmB^mimHFb}7e4;Vi`!g-PfH(;`L#qV-!7{rIdw-OU)A-}Rf&6!ZFtDO{P^t*vz^78
zo2Fdr4f0(T^@8uS<O&rTQG15JZnBp$mdGCFzj!=T-ETso(eC2CC)}P+et(w#=)Y-U
z))N|~MPpxdTRCZ_HQD5xm}=PlbbDI*!;Vt|QaOoorfC+Zqow^m`Mub>PAlx+p_NZd
zJaQr|GLt0AUu_J0xTZ!b{N0~p0dXNLQ>SlV@<-#NyH#}i?z!xLBShFgha}y0`Le0!
z&#_OdekO*Wh-@u1zaRVVOUqM*D2o@m^Tk8%@|l|Kxa>Js*`s#pmOUA(wy#KW`}tyq
zLSW0OKJWAYC8hIs8lPg5HCW?&eb;G8b!itTWu`l<H$M1Dw13~A7TX=KtTo3e^!QwZ
zbFqAWmz;Z(TG)4ge*X06iz{{V%l5qT6Xveo!I>rg(?_SIs%hQ}Q<ce%Tdd6t`!1~V
z(tYEYILoO;>~C?BBJ0XsB`f^i>wQ1>nQiaYcN=12)~)jm<-c0wBN_7CFna2@mq#_Z
zue#WDynDIg6p#3iIUi5)#(VzWd-u@%h3a1y-0h9Zzj(!M>Q3`x6Bay~e8{CVv2x2v
z&*Da@3vp-m<|((C@V+<}78%yOv#RIR5$lz!E?RG`ky(1IrfBu&v{F4|&89_{IGv3Q
zxB6y$J@ji*nB+D8xq)?8JUXgtGk-F&&FEandGMOak*V^f!b-s!i=9Fj+$vlxJN2^2
zn+(mppI7*JFdr=varj-_&#=%Sm!Idv9c3Spv>v}Hj_Q*N`^~>|h#x&+dqw8ZmcR(+
z){CpBuF#q+ToI}LI>lDm=v-KUBfG#S6#*TQla96>4|0wM25bD_Fxm1dC4I^wM$IQH
zT3(;l^NTfG{Aa<u2L@HgJJnW*@9JJG)!Vy%YEAjnR~p+{R!ciOn%?N(^=+;fV0&L-
zGP&G_OX<a7&U=-@hdS?ZYS}C}#2mO|!6DNZixQY+?lo-F=WBk&XnfjMV$!=O;d6VB
zU0$`cX-0@tZ#<u4Z|Fp`cV2nco+`pmdG4ri>uA3!H$LFy!+Q7yYg>WM_lkh;``Fwv
z-(4_}`lrD8BmL~B-#$m)ub*MNI%(QTo}Fs@?>~{B#;dmer17ETPqS^p(vI)-vV5~k
zyv$H>smQl`%+*sbRbL5Svb^B=MxpX@op<NhPl<&se#o)>a?zh{Gi4^^8apa6$`_{m
z3^{iq>S>PT!|j`X)V^vJk4so@U-{`!t-AlJJq4@J`ybr9xJ}}uPvsI0m-8E#)~o6q
z&oo_gb4j{ZQ`O#S>6S@+vlrgrNk4NTuPx2E<;;ZKHXY+Li>^O9sxm?N!}Cj(Z!Plq
z)}5L&ySx7ICfhabUz_6Dr5u~j_B6&^t}k8x=d`!a1Lw4q?>@2n6LWjMxOj2p1h4vW
z^?6v~f4`Z#+$7oZpZXs!eRt^Yn{bV#9yv?wS57;w617Ea`DEjaw>$nniI{GEaZ--w
zI*zDII%iBLvEKM7_TWS26J4>s=VwJ7V;VLqotU{Yi%CQ2WNAv^-*BC87xR2x-8kOk
z<Sg|3>kON`;7JQ!@*CJVzkS-Xg-4#})#Tq^KfmRCFKjz7(^|SrHfZ17ZIPC2;W>Nv
zyZr3l#QW;|_5;h7-+vUYU7dG#@~je(b63v>s$D5}|F^gx`-<_AS0~K>6xDIRbWc0I
zbIrxP%dbkzlxM%0@<w4(+3urG1+Fp|rM7O`7IAw`?!~eU>#b3?-SK-H$~NwpYrq}e
zvd=}lc%$fciJ0RINk&5ZZzqXcyl*f`YnfP4WU)I<V?*EiDclNwnc41c&)xMY>3H?2
z)%*u}4ITzdR6P|wZ1?1x=6j9jQ}Uj=di+1UFg-f)(((W%?Zf^3(T?E@Zn-+{R+$uJ
z*?#?{2h(cBIdM<6iXK#xoA)Hqa(`pGirf~R^N){BSk1xYT_Wn3RMs8x#qOAc`lqj6
zb~@3#%>Hk7cS{#NlrwvNX!e`Ge_rLD+QBaJdfN<^^2IDxMnQh8^TfCG98goa)Y))7
zBcNGon)z<wt0$|{y5=)$)bm(t>m-~N-)DR5U-$1#zu$a&Ctf&fo@v@c9-G3GY+}|;
zY|{^{^6u!8?=}jJ34IWId`9?>tIy{ezGPuLu69^t=V`}Dy^9oYPI#0eEW;_}mM@fT
zV7~moE|C?!jjKdXcsD-OeBjr(M8sk?ldNWe4Qp9Yld7hGS7VS@f)Aq}t5}WW#azt|
zGZ%Hg{9)2{_~84XE4wWA1uhHhlYTit!+ME)>z=s&$D*lIZXABGQqIlQw&&dP1LC%O
zq=Fh&F8zF3Q((cT$s!ld^rf|SMdWqFm2T8-+appaDBP~XxZD20@<7kfUfq!G$4zQP
zZXM!|d=MnOT>e*ctxXTp{?7T!q|E|u*6hAya_ewW^|`~oo-dokHv7N*{^nlpS@!3#
z6K;O9?arFFGvL(veG^Q-_5Ev>+q5ZvZgi{UMxn)@n0vgKJ=Ja&Z3@*E)Gb`5lEmlv
z#De{qfH!mOlQrG%ZqH6r?Ck6|*nG@m&&3<375}jBp88n6X2qT#99Is8${*O_9v|?p
z_?h)nbq||bv5IF;`=@SWPTcWKcm~sUyJa>%IWrW3f3E(t_tWA}T0D*?t2QMt8BQ$R
z6z!Dnufk*)t@5SJ(<$Hl$ez1QKg_cW<NMF=oxz_G=RE)XLlZaC={pUl&V3*9OeWdO
zkNNsKnZUf}tqPmv_kHsH?*8YMzhA@3-?3kwM^4P&F+afE`9$m{Z~vg+J!Xy1b2TLt
zoxi$uuPvBlw<3N;x~gqOzstE3J2|vI_<cV2ebXZGFLUml{`UI$I*w~goD4*}f($1F
zIUZmB?Qz5zPDRFL923+V4n)21E}FLL-V1M`+poNeuBtp-Ja<QmZ}^kK&5M6E+owP2
zc<4N{(fOv};mbCbW~L`A**UJMeChpX^L=tr(9d1Ft4{@%A3wNwJ=?2o?Q*qyvREP-
zEf@coU-q^-;d|pB)AR8gt6ny(t}VFD{r>QuFQMjZqbAztmbB^qT{Y9(zwXhGy=&*)
zs@!TCKBf1or{4V;m#nUw4%p%|`<Ga2&QIq9THc3WZFx02zV>}XWh-A-i9r~9rj^C|
z=If;&rtryKIp`+4<`QG@!se4!C$2RvX=YtsQV_(RC~NXkq08Zgq^!%s-=!Ov_s&?9
zBD<sHf@>hhnuP}#Sd(~7xddgcGOjr?9~FJA(5+;pF+I(yRHU!xj+MbHg+<MNvND$)
zxm7s=LmH9-InKWER{ngQxvJ1+(Ich4Un(Duzqua$DPm?{`c#p3W$#XZ_Ir0a*W`2F
zaSxfEsI3z`i<V_vDR4VCE9F7tq=u8%A8m8mEqW*{W1~~EXx3VRO->s{1@38kn>hR}
zWIV<1aayABtjg?Hnpw^dX7tO2PublkYyazw_4#{i|IFC(>5TH5SKPkm_ZEFh-S+Cx
z-TuE@#Flo}tkQWRHFf<)sg0+s+SDVmY$aKm7P3w;?(Qxq+&#55`&z;pzvCV&pZ{~%
zqavN0I;;9aLhrGx32mF!r1(B7YFldM)wI&g+%S`EI*X~4FsFor(G`gqF4qzaW(svT
z&oI5z)qLdIJZ4?X3v-xxEdypVFO<<Z*O<fA<2~V`i%9iP{bR3GwWWSG?wcqS>UgZ<
zp)#`p)9aaxO=$}x4<GRoH9DnL!JfkNMJgfTT*VF<6SiZsQkviLzy0&0{r}3?Z8d?9
zE;}dQ?KIbyp8e+J?oC<C6rRY>GyLB8pnLT-u@IHFcQ>u}eQ<2u>e)tzSu*{%YB_g%
zn`}MSx^=tGj^tzWm$O-_?OwISW=YuG*8$cm|8OR`|2;Fw{p)jHwu_BdW23C{lh%Z#
z7hcE`@obv<QG{JG+CrA;<%KzXTVFo#YWJ`zS>{}A<zl;Po!+ir5$jI(o-SEcEou9z
zYeU0lx3g<M_8(VCOndog?G@e~Axra)I@jIdcq@F%Lht#*+4n2n%}g!-lll19+Sk*K
znRmr=pDSJKC9!k1^;(wo8tdmRtXW+1?dQk7^Y^O%hJFd!V)9!mYM1;>{pZ4lCzUcx
z!~|DcEZ%GJ!YgX(l_@1!o!(wXQg@x!UQj6En9JnPvRGiz-7=Ppk~In{DjSX-WSPA>
z-*vmigY&N@zr7N(oXhr9-hm^>X7r@h=TBok{fl?u)k})m&+nbzeXhjrQ{bsU=eviN
zv85e|w_cThA-`HEeu|huWA^*MZIah*S4SmDdEIZFwqYV;$}Ah5sS-SsVkfkitiPG1
zpd#q)TV5E@9hzA<VXNtK$6To$mlP(k&CR?pPi(VejBLA}_>1hOFEbWonSJ9A$xZmq
z*D821)n)DkS;wh-<!f)uJZzAAC@$>s-|ea8Ik`D|>i>SKy=tt$DRbt8Wbw4a#m{wj
zyGu=vVcR-izW)0a_Vs(Dx=q=m4)mDq@YLHdgT>e0=}T2_XQ>)DQ{wN5j~3kEzc2Xr
z*GrX?VKP&CtU|V(yY{f9Onr;R!ke7Dek)3yPH-OeJ5hG%(fs3K{=HJ!v7eZ_q*_0^
z<UTTsWDlC1q42WtZ2g@p`PEA|y*~5v^Z%y)`BP@SkJugBAV0q*x9s5rj)R+2CBIzQ
z|J&02adQ2={i3y6qQ1s`^}P|ElVfzF92nX*7KNS5^PX_QlOvIJ%6%n|gwmUv-%Nfh
z`YrUE+HbzyZkyOQ?7UxhrtZ|e)ayNK4~PjL-6%Ba-SUEadj6}mwwz`*=Dzzar~1UX
zQ)Oq=?C!5QI%C6C8Q<^AH{McXzs#w-t7y^7Th@pAIgiXw-o9#^H|u^;v2A}}`e*0q
zOW7TnZU0a9+lN-qHxIX(=f^F+zd(1#wxYGK(odCa>(*Avs|kNMVYN-!+;c}aHv6bu
zJDQfh)Z<%=igQp^p~1<F52Eb6W{VEW9B4e?sN_DU$ZW^vhbd+)n-|X9=4mD}|5V$z
zoEdX?b#Gpnz#Dl}A@%UgoEHmt-@dsaz5M%I7xNXH7l*p;+5gQ@^{!qjPlz|?_RJP@
zyAyv7yZ3$K4x2ntR!&>%!n4PXp4qj#{|i484OKAu$85g$&z05rSKiB=%Gi0lrMWeG
zx!JmyTOr%l1<c&HQE}%AmSgW$$8kgztTKIl=+wX2rpn)ADgq_X?%VS2#-4C1`^tT<
zG`IYZ&3Kq^s3ohS(!X_<PukjJYhF&zaoiA@oa)-+n(S`0Mcvs#^4x`L6-lef3&n3t
z;@Nug!f(HIuTM_ep~(MbE$h{ij-U6lyL7$QF1gC9A7l4yVf5jCyTZs7IajZ3GrRL;
z&Az;M|IBx9o>5zBdie17()ADhHMI5PCpTQX+n|5qbYR?9m!yd?Cm!wVe!Rsh?$H{D
zWsjnbq9PL(Uh>PHExSc)<=d5^=Z-8~wDfpN*Q~Fbl@^;{JuW-*LNo7_3g_r(>G$_`
ze~(c&fA+__rE||ppZ=36D)y(Ug(Lb7KS?w4p2T*4om6_rhRJCbX>Dr80>)QLW-e?l
zV@m6ok+@&7>_Q%|?e_^U3)^q=EI%V)=FDx&JkzIh^NSy+3w^|0=PwcI$TM(WD;bkE
zLF2f_D~VKD*`5P_R@v1uh4+J2y}PtGaK2KvQ*xx%HrcnGVWE~8-tQY4mEUZ+bMwNs
zoSUYWk3YM+mzpMLFV0&W5OLOa&9&&>h@+cUY;@V=8Mol7W0PK^a%ZR1l{qWQI~gYi
z=8Gn|JW(;c6{GrT=OUZZs6>w(f#@S;8=ar{cRC!&n6JDj@Qm0~iA}*a%CAg#GK4JF
zwunv<6_(kzo%vhLjI~xP`QMjbOqrY<)Ze#A?-e6&sn^#Z9cRPdMP9$gCck&FPm}q>
z+tO1*|8#Os?OBmhdS5=4xo3G;yw>ywd2^2&^+ufkaN*=(O}Sf@9G^Edx3kG*RTg|y
z_*kIEclD67G5_Ssjth#iBKN%iWVb2W>PgzNWaK>dpCjFOY0dG!`^E43G$$^VDdbT*
zviS3kvr%)SpYPUT-5lF>F;OW@d-*l%s*{VAuW8AP{8U)*FNZ@<-ox(4E%xO#AExlj
zeLm<WzvhEuFaNQioJIWWoWHyl**kwx_|Jr8{A#}sylnn#_hM0VHK*Bc1*^t>p#nRL
z*UkM^5sz=Yt~HpmsNm6^&m!&0(JxM@PALuS&*&^^dhx}2WlZ3j$wqHKFkbd^H~X!y
z#7jlpX&e8h8i$&TckUG16r2=v_@j8>(T>;C&v8`UyJ%_Ic(3N&?g}4)QnjkE34-2v
zhuMGqHZF@|y_Lkz)%PgUXzjLt-<l7}B}BBzZhqP(>Z|$Yg=}~Ez4C;px0kk_*=MiM
zy}cwqX}Mjo<oub|JL`_kd7aNATM>J<t3Gbq|NXnnUDuwg>kji*oBGLOv)^=wNy3xn
zYNknV{CL!HcHk7Dj;+e9w}KVsFr~k+oy0Wv%Mo?1oQ3XAwU^7!bNybD?;mrkFJ5w;
z%J)s#&Dq<w9GzzLZ{bsRxwLPw`%8W_z7>DEe(u$(Mb_F;>_sazikZTiT8ox-&eORe
z8^x^qZ_<@V>yI*?<;y5=Sj+6hr%~pxjoHt#YV#NUIc*X%Bc6WWT=o0T_T~GV_3MAV
zntA)*p4!XDe*fDv@#XrOg*B{l28K7irhm$O{#yP2@4x(3FX9|$_Dzayu5L9cUH?Eq
zZ*tgb<y}1`!s)4ts#*6gDK%n#5vRz^Bza*D<5tNBry4xiO41yvkNw{k-qHD5(%?e&
z>(Hg9d8-o|x~p4@FITMdy}`a?|M`3SeodV|Z|Bc#ucxzL%imkux+}6G>bJP+3GV*B
zCv9i9hnx*MrzKzi?N3i~^!@6$M_1$<ybe4s_GoHv6YKpKzf@MMs7P39&f@v|ad+r`
ztqxr+_bs<J&3Y&FEyFVY?Vl}QwiIslzWK4V*Zq?DIsXcahdZ~u6Si>H3;WW(;nJ?2
zkdu#14l!hR1*;s%6t#$7X3<b;5i_G)#?6c|zG=<b8JsG@=Te-~)wZNNX$VUhDNf+z
zc&|I3Bdj^=<%QLo_qFQZHU1{EHznm@KwDUQZGT+Ew+r@GalcP&K78-ahQ!dHbFT8Q
z+xu$nb-sO*KZ$3)T{L@o%K3f!GoLPTd~o~MiQL&Sm%Y8bpFVoR*t{+>WY5ooQ>7<!
zt>3vS<He5CmoDq=Ulmjvyi+M%;!FG_mrqYk3^g?aCzZ&gZ<eaib&FWEgxPlW<gG%<
zVYQ16UMs#d=MS6MhkrMII=|6fsi!1+-N0#Ww)@Yub*s&HNv7#>pZoQC?Q1uk>ZhXh
z3cCKUf6S|_ep`Nj?OVOy&IcBF6-sCQ%Fg3CR@rKEZOv6Rr~OCjH2sYI58qLXDs}l~
z-)8yoyY3|)&%nRO%KXnbM9oO+F*rEGd+{9U%eJk@EQ9MGe(Wwhx2M%8+ra!Ud#YbU
z_S5+d9J^SiCAP4b1+Gt4VO+!iDsiryQ<cV5=Xr8>syr?+FK_p=-Lty=aaG2}!_W4a
z1U9h-mWYV=|L8chxF$Vl$7cN+<yP^)ibZm6ijThT>p1XUc**o<K}^dc_#>+fUNyc*
zW{(uz|FP-k*UvBG?dE-B|5KQ_a9OJU4_&ir2A#UJd)23W-`?+9yqCG&qHWnk_Psxj
zFFo^_Md|LV`hr#emva7o!XYr(?SePkwMCaVtC@MO_B>>}G>p45N;`YQhA7T2YGKK-
zS<OGqG`2c6^19tpILl@xU0`OB-Ms(ygB`qlHxicfZoBc|o7^cIubC;#2idsmHylhi
ztGLdlfAfLB(&+SN)hQ<P7GF60Q6P2u%uUf5&6&1uCOqJl-s8XAWtMx+hJ$RSW(@*!
zjeEIFf9cG9W%2Z)!*k6|j%KclZJukca!u{G-Xm(ZV$q%U+Xph6jZ2!Y=U#AI9`C-9
zQQE*wT0mM?QLFB&#isW=AFqku^6~uHpPTPXzsWjiSv~Ef{ppl#&o9XvSJzj5*u>0x
zz%AG2)T8Yy%=1o1pI`FQsIN>TU)Excr*ZAKJI2nj`7JZw-E}T2Phb4dwy!pPqw((F
zw!0_#9`o3etWwu{&A%mRXRAuq#v5$C2aI)2XP0#DSrM;(C`j<rk)VxUDehep)S7%*
z{95E)emJ#!ceT(vq$pUns$_e2w97Ux7RN1}l1YsgzE8wCzN}1&6S^<{UoUHWmwXqG
zkZ<r!CmV0kzs)D)!X;BWlnWKiv-bVysOUIU8ThyV_{8b{8fQck8#oqB`+6o)@JiMG
z;FQ}g83MC69!a0xAt1Ccge9`&q0$c51<vd<WLtHW3|{R>+87qHlJyQN!}3JQwF-jA
zyqZs}X%krS{O>-C?$9e&f4)jPe)IXIUl&r|@$Q><$b&s@?ylqjb6&Zp$xjzQPuyDc
z{d5Au-_@t?7FgS?ePTGX$8jbnfBF2kb<t;E>@k|Vx53Qu`ebFXwJApGFG|*=Jd-I~
z>$>TNuT^J4p8C0JnGMGny=4NPHfZwAc+l{W&!f2ECSwotIjaZU?0eoeJeR%jtYI!+
z$MX4N?)i(qa4%xM*Dt-{sX6<R%g#3#BWC569oRMR(wZxbm0PTetrbo(&S5@hvEVY}
zd#eXK9+zM2e^qkAIO)sYBYUUpP1$SnICI7$xws>YZ*5(bnKsKLEIDv5UMS>p=pUzq
zZ@j1Hr@rj+KK#Sn{mUN?f!~aVdfw3m@jdy!7!nkcpY}NNusypGYANYle%4Sk`1bw>
zyy2T!m(LG%5}uwDv`qb;;-43f-AdjY^89hBnV4z&|I*{#%m3M%bnaZug}IQ;(2&@b
zY{mwt3)x}=ee;_Qc=kRIe{w@sOTl5cC&$Xg&W|?m_r7DlaHM3STZdECgZuh7woDe|
zT=V_S_ne!7S7oFZ+xFYt(TM)A=enEajAN@OA2?rdu}!r}Tla6*2mSVQn>0FZSAH@u
zStcJDSRhy1_U6gmGv@53^By+3zV~aq{Opi<e5iCm_116fYzFg=uhH4rWM{Fr?Au!D
z26v6yW?E0S&fo6%IlOR|Zb75P!ptWt5;o0cxjvJ7gA(8RC$6$_Cl9Qs|Jf7rJg%;s
zZ}sH>+2cxjf_nw3+@{C~E@gL_=`dyT`&S$G|Gay<yGN%?ViU{OM-xw|D-|ncZ|d({
zGr?!2w^wqi>BN+2lf36$vbo8to9N#AY0e3*!*OdiUn)6mHEr|Va^3akT1_A47VbZt
zwzcv1&$k)kS3mt_vMHb2fi?7vuq^>HFf}#8zXZt4!q5_B3D8zc?uwABGoSD8uk?R+
zM#XsMNrqx|XCWscwI_}hEJd6r1$bs{PwSY$$tfXnB7><ZR5&VHv7>W!?%iJv?>4PH
z>Yn$(v8E~e>N_shRokMr9&y?lyY+$C8rGup|HWxSP5s|)zrVeAa`k!3|G)n{|Np<3
zPe7qXWuo1o$sWGW8y;M%<-aDt^j*>9(!PDVr|i}5v^>@?sLB7mOs=x_^;df}m+S5v
z+jD)t-1@qGt8K3Ok*+Vl)Ys>}?h~>$RDP@e<JP*^?Wt$CoagvtHC=epAJ6Bnw(%XF
zRlGVdj^$&qmEeRY&-x$i5IKI?`ug{rchW}b0*AsCb#|>=y*Ky4L)o7zUi%%f-@Pxl
zIOK(3_lA1L_)6#7zpgUAcG6co#Bu4r(ygm^og4nJ{SMZ!t9tw@<9~l^I)|augfxLf
z8$*G<865K6iSy6>&03+}`y^|T^NwDYjqX2A|8!GQn5O=wGc)Abb(<6G<CnxbPMh&9
z?u148=9S%^bN<AfRhaX9{`0$Wp;xu0&QrBxcK(wTnXZ;wf7@d9XQyMWhQZSpGG(qT
z{n#$<w)1(XN5>E4jRK*Sk9F&N);yG9n`r!+wa?D!PG9%=H%EEBJ9_q?eIy+vEAmSG
z;69E<=Hvgn>}KfaEuE*h^NQ9pey1Rzc^(HXAMcu^`GS33#DpxnNGGQ!MM2$iS<0(V
zT3vEAn{@Vt^`rw~lb$YVGf~(!H^$p+g0!Yt*`wKY@gL9szq|SWY5NHu#2e*(f7u>?
zd+5Goo#1a<tu3{W-?tPvoe|%!U8hyIDtc*f>GH34f4P5s|MI@^6REs}{p!8Oz1h9L
zZ@w=+!{Nl&Kl{+x-}SSfs`782zUNba#*ym<Z!@Y@Z|coaxV^ux?NDj;V~e{smpn4n
zf2>#G7MTBQ|5o1@YD+t}iT#w$oPFVH$9slf9QSW%9E!AW+|7LdR-xpd5SjAHT631O
zKioSxMLlt`x^~7BF1I)KUjJ+_vK#fj@%&<WW;wf1?L3i*=Un0yj5{X1=vL`XQTt`(
zR9s{k=^d81<$g!t-R8e~J=szZr9I3~DnGS|bGJG%wP%ra<8)8PnXYM%XaC>&=Ti$y
zrPHA#nXSrq+1><aNiIG8zoB~0_xHh}3QwYHG@qp{dF2qpzBuP_oOO`<NwFtOL*{Z!
zy(s@yW%gMm`SgkXdzX4#ZT(StLO*#~xnk{<TK%j!ty4=*+D{2<*!!zRG_=QBy!!Nn
z{d3O<@prbLSQ{~QP73$$I4Rf98?_$(6JGrL<dXGep>Njzzuobt+JBE)GrQ4=$WI?t
zj(9)e{iG9ka^lG=6IgFHaPspNO`4;)zHV~Jl;_P)*Hq7uIUG~_O8SMxCu0fa{ATvo
z@9W+KMo3nQhb^+L7Th-RO$+x1);}rcHzdl;PWdrC%udm5k&twK{Z4i9leiTMi<8XX
zJ07`xNBaq5op$+*J$+{J2j99x@O<CuJb{0zgt7mm`38)4SL8oW>D=M^hkIv#&2y!Y
znwaXiJ66;D-)ua8^750)XN_@}{FQ`#mOngi;9+iOaNx(ReHMLJ!arUw^e?`1!`5Pl
z^x|iEm)PDbeD$mQIDOgX@Yk2D996zAVBOztwMJNf(($e7TWp_586DZ{I_pIJ<oO(a
zvhTG2upc=#S9#JmwLjr+^ix`H-t#+?9P}n9W=5X&ywo{|tWDaI)@ST}asBX<{TF}d
zH>MW~SIH#EWj48XZj%w|v6I(1cCGy_QxL~zK|As2Ba>}^*f#64v!*DXaXXXqgePfs
zfAXzoPG-Muw8uBhw3yfUK__Rk&|@E&^n}?HCD;|_UR<tz%(CP7v@1<=ucIc^wmSr5
zJ$xY{Bf)%IqGT@nQ=2byWo>r<*7<+)Wm5cyFY7MHyw>EHmt}jkH0%1x$IFlK`=8op
zKPTqJ@4p$--Zx)t-S?RLe*Uk!PWw9d%>Qrq-R^_$!J}z2+BW6>nEEmF!_#E;#92m%
zja9$M$$w5X_!p+}b??vBn)}S_roQZ!k+v!OAmJ}9YQ6aQk%>1Bs`phcYuix!X~s#Z
zNp1(0dK5f3>NEdS%(q7qKE+gpWw8A5dUlz+Hic!ETSChIlkE@vzMcL(_3y;L7yl~d
zvVE7oTVJ)`f@xFITBl3PmHZ2L&$s#|(mrYGlI3SS6*J>s+vUyB{~Ea2b_vgYshUco
zhIog^wx@c|yqS=%VG&}sCEH^{n}eiRQR|81i^qS-J$9G3{wwkQ#J!~Z_ip*zH-6B%
zrFQ9+MO&6T{*v_IKm2(D*U25#KM$<D6Bhld`o%w;`^`V>8uIV|Er`!(%fHUwUU%ZJ
zLdcuR@4ntWeb;!#%iqg)8`oZH$X}4XpgbYLVbAMxKJjP$CcI;L>bF^B(zf5*itqnk
z_aytT-kG0eFK*vayRujCzFf`jk7xUu%(-er&bd6ievyB1OuERSK#9(|VR^lataqD*
z{4{Ifs>yti_eo^e>@Te~lliVM<*ig+%epTtXMJSg)7@56o@PgGyt!S;<o5KK2N^<z
zrL9c+R(7*@>Oao@p|@|kc(k-b=8TEbK~l?>K32aeyKCRS=&#nVe(sv3xi6~5*7T-n
z%#O5NUUMcsRsAUsqs2b`SmjiAKWNSuam5lQpOv4?c5=<}Et@bahc!r;U1FokGik=(
zcIySDXHRNh>#sFAYH6J3ge40?luJFCFBNL4y*w>Y=Jn{~i5TYv=UJ6{4m>DGcJ}l8
z+F=(KzxGvjbjXcmX(kue2W@w{nj7qSxog_9#D?96?%&-xd5ZtqFm9<?S5I6i*m|o`
zwzcfnhUD2+Z<L-sd^hjiyk5&r_FdH(&KZ|`=f2D9V&a;mkoe%;yLt2EgwkK0d#Psm
zrJLh`{>!u$59iL6YmUfY&|mm!hE&3oyS7t04D*-7n=J~^>abn0u-eIXvP}M+Q<tWR
zK76+9Slh2%w`OVU#mCj&+_Y-x!c$$UPrIjXa^?0GKQrHady3!3H-8?i*>guSa;D^E
z=9hKzt&iqSp0p-y^2G($JDK{kCuy%1SQ+w@=cZg1cL>|9RiBEUdX=ZU#^t3dhy*@0
zi#+Off+r-@Fkpf2ROL;xI(=`e6u5*=C^|b`W2(hcBS%iNol~X+i84mo#t2SuPjHPj
z{#G8wa%j^p&DZhgemSX0M-)EzuFSr<D@r?j)k5F7f2Rs=5o|lVOtn#UwQuFllp86>
z)=5^s>TxKMNYi1u_Ao8vocyuP^QNvkIx}or>58qTXQztB?YEyZeYcM<SBQZ~<RO<;
zyHr=r>zr~u=<-RvN{jCa|K}>3ZPJ>Q{F0+&OYGe>pK?#!J|F(=`~8Ktim$wJXq{F2
z+0*=7&DL4cD^`RQuekE+7pLu2@z5r%v>QxC|0A0&bJqMixpcxd7MJOX*4`Q6Pp7R2
zxc_E(zj@2PcklUGp9It|*`KR&ZM&AjZKl76#k}jc2;KF~njLt=<IPRkIMb5!h^?+x
ziFFo+Uj@|siekMtpGs|ca?Y&fL44!^`|Gc_`(L<HCp>FG^aaZoiTmg8N<7}-6!y^J
z?dt5l{?tvUcK2%>oU`GStH!ej7RPS77}h?X!THUD;rg~+3wOqg8ci!@(YpB9`qq_w
zp%SszFSc^txU%H@-hU4J59^)hZj)Tw6Q=t*LVI<Rnic0xHRFUk0n_TFrB!O>^6kDZ
zYVz6bJWt%gFvKr)fzhNNx39c(V>GhbrKK*%(Ho(owPEoV)vz^17WN%7xq>kpwu<H$
zUQJL~wT$JIdf4<?p*y!NoViiu#+50mTAf?ecU(zOJ*~ZSlYNk&dE+UrE2j3&T*-?M
zyKeg4>9nV#MO$NIHm6%;T55gqajC1~AIlQ%t(`b~Z(S(oW3G1Qv#SrAyt&|fb8(!Q
zOrn@}@7b%ojjwizo-Gz`J0ch|XF}jlRl~-#{1_g#1z9HyPMn=HonLt&FJlphb>pX?
zKdXN6Fz+)fv1q<@Kv#e2?1SFNR$k;Wjb0ZMJ8{9{$KEYnCUd5R^m<>usCX(e=;_z?
zzaQ20r#o+%6VM~W_Ug*2LXl~jE)oWI(?bjezAl~3w)c)qge(WURch$U+-1d!-zCQR
zSv3E&*%;P4y+_TG`>xBM4mtUN^i>OG-lsfY<$bdvDdMZ@mraFR?&NcXn12a=*x2IG
zG)+I4lQ%JS-QCk!jJ_`zuEpJK(>yD*t3Dzy!>Ih+awn$KR==2e&o1qedVO;3z1b`E
z*jKi_I`(N#j85*;!;Ck=%-0;))+T!P`~y*kd)IF4>1WRKTw-HmBU^a({D~F~o>PGu
ztd|;n*h7MB`FMKn{qAw{kUz}DbKuxpWe15f27Jty><%4G+_qzp&oYmL)uq2X1deMw
zzQS~fr!Ydkd1LyFfa#iQfrc7xM-N3>zBdTCVAo>Py}V3$g1Sxu>wANutHvKVzVJ$m
z=FeHguQ_E$>+M;3tMZ~Q{%L7dkp8`BQPHo29XqTGHx@>(oMN4^;EvF{H~lS{n*_!6
zc>gtWHM;KbX>`r#IJamP)9TZq#S7*fI<2ushwpme4moF$n1(mCJHBnzy1<c<`7PP&
zRObJOZ@1Tag=_V=F1`P)-Nxjd=$^*Ejs_1CL-?Pba^DlDFK{q};gdr*i{pw$Jqd=3
zhngRGt!HwqS>P&R;B<sztJ?=jM<b^*9LswOu5hsTHmnF!z9OM0e>bJ&oU)rmLW`m@
zkJQ5R9D&MHdO!C_ExgVV+rzjrRqd(8;`?V`H2W}m&8(gkxOhsekMnz#zlHOfdlc90
z>^Zr~@Z64Hv5$96bWMEq_4*ng8*?8U^FI6dW3{Yu?h_LD3fYUJ)<@1vE<doR@|#oj
z!#96YcHigwv5#q!8JAJH)TJ%_$8#QBv|IH5)lG@tE;0i34tJKsX%<N43LTeLo#+2K
zVp8;Bv)Qf+5<6uMD!hHnSLAMa@l5d?KliA^yPo_piSRsC^k&HgS(mluQ$*QTJ_}hk
z^VPWtlU?TuOz6Dp_J`GJl?6A`ORMy*=I>2Ag%>zl8?3Tj^|r<I!VlInx|j8S#Q!>{
z-?!(hTEs2By=>8mtXmw;XY1?keRoaY@2{%c+q-?`>2r5Xo#EMEG&92_iT#X~@Ot&8
zqgql?hbP_8ekWAPnJ#*`O+5Bo>Ld=$MK3n#2)phP<64{epmU?Ck4%^7r?m>vjI|xc
zig|N-o6mSxOgyJ)r}*Y*dPv51>%X1lyDwBc<bC&LM(_ThJ5@o|FP3ckoN~wMq1~1X
z9;>FzROqr|UjEoOZB-bPLGqob^l-+GDqE|-^WA+l#wXGiu*EE@Jv4dqXT^1b`)Al5
zKh|{1x=F=mg|5A2nVhM5$velnN}L*Ha_m;NeiN4mues`+l-0Q)Ve*1!&SK?mcdGXN
zO*ypQ%HGSP^{dwz&!F6EORlK&M;pAH96WjRq{`-#xw8@<^)5?UenefeBe|V@-FJtr
z>|zxMUN?T0vv}G#zxsh7|C>(?N!2zN9yLnwi&P$X)0mwsXYsP}y6uNK>}@s$>)H1`
zZPeorsZco1Z0A;TjQM@l1CGT%S2bOq`9-#8W9z|YIRTr1PlktWUpD@p8TDl5xg(0_
zXFvXxm;NZq_JF3GMlzRwb;6X)+ix4=(zFhx^4mDtOct7?7XJGF`90p7z6yP*$eJc9
zGDY;<>~@Zri$AO$oO%@X<E?A4mxZmoLGx+P>#8@|RUd5cjx*5ny<WF8Hvi(egMHlV
z{un(-w61@8<>&j%`kn6%&97v?^X2#}?&_m!#qA>h9eV$h^?B9H|LzxR>>7XO9PC*a
zR5bZ(*dq0LCX?5i<_NKbXGeJ4o6A<4Dd5}eA*GSo5Lsizn{wvqkrXwd#6@imT;}tR
z8Z|2EO*oe*BraxgM7Tfu{QkAJEAKMS-e`3FfY{sK?eTSA-+t%c^Go{O*6&+t4;QeT
zI5FQ@^fmLadgJvZ`G0}d+!3J>YPTcQ)E6(kImvJN_62Lti3sK{%}kyUb(k&g`HNzw
zE1ZJrPpl6<Z)xcj-f5JWe&>=3$A%D1fg_7@eC9M+E_yqyzby0rqP>T6C7E}BlYTp=
zjOE$g8tea`w#DaPxzJ+&_gi_?NwHb1+!v~?zTER!)8@!M(@OR3((5l{f;%S6PWF6m
zE4S0srgWA21K$Xlubb@R&p&@HYP<aU=aVd_<*#v<$Z9vU^zMq_e!PvduCglZn2E-e
z?(Q_lYdQ;eWbBIXyY6~(rV($@@{f!Btb5GrmdT!KT^TI@&rU@D?ymm}qph4GA274O
zaG1aH@4eFFxwmFkUUZL>k1}SjD|m9ZS)YCJ>iPP67ilOf=RcfkdzC%z{%5DK(^vVY
zx1^;%(aGN3d`?&N^r6Hr+vT*T*fY(G{<7zy1B>A6Nhg|>_lb&1ChihsQJ!OX$a7n-
zZDxzD+l_<UR`V&FNF^S$@O@tC_T|@)e-+lw;Rc$8g71yqOzzo#oJ05VnKWgE_s4T4
z|Jy0}E<GlWP2cix#k}Nzl;2{vC#{xD=bPN^B%ioKjLm%W+1b|T%&s%0rPq8{o4oDn
zwW;wZFYVv{IBwEGZGATxO`kQIt525IZ@ho9`<C@Bn}?YX4}N}os9D`VDP{UGJHcNq
z+8%0}DzB$*dKnp6wb$75)YHn7C&k5Ird??-Wam|1>-*@_q=xB}YCPuzdK6ilDNSrY
zE39&G)`t@NLRpQrSxFHmSFzttoyoexe9O9+mmV$ujxsEE>wG!$sB7`51v@l^&7*3M
zDQV~&*fi;pmX1cD+Q-H(XVx_6Mb2+|*}cFufakV+$bH*i%TG(Kk4U?6?`z$kUk~r6
zK0kfg#yIVX@9w;<p4&BTcC4~Xf7$fuYj=>`&i{#1g}v78F3YUF<oy5F{PS6}qqIuD
zzx1(MSo}45j-a<o=*y>9L-%@TZn?UM-|GT<UeG!p|H4@>79M(Wq`385l(5;%60=D{
zo)6d5cyTFyn`KbwYvob%R7P>x<y+0co%0pmY&+CsshT8o{A&HWxs6No`Q}XCY9FC=
z_Whpt-RHP6FI|gY>cF<Fi?xXFpU%pKvA_3S+rR7Zt*;f?dz`q!yW0a6&({fh5_0dM
z$Gj{bvt{fuM<#^{Pt!4*yI$?g>zE^wybQ5FoTgo!{4Jn*QN{VDM-6<d7v6og;F@S!
zV!)CmOrQ5njykC?e#XiBO>6wV&r#p~KU?tGu)RHUOgjI@hq~)$FHMuQZ`Uz+bkSD%
z(k}k8In%AT6*evu|7O3QcVEx6Ao*tt`DKqd@0y~zQf>C#RI|@}?jEVW9P{kghST|*
z!#(vpgktp;T~<wwx#<xUyW={W*)R8)-Fq%PZ4J}EQla>QRqULE-@&Qc>at}oGL@fu
zAI-Y+lyCRt^M~A@Fer1SpNTDFD_6NZ)4tZO?B=o3`^Py;vUlHJ|2(lj(V6SV!Opzt
zTd!}8mzMkW@mk2+?SBgX>)!v#cYF30?G{ld-e0!uTQoo4eW^Ct|G{P%zI(@?Kdn+t
zck{KlnmKt>_93y8o?l8IYPXsmQfu@V=VI{_nZu+mkZ;nz&hy}@A0m2@v$};BaCRN^
z%g#MGr!}CcaB2{D;o+~Z4K!KitrO-}Tlewm`DG36S?lla=zAP?EY9NBYv=h}I@|<(
ztlrKkx!lIz=bnFQ-H(TJ{~evqd(ZU!uj>h15(Q76OlO`w`TE_`ht2(V59_~Qe!k`R
zme}(bU&tSby4`5_Ol7Or1w~J@=6lQhOK*1izAyQ-XQ$bj+hxlSC1}m@V>K0h^fyCm
zj#ul{&Y&qC9h(kq@-E-u;`jTlnN-RauA9MMHl*KMY;J3{=<mm+rrWdR6Q_J!o!7aq
zGGooW-E7PFa^<TXZI;CD-#2+3(=9gs#km#xmBO`(Kh(Lh983^VJ;=F4v1jG!GN!4a
zUw@oEX8zJ?>89PMkM26b<NDoL`{qiv_c9UUnYA(tw)1@8NOtd=^St%v^LW?48w1~J
z{gAwzue@IK_Ln_Zy>!HOA6DXQyS3p#uXN!B##PL1G8djVM6qk|ZLoZ>f|*TLAaO!4
zU&m!0qggLhj2<=YG-Q`4KOo5dCXI((X2WU5y?h$Q4Nn>Z)w0c{e@Ud&EZ?z%Kc@I4
z+k_huLhXxPG!s7SdiYq_uC~~k;Aa@ie^=q$pXR=R#8AVEcgsHizs>b{wNvEo4KHgl
z9=LaM-0X-mm@IVE`#_3;lyBPRdEzXqS--G(sxTF@t^M*QE+FjKIiZl~MrAq96_@#|
zw*7cu&J@2+_P$dr@BOyOTW>#{{}A7MtMt)A?*7A$xkqp8a`92SX1nCcLs>_W27&ao
z99<l%ICi_f5LHxa@^1-Nk`aBV(EPnKKI+YpBag3h#3X*auv1CvKcCd+8F%x77PEBT
zUUprY=bcHvu4?Og-sLxRCN)}od|`Q_Z+4c{k>3?Owo}bCpK+^gyV<Q+Z+NBKX!D=T
z2O?*jV63y`F1OhiQ8D>|QA2j>$A32QrJ><@pZ5QZ5W6zb#dI(G>Xnk4jp98wzt(Ya
z4~lhLsg|?lqh`d%*clBILw?*6Hjt~mtj>2}CgaR^D>oJNf19_g_hPlF+_~O{tM9g*
zH7t6q_F!k`t@~THD~OjX3Kng7d@EhwcHP~-SGYO-3#F<~wf^>RWArwkcxPppd+v>d
zt(`)J?~0t)OZV+cJi)s*k7JkOK86?7I;E4>Xzh6+z^6TXa<28a#uoccQ=fgZ{MWT;
ztMt!HAAC2<{9b-{hmrrUh+DreZ}h*|rS+wqf9(v%yb!IgR`p%xXT3b4y8KSed%1nt
zr<Ipq9s2*SwL{!rBX0h5W6rqzRlVj{TZ6w`zB%!FsI~BwnG@`VKP-}-b5c*hd{c74
z9}yY*l{J?mXUMsEMbsX<>~&3tk6*8Z|El`G-UF@ai3gV-^^^H;@_PNb?B8Z<cdz<<
z-hSt+E4%e;ZNBDD%f41p5_9j;?^C|s3%?vxFRQbET(mSqGwKD;wcNeW{vEF?2#@;t
z=+LEvPUW>JKYcC2{okCwbN5k9%um)+S5mr<rkF;2G@Fuj^s=3t`-gQ4C#&@-o++z-
zSRrvNKkM;w?dk<PHY~Xi`GQBqFVyNr@#*i|{%?r1t=X~V%>E6R3y+t*+a_-M_UOj=
zYa!~k^ZIU8zwC6L(SOL$c#?sB$LR+}%O5rJ8y|kS$>OWat+#EC=5xxLU$s@(ziJD9
zS3g1WK$(76QPAI+M;Z7036OrcuQ@w!se7-nSmC?m8KQ^&Y?RMdZ`)QZD(?2nntghm
z_OD}#c^$`+gihc3IYYF|XzmPNx5*4ub5DHSv%-9q>*`jvCz;wkQHj$<vu^C4az{X)
z>GA7Gi+Rh^qPqUXg-zhn&ye_6{^+Q|kBjXF(zh9JEX#e?G$*o}BdoX9@Z0JCjG4P?
zpL1cGdo)Fw{c^)|H-<6VY*%d1>AYqGfjz&qYkc?x6XI63Ot`dUqQ#emm+BZcPQMal
zIIWQHPwiPf&!tgJCY!AvAAJ4d0(kE6-d!AXkGd#xkD-NfwJonebC1>FxyO9y++(}h
zgKt@H`FRp-`XghDS!)ZbUcSjTXXp>umK(An+djYf+4{%D+YYckYPobG@L-BH=Q?BY
zgix8Vv#Wz6Oqmz|yILGHxBBN@%Qt?`<uctn;sT0Xro0pEWD1<7bjrxZwMnDiDdb(j
z^sD*h#rxX@tvA)HET2>3zs1|<3FFBS0nf@!!V{+`=zg5M-tk&QO6-~^|CTv#S~7w2
z<%wMtYDVglo-B>1Z&1=#-{$kpj<2#*&xh%Fzj6d$y!4Ly0@k-&x7zHF2w9SjH73lE
zVgh~?fr){M0seVNOLGGYF62u~rdD&tgk1mnVek8Wso!dw8PnP(sy1!vNifjzPf^ow
z3sf<ZVigh16%mO^R?_S2lJ0cN-jXHcdnH3AM(f>?8zxb{mvnOfpIsDr`<Q9qX3hTh
zcg&d=`QE<&z3unCyXSYN+vl^j2s&AX^mAy*2hEvgng42;zu=+$t@l0^25V(;IM-bG
z$9YTp*x5D3{!f<oICSVcbX3+lojs!IANuDWhx8Ri6`2_kfziz}9-PJc&W1iUHRoEU
z^Xgw!O7V6(A-hT{KG5@BjR5!WsfR1dL`yX8Wz-7a+Pm=X(k!pLIZtQDCe%;juMO=F
z?_bSfb7MPCf^OSK;b5(_>l@@Z>IUp*K5Dx)>C_L4=yTB=8+RxETQ=*}EvrmDt;Q-I
z+4VE+_C0$mtsSPkgyGN5ofq6EFd3fcD2SH6^77{K2NfX=Zi;fhcr|!!*(UP|FShZT
zP~*AzLP14B$$wVcKj*ITZYclYcSyf@=lg%T=N1?73vW%VS)sYv=l*Z8BF!g_+kc<0
zooe^r?(4HF)|_jlcf4=;->i02uSW2jO`eO)o4-u%mIBgslekP84K*#?5~lJTHq@!M
zPm}!H^;=nR=f#+B@|S`|oYRsl52@?_;V5}^^rvQO!()#&7ypdTfJ03YCergRI<Pl|
z7`$e@BKy^;HqcJ0q3Z%iNL$v0m<2L-6(yH0D3N&0enq;a?O+7!tb5n4eS5mTzOJ?E
z|8Fnn<Ojk1+ZHLmsQqH+cU<_Ro!Zoyr{<_iF6oI$;J)$dM0)q1pVO*m+&)(;_U}X5
zK8Jrk5BNWBZ^>E6XZKvhRzz7;TSR?I;P=`~4>x;PMt_~5wIpl>`-=V*`B}R%>lWW!
zSLr`{)!a3A7w%oVckSzi(d&PA-&-fY;{Lk-mnP(Bu&$ct{`q!b+pqe)4XgW|G`r5M
zZMJ&)B|+o5a_#2@Yg#XHER{QT<V3OUl(PZz+BR*ftyAAoI=8UllhBmRFO~OgZq;uK
z{>?Rob>ctHx1W{YDh9AQMzXqo-GBaCnf0b=k9NzSa?;=xob$5Sbjsn0&v`VaKa|O;
z=zDEaU!T(d{^IHvELJ^#PH+WHXusyLy}>OgGj(O}&E9LSuOyB(K52W+bK=_*kGTuv
zG^~Ttt<OGmIF_tbDi;`S$l>dhe6hkrcF7LDS64H%s$BMkH}hTocgthFbZS#rii0Qn
z&GoL26H2sJO?kc}&|G8xw&}Z*w<j7H37&Q653c<wU;KGtx{`>Mk$76@p@^k6o0+m6
zC%Ey>V$OcKM(f4WC`-OA(;m&>vsmW*HgQHM@8kZ%RpzhwzrNhC>X%%{UB}zM|DV5W
zw9rha$+dBhm1q`o!e5IkbFGihb@nfs68`kl-?)=6Sg)muIpto_Kd^VLPRw*S59NQm
zeg6KBO}yR3x8QMxctr2^h2Kh2GBm5Wa@}>m``CA$TX6jXR|!w9i?(Cfzssc(-)5>`
zzt%MOcfa^OMw2<c+ZgY-Znz)#L6=wJk5)hyV;0*D*Bs&B%S<?85B<I{JEC*m0nIa3
z2NGp}&G;?-`{MrGA2PQZ>zKbg7u@%-p7H)-u}{3o)SG^PG9>40m?Oz@_ewLL+wBh;
zH=}~)HN`W_mF*Kc>9B>xmT{}(T=yNV<+CR2daIVOhi5IDkJ`e_2fXdBZQl*AIImIB
zvpD<u#ezHA_tfg{pM1sOxCCp(f%J!;Icl^5RpT1AEszbGx`i|MNZlLv-b?(s|DB30
z?LTu|+7kP;uKGFKi8Bflelz@fUcW4jWq+|pz52Sh)z8c>Z8Tdl$&O*2tDX^;WYlBF
zS!vs2r=MDyG-GjkZo>cXh51b?(?4?G&`L@Cui|}&{Zfs^<KqhA?hSb>;+6E=B<!2K
zK5RJgInYLC$(2^`2QQnR3p`<7HSK}phb50|1LQk`y%r?g;hx9uINNS>L4WO6y({aj
z-!<|CNbD?W7nIjJFjJua%3)b&&hKdlc9p!i3T7^I)VG-^{qAYN0qeteb+1p^@!PJ)
zYgXa*+S(V&rS@6HSHBoPs=3iHZQlmR`Wp)T^C~Bn>23MlCF~uby)Sgin_qSl-u%+L
zaJAHS!qceQ($iP>hI@Y9diDB@$=5$z%f3;%_D$4R(<Z&{>5^BMI?R48AFsM=-_KS3
zuV1f!6!U9+!JFyvEHS_IH~ert@a)&0qywd&9_<#_k5PH#8ejGx>A*v)Hr*BaW{feL
zSG;ug();pr%ahG^^QSDRQ_ErQ$=G=4<F`Yyhc~WS!NJAs(qLxRb#%$uubTgs9Qo^c
zQT^-u7Q4i+^B;U)V;6pN_SO7z^^@n=wBFb7D!Dr6z&qiW>QR0ZW(j{)m-uUV-B9gq
z#Ji2JlB@e;Esn|FoBX24e7U)}>2;Iq=EY}ob(4HH|7>h{C{QM^WffAdQPQ#2Uc&0c
zi|^0h?cX-vBzf(OO{))t%4hKyE)4K~u~)$J_Uzr$Z|+jx(w=$q^F2AW-12vMX{WPy
zq+YMyJ)<sajr+_9_4XN73ETMm7BRaoI?i^ntJ`d9{;HcUW^24l{+dmA;T5mHE}fzM
z@9P{x#;)$7x#uJ*3Ul+XU#s)F(0xxKncsaL;~VkG+Og-YZ^!)FT77+9jQ5eP8zmVF
z*Pi7x@3mVHKY5#4`Po0F*RK`)QjTa|wb8A_`21R*65HrI&vsqtxGcSKwc*-{EA!^u
zKe6Cy+tqhhTbN4MmmOibv-(oFp>x=@wQJ%$FNG)k<+|RrMbsrG(SV&teWR4$LbsOz
z9?{%k?>FyeZqIbpF^XV1Ev~j}8t<F8QM;D(XP<m@Z(Tauww>E-dU?C=c3&2s{Kd86
zpOcPWNEXx91M{;4)~yU!v~jV@splGkHA`C9&rLfVAvxu6uK30c=ch_c&|Y*}Mp(jl
z(}AxM3WrU(<|#kSJZCbsV8)aUGg(w(3oqQfy;S{&LEoXi+DN^Pl5*dkI2ea@a&CCI
zL23ExTNk!1?6EL;eXzm8MA7m?p5Q(M4t}m9ySSKH*DXqWBXayv<AHw0h6u(f?F--V
zKHzT5W?W|1@b*Sj(&;VJBsT6lGGV2DqQhl_7vVjdFFn2V{MG3%pTDk&o4-tc(fVua
zuf<>3{<1aZ#qMS5S2=oE?qwa8$vf8Td$>C|h--H1w!~J&)Qc+w;-tJZmX#O2I3cj#
zq{YjvPb>FOMd*i@Q?&!*auPK*?=T6Glh)x0b#h%YwQ7Y#((D`!{fOxBXme%Nnui`+
z#g;C)a`j+v-U;P^0I%Pznas<tT)TSJttx?a%d4QY6W^!Yy7jHKL@z|3q490@Yk}C1
z=|SDug0%;<xJ1GpTdC-;ZjH9~xRL3T%Ow@8ZGQZK(2a#=J>Kkj$ytYIT-)Y6MW5a2
z(c#5zY~J2&;{4K4CMh43o4MHeG%73}oPR(6y8oHCx0io+-diO1X6D?f6X#vq8=X}Y
zl;k9yzvuYV4HCRnbHmmKX|c>p=KP>}Rrb)+e*&Hb(#)c9`?oJz5WVX5`xU3&PkZ&9
z?UeI7u5u2!oJHFgIK2B5)^g|Ry8tB{EvwS$?JhGcAMTr(RI%)ap|JFwy@B~$hhFo|
zJ^y{z?2jiFE)9sE`JQ)|bNG{A%0+X1r$|R-RM>2AI@Xo+`dd4b@oK3*PaBl;f>!^k
z{@ov5>h0>QWg2odfLpaXdsabFyiWLxl=}5*Y?@m{bmm-%50uoBih6avyvpWa-b!XC
ztNeX7Ex%4jxBd91=y>mvZReuCX}f0!MN5U5NME?|NyK;Q_DzpAUJ_W@u$A}7{R<mZ
zQhYdVADrd4HTbU0KBuNe<Ld19{7Wyya<`Wr(A>i<)V_nI`TE@g?t_JU!u9Uo4i<d*
zP?%@Cz}mJM{}gT3TyfS<XUMpFLEytKX5Oc~qGxlzChyH_-oDUtapsDT%sT@EHFGZB
zDNEUDED|BeBYA3d!TN+JFBmUqf4&g7R8dXw!Marqaz9Rgt&%IL>X9w(`ghrYX%j<X
z!y!3-)x)3C1bbWER@7x=?s)5#Joop68PkM#44;-PSz3D}%E|rJZFj3_)2kb|w;o-}
zt7w+!u=F~=NPL{&DtYcprZbI>Wpli&E2!GTtG3u$U036LDRUUJmikWT7$pJDu+lh&
zBIoHQ5>MLRwSRcrCCs2A<@-)XKgap|$LhaNC+?pa%6H=cZ;PG>C-<R(4XQr_1hti9
zKASIHez2>nDB;GEnwc{fn&kVQcy#vl13lN%_A7UU#0f=;ZTiB$@KfQ$(1j-Z*M#TQ
zJ6G08u<n{(p7ApAN%rm=R}R0u^S)WZYKFi1?O)APRDVf(X{)!yJU!=iVK?Wypylc+
zY>kH<^I!f@5Vc-#fmwX+rUI7KNXfXUh&Q@2R-3*Y<WPFZxJ!2SgZ=r^`74e0@Ny)0
zOk7Yf-N;X<rIYPN0sDeSXJ7MvpD<xTLlQ&N>AfKZpR0CzZxr5op3TIGf5+d)pOrsn
z{GR7)spa>V`&RkmOyO@Y;~#usv3<BYT8sJcJC1~ig$rDCGvt|!S(&*eod3sKB))WM
zGuPhTSFG<ze+k>^oD`^OE3Mk^^OVCdWtMw|>7RLF>Pd_<C$P8OmYy<Wp+nbZ<3eZt
z({KCI4%QxKzSVx0mn%Q9kooQ%>6-$K&zPM$x;Fe+a8dI%)2a(`8}{Zguh_EY0<Z5P
zkEuIXiQhe9y<pS9=$Kp4^LW2K@q8Y`b-IPC@ZpC&c`FlceVVnW;^hT3L494h$f_F+
z6RKZ+xKzuN_G|ic*4eubM7&Hix8U75ec|3Mlg;jIaV(c&<88IMUm4-DB2F&#Dc?@p
zb&|?E{V(2Yzqlh!?M^xev)2^HPJu~%*=dKBlYZxj{StY&uWG0NzF7jQGgmxvX<Z>=
z+B2=tEb-P>{;IwNmPG4Y>kme~$!Q8b{N!C)%>5-Ib0126HE`xh-m&G0-it{Sm;N^8
zn0B(9VT!xHfVjJ;f%KiO`>~#^sc{=tEwu5N%6qfH^h3Mzjoackp1kGm_%Lh2DQi3Z
zCEuQ1{1<a;zVy52nfn=(x^g9^U1xsZ?KH#dz=|OL6-%st^=v*^&|0L$;kNYPhf;Hn
zOY&3QCa5dOUtUxZxISogq|*e27hklFSOw;-yVz3PV>kcL^of1EYP-!R8cO-t*4iAF
zDBn6Kqq6k&Z1t!zL$#M(Q|f;j*>PRy{c+*z>7e=>dpXMHU);AlYN?Wrwt`E!`u=U!
z$`zY^-Iymo**x<^&s$yH<>hZI?tXga-Q)Ue`Iiq*-+w4n*uy6O|DDy7+Y&|}nBQ=e
z83!qMuShY8>booa<lEvqBD?h8q<@jJjg(t_{>4V4>gyg$D~?O;Y1W+H_W17khj%Z1
zc+TLgWP4ubh^D~1-LA}zGaPwNCvM3U={E0clIY1}wK(--rmaD}dYD&w!o^+JPEOE&
zaPWuiCy`+0*Na_vl}g@DOprPzv9y;@zpXV=PFwEGlE>e-<t@!#=X~z}MxM)hs}w}j
zEnONHZkb}>Xmm;Q(vM%NbChaXHb2`Mo7P^S{BTa$6PLqB-(3upn%sH)!u6PqRqWTS
zWBA-&3w#Y+r*X_{(V~u^mFs!6Sd-TBPSQ}|C<<7#K*l}l=8TIMpC9j;cdbxwv8UU`
z0~L=olWk|JU5fjjI!kC%O<389^X7SX_H^B<+gFtF)O)?n?zGp_wg$aaeU|#NG*elw
zz5VgUiX~q*ec|dmzJ%9P-aFpQKKbsm`SomaYg-@tKKtX*E+>+_RZ4F4hBkwqdkP=6
zIyx$7#Wf4A<5gmRD*ef_sCFNR?3;ZCcWVzg&-<E|TK3X2@~3r;hU2a;XQFhzRIrBI
zct^2cUmI)j?%RTN#wcZn*Su4@9QPi*k<qecm&CzEn~xmJ-0HS?m05;Jfl1dp9^ZY-
zT~}Px>)f|y-;VV7Iaif0N}uQUtlg-8Ic@8QCO&St?Brv+e{Wlpv10zdpxZe+t6nW$
zy+8K<IWhjv2YWQ<ubWk$<MQ>5`f|zgzZ+FUet&(FE6v`}v#{zxykf#t(X%4IRZAJK
zEj@PZDCeHN-r}bYPr0Hp`C`f%-dDai{Cw0Nb@qO(VDRpDn6lL|FhfM5YPX(%g2aZ)
z)6J@)W-Lv-Q>+o5`tP2t`;W9Hxl^&+?|a`*x^ns1$us9(U6ft^Kqvm$`Z?#N+HW4R
zj{mci_hx^|!Cy<I-E&vXF!*-y>fw2DCfU8u7Cb#3EFX7kPtIROo4+S^1|RR)Z)3W=
zAnClXWhlS@Jk#w_+n-;#7@ppm6!F;3ph~z=_;KTVtqXq}W-NKQ#b$AU5z8j6xpEt&
zwwbMX@I`96ns4yhL{q6X&nkIhPJTUD==QtjOU}c~H>}?@uQ@OADopj;gt_cd%Z>g2
ze4KGbBlCmr9PPc0#o-fnmQP!!^f#J2kp1po4Y8?RcNfW|ZN8cm$Ppzi6fozsP^k2l
zh&g`FxffohtPOL|y>M!?f-)DY_T?RuFST&aE$W|g!QLxU_S-Rarp($YTa0Y?ruAQS
z-qg4(=iwQB^M4PkzrQcmTYu-{sZWglPunhaEdToQhUEJByL<a}>-U#^KYh*qeMQ;5
zXYXuh?zh$Mm8k5jYu+sPL64zwqoTf)YMRvJM-{zMx$J>2CnYfM)YzPmVi+MIx-r7x
zYN>*yRE~-iZ^oYI>yG`lI^q=e`=aLDrOs*^qTB%`Pl{)=I?Ua8YLdc*<QE&RI-Nfv
zcIU^3O)a9=3-dm{Tk}`CEcMOm+@H@E@^-W7>D7E(`SbP1PJUIHBefrrE9QOKnZ5nX
z+qZ$K;rBA{+^P?In1B8)gI*4s=L@!&fJGq}PYEcks7z?^>SbwVxbjT$)Ysi_rnOE}
zYVV0_KFPy2=dGHm?C!6%tGm~v^>3NBE$%2&aHHdW&MGMlQ3fx|K&4YIlaI=@Z=J2>
zDV^MVJ!|*JYbpve*RvZ*_4Lf!!IAtxMl00mdds@42U}Q@Om?ksRe1S7P37>eCyQNn
zw?F@$8Ily%|7Kp~UB`ZnyN?Q7T03;urqtvWyNhZ_{`e^$za_VBQCjBLo35wN+1?A-
zT<|#E`<ea5HHmU*duNB~-QDqTr)h6R-skvx_M09om|D6o&bTZ)v*7cO%=Nb;vW54B
z+UEYc@@i+$-#4tMm+ME~bWlxIonvpVq?!7tnd$uTSte!HMvY5e+}-Nf8~6TF{>ArK
z<1cT2St)w)b>~$sp_Hk*-n{|(2C`~T<J2GSe*M$xj&auXl3;(WuRJpMpYN-V+MtoM
z?#tn`Z|i+-&b|BRkeu4jkh>q(=l_?juc~_er}g!<tNc+$Gb}atD(Ob<yC`|+o4lK!
zWA1bNKY>Z{HlKfex?26$UqS7S^v$>f(Tu8{Y4ZvmC`{$*yKB*PdHe2@2FoViP<*57
zc_TS)Qmxv>)AJ^~ap*g=Y~Vhk;9jz^<DsCwMnzv@$GoFD=UlI5*fj|GUSrzz{)LZ$
zt!3?{ntgtG`_rchr6*>~N>)A>{q&Suy1;bauO^dcH9vNT9ht{`Q|!Pg(eBu*JdF9r
ziW8g^I+_yP*9#jgPCM1KV862Hj(|o+tHeh8oiiW2aO*$k`T6cw2e<fT7u}c2=UHxw
z{wBcxF=@WgYr`uMq0X;=rY_(3rT$0QYqj2b+q!c8I<Ke`KkF?|$@%;W;CeLs%kBHq
z8CF*v@akTp!6({zGHZF?q7*jg^yVGcpN7p|eD-3h<K~%9ErKHj0*a=wMqXa8aWGbK
z!dD}m-)pWXf7#Ki@UBqeyNAy~x5z6GIr-YwPG5I#%A}8KHG2#1t!`hk`t|krA2Q4N
z?uzef4z1q)xRU+evp<&}?!3Lo=l6%{`z2MUdUG#3Kkd<ThW*PLZO%`5TDfONwuwZA
zV}U~Nv3Cwu&3zZOW4+u?ztl-}TpV~@$!@NAWz!F(I&Jw!`k$^>#yMBStyD>MTx$8~
z;YtJl$!9BfTe)Y*aAr6fSKpHCmtNDtekP$@>&A}j*$Y43F4|_J|8{cSw0&tZJ1YM5
zwcPiu$+g}byh|-M`df~DChNSO?wM*USKi;^I*Db|hoB>mH75wFc8ksZ^L5F|GnuKs
zUcFoR{_c!R4RQ>tb_yDvNI7G&@$u5nm&7k=-4W(L@Hg6Sr`hwR{Pz=`E;(i@Pk1|_
zgh6}X?po&64N)RV;SDR?_&f6&1;QF9%nHrk!sHP%v0*!x;;Q?nb_-2+R&GCMGj;26
z<~~s+?NzS(Gygt(;^|g8X=PsRw9nh;rEaRX<eG4fNkzx`lwfj2k78H)#GsWIH*q-z
zii!BR^j=AIoUqp~F|@s{?P0)rj)&5{Cv4L_{a>2At9{P%PIvCJKeO}h-`LW5HE4b<
zs{${RhfGnYRX}Z@xPjw=0Fi}pEY%#Bo*ejh$}>&-`F`6Z;We=g*(>*#FJQWu=6qtI
zz!|o+VatuJJxWAYhrK@g<>^khbR7e25i{G1r4v}U1c(*p?9cvm=*Y{7-Ia|Mo=KBt
zPCA*h`FWCYK$g1VqU_t7!hM==t_|mlzPu=yzur%4`jgOo!AiVG_P?+5{`KIT+JhOk
z+4oO~{)kpcR`1IU+&4pGgTRl~SEc+GUtS%aU?cw6bArW(l!;8|10HZD=m@ZL1X&we
zO}|(3eKX(fH6ogGX1LCre*f3x#Ovuj2e-_gyQlib+^r7NLwmVBEnCkn;9L+b{6y!F
z*VD@i&-u-E3#kO36e^CKFrz-yZtusAHjbGJixzw?3OnF8q3+Mi^q1miw*Bi6O<%J4
z((T#id-jM;NnXE2%>RJ$hkzKp-05Y8*RFYA`hF?3<BgJ3kec^I$*%&7O-~&7%p|l%
z>>PvDp^sdx`*c<)?D6E|xYZkQyi@t2*wiq8HSI4S4xBuAXUf@MUm{=1tAE_Ndd3!y
zv-Te6_q)xSXJflH^wA&rxs#&S+g#_Xc0QsutuxYNb!bv=+lwh`7o=6Qw`WQSXPgxf
zzBaqa_v*6A%d*w{m@is9vRJ(4U}EIX*NV$a`yy)+7hgIjw!UYvOt7=XhlU*SlZ7vL
zu2%mj{(o2Hm+~DS)nD4av?^P3@4%C7`wsrRZ}H*J&KK9NpZLigbiGPT)43r!^uXE>
zlK`J*J1e4dde|09NFOhIEw{c?De&E=gq$1^?<uK{f;tL^KToL_w13AP{eF%glk772
z*QJx^6-Mxd?Kbwj%lY-;Of&1P)p@H``!*imm@~EHar(;V2M!x9G>zW&efFP{f8D3s
zO80(z+Abd4Zz(l@%D!__rpNc!<VDsA&Aa!9Z{CCr3CH%lY`nz2_t?5h)nN8_mb=WJ
ztd#s8`2G;j_LH-(Ro@7yn&&7uUuA*IMU@8DL<QM~Gt-xDIl9bG@qz6{yOekJ7K%S_
z%Dp*vTlvP^*=85_hB4;;IuPeJML}xn9=RtsQp_3{uiOz$x^L^`)4WyVgv=8Mo|4wL
zpRdlHEkA$G$3xxR{txn(Mw{z7rrAF|H?KH)TdfC^NE_D&Th-_KGm}IZm!C8C5?vgW
zt<bLdu+Xl*vPJpSO-7$@+A|dx9&FxtcHUj#-!qK!Jr7Ch?AkObxbVL5od4(UxZkg+
zQ>;|`b6eB0YJ>BX`HY>N7Y{fk<R@<Rx|SBMIkn7PWOkZFl7_KBvtxF!Q1`ZOv20O}
zWJkrr%brW;esP_&tWWuNvDH!yx%FJ?FAZiGWbOIY^l{V7H-2xF_wO<PFmcL_p2+vL
z1vOm<U--@3ecfK=aCE6m^oHF}Pi_t}RH&}G7%$ED;PKi^>GK~<Tw))v>}^@J__y=9
z-3PBlyQ)4sV7OqhqfdNX!pvJt2EEDuKY3PKUbUXpteeWdxME71*3r1TUji?xxdu(T
zExT6zrR1_F3mqqW3NP+o)wfHaRCkqbSMcN&D`rSU2O1n(@KKD_(nQ3k*(uriQ+MO;
zO#KXnvo*n1E;dS!IcM6gopRSR&eHg_k;<2x^Toa)s*Y?z3PEq1S!J06wSRkTQUCw&
zRG`iM^_dT!t^Kj~^yJrRVJrJej~CywyS(-5`={CMb7xqdmtP~6#WOkUna{nweErXE
zw2NKk(>Hyj=RSMMJ6GqQ$Kxc#Qf7WBJlqy~$@qP{@$<sx2QF2-i0+s0Ti(1RxckxG
zX)6{jxR`W7Y3kG+4+>i3y616zx>wEg?u7JA75xYS{izX)T|eBF&Q0H3S#MQWtfFkN
z{xf?-*%jVBPfiGT&x_wx@pqD?&z>(+zxWncO;xK_uXg47R{iGfwPNw}HGgi-KmTv0
z*LSA_9~>WenXD3eyoKWv+h*3tIfkZbJ!ja8ckVnElb>h!*-NR~XS?yb136_DJg$!I
zy(_w!^b+@Nh*D_cU>0~G^`xfqOYE~HQ|{Itz4GqgN`6l1S>KqyY<c~lCqpqU@87>7
zpYDZq&z_lP*M0w?^_cVRn#;v`TNTy#UKjkiw{=(GWWV}9H-DZCHhH?AJIQ=$q4RC?
z9FYq**d|@|?%DczqsQ{gpR4l4H;1;1yj!#&DpBF0tH9(fmo0o=@M*O#b@fY8_`-ap
z#&gP<GJTbj8%5^x)sv0lW)**9iL-kCl&8(|!27hH*UqQge!kPCdR~9#-(O3Nat`yU
z{l2*I^~clO&suN(TB`Zu<N1GI4w~27{^fh1nZCjyllwwd#|*BzfYaQ8X`2_Qo|9l_
zYO)GpYu>7nVX(p@@B>qQZ~Ci)J3eU5&N=ITUwY#G^mED~M`Yi>?g`_W^e(aBUbnVd
zUeCQbRWF~OzI1-NUah{W&F0QzuA~qB`|H(zcx<oiE>tp0G%a&nQ+crQ{{7vI;sGZ!
zCM2&Y7I6M~BtzOonP+itSD0jX(-oznoWKjmF3vAM!6e=H#p=eL#4XBxN4+Z79^NRi
zrs5wXhxiY3JN=5S*>@IhS#?tKhtSR1bc>sNwO;+nTl*-|@ABTPEm@)gIa@<c9oyKt
zz&bNy#f(EjlX7~tYfsf%tZjRE<#rVj*0*BKdZGrGA}0ojlx%*f^Y`Q$;XlPk{+8ac
zDz@%X^i5pJY?4;_{u1vD{*`<B*<T;n{`^fzQ2ZI6JB517Zd$zEIDbaZ_4NIvFTU)|
zTwkWl_09C3^Z8w_r=R_jTdxr*Fu_Y&%te&%DF5797LvjfIMl=ZS9L_MJ$bM6`C9$`
zYVY@be0h@l^>?wUvWraYc&04bl<oL((cIKCY29AAfzi2QQ$sI^vqm0dn5Sr7)K$H<
zY3E%57snn!w<|AfYXUsgYP*li%n7m1Kh5}Vj>-3hZJ#eKx@Pk1!lk3^=G)dS37NWi
za<K<z*^2Eig0d9?S?h`fs#bM1XDwlH()!Y&dLS|BEyv^cC$}7y66Sok*~T-{PW!XI
z&3nJ_KV4tg_I><zdv(}arNY(s@-r7oSvgBNeqvn2cy6ZdF$rC<7Lz^4vWz9Wo7I+F
z7iIlgwTPwQoX4F?|2LkAg`c9QEm^bSdS8RePx1S*o7UdmrT$8KiKoxQ+xf?3YVB|B
zP^$Z=^Qqk+$ESX!-u$rMUmY7GmQI<hywqrptYH%ucf>;W>#S;0NxSZRGk-WYzQ6Tz
zzRroAg1!e&7GEmv=9f7jU$4M>;Qa&X<fU2f?#$(K(_XrDox4mJ^O@r>^lv{n;>IEN
za0llcQ$<5fL(hLwhtjs+oczW!a*2K4q%hytiOQE){|J^v6st-3t$!G{ahk&&<DB=Q
z+imaWq@~`R8d~~A<#EDwuMP5^C-jfqo-|1yXje>ORjZTaa-X^br)M?BUo_i$bHT;k
zQ$7_wu-SL+z5jdPd$zJc$!{h;*&JQ_N!z~qhvN6PcFAHl`@$1`|0?Hou9u3jI&?-Q
zeEOL=8<ZEv-%~#GTHog4rnO&PgKGA?{_}-%@o8=K{yTru1<YUE-^hG~_rpE^RGAND
z3rsHNv6M`7e%aB*rpeHv+pyNi>GyAez4Fc-9<?`(vX%si<=2U)aX72hZch*2T@h@v
z<H{Vx=l%1te3?%!zL(2zVA=iY4$Fl9EjuE(l;v)a6vvIIS3>`6FYo-l_PA|a*@3s(
z|Nm_KI^TS~T>Q4g<mBYX(?Wgbl<-zuXPq-yw&Lg7FK6TJ|J-JOU;jt^)$gT(Idah~
z6ZT&De*EI%%m3%Sn3Pc3^nUxHlcIjdn4`}9F~9$w@p}Gi1;-iQ-xp0)Kl^WCOEUkb
zYQq^%&-D85D}8n1j_d3BmCt@Ie{S?~ZTf7{*{U1{WvQF@74~_iHhi2Ozvtt}bosrd
zVmm(D798gi*|dn=rJHY)!NIJ}ug|2VMO?@UcwL!w|NT=Qg{udr9#N5>DsgOjX8Kyw
zsrvg$4@iEE+SGJfCq5?3v~i7beSJvM%*W05B(Dbq8*85Sc=|B(fv5O}SDhIfZiGIF
zefC0wx63JQp08I~X3h~Y?sZnX)Mf=vevvV|kxfz{;>NA`Bll~k|9x-So1?tKx4ikV
z-z%s4$IG6We00zi?@PX+_rL1--2GMCX3UT2FHJt~ZNFCP%fp!$eqYpInSOp(=${*F
zrib$%j?{k>yW!5)7Za`@Q4IXc7AC&F^6AT;)0!GzSBu|>IoX_kA$U&4Zoj%`ZOnH!
zxvDP@F%s*(mu8;Tu-S~`^uf8sGX&Nw&ycP^>o{w3P1>(phm@BqJ@ha=U6B;N{-%YB
z=+qFK&!5hP$5nCeQR<tvFY;=Xc}l>aP47-$Gm1{x&uCJzac@J8;M_xe?MAmUw?sKE
zUt_j5l=0Z}@KUbw6uY8^!&}&oyV<0^3Ff{g+oKVBfBN$`x6ggclHHc+m!Tb8o$ep8
zKFiH~Zt{~I7x{}evls+i{9`*wM9}SM#uYEO`xC_?-+Ol}bq1>F7d?#Osk}JfZc^Kv
zqf*KI`9j;~FfK?6``k10jAla;tH#++mhN454UJxwwmiS0wn(i>*-O!d^IXQF%PU(~
zXtX<Bn0>|givMa!Q}!PxCe%Kh!f}apmO_csqkUUCR5*)*QZ}W1H@SRgg?l`k)|2VQ
ztBWU<{dsv|-^F<<pC_4%?Rv}A+t$PXaR0p6d%tuOel7YEe1GlDiguHvOOFecPs{Zt
zonm-x*zi^4K$P!aZ42qC4Uy9nOwSkcD1KQxr|amNrpuEzJY5_arQXkcqxI~3q3Wqu
z?gvcY>}#bdYu~!vV{849l{;Qbt`y5Ze0)ib+SH4li$8q3Hhb;+;C)XTuSTl<m$P>-
znKi}AZMl)eRjsKPvo32+o#VLFUFJpjk&D0Hse1d(u(Ysylc8CzP}zD%^o{g8BLm}&
zzb8C>skSnkJFG`Y?!Zm;CG5@WO9fx4{yD=cnG^LoSp4xaq04W#>&<2GxyB$G#3-zK
zN@y`_lPFI|i`Q9Uu|?5g{j5?O6<_~-&sluy=A+YxOkTH7DCFI|xztYb$}f%2?~Eqx
zsJS_JzyAOFJEuQHa$#H0NA!)shN#QNVuK)uiG0>Bdc!B`*-$!JAjmtq^RS)CHhG55
zmrA_?tl+~$)=U-Sys-Ixby?1%O;%yoe01u*H?K-en!EAVVT+i(OxF$_z2V`ptanye
zOa4`Jy{_gR^Nw@=I#uR8<$2Fhc|~i#y0qDQ<)SJ&9+|H{C|#X6d;KQ99s8`lGuz(i
z{J276Mw_0+UXyoQUp5}qxE*Ho<g7`4^Rsh5=Exd!Mg*8=T`<_?)|mf>b<v#m;0=Fd
z`~(}X{{O_e@|It%x!g_9gkrtQjS89qs_HH)U8YRDysGkF@eXkth2H5;zO+QuIjNio
zkTcL&F~cHg>ee)`DYA7zj*g$EJefIZxy|Ihm`g&<o^A(iziO>4{&c3}r^h@=L*rHZ
zCcj?$Lhkg6X;bfgTd?k4yXb>^h2KtJ>21Be{vq4;71N%JZ&O|@fh{784UpFJVO?1n
zQdy9ypzoiQ6<m_brSGR;3SQ|~R9vE9Y+=Nu4_O1KU}$b)in6}%Xn1V?Z40s5@9TFQ
zu*>OoRGBn=e)l4WB=yZR)#fgEnNXg-k$>IVw-ss=xgyza)$ea#bV);cWzYqN$v2j{
ze7aX%@s8covq?nredkFp$5}erMk&q8-g4*rnhpzlYR+WxOchOBbR{V0k3@TU&74F0
zlfUTkO%l44IAO|`X-ia(zL+v^%C-$1{k)9|i<+F&7k{0xq;u=kB|IAqE4Clu`!+@S
zo77|mW0zD$T>&EoZ;5ozC{52b9>zUJi+KgEE#Y<8q?nsHMK@FZg4GtjV>X%cALKmP
zA6#teDG->JvgG(~CFX<xx3-ObyjdnYF17VE*5;m!XfJm^#wzhSrKzFD;kBa539e>G
z&Z33aH8!1c);y86=Ka;xuKU(>mj++`lJqk=_U=2r?fT+BLt}IKXD?b7p1wRi-HBoI
z2B~=MmZ*iXkEB0@Rve%4sp8o6m+!jmjyruYJa+EV^<65$$8YakHrd{_d#BmC$fY}P
zE&sm0Z1LL5OPqZla<1R)`gT>();zANjlX6K74SPb8A<Eb2|QZ=BVpn6t7R3Z*6WL}
zkJneSf4{#*OJjdjy^h$=-AVDXuY5P=&t&$V{d&>D`s9jIx1YZs6+8d@sk>}m$=glY
z=U>`)|M^n<`EjZ6S=X2Hl8X8-WamBD@r_gL^X64Kw>}#Gu;`s1xx}fe$fG?kZB9*O
zPWkJ}uZ$Hq{>)ymIf04keD+LMNdc=3)iKjTHy@XgPo3!eF}&-_i7P9v6l9fT`D8^s
z4!kIIqf2O2iqph+^RJ1QW7fXj7Tz%Fo8hz2cB6GEo7Hxn@YmC<kG{J7_3YjMo)*6G
zUo3uFqwZty@8A75iYpzZ#QHeRx)eV?nQ=mVnXH}3+w3<jU6a4=%b0nl|Mq@)y)yZ(
zUA&#IOLw;GpYq*$cdGou`MLt{XX(4w$3Iz}v|sbo+g&+}3YPek*(82_zdHVu?2@jC
zu3xWbvp-4UIC0UlbaO42lx@o~8`-m;{$Gp<OZ_UMP{-j>!Lx*SpWLMG*q`i7mcPC&
zS+#wIp=O!um9Oh=94XnPXMD<-YYDg3WsmA9dAdeob_>2#J~YyeT%o~~6LD_&u3xg7
z+~?<AdwO_-%Yx(k-?NJ{cy&LR(BzjAX5DP-`TI=7(pL57zG=Md``EXxtXusm?Z0^H
zj|z?{f9e8)7^fyaR<%s{xbN=LEoYY`<mjC%VmmcwKYQra-Tk|ZYKvKxnYk8k(VrFA
z@T!vW(Z$^fkImi~hcUVy&t$!@?7UQ$M$+0FeTyEYe_hHD&Lz!Pv(i|f^$?$js@2-}
z3wOCT-0#^PWUsFFy7ZXXZ-G}EPYS$MdUsuM!R4Ftv}b?)*I(%IEOKu8+EZ%+KKeY>
znX{a=z^dIYzp8SF<rTq$8XCX%EC>)5_;A9mb4rRTFJB+ql$Q+pb*x|SI~T9$4)l2!
zS*z~c7Oc%TB}nlEE4$#MiDENaUjEb*TNUW?&)>ef=6UDie+vZK__&ImEcmco_xo?L
zl}|1xU45eTJ@7cAaK*&^H*+8FyCr-0(z8|X`sC(5iFo->Yx@7s@%ta;O_Yr3D#$*#
zZAxncw^P^Ee{z;;P0`L`{?F!m)U!)(eI&7aYtdYf^#OfS6K8U??VMZ@@Zoq$K!MTA
zEtfr)O~15J<ws(IR<WPt)wN5*XSt=WnKIMysHx|(eTVwIckH;iOTl{5`%hIN4O>r&
zlpWYIiQRD`(~H_zk@u@2n(rOy&XA7Mob>2K^o6g>eh4K$+b5i=+*(?<@72Axk7szl
z^3U&zKBDz>byo2X-A9HWlX`{kcQ3N}_0%fv-y_o<q2JW1l;{5Qjq3JQ%$(exW7T`h
z<mfU@J>QQnwH{uUJ+%AElHKke)B7eYYcWw>qN#XCR_A>9mqjinonMy6)fJoO7cR)F
zzQsD@9@Cu;r+?cHb%#xStNBT}>r2a`&ta2aXL@C)ST4HYqmfg-WHnzv(8<oP)idY&
z*B!cWUcPR5-nFalyH-0htKAL0!TY`Jy{g{yTIGsXR>Pb#=M(qs-yxE>D?4b9gz=%c
z8ISp<*?!gJt8`f?^7+oeRm+3=cPXDKXs_|wV0^x`UB=8zT~JO(@2Pcv%Bne2`R{lK
z%6SG_8Vl~2zVwazO#kqony=<h;(YTgXnk9+Z|WiCRa+z$W@){8zv9)?b<-Y)oRkdF
z{K>Lv%M5Q(e(BjBoyRlRR?g5e6@4!ox=Z|1mCgL6vz)KGsMs&bDsy_BnEdTz$kLu`
z%GPsco#<TO^OgB(uko>BiP;=4+Iw#9?3wA9Y!muOey!eByTxssk2}wJ$X(d0d#>k+
zsekKV*{^%|nB6~-ZQEQmPvZgq3iHjMZ}CabUiGnmrPRsnd0fByv@V~{{@lCAtaybN
z>!14H(`UZLT6YpW5e-k>X=#Y4I}I!iQ0mURk>~qkOnLU5kAJ8<N3C$qjftz|q9#P$
zxFq^CtgvyN!2WF<Q<?wU)=j?tG&SDlqqcdNa;n<n={5h3{d<#Q?AS0zv5jk|!xWa8
zjH~N}7AQ>6o}eJIb(LV(J}yVc2@(rb1TOlsY<;z1O^^$(g4?x;ii_;l1#i5<veYSM
z?vn+#H}nZ6T|Fzo=q&VrMIxC+&A~->bKfpK@dHb`UnqO*TDQ;O^{yC3?;sV%37L*8
z8ivh_Tqkn9>iV@%*^z74juemX`XGgcNn&jwV*V^b%b9+82x$c{FX>{u63eCO<hnpY
z@Cs{`M4-o(fFR?5YbT=`x|-Il@p;+4>PTp5(L)1`6%tc+uZTIi;Eih3J&lEtvHgh)
zRaq-OJt_J3?!EoH2Y;R&o?rj>$HUY2-__aAy?@~6?C;-mpOx%uo%7}RuWu#)r0(U-
ztN0qTJg=T9Cw^bmzfar`E?-{$;qS{e`S!c(|G(3V`4{vz=DGi~G;{swu~(k9$C{{o
z`CF&THF;{?zW-mFEb^~CIvw@j?(?sk;lHlFman_`BO{GJuIGYq-2KhVS5>T>^fB$_
zuQdHfo4E7tu4W4@VcAxZc-`<#P3`8}$5(ip1WadM5h^ohMRG)ff6k*Drt|(N?f9%K
zx$mUl{R?lU`wIOhy<>e_@Z9j7jpr^o^PPpu72nD5zqxiWo%!FyKf3S!^giKO`DTMA
zpLE%cj>F5&<zASmB_V6}VuPVmf?Ct>3#Kjx@`48+i8(GYeAmf!&!+9HFYC>&JDn2S
zcAb<KP08V&wDEz<X+5K}v#rjwO?le6qh!yf=u-y#mAac(?QhQ2h@6>m+Uteep&J=4
zrvw8ZzENAy!#Shnqk@3m#ywwFB>$LKb@xzF?0Jt3Mx86>vDY{CN}n`Zxxl<XvHN10
zNVvO}v=i%b?T-^5X<YO@u*3F==Z&|s1r3)ps7u{`Tqe&@a7X@=pzON`(m#(+W_b57
z{_K7Y=|65CU+45Zx_(pQ==#l`eA0_gzR2lOTfEtDR?oR(8Aevp$~x+;Spw6N{;YHV
z;&S8fZmC4ei<4^?2l6^TRb~xbC0%a4<IT3o%BwCvG*9vAE4ll1RnONSwkL&bZwlGI
zOfr4CccOaYqa8x0eHy+CCUb0%5qwY;+ED%A@g>dqs}5WZNU&UV;OhDV*DRNr8}S+K
zWvf0Sb9{N7<(iDE0VbA<GOn)Au(11fdZKrm?!TwI3f>$L%v@cU>Fn>cSbgEe$roN|
zHojQd_@Xo5i%Z5QmyAyy8AYZta>pjOe|(Z?Qz_vOWzMnbncQCZ$-r-&-1);6l@jXw
zkDnyYS#x`dLH;yr>DS9LvXk$M<mMhTcisBcG2e8u-lM5e8-0DdSI+&n!Z&Dl>d9QA
z?I*v^XMU~vt4jNCVa9a%*IQpy?fntozU^Yw`c3!r*0jI=+VQg>soLUi&8G_)5!KhW
ze*MhDdob$fap&k?$rpd@@8Nz~JiDIZe)#fCtc8^ksSSN2cthXN+ze4znHd<N6jo<v
z_~zes6KH$?OO*SI&_aQufA6t2F?<ToHt}q>$lz<^?>eH9lM=XKh0?$8we3?}E-7pK
z>8)jm+<o}&-0w4=7PL=WDY$aNN>QUr>kqFF%BV2msmc`5;tlec$+a}q!I?X>f1+E)
zOM~F5g?m-J1+1ic0#_PbN}e|9P=b_{mxSzwRhJG$xbiCZ9F||X#^7GiVNPKl-fxGN
zt#ni7@SeA{>yXxtil0RW|2}LucjU+2EoZ{6wi}&hd%XSj@7^729aaAA6OT|1E$xmd
zN%nrp{5IF0>+a5F4i~>E80S`BxFyMXaNS$Wj@;@A_ar(0o;|Vq;OcPZp#N6;&ua($
zP6|D<dCSigiQA<wD<12=@ArPQGP9sw)Q0S<8?4eyH&mr<-N2Ui^>A&%u4@J&R;kxG
zR@dZAy|6}fv(tOMBeB|FvJS7guv&6<OWfHTDqpgWsd?%rPhK*8qo(DZ{DSLTd)qc@
zzA)|ab)1=)bL!x!8zNt%jxAeo#=xxCL+aQvPyOV}OQvtkbh%_?#=Cdg#>^L6dwi8A
zK^SKp)~DvMozh7*?pTtV)Aq&4%(+T(c4OS}8$6F*OU!0$P0nEx>Dw3?U~w!>u=My1
zof|UAn-hwA&NiJjG~+e{3D_iWPWaq&w&{?u89Uec8!{R;$;Ask^qh5EpPs{4Wt&{Q
zq-Ue0%UxqL_Pvc8D_`*ToOO;&$dUU9V&o^}$W<{Xf9?P=L>f0%2Cy7^*4ea?HNfSV
zn84Q)H<)fHC37cq_vkk08f|0PhDG3o&LPih_J{wScwS-<c=N($bM?<Zx6IP(KFZ+!
zg^7K}s!aCB-8s$0{x6q(Pm8MC8&|7mrT**Qm6$Wn7HMsl)V1B`m>o9%>-+9SLZ?i!
z|Lj~l?I)MsihBy){>MT$u6j@$_U!d44ffLceIbVPuFQ{*e?EUxQPk6;uhMqiN;puo
z>jsChguj{b6tNpYvo+TDuH1Tk*~CdkotKvMxukxtf4Fj9i$uUJo&$UT*`5}U*HYQz
zpTxm+Ec=MkeBZF{RJBWPM{Y)b<h>{zxp;Dh&bdny{Yn>2Ippm6Dn#vGRf_8u!PO6L
zx*cWk-E(TX<W=K`7H=L17RBzl+GsU9&uG^-m0xGS1z&x0_bV&!64$E@Rq|c;*XiH$
zxt>&G>(27&)k3rTHaQLTHx7KisdPW&X>*0)O!4`f6u&K-lb*USszdyDrE}5dIQ`Al
zT`5udZ;tQE+I_UR!%X1l%<rBTxQ<?*>rh$Qoy=&s=)<7~rl;G?nMxVE7@o%nEuJ*-
zT%FF+5T&{6-~akHX|m?BL{%SWhm9LbJGXG9dR(wtShGCjYx~vPD_;t8Z|!^c!sNi?
zJ+YUCMZ9kGt<2rZ;l5sOT5j}~pxfE3_b(ncF1dct`MsFv^*RUJk29=q)aF<Ix_7a+
z?Bm9pv!AP6y18+8W=WXNbNiXTa!h-9?)|GUo7u2+i`!9yOA*H`_IL%D&2%~TY${uz
zqsWI}m;BXN&YSHQWbd~;nDK}Gvyiz?yNdtM@tA47s%4_~TiN`K59gPimYwRjZ?>RD
zR-QHUE$+<Yx3)#~C6||ETieY}c`0ykrq}hE*Vz`^Z<GCaII{5Hk7X6XYr1NkZa6zR
z<R+zV*)>_f%2#+#jQf6nj@xs%uYDCd)OJKM|H)?Ir2RUl9Hv@W#RV`d@!VpU`trfN
zxqSZ$t!y0s-A`kBrN#cwq{?P#f8Nu5eGEx)d8>Zy7c;&2I&*t@T+YTVHfPRiv;2K}
z_32g7y><aNyNtwh%@;OF#``|ijQf1)*)HqlK{EGGhd1Bdug^F2^pD(cYaeH=cm7&(
zV_n}f%dQ3MPd|E}6V`j}^vhQ(%odmLa{BCT<vG1Q)_+%2ihuXJ>_S`JeXCvHZaveb
z?;4p~7Z~Q>m%Z~wrr~ahx%KhwnXlaq_qnI;vUzx5mfDFeE3AM2d$f&TvTDLdjTF<n
zHMUC^Y`^MXQ6HZ6-^Oh*uZ`f&yXSU#=*R88e|qlh&(HLXWw)1n4{YHtPgs6``jRTf
zkFlPvGxa}REwOCc5*gNO7Fp_~xK>Vz|LeWDvTr{%*Y9@Nu`ZnBWZ37Nx<jH%r`^4K
zYE|j+hd%szWp~ePQ{}aKc=YX_8P}vMtzIRv3+<A#%U_*4Q&#2=_mTuPHmsGEF;Zm}
zoLQBsU}$W@r5}`^U!q`WV6I@G5X7bLnU|KY0BV8<;n;c#X?;forKZ7aFCz<6F8y$Y
zXaz$BBLyRcSdb|xsTB&*3I-r66k<`THq{H3L5Cd#94^XL#&4~ho?AJw{IuEh1v}V}
z$TW#@y1F?X6YQ{HF#P^NBt-J>tS47}*n=;}yQ~X$pXYL*ftgjqiStxIqt=xLe7qS>
z+_xS`eSA|rkF)O0N%N2EZCM4J*mqrMw&O|QQ0_?J5uV}1V?5(9)3gZ(xUOwz)P0k{
z&u-zwrEYPUHBTncZe>e=lZN6H0WS{K79l6jg-F~kZn+-<4ji&CocMnkG${5cG#UCN
za2cLS;Ny;PV$zN{%(iVqv#rbn4$V0ac*IYbpTA*~{JH0>vF41F{B&21nYaA~i>Kd;
zl+=3mZ^C)GeQ(xqnSTD1zP>cWK{)CHo4Cyf#fA%e?4JAYt$n`7{#lKS(*MI>LMLH~
awc?V*qLPZD)HE&=19M|TE>%@me>VUL%!{u8

literal 0
HcmV?d00001

diff --git a/README.md b/README.md
index c8594bc..810ff74 100644
--- a/README.md
+++ b/README.md
@@ -31,8 +31,7 @@ wd = 0.01
 
 ### Ablation
 
-image drop out: 
-tcmr_viral TCMR efficientnet: version 0
+# Important things: 
+
+    * 
 
-wd incerease: 
-tcmr_viral TCMR efficientnet: version 110
\ No newline at end of file
diff --git a/code/MyLoss/__pycache__/loss_factory.cpython-39.pyc b/code/MyLoss/__pycache__/loss_factory.cpython-39.pyc
index db4ebcceee99d56aa365237e82f0e543b7ba5cc1..5d4cc8bb3818896e5944e5eb2f6b551388b67e48 100644
GIT binary patch
delta 512
zcmX>tvQ~sQk(ZZ?fq{Wx_SuqT)s4J4%#10MtC;2LbxK%Dm}^*?nVK1DStl^&nbfeP
zFlI9qIh3&0Fg7!S#T05-Qkb%tib6`*7O*dbsN<<&Nny@rDoTTi<^8H*Nny!mDyk@9
zTfhNQmG`2CC51Jcsi>=jZ2>1(<XR0&3R^Z)(W(-*1zaWE%?$M=Yzuf6GSo2DFfU}O
zWzE}G!;-?D%~Z4pB3{dyx1@$8g(I7(=nhN^#6q}^mKv56&TOWlKS=VtNG7J#u%vKh
zGfiMDl0#C#2~{z94hx4HH(1)Nge{9Ng(ro#m$8;LPpgI{g%2$5Q^KFZpCSMj6-nWr
zz*rQKA~=DuIL3>iiLpeWiLr(?m_bv>Z!;6?YDPwr$?MoM7zHMavnz^mFiJ3TfiWK=
z7b6do03!=yk=bNF_I(2S3=9lKCUC-ZvJ8hJqr+r74j)FX&D|V=j5<0XNiz^(1tJ_6
v7#MDG#K-3)=BCES7lHIgvE^3g<QEqgX-q!BsV8a-Qid=R%rc$K&E)_9r?rFF

delta 491
zcmZ20a$1Bpk(ZZ?fq{Wx+aIH3%Z<D_%#2x+tC;2L4N6!_m}^*?nNk>2n0gs&Stl^&
znbokQFlRFrxs<ThFg8QPlxkQ~ShAUlB1+g6urFi)tK+L-Nny=qD$0V1<^8E)Nny)o
zDyk`ATfhNQmG`QKC51hksi?1nZ2>1(<VFok3P(0m(Yg}01zeMbS%jr`)Uc#*W-}EX
zC}CT`16H%Fh9!k7o2lr*WJeZp?zS416z*)M35-QdlM`8_>v_Oz6_EZC?q&vv$u&$h
z%nMm+S@WD~SW<YwDr`#FviMT?Qusms%G0W0Nf7{x2bJ)r2&M>uMMY8sComSpr3g=8
zEKc%bXksi8Xkx5k4Q9|3@hehfU|`T>yv3iBn3S3mU!0qtUy_lT$C$qP3Cn6mM$^d?
z*fJPJCjVqp6ysu)V1!^kMlMDkCILnk#v=2{2JHI;3>X*~icH~z+2nuhij0nv6*znt
zwKwN-2r}yEf+WpBgf)n8WME*p#StH$mzbLxA72F0AH|kinUh~!T%<X96{nu42}l{j
MNHEK6@>@;^0KN`?rvLx|

diff --git a/code/MyLoss/loss_factory.py b/code/MyLoss/loss_factory.py
index 61502bf..f17f698 100755
--- a/code/MyLoss/loss_factory.py
+++ b/code/MyLoss/loss_factory.py
@@ -29,7 +29,7 @@ def create_loss(args, n_classes, w1=1.0, w2=0.5):
     loss = None
     print(conf_loss)
     if hasattr(nn, conf_loss): 
-        loss = getattr(nn, conf_loss)(label_smoothing=0.5) 
+        loss = getattr(nn, conf_loss)()
         # loss = getattr(nn, conf_loss)(label_smoothing=0.5) 
     #binary loss
     elif conf_loss == "focal":
diff --git a/code/__pycache__/test_visualize.cpython-39.pyc b/code/__pycache__/test_visualize.cpython-39.pyc
index e961ce76be403bfa75de922eeedcbaa056c2b97c..c3a94d6f97af78fa95e1263a76b8e60928a923d5 100644
GIT binary patch
delta 7850
zcmX?~v9Uxak(ZZ?fq{WxU4CuymrMqR#~=<e=44=CaA06yDBi)kQOA&xnYC!k<^)Cs
z#`+Y76onkWT>mJ4MurrI6vdo?+`y<nFk2}nC^tAN7|d4A3CRtO3e63R3Ip>QbHby-
zQy5ZIaw2jgq9UMTkx`MU;tQfu#j~Op#4Kc(z*Mv?MKzBpMQtHtR8(qgYE&~LBLhe_
zD{evjLWZb>)Ow~=lPs5J#uRmFhA5ZR#8j6BtSK4`8JiiSl2T2w98xs1nI<q7ainOa
zXtyw=Cb=+xWb*h@bddR};tP^flTsKY85S}|rKF~$=t?p~rKYB)=t(l9=u0w0rKS3(
zrZqDzNKcJg$QYH8nv!B5$q<#9npmHj+05v|5Gxavm72LAJ2h(|V`^q<)`FZA!xW>1
zOg0P?m<z*FGZy5gW-ep|`7$amHDf{kLIxLxSe~eY6ywx__`(Uyg<Yu`3yKypFfybv
zEht{dkmZyjA;}O`k|Lg3GMgdAWG-`5DU@wGmpQ5|H8{02wJgOfrQSF-EycW-HL5%{
zEVVpEBE=%bvX?okA~iCrGBqv5s+TdUGBtcb_(F!Ls&s~^>U4&vnskP!+7xTB|LZ`$
z0*6U`YD8)U*elnf8d4i#n4=n_nqruvyrY_<T2fn6o0=J;+EN=Av@c|UhC*X%<ARR*
z6i|>gLgS=!Aw$0eBSUINYGX5FR984d3L^^x3qvzABSW5$A;Scg;syf-Mlg&7`Bglt
zJGCoCBE_bcDXJ&6H>(fM>rbs-Fd?-I6lSS43nrxYgITo;CZsljS@o%Hk_;)fto00%
z3@H-k49pBR45<)7J5~lshSb*7o@U0>wrnPd+ys_Fg;enc6H{vzGA@|3kYT~(g$z+s
zQpFccO|4zXxM12s2Ds?-)Y#N1Su?;%EjD#Jlm!i`Sih*5sr9KdL6%E0MCH{(Id+l^
zQL|F>QW&zCCa@M|q%cO!&f`gKPo34wn3^Ze5H&kBHpPCyoYa}Avr}iK<}G9a89#xw
z&?dDqbxP{=W~QjQsj;bZL88#;nwJ`z;*dHowMv4anTe4hg)x{x)A1!JPx@&x-eL(z
zEXmMhytR2glP04pdkISlb1yR^Lk&X~QwnzqPYE*;pErd)g|C+hD$AcDfKa`Fbs<A7
zV-3h`!O6TV*^_HodKu*=bF=0%Don0st!2D7`6sJ?eF?(?mKv6YjI~@P3|TBS94X8x
zEWJ#%+$F48Y;Yb=4MP@t3Trk~(SjO=ERHPBEUt9MTHX?F5U++eg|UVqizkJxm!*aw
zo;S}6teQQWspuR?K8p{knjgeNQ(e!SCj%1B5<oRDg`<}lWLOPDmf!@&B90n{1%frq
z3mF#(r7$gIT)?!Dp_VU?uZ9_!U&FjW7$jTEUm{Y&)y$aASSwJ&ki}cWS0j+Zn8Ml1
zTq{_@ut2m%a3Ny~Lk)i`QwoDZ3PUSX8j~bLtzZgQtxyR=mROBo3O6_mgi9E*#3#qG
z%W_IAWT+Jbh1BF0_O0A%3=9lK>L5a6vI~bk(@Unwr5sh0zjMe-f~0P-<s??778%`Q
z$;&UwEdmw3MVgcKIb+$`Qc_D2lQSlFb4Cg>#@}L$EHVa}Vgw@8C;#A#<CX(4!G!$e
zL@p^uCILnc5M<_K=3(Jt<YVOH<YMGul3?Ou<YAOx=3*3L;$ReG;$!4u=3*9LlwxIJ
z<YSa#<YD7t6l0X*sgjwzURq-E6)r)34F(1V4h9AWW>86gh-LE^E_X)8g2^^K5tHZf
zNY&RcWbrpMW(lN-N;1?6r-;>xlnB;{q=+{&^-I-?mIy5nt`S|xSSuFJkivkH?-`4~
zA?N!{hFb9wks5|9QBZ+1fiZSUtwfE)Y=#tx8nGH-Nrn=!8i{5`Nru@BbD3);GZ|{c
zYb0$LO4!9yBx@v_nI`-3icQYpm1aKF$uxNqZ~A0LKH<p<e6HqsH8L&?v00r=9Sj{z
zk_;&#k_@#%CE_Uxplnqm3lei-h;^&wED^8a1c{aKfxI+%8J|%-sGOd_Txd3dsgSRR
zw?;fg#fG7VWdTEtIH<fS5n3Q$BVHm1Qa+m@MRhJyidr+%LMBFr4#ouv;L=<thN)Ju
zR;fh1MsYSnih7L_$n_vgYLpg8E@W6By^x`Uu|^SGBiusxJ~pUUxrQODUa>}5k|9My
zlA%_mh9L{W)0AYWRjpA;VasNkz*6Lt!ceQ0$5WzIquR_^qaw{vt5zbOqP0M|M!80<
zMzux-TBxuT3Z-bLu(mKDENNz}VTf0(VaQUcVXxsx(Miz-6&59`3)DIo(-{{s)~f5*
zsAs7|f}@!+MNgU`MIXjnps|pFhatrPq$PzRm_gIfuSjWftALLtC=IS;y2YetaEq~`
z2*iUB@(c_Nx43L_GK))+^K%RACQSY%;L4~x*+I}&4y-B>o`S(#kouw&1_p+SlV=M$
z;a4(g@^3;)rc90&B4En2$-4<DnLb%sn1CrWCRYo4Gs;aqF03R5_sA_So0QCCh{u;s
z{x2NJ=ruV|M4h!Lgn@x!a<zyJ8(68_<h>$t{9q>B`_Cr75=ml|o9rv9C4wsR9u#1y
zlN|&lCeIX+(*jF^i97}d22IAIOprWS0B+b<kPt|V$mIW`A-3M2IAP5z%`K?B#a5CJ
zikl*^W~N&_#W|TNsqw|h`9-P4*o^uyd4iZFix<fK55!Cue@zw@4>4NFRKy6foT*43
zWRn4i0J{KWDkL3%xo`)`PM$4p&8Ru~x_BCkC&(-h2^&L6u(Jw^GV@A`Ko%60fXo3o
zwx|@uDg(*0fLwcvH8(pYvuN^XiBLw3$)1w#jJGCFlvL+01gT)miDE6uFUT&rHTk$?
z2xG`(8L2qN?#bO!YF;2mXtEU*flNyW5yc>)07R672yoDW9C?ecB(=CCKD8h}IU_zb
zFQpSy2+jdjxJ)dJEKEF1lYdJ|$8$?D@_~v~kQgHmBMXxVqa33MBM%c7BOki}BMT!B
zqZStnvjCG669*F)lL)gA6AL2;6Az;XlL(^-BS=Jyk%LKtiGzuUiGz`ck%Li;k&j7%
zlY^0qQHp8uWNEv4R*<Do3<@Ank?ag|O%xLYLkYtI#)Y72o>7vamNkW`hIKYW3iDj%
zTDBUd1<VT>YT45nYB_4yT^M4cYB@_7YS@|?YZ#>&Y8YoTq_EU*rm%tv))clH&J=JF
zo5DVqO`4&WtAu3%YYE!|_8PAGg^UXs7jTrYWN|KJtmQ7@TEJby4Pw=BEZ_mL7#Rw+
zO1KvA)^IFjtOeD&d^H?3><gI|@GoRoAh3|3mZyd%OAurcm}Zn@U}C7{t>vp>PGJC5
zrBISFjaiZb>|}m$-NC<*v6erDqqbh4glmCFiD(UPjes;mGh>M;sB#9e(m>U^SPes#
z_(H~7!4ioYc2ETpD;L95D-^?2D_kp5!(SsL$xtI$BO=LABW%Mkfw54aMsR^-3hP2f
z2w5vwB9tXnBUr;*!<WXC!kNMas`6_D7chg>NT)E?r*NmRE@ZA1tP!jcTp+WML4u)1
zaDnVX5DDf<Gl1maz7<U2N#Si_s1>adt>LK=Y-Xwzs}ZXagX$Cm>xA;88B+K}7~o+m
zmcm~vUc*zv169idR$C9{qABJnVOt<q!vl^8?gjEG0#KGhjd%%LmLjM!s$s}dDp9Us
zS)j6zp_x&d0aUGKsip{~2&M>u8-Uyk)W8)j6F4;p&t(BsZgo6$+;w7gg5bhk#ILAc
ziGhLP7o&cai+*KJVxE3VVo9RDOKNI?dr@LqiN2AMNxY$vMQLt)VqRW;Nn%NAioRoF
zaz<)iyi;a=L2+e#FsM8!)_3ypclHjhVv5%QmlWUvu!>bvFRLK^<pWTz<w(lR$;?Yl
zEYf5wY5?U$Jq8AbWTwei<ujRH{-5lipcH<KDX-ubYiVX)iN!4*XMZ36AYaFTcrVwG
zTkL5$`H3aQMz@%A6Du@XZZT)(rr%=8O)Sp7#h#IxSdyDqP~-$M6jT5}$|F!NFXClj
zU`U$0Lm{>PFvtzeLGDf=MHLJT3{@;3muadOF@eRC%TtQ_LHeyh#BPundtz~7QBh)L
z5kE+10!WB0vACqNAhl=@NW=mp!j)E(pBoP@Xp4G5qOBlNj>O{P)ZC<;R7e2_D%+%P
zu@+|}7Ni#0f)ub6rRJ30V#&`-EiRIp+^8tc=r(z-qNM08=G@G@qWvHxoFJ3I4w!sX
zQ6aGpWH_jJDDnldrh}BQ6epIYYDyOQfkc8pL@<bm1Q8Kn1sVCdsZs1%1?llAnMJqQ
zlJoP6Qi_XwKypmU`L{SrGILVn3ld8*iVjS+P*Sfy2-2DlB9cLB7%Ra6c#FNfI5R#u
zF}KJaB+8v%T2fG20?~&W-nWE6PKr+~DM`&M$;{7-&rK{)V`N|`23g6#&Bq8T1wkdG
z7NZ0s7ZVQ)7n=y17_$^37ZVGU8nYOv&=g=6VH9DK0V!qYV$@(_;hg+e$wN^X+FAt{
zja7^c44DkI%;^lZEGdk&tTl`&jFa<}CF_||m{S-RGG{T?vZXNAvX?NWuz(szHB6vx
zZ!=>pM+s938;S^L33Ck-xM1;$VXEbdVXEb><*DJWVP3$pkfDsRXi5zasNYn>mBy6L
zSi@Yy4dU4_6sEw~k_-#jU}{o8WfmhtVM+=60*)Ha6ee+o6m}7Y8V(pMg(Dr*W#O!0
z$l_WE=`Vrhzz*oB<*MO=TFg-Nq=vbMOA<76kXOrB!<Pc;k?_|r*6>fxQx;3F6)54U
z;R6-w&7i`O2h_W%6{-=a;jIw_TPX;$vX!Yu2rMFm5P_7*Y0NeZg?|d4)bK9gUC6M2
zZy~6?$LR;jzo25FNN=)(pvB|_HC95+v}UzDa6tmedbnDAlNlKpic&$zYa0Ut!%I-d
zLzBPgB1qsAhyW%0qVphDH;4eIba1W$<+Y+SAhEL`;v9&W3?igJ1f&%KN+6oT;LLD~
z6_l`xi*8PyqVC8IPtQdUCqGbE4nF`&#O#&v;M{PFy#m63xE!3E!6nB@1_p*EP}0=`
z)y*8BMga?x1``XT5Tg*22&hrOD8sD5B*4f6s@7R#SSBZE1Vn8Cx#tOp0O?0E6Jf9-
z0|SF0sD1}^136eZf;s9L<TzrAK;p@uMi$6E5C)Z9Jh1**+hi_H{rVK<Y^EZM6qZ`1
zJeCyJTBZ($Ebc5GFv$xh`Lg(-v_J<#7H5_qnB)SB3Dq#Au+8D9WiAm;W5{HvVP3-6
z$5_h(QqjSXCDOr=B??m4!H^{e63YVV5>H{D!(Pi;A_3M>&kEMURwB7T3Z!fyD2qv_
zaMZAOFl4c0$$;utFiSRt8%)cANqJC}*~?YSQNjk&)y$Z}C(TgHS)x$Gxj-?6A2fbf
z3$jcpMIf7L0%MU9s0M3csNn)-+FGtWp>~Ee#uVWcq52jMm|zXp1jb^g66F*TaIcHY
zg(23emb*l`h8xUh%TlS~?qEm}?PaRvfm<sk%}~P=uUf;ArB=gJ!;&HnswO8e7P^5e
zLkYi^0-%@!rTkkQX{m`NrA4X5A#R!+w>UwyVtmo$30j(3Q$WdwBPlUCJ1IXe^%h4`
zVtRaWW>xAfp3>sfc$hX#_9C^(SGDBTe}Rl#0U|(g`|<=xH*<PH=`FUr#FEUi)FNF_
zyP9jVn6|kHcVcpKYEEiVVo83{E%ubuvdrYv$${DtjK3zY()Q+F$ymhC$iT3Yu}E@~
zjyR;#fJj%ME|uJ5ZJi90m5fzv;D)WOCetleFsle$bqIlV-(pTqONS&)a7qVhRG55R
z$4p$Dk%8eBZ&_w>X<|-hRU)Va*JLVEn=GOmAY615<Ofh4d5hIIKP5G%=-1?IT_b@X
zAmN`N0u(SszbEh1Rbd1BS8?)VUH#}=Ea?TM#YJ2oqdC%x5>w(6laq_4fr5c0Jtse@
zNEB2kOa<{pa`F>X;z9LZd~!x=a&|#JsG|U``2<0M#9IV%V9^bbWq&|@iVQtHDe(9T
zXGv0gPJViNYSAtBlA^@Sywsv1^~oFblx)G_BL=bp<X2FWAv5n3C|iSk&A`OP%*V~g
z#>2?Q$il?N$Oal%VFb}^j7&e+xF@sgE2uFsvN1D(Bw3i=vx)M7#$6Z%n1ootikUeW
z`4|Nli_ABB>vJ+fia=1irf4h3!Cc_h1tR2%<UkHl2M0w~er6uH<^{)$B1l3BL?|;d
zFle$M`4n6ebL6F#W36zxz#$b6s^|@wCSNvCF|-03A0MBan3)$J4|YfqC`pHCG8I*U
zg02!ofcmFJIUttaWDP^Pdd4DEkc$~_i9{t96}dqYzf)qmPkv%bYLO=6EddNMrdvEk
ziFqmcx$(sXIhiH5xLguT65;AuN(xhQtJsoDQxdHVAnhPtkpK8V1SlD!ly#E>1X(8M
zFmq16sI8(`Bmhz)2qHi!7LruK5ebSz@Yq2m6Rajlnd~4a0@7L!8qZ2+s9}gzsAWuH
zC}GHAoXs$osf4M9v6-=qp@;>-n#feh63kE}0t#kkNHBv_AUGOuSyv6S?v_9Xr~w#X
zkXV$Mn_7}uR9pl~fJIXn80r~raTS9a$nl^Nfhx9KP$tp4#o$(@fwyJti)dNH+Te)h
zH#jk8=B3A%Bqrsg7VD>^LjpTKz%e-9!!_Ou)P#%&jVi`_6r~oI=w%h>=M{l-3ro>=
zQ1pVD+P7GfK}~j$D;Zvb`j?taMeuy9$#{!9uQWG4IS15gEly^f9Lp>**<W0$zNi*t
z065@rb@SK20;WhAq!uwaPy=dXb2%lZD}-d`rdlaz&My)J<#BdUn$O8hg5(TP%eV;C
z+Ah)srCO$<)LY`k**U3+MR|I;lPe83G0IHVG3sXAIeCYXjwYykUNjM8PZNkx01=>C
zv&bBjR6yyz=p#r>b~3NAcs;nR&<4qXb6p+CcDCZwqO#PYTP$h$IVnZpO!pV0LltB@
zXHsHuYJ5(9adFXakYe_-)S}|d{5)_8b&Ca*VZnWbTinU{d1;yHko*YB&EV4Y7F$7L
zQEFaE5vXlmq)-pC8C3Y+;>gKQkIzfYO)WA78OsXJEk&Rr@)jhK6od0#lsc%ADNZdZ
z)~iY^DuUG{@gPx9!I)YUU!0j+kds<u$H>4CB@Ge>tA<uNND7PW89~WWpd`O2IYY0s
zBr~U252U=v0puu0kY`y+GILXloIp$-u(I6LlA_Gy;v!Jn>J|$q+uq{IERIhsOU%px
zrM)6>5zU;KTX2iZy(lpyI5{yVwdfXGF_;Dy=tU+V<0pYU$(3A>D8ZdU!r<Bg+<F6L
zm?&Ow_J}V}%}mcIDZa&=omzQ|xgfvb7JE@@K~7?F>Meedr{fVh3Ow?3iwlwSZn1*d
zw|I+E3rjPLQj6n3)yplG)Uw2!qNDYocm-D#-#{!-Ubw{$3WVa+l3PqEIYk{Hcd{0j
zBo>uKaf1hga`MwNONyhoz?_1j{G{SrP>IB()STiXaElYvlL7a^ZZRh%rWcujOyNmN
zEJ@CQv?Gg)%s}=@f$a|kF|9y^8%P~TVrfZ!a$<2w5vT!JWCM}_kN1N*g++ECE_mc0
z+~#coalvC_;P!16hzsgn7J<8{+8{2t(Yz7FG5`^qK*TJNMXZUXIz{=(x0qA&Qjm&I
za8?GDp|?0}a`RJ4b5cP`iJ|z#<QK*^%rYG5lhsYs>p4IzS{_EwJOd9S2a^;d2O|#?
zA0r<#52FAR9}6F7Ad8s~Jj^A<#KpwI!^6nM$iV~}DC1!iV&Y?gbn;jjSr~<wc$fs3
xIGFfA17?gGpxFu*E=DdUE=CbX1x6u8F-9&%E@p^*T#Rf?Jd6@7Y+T@47yy>^s}TSI

delta 6421
zcmdm3ay~;Rk(ZZ?fq{YH?_AsD+%N`)#~=<eW@TVtaA06yD2`*=sAI^;%v2P$Ie}4u
zkuOy*)ufq`ks*aKm_buv^9xohMt$xQmK5e*W=4h@hAgHOo)q2^W+Xme3VRBFFB4Q&
zAVqL;AiF4|(ByRXY}S`d3=9mDFR}MB%1=(<$Y)fXe1W5u@!DiRPXGEE&IK$B8EQFe
zIKVVV3Bv-`8kU8OwOl0(S*$f2Da<J>y-c;-C2U#ja2`($Ll#F0Yc^BSh8l(}&MdAh
z?sUdl-V%^FXAN%(V+}(VZwgy4OASLjU!E6OHG4Kw(LJzgeyD1J8iuTFOyx3QMS`dX
zrf~E!gAA)-SRhoxypVB$a0=5x#sy3Z8EW}zm=}nEc(wc`qBUI2jOmQEAfx$e_-X`F
z7*jZVnQH}07#4_4?&p%_7q1aa;Rbt2xP&1~V)AY-Sx!&@h=Dvl`61U<ZgmC*h9V6R
zp*eXWw?0#m=;TY>RctA#C5g!ylf8HpB|$1~vE?LIrWP69V#&)d$}IxLbCK5M7M|G2
zFL@$FSQ1N0@@_Fk78!%gF#-`9lY@BUxaC1iFrhGcH?Nc>qW~iZ2r~0A@_=Y2E=E2^
zK6Wlf9wrGUE=C?k31%)vAtn(<E=Dd!0Y)iS7DhfsDMlXF$$Wf<{Mrl*44e!M49uW3
z)Wy2lkI$Wvv1;-({s_(zz7D1up=Kt=$#40k>pPf1si}m&h9OI!nK4T+MO2cZRyakh
zR-{CzMkGbNnW<l@R<uNTfk=($LdIILaE25H76ulEW@bi)JRw7d2~5Qt1`Lc~7|FoM
zkjYRhULsn<kR{g4=)y38Db}l2qDEpiLyAO=SdFkGLy35eL^Go#!)%7R%(ar4;tVz7
zHIg<AC44E8HImItj0`1wHA2mdlluh3^h)?@q(QE0XHH{GkxF50;i!>zVQ6NomC0nN
zm93F+VTirg$prEbNY&&+0_o~}9V{Sr2~(Ct2TKie3R?<$FLMl2tyHaC315v|3Y#PY
zNPe=7psV8qunAu}nK~Fcm?RleL?jt%g+Qi(LkHv}1(4brS&*y?L+r6y&Jw;FPLLQR
zye97uG^#HVt>Mm+hZyX_5F1siSi@bTD9KQ(RHL*&av{S4>4gjn6jB&dR8rU$GJ`~G
zlolv1WRPH}QCgt15JZA`(hMo8A`o55B`R5}3mI#bO4Jsp*C;JytW}=CT==0zd4Wca
z!fb{VwHn3Q3@Pe!Sr#%f)-z0CE}S=kxv*pcQ=vi)Z;g10h7Cgv%L0ZP@r8_x3?;$~
zG;735gh25-n;}JWE>ntDGt)vQMurZ?1zHOkTo__?Vwh@GYE?`4YE))3q-fWug5n0`
z%NkXf2RayQRBRX~uoNyr#BFR)ty&F3eU^5Onj}MtjwC~^dJO}Vrz^=&t5Kt#!j{c6
zfwhP$g`rk6kEcYZMx&XrMqQerR<ndJMQ?#_jarRnjYf_7LU5!_U@5$nqMyRr!ho=(
znX!f;Ub}`NORol0$QYy;g37rP{RIXcjOmOE8EXx+YP7NpA@R}7m|`T&kYWtuEihWh
zz{8MY0@9Mg5X_)y>Q|&ZSzOph6O_+ZGTma*Gq}ZAQ3T>a2n7ZPhFe@VIhn;J$@#ej
zb}f^8gk2d`CMOz+OtunMQ3mH%u<}HBb_H`m8jDgG7#Lb7>xwwZfyHnsX`9?jNJ+=!
zw}g~*P4*QfU{UwvwS<)POy(8yW|W^CDW)U_b~MN%x43LlGLs=8Fmv)mu|P)e$&ba<
znQt-1Pv#f5VFN3apByMI#}8(L^ng8odvcw45~KX&hvHgjGEXM!ipWmZ6_-;5>i`q^
z3=9mKj76F7kc6A_YO=0~<mAZ`A)+>*2w}}D%`K?B#a5CJikPDO$$umy(ae84Sw+%P
z+#6(+C<6n-%l`}v48Iulf3fIg6{Js2lr#Yqev%<ZnoLEEAZwY5^g(J3Km^<|kTd}1
z!kr>9Sw_m5(Rgx%R2sJz*f9k~nRz9X4@ue7OM!HQ?JNbcKyEE61F_0M3Rysoy~UcF
zoswB}i?ui-u^{ypXGu|FUU5Nwaq2DR+|0aN%(;mbx7ZSkODYReZ!zT+++r=w%qy`d
z;s+VPnO2man^;toSb2-3II%4C7E4NLZow_q%#zgH;vyf0$@bE&jM|f1rQI2$CZCm7
z=Pv^3V$6wREy*v)E{U4VEEB?*H#tHkPOtzZ!%~u&lX{CeGdI1cck*@_wR(_GHQ9=a
zK~7Ew5hWm^5JXgf2yo1RLgE%*NosLPd}={{az=b=UP=w9R@nt=uQ0JNvM}*5OE8Ks
ziZF>V@iFo-@-YiA@-cET^006)i?OiqNHKzHFbR-KMjl2MCJ{zCMiE9HMy_N=E=E2k
z4psq17DgUMEiM*j0VXLX4kj)}4n`414kjTc7Er~<$it|?B*F-)=r|Zfn0Oe)7&(|U
zSUH$@7(psQm7^FVACm~95F-bp7^4Cs4<jFw5+esA7o!yG<b|?!^=u%QLa`VF0|SWf
z4Dvz<69YpEV+vCXLoHhkTMctFV=a3P!vcmHrW*EzObZwnGJx9=9H9184M!FWsD1*`
zDJ+r<V478ufr+7(vzDudC50h{p_K_rvZgUhGAv+S$N*{{)h}SL;a<pC%bmhj%TvO!
zfU|_FhO>r8nxUDogsX-N!b$@*TDWT%vUnDP+DptS>@~a)3rm<&IBK||Y~C7nP`w%}
z6T?)?7sFJ`Un@|<UBf5IP{UgzAjwd}Z^KZyrG|F_Ukb}YMhID3&s)Nl#b3i)!&$?X
z#+1UD!qv-M%Ui>{fEBDrAcYmw?pX*HtKnTB2(FuW7YHq6fbpam!0KV)!3%0*wlLHR
z)(F;c)bKVl)e6-J)d)d#3W0S(dD09ieDxv>3xrEVvOw(zp%ngF;S#Y0;x$4G8EZvq
zL>5S-2tZkqHNqugSyG_7sfHm-x<saib%E?chGs@-hFZ}Y(JZ+X!4$z1A#hVxWPyB*
z=xl}*;Tp!-3@IYus*s_EqlN?O2oA6#AiR18kSmZJ!2xyv2h>_>I6xHQfI5ykkvgF|
zUT|e2=9dgAY8V*mAxRIE>OeRcl=47DNjgI<V-2VRwy9+*VOYSJ!dSzU!X(MCkg1lr
zhB*b4f|x)t6vI@@Qp;MyxPZBYrG}-Mv4o|DwV4r=U>F$+Ich*10am|XoG}_2dRq3H
zF`D2K?&bgg|Nj@|fy%mi)>}+PskhkEGV@Xrb8>F678m4XmK61avbZ!z0b3EsO}E&R
z^Ye>Rif?g(%8B^A#N5<d%mw+yMaiJTgRv5=biBoto>~$Q(W1=Az);Ks@+Kn-69=OJ
zqY$G6W0hrcX>mz@Zv5mMiW+eU#Z}zp#hH5G^3X~F?A9V>P?7zBfq|ilCpWP;8zxyL
znpjejnpcvUpBJB-SO62g#gUYmlbM&ASft5V)CBT{=;SL3c}zt$lU)>*xQgb3gcg80
zHie4frbTl>USuf(6<4=dK*rx<&qz%y$xSRM0u=&i-YVi{U|{f@yjL+b<r2u>%t7u>
zAw`uSBWuA9j8890Oi5161yw9Xbs)19ZgC_Q7pLYX<)jvYD*|vGFae}x7KpG05q2QM
zSc+0}N{cMO%>2C6;v(tEyOpGE-9ZA}nRz9tMFsgei6yDG*b7pNa!X56i{^mzf_zXk
z55$6d4O~MMEd+UzRawD)B1jTk9r=P-GeN{6klVzH{6Op=5a9zNKrvku31UTnh?5}V
z=;StK_4*4SCa9tSM|jZ*5Em2}x7f>zGvkvJbBjDcLf~-Z&Mz$~C@qOENG!=f3&2}K
zpt>p^CFT@C=>y~t1||+p4mJ@M9wsqnDMl_P7A7@D9!4HU5k?_-Mge9}B_hMf!^FYP
z#i+r=!UXEt@G%N8@-d1rO?Fc8&=g@{VBlb2U;ss2@gYWd>CK$UP|K3eP|KRaT+3F&
zn8G}HwTfgtOA2cW^Frn<##;6i=30&tCQx^vg`tKC)QM<jtmQ0WO5sRhM-t&GVXk2U
z7qC__OtsuGOtn0<yfr*EETArM8Dr6u8r~Z2G^TXM8kQO!8-~Ih5DhBnA>yD0cVSKm
z+XD6)t`wGfafTF55r!Jh6cz|8g)1G@LFE8-V;3^k@`36BkQ~^;8@1du+)#@citf~~
z)No6J3Wt5Q{5AZb3a3_}hOtHfTEQSyHfb!93|Wln3@O};7_*rtFct+s)lFWfA_fbC
zTERRSs4$|I0viI-UMpC_UBeG5ahe%xg-W<<xSJVkg=++B_-cf}UJwE~6U1s|su2d!
zHNprHh?CNoZ5Rr_6yB-fTfnoBVFB+#P=lMN9##l~3I_%T76u3grNUy+_y)Ax1C@D<
zB}_Gppz?14a|-i9FsqrdmI+Oi1=NA7Wv*eWVNPQTX3%8yD>?v5mY|~c7o#q?DA8mo
zIs_6s%)k)LPy{OCijIIX1IsOr%-nQv$yRh6q@WR$@fefPQ!saWYDqwTaVDtXDF7v1
zP?BR{1hqWG7^@5>UsM#HoS?y>sArRtpPZOeY^R5$0a2nAiNgC0AlE@`!PP0Kn_RD!
zR}U%(@vEz6WMC*t1v&l{sL%xU?=_i=Uc&^6K%$y_MYll$;L-<Nc-#bWdqD)a)CZM7
zMb|*w>mcF=h?vH}z@P#NEN~$PYRzhjfQye?te_g9xacTI-K)v<>W<t<<;3LE>dL90
zQXz`H5>g*Tu~$GC5U+rmnMI&xWYHA{28KLPfP&iH3``tsEQ~BnEKC|qEQ~_nu?!wY
z0Y(vK4MqWQsldUg!KlF`10Me{lc|!12E1w1evm_+fe4U^NVXuXQet3WFa*`Kpz#+D
zRt`6gdImKP-y)DWv|ffJNl>-L4I3%RnfzWuzrF)Ba>WfMd9rw+G+zfp7Jrrim=pwy
z38gU4VXtK>5l&;sWT;_U!q~@H%M6n5V8{~bV8{{$sp??J5(9~4f%J%{u*_kvWhs#W
z>tF%vU@ehcAO%vjkg=AnL^_4Frk<^XA&VtTCIv#vrm%zga$r(Eg(HQtm$R0=1SBig
z%$UL@%}~oxqEN%JKrw}TAtR_<>tM)IO5w?7n!s4Zk;0q8*TPW42`ckyIrD_t8PXV2
z_*3{=IACHmoD&#}HA<9I1i+ns&OjH2SgTsD5@pbk86!gtYnDn4R|i9iAY`-;<kn`!
z6d`Gb8t!=28ul!;8txkA6k$-iYyxAU4!H3n;`dSj6hEK@t;s%lvX-IDbWmpFNJ>o3
zPRh?qy~UA~m>yr8S(U2EUZg(xotC`PUyz24AOe(KUV<v1TWooWC7ETZMS7t8%Q@LZ
z+nj|Zy`Xe*y><j+)#Mx6-rSmuMSP6lj-=FNJsoioa1sC$;3Nm?lQT|^)X6Z>WUOKX
z_h4)_nQpOySw-L~lb?}+VI|`&=H#?=NK%4pWS;z0$4p$9k%8eBZ&_w>X<|-hRU)WV
z&}1r7pX{U?z<!H4xjdz4(&VMOMl!!aPX7b)C97|KN@`9K$j3!sU;Uf>Tvvq+>^G&!
zf_nO4MI4}f%mp%;1Jt01PfSiOngP<wlAe>FR3r$J1~nmWiR9!bro@A~g7L{2sma*|
z`I&hoMc@iw5ELjpMPT=_WaQ_jPM)o&Cj}mZ<Sa>w&&f|uPc6E|UQ(2pnU`8rq%!%j
zo>C$>XoNt<gS-i95oP9W0_DjlP<~+IV&>!KV`pPz19!bZG}})$E<P3(Mi^vcWckI$
z$;ZmW#Kg$N$i&3P$i&FT45Hasm_D$HOfJwDXEfQ|q0h+(DbGM1@*+^<Nt3GxG~Qe!
z1G0S*D72(NEERAlWaVe(ftwTHIFSWO$bkrXP~@_JJz1m;5&$<xIr37=v9><Az=0GG
z3Jnpa$pVHd_2yvX<KuG^GxOr(!43h}5+RyQMb#ijRDlRkOSvcq!~!|@7GF+gdPYfJ
zW?nid7>jRll%*CGgW92*ensM-XkrJsH77F(;v;a258OD@1UV5@``i*Q&dy0qESg+o
zvXxO{vW96l<C)1@Om*s)gDjZ>B3eL%B8UK$N<}6htsvJHeFcfhfvn*yN=;0OuT0F%
z0X5I_(lXN_ApuT4;LvLTDPt>6Eh<Yby2X-~pOaDq4m{8ZNRblA@0>}A#i{W*`NhRW
z|3JNwTkKHJfK%Bm7Es6+fuiFUcXB?+3P`|$q6M5mZm|_47NzEu6oH$Sptc-1QQzXo
z$%j^^rXcr%s@NiM5(g(S1OZNG95%W6DWy57c5I-`U7Rx6&C-UMg(GQltED<4I{^QQ
B$xQ$N

diff --git a/code/datasets/ResNet.py b/code/datasets/ResNet.py
index f8fe706..5c3c277 100644
--- a/code/datasets/ResNet.py
+++ b/code/datasets/ResNet.py
@@ -394,4 +394,12 @@ def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
     """
     kwargs['width_per_group'] = 64 * 2
     return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
-                   pretrained, progress, **kwargs)
\ No newline at end of file
+                   pretrained, progress, **kwargs)
+
+if __name__ == '__main__':
+
+
+    model_ft = resnet50(num_classes=1024, mlp=False, two_branch=False, normlinear=True)
+
+    # model_ft.fc = nn.Identity()
+    print(model_ft)
\ No newline at end of file
diff --git a/code/datasets/__pycache__/ResNet.cpython-39.pyc b/code/datasets/__pycache__/ResNet.cpython-39.pyc
index 3d963e71935c049d798681b7b63ea581b34f4472..6abf8d43e83aa27f6d60f4b306f2509936d359c5 100644
GIT binary patch
delta 528
zcmdmzG(DL&k(ZZ?fq{WxxmsTGnTfozj7}4^lekS&1X2WB7^ApT%qGr=tT#_lNKtHI
zj1o?<NKr~rZeffPNwG{(Nl|TKj1o<;N>NKuZ()oQOR-MTNYQLzj1o_=NzqEtZefg)
zNU=@PNzrX#jFL>TOVLZwZ()p*O0iEdNHJ_-jFL{3NioW1n!s3OmnxcKoFbPZmm;5H
z(#sMhn<|$o+sw$wkir<uz@+K0Ie@WOlrd`ZJuywDD4EH=A~Ks*#hIBIqc&?wO=f27
zpS)Mrk8%2D9=R4q@#zc<3`Oz`3=Auoizb8E{UBn>=H>D$7};ik*fTeWDo$b)ngvqB
zR0LM<1QGwKw33l+4uriz1;GwiJH#m33=#*0RuM>N5mcF;#vVqMg&-+m5HTM_fK_sX
zSaU%HR3)F*P9}&UJRnhIL+<G8fJ*U#q>yw?Ua4=Wbc-WCJ~uHlFFrn#frWuVlclH$
zq$~?WWP*s6$=CHA809BR7^pDbnrv%e!Z=}ak%1QD^vQD#3`K1j7#MDG<mRWO=ESF!
d++r;#%FHX7eAYmj(QWca17|iaelBJKW&i+cihlqA

delta 431
zcmbQ9yd{Y@k(ZZ?fq{YHL7;E4{6t<^MumymN!&&$0x5zmj8WVv#uH~m#+#%lq$sv9
zMhT~wrYNN-w=hPDq?o0sq^PzqMv11Fr>Lc<w=hPDrC6kBq-eG<Mv13brf8*Tw=hOY
zq*$fsr0BLVMoFevr|6~Vw=hOYrP!nxq!_j^MoFhI1~X{dZZ2Z%6=l3NNnDfZmdxaQ
z5t+^I;>^s9w>EoAO=f1CH2J=)ALFdedU7p{;<FeS7>eW>7#LPE7fl1PCxM9Rn@`HG
zU}T#OV$a!Jt~iNNXf8+%QxRCb6GU8Ec_ky;d<grL3W8m!c8F251tbm%i6W5BBB-(e
zjXjJiOF&Y>AYu`S0ITE%u@-;`s7eE^olFozctE1ahWyjn0hQtfNg?T)d|KadvZ8@4
oqs-)Z0~N-o$t?yZjFTttG|*xJIec;yi|%ACLuE#X$$o~;0M)~A-v9sr

diff --git a/code/datasets/__pycache__/custom_jpg_dataloader.cpython-39.pyc b/code/datasets/__pycache__/custom_jpg_dataloader.cpython-39.pyc
index 9f536c5dcea366ca8fa4c041f92b33ca5546e2a4..549f6fa590ac3a8baf0494d6d9c5c7431c562d8f 100644
GIT binary patch
delta 783
zcmcbSFg1ZUk(ZZ?fq{YHnM7H#-bP+&5ypL!HAObc)UemE)Ueht*RZ8Ar!faJXfpZT
zVl633%*@l|pDZa_K6#yJF{8p{B{3yN=FRS6qKw>i3=9lK8X%&6a=N$|8<=0edAoQI
z6Jy(C7pZK<_Q@NhQncGZN*F-|SOr7}h}!}pT0w*a0|SF5Q&HDud+AI@&ISeshFgsB
zMIDnj%1ANxZ$2Ya!pPV**-=iIaoXfixoXDl$rt6etAS0M%fP^}lBuW*WJ))PXaW%+
z^NJ)vtO=9X$wx3YZDv<k&FBSoNjpdwtQYPQP39s$kf=Y1m;-VPdyy!J4Knc-Q(8e0
zx+aigCi5%35}yyUpRuSGq#D^)0c9t~Nt1(>RoK8%os(;oEw~qhRDqo}b@Bn_NX<qN
zzZpcNfC!MyMbkj6c_3mTh$saSQ#U)Qykz2mgiFRP#^R!$$>!=U+-pFR>%he1W9ngy
z%O}fftdWE`=@x5VVs2`25wfEmX;`}~0V$sZ3Ss8l%)Fu)kZ;+FmVrbMfr#ZGVg-m;
z2_ja32(TFkK`gN2r%X=N6lI(;`2wrN<bF+kHi$DOzf^H&FIo>$wE-MFs#;=<8z<Xn
zed68(65I?Tc7tVnw6&D>F)%RrX|fbSG=t>89#I3a_JarskkZYowQCp|4@}n3ZDZUz
zdA)86+ftC+l*yKQ%Hl^E7#O1XlS_+B@^j;hQj7CaOH2)lj!iDs`zsIj5F(brkx&oP
N8Vw>QP43q}2>`^E#~lCw

delta 873
zcmbQ5a3g^?k(ZZ?fq{WRu_!26XCtq)2;-&6nj#zROL(&QvjnmPYglR+vV>BYdYNij
zYS?R7YFKNSYuM75)0l%9G@1Qwv6d7iX69)!Mseqr=Ef)IBo-H^7HbMm{w7i`cZ(@6
zFN!OzG%vX%Ge0jeCyG5kFEu_RzoaOPfq`Liu4pl%(&UGtO3aKROq*H6L>am385kIf
zG(bedWEF8QHZZ?obBTBl6Jz^iMyYJZj>)-FDcbEIC5#{ftOB9~#BBu;Z6HE|fq_Aj
zsi=GNAE``6&PE0XhFgsBMV*uLWTY4;Z0?jPVH5?M0#a85CL}@HrcV}<t7hz(+$XnP
z4Q%#21_p+eOhw%w$sQ2V3?dL3CQi<lk6>)xd_;aVqZio34v+${Ubwq7nTz~DqW&Oa
zF34r<MWP@!$i!PrX$3{-nkMg5{42fyWH)0`9Y`gzjc1jd7$;8_P*!0BOLa}QQnuh;
z0#XI`!?ek@%8{B)AbtyoNC6QbTZ^WHSo1-|A`npuBBpKrul$mU2NFUVw-}3ydMAHT
zYY|-wl3Wib)Ie@woqSMPbn+r~bH){uZ>g`5ggEdPYhGe*YB5qMPMkbn!#ZjyNb_V+
z$TR0=<`u<&{LNOh93*-eM63W2D?!965V0CWfW3JL!~%O|Dgy&U8Y2TkF$XA-vPv*=
zFbeS$O`Uv!RbsM_ral|Qk&_!WJ=ivYG;N%GUQ?5C)8wC;pSU-J1h#;PJ(Is_*~;!`
zU|{gmWGRB^0f~cs2l80afz1KhQy3W!PJXJ>#<*>Aj&2LvGLVX?lfUUIiyvcPV2I*R
sE-fy}&y6oiEzV0VF*PVUKG{I;uRPd0h}Z^4NCQY~G>Diy*+>5*09E|ovH$=8

diff --git a/code/datasets/__pycache__/data_interface.cpython-39.pyc b/code/datasets/__pycache__/data_interface.cpython-39.pyc
index 2e97390e89b0ccb4905df819317f14850af46697..e1151f291a6f1bd56821ff01a34f3019adc21b35 100644
GIT binary patch
literal 8118
zcmYe~<>g{vU|?9brZahqDFee}5C<8vGB7YWFfcF_>o77fq%fo~<}gG-XvQeU6owS0
z9Hw06C}u{87)umu3PTEW4qGmJ6nid56bD$GC5JPYD~b!uX3gQw<%#0S<&EM6^VxFv
za`~h9!EE*%fn32TK}LoYjug%up<Ll8VKAF3M<iD?N)*iI&JoKMj}iy7d2%FjC8H$4
zdU<oCa;2lB!EC-9nOxZ@SumSFM=n=BN<LR1N+DM<N)aq3kfW5V9Hk6qGv=s7se=8b
z7NzFSkRq5O)WVP=l**c=-pm}O;m(jEoFdY~kRk#V(R61>5ls<mVMq}JiKNOmvqWjR
zGo*;8NVG7dNTg~tGe>EsFa|SdO1=bzqn{?@Ee`*JlFa<P#2ihgTRcUHc`5n1@x=u>
znI*TlToOwXeex4iQj0VhZ;6JdW~OJ9q^1Oc)dVNz7UZNBX)@hn2PrE~ExE-OT$)sz
zTB6B#i`Cc9GdM((@fKG}QDR<kT7FS(@k>Sq1_n*WTY?@gZl=DTJ|JB=FvEGg0^FSv
z)8UqhL?sp#xuqtSloq8Tiwj^8V+lws$<SoHCE=Nyl$ev4mz<gcGP^jn1m;HOTg;A5
z&bNdUlZs1<5|c}ka}tY-b5lz)@>6c{!2}_EkmJ4G@^ey>LE(dp*`b``3I+y-R0dG`
ziegF;OJz=FPGw1DNo7r8o5R}9kj9w8p2E?>8^x9)p30ueEXk0<nZh-P2`s~%!qdVV
z#gWRH!aIjCg)fD_g(Zp$$`eQtY+;GwhVq0`gj-mmcv3`CL|Yi5c-t9R7^3)s88jts
z@j}DSH$SB`C$$KaL0)n&FfhDiVqjnhaa+lHi!&v$Br(1yKfmM_S59J5YEFDXVoAm=
zuB610<c#>@%&OE|+<B$B@#XnN*{Mavw^)-ClQUA2K_)@{$iTzEzyLCjAC$Rz7#J8z
z7@8RtFfL?ZWT;`RVTfm{VX9$>XRcwcVTfm`VX0w=XH5Z#v(>QHFvPQ`fMhvRK(d@E
zAX%;ykT`b=TMa`zPYQbtLp*N{M-4+fUkzssLp*;vLk&Z`Kn+M%Fa^XGs$nm#DG{z=
zh!@G@DG>$HDIoQNDGb33nw)+wAAo$!2_itwE)oZ^PJjp_1_lOjpoD0O-C`{+El4f8
z#StH$nU`4-A73O1l9d7x(jY>Hfq~%`S7vTvdMYGbIP>Dci6gc67H>&nQF>}gd|qj8
zQfkpH{@m2Wy!fQV^!S|Ay!4WcTYP1SMJRkwkR~Oj7sr<rC1&Q`;zjUNi%V{?6sM-9
z++xkmtSBwG#hh50ev1ceQ+zo%TNTNIoWhfq3epY9F-4$AxW%88SezOUicv^rDdGcV
zBOZ`>IceDZQ=|s+8N|5K;?(#wNG>Wa(glg@fvjOkEJ`oF#g<(TqE|BB;*5v*IzE0S
z!!LLJjQreG{mPugJbh3KDK1D%PSp<q<zrt@A6=K!)B^XS#IzFq<ouLWeUQV7Q%j1$
zjQGsFlGLKK#N<@Hg32Nl1_p+DQ2NmUr7A`)MmF4#86wBR$ic+MSS5y%#PwjY=eLsS
z7He@yVtVQ=wxZOM(xSX%P#Oi94#I2<3=EtM3=GU5JNJOoEGWfhF{Lm{GSo8FFs3k-
zF%;F*Fk~^;Fl4dRFl4cUQfL-i4MP@t4MP@34MP@Z3QI40EprK133m;1Gh;I&BSQ%f
zgq^}F!qCiE%L3!qut<RTtR=iP%rz`Etm#b6OfjHz&j(7;HH;~2Wei2@P;3L)!Un27
zdO^8C0Bj3GmH^Zq!Dfamp&G^%u3k_9&Fu$H#VeWoZZW52mfT_mrz%ahB2enR#gdz!
zl8T6FMBF}~H4_A0+JiK*l%y7y6ge|6Ft};5fHf9@YD<K^m5fE!pyVW61j^{Qn3Ky>
zZm||57L^ni8G=#^BD=s76~d}ptOZ4xc_p`)b5iqeF=ytL6oEn>T;ylwfm7@)uCm0O
z%oGTl6XX~$vq%|~2w96$OG*n?vK4`IEXWzRSU@T77CSUe-C|6<#h6rN26Bi6h_D0^
zz6=Zuvp_K{28we=J|+%E6($}=0Y(8vF*XiH8Ad*)Dq+<01F?v)$cBM|AsJK)fpmf}
zhz&|Lpb#nsl?5dXH4IsdH4IryC7?9I*bHK`q%iif1T$zd`Drp2fr^b=?8T7EuLz_^
zleLHi<S;J~0g^5f0I@()D;Xi41G^qffV^-^034Ozpn&A*jUfMklQMIa9ID&Vm14Lb
z)C2;%Uj&@uVeSW|yA;M==3s`EOn#b7V0S~D%K~w(H^`A-=Ym{X6bj;kod_nt?&K>=
z%t3PHHn=Mpt7K4JiK>uvHx`96Ffgoyy37aU6#OpZho>b-0PaL`nk=f*&{SfS7@+tE
zrQ70t3=9k@jOm~lXAl9G7Yi6mm=-XXu+%U%GcIIY$XLr<!|cKkD^kl+!&Jk<${@**
z!eq|C%wWS%sFK1A)7i;T!dk<S#n8;?!Vs%a%TmHt!U8HwO4vZUBpI3+r5TzTI~ZzM
zY#0gyz|8`d6xJ4w4h9#7X2wnikVz07A`G>xC9DuO*ffz^mJS9`X(P!X$xy=zHep@~
z`vQ&{)`g%lgUzo9l%&B0M>;5hgD^^LfD#!f#R!7}OoD-d0aWwG3e+;zFqSZMfZaBc
zsgNa@p$L==G#PI(7nkO0GTma*Gq}Z=xsowT5K>(g#}}j)K`ITf$|7)r;j+oeEG|jT
z&n>V!%D}+z8DxExE^2&$s#K^MdNw)v$%#3|c6tbXnw;R$=@u7A9k}sQe2Xcs;1*kA
zQDR<tYLN%H=m4{Cv8LtZCxS}G{G#NHTRblLrAaxdA*p%A`9+%0VkLlqfgy?;Y)*V~
zeraCGEjBQxxF{B65IfW*5I@9$5+2wi+#uVba-f=|7!*_tOgxMtj2w(Yj695djB<=Z
zjC_m&j6zIRk~n>+$qtT-B2ZZcZli>_xw&aF`f2im+9Y|2xvBB-x47ctbHQ!7`1o5q
z@$rSFi8)Xi=Hil~B2SP*S(5VebHFuf5hx0aK&2ogABTY)9RVVuKtvWuH7~S2PfpFr
ziI0bP4iuk|vJ~XQB2fkghFVY}05vi=m}I1-#Q9kHnE9B&GPoPR1;~wGVLXjr5oqI=
zHH8z}_~lCBhBkhA<}jx4rtm=<zaSoeiU73n3*v#gIV@2;;6^WRFoUM(EpS%^sl|&F
zqJBl7$Vmn@fkEjFgh5S0P%wciwc-qLxk&ZaE2s_2mjY^&@`GEf0-%=ZN>0Db21W*k
zB6(0;D1Zo1%d|)l#8LtgAg6_B3WBRZNPHsdSVWnp3Q`KLa@9dB4G^ITBD6q+Hi!T<
zs%~*4B_?Ml<>#e>n~y~Xpi-0zQs-bZ7HlG@Wmr@UGQtEzn1TqjIu_J&I}b`6ARjQ`
zZn-gmT5VOrpxzcFZs1L|A}df`3yK3!e1I^hoCS5Ez-8?WaGYRiouz^6U9`4Y3QI3*
zEprJ+2`8u=g}1|?e0V#I3sf>9wZphUeGO<ktP9Cpa4QVd<^!1v>WP%_H8W)K*D$7V
zfLmUikd{}GEhuE{LCqtKwi7t`96|C<AOckX7P){_3V<6@MXn%GHxR)MA`o!_jsdW4
zP__n_X5dy+5xDgLu9LylAE<={$>e?@1JL3Jl>0$rA;m(VkOjAaq#!LICJsg!<|+ZC
z7y&naih@DW0je878bBDt22}(gr-O|_G<{MSQ<y*v2Tf)_H%-P{?8O<SX=ypBn#_;_
z2kcUCQpM6{C;}BnpFl|m9QTY>(nwAr*p>mcqreUVwV;q4)XM~E$1sB1F)){agA1oi
zKxN`rB$r4cxr9J_1+P1b%0Lkf4GeH(;dBD1ME-%~1SupZptVahS<!0>KTtivSCkEk
z{~QpJ3nKDB1UT*FgIJ)PN}xsnmDpITfI<cahO;2ofyyKfCK(YaesBc<633_jKtTfP
zI~0Qo4cO=hXygNwTIMjNFsHDzurxD9u|maI=P;$PrLcp=*uW!B9Jhp>5{py8gDZ$|
z0QKoOK|STP{G60xNE02LbU=v|ItT$utKeh-as;@TW%L94XeAS*tg8eC1-OC(6QBz2
z7B@KI$AgXi3-Tq{RHiC<ERN7aQoEAr78giUd}dxsY6X!FDyo9HKojbuI+y_1p^)a0
z0KyuG>5PmF48>4)so-}PM1v+fdU^mg<iJgYq8gBmp#C<*MsP@g2~cnpl`}9f`~d|6
zsGj6tWMULzW?}@3Vnj8llm>-<F=*Te7S$<|c<Op7<hniuRL`?Uaip@Ou%__LVS?85
zoM1lx9Htb=Ko?gkcZ$#)M(_Y8OB4^32O7L&iQ<LwK*N?SQGDQb0Dmxprt~c_=c4@L
z;;_V=c$795M&2RNCTIZHKlEr5)G)+DN3>Ev<66RCQUsKUi;6(yh$wYh0+0$FT(yH5
z0g&nhR5;xNjlgE+f%CyF4p6xVO58;ypu~b!tANT9c2J50H8%*=`fO~Vx}S})N*;TR
z;Ashfaxthi0M62&O0onzvIrZeL8_^lY8bNEQkZ*LKs_(exFV7qsEq)Yt6_-e0EMR}
zE41DQSJ}+y!yO>oG&#|#=UW0^0q)S01nEg4ji`a^YH)1_3P?>BNLd3Ks3`IW1s|wy
zft1(4*$h-OD}%xil+73z`IyC6*qEziaD)-KJSYN<C!=&rK_vn_fItIw;2~EAP)Xau
zP{Ii6jxJy-0d-6nvRFZbM++H2UB?=Rcs6iAF#BQj`9Q5MP#d5aRI{ct)G)+yfckt$
zeLb*GG?|KGKphBH@B{!@2tt587tg@JU;=90CNMBCRGH!P6ga&VW9{Iv7lDgw&~Qaj
z3n&~@Km@2?2TrxO*uZud7gd9V!37wY0J{KG#u<Tv7nFt=7}*%BlnA?`2-MF(31E<J
zP>6uiL@_vk8L}8^7$q5Em};47nM*)p08BMZ&7jdd7>^muV*v#)xU|<~Eoue15L8-%
z9bVK3;x>UYH8iP!>VR9kkXBD-UP@+iYH<`dC<{V4kf25Q4pexXfqVywd<I57Mn1+W
zb%Oqb)C|~Lcp!too&=5YAR1KQY=kkm#Z(jm@&QOGY)lL6C2;cwly1P|N7zP+z^y(T
zkjp_ymjSn{Az=X;--F~flynNJ=HX3JQ1U=H7%`d!9x2j<jue3c8{8tq?J`g+&;iM1
zsyM<E6fG3Ht0)o_fY_SFpq8LBk~7qBID@!Wp#XX-0Dr3xlxvDWi3`$z2Q|@)K;cpZ
za#az|_93W}WCgjIkAZ=KgGok4j9-97fQ^rZkGTjWr>S#`Ju|PkAT_z<7H4K|L4Hw5
zPG-_Awvx(%%)E4PtMry&L1hVObS6FrG<TSnnU{WxsUYW;09a73v?Md9SPvBJkWLUN
zU*3{Hm(zodwt^A|7HN?F;?$BNP+~6vWydHHun}dM#hLkeVB<kE6h)vS`4%^lR1s*l
zAWAT~w74Wc7iplg2-Ind5-rXI&2YwN6{I7}fyPUs<f;;jieNJ<NV4$|Jw?2rVJ9Kz
z%nOn{c=F~JJ7_vCCo>6L8QtOmy9nk-P(HiGoS2kcBnV1NpblOUsEv0^yf`~2HL)m9
z4>a<h6JMN~lbQ^gw=DwYlv}KMrMU%_MM)q%X&}pmeP98OXlsLqc8WmFdvL=K9EnI}
z5WFC|#bE;(kg@}1$zsq*0|z4qBM&17ivS~NsGWzAhlz)YhY<{!c$h>$a*Q0wLIFbh
GBAft75AJ&a

literal 10399
zcmYe~<>g{vU|^V=AC&B1z`*br#6iZa3=9ko3=9m#N{kE)DGVu$ISf${nlXwog&~D0
zhbfmiikT52#uCMv!jQt8!<Ne)#h%L%#Q_#)$>Gf9isH)Uj^YOMS#x-Dd82s2Y_=S}
zT>dD2Mursj6pkE$T)`+oFq<<+C|5X27|iC%5y=&e5(TrlbHsARqr|~_d2%FjC8H$4
zY~CEHT<IujFq<z&CRa8}Hdii6E>}KE9xTS6qmZi@rO3$O&X6LIBG|%^BACjWrPRzE
zrR>g-B9tQB!jK{i6;W|#ND)a9ZDB|eg^H-UGo*;6h_^7Lh=W8@Wt&-|)Z7_TBvK?>
z7*Zrt)tZ^3)KeIP88oF{f<n(vlkpaZe?duReqLgZCetmRqQtzE{M`8Bf}G5fTU;)Q
zC5b-yi7BZ?noPIYLA>JBl3Q%SrAfu9C7O)4SbhCGgF`eKZ*i3rCFT{U<rn1^zhq=!
zV9;c|CE%8tSW;S)>XeucHG=V$pofc_sjsIG$fO*Ixs102yaL?O#YCbKi;A!qz!H#H
zlA+0XOTsfZDKRH8FF7>@WL9x%NpNCrK~8FsCi5+3M<?f7!ih=6B}IwJCCNF7#l^X)
zB^miCxA<U!5I)EQ-fsChDaoLqK*pfN1mYJ*FfcHrGJq0L6jO?5J3|^{3Tp~m3uhEF
zC>gXcM6slZrEsQjwJ=7prf{e5v@k@mrSPWkwJ=1nr|_o;v@k?*qzI-6wJ=0+rU<8q
zv@k?*wKK3VL~#c*Xo}zB1v%a`uOzi7EipOuB`E#A<X~W6DB@&bVDL+31Gxc;LBh-o
z3=G_$G#<vlz)-@_%&>rQAp;{r4Py;MJW~x*4MRM04RZ}cJWCBr4MRL@4QmZUJX;N0
z4MRM84Z{KsklGT?1zZamvba+iB^hejQ<!SlXEUTQ&jsn?Nnr_Q&}6Op5nWuGo10iv
z8Oz0`00oXk>BUxX9!M}Lu_QSozBscgRY4;&uS7>7AC`)&6rxj7i<65o!2+>*3NEQ>
ziKRIu#R?_)3T7sHD8}TK=Ej%j7iFgw6_aF+1*$PAi6x1}sU`7wiMgo?8pS0=#JWda
z9bphS)PFJRSGns~<|O9n2RS>(7Zl}Zr6!l?7bPdhgOgHbT4r)0D8J}K?JU-JPRvcs
zsm#waG(%$QB^Q_ZY4YA;EiNrcExN@KAD@|*SrQ+AOQ57EF*7e75q`G>AtFdYaf`ns
zwYUUXjR05xSqTr+ez2c!v1OMh7Nr;85&&hZc#u?lZhlH>PHGV!0|Ub?=H&8}TdW0%
zMJ2_zIKi?-`S~T99Jg4CQ*+Xaq(Es-8bru|2w4yz2O{J_npraPb5mC`-r|gh1VMcK
zN`_xP`XJHZlKdh=NN9m#vA7^HIaNOdR2KMp`sli(rWUvtC8m|=C+DZ6A_5HTfJ|7j
z*DI(jl4M|D0NGzG0!r>2Tuf|?2*|<6$5<tXl2!Cz!I=yyZ9sZJ7{mr)XHXttVF1+w
z!3--I{faag7#LPE7O8-|1Y&~)iZnr<<0&XgEl4a%1&5g-NFx)-D3&TIR11-nC4-fK
zlrb<cfXWJxU0_Qq7#J987*iOtnTiE!7*m)^8Hz$m7-|@@7*m*2SbCXinM#;Sm}{7t
z8B<tA7@8StnPL1IW(g3VrG%x1sfM|RC7r36DF&3KA(@ylg)N(@I0b5E3Dit>keQ(D
zox&8%pvh5nN<jghyL1(j^Gowe6!J=QlTwQm^3xQ+={L0)k*jqTl1ejkQWVPalM|Cl
za}tXxk>v_fi$Dds0yuT*mnG)tgVLY^xN1N#B(b0%rxK(LS_3F(q@<>0=B1`6q*fH<
zWF}{p<WwqT<|$O>mli32;vSMR74nM|5{rv7(?KdT^Ate&Llf@&Tg+*hC4M2AOhw9|
zXkrDs;`ywZAn?-u77NHNZkilLpjH7y33Fy%$t~ub)Vv~4IeCj0tOFdaph}CoEHMYp
z;Q{G}@ibY%3F8)PaY<r&>MeGV3qd*<6K^plK~f4h&47~&YjJ8xX@NN?orr-F0FMA8
z8za+yHYNc^0cJ6F0VWWyN*Fcmfc0RcR8Z*-PBb8cAQ4)_ki}HPkj0$Bn8MV{63noY
z+0RXr`4$&aVZn`D2Cx@rl%}QSq-wGhfs$a6F(`o}GP5>_3wAr00DAyh{D3?ODk@To
zKvi5ZI6*U3$)UO%5rPn<<anXT8q^$sy3YjUApGv*gT)iX%4kqXLtQ6>>N-?~6uZq7
z<Tga{hb`<Nr7|Mu;*cCCi|ROd0)u!1#dRR>z#~5dR7^20U`Sy~VQvAJ6bqPASQaw6
zFf=nRWUOVZk}F{@VOhYM!U`%yW;4uXTF6+#*bFKe*uoj|L>L)Bu!KE@t(mcjF_IyV
z$$%k+A(%mv-R~9yhP9wn5)5)RsGLn_s9}iZsb#ETT)?o9VIor@OE3dCy@MKb$%zG-
zC5btiRjHaxx0v({ZZT$p<sbw&DRJ54WEPhs=jRsKfm%?XLHVJ|2sIo)-UnwiXmzA#
zlarsEm{V-0htPbBF}@01DHW%d6zf$Pc;=NQ=47TQfJ`b*Em3gG%t=-71J&&LMG7!U
zXHdHiEUE~q#<KD=^NK)K)-6_W0d<Q#J+&mUq@?H;XJ&3eeo;wIX3{NQ2ovg*Til?S
zgmJjS5_3vZU5kqHi!_<Q1ws^8GN@t)*RK#?!*fxQ0s{j>5-1!&d6$7vhLMSpjgjj=
z6C=}qP~8Qh(K8czNT9?GC^>-Q#v5d20=RXzfDu$0WHB#f%whqR2dzw!47H%rfpr1f
zLWWxAOom#P8YUNpSgBf2OOLsXp-8KSA&a$!Rg$5GrH0jpp-`)aWdVB)<3dJ8P{LwL
zW5{HvVOhf12WtN_SDjSw%quQQ%uCJ$N3%krLT-LaYK}r_ab{k+LP<udLUMjlQEG8P
zeqIVlq#&^<F*mg&wWt_gNP`-JX+`<D3ZPbmUIti2u|i5_GN^%+SX8N}5Sd@9keryO
zkeE}PuaKEnP+Fppm{+L)YOA1_Qj)KbUzS=_UX)pqipB0SP~?E>5U@p%whmgu%}<l#
z7JFu1aY1Tw$t^BW>=&hjI7NP-j16jF++qP)TLfv-WT#dZ-(o8*NJ%V7)no%l_${vd
zl8n@%c#x_nE^uuLVY6lCfs{wFf@BPDF(($Kqa}!2yqU;hnGK3Y0Z@Kmm0;vzWMSlB
z<X{wF<YKIn1|@2Ei(d~#mnLhGD9Ej#GzD&pf(buOMn6p+Py-UwxQ&m$#T5@tv+?n_
zc;e#=OA~V-GT>HJkq*cRD-dB1B0znLB1aGllxB;<L6s>lv>}(Anv)YBkK`3KkUCJ_
zE&`PgpfD>2`GSLqPmF_&OOb<_gPDVcgA*(Z%JLu|!ZACPRa}7FX%b0gPGwGINo7f8
zO<|kE3hqL&r*O3JMzN)crn09pOEQ2uP;;2TGN3LLZxlx=XA18e#uUC3{uY)fE+`Mw
z8)k{(hVno?VU{Q!aA%4)m_bwQ7B93D=?iK1a)SH<3d)zD@;1b6B`c(vz?GAjl$sM?
zkXVvYWCK!T3knj}<izBR)MQXE3+fF9P|E_Ol^+zDRp2~HsK=E8>WQ&|`(2>E6+=8n
z3aDqrnF8vMae<2Pc<vNXe~c%Ey@ny47u+}FOJS&Ci07|iFU~3vs9}f~%;PB$0?{cP
zH4O25;KH2K@8tsq28Nd)PrU>=tSA=5Jpm$&K%M|eglGyENq{(@hy#1JC?3Q`_>wC#
zH!(dG+-tkVnFndjrxxGhElDg&2lbx7O~qUMxv7bu4oZ4_PHJ9yNyaU{vc#hJq(lTC
z)CNgPOfQZHw-9deBKWDrC7?b<YDy8P6uZTfmI_h>X&4sqfI^x-DX|#R+6I+~sYOM+
zpaudLI0s^LbWu9U4-nHzi&Nv%Aj1m9ML{5OP-ZRyCtF0}4Dx<8DAGXU42)bzT?s^|
zf{l@l8N_2@<X~oFtP(>>Jn-&?-%6$;Gmxv<ic(8Ti}Fxf{-8n|l;}at&EjR?1On=i
zXEA|0*G!<cdKp7e38+C=!;r-SDx5*dA&aesAq&)UXRT!};V9v(VQyw@W@Kb2;exQC
zJp&dPzlH_eGhi*@2K5VSSm8Yb9&i$3OkpcyD4Ky{3wsSi7F!Bq3P&%f_rO=e-^`E&
z>W^^tf{K1FKXAdalF6?~7Ziqipa@_Ar<GeQpgI5%<VE_Ro)eQ_kpV~{h|q*6EGhvB
zfTHFWKe!qN=O1v&TFD3wGEG5n*Rx0wq)-V&<bw!MYaVPB!jfC81x1;8B}L$b4C;bG
z8t|YorOZ5V<KY$;xJ3YAbHdwJkkk$;>sCTaaInQisvvtnP4prQ5X%xofRX~Fpwk3#
zL3*Zvk_f1rV_@WC;$W0v;$akE6krqsv#W$r69l*kzml=2h=BpMRR!vAf)WC_B@b#o
zB3kk#%%CP)GpHVCNnr%F=QWx9G?~Gr&Mo$0$beN5sQA%jg|y!*L2d(QS5Tn?mMX&1
z5G?|g>?=Ud2bE6@OjUBIjs@pRa9}~Q7)nzE<X}*r73^S;_Yn>TrJ)qYUgltil}vt`
zOkjtC9RcfKfJ#AxC|0L|>YFuir!iK^pgN5}qn)6`it0fL655>rWpV<}0o8o#k(?uo
z>Ku4df&`Q%D>!EqfvOE~2^-?3$>_I|58O#AiUh?@6sR;{E-oo50;SYjEJ^wKIgpAM
z6c<IHgawI9P#hJ3LIYC!f;uvg1PCsu!08d36k`||7(nq}3@X++m}I1-g!owan2SK-
zMAoswc<NXYXdTO%!U?Toxl*`WAayLy97gb9C8Ul8@jyeCkUAE`0}Wgv*Ri6vz$3!w
z6)YsA{E9%`A>1`+2DpHvdJPJyNBO`tB|o?x6#&(jD>?l#8yFcFia>4jB2XxT-B^?Y
ziosM60jVzqAteZ?!oXVfq=A%z3b`VX>x(i$+$<0QYTy;+fLOVp^uUpnn4F!IpO*@*
z^@{R9X_S~6t*8xT1m22lD=6`Tnr?V1GVo{_WY7T;H}Lwas1Q_Mfl?-{RRb!k;FZ=4
zaGYSNw9>$37v3rhUQKa8sx5dm#f4T)an~@!^VC49sV*dQ!Ic!ODg;$!C49{cS^PDi
zY710NaYCx8qGC`OA=Occss|E$pem}U6r>1LKNXdOYBK@!x`!Jik5>1910R%&AvF)E
z8ZQFn#3FDp2QFN|%_(r6t^pYU&b8qD3(mSAiIbrC5dwuSxE7H@)FI4O0-(k|G)BM`
zNKq{)IzS~bNCOCi*ziIRWDKG-26g&CWw9n`5QwqJ4&*OpNZA5*CAglzRXf}OISU-`
z&^`y$83d~aP%ji*HGmra$d2h{f>aBP;1X98>Jo5J;dBY8_`8kd5=o>0Ay5<Gbw^PL
zQec213a1l56~H|tCxC}_VSxb}WCyigaFza`zE@ErDCWVD4ko~9037`-3=9mLK}JJM
zZV@SdK4w1VB9J(!%mKwO9E1D=3XWpX7#M6y3Dh$JMe`gc(C}~zOEY5>D^!ei4pRzS
z3OiVg4LoDVaZA`Ku{aexV}%Guzm-h4I6>|2wEUcuVo3W09Pdz@L2OVW1IH=I5uoz1
zNDJntm5{NHqHdS~q>cjl?-n;W&BlWb2F<q?gUn)Js*=az3_T>ZE17O_fi%Tu=B1=o
zknAMr@JmrY7AFZHtbv&R92EXgcd6ia7es?5J6dI41j=>b%Dku-Wa9)70gh<|0Sb<y
zP6h^sM<DA!okR{sCPpD<CPuI*Mr4BuSWx&E_aJwLCGk|#QpnYG3aE-^jp9gUNnuUl
znZpFFq&dNS{y9wGiD6Ks&7C4NhY>ul$P&eq3gL;Sh_$dp@j`jxDH1I#QGDQTGJi0G
zrt~c_=c4@L;;_V=c$B6Pdc%iMb>0B3I_OcIgS*0@F0xPxh!h5sBH(1)3MvLfsZ)tV
zDicsY6I_2oY64K%atqoK0VU~M9H3$g#42hBr4w*r08S_1Bm!zkyago@kPx8?n~e?B
z|72sVlE)q+cq(yFJ_c3f;7siXjtS&S9I2884H&YeF!!?5LZ*h<k>o(tJ6Ns;G*AH=
zGiHTWx!~HCxd=RZ0PpC5`+%C9=rt>-Q;*bm0=JgH^%E%UG+7}1dQcx4sb2;P9z;O{
zZZvZ;GB6Z_aux$4AF~(>8*`Nmj$i>70YzP)niQPzK^cvKfdRyZhY837;AwFNQ1RNr
zPy!k=XI#Jp8cPTD!$Cc6$QV0liX4=dG@1QSx=A48KrI6BXgR1ZOb3mYbAU$6kw(hF
zp3r0}ased<FA#y|HBjV8fm)*O3=9laX861WDQK|`kZZC-%4cDa1t42)u@)ufrKdvL
zEug@@#ReYrE-nHUIABLX2ym(d6>Acp-~}Zc21Yi<DkZ}1DC$EGV32N5h=3CvIDi?l
z7-|?L8Df}fnQEC!K>b{%8Yb}61dPWF=CObR7+mLQvKCDRc>)wskQ_D%#GMSv$1ISf
z0xI@llXsbUDVfQs#ZlnNJ17U@F>o-0eFrM9Wk9|InaRM&2bwHaC+I&&Wq`ds1~Lfj
zNn*P5Oht7dA0YSQ!CnG4NC@}hL93M@^(tc(E>{z5v%wpXxVrP;UNfu{4+?B>8xGvH
z$JvQj1BEFl7cekZsp1GvP+^E@KcVCwP@97|cNKxE60FVuwcRw4oS}xp83cRrpmv<5
z0D8sW1d2Ft`@b2)0u|mxAP<AfBc#RvDB**LT!`t;Ph?<Vcnzuo_!t-%IGAK)#P|hR
z1lYh`dXSu^Mo|FBHXl&Gk*%b%ATuu=Z0jw-g36NoqU4PDoXqr$lDy2k^jk~?IkyDB
zf_kMTnK{LJpuh%?j)D6Dw<OTzzzT{}ONu~2Uj$0XQ6gY9Wtqj9`FSwu;v!HB<rX)R
zR1v7`jS_%Nd4a5fw28s<>rsNqrNt%rxky7wMW7DVEm5d=RzW(l9H>f&60b@uDuRvH
zBFlotdvCFW#@}-?lfbpfEgrB#U?XCnTyu*#F)6tSRPGiDf`SXwNV_FooSl=JSd^y+
z8l=pLFHX%#O$N<06@d!IC}AI1xFK4{;F${WPztzJ4sNX#fuaGa6oD5dw>WGdD--NM
liMJRe4w|>&VdP)|4Gi)yB0(k|CJ{y+7NG<oD<ORmP5`088?*ob

diff --git a/code/datasets/__pycache__/feature_dataloader.cpython-39.pyc b/code/datasets/__pycache__/feature_dataloader.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10319c23951eae75c10e1a3f2836050359059030
GIT binary patch
literal 7019
zcmYe~<>g{vU|@K1pfmZhB?H4_5C<7^GB7YWFfcF_XE8A_q%fo~<}gGtf@!8GW-!eX
z#gf91!j!|3%NoVX2vWnG!<Ne)#SUh(<Z$G2Msb4KtT|k{+)><MHd_u)E^icXE?*QM
zn9rWWpDPe00A_RK2<8e!3FQh$34{4CJ2`Viaz&#=!D3uFV!7f`;$Sv+jzq3RlmwW~
zm?Ie_33iuMloXhjj*?DcNa4wm$(4<gg{qN@l5=NB;Z5OdVMyUi<<63CW{y%wRZQW}
zW}3iMl$OGff^0rxj#88o*evBJ<y7$nDhnC1R8s^b8KTru1XI;!Go%R3WsXvZvW4d|
zM`=LWB6FFeG*bjqHD)uUh|Xn>(n?jz(oWS%5la#8WnyGVk%-bsRY=uH)l890kxCIu
z5lhuck?v)W(uIo3q)4Ueq{#NNM(L&Mry8W{HZ!Kki7+%XMj68RsfH3DzELV~s&1-b
zs!=*qGgAyxlyQ_vDqpH`ia?5dib5}QlxZqostKHDmdcl^*UT7Yo@$Yzm}-z}+RWI@
z#K@3pk)o7pkZK0!DW@8wnnQRg3{jRT4C#!iIwA~FRw)8e)~PnB*3FDA46%MuwyDYs
zlov8Y*`-Q=%yMCfRfw`rwU1$ra)@$_VUBW&a*lFIwMccya&2a0WSGEIs0G#GmTHlr
zlIoUfC&2)6!33s4u@uH&22ItMpj7Cm$#{z`BsH%%zetnu7DrfOQD$ONPO2v3E%yAp
z)cB125>3WiEGda4iJDBec#0D9Qu1@-iwklxOKx$wB$g!l<R_-2g0xC_<|ZZPB<3Zj
zrhvqYQ%iyqa|?1(i!_;Tae(v}r<N4o;wmXh%qvdIFUl<jSy7x(nwAE(jU^zlBm-ns
zNnuK^CgUwu&)mfH)MStokufWjQyj;@z>o^gC{au)V(koRj43QBtSy{T%qeUs>@5sY
zEGZl*oGlDdtSREDY$@Dx7*lvscw1Pb*i-mY_*)pFI8p>s1X~!QI8%gDgj*P*xKcz?
zL|Yi5xZ4?67@~NB88jts3Am*umXsEyIwht<{OR_xLzsc#<qKv8hGcNAhB`-@fq{X8
zfq{V;l#kPx7#K<zni&=_E@WV2NN1>Fh-b<Kk<4I{1uVi^!%@Q!&sM`+!w}D2!&t)*
z&r!ov!w}C|!&1W#&sD=&!w}D%0utu|tLFuid^M~!4DtLmY&8t=0yXS44Do_saiJ37
z1tJR>vP4suBpGVCQkZMFW;3L)%mtY)Rw7=*kR_JF+RW51Q_Ec<u|Tqhdm+eVsc?oA
z1{MYuhGu3)hCBg7hT>fY42)nH$-u|}*2f9f_YI-1mZyfNh9QexlA)HjhS!B5_Dl>@
zEnh8viEs^HGh>!?tw4!z4MP@3Gh+>(G($RLtzZp9mf!@&BDWHm6gF^b)~FRisF}@>
z!d@dZn_(`qG()X$jo<>=8sUYY6wFvCQzPiY5SvviQX{fJZXv@2#-f56{u+iXmKp&Y
zh8l(})&;UPA|SPSP9?%6@--sOj42%A49$#C)ytr&`CzIMYQVZc{t#LqTf+z9O<*ka
zDtv_!B0`1?6BvunA%_V#v=zXlBA8SHlgeOHrG_Dke*#lcQi&?abzr|n)QZ+HEKsWv
zUC0>2Tq{;99>ZKKQ7c(1Rl|^_xInf>9AtM5Lzcn<*%~n@TWNu8jRcgfyg;@_63SLt
zAX_5^VozWy<Vax%X3*sHOP@6p1Www&WMp7q_{FGSWu;%4lbEOPlA2oJUX++tqF+*!
zn3<QEmmXgN%Gt&G#W|TNsqqDgC7G#tCGjbl$tCfLd3ssJ`FXb(i(Z0q*e#a$#M1O%
zta??6MMawGw^)ly3sQ@2am2@G=4F<|$KT>iNi0c>&&*59zr_V+CMPCmq~78K8|{;r
zl$zs`nOt&<H7zqIwfGi4LP>maW>xAfmfZZ5)LWb&S&%z2ZgJ&+^g!60dGX0PiN(dK
z#kY8J6D#7A64N2NI8%!93*w6sOHyy~q~;csRK|m?E55~-oL^K_T2KPxvqD_Mnwwcs
zT5yXwu{8Y_dv0cNab{k65f1|c!!73I@|0Vw1&Ku^#ka)4hJdU_1V=$)NyaUf{DRcH
zTP)ykVadr)Ot~e5sq_|0aY<_KE!LvMy!6ytY>5R0sd*{4*itJpi%W`cF&CE<-Qr1u
z6e1vx6yIV4`?UBLXL)gEd|qO1YVj?$<ox`il;T?)&?qS`;srU9BR#bw9u#t#(zjTO
zQ*+Xa6c`v7iWEVFJcv*N5$Yg914L+o2ssd;4ARb$k)NA-i!tpMC&&ljXuc&{lA2op
z38i?jJ0QZ06}K2Gqu46I?zzQX0Sb(a_?*n*l3R?KQ5=b->0pstY!I1Sj3u{NlM+jk
zGj6ehO}WJZ^UN(Cuy{PwN9<4^tz^8#84roo`1q9!zr6K9j)SDr^8BLg;)2BFRQ(W8
z)!^&tql-wo$@wX%`k*iXRUZ1VfCLGEA|$m)ub{F>l7WFi3RLc9feK?rE+#fcBnacd
zXht?BHYPqs5Y%GkW8`AwV&Y-qViaKJVB}$xVB}-sVB%urV&r2KW8`8KV=^*En8m@!
z$0Wwg!w7*@lIWF#9xQt#Gl5(J#h?TSsv4X@P84BeU`S`EWh!B)VQgm1VytDZVP3$L
z!nlwTRB)#-q%gNKNiw9cWHS{xq%bZ($TFlbfn*kfbbty#W=V!x7EmEm17fj4&0|Yp
z&1Nc^Q^LN0qlPhsZ6Q-FTM6d^t{S!!c1cjPkpXH3J5-bdMU*4Yr-XX}4@76bK`mzu
zX9_1mMJ;;`X9|*zS`L)L5>$QMLoO;ycxzai8EaWe_!jWjurFk+<pQ%f7Bbdy*KpOa
z)G*g@*RZBBr!faJXma`8VvK+J|NsC0FF|!^5jZi6gA%g{hyWEzMOL6>%>hjW#hL=(
zyj)}i5@m(7NLaG+GxLgMKspRT!feH*X=#}iw^*tYi;9Z$KtfD;1-Doei;5B}Z?R;U
z7F6D1am&m}y~UiFS8|KBB)=#*;}%z1QGRZGUTJPYr6y02Eyy%b&b!48N!#E|TjT{2
z_5=|=AY&P;Zn5PRfU-E)nNge>DQTwhpaSj|J1BD`C8ig7gH-#&i2xAGhJk@$Dkx=u
zGBX1c3lkTk6r%(v2{Q39@-T`qN-*&;3NY%ibAXd7mP7|DB~TI_C;@`P5rm6{7#JA9
z4J5`CrWTGGMi+)=Q2USBFBw#2K$9s0NDB)C1A{QgHDU}5;D$>qUoB$?Ll#2~qa?#b
zrb3orh9Zy$G#QI57#J8<GTma*GbnNeF&QBN1(GcS`Ro>#4X6M~&d)8d+s?qi@EK%J
zl|DwE0XZLBh!pGD<m4wO<`moMA#_4)V*;Cfi#;<hB{Mm-7%W(n1`0liO$al#fE*9<
z2?KMLG#<NNg1ifA@4_mVWRTZEP6T0g1_lN;SZmIW0o0mXz*xhW#gxJ*$*_<yg`ozV
zO>0?d7;9K08ERR<q05xQ+`>`A0t($)wi?zNwi4zV<`NcAMsH>UCvq0QB2b9q4t7vl
z2el+(c|gIA7U&Rfr+@?d7Aqtb!h^dgoq>Vj04PW@7#J9;%rN|3k{_RzS&^Cou1Z1a
z6kBj>vKE2b)wei6?fin&qTE}o#TkhOskfMOGxKgS=cMLovP0s68I+*&K=$Q;l0OG1
z95YiYib0tqJ-!&+@-M!{19Byd2XTKcNDnyL1Tmbn3lw{xvXX&Cf{}w!h^I=4us={@
z4x||#bD)e_!<fYYinUsn62=8gH4ItIp!h3csbNWBs$rI30QK}hg<>sh4Py<fBttD5
zI4+q}SXwx0Siy10Uc*+yp2if+pvek}zamhEqCzAVC4j;LH3~0-guqc~h~afmwHFU+
z1Lc>N;E25fkakcM7UhFjpoTzEECU0BCI>j$ii$y^U~5%CEKuAPm4H~KAfgOJfZbC8
zVu53c2iewhpa=r@dziQw6+ltMR3(eoohYReHv<C$C~tvFr7sK&3>~o8VFJevQ#vE4
zf=glQWGG==z?{NZ!&CzjV+O|_GM}}Bu?93I!qCc;2I}#!buiYjrZA+y`0O=o3s@F1
z)G&)MfN6FRnZgW-aS&Uap_3WZ-^yamW+*xUu1J|A7-~667#FbBFoDuY4F@<~aF#GG
zVDDh;V6Ndrn4iW3s?PEZAg*4>*vZt+(9YP-)Xtp70`7)#lyKBAf!hDgplXkC0cQ;t
zNJkA<FoPx=Bo(C_gTZ5aP|jswVEAB<J4u1scj*i@46zEej2%p%a-m3~gAqjMadj|*
z=njT#7Ni0ilGngx1<NhQByhY(34wAosP_)eo#6Ie6bHCP2+GXhcn3u}xG4DwN&w(=
zXoL~JpyC$Xw8mMCa23H5;w=`Ct+&`gWM*DTkvqs>aJtL~6&%dTi6uqGAQo$JNn&!g
zCO1+-1trg0jET3{K$_z#pmb#vE2y-ofKZjUSW+_6(!i-WiWBZENB~rUECc!J7B7mU
zuY=Mts7_~K;$q@qWMLFw6krly6ku0j1~vYKSY(*F7-bl_SgPdk`yM6AKxI3q?g6*-
zK)Iv@)JbIoSGJ6PnoLDSAm_+}2yo>E4skF6b|QOxd`@a!eEc7f<3Q#xFjh%mSPxZ%
zlCMyjm;#IpAZr<ltZEptz-gowRPKROO)WF11O%lQaO05$%wh$nenwE)m#0v}ki`ZT
zWv^jazz%A*#W2@0*D}R0*Rs^I*0O<m_9=|nOhtccn6o%Rjn-O@60QZ@H5}kP!;r-T
z$u%`7`K6Yth9Qf$Xi*Kr0zOzyfatDa$l}RnD7sa`wSd2dtAuNT07#^U3nDAQ0A?{u
zGAv|*<T<ciP+4e0k{dMQQNvopDalaFn#WSZT+3C%Rl`)n0;+?d%}W-)Tdd%2Ft`E+
z7vP$LMc}>@s3B8i4vG{@5CLjr7FmN>;D`dX&x*jkEqIDXDq2DLvZw{5xDK3zt5S>d
zi=l1CTdbLRDXA4j;5H(-+yyro!Hqyr%>>B_pd=1SyX^U;C7=uy#RFx;gX&XAIz}WC
z?)doh)RN4S)ZF;^|Dc4T1!}M`v4L8L?4Y#E!^y=c#sTWqh%gB-vN3Wo2{39f3ovr9
zaIv#6RmosUP;dh^S>4>+iUdIZ5d>8lj78ZXiWk)J2iN5Bx47ctbMsS5b5i5uZ}G&(
z7nUaGKxDw(up)49f%;+KTBoQSWK<=H00&Ylh}8)SDPCxAFgZ0RCq5p@lfED&pzKh@
z&%nR{>e>~9+{3}hSIr^M!6qQW!O5Y@!3>rKIV(3YGcP_K9HB*^mTQ#`q*n>=LdF*q
z<!7ZPm*_*flBr3h>G9x{pqE@+R;2+^4XWdi226~MOyUiVEJ}Uj6Z7)&OA<>`Q>r{6
zD&gj0jH;xhLxx4;0~~|nJzV3x^7B%Q<MZ;1a^s6qi%a55Qj1H#gDh42`Y9l1=qHw@
z=ceYB6o-HsqN$(}gZTXHm!O<q)W*QT;HJrj)Y%88Rd9L(ca1>Zg<C8jGc{SzRe|K7
z!$L3Zi$p<D$aISf(s=<7zi{W3=Ej5i-Kj;zRiF{s%)I!N94mz?uCm0O%oGTl6J$7q
z$>o%ot`L%$n`)(?>3)l?ATci`vG^8KLCP&wa4)GS5)^CTW(UaiQGDQjlwM+KNq%}!
zVoDLHzK;@y2<GMKrIqF-mt^MWCFT@?THR6H5Sh}F%$(vPP*3lc0FsCvC~}HGjk+RG
z+u;@uSPs&{EGhyu_-;WAD9bF)%+D+01i1xN`rhI#&dy0qEXvb^m<(!t+yaeAWaMNf
zLHdrMah9S8kOqBFs$@<sGrGkb;OSGu2NH`0iLpZmv%x8@s0$>-lwW*{r6e;qwMZMJ
zOb0~RF)%Phae@<8d~$IabbzcVKfeSVJGZ#<OG^q$L8EO&w>WYWvr|(ti;9atF?EX_
z<iX<9l3P5G5Kk;gEKV(fWFT;j(Ezfb8e}spt`bWUZ^7a!u_Q5y17dMXP80_$25+%L
zOv|~&T2KV)+%hKL;s$rL;*;}B^Ga^9r55BTXB2}gUvL-h7N`a;D!Ii1DqX-W25{MR
ziv^UyA)Qxnxdkc+Z!xFlr66TnL;=NNlM5~%>_D|)F{miuVB%l|4X!|jO#~Qu7zN;b
zkPJwUgH@=Wp`JknR2y?Ja<GB=zC0`-9ehkIOkzwz%mR!8j6&dn8xAnZ$IrsZ#mK_U
O12T(6fRT@fgAoAvUGk~`

literal 0
HcmV?d00001

diff --git a/code/datasets/__pycache__/simple_jpg_dataloader.cpython-39.pyc b/code/datasets/__pycache__/simple_jpg_dataloader.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..782f8d99cad131fbd9c433fc963873773108d79b
GIT binary patch
literal 8079
zcmYe~<>g{vU|^8{TbsPakb&Vbh=Ytd85kHG7#J9e&oD7Cq%fo~<}gGtf@!8GrWA%0
zrW~eR<|t-HkQ{RkOD<~^E11oa!<Ne)#h%L%#R2BC=5Xe6MR9@IY&qPyJW)JgHe(K3
zE-w=UBSS806dzQCKUW}10Lm7O5=vo6Vb2lH6^Rl7i*e+L=88p$f!Uln;<@5c;$Svc
zjzq3xlq8tVm?IS>1$L!$lynM13U`i7u56SXSdJ%0K35@1fsw(TA%!=EuZ1CnFO@G#
zv6(qaDOEXzKbvU+Q_+bOh7@Fb7;{vjRKVt`MyaMSqzL4w<*G-igUu7n(a6<|(qv>v
z6<eUSkReMuMM#n%N+(4)RcAIsipX5%C|xL9bS`t09+WLMmpMv5ML1P&HbaW|T;?c)
zRFy2lRD%?W6v<vDMurrrD5F%RRJ~OF6zLS1RHGExUe+k%RFhQGRO4pG6gd%wX2vKp
z7(dla0>n2@WluFuHA^*5XKH4OVT!VdvP|VjwMY?4kxx}fwP<Eckw{VKWsR~*Wm;gJ
z%9LfZz;+?S1jeE}DT;YaDM||&qrjqBb_?tmGDJC~YNkk}DEBf(Ii@-;a9YUV!Vt?7
z<(wju>KyMffvJ!o)p3Dqs`WxfP$)#Xr7)y3rW%PbM7gJ`E>K;_5ap37o@&+1=)w?N
z73G=g8N(dq73Cen9OV<`8|9bkpX%Mr$jFfDwICpcVIkv!z=aGD)1tgmy`d~`kQ=k?
z76dJ1=$Bw*NOes0YG#ZI4rfSVWMN=oXl7<)$P+STn7~v#$AEzm3?msB8B*O+gv=SD
z+$Jy;nnB$XlIowLk{XigA;HiL3SGuv22ItMpmgr1$#{z`BsH%%zetnu7E4NENunmx
zEuNypyp;Uh_~L?`%#vGNE{P?HKKY3$sUS5RVTnbViAg!BnvA#D^Yc>UGxAG7%;JpF
zv^20NOF&{t2FSFM!jxQ1##^kOxryl@<y?uS>A9(SC8<TlnvAz3Jady0a}x8CQ&T_|
z6sML1C*~I9q!uNE9EOZppq%0`1_p*ya0ZNGN)c&iNMlT4Nnvf_jABk<OJQ$eh+;|M
zNa1W@h+<6<O=U~rp2L{Jlfv7=8pWQ%m%`t|5XF%qkRsT^5XG4ylp@^15XIHbz`_v4
z9n7F9c8kX=z}+b^9pX{9mnjSk3@=|WGcY8B$^fVf7~~ij7`Pc27??q2!U1Lmh7yKm
zh6RiZ85kMT8EP2fnKD5nGnixni?G(P)iA`f)iBpE#Ix5h)-c3#)G*aB#B<iL)G)+z
z)v(tv#B-;B#CgE#dBG%K3P=rqi9iiQ7H0}`GgJSTTFw%|1wu8P3mI#eB^heDN`y;9
zYPgyiLFuT5%Y`AfC5EY%yOyU!w1&HxF-xqLyF|2xA&aA#ae+8E^(NKw!bEC#r5RFK
zq!}P;r5S4ZYWNmNEMzEJRU%R%S;Gg?0W!IUA&V!QWddUnLk&+2Ll#R7w+%xLLl)}-
zsTw{|$}e11!;r;`;1za-Go&z}q)x_SHRRM;BDz3&Aw!l-3acbTEq@AI4gYM06!y8G
z(2)iERu=4A86@8_#LHnZwFcQ#NVL}qq;P?vzg94X8%ztO@PKLI6kaebQX-$FP$HkD
z2ucnq0=-N%f+g}<N-2V%<j~91%vd6yrJN#?BAO!B%TywtrII3&A`W8L2$aZYsiv@|
zFo8=b=4Qrp))XnQ3`4wHiTVPK67>a|3mHn(v$WC~Qlu9#rZc3-EMiQN?PW@5S_q0=
zZE(EmfJt32sRt(Y!K6VALl*x8=Au0{%qeo&OcR)k#A+C_)D{>nWT+J_F#?4-xGY&x
zD^|m>z_><iA!7`4t$3|O40EkytyHaaIzz2YjkpU#tVOMCjqC!Gg$xT!OU$y&7c$n$
z)`%{!sF4M;7#3*P$S$y4$N=WaERX_;GcrtIDx6SawZOVYW+7v(ObtVp?gFVA2~Z5z
zFl6a0kOGM_f?{58fmDqoR9t_7RE-prZLmP9MjFJPz*Lx*w+!rJ4zP=75p=O078m1k
zCv%|_mE6f(sF1=C%%Can_tJ=gfg$7;W6>=(Cm(-j@8Fk=3=9mv81<{n^(%7{^YmR(
zQw!XS64OfbONtUR^D^_&<4Zs#V6lF2PG(AKd_iJKW@=tZd`f0=iC$K5eqJ)j09cU$
zVzV+ZFa(2&3;_mEk&(p+%J3;nDa^gh!3>%#e$OF_AK2ew4+0mxhASCwvE(J@rmkeV
z#hjU#c#AVQKR2}`GdHz(CF3pT#M1PY3`O!FXZq=9<maYB99o`VlwDkqn4GE~Qk0li
z?Ca^Hi*S2#eoCr7sDLj{Eh*M7&IFaU@mU4w@gT7raPh8JP<e~PCMPjBDJ9Vk)a>~T
za#xi)a>=L{AD@|*SrQ+wXOokkoS0K=rw7wPvbWP`%>;pyaBmwGNiZ-l6iI;ySr7s8
zBG{ir3Lvf`0|SF2h@r&5z(BHf&(EF#fj4mLjES($iEQhNR2Uc-U=eCUgjLSevC5PP
zt6X4KX)<L#`v3p`OHjpji`BQ%BREWx=@x5QVoqslkt72HgC<kDF&MnIPfr7bPxiOi
zeJkA)b8|s$1=e&B1FW=24CG-B-%7Wf%mSYvurqE!xuGr~O+oo3mIj*4w-~c;ab~Bc
z7Q`24Rza+}#p+w>SemZMoX&N9ae*z@ef#v2W&7q!>EE?apXS`1^ZfTsdrjta*{Brj
z`~GL`(<@)98FH*SW1kL*(=+y(Ot)BaQxo%UF&CGl++qdiR85;(ti`1TsYSOqV5#;N
zCn&kcXXd5l-{Jx@lM|COQg88rv$9WOQfiJ%W^&0b*0ju=)Z$zG2qj>*-eSqkPf5MS
z36cfn`;1#$IUqd{HfLUZa!z7#acc1`p4`NW_@u;ih%TPo%seDsYHmSEWjxr%;#+*l
z`9(#g1tl;(E5t!8`30$Yw^%^M$t@O8_PoVfP?VWha*H`9HLr*h6w)lkC8@c$*fUE~
zi&8R+Zn0$+$Af5=oXp~qTWpC11*v%{x0sX5Q*N;qBo>tv-x3E~334mEyaI7DZV6#Z
z6iI`u;0jJHEKSWT$xO_-CF+=x5|Zyxn(COB5}a64T9jClnV)xy*Bz7qob&TaiV{Kg
zf+9bs=oTA@E={?`3Xbzz0^kUVF9DTHY57ID#kaVS*zC^vxdr*fskb;n@*%C!TbzFR
zMY)MNnN_K`_{tJ<kacpTrGl(aExyGD4*cR<oaM!t@u1?e_!bX{icd-`NzO<uzQqA8
zn~QG=l;p>!q?V)><!0t(7MElu7lC?yw>Z*MOTZ=IE!Mo!+=9wmY>CAsl?AD{nDPp4
zv6g1$l~~;3g|=JcGeHg6TP(@>1(mm0OY)17Gj6dKmn0@<Ys%eXDNfBvE3yOSD|--O
z3nD;e=q*lA?1AklvH=N#3outu=rX3=5-my1Er3LQJlHefTI3dE#Vy9lTWrbs8Tmye
zxA-gKlk-94bqTn#$+*P=D)Vn~7Q^a<TU>Ca5fa-NiEV<!Hoe7JoLU&4nVTM;a*MI#
z7ArWoZn1(LaEk*Ln76nPVR?%k8j`m-^Gi!0;d_fYGdF!D<1Nm3NV1QQM=Gk#85kHq
zg?aHhP#wa^#l*&l1W|d+e2jdIJd8YyTuefYQj8po5=>l-e2fN6AaOoM9VQM&C}d(`
z<Y5#5s}=gq#KZK3gO`PgjgbXp_CJ=t9J~lSI2gGYxmfrZ#h6$aIT!^PK_>DsbAk1O
z>=FXWF+$|{82OkGGF75jE4*YTkWZi(lz2E87#KihcCj2I14BAPEmJx}EprJ&4P!GS
zs7v6&5NlJ*Qo^)=xrAi_YYodn#)XUv*h-kP*cUR^vX*cx;H+T<v1*tWaDiBi3^mLP
zxN2CkxIqo6LXi@V1w1uO3mI#fYM2)ArZ6sKtYOIF%VsHRs$p2bU&93IQe^QhWR793
zWvXSaWvgMx5&(%p+7&TOAW?{Gvbby6tN3advIJ|`B^hct!Wr_^SQtu#Dz8AgjU~c0
z?6VnCSZWxuL}oM0Wd?OAA{p|S3>d(=M8Uc^!x{24K)NO{R`Ouz6GPQk!&Jjj!(PK&
z!<ojM25RH5`W1m1LeLt2r@bcQEzXpp{DSzR#FEr1d4<ekg}nR{h0MI<oYIuk6ot$@
z1#oIq0F|@~n&RLRugC|Ke=<Q?hdr@4v8X7qvIx{0D{=#gXMl(-5RnT~!If5&pBoR(
z)J6UvQEw2D2a;j|S3975cuTM(KRzw9A~gkEP=m_oTRh-0ALO|F(vl)8kOmI#)JoT)
zqWq#FPmm$t3=V3~6$OE6NUoxMkSHkcLrNlWb}DKD>EtZQ%z<RuqE--}F}|n~q__b@
zG=m7VCQcdy1H(*EQ&9_)(wJBnS(pSEB|vEzlt!5Z7{!=Hn7A03z&S>W2}JWTN-?3O
zSy-utl4e2a7u0bD;bI{M1_p2snlXi`g`<Yig`pWVEWqrC(yRh$0d>EHK`sJS@97LR
z46%H*j2#SF3^k0B3=^3OS%M)=QBB68L<R<il}xvo^bBq>X0Bv}gbUb%pyt~xE*ntQ
z1S-nxc7ePOYQ<G)qhvOa!@&_>jNHD=EG_}-0+%nEOki_wv1jI`WG1H;g9VGGf((S%
zf-qwz$k`x2Ffdk$<FMx?0}}&-CgUv*SZ$pQ@-QgyL71I^fq@Oy;RhwN5{3ngHH=wI
zDU6Z~3mH=wYM5J@BpGU1Y8Y!+BpGU1!C}gj!ra19!vYG^TDBV28nzPV8s-ufP|48D
z1kMjEko<r<utgX^Wl9Z0EDtEK(SjP{<w>B>X1v7;Dy5;J01AMjX$%Yuhe5#s4qqb_
ze`6$fY{9I_S_IBQ9H4Qag4CkiTdc(yi3O>*m~%7pio8H+o*fbg%t?vqMRP!Q%>t!v
za2RH$R21Lh28SkiKnGSQ7ejaux6cOY0lT(`fq`K!D4syYDFcfHBL|}pPn8^gSEIxc
zNFzLsK(4D{%whn=Pc2Ic;{qm7$(zDZ!d%0W!c@a7!2lXO1jiX`4Py;x7=jHPXUr)q
zEgUth;5cKiVXI+JV+v-_gqB-Hpy;4RWO0Mb21r{45)$A*07cnVP>_J5Oc%xT$i)%X
z*kUP~2hs|PvZA@5XkgCFE79Zt$57D%kknET(E%br(Nwe$#99O*7J~?I23-PTf!)%}
zz`$@36d|BGj)9SjQ4SO(OjVLNU4;@bpkf}B<-j!~$cPfqzyTw;bYb+<WCEw3B5RO!
z;5-Yq9ZZ01zr`LOpOcywAAb|%0+2Zjj8!5iHbWJl6m6hd0My9_Tk65c0J4&yD4>QR
zixHG{YME*nvY5au<`U)_re;v33L4L11ZQ1_ELN}_YYjsd8<+(euVboV0cBlC1y;k5
z#h${L%~W)#hG78*XjBfQ4m=9OUdt52T+33+TFaKsPz#ddTEM-Kp_a3T-Gw1mr<SXR
zYXQ$fh6TJOd|CVp8Ed&}7#0ZBa4ir7kHT=(a4Zn2;R09r9VNmGL~1w|GS+hB72&g|
zM6^T<<O`5LD75nr!p<7d$Rv`V!Q&%LenqCB3@=^;YO;b$_@V?*<ShdcNgx6=U|R$l
zFD(K$dO<Z9Bz~hnvZ){<9YlaDXi)841g@pyL1H-|0$e?VlP{=xE&}D{B2d(8a)Z<G
zE!NDul+=nMP@NgYTwa_BsX{^ZYZ15#tp^#>0wQWb1gN5g<Sj(9;*O6`Pc6wTNzILq
zzXM8IW}woYi49z_f}4trTucH?T#Pa-ER0;B1|t)=Ny;L_$ioN~0av+POjQzS$q{a_
zCW~7UKgb23VIDV4#v)KfuE_)HeS*s9`1o5~@$tF&DWy57@$t8K;^PZT6LTOkMWFU_
z5jf~(fJ^{~E~scM0%gad<sh+@puEisZEPo}=H$Q{++c6Efs}x<P!S&k1A{ook057o
zF!I$iFmWhwun357aBwh#WkD{=P0Y-TkJn_pCE}J?TmtEJxgd}93l^mo=cSgI8pJ0h
z7N_Q9=B0v5!6H!x28Jpf$cPhscqzW1C_gJTxkMi_%#@l`nhxr+W#;K67nfBjLR5oF
zGNjQoBO{Y|LnDjQ-1su1Dg%fTxPh2sZJ^1B_!N-I;6b)3etj@UAKE)B4gpo-sTG;U
zCGq*$FG1-CoO0bXS&G07HgMuB0EH5$TLx{3zO;V{sy;MXZgD|MW>C}c7I$80Zaio-
zEw!ll7CWTkD!Rp0keL^so0^+nR9VHAT$++-Wq6A@xu8^&@fKG>Q7X8Ho0?L^<&>DN
z5R#djYNenVS_Dq{;6x8j;2>v3afADgdZi_qImJbw))=UFT9ljt5zzw$QxT{UQUof}
zqWB=H6H80-(~A;Qz}X{87$TULr<YcmmkjENC*~A^8Y{P0!Nx{G1~(vVF|gvY%;L=a
zJUv9WyC@ozpx6o$^HLIvZ!s05+~O_H&Ph!y%7Zwh2oy5lj$cMjW)h?Y4l2-ZF$Z}1
zfE(Von3Kzlirm4*Wag$PmZlf+fC3micEAI+6&iR&ydXvFpafQ&S^`dlMXNwUO!>vP
zSU>}txA@W$i%a55z>`t&DLF;!85kI%_>)VEOY(E$VQH~w1IYXU5D^F>%0Y@a!9gFN
zTwDen-YLq@F9ApTEw231l7dpu*lf`)j@-oT)RfGk;v!H2xWx_%XOLsSc@5NVxW$-w
zi!lj2GLe}F?y=wEDof1COo6bOQgUvwrKFZ+Ca2zF0mc6<p3LI-#InT99MEv^E#}1B
zf?HhfMTsfF$%#3sMYq_B!E_V{XwWDpKCR>yS3zP?Vs0vE3Jg3blbCypw<xu+G_xqR
zI35(cx0p)uZ!sp{;s!T@<CF7C^Gd*j3`HfkSU|-Wq(TQ3g^)2faMcbj1wmbtTkOS!
zrKzb^skd0ua`F>PZgGH;O)_|31`-mtm{apokV;2z*#RmxZgJS;f{P|QP>L)DWfTrZ
z4sdrF+Tmv8f$~A!XQ&*rP(4E)7ZXSZtd<2_YBKXM@i6i+u`mfS3or^W3bBbXOEL0*
eyVzWeTudyCTudB{JS+mt9E>91j=2WNY$gEdcs8Q|

literal 0
HcmV?d00001

diff --git a/code/datasets/__pycache__/zarr_feature_dataloader.cpython-39.pyc b/code/datasets/__pycache__/zarr_feature_dataloader.cpython-39.pyc
index 6316a91dcd30db8d68e2af702b444f46463713d2..ae34e4af9a83aa088b82ae14f314d71bcb54fcf0 100644
GIT binary patch
delta 4100
zcmaE<JKsz@k(ZZ?fq{WR)h#r6fhGgPV-N=!b1*P4I503U6h}<de#96unUAqlR4PR}
zRXatdmn}*sRX0^HRi~LTMRxKuM*VuZ6!~7}DC1O?1tzI1S*8oj7BWm=EZUNykjIpw
zxR5alEShD$z+xdolw}G-lvN5tI%BG~2t$;0ia?Z2s%@%Xs&O-8Gb1BI3PY4#s(q?W
zGouSbY*Lg%s?q|bg$z-SsS+R+E)20cQBJ8&G0aiUQ7$peQLa&LQSMHu&I>#iGA!^+
zHCf0Q<(z7lB4ExCWjBGTkOivUE7dkdDb*|0QG%hFiIE}IHq{Pf*#ySI4=Ien44TT5
zk1*}6&tqU<NM(p(Oks#(N|9=3NMlT4Nnvf_jABk<OJQ$eh+;|MNa1W@h+<9QO5tu{
zh+<3ON#Si_h+<FSOW|)}h~h|*PUTDyoWqzRlp@^18pV|&k|Nr|5XGG$mLlH55XF-s
zks{f`5XIZhz`_v47tEk3b4w&Dv8c!`HL;|0@+)Ru3nK;whL<mx85oj5Q2>h)0R{#J
z5T6+oC5IRp7)ltL85S@uWME`SXQ*L_XG))}&*H99!&<`-&!56n!w@f!!VF@U2-YxU
zai*{|GxfLCa+C-y5T4w}B3>_2!_~}~C0ff>B2vSU#nH^TKx`p{3q!0!Ee}kjhDVwq
zg;koNL>#0xg-x76nxU4rhIfI)LWZJMHQY4}Su8bNHVickS*!~rYj_tjGBOmdD(nhp
zNMT@MU}0!xW@N|{GGv&*Sj=L;zzBws42%pVA`7JI7cyi?r?5*h)bgcp)bPz_Na36d
za;^;6xiVnq%0Qf3!?BRDh9O=Si?LD2#)5-E4ou2}Nd+*e2qu+EltIn}rzw$I{u(ES
zER`Dm8rBr9UM5C{35<n3C8{YPAF+W_M~P|*Pl;L$Ll#prBZ!~E5X_*->-VyPfq@}?
z)=UuCX+N2ZEl!K^7He{1az<*EU`k?1Vmz1~Uz}N$YNb%c4HnAGOUt)X&{UtC$+n5V
zNQ{Aj;TC5~QGP*uQDVttMfTGAD4sM>+!v+BCncsA-(t&2OiIluj^ZpY&Wz7X%uOwh
z;sH_dNr@%N8L7ol90iFbnW=dt#YMaz!}#M9Q&QsLRu$i3Ehx&&E4js-lbWX~aEqll
zH7Bh|o`Hd(NC8C1fe1wq0dh)_ECUMzLy^*CHV#*@m5jGI<KvTa5{rxD<5x2L3eeBU
z&rO{?nZt)mf`NfS5|r}%CcorRGGt_9WMkrE<YVGt<YE$Hlw;&z=3+Dei7@gp>M*f^
z<iVJaQHYs`u}XHbB9CZ&e0*kJW=VW}GAIqh(lt9YT|0wJw_^aM>l(&p#w^BKW>EOo
zFiSGjveYnSF@ssGDU7vjpmeu@wT7{VrG~kNwT3N)X$~tW?SW&7+3)55|NsB5WWB`+
z4wHh!l8jrdpfIh^xy1qs$Xo2t0KCNsiLT6)3Qe{m9#EKQfe2|30m_9%ATJl`F)%P_
zvKN_yM8V+*3b<Pwu;?jr2T6E>qJlXyrJ~3Q#03)|FBO?EFfc@c{Kmx(@-j0Q6Bi>F
zBMW1d3M5QWBQKc=qzH;Z*`0-ffdOPrF(~QPFk~^*FiJ9{FiJAiGQoLFk_@%XHOwi@
z*-S-xH4F<FYnVX6fg%qILWo`#NrqZhm|T4dt0V(BL?M!Fk_@%%;BaS=WB`S24SO0(
zFoPz$Uy&QgUtS=>1w?>-?EzweyspU%4y#*S@j3a4kknWt1qwF~aJb!K%)G^vR#4;v
zQV%u*s@;}>p$McOE!1vtq^FjE({u_b=!8H40#02V%v{WTj9|zFPF0eVE4c;hIT;uj
zKnV$yu!?_y6VU?34u%xQg-o?fB}@yLYnW1)BpFf|QW#p9BpDVm)-so{EMTo+UdUL>
z0%AecFe9s3$XLr-!;;080tzv<5|#z*HEatRQy5A(YS>ahA;>AgAi~hh*veE74mFk<
zc8CROpa@~9VXXlbZ>%*83%F`nYFN{lIv5r*buyN4gR2!#b;41?Q^SzO3o4XARbMSf
z2P24I!y(C#!ePTu!dJtP#or7P6Ub&M`Uc`H5bR(CMQ%G|8e<A)3RepUNR-!w0i-OR
z7n}vSCwp<pPJYhETMr5k5H<#dq7nlGLk&Z$R18xsV=Yq+QwoD5LkB}PQxQ`QV;BP?
zLm?BmB4h%`v>!M|G?{NP=@}IHff6@knI<Eo9Dyh9TbwrOsd=dt1x0of7#J8ngQBU*
z2ws~NP1fe&u1BN@J)4~T<iwm}J3Y9zB2Z#TW&~-3Vo(MEC46C!lWjodEy&R(wTv~4
za0i>zFoLo`4I?O0YZz0QB^hcMLD7`Ln!+}RHIJ!=aUxS8INcV7f@}bL36fFaNwx@_
zj9oz~667IGrlKH_H<?m0H5nlZ6(n2)%6zxDY;rP-OOo?*3+(2BG=xr;;gLd0>ysUL
zM7T7CZn0D)78Mn#Pj2Nhk1tXK8OIAQolEi|HN!2Iywr-4Tb!wRrMam^i6yDvTvKEQ
zG82?tiey20m<#d?G&#W;2OQwH7^|Y#@(MuJLlirx*hxxEzcrbKThZqhM_Oi1YCNpi
z(PRRvhqxZ(@FHIZ1_n^cQ7i)rUQQlHF(xiX8D=hK1x5iz4kiIc8D>6K0Tv#nDtSn8
z1kb)Gg)}G)fiTF-Vvs{Y<;McX8gNo90hdJ0j46x~plXuIuSf)x^O?Z~0J!u7dr*@Z
z;v-g&XTgODIDEkb$lRg?1_l9!MWAR06?zQpJj_)}AV;AF;pAFw-w==!K-GGzP%UEz
ztWac-WPrvxBRHxx8Ns2u5)!fCM7)v_;s%7J;S3B68$m7tMX99~NJ5hdtmqbdW?o8W
za%wR+ClqCY6oK4%iwjgXfhv@o;`Jccf>JC4)8u|0O~$mzJ9!ovf((IZ%4A?*Sjl*c
z6;$m&%>fxv1hRD($iPqr28JptkS`}M<kc{U28qRih*)q!$j!`ygdL>%xy786m|m0*
zl1~E>>63r*$}54b12tSBX;rW!KRzw9A~glv3;`vYEt9SI9+-obfeEOqxWNqyND~9%
zT)5lLfJ_Iw%@pjmIDX}5kc)~C4vYuIF{E@aDg-Gn0}-G!Uj%U*h+70Aia`X};!+R`
zWXUZaaNP`Q2IZHQoS1xvKiLqR3BVODIH<uIzyw$WdwhIOYF>Q&1*8x!5YVm%mHMC*
z1yWQD%F#6p3m8D@GKM9Fsg|jhxt67twU(`faRE~e3%Imr%wo=B$zrVm)rf2<jJ-^?
zEHzA^0=kB|hAoXbjX9V>lgUq$@fLSpX)dHSlv=FGUK9XwJjnmII1`KGOH%WS^NVgx
zt`ZcD0=1@~4TM{)nRzLx6;aIP#hFE^ARXX}&Ktz?1QB2#feDa{Z*j-Rr>B-=mZav!
z$6o<?4OH|oFoANZ2vd~;IPeXHq}g&oq0K)zQb@HP)D#1^Dj+Tb<#15eVJON0nGcGV
zq7o3R97KTfeo-ALE%8EIcFCzZIq~t3_y)TJ)W-v-oFY&I1!Q_LDF1RW@;P#Fafone
zaqx1m3W#uUaIk<>OnxDh&ge4PfK5R@B|W~lASbgVKEN?J-orKCD?cx_I6g1GC^x<+
zwYX$*wXnXSTjmaK1_n)5q{b(x`2;TLia-GeZBD<me+lxsCe!2#!jjTeY{{i5iB<-;
zn3D@itGJvJ(-lH8b5pGpG`%M)h={O7g8UXe*-E6J@z&&1BI;}rAOVfZ-$fKH>p{L{
z$}hgfQj(dQTBHw>F#r*u#$^;|Nl{{EUVL(K8MJ*^l%HP$cHFJWfudp}ASc~o2c^#9
z)RH1_Xqru~7LBUE#h4t$4bJuP$@!&uCAZj83-XgQii=D^_OU|5Sc?+#(o?}j%`MjA
zlEk8tTP&c+h2#)$QUKRK;JgA#54Tv;a`F>PZZVbQ-(pV9OF@bkaO8j!35QK?K6grK
zPO2TKqAX6ETqmYw@5aT%!^pwN!N$kP!^p$J!^p$P$Hc-U!Ysfjz$nDV$1KIj!@&Y>
YYVt6%FoM+bFblBqaquv4F!C`10GmIUwEzGB

delta 3164
zcmbPl_EJ|nk(ZZ?fq{YHL2giTxDo@yV-N=!gJc~T7#NDDOw@kFIAJm$W2vxMs&<N0
zFKd)es&1-Ys!lUwiuB|ejQaI5sRF5n&5S8xDYCt+QO2n(3rtd3vP>74Eo7L$ShOoe
zE{`ciej#HNSTxIgfyF|GD9co<RP7XnUd9F1sU`~<qij+bqHI$b(iu~=MHr&&QUs#x
zQyo(6n;BggVrN7-rYbE^TF4OPlq!*G3>M3Za!z%QVUBW%a;=YHj&h4~kMc<MOm%H$
zWMoM7Omzp-E(^RCGC(bG0XZqle1Z2uhJFb~hE$tWmuAK&pKyj0MivGZhGu3)hCCrd
zh6zl?83qiDU>M23$S{GakPB+LZ>ndCVybVdlLSLE6C=X}#=>tYjKK_=j7pO`nD!<_
zFfcHrGDI<^Fhnt>h_*ANF{ZGju(ohUF{g;7u%~deFh;SYaHepzFhsGYh^Mlp@XTRM
z;Z5OdVU1!>;ZG50VTj^L5lj(kVTj^P5l#_lVTj^tXJBE7;tpoel(;1_S%6c-%I#$e
z0|Ud$7t9O{$s9=WCCb3S0OB))qU;GH149WzGs6PLg$#@g=?pat@l2VMIa%E6Q<!QP
z;`vJiY8bLOQ<$5X`rB$bN(2`O)o?6itYMa9sO7BTbYX~1ieakds^u;buHkBC%o3^P
zDiN+>$l_>bTp+rT!G$5#p_T_GQo|$7kisI(P$C9Wo5Cv2Ak9$ATf@6Rd?7>8yc+Hr
zhAfsEE*pj#hAh?v^%6C_3mF+13g;Epg)^iuzyn^ukfHb+a<G>OFOXcwkR_GECdp9C
zm%?7dH=7}aV=i+IL%cND`O;wL%OE+QAzlW>Oi*Mp7W*KZ$_5S#*~u}i67@9<S^N_i
ziyo9Hft&_TQxUcNH4F=sYxoy3#xU0k)C$Hh*9z4N*NW6IWXUa%s1XFYy@nx6c7a5V
z0F*7iK%zzn%2rq)Q6mgxD=v_z5dpC$Fc!9@Fa$Gba{9fjU|?Vfxy4u%#qT!x5^Ff4
z%H&|SO>DPVlM|COCX29_*5BeuOHC{(ElQ0~N=z@l#g>zpl$uj~i?h5qGd?ddH?{Z{
z4~U9SN-RmvNG-m_QIJ@YnVMHpT*S-3z;KHrJ+&l0C9xz?Q}h-~acWLlksJd9Ly<g)
zkOdJ6AVL*H$bbk%kS4~oTcRbYxdrjXIg_P1%sg(ff=$203by<f2h8qUTnMXgu|q9h
z$#{!1K0Y}ovA8%sekH@NQ2mVj+|=Nb{3652=^Q>>;tUK7lAtshG<h$Fk})$MBOenF
zBNvkpqZ}g#GZ&*C6CWc8>M*e}!XO7DACnjp4<iIlZr~QLkB`sH%PfhHe?Ds_2<)^^
z1|@V@`Um-)oq>VD8D!BB1_p+7hFazlh8o6Z#w^BKmKv5ChAgHsh9VshUBe>DP|I4w
zki`sUv6ZkaV69<HVO+>m%U;5^fW3x2g-Mbjg&~EZl}VCeA!98^2}k_`PEZc1<t*V?
zzzJ2vjI3xOV=Y$=XBJlq$Z+lwjs@H`+zS~~7)p3*xKmhbm?RiP7@8SD2G;V_aMtiZ
zj89_%=Qyq!9!Um}{u<UAmKv@awlwB6=3oX*HoqcaP%N=BOg_XTBB#k)!~^1Lf(R)P
zVF-#oma4>}q9V1)&3xttn!H68APG>c-{MRu$}fm7N-Rme#SM;8ND3+fg;J3dNSW(o
zMt*U|sL7K2ik3wlAQ3R(31XQuFfasw!V{EG7?@a?xEQ$@xtO>Zc^Ji*xEN)axtQe`
z1sFM)CpU2GOpX+A2oeJad<{b^UoB$?Ll#2~qa?#brb3or2C&(hj77!_3=AuoZZYW@
z++xgJ$p~>P+{#;AHaVHaCCT}@1$NmC3=E$cCOh&;O=cIAw*slrWCE+d#h#g$l9`-Z
z3^ueV2IN+dgKu$x(mpuJ7H5In%>+svjFS@tH5tPvcL^>s1Q`O+6bW(%<1JQDdWM<<
zGNLG!0aR`>FcifxFfdeEO%~u3om?cOVc-w4BnU(Vf<l}*H#6@Rb53fWCOgDQ%t?vq
zMadxfa1aqO`IL~n64<&V5FcdrEy0rf__WN5)Rg$*%qnp9&YR3F{6Gt=3`{^=RTK)+
z0u})21G}jWBosGUR>U?M<bWcCdx9Al7&JM+t|&?asRHW`0kI%X0kP6SL<Wce8w(1Z
zB9Pg)ctAx(JSdsumzFe5-YSx8oC;DV4I;oH3)TQ8z#7=&<8xB;;^Vsz0bC_BS%6b~
za+0V}JtqSL11l^W&SGEymH7-sX*CR4jG(ko%LLATjFO<t$-IE2hG`*VEmH||7Hbw;
z7JCgymLr9+mkF$n6RZQ2ak;=OP=@6Ov)F2wYnW<SYFJ_Uq=q4j2P6|%%U;8<fVYNy
zA*l4`sAY;_u4SoZt!1m>K-B;)jhXy38E<jtmF7YUuhe2qz9MkJ2uh?yMxYSzpZr8r
zr5;>lnt=GGAi@kpfCC7WLo_+TVSkG?GcP5z0-UR&n9GYZA(_ktq}CNgfLc;Tjvy8|
zFu?>kM7iVR(^E?_OHy;=<9k7&DgjEiOl;81Cc?-A&Ub7AOk9j&9Fr5ojCj33an4wj
z2%>l<&k$2h0~NAGAm<b%fUE!|ilR&q3zUtDazLyiP{QSfmek3qIXUt1kh}nPiW^7?
z$UQ~;3=9k)ClvF68WfCt=3G)7ECM1N930G(EyS}K4JLnNRpctj$t;OaNlhwEpZr){
zzh09CsfqyyA}G+(A!YST`<EObr?TAQf|Np_^bZO7^8BLg)S}{B?8S+>1v#ljx3~&2
z^Wt+;bMuQTtJsoDQxdHVZZRhplxi{-flRF8a!O2B2+7P%wNlXZyTw+Jn3s}Rd`pn2
zAmtWoUTJPYWsxt)C;pQsO7t^EO;(gtXY&CGs89BjRID!sg#lB3@hz5;%-qx>J&=q(
zh_Gj1V2I)@DN4-Di%%{tyTu947e)E`C18I=apjkm6qJ_4r(_o0;>b<RPEE-yDh9jg
z7CT63acW5sIM6}4=N4n)Eyko<ToB_x(zm$E5_2+BCcl%k6OG~sDN4*M_Vx6+#Z;1i
zi#0btB{gTVj+8|GE!LvMy!6zfXpk#ei%SxVN^Y@$5(Xq=g9_|hET9q#QbK@|)h*V7
zqRhOKTdZk0`H3aBI1)=s@{<#bOK!129DIv8H7^Azn!#}misoA!Ho5sJr8%i~jG#0J
zDjO#6lu~yT=3?Su<Y44r;{#Q`%sfmyjC@QiOhU{8i~@{8Y+}q(j67@tOe~CCj4aF`
Vl{_p0%p8myjC{;|j5<sli~taq?R)?L

diff --git a/code/datasets/__pycache__/zarr_feature_dataloader_simple.cpython-39.pyc b/code/datasets/__pycache__/zarr_feature_dataloader_simple.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94974784ee317472ad2486c5810ae4bdd346192a
GIT binary patch
literal 5976
zcmYe~<>g{vU|`@<$xSX$V_<j;;vi#o1_lNP1_p*=2POuF6owSW9EK=HFwGRj45nG4
zSW*~Lm~vQhS)*7PL28(D*mBvU*uiX;9FAPhC{8e&HHRygJBl03X3OEp<&EOa<%{A2
z^VxIwa|NOVz-*2j!Cavzp<Ll8VK5(NCufdGu4t4fSd1%2ELS{A9L(m<k;s*Zk^r+A
zb0niA!S0fZk^<AxQPL?4DLgqcxw28Rj12A!DZD9sEet7qsoYs|&CF5qsR}9l*-R6d
zifmFCQjpDM%u$R|1e>80rIaeZKzSiUmP(3%Btw*HieRehY=#t}xy(^&P`2<~<|uV2
zTVyVCltzkRs`_k(6w$fNQJSfWSz4)@DPk$&y-bV@DH2iIsq(4nsTwJgDPpPGDN?<x
zQ97x*sd}k8&5SA1A`H!pQTi}`s=fq>Z;;BHs*|dpYLL#<%oM{EWf*0Y%9m=GB9J1J
zB9<cC%MxXr%Cf*Dl_kq`f!RWa35-QsQsnZOQsfshMuA1M%okWJWQej%VTiIyVMu38
z)fQohvQ80*vPrc~wP|K_VTg^2vP)H3ptO)7%05*h)fg<M6XlTV5W^hh808eh9OWG4
z66Koemg?Nh$jFfDxWIiO!vc>~lZA{?juRLQzd&_+rn;plq<W^>OE5GuF)~bGEPRl{
z7|ful_!5-1{4^PFv4y1O73UXeGT!0{ODxJvOv*{sWW2?mpO+e+kzb<8c#9<^u_RHG
z=@w5>VqQvqZhUb;PG-q1E|<iTM4$Y`lvI#b3D4Z5#GJ&u<kS?9cyVe;aAIyjPHK@R
z(=85={^Hb<;#*uLMTvREY57ID#ULw+GfLCaz_zgjB$i}=tSTu?$<<`M#p;=xn4X#p
zaw0NjfpUt&7#J8*!O1_0DMh56A&oJGC55$xGm1HdErq>>A&Mo1BZaetA&NCcG?gud
zdk$j?PYQ1fYZQA5UkZN<Llj4fK#E`sLlkF<P>OI1Lljp#0}DeGcQAvd*e#K$#G)d%
z)Wnj~qEx5Ebci3_UUmpGFuZ)h%)pS$0d)q55@%pw;AUW8U<T#P7$ydW5{71m1&j+B
z7#Y$TY8c{~GC?FWm}CKqu-35EFvPRfFxN1|v)3@zFvN4zFx4=`bJnobFvN4!u-7oe
zbEkmBdBEy<!6aV|YYjs@e~Ca1Ll$QWQ!`WlpIVL*!39D!919t1m?ar%Icqpw7-Db4
zFx7I^a+e6#a5Xb#iPUnH2-h%VaWpe75M9XN!VtTymZydxi#>%oo2lp=Os<AUnjwWn
znxRAtq$`D0oI#o)ow1fTFN%<g67du^aN0De<*VUcAc3Nu9d05=4c}~r6xO-SAd?p|
zGBOl?tKoHFh*hiQui;-H3C<%rHQY4}Su8bNHVickS*!~rYWP7aComT36wWKG3uj1S
zU}0ckXl7<)$P+STn7~+^XTZP+hLH@63?;$~q!uz{NvCj1GSmvBaMcLRW=P?l3kqc!
zaHz?ELrn%L)EMGrv6y-U*;H`Y%YjLGFsT406~UxZ4MP_H1g4^h5@nE!!Ql{5D_Fy@
zK&3`-A!7`4tx&CS40EkWt!S-S4MUdv0*M-7ke_N8vg8&>)CfV@3JWA^M4)WN1rjx)
zP`1(ni5f8wdjezOzZ8aG22CEn^jR}OV5j{p#-dv+C8@<FQ6g@xjv=8zuEFs^t|89O
zKJi8dCKgq!dR2)<MK4(y7#O1X(Nx@GiBBv|f62(e!0?Mvzsg*{GAA)l-z7D*z`ZCj
ztwg`1C^0iHGcP^91XLOn>lf!_rliIfB$i~R=9R>!WG0vBWfkY=X{z30EiNrcExN@K
zAD@|*SrQ+AixZUD<1_Qp@^5j0naPRC8L7AUz()8aCZ*=MWG0u~Vol4;NiDv`k5Cd{
zoLQB6izPQdCG{33NET#Y#x1TKkRAw|GcP_lC$YFVwfGiKZem4zQerwp7iUUQenEUu
zVoB;Pp48lelFE3nb;Y;%lJkp-N()M0d{&5SSn>-}^KP+#{BVmUCqFTzh=+lJ;TB7A
zNowvb_RNyhqLj>{Tg=5JMYmXs67$kiZ?Pp76r|>*++xctjt9#nm#5rfEl4aXDZV8R
zwhZJ%c<_KY8MlNmC2sMgK?)X-<BM;xfqhqei?h5qGd?dd7vwV#6`zz?lAMuRe2W7b
z=*306AQy6^r<TNn!c0^A7E5tzPFj%y0|P^mB8ZR&5lSFJ9Yn~12xX8a#<W|aC8@au
zkU)qBTM3TDTZ|RA7%OkFCFf`47nMYDSAd-zpOaZ!a*HuDiVecQ#gdVqn|h0}<Q6N~
zRkv8dCfwqHx%n0s!pXPTp$=Zjc#AV05?%4}D;a);>w`>zB((DUqU_>=#N<@{5Kw*K
z>*=G5NPEfoDXID(FM?_eeNY;XhXol(6ck&jMe)U%pxQ;Rpt4Akfq?;pi(^2=F(Vff
z8zT~g@gOuaA0r<V4<i?o5EB=p5|aWW52GFv2O}3V7o!PCj**X12ci;e1_vV_lNb{Z
zBLr5-VyhSQU@1SD31mMMgOU@dz2*$675Eq#7}6PPnMxRH7@HZh7;Bkpm=`dmFfL?F
zVUh&5(po_@OEy!HLki;pge*e}6G&zuNC&871eNkEC9DhBYFHLB*0PkaX0c~+WP$6A
zEUpyRUZz@>8ip)xaBaX^!n1(4hB1Y0AyX||3Eu+#8nzU6ux$)bGuca67YNj_FJ!Fc
z0I{HIIFQvWWUS?^;m8sMm#JJOtP6x{xE3;|Fn~*Wkrb{PhAdGD22dLbWM(aQ4Mz=k
z3PTD*D^nViBttDn4QCBExMt$0VX0xR;jCdzV@_iZX3*sJyTus)^8f$;|6hVC&Rfih
zrRkcCx4834b0KLawYW$Klz>6S7bt<|7o`*zfy%2QF$M+(O)+qWEz$-hZ&pZqge5CK
zGp|SnEK-tRl$>#kGqE_n1l+c{#gvzKiz}@(FS#T$KQA$-2-J`$0_PPIkOme|GAq&m
zF`4oTZm}j76(v^Q;z}#Z&yCM3%`K?B#af(^Sde;)1Jnd6NG-~}#hjT}QUuCgMJgZ%
z@f2BuoD0g0x46OC1d^SL+(5#vAi@J=JY!W9TV4Su^GC6R@=sD?`Yj%a`s93&CvS1U
zOEoYnGo_-q$Qz^|O!$CUR-iH+l(IlRU|?cl;$oCylwjgw<YE+I<YMGu6l3CIlwsy#
zQDPKe<Y2~;aA8FWO2P#tVo;2La4{$bz-=qW6s8uA8b%j}W>C|S*)JIs6VRm20MY^q
zPhpUYK(Pa=8)Nxu89NxV7-|?L8749nvIH}LJ)y~1WX8b2u#)K(lb%75BZ$ce2|2j$
zZgJUw3ZUft+yc8w1_p-DAbYBevF1IH7r=#4v7Sv%esW??v7H`5H^fFJu=%&xGxJh1
zlT)E#nGOm@P*~pLLYPqj@&L#;42)Is1nheW^0p>p5vYlp4Dvn5l^_ht^=z>AASm)n
z7#1+rFlI5OFiJ9j@=gsiIIpnOFxIe0GSsqy1DGj=xrL*K1r)%wY&EPkY$eP!%q1+K
z@}ijuoMTx0z{wwX$cr$5N|_plSRPQwqlG&-D2h_S;eCr0R9r&?0u%;C84L^zO`s6T
zWME*Zvcl^1lKl9z%!<?$aD@v>zt}=tleH)mWKJB2hy|s0=G@G@Tg*ABd7A8ycwkOS
zOfSj<$%BHfC<Ua58=U09{e|LNJRsM_7ejaucjtnXgIsz`5W^WYphyEZ$XO&9IT(d_
zs?>;cKkis_VgR+`7ckZ^WHEtatc1CQC55qu8L6Zr5PNA%!3>%#e&Di-3Ncoc015zj
z4gts4RFDujzD%+D7gSfqgPKM8r6mNT3S5rGgG>ZP3N)&U3PA#3t3gE^II4<@Kw`xp
z0_>bp5DOeLJjfPKfJX|m0w_|LsuT%25+$5K<qIe$g6mJvut*80PrwK&epWL2X)+ZR
zfGm~;5#X#0b_|#RJBB?zJ|{IVK7J<19UyZU7^`Hkx&W#Wr3~a`U|;~%J7CK}>8pkz
zi=oJ)h9Qd)l&xx+KnVlPVy<DxVg|E76)y{z#R}>QfLWj}9#ahqC_6){y&8rrHn0qP
z4Z{L<P}Lj5T+30*6vJH0Qp;M)Rs-q?BB=*8`HRXy)eA=vxJ?HtJBv&~zBUID79heB
zM1VsAR4r(7Lb4faW?o8aMG>gtiDE7<&V<xM?jW_GdZ-BGw;~r17aVF}0vvSQ@$u=Y
zC7C6ux$*IHK*0wpR~VSs7`Yg^SU@!l7n=YR7o!+wl>&|cg=^Pjb}JG9Ss@7W0%K7Q
zh~feD6~I|M{uWn!d~SY9X-;Z<{4JjN_`=e}9Ec3KQC9@=Pf<3=0C3VQ0kO(J1gM}c
zssfc9ywE0Qa%xUad_1JI0Q<!gqz>evB7O!21__Wy_!t-%I2idHIpjE41VlJEIhet6
zAot`ZX6D7mgR^T9s6$_+18JzjTcz;@Mfq8&$tC)b)@f=|X?i@klcbkiTvnwBQ4Pug
zNS!DnBa?VTBa719_%fp^Ux*U8ff!w^l=S$V%-qb9cq0SI084y;V{p8OYrI!}UTSfC
zUVc$-d{JsKq~le^ub%>PgMMOZdTwf7NpT2h^d_|;v$!NaKl>#p#ouBDyHAq^sdE5M
zL!dSTv=jN#{w1j7)MUQJ1!;nSyI|l(7^sP!T2y?Cs~|HkJ~uTtzo@c`Ex9x$(aPW!
zb8<mx6_-<Dx<W{1ZmN}nrq?aDg2cR(#Nt~_1u3^!!7ZkuC{PT5YXgvTqWHjVGQGso
zlKk|d#FQdX10zZpBAAz_ht@WW;)ckSmSpA>7lAV4EdeAEJy1{=fvVXeP<?xg2P_Av
zGm45pe!2xQpe(aEGe55gl%tD4#mFt*;_RH%#G*Vsh{>Rl8*rB;BPTNn(xwFUvx*`?
zj@JaGFy`bkqg%`Yo<2pO^mB{dC9x#2IJE?vbc(7$Dwy(%Z?TkQ=B5@If^->y2v8Lg
z#R-mq_~ha;Xs@^^KfeSV9k;mhOG^q$L0##hTO7HG*{La+MaAF-=`D7U-#|8i6FsP$
zyTzDzi!tdIXz(mE58V5^#Z{J=lbHfxGo|F*VoOOa%S=wa#R7_=TRfS?@rh-LnK_`|
z`z_|g+=5$N?nQ|y!O4j^sYSQgiXk*(auheX^%S3+Uz%5Pi!HSvKRKhgNDt%#P}}Sl
zYjH_pQOPY9P;mgM4nPIUEf!ENElL6@0~IE>SPP0W^FV$2oczR+TTCVSx0qA&Qjl^Z
zI7@<Z<1G%GTyT!J1C{W_pvr}ViGz^?+Qs5w6kr5l7#}3V!N|iZRL`Ky#l*wN!N|eJ
v2U5!d>XPv>u`mfS3or^W3b6?=OEL0rurP8lvM__x@vsQ6axe<9aWDb^77G)3

literal 0
HcmV?d00001

diff --git a/code/datasets/custom_jpg_dataloader.py b/code/datasets/custom_jpg_dataloader.py
index c28acc1..95d5adb 100644
--- a/code/datasets/custom_jpg_dataloader.py
+++ b/code/datasets/custom_jpg_dataloader.py
@@ -212,7 +212,7 @@ class JPGMILDataloader(data.Dataset):
         #     print(out_batch.shape)
         # out_batch = torch.permute(out_batch, (0, 2,1,3))
         label = torch.as_tensor(label)
-        label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+        # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
         # print(out_batch)
         return out_batch, label, (name, batch_names, patient) #, name_batch
 
diff --git a/code/datasets/data_interface.py b/code/datasets/data_interface.py
index fc4acab..7049b22 100644
--- a/code/datasets/data_interface.py
+++ b/code/datasets/data_interface.py
@@ -6,121 +6,125 @@ import pytorch_lightning as pl
 # from pytorch_lightning.loops.fit_loop import FitLoop
 
 from torch.utils.data import random_split, DataLoader
+from torch.utils.data.sampler import WeightedRandomSampler
 from torch.utils.data.dataset import Dataset, Subset
 from torchvision.datasets import MNIST
 from torchvision import transforms
-from .camel_dataloader import FeatureBagLoader
+# from .camel_dataloader import FeatureBagLoader
 from .custom_dataloader import HDF5MILDataloader
-from .custom_jpg_dataloader import JPGMILDataloader
-from .zarr_feature_dataloader import ZarrFeatureBagLoader
+# from .custom_jpg_dataloader import JPGMILDataloader
+from .simple_jpg_dataloader import JPGBagLoader
+from .zarr_feature_dataloader_simple import ZarrFeatureBagLoader
+from .feature_dataloader import FeatureBagLoader
 from pathlib import Path
 # from transformers import AutoFeatureExtractor
 from torchsampler import ImbalancedDatasetSampler
 
 from abc import ABC, abstractclassmethod, abstractmethod
 from sklearn.model_selection import KFold
+import numpy as np
+import torch
 
 
+# class DataInterface(pl.LightningDataModule):
 
-class DataInterface(pl.LightningDataModule):
+#     def __init__(self, train_batch_size=64, train_num_workers=8, test_batch_size=1, test_num_workers=1,dataset_name=None, **kwargs):
+#         """[summary]
 
-    def __init__(self, train_batch_size=64, train_num_workers=8, test_batch_size=1, test_num_workers=1,dataset_name=None, **kwargs):
-        """[summary]
+#         Args:
+#             batch_size (int, optional): [description]. Defaults to 64.
+#             num_workers (int, optional): [description]. Defaults to 8.
+#             dataset_name (str, optional): [description]. Defaults to ''.
+#         """        
+#         super().__init__()
 
-        Args:
-            batch_size (int, optional): [description]. Defaults to 64.
-            num_workers (int, optional): [description]. Defaults to 8.
-            dataset_name (str, optional): [description]. Defaults to ''.
-        """        
-        super().__init__()
-
-        self.train_batch_size = train_batch_size
-        self.train_num_workers = train_num_workers
-        self.test_batch_size = test_batch_size
-        self.test_num_workers = test_num_workers
-        self.dataset_name = dataset_name
-        self.kwargs = kwargs
-        self.load_data_module()
-        home = Path.cwd().parts[1]
-        self.data_root = f'/{home}/ylan/RCC_project/rcc_classification/datasets/Camelyon16/Camelyon16.csv'
+#         self.train_batch_size = train_batch_size
+#         self.train_num_workers = train_num_workers
+#         self.test_batch_size = test_batch_size
+#         self.test_num_workers = test_num_workers
+#         self.dataset_name = dataset_name
+#         self.kwargs = kwargs
+#         self.load_data_module()
+#         home = Path.cwd().parts[1]
+#         self.data_root = f'/{home}/ylan/RCC_project/rcc_classification/datasets/Camelyon16/Camelyon16.csv'
 
  
 
-    def prepare_data(self):
-        # 1. how to download
-        # MNIST(self.data_dir, train=True, download=True)
-        # MNIST(self.data_dir, train=False, download=True)
-        ...
-
-    def setup(self, stage=None):
-        # 2. how to split, argument
-        """  
-        - count number of classes
-
-        - build vocabulary
-
-        - perform train/val/test splits
-
-        - apply transforms (defined explicitly in your datamodule or assigned in init)
-        """
-        # Assign train/val datasets for use in dataloaders
-        if stage == 'fit' or stage is None:
-            dataset = FeatureBagLoader(data_root = self.data_root,
-                                                train=True)
-            a = int(len(dataset)* 0.8)
-            b = int(len(dataset) - a)
-            # print(a)
-            # print(b)
-            self.train_dataset, self.val_dataset = random_split(dataset, [a, b]) # returns data.Subset
-
-            # self.train_dataset = self.instancialize(state='train')
-            # self.val_dataset = self.instancialize(state='val')
+#     def prepare_data(self):
+#         # 1. how to download
+#         # MNIST(self.data_dir, train=True, download=True)
+#         # MNIST(self.data_dir, train=False, download=True)
+#         ...
+
+#     def setup(self, stage=None):
+#         # 2. how to split, argument
+#         """  
+#         - count number of classes
+
+#         - build vocabulary
+
+#         - perform train/val/test splits
+
+#         - apply transforms (defined explicitly in your datamodule or assigned in init)
+#         """
+#         # Assign train/val datasets for use in dataloaders
+#         if stage == 'fit' or stage is None:
+#             dataset = FeatureBagLoader(data_root = self.data_root,
+#                                                 train=True)
+#             a = int(len(dataset)* 0.8)
+#             b = int(len(dataset) - a)
+#             # print(a)
+#             # print(b)
+#             self.train_dataset, self.val_dataset = random_split(dataset, [a, b]) # returns data.Subset
+
+#             # self.train_dataset = self.instancialize(state='train')
+#             # self.val_dataset = self.instancialize(state='val')
  
 
-        # Assign test dataset for use in dataloader(s)
-        if stage == 'test' or stage is None:
-            # self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
-            self.test_dataset = FeatureBagLoader(data_root = self.data_root,
-                                                train=False)
-            # self.test_dataset = self.instancialize(state='test')
+#         # Assign test dataset for use in dataloader(s)
+#         if stage == 'test' or stage is None:
+#             # self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
+#             self.test_dataset = FeatureBagLoader(data_root = self.data_root,
+#                                                 train=False)
+#             # self.test_dataset = self.instancialize(state='test')
 
 
-    def train_dataloader(self):
-        return DataLoader(self.train_dataset, batch_size=self.train_batch_size, num_workers=self.train_num_workers, shuffle=False)
+#     def train_dataloader(self):
+#         return DataLoader(self.train_dataset, batch_size=self.train_batch_size, num_workers=self.train_num_workers, shuffle=False)
 
-    def val_dataloader(self):
-        return DataLoader(self.val_dataset, batch_size=self.train_batch_size, num_workers=self.train_num_workers, shuffle=False)
+#     def val_dataloader(self):
+#         return DataLoader(self.val_dataset, batch_size=self.train_batch_size, num_workers=self.train_num_workers, shuffle=False)
 
-    def test_dataloader(self):
-        return DataLoader(self.test_dataset, batch_size=self.test_batch_size, num_workers=self.test_num_workers, shuffle=False)
+#     def test_dataloader(self):
+#         return DataLoader(self.test_dataset, batch_size=self.test_batch_size, num_workers=self.test_num_workers, shuffle=False)
 
 
-    def load_data_module(self):
-        camel_name =  ''.join([i.capitalize() for i in (self.dataset_name).split('_')])
-        try:
-            self.data_module = getattr(importlib.import_module(
-                f'datasets.{self.dataset_name}'), camel_name)
-        except:
-            raise ValueError(
-                'Invalid Dataset File Name or Invalid Class Name!')
+#     def load_data_module(self):
+#         camel_name =  ''.join([i.capitalize() for i in (self.dataset_name).split('_')])
+#         try:
+#             self.data_module = getattr(importlib.import_module(
+#                 f'datasets.{self.dataset_name}'), camel_name)
+#         except:
+#             raise ValueError(
+#                 'Invalid Dataset File Name or Invalid Class Name!')
     
-    def instancialize(self, **other_args):
-        """ Instancialize a model using the corresponding parameters
-            from self.hparams dictionary. You can also input any args
-            to overwrite the corresponding value in self.kwargs.
-        """
-        class_args = inspect.getargspec(self.data_module.__init__).args[1:]
-        inkeys = self.kwargs.keys()
-        args1 = {}
-        for arg in class_args:
-            if arg in inkeys:
-                args1[arg] = self.kwargs[arg]
-        args1.update(other_args)
-        return self.data_module(**args1)
+#     def instancialize(self, **other_args):
+#         """ Instancialize a model using the corresponding parameters
+#             from self.hparams dictionary. You can also input any args
+#             to overwrite the corresponding value in self.kwargs.
+#         """
+#         class_args = inspect.getargspec(self.data_module.__init__).args[1:]
+#         inkeys = self.kwargs.keys()
+#         args1 = {}
+#         for arg in class_args:
+#             if arg in inkeys:
+#                 args1[arg] = self.kwargs[arg]
+#         args1.update(other_args)
+#         return self.data_module(**args1)
 
 class MILDataModule(pl.LightningDataModule):
 
-    def __init__(self, data_root: str, label_path: str, batch_size: int=1, num_workers: int=8, n_classes=2, cache: bool=True, use_features=False, *args, **kwargs):
+    def __init__(self, data_root: str, label_path: str, batch_size: int=1, num_workers: int=8, n_classes=2, cache: bool=True, use_features=False, mixup=False, aug=False, *args, **kwargs):
         super().__init__()
         self.data_root = data_root
         self.label_path = label_path
@@ -134,32 +138,37 @@ class MILDataModule(pl.LightningDataModule):
         self.num_bags_train = 200
         self.num_bags_test = 50
         self.seed = 1
+        self.mixup = mixup
+        self.aug = aug
 
 
+        self.class_weight = []
         self.cache = cache
         self.fe_transform = None
         if not use_features: 
-            self.base_dataloader = JPGMILDataloader
+            self.base_dataloader = JPGBagLoader
         else: 
-            
-            self.base_dataloader = ZarrFeatureBagLoader
+            self.base_dataloader = FeatureBagLoader
             self.cache = True
-        
-
 
     def setup(self, stage: Optional[str] = None) -> None:
         home = Path.cwd().parts[1]
 
         if stage in (None, 'fit'):
-            dataset = self.base_dataloader(self.data_root, label_path=self.label_path, mode='train', n_classes=self.n_classes, cache=self.cache)
+            dataset = self.base_dataloader(self.data_root, label_path=self.label_path, mode='train', n_classes=self.n_classes, cache=self.cache, mixup=self.mixup, aug=self.aug)
             # dataset = JPGMILDataloader(self.data_root, label_path=self.label_path, mode='train', n_classes=self.n_classes)
             print(len(dataset))
             a = int(len(dataset)* 0.8)
             b = int(len(dataset) - a)
             self.train_data, self.valid_data = random_split(dataset, [a, b])
 
+            # self.weights = self.get_weights(dataset)
+
+
+
         if stage in (None, 'test'):
-            self.test_data = self.base_dataloader(self.data_root, label_path=self.label_path, mode='test', n_classes=self.n_classes, data_cache_size=1)
+            self.test_data = self.base_dataloader(self.data_root, label_path=self.label_path, mode='test', n_classes=self.n_classes, cache=False)
+            print(len(self.test_data))
 
         return super().setup(stage=stage)
 
@@ -167,6 +176,7 @@ class MILDataModule(pl.LightningDataModule):
 
     def train_dataloader(self) -> DataLoader:
         # return DataLoader(self.train_data,  batch_size = self.batch_size, num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
+        # return DataLoader(self.train_data,  batch_size = self.batch_size, sampler = WeightedRandomSampler(self.weights, len(self.weights)), num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
         return DataLoader(self.train_data,  batch_size = self.batch_size, sampler=ImbalancedDatasetSampler(self.train_data), num_workers=self.num_workers) #batch_transforms=self.transform, pseudo_batch_dim=True, 
         #sampler=ImbalancedDatasetSampler(self.train_data)
     def val_dataloader(self) -> DataLoader:
@@ -174,6 +184,22 @@ class MILDataModule(pl.LightningDataModule):
     
     def test_dataloader(self) -> DataLoader:
         return DataLoader(self.test_data, batch_size = self.batch_size, num_workers=self.num_workers)
+
+    def get_weights(self, dataset):
+
+        label_count = [0]*self.n_classes
+        labels = dataset.get_labels(np.arange(len(dataset)))
+        for i in labels:
+            label_count[i] += 1
+        weights_per_class = [0.] * self.n_classes
+        for i in range(self.n_classes):
+            weights_per_class[i] = float(len(labels) / float(label_count[i]))
+        weights_per_class = [i / sum(weights_per_class) for i in weights_per_class]
+        weights = [0.] * len(labels)
+        for i in range(len(labels)):
+            weights[i] = weights_per_class[labels[i]]
+
+        return torch.DoubleTensor(weights)
     
 
 class DataModule(pl.LightningDataModule):
diff --git a/code/datasets/feature_dataloader.py b/code/datasets/feature_dataloader.py
new file mode 100644
index 0000000..3e9dbb5
--- /dev/null
+++ b/code/datasets/feature_dataloader.py
@@ -0,0 +1,394 @@
+import pandas as pd
+
+import numpy as np
+import torch
+from torch import Tensor
+from torch.autograd import Variable
+from torch.nn.functional import one_hot
+from torch.utils import data
+from torch.utils.data import random_split, DataLoader
+from torchsampler import ImbalancedDatasetSampler
+from torchvision import datasets, transforms
+import pandas as pd
+from sklearn.utils import shuffle
+from pathlib import Path
+from tqdm import tqdm
+import zarr
+import json
+import cv2
+from PIL import Image
+import h5py
+# from models import TransMIL
+
+
+
+class FeatureBagLoader(data.Dataset):
+    def __init__(self, file_path, label_path, mode, n_classes, cache=False, mixup=False, aug=False, data_cache_size=5000, max_bag_size=1000):
+        super().__init__()
+
+        self.data_info = []
+        self.data_cache = {}
+        self.slideLabelDict = {}
+        self.files = []
+        self.data_cache_size = data_cache_size
+        self.mode = mode
+        self.file_path = file_path
+        # self.csv_path = csv_path
+        self.label_path = label_path
+        self.n_classes = n_classes
+        self.max_bag_size = max_bag_size
+        self.drop_rate = 0.2
+        # self.min_bag_size = 120
+        self.empty_slides = []
+        self.corrupt_slides = []
+        self.cache = cache
+        self.mixup = mixup
+        self.aug = aug
+        
+        self.missing = []
+
+        home = Path.cwd().parts[1]
+        self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict_an.json'
+        with open(self.slide_patient_dict_path, 'r') as f:
+            self.slide_patient_dict = json.load(f)
+
+        # read labels and slide_path from csv
+        with open(self.label_path, 'r') as f:
+            json_dict = json.load(f)
+            temp_slide_label_dict = json_dict[self.mode]
+            # print(len(temp_slide_label_dict))
+            for (x,y) in temp_slide_label_dict:
+                
+                x_name = Path(x).stem
+                x_path_list = [Path(self.file_path)/x]
+                # x_name = x.stem
+                # x_path_list = [Path(self.file_path)/ x for (x,y) in temp_slide_label_dict]
+                if self.aug:
+                    for i in range(5):
+                        aug_path = Path(self.file_path)/f'{x}_aug{i}'
+                        x_path_list.append(aug_path)
+
+                for x_path in x_path_list: 
+                    
+                    if x_path.exists():
+                        self.slideLabelDict[x_name] = y
+                        self.files.append(x_path)
+                    elif Path(str(x_path) + '.zarr').exists():
+                        self.slideLabelDict[x] = y
+                        self.files.append(str(x_path)+'.zarr')
+                    else:
+                        self.missing.append(x)
+                # print(x, y)
+                # x_complete_path = Path(self.file_path)/Path(x)
+                # for cohort in Path(self.file_path).iterdir():
+                #     # x_complete_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL' / (str(x) + '.zarr')
+                #     # if self.mode == 'test': #set to test if using GAN output
+                #     #     x_path_list = [Path(self.file_path) / cohort / 'FE' / (str(x) + '.zarr')]
+                #     # else:
+                #     # x_path_list = [Path(self.file_path) / cohort / 'FEATURES' / (str(x))]
+                #     x_path_list = [Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x))]
+                #     # if not self.mixup:
+                #     for i in range(5):
+                #         aug_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x) + f'_aug{i}')
+                #         if aug_path.exists():
+                #             x_path_list.append(aug_path)
+                #     # print(x_complete_path)
+                #     for x_path in x_path_list:
+                #         # print(x_path)
+                        
+                #         if x_path.exists():
+                #             # print(x_path)
+                #             # if len(list(x_complete_path.iterdir())) > self.min_bag_size:
+                #             # # print(x_complete_path)
+                #             self.slideLabelDict[x] = y
+                #             self.files.append(x_path)
+                #         elif Path(str(x_path) + '.zarr').exists():
+                #             self.slideLabelDict[x] = y
+                #             self.files.append(str(x_path)+'.zarr')
+                #         else:
+                #             self.missing.append(x)
+        
+        # mix in 10 Slides of Test data
+            # if 'test_mixin' in json_dict.keys():
+            #     test_slide_label_dict = json_dict['test']
+            #     for (x, y) in test_slide_label_dict:
+            #         x = Path(x).stem
+            #         for cohort in Path(self.file_path).iterdir():
+            #             x_path_list = [Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x))]
+            #             for x_path in x_path_list:
+            #                 if x_path.exists():
+            #                     self.slideLabelDict[x] = y
+            #                     self.files.append(x_path)
+            #                     patient = self.slide_patient_dict[x]
+            #                 elif Path(str(x_path) + '.zarr').exists():
+            #                     self.slideLabelDict[x] = y
+            #                     self.files.append(str(x_path)+'.zarr')
+
+
+
+        
+
+        self.feature_bags = []
+        self.labels = []
+        self.wsi_names = []
+        self.coords = []
+        self.patients = []
+        if self.cache:
+            for t in tqdm(self.files):
+                # zarr_t = str(t) + '.zarr'
+                batch, label, (wsi_name, batch_coords, patient) = self.get_data(t)
+
+                # print(label)
+                self.labels.append(label)
+                self.feature_bags.append(batch)
+                self.wsi_names.append(wsi_name)
+                self.coords.append(batch_coords)
+                self.patients.append(patient)
+        
+
    def get_data(self, file_path):
        """Load one feature bag from disk.

        Supports two on-disk layouts: a ``.zarr`` group with ``data``/``coords``
        arrays, and an HDF5 file with ``features``/``coords`` datasets.

        Returns (wsi_bag, label, (wsi_name, batch_coords, patient)) where
        wsi_bag holds the per-tile feature vectors and batch_coords the
        matching tile coordinates.
        """
        batch_names=[] #add function for name_batch read out

        wsi_name = Path(file_path).stem
        # Augmented copies are stored as '<slide>_aug<n>'; strip the suffix so
        # the label/patient lookups use the base slide name.
        if wsi_name.split('_')[-1][:3] == 'aug':
            wsi_name = '_'.join(wsi_name.split('_')[:-1])
        # if wsi_name in self.slideLabelDict:
        label = self.slideLabelDict[wsi_name]
        patient = self.slide_patient_dict[wsi_name]

        if Path(file_path).suffix == '.zarr':
            z = zarr.open(file_path, 'r')
            np_bag = np.array(z['data'][:])
            coords = np.array(z['coords'][:])
        else:
            # HDF5 layout; slicing with [:] materialises numpy arrays before
            # the file is closed.
            with h5py.File(file_path, 'r') as hdf5_file:
                np_bag = hdf5_file['features'][:]
                coords = hdf5_file['coords'][:]

        # np_bag = torch.load(file_path)
        # z = zarr.open(file_path, 'r')
        # np_bag = np.array(z['data'][:])
        # np_bag = np.array(zarr.open(file_path, 'r')).astype(np.uint8)
        # label = torch.as_tensor(label)
        label = int(label)
        wsi_bag = torch.from_numpy(np_bag)
        batch_coords = torch.from_numpy(coords)

        return wsi_bag, label, (wsi_name, batch_coords, patient)
+    
    def get_labels(self, indices):
        """Return the labels for the given sample indices.

        NOTE(review): self.labels is only populated by the caching loop in
        __init__, so this is only meaningful when cache=True — presumably
        consumed by a sampler (e.g. ImbalancedDatasetSampler); confirm.
        """
        # for i in indices: 
        #     print(self.labels[i])
        return [self.labels[i] for i in indices]
+
+
+    def to_fixed_size_bag(self, bag, names, bag_size: int = 512):
+
+        #duplicate bag instances unitl 
+
+        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
+        bag_samples = bag[bag_idxs]
+        name_samples = [names[i] for i in bag_idxs]
+        # bag_sample_names = [bag_names[i] for i in bag_idxs]
+        # q, r  = divmod(bag_size, bag_samples.shape[0])
+        # if q > 0:
+        #     bag_samples = torch.cat([bag_samples]*q, 0)
+
+        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
+
+        # zero-pad if we don't have enough samples
+        # zero_padded = torch.cat((bag_samples,
+        #                         torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
+
+        return bag_samples, name_samples, min(bag_size, len(bag))
+
+    def data_dropout(self, bag, batch_names, drop_rate):
+        # bag_size = self.max_bag_size
+        bag_size = bag.shape[0]
+        bag_idxs = torch.randperm(self.max_bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_samples = bag[bag_idxs]
+        name_samples = [batch_names[i] for i in bag_idxs]
+
+        return bag_samples, name_samples
+
+    def get_mixup_bag(self, bag):
+
+        bag_size = bag.shape[0]
+
+        a = torch.rand([bag_size])
+        b = 0.6
+        rand_x = torch.randint(0, bag_size, [bag_size,])
+        rand_y = torch.randint(0, bag_size, [bag_size,])
+
+        bag_x = bag[rand_x, :]
+        bag_y = bag[rand_y, :]
+
+        # print('bag_x: ', bag_x.shape)
+        # print('bag_y: ', bag_y.shape)
+        # print('a*bag_x: ', (a*bag_x).shape)
+        # print('(1.0-a)*bag_y: ', ((1.0-a)*bag_y).shape)
+
+        temp_bag = (bag_x.t()*a).t() + (bag_y.t()*(1.0-a)).t()
+        # print('temp_bag: ', temp_bag.shape)
+
+        if bag_size < self.max_bag_size:
+            diff = self.max_bag_size - bag_size
+            bag_idxs = torch.randperm(bag_size)[:diff]
+            
+            # print('bag: ', bag.shape)
+            # print('bag_idxs: ', bag_idxs.shape)
+            mixup_bag = torch.cat((bag, temp_bag[bag_idxs, :]))
+            # print('mixup_bag: ', mixup_bag.shape)
+        else:
+            random_sample_list = torch.rand(bag_size)
+            mixup_bag = [bag[i] if random_sample_list[i] else temp_bag[i] > b for i in range(bag_size)] #make pytorch native?!
+            mixup_bag = torch.stack(mixup_bag)
+            # print('else')
+            # print(mixup_bag.shape)
+
+        return mixup_bag
+
    def __len__(self):
        # One item per discovered feature file (a slide can contribute several
        # entries via its pre-computed '_aug<n>' copies).
        return len(self.files)
+
    def __getitem__(self, index):
        """Return (out_bag, label, (wsi_name, batch_coords, patient)).

        In 'train' mode the bag is randomly subsampled to max_bag_size,
        optionally mixup-augmented, zero-padded back to max_bag_size and
        shuffled; in any other mode the full bag is returned unchanged.
        """
        if self.cache:
            # Cached path: everything was materialised by the loop in __init__.
            label = self.labels[index]
            bag = self.feature_bags[index]
            
        
            
            # label = Variable(Tensor(label))
            # label = torch.as_tensor(label)
            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
            wsi_name = self.wsi_names[index]
            batch_coords = self.coords[index]
            patient = self.patients[index]

            
            #random dropout
            #shuffle

            # feats = Variable(Tensor(feats))
            # return wsi, label, (wsi_name, batch_coords, patient)
        else:
            # Uncached path: read the bag from disk on every access.
            t = self.files[index]
            bag, label, (wsi_name, batch_coords, patient) = self.get_data(t)
            # label = torch.as_tensor(label)
            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
                # self.labels.append(label)
                # self.feature_bags.append(batch)
                # self.wsi_names.append(wsi_name)
                # self.name_batches.append(name_batch)
                # self.patients.append(patient)
        if self.mode == 'train':
            bag_size = bag.shape[0]

            bag_idxs = torch.randperm(bag_size)[:self.max_bag_size]
            # bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
            out_bag = bag[bag_idxs, :]
            if self.mixup:
                out_bag = self.get_mixup_bag(out_bag)
                # batch_coords = 
            # Zero-pad short bags so every training item has max_bag_size rows.
            if out_bag.shape[0] < self.max_bag_size:
                out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))

            # shuffle again
            out_bag_idxs = torch.randperm(out_bag.shape[0])
            out_bag = out_bag[out_bag_idxs]


            # batch_coords only useful for test
            # NOTE(review): after mixup/zero-padding/shuffling, the rows of
            # out_bag no longer correspond 1:1 to batch_coords[bag_idxs] (and
            # the lengths can differ); per the comment above, coords are only
            # relied upon in test mode — confirm no train-time consumer reads
            # them positionally.
            batch_coords = batch_coords[bag_idxs]
            

        # mixup? Linear combination of 2 vectors
        # add noise


        else: out_bag = bag

        return out_bag, label, (wsi_name, batch_coords, patient)
+
+if __name__ == '__main__':
+    
+    from pathlib import Path
+    import os
+    import time
+    # from fast_tensor_dl import FastTensorDataLoader
+    # from custom_resnet50 import resnet50_baseline
+    
+    
+
+    home = Path.cwd().parts[1]
+    train_csv = f'/{home}/ylan/DeepGraft_project/code/debug_train.csv'
+    data_root = f'/{home}/ylan/data/DeepGraft/224_128uM_annotated'
+    # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
+    # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
+    # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest_test.json'
+    output_dir = f'/{data_root}/debug/augments'
+    os.makedirs(output_dir, exist_ok=True)
+
+    n_classes = 2
+
+    dataset = FeatureBagLoader(data_root, label_path=label_path, mode='train', cache=False, mixup=True, aug=True, n_classes=n_classes)
+
+    test_dataset = FeatureBagLoader(data_root, label_path=label_path, mode='test', cache=False, n_classes=n_classes)
+
+    # print(dataset.get_labels(0))
+    a = int(len(dataset)* 0.8)
+    b = int(len(dataset) - a)
+    train_data, valid_data = random_split(dataset, [a, b])
+
+    train_dl = DataLoader(train_data, batch_size=1, num_workers=5)
+    valid_dl = DataLoader(valid_data, batch_size=1, num_workers=5)
+    test_dl = DataLoader(test_dataset)
+
+    print('train_dl: ', len(train_dl))
+    print('valid_dl: ', len(valid_dl))
+    print('test_dl: ', len(test_dl))
+
+
+    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    # scaler = torch.cuda.amp.GradScaler()
+
+    # model_ft = resnet50_baseline(pretrained=True)
+    # for param in model_ft.parameters():
+    #     param.requires_grad = False
+    # model_ft.to(device)
+    # model = TransMIL(n_classes=n_classes).to(device)
+    
+
+    # print(dataset.get_labels(np.arange(len(dataset))))
+
+    c = 0
+    label_count = [0] *n_classes
+    epochs = 1
+    # print(len(dl))
+    # start = time.time()
+    for i in range(epochs):
+        start = time.time()
+        for item in tqdm(train_dl): 
+
+            # if c >= 10:
+            #     break
+            bag, label, (name, batch_coords, patient) = item
+            print(bag.shape)
+            # print(bag.shape, label)
+            # print(len(batch_names))
+            # print(label)
+            # print(batch_coords)
+            # print(name)
+            # bag = bag.float().to(device)
+            # print(bag.shape)
+            # label = label.to(device)
+            # with torch.cuda.amp.autocast():
+            #     output = model(bag)
+            # c += 1
+        end = time.time()
+        print('Bag Time: ', end-start)
+
+    
\ No newline at end of file
diff --git a/code/datasets/feature_dataloader_deca.py b/code/datasets/feature_dataloader_deca.py
new file mode 100644
index 0000000..a0543c5
--- /dev/null
+++ b/code/datasets/feature_dataloader_deca.py
@@ -0,0 +1,320 @@
+import pandas as pd
+
+import numpy as np
+import torch
+from torch import Tensor
+from torch.autograd import Variable
+from torch.nn.functional import one_hot
+from torch.utils import data
+from torch.utils.data import random_split, DataLoader
+from torchsampler import ImbalancedDatasetSampler
+from torchvision import datasets, transforms
+import pandas as pd
+from sklearn.utils import shuffle
+from pathlib import Path
+from tqdm import tqdm
+import zarr
+import json
+import cv2
+from PIL import Image
+import h5py
+# from models import TransMIL
+
+
+
+class FeatureBagLoader(data.Dataset):
    def __init__(self, file_path, label_path, mode, n_classes, cache=False, data_cache_size=5000, max_bag_size=1000):
        """Bag-level feature dataset (decathlon variant).

        Scans <file_path>/<cohort>/FEATURES_RETCCL_2048/ for one feature file
        per slide listed in the <mode> split of the label json — either a
        plain path or a '.zarr' sibling — plus any pre-computed '_aug0'..'_aug4'
        copies. When cache=True all bags are loaded into memory up front.
        """
        super().__init__()

        # NOTE(review): data_info, data_cache and data_cache_size are never
        # used in the visible code of this class — presumably leftovers from
        # an earlier chunked-hdf5 loader; confirm before relying on them.
        self.data_info = []
        self.data_cache = {}
        self.slideLabelDict = {}
        self.files = []
        self.data_cache_size = data_cache_size
        self.mode = mode
        self.file_path = file_path
        # self.csv_path = csv_path
        self.label_path = label_path
        self.n_classes = n_classes
        self.max_bag_size = max_bag_size
        self.drop_rate = 0.2
        # self.min_bag_size = 120
        self.empty_slides = []
        self.corrupt_slides = []
        self.cache = cache
        
        # slide names whose feature file could not be found in any cohort
        self.missing = []

        # slide name -> patient id mapping, shared across datasets
        home = Path.cwd().parts[1]
        self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict_an.json'
        with open(self.slide_patient_dict_path, 'r') as f:
            self.slide_patient_dict = json.load(f)

        # read labels and slide_path from csv
        with open(self.label_path, 'r') as f:
            json_dict = json.load(f)
            temp_slide_label_dict = json_dict[self.mode]
            # print(len(temp_slide_label_dict))
            for (x, y) in temp_slide_label_dict:
                x = Path(x).stem
                
                # print(x, y)
                # x_complete_path = Path(self.file_path)/Path(x)
                for cohort in Path(self.file_path).iterdir():
                    # x_complete_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL' / (str(x) + '.zarr')
                    # if self.mode == 'test': #set to test if using GAN output
                    #     x_path_list = [Path(self.file_path) / cohort / 'FE' / (str(x) + '.zarr')]
                    # else:
                    # x_path_list = [Path(self.file_path) / cohort / 'FEATURES' / (str(x))]
                    x_path_list = [Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x))]
                    # also pick up pre-computed augmented copies (_aug0.._aug4)
                    for i in range(5):
                        aug_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x) + f'_aug{i}')
                        if aug_path.exists():
                            x_path_list.append(aug_path)
                    # print(x_complete_path)
                    for x_path in x_path_list:
                        # print(x_path)
                        
                        if x_path.exists():
                            # print(x_path)
                            # if len(list(x_complete_path.iterdir())) > self.min_bag_size:
                            # # print(x_complete_path)
                            self.slideLabelDict[x] = y
                            self.files.append(x_path)
                        elif Path(str(x_path) + '.zarr').exists():
                            self.slideLabelDict[x] = y
                            self.files.append(str(x_path)+'.zarr')
                        else:
                            self.missing.append(x)
        
        # mix in 10 Slides of Test data
            # NOTE(review): this branch is live and appends slides from the
            # *test* split to self.files whenever the json declares
            # 'test_mixin'. In train mode that leaks test slides into the
            # training data — confirm this is intended.
            if 'test_mixin' in json_dict.keys():
                test_slide_label_dict = json_dict['test']
                for (x, y) in test_slide_label_dict:
                    x = Path(x).stem
                    for cohort in Path(self.file_path).iterdir():
                        x_path_list = [Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x))]
                        for x_path in x_path_list:
                            if x_path.exists():
                                self.slideLabelDict[x] = y
                                self.files.append(x_path)
                                # NOTE(review): 'patient' is assigned here but
                                # never used.
                                patient = self.slide_patient_dict[x]
                            elif Path(str(x_path) + '.zarr').exists():
                                self.slideLabelDict[x] = y
                                self.files.append(str(x_path)+'.zarr')



        

        # With cache=True, materialise every bag now; __getitem__ then serves
        # purely from these parallel lists.
        self.feature_bags = []
        self.labels = []
        self.wsi_names = []
        self.coords = []
        self.patients = []
        if self.cache:
            for t in tqdm(self.files):
                # zarr_t = str(t) + '.zarr'
                batch, label, (wsi_name, batch_coords, patient) = self.get_data(t)

                # print(label)
                self.labels.append(label)
                self.feature_bags.append(batch)
                self.wsi_names.append(wsi_name)
                self.coords.append(batch_coords)
                self.patients.append(patient)
+
+    def get_data(self, file_path):
+        
+        batch_names=[] #add function for name_batch read out
+
+        wsi_name = Path(file_path).stem
+        if wsi_name.split('_')[-1][:3] == 'aug':
+            wsi_name = '_'.join(wsi_name.split('_')[:-1])
+        # if wsi_name in self.slideLabelDict:
+        label = self.slideLabelDict[wsi_name]
+        patient = self.slide_patient_dict[wsi_name]
+
+        if Path(file_path).suffix == '.zarr':
+            z = zarr.open(file_path, 'r')
+            np_bag = np.array(z['data'][:])
+            coords = np.array(z['coords'][:])
+        else:
+            with h5py.File(file_path, 'r') as hdf5_file:
+                np_bag = hdf5_file['features'][:]
+                coords = hdf5_file['coords'][:]
+
+        # np_bag = torch.load(file_path)
+        # z = zarr.open(file_path, 'r')
+        # np_bag = np.array(z['data'][:])
+        # np_bag = np.array(zarr.open(file_path, 'r')).astype(np.uint8)
+        # label = torch.as_tensor(label)
+        label = int(label)
+        wsi_bag = torch.from_numpy(np_bag)
+        batch_coords = torch.from_numpy(coords)
+
+        return wsi_bag, label, (wsi_name, batch_coords, patient)
+    
+    def get_labels(self, indices):
+        # for i in indices: 
+        #     print(self.labels[i])
+        return [self.labels[i] for i in indices]
+
+
+    def to_fixed_size_bag(self, bag, names, bag_size: int = 512):
+
+        #duplicate bag instances unitl 
+
+        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
+        bag_samples = bag[bag_idxs]
+        name_samples = [names[i] for i in bag_idxs]
+        # bag_sample_names = [bag_names[i] for i in bag_idxs]
+        # q, r  = divmod(bag_size, bag_samples.shape[0])
+        # if q > 0:
+        #     bag_samples = torch.cat([bag_samples]*q, 0)
+
+        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
+
+        # zero-pad if we don't have enough samples
+        # zero_padded = torch.cat((bag_samples,
+        #                         torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
+
+        return bag_samples, name_samples, min(bag_size, len(bag))
+
+    def data_dropout(self, bag, batch_names, drop_rate):
+        # bag_size = self.max_bag_size
+        # bag_size = bag.shape[0]
+        bag_idxs = torch.randperm(self.max_bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_samples = bag[bag_idxs]
+        name_samples = [batch_names[i] for i in bag_idxs]
+
+        return bag_samples, name_samples
+
+    def __len__(self):
+        return len(self.files)
+
    def __getitem__(self, index):
        """Return (out_bag, label, (wsi_name, batch_coords, patient)).

        Outside 'test' mode the bag is randomly subsampled (with an implicit
        dropout via self.drop_rate) and zero-padded up to max_bag_size; in
        'test' mode the full bag is returned unchanged.
        """
        if self.cache:
            # Cached path: everything was materialised by the loop in __init__.
            label = self.labels[index]
            bag = self.feature_bags[index]
            
        
            
            # label = Variable(Tensor(label))
            # label = torch.as_tensor(label)
            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
            wsi_name = self.wsi_names[index]
            batch_coords = self.coords[index]
            patient = self.patients[index]

            
            #random dropout
            #shuffle

            # feats = Variable(Tensor(feats))
            # return wsi, label, (wsi_name, batch_coords, patient)
        else:
            # Uncached path: read the bag from disk on every access.
            t = self.files[index]
            bag, label, (wsi_name, batch_coords, patient) = self.get_data(t)
            # label = torch.as_tensor(label)
            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
                # self.labels.append(label)
                # self.feature_bags.append(batch)
                # self.wsi_names.append(wsi_name)
                # self.name_batches.append(name_batch)
                # self.patients.append(patient)
        if self.mode != 'test':
            bag_size = bag.shape[0]
            
            # Keep at most max_bag_size*(1-drop_rate) randomly chosen tiles.
            bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
            out_bag = bag[bag_idxs, :]
            batch_coords = batch_coords[bag_idxs]
            # NOTE(review): after this zero-padding, out_bag has max_bag_size
            # rows while batch_coords keeps only len(bag_idxs) entries — any
            # consumer pairing them positionally will misalign; confirm coords
            # are only read in test mode.
            if out_bag.shape[0] < self.max_bag_size:
                out_bag = torch.cat((out_bag, torch.zeros(self.max_bag_size-out_bag.shape[0], out_bag.shape[1])))

        else: out_bag = bag

        return out_bag, label, (wsi_name, batch_coords, patient)
+
+if __name__ == '__main__':
+    
+    from pathlib import Path
+    import os
+    import time
+    # from fast_tensor_dl import FastTensorDataLoader
+    # from custom_resnet50 import resnet50_baseline
+    
+    
+
+    home = Path.cwd().parts[1]
+    train_csv = f'/{home}/ylan/DeepGraft_project/code/debug_train.csv'
+    data_root = f'/{home}/ylan/data/DeepGraft/224_128uM_annotated'
+    # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
+    # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
+    # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_decathlon_PAS_HE_Jones_norm_rest.json'
+    output_dir = f'/{data_root}/debug/augments'
+    os.makedirs(output_dir, exist_ok=True)
+
+    n_classes = 2
+
+    dataset = FeatureBagLoader(data_root, label_path=label_path, mode='train', cache=True, n_classes=n_classes)
+
+    test_dataset = FeatureBagLoader(data_root, label_path=label_path, mode='test', cache=False, n_classes=n_classes)
+
+    # print(dataset.get_labels(0))
+    a = int(len(dataset)* 0.8)
+    b = int(len(dataset) - a)
+    train_data, valid_data = random_split(dataset, [a, b])
+
+    train_dl = DataLoader(train_data, batch_size=1, sampler=ImbalancedDatasetSampler(train_data), num_workers=5)
+    valid_dl = DataLoader(valid_data, batch_size=1, num_workers=5)
+    test_dl = DataLoader(test_dataset)
+
+    print('train_dl: ', len(train_dl))
+    print('valid_dl: ', len(valid_dl))
+    print('test_dl: ', len(test_dl))
+
+
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    scaler = torch.cuda.amp.GradScaler()
+
+    # model_ft = resnet50_baseline(pretrained=True)
+    # for param in model_ft.parameters():
+    #     param.requires_grad = False
+    # model_ft.to(device)
+    # model = TransMIL(n_classes=n_classes).to(device)
+    
+
+    # print(dataset.get_labels(np.arange(len(dataset))))
+
+    c = 0
+    label_count = [0] *n_classes
+    epochs = 1
+    # print(len(dl))
+    # start = time.time()
+    for i in range(epochs):
+        start = time.time()
+        for item in tqdm(test_dl): 
+
+            # if c >= 10:
+            #     break
+            bag, label, (name, batch_coords, patient) = item
+            # print(bag.shape)
+            print(bag.shape, label)
+            # print(len(batch_names))
+            # print(label)
+            # print(batch_coords)
+            # print(name)
+            bag = bag.float().to(device)
+            # print(bag.shape)
+            # label = label.to(device)
+            # with torch.cuda.amp.autocast():
+            #     output = model(bag)
+            # c += 1
+        end = time.time()
+        print('Bag Time: ', end-start)
+
+    
\ No newline at end of file
diff --git a/code/datasets/feature_extractor.py b/code/datasets/feature_extractor.py
index 0057c8e..0f2f64d 100644
--- a/code/datasets/feature_extractor.py
+++ b/code/datasets/feature_extractor.py
@@ -11,6 +11,15 @@ import torchvision.transforms as transforms
 import torch.nn.functional as F
 import re
 from imgaug import augmenters as iaa
+import argparse
+
+def make_parse():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--augment', default=False, action='store_true')
+    parser.add_argument('--cohort', default='RU', type=str)
+    
+    args = parser.parse_args()
+    return args
 
 def chunker(seq, size):
     return (seq[pos:pos + size] for pos in range(0, len(seq), size))
@@ -21,11 +30,12 @@ def get_coords(batch_names): #ToDO: Change function for precise coords
     for tile_name in batch_names: 
         # print(tile_name)
         pos = re.findall(r'\((.*?)\)', tile_name)
-        x, y = pos[-1].split('_')
+        # pos = pos
+        x, y = pos[-1].replace('-', '_').split('_')
         coords.append((int(x),int(y)))
     return coords
 
-def augment(img):
+def iaa_augment(img):
 
     sometimes = lambda aug: iaa.Sometimes(0.5, aug, name="Random1")
     sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name="Random2")
@@ -53,13 +63,22 @@ def augment(img):
 
 if __name__ == '__main__':
 
+    torch.set_num_threads(8)
+    torch.manual_seed(2022)
+
+    args = make_parse()
+    
+    augment=args.augment
+    cohorts = [args.cohort]
+    print('Augment Data: ', augment)
+    print('Cohort: ', cohorts)
 
     home = Path.cwd().parts[1]
     
-    data_root = Path(f'/{home}/ylan/data/DeepGraft/224_128um_v2')
+    data_root = Path(f'/{home}/ylan/data/DeepGraft/224_128uM_annotated')
     # output_path = Path(f'/{home}/ylan/wsi_tools/debug/zarr')
-    cohorts = ['DEEPGRAFT_RA', 'Leuven'] #, 
-    # cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU'] #, 
+    # cohorts = ['RU', 'RA'] #, 
+    # cohorts = ['Aachen_Biopsy_Slides'] #, 
     # cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU', 'DEEPGRAFT_RA', 'Leuven'] #, 
     compressor = Blosc(cname='blosclz', clevel=3)
 
@@ -77,28 +96,34 @@ if __name__ == '__main__':
     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
     scaler = torch.cuda.amp.GradScaler()
     n_classes = 2
-    out_features = 1024
-    model_ft = ResNet.resnet50(num_classes=n_classes, mlp=False, two_branch=False, normlinear=True)
+    # out_features = 1024
+    model_ft = ResNet.resnet50(num_classes=1024, mlp=False, two_branch=False, normlinear=True)
+    
+    model_ft.fc = nn.Identity()
+    # print(model_ft)
+    # model_ft.fc = nn.Linear(2048, out_features)
     home = Path.cwd().parts[1]
-    model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
-    for param in model_ft.parameters():
-        param.requires_grad = False
-    for m in model_ft.modules():
-        if isinstance(m, torch.nn.modules.batchnorm.BatchNorm2d):
-            m.eval()
-            m.weight.requires_grad = False
-            m.bias.requires_grad = False
-    model_ft.fc = nn.Linear(2048, out_features)
+    model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=True)
+    # for param in model_ft.parameters():
+    #     param.requires_grad = False
+    # for m in model_ft.modules():
+    #     if isinstance(m, torch.nn.modules.batchnorm.BatchNorm2d):
+    #         m.eval()
+    #         m.weight.requires_grad = False
+    #         m.bias.requires_grad = False
+    # model_ft.fc = nn.Linear(2048, out_features)
     model_ft.eval()
     model_ft.to(device)
 
     batch_size = 100
 
+
     for f in data_root.iterdir():
         
         if f.stem in cohorts:
-            print(f)
-            fe_path = f / 'FEATURES_RETCCL'
+            # fe_path = Path(test_output_path) / 'FEATURES_RETCCL'
+            fe_path = f / 'FEATURES_RETCCL_2048'
+
             fe_path.mkdir(parents=True, exist_ok=True)
             
             # num_files = len(list((f / 'BLOCKS').iterdir()))
@@ -107,21 +132,27 @@ if __name__ == '__main__':
                 if Path(slide).is_dir(): 
                     if slide.suffix != '.zarr':
                         slide_list.append(slide)
+            if augment:
+                tqdm_len = len(slide_list)*5
+            else: tqdm_len = len(slide_list)
 
-            with tqdm(total=len(slide_list)) as pbar:
+            
+            with tqdm(total=tqdm_len) as pbar:
                 for slide in slide_list:
-                    # print('slide: ', slide)
 
-                    # run every slide 5 times for augments
-                    for n in range(5):
+                    
 
 
-                        output_path = fe_path / Path(str(slide.stem) + f'_aug{n}.zarr')
-                        if output_path.is_dir():
-                                pbar.update(1)
-                                print(output_path, ' skipped.')
-                                continue
-                        # else:
+                    # print('slide: ', slide)
+
+                    # run every slide 5 times for augments
+                    if not augment:
+                        output_path = fe_path / Path(str(slide.stem) + '.zarr')
+                        # if output_path.is_dir():
+                        #     pbar.update(1)
+                        #     print(output_path, ' skipped.')
+                        #     continue
+                            # else:
                         output_array = []
                         output_batch_names = []
                         for tile_path_batch in chunker(list(slide.iterdir()), batch_size):
@@ -130,9 +161,6 @@ if __name__ == '__main__':
                             for t in tile_path_batch:
                                 # for n in range(5):
                                 img = np.asarray(Image.open(str(t))).astype(np.uint8) #.astype(np.uint8)
-
-                                img = augment(img)
-
                                 img = val_transforms(img.copy()).to(device)
                                 batch_array.append(img)
 
@@ -142,8 +170,8 @@ if __name__ == '__main__':
                                 continue
                             else:
                                 batch_array = torch.stack(batch_array) 
-                                # with torch.cuda.amp.autocast():
-                                model_output = model_ft(batch_array).detach()
+                                with torch.cuda.amp.autocast():
+                                    model_output = model_ft(batch_array).detach()
                                 output_array.append(model_output)
                                 output_batch_names += batch_names 
                         if len(output_array) == 0:
@@ -152,35 +180,60 @@ if __name__ == '__main__':
                         else:
                             output_array = torch.cat(output_array, dim=0).cpu().numpy()
                             output_batch_coords = get_coords(output_batch_names)
-                            # print(output_batch_coords)
-                            # z = zarr.group()
-                            # data = z.create_group('data')
-                            # tile_names = z.create_group('tile_names')
-                            # d1 = data.create_dataset('bag', shape=output_array.shape, chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='i4')
-                            # d2 = tile_names.create_dataset(output_batch_coords, shape=[len(output_batch_coords), 2], chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='i4')
-
-                            # z['data'] = zarr.array(output_array, chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='float') # 1792 = 224*8
-                            # z['tile_names'] = zarr.array(output_batch_coords, chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='int32') # 1792 = 224*8
-                            # z.save
-                            # print(z['data'])
-                            # print(z['data'][:])
-                            # print(z['tile_names'][:])
-                            # zarr.save(output_path, z)
                             zarr.save_group(output_path, data=output_array, coords=output_batch_coords)
 
+                            # test eval mode!
                             # z_test = zarr.open(output_path, 'r')
-                            # print(z_test['data'][:])
                             # # print(z_test.tree())
                             
                             # if np.all(output_array== z_test['data'][:]):
-                            #     print('data true')
+                            #     print('data same')
+                            # else: print(slide)
                             # if np.all(z['tile_names'][:] == z_test['tile_names'][:]):
                             #     print('tile_names true')
                             #     print(output_path ' ')
                             # print(np.all(z[:] == z_test[:]))
-
-                        # np.save(f'{str(slide)}.npy', slide_np)
                             pbar.update(1)
+                    else:
+                        for n in range(5):
+                            # if n != 5:
+                            output_path = fe_path / Path(str(slide.stem) + f'_aug{n}.zarr')
+                            if output_path.is_dir():
+                                pbar.update(1)
+                                # print(output_path, ' skipped.')
+                                continue
+                            # else:
+                            output_array = []
+                            output_batch_names = []
+                            for tile_path_batch in chunker(list(slide.iterdir()), batch_size):
+                                batch_array = []
+                                batch_names = []
+                                for t in tile_path_batch:
+                                    # for n in range(5):
+                                    img = np.asarray(Image.open(str(t))).astype(np.uint8) #.astype(np.uint8)
+                                    img = iaa_augment(img)
+                                    img = val_transforms(img.copy()).to(device)
+                                    batch_array.append(img)
+
+                                    tile_name = t.stem
+                                    batch_names.append(tile_name)
+                                if len(batch_array) == 0:
+                                    continue
+                                else:
+                                    batch_array = torch.stack(batch_array) 
+                                    with torch.cuda.amp.autocast():
+                                        model_output = model_ft(batch_array).detach()
+                                    output_array.append(model_output)
+                                    output_batch_names += batch_names 
+                            if len(output_array) == 0:
+                                pbar.update(1)
+                                continue
+                            else:
+                                output_array = torch.cat(output_array, dim=0).cpu().numpy()
+                                output_batch_coords = get_coords(output_batch_names)
+                                zarr.save_group(output_path, data=output_array, coords=output_batch_coords)
+
+                                pbar.update(1)
             
 
                   
\ No newline at end of file
diff --git a/code/datasets/feature_extractor_2.py b/code/datasets/feature_extractor_2.py
deleted file mode 100644
index 0057c8e..0000000
--- a/code/datasets/feature_extractor_2.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import numpy as np
-from pathlib import Path
-from PIL import Image
-from tqdm import tqdm
-import zarr
-from numcodecs import Blosc
-import torch
-import torch.nn as nn
-import ResNet as ResNet 
-import torchvision.transforms as transforms
-import torch.nn.functional as F
-import re
-from imgaug import augmenters as iaa
-
-def chunker(seq, size):
-    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
-
-def get_coords(batch_names): #ToDO: Change function for precise coords
-    coords = []
-    
-    for tile_name in batch_names: 
-        # print(tile_name)
-        pos = re.findall(r'\((.*?)\)', tile_name)
-        x, y = pos[-1].split('_')
-        coords.append((int(x),int(y)))
-    return coords
-
-def augment(img):
-
-    sometimes = lambda aug: iaa.Sometimes(0.5, aug, name="Random1")
-    sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name="Random2")
-    sometimes3 = lambda aug: iaa.Sometimes(0.9, aug, name="Random3")
-    sometimes4 = lambda aug: iaa.Sometimes(0.9, aug, name="Random4")
-    sometimes5 = lambda aug: iaa.Sometimes(0.9, aug, name="Random5")
-
-    transforms = iaa.Sequential([
-        iaa.AddToHueAndSaturation(value=(-30, 30), name="MyHSV"), #13
-        sometimes2(iaa.GammaContrast(gamma=(0.85, 1.15), name="MyGamma")),
-        iaa.Fliplr(0.5, name="MyFlipLR"),
-        iaa.Flipud(0.5, name="MyFlipUD"),
-        sometimes(iaa.Rot90(k=1, keep_size=True, name="MyRot90")),
-        iaa.OneOf([
-            sometimes3(iaa.PiecewiseAffine(scale=(0.015, 0.02), cval=0, name="MyPiece")),
-            sometimes4(iaa.ElasticTransformation(alpha=(100, 200), sigma=20, cval=0, name="MyElastic")),
-            sometimes5(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, rotate=(-45, 45), shear=(-4, 4), cval=0, name="MyAffine"))
-        ], name="MyOneOf")
-    ])
-    seq_img_d = transforms.to_deterministic()
-    img = seq_img_d.augment_image(img)
-
-    return img
-
-
-if __name__ == '__main__':
-
-
-    home = Path.cwd().parts[1]
-    
-    data_root = Path(f'/{home}/ylan/data/DeepGraft/224_128um_v2')
-    # output_path = Path(f'/{home}/ylan/wsi_tools/debug/zarr')
-    cohorts = ['DEEPGRAFT_RA', 'Leuven'] #, 
-    # cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU'] #, 
-    # cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU', 'DEEPGRAFT_RA', 'Leuven'] #, 
-    compressor = Blosc(cname='blosclz', clevel=3)
-
-    val_transforms = transforms.Compose([
-            # 
-            transforms.ToTensor(),
-            transforms.Normalize(
-                mean=[0.485, 0.456, 0.406],
-                std=[0.229, 0.224, 0.225],
-            ),
-            # RangeNormalization(),
-        ])
-
-
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    scaler = torch.cuda.amp.GradScaler()
-    n_classes = 2
-    out_features = 1024
-    model_ft = ResNet.resnet50(num_classes=n_classes, mlp=False, two_branch=False, normlinear=True)
-    home = Path.cwd().parts[1]
-    model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=False)
-    for param in model_ft.parameters():
-        param.requires_grad = False
-    for m in model_ft.modules():
-        if isinstance(m, torch.nn.modules.batchnorm.BatchNorm2d):
-            m.eval()
-            m.weight.requires_grad = False
-            m.bias.requires_grad = False
-    model_ft.fc = nn.Linear(2048, out_features)
-    model_ft.eval()
-    model_ft.to(device)
-
-    batch_size = 100
-
-    for f in data_root.iterdir():
-        
-        if f.stem in cohorts:
-            print(f)
-            fe_path = f / 'FEATURES_RETCCL'
-            fe_path.mkdir(parents=True, exist_ok=True)
-            
-            # num_files = len(list((f / 'BLOCKS').iterdir()))
-            slide_list = []
-            for slide in (f / 'BLOCKS').iterdir():
-                if Path(slide).is_dir(): 
-                    if slide.suffix != '.zarr':
-                        slide_list.append(slide)
-
-            with tqdm(total=len(slide_list)) as pbar:
-                for slide in slide_list:
-                    # print('slide: ', slide)
-
-                    # run every slide 5 times for augments
-                    for n in range(5):
-
-
-                        output_path = fe_path / Path(str(slide.stem) + f'_aug{n}.zarr')
-                        if output_path.is_dir():
-                                pbar.update(1)
-                                print(output_path, ' skipped.')
-                                continue
-                        # else:
-                        output_array = []
-                        output_batch_names = []
-                        for tile_path_batch in chunker(list(slide.iterdir()), batch_size):
-                            batch_array = []
-                            batch_names = []
-                            for t in tile_path_batch:
-                                # for n in range(5):
-                                img = np.asarray(Image.open(str(t))).astype(np.uint8) #.astype(np.uint8)
-
-                                img = augment(img)
-
-                                img = val_transforms(img.copy()).to(device)
-                                batch_array.append(img)
-
-                                tile_name = t.stem
-                                batch_names.append(tile_name)
-                            if len(batch_array) == 0:
-                                continue
-                            else:
-                                batch_array = torch.stack(batch_array) 
-                                # with torch.cuda.amp.autocast():
-                                model_output = model_ft(batch_array).detach()
-                                output_array.append(model_output)
-                                output_batch_names += batch_names 
-                        if len(output_array) == 0:
-                            pbar.update(1)
-                            continue
-                        else:
-                            output_array = torch.cat(output_array, dim=0).cpu().numpy()
-                            output_batch_coords = get_coords(output_batch_names)
-                            # print(output_batch_coords)
-                            # z = zarr.group()
-                            # data = z.create_group('data')
-                            # tile_names = z.create_group('tile_names')
-                            # d1 = data.create_dataset('bag', shape=output_array.shape, chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='i4')
-                            # d2 = tile_names.create_dataset(output_batch_coords, shape=[len(output_batch_coords), 2], chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='i4')
-
-                            # z['data'] = zarr.array(output_array, chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='float') # 1792 = 224*8
-                            # z['tile_names'] = zarr.array(output_batch_coords, chunks=True, compressor = compressor, synchronizer=zarr.ThreadSynchronizer(), dtype='int32') # 1792 = 224*8
-                            # z.save
-                            # print(z['data'])
-                            # print(z['data'][:])
-                            # print(z['tile_names'][:])
-                            # zarr.save(output_path, z)
-                            zarr.save_group(output_path, data=output_array, coords=output_batch_coords)
-
-                            # z_test = zarr.open(output_path, 'r')
-                            # print(z_test['data'][:])
-                            # # print(z_test.tree())
-                            
-                            # if np.all(output_array== z_test['data'][:]):
-                            #     print('data true')
-                            # if np.all(z['tile_names'][:] == z_test['tile_names'][:]):
-                            #     print('tile_names true')
-                            #     print(output_path ' ')
-                            # print(np.all(z[:] == z_test[:]))
-
-                        # np.save(f'{str(slide)}.npy', slide_np)
-                            pbar.update(1)
-            
-
-                  
\ No newline at end of file
diff --git a/code/datasets/feature_extractor_annotated.ipynb b/code/datasets/feature_extractor_annotated.ipynb
new file mode 100644
index 0000000..2b34715
--- /dev/null
+++ b/code/datasets/feature_extractor_annotated.ipynb
@@ -0,0 +1,223 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from pathlib import Path\n",
+    "from PIL import Image\n",
+    "from tqdm import tqdm\n",
+    "import zarr\n",
+    "from numcodecs import Blosc\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import ResNet as ResNet \n",
+    "import torchvision.transforms as transforms\n",
+    "import torch.nn.functional as F\n",
+    "import re\n",
+    "from imgaug import augmenters as iaa\n",
+    "import argparse\n",
+    "import json"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def make_parse():\n",
+    "    parser = argparse.ArgumentParser()\n",
+    "    parser.add_argument('--augment', default=False, action='store_true')\n",
+    "    \n",
+    "    args, _ = parser.parse_known_args()  # ignore Jupyter kernel argv\n",
+    "    return args\n",
+    "\n",
+    "def chunker(seq, size):\n",
+    "    return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n",
+    "\n",
+    "def get_coords(batch_names): #ToDO: Change function for precise coords\n",
+    "    coords = []\n",
+    "    \n",
+    "    for tile_name in batch_names: \n",
+    "        # print(tile_name)\n",
+    "        pos = re.findall(r'\\((.*?)\\)', tile_name)\n",
+    "        x, y = pos[-1].split('_')\n",
+    "        coords.append((int(x),int(y)))\n",
+    "    return coords\n",
+    "\n",
+    "def iaa_augment(img):\n",
+    "\n",
+    "    sometimes = lambda aug: iaa.Sometimes(0.5, aug, name=\"Random1\")\n",
+    "    sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name=\"Random2\")\n",
+    "    sometimes3 = lambda aug: iaa.Sometimes(0.9, aug, name=\"Random3\")\n",
+    "    sometimes4 = lambda aug: iaa.Sometimes(0.9, aug, name=\"Random4\")\n",
+    "    sometimes5 = lambda aug: iaa.Sometimes(0.9, aug, name=\"Random5\")\n",
+    "\n",
+    "    transforms = iaa.Sequential([\n",
+    "        iaa.AddToHueAndSaturation(value=(-30, 30), name=\"MyHSV\"), #13\n",
+    "        sometimes2(iaa.GammaContrast(gamma=(0.85, 1.15), name=\"MyGamma\")),\n",
+    "        iaa.Fliplr(0.5, name=\"MyFlipLR\"),\n",
+    "        iaa.Flipud(0.5, name=\"MyFlipUD\"),\n",
+    "        sometimes(iaa.Rot90(k=1, keep_size=True, name=\"MyRot90\")),\n",
+    "        iaa.OneOf([\n",
+    "            sometimes3(iaa.PiecewiseAffine(scale=(0.015, 0.02), cval=0, name=\"MyPiece\")),\n",
+    "            sometimes4(iaa.ElasticTransformation(alpha=(100, 200), sigma=20, cval=0, name=\"MyElastic\")),\n",
+    "            sometimes5(iaa.Affine(scale={\"x\": (0.95, 1.05), \"y\": (0.95, 1.05)}, rotate=(-45, 45), shear=(-4, 4), cval=0, name=\"MyAffine\"))\n",
+    "        ], name=\"MyOneOf\")\n",
+    "    ])\n",
+    "    seq_img_d = transforms.to_deterministic()\n",
+    "    img = seq_img_d.augment_image(img)\n",
+    "\n",
+    "    return img\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "usage: ipykernel_launcher.py [-h] [--augment]\n",
+      "ipykernel_launcher.py: error: unrecognized arguments: --ip=127.0.0.1 --stdin=9040 --control=9038 --hb=9037 --Session.signature_scheme=\"hmac-sha256\" --Session.key=b\"fea321c1-c51d-4123-ac4e-21d7b6f0be68\" --shell=9039 --transport=\"tcp\" --iopub=9041 --f=/home/ylan/.local/share/jupyter/runtime/kernel-v2-8466dCu9m1xIy2SG.json\n"
+     ]
+    },
+    {
+     "ename": "SystemExit",
+     "evalue": "2",
+     "output_type": "error",
+     "traceback": [
+      "An exception has occurred, use %tb to see the full traceback.\n",
+      "\u001b[0;31mSystemExit\u001b[0m\u001b[0;31m:\u001b[0m 2\n"
+     ]
+    }
+   ],
+   "source": [
+    "torch.set_num_threads(8)\n",
+    "torch.manual_seed(2022)\n",
+    "\n",
+    "args = make_parse()\n",
+    "\n",
+    "augment=args.augment\n",
+    "print('Augment Data: ', augment)\n",
+    "\n",
+    "home = Path.cwd().parts[1]\n",
+    "data_root = Path(f'/{home}/ylan/data/DeepGraft/tissue_detection/224_128uM/images')\n",
+    "slide_patient_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict.json'\n",
+    "cohort_stain_path = f'/{home}/ylan/DeepGraft/training_tables/cohort_stain_dict.json'\n",
+    "with open(slide_patient_path, 'r') as f:\n",
+    "    slide_patient_dict = json.load(f)\n",
+    "with open(cohort_stain_path, 'r') as f:\n",
+    "    cohort_stain_dict = json.load(f)\n",
+    "# output_path = Path(f'/{home}/ylan/wsi_tools/debug/zarr')\n",
+    "# cohorts = ['DEEPGRAFT_RU'] #, \n",
+    "# cohorts = ['Aachen_Biopsy_Slides'] #, \n",
+    "# cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU', 'DEEPGRAFT_RA', 'Leuven'] #, \n",
+    "compressor = Blosc(cname='blosclz', clevel=3)\n",
+    "\n",
+    "val_transforms = transforms.Compose([\n",
+    "        # \n",
+    "        transforms.ToTensor(),\n",
+    "        transforms.Normalize(\n",
+    "            mean=[0.485, 0.456, 0.406],\n",
+    "            std=[0.229, 0.224, 0.225],\n",
+    "        ),\n",
+    "        # RangeNormalization(),\n",
+    "    ])\n",
+    "\n",
+    "\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "scaler = torch.cuda.amp.GradScaler()\n",
+    "n_classes = 2\n",
+    "# out_features = 1024\n",
+    "model_ft = ResNet.resnet50(num_classes=128, mlp=False, two_branch=False, normlinear=True)\n",
+    "\n",
+    "model_ft.fc = nn.Identity()\n",
+    "# print(model_ft)\n",
+    "# model_ft.fc = nn.Linear(2048, out_features)\n",
+    "home = Path.cwd().parts[1]\n",
+    "model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=True)\n",
+    "# for param in model_ft.parameters():\n",
+    "#     param.requires_grad = False\n",
+    "# for m in model_ft.modules():\n",
+    "#     if isinstance(m, torch.nn.modules.batchnorm.BatchNorm2d):\n",
+    "#         m.eval()\n",
+    "#         m.weight.requires_grad = False\n",
+    "#         m.bias.requires_grad = False\n",
+    "# model_ft.fc = nn.Linear(2048, out_features)\n",
+    "model_ft.eval()\n",
+    "model_ft.to(device)\n",
+    "\n",
+    "batch_size = 100"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31mRunning cells with 'Python 3.8.10 64-bit' requires ipykernel package.\n",
+      "\u001b[1;31mRun the following command to install 'ipykernel' into the Python environment. \n",
+      "\u001b[1;31mCommand: '/usr/bin/python3 -m pip install ipykernel -U --user --force-reinstall'"
+     ]
+    }
+   ],
+   "source": [
+    "patient_cohort_dict = {}\n",
+    "for cohort in cohort_stain_dict.keys():\n",
+    "    cohort_patient_list = list(cohort_stain_dict[cohort].keys())\n",
+    "    for patient in cohort_patient_list:\n",
+    "        patient_cohort_dict[patient] = cohort"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for f in data_root.iterdir():\n",
+    "    slide_name = f.stem.split('_', 1)[0]\n",
+    "    patient = slide_patient_dict[slide_name]"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.9.13 ('torch')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.8"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "7b7fb95db5714bbf59d6a04f6057e8fa5746fef9d16f5c42f2fdbc713170171a"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/code/datasets/feature_extractor_annotated.py b/code/datasets/feature_extractor_annotated.py
new file mode 100644
index 0000000..b6e6a78
--- /dev/null
+++ b/code/datasets/feature_extractor_annotated.py
@@ -0,0 +1,267 @@
+import numpy as np
+from pathlib import Path
+from PIL import Image
+from tqdm import tqdm
+import zarr
+from numcodecs import Blosc
+import torch
+import torch.nn as nn
+import ResNet as ResNet 
+import torchvision.transforms as transforms
+import torch.nn.functional as F
+import re
+from imgaug import augmenters as iaa
+import argparse
+import json
+
+def make_parse():
+    """Parse command-line options.
+
+    Returns:
+        argparse.Namespace with a single boolean field ``augment``
+        (True when --augment is passed, False otherwise).
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--augment', default=False, action='store_true')
+    return parser.parse_args()
+
+def chunker(seq, size):
+    """Yield successive ``size``-length slices of ``seq`` (last may be shorter)."""
+    return (seq[i:i + size] for i in range(0, len(seq), size))
+
+def get_coords(batch_names): #ToDO: Change function for precise coords
+    """Parse the (x, y) tile coordinates encoded in each tile-file stem.
+
+    Tile names end with a parenthesised coordinate pair such as
+    "slide_(12_34)"; the last parenthesised group in the name is used.
+
+    Args:
+        batch_names: list of tile name strings.
+    Returns:
+        list of (int, int) coordinate tuples, one per name.
+    """
+    coords = []
+    for tile_name in batch_names:
+        pos = re.findall(r'\((.*?)\)', tile_name)
+        # BUG FIX: the separator is '_' (see the identical helpers in
+        # feature_file_checker.py and the notebook); splitting on '-'
+        # never matched and raised ValueError on unpacking.
+        x, y = pos[-1].split('_')
+        coords.append((int(x), int(y)))
+    return coords
+
+def iaa_augment(img):
+    """Apply one random colour/geometry augmentation pass to a tile image.
+
+    A fresh deterministic sequence is sampled on every call, so each tile
+    receives independent random transforms.
+
+    Args:
+        img: H x W x C uint8 numpy image.
+    Returns:
+        Augmented uint8 numpy image; rotations keep the spatial size
+        (keep_size=True), other ops preserve shape by imgaug defaults.
+    """
+
+    # Sometimes-wrappers with different application probabilities
+    # (0.5 / 0.2 / 0.9 / 0.9 / 0.9 respectively).
+    sometimes = lambda aug: iaa.Sometimes(0.5, aug, name="Random1")
+    sometimes2 = lambda aug: iaa.Sometimes(0.2, aug, name="Random2")
+    sometimes3 = lambda aug: iaa.Sometimes(0.9, aug, name="Random3")
+    sometimes4 = lambda aug: iaa.Sometimes(0.9, aug, name="Random4")
+    sometimes5 = lambda aug: iaa.Sometimes(0.9, aug, name="Random5")
+
+    # Colour jitter and flips always considered; exactly one of the three
+    # geometric deformations (piecewise affine / elastic / affine) is chosen.
+    transforms = iaa.Sequential([
+        iaa.AddToHueAndSaturation(value=(-30, 30), name="MyHSV"), #13
+        sometimes2(iaa.GammaContrast(gamma=(0.85, 1.15), name="MyGamma")),
+        iaa.Fliplr(0.5, name="MyFlipLR"),
+        iaa.Flipud(0.5, name="MyFlipUD"),
+        sometimes(iaa.Rot90(k=1, keep_size=True, name="MyRot90")),
+        iaa.OneOf([
+            sometimes3(iaa.PiecewiseAffine(scale=(0.015, 0.02), cval=0, name="MyPiece")),
+            sometimes4(iaa.ElasticTransformation(alpha=(100, 200), sigma=20, cval=0, name="MyElastic")),
+            sometimes5(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, rotate=(-45, 45), shear=(-4, 4), cval=0, name="MyAffine"))
+        ], name="MyOneOf")
+    ])
+    # to_deterministic() freezes the sampled parameters for this one image.
+    seq_img_d = transforms.to_deterministic()
+    img = seq_img_d.augment_image(img)
+
+    return img
+
+
+# ---------------------------------------------------------------------------
+# Script setup: seed, CLI flags, data paths, normalisation transform, and the
+# RetCCL ResNet-50 backbone used as a frozen feature extractor.
+# ---------------------------------------------------------------------------
+torch.set_num_threads(8)
+torch.manual_seed(2022)
+
+args = make_parse()
+
+augment=args.augment
+print('Augment Data: ', augment)
+
+# Paths assume the cluster layout /<home>/ylan/... — TODO confirm on new hosts.
+home = Path.cwd().parts[1]
+data_root = Path(f'/{home}/ylan/data/DeepGraft/tissue_detection/224_128uM/training/images')
+output_dataset_path = Path(f'/{home}/ylan/data/DeepGraft/224_128uM_annotated/')
+slide_patient_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict.json'
+cohort_stain_path = f'/{home}/ylan/DeepGraft/training_tables/cohort_stain_dict.json'
+
+# slide name -> patient id, and cohort -> patient -> stain lookup tables
+with open(slide_patient_path, 'r') as f:
+    slide_patient_dict = json.load(f)
+with open(cohort_stain_path, 'r') as f:
+    cohort_stain_dict = json.load(f)
+# output_path = Path(f'/{home}/ylan/wsi_tools/debug/zarr')
+# cohorts = ['DEEPGRAFT_RU'] #, 
+# cohorts = ['Aachen_Biopsy_Slides'] #, 
+# cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU', 'DEEPGRAFT_RA', 'Leuven'] #, 
+compressor = Blosc(cname='blosclz', clevel=3)
+
+# ImageNet mean/std normalisation applied after optional augmentation.
+val_transforms = transforms.Compose([
+        # 
+        transforms.ToTensor(),
+        transforms.Normalize(
+            mean=[0.485, 0.456, 0.406],
+            std=[0.229, 0.224, 0.225],
+        ),
+        # RangeNormalization(),
+    ])
+
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+scaler = torch.cuda.amp.GradScaler()
+n_classes = 2
+# out_features = 1024
+model_ft = ResNet.resnet50(num_classes=1024, mlp=False, two_branch=False, normlinear=True)
+
+# Replacing fc with Identity exposes the backbone's penultimate features
+# (folder name FEATURES_RETCCL_2048 suggests 2048-dim — TODO confirm).
+model_ft.fc = nn.Identity()
+# print(model_ft)
+# model_ft.fc = nn.Linear(2048, out_features)
+home = Path.cwd().parts[1]
+model_ft.load_state_dict(torch.load(f'/{home}/ylan/workspace/TransMIL-DeepGraft/code/models/ckpt/retccl_best_ckpt.pth'), strict=True)
+# for param in model_ft.parameters():
+#     param.requires_grad = False
+# for m in model_ft.modules():
+#     if isinstance(m, torch.nn.modules.batchnorm.BatchNorm2d):
+#         m.eval()
+#         m.weight.requires_grad = False
+#         m.bias.requires_grad = False
+# model_ft.fc = nn.Linear(2048, out_features)
+model_ft.eval()
+model_ft.to(device)
+
+batch_size = 100
+
+
+skipped_slides = []
+
+# Invert cohort_stain_dict into patient id -> cohort name.
+patient_cohort_dict = {}
+for cohort, patients in cohort_stain_dict.items():
+    for patient in patients.keys():
+        patient_cohort_dict[patient] = cohort
+
+print('patient_cohort_dict completed.')
+
+# Group every tile path as cohort -> slide name -> [tile paths].
+# Tiles whose slide is missing from slide_patient_dict are recorded
+# (once per tile) in skipped_slides.
+cohort_slide_tiles_dict = {}
+for tile in data_root.iterdir():
+    slide_name = tile.stem.rsplit('_', 1)[0]
+    if slide_name not in slide_patient_dict:
+        skipped_slides.append(slide_name)
+        continue
+    cohort = patient_cohort_dict[slide_patient_dict[slide_name]]
+    slide_map = cohort_slide_tiles_dict.setdefault(cohort, {})
+    slide_map.setdefault(slide_name, []).append(tile)
+
+print('cohort_slide_tiles_dict complete.')
+for cohort in cohort_slide_tiles_dict.keys():
+    print(cohort)
+    print(len(list(cohort_slide_tiles_dict[cohort].keys())))
+
+def _extract_slide_features(tile_paths, do_augment):
+    """Embed every tile of one slide with the frozen backbone.
+
+    Args:
+        tile_paths: list of tile image Paths belonging to one slide.
+        do_augment: apply the imgaug pipeline before normalisation.
+    Returns:
+        (features, coords): features is an (n_tiles, feat_dim) numpy array
+        on CPU and coords the parsed (x, y) tile positions, or (None, None)
+        when the slide yielded no tiles.
+    """
+    output_array = []
+    output_batch_names = []
+    for tile_path_batch in chunker(tile_paths, batch_size):
+        batch_array = []
+        batch_names = []
+        for t in tile_path_batch:
+            img = np.asarray(Image.open(str(t))).astype(np.uint8)
+            if do_augment:
+                img = iaa_augment(img)
+            batch_array.append(val_transforms(img.copy()).to(device))
+            batch_names.append(t.stem)
+        if not batch_array:
+            continue
+        batch_tensor = torch.stack(batch_array)
+        with torch.cuda.amp.autocast():
+            model_output = model_ft(batch_tensor).detach()
+        output_array.append(model_output)
+        output_batch_names += batch_names
+    if not output_array:
+        return None, None
+    features = torch.cat(output_array, dim=0).cpu().numpy()
+    return features, get_coords(output_batch_names)
+
+
+# Number of augmentation passes per slide when --augment is set.
+N_AUG = 4
+
+for c in cohort_slide_tiles_dict.keys():
+
+    fe_path = output_dataset_path / c / 'FEATURES_RETCCL_2048'
+    fe_path.mkdir(parents=True, exist_ok=True)
+
+    slide_list = list(cohort_slide_tiles_dict[c].keys())
+
+    # BUG FIX: total previously used *5 while only range(4) passes run,
+    # so the bar could never complete in augment mode.
+    tqdm_len = len(slide_list) * N_AUG if augment else len(slide_list)
+
+    with tqdm(total=tqdm_len) as pbar:
+        for slide in slide_list:
+            # One plain pass, or N_AUG independently augmented passes.
+            passes = range(N_AUG) if augment else [None]
+            for n in passes:
+                if n is None:
+                    output_path = fe_path / Path(slide + '.zarr')
+                else:
+                    # BUG FIX: augmented passes previously reused the
+                    # un-augmented file name, so only one store was ever
+                    # written and the remaining passes were skipped.
+                    output_path = fe_path / Path(slide + f'_aug{n}.zarr')
+                if output_path.is_dir():
+                    pbar.update(1)
+                    if n is None:
+                        print(output_path, ' skipped.')
+                    continue
+
+                features, coords = _extract_slide_features(
+                    cohort_slide_tiles_dict[c][slide], do_augment=n is not None)
+                if features is None:
+                    pbar.update(1)
+                    continue
+
+                zarr.save_group(output_path, data=features, coords=coords)
+
+                if n is None:
+                    # Sanity-check the round trip through zarr.
+                    z_test = zarr.open(output_path, 'r')
+                    # BUG FIX: was `np.all(features != stored)`, which is
+                    # only True when *every* element differs and therefore
+                    # almost never fired; flag any mismatch instead.
+                    if not np.all(features == z_test['data'][:]):
+                        print(slide)
+                pbar.update(1)
+
+
+print('skipped slides:')
+print(skipped_slides)
diff --git a/code/datasets/feature_file_checker.py b/code/datasets/feature_file_checker.py
new file mode 100644
index 0000000..e594e4b
--- /dev/null
+++ b/code/datasets/feature_file_checker.py
@@ -0,0 +1,82 @@
+import numpy as np
+from pathlib import Path
+from PIL import Image
+from tqdm import tqdm
+import zarr
+from numcodecs import Blosc
+import torch
+import torch.nn as nn
+import ResNet as ResNet 
+import torchvision.transforms as transforms
+import torch.nn.functional as F
+import re
+from imgaug import augmenters as iaa
+
+def chunker(seq, size):
+    """Yield successive ``size``-length slices of ``seq`` (last may be shorter)."""
+    return (seq[i:i + size] for i in range(0, len(seq), size))
+
+def get_coords(batch_names): #ToDO: Change function for precise coords
+    """Extract the (x, y) tile coordinates encoded in each tile name.
+
+    The last parenthesised "x_y" group of each name is parsed to ints.
+    """
+    parsed = []
+    for tile_name in batch_names:
+        groups = re.findall(r'\((.*?)\)', tile_name)
+        x_str, y_str = groups[-1].split('_')
+        parsed.append((int(x_str), int(y_str)))
+    return parsed
+
+
+
+if __name__ == '__main__':
+
+    # Audit which RetCCL feature stores are missing for the listed cohorts:
+    # counts and prints every expected .zarr path that does not exist yet.
+    home = Path.cwd().parts[1]
+
+    data_root = Path(f'/{home}/ylan/data/DeepGraft/224_128um_v2')
+    # cohorts = ['Aachen_Biopsy_Slides', 'DEEPGRAFT_RU', 'DEEPGRAFT_RA', 'Leuven']
+    cohorts = ['DEEPGRAFT_RU']
+
+    for f in data_root.iterdir():
+        if f.stem not in cohorts:
+            continue
+        print(f)
+        fe_path = f / 'FEATURES_RETCCL'
+        fe_path.mkdir(parents=True, exist_ok=True)
+
+        # Every slide directory under BLOCKS that is not itself a .zarr store.
+        slide_list = [s for s in (f / 'BLOCKS').iterdir()
+                      if s.is_dir() and s.suffix != '.zarr']
+        print(len(slide_list))
+
+        counter = 0  # number of missing feature stores
+        # BUG FIX: total was len(slide_list) although 6 updates happen per
+        # slide, so the bar overflowed its total.
+        with tqdm(total=len(slide_list) * 6) as pbar:
+            for slide in slide_list:
+                # Expect 5 augmented stores (_aug0.._aug4) plus one plain store.
+                for n in range(6):
+                    if n != 5:
+                        output_path = fe_path / Path(str(slide.stem) + f'_aug{n}.zarr')
+                    else:
+                        output_path = fe_path / Path(str(slide.stem) + '.zarr')
+                    if output_path.is_dir():
+                        pbar.update(1)
+                        continue
+                    counter += 1
+                    print(output_path)
+                    pbar.update(1)
+        print(counter)
+
+                  
\ No newline at end of file
diff --git a/code/datasets/monai_loader.py b/code/datasets/monai_loader.py
new file mode 100644
index 0000000..0bbe803
--- /dev/null
+++ b/code/datasets/monai_loader.py
@@ -0,0 +1,179 @@
+import numpy as np
+import collections.abc
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+
+from monai.config import KeysCollection
+from monai.data import Dataset, load_decathlon_datalist, PersistentDataset
+from monai.data.wsi_reader import WSIReader, CuCIMWSIReader
+# from monai.data.image_reader import CuCIMWSIReader
+from monai.networks.nets import milmodel
+from monai.transforms import (
+    Compose,
+    GridPatchd,
+    LoadImaged,
+    LoadImage,
+    MapTransform,
+    RandFlipd,
+    RandGridPatchd,
+    RandRotate90d,
+    ScaleIntensityRanged,
+    SplitDimd,
+    ToTensord,
+)
+from sklearn.metrics import cohen_kappa_score
+from torch.cuda.amp import GradScaler, autocast
+from torch.utils.data.dataloader import default_collate
+from torch.utils.data.distributed import DistributedSampler
+from torch.utils.tensorboard import  SummaryWriter
+import json
+from pathlib import Path
+import time
+
+class LabelEncodeIntegerGraded(MapTransform):
+    """
+    Convert an integer label to encoded array representation of length num_classes,
+    with 1 filled in up to label index, and 0 otherwise. For example for num_classes=5,
+    embedding of 2 -> (1,1,0,0,0)
+    Args:
+        num_classes: the number of classes to convert to encoded format.
+        keys: keys of the corresponding items to be transformed. Defaults to ``'label'``.
+        allow_missing_keys: don't raise exception if key is missing.
+    """
+
+    def __init__(
+        self,
+        num_classes: int,
+        keys: KeysCollection = "label",
+        allow_missing_keys: bool = False,
+    ):
+        super().__init__(keys, allow_missing_keys)
+        self.num_classes = num_classes
+
+    def __call__(self, data):
+
+        d = dict(data)
+        for key in self.keys:
+            label = int(d[key])
+
+            lz = np.zeros(self.num_classes, dtype=np.float32)
+            lz[:label] = 1.0
+            # alternative oneliner lz=(np.arange(self.num_classes)<int(label)).astype(np.float32) #same oneliner
+            d[key] = lz
+
+        return d
+
+def list_data_collate(batch: collections.abc.Sequence):
+    # print(f"{i} = {item['image'].shape=} >> {item['image'].keys=}")
+    for i, item in enumerate(batch):
+        data = item[0]
+        data["image"] = torch.stack([ix["image"] for ix in item], dim=0)
+        # data["patch_location"] = torch.stack([ix["patch_location"] for ix in item], dim=0)
+        batch[i] = data
+    return default_collate(batch)
+
+
+
+
+
+if __name__ == '__main__':
+
+    num_classes = 2
+    batch_size=1
+    tile_size = 224
+    tile_count = 1000
+    home = Path.cwd().parts[1]
+    data_root = f'/{home}/ylan/DeepGraft/'
+    # labels = [0]
+    # data_root = f'/{home}/public/DeepGraft/Aachen_Biopsy_Slides_Extended'
+    data = {"training": [{
+        "image": 'Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs', 
+        "label": 0
+        }, {
+        "image": 'Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs', 
+        "label": 0
+        }, {
+        "image": 'Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs', 
+        "label": 0
+        }],
+        "validation": [{
+        "image": 'Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs', 
+        "label": 0
+        }]
+    }
+    with open('monai_test.json', 'w') as jf:
+        json.dump(data, jf)
+    json_data_path = f'/{home}/ylan/DeepGraft/training_tables/dg_decathlon_PAS_HE_Jones_norm_rest.json'
+
+    training_list = load_decathlon_datalist(
+        data_list_file_path=json_data_path,
+        data_list_key="training",
+        base_dir=data_root,
+    )
+
+    train_transform = Compose(
+        [
+            LoadImaged(keys=["image"], reader=WSIReader, backend="cucim", dtype=np.uint8, level=0, image_only=True, num_workers=8),
+            LabelEncodeIntegerGraded(keys=["label"], num_classes=num_classes),
+            RandGridPatchd(
+                keys=["image"],
+                patch_size=(tile_size, tile_size),
+                threshold=0.999 * 3 * 255 * tile_size * tile_size,
+                num_patches=None,
+                sort_fn="min",
+                pad_mode=None,
+                constant_values=255,
+            ),
+            SplitDimd(keys=["image"], dim=0, keepdim=False, list_output=True),
+            RandFlipd(keys=["image"], spatial_axis=0, prob=0.5),
+            RandFlipd(keys=["image"], spatial_axis=1, prob=0.5),
+            RandRotate90d(keys=["image"], prob=0.5),
+            ScaleIntensityRanged(keys=["image"], a_min=np.float32(255), a_max=np.float32(0)),
+            ToTensord(keys=["image", "label"]),
+        ]
+    )
+    train_data_list = data['training']
+    # dataset_train = Dataset(data=training_list)
+    dataset_train = Dataset(data=training_list, transform=train_transform)
+    # persistent_dataset = PersistentDataset(data=training_list, transform=train_transform, cache_dir='/home/ylan/workspace/test')
+    
+
+    train_loader = torch.utils.data.DataLoader(
+        dataset_train,
+        batch_size=batch_size,
+        shuffle=True,
+        num_workers=1,
+        pin_memory=True,
+        sampler=None,
+        collate_fn=list_data_collate,
+    )
+
+    print(len(train_loader))
+    start = time.time()
+    count = 0
+
+    # train_transform = LoadImage(reader=WSIReader, backend='openslide', level=3)
+    # filename = '/home/ylan/DeepGraft/DEEPGRAFT_RU/T19-01474_I1_HE 10_959004.ndpi'
+    # X = train_transform(filename)
+    # print(X)
+    # img, meta = reader.read(data='/home/ylan/DeepGraft/DEEPGRAFT_RU/T19-01474_I1_HE 10_959004.ndpi')
+
+    # print(meta)
+
+    for idx, batch_data in enumerate(train_loader):
+        # print(batch_data)
+        if count > 10: 
+            break
+        data, target = batch_data["image"], batch_data["label"]
+        print(target)
+        count += 1
+    end = time.time()
+    print('Time: ', end-start)
+
+    # image_reader = WSIReader(backend='cucim')
+    # for i in training_list:
+    #     # print(i)
+    #     wsi = image_reader.read(i['image'])
+    #     img_data, meta_data = image_reader.get_data(wsi)
+    #     print(meta_data)
\ No newline at end of file
diff --git a/code/datasets/simple_jpg_dataloader.py b/code/datasets/simple_jpg_dataloader.py
index 332920a..c5e349f 100644
--- a/code/datasets/simple_jpg_dataloader.py
+++ b/code/datasets/simple_jpg_dataloader.py
@@ -20,8 +20,8 @@ from imgaug import augmenters as iaa
 from torchsampler import ImbalancedDatasetSampler
 
 
-class FeatureBagLoader(data_utils.Dataset):
-    def __init__(self, file_path, label_path, mode, n_classes, load_data=False, data_cache_size=100, max_bag_size=1000):
+class JPGBagLoader(data_utils.Dataset):
+    def __init__(self, file_path, label_path, mode, n_classes, load_data=False, data_cache_size=100, max_bag_size=1000, cache=False):
         super().__init__()
 
         self.data_info = []
@@ -35,7 +35,7 @@ class FeatureBagLoader(data_utils.Dataset):
         self.label_path = label_path
         self.n_classes = n_classes
         self.max_bag_size = max_bag_size
-        self.min_bag_size = 120
+        self.min_bag_size = 50
         self.empty_slides = []
         self.corrupt_slides = []
         self.cache = True
@@ -222,7 +222,7 @@ class FeatureBagLoader(data_utils.Dataset):
         if self.cache:
             label = self.labels[index]
             wsi = self.features[index]
-            label = Variable(Tensor(label))
+            label = int(label)
             wsi_name = self.wsi_names[index]
             name_batch = self.name_batches[index]
             patient = self.patients[index]
@@ -231,13 +231,14 @@ class FeatureBagLoader(data_utils.Dataset):
         else:
             if self.mode=='train':
                 batch, label, (wsi_name, name_batch, patient) = self.get_data(self.files[index])
-                label = Variable(Tensor(label))
+                # label = Variable(Tensor(label))
+
                 # wsi = Variable(Tensor(wsi_batch))
                 out_batch = []
                 seq_img_d = self.train_transforms.to_deterministic()
                 for img in batch: 
                     img = img.numpy().astype(np.uint8)
-                    img = seq_img_d.augment_image(img)
+                    # img = seq_img_d.augment_image(img)
                     img = self.val_transforms(img.copy())
                     out_batch.append(img)
                 out_batch = torch.stack(out_batch)
@@ -278,7 +279,7 @@ if __name__ == '__main__':
 
     n_classes = 2
 
-    dataset = FeatureBagLoader(data_root, label_path=label_path, mode='train', load_data=False, n_classes=n_classes)
+    dataset = JPGBagLoader(data_root, label_path=label_path, mode='train', load_data=False, n_classes=n_classes)
 
     # print(dataset.get_labels(0))
     a = int(len(dataset)* 0.8)
@@ -311,7 +312,7 @@ if __name__ == '__main__':
         bag, label, (name, batch_names, patient) = item
         # print(bag.shape)
         # print(len(batch_names))
-        
+        print(label)
         bag = bag.squeeze(0).float().to(device)
         label = label.to(device)
         with torch.cuda.amp.autocast():
diff --git a/code/datasets/test_normalization.ipynb b/code/datasets/test_normalization.ipynb
new file mode 100644
index 0000000..c9e0a6b
--- /dev/null
+++ b/code/datasets/test_normalization.ipynb
@@ -0,0 +1,195 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# from simple_jpg_dataloader import JPGBagLoader\n",
+    "import torch\n",
+    "from torch.utils.data import random_split, DataLoader\n",
+    "from pathlib import Path\n",
+    "import numpy as np\n",
+    "import random\n",
+    "from torchvision.transforms import transforms\n",
+    "import matplotlib.pyplot as plt\n",
+    "from PIL import Image\n",
+    "import cv2\n",
+    "import json"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "home = Path.cwd().parts[1]\n",
+    "label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'\n",
+    "data_root = f'/{home}/ylan/data/DeepGraft/224_128uM_annotated'\n",
+    "n_classes = 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "val_transforms = transforms.Compose([\n",
+    "    # \n",
+    "    transforms.ToTensor(),\n",
+    "    transforms.Normalize(\n",
+    "        mean=[0.485, 0.456, 0.406],\n",
+    "        std=[0.229, 0.224, 0.225],\n",
+    "    ),\n",
+    "    # RangeNormalization(),\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def visualize(cohort):\n",
+    "\n",
+    "    cohort_path = Path(data_root) / cohort / 'BLOCKS'\n",
+    "    print(cohort_path)\n",
+    "    cohort_slides = list(Path(cohort_path).iterdir())\n",
+    "    random_idx = random.sample(range(0, len(cohort_slides)), 10)\n",
+    "    random_slides = [cohort_slides[i] for i in random_idx]\n",
+    "    print(random_slides)\n",
+    "\n",
+    "    fig = plt.figure(figsize=(100,100))\n",
+    "    columns = 10\n",
+    "    rows = 10\n",
+    "\n",
+    "    for i, slide in enumerate(random_slides):\n",
+    "        tile_list = list(slide.iterdir())\n",
+    "        if len(tile_list) < 10:\n",
+    "            # continue\n",
    "            tile_list = list(cohort_slides[random.randint(0,len(cohort_slides)-1)].iterdir())\n",
+    "        random_idx = random.sample(range(0, len(tile_list)), 10)\n",
+    "        for j, tile_path in enumerate([tile_list[i] for i in random_idx]):\n",
+    "            img = np.asarray(Image.open(tile_path)).astype(np.uint8)\n",
+    "            img = img.astype(np.uint8)\n",
+    "            img = val_transforms(img.copy())\n",
+    "            img = ((img-img.min())/(img.max()-img.min()))*255\n",
+    "            img = img.numpy().astype(np.uint8).transpose(1,2,0)\n",
+    "            img = Image.fromarray(img)\n",
+    "            img = img.convert('RGB')\n",
+    "            # print((i+1)*rows+j)\n",
+    "            fig.add_subplot(rows, columns, (i)*rows+(j+1))\n",
+    "            # fig.add_subplot(rows, columns, (i+1)*rows+(j+1))\n",
+    "            plt.imshow(img)\n",
+    "    plt.show\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def hexencode(rgb):\n",
+    "    r=rgb[0]\n",
+    "    g=rgb[1]\n",
+    "    b=rgb[2]\n",
+    "    return '#%02x%02x%02x' % (r,g,b)\n",
+    "\n",
+    "def normalize(slides):\n",
+    "\n",
+    "    # cohort_path = Path(data_root) / 'debug' / slide\n",
+    "    # # print(cohort_path)\n",
+    "    # cohort_slides = list(Path(cohort_path).iterdir())\n",
+    "    # random_idx = random.sample(range(0, len(cohort_slides)), 5)\n",
+    "    # random_slides = [cohort_slides[i] for i in random/_idx]\n",
+    "    # print(random_slides)\n",
+    "\n",
+    "    fig = plt.figure(figsize=(100,100))\n",
+    "    columns = 10\n",
+    "    rows = 10\n",
+    "\n",
+    "    for i, slide in enumerate(slides):\n",
+    "        slide_path = Path(data_root) / 'debug' / slide\n",
+    "        tile_list = list(slide_path.iterdir())\n",
+    "        if len(tile_list) < 10:\n",
+    "            # continue\n",
    "            tile_list = list(cohort_slides[random.randint(0,len(cohort_slides)-1)].iterdir())  # NOTE(review): cohort_slides is undefined in normalize() - NameError if this branch is reached\n",
+    "        random_idx = random.sample(range(0, len(tile_list)), 5)\n",
+    "        for j, tile_path in enumerate([tile_list[i] for i in random_idx]):\n",
+    "            # print(tile_path)\n",
+    "            img = np.asarray(Image.open(tile_path)).astype(np.uint8)\n",
+    "            img = img.astype(np.uint8)\n",
+    "            img = val_transforms(img.copy())\n",
+    "            img = ((img-img.min())/(img.max()-img.min()))*255\n",
+    "            img_np = img.numpy().astype(np.uint8).transpose(1,2,0)\n",
+    "            img = Image.fromarray(img_np)\n",
+    "            img = img.convert('RGB')\n",
+    "            # print((i+1)*rows+j)\n",
+    "            # fig.add_subplot(rows, columns, (i*2)*rows+(j+1))\n",
+    "            # # fig.add_subplot(rows, columns, (i+1)*rows+(j+1))\n",
+    "            # plt.imshow(img)\n",
+    "\n",
+    "            color = ('b','g','r')\n",
+    "            fig.add_subplot(rows, columns, (i*2)*rows+(j+1))\n",
+    "            for i,col in enumerate(color):\n",
+    "                histr = cv2.calcHist([img_np],[i],None,[256],[0,256])\n",
+    "                plt.plot(histr,color = col)\n",
+    "                plt.xlim([0,256])\n",
+    "            plt.show\n",
+    "            # plt.imshow(img)\n",
+    "\n",
+    "        \n",
+    "    plt.show\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "json_path = f'/{home}/ylan/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'\n",
+    "with open(json_path, 'r') as jf:\n",
    "    split_dict = json.load(jf)\n",
+    "\n",
+    "print(split_dict)\n",
+    "\n",
+    "slides = ['DEEPGRAFT_RA/RA0002_PASD_jkers_PASD_20180829_142406', 'DEEPGRAFT_RU/RU0001_PASD_jke_PASD_20200129_122805_BIG', 'Aachen_Biopsy_Slides/Aachen_KiBiDatabase_KiBiAcALSZ690_01_004_PAS']\n",
+    "\n",
+    "# normalize(slides)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.8.8 ('torch')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.13"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "7b7fb95db5714bbf59d6a04f6057e8fa5746fef9d16f5c42f2fdbc713170171a"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/code/datasets/zarr_feature_dataloader.py b/code/datasets/zarr_feature_dataloader.py
index b244b87..9387e13 100644
--- a/code/datasets/zarr_feature_dataloader.py
+++ b/code/datasets/zarr_feature_dataloader.py
@@ -22,11 +22,11 @@ from PIL import Image
 
 
 class ZarrFeatureBagLoader(data.Dataset):
-    def __init__(self, file_path, label_path, mode, n_classes, cache=False, data_cache_size=100, max_bag_size=1000):
+    def __init__(self, file_path, label_path, mode, n_classes, cache=False, data_cache_size=50, max_bag_size=1000):
         super().__init__()
 
         self.data_info = []
-        self.data_cache = {}
+        self.data_cache = []
         self.slideLabelDict = {}
         self.files = []
         self.data_cache_size = data_cache_size
@@ -39,17 +39,23 @@ class ZarrFeatureBagLoader(data.Dataset):
         self.min_bag_size = 120
         self.empty_slides = []
         self.corrupt_slides = []
-        self.cache = True
-        
+        self.cache = cache
+        self.drop_rate=0.1
+        self.cache=True
+        print('mode: ', self.mode)
         # read labels and slide_path from csv
         with open(self.label_path, 'r') as f:
-            temp_slide_label_dict = json.load(f)[mode]
+            temp_slide_label_dict = json.load(f)[self.mode]
             # print(len(temp_slide_label_dict))
             for (x, y) in temp_slide_label_dict:
                 x = Path(x).stem
                 # x_complete_path = Path(self.file_path)/Path(x)
                 for cohort in Path(self.file_path).iterdir():
-                    x_complete_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL' / (str(x) + '.zarr')
+                    if self.mode == 'test':
+                        x_complete_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL_GAN' / (str(x) + '.zarr')
+                    else:
+                        x_complete_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL' / (str(x) + '.zarr')
+                    print(x_complete_path)
                     if x_complete_path.is_dir():
                         # if len(list(x_complete_path.iterdir())) > self.min_bag_size:
                         # # print(x_complete_path)
@@ -66,26 +72,65 @@ class ZarrFeatureBagLoader(data.Dataset):
         self.wsi_names = []
         self.name_batches = []
         self.patients = []
-        if self.cache:
-            for t in tqdm(self.files):
-                # zarr_t = str(t) + '.zarr'
-                batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
-
-                self.labels.append(label)
-                self.feature_bags.append(batch)
-                self.wsi_names.append(wsi_name)
-                self.name_batches.append(name_batch)
-                self.patients.append(patient)
-
-    def get_data(self, file_path, drop_rate=0.1):
-        
-        batch_names=[] #add function for name_batch read out
+        for t in tqdm(self.files):
+            self._add_data_infos(t, cache=cache)
+
+
+        print('data_cache_size: ', self.data_cache_size)
+        print('data_info: ', len(self.data_info))
+        # if self.cache:
+        #     print('Loading data into cache.')
+        #     for t in tqdm(self.files):
+        #         # zarr_t = str(t) + '.zarr'
+        #         batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+
+        #         self.labels.append(label)
+        #         self.feature_bags.append(batch)
+        #         self.wsi_names.append(wsi_name)
+        #         self.name_batches.append(name_batch)
+        #         self.patients.append(patient)
+        # else: 
+            
 
+    def _add_data_infos(self, file_path, cache):
+
+        # if cache:
         wsi_name = Path(file_path).stem
-        if wsi_name in self.slideLabelDict:
-            label = self.slideLabelDict[wsi_name]
-            
-            patient = self.slide_patient_dict[wsi_name]
+        # if wsi_name in self.slideLabelDict:
+        label = self.slideLabelDict[wsi_name]
+        patient = self.slide_patient_dict[wsi_name]
+        idx = -1
+        self.data_info.append({'data_path': file_path, 'label': label, 'name': wsi_name, 'patient': patient, 'cache_idx': idx})
+
+    def get_data(self, i):
+
+        fp = self.data_info[i]['data_path']
+        idx = self.data_info[i]['cache_idx']
+        if idx == -1:
+
+        # if fp not in self.data_cache:
+            self._load_data(fp)
+        
+        
+        cache_idx = self.data_info[i]['cache_idx']
+        label = self.data_info[i]['label']
+        name = self.data_info[i]['name']
+        patient = self.data_info[i]['patient']
+
+        return self.data_cache[cache_idx], label, name, patient
+        # return self.data_cache[fp][cache_idx], label, name, patient
+        
+
+
+    def _load_data(self, file_path):
+        
+
+        # batch_names=[] #add function for name_batch read out
+        # wsi_name = Path(file_path).stem
+        # if wsi_name in self.slideLabelDict:
+        #     label = self.slideLabelDict[wsi_name]
+        #     patient = self.slide_patient_dict[wsi_name]
+
         z = zarr.open(file_path, 'r')
         np_bag = np.array(z['data'][:])
         # np_bag = np.array(zarr.open(file_path, 'r')).astype(np.uint8)
@@ -96,15 +141,45 @@ class ZarrFeatureBagLoader(data.Dataset):
         bag_size = wsi_bag.shape[0]
         
         # random drop 
-        bag_idxs = torch.randperm(bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_idxs = torch.randperm(bag_size)[:int(bag_size*(1-self.drop_rate))]
         wsi_bag = wsi_bag[bag_idxs, :]
         batch_coords = batch_coords[bag_idxs]
+
+        idx = self._add_to_cache((wsi_bag, batch_coords), file_path)
+        file_idx = next(i for i, v in enumerate(self.data_info) if v['data_path'] == file_path)
+        # print('file_idx: ', file_idx)
+        # print('idx: ', idx)
+        self.data_info[file_idx]['cache_idx'] = idx
         # print(wsi_bag.shape)
         # name_samples = [batch_names[i] for i in bag_idxs]
-        return wsi_bag, label, (wsi_name, batch_coords, patient)
+        # return wsi_bag, label, (wsi_name, batch_coords, patient)
+        
+        if len(self.data_cache) > self.data_cache_size:
+            # removal_keys = list(self.data_cache)
+            # removal_keys.remove(file_path)
+
+            self.data_cache.pop(idx)  # BUG(review): idx is the entry just added - this evicts the newest item instead of the oldest, and get_data then reads cache_idx == -1
+
+            self.data_info = [{'data_path': di['data_path'], 'label': di['label'], 'name': di['name'], 'patient':di['patient'], 'cache_idx':-1} if di['cache_idx'] == idx else di for di in self.data_info]
+        
+
+
+    def _add_to_cache(self, data, data_path):
+
+
+        # if data_path not in self.data_cache:
+        #     self.data_cache[data_path] = [data]
+        # else:
+        #     self.data_cache[data_path].append(data)
+        self.data_cache.append(data)
+        # print(len(self.data_cache))
+        # return len(self.data_cache)
+        return len(self.data_cache) - 1
+
     
     def get_labels(self, indices):
-        return [self.labels[i] for i in indices]
+        # return [self.labels[i] for i in indices]
+        return [self.data_info[i]['label'] for i in indices]
 
 
     def to_fixed_size_bag(self, bag, names, bag_size: int = 512):
@@ -136,36 +211,17 @@ class ZarrFeatureBagLoader(data.Dataset):
         return bag_samples, name_samples
 
     def __len__(self):
-        return len(self.files)
+        # return len(self.files)
+        return len(self.data_info)
 
     def __getitem__(self, index):
 
-        if self.cache:
-            label = self.labels[index]
-            wsi = self.feature_bags[index]
-            # label = Variable(Tensor(label))
-            label = torch.as_tensor(label)
-            label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
-            wsi_name = self.wsi_names[index]
-            name_batch = self.name_batches[index]
-            patient = self.patients[index]
-
-            #random dropout
-            #shuffle
-
-            # feats = Variable(Tensor(feats))
-            return wsi, label, (wsi_name, name_batch, patient)
-        else:
-            t = self.files[index]
-            batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
-
-                # self.labels.append(label)
-                # self.feature_bags.append(batch)
-                # self.wsi_names.append(wsi_name)
-                # self.name_batches.append(name_batch)
-                # self.patients.append(patient)
-
-            return batch, label, (wsi_name, name_batch, patient)
+        (wsi, batch_coords), label, wsi_name, patient = self.get_data(index)
+
+        label = torch.as_tensor(label)
+        label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+
+        return wsi, label, (wsi_name, batch_coords, patient)
 
 if __name__ == '__main__':
     
@@ -182,14 +238,14 @@ if __name__ == '__main__':
     data_root = f'/{home}/ylan/data/DeepGraft/224_128um_v2'
     # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
     # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
-    label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
-    # label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
+    # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
     output_dir = f'/{data_root}/debug/augments'
     os.makedirs(output_dir, exist_ok=True)
 
     n_classes = 2
 
-    dataset = ZarrFeatureBagLoader(data_root, label_path=label_path, mode='train', cache=True, n_classes=n_classes)
+    dataset = ZarrFeatureBagLoader(data_root, label_path=label_path, mode='train', cache=False, data_cache_size=3000, n_classes=n_classes)
 
     # print(dataset.get_labels(0))
     a = int(len(dataset)* 0.8)
@@ -200,7 +256,7 @@ if __name__ == '__main__':
     # b = int(len(dataset) - a)
     # train_ds, val_ds = torch.utils.data.random_split(dataset, [a, b])
     # dl = FastTensorDataLoader(dataset, batch_size=1, shuffle=False)
-    dl = DataLoader(train_data, batch_size=1, num_workers=8, sampler=ImbalancedDatasetSampler(train_data), pin_memory=True)
+    dl = DataLoader(train_data, batch_size=1, num_workers=8)#, pin_memory=True , sampler=ImbalancedDatasetSampler(train_data)
     # print(len(dl))
     # dl = DataLoader(dataset, batch_size=1, sampler=ImbalancedDatasetSampler(dataset), num_workers=5)
     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -210,29 +266,35 @@ if __name__ == '__main__':
     # for param in model_ft.parameters():
     #     param.requires_grad = False
     # model_ft.to(device)
-    model = TransMIL(n_classes=n_classes).to(device)
+    # model = TransMIL(n_classes=n_classes).to(device)
     
     c = 0
     label_count = [0] *n_classes
     epochs = 1
-    # print(len(dl))
+    print(len(dl))
     # start = time.time()
+
+    count = 0
     for i in range(epochs):
         start = time.time()
         for item in tqdm(dl): 
-
             # if c >= 10:
             #     break
             bag, label, (name, batch_names, patient) = item
             # print(bag.shape)
             # print(len(batch_names))
-            print(label)
-            print(batch_names)
+            # print(label)
+            # print(batch_names)
             bag = bag.float().to(device)
+            # print(bag)
+            # print(name)
+            # bag = bag.float().to(device)
             # print(bag.shape)
             # label = label.to(device)
-            with torch.cuda.amp.autocast():
-                output = model(bag)
-            # c += 1
+            # with torch.cuda.amp.autocast():
+            #     output = model(bag)
+            count += 1
+            
         end = time.time()
-        print('Bag Time: ', end-start)
\ No newline at end of file
+        print('Bag Time: ', end-start)
+        print(count)
\ No newline at end of file
diff --git a/code/datasets/zarr_feature_dataloader_simple.py b/code/datasets/zarr_feature_dataloader_simple.py
new file mode 100644
index 0000000..c9313c9
--- /dev/null
+++ b/code/datasets/zarr_feature_dataloader_simple.py
@@ -0,0 +1,255 @@
+import pandas as pd
+
+import numpy as np
+import torch
+from torch import Tensor
+from torch.autograd import Variable
+from torch.nn.functional import one_hot
+from torch.utils import data
+from torch.utils.data import random_split, DataLoader
+from torchsampler import ImbalancedDatasetSampler
+from torchvision import datasets, transforms
+import pandas as pd
+from sklearn.utils import shuffle
+from pathlib import Path
+from tqdm import tqdm
+import zarr
+import json
+import cv2
+from PIL import Image
+# from models import TransMIL
+
+
+
+class ZarrFeatureBagLoader(data.Dataset):
+    def __init__(self, file_path, label_path, mode, n_classes, cache=False, data_cache_size=5000, max_bag_size=1000):
+        super().__init__()
+
+        self.data_info = []
+        self.data_cache = {}
+        self.slideLabelDict = {}
+        self.files = []
+        self.data_cache_size = data_cache_size
+        self.mode = mode
+        self.file_path = file_path
+        # self.csv_path = csv_path
+        self.label_path = label_path
+        self.n_classes = n_classes
+        self.max_bag_size = max_bag_size
+        self.drop_rate = 0.1
+        # self.min_bag_size = 120
+        self.empty_slides = []
+        self.corrupt_slides = []
+        self.cache = cache
+        
+        # read labels and slide_path from csv
+        with open(self.label_path, 'r') as f:
+            temp_slide_label_dict = json.load(f)[mode]
+            # print(len(temp_slide_label_dict))
+            for (x, y) in temp_slide_label_dict:
+                x = Path(x).stem
+                # x_complete_path = Path(self.file_path)/Path(x)
+                for cohort in Path(self.file_path).iterdir():
+                    # x_complete_path = Path(self.file_path) / cohort / 'FEATURES_RETCCL' / (str(x) + '.zarr')
+                    if self.mode == 'test': #set to test if using GAN output
+                        x_path_list = [Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x) + '.zarr')]
+                    else:
+                        x_path_list = [Path(self.file_path) / cohort / 'FEATURES_RETCCL_2048' / (str(x) + '.zarr')]
+                        for i in range(5):
+                            x_path_list.append(Path(self.file_path) / cohort / 'FEATURES_RETCCL' / (str(x) + f'_aug{i}.zarr'))
+                    # print(x_complete_path)
+                    for x_path in x_path_list:
+                        if x_path.is_dir():
+                            # if len(list(x_complete_path.iterdir())) > self.min_bag_size:
+                            # # print(x_complete_path)
+                            self.slideLabelDict[x] = y
+                            self.files.append(x_path)
+        
+        # print(self.files)
+        home = Path.cwd().parts[1]
+        self.slide_patient_dict_path = f'/{home}/ylan/DeepGraft/training_tables/slide_patient_dict.json'
+        with open(self.slide_patient_dict_path, 'r') as f:
+            self.slide_patient_dict = json.load(f)
+
+        self.feature_bags = []
+        self.labels = []
+        self.wsi_names = []
+        self.name_batches = []
+        self.patients = []
+        if self.cache:
+            for t in tqdm(self.files):
+                # zarr_t = str(t) + '.zarr'
+                batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+
+                self.labels.append(label)
+                self.feature_bags.append(batch)
+                self.wsi_names.append(wsi_name)
+                self.name_batches.append(name_batch)
+                self.patients.append(patient)
+
+    def get_data(self, file_path):
+        
+        batch_names=[] #add function for name_batch read out
+
+        wsi_name = Path(file_path).stem
+        if wsi_name.split('_')[-1][:3] == 'aug':
+            wsi_name = '_'.join(wsi_name.split('_')[:-1])
+        # if wsi_name in self.slideLabelDict:
+        label = self.slideLabelDict[wsi_name]
+        label = torch.as_tensor(label)
+        label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+        patient = self.slide_patient_dict[wsi_name]
+        z = zarr.open(file_path, 'r')
+        np_bag = np.array(z['data'][:])
+        # np_bag = np.array(zarr.open(file_path, 'r')).astype(np.uint8)
+        wsi_bag = torch.from_numpy(np_bag)
+        batch_coords = torch.from_numpy(np.array(z['coords'][:]))
+
+        # print(wsi_bag.shape)
+        bag_size = wsi_bag.shape[0]
+        
+        # random drop 
+        
+        bag_idxs = torch.randperm(bag_size)[:int(self.max_bag_size*(1-self.drop_rate))]
+        wsi_bag = wsi_bag[bag_idxs, :]
+        batch_coords = batch_coords[bag_idxs]
+        # print(wsi_bag.shape)
+        # name_samples = [batch_names[i] for i in bag_idxs]
+        return wsi_bag, label, (wsi_name, batch_coords, patient)
+    
+    def get_labels(self, indices):
+        return [self.labels[i] for i in indices]
+
+
+    def to_fixed_size_bag(self, bag, names, bag_size: int = 512):
+
+        #duplicate bag instances unitl 
+
+        bag_idxs = torch.randperm(bag.shape[0])[:bag_size]
+        bag_samples = bag[bag_idxs]
+        name_samples = [names[i] for i in bag_idxs]
+        # bag_sample_names = [bag_names[i] for i in bag_idxs]
+        # q, r  = divmod(bag_size, bag_samples.shape[0])
+        # if q > 0:
+        #     bag_samples = torch.cat([bag_samples]*q, 0)
+
+        # self_padded = torch.cat([bag_samples, bag_samples[:r,:, :, :]])
+
+        # zero-pad if we don't have enough samples
+        # zero_padded = torch.cat((bag_samples,
+        #                         torch.zeros(bag_size-bag_samples.shape[0], bag_samples.shape[1], bag_samples.shape[2], bag_samples.shape[3])))
+
+        return bag_samples, name_samples, min(bag_size, len(bag))
+
+    def data_dropout(self, bag, batch_names, drop_rate):
+        # bag_size = self.max_bag_size
+        # bag_size = bag.shape[0]
+        bag_idxs = torch.randperm(self.max_bag_size)[:int(bag_size*(1-drop_rate))]
+        bag_samples = bag[bag_idxs]
+        name_samples = [batch_names[i] for i in bag_idxs]
+
+        return bag_samples, name_samples
+
+    def __len__(self):
+        return len(self.files)
+
+    def __getitem__(self, index):
+
+        if self.cache:
+            label = self.labels[index]
+            wsi = self.feature_bags[index]
+            # label = Variable(Tensor(label))
+            # label = torch.as_tensor(label)
+            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+            wsi_name = self.wsi_names[index]
+            name_batch = self.name_batches[index]
+            patient = self.patients[index]
+
+            #random dropout
+            #shuffle
+
+            # feats = Variable(Tensor(feats))
+            return wsi, label, (wsi_name, name_batch, patient)
+        else:
+            t = self.files[index]
+            batch, label, (wsi_name, name_batch, patient) = self.get_data(t)
+            # label = torch.as_tensor(label)
+            # label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+                # self.labels.append(label)
+                # self.feature_bags.append(batch)
+                # self.wsi_names.append(wsi_name)
+                # self.name_batches.append(name_batch)
+                # self.patients.append(patient)
+
+            return batch, label, (wsi_name, name_batch, patient)
+
+if __name__ == '__main__':
+    
+    from pathlib import Path
+    import os
+    import time
+    # from fast_tensor_dl import FastTensorDataLoader
+    # from custom_resnet50 import resnet50_baseline
+    
+    
+
+    home = Path.cwd().parts[1]
+    train_csv = f'/{home}/ylan/DeepGraft_project/code/debug_train.csv'
+    data_root = f'/{home}/ylan/data/DeepGraft/224_128um_v2'
+    # data_root = f'/{home}/ylan/DeepGraft/dataset/hdf5/256_256um_split/'
+    # label_path = f'/{home}/ylan/DeepGraft_project/code/split_PAS_bin.json'
+    # label_path = f'/{home}/ylan/DeepGraft/training_tables/split_debug.json'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
+    output_dir = f'/{data_root}/debug/augments'
+    os.makedirs(output_dir, exist_ok=True)
+
+    n_classes = 2
+
+    dataset = ZarrFeatureBagLoader(data_root, label_path=label_path, mode='train', cache=True, n_classes=n_classes)
+
+    # print(dataset.get_labels(0))
+    a = int(len(dataset)* 0.8)
+    b = int(len(dataset) - a)
+    train_data, valid_data = random_split(dataset, [a, b])
+    # print(dataset.dataset)
+    # a = int(len(dataset)* 0.8)
+    # b = int(len(dataset) - a)
+    # train_ds, val_ds = torch.utils.data.random_split(dataset, [a, b])
+    # dl = FastTensorDataLoader(dataset, batch_size=1, shuffle=False)
+    dl = DataLoader(train_data, batch_size=1, num_workers=8, pin_memory=True)
+    # print(len(dl))
+    # dl = DataLoader(dataset, batch_size=1, sampler=ImbalancedDatasetSampler(dataset), num_workers=5)
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    scaler = torch.cuda.amp.GradScaler()
+
+    # model_ft = resnet50_baseline(pretrained=True)
+    # for param in model_ft.parameters():
+    #     param.requires_grad = False
+    # model_ft.to(device)
+    # model = TransMIL(n_classes=n_classes).to(device)
+    
+    c = 0
+    label_count = [0] *n_classes
+    epochs = 1
+    # print(len(dl))
+    # start = time.time()
+    for i in range(epochs):
+        start = time.time()
+        for item in tqdm(dl): 
+
+            # if c >= 10:
+            #     break
+            bag, label, (name, batch_coords, patient) = item
+            # print(bag.shape)
+            # print(len(batch_names))
+            # print(label)
+            # print(batch_coords)
+            print(name)
+            bag = bag.float().to(device)
+            # print(bag.shape)
+            # label = label.to(device)
+            # with torch.cuda.amp.autocast():
+            #     output = model(bag)
+            # c += 1
+        end = time.time()
+        print('Bag Time: ', end-start)
\ No newline at end of file
diff --git a/code/models/AttMIL.py b/code/models/AttMIL.py
index 048fb0f..89ff5d5 100644
--- a/code/models/AttMIL.py
+++ b/code/models/AttMIL.py
@@ -13,7 +13,7 @@ from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
 
 
 class AttMIL(nn.Module): #gated attention
-    def __init__(self, n_classes, features=512):
+    def __init__(self, n_classes, features=1024):
         super(AttMIL, self).__init__()
         self.L = features
         self.D = 128
diff --git a/code/models/TransMIL.py b/code/models/TransMIL.py
index ddc1262..01a0fa7 100755
--- a/code/models/TransMIL.py
+++ b/code/models/TransMIL.py
@@ -32,11 +32,12 @@ class TransLayer(nn.Module):
         )
 
     def forward(self, x):
-        out, attn = self.attn(self.norm(x), return_attn=True)
+        out= self.attn(self.norm(x))
+        # out, attn = self.attn(self.norm(x))
         x = x + out
         # x = x + self.attn(self.norm(x))
 
-        return x, attn
+        return x
 
 
 class PPEG(nn.Module):
@@ -59,16 +60,19 @@ class PPEG(nn.Module):
 class TransMIL(nn.Module):
     def __init__(self, n_classes):
         super(TransMIL, self).__init__()
-        in_features = 1024
+        in_features = 2048
+        inter_features = 1024
         out_features = 512
-        self.pos_layer = PPEG(dim=out_features)
-        self._fc1 = nn.Sequential(nn.Linear(in_features, out_features), nn.GELU())
-        # self._fc1 = nn.Sequential(nn.Linear(1024, 512), nn.ReLU())
         if apex_available: 
             norm_layer = apex.normalization.FusedLayerNorm
-            
         else:
             norm_layer = nn.LayerNorm
+
+        self.pos_layer = PPEG(dim=out_features)
+        self._fc1 = nn.Sequential(nn.Linear(in_features, inter_features), nn.GELU(), nn.Dropout(p=0.5), norm_layer(inter_features)) 
+        self._fc1_2 = nn.Sequential(nn.Linear(inter_features, out_features), nn.GELU())
+        # self._fc1 = nn.Sequential(nn.Linear(1024, 512), nn.ReLU())
+        
         self.cls_token = nn.Parameter(torch.randn(1, 1, out_features))
         self.n_classes = n_classes
         self.layer1 = TransLayer(norm_layer=norm_layer, dim=out_features)
@@ -90,8 +94,10 @@ class TransMIL(nn.Module):
     def forward(self, x): #, **kwargs
 
         # x = self.model_ft(x).unsqueeze(0)
-        h = x.float() #[B, n, 1024]
+        h = x.squeeze(0).float() #[B, n, 1024]
         h = self._fc1(h) #[B, n, 512]
+        # h = self.drop(h)
+        h = self._fc1_2(h) #[B, n, 512]
         
         # print('Feature Representation: ', h.shape)
         #---->duplicate pad
@@ -110,7 +116,8 @@ class TransMIL(nn.Module):
 
 
         #---->Translayer x1
-        h, attn1 = self.layer1(h) #[B, N, 512]
+        h = self.layer1(h) #[B, N, 512]
+        # h, attn1 = self.layer1(h) #[B, N, 512]
 
         # print('After first TransLayer: ', h.shape)
 
@@ -119,7 +126,8 @@ class TransMIL(nn.Module):
         # print('After PPEG: ', h.shape)
         
         #---->Translayer x2
-        h, attn2 = self.layer2(h) #[B, N, 512]
+        h = self.layer2(h) #[B, N, 512]
+        # h, attn2 = self.layer2(h) #[B, N, 512]
 
         # print('After second TransLayer: ', h.shape) #[1, 1025, 512] 1025 = cls_token + 1024
         #---->cls_token
@@ -128,7 +136,11 @@ class TransMIL(nn.Module):
 
         #---->predict
         logits = self._fc2(h) #[B, n_classes]
-        return logits, attn2
+        # Y_hat = torch.argmax(logits, dim=1)
+        # Y_prob = F.softmax(logits, dim = 1)
+        # results_dict = {'logits': logits, 'Y_prob': Y_prob, 'Y_hat': Y_hat}
+        return logits
+        # return logits, attn2
 
 if __name__ == "__main__":
     data = torch.randn((1, 6000, 1024)).cuda()
diff --git a/code/models/__pycache__/AttMIL.cpython-39.pyc b/code/models/__pycache__/AttMIL.cpython-39.pyc
index 550f6876773393094b93f1d14f352eb024d7917a..5d379a46200a876d652766c8d970eef8af36a4a0 100644
GIT binary patch
delta 29
lcmbQiGlPdWk(ZZ?fq{YH^@Hf-*Bg2585vnN`!b$p0RVFS2qFLg

delta 29
lcmbQiGlPdWk(ZZ?fq{Wx`5MvWR~vcl85x;2`!b$p0RU&k2VnpJ

diff --git a/code/models/__pycache__/TransMIL.cpython-39.pyc b/code/models/__pycache__/TransMIL.cpython-39.pyc
index 547c0a2e1594a0ef49a2c0482c066af1ca368fc9..21d7707719795a1335b1187bffed7fd0328ba3fb 100644
GIT binary patch
delta 1450
zcmbO$`&gDQk(ZZ?fq{XcP^>jMP<$ibe<n72{fzwF)X5Ue4c^QQ3=FIc3=GZ;3=G9G
z3=9l43=2S*v4(LW<3h$-rW(c?CUJ&Z#u~<822DmkO{OA21_p*A0R{#JP39sY1_p*(
zj1{+-^Gi!6v#=;L2{298WC>)H+FZ?I!pN-%Qpyh^<R<T8wP)0s{EPJkqw?g(Y-ZdV
zAYn}qp*2~BU7Jy2asazCs}utRgVE#}?0QntASsZGiey17uqhzfB8ABh*d>HPMr(r{
z&BDOIz`@AJR-`zYn<K8Cje&uIoq>S?<gwx_3=9k<3?+;;49!dnm=-cHGNdrpGNmxp
zGN&-tvXn3vX_T;Jv1YN=vgWasuxD}9vX*exu%xi`GS)D}bCs|!;4Wcbz*ECi!@Q8G
zgna?;LWUCdEWQ-h6t-T*8rB--X2yjqH4O3maFxvUH7qct3z=#d;sr|B7YLRJEf7v&
zPhqcNS;)K)WVA>Ph!iamt6{BSN#W=PsT4=yNq}UUL8>IdPGG5F$PxvcD3!tx%%I8X
zm&w4v!0?iRg@K_66oEycz({A%14T8%1A9%zTZ{!o%Ai<cDiQ_-ASZ||UBm}sTZ0Ht
zh)(Y0RMm>&b1N-QP4P*rOfB-uFUl>F14+w+2v8{B;w;E7j?V$fPR`_%)@OGq$}h+-
zExE-KpO$QRiw#7^8{OgzNGwXsO)W_+Dl!D63f7{;yp+5m%gJ{*_2t>XDW%8@B*C4T
z7oV1zSW;S)S{%ifnOBlpG+BzvK;9M<n+hQJGx9OAKp__s8xs$s7NZpN<Ww%LdQeIR
z#V$CxGB7eQ)G#h!NMT&axPWmXLoHJc!vdxnCUCN4SilTrF=er&FiA4hGMBKHuq|LO
zVOzja!wgE?&5W2LOboRwwX8KPHLM~GHOvwWwQMDvSzI+tHB2cCDGc?kOeqXCY#=g)
zp_M6(S&|{0DTOJ8xtFPysRm>bqa;HudksSt_X3_8_7s*BmW9lqWK+ulHin~yDGi~X
zVF4f1i3|()YnVWmE@TFKLI5r<2x5V(Ze>bgOsfa^1LR8~kZ>($4QDWeCaYf&BPgyw
zkqu4-FaQ7l|6f!17JG4FX=-X!>Mho^oczR+A~%pKcMzciiU;P*ypmf?c?Gvvl2bEt
zZm|>>7L|abuE+?a#uB84Ew!Q`F)!s7OLA#SVv%<}NWu|B_<#sdAyDKCN>^Nv_>}<Z
zVazBp0ntqH9=Dj{!*6jVrliE@q~@iUWE7c!B)F1uisMW2vs3emZ?Wa%r)QQF7m0z)
z0~6vP7AT{~!P5gXA0rnN7Yi4omKGxqvjz(nqX?rA3l|$73m2OhGZ#yhKuA$yUa_yI
zk6v1SQF&rfN|6<)kca}Ma~qH`kV1k-u?Qq`iz7ZhH!(9WK3<a<oJC$1h%+!0ffN>j
z5|<_;IH49LfLsC!RZVqpxZL6>$uCOI(96p!lAEl^tIrrRIgr<p%^sx8Zt@IXHBnG@
z0b5Z7F1<WKG7*z6^F}f1OxEKIlavq=6bj{H<YVMx<YDAtlwlNL<X{wF6k-DaJ9<N>

delta 1375
zcmaDXJ6D!3k(ZZ?fq{XcEI2UPQgkEVe<p4h{fzwF)Zmi*BE!kT%ndm#3=9k)?99Nx
zP^`hgz)-`G#ZberfU$;gA!7<-3R5pr3{x#rEprWH4U;%SEn^L14Rab(FoPzuUx+5-
zE$*V!lG38Q_{5TuJWZw|K?VkfA^`>l22GYCAqED9TZ|RAnDa|Zz!H=1F)K4kFivJ>
z31pPs9Li$C$gKoY&krKxCr@FuXVjj2h4lra%H+LlX55+}VJ#4$J^3G-HlyTZEp}&C
zX$A%c!^uVLdag1cDOnI92O_|xfMkmlL0mC7Ap&BFg9wmqAOnj*KIUNLV=Gdc{FFV8
zkClOeft`VYff*DC%O(eMNY<Ay*D$9r_cGQn#IuyJE?_HRUBF($RKvWGsf2X_$3ljM
zOf?MgoF!aEUM1XFJXyT8EO~4ttXX`uAl3XO0t*CFSW;MOm=`iH1eqaJ10sb>L~2-S
zm{VALLApgzcw!)#W{@iJ64nJ#>^00a3|YcpYa~(_f*CZ~{9b}$stDxRA{~%-R6ya(
zR3r>ybAs5CMSLK(4Tt~-2gt*>I1BQN<8u-#Q;Uk^85kHQ@8gtaWr<HqHk_Q#sj7F2
z&#kmLHN^)c@0VYcTciMrHqL;=qQu<PlGLIiBM_gpC^0W3ugGdL2bVrSD>!P4JV6rN
znR%1LxD2F0b{2zTgn^Ndk%f_kk%v)=iG`7inT>JsQZB7}9%vAQ)7c+TI%8bGu#lma
z37poL7Bbc{)i7l-r7%h|)H0Vam#{2gEn!)}R>QoIaUo+fBc=!wLoG`!YYj^cs|Z64
zvjjseTM2s>M-5XAQwl>0Ln~7XLk$~<1hLbY>m?b|nIOp-Y7w&}LoIs^Ll)-(t{Qew
zTrFf?z`c;6mIG`IM-5XNLOsI*o*JfwjNpvGS;Me^w}uI1@j_;>^ZDR%Tr~^}_(43V
z6PVMOBq3g`7XS&@a@TOzaDj6Vt6vc)`XCX>$iTqx^8f$;|1||~v8LtZCzcdBf#OgX
z6vE7zc_p`)@(OOTB&TNP++ry%EGhv<N|7;0jTJ}@TWUo?VqVHEmgLfu#3C<{xE+Y_
z1`(j_=u_kaN+3Ltn3DwQVazBp1<_3L9=Dj{!*6jVrliE@q~@iUWE7c$B)F1uisMW2
zvs3emZ?S^%w;_ZwnmmU`LK~FRqCoitl=&E#IT*QExEQq<d6+d=xEMtkg;@Al`B?Z^
z#hAGmCqLv7uD1p`1zV;Br5unXG*j}(6@f%<am2^xCT8Zv$7?c!L+WLLI0HkGEyxs5
zR@Gz#r@x{EkN_woHPyj^a*LxRzbH9FFE0<A*l!6#xs!`|l^KI3Pv>=Ha{*~_o_vp2
zO%#+rz}6Lkl5UX)NG5EuBwrMx?&K1_FiCMCK_M?LMm|P9Mjl2UMj1u{Mh->+Mj<u;
D8#^nk

diff --git a/code/models/__pycache__/TransformerMIL.cpython-39.pyc b/code/models/__pycache__/TransformerMIL.cpython-39.pyc
index 7ed17970f205f815c2b3f9dc1b6529a296ec9548..f2c5bda0715cd2d8601f4a33e2187a553a3f322a 100644
GIT binary patch
delta 1053
zcmew=bx?{ok(ZZ?fq{WR_O^5KxsAMLOaixf^Gb8$a}x7XaubWPi*HR1X4=juFqxaV
z+42^5QEEwPQC@svNlBh2Q;`q@14EG@0|SF5OOY@G1H&!Gid)S2r6mvvagZ#SkeGa&
zIaN~t#Fqgn<Bg9`&PgmTPEO6qiH|P=s{s?DASDu;Ls@tj1^7X1We@?@s4%&ZRY?kD
zOA!Z15UdoeNdm+bn7o$t38U)dNo-o&S|9;!5TP^qIGdUmSd$dUVn&Z!jN!K!oo+G4
z-(qyW#hIK_9AA>3otk%xD=js#1j^<}&dZAj@sV5xwnS#KIlGaoEXZUz5FrmDz^(&%
zt4ImN1v?2$fL#dI2l53-Z7~Z20|N&mA6t>~<aO+Yj1`j=IHc<ru#_+_V69=QVP42o
z!n}ZOA;UtZ8isiG66OUQC7cVmQkYYiYnT@@FJ!D?i07^Wkvt{5HOwh2y&yJU3G)K}
z8i=d_*d*o}hAbYi8G<Pc!3>(Lewhp`3=Bme+ly2|j?@LknNSfph`H@RB4A%AOn%Fu
z&uat<Db}LIyp+5mtI3+2(vn4rASIxPzQq!smTUx#?jqO8shsjUV1I&r4E8L@Btr%U
zhGJ0aU|{59WMO1sl425LWMkxE+PsBRma!i0h9X7=28Nga|NsB5$$yJAEhj&*q{s<m
zmmbIs%$a#5x0vz@Zm}e%X6D>tDK0E3DFV5!$QY!?3Z#ZDwW1&~FXa|Xa%oCpksC-H
z<jx{@kPo@Rsj>(X^%+H`APr3M9*(z|;=^xoC8ngr=cMMPmt+)~gJihisiF85D=38<
z-eSwiPtPnV#!Lqjx$Wz1L6(924JN?B01g3AfEs{802JUHj6D2BAdy=f@$tEdnR)T?
zn#|x>cv&D0ixf7HYZ;4NK_*6l2v7)VDuO+Gi=!mJC^<tfFRw^}fq@}P7|NaO&!f!h
z3v&46avoJtP#Oc<Qv^<*E+A6^CvW7@V2c1Tg(lzUF;fEvD{Fa1YEkMfj<n32lGLKq
P6eQn)Jt8sLn%4>dyfN#+

delta 1058
zcmX>o^;L>Dk(ZZ?fq{X6e}P8wsg1m5Oaf87d8N7WIf;2Gxrs&D#Zi-knYJ_XP3C59
zwv6H~N-ZfZ%8O4dDaq4hDiUB|U?}2eU|`T>DH3F0V7SFtaf>;>v;-m{3X%mAVv~<E
zr)u(n_>v%Hyz%kLIf=!^$*DOx@$p4qHDE#*q(p3UC<_mx056EG03yH|WhWQ1DoKHC
zDdGSLf|Y_biGjF$lh?97VN{wtiA{@J9VDOuA~Yu-XH)Y6YZ3=p%;<58G5i*z(=EpM
zTa3=PIFoaV<4f|hQ}d#@(oz#kplpuhyu5f2AIW84OC%?ovm3ceflQVL5i%eG>^hLQ
zisV6Du#><9*o9zyAYXtai&+>L7&sXD*oqV;uVXJ{oG@8|L%P0(Ifc2Gv4$a@rG#|>
zTM6p|_8O)d=7mfptP406GAv}OVTk7}VO_vg!o7eeg(ZchhIt|LLXdji8W71>!Vfn=
z0EsPF!n#1H2BK0JY$|gNLlz&{JdqTJU<OTAzf1-e24jXIkh6-kK(1E;MW$#GIP5J!
z0$|^P+#khRkY601lUSKrR3tn3D2G0;J}5+4ixTrv@`}tSGjd8R-eLpG8x_fcw1N`G
zEtdGSWFu(uC~}x=$|-LE4i|6;fddC*gCPS0LotZQz{tnQ!pOqN!zjfh#>mFV!^p$9
zxq(xbQ5obih#MIh7#Lpu|NsBLCjTwgw4D6Jk|JA>ecF=`a7l)OTvcQM;+uoq$Cg@A
zkeHWpizT@<C9%j6Bo1;{ku%7n+~8DO1W6hhMMfYEOz|GKnBv24aV4gt#OI{urI%zB
znM~H>mKOj!8BBm&S|m9+mfODG0%Rgs5KMsm29^ee9>^KcIOpLn0*Tz>h>y=r%*>0A
z*JLgNdFf?=I0G~S*g#HVEOG#u7zQFh@|uc8ARpc0D9JBM&d|%tE0SekV2Bciawkjk
zD6@KkL?%1)sEUFj3T#giIFZ_eO!1vu%cH>-0%8hGUd>~srVa8YYk5X$QR*#@w9K56
S)S}cBq@)h^h}dLyUMm2R)Zlag

diff --git a/code/models/__pycache__/model_interface.cpython-39.pyc b/code/models/__pycache__/model_interface.cpython-39.pyc
index 176ab832bfacbc7d53356fade7a947b9b78e4a7e..0466ab737a0a0da0cfa6b04ea69f2aef82561a6d 100644
GIT binary patch
delta 10511
zcmZo|W9;l^<V)n`<zir9V3_!)Gx>1!L_QfN7p93i>+5B6<)Y*m8B!QhxN~H4<)h?t
z6`~Y!6{8fvVmvuYxyn(>xhhdAxvEjBU@_htwOsWm^<0f84KSZEM>9$@g&~D8M=Mu5
zN*gMsldH|dz{rrR8>I&o(T~zkVMyW2F~~KHG6ak9=V%J$8buj_IRZJxxh7F2j0}?t
z7{%&yEu$>K!ooRLxz<tEV75q(O|ET}ZLVFE9hfhgW1s5~<p5?g<~T+<rHG}7=Q!uO
zM7co4T%%l57*Zs1Tyx!`+`wXzIqtb0Q66BnRE}q^SCm(-ca(RoPm~W>OghIm*DtEx
zFV{cHA1ok~6ObDi6$oa_<^<&iM+Jk~aycQnp;4h=He*g$Zg^BUBZE6bihPPf3qy)R
zs#8`(Gjmj=J41?Mic$+hic+dmR#Y={RJ1!oigJod3qy)Zs&-aPGjmjI3S%&Xrs_*j
z^!RBq-eL(zoUFnm!NMGt8M4`uiI=fnlkt{dP-;qPa;i^|e_lXNVo7RZsV3tsKJV1Z
zV(0vvoYdr!%=|n}rd#YTi6x1}sU^3>bMg~Y;!{$S6H793^7G<T5=#<uGK)(z8E<ih
z2YUvkCZ?noX)@j7axTp+%}FfDEK9v5h+sRGr4}Wor)o0Z;>gWp%gN18NzK{p#=Mu2
zQ<i~&;TNO+FOSK(Z2FU}S<lHEF)%RPV#&zQO})ihkXV$MyON>E6{N~dA0#+gm(82e
zdU7ROyt*j^1A``0kt0Y0V+B|dgm43iu&3o0l_wUZxJ~}g=40s2z`#)C1tLIZL6m`6
zAhkuB3=9lb3=9m#EDQ_`9E=>?MVgb(+sZOdnf%sPe)3NC0<Lxj7KSL{U<OUa$<`cN
zOaUyD(>WCD8M9bZ7_*s*97@=-*lQTFSW=i$m{V9%SbJG&7~(ndIBFOca4uwEWB|*t
zWwR8em2lNCX0g;TWO1jk_cGNm#Pj5Fl<+n)fLJAbDI6)By^J*s@%%N6S*$e-@d7o>
zH4O2BH7qGyk{~rgCBiifSwhW>St2Ri^^y!JJlRY|r%FT-!p)4etSP)TtSQ_!3@LmI
z#A;X<f(%5_C<!u3JcU0+poO7C0_3=6Mi+)=kUB{aFNH0eWddVSUWrteG?*`#A_R7<
zbcxggnG&fiS+HomFi5yWDoYN`73pOxk;;+>v%wllq_PxJ1i^gKUZxVMEJYNa5`ve`
zwve%wEiVjiA=u;^hIqJzDQqcVF_3L3Y$;&7>a*li*iw+JN?}WZTLfXJvw{pRk;+mA
zJ3|B<xGE)5S*l2U)e@;JH6*@z4O^B5DEEU^Yl3+aDdJ$B7MLdq=G8OAYlFE`U@k+v
zP7O<nv?N28ZVf}cUW!Z&L%e<sONuNwxD8T3X~M9EA<G~|E}LlrQ&CC^C>o3=Fy_4g
zi^_vVdrFKKn4~CxBh9qL49*6rI{;Rv2v&Ec#JoPsB1I`h8SE~j8ip*R1(pjLTo__`
zYS~j%YS`ngCNLFlD6w8(lLGRs;X)>mnt0m@jCmDcgVeH_CNLNImDpw3gMF`(qLRXt
zq7F{HMkT@v92PQUiKJ*qGSqT_bY?l0I4y8a;j7`8&5)uwmw915V~R?ORxcAcePdeA
zT&M%KT)T!L3vPMdKd{w0V5=KSTvC`)bn$qAxv(Jb3|NOASjVjrr!3bj=M?=EgA}Iv
z6y09tTFx3y41Y5hE(2Q)3T}|qoLO%1wk6gJ+-o>XtQUAJWB`Rlyir~e*l43{rU@)X
zJ}D}-Tp%?i)>)n@+$qK>CMl-9%q7+fycRN)SZ8^su%)o2nDsJ&<&9I!Q|fUk@JX>~
zW&}IH65@A=AqzQc7~+jm7=jrzt^96rIF^)z6eZ>rheUBEmXySofY_M~EDQ{qEJfO&
zB8;n`D7B<0F*7eUC5j_AGbbKYa7KwHB_?Ml<>#fwo8^_}#;2twmXsEy7QbYI6o+7)
zY$>T_naQc?vu1+8PJ2)}U7VYrUy_lTmp(ayN0w1{av4vcCa4Hp$#jcJ&)^ngCR(X@
zi^~R7#3$$H7T6U{{=j2ee~UFYF}WyTlL_KH_C!drev6|#H8VY<BsFCvBUnb0sVE*)
z)PkMDQkIw#ugQ3et)M71Ewdtu2gEN(EXhpGD~Z3wT9R5^5+B6_rl6v?SU}ZF5hExF
zIAET=#gR6760fe;EnZJwM|W30*APSR_%K6F##`(V+l$>Y8JHLtqS%U3OOlgwinKs3
z097wVdLWiQh%f{ZCZKR)EDD(Xop(lkF^J=Pi?z74AhoE-4<rw&DsG7sCzhqgXH*uX
z7J)0F)RNSq;vz`hl$V&BS|r23z!1gZ>*?bQs?2V&f}$qp78kgl1hb3yL52l^lyN4O
zCg;ZI<QEqg@qq-nK|W&v@tKO8CO_blXEdM0FUy!ZS&sj(p=eoRPG(9xG%VtCQ%j06
zlZ$UbLI}naK?<+QPx%!^!$Ibzfe3pL0je5{6emjyXt*nZ`0^kk14LMZ2pbS#3nJ`5
zgae2$1`(jJDKZ1GEI@=MhyYcLMVynn1UyVZRc%o@NU#z_RDt}#nw?mbUVMwCBsI6-
z7CY2|MJ^zDXAqG+nNv_z7nIP7z;!Jsczr;1?KMyfg^`bugOP)Yi&27+gPDs_go%TR
zhh=i0pa`ST<TOFA$!i3q*jf1)xfsRxC!ZB8t_M}&H4Iq{;A&T-hG79?4dX(_U{D3i
zRAg4exPUo@X(3}RV-3RsCXmbm7BHVFkE@0e#s=$R0_oCZ_ABxSITDoCG+B$BKy0x8
zKmmA*y|}P6HMJ`B7H4T5glV6u$qa6A6nTOomKBuwiqY~7xS7Gk$iPqxifjfZE=Dd!
zHbyQ+Ato-S$;X9MV^|p&K)IX&6g<VC(5+!yz_5@3<mej41&j+Bni*@EN|>^kYnW0P
zQ<!?0YMDz|vRL6fmKvrSmKx?X=3oYp2S90jCF3pTl+0XBw#f;?BB`KgyTz7Rl%AVd
zaf{LI7JG4iT1jqVg(fT5?YCGn^9o8!Zn5R$r)QQF-(ro7&qyq}#TFS~P?Vp9=9^nA
z#U-f){2=duTl}C1V&r4wW1D<OSgD?efq{XOfdS%`ISdR8HH<OLwam3FwX8J^3m9sc
z7lQa~wd|l8WC3FhTMf%Xrdm#r=dwUv0yTu#QW$%gYPm|-Ygn2YYq@I}vN&qEB^gqf
zSQ#W4Qkcydm>Fz9@e2`U0g0Bd*Kjm5)^G=8Gr^@fQ&?*_Q$R&DTMBauTMBzGJ0n94
zXAOG|M-58~#~hYm22D=CqC`*>gL4Zg+=?PV(a4!ooSX{IpObfpSk{Bn_BxQdwcrGv
zlbDp6qsdjY9wY>o1O;#rDF58zN-NC+x0VxgZn1;P<c$0hNHM{jnO9Pj3epk+Dr0z{
zL7$XZlALji6HLWtrc@NI1}WJJvWPLhXbl4cLqrkW?jn$FQM^!d<G~KO#TbbmMZDlj
zA~P>N9u!xyptuqOWiBQzE&*m9CJ|;1J~2i)=E=)Mtr-7Jekl5qWdZL(hRLVIeDuLq
zfimYxrlKg2OF-!w&HSPm1_p+3Mh1qW*vYQq`P!gRfhgGz(gD&9;ezuwNO1&MF#`j`
z<cDIWjv&uL6yQ=H1yf&?2Qm?q8;ihk1d5(2`N?kbAsS&I8L(ZEa2A*gw>1rH%H)d@
z>f#$fN;ZOsO(0?mC<ge7EGP3z$_vzkL_j4uBzBffwv*Hp0>x2LBS>ZuNG)fP<Kz-a
z3nREAZZRh&Cqp~|xAc|}Jls<Y@{=>-Q}a?nC*P9{tLFuEBVgI01}R&_FxRryvemL9
zWe$#7&RQ;Tk;DPc9NZ;L;Ecdi!;r;X!z0O%!U#(8DTtH~5oHF6mN3<DH8a-m!1Dn|
z4QCBkeGN+uYYlr1cM8iK_Fx80RzFB+!c)8^J2<5wB{Xmxm4Q+rJGABjr#(#;q*T8H
z6g;35t;q{c%tbpv0=qy2A`x?x7iUh6R1prj3=#*WXCzxd$+>6`NCF&)U;-3<xA?)8
zULvU52~NZLpfn6AJ_VQsSb3N^n8g_7SSF`Q>+*{u2ksZ9$@!`-^;t|Q>?uqw3^m|x
z45$y*%d~)bAww-|3I{^GmMw)7iN{{Tuz)Fr3n8+Q5mZcNv7~U<aHQ}+A#X1mBdGWR
z6=-ZVEDP8dGSskw$c0Q$6@0z*EQ|~#95t+<&R#QP3V$|p(GPHoTA+qu0apz(ng*T}
zu%UTk5H%@+P=lc2pw1yE22+GmgnOA888R7aInx<xxzZVGxzibHc}jR{*lXB8rZ%&<
zFvNPqu+-P`*7DWz*9z3|)^LLIb{XRY#-f56z8d~CrgX*{&Kh1Dh8ms)d^G|K85tS!
zgg_#a3=8-mDnMcr7z=G6dR-V|-D-K$8EOR~s$3XiMPit0g=(2=1Q+muigZSX35<pF
zN*J>EK_OTp2<iwhr?B)g)e6>dFW{>Ygc{ffmgZi_Sj$}ks;PMvGS>2xaMbW1!f66i
zkqS5zKy^7+4L2xYYj{8;Jb0m@0u3z@ETP3(U&F9KpoY7K2d;^)7ZfF+zNZkVYa1I>
z3+guu*9c28q=-r~)QZ$FWI=dhk_@$?H6kg@;9gZ<3PY_}9#4r#jc7AtjfgZutyqa@
ziueMt8sQqT8qpe&g)GgCj0_W)3gc2FKq;pLWQQf#5^(<+WQ{neS3ZHM=tB)dmP87_
zIH;Q^Q6jlO3e1-jVE}~>xEmlfd7_4~_DV)bb_C}KTxDc0q;#7i>%_7IQnDK96zzlO
zC0yG2z}ny%4iMDPkE-DyK@Ag-G?;3G$`D97iOWS3QPhKr3A`Skgrwf6UXu}AFyXgi
z3QWr_j<UoYaLpc4bc}(4VI^}BYOPQNDifpFL8=l<lb?fXO%T4tT$Y$qbOh8m02NK~
z&@wSTk+U=zQkF(cwv~^qCtN9ghB>9E5LB^97lGRj$3dZb0z`ldRZ#77iz6pLJw7Ef
zxumEcBmt_2ZZYSi=G|f|&C4t-O@&lXXFz2%b5&-+Etc%m`pV*4Y>5R0sd*{4SaMSn
z^NJ>bw4&E=AhA;*;xveu3?e`k+by=D#JrUJ+*>Tgsi`TN;^0C(N&plVpms!jerZWT
zX-P38ZJq_`ItL=ogIvH}oRgW7S|1PU3l-nufQC;oIBsr<g8Ga(sU@lKa07Yk77tWa
zJg8fFix<W#PR=h%y~PLPmLwLXr<UAeEV#v3e2b}|=oVwiEmnvcR<NZ-7eKB6d7!8s
z)IKP>2;zg25SRd0C?ZG|3cPY|2h}P`p!j8zU=(2#VUz*2WSK<R#8|`_+5WS!2r#NJ
zi*X7tvoL}olN3mvk%vi)g@=)aQGk_?MTCWqiHnVkRfti8NsLLD52THY53G!fk&TIm
zQDX961xs+dm>t#t0A&eSyBOX8U;#JzS-=%Pq?HVA2tX@;P%D@j+zQ65{6TGF<jS8L
zweknm9Q8HqDJ*l?uvYzSkoW{Q{J<?ac-3D7%8QyTMaw|p32y9y6DPRFhcsWY)%ZnM
zK+3Oz2#|iT=?+MBKDO$f6Wo0TC9*%DD1lb=j2uh?%p5G>rmCf?I-dk`MQ=1YPc^ij
zaGegSOllYwu-33FU<23GY@orO6fQ922Gt5R3|Z_cplXN-p`sbo4uGfuSIs4yHLMG`
zKs~V(zHH{AcVIpIHT4V&xIy(UNaI2@E#Ufz9W<s2whCkz*r*zYEFMstfX68zwK+#R
zQjMO%T+36!i(aE6Rp=ZG_!csNEA*^-NPV5oSi=FX(YY7!!)tVo8eU0;1^f^dpbBOJ
zW1(3MShEX5Y!kFb2dhG<(fQ#u`V4S&Ex3@e7F4GRqzGp-O<*kgQX`lpn8KIB(#x1q
z&zH@#KxiQY$p5v%HNp#o7c!(UXERS=Ec#a?2&!Cb1@pK{7#4_t+5okJHCzk$K`d~C
zp`%0;)L3O?09(Vgkg*n|lCcKTKH#k3Mg;f-rlKTpkb_zU+%;Sv`)arsu+<~QB^Od7
ziakXnMF1n%L77XehO34f9#!D@FA=X{$P$3I3P9}ui5d}Tt3VV~L4$dqR)JWJD6~~@
zCWWC^JddX&U9v{3nXyJxnxR%4+$@l)5vdUeH4Na*f;rG;0o)RBs{k~{E}g;!9>UbC
zVaSq6;RCk{WJ+WgfCkTHQqWrkCK}g4SqfaD;i^gbnZSLrw;DDiH3<a3+DK>;2%>5r
z+$0b})o_ELPlS;*)EC_(s6hm#A*ARwsFsG5D4-!Va6J#H);Yk%Ok!zr(H&5o#RB4j
zEA^s4kRAaLA6`X)lz}Ui2u=nDhAI`1<YZU*P$KI1LYQM9b-X;*I{qHWPZL1|sAew$
zRry66Antt-@c=}C`=6i+zUU!{`v^olMpWi2K%QXF%g?JyEy};enpc{enp5-yqzGKO
zPXe*PszD_-q!dT3@Qa>;lsp5qLnI)TJs+fwk1t9sF3rJS)z=q+T17=KKw3eQibbzL
ztk)po4JZ_aVHJ0BPGWI!Jh-MVdJB?%2O{2shz}s*BZ&9}(jrulUz`b=BaDYI^7D$3
zDs{9vy&jwez?C{5C{E(x)j1y%14A)rz<_}fQk_eH>R~2uRnDZs1g^|k;gvZf3wm|V
z#>d13s<9dQ7`ZsPSooMYn7Ej@m?2d=yk_SINkgi3Mlog{#>s{HdiBf<3=E)QJn#^n
z0Rsa=38;(6Si_LT)XY@NSi)Sx2pYOeVeSP_2d1#iVXkGWVN7SNVM=FAV+v-_WcRzp
zoST`K4r+}sFg&o=WV*!y8aKYhmX?@Yl3!GG3lv-+-)S=6;w(<iNKGltNiDj?o|~VS
z=~$9qbc-{sD7COOH7~hRlck6aq#86H3mKjj1;sIRq!KLp2P6*;Nig9y`GdYBe+4K2
zK{^?j_?X3*CW{%UO=dB?90zI>7lAq%Mc|PbP-ZC#1)0coi?ui-u^_bw)G)Zklvi+z
zB{?-S=N3zGVNnSp)!$+-N(HOC#ZsIJ8a@S2Z4~4er)shnIZoC$QqsP~21#;|c09yQ
zpyVwG)d`smNy{(FZJC^H<XR7!m<72M9N-P05fz3khAhS`rdmc&P-in0NtA$QN*Pm_
zY8cBHifmGt7qEb4VnJg93s^yez@Q1vg^aaKc`P-IwM<n~C2S?^3pi5PY8YoT%w<~0
zSObbW=5U5Q5k>|OEa6OHZ)R*_jAY1TQZ!(I^yI<Qa*W`<8;4&JC<zqp2DuwF(xovu
z-<aDF<QGlGTU^PB1(~2p@~YG#Tmx1!z~#o|)y5S9;6Z0lLlI(T?POyUPiGczCM^O9
zM{y-5=BDO=+uOHT!E+91L0444z`)QCictknnquT);b3HA+I+~QiO~lXp?;d2x7aiD
ziVIScOKx$cr-ItqAWjizK>ijBXb``M7t~n+I|W?!fJS|9v6U92B$j|<4wQyOCQmR^
zR*Yf=sW-gEoLH2O?hau_28L~uub7>30$B?#YQgE~<$sWVT~IpWyTzPbPzo->3Mz}(
zK(_t_g(5R(z?v<wxCAus!pO+LFnOP@#N^lJiVi11+SnlB!46V?6l54EWNxvPWagya
zV$RG(cgZbIP>(7zH!(f+*ko@DH^wECCs|Y&D%G%M@q@-#QrIOKvIJ%`q;S?S&t^#B
zn#+>Hox%g2&SYC42%5GB$20F_Q%f_(!;^C?W!PEtlXK&%SSC-j6xQqoxd2r1-eM|9
zxy9*{Sd!>g1ezL>1G!HgM1V_Uqsb>M<!n_zd{q#k1|mTIxy25eWJ=3Szr_x6Xi8?$
zE#~B$G*BJ^84d4j6&;-{ZzZJ-N?4Gv1{H=ypnBpwD7po}(apoi!KlJ0#>l~_z@)S}
z#VU!h9v=L+*dS$d5vb;_5_U_>DNa>@6uo7s3Pp(}sZ~NDMWyIsoI(E13dyBKWvSr!
ztK!fv%1_qIDo8In45}en^n?7Jiwr=%F=S+5&=fA}2FGDRPRT8{w9It%(xO!GP<RnI
z7{JvWJZNsQ6y)TW6oG<ovc0ajVUan=9B?36fLN#@rO5?}d2lBb)c-6xI=Rk9-0Bur
zNd~CE$j?bBE{X=*o>owFi@Btr2oh4@FarhGEnZL%7v(3%gFSS6@(CO7dXXY;P(Xub
zql$b%EKpVePv1aBJ@<mdE`bQpTvgF$5bFzw_yHoo$(ak}SWwO@0wwz*O%N9pGexe9
z3=Fq;<Kw|0pPZVL6CaP{qy-=Y%s>tU&pCny-HSmJbsWq*^$g}5yc~QS793I>5*(!*
zLL6BfJRIB{>Kp<boE$twev{AJdN9tJEMaG6<^jqT%ta+2)4*<L07V33NlX!lT?DoW
z9tz;R{$p~B-DWM2ENm_dG<tiBIlr_7$%SCIfo9wOOir|yt;ZQqZXh3nEdUc>`@niZ
zvq9fM=@V3Rb1?F-6oEuE<8Cn*R~8q6vj-@L-(o6CEfNJ8A^;*lEtFeq1&MhniN!_6
zj0_A>?8T{xNv!!rc}1p^nH-esZ9oCboLpvfi#fp4rwC+vlvqJ!Nq$jsMm%D|JiaWm
z1YDLzar;*K7nEe?W>%#Zf$PaAHs4Ag&}1yAhklE@v?Md9SPx8s8)Q+UV76XzX>mz@
zE~sggms(<KPy}igMsZ4&Waj4Tfg75j`LH6;Ty7C)8mx$S@<In;Zctyg2t4&GFnPa&
zxRf9$Za}j!;Og-f2iTo@d3i;)lRr2}@DyzW$*%+v4U?rEW$T4OOc78kNdlrYALMqu
zoT7Mm8=weOyGQZl=I145>LusrrDdiUiGg%*L4?4w=b%yYB2W)HN)#cgS6-YMUz7@3
ztW_il(!mRngU#AXF~~766iI_*gds9{sU_w4McKuAd8sAEMKY7^9QB3eKpBl2?7Fhd
z;>`TKBKgS;juKX&dgm4oxUL6P+@J|_MUZbm3A;!cWT*fvp3;jFQ{s~obBk0!3REXw
za#UheoBY*L%TXPqjypNAprkZcFR`Sgs7M1O!V78}<rM1$r55|8mK1>|t8WSWfL0yk
zW#*-W%3j}ma05wma)OhnDQF`978_Iul<tsHHaMMvlISfCn_RFaJ5V#HIBoJOCn*g^
p9%dd!Fl6EZj}ZwlaWL{Q3NeC69!4G}9!3F19!7!5_ng!ijR6_O7IFXp

delta 10013
zcmeC|W^8R^<V)n`<zir9U}y*pO!h3D$S1?}f^nkG`g-YH*(ljuxhT0@`6zith7^Vr
z?i__&#VEyGr6{Fb<tSya7*CE$u4<HOu3D5Dn9rD_9;KebkiwXwk*gV{2^G`I)nsB|
zWXRQy(t(QTM(L(7r10kG<?2W2gT?rA)N>7@48Uy09K-r3qZIxWfgIyplPD9YplOt8
z3PXxuj%lu0lo?n|D91e4BFX~H7S6HEwTiOJwT`mRwTZF;i;3je=GsNs<=RKtgZZL4
z4!Mp|j$pP}j#I94lrxwup5v118s!RRGv>JEx<}c&Gcvd{q)4PlwlJhfrW$2=G&4te
zx-+CmrAW6hq)4Y4WqCC-M|rz5q{yVmwlJi~rfO#SG&4u}rZ5IGXv$44VN%&VpNWfc
zvmVP{Mn;j%Z&+CwRWunG7;dp-<maZ|Vl7B4O3Yo!P-Mfv!0^jkA0!xDl3!#vS%BS}
z(RgwUd%U_10|SF5Q;`Ko7h?rjC4{gAiLj^T7nLU#rPxh=#O`Bg$H2f)<Om`_211m9
zSs=AV3JeSkMhpxL#ViaA3>=Ic+(inL<2hs*=S;5U=od;+Oi^lKh!SpRU}1<731-k#
zo-D?xRsV*WfuV$<gt3O9nP~yjLIy^L8iobTV45+DrG_D%rG{YvD~MmhR>M@on8nh}
zRKpO@Uc;EhQNs|=S;Jhz5YJV^lENs-P{R<<UBXktkj351n8lmIB*~D%oXu3!Qo@H2
zZDy=xO<}2FO<}TONMT)2&tJp3kP%a34MV&@3R?<$3qy%u4MP@7GouSbGf15fh?l~V
z%`$<pNUcOTO9ag4>;=h*ln5^nEfLNV1B-E`aQ8Bm2xp0dd7QnBCBj(}U^Wj_Mlyw`
z9xT8MRw$JMayv^3geQ&QrL!$$tYym!gIfnSx`rVhZea>X3RnzeT?$7E)Up(g6eO!s
zIG`p=rSL%5>8uOu8Ee@}gtKJ8&VcwrwnR8f4v8;UBAg|U#8;?c%Ti3?OW_CmSqaP&
zfbx`USW*Nf8M0Jr7~)k^glZV#)oNH$guzjxo+47i5U)|gkfoj?n$0wUsfZzkp}vM8
zUULFt-V3m>7+6@NL~DU|ia0o0bV_vLY>>JGV098;bp<7QS^6oGDN<mEXx1=fX)Z8W
z$l$^d%TvpqB3;8CZ#aRe(5J*`fpH4RgBlB&Kx*PmCNSnzfX$K3t_Qi}Rf%bq8Q6!?
zDbgvDDRRA_gr`};v%q{ILl$p}yd*;{2S{s{MTzACs}$B6j@b+;3UiqkGNwqUDE2ae
z5-nECPk=2~s$s~2Tb}n1Y_)PW(*))snfelI0zP0a<jFe&)}jK|QdVM_Ws_x<qMD)x
z*2!GUS;L9pZ{|WLu+{2Nt2wi5<4sD87TDErmKZItU&sIoi+Ihv`XaE=8epSeq)69t
zffSV(WjUlUrD&#TrD*puml!Q@T*y#jl;xDdk;0Lp)5`>w*G$pHDes)3*USiZfIh?l
z5Hl8Xf}%TxA(%na!0#4wPJa4iQ9cPqmC1U1ftsL#WF^xrCOv~&jG1U<%PlUOoXp~q
z<ow(MyQIl0_)P0xGJ(o1#v&z9DaBEqnwg$alA5B)1QB3QEK4m)Oi#VVnwywhl&{HD
z6a*@@K#HSS$`W(pH5qTQ6%?hWWmeo`ElDjdiND2?l30>h#0WMnDKR-aDL*eYlYxbS
z;TA_)YGO%gQEG7%OG;*5ew9dCV)EoXerbykO~zYX1x2YPMTwbtsVQ!m3``6RQEWx2
zCCSM-MT#IBML>iqh)@F&8X!V@@@4)RBFP~4yWC<eE-gqcDslx$*iEh%kWvPh-&{HQ
zi7D~9`6;P6MQk89vE<|z7vEyZOUz9zvYh-zK%P-=GLN7vW7uRZ!NV*(C8@=e<%ASP
zJwfJ#g9tMa0V>>!BqoOlX}C**_+lU;5=4Ls>LOzh%LGK2f(UaEp#>teL4+=d&<7C)
zAi@wta7;cd<RJ#GELb6~O9u&OGB7YiO_moH<F*EItw2QdWCvkY3s9^UfvX2l<>3OV
z9!`Lo0*ri&9AL=D#=*kI#KkDWD8R(Q$ipJUBF4<aC@^`su-D|T!eZ=9e2iR-VtkW@
zM2hP{-l<{8VknZRVOYRW!?=)f0b>f|LdIIAJgyozyM|!_Qw`HXP!-AK2aZuq<|2^R
zTb!kN#f7D*sa2^(U7+A+23LDU4xj*L1;uhPTDk*Qc5gtA<^a`c3`}f{LQGtYlbuCX
z>p{^1s_C3T(jbr3FfL#Kxn=?5LWWw#8ipE1P=(ga7{gS{RLfk#oW)YZl){w4+{;wU
zQo@?W2IsNXFx9Zuu%s~uGib6vTwDZ7I4c=%F{fnaYO>#AhdTQfYg$fzV#zJ8<ox=)
zlFand{L<ngQ0&}dODsyyO{}=Z=yr>}I6ti<H?cyK4eY&JteJTQr6sr6a`MwNONwtX
z#@}L%jL%3cxy2S4Ur>~vgy!d4EX5_M1z$nF0yo;2_!tFP_!#+^s)T((>CrQ<B(*3l
zak96VNIf?L0|O@m0|Ph=CNVHD)G)>{*D}|#)UwvFEMQp3Pz$QQ8Ecpqg2dTs*+J##
z0;U?a8kU7jwVWl)3s`Dc7Bbdy)i7kS)^JHOq%g8FNHU}_nKLjm*f79FnL(l@Y&9Is
zj5S=@a9Qgt_7s*H&J<QKWJ+O8Ve4gQWT@e+VXxt+;Ywkj!xGG($>CQ7&K95`E%E{-
zO;DPhtS4^i3}O_`1qsaor-7Wrq|_Wuj-n|bp?M$ztO69mMbkjsFpx6l%)F8!H&6oN
zF0z=sUtBC^CP;1xh?or`W`PK}u|*&=ZZSrphchp@+{nyJj|T-fBO?Ptu@ES0FmZ8k
zFbOd8aB?w;G0HJcwv(`8{582!;^pKtNgsW%v7lVFlBviCWDh9yq8VM}$H2f~$jHD@
z<UjeLWWEN-a}Xt~K{`OXAzX0A1SvLxDCSo(bzaF>1ojD76)yEAVD%uw;y?tb*n|W!
zD13iS{vZ{q;Q^8W8{!RTfw^!?9l(Z6&XradpAS;907NVV5sN{A%UfhHd9}2>Kpsd0
zR4NvMLceIr<Xh63yrAGODgY_tEV7s^Dq~>;j$@GZx0n-?lOg_t+jmO{9@wb``N<ja
zsd*{-lj~)|>OpCo6_$iSRuN9ZB}@yL!O4~r)M%*T#FA`Tz@m_3%L-1mobY7J22P!v
zDJ-DWS+B{8o;)>KiqH}eC_XgVkWwXTf-G7J3I{H5ZZBE}3a#ZJ0)L`h0a9YaV8#IH
z9e~T|Tl{5-IhiSmC7Jno;6&*ON|bz%M9Ij(B*4POEXF9uTqQU8qN3E~FgZbfUU0bx
zEdeGlPTsE(QeVqj!;r;X!?1t_RIn{%0yi5#tx^_7h7#5q)&*<}8JZbWxU!jx=G8D{
zv8Qm?Ff8DxVXk3?tK~@H>1AVN$OAQ{*;BYvc%f!M#lgDxN;pC0H^a<_$<;F~;DQ(y
z!(0n3bF#Q=K#hL>6oFo5Mh0+qfEU!Jj}52=MIs+Kx&$Q|z$FocCnU*G%U#0->OxFl
zEYe9~sO8DyDdDf-29-h547EHZ0x7}^1Zy~Jcxt$7xE8WBGcqz1{z(yWPGM?6vI5*~
z;HzQC5=!C7W}3iQ)KkNdC7i+~4(hK6mxwG71@lEk7#1>uI)}jwnqq#FC+Zk!uVjQI
z1aK<Gl@ntjndpq76XV3m{7PmTs~{1}z)*xsOB_TCNbwqiisRvm>(>%goB&qLz`ziq
z$y|h*oxwF)6g#N#l~|fww3&f{0aPx6(iTG$2S^y)!eK5;%qiLkvIw4^tCY(Ua}<(u
zCV!R+swZ5kw87j^lnBZeV&M9*XbZ^5t)SAIBPTySJ|#1`q^J(WWzI>>yTw+Tmswbv
z3NF;arAiSyNPQ)U*a;#)1w)Z01A`$0v=9ObZU+%NKtuzG06F*;TTx<ON`CGwmg3aZ
z6iwbDi^&&N#lpe)c^630ZV<5tWHDz!QEGB#ab|wrEw-Z6<iwnuTU;d>MXALZ`8g@Y
zMSDRCK}JHVtbHIZI3a)uP#JnlWby+=5$hOG9t9N$42%+tA|S}b$H?}dja`6Ifk})>
zhFORcTz@ly8d7XbJd9$DpcWPnW0msci;BvV?bLbeMHv_vKt2Z7<I@;HU2;gviV<F;
z$1v5h*0O;*&XNq^+MNkpyE89fPGMRIZt2uA*D%yDgWEfxdY)qe%R*4Ui5*O{moO|~
zg%p}4pt_ztg*k<#m#LPkge{A`z6QqSu3^aHNMX%pDq2><kj0tBmBpRTSjz*>u$(nK
zDU3Cs4jCJ`gT|K!>eX>(@uR9pVebWv3Gmf0ED!)SJQfJT9mre5yg&%VtK};ZuHkHE
zOlPd+uVKil=d0nZ;ZNb{WvUe@VOStiBe0M$g`tM8l_`ZGg`t%xjY*OLB$FjtBap%g
zcDG;&LzY;LAjJ16Tq#T`EWOMzOtnI_!Zpk_!jcSNUyC4oUBi$i4lZT5L2a27W_Sxn
zyoR%0k|Bi$BwWL_Kw=?74Wh1K2Nf+foHeX9A}PFc*n$}}`TRn_ZCY@CDw+sNNgN?X
ziFw7oo<5o^;CAgT-jc+k^wg60oW#o1qT*W|rNyc7$)zcYw**1SE3qIoz67KrEx#yN
zlewsip`L-E=qxCKode|<rdvGV7Gr!#YF=@E(Jgj}X~jhWASK}X9-b~W*&sy}xK#pf
zW`OFdq70C;I7{*ivg1MBDosgnCcGt3oLUl}UX+*;pPHALl#`kQsnRCbgOszSq?RNm
zXF%Fn(?M3v01=iT0@PZ+#Rm3{(Jhv|{G!|<X^{ANP``sYIWad%FtMa0H4j{Im4iEj
z#o#7?Q9sCNmg3B+)FL?$lPv_|DVE&S#JpQ9ASV`ufx2q9STajeb2UZLYf*5UW69*x
zveNaSTzrcqv7{ug2wZ!D{R{RvDDa}#L0*kdD~aOE%quQQ%u7y<PtGsRD=7kH_bA@<
zqQuJL<iwoR_~gXgTWrOpxw)w+x41wqh%d>^NiDv`17>IDrDP_j78hLtc@Ui0!GtY?
z9|OZJPH<-x)L!fbWqVLZjDd*@(wSoBW8?vKsla?bW-cZkCJAOPMj>VqMlME>8Yv|{
zMkz)f5iUkCMmeU*of_(VpfCY7;F&?qeYwf|G(ziZSyMPbHCPQJXvnLRF@+O}#{?Rk
zgH*1yY~UImTx-WLgPQiWoHgvAu{u!wUdvv?0cUfRurJ_9;R4lfCF~2pt@DLUj0`30
z3%F9ap`zR=JYZHm`vRU6UNDn=0dEaM7T;`!6u!AkAdwn|1^l2Ye<4#XR}EVYR|>xk
zLkUBcKn?Q(!4%MFi~zV)0p&yPTAmWnSe-DamY={F%ht(IB9bE5!O+aa$k5Kv&X~rO
zB9y|^!coHm@@6e>J3~8D8dHjJJ(3__iD(UX4Q~x!GqVdr?132OTK-yrTEP;L8i5-A
z4hBiEVKoA^LM5Pf3STp0iFgM?Gov&^jgT}$t#FNSmPCq3ib#qmXo#pps6?_xu$d9$
zpxA<1ks84o5lMzx(Hc=$I4zJ$;Yz6&OJQEf3=*ypT_C-XL4u)1bb-u55DDf<Go*-%
zK(xu0$YsedWULh}QCOf@Bf5~WR;)&Bfl`g|Y=#tx8j;xyDUx$pK;brlv2e=-#=?vV
zjD;#STnm)JgJe1}Ots>*5+x!v;<Fi2q-rE)Gt6afW~^sqsF8rVsD`UX+=gKSQ=u6m
zFk^#iC2K%Ke3Ft3DbkV*wNh}Nj3h&?bd40a$;4DNFNL92CXc5?wMM#`u|`Uop;o3u
zBt>?CT8(6lOpSDn6r{;CfvGSrMGln4z;=XyV+`DAQUQ%kf!l5JDGK0ro5ljo8m@H4
zg^abbHL_V+uwYM7lx9d#g7Fq;FJ$0hNKppq0FO1PfSOarlN)qdmBCpFSG(vPxaBZ8
zP{)Sorqp|g7UG&x9}tQOH>Ezo6q{;mGJ*?KNS6ke<G;Ywn`km36&|?Me+8=tl`)`x
z@vq4Rwjz$dghGl+QxyvGi!)0y%Tg7J5=&BlaR&K2D<qc|m8JgT&@akQ*2^kLkKzEA
zL!gEns6MD-PtHj!F1AvbY{(}XA`B`^B*5*v8irVbTE-5>8b(lVkz|<2RLBwx?Qnpj
z0@O9sWV*$ecZ;JSu_QA!ucR2_O;7^|+>I4s0(WEe_$2B{H@N6I$b%0+1lT&bsiI(0
z!3|ZWTkHjiMIf7Raimsc7MH~5XM?M{m;XWfR2Uc-s#rjt7OY}cuvI{GbBoqZ*5fm;
zFCyF|b%fb=iv<*J;P!15KZqG$lv-SxQ=*q#Ty~2wQIqKw3usjB7F$MYVoGX}rVhB?
zL^NI@ZPO}HMaG<5P<o3suT(6zpz;=TK~BjnwzSOj(xTK`ECo6FC6oW#ir3#_FHS5=
zP0LI#$^v->RLvIM0<k882w0nzp$OEFy~SLWSx^KTRlUVrTvBw4H8(pYv#96@$SzKB
zbD<!=IQ15DZf4#sX3$6zTVioZWkKpKro4h%tfiTGB^E`X;jmkrX+`<DiA6<;mG!q+
zK=$8a1=Xy@NDXIjn;F!uzQt6Ka*NX?u_V#0C^0wn7F$VvJSZsGz!6cbDF?2NqxkYm
zOA1O$pn+8kX>y=7r<oH=lW#Gn6%^fKE-5G~I>o@iP+!yo@+a8G4?!&jUT_5qOK4F-
z$XrMr3>gb#EV#v30!kL(6dA=1vOcjSqsSK&7%ZU5`WAC$Zu%{j?9|HQTWn>CIi;ys
zTI}@<44|exD1yO>T9X-)9$$c51x}Qp^mvO8oDkr{5^q5jdlIN$1ZmGRi7<gi-*^~>
z7<m|Zn79~uz|}dZV&`Gu;E-YzVB}%~wfn^w`50N4K%@X82csCHf;>nmlahb{qY$GA
zXb2D91(0AA;}Kw*tZb+^c@{gb3uxG>1T@+PYF;xnGu1NIFa|Re*@Iex%tfHlks{Ct
zFSv0DohyV0fHM`Cu$?SuB*`1b1fBk4VB%w*Y-yxc57G?s3kZYQpoRvxmmvTeEo9DO
zNMTH2>V<WDnf*Xx>fE4aVo82{UVdJxCgUy6s??%<aI;a96`US!v4R7u2t3LGa@Z~2
z{DP9qTu>t~J~IV*64L`5!omn?xLL6vr-G6@17npE^1x<tsvZ{QljDr8M1V?4aFZL-
z0tdH;-9drGb&It)Be5VAJQ7pHzzCXwO-{|sxy4dkSX6?@#JAX?4e+AtpfvCYq?f(O
zV)6uIC8;7|P>Bo<OfUg*c@e0An=tvVu`4(rK|{{qz^efT9zzyG7GoADvLGzxTBa1n
z8pbk)B8wEJ1uUSZPzz`fniVuC4Qk{pWUOV*W2s@RWv-GcVFS0xSZf$(Gk_XbHO$RS
zwJhNbc_NGqAXvhg!q&{##2Cqt$7H}z!yvqX3)Fk7VTfl0*QxA&MYAXGHL)-RxfL`I
zlAKtOS(2CoiUUxg4o>h80_+jcAS0-BDe|AJW?CTtYS2R}HSlCb&g6Bbo^FuXMF`*G
zN>0p8&4~w1dfZ|K#}ULtke%QGtD-ap28L`<OoAG+44{m`vf0Y4iIGuc@?LWpebAi4
zEtbTh^x`6AQ0RiK6$G(VL2<-ZT9A@hlB&rLPF6+QlQ}Gu6>qVE)EnMnPAp1Cw@#Ch
zfnmaAAB$5y;G_Uf0N}CD*P!SGRTY|iMIS)yk01gRzeRsRtbZT^T%UjvM-gbS7hGBv
zF)=bQOwPAdvfl<u3y=}}B6*MtH-WT(I#NaaAg%z20DBxvfCBXvCuk@mGdD3kb;0C|
zmTrvmCQDdVXv@^FWr2FN+$rpm3|RuR8B#dsGC}5t85wHW76^i7)4`F*Gx?yE8RLe@
zKdfXNc=VHV<3S~nUO`^^%L$AO4B+X9f=o@JB2aPzR}w|6AXl(~2!t;o^~mIKYq@k&
zkc=6KFb5GJKNbA|6`JfIm!@PE-C|D8NrQzqd@Q60<kws5$@zI{xrrrFtSM>n$+=N1
zX_@Kq=rI7QHC9f3U@gWdv-!Jq5@S6)B5tvP8zJ#U;I`FcQ0P4Y5l=w`IFg=$SR26g
zxqgtpbCEsBji5njP2nO?om%t>q~J4%01r2VJqD@(k)reqNETFI7J+6+ioSvC)bAic
zkUxvS0pJ2M7c~?#p`)|lN(<B<1baD(Bdwq)9yGHV#Zdxffom>sO;f}LGKU*PfTJ8t
zfa9GP6q-f($??ghEJbChTPM%3^R5>yasoL6R6Z2BfLNf!51zm(0?%Zu1c~hj5eGoT
zX%O)UM2LWF5CsuZAOck6fb&%mD76<^fW$zdUgQDFGQ9Ee;9yQp&B=*JiJe}MSs>?u
zCo@5-M2bNff`gf-o<Wm?lY@stk3)(>n8T1Gfg_WHor9T!n?sdD03=i73R);Kd8@rP
zW9Q_D_GV_#nkNNhB-k6$jPO+|MId$&*fw~G6oC|7n(XVaSqmhq37yyjjey@`&Mz%N
zPemZtU7M`sC|i#+%s@lmNZP>mfqmD>z`$@3ROEnKJD?fMB9Mrt-!10i%Hkq$z6a&;
zTTDf%MLM9sQv(slK)HaeATci`u^60-qS%X56O&l;i}H$`CZBdxb_6B9Tg=I2Mz@#)
zJbl2WLln1fr9Z4@1J}b*Y`&E~`NhSMNn!5NlFXc9Juq3sKH0}fx*jx78O2$WnVYKz
z8c!_(b?%Ema~wsWxw;}Aa5E?;Gbyn&8LS|+xCk_aUj!QDFX9Ku3PNS!m0)oZsID#&
z1VsjDobeV%Nq$jshF)G?kqJnHFpR60Rw|vB3>t$;%(=ytRtjSmfk$)lC;xJiWdk)e
zii9U`an{cQ4LaW925T+@kAoJ0rl&!zvZCY+h=?AjHJnpiBn}Ea9&jrZG-#DtR9pn=
zU=)ETwWUCg5Gbewn*?qrfJRs#14J?)Ihn}?F5>Lqa<WKva<+?>I;ggY;!aL1C@Ia=
zODriVDv}2&<b_zL7nEA;ms(P!HF=MVq&H|;(=C1<&;o<J%)E5p{FKt1RB#%<#Rlbo
l5&%+KfhWsb95%UN<#wR*r#ND=f~(YI8&{dhDXuDvq5v(_U8?{9

diff --git a/code/models/model_interface.py b/code/models/model_interface.py
index 9f4f282..0186c06 100755
--- a/code/models/model_interface.py
+++ b/code/models/model_interface.py
@@ -10,6 +10,7 @@ from pathlib import Path
 from matplotlib import pyplot as plt
 import cv2
 from PIL import Image
+from pytorch_pretrained_vit import ViT
 
 #---->
 from MyOptimizer import create_optimizer
@@ -28,6 +29,13 @@ import torch.nn.functional as F
 import torchmetrics
 from torchmetrics.functional import stat_scores
 from torch import optim as optim
+from torch.optim.lr_scheduler import ReduceLROnPlateau
+
+from monai.config import KeysCollection
+from monai.data import Dataset, load_decathlon_datalist
+from monai.data.wsi_reader import WSIReader
+from monai.metrics import Cumulative, CumulativeAverage
+from monai.networks.nets import milmodel
 
 # from sklearn.metrics import roc_curve, auc, roc_curve_score
 
@@ -69,11 +77,19 @@ class ModelInterface(pl.LightningModule):
         super(ModelInterface, self).__init__()
         self.save_hyperparameters()
         self.n_classes = model.n_classes
-        self.load_model()
-        self.loss = create_loss(loss, model.n_classes)
-        # self.loss = AUCM_MultiLabel(num_classes = model.n_classes, device=self.device)
+        
+        if model.name == 'AttTrans':
+            self.model = milmodel.MILModel(num_classes=self.n_classes, pretrained=True, mil_mode='att_trans', backbone_num_features=1024)
+        else: self.load_model()
+        # self.loss = create_loss(loss, model.n_classes)
+        # self.loss = 
+        if self.n_classes>2:
+            self.aucm_loss = AUCM_MultiLabel(num_classes = model.n_classes, device=self.device)
+        else:
+            self.aucm_loss = AUCMLoss()
         # self.asl = AsymmetricLossSingleLabel()
-        # self.loss = LabelSmoothingCrossEntropy(smoothing=0.1)
+        self.loss = LabelSmoothingCrossEntropy(smoothing=0.1)
+
         # self.loss = 
         # print(self.model)
         self.model_name = model.name
@@ -99,7 +115,7 @@ class ModelInterface(pl.LightningModule):
         # print(self.experiment)
         #---->Metrics
         if self.n_classes > 2: 
-            self.AUROC = torchmetrics.AUROC(num_classes = self.n_classes)
+            self.AUROC = torchmetrics.AUROC(num_classes = self.n_classes, average='macro')
             
             metrics = torchmetrics.MetricCollection([torchmetrics.Accuracy(num_classes = self.n_classes,
                                                                            average='weighted'),
@@ -131,7 +147,9 @@ class ModelInterface(pl.LightningModule):
         # self.pr_curve = torchmetrics.BinnedPrecisionRecallCurve(num_classes = self.n_classes, thresholds=10)
         self.confusion_matrix = torchmetrics.ConfusionMatrix(num_classes = self.n_classes)                                                                    
         self.valid_metrics = metrics.clone(prefix = 'val_')
+        self.valid_patient_metrics = metrics.clone(prefix = 'val_patient_')
         self.test_metrics = metrics.clone(prefix = 'test_')
+        self.test_patient_metrics = metrics.clone(prefix = 'test_patient')
 
         #--->random
         self.shuffle = kargs['data'].data_shuffle
@@ -146,12 +164,16 @@ class ModelInterface(pl.LightningModule):
             self.feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/dino-vitb16')
             self.model_ft = ViTModel.from_pretrained('facebook/dino-vitb16')
         elif self.backbone == 'resnet18':
-            self.model_ft = models.resnet18(pretrained=True)
+            self.model_ft = models.resnet18(weights='IMAGENET1K_V1')
             # modules = list(resnet18.children())[:-1]
+            # frozen_layers = 8
+            # for child in self.model_ft.children():
+
             for param in self.model_ft.parameters():
                 param.requires_grad = False
             self.model_ft.fc = nn.Linear(512, self.out_features)
 
+
             # res18 = nn.Sequential(
             #     *modules,
             # )
@@ -235,22 +257,28 @@ class ModelInterface(pl.LightningModule):
 
     def forward(self, x):
         # print(x.shape)
+        if self.model_name == 'AttTrans':
+            return self.model(x)
         if self.model_ft:
+            x = x.squeeze(0)
             feats = self.model_ft(x).unsqueeze(0)
         else: 
             feats = x.unsqueeze(0)
+        
         return self.model(feats)
         # return self.model(x)
 
     def step(self, input):
 
-        input = input.squeeze(0).float()
-        logits, _ = self(input.contiguous()) 
+        input = input.float()
+        # logits, _ = self(input.contiguous()) 
+        logits = self(input.contiguous())
+        Y_hat = torch.argmax(logits, dim=1)
+        Y_prob = F.softmax(logits, dim = 1)
 
-        
 
-        Y_hat = torch.argmax(logits, dim=1)
-        Y_prob = F.softmax(logits, dim=1)
+        # Y_hat = torch.argmax(logits, dim=1)
+        # Y_prob = F.softmax(logits, dim=1)
 
         return logits, Y_prob, Y_hat
 
@@ -264,12 +292,19 @@ class ModelInterface(pl.LightningModule):
         # bag_idxs = torch.randperm(input.squeeze(0).shape[0])[:bag_size]
         # input = input.squeeze(0)[bag_idxs].unsqueeze(0)
 
-        label = label.float()
+        # label = label.float()
         
         logits, Y_prob, Y_hat = self.step(input) 
 
         #---->loss
         loss = self.loss(logits, label)
+
+        one_hot_label = torch.nn.functional.one_hot(label, num_classes=self.n_classes)
+        # aucm_loss = self.aucm_loss(torch.sigmoid(logits), one_hot_label)
+        # total_loss = torch.mean(loss + aucm_loss)
+        Y = int(label)
+        # print(logits, label)
+        # loss = cross_entropy_torch(logits.squeeze(0), label)
         # loss = self.asl(logits, label.squeeze())
 
         #---->acc log
@@ -278,11 +313,14 @@ class ModelInterface(pl.LightningModule):
         # if self.n_classes == 2:
         #     Y = int(label[0][1])
         # else: 
-        Y = torch.argmax(label)
+        # Y = torch.argmax(label)
+        
             # Y = int(label[0])
         self.data[Y]["count"] += 1
         self.data[Y]["correct"] += (int(Y_hat) == Y)
-        self.log('loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        # self.log('total_loss', total_loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        # self.log('aucm_loss', aucm_loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
+        self.log('lsce_loss', loss, prog_bar=True, on_epoch=True, logger=True, batch_size=1, sync_dist=True)
 
         # if self.current_epoch % 10 == 0:
 
@@ -298,7 +336,7 @@ class ModelInterface(pl.LightningModule):
         #     self.loggers[0].experiment.add_image(f'{self.current_epoch}/input', grid)
 
 
-        return {'loss': loss, 'Y_prob': Y_prob, 'Y_hat': Y_hat, 'label': Y} 
+        return {'loss': loss, 'Y_prob': Y_prob, 'Y_hat': Y_hat, 'label': label} 
 
     def training_epoch_end(self, training_step_outputs):
         # logits = torch.cat([x['logits'] for x in training_step_outputs], dim = 0)
@@ -324,50 +362,65 @@ class ModelInterface(pl.LightningModule):
         if self.current_epoch % 10 == 0:
             self.log_confusion_matrix(max_probs, target, stage='train')
 
-        self.log('Train/auc', self.AUROC(probs, target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        self.log('Train/auc', self.AUROC(probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
     def validation_step(self, batch, batch_idx):
 
-        input, label, _ = batch
-        label = label.float()
+        input, label, (wsi_name, batch_names, patient) = batch
+        # label = label.float()
         
         logits, Y_prob, Y_hat = self.step(input) 
 
         #---->acc log
         # Y = int(label[0][1])
-        Y = torch.argmax(label)
+        # Y = torch.argmax(label)
+        loss = self.loss(logits, label)
+        # loss = self.loss(logits, label)
+        # print(loss)
+        Y = int(label)
 
         # print(Y_hat)
         self.data[Y]["count"] += 1
         self.data[Y]["correct"] += (int(Y_hat) == Y)
+        
         # self.data[Y]["correct"] += (Y_hat.item() == Y)
 
-        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : Y}
+        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': wsi_name, 'patient': patient, 'loss':loss}
 
 
     def validation_epoch_end(self, val_step_outputs):
+
+        # print(val_step_outputs)
+        # print(torch.cat([x['Y_prob'] for x in val_step_outputs], dim=0))
+        # print(torch.stack([x['Y_prob'] for x in val_step_outputs]))
+        
         logits = torch.cat([x['logits'] for x in val_step_outputs], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in val_step_outputs])
         max_probs = torch.stack([x['Y_hat'] for x in val_step_outputs])
-        target = torch.stack([x['label'] for x in val_step_outputs])
+        target = torch.stack([x['label'] for x in val_step_outputs], dim=0).int()
+        slide_names = [x['name'] for x in val_step_outputs]
+        patients = [x['patient'] for x in val_step_outputs]
+
+        loss = torch.stack([x['loss'] for x in val_step_outputs])
+        # loss = torch.cat([x['loss'] for x in val_step_outputs])
+        # print(loss.shape)
         
-        self.log_dict(self.valid_metrics(logits, target),
+
+        # self.log('val_loss', cross_entropy_torch(logits.squeeze(), target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        self.log('val_loss', loss, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        
+        # print(logits)
+        # print(target)
+        self.log_dict(self.valid_metrics(max_probs.squeeze(), target.squeeze()),
                           on_epoch = True, logger = True, sync_dist=True)
         
-        #---->
-        # logits = logits.long()
-        # target = target.squeeze().long()
-        # logits = logits.squeeze(0)
+
         if len(target.unique()) != 1:
-            self.log('val_auc', self.AUROC(probs, target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            self.log('val_auc', self.AUROC(probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            # self.log('val_patient_auc', self.AUROC(patient_score, patient_target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         else:    
             self.log('val_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
-        self.log('val_loss', cross_entropy_torch(logits, target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
-        
-
-        precision, recall, thresholds = self.PRC(probs, target)
-
 
 
         # print(max_probs.squeeze(0).shape)
@@ -376,6 +429,62 @@ class ModelInterface(pl.LightningModule):
 
         #----> log confusion matrix
         self.log_confusion_matrix(max_probs, target, stage='val')
+
+        #----> log per patient metrics
+        complete_patient_dict = {}
+        patient_list = []            
+        patient_score = []      
+        patient_target = []
+
+        for p, s, pr, t in zip(patients, slide_names, probs, target):
+            if p not in complete_patient_dict.keys():
+                complete_patient_dict[p] = [(s, pr)]
+                patient_target.append(t)
+            else:
+                complete_patient_dict[p].append((s, pr))
+
+       
+
+        for p in complete_patient_dict.keys():
+            score = []
+            for (slide, probs) in complete_patient_dict[p]:
+                # max_probs = torch.argmax(probs)
+                # if self.n_classes == 2:
+                #     score.append(max_probs)
+                # else: score.append(probs)
+                score.append(probs)
+
+            # if self.n_classes == 2:
+                # score =
+            score = torch.mean(torch.stack(score), dim=0) #.cpu().detach().numpy()
+            # complete_patient_dict[p]['score'] = score
+            # print(p, score)
+            # patient_list.append(p)    
+            patient_score.append(score)    
+
+        patient_score = torch.stack(patient_score)
+        # print(patient_target)
+        # print(torch.cat(patient_target))
+        # print(self.AUROC(patient_score.squeeze(), torch.cat(patient_target)))
+
+        
+        patient_target = torch.cat(patient_target)
+
+        # print(patient_score.shape)
+        # print(patient_target.shape)
+        
+        if len(patient_target.unique()) != 1:
+            self.log('val_patient_auc', self.AUROC(patient_score.squeeze(), patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('val_patient_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        
+        self.log_dict(self.valid_patient_metrics(patient_score, patient_target),
+                          on_epoch = True, logger = True, sync_dist=True)
+        
+            
+
+        # precision, recall, thresholds = self.PRC(probs, target)
+
         
 
         #---->acc log
@@ -394,178 +503,117 @@ class ModelInterface(pl.LightningModule):
             self.count = self.count+1
             random.seed(self.count*50)
 
-    def test_step(self, batch, batch_idx):
 
-        torch.set_grad_enabled(True)
-        data, label, (wsi_name, batch_names) = batch
-        wsi_name = wsi_name[0]
-        label = label.float()
-        # logits, Y_prob, Y_hat = self.step(data) 
-        # print(data.shape)
-        data = data.squeeze(0).float()
-        logits, attn = self(data)
-        attn = attn.detach()
-        logits = logits.detach()
-
-        Y = torch.argmax(label)
-        Y_hat = torch.argmax(logits, dim=1)
-        Y_prob = F.softmax(logits, dim = 1)
-        
-        #----> Get GradCam maps, map each instance to attention value, assemble, overlay on original WSI 
-        if self.model_name == 'TransMIL':
-           
-            target_layers = [self.model.layer2.norm] # 32x32
-            # target_layers = [self.model_ft[0].features[-1]] # 32x32
-            self.cam = GradCAM(model=self.model, target_layers = target_layers, use_cuda=True, reshape_transform=self.reshape_transform) #, reshape_transform=self.reshape_transform
-            # self.cam_ft = GradCAM(model=self.model, target_layers = target_layers_ft, use_cuda=True) #, reshape_transform=self.reshape_transform
-        else:
-            target_layers = [self.model.attention_weights]
-            self.cam = GradCAM(model = self.model, target_layers = target_layers, use_cuda=True)
-
-
-        data_ft = self.model_ft(data).unsqueeze(0).float()
-        instance_count = data.size(0)
-        target = [ClassifierOutputTarget(Y)]
-        grayscale_cam = self.cam(input_tensor=data_ft, targets=target)
-        grayscale_cam = torch.Tensor(grayscale_cam)[:instance_count, :]
-
-        # attention_map = grayscale_cam[:, :, 1].squeeze()
-        # attention_map = F.relu(attention_map)
-        # mask = torch.zeros((instance_count, 3, 256, 256)).to(self.device)
-        # for i, v in enumerate(attention_map):
-        #     mask[i, :, :, :] = v
-
-        # mask = self.assemble(mask, batch_names)
-        # mask = (mask - mask.min())/(mask.max()-mask.min())
-        # mask = mask.cpu().numpy()
-        # wsi = self.assemble(data, batch_names)
-        # wsi = wsi.cpu().numpy()
-
-        # def show_cam_on_image(img, mask):
-        #     heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
-        #     heatmap = np.float32(heatmap) / 255
-        #     cam = heatmap*0.4 + np.float32(img)
-        #     cam = cam / np.max(cam)
-        #     return cam
-
-        # wsi = show_cam_on_image(wsi, mask)
-        # wsi = ((wsi-wsi.min())/(wsi.max()-wsi.min()) * 255.0).astype(np.uint8)
-        
-        # img = Image.fromarray(wsi)
-        # img = img.convert('RGB')
-        
-
-        # output_path = self.save_path / str(Y.item())
-        # output_path.mkdir(parents=True, exist_ok=True)
-        # img.save(f'{output_path}/{wsi_name}.jpg')
 
+    def test_step(self, batch, batch_idx):
 
-        #----> Get Topk Tiles and Topk Patients
-        summed = torch.mean(grayscale_cam, dim=2)
-        topk_tiles, topk_indices = torch.topk(summed.squeeze(0), 5, dim=0)
-        topk_data = data[topk_indices].detach()
+        input, label, (wsi_name, batch_names, patient) = batch
+        label = label.float()
         
-        # target_ft = 
-        # grayscale_cam_ft = self.cam_ft(input_tensor=data, )
-        # for i in range(data.shape[0]):
-            
-            # vis_img = data[i, :, :, :].cpu().numpy()
-            # vis_img = np.transpose(vis_img, (1,2,0))
-            # print(vis_img.shape)
-            # cam_img = grayscale_cam.squeeze(0)
-        # cam_img = self.reshape_transform(grayscale_cam)
-
-        # print(cam_img.shape)
-            
-            # visualization = show_cam_on_image(vis_img, cam_img, use_rgb=True)
-            # visualization = ((visualization/visualization.max())*255.0).astype(np.uint8)
-            # print(visualization)
-        # cv2.imwrite(f'{test_path}/{Y}/{name}/gradcam.jpg', cam_img)
+        logits, Y_prob, Y_hat = self.step(input) 
 
         #---->acc log
-        Y = torch.argmax(label)
+        Y = int(label)
+        # Y = torch.argmax(label)
+
+        # print(Y_hat)
         self.data[Y]["count"] += 1
-        self.data[Y]["correct"] += (Y_hat.item() == Y)
+        self.data[Y]["correct"] += (int(Y_hat) == Y)
+        # self.data[Y]["correct"] += (Y_hat.item() == Y)
 
-        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : Y, 'name': wsi_name, 'topk_data': topk_data} #
-        # return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': name} #, 'topk_data': topk_data
+        return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : label, 'name': wsi_name, 'patient': patient}
 
     def test_epoch_end(self, output_results):
         logits = torch.cat([x['logits'] for x in output_results], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in output_results])
         max_probs = torch.stack([x['Y_hat'] for x in output_results])
-        # target = torch.stack([x['label'] for x in output_results], dim = 0)
-        target = torch.stack([x['label'] for x in output_results])
-        # target = torch.argmax(target, dim=1)
-        patients = [x['name'] for x in output_results]
-        topk_tiles = [x['topk_data'] for x in output_results]
-        #---->
-        auc = self.AUROC(probs, target)
-        fpr, tpr, thresholds = self.ROC(probs, target)
-        fpr = fpr.cpu().numpy()
-        tpr = tpr.cpu().numpy()
+        target = torch.stack([x['label'] for x in output_results]).int()
+        slide_names = [x['name'] for x in output_results]
+        patients = [x['patient'] for x in output_results]
+        
+        self.log_dict(self.test_metrics(max_probs.squeeze(), target.squeeze()),
+                          on_epoch = True, logger = True, sync_dist=True)
+        self.log('test_loss', cross_entropy_torch(logits.squeeze(), target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
-        plt.figure(1)
-        plt.plot(fpr, tpr)
-        plt.xlabel('False positive rate')
-        plt.ylabel('True positive rate')
-        plt.title('ROC curve')
-        plt.savefig(f'{self.save_path}/roc.jpg')
-        # self.loggers[0].experiment.add_figure(f'{stage}/Confusion matrix', fig_, self.current_epoch)
+        if len(target.unique()) != 1:
+            self.log('test_auc', self.AUROC(probs, target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+            # self.log('val_patient_auc', self.AUROC(patient_score, patient_target), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('test_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
 
-        metrics = self.test_metrics(logits , target)
 
 
-        # metrics = self.test_metrics(max_probs.squeeze() , torch.argmax(target.squeeze(), dim=1))
-        metrics['test_auc'] = auc
+        #----> log confusion matrix
+        self.log_confusion_matrix(max_probs, target, stage='test')
 
-        # self.log('auc', auc, prog_bar=True, on_epoch=True, logger=True)
+        #----> log per patient metrics
+        complete_patient_dict = {}
+        patient_list = []            
+        patient_score = []      
+        patient_target = []
+        patient_class_score = 0
+
+        for p, s, pr, t in zip(patients, slide_names, probs, target):
+            if p not in complete_patient_dict.keys():
+                complete_patient_dict[p] = [(s, pr)]
+                patient_target.append(t)
+            else:
+                complete_patient_dict[p].append((s, pr))
 
-        #---->get highest scoring patients for each class
-        # test_path = Path(self.save_path) / 'most_predictive' 
-        
-        # Path.mkdir(output_path, exist_ok=True)
-        topk, topk_indices = torch.topk(probs.squeeze(0), 5, dim=0)
-        for n in range(self.n_classes):
-            print('class: ', n)
-            
-            topk_patients = [patients[i[n]] for i in topk_indices]
-            topk_patient_tiles = [topk_tiles[i[n]] for i in topk_indices]
-            for x, p, t in zip(topk, topk_patients, topk_patient_tiles):
-                print(p, x[n])
-                patient = p
-                # outpath = test_path / str(n) / patient 
-                outpath = Path(self.save_path) / str(n) / patient
-                outpath.mkdir(parents=True, exist_ok=True)
-                for i in range(len(t)):
-                    tile = t[i]
-                    tile = tile.cpu().numpy().transpose(1,2,0)
-                    tile = (tile - tile.min())/ (tile.max() - tile.min()) * 255
-                    tile = tile.astype(np.uint8)
-                    img = Image.fromarray(tile)
+       
+
+        for p in complete_patient_dict.keys():
+            score = []
+            for (slide, probs) in complete_patient_dict[p]:
+                # if self.n_classes == 2:
+                #     if probs.argmax().item() == 1: # only if binary and if class 1 is more important!!! Normal vs Diseased or Rejection vs Other
+                #         score.append(probs)
                     
-                    img.save(f'{outpath}/{i}.jpg')
+                # else: 
+                score.append(probs)
+            # print(score)
+            score = torch.stack(score)
+            # print(score)
+            if self.n_classes == 2:
+                positive_positions = (score.argmax(dim=1) == 1).nonzero().squeeze()
+                if positive_positions.numel() != 0:
+                    score = score[positive_positions]
+            else:
+            # score = torch.stack(torch.score)
+            ## get scores that predict class 1:
+            # positive_scores = score.argmax(dim=1)
+            # score = torch.sum(score.argmax(dim=1))
+
+            # if score.item() == 1:
+            #     patient_class_score = 1
+                score = torch.mean(score) #.cpu().detach().numpy()
+            # complete_patient_dict[p]['score'] = score
+            # print(p, score)
+            # patient_list.append(p)    
+            patient_score.append(score)    
+
+        print(patient_score)
+
+        patient_score = torch.stack(patient_score)
+        # patient_target = torch.stack(patient_target)
+        patient_target = torch.cat(patient_target)
 
-            
-            
-        #----->visualize top predictive tiles
         
+        if len(patient_target.unique()) != 1:
+            self.log('test_patient_auc', self.AUROC(patient_score.squeeze(), patient_target.squeeze()), prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
+        else:    
+            self.log('test_patient_auc', 0.0, prog_bar=True, on_epoch=True, logger=True, sync_dist=True)
         
-
+        self.log_dict(self.test_patient_metrics(patient_score, patient_target),
+                          on_epoch = True, logger = True, sync_dist=True)
         
-                # img = img.squeeze(0).cpu().numpy()
-                # img = np.transpose(img, (1,2,0))
-                # # print(img)
-                # # print(grayscale_cam.shape)
-                # visualization = show_cam_on_image(img, grayscale_cam, use_rgb=True)
-
+            
 
-        for keys, values in metrics.items():
-            print(f'{keys} = {values}')
-            metrics[keys] = values.cpu().numpy()
-        #---->acc log
+        # precision, recall, thresholds = self.PRC(probs, target)
 
+        
 
+        #---->acc log
         for c in range(self.n_classes):
             count = self.data[c]["count"]
             correct = self.data[c]["correct"]
@@ -573,37 +621,25 @@ class ModelInterface(pl.LightningModule):
                 acc = None
             else:
                 acc = float(correct) / count
-            print('class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
+            print('test class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
         self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
-
-        #---->plot auroc curve
-        # stats = stat_scores(probs, target, reduce='macro', num_classes=self.n_classes)
-        # fpr = {}
-        # tpr = {}
-        # for n in self.n_classes: 
-
-        # fpr, tpr, thresh = roc_curve(target.cpu().numpy(), probs.cpu().numpy())
-        #[tp, fp, tn, fn, tp+fn]
-
-
-        self.log_confusion_matrix(max_probs, target, stage='test')
-        #---->
-        result = pd.DataFrame([metrics])
-        result.to_csv(Path(self.save_path) / f'test_result.csv', mode='a', header=not Path(self.save_path).exists())
-
-        # with open(f'{self.save_path}/test_metrics.txt', 'a') as f:
-
-        #     f.write([metrics])
+        
+        #---->random, if shuffle data, change seed
+        if self.shuffle == True:
+            self.count = self.count+1
+            random.seed(self.count*50)
 
     def configure_optimizers(self):
         # optimizer_ft = optim.Adam(self.model_ft.parameters(), lr=self.optimizer.lr*0.1)
         optimizer = create_optimizer(self.optimizer, self.model)
-        # optimizer = PESG(self.model, a=self.loss.a, b=self.loss.b, loss_fn=self.loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
+        # optimizer = PESG(self.model, loss_fn=self.aucm_loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
         # optimizer = PDSCA(self.model, loss_fn=self.loss, lr=self.optimizer.lr, margin=1.0, epoch_decay=2e-3, weight_decay=1e-5, device=self.device)
-        return optimizer     
+        scheduler = {'scheduler': ReduceLROnPlateau(optimizer, mode='min', factor=0.5), 'monitor': 'val_loss', 'frequency': 5}
+        
+        return [optimizer], [scheduler]     
 
-    def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
-        optimizer.zero_grad(set_to_none=True)
+    # def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
+    #     optimizer.zero_grad(set_to_none=True)
 
     def reshape_transform(self, tensor):
         # print(tensor.shape)
@@ -618,10 +654,12 @@ class ModelInterface(pl.LightningModule):
 
     def load_model(self):
         name = self.hparams.model.name
-        backbone = self.hparams.model.backbone
         # Change the `trans_unet.py` file name to `TransUnet` class name.
         # Please always name your model file name as `trans_unet.py` and
         # class name or funciton name corresponding `TransUnet`.
+        if name == 'ViT':
+            self.model = ViT
+
         if '_' in name:
             camel_name = ''.join([i.capitalize() for i in name.split('_')])
         else:
@@ -686,7 +724,7 @@ class ModelInterface(pl.LightningModule):
         if stage == 'train':
             self.loggers[0].experiment.add_figure(f'{stage}/Confusion matrix', fig_, self.current_epoch)
         else:
-            fig_.savefig(f'{self.loggers[0].log_dir}/cm_test.png', dpi=400)
+            fig_.savefig(f'{self.loggers[0].log_dir}/cm_{stage}.png', dpi=400)
 
         fig_.clf()
 
diff --git a/code/monai_test.json b/code/monai_test.json
new file mode 100644
index 0000000..093623d
--- /dev/null
+++ b/code/monai_test.json
@@ -0,0 +1 @@
+{"training": [{"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}, {"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}, {"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}], "validation": [{"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}]}
\ No newline at end of file
diff --git a/code/test_visualize.py b/code/test_visualize.py
index 5e4a5e7..79f814c 100644
--- a/code/test_visualize.py
+++ b/code/test_visualize.py
@@ -38,7 +38,7 @@ def make_parse():
     parser.add_argument('--config', default='../DeepGraft/TransMIL.yaml',type=str)
     parser.add_argument('--version', default=0,type=int)
     parser.add_argument('--epoch', default='0',type=str)
-    parser.add_argument('--gpus', default = 2, type=int)
+    parser.add_argument('--gpus', default = 0, type=int)
     parser.add_argument('--loss', default = 'CrossEntropyLoss', type=str)
     parser.add_argument('--fold', default = 0)
     parser.add_argument('--bag_size', default = 10000, type=int)
@@ -54,6 +54,7 @@ class custom_test_module(ModelInterface):
     def test_step(self, batch, batch_idx):
 
         torch.set_grad_enabled(True)
+
         input_data, label, (wsi_name, batch_names, patient) = batch
         patient = patient[0]
         wsi_name = wsi_name[0]
@@ -61,14 +62,18 @@ class custom_test_module(ModelInterface):
         # logits, Y_prob, Y_hat = self.step(data) 
         # print(data.shape)
         input_data = input_data.squeeze(0).float()
-        logits, attn = self(input_data)
-        attn = attn.detach()
-        logits = logits.detach()
+        # print(self.model_ft)
+        # print(self.model)
+        logits, _ = self(input_data)
+        # attn = attn.detach()
+        # logits = logits.detach()
 
         Y = torch.argmax(label)
         Y_hat = torch.argmax(logits, dim=1)
         Y_prob = F.softmax(logits, dim=1)
 
+        
+
         # print('Y_hat:', Y_hat)
         # print('Y_prob:', Y_prob)
 
@@ -87,9 +92,16 @@ class custom_test_module(ModelInterface):
             target_layers = [self.model.attention_weights]
             self.cam = GradCAM(model = self.model, target_layers = target_layers, use_cuda=True)
 
-        data_ft = self.model_ft(input_data).unsqueeze(0).float()
+        if self.model_ft:
+            data_ft = self.model_ft(input_data).unsqueeze(0).float()
+        else:
+            data_ft = input_data.unsqueeze(0).float()
         instance_count = input_data.size(0)
+        # data_ft.requires_grad=True
+        
         target = [ClassifierOutputTarget(Y)]
+        # print(target)
+        
         grayscale_cam = self.cam(input_tensor=data_ft, targets=target, eigen_smooth=True)
         grayscale_cam = torch.Tensor(grayscale_cam)[:instance_count, :] #.to(self.device)
 
@@ -100,6 +112,7 @@ class custom_test_module(ModelInterface):
         summed = torch.mean(grayscale_cam, dim=2)
         topk_tiles, topk_indices = torch.topk(summed.squeeze(0), k, dim=0)
         topk_data = input_data[topk_indices].detach()
+        # print(topk_tiles)
         
         #----------------------------------------------------
         # Log Correct/Count
@@ -115,7 +128,7 @@ class custom_test_module(ModelInterface):
         # print(input_data.shape)
         # print(len(batch_names))
         # if visualize:
-        #     self.save_attention_map(wsi_name, input_data, batch_names, grayscale_cam, target=Y)
+        # self.save_attention_map(wsi_name, batch_names, grayscale_cam, target=Y)
         # print('test_step_patient: ', patient)
 
         return {'logits' : logits, 'Y_prob' : Y_prob, 'Y_hat' : Y_hat, 'label' : Y, 'name': wsi_name, 'patient': patient, 'topk_data': topk_data} #
@@ -128,7 +141,6 @@ class custom_test_module(ModelInterface):
 
         pp = pprint.PrettyPrinter(indent=4)
 
-
         logits = torch.cat([x['logits'] for x in output_results], dim = 0)
         probs = torch.cat([x['Y_prob'] for x in output_results])
         max_probs = torch.stack([x['Y_hat'] for x in output_results])
@@ -158,7 +170,6 @@ class custom_test_module(ModelInterface):
         '''
         Patient
         -> slides:
-            
             -> SlideName:
                 ->probs = [0.5, 0.5] 
                 ->topk = [10,3,224,224]
@@ -180,11 +191,11 @@ class custom_test_module(ModelInterface):
                 score.append(complete_patient_dict[p]['slides'][s]['probs'])
             score = torch.mean(torch.stack(score), dim=0) #.cpu().detach().numpy()
             complete_patient_dict[p]['score'] = score
-            print(p, score)
+            # print(p, score)
             patient_list.append(p)    
             patient_score.append(score)    
 
-        print(patient_list)
+        # print(patient_list)
         #topk patients: 
 
 
@@ -212,37 +223,34 @@ class custom_test_module(ModelInterface):
             output_dict[class_name] = {}
             # class_name = str(n)
             print('class: ', class_name)
-            print(score)
+            # print(score)
             _, topk_indices = torch.topk(score, k_patient, dim=0) # change to 3
-            print(topk_indices)
+            # print(topk_indices)
 
             topk_patients = [patient_list[i] for i in topk_indices]
 
             patient_top_slides = {} 
             for p in topk_patients:
-                print(p)
+                # print(p)
                 output_dict[class_name][p] = {}
                 output_dict[class_name][p]['Patient_Score'] = complete_patient_dict[p]['score'].cpu().detach().numpy().tolist()
 
                 slides = list(complete_patient_dict[p]['slides'].keys())
                 slide_scores = [complete_patient_dict[p]['slides'][s]['probs'] for s in slides]
                 slide_scores = torch.stack(slide_scores)
-                print(slide_scores)
+                # print(slide_scores)
                 _, topk_slide_indices = torch.topk(slide_scores, k_slide, dim=0)
                 # topk_slide_indices = topk_slide_indices.squeeze(0)
-                print(topk_slide_indices[0])
+                # print(topk_slide_indices[0])
                 topk_patient_slides = [slides[i] for i in topk_slide_indices[0]]
                 patient_top_slides[p] = topk_patient_slides
 
                 output_dict[class_name][p]['Top_Slides'] = [{slides[i]: {'Slide_Score': slide_scores[i].cpu().detach().numpy().tolist()}} for i in topk_slide_indices[0]]
-            
-
-            
 
             for p in topk_patients: 
 
                 score = complete_patient_dict[p]['score']
-                print(p, score)
+                # print(p, score)
                 print('Topk Slides:')
                 for slide in patient_top_slides[p]:
                     print(slide)
@@ -250,21 +258,18 @@ class custom_test_module(ModelInterface):
                     outpath.mkdir(parents=True, exist_ok=True)
                 
                     topk_tiles = complete_patient_dict[p]['slides'][slide]['topk']
-                    for i in range(topk_tiles.shape[0]):
-                        tile = topk_tiles[i]
-                        tile = tile.cpu().numpy().transpose(1,2,0)
-                        tile = (tile - tile.min())/ (tile.max() - tile.min()) * 255
-                        tile = tile.astype(np.uint8)
-                        img = Image.fromarray(tile)
+                    # for i in range(topk_tiles.shape[0]):
+                    #     tile = topk_tiles[i]
+                    #     tile = tile.cpu().numpy().transpose(1,2,0)
+                    #     tile = (tile - tile.min())/ (tile.max() - tile.min()) * 255
+                    #     tile = tile.astype(np.uint8)
+                    #     img = Image.fromarray(tile)
                     
-                    img.save(f'{outpath}/{i}.jpg')
+                    #     img.save(f'{outpath}/{i}.jpg')
         output_dict['Test_Metrics'] = np_metrics
         pp.pprint(output_dict)
         json.dump(output_dict, open(f'{self.save_path}/test_metrics.json', 'w'))
 
-
-        
-
         for keys, values in metrics.items():
             print(f'{keys} = {values}')
             metrics[keys] = values.cpu().numpy()
@@ -286,20 +291,35 @@ class custom_test_module(ModelInterface):
         result = pd.DataFrame([metrics])
         result.to_csv(Path(self.save_path) / f'test_result.csv', mode='a', header=not Path(self.save_path).exists())
 
-    def save_attention_map(self, wsi_name, data, batch_names, grayscale_cam, target):
+    def save_attention_map(self, wsi_name, batch_names, grayscale_cam, target):
 
-        def get_coords(batch_names): #ToDO: Change function for precise coords
-            coords = []
+        # def get_coords(batch_names): #ToDO: Change function for precise coords
+        #     coords = []
             
-            for tile_name in batch_names: 
-                pos = re.findall(r'\((.*?)\)', tile_name[0])
-                x, y = pos[-1].split('_')
-                coords.append((int(x),int(y)))
-            return coords
-        
-        coords = get_coords(batch_names)
+        #     for tile_name in batch_names: 
+        #         pos = re.findall(r'\((.*?)\)', tile_name[0])
+        #         x, y = pos[-1].split('_')
+        #         coords.append((int(x),int(y)))
+        #     return coords
+
+        home = Path.cwd().parts[1]
+        jpg_dir = f'/{home}/ylan/data/DeepGraft/224_128um_annotated/Aachen_Biopsy_Slides/BLOCKS'
+
+        coords = batch_names.squeeze()
+        data = []
+        for co in coords:
+
+            tile_path =  Path(jpg_dir) / wsi_name / f'{wsi_name}_({co[0]}_{co[1]}).jpg'
+            img = np.asarray(Image.open(tile_path)).astype(np.uint8)
+            img = torch.from_numpy(img)
+            # print(img.shape)
+            data.append(img)
         # coords_set = set(coords)
-
+        # data = data.unsqueeze(0)
+        # print(data.shape)
+        data = torch.stack(data)
+        # print(data.max())
+        # print(data.min())
         # print(coords)
         # temp_data = data.cpu()
         # print(data.shape)
@@ -307,7 +327,7 @@ class custom_test_module(ModelInterface):
         # wsi = (wsi-wsi.min())/(wsi.max()-wsi.min())
         # wsi = wsi
         # print(coords)
-        print('wsi.shape: ', wsi.shape)
+        # print('wsi.shape: ', wsi.shape)
         #--> Get interpolated mask from GradCam
         W, H = wsi.shape[0], wsi.shape[1]
         
@@ -318,8 +338,8 @@ class custom_test_module(ModelInterface):
         input_h = 224
         
         mask = torch.ones(( int(W/input_h), int(H/input_h))).to(self.device)
-        print('mask.shape: ', mask.shape)
-        print('attention_map.shape: ', attention_map.shape)
+        # print('mask.shape: ', mask.shape)
+        # print('attention_map.shape: ', attention_map.shape)
         for i, (x,y) in enumerate(coords):
             mask[y][x] = attention_map[i]
         mask = mask.unsqueeze(0).unsqueeze(0)
@@ -343,12 +363,12 @@ class custom_test_module(ModelInterface):
         
         size = (20000, 20000)
 
-        img = Image.fromarray(wsi_cam)
-        img = img.convert('RGB')
-        img.thumbnail(size, Image.ANTIALIAS)
-        output_path = self.save_path / str(target.item())
-        output_path.mkdir(parents=True, exist_ok=True)
-        img.save(f'{output_path}/{wsi_name}_gradcam.jpg')
+        # img = Image.fromarray(wsi_cam)
+        # img = img.convert('RGB')
+        # img.thumbnail(size, Image.ANTIALIAS)
+        # output_path = self.save_path / str(target.item())
+        # output_path.mkdir(parents=True, exist_ok=True)
+        # img.save(f'{output_path}/{wsi_name}_gradcam.jpg')
 
         wsi = ((wsi-wsi.min())/(wsi.max()-wsi.min()) * 255.0).astype(np.uint8)
         img = Image.fromarray(wsi)
@@ -365,11 +385,11 @@ class custom_test_module(ModelInterface):
 
     def assemble(self, tiles, coords): # with coordinates (x-y)
         
-        def getPosition(img_name):
-            pos = re.findall(r'\((.*?)\)', img_name) #get strings in brackets (0-0)
-            a = int(pos[0].split('-')[0])
-            b = int(pos[0].split('-')[1])
-            return a, b
+        # def getPosition(img_name):
+        #     pos = re.findall(r'\((.*?)\)', img_name) #get strings in brackets (0-0)
+        #     a = int(pos[0].split('-')[0])
+        #     b = int(pos[0].split('-')[1])
+        #     return a, b
 
         position_dict = {}
         assembled = []
@@ -384,19 +404,23 @@ class custom_test_module(ModelInterface):
 
         for i, (x,y) in enumerate(coords):
             if x not in position_dict.keys():
-                position_dict[x] = [(y, i)]
-            else: position_dict[x].append((y, i))
+                position_dict[x.item()] = [(y.item(), i)]
+            else: position_dict[x.item()].append((y.item(), i))
         # x_positions = sorted(position_dict.keys())
 
         test_img_compl = torch.ones([(y_max+1)*224, (x_max+1)*224, 3]).to(self.device)
+
         for i in range(x_max+1):
             if i in position_dict.keys():
                 for j in position_dict[i]:
                     sample_idx = j[1]
-                    if tiles[sample_idx, :, :, :].shape != [3,224,224]:
-                        img = tiles[sample_idx, :, :, :].permute(1,2,0)
-                    else: 
-                        img = tiles[sample_idx, :, :, :]
+                    # if tiles[sample_idx, :, :, :].shape != [3,224,224]:
+                    #     img = tiles[sample_idx, :, :, :].permute(2,0,1)
+                    # else: 
+                    img = tiles[sample_idx, :, :, :]
+                    # print(img.shape)
+                    # print(img.max())
+                    # print(img.min())
                     y_coord = int(j[0])
                     x_coord = int(i)
                     test_img_compl[y_coord*224:(y_coord+1)*224, x_coord*224:(x_coord+1)*224, :] = img
@@ -453,6 +477,9 @@ def main(cfg):
     # cfg.Data.label_file = '/home/ylan/DeepGraft/training_tables/dg_limit_20_split_PAS_HE_Jones_norm_rest.json'
     # cfg.Data.patient_slide = '/homeStor1/ylan/DeepGraft/training_tables/cohort_stain_dict.json'
     # cfg.Data.data_dir = '/homeStor1/ylan/data/DeepGraft/224_128um_v2/'
+    if cfg.Model.backbone == 'features':
+        use_features = True
+    else: use_features = False
     DataInterface_dict = {
                 'data_root': cfg.Data.data_dir,
                 'label_path': cfg.Data.label_file,
@@ -461,6 +488,7 @@ def main(cfg):
                 'n_classes': cfg.Model.n_classes,
                 'backbone': cfg.Model.backbone,
                 'bag_size': cfg.Data.bag_size,
+                'use_features': use_features,
                 }
 
     dm = MILDataModule(**DataInterface_dict)
@@ -489,7 +517,8 @@ def main(cfg):
         # callbacks=cfg.callbacks,
         max_epochs= cfg.General.epochs,
         min_epochs = 200,
-        gpus=cfg.General.gpus,
+        accelerator='gpu',
+        devices=cfg.General.gpus,
         # gpus = [0,2],
         # strategy='ddp',
         amp_backend='native',
@@ -508,7 +537,7 @@ def main(cfg):
     # log_path = Path('lightning_logs/2/checkpoints')
     model_paths = list(log_path.glob('*.ckpt'))
 
-
+    # print(model_paths)
     if cfg.epoch == 'last':
         model_paths = [str(model_path) for model_path in model_paths if f'last' in str(model_path)]
     else:
@@ -583,5 +612,121 @@ if __name__ == '__main__':
     
 
     #---->main
-    main(cfg)
+    # main(cfg)
+    from models import TransMIL
+    from datasets.zarr_feature_dataloader_simple import ZarrFeatureBagLoader
+    from datasets.feature_dataloader import FeatureBagLoader
+    from torch.utils.data import random_split, DataLoader
+    import time
+    from tqdm import tqdm
+    import torchmetrics
+
+    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+    print(device)
+    scaler = torch.cuda.amp.GradScaler()
+    
+    log_path = Path(cfg.log_path) / 'checkpoints'
+    model_paths = list(log_path.glob('*.ckpt'))
+
+    # print(model_paths)
+    if cfg.epoch == 'last':
+        model_paths = [str(model_path) for model_path in model_paths if f'last' in str(model_path)]
+    else:
+        model_paths = [str(model_path) for model_path in model_paths if f'epoch={cfg.epoch}' in str(model_path)]
+
+    # checkpoint = torch.load(f'{cfg.log_path}/checkpoints/epoch=04-val_loss=0.4243-val_auc=0.8243-val_patient_auc=0.8282244801521301.ckpt')
+    # checkpoint = torch.load(f'{cfg.log_path}/checkpoints/epoch=73-val_loss=0.8574-val_auc=0.9682-val_patient_auc=0.9724310636520386.ckpt')
+    checkpoint = torch.load(model_paths[0])
+
+    hyper_parameters = checkpoint['hyper_parameters']
+    n_classes = hyper_parameters['model']['n_classes']
+
+    # model = TransMIL()
+    model = TransMIL(n_classes).to(device)
+    model_weights = checkpoint['state_dict']
+
+    for key in list(model_weights):
+        model_weights[key.replace('model.', '')] = model_weights.pop(key)
+    
+    model.load_state_dict(model_weights)
+
+    count = 0
+    # for m in model.modules():
+    #     if isinstance(m, torch.nn.BatchNorm2d):
+    #         # # m.track_running_stats = False
+    #         # count += 1 #skip the first BatchNorm layer in my ResNet50 based encoder
+    #         # if count >= 2:
+    #             # m.eval()
+    #         print(m)
+    #         m.track_running_stats = False
+    #         m.running_mean = None
+    #         m.running_var = None
+    
+    for param in model.parameters():
+        param.requires_grad = False
+    model.eval()
+
+    home = Path.cwd().parts[1]
+    data_root = f'/{home}/ylan/data/DeepGraft/224_128uM_annotated'
+    label_path = f'/{home}/ylan/DeepGraft/training_tables/dg_split_PAS_HE_Jones_norm_rest.json'
+    dataset = FeatureBagLoader(data_root, label_path=label_path, mode='test', cache=False, n_classes=n_classes)
+
+    dl = DataLoader(dataset, batch_size=1, num_workers=8)
+
+    
+
+    AUROC = torchmetrics.AUROC(num_classes = n_classes)
+
+    start = time.time()
+    test_logits = []
+    test_probs = []
+    test_labels = []
+    data = [{"count": 0, "correct": 0} for i in range(n_classes)]
+
+    for item in tqdm(dl): 
+
+        bag, label, (name, batch_coords, patient) = item
+        # label = label.float()
+        Y = int(label)
+
+        bag = bag.float().to(device)
+        # print(bag.shape)
+        bag = bag.unsqueeze(0)
+        with torch.cuda.amp.autocast():
+            logits = model(bag)
+        Y_hat = torch.argmax(logits, dim=1)
+        Y_prob = F.softmax(logits, dim = 1)
+
+        # print(Y_prob)
+
+        test_logits.append(logits)
+        test_probs.append(Y_prob)
+
+        test_labels.append(label)
+        data[Y]['count'] += 1
+        data[Y]['correct'] += (int(Y_hat) == Y)
+    probs = torch.cat(test_probs).detach().cpu()
+    targets = torch.stack(test_labels).squeeze().detach().cpu()
+    print(probs.shape)
+    print(targets.shape)
+
+    
+    for c in range(n_classes):
+        count = data[c]['count']
+        correct = data[c]['correct']
+        if count == 0:
+            acc = None
+        else: 
+            acc = float(correct) / count
+        print('class {}: acc {}, correct {}/{}'.format(c, acc, correct, count))
+
+
+
+    auroc = AUROC(probs, targets)
+    print(auroc)
+    end = time.time()
+    print('Bag Time: ', end-start)
+
+
+
  
\ No newline at end of file
diff --git a/code/train.py b/code/train.py
index e01bc52..53ab165 100644
--- a/code/train.py
+++ b/code/train.py
@@ -5,7 +5,8 @@ import glob
 
 from sklearn.model_selection import KFold
 
-from datasets.data_interface import DataInterface, MILDataModule, CrossVal_MILDataModule
+from datasets.data_interface import MILDataModule, CrossVal_MILDataModule
+# from datasets.data_interface import DataInterface, MILDataModule, CrossVal_MILDataModule
 from models.model_interface import ModelInterface
 from models.model_interface_dtfd import ModelInterface_DTFD
 import models.vision_transformer as vits
@@ -63,7 +64,9 @@ def make_parse():
     parser.add_argument('--stage', default='train', type=str)
     parser.add_argument('--config', default='DeepGraft/TransMIL.yaml',type=str)
     parser.add_argument('--version', default=2,type=int)
-    parser.add_argument('--gpus', nargs='+', default = [2], type=int)
+    parser.add_argument('--epoch', default='0',type=str)
+
+    parser.add_argument('--gpus', nargs='+', default = [0], type=int)
     parser.add_argument('--loss', default = 'CrossEntropyLoss', type=str)
     parser.add_argument('--fold', default = 0)
     parser.add_argument('--bag_size', default = 1024, type=int)
@@ -78,7 +81,7 @@ def make_parse():
 #---->main
 def main(cfg):
 
-    torch.set_num_threads(16)
+    torch.set_num_threads(8)
 
     #---->Initialize seed
     pl.seed_everything(cfg.General.seed)
@@ -111,6 +114,8 @@ def main(cfg):
                 'n_classes': cfg.Model.n_classes,
                 'bag_size': cfg.Data.bag_size,
                 'use_features': use_features,
+                'mixup': cfg.Data.mixup,
+                'aug': cfg.Data.aug,
                 }
 
     if cfg.Data.cross_val:
@@ -142,7 +147,7 @@ def main(cfg):
             logger=cfg.load_loggers,
             callbacks=cfg.callbacks,
             max_epochs= cfg.General.epochs,
-            min_epochs = 100,
+            min_epochs = 500,
             accelerator='gpu',
             # plugins=plugins,
             devices=cfg.General.gpus,
@@ -156,7 +161,7 @@ def main(cfg):
             # limit_train_batches=1,
             
             # deterministic=True,
-            check_val_every_n_epoch=5,
+            check_val_every_n_epoch=1,
         )
     else:
         trainer = Trainer(
@@ -167,7 +172,7 @@ def main(cfg):
             min_epochs = 100,
 
             # gpus=cfg.General.gpus,
-            accelerator='gpu'
+            accelerator='gpu',
             devices=cfg.General.gpus,
             amp_backend='native',
             # amp_level=cfg.General.amp_level,  
@@ -178,7 +183,7 @@ def main(cfg):
             # limit_train_batches=1,
             
             # deterministic=True,
-            check_val_every_n_epoch=5,
+            check_val_every_n_epoch=1,
         )
     # print(cfg.log_path)
     # print(trainer.loggers[0].log_dir)
@@ -215,18 +220,29 @@ def main(cfg):
         else:
             trainer.fit(model = model, datamodule = dm)
     else:
-        log_path = Path(cfg.log_path) / 'lightning_logs' / f'version_{cfg.version}' 
+        log_path = Path(cfg.log_path) / 'lightning_logs' / f'version_{cfg.version}'/'checkpoints' 
 
+        print(log_path)
         test_path = Path(log_path) / 'test'
-        for n in range(cfg.Model.n_classes):
-            n_output_path = test_path / str(n)
-            n_output_path.mkdir(parents=True, exist_ok=True)
+        # for n in range(cfg.Model.n_classes):
+        #     n_output_path = test_path / str(n)
+        #     n_output_path.mkdir(parents=True, exist_ok=True)
         # print(cfg.log_path)
         model_paths = list(log_path.glob('*.ckpt'))
-        model_paths = [str(model_path) for model_path in model_paths if 'epoch' in str(model_path)]
+        # print(model_paths)
+        # print(cfg.epoch)
+        # model_paths = [str(model_path) for model_path in model_paths if 'epoch' in str(model_path)]
+        if cfg.epoch == 'last':
+            model_paths = [str(model_path) for model_path in model_paths if f'last' in str(model_path)]
+        elif int(cfg.epoch) < 10:
+            cfg.epoch = f'0{cfg.epoch}'
+        
+        else:
+            model_paths = [str(model_path) for model_path in model_paths if f'epoch={cfg.epoch}' in str(model_path)]
         # model_paths = [f'{log_path}/epoch=279-val_loss=0.4009.ckpt']
+
         for path in model_paths:
-            print(path)
+            # print(path)
             new_model = model.load_from_checkpoint(checkpoint_path=path, cfg=cfg)
             trainer.test(model=new_model, datamodule=dm)
 
@@ -257,6 +273,7 @@ def check_home(cfg):
 if __name__ == '__main__':
 
     args = make_parse()
+
     cfg = read_yaml(args.config)
 
     #---->update
@@ -283,7 +300,10 @@ if __name__ == '__main__':
     cfg.task = task
     # task = Path(cfg.config).name[:-5].split('_')[2:][0]
     cfg.log_path = log_path / f'{cfg.Model.name}' / task / log_name 
-    
+
+
+
+    cfg.epoch = args.epoch
     
 
     # ---->main
diff --git a/code/utils/__pycache__/utils.cpython-39.pyc b/code/utils/__pycache__/utils.cpython-39.pyc
index 26de104113838d7e9f56ecbe311a3c8813d08d77..df4436cad807f2a148eddd27875e344eb4dcbe14 100644
GIT binary patch
delta 1491
zcmX@FFi(juk(ZZ?fq{YH>74dtC&`I?GK{Y#YM-rVWJqC15y(-=RgF^3Rf|%~RgY57
z)riu_)r`^v%Q5F@MQNolq%h}bM`@=pqzLBdfYdQEq^PF|r3kk$M(L(7q=@8*=jui2
zGcu%zriisLL>Z(oq=@Gz<r+pA<{CvA<r+sBgH4mjG08QJG6l0GbIfw<&7;g28B(ND
zq+1xGEWp;VMzMlv%Umla21bTlo+w^Mh7_3;*%pQ<>lB6*IVfaKk)Oks!VFTI!ki+P
z0^x(?6;c#an9=x3P(DZ{lDrDoK#+h^iZa+hl{sutHYutpYAp;=wkgcP44N8~`IsJy
zG2RmPNlh%u%gjp;N-RnB&Ckm$$uFAxfk}-~bh0?}G;L`H28JS01_lOA=3A_31&JjY
zw^-6Lb5d_HSLWudWGGT%U|{%Vt)G#fn>zU}v!kFS0|P@b8z@Q``4|Nli&Q4-vpnV2
z1eu}^A~Yt;v$`>APEKRh)dPt}aU~a*#pmRwr>7Qy4TBKM3=9mncyjU+Q=oFiijxnr
zYBCy5e$86J$HBnBz{9}6;0&_t-{eBJJjP3tAF#P_)G#ezDd9*Fnass5t1X%$*2`GS
z1`?}bTgb@BP{N%eo+8o9Sj!F;V+V=VFa<McN=~+8Pi2&vyo7xrqxIyI94e{`dM0VL
zy47WgIq?OFC7G#tCGm-+$+a(;K<;AB&CJu}D6*Mc%ONgn4-(-lPAp4}&q*vUxy1!$
zmgE=2XBSCL-pC=&Xf^pHhlVnyVYm3vjGz3GLnPxCOL0kRfhOZE0Z^c1=B3AH=9Q!t
zl_lnA^4wz0%}&WIDl!Av$?l$-ms*sVQ)CC?u_YE1q~@g*8G{1Z6f9DlT2z)=RAdC=
zFc+5;6`6qKgh6r1QDiXLk5k&^7I%JWNkM4|$dehjgi{lXaw_ACL5@#O%*jbgOwKNH
zVPIg0VlFOEyv3DM6rT%8AdpA`#eI?T<b|BVY+j(4D44v9Ghz~#I2#uu7gJH<WJxYr
z#<0oeT>5e)j2%qPjLnQL49$!S8EctJm=>_qFfC-{X6RtbVx8Q^RVD^84CGcWo1Dzz
zlH~l{0=xdnLfl0*ASq3zTdXDdMadbrm~-;ei&8<pV7kRw5nr62R+5`oaf`7M;wVtc
zDavGEV3+{%ZPw%i+~S_uAY<7|QuB)Qi*7L&m*(DLEzU?RNY!L1$^fa#1No7)0+iHl
zvE<|z7lVz`WQNG*!ju##PgdZO5bgu{6_f-S7^N7Mn8X;1N+)~s$TIp)&g7X4b}U$X
z5y&OC#FO*$%2JC;;&T#{Qge#q)AEbrOA?E-=YtFZr6vZZqBf9c82vQuCR^~zF=kH=
z=Z#=Yp1gr~6XUJP$$W*A<@jZJK+$%Kt)#LbGcSGe13nS^A_=gkiZe<}GINS#K|Wwh
zOi9U1E-6w0Ian1$XoCnt5CKXBki-E>1-FbQ%kxW5PUDwgG@V?}Zx9MfQALR$6NSN{
zte2OkmsXmWT#}iemzZ;lDKD?66l6>@hyb}4;zdx*Kv-bUa@geNr<CTT+A)HB=QWu{
sKv9Q>QHGI&g@ciYk&BIoi9=O{k%y6oiNlG5QHYU)Q-Bc;f3a}_08~VVV*mgE

delta 1448
zcmbQIbY6ikk(ZZ?fq{WR=B{%xtJp+78OD1Pwa?b)szj;esz#~iszs^gsz<3aGNdr1
zFz0AQX@F_XD9sdx6oDKqkUB<&6txt=6rmQzDD4!66yY54T%9OgMurrT6wwxjD7_Sh
z6tNurT!ScsT*D~CT%#x>uxa8s#<?a@CSbNij%lu0lo=yKie!pZ3qw@BIoKN3C{{3S
zk!#7sz{rrx6UEEOkRqKT)4~vCmBNrB3x&)na&y>Hm_ce&m{Vj^AbgO#e2PK}Ga6qJ
z$_J^$F0Uj3Hc)vETa<N*N{VU=LzGPlb1;LZ`eZ4l$CE{vRT)JlTQN^FkYZq9C=y{{
zV9;c~#hO--SdwvzB`q^2^%iqwZq7=EB1HxUhF=c)8Tq-X!6o@chLe9YI|_<3FfbIe
zfdZY8k5PcJNO`g^%TsO*kWp$NLVdC=s~e-n<Q7(4J&<S=S8{P#d`^CPdTJ5aL<ph8
zz`$^eCnrBK1u9o8KlwhZCZoY*PPPg@b_NCp9tH*mXOLwlCik=DF>aXrkIhA@gbNfe
zy^OVNHB1XwYS<PsGBVUK1v6-hO`gP_$|yPc2m3-s&B^+lD%uKqCTX=VnLze1=Vs<<
zauivE*z75pMPRRTfV`TQn44N;H~BY*w4@|RR<Jm+EH%D7H8VY<q&Pl5FQ>9fW3oP{
zMtyp<$U66jRW0_KjJMc}%M;^sii$P4Z?WcPr(_ltnS%7PyQk)*7A58s*@Ad%i3J6z
zc_~Fkpg=JJixj68m8BLH8G<;>#U({W#-OlgFVY7IaDr_Hxh#s?w=y{~CnqT}IlDND
zJHND~pp?BN9xOFko=a2@6e>~7#pQ{Rpaexz5jZ^gz~Kqk>j4VAsL6R;5vrVAj9g4b
zQ49<W$)FGcMIQ*WFfcHH;*uGpGGa0#x4uFLQx;1KizGucqYFbb<3h$-rV^$FtTjvv
z8Mzrcn6g+V`*D|vA*|%G$;m7(NzTtLuxpxpi@Qh!B&Er8i?t-bC^_R6b54GGQQYJN
z9+7&G1lXBHNem1O&7fdO1`&*)Kw!JYT9lZVo_dQpCpE7q1r#7`C8>GE`9-%_i!%}n
zQg1OAm*#4+6eWVRq%$xuM6p(YQqC=woc!Wqa6o7>LuAunO2D?=5>774FD{O^OU)}O
z$}gyl2Rp10<Y5j_5HLzHsxXQ%7UeN8Fua7ih0)*^qv0(^qg#x|w-`-sF`5?nOg7}5
z%nkMem{6L0o>xY6BFGeQcrq2$gS^4$r)fKhPmVEVvOHe|W6b0VzD-O~GL!xJB__M@
zM|iW9R2F3Br56c<TnNf|MWP^<7>EGnt6OZv8KoteIYlxc53(huq+}+S6e)wGRX~Im
zh%f*Jj`ZX}0ck;SQi7x~P*S>OG&xW}X0nNZIHSpA9|40<P_hI^`YmCwul4fs^wLW6
zl1no4^AdAzG3Dify<JoTG8g0la5y3n;8ew7lbfGXnv-e=3Y}t)$rl9_wPYAMm^m1E
g7`fPZm^f5L7<m|Zm^jQi7=;))I0P8s@E02=07-mfGynhq

diff --git a/code/utils/utils.py b/code/utils/utils.py
index 5596208..9a756d5 100755
--- a/code/utils/utils.py
+++ b/code/utils/utils.py
@@ -14,9 +14,11 @@ from pytorch_lightning import LightningModule
 from pytorch_lightning.loops.base import Loop
 from pytorch_lightning.loops.fit_loop import FitLoop
 from pytorch_lightning.trainer.states import TrainerFn
+from pytorch_lightning.callbacks import LearningRateMonitor
 from typing import Any, Dict, List, Optional, Type
 import shutil
 
+
 #---->read yaml
 import yaml
 from addict import Dict
@@ -103,7 +105,7 @@ def load_callbacks(cfg, save_path):
         # save_path = Path(cfg.log_path) / 'lightning_logs' / f'version_{cfg.resume_version}' / last.ckpt
         Mycallbacks.append(ModelCheckpoint(monitor = 'val_loss',
                                          dirpath = str(output_path),
-                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc: .4f}',
+                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc: .4f}-{val_patient_auc}',
                                          verbose = True,
                                          save_last = True,
                                          save_top_k = 2,
@@ -111,7 +113,15 @@ def load_callbacks(cfg, save_path):
                                          save_weights_only = True))
         Mycallbacks.append(ModelCheckpoint(monitor = 'val_auc',
                                          dirpath = str(output_path),
-                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}',
+                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc}',
+                                         verbose = True,
+                                         save_last = True,
+                                         save_top_k = 2,
+                                         mode = 'max',
+                                         save_weights_only = True))
+        Mycallbacks.append(ModelCheckpoint(monitor = 'val_patient_auc',
+                                         dirpath = str(output_path),
+                                         filename = '{epoch:02d}-{val_loss:.4f}-{val_auc:.4f}-{val_patient_auc}',
                                          verbose = True,
                                          save_last = True,
                                          save_top_k = 2,
@@ -121,6 +131,9 @@ def load_callbacks(cfg, save_path):
     swa = StochasticWeightAveraging(swa_lrs=1e-2)
     Mycallbacks.append(swa)
 
+    lr_monitor = LearningRateMonitor(logging_interval='step')
+    Mycallbacks.append(lr_monitor)
+
     return Mycallbacks
 
 #---->val loss
@@ -128,7 +141,8 @@ import torch
 import torch.nn.functional as F
 def cross_entropy_torch(x, y):
     x_softmax = [F.softmax(x[i], dim=0) for i in range(len(x))]
-    x_log = torch.tensor([torch.log(x_softmax[i][y[i]]) for i in range(y.shape[0])])
+    x_log = torch.tensor([torch.log(x_softmax[i][y[i]]) for i in range(len(y))])
+    # x_log = torch.tensor([torch.log(x_softmax[i][y[i]]) for i in range(y.shape[0])])
     loss = - torch.sum(x_log) / y.shape[0]
     return loss
 
diff --git a/monai_test.json b/monai_test.json
new file mode 100644
index 0000000..093623d
--- /dev/null
+++ b/monai_test.json
@@ -0,0 +1 @@
+{"training": [{"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}, {"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}, {"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}], "validation": [{"image": "Aachen_KiBiDatabase_KiBiAcRCIQ360_01_018_PAS.svs", "label": 0}]}
\ No newline at end of file
diff --git a/paper_structure.md b/paper_structure.md
new file mode 100644
index 0000000..c674719
--- /dev/null
+++ b/paper_structure.md
@@ -0,0 +1,35 @@
+# Paper Outline
+
+## Abstract
+
+## Introduction
+
+## Methods
+
+    - Fig 1: Model/Workflow
+
+## Dataset
+
+    - Fig: cohorts, data selection
+    - Fig: Preprocessing
+
+## Results
+
+    - Fig: Metrics on Testset for each task:
+
+| Model        | Accuracy | Precision | Recall | AUROC |
+| ------------ | -------- | --------- | ------ | ----- |
+| Resnet18     |          |           |        |       |
+| ViT          |          |           |        |       |
+| CLAM         |          |           |        |       |
+| AttentionMIL |          |           |        |       |
+| TransMIL     |          |           |        |       |
+|              |          |           |        |       |
+
+    - Fig: AUROC Curves (Best Model, Rest in Appendix)
+
+    - Fig: Attention Maps (Best Model, Rest in Appendix)
+
+## Discussion
+
+## Appendix
diff --git a/project_plan.md b/project_plan.md
new file mode 100644
index 0000000..b0e8379
--- /dev/null
+++ b/project_plan.md
@@ -0,0 +1,102 @@
+#   Benchmarking weakly supervised deep learning models for transplant pathology classification
+
+With this project, we aim to establish a benchmark for weakly supervised deep learning models for transplant pathology classification, especially for multiple instance learning approaches. 
+
+
+## Cohorts:
+
+#### Original Lancet Set:
+
+    * Training:
+        * AMS: 1130 Biopsies (3390 WSI)
+        * Utrecht: 717 Biopsies (2151 WSI)
+    * Testing:
+        * Aachen: 101 Biopsies (303 WSI)
+
+
+#### Extended:
+
+* Training:
+  * AMS + Utrecht + Leuven
+* Testing:
+  * Aachen_extended:
+
+## Models:
+
+    For our Benchmark, we chose the following models: 
+
+    - AttentionMIL
+    - Resnet18/50
+    - ViT
+    - CLAM
+    - TransMIL
+    - Monai MIL (optional)
+
+    Resnet18 and Resnet50 are basic CNNs that can be applied for a variety of tasks. Although domain or task specific architectures mostly outperform them, they remain a good baseline for comparison. 
+
+    The vision transformer is the first transformer based model that was adapted to computer vision tasks. Benchmarking on ViT can provide more insight on the performance of generic transformer based models on multiple instance learning. 
+
+    The AttentionMIL was the first simple, yet relatively successful deep MIL model and should be used as a baseline for benchmarking MIL methods. 
+
+    CLAM is a recent model proposed by the Mahmood lab which was explicitly trained for histopathological whole slide images and should be used as a baseline for benchmarking MIL methods in histopathology. 
+
+    TransMIL is another model proposed by Shao et al, which achieved SOTA on histopathological WSI classification tasks using MIL. It was benchmarked on TCGA and compared to CLAM and AttMIL. It utilizes the self-attention module from transformer models.
+
+    Monai MIL (not an official name) is a MIL architecture proposed by Myronenko et al. (Nvidia). It applies the self-attention mechanism as well. It is included because it shows promising results and it's included in MONAI. 
+
+## Tasks:
+
+    The Original tasks mimic the ones published in the original DeepGraft Lancet paper. 
+    Before we go for more challenging tasks (future tasks), we want to establish that our models outperform the simpler approach from the previous paper and that going for MIL in this setting is indeed profitable. 
+
+    All available classes: 
+        * Normal
+        * TCMR
+        * ABMR
+        * Mixed
+        * Viral
+        * Other
+
+#### Original:
+
+    The explicit classes are simplified/grouped together such as this: 
+    Diseased = all classes other than Normal 
+    Rejection = TCMR, ABMR, Mixed 
+
+    - (1) Normal vs Diseased (all other classes)
+    - (2) Rejection vs (Viral + Others)
+    - (3) Normal vs Rejection vs (Viral + Others)
+
+#### Future:
+
+    After validating Original tasks, the next step is to challenge the models by attempting more complicated tasks. 
+    These experiments may vary depending on the results from previous experiments
+
+    - (4) Normal vs TCMR vs Mixed vs ABMR vs Viral vs Others
+    - (5) TCMR vs Mixed vs ABMR
+
+## Plan:
+
+    1. Train models for current tasks on AMS+Utrecht -> Validate on Aachen
+
+    2. Visualization, AUC Curves
+
+    3. Train best model on extended training set (AMS+Utrecht+Leuven) (Tasks 1,2,3) -> Validate on Aachen_extended
+        - Investigate if a larger training cohort increases performance
+    4. Train best model on extended dataset on future tasks (Task 4, 5)
+
+
+    Notes: 
+        * Resnet18, ViT and CLAM are all trained on HIA (Training Framework from Kather / Narmin)
+    
+
+## Status: 
+
+        - Resnet18: Trained on all tasks via HIA  
+        - Vit: Trained on all tasks via HIA 
+        - CLAM: Trained on (1) via HIA 
+        - TransMIL: Trained, but overfitting
+            - Check if the problems are not on model side by evaluating on RCC data. 
+            - (mixing in 10 slides from Aachen increases auc performance from 0.7 to 0.89)
+        - AttentionMIL: WIP
+        - Monai MIL: WIP
-- 
GitLab