From 51899280acc6201cb4d5a95acda27439c4ed556d Mon Sep 17 00:00:00 2001 From: NBouteldja <40466985+NBouteldja@users.noreply.github.com> Date: Tue, 22 Jun 2021 21:07:37 +0200 Subject: [PATCH] Add files via upload --- data/__init__.py | 90 ++++ data/base_dataset.py | 161 +++++++ data/image_folder.py | 84 ++++ data/preprocessing.py | 39 ++ data/unaligned_dataset.py | 81 ++++ evaluation.py | 85 ++++ image_pool.py | 56 +++ models/U_GAT_IT_2_model.py | 568 ++++++++++++++++++++++++ models/__init__.py | 67 +++ models/base_model.py | 257 +++++++++++ models/cycle_gan_model.py | 318 ++++++++++++++ models/model.py | 217 ++++++++++ models/networks.py | 868 +++++++++++++++++++++++++++++++++++++ models/nutils.py | 612 ++++++++++++++++++++++++++ models/radam.py | 212 +++++++++ models/utils.py | 310 +++++++++++++ options.py | 214 +++++++++ postprocessing.py | 108 +++++ test.py | 149 +++++++ train.py | 82 ++++ train_UGATIT.py | 95 ++++ utils.py | 793 +++++++++++++++++++++++++++++++++ 22 files changed, 5466 insertions(+) create mode 100644 data/__init__.py create mode 100644 data/base_dataset.py create mode 100644 data/image_folder.py create mode 100644 data/preprocessing.py create mode 100644 data/unaligned_dataset.py create mode 100644 evaluation.py create mode 100644 image_pool.py create mode 100644 models/U_GAT_IT_2_model.py create mode 100644 models/__init__.py create mode 100644 models/base_model.py create mode 100644 models/cycle_gan_model.py create mode 100644 models/model.py create mode 100644 models/networks.py create mode 100644 models/nutils.py create mode 100644 models/radam.py create mode 100644 models/utils.py create mode 100644 options.py create mode 100644 postprocessing.py create mode 100644 test.py create mode 100644 train.py create mode 100644 train_UGATIT.py create mode 100644 utils.py diff --git a/data/__init__.py b/data/__init__.py new file mode 100644 index 0000000..b2e5b4a --- /dev/null +++ b/data/__init__.py @@ -0,0 +1,90 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +"""This package includes all the modules related to data loading and preprocessing + + To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset. + You need to implement four functions: + -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point from data loader. + -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. + +Now you can use the dataset class by specifying flag '--dataset_mode dummy'. +See our template dataset class 'template_dataset.py' for more details. +""" +import importlib +import torch.utils.data +from data.base_dataset import BaseDataset + + +def find_dataset_using_name(dataset_name): + """Import the module "data/[dataset_name]_dataset.py". + + In the file, the class called DatasetNameDataset() will + be instantiated. It has to be a subclass of BaseDataset, + and it is case-insensitive. + """ + dataset_filename = "data." 
+ dataset_name + "_dataset" + datasetlib = importlib.import_module(dataset_filename) + + dataset = None + target_dataset_name = dataset_name.replace('_', '') + 'dataset' + for name, cls in datasetlib.__dict__.items(): + if name.lower() == target_dataset_name.lower() \ + and issubclass(cls, BaseDataset): + dataset = cls + + if dataset is None: + raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) + + return dataset + + +def get_option_setter(dataset_name): + """Return the static method <modify_commandline_options> of the dataset class.""" + dataset_class = find_dataset_using_name(dataset_name) + return dataset_class.modify_commandline_options + + +def create_dataset(opt): + """Create a dataset given the option. + + This function wraps the class CustomDatasetDataLoader. + This is the main interface between this package and 'train.py'/'test.py' + """ + data_loader = CustomDatasetDataLoader(opt) + dataset = data_loader.load_data() + return dataset + + +class CustomDatasetDataLoader: + """Wrapper class of Dataset class that performs multi-threaded data loading""" + + def __init__(self, opt): + """Initialize this class + + Step 1: create a dataset instance given the name [dataset_mode] + Step 2: create a multi-threaded data loader. + """ + self.opt = opt + dataset_class = find_dataset_using_name(opt.dataset_mode) + self.dataset = dataset_class(opt) + opt.logger.info("dataset [%s] was created" % type(self.dataset).__name__) + self.dataloader = torch.utils.data.DataLoader( + self.dataset, + batch_size=opt.batch_size, + shuffle=not opt.serial_batches, + num_workers=int(opt.num_threads)) + + def load_data(self): + return self + + def __len__(self): + """Return the number of data in the dataset""" + return min(len(self.dataset), self.opt.max_dataset_size) + + def __iter__(self): + """Return a batch of data""" + for i, data in enumerate(self.dataloader): + if i * self.opt.batch_size >= self.opt.max_dataset_size: + break + yield data diff --git a/data/base_dataset.py b/data/base_dataset.py new file mode 100644 index 0000000..3226c7e --- /dev/null +++ b/data/base_dataset.py @@ -0,0 +1,161 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets. + +It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. +""" +import random +import numpy as np +import torch.utils.data as data +from PIL import Image +import torchvision.transforms as transforms +from abc import ABC, abstractmethod + +import sys +# sys.path.insert(1, './UGATIT/Nassim_Code') +# sys.path.insert(1, '../UGATIT/Nassim_Code') + +from .preprocessing import RandomRotation, RandomGamma + +class BaseDataset(data.Dataset, ABC): + """This class is an abstract base class (ABC) for datasets. + + To create a subclass, you need to implement the following four functions: + -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point. + -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. 
+ """ + + def __init__(self, opt): + """Initialize the class; save the options in the class + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + self.opt = opt + self.root = opt.dataroot + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def __len__(self): + """Return the total number of images in the dataset.""" + return 0 + + @abstractmethod + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index - - a random integer for data indexing + + Returns: + a dictionary of data with their names. It ususally contains the data itself and its metadata information. + """ + pass + + +def get_params(opt, size): + w, h = size + new_h = h + new_w = w + if opt.preprocess == 'resize_and_crop': + new_h = new_w = opt.load_size + elif opt.preprocess == 'scale_width_and_crop': + new_w = opt.load_size + new_h = opt.load_size * h // w + + x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) + y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) + + flip = random.random() > 0.5 + + return {'crop_pos': (x, y), 'flip': flip} + + +def get_transform(opt, grayscale=False, method=Image.BILINEAR): + transform_list = [] + if grayscale: + transform_list.append(transforms.Grayscale(1)) + #if 'resize' in opt.preprocess: + # osize = [opt.load_size, opt.load_size] + # transform_list.append(transforms.Resize(osize, method)) + if 'scale_width' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) + + if 'crop' in opt.preprocess: + transform_list.append(transforms.RandomCrop(opt.crop_size)) + + if opt.preprocess == 'none': + transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) + + if not opt.no_flip: + transform_list.append(transforms.RandomHorizontalFlip()) + + transform_list.append(RandomRotation([0,90,180,270])) + transform_list.append(RandomGamma(.2)) + + transform_list += [transforms.ToTensor()] # (Divides image values by 255.) 
+ if grayscale: + transform_list += [transforms.Normalize((0.5,), (0.5,))] + else: + transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + + + return transforms.Compose(transform_list) + + +def __make_power_2(img, base, method=Image.BILINEAR): + ow, oh = img.size + h = int(round(oh / base) * base) + w = int(round(ow / base) * base) + if (h == oh) and (w == ow): + return img + + __print_size_warning(ow, oh, w, h) + return img.resize((w, h), method) + + +def __scale_width(img, target_width, method=Image.BICUBIC): + ow, oh = img.size + if (ow == target_width): + return img + w = target_width + h = int(target_width * oh / ow) + return img.resize((w, h), method) + + +def __crop(img, pos, size): + ow, oh = img.size + x1, y1 = pos + tw = th = size + if (ow > tw or oh > th): + return img.crop((x1, y1, x1 + tw, y1 + th)) + return img + + +def __flip(img, flip): + if flip: + return img.transpose(Image.FLIP_LEFT_RIGHT) + return img + + +def __print_size_warning(ow, oh, w, h): + """Print warning information about image size(only print once)""" + if not hasattr(__print_size_warning, 'has_printed'): + print("The image size needs to be a multiple of 4. " + "The loaded image size was (%d, %d), so it was adjusted to " + "(%d, %d). This adjustment will be done to all images " + "whose sizes are not multiples of 4" % (ow, oh, w, h)) + __print_size_warning.has_printed = True diff --git a/data/image_folder.py b/data/image_folder.py new file mode 100644 index 0000000..dee105c --- /dev/null +++ b/data/image_folder.py @@ -0,0 +1,84 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +"""A modified image folder class + +We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) +so that this class can load images from both current directory and its subdirectories. 
+""" + +import torch.utils.data as data + +from PIL import Image +import os +import os.path +import glob + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', +] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def make_dataset(dir, preload, loadsize, max_dataset_size=float("inf")): + paths = [] + images = [] + assert os.path.isdir(dir), '%s is not a valid directory' % dir + + for root, _, fnames in sorted(os.walk(dir)): + for fname in fnames: + if is_image_file(fname): + path = os.path.join(root, fname) + paths.append(path) + if preload: + images.append(default_loader(path).resize((loadsize,loadsize), Image.BILINEAR)) + + return paths[:min(max_dataset_size, len(paths))], images[:min(max_dataset_size, len(images))] + + +def make_dataset_NonRecursive(dir, preload, loadsize, max_dataset_size=float("inf")): + paths = glob.glob(dir + '/*.png') + images = [] + assert os.path.isdir(dir), '%s is not a valid directory' % dir + + if preload: + for imgpath in paths: + images.append(default_loader(imgpath).resize((loadsize,loadsize), Image.BILINEAR)) + + return paths[:min(max_dataset_size, len(paths))], images[:min(max_dataset_size, len(images))] + + +def default_loader(path): + return Image.open(path).convert('RGB') + + +class ImageFolder(data.Dataset): + + def __init__(self, root, transform=None, return_paths=False, + loader=default_loader): + imgs = make_dataset(root) + if len(imgs) == 0: + raise(RuntimeError("Found 0 images in: " + root + "\n" + "Supported image extensions are: " + + ",".join(IMG_EXTENSIONS))) + + self.root = root + self.imgs = imgs + self.transform = transform + self.return_paths = return_paths + self.loader = loader + + def __getitem__(self, index): + path = self.imgs[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.return_paths: + return img, path + else: + return img + + def __len__(self): + return len(self.imgs) diff --git a/data/preprocessing.py b/data/preprocessing.py new file mode 100644 index 0000000..179e55d --- /dev/null +++ b/data/preprocessing.py @@ -0,0 +1,39 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +import torch +import numpy as np +import torchvision.transforms.functional as F +import random + +class RangeNormalization(object): + def __call__(self, sample): + img, lbl = sample + return img / 255.0 * 2.0 - 1.0, lbl + + +class ToTensor(object): + def __call__(self, sample): + img, lbl = sample + + #lbl = torch.from_numpy(lbl).long() + img = torch.from_numpy(np.array(img, np.float32).transpose(2, 0, 1)) + + return img, lbl + + +class RandomRotation(object): + def __init__(self,angles): + self. angles = angles + + def __call__(self, img): + angle = random.choice(self.angles) + return F.rotate(img, angle) + +class RandomGamma(object): + def __init__(self,delta): + self.delta = delta + + def __call__(self, img): + p = random.gauss(1,self.delta) + if p <= 0.5: p = 0.5 + elif p>= 1.5: p = 1.5 + return F.adjust_gamma(img, p) diff --git a/data/unaligned_dataset.py b/data/unaligned_dataset.py new file mode 100644 index 0000000..fd5c43e --- /dev/null +++ b/data/unaligned_dataset.py @@ -0,0 +1,81 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. 
+import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image +import random + + +class UnalignedDataset(BaseDataset): + """ + This dataset class can load unaligned/unpaired datasets. + + It requires two directories to host training images from domain A '/path/to/data/stain/trainA' + and from domain B '/path/to/data/PAS/trainB' (B is PAS) respectively + You can train the model with the dataset flag '--dataroot /path/to/data'. + Similarly, you need to prepare two directories: + '/path/to/data/testA' and '/path/to/data/testB' during test time. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + + self.dir_A = os.path.join(opt.dataroot, opt.stain, opt.phase) # create a path '/path/to/data/stainA/train' + self.A_paths, self.A_imgs = make_dataset(self.dir_A, opt.preload, opt.load_size, opt.max_dataset_size) # load images from '/path/to/data/trainA' + + self.dir_B = os.path.join(opt.dataroot, opt.stainB, opt.phase) # create a path '/path/to/data/stainB/train' + self.B_paths, self.B_imgs = make_dataset(self.dir_B, opt.preload, opt.load_size, opt.max_dataset_size) # load images from '/path/to/data/trainB' + + self.A_size = len(self.A_paths) # get the size of dataset A + self.B_size = len(self.B_paths) # get the size of dataset B + opt.logger.info('Size of '+ opt.phase +' data set '+ opt.stain +' (A): ' + str(self.A_size)) + opt.logger.info('Size of '+ opt.phase +' data set '+ opt.stainB +' (B): ' + str(self.B_size)) + + self.transform_A = get_transform(self.opt, grayscale=(opt.input_nc == 1)) + self.transform_B = get_transform(self.opt, grayscale=(opt.output_nc == 1)) + self.preload = opt.preload + self.resize = (opt.load_size,opt.load_size) + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index (int) -- a random integer for data indexing + + Returns a dictionary that contains A, B, A_paths and B_paths + A (tensor) -- an image in the input domain + B (tensor) -- (its corresponding) image in the target domain + A_paths (str) -- image paths + B_paths (str) -- image paths + """ + A_path = self.A_paths[index % self.A_size] # make sure index is within the range + if self.opt.serial_batches: # make sure index is within then range + index_B = index % self.B_size + else: # randomize the index for domain B to avoid fixed pairs. + index_B = random.randint(0, self.B_size - 1) + B_path = self.B_paths[index_B] + + if self.preload: + A_img = self.A_imgs[index % self.A_size] + B_img = self.B_imgs[index_B] + else: + A_img = Image.open(A_path).convert('RGB').resize(self.resize, Image.BILINEAR) + B_img = Image.open(B_path).convert('RGB').resize(self.resize, Image.BILINEAR) + # apply image transformation + A = self.transform_A(A_img) + B = self.transform_B(B_img) + + return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} + + def __len__(self): + """Return the total number of images in the dataset. 
+ + As we have two datasets with potentially different number of images, + we take a maximum of + """ + return max(self.A_size, self.B_size) diff --git a/evaluation.py b/evaluation.py new file mode 100644 index 0000000..e2f5842 --- /dev/null +++ b/evaluation.py @@ -0,0 +1,85 @@ +import skimage as ski +import numpy as np + +class ClassEvaluator(object): + + def __init__(self, thres=None): + self.thres = np.arange(start=0.5, stop=0.95, step=0.05) if thres is None else thres + self.TP = np.zeros(self.thres.__len__()) + self.FN = np.zeros(self.thres.__len__()) + self.FP = np.zeros(self.thres.__len__()) + self.diceScores = [] + + def add_example(self, pred, gt): + gtInstances = np.unique(gt) + gt_num = len(gtInstances[gtInstances != 0]) + IoU_dict = [] # (prediction label)-(IoU) + # match_dict = {} # (prediction label)-(matched gt label) + + pred_area = self.get_area_dict(pred) + gt_area = self.get_area_dict(gt) + unique = np.unique(pred) + + for label in unique: + if label == 0: + continue + u, c = np.unique(gt[pred == label], return_counts=True) + ind = np.argsort(c, kind='mergesort') + if len(u) == 1 and u[0] == 0: # only background contained + IoU_dict.append(0) + # match_dict[label] = None + self.diceScores.append(0) + else: + # take the gt label with the largest overlap + i = ind[-2] if u[ind[-1]] == 0 else ind[-1] + intersect = c[i] + union = pred_area[label] + gt_area[u[i]] - c[i] + IoU_dict.append(intersect / union) + # match_dict[label] = u[i] + diceScore = 2*intersect / (pred_area[label] + gt_area[u[i]]) + self.diceScores.append(diceScore) + + IoU_dict = np.array(IoU_dict) + for i, threshold in enumerate(self.thres): + tp = np.sum(IoU_dict > threshold) + self.FP[i] += len(IoU_dict) - tp + self.FN[i] += gt_num - tp + self.TP[i] += tp + + + uniqueGT = np.unique(gt) + for label in uniqueGT: + if label == 0: + continue + u, c = np.unique(pred[gt == label], return_counts=True) + ind = np.argsort(c, kind='mergesort') + if len(u) == 1 and u[0] == 0: # only background contained + self.diceScores.append(0) + else: + # take the gt label with the largest overlap + i = ind[-2] if u[ind[-1]] == 0 else ind[-1] + intersect = c[i] + diceScore = 2 * intersect / (gt_area[label] + pred_area[u[i]]) + self.diceScores.append(diceScore) + + + def get_area_dict(self, label_map): + props = ski.measure.regionprops(label_map) + return {p.label: p.area for p in props} + + + def score(self): + precisions = self.TP / (self.TP + self.FN + self.FP) + avg_precision = np.mean(precisions) + + avg_dice_score = np.mean(np.array(self.diceScores)) + std_dice_score = np.std(np.array(self.diceScores)) + min_dice_score = np.min(np.array(self.diceScores)) + max_dice_score = np.max(np.array(self.diceScores)) + + return precisions, avg_precision, avg_dice_score, std_dice_score, min_dice_score, max_dice_score + + +if __name__ == "__main__": + print('') + diff --git a/image_pool.py b/image_pool.py new file mode 100644 index 0000000..7cf309e --- /dev/null +++ b/image_pool.py @@ -0,0 +1,56 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. + +import random +import torch + + +class ImagePool(): + """This class implements an image buffer that stores previously generated images. + + This buffer enables us to update discriminators using a history of generated images + rather than the ones produced by the latest generators. 
+ """ + + def __init__(self, pool_size): + """Initialize the ImagePool class + + Parameters: + pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created + """ + self.pool_size = pool_size + if self.pool_size > 0: # create an empty pool + self.num_imgs = 0 + self.images = [] + + def query(self, images): + """Return an image from the pool. + + Parameters: + images: the latest generated images from the generator + + Returns images from the buffer. + + By 50/100, the buffer will return input images. + By 50/100, the buffer will return images previously stored in the buffer, + and insert the current images to the buffer. + """ + if self.pool_size == 0: # if the buffer size is 0, do nothing + return images + return_images = [] + for image in images: + image = torch.unsqueeze(image.data, 0) + if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer + self.num_imgs = self.num_imgs + 1 + self.images.append(image) + return_images.append(image) + else: + p = random.uniform(0, 1) + if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer + random_id = random.randint(0, self.pool_size - 1) # randint is inclusive + tmp = self.images[random_id].clone() + self.images[random_id] = image + return_images.append(tmp) + else: # by another 50% chance, the buffer will return the current image + return_images.append(image) + return_images = torch.cat(return_images, 0) # collect all the images and return + return return_images diff --git a/models/U_GAT_IT_2_model.py b/models/U_GAT_IT_2_model.py new file mode 100644 index 0000000..d00056b --- /dev/null +++ b/models/U_GAT_IT_2_model.py @@ -0,0 +1,568 @@ +# This code was copied from https://github.com/znxlwm/UGATIT-pytorch and adapted for our purpose accordingly. +# From: https://github.com/znxlwm/UGATIT-pytorch +# Author: Junho Kim +# Paper: U-GAT-IT: Unsupervised Generative Attentional Networks with Adaptive Layer-Instance Normalization for Image-to-Image Translation +# License: MIT License + +import torch +import itertools +from image_pool import ImagePool +from .base_model import BaseModel +from . import networks +from .radam import RAdam +from .model import Custom +from .nutils import WeightedL1Loss + + +class UGATIT2Model(BaseModel): + """ + This class implements the UGATIT model, for learning image-to-image translation without paired data. + U-GAT-IT paper: https://arxiv.org/abs/1907.10830 + """ + @staticmethod + def modify_commandline_options(parser, is_train=True): + parser.set_defaults(no_dropout=True) # default U_GAT_IT did not use dropout + parser.set_defaults(norm='instance') # default U_GAT_IT did use instance norm + if is_train: + parser.add_argument('--weight_decay', type=float, default=0.0001) + + parser.add_argument('--lambda_A', type=float, default=1.0, help='weight for cycle loss (A -> B -> A)') + parser.add_argument('--lambda_B', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)') + parser.add_argument('--lambda_Seg', type=float, default=.5, help='weight for cycle loss (Seg(A), A -> B -> A, Seg(A))') + parser.add_argument('--lambda_id', type=float, default=1.0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. 
For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') + parser.add_argument('--lambda_cam', type=int, default=1000, help='Weight for CAM') + + parser.add_argument('--n_layers_D_G', type=int, default=7, help='depth of global discriminators') + parser.add_argument('--n_layers_D_L', type=int, default=5, help='depth of local discriminators') + + parser.add_argument('--lightFC', action='store_true', help='use light FC version which is input resolution independent') + return parser + + def __init__(self, opt): + """Initialize the U_GAT_IT class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseModel.__init__(self, opt) + + self.model_names = ['genA2B', 'genB2A', 'disGA', 'disGB', 'disLA', 'disLB'] + self.model_names_save = ['genA2B', 'genB2A'] + + if opt.netG.startswith('unet'): + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + self.genA2B = UnetGenerator(input_nc=opt.input_nc, output_nc=opt.output_nc, ngf=opt.ngf, n_downsampling=int(opt.netG.split('_')[-1]), norm_layer=norm_layer, use_dropout=False).to(self.device) + self.genB2A = UnetGenerator(input_nc=opt.output_nc, output_nc=opt.input_nc, ngf=opt.ngf, n_downsampling=int(opt.netG.split('_')[-1]), norm_layer=norm_layer, use_dropout=False).to(self.device) + else: #ResnetGenerator + self.genA2B = ResnetGenerator(input_nc=opt.input_nc, output_nc=opt.output_nc, ngf=opt.ngf, n_blocks=int(opt.netG.split('_')[-1][:-6]),n_downsampling=int(opt.netG.split('_')[1]), img_size=opt.crop_size, lightFC=opt.lightFC).to(self.device) + self.genB2A = ResnetGenerator(input_nc=opt.output_nc, output_nc=opt.input_nc, ngf=opt.ngf, n_blocks=int(opt.netG.split('_')[-1][:-6]),n_downsampling=int(opt.netG.split('_')[1]), img_size=opt.crop_size, lightFC=opt.lightFC).to(self.device) + + if self.isTrain: + self.disGA = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=7).to(self.device) + self.disGB = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=7).to(self.device) + self.disLA = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=5).to(self.device) + self.disLB = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=5).to(self.device) + + """ Define Loss """ + self.L1_loss = torch.nn.L1Loss().to(self.device) + self.MSE_loss = torch.nn.MSELoss().to(self.device) + self.BCE_loss = torch.nn.BCEWithLogitsLoss().to(self.device) + + """ Trainer """ + self.G_optim = torch.optim.Adam(itertools.chain(self.genA2B.parameters(), self.genB2A.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) + self.D_optim = torch.optim.Adam(itertools.chain(self.disGA.parameters(), self.disGB.parameters(), self.disLA.parameters(), self.disLB.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) + + self.optimizers.append(self.D_optim) + self.optimizers.append(self.G_optim) + + """ Define Rho clipper to constraint the value of rho in AdaILN and ILN""" + self.Rho_clipper = RhoClipper(0, 1) + + if self.opt.use_segm_model: + self.segm_model = Custom(input_ch=3, output_ch=8, modelDim=2) + self.segm_model.load_state_dict(torch.load(opt.segm_model_path)) + opt.logger.info('### Segmentation Model Loaded ###') + + self.segm_model.to(self.device) + self.set_requires_grad(self.segm_model, False) + self.segm_model.train(False) + + self.segmentationloss = WeightedL1Loss(weights = torch.FloatTensor([1., 1., 1., 1., 1., 10., 1., 10.]).to(self.device)) + + 
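+    # Added note (not part of the upstream U-GAT-IT code): a minimal usage sketch, assuming
+    # an `opt` built via options.py and a dataset created with data.create_dataset(opt):
+    #
+    #   model = UGATIT2Model(opt)
+    #   for data in dataset:
+    #       model.set_input(data)         # unpacks real_A / real_B (and segm_B if used)
+    #       model.optimize_parameters()   # one discriminator update, one generator update,
+    #                                     # then rho clipping on the AdaILN / ILN layers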
+ def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input (dict): include the data itself and its metadata information. + + The option 'direction' can be used to swap domain A and domain B. + """ + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + if self.opt.use_segm_model: + if self.opt.dataset_mode == 'seg_map': + self.segm_B = input['segm_B'].to(self.device) + else: + with torch.no_grad(): + self.segm_B = self.segm_model(self.real_B) + + + def optimize_parameters(self): + identity_weight = self.opt.lambda_id + cycle_weight_A = self.opt.lambda_A + cycle_weight_B = self.opt.lambda_B + cam_weight = self.opt.lambda_cam + seg_weight = self.opt.lambda_Seg + + self.D_optim.zero_grad() + + fake_A2B, _, _ = self.genA2B(self.real_A) + fake_B2A, _, _ = self.genB2A(self.real_B) + + real_GA_logit, real_GA_cam_logit, _ = self.disGA(self.real_A) + real_LA_logit, real_LA_cam_logit, _ = self.disLA(self.real_A) + real_GB_logit, real_GB_cam_logit, _ = self.disGB(self.real_B) + real_LB_logit, real_LB_cam_logit, _ = self.disLB(self.real_B) + + fake_GA_logit, fake_GA_cam_logit, _ = self.disGA(fake_B2A) + fake_LA_logit, fake_LA_cam_logit, _ = self.disLA(fake_B2A) + fake_GB_logit, fake_GB_cam_logit, _ = self.disGB(fake_A2B) + fake_LB_logit, fake_LB_cam_logit, _ = self.disLB(fake_A2B) + + D_ad_loss_GA = self.MSE_loss(real_GA_logit, torch.ones_like(real_GA_logit).to(self.device)) + self.MSE_loss(fake_GA_logit, torch.zeros_like(fake_GA_logit).to(self.device)) + D_ad_cam_loss_GA = self.MSE_loss(real_GA_cam_logit,torch.ones_like(real_GA_cam_logit).to(self.device)) + self.MSE_loss(fake_GA_cam_logit, torch.zeros_like(fake_GA_cam_logit).to(self.device)) + D_ad_loss_LA = self.MSE_loss(real_LA_logit, torch.ones_like(real_LA_logit).to(self.device)) + self.MSE_loss(fake_LA_logit, torch.zeros_like(fake_LA_logit).to(self.device)) + D_ad_cam_loss_LA = self.MSE_loss(real_LA_cam_logit,torch.ones_like(real_LA_cam_logit).to(self.device)) + self.MSE_loss(fake_LA_cam_logit, torch.zeros_like(fake_LA_cam_logit).to(self.device)) + D_ad_loss_GB = self.MSE_loss(real_GB_logit, torch.ones_like(real_GB_logit).to(self.device)) + self.MSE_loss(fake_GB_logit, torch.zeros_like(fake_GB_logit).to(self.device)) + D_ad_cam_loss_GB = self.MSE_loss(real_GB_cam_logit,torch.ones_like(real_GB_cam_logit).to(self.device)) + self.MSE_loss(fake_GB_cam_logit, torch.zeros_like(fake_GB_cam_logit).to(self.device)) + D_ad_loss_LB = self.MSE_loss(real_LB_logit, torch.ones_like(real_LB_logit).to(self.device)) + self.MSE_loss(fake_LB_logit, torch.zeros_like(fake_LB_logit).to(self.device)) + D_ad_cam_loss_LB = self.MSE_loss(real_LB_cam_logit,torch.ones_like(real_LB_cam_logit).to(self.device)) + self.MSE_loss(fake_LB_cam_logit, torch.zeros_like(fake_LB_cam_logit).to(self.device)) + + self.D_loss_A = D_ad_loss_GA + D_ad_cam_loss_GA + D_ad_loss_LA + D_ad_cam_loss_LA + self.D_loss_B = D_ad_loss_GB + D_ad_cam_loss_GB + D_ad_loss_LB + D_ad_cam_loss_LB + + Discriminator_loss = self.D_loss_A + self.D_loss_B + Discriminator_loss.backward() + self.D_optim.step() + + # Update G + self.G_optim.zero_grad() + + self.fake_A2B, fake_A2B_cam_logit, fake_A2B_heatmap = self.genA2B(self.real_A) + self.fake_B2A, fake_B2A_cam_logit, fake_B2A_heatmap = self.genB2A(self.real_B) + + self.fake_A2B2A, _, fake_A2B2A_heatmap = 
self.genB2A(self.fake_A2B) + self.fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(self.fake_B2A) + + fake_A2A, fake_A2A_cam_logit, _ = self.genB2A(self.real_A) + fake_B2B, fake_B2B_cam_logit, _ = self.genA2B(self.real_B) + + fake_GA_logit, fake_GA_cam_logit, _ = self.disGA(self.fake_B2A) + fake_LA_logit, fake_LA_cam_logit, _ = self.disLA(self.fake_B2A) + fake_GB_logit, fake_GB_cam_logit, _ = self.disGB(self.fake_A2B) + fake_LB_logit, fake_LB_cam_logit, _ = self.disLB(self.fake_A2B) + + G_ad_loss_GA = self.MSE_loss(fake_GA_logit, torch.ones_like(fake_GA_logit).to(self.device)) + G_ad_loss_LA = self.MSE_loss(fake_LA_logit, torch.ones_like(fake_LA_logit).to(self.device)) + G_ad_loss_GB = self.MSE_loss(fake_GB_logit, torch.ones_like(fake_GB_logit).to(self.device)) + G_ad_loss_LB = self.MSE_loss(fake_LB_logit, torch.ones_like(fake_LB_logit).to(self.device)) + G_ad_cam_loss_GA = self.MSE_loss(fake_GA_cam_logit, torch.ones_like(fake_GA_cam_logit).to(self.device)) + G_ad_cam_loss_LA = self.MSE_loss(fake_LA_cam_logit, torch.ones_like(fake_LA_cam_logit).to(self.device)) + G_ad_cam_loss_GB = self.MSE_loss(fake_GB_cam_logit, torch.ones_like(fake_GB_cam_logit).to(self.device)) + G_ad_cam_loss_LB = self.MSE_loss(fake_LB_cam_logit, torch.ones_like(fake_LB_cam_logit).to(self.device)) + + G_recon_loss_A = self.L1_loss(self.fake_A2B2A, self.real_A) + G_recon_loss_B = self.L1_loss(self.fake_B2A2B, self.real_B) + + G_identity_loss_A = self.L1_loss(fake_A2A, self.real_A) + G_identity_loss_B = self.L1_loss(fake_B2B, self.real_B) + + G_cam_loss_A = self.BCE_loss(fake_B2A_cam_logit,torch.ones_like(fake_B2A_cam_logit).to(self.device)) + self.BCE_loss(fake_A2A_cam_logit, torch.zeros_like(fake_A2A_cam_logit).to(self.device)) + G_cam_loss_B = self.BCE_loss(fake_A2B_cam_logit,torch.ones_like(fake_A2B_cam_logit).to(self.device)) + self.BCE_loss(fake_B2B_cam_logit, torch.zeros_like(fake_B2B_cam_logit).to(self.device)) + + self.G_loss_A = G_ad_loss_GA + G_ad_cam_loss_GA + G_ad_loss_LA + G_ad_cam_loss_LA + cycle_weight_A * G_recon_loss_A + identity_weight * G_identity_loss_A + cam_weight * G_cam_loss_A + self.G_loss_B = G_ad_loss_GB + G_ad_cam_loss_GB + G_ad_loss_LB + G_ad_cam_loss_LB + cycle_weight_B * G_recon_loss_B + identity_weight * G_identity_loss_B + cam_weight * G_cam_loss_B + + if self.opt.use_segm_model: + self.rec_B_Segm = self.segm_model(self.fake_B2A2B) #Segm(G_B(G_A(A))) + self.idt_B_Segm = self.segm_model(fake_B2B) + self.G_loss_B += seg_weight * (self.segmentationloss(self.rec_B_Segm, self.segm_B) + self.segmentationloss(self.idt_B_Segm, self.segm_B)) + + Generator_loss = self.G_loss_A + self.G_loss_B # + G_heatmap_loss * 10 + + Generator_loss.backward() + self.G_optim.step() + + # clip parameter of AdaILN and ILN, applied after optimizer step + self.genA2B.apply(self.Rho_clipper) + self.genB2A.apply(self.Rho_clipper) + + def forward(self): + return 0 + + def computeLosses(self): + return 0 + + def perform_test_conversion(self, input): + if self.opt.direction == 'AtoB': + res, _, _ = self.genA2B(input) + return res + else: + res, _, _ = self.genB2A(input) + return res + + + +import torch +import torch.nn as nn +from torch.nn.parameter import Parameter +from torch.utils.checkpoint import checkpoint, checkpoint_sequential +import functools + +class ResnetBlock(nn.Module): + def __init__(self, dim, use_bias): + super(ResnetBlock, self).__init__() + conv_block = [] + conv_block += [nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias), + nn.InstanceNorm2d(dim), + 
nn.ReLU(True)] + + conv_block += [nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias), + nn.InstanceNorm2d(dim)] + + self.conv_block = nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + +class ResnetAdaILNBlock(nn.Module): + def __init__(self, dim, use_bias): + super(ResnetAdaILNBlock, self).__init__() + self.pad1 = nn.ReflectionPad2d(1) + self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias) + self.norm1 = adaILN(dim) + self.relu1 = nn.ReLU(True) + + self.pad2 = nn.ReflectionPad2d(1) + self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias) + self.norm2 = adaILN(dim) + + def forward(self, x, gamma, beta): + out = self.pad1(x) + out = self.conv1(out) + out = self.norm1(out, gamma, beta) + out = self.relu1(out) + out = self.pad2(out) + out = self.conv2(out) + out = self.norm2(out, gamma, beta) + + return out + x + + +class adaILN(nn.Module): + def __init__(self, num_features, eps=1e-5): + super(adaILN, self).__init__() + self.eps = eps + self.rho = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.rho.data.fill_(0.9) + + def forward(self, input, gamma, beta): + in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True) + out_in = (input - in_mean) / torch.sqrt(in_var + self.eps) + ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True) + out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps) + out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 - self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln + out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3) + + return out + + +class ILN(nn.Module): + def __init__(self, num_features, eps=1e-5): + super(ILN, self).__init__() + self.eps = eps + self.rho = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.beta = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.rho.data.fill_(0.0) + self.gamma.data.fill_(1.0) + self.beta.data.fill_(0.0) + + def forward(self, input): + in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True) + out_in = (input - in_mean) / torch.sqrt(in_var + self.eps) + ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True) + out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps) + out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 - self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln + out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1) + + return out + + +class Discriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=5): + super(Discriminator, self).__init__() + model = [nn.ReflectionPad2d(1), + nn.utils.spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)), + nn.LeakyReLU(0.2, True)] + + for i in range(1, n_layers - 2): + mult = 2 ** (i - 1) + model += [nn.ReflectionPad2d(1), + nn.utils.spectral_norm(nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)), + nn.LeakyReLU(0.2, True)] + + mult = 2 ** (n_layers - 2 - 1) + model += [nn.ReflectionPad2d(1), + nn.utils.spectral_norm(nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)), + nn.LeakyReLU(0.2, True)] + + # Class Activation Map 
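+        # Added note (descriptive comment, not from the upstream code): gap_fc and gmp_fc
+        # below turn globally average- and max-pooled features into the CAM logits; in
+        # forward(), their weights re-weight the feature map channel-wise, both branches are
+        # concatenated, reduced by conv1x1, and summed over channels to form the heatmap.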
+ mult = 2 ** (n_layers - 2) + self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False)) + self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False)) + self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True) + self.leaky_relu = nn.LeakyReLU(0.2, True) + + self.pad = nn.ReflectionPad2d(1) + self.conv = nn.utils.spectral_norm(nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False)) + + self.model = nn.Sequential(*model) + + def forward(self, input): + x = self.model(input) + + gap = torch.nn.functional.adaptive_avg_pool2d(x, 1) + gap_logit = self.gap_fc(gap.view(x.shape[0], -1)) + gap_weight = list(self.gap_fc.parameters())[0] + gap = x * gap_weight.unsqueeze(2).unsqueeze(3) + + gmp = torch.nn.functional.adaptive_max_pool2d(x, 1) + gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1)) + gmp_weight = list(self.gmp_fc.parameters())[0] + gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3) + + cam_logit = torch.cat([gap_logit, gmp_logit], 1) + x = torch.cat([gap, gmp], 1) + x = self.leaky_relu(self.conv1x1(x)) + + heatmap = torch.sum(x, dim=1, keepdim=True) + + x = self.pad(x) + out = self.conv(x) + + return out, cam_logit, heatmap + + +class RhoClipper(object): + def __init__(self, min, max): + self.clip_min = min + self.clip_max = max + assert min < max + + def __call__(self, module): + if hasattr(module, 'rho'): + w = module.rho.data + w = w.clamp(self.clip_min, self.clip_max) + module.rho.data = w + + + +class ResnetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, n_downsampling = 2, img_size=640, lightFC=True): + assert (n_blocks >= 0) + super(ResnetGenerator, self).__init__() + self.input_nc = input_nc + self.output_nc = output_nc + self.ngf = ngf + self.n_blocks = n_blocks + self.img_size = img_size + self.lightFC = lightFC + + DownBlock = [] + DownBlock += [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0, bias=False), + nn.InstanceNorm2d(ngf), + nn.ReLU(True)] + + # Down-Sampling + for i in range(n_downsampling): + mult = 2 ** i + DownBlock += [nn.ReflectionPad2d(1), + nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0, bias=False), + nn.InstanceNorm2d(ngf * mult * 2), + nn.ReLU(True)] + + # Down-Sampling Bottleneck + mult = 2 ** n_downsampling + for i in range(n_blocks): + DownBlock += [ResnetBlock(ngf * mult, use_bias=False)] + + # Class Activation Map + self.gap_fc = nn.Linear(ngf * mult, 1, bias=False) + self.gmp_fc = nn.Linear(ngf * mult, 1, bias=False) + self.conv1x1 = nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=1, stride=1, bias=True) + self.relu = nn.ReLU(True) + + # Gamma, Beta block + if self.lightFC: + FC = [nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True), + nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True)] + else: + FC = [nn.Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias=False), nn.ReLU(True), + nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True)] + self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False) + self.beta = nn.Linear(ngf * mult, ngf * mult, bias=False) + + # Up-Sampling Bottleneck + for i in range(n_blocks): + setattr(self, 'UpBlock1_' + str(i + 1), ResnetAdaILNBlock(ngf * mult, use_bias=False)) + + # Up-Sampling + UpBlock2 = [] + for i in range(n_downsampling): + mult = 2 ** (n_downsampling - i) + UpBlock2 += [nn.Upsample(scale_factor=2, mode='nearest'), + nn.ReflectionPad2d(1), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), 
kernel_size=3, stride=1, padding=0, bias=False), + ILN(int(ngf * mult / 2)), + nn.ReLU(True)] + + UpBlock2 += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False), + nn.Tanh()] + + self.DownBlock = nn.Sequential(*DownBlock) + self.FC = nn.Sequential(*FC) + self.UpBlock2 = nn.Sequential(*UpBlock2) + + def forward(self, input): + x = self.DownBlock(input) + + gap = torch.nn.functional.adaptive_avg_pool2d(x, 1) + gap_logit = self.gap_fc(gap.view(x.shape[0], -1)) + gap_weight = list(self.gap_fc.parameters())[0] + gap = x * gap_weight.unsqueeze(2).unsqueeze(3) + + gmp = torch.nn.functional.adaptive_max_pool2d(x, 1) + gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1)) + gmp_weight = list(self.gmp_fc.parameters())[0] + gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3) + + cam_logit = torch.cat([gap_logit, gmp_logit], 1) + x = torch.cat([gap, gmp], 1) + x = self.relu(self.conv1x1(x)) + + heatmap = torch.sum(x, dim=1, keepdim=True) + + if self.lightFC: + x_ = torch.nn.functional.adaptive_avg_pool2d(x, 1) + x_ = self.FC(x_.view(x_.shape[0], -1)) + else: + x_ = self.FC(x.view(x.shape[0], -1)) + gamma, beta = self.gamma(x_), self.beta(x_) + + for i in range(self.n_blocks): + x = getattr(self, 'UpBlock1_' + str(i + 1))(x, gamma, beta) + out = self.UpBlock2(x) + + return out, cam_logit, heatmap + + +class UnetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=6, norm_layer=None, use_dropout=False): + super(UnetGenerator, self).__init__() + # construct unet structure + self.unet_block_First = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=self.unet_block_First, norm_layer=norm_layer, use_dropout=use_dropout) + for i in range(n_downsampling - 6): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + res = self.model(input) + return res, self.unet_block_First.cam_logit, self.unet_block_First.heatmap + + +class UnetSkipConnectionBlock(nn.Module): + def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + self.innermost = innermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = 
nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1) + #conv = nn.Conv2d(inner_nc,outer_nc,kernel_size=5,stride=1,padding=2 ) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up #+ [conv] + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) + down = [downrelu, downconv] + # Class Activation Map + self.gap_fc = nn.Linear(inner_nc, 1, bias=False) + self.gmp_fc = nn.Linear(inner_nc, 1, bias=False) + self.conv1x1 = nn.Conv2d(inner_nc * 2, inner_nc, kernel_size=1, stride=1, bias=True) + self.relu = nn.ReLU(False) + + self.up = nn.Sequential(*[uprelu, upconv, upnorm]) + model = down + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.innermost: + bottomX = self.model(x) + gap = torch.nn.functional.adaptive_avg_pool2d(bottomX, 1) + gap_logit = self.gap_fc(gap.view(bottomX.shape[0], -1)) + gap_weight = list(self.gap_fc.parameters())[0] + gap = bottomX * gap_weight.unsqueeze(2).unsqueeze(3) + + gmp = torch.nn.functional.adaptive_max_pool2d(bottomX, 1) + gmp_logit = self.gmp_fc(gmp.view(bottomX.shape[0], -1)) + gmp_weight = list(self.gmp_fc.parameters())[0] + gmp = bottomX * gmp_weight.unsqueeze(2).unsqueeze(3) + + self.cam_logit = torch.cat([gap_logit, gmp_logit], 1) + bottomX = torch.cat([gap, gmp], 1) + bottomX = self.relu(self.conv1x1(bottomX)) + + self.heatmap = torch.sum(bottomX, dim=1, keepdim=True) + + return torch.cat([x, self.up(bottomX)], 1) + elif self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) \ No newline at end of file diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..57f5d25 --- /dev/null +++ b/models/__init__.py @@ -0,0 +1,67 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +"""This package contains modules related to objective functions, optimizations, and network architectures. + +To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. +You need to implement the following five functions: + -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). + -- <set_input>: unpack data from dataset and apply preprocessing. + -- <forward>: produce intermediate results. + -- <optimize_parameters>: calculate loss, gradients, and update network weights. + -- <modify_commandline_options>: (optionally) add model-specific options and set default options. + +In the function <__init__>, you need to define four lists: + -- self.loss_names (str list): specify the training losses that you want to plot and save. + -- self.model_names (str list): define networks used in our training. + -- self.visual_names (str list): specify the images that you want to display and save. + -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage. 
+ +Now you can use the model class by specifying flag '--model dummy'. +See our template model class 'template_model.py' for more details. +""" + +import importlib +from models.base_model import BaseModel + + +def find_model_using_name(model_name): + """Import the module "models/[model_name]_model.py". + + In the file, the class called DatasetNameModel() will + be instantiated. It has to be a subclass of BaseModel, + and it is case-insensitive. + """ + model_filename = "models." + model_name + "_model" + modellib = importlib.import_module(model_filename) + model = None + target_model_name = model_name.replace('_', '') + 'model' + for name, cls in modellib.__dict__.items(): + if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel): + model = cls + + if model is None: + print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) + exit(0) + + return model + + +def get_option_setter(model_name): + """Return the static method <modify_commandline_options> of the model class.""" + model_class = find_model_using_name(model_name) + return model_class.modify_commandline_options + + +def create_model(opt): + """Create a model given the option. + + This function warps the class CustomDatasetDataLoader. + This is the main interface between this package and 'train.py'/'test.py' + + Example: + >>> from models import create_model + >>> model = create_model(opt) + """ + model = find_model_using_name(opt.model) + instance = model(opt) + opt.logger.info("model [%s] was created" % type(instance).__name__) + return instance diff --git a/models/base_model.py b/models/base_model.py new file mode 100644 index 0000000..97aa5bc --- /dev/null +++ b/models/base_model.py @@ -0,0 +1,257 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +import os +import torch +from collections import OrderedDict +from abc import ABC, abstractmethod +from . import networks + + +class BaseModel(ABC): + """This class is an abstract base class (ABC) for models. + To create a subclass, you need to implement the following five functions: + -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). + -- <set_input>: unpack data from dataset and apply preprocessing. + -- <forward>: produce intermediate results. + -- <optimize_parameters>: calculate losses, gradients, and update network weights. + -- <modify_commandline_options>: (optionally) add model-specific options and set default options. + """ + + def __init__(self, opt): + """Initialize the BaseModel class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + + When creating your custom class, you need to implement your own initialization. + In this fucntion, you should first call <BaseModel.__init__(self, opt)> + Then, you need to define four lists: + -- self.loss_names (str list): specify the training losses that you want to plot and save. + -- self.model_names (str list): specify the images that you want to display and save. + -- self.visual_names (str list): define networks used in our training. + -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. 
+ """ + self.opt = opt + self.gpu_ids = opt.gpu_ids + self.use_MC = opt.use_MC + self.isTrain = opt.phase == 'train' + self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU + self.save_dir = opt.checkpoints_dir # save all the checkpoints to save_dir + if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark. + torch.backends.cudnn.benchmark = True + self.loss_names = [] + self.model_names = [] + self.model_names_save = [] + self.visual_names = [] + self.optimizers = [] + self.image_paths = [] + self.metric = 0 # used for learning rate policy 'plateau' + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new model-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input (dict): includes the data itself and its metadata information. + """ + pass + + @abstractmethod + def forward(self): + """Run forward pass; called by both functions <optimize_parameters> and <test>.""" + pass + + @abstractmethod + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + pass + + @abstractmethod + def computeLosses(self): + """Calculate losses, called in every validation iteration""" + pass + + def setup(self, opt): + """Load and print networks; create schedulers + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + if self.isTrain: + self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] + else: + self.load_networks(opt.load_iter) + + self.print_networks(opt.verbose) + + + def setModelTrain(self, trainMode=True): + """Make models eval mode during val/test time""" + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, name) + net.train(trainMode) + + def setSavedNetsEval(self): + """Make models eval mode during val/test time""" + for name in self.model_names_save: + if isinstance(name, str): + net = getattr(self, name) + net.eval() + + def test(self): + """Forward + loss computation function used in val/test time. + + This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop + It also calls <compute_visuals> to produce additional visualization results + """ + with torch.no_grad(): + self.forward() + self.compute_visuals() + self.computeLosses() + + def testForward(self): + """Forward function used in val/test time. 
+ + This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop + It also calls <compute_visuals> to produce additional visualization results + """ + with torch.no_grad(): + self.forward() + self.compute_visuals() + + def compute_visuals(self): + """Calculate additional output images for visdom and HTML visualization""" + pass + + def get_image_paths(self): + """ Return image paths that are used to load current data""" + return self.image_paths + + def update_learning_rate(self, currIteration): + """Update learning rates for all the networks; called at the end of every epoch""" + for scheduler in self.schedulers: + if self.opt.lr_policy == 'plateau': + # self.opt.logger.info('call scheduler.step(self.metric) with self.metric = ' + str(self.metric)) + scheduler.step(self.metric) + else: + scheduler.step() + + if currIteration % 10000 == 0: + self.opt.logger.info('-> Current learning rate = %.7f' % self.optimizers[0].param_groups[0]['lr']) + + def get_current_visuals(self): + """Return visualization images. train.py will display these images with visdom, and save the images to a HTML""" + visual_ret = OrderedDict() + for name in self.visual_names: + if isinstance(name, str): + visual_ret[name] = getattr(self, name) + return visual_ret + + def get_current_losses(self): + """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" + errors_ret = OrderedDict() + for name in self.loss_names: + if isinstance(name, str): + errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number + return errors_ret + + def save_networks(self, iteration): + """Save all the networks to the disk. + + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + for name in self.model_names_save: + if isinstance(name, str): + save_filename = '%s_%s.pth' % (iteration, name) + save_path = os.path.join(self.save_dir, save_filename) + net = getattr(self, name) + + if len(self.gpu_ids) > 0 and torch.cuda.is_available(): + torch.save(net.module.cpu().state_dict(), save_path) + net.cuda(self.gpu_ids[0]) + else: + torch.save(net.cpu().state_dict(), save_path) + + def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): + """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + state_dict.pop('.'.join(keys)) + else: + self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + + def load_networks(self, iteration): + """Load all the networks from the disk. + + Parameters: + iteration (int) -- current iteration; used in the file name '%s_net_%s.pth' % (iteration, modelname) + """ + for name in self.model_names_save: + if isinstance(name, str): + # load_filename = '%s_net_%s.pth' % (iteration, name) # USED FOR U_GAT_IT_2 MODELS + load_filename = '%s_%s.pth' % (iteration, name) + load_path = os.path.join(self.save_dir, load_filename) + assert os.path.exists(load_path), 'Network file at %s does not exist.' 
%load_path + net = getattr(self, name) + if isinstance(net, torch.nn.DataParallel): + net = net.module + self.opt.logger.info('### loading model {} from {} ###'.format(name, load_path)) + state_dict = torch.load(load_path, map_location=self.device) + if hasattr(state_dict, '_metadata'): + del state_dict._metadata + + net.load_state_dict(state_dict) + + def print_networks(self, verbose): + """Print the total number of parameters in the network and (if verbose) network architecture + + Parameters: + verbose (bool) -- if verbose: print the network architecture + """ + self.opt.logger.info('---------- Networks initialized -------------') + modelnames = self.model_names_save + if self.isTrain: + modelnames = self.model_names + for name in modelnames: + if isinstance(name, str): + net = getattr(self, name) + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + if verbose: + self.opt.logger.info(net) + self.opt.logger.info('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) + self.opt.logger.info('-----------------------------------------------') + + def set_requires_grad(self, nets, requires_grad=False): + """Set requies_grad=Fasle for all the networks to avoid unnecessary computations + Parameters: + nets (network list) -- a list of networks + requires_grad (bool) -- whether the networks require gradients or not + """ + if not isinstance(nets, list): + nets = [nets] + for net in nets: + if net is not None: + for param in net.parameters(): + param.requires_grad = requires_grad diff --git a/models/cycle_gan_model.py b/models/cycle_gan_model.py new file mode 100644 index 0000000..255d66e --- /dev/null +++ b/models/cycle_gan_model.py @@ -0,0 +1,318 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +import torch +import itertools +from image_pool import ImagePool +from .base_model import BaseModel +from . import networks +from .radam import RAdam +from .model import Custom +from .nutils import WeightedL1Loss +from .utils import concat_zeros, split_6_ch_to_3 + + +class CycleGANModel(BaseModel): + """ + This class implements the CycleGAN model, for learning image-to-image translation without paired data. + + The model training requires '--dataset_mode unaligned' dataset. + By default, it uses a '--netG resnet_9blocks' ResNet generator, + a '--netD basic' discriminator (PatchGAN introduced by pix2pix), + and a least-square GANs objective ('--gan_mode lsgan'). + + CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf + """ + @staticmethod + def modify_commandline_options(parser, is_train=True): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + + For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses. + A (source domain), B (target domain). + Generators: G_A: A -> B; G_B: B -> A. + Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. + Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper) + Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. 
(2) in the paper) + Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper) + Dropout is not used in the original CycleGAN paper. + """ + parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout + if is_train: + parser.add_argument('--lambda_A', type=float, default=1.0, help='weight for cycle loss (A -> B -> A)') + parser.add_argument('--lambda_B', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)') + parser.add_argument('--lambda_Seg', type=float, default=.5, help='weight for cycle loss (Seg(A), A -> B -> A, Seg(A))') + parser.add_argument('--lambda_id', type=float, default=1.0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') + + return parser + + def __init__(self, opt): + """Initialize the CycleGAN class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseModel.__init__(self, opt) + # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses> + self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B'] + # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals> + visual_names_A = ['real_A', 'fake_B', 'rec_A'] + visual_names_B = ['real_B', 'fake_A', 'rec_B'] + if self.isTrain:# and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B) + visual_names_A.append('idt_A') + visual_names_B.append('idt_B') + + self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B + # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>. + # if self.isTrain: + # self.model_names = ['netG_A', 'netG_B', 'netD_A', 'netD_B'] + # else: # during test time, only load Gs + # self.model_names = ['netG_A', 'netG_B'] + self.model_names = ['netG_A', 'netG_B', 'netD_A', 'netD_B'] + self.model_names_save = ['netG_A', 'netG_B'] + + # define networks (both Generators and discriminators). The naming is different from those used in the paper. + # Code (vs. 
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) + if self.isTrain: # define discriminators + self.netD_A = networks.define_D(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + self.netD_B = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + + if self.use_MC: + opt.input_nc += 3 + opt.output_nc += 3 + + self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) + + + if self.isTrain: + if opt.lambda_id > 0.0: # only works when input and output images have the same number of channels + assert(opt.input_nc == opt.output_nc) + self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images + self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images + # define loss functions + self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss. + self.criterionCycle = torch.nn.L1Loss().to(self.device) + self.criterionIdt = torch.nn.L1Loss().to(self.device) + # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>. + self.optimizer_G = RAdam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizer_D = RAdam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D) + + if self.opt.use_segm_model: + self.segm_model = Custom(input_ch=3, output_ch=8, modelDim=2) + self.segm_model.load_state_dict(torch.load(opt.segm_model_path)) + opt.logger.info('### Segmentation Model Loaded ###') + + if len(self.gpu_ids) > 0: + self.segm_model.to(self.gpu_ids[0]) + self.segm_model = torch.nn.DataParallel(self.segm_model, self.gpu_ids) # multi-GPUs + + self.set_requires_grad(self.segm_model, False) + self.segm_model.train(False) + + self.segmentationloss = torch.nn.L1Loss().to(self.device) + self.loss_names.append('segm_cyc') + self.loss_names.append('segm_idt') + + self.visual_names += ['segm_B', 'rec_B_Segm', 'idt_B_Segm'] + + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input (dict): include the data itself and its metadata information. + + The option 'direction' can be used to swap domain A and domain B. 
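+
+ An illustrative (hypothetical) example of the dict this method expects, matching the keys accessed below:
+ {'A': image tensor of shape [N, C, H, W], 'B': image tensor of shape [N, C, H, W],
+ 'A_paths': list of file paths, 'B_paths': list of file paths}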
+ """ + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + if self.opt.use_segm_model: + with torch.no_grad(): + self.segm_B = self.segm_model(self.real_B) + + + def forward(self): + """Run forward pass; called by both functions <optimize_parameters> and <test>.""" + if self.use_MC: + self.real_A, self.real_B = concat_zeros([self.real_A, self.real_B]) + + self.fake_B = self.netG_A(self.real_A) # G_A(A) + self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A)) + self.fake_A = self.netG_B(self.real_B) # G_B(B) + self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B)) + + self.idt_A = self.netG_B(self.real_A) # G_A should be identity if real_B is fed: ||G_A(B) - B|| + self.idt_B = self.netG_A(self.real_B) # G_B should be identity if real_A is fed: ||G_B(A) - A|| + + if self.use_MC: + self.idt_A,self.idt_B, self.fake_B, self.fake_A, self.rec_A, self.rec_B, self.real_A, self.real_B = split_6_ch_to_3([self.idt_A, self.idt_B, self.fake_B, self.fake_A, self.rec_A, self.rec_B, self.real_A, self.real_B]) + + if self.opt.use_segm_model: + self.rec_B_Segm = self.segm_model(self.rec_B) #Segm(G_B(G_A(A))) + self.idt_B_Segm = self.segm_model(self.idt_B) + + + def backward_D_basic(self, netD, real, fake): + """Calculate GAN loss for the discriminator + + Parameters: + netD (network) -- the discriminator D + real (tensor array) -- real images + fake (tensor array) -- images generated by a generator + + Return the discriminator loss. + We also call loss_D.backward() to calculate the gradients. + """ + # Real + pred_real = netD(real) + loss_D_real = self.criterionGAN(pred_real, True) + # Fake + pred_fake = netD(fake.detach()) + loss_D_fake = self.criterionGAN(pred_fake, False) + # Combined loss and calculate gradients + loss_D = (loss_D_real + loss_D_fake) * 0.5 + loss_D.backward() + return loss_D + + def backward_D_A(self): + """Calculate GAN loss for discriminator D_A""" + self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_A, self.fake_A if self.opt.suffix != '_PoolD' else self.fake_A_pool.query(self.fake_A)) + + def backward_D_B(self): + """Calculate GAN loss for discriminator D_B""" + self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_B, self.fake_B if self.opt.suffix != '_PoolD' else self.fake_B_pool.query(self.fake_B)) + + def backward_G(self): + """Calculate the loss for generators G_A and G_B""" + lambda_idt = self.opt.lambda_id + lambda_A = self.opt.lambda_A + lambda_B = self.opt.lambda_B + lambda_Seg = self.opt.lambda_Seg + # Identity loss + # G_A should be identity if real_B is fed: ||G_A(B) - B|| + self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_A) * lambda_idt + # G_B should be identity if real_A is fed: ||G_B(A) - A|| + self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_B) * lambda_idt + # GAN loss D_A(G_A(A)) + self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_A), True) + # GAN loss D_B(G_B(B)) + self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_B), True) + # Forward cycle loss ||G_B(G_A(A)) - A|| + self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A + # Backward cycle loss ||G_A(G_B(B)) - B|| + self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B + + if self.opt.use_segm_model: + self.loss_segm_cyc = self.segmentationloss(self.rec_B_Segm, self.segm_B) * lambda_Seg + self.loss_segm_idt = self.segmentationloss(self.idt_B_Segm, self.segm_B) * 
lambda_Seg + else: + self.loss_segm_cyc = 0 + self.loss_segm_idt = 0 + + # combined loss and calculate gradients + self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_idt_A + self.loss_segm_cyc + self.loss_idt_B + self.loss_cycle_B + self.loss_segm_idt + self.loss_G.backward() + + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # forward + self.forward() # compute fake images and reconstruction images. + # G_A and G_B + self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs + self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero + self.backward_G() # calculate gradients for G_A and G_B + self.optimizer_G.step() # update G_A and G_B's weights + # D_A and D_B + self.set_requires_grad([self.netD_A, self.netD_B], True) + self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero + self.backward_D_A() # calculate gradients for D_A + self.backward_D_B() # calculate gradients for D_B + self.optimizer_D.step() # update D_A and D_B's weights + + + def optimize_gen_for_id_loss(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + if self.use_MC: + self.real_A, self.real_B = concat_zeros([self.real_A, self.real_B]) + + # Identity loss + self.idt_A = self.netG_B(self.real_A) # G_B should be identity if real_A is fed: ||G_B(A) - A|| + self.idt_B = self.netG_A(self.real_B) # G_A should be identity if real_B is fed: ||G_A(B) - B|| + + if self.use_MC: + self.idt_A, self.idt_B, self.real_A, self.real_B = split_6_ch_to_3([self.idt_A, self.idt_B, self.real_A, self.real_B]) + + # G_A and G_B + self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs + self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero + + self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_A) + self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_B) + + self.loss_G = self.loss_idt_A + self.loss_idt_B + self.loss_G.backward() + + self.optimizer_G.step() # update G_A and G_B's weights + + self.loss_segm_idt = self.loss_segm_cyc = self.loss_D_A = self.loss_D_B = self.loss_cycle_A = self.loss_cycle_B = self.loss_G_A = self.loss_G_B = 0 + self.fake_A = self.fake_B = self.rec_A = self.rec_B = self.segm_B = self.rec_B_Segm = self.idt_B_Segm = None + + + def computeLosses(self): + """Calculate losses assuming forward has been performed before""" + # G_A and G_B + """Calculate the loss for generators G_A and G_B""" + lambda_idt = self.opt.lambda_id + lambda_A = self.opt.lambda_A + lambda_B = self.opt.lambda_B + lambda_Seg = self.opt.lambda_Seg + # Identity loss + # G_A should be identity if real_B is fed: ||G_A(B) - B|| + self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_A) * lambda_idt + # G_B should be identity if real_A is fed: ||G_B(A) - A|| + self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_B) * lambda_idt + # GAN loss D_A(G_A(A)) + self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_A), True) + # GAN loss D_B(G_B(B)) + self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_B), True) + # Forward cycle loss || G_B(G_A(A)) - A|| + self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A + # Backward cycle loss || G_A(G_B(B)) - B|| + self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B + + if self.opt.use_segm_model: + self.loss_segm_cyc = self.segmentationloss(self.rec_B_Segm, 
self.segm_B) * lambda_Seg + self.loss_segm_idt = self.segmentationloss(self.idt_B_Segm, self.segm_B) * lambda_Seg + else: + self.loss_segm_cyc = 0 + self.loss_segm_idt = 0 + + # D_A + loss_D_real = self.criterionGAN(self.netD_A(self.real_A), True) + loss_D_fake = self.criterionGAN(self.netD_A(self.fake_A.detach()), False) + self.loss_D_A = (loss_D_real + loss_D_fake) * 0.5 + + # D_B + loss_D_real = self.criterionGAN(self.netD_B(self.real_B), True) + loss_D_fake = self.criterionGAN(self.netD_B(self.fake_B.detach()), False) + self.loss_D_B = (loss_D_real + loss_D_fake) * 0.5 + + + def perform_test_conversion(self, input): + if self.use_MC: + input = concat_zeros([input]) + + if self.opt.direction == 'AtoB': + return self.netG_A(input) # G_A(A) + else: + return self.netG_B(input) # G_B(B) diff --git a/models/model.py b/models/model.py new file mode 100644 index 0000000..0978ed3 --- /dev/null +++ b/models/model.py @@ -0,0 +1,217 @@ +import sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import models +from functools import partial +import math + + + + + +####################################### XAVIER WEIGHT INIT ######################################### +def init_weights_xavier_normal(m): + if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.ConvTranspose3d): + nn.init.xavier_normal_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0.0) + +def init_weights_xavier_uniform(m): + if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.ConvTranspose3d): + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0.0) + + + + +#################################################################################################### +# ----- Modified Unet from segmentation paper ----- # +class Custom(nn.Module): + def __init__(self, input_ch=3, output_ch=1, modelDim=2): + super(Custom, self).__init__() + assert modelDim == 2 or modelDim == 3, "Wrong unet-model dimension: " + str(modelDim) + + self.inc = initialconv(input_ch, 32, modelDim) + self.down1 = down(32, 64, modelDim) + self.down2 = down(64, 128, modelDim) + self.down3 = down(128, 256, modelDim) + self.down4 = down(256, 512, modelDim) + self.down5 = down(512, 1024, modelDim) + + self.up0 = up(1024, 512, 512, modelDim, upsampling=False) + self.up1 = up(512, 256, 256, modelDim, upsampling=False) + self.up2 = up(256, 128, 128, modelDim, upsampling=False) + self.up3 = up(128, 64, 64, modelDim, upsampling=False) + self.up4 = up(64, 32, 32, modelDim, upsampling=False) + self.outc = outconv(32, output_ch, modelDim) + + self.apply(init_weights_xavier_uniform) + + def forward(self, x): + x1 = self.inc(x) + x2 = self.down1(x1) + x3 = self.down2(x2) + x4 = self.down3(x3) + x5 = self.down4(x4) + x6 = self.down5(x5) + + x = self.up0(x6, x5) + x = self.up1(x, x4) + x = self.up2(x, x3) + x = self.up3(x, x2) + x = self.up4(x, x1) + x = self.outc(x) + + return torch.softmax(x, 1) + + +class initialconv(nn.Module): + def __init__(self, in_ch, out_ch, modelDim): + super(initialconv, self).__init__() + self.conv = conv_block(in_ch, out_ch, modelDim) + + def forward(self, x): + x = self.conv(x) + return x + + +class down(nn.Module): + def __init__(self, in_ch, out_ch, modelDim): + super(down, self).__init__() + if modelDim == 2: + self.max_pool_conv = nn.Sequential( + nn.MaxPool2d(2), + # nn.Conv2d(in_ch, in_ch, 
kernel_size=2, stride=2, groups=in_ch), + conv_block(in_ch, out_ch, modelDim) + ) + elif modelDim == 3: + self.max_pool_conv = nn.Sequential( + nn.MaxPool3d(2), + conv_block(in_ch, out_ch, modelDim) + ) + else: + sys.exit('Wrong dimension ' + str(modelDim) + ' given!') + + def forward(self, x): + x = self.max_pool_conv(x) + return x + + +class conv_block(nn.Module): + def __init__(self, in_ch, out_ch, modelDim): + super(conv_block, self).__init__() + if modelDim == 2: + self.conv = nn.Sequential( + nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), + nn.InstanceNorm2d(out_ch), + nn.LeakyReLU(inplace=True), + nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1), + nn.InstanceNorm2d(out_ch), + nn.LeakyReLU(inplace=True) + ) + elif modelDim == 3: + self.conv = nn.Sequential( + nn.Conv3d(in_ch, out_ch, kernel_size=3, padding=1), + nn.InstanceNorm3d(out_ch), + nn.LeakyReLU(inplace=True), + nn.Conv3d(out_ch, out_ch, kernel_size=3, padding=1), + nn.InstanceNorm3d(out_ch), + nn.LeakyReLU(inplace=True) + ) + else: + sys.exit('Wrong dimension '+str(modelDim)+' given!') + + def forward(self, x): + x = self.conv(x) + return x + + + +############################### don't forget about spatial size changes: ################################ +# max pool (kernel=2, stride=2) -> Input: (10x5) -> Output: (5x2) +# torch.nn.Conv2d(1,1,3,stride=2,padding=1) -> Input: (10x5) -> Output: (5x3) +# nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) -> Input: (10x5) -> Output: (20x10) +# nn.ConvTranspose2d(1, 1, 3, padding=1, stride=2) -> Input: (10x5) -> Output: (19x9) : *2-1 +# nn.ConvTranspose2d(1, 1, 3, padding=1, stride=2, output_padding=1) -> Input: (10x5) -> Output: (20x10) : *2 +# nn.ConvTranspose2d(1, 1, 2, padding=0, stride=2) -> Input: (10x5) -> Output: (20x10) : *2 +# nn.ConvTranspose2d(1, 1, 2, padding=0, stride=2, output_padding=1) -> Input: (10x5) -> Output: (21x11) : *2+1 +# => Vanilla Unet & nnUnet => Both use max pooling (in encoder) and transposed conv (in decoder)! 
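+#
+# A minimal commented sketch (added for illustration, not part of the original file) to double-check the shapes listed above,
+# assuming the torch / torch.nn imports at the top of this module:
+#   x = torch.randn(1, 1, 10, 5)
+#   nn.MaxPool2d(2)(x).shape                                   # torch.Size([1, 1, 5, 2])
+#   nn.Conv2d(1, 1, 3, stride=2, padding=1)(x).shape           # torch.Size([1, 1, 5, 3])
+#   nn.ConvTranspose2d(1, 1, 2, padding=0, stride=2)(x).shape  # torch.Size([1, 1, 20, 10])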
+######################################################################################################### +class up(nn.Module): + def __init__(self, in_ch_1, in_ch_2, out_ch, modelDim, upsampling=False): + super(up, self).__init__() + self.modelDim = modelDim + if modelDim == 2: + if upsampling: + self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + else: + # self.up = nn.ConvTranspose2d(in_ch_1, in_ch_1//2, 2, padding=0, stride=2) + self.up = nn.ConvTranspose2d(in_ch_1, in_ch_1, 2, padding=0, stride=2) + elif modelDim == 3: + if upsampling: + self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True) + else: + # self.up = nn.ConvTranspose3d(in_ch_1, in_ch_1//2, 2, padding=0, stride=2) + self.up = nn.ConvTranspose3d(in_ch_1, in_ch_1, 2, padding=0, stride=2) + else: + sys.exit('Wrong dimension ' + str(modelDim) + ' given!') + + # self.conv = conv_block_noPadding(in_ch_1//2 + in_ch_2, out_ch, modelDim) + self.conv = conv_block_noPadding(in_ch_1 + in_ch_2, out_ch, modelDim) + + def forward(self, x1, x2): #x2 provides equal/decreased by 1 axis sizes + x1 = self.up(x1) + startIndexDim2 = (x2.size()[2]-x1.size()[2])//2 + startIndexDim3 = (x2.size()[3]-x1.size()[3])//2 + x = torch.cat([x2[:,:,startIndexDim2:x1.size()[2]+startIndexDim2, startIndexDim3:x1.size()[3]+startIndexDim3], x1], dim=1) + x = self.conv(x) + return x + + +class conv_block_noPadding(nn.Module): + def __init__(self, in_ch, out_ch, modelDim): + super(conv_block_noPadding, self).__init__() + if modelDim == 2: + self.conv = nn.Sequential( + nn.Conv2d(in_ch, out_ch, kernel_size=3), + nn.InstanceNorm2d(out_ch), + nn.LeakyReLU(inplace=True), + nn.Conv2d(out_ch, out_ch, kernel_size=3), + nn.InstanceNorm2d(out_ch), + nn.LeakyReLU(inplace=True) + ) + elif modelDim == 3: + self.conv = nn.Sequential( + nn.Conv3d(in_ch, out_ch, kernel_size=3), + nn.InstanceNorm3d(out_ch), + nn.LeakyReLU(inplace=True), + nn.Conv3d(out_ch, out_ch, kernel_size=3), + nn.InstanceNorm3d(out_ch), + nn.LeakyReLU(inplace=True) + ) + else: + sys.exit('Wrong dimension '+str(modelDim)+' given!') + + def forward(self, x): + x = self.conv(x) + return x + + +class outconv(nn.Module): + def __init__(self, in_ch, out_ch, modelDim): + super(outconv, self).__init__() + if modelDim == 2: + self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=1) + elif modelDim == 3: + self.conv = nn.Conv3d(in_ch, out_ch, kernel_size=1) + else: + sys.exit('Wrong dimension ' + str(modelDim) + ' given!') + + def forward(self, x): + x = self.conv(x) + return x + + + diff --git a/models/networks.py b/models/networks.py new file mode 100644 index 0000000..45806b5 --- /dev/null +++ b/models/networks.py @@ -0,0 +1,868 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +import torch +import torch.nn as nn +from torch.nn import init +import functools +from torch.optim import lr_scheduler +from torch.nn.parameter import Parameter + +############################################################################### +# Helper Functions +############################################################################### + + +class Identity(nn.Module): + def forward(self, x): + return x + + +def get_norm_layer(norm_type='instance'): + """Return a normalization layer + + Parameters: + norm_type (str) -- the name of the normalization layer: batch | instance | none + + For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). 
+ For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics. + """ + if norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + elif norm_type == 'none': + norm_layer = lambda x: Identity() + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + + +def get_scheduler(optimizer, opt): + """Return a learning rate scheduler + + Parameters: + optimizer -- the optimizer of the network + opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.  + opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine + + For 'linear', we keep the same learning rate for the first <opt.niter> epochs + and linearly decay the rate to zero over the next <opt.niter_decay> epochs. + For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. + See https://pytorch.org/docs/stable/optim.html for more details. + """ + if opt.lr_policy == 'linear': + def lambda_rule(iteration): + lr_l = 1.0 - max(0, iteration - (opt.niters - opt.niters_linDecay)) / float(opt.niters_linDecay + 1) + return lr_l + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + elif opt.lr_policy == 'step': + scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.niters_stepDecay, gamma=0.1) + elif opt.lr_policy == 'plateau': + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) ## default patience 5 + elif opt.lr_policy == 'cosine': + scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0) + else: + return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) + return scheduler + + +def init_weights(net, init_type='normal', init_gain=0.02): + """Initialize network weights. + + Parameters: + net (network) -- network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + + We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might + work better for some applications. Feel free to try yourself. + """ + def init_func(m): # define the initialization function + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, init_gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=init_gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=init_gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. + init.normal_(m.weight.data, 1.0, init_gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) # apply the initialization function <init_func> + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): + """Initialize a network: 1. 
register CPU/GPU device (with multi-GPU support); 2. initialize the network weights + Parameters: + net (network) -- the network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Return an initialized network. + """ + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs + init_weights(net, init_type, init_gain=init_gain) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]): + """Create a generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128 + norm (str) -- the name of normalization layers used in the network: batch | instance | none + use_dropout (bool) -- if use dropout layers. + init_type (str) -- the name of our initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a generator + + Our current implementation provides two types of generators: + U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images) + The original U-Net paper: https://arxiv.org/abs/1505.04597 + + Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks) + Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations. + We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style). + + + The generator has been initialized by <init_net>. It uses RELU for non-linearity. + """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG.startswith('resnet'): # netG = 'resnet_X_Yblocks' + net = ResnetGenerator(input_nc, output_nc, int(netG.split('_')[1]), ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=int(netG.split('_')[-1][:-6])) + elif netG.startswith('unet'): # netG = 'unet_8' + net = UnetGenerator(input_nc, output_nc, int(netG.split('_')[-1]), ngf, norm_layer=norm_layer, use_dropout=use_dropout) + else: + raise NotImplementedError('Generator model name [%s] is not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]): + """Create a discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the first conv layer + netD (str) -- the architecture's name: basic | n_layers | pixel + n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers' + norm (str) -- the type of normalization layers used in the network. + init_type (str) -- the name of the initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. 
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a discriminator + + Our current implementation provides three types of discriminators: + [basic]: 'PatchGAN' classifier described in the original pix2pix paper. + It can classify whether 70×70 overlapping patches are real or fake. + Such a patch-level discriminator architecture has fewer parameters + than a full-image discriminator and can work on arbitrarily-sized images + in a fully convolutional fashion. + + [n_layers]: With this mode, you cna specify the number of conv layers in the discriminator + with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).) + + [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not. + It encourages greater color diversity but has no effect on spatial statistics. + + The discriminator has been initialized by <init_net>. It uses Leakly RELU for non-linearity. + """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netD == 'basic': # default PatchGAN classifier + net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer) + elif netD == 'n_layers': # more options + net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer) + elif netD == 'pixel': # classify if each pixel is real or fake + net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer) + else: + raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD) + return init_net(net, init_type, init_gain, gpu_ids) + + + + +############################################################################## +# Classes +############################################################################## +class GANLoss(nn.Module): + """Define different GAN objectives. + + The GANLoss class abstracts away the need to create the target label tensor + that has the same size as the input. + """ + + def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0): + """ Initialize the GANLoss class. + + Parameters: + gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. + target_real_label (bool) - - label for a real image + target_fake_label (bool) - - label of a fake image + + Note: Do not use sigmoid as the last layer of Discriminator. + LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss. + """ + super(GANLoss, self).__init__() + self.register_buffer('real_label', torch.tensor(target_real_label)) + self.register_buffer('fake_label', torch.tensor(target_fake_label)) + self.gan_mode = gan_mode + if gan_mode == 'lsgan': + self.loss = nn.MSELoss() + elif gan_mode == 'vanilla': + self.loss = nn.BCEWithLogitsLoss() + elif gan_mode in ['wgangp']: + self.loss = None + else: + raise NotImplementedError('gan mode %s not implemented' % gan_mode) + + def get_target_tensor(self, prediction, target_is_real): + """Create label tensors with the same size as the input. + + Parameters: + prediction (tensor) - - tpyically the prediction from a discriminator + target_is_real (bool) - - if the ground truth label is for real images or fake images + + Returns: + A label tensor filled with ground truth label, and with the size of the input + """ + + if target_is_real: + target_tensor = self.real_label + else: + target_tensor = self.fake_label + return target_tensor.expand_as(prediction) + + def __call__(self, prediction, target_is_real): + """Calculate loss given Discriminator's output and grount truth labels. 
+ + Parameters: + prediction (tensor) - - tpyically the prediction output from a discriminator + target_is_real (bool) - - if the ground truth label is for real images or fake images + + Returns: + the calculated loss. + """ + if self.gan_mode in ['lsgan', 'vanilla']: + target_tensor = self.get_target_tensor(prediction, target_is_real) + loss = self.loss(prediction, target_tensor) + elif self.gan_mode == 'wgangp': + if target_is_real: + loss = -prediction.mean() + else: + loss = prediction.mean() + return loss + +class CAMLoss(nn.Module): + def __init__(self, target_real_label=1.0, target_fake_label=0.0): + super(CAMLoss, self).__init__() + self.register_buffer('real_label', torch.tensor(target_real_label)) + self.register_buffer('fake_label', torch.tensor(target_fake_label)) + self.loss = nn.BCEWithLogitsLoss() + + def get_target_tensor(self, prediction, target_is_real): + if target_is_real: + target_tensor = self.real_label + else: + target_tensor = self.fake_label + return target_tensor.expand_as(prediction) + + def __call__(self, prediction, target_is_real): + target_tensor = self.get_target_tensor(prediction, target_is_real) + return self.loss(prediction, target_tensor) + + +def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): + """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 + + Arguments: + netD (network) -- discriminator network + real_data (tensor array) -- real images + fake_data (tensor array) -- generated images from the generator + device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') + type (str) -- if we mix real and fake data or not [real | fake | mixed]. + constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2 + lambda_gp (float) -- weight for this loss + + Returns the gradient penalty loss + """ + if lambda_gp > 0.0: + if type == 'real': # either use real images, fake images, or a linear interpolation of two. + interpolatesv = real_data + elif type == 'fake': + interpolatesv = fake_data + elif type == 'mixed': + alpha = torch.rand(real_data.shape[0], 1, device=device) + alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) + interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) + else: + raise NotImplementedError('{} not implemented'.format(type)) + interpolatesv.requires_grad_(True) + disc_interpolates = netD(interpolatesv) + gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, + grad_outputs=torch.ones(disc_interpolates.size()).to(device), + create_graph=True, retain_graph=True, only_inputs=True) + gradients = gradients[0].view(real_data.size(0), -1) # flat the data + gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps + return gradient_penalty, gradients + else: + return 0.0, None + + +class ResnetGenerator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. 
+ + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, n_downsampling = 2, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class ResnetGenerator_UGATIT(nn.Module): + def __init__(self, input_nc, output_nc, n_downsampling = 2, ngf=64, n_blocks=6, img_size=256): + assert(n_blocks >= 0) + super(ResnetGenerator_UGATIT, self).__init__() + self.input_nc = input_nc + self.output_nc = output_nc + self.ngf = ngf + self.n_blocks = n_blocks + self.img_size = img_size + + DownBlock = [] + DownBlock += [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0, bias=False), + nn.InstanceNorm2d(ngf), + nn.ReLU(True)] + + # Down-Sampling + for i in range(n_downsampling): + mult = 2**i + DownBlock += [nn.ReflectionPad2d(1), + nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0, bias=False), + nn.InstanceNorm2d(ngf * mult * 2), + nn.ReLU(True)] + + # Down-Sampling Bottleneck + mult = 2**n_downsampling + for i in range(n_blocks): + DownBlock += [ResnetBlock_UGATIT(ngf * mult, use_bias=False)] + + # Class Activation Map + self.gap_fc = nn.Linear(ngf * mult, 1, bias=False) + self.gmp_fc = nn.Linear(ngf * mult, 1, bias=False) + self.conv1x1 = nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=1, stride=1, bias=True) + self.relu = nn.ReLU(True) + + # Gamma, Beta block + FC = [nn.Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias=False), nn.ReLU(True), + nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True)] + self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False) + self.beta = nn.Linear(ngf * mult, ngf * 
mult, bias=False) + + # Up-Sampling Bottleneck + for i in range(n_blocks): + setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False)) + + # Up-Sampling + UpBlock2 = [] + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + UpBlock2 += [nn.Upsample(scale_factor=2, mode='nearest'), + nn.ReflectionPad2d(1), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False), + ILN(int(ngf * mult / 2)), + nn.ReLU(True)] + + UpBlock2 += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False), + nn.Tanh()] + + self.DownBlock = nn.Sequential(*DownBlock) + self.FC = nn.Sequential(*FC) + self.UpBlock2 = nn.Sequential(*UpBlock2) + + def forward(self, input): + x = self.DownBlock(input) + + gap = torch.nn.functional.adaptive_avg_pool2d(x, 1) + gap_logit = self.gap_fc(gap.view(x.shape[0], -1)) + gap_weight = list(self.gap_fc.parameters())[0] + gap = x * gap_weight.unsqueeze(2).unsqueeze(3) + + gmp = torch.nn.functional.adaptive_max_pool2d(x, 1) + gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1)) + gmp_weight = list(self.gmp_fc.parameters())[0] + gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3) + + cam_logit = torch.cat([gap_logit, gmp_logit], 1) + x = torch.cat([gap, gmp], 1) + x = self.relu(self.conv1x1(x)) + + heatmap = torch.sum(x, dim=1, keepdim=True) + + x_ = self.FC(x.view(x.shape[0], -1)) + gamma, beta = self.gamma(x_), self.beta(x_) + + + for i in range(self.n_blocks): + x = getattr(self, 'UpBlock1_' + str(i+1))(x, gamma, beta) + out = self.UpBlock2(x) + + return out, cam_logit, heatmap + +class ResnetAdaILNBlock(nn.Module): + def __init__(self, dim, use_bias): + super(ResnetAdaILNBlock, self).__init__() + self.pad1 = nn.ReflectionPad2d(1) + self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias) + self.norm1 = adaILN(dim) + self.relu1 = nn.ReLU(True) + + self.pad2 = nn.ReflectionPad2d(1) + self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias) + self.norm2 = adaILN(dim) + + def forward(self, x, gamma, beta): + out = self.pad1(x) + out = self.conv1(out) + out = self.norm1(out, gamma, beta) + out = self.relu1(out) + out = self.pad2(out) + out = self.conv2(out) + out = self.norm2(out, gamma, beta) + return out + x + +class adaILN(nn.Module): + def __init__(self, num_features, eps=1e-5): + super(adaILN, self).__init__() + self.eps = eps + self.rho = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.rho.data.fill_(0.9) + + def forward(self, input, gamma, beta): + in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True) + out_in = (input - in_mean) / torch.sqrt(in_var + self.eps) + ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True) + out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps) + out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln + out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3) + return out + +class ILN(nn.Module): + def __init__(self, num_features, eps=1e-5): + super(ILN, self).__init__() + self.eps = eps + self.rho = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.beta = Parameter(torch.Tensor(1, num_features, 1, 1)) + self.rho.data.fill_(0.0) + self.gamma.data.fill_(1.0) + self.beta.data.fill_(0.0) + + def forward(self, input): + in_mean, in_var 
= torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True) + out_in = (input - in_mean) / torch.sqrt(in_var + self.eps) + ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True) + out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps) + out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln + out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1) + return out + +class ResnetBlock_UGATIT(nn.Module): + def __init__(self, dim, use_bias): + super(ResnetBlock_UGATIT, self).__init__() + conv_block = [] + conv_block += [nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias), + nn.InstanceNorm2d(dim), + nn.ReLU(True)] + + conv_block += [nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias), + nn.InstanceNorm2d(dim)] + + self.conv_block = nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + + +class ResnetBlock(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in <forward> function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
+ use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + """Forward function (with skip connections)""" + out = x + self.conv_block(x) # add skip connections + return out + + +class UnetGenerator(nn.Module): + """Create a Unet-based generator""" + + def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet generator + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7, + image of size 128x128 will become of size 1x1 # at the bottleneck + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + + We construct the U-Net from the innermost layer to the outermost layer. + It is a recursive process. + """ + super(UnetGenerator, self).__init__() + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class UnetSkipConnectionBlock(nn.Module): + """Defines the Unet submodule with skip connection. + X -------------------identity---------------------- + |-- downsampling -- |submodule| -- upsampling --| + """ + + def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet submodule with skip connections. 
+ + Parameters: + outer_nc (int) -- the number of filters in the outer conv layer + inner_nc (int) -- the number of filters in the inner conv layer + input_nc (int) -- the number of channels in input images/features + submodule (UnetSkipConnectionBlock) -- previously defined submodules + outermost (bool) -- if this module is the outermost module + innermost (bool) -- if this module is the innermost module + norm_layer -- normalization layer + user_dropout (bool) -- if use dropout layers. + """ + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1) + #conv = nn.Conv2d(inner_nc,outer_nc,kernel_size=5,stride=1,padding=2 ) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up #+ [conv] + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) + + +class NLayerDiscriminator(nn.Module): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d): + """Construct a PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + n_layers (int) -- the number of conv layers in the discriminator + norm_layer -- normalization layer + """ + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = 1 + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2 ** n_layers, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map + self.model = nn.Sequential(*sequence) + + def forward(self, input): + 
"""Standard forward.""" + return self.model(input) + + + +class Discriminator_UGATIT(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=5): + super(Discriminator_UGATIT, self).__init__() + model = [nn.ReflectionPad2d(1), + nn.utils.spectral_norm( + nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)), + nn.LeakyReLU(0.2, True)] + + for i in range(1, n_layers - 2): + mult = 2 ** (i - 1) + model += [nn.ReflectionPad2d(1), + nn.utils.spectral_norm( + nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)), + nn.LeakyReLU(0.2, True)] + + mult = 2 ** (n_layers - 2 - 1) + model += [nn.ReflectionPad2d(1), + nn.utils.spectral_norm( + nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)), + nn.LeakyReLU(0.2, True)] + + # Class Activation Map + mult = 2 ** (n_layers - 2) + self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False)) + self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False)) + self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True) + self.leaky_relu = nn.LeakyReLU(0.2, True) + + self.pad = nn.ReflectionPad2d(1) + self.conv = nn.utils.spectral_norm( + nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False)) + + self.model = nn.Sequential(*model) + + def forward(self, input): + x = self.model(input) + + gap = torch.nn.functional.adaptive_avg_pool2d(x, 1) + gap_logit = self.gap_fc(gap.view(x.shape[0], -1)) + gap_weight = list(self.gap_fc.parameters())[0] + gap = x * gap_weight.unsqueeze(2).unsqueeze(3) + + gmp = torch.nn.functional.adaptive_max_pool2d(x, 1) + gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1)) + gmp_weight = list(self.gmp_fc.parameters())[0] + gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3) + + cam_logit = torch.cat([gap_logit, gmp_logit], 1) + x = torch.cat([gap, gmp], 1) + x = self.leaky_relu(self.conv1x1(x)) + + heatmap = torch.sum(x, dim=1, keepdim=True) + + x = self.pad(x) + out = self.conv(x) + + return out, cam_logit, heatmap + + +class PixelDiscriminator(nn.Module): + """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" + + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): + """Construct a 1x1 PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + """ + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + """Standard forward.""" + return self.net(input) + + +class RhoClipper(object): + def __init__(self, min, max): + self.clip_min = min + self.clip_max = max + assert min < max + + def __call__(self, module): + if hasattr(module, 'rho'): + w = module.rho.data + w = w.clamp(self.clip_min, self.clip_max) + module.rho.data = w + diff --git a/models/nutils.py b/models/nutils.py new file mode 100644 index 0000000..9816958 --- /dev/null +++ b/models/nutils.py @@ -0,0 +1,612 @@ 
+import numpy as np +import torch +from subprocess import check_output +import os +import psutil +import math + +import matplotlib as mpl +import matplotlib.pyplot as plt + +from torch.utils.data import DataLoader +from torch.utils.data.sampler import SubsetRandomSampler +import torch.nn as nn + +from scipy.ndimage.measurements import label +from scipy.ndimage.morphology import binary_dilation, binary_fill_holes + +import torch.nn as nn +import torch.nn.functional as F + + +colors = torch.tensor([[ 0, 0, 0], # Black + [255, 0, 0], # Red + [ 0, 255, 0], # Green + [ 0, 0, 255], # Blue + [ 0, 255, 255], # Cyan + [255, 0, 255], # Magenta + [255, 255, 0], # Yellow + [139, 69, 19], # Brown (saddlebrown) + [128, 0, 128], # Purple + [255, 140, 0], # Orange + [255, 255, 255]], dtype=torch.uint8) # White + + + +def convert_labelmap_to_rgb(labelmap): + """ + Method used to generate rgb label maps for tensorboard visualization + :param labelmap: HxW label map tensor containing values from 0 to n_classes + :return: 3xHxW RGB label map containing colors in the following order: Black (background), Red, Green, Blue, Cyan, Magenta, Yellow, Brown, Orange, Purple + """ + n_classes = labelmap.max() + + result = torch.zeros(size=(labelmap.size()[0], labelmap.size()[1], 3), dtype=torch.uint8) + for i in range(1, n_classes+1): + result[labelmap == i] = colors[i] + + return result.permute(2, 0, 1) + +def convert_labelmap_to_rgb_with_instance_first_class(labelmap, structure): + """ + Method used to generate rgb label maps for tensorboard visualization + :param labelmap: HxW label map tensor containing values from 0 to n_classes + :return: 3xHxW RGB label map containing colors in the following order: Black (background), Red, Green, Blue, Cyan, Magenta, Yellow, Brown, Orange, Purple + """ + n_classes = labelmap.max() + + result = np.zeros(shape=(labelmap.shape[0], labelmap.shape[1], 3), dtype=np.uint8) + for i in range(2, n_classes+1): + result[labelmap == i] = colors[i].numpy() + + structure = np.ones((3, 3), dtype=np.int) + + labeledTubuli, numberTubuli = label(np.asarray(labelmap == 1, np.uint8), structure) # datatype of 'labeledTubuli': int32 + for i in range(1, numberTubuli + 1): + result[binary_dilation(binary_dilation(binary_dilation(labeledTubuli == i)))] = np.random.randint(low=0, high=256, size=3, dtype=np.uint8) # assign random colors to tubuli + + return result + +def convert_labelmap_to_rgb_except_first_class(labelmap): + """ + Method used to generate rgb label maps for tensorboard visualization + :param labelmap: HxW label map tensor containing values from 0 to n_classes + :return: 3xHxW RGB label map containing colors in the following order: Black (background), Red, Green, Blue, Cyan, Magenta, Yellow, Brown, Orange, Purple + """ + n_classes = labelmap.max() + + result = torch.zeros(size=(labelmap.size()[0], labelmap.size()[1], 3), dtype=torch.uint8) + for i in range(2, n_classes+1): + result[labelmap == i] = colors[i] + + return result.permute(2, 0, 1) + + +def getColorMapForLabelMap(): + return ['black', 'red', 'green', 'blue', 'cyan', 'magenta', 'yellow', 'brown', 'orange', 'purple', 'white'] + +def saveFigureResults(img, outputPrediction, postprocessedPrediction, finalPredictionRGB, GT, preprocessedGT, preprocessedGTrgb, fullResultPath, alpha=0.4): + figpath = fullResultPath + + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert outputPrediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! 
Add some colors!' + customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + # avoid brown color (border visualization) in output for final GT and prediction + postprocessedPrediction[postprocessedPrediction==7] = 0 + preprocessedGT[preprocessedGT==7] = 0 + + # also dilate tubuli here + postprocessedPrediction[binary_dilation(postprocessedPrediction==1)] = 1 + + predictionMask = np.ma.masked_where(postprocessedPrediction == 0, postprocessedPrediction) + + + + #plt.figure(figsize=(16, 8.1)) + saveImage(img,figpath[:-4] + '_org.png',(6.4,6.4)) + + + figSize = (5.12,5.12) + fig = plt.figure(figsize=figSize, dpi = 100) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(outputPrediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + s = str(figpath[:-4] + '_pred.png') + plt.savefig(s) + + plt.close() + + + fig = plt.figure(figsize=figSize, dpi = 100) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(postprocessedPrediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + + plt.savefig(figpath[:-4] + '_ppred.png') + + plt.close() + + + saveImage(finalPredictionRGB,figpath[:-4] + '_fpred.png',figSize) + + fig = plt.figure(figsize = (5.12,5.12),dpi = 100) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(img[(img.shape[0]-outputPrediction.shape[0])//2:(img.shape[0]-outputPrediction.shape[0])//2+outputPrediction.shape[0],(img.shape[1]-outputPrediction.shape[1])//2:(img.shape[1]-outputPrediction.shape[1])//2+outputPrediction.shape[1],:]) + ax.imshow(predictionMask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha = 1) + plt.savefig(figpath[:-4] + '_ovpred.png', pad_inches=0) + plt.close() + + fig = plt.figure(figsize = (5.12,5.12),dpi = 100) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(GT, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.savefig(figpath[:-4] + '_GT.png', pad_inches=0) + plt.close() + + fig = plt.figure(figsize = (5.12,5.12),dpi = 100) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(preprocessedGT, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.savefig(figpath[:-4] + '_ppGT.png', pad_inches=0) + plt.close() + + +def savePredictionResults(predictionWithoutTubuliDilation, fullResultPath, figSize): + prediction = predictionWithoutTubuliDilation.copy() + prediction[binary_dilation(binary_dilation(binary_dilation(prediction == 1)))] = 1 + + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' + customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(prediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.savefig(fullResultPath) + plt.close() + +def savePredictionResultsWithoutDilation(prediction, fullResultPath, figSize): + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' 
+ customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(prediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.savefig(fullResultPath) + plt.close() + +def savePredictionOverlayResults(img, predictionWithoutTubuliDilation, fullResultPath, figSize, alpha=0.4): + prediction = predictionWithoutTubuliDilation.copy() + prediction[binary_dilation(binary_dilation(binary_dilation(prediction == 1)))] = 1 + predictionMask = np.ma.masked_where(prediction == 0, prediction) + + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' + customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(img) + ax.imshow(predictionMask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha = alpha) + plt.savefig(fullResultPath) + plt.close() + +def saveRGBPredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha=0.4): + predictionMask = prediction.sum(2)==0 + predictionCopy = prediction.copy() + predictionCopy[predictionMask] = img[predictionMask] + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(np.asarray(np.round(predictionCopy*alpha+(1-alpha)*img), np.uint8)) + plt.savefig(fullResultPath) + plt.close() + +def saveImage(img, fullResultPath, figSize): + fig = plt.figure(figsize=figSize, dpi = 100) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(img) + + plt.savefig(fullResultPath) + plt.close() + + + +def getCrossValSplits(dataIDX, amountFolds, foldNo, setting): + """ + Cross-Validation-Split of indices according to fold number and setting + Usage: + dataIDX = np.arange(dataset.__len__()) + # np.random.shuffle(dataIDX) + for i in range(amountFolds): + train_idx, val_idx, test_idx = getCrossFoldSplits(dataIDX=dataIDX, amountFolds=amountFolds, foldNo=i+1, setting=setting) + :param dataIDX: All data indices stored in numpy array + :param amountFolds: Total amount of folds + :param foldNo: Fold number, # CARE: Fold numbers start with 1 and go up to amountFolds ! # + :param setting: Train / Train+Test / Train+Val / Train+Test+Val + :return: tuple consisting of 3 numpy arrays (trainIDX, valIDX, testIDX) containing indices according to split + """ + assert (setting in ['train_val_test', 'train_test', 'train_val', 'train']), 'Given setting >'+setting+'< is incorrect!' 
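+    # Example: with 10 indices, amountFolds=5 and foldNo=2, the test (or val) slice
+    # computed below is dataIDX[2:4]; in the 'train_val_test' setting the validation
+    # slice is the following fold, dataIDX[4:6] (wrapping around to dataIDX[0:2] for
+    # the last fold), and the remaining indices form the training split.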
+ + num_total_data = dataIDX.__len__() + + if setting == 'train': + return dataIDX, None, None + + elif setting == 'train_val': + valIDX = dataIDX[num_total_data * (foldNo - 1) // amountFolds: num_total_data * foldNo // amountFolds] + trainIDX = np.setxor1d(dataIDX, valIDX) + return trainIDX, valIDX, None + + elif setting == 'train_test': + testIDX = dataIDX[num_total_data * (foldNo - 1) // amountFolds: num_total_data * foldNo // amountFolds] + trainIDX = np.setxor1d(dataIDX, testIDX) + return trainIDX, None, testIDX + + elif setting == 'train_val_test': + testIDX = dataIDX[num_total_data * (foldNo - 1) // amountFolds: num_total_data * foldNo // amountFolds] + if foldNo != amountFolds: + valIDX = dataIDX[num_total_data * foldNo // amountFolds: num_total_data * (foldNo+1) // amountFolds] + else: + valIDX = dataIDX[0 : num_total_data // amountFolds] + trainIDX = np.setxor1d(np.setxor1d(dataIDX, testIDX), valIDX) + return trainIDX, valIDX, testIDX + + else: + raise ValueError('Given setting >'+str(setting)+'< is invalid!') + + +def parse_nvidia_smi(unit=0): + result = check_output(["nvidia-smi", "-i", str(unit),]).decode('utf-8').split('\n') + return 'Current GPU usage: ' + result[0] + '\r\n' + result[5] + '\r\n' + result[8] + + +def parse_RAM_info(): + return 'Current RAM usage: '+str(round(psutil.Process(os.getpid()).memory_info().rss / 1E6, 2))+' MB' + + +def countParam(model): + model_parameters = filter(lambda p: p.requires_grad, model.parameters()) + params = sum([np.prod(p.size()) for p in model_parameters]) + return params + + +def getOneHotEncoding(imgBatch, labelBatch): + """ + :param imgBatch: image minibatch (FloatTensor) to extract shape and device info for output + :param labelBatch: label minibatch (LongTensor) to be converted to one-hot encoding + :return: One-hot encoded label minibatch with equal size as imgBatch and stored on same device + """ + if imgBatch.size()[1] != 1: # Multi-label segmentation otherwise binary segmentation + labelBatch = labelBatch.unsqueeze(1) + onehotEncoding = torch.zeros_like(imgBatch) + onehotEncoding.scatter_(1, labelBatch, 1) + return onehotEncoding + return labelBatch + + +def getWeightsForCEloss(dataset, train_idx, areLabelsOnehotEncoded, device, logger): + # Choice 1) Manually set custom weights + weights = torch.tensor([1,2,4,6,2,3], dtype=torch.float32, device=device) + weights = weights / weights.sum() + + # Choice 2) Compute weights as "np.mean(histogram) / histogram" + dataloader = DataLoader(dataset=dataset, batch_size=6, sampler=SubsetRandomSampler(train_idx), num_workers=6) + + if areLabelsOnehotEncoded: + histograms = 0 + for batch in dataloader: + imgBatch, segBatch = batch + amountLabels = segBatch.size()[1] + if amountLabels == 1: # binary segmentation + histograms = histograms + torch.tensor([(segBatch==0).sum(),(segBatch==1).sum()]) + else: # multi-label segmentation + if imgBatch.dim() == 4: #2D data + histograms = histograms + segBatch.sum(3).sum(2).sum(0) + else: #3D data + histograms = histograms + segBatch.sum(4).sum(3).sum(2).sum(0) + + histograms = histograms.numpy() + else: + histograms = np.array([0]) + for batch in dataloader: + _, segBatch = batch + + segHistogram = np.histogram(segBatch.numpy(), segBatch.numpy().max()+1)[0] + + if len(histograms) >= len(segHistogram): #(segHistogram could have different size than histograms) + histograms[:len(segHistogram)] += segHistogram + else: + segHistogram[:len(histograms)] += histograms + histograms = segHistogram + + weights = np.mean(histograms) / histograms + weights = 
torch.from_numpy(weights).float().to(device) + + logger.info('=> Weights for CE-loss: '+str(weights)) + + return weights + + + +def getMeanDiceScores(diceScores, logger): + """ + Compute mean label dice scores of numpy dice score array (2d) (and its mean) + :return: mean label dice scores with '-1' representing totally missing label (meanLabelDiceScores), mean overall dice score (meanOverallDice) + """ + meanLabelDiceScores = np.ma.masked_where(diceScores == -1, diceScores).mean(0).data + label_GT_occurrences = (diceScores != -1).sum(0) + if (label_GT_occurrences == 0).any(): + logger.info('[# WARNING #] Label(s): ' + str(np.argwhere(label_GT_occurrences == 0).flatten() + 1) + ' not present at all in current dataset split!') + meanLabelDiceScores[label_GT_occurrences == 0] = -1 + meanOverallDice = meanLabelDiceScores[meanLabelDiceScores != -1].mean() + + return meanLabelDiceScores, meanOverallDice + + +def getDiceScores(prediction, segBatch): + """ + Compute mean dice scores of predicted foreground labels. + NOTE: Dice scores of missing gt labels will be excluded and are thus represented by -1 value entries in returned dice score matrix! + NOTE: Method changes prediction to 0/1 values in the binary case! + :param prediction: BxCxHxW (if 2D) or BxCxHxWxD (if 3D) FloatTensor (care: prediction has not undergone any final activation!) (note: C=1 for binary segmentation task) + :param segBatch: BxCxHxW (if 2D) or BxCxHxWxD (if 3D) FloatTensor (Onehot-Encoding) or Bx1xHxW (if 2D) or Bx1xHxWxD (if 3D) LongTensor + :return: Numpy array containing BxC-1 (background excluded) dice scores + """ + batchSize, amountClasses = prediction.size()[0], prediction.size()[1] + + if amountClasses == 1: # binary segmentation task => simulate sigmoid to get label results + prediction[prediction >= 0] = 1 + prediction[prediction < 0] = 0 + prediction = prediction.squeeze(1) + segBatch = segBatch.squeeze(1) + amountClasses += 1 + else: # multi-label segmentation task + prediction = prediction.argmax(1) # LongTensor without C-channel + if segBatch.dtype == torch.float32: # segBatch is onehot-encoded + segBatch = segBatch.argmax(1) + else: + segBatch = segBatch.squeeze(1) + + prediction = prediction.view(batchSize, -1) + segBatch = segBatch.view(batchSize, -1) + + labelDiceScores = np.zeros((batchSize, amountClasses-1), dtype=np.float32) - 1 #ignore background class! + for b in range(batchSize): + currPred = prediction[b,:] + currGT = segBatch[b,:] + + for c in range(1,amountClasses): + classPred = (currPred == c).float() + classGT = (currGT == c).float() + + if classGT.sum() != 0: # only evaluate label prediction when is also present in ground-truth + labelDiceScores[b, c-1] = ((2. 
* (classPred * classGT).sum()) / (classGT.sum() + classPred.sum())).item() + + return labelDiceScores + + + +import numpy as np +from skimage.util import view_as_windows +from itertools import product +from typing import Tuple + +def patchify(patches: np.ndarray, patch_size: Tuple[int, int], step: int = 1): + return view_as_windows(patches, patch_size, step) + +def unpatchify(patches: np.ndarray, imsize: Tuple[int, int]): + + assert len(patches.shape) == 4 + + i_h, i_w = imsize + image = np.zeros(imsize, dtype=patches.dtype) + divisor = np.zeros(imsize, dtype=patches.dtype) + + n_h, n_w, p_h, p_w = patches.shape + + # Calculat the overlap size in each axis + o_w = (n_w * p_w - i_w) / (n_w - 1) + o_h = (n_h * p_h - i_h) / (n_h - 1) + + # The overlap should be integer, otherwise the patches are unable to reconstruct into a image with given shape + assert int(o_w) == o_w + assert int(o_h) == o_h + + o_w = int(o_w) + o_h = int(o_h) + + s_w = p_w - o_w + s_h = p_h - o_h + + for i, j in product(range(n_h), range(n_w)): + patch = patches[i,j] + image[(i * s_h):(i * s_h) + p_h, (j * s_w):(j * s_w) + p_w] += patch + divisor[(i * s_h):(i * s_h) + p_h, (j * s_w):(j * s_w) + p_w] += 1 + + return image / divisor + +# Examplary use: +# # # # # # # # # +# import numpy as np +# from patchify import patchify, unpatchify +# +# image = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) +# +# patches = patchify(image, (2,2), step=1) # split image into 2*3 small 2*2 patches. +# +# assert patches.shape == (2, 3, 2, 2) +# reconstructed_image = unpatchify(patches, image.shape) +# +# assert (reconstructed_image == image).all() + + + +def getChannelSmootingConvLayer(channels, kernel_size=5, sigma=1.5): + + # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2) + x_cord = torch.arange(kernel_size) + x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size) + y_grid = x_grid.t() + xy_grid = torch.stack([x_grid, y_grid], dim=-1) + + mean = (kernel_size - 1) / 2. + variance = sigma ** 2. + + # Calculate the 2-dimensional gaussian kernel which is + # the product of two gaussian distributions for two different + # variables (in this case called x and y) + gaussian_kernel = (1. / (2. * math.pi * variance)) * \ + torch.exp( + (-torch.sum((xy_grid - mean) ** 2., dim=-1) / \ + (2 * variance)).float() + ) + # Make sure sum of values in gaussian kernel equals 1. 
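+    # (The kernel holds the isotropic 2D Gaussian
+    # 1/(2*pi*sigma^2) * exp(-((x - mean)^2 + (y - mean)^2) / (2*sigma^2))
+    # evaluated on the kernel_size x kernel_size grid built above; dividing by its sum
+    # makes the depthwise blur below intensity-preserving.)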
+ gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel) + + # Reshape to 2d depthwise convolutional weight + gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size) + gaussian_kernel = gaussian_kernel.repeat(channels, 1, 1, 1) + + gaussian_filter = nn.Conv2d(in_channels=channels, out_channels=channels, + kernel_size=kernel_size, groups=channels, bias=False, padding=2) + + gaussian_filter.weight.data = gaussian_kernel + gaussian_filter.weight.requires_grad = False + + return gaussian_filter + + +def print_TTA(classEvaluatorsTTA): + print('########## NOW: Detection (average precision) and segmentation accuracies (object-level dice): ##########') + precisionsTub, avg_precisionTub, avg_dice_scoreTub, std_dice_scoreTub, min_dice_scoreTub, max_dice_scoreTub = classEvaluatorsTTA[0].score() # tubuliresults + precisionsGlom, avg_precisionGlom, avg_dice_scoreGlom, std_dice_scoreGlom, min_dice_scoreGlom, max_dice_scoreGlom = classEvaluatorsTTA[1].score() # tubuliresults + precisionsTuft, avg_precisionTuft, avg_dice_scoreTuft, std_dice_scoreTuft, min_dice_scoreTuft, max_dice_scoreTuft = classEvaluatorsTTA[2].score() # tubuliresults + precisionsVeins, avg_precisionVeins, avg_dice_scoreVeins, std_dice_scoreVeins, min_dice_scoreVeins, max_dice_scoreVeins = classEvaluatorsTTA[3].score() # tubuliresults + precisionsArtery, avg_precisionArtery, avg_dice_scoreArtery, std_dice_scoreArtery, min_dice_scoreArtery, max_dice_scoreArtery = classEvaluatorsTTA[4].score() # tubuliresults + precisionsLumen, avg_precisionLumen, avg_dice_scoreLumen, std_dice_scoreLumen, min_dice_scoreLumen, max_dice_scoreLumen = classEvaluatorsTTA[5].score() # tubuliresults + print('TTA DETECTION RESULTS MEASURED BY AVERAGE PRECISION:') + print('0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 <- Thresholds') + print(str(np.round(precisionsTub, 4)) + ', Mean: ' + str(np.round(avg_precisionTub, 4)) + ' <-- Tubuli') + print(str(np.round(precisionsGlom, 4)) + ', Mean: ' + str(np.round(avg_precisionGlom, 4)) + ' <-- Glomeruli (incl. tuft)') + print(str(np.round(precisionsTuft, 4)) + ', Mean: ' + str(np.round(avg_precisionTuft, 4)) + ' <-- Tuft') + print(str(np.round(precisionsVeins, 4)) + ', Mean: ' + str(np.round(avg_precisionVeins, 4)) + ' <-- Veins') + print(str(np.round(precisionsArtery, 4)) + ', Mean: ' + str(np.round(avg_precisionArtery, 4)) + ' <-- Artery (incl. lumen)') + print(str(np.round(precisionsLumen, 4)) + ', Mean: ' + str(np.round(avg_precisionLumen, 4)) + ' <-- Artery lumen') + print('TTA SEGMENTATION RESULTS MEASURED BY OBJECT-LEVEL DICE SCORES:') + print('Mean: ' + str(np.round(avg_dice_scoreTub, 4)) + ', Std: ' + str(np.round(std_dice_scoreTub, 4)) + ', Min: ' + str(np.round(min_dice_scoreTub, 4)) + ', Max: ' + str(np.round(max_dice_scoreTub, 4)) + ' <-- Tubuli') + print('Mean: ' + str(np.round(avg_dice_scoreGlom, 4)) + ', Std: ' + str(np.round(std_dice_scoreGlom, 4)) + ', Min: ' + str(np.round(min_dice_scoreGlom, 4)) + ', Max: ' + str(np.round(max_dice_scoreGlom, 4)) + ' <-- Glomeruli (incl. 
tuft)') + print('Mean: ' + str(np.round(avg_dice_scoreTuft, 4)) + ', Std: ' + str(np.round(std_dice_scoreTuft, 4)) + ', Min: ' + str(np.round(min_dice_scoreTuft, 4)) + ', Max: ' + str(np.round(max_dice_scoreTuft, 4)) + ' <-- Tuft') + print('Mean: ' + str(np.round(avg_dice_scoreVeins, 4)) + ', Std: ' + str(np.round(std_dice_scoreVeins, 4)) + ', Min: ' + str(np.round(min_dice_scoreVeins, 4)) + ', Max: ' + str(np.round(max_dice_scoreVeins, 4)) + ' <-- Veins') + print('Mean: ' + str(np.round(avg_dice_scoreArtery, 4)) + ', Std: ' + str(np.round(std_dice_scoreArtery, 4)) + ', Min: ' + str(np.round(min_dice_scoreArtery, 4)) + ', Max: ' + str(np.round(max_dice_scoreArtery, 4)) + ' <-- Artery (incl. lumen)') + print('Mean: ' + str(np.round(avg_dice_scoreLumen, 4)) + ', Std: ' + str(np.round(std_dice_scoreLumen, 4)) + ', Min: ' + str(np.round(min_dice_scoreLumen, 4)) + ', Max: ' + str(np.round(max_dice_scoreLumen, 4)) + ' <-- Artery lumen') + + +def write_TTA(classEvaluatorsTTA, file_path, episode= 100000): + with open(file_path, 'a+') as f: + f.write('\n ------- EPISODE: ' + str(episode) + '----------------- \n') + f.write('########## NOW: Detection (average precision) and segmentation accuracies (object-level dice): ##########\n') + precisionsTub, avg_precisionTub, avg_dice_scoreTub, std_dice_scoreTub, min_dice_scoreTub, max_dice_scoreTub = classEvaluatorsTTA[0].score() # tubuliresults + precisionsGlom, avg_precisionGlom, avg_dice_scoreGlom, std_dice_scoreGlom, min_dice_scoreGlom, max_dice_scoreGlom = classEvaluatorsTTA[1].score() # tubuliresults + precisionsTuft, avg_precisionTuft, avg_dice_scoreTuft, std_dice_scoreTuft, min_dice_scoreTuft, max_dice_scoreTuft = classEvaluatorsTTA[2].score() # tubuliresults + precisionsVeins, avg_precisionVeins, avg_dice_scoreVeins, std_dice_scoreVeins, min_dice_scoreVeins, max_dice_scoreVeins = classEvaluatorsTTA[3].score() # tubuliresults + precisionsArtery, avg_precisionArtery, avg_dice_scoreArtery, std_dice_scoreArtery, min_dice_scoreArtery, max_dice_scoreArtery = classEvaluatorsTTA[4].score() # tubuliresults + precisionsLumen, avg_precisionLumen, avg_dice_scoreLumen, std_dice_scoreLumen, min_dice_scoreLumen, max_dice_scoreLumen = classEvaluatorsTTA[5].score() # tubuliresults + f.write('TTA DETECTION RESULTS MEASURED BY AVERAGE PRECISION:\n') + f.write('0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 <- Thresholds\n') + f.write(str(np.round(precisionsTub, 4)) + ', Mean: ' + str(np.round(avg_precisionTub, 4)) + ' <-- Tubuli\n') + f.write(str(np.round(precisionsGlom, 4)) + ', Mean: ' + str(np.round(avg_precisionGlom, 4)) + ' <-- Glomeruli (incl. tuft)\n') + f.write(str(np.round(precisionsTuft, 4)) + ', Mean: ' + str(np.round(avg_precisionTuft, 4)) + ' <-- Tuft\n') + f.write(str(np.round(precisionsVeins, 4)) + ', Mean: ' + str(np.round(avg_precisionVeins, 4)) + ' <-- Veins\n') + f.write(str(np.round(precisionsArtery, 4)) + ', Mean: ' + str(np.round(avg_precisionArtery, 4)) + ' <-- Artery (incl. 
lumen)\n') + f.write(str(np.round(precisionsLumen, 4)) + ', Mean: ' + str(np.round(avg_precisionLumen, 4)) + ' <-- Artery lumen\n') + f.write('TTA SEGMENTATION RESULTS MEASURED BY OBJECT-LEVEL DICE SCORES:\n') + f.write('Mean: ' + str(np.round(avg_dice_scoreTub, 4)) + ', Std: ' + str(np.round(std_dice_scoreTub, 4)) + ', Min: ' + str(np.round(min_dice_scoreTub, 4)) + ', Max: ' + str(np.round(max_dice_scoreTub, 4)) + ' <-- Tubuli\n') + f.write('Mean: ' + str(np.round(avg_dice_scoreGlom, 4)) + ', Std: ' + str(np.round(std_dice_scoreGlom, 4)) + ', Min: ' + str(np.round(min_dice_scoreGlom, 4)) + ', Max: ' + str(np.round(max_dice_scoreGlom, 4)) + ' <-- Glomeruli (incl. tuft)\n') + f.write('Mean: ' + str(np.round(avg_dice_scoreTuft, 4)) + ', Std: ' + str(np.round(std_dice_scoreTuft, 4)) + ', Min: ' + str(np.round(min_dice_scoreTuft, 4)) + ', Max: ' + str(np.round(max_dice_scoreTuft, 4)) + ' <-- Tuft\n') + f.write('Mean: ' + str(np.round(avg_dice_scoreVeins, 4)) + ', Std: ' + str(np.round(std_dice_scoreVeins, 4)) + ', Min: ' + str(np.round(min_dice_scoreVeins, 4)) + ', Max: ' + str(np.round(max_dice_scoreVeins, 4)) + ' <-- Veins\n') + f.write('Mean: ' + str(np.round(avg_dice_scoreArtery, 4)) + ', Std: ' + str(np.round(std_dice_scoreArtery, 4)) + ', Min: ' + str(np.round(min_dice_scoreArtery, 4)) + ', Max: ' + str(np.round(max_dice_scoreArtery, 4)) + ' <-- Artery (incl. lumen)\n') + f.write('Mean: ' + str(np.round(avg_dice_scoreLumen, 4)) + ', Std: ' + str(np.round(std_dice_scoreLumen, 4)) + ', Min: ' + str(np.round(min_dice_scoreLumen, 4)) + ', Max: ' + str(np.round(max_dice_scoreLumen, 4)) + ' <-- Artery lumen\n') + f.close() + + +class DiceLoss(nn.Module): + def init(self, ignore_index=-1): + super(DiceLoss, self).__init__() + self.eps = 1e-6 + self.smooth = 1. + self.ignore_index = ignore_index + def forward(self, prediction, target): + """ + Computes the dice loss (averaging dice scores across B x C) between network prediction and target used for training + :param prediction: BxCxHxW (2d) or BxCxHxWxD (3d) float tensor, CARE: prediction results straight + after last conv without being finally propagated through an activation (softmax, sigmoid) + :param target: BxCxHxW (2d) or BxCxHxWxD (3d) float tensor representing the ground-truth as one-hot encoding + :return: 1 - mean dice score across BxC + """ + + predictionNorm = F.sigmoid(prediction) + # predictionNorm = F.softmax(prediction, dim=1) + if self.ignore_index != -1: + target = target.clone().detach() + mask = target == self.ignore_index + target[mask] = 0 + + if target.dtype == torch.int64: + target = getOneHotEncoding(prediction, target) + + if self.ignore_index != -1 and target.size()[1] != 1: + mask = mask.unsqueeze(1).expand_as(target) + target[mask] = 0 + + denominator = predictionNorm + target + if self.ignore_index != -1: + denominator[mask] = 0 + + if target.dim() == 4: #2D + numerator = 2. * (predictionNorm * target).sum(3).sum(2) + self.smooth + denominator = denominator.sum(3).sum(2) + self.eps + self.smooth + dice_score = numerator / denominator + return 1.0 - dice_score.mean() + + elif target.dim() == 5: #3D + numerator = 2. 
* (predictionNorm * target).sum(4).sum(3).sum(2) + self.smooth + denominator = denominator.sum(4).sum(3).sum(2) + self.eps + self.smooth + dice_score = numerator / denominator + return 1.0 - dice_score.mean() + else: + ValueError('Given data dimension >' + str(target.dim()) + 'd< not supported!') + + +class WeightedL1Loss(nn.Module): + def __init__(self, weights): + super(WeightedL1Loss, self).__init__() + self.L1= torch.nn.L1Loss(reduction = 'none') + self.weights = weights + def forward(self, x, y): + return (self.L1(x, y).mean(dim=[2,3]) * self.weights).mean() + + +if __name__ == '__main__': + print() diff --git a/models/radam.py b/models/radam.py new file mode 100644 index 0000000..9fafac7 --- /dev/null +++ b/models/radam.py @@ -0,0 +1,212 @@ +# From: https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam/radam.py +# Author: Liyuan Liu +# Paper: Liyuan Liu , Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han (2020). On the Variance of the Adaptive Learning Rate and Beyond. the Eighth International Conference on Learning Representations. +# License: Apache License 2.0 + +import math +import torch +from torch.optim.optimizer import Optimizer, required + +class RAdam(Optimizer): + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + self.buffer = [[None, None, None] for ind in range(10)] + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + def step(self, closure=None): + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + buffered = self.buffer[int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = 1.0 / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + + # more conservative since it's an approximated value + if N_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) + else: + p_data_fp32.add_(-step_size * group['lr'], exp_avg) + + p.data.copy_(p_data_fp32) + + return loss + +class PlainRAdam(Optimizer): + + def __init__(self, params, 
lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + + super(PlainRAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(PlainRAdam, self).__setstate__(state) + + def step(self, closure=None): + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size, exp_avg, denom) + else: + step_size = group['lr'] / (1 - beta1 ** state['step']) + p_data_fp32.add_(-step_size, exp_avg) + + p.data.copy_(p_data_fp32) + + return loss + + +class AdamW(Optimizer): + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0): + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, warmup = warmup) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + + def step(self, closure=None): + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + denom = exp_avg_sq.sqrt().add_(group['eps']) + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + if group['warmup'] > state['step']: + scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup'] + else: + scheduled_lr = group['lr'] + + step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1 + + if group['weight_decay'] != 0: + 
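+                    # Decoupled weight decay (AdamW): the parameters are shrunk directly
+                    # by weight_decay * scheduled_lr below, rather than adding
+                    # weight_decay * w to the gradient as in classic L2 regularisation.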
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32) + + p_data_fp32.addcdiv_(-step_size, exp_avg, denom) + + p.data.copy_(p_data_fp32) + + return loss diff --git a/models/utils.py b/models/utils.py new file mode 100644 index 0000000..39eed6e --- /dev/null +++ b/models/utils.py @@ -0,0 +1,310 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. +from scipy import misc +import os, cv2, torch +import numpy as np +from torch.utils.tensorboard import SummaryWriter + +def load_test_data(image_path, size=256): + img = misc.imread(image_path, mode='RGB') + img = misc.imresize(img, [size, size]) + img = np.expand_dims(img, axis=0) + img = preprocessing(img) + + return img + +def norm(t_list): + for i,t in enumerate(t_list): + t_list[i] = (t-t.mean())/(t.std()+1e-15)*2 + .5 + return t_list + + +def concat_zeros(tensor_list): + for i in range(len(tensor_list)): + tensor_list[i]= torch.cat((tensor_list[i],torch.zeros_like(tensor_list[i])),dim = 1) + return tuple(tensor_list) if len(tensor_list) >= 2 else tensor_list[0] + +def split_6_ch_to_3(tensor_list): + for i in range(len(tensor_list)): + tensor_list[i]= tensor_list[i].split(3,dim = 1)[0] + return tuple(tensor_list) if len(tensor_list) >= 2 else tensor_list[0] + +def split_6_ch_to_2x3(tensor_list): + for i in range(len(tensor_list)): + a,b = tensor_list[i].split(3,dim = 1) + tensor_list[i] = torch.cat((a,b),dim = 3) + return tuple(tensor_list) + + +def preprocessing(x): + x = x/127.5 - 1 # -1 ~ 1 + return x + + +def get_puzzle_params(ps = 256, n_patches = 9, imgsize= 640): + n = int(np.sqrt(n_patches)) + assert(n>=2),"n_patches should be at least 4" + to = n * ps - imgsize + oo = int(to/(n-1)+.5) + + l = [] + for i in range(n): + for j in range(n): + if j == 0: + x = 0 + elif j == n-1: + x = imgsize - ps + else: + x = j * (ps - oo) + if i == 0: + y = 0 + elif i == n-1: + y = imgsize - ps + else: + y = i * (ps - oo) + l.append((x ,y ,x+ps ,y+ps )) + return l + +def get_imgs(img, params): + cropped_np_imgs = [] + for param in params: + img.crop(param) + cropped_np_imgs.append(np.array(img.crop(param))) + return cropped_np_imgs + +def get_overlap_weights(params, imgsize): + arr = np.zeros((imgsize,imgsize)) + for param in params: + for x in range(param[1],param[3]): + for y in range(param[0],param[2]): + arr[x][y] = arr[x][y] + 1 + return 1/arr + +def puzzle(np_imgs, out_size): + n_patches = len(np_imgs) + ps = np_imgs[0].shape[0] + assert(np.sqrt(n_patches) * ps >= out_size), "There are not enough patches to fill the desired outputsize" + params = get_puzzle_params(ps, n_patches, out_size) + arr = np.zeros((out_size,out_size,3)) + for i,param in enumerate(params): + arr[param[1]:param[3], param[0]:param[2]] += np_imgs[i] + weights = get_overlap_weights(params, out_size) + weights = np.stack((weights,weights,weights), axis = -1) + arr = arr*weights + return arr.astype('uint8') + +def save_images(images, size, image_path): + return imsave(inverse_transform(images), size, image_path) + +def inverse_transform(images): + return (images+1.) 
/ 2 + +def imsave(images, size, path): + return misc.imsave(path, merge(images, size)) + +def merge(images, size): + h, w = images.shape[1], images.shape[2] + img = np.zeros((h * size[0], w * size[1], 3)) + for idx, image in enumerate(images): + i = idx % size[1] + j = idx // size[1] + img[h*j:h*(j+1), w*i:w*(i+1), :] = image + + return img + +def check_folder(log_dir): + if not os.path.exists(log_dir): + os.makedirs(log_dir) + return log_dir + +def str2bool(x): + return x.lower() in ('true') + +def cam(x, size = 256): + x = x - np.min(x) + cam_img = x / np.max(x) + cam_img = np.uint8(255 * cam_img) + cam_img = cv2.resize(cam_img, (size, size)) + cam_img = cv2.applyColorMap(cam_img, cv2.COLORMAP_JET) + return cam_img / 255.0 + +def imagenet_norm(x): + mean = [0.485, 0.456, 0.406] + std = [0.299, 0.224, 0.225] + mean = torch.FloatTensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).to(x.device) + std = torch.FloatTensor(std).unsqueeze(0).unsqueeze(2).unsqueeze(3).to(x.device) + return (x - mean) / std + +def denorm(x): + return x * 0.5 + 0.5 + +def tensor2numpy(x): + return x.detach().cpu().numpy().transpose(1,2,0) + +def RGB2BGR(x): + return cv2.cvtColor(x, cv2.COLOR_RGB2BGR) + +def BGR2RGB(x): + return cv2.cvtColor(x, cv2.COLOR_BGR2RGB) + +def one_hot(num_labels,n): + c_org = torch.FloatTensor(torch.eye(num_labels)[n]) + c_trg = torch.FloatTensor(torch.eye(num_labels)[np.random.choice([i for i in range(0,num_labels) if i not in [n]])]) + return c_org, c_trg + +def get_trg_label(num_labels,n): + if n == 0: + return n, np.random.choice([i for i in range(0,num_labels)]) + elif np.random.randint(0,2): return n, 0 + return n, np.random.choice([i for i in range(0,num_labels)]) + + +def get_trg_label_2(num_labels,n): + if n == 0: + return 0, np.random.choice([i for i in range(0,num_labels)]) + else: + return n, 0 # translate non Pas into Pas + + +### gradient_penalty from the Stargan paper +def gradient_penalty(device , y, x): + """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2.""" + weight = torch.ones(y.size()).to(device) + dydx = torch.autograd.grad(outputs=y, + inputs=x, + grad_outputs=weight, + retain_graph=True, + create_graph=True, + only_inputs=True)[0] + + dydx = dydx.view(dydx.size(0), -1) + dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1)) + return torch.mean((dydx_l2norm-1)**2) + +def load_dataset(loader): + batches=[] + for batch in loader: + batches.append(batch) + return batches +def set_name(args): + if args.stain_root == 'stains[0]': + stain_root = '' + args.stains.sort() + for stain in args.stains: + stain_root = stain_root + stain + '_' + + if args.star: + if args.gen_org: + stain_root = 'org_' + stain_root + elif args.gen_trg: + stain_root = 'trg_' + stain_root + if args.no_cross: + stain_root = 'no_cross_' + stain_root + stain_root = 'stargan_' + stain_root + elif args.guided: + stain_root = 'guided_' + stain_root + elif args.hydra: + stain_root = 'hydra_' + stain_root + else: + stain_root = 'baseline_' + stain_root + + args.stain_root = stain_root + 'sixpack' if args.six_pack else stain_root[:-1] + + +def init_model(self, args): + self.light = args.light + + if self.light : + self.model_name = 'UGATIT_light' + else : + self.model_name = 'UGATIT' + + if args.star: self.model_name = 'Stargan_' + self.model_name + elif args.guided:self.model_name = 'Guided_' + self.model_name + + self.result_dir = args.result_dir + self.dataset = args.dataset + self.stains = args.stains + self.stain_root = args.stain_root + self.mult_stains = len(self.stains) >= 2 + self.num_stains = 
len(self.stains) + self.stainB = args.stainB + + + self.iteration = args.iteration + self.decay_flag = args.decay_flag + + self.batch_size = args.batch_size + self.print_freq = args.print_freq + self.save_freq = args.save_freq + + self.lr = args.lr + self.weight_decay = args.weight_decay + self.ch = args.ch + + self.puzzle = args.puzzle + self.preload = args.preload + if args.phase == 'train': + self.writer = SummaryWriter(log_dir = os.path.join(args.result_dir, args.stain_root, 'logs')) + + """ Weight """ + self.adv_weight = args.adv_weight + self.cycle_weight = args.cycle_weight + self.identity_weight = args.identity_weight + self.cam_weight = args.cam_weight + self.cls_weight = args.cls_weight + self.gp_weight = args.gp_weight + + + """ Generator """ + self.n_res = args.n_res + self.six_pack = args.six_pack + + """ Discriminator """ + self.n_dis = args.n_dis + + self.img_size = args.img_size + self.img_ch = args.img_ch + self.crop_size = args.crop_size + + self.device = args.device + self.benchmark_flag = args.benchmark_flag + self.resume = args.resume + + self.phase = args.phase + + if torch.backends.cudnn.enabled and self.benchmark_flag: + print('set benchmark !') + torch.backends.cudnn.benchmark = True + + print() + + print("##### Information #####") + print("# Name : ", self.stain_root) + print("# light : ", self.light) + print("# dataset : ", self.dataset) + print("# stains : ", self.stains) + print("# batch_size : ", self.batch_size) + print("# iteration per epoch : ", self.iteration) + if self.six_pack: print("# this net went to the gym! ") + print() + print("#Image size: ", self.crop_size) + + print("##### Generator #####") + print("# residual blocks : ", self.n_res) + + print() + + print("##### Discriminator #####") + print("# discriminator layer : ", self.n_dis) + + print() + + print("##### Weight #####") + print("# adv_weight : ", self.adv_weight) + print("# cycle_weight : ", self.cycle_weight) + print("# identity_weight : ", self.identity_weight) + print("# cam_weight : ", self.cam_weight) + if args.star: + print("# class loss weight : ", self.cls_weight) + print("# gradient penalty loss weight : ", self.gp_weight) + diff --git a/options.py b/options.py new file mode 100644 index 0000000..99977c0 --- /dev/null +++ b/options.py @@ -0,0 +1,214 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. + +import sys +import argparse +import os +import torch +import models +import data +import shutil +import logging as log + +from tensorboardX import SummaryWriter + + +class Options: + """This class defines options used during both training and test time. + + It also implements several helper functions such as parsing, printing, and saving the options. + It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class. 
+ """ + def initialize(self, parser): + """Define the common options that are used in both training and test.""" + # basic parameters + parser.add_argument('--dataroot', required=True, help='path to folder with the stain specific datafolders') + parser.add_argument('--stain', required=True, help='stain-imagefolder in dataroot (should have subfolders trainA, valA) possible options:[ErHr3 | F4-80 | NGAL | Fibronectin | CD31 | CD44 | CD45 | Ly6G | Col_I | ColIII | Col_IV | PAS]') + parser.add_argument('--stainB', required=True, help='stain-imagefolder in dataroot (should have subfolders trainB, valB) possible options:[ErHr3 | F4-80 | NGAL | Fibronectin | CD31 | CD44 | CD45 | Ly6G | Col_I | ColIII | Col_IV | PAS]') + parser.add_argument('--resultsPath', required=True, help='path to base results folder') + parser.add_argument('--suffix', default='', type=str, help='customized suffix for model subfolders, e.g. _dropout => cycle_gan_..._dropout') + parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') + # model parameters + parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | U_GAT_IT | pix2pix | test | colorization]') + parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') + parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') + parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_3_9blocks', help='specify generator architecture [resnet_X_Yblocks -> X=depth, Y= #blocks | unet_X -> X=depth') + parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') + parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]') + parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') + parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') + parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') + parser.add_argument('--use_segm_model', action='store_true',help='use pretrained segmentation model') + parser.add_argument('--segm_model_path', type=str, default ='/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt', help='path to the segmentation model for domain B' ) + parser.add_argument('--use_MC', action='store_true',help='use multi channel inputs') + # training parameters + parser.add_argument('--niters', type=int, default=20000, help='# of iterations at starting learning rate') + parser.add_argument('--niters_init', type=int, default=0, help='number of iterations training with only identity loss') + parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') + parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') + parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') + parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') + parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]') + parser.add_argument('--niters_linDecay', type=int, default=10000, help='[policy: linear] # of iterations to linearly decay learning rate to zero') + parser.add_argument('--niters_stepDecay', type=int, default=50, help='[policy: step] multiply by a gamma every lr_decay_iters iterations') + parser.add_argument('--validation_freq', type=int, default=500, help='iteration frequency for validation') + # dataset parameters + parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. 
[unaligned | aligned | single | colorization]') + parser.add_argument('--preload', action='store_true', help='if specified, the images will be put into RAM before training starts') + parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') + parser.add_argument('--batch_size', type=int, default=1, help='input batch size') + parser.add_argument('--load_size', type=int, default=640, help='scale images to this size') + parser.add_argument('--crop_size', type=int, default=640, help='then crop to this size') + parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a randomly varying subset is loaded.') + parser.add_argument('--preprocess', type=str, default='none', help='scaling and cropping of images at load time [resize_and_crop | resize | crop | scale_width | scale_width_and_crop | none]') + parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') + # network saving and loading parameters + parser.add_argument('--saveModelEachNIteration', type=int, default=float("inf"), help='iteration frequency to save models') + parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') + # additional debugging parameters + parser.add_argument('--verbose', action='store_true', help='if specified, print network architectures') + parser.add_argument('--print_memory_usage_freq', type=int, default=5000, help='iteration frequency to print RAM & VRAM usage info') + parser.add_argument('--update_TB_images_freq', type=int, default=1000, help='iteration frequency to update inference images in tensorboard') + + + return parser + + def gather_options(self): + """Initialize our parser with basic options(only once). + Add additional model-specific and dataset-specific options. + These options are defined in the <modify_commandline_options> function + in model and dataset classes. + """ + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = self.initialize(parser) + + # get the basic options + opt, _ = parser.parse_known_args() + + # modify model-related parser options + model_name = opt.model + model_option_setter = models.get_option_setter(model_name) + parser = model_option_setter(parser, opt.phase == 'train') + opt, _ = parser.parse_known_args() # parse again with new defaults + + # modify dataset-related parser options + dataset_name = opt.dataset_mode + dataset_option_setter = data.get_option_setter(dataset_name) + parser = dataset_option_setter(parser, opt.phase == 'train') + + # save and return the parser + self.parser = parser + return parser.parse_args() + + def print_options(self, opt, logger): + """Print and save options + + It will print both current options and default values(if different). 
+ It will save options into a text file / [checkpoints_dir] / opt.txt + """ + message = 'Script call arguments are:\n\n' + ' '.join(sys.argv[1:]) + '\n\n' + message += '----------------- Options ---------------\n' + for k, v in sorted(vars(opt).items()): + comment = '' + default = self.parser.get_default(k) + if v != default: + comment = '\t[default: %s]' % str(default) + message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) + message += '----------------- End -------------------' + logger.info(message) + + + def parseTrain(self): + """Parse our options, create checkpoints directory suffix, and set up gpu device.""" + opt = self.gather_options() + + opt.model_foldername = self.setOptModelFolderName(opt) + + opt.finalResultsFolder = os.path.join(opt.resultsPath, opt.model_foldername, opt.stain + '_' + opt.stainB) + opt.checkpoints_dir = os.path.join(opt.finalResultsFolder, 'Models') + + if not os.path.exists(opt.checkpoints_dir): + os.makedirs(opt.checkpoints_dir) + + # Set up logger + log.basicConfig( + level=log.INFO, + format='%(asctime)s %(message)s', + datefmt='%Y/%m/%d %I:%M:%S %p', + handlers=[ + log.FileHandler(opt.finalResultsFolder + '/Train_LOGS.log', 'w'), + log.StreamHandler(sys.stdout) + ]) + logger = log.getLogger() + + self.print_options(opt, logger) + # setting up tensorboard visualization + tensorboardPath = os.path.join(opt.finalResultsFolder, 'TB') + shutil.rmtree(tensorboardPath, ignore_errors=True) # <- remove existing TB events + opt.tbWriter = SummaryWriter(log_dir=tensorboardPath) + opt.logger = logger + + # set gpu ids + str_ids = opt.gpu_ids.split(',') + opt.gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + opt.gpu_ids.append(id) + if len(opt.gpu_ids) > 0: + torch.cuda.set_device(opt.gpu_ids[0]) + + torch.backends.cudnn.benchmark = True + + return opt + + + def parseTestStain(self, stain): + """Parse our options, create checkpoints directory suffix, and set up gpu device.""" + opt = self.gather_options() + + opt.stain = stain + + opt.model_foldername = self.setOptModelFolderName(opt) + + opt.finalResultsFolder = os.path.join(opt.resultsPath, opt.model_foldername, opt.stain + '_' + opt.stainB) + opt.checkpoints_dir = os.path.join(opt.finalResultsFolder, 'Models') + opt.testResultsPath = os.path.join(opt.resultsPath, opt.model_foldername, 'TestResults') + opt.testResultImagesPath = os.path.join(opt.testResultsPath, stain) + + if not os.path.exists(opt.testResultImagesPath): + os.makedirs(opt.testResultImagesPath) + + return opt + + @staticmethod + def setOptModelFolderName(opt): + """Set opt.model_foldername specifying the folder of trained model""" + + model_foldername = opt.model + '_gen_' + opt.netG + '_' + str(opt.ngf) + '_dis_' + opt.netD + '_' + str(opt.n_layers_D) + '_' + str( + opt.ndf) + '_batch_' + str(opt.batch_size) + '_init_' + opt.init_type + '_lr_' + str( + opt.lr) + '_policy_' + opt.lr_policy + '_initItersId_' + str(opt.niters_init) + '_loss_' + opt.gan_mode + '_cropSize_' + str(opt.crop_size) + + if opt.model == 'cycle_gan': + model_foldername += '_lamA_' + str(opt.lambda_A) + '_lamB_' + str(opt.lambda_B) + '_lamId_' + str(opt.lambda_id) + + elif opt.model == 'U_GAT_IT' or opt.model == 'U_GAT_IT_2': + model_foldername += '_lamA_' + str(opt.lambda_A) + '_lamB_' + str(opt.lambda_B) + '_lamId_' + str(opt.lambda_id) + '_lamCam_' + str( + opt.lambda_cam) + '_DlayersG' + str(opt.n_layers_D_G) + '_DlayersL' + str(opt.n_layers_D_L) + + if opt.use_segm_model: + model_foldername += '_SegNet_lamSeg_' + str(opt.lambda_Seg) + + 
if opt.use_MC: + model_foldername += '_useMC' + + if opt.suffix != '': + model_foldername += opt.suffix + + return model_foldername + + diff --git a/postprocessing.py b/postprocessing.py new file mode 100644 index 0000000..bc2cf50 --- /dev/null +++ b/postprocessing.py @@ -0,0 +1,108 @@ +import numpy as np +import torch +import torch.nn as nn +import cv2 +import math +from scipy.ndimage.measurements import label +from scipy.ndimage.morphology import binary_dilation, binary_fill_holes +from skimage.morphology import remove_small_objects + +from utils import getChannelSmootingConvLayer +from utils import generate_ball + + +def postprocessPredictionAndGT(prediction, GTpre, device, tubuliInstanceID_StartsWith, predictionsmoothing=False, holefilling=True): + ################# PREDICTION SMOOTHING ################ + if predictionsmoothing: + smoothingKernel = getChannelSmootingConvLayer(8).to(device) + prediction = smoothingKernel(prediction) + + netOutputPrediction = torch.argmax(prediction, dim=1).squeeze(0).to("cpu").numpy() # Label 0/1/2/3/4/5/6/7: Background/tubuli/glom_full/glom_tuft/veins/artery_full/artery_lumen/border + labelMap = netOutputPrediction.copy() + + ################# HOLE FILLING ################ + if holefilling: + labelMap[binary_fill_holes(labelMap==1)] = 1 #tubuli + labelMap[binary_fill_holes(labelMap==4)] = 4 #veins + tempTuftMask = binary_fill_holes(labelMap==3) #tuft + labelMap[binary_fill_holes(np.logical_or(labelMap==3, labelMap==2))] = 2 #glom + labelMap[tempTuftMask] = 3 #tuft + tempArteryLumenMask = binary_fill_holes(labelMap == 6) #artery_lumen + labelMap[binary_fill_holes(np.logical_or(labelMap == 5, labelMap == 6))] = 5 #full_artery + labelMap[tempArteryLumenMask] = 6 #artery_lumen + + ###### REMOVING TOO SMALL CONNECTED REGIONS ###### + TUBULI_MIN_SIZE = 400 + GLOM_MIN_SIZE = 1500 + TUFT_MIN_SIZE = 500 + VEIN_MIN_SIZE = 3000 + ARTERY_MIN_SIZE = 400 + LUMEN_MIN_SIZE = 20 + + temp, _ = label(labelMap == 1) + finalResults_Instance = np.asarray(remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0, np.uint16) + temp, _ = label(np.logical_or(labelMap == 2, labelMap == 3)) + finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2 + temp, _ = label(labelMap == 3) + finalResults_Instance[remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0] = 3 + temp, _ = label(labelMap == 4) + finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4 + temp, _ = label(np.logical_or(labelMap == 5, labelMap == 6)) + finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5 + temp, _ = label(labelMap == 6) + finalResults_Instance[remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0] = 6 + + ############ PERFORM TUBULE DILATION ############ + temp, numberTubuli = label(finalResults_Instance == 1) + assert numberTubuli < 65500, print('ERROR: TOO MANY TUBULI DETECTED - MAX ARE 2^16=65k COZ OF UINT16 !') + temp[temp > 0] += (tubuliInstanceID_StartsWith-1) + temp = cv2.dilate(np.asarray(temp, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) + mask = temp > 0 + finalResults_Instance[mask] = temp[mask] + + + + + GT = np.asarray(GTpre, np.uint16) + + ############ PERFORM TUBULE DILATION, Note: In order to separate touching instances in Ground-truth, + # tubules have been eroded by 1 pixel before exporting from QuPath (all providing label 1). Thus, for evaluation, + # we here need to dilate them back. 
############ + temp, numberTubuli = label(GT == 1) + assert numberTubuli < 65500, print('ERROR: TOO MANY TUBULI DETECTED - MAX ARE 2^16=65k COZ OF UINT16 !') + temp[temp > 0] += (tubuliInstanceID_StartsWith-1) + temp = cv2.dilate(np.asarray(temp, np.uint16), kernel=np.asarray(generate_ball(1), np.uint8), iterations=1) + mask = temp > 0 + GT[mask] = temp[mask] + + + return finalResults_Instance, netOutputPrediction, GT + + + + +def extractInstanceChannels(postprocessedPredInstance, postprocessedGTInstance, tubuliInstanceID_StartsWith): + labeledGlom, _ = label(np.logical_or(postprocessedPredInstance == 2, postprocessedPredInstance == 3)) + labeledTuft, _ = label(postprocessedPredInstance == 3) + labeledVeins, _ = label(postprocessedPredInstance == 4) + labeledArtery, _ = label(np.logical_or(postprocessedPredInstance == 5, postprocessedPredInstance == 6)) + labeledArteryLumen, _ = label(postprocessedPredInstance == 6) + + labeledGlomGT, _ = label(np.logical_or(postprocessedGTInstance == 2, postprocessedGTInstance == 3)) + labeledTuftGT, _ = label(postprocessedGTInstance == 3) + labeledVeinsGT, _ = label(postprocessedGTInstance == 4) + labeledArteryGT, _ = label(np.logical_or(postprocessedGTInstance == 5, postprocessedGTInstance == 6)) + labeledArteryLumenGT, _ = label(postprocessedGTInstance == 6) + + tubuliInstanceChannel = postprocessedPredInstance.copy() + tubuliInstanceChannel[tubuliInstanceChannel < tubuliInstanceID_StartsWith] = 0 + tubuliInstanceChannel -= (tubuliInstanceID_StartsWith-1) + + tubuliInstanceChannelGT = postprocessedGTInstance.copy() + tubuliInstanceChannelGT[tubuliInstanceChannelGT < tubuliInstanceID_StartsWith] = 0 + tubuliInstanceChannelGT -= (tubuliInstanceID_StartsWith-1) + + return [tubuliInstanceChannel, labeledGlom, labeledTuft, labeledVeins, labeledArtery, labeledArteryLumen], [tubuliInstanceChannelGT, labeledGlomGT, labeledTuftGT, labeledVeinsGT, labeledArteryGT, labeledArteryLumenGT] + + + diff --git a/test.py b/test.py new file mode 100644 index 0000000..460aefc --- /dev/null +++ b/test.py @@ -0,0 +1,149 @@ +import os +import math +import time +import sys +import logging as log +from PIL import Image +import cv2 +import numpy as np +import torch +from options import Options +from models import create_model +from postprocessing import postprocessPredictionAndGT, extractInstanceChannels +from evaluation import ClassEvaluator +from utils import saveIHCTranslation_PredictionOverlay, printResultsForDiseaseModel +from models.utils import split_6_ch_to_3 + +from models.model import Custom + + + +chosenModelIterationForAllStains = 300000 +stainsToValidate = ['aSMA', 'NGAL', 'CD31', 'ColIII'] + +saveImageResults = True +saveImageResultsSeparately = False +onlyApplyTestTimeAugmentation = False +saveInstanceDiceScoresForTTA = True + +GPUno = 0 +device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu") +torch.cuda.set_device(GPUno) + +opt = Options().parseTestStain(stain = stainsToValidate[0]) + +# Set up logger +log.basicConfig( + level=log.INFO, + format='%(asctime)s %(message)s', + datefmt='%Y/%m/%d %I:%M:%S %p', + handlers=[ + log.FileHandler(opt.testResultsPath + '/testPerformance_model' + str(chosenModelIterationForAllStains) + '.log', 'w'), + log.StreamHandler(sys.stdout) + ]) +logger = log.getLogger() +logger.info('Script call arguments are:\n\n' + ' '.join(sys.argv[1:]) + '\n\n') + +segModel = Custom(input_ch=3, output_ch=8, modelDim=2) 
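+# segModel is a fixed, pre-trained PAS segmentation network (3 input channels, 8 output classes:
+# background, tubule, glomerulus, glomerular tuft, vein, artery, artery lumen, border; cf.
+# postprocessing.py). It is loaded once, switched to eval mode and only used for inference on the
+# stain-to-PAS translations. The checkpoint path passed to load_state_dict below is
+# machine-specific and has to be adapted to the local setup, e.g. (illustrative sketch only,
+# 'SEG_MODEL_PATH' is not an existing option of this code base):
+#   segModelPath = os.environ.get('SEG_MODEL_PATH', '<path/to/finalModel.pt>')
+#   segModel.load_state_dict(torch.load(segModelPath, map_location=lambda storage, loc: storage))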
+segModel.load_state_dict(torch.load('/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt', map_location=lambda storage, loc: storage)) +segModel.eval() +segModel = segModel.to(device) + + +numberClassesToEvaluate = 6 +classEvaluators = [] +classEvaluatorsTTA = [] +for i in range(len(stainsToValidate)): + classEvaluators.append([ClassEvaluator() for _ in range(numberClassesToEvaluate)]) + classEvaluatorsTTA.append([ClassEvaluator() for _ in range(numberClassesToEvaluate)]) + +tubuliInstanceID_StartsWith = 10 + + +for s, stain in enumerate(stainsToValidate): + + opt = Options().parseTestStain(stain = stain) # get options => Use same training options args + opt.logger = logger + opt.gpu_ids = [GPUno] + opt.phase = 'test' + opt.load_iter = chosenModelIterationForAllStains + + model = create_model(opt) # create cyclegan given opt.model and init and push to GPU + model.setup(opt) # load networks + model.setSavedNetsEval() + + image_dir = os.path.join(opt.dataroot, stain, 'test') + files = sorted(list(filter(lambda x: ').png' in x, os.listdir(image_dir)))) + + logger.info('Loading '+stain+' dataset with size: ' + str(len(files))) + for k, fname in enumerate(files): + imgOrig = np.array(Image.open(os.path.join(image_dir, fname)))[:,:,:3] + lbl = np.array(Image.open(os.path.join(image_dir, fname.replace('.png', '-labels.png')))) + + assert imgOrig.shape == (640, 640, 3) and lbl.shape == (516, 516), 'Image size {} or {} unsupported.'.format(imgOrig.shape, lbl.shape) + + img = (np.asarray(imgOrig, np.float32) / 255.0 - 0.5) / 0.5 + + lbl = np.asarray(lbl, np.long) + img = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0).to(device) + + with torch.no_grad(): + STAIN_to_PAS_img = model.perform_test_conversion(img) + + if opt.use_MC: + STAIN_to_PAS_img = split_6_ch_to_3([STAIN_to_PAS_img]) + + prediction = segModel(STAIN_to_PAS_img) + + if not onlyApplyTestTimeAugmentation: + postprocessedPredInstance, outputPrediction, postprocessedGTInstance = postprocessPredictionAndGT(prediction, lbl, device, tubuliInstanceID_StartsWith, predictionsmoothing=False, holefilling=True) + classInstancePredictionList, classInstanceGTList = extractInstanceChannels(postprocessedPredInstance, postprocessedGTInstance, tubuliInstanceID_StartsWith) + for i in range(numberClassesToEvaluate): + classEvaluators[s][i].add_example(classInstancePredictionList[i], classInstanceGTList[i]) + + prediction = torch.softmax(prediction, 1) + STAIN_to_PAS_img = STAIN_to_PAS_img.flip(2) + prediction += torch.softmax(segModel(STAIN_to_PAS_img), 1).flip(2) + STAIN_to_PAS_img = STAIN_to_PAS_img.flip(3) + prediction += torch.softmax(segModel(STAIN_to_PAS_img), 1).flip(3).flip(2) + STAIN_to_PAS_img = STAIN_to_PAS_img.flip(2) + prediction += torch.softmax(segModel(STAIN_to_PAS_img), 1).flip(3) + STAIN_to_PAS_img = STAIN_to_PAS_img.flip(3) + prediction /= 4. 
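+        # The block above applies flip-based test-time augmentation: softmax predictions for the
+        # identity, the vertical flip (dim 2), the combined vertical+horizontal flip and the
+        # horizontal flip (dim 3) are each flipped back and averaged; afterwards STAIN_to_PAS_img
+        # is in its original orientation again, which matters for the overlay saved below.
+        # Equivalent compact sketch (illustrative only, not executed here):
+        #   preds = [torch.softmax(segModel(STAIN_to_PAS_img), 1)]
+        #   for d in [(2,), (2, 3), (3,)]:
+        #       preds.append(torch.softmax(segModel(STAIN_to_PAS_img.flip(d)), 1).flip(d))
+        #   prediction = torch.stack(preds).mean(0)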
+ + postprocessedPredInstanceTTA, outputPredictionTTA, postprocessedGTInstanceTTA = postprocessPredictionAndGT(prediction, lbl, device, tubuliInstanceID_StartsWith, predictionsmoothing=False, holefilling=True) + classInstancePredictionListTTA, classInstanceGTListTTA = extractInstanceChannels(postprocessedPredInstanceTTA, postprocessedGTInstanceTTA, tubuliInstanceID_StartsWith) + for i in range(numberClassesToEvaluate): + classEvaluatorsTTA[s][i].add_example(classInstancePredictionListTTA[i], classInstanceGTListTTA[i]) + + if saveImageResults: + fname_Result = fname[:-4] if fname.endswith('.svs') else fname[:-5] + figPath = os.path.join(opt.testResultImagesPath, fname_Result + '.png') + saveIHCTranslation_PredictionOverlay(stain_img=imgOrig, + translated_img=np.asarray(np.round((STAIN_to_PAS_img.squeeze(0).to("cpu").numpy().transpose(1, 2, 0)*0.5+0.5)*255.), np.uint8), + network_output=outputPredictionTTA, + instance_pred=postprocessedPredInstanceTTA, + instance_gt=postprocessedGTInstanceTTA, + savePath=figPath, + tubuliInstanceID_StartsWith=tubuliInstanceID_StartsWith, + saveImageResultsSeparately = saveImageResultsSeparately, + figHeight=12, + alpha=0.4) + + + +for s, stain in enumerate(stainsToValidate): + logger.info('############################### RESULTS FOR -> '+ stain +' <- ###############################') + if not onlyApplyTestTimeAugmentation: + printResultsForDiseaseModel(evaluatorID=s, allClassEvaluators=classEvaluators, logger=logger) + logger.info('=> TTA Results <=') + printResultsForDiseaseModel(evaluatorID=s, allClassEvaluators=classEvaluatorsTTA, logger=logger) + + if saveInstanceDiceScoresForTTA: + resultsDic = '/'.join(opt.testResultImagesPath.split('/')[:-1])+'/'+stain + np.save(os.path.join(resultsDic, 'diceScores_Tubule_TTA.npy'), np.array(classEvaluatorsTTA[s][0].diceScores)) + np.save(os.path.join(resultsDic, 'diceScores_Glom_TTA.npy'), np.array(classEvaluatorsTTA[s][1].diceScores)) + np.save(os.path.join(resultsDic, 'diceScores_Tuft_TTA.npy'), np.array(classEvaluatorsTTA[s][2].diceScores)) + np.save(os.path.join(resultsDic, 'diceScores_Vein_TTA.npy'), np.array(classEvaluatorsTTA[s][3].diceScores)) + np.save(os.path.join(resultsDic, 'diceScores_Artery_TTA.npy'), np.array(classEvaluatorsTTA[s][4].diceScores)) + np.save(os.path.join(resultsDic, 'diceScores_Lumen_TTA.npy'), np.array(classEvaluatorsTTA[s][5].diceScores)) diff --git a/train.py b/train.py new file mode 100644 index 0000000..f6df686 --- /dev/null +++ b/train.py @@ -0,0 +1,82 @@ +"""As a whole, this code framework is inspired and heavily based on the great CylceGAN/pix2pix Repository from +https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix. We copied and adapted several code fragments. +The specified repo was written by Jun-Yan Zhu and Taesung Park, and supported by Tongzhou Wang. 
+""" +# From: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix +# Author: Liyuan Liu +# Paper: CycleGAN (https://arxiv.org/pdf/1703.10593.pdf) and pix2pix (https://arxiv.org/pdf/1611.07004.pdf) +# License: BSD License + + +import numpy as np +import os +import math +import time +import sys +import logging as log + +import torch + +from options import Options +from data import create_dataset +from models import create_model + +from utils import parse_RAM_info, parse_nvidia_smi, visualizeForwardsNoGrad, printDict + + + + +if __name__ == '__main__': + + opt = Options().parseTrain() # get training options + + # opt.phase = 'val' + # val_dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + + opt.phase = 'train' + dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + + model = create_model(opt) # create a model given opt.model and other options + model.setup(opt) # regular setup: load and print networks; create schedulers + + loader_iter = iter(dataset) + + opt.logger.info('################################ TRAINING STARTS ################################ - 1 Epoch = {} iterations'.format(math.ceil(dataset.__len__() / opt.batch_size))) + + try: + for i in range(1, opt.niters+1): + try: + data = next(loader_iter) + except StopIteration: + loader_iter = iter(dataset) + data = next(loader_iter) + + model.set_input(data) # unpack data from dataset and apply preprocessing + if i < opt.niters_init: + model.optimize_gen_for_id_loss() # update generators using only identity losses in first iterations + else: + model.optimize_parameters() # calculate loss functions, get gradients, update network weights + + if i % opt.update_TB_images_freq == 1: # Update inferences in tensorboard every 'opt.update_TB_images_freq' iterations + visualizeForwardsNoGrad(model, iteration=i, tbWriter=opt.tbWriter, phase='Train') + + currLossesDict = model.get_current_losses() + opt.tbWriter.add_scalars('Plot/train', currLossesDict, i) + opt.logger.info('[Iteration {}] '.format(i) + printDict(currLossesDict)) + + if i % opt.saveModelEachNIteration == 0: + opt.logger.info('Saving model at iteration {}'.format(i)) + model.save_networks(str(i)) + + if i % opt.print_memory_usage_freq == 0: + # opt.logger.info('[Iteration ' + str(i) + '] ' + parse_nvidia_smi(opt.gpu_ids[0])) + opt.logger.info('[Iteration ' + str(i) + '] ' + parse_RAM_info()) + + model.update_learning_rate(i) # update learning rates at the end of every iteration. + + opt.logger.info('########################## TRAINING DONE ##########################') + + except: + opt.logger.exception('! Exception !') + raise + diff --git a/train_UGATIT.py b/train_UGATIT.py new file mode 100644 index 0000000..cd89593 --- /dev/null +++ b/train_UGATIT.py @@ -0,0 +1,95 @@ +# This code was copied from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix and adapted for our purpose accordingly. 
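+# Training loop for the U-GAT-IT-style models. It mirrors train.py, but attaches the learning-rate
+# schedulers manually via get_scheduler, skips the identity-only warm-up iterations, writes the
+# A<->B translations/reconstructions and the four generator/discriminator losses to tensorboard
+# explicitly, and saves the networks listed in model.model_names_save by hand every
+# opt.saveModelEachNIteration iterations.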
+ +import numpy as np +import os +import math +import time +import sys +import logging as log + +import torch + +from options import Options +from data import create_dataset +from models import create_model + +from utils import parse_RAM_info, parse_nvidia_smi, visualizeForwardsNoGrad, printDict +from models.networks import get_scheduler + + + +if __name__ == '__main__': + + opt = Options().parseTrain() # get training options + + # opt.phase = 'val' + # val_dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + + opt.phase = 'train' + dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + + model = create_model(opt) # create a model given opt.model and other options + model.schedulers = [get_scheduler(optimizer, opt) for optimizer in model.optimizers] + + loader_iter = iter(dataset) + + opt.logger.info('################################ TRAINING STARTS ################################ - 1 Epoch = {} iterations'.format(math.ceil(dataset.__len__() / opt.batch_size))) + + try: + for i in range(1, opt.niters+1): + try: + data = next(loader_iter) + except StopIteration: + loader_iter = iter(dataset) + data = next(loader_iter) + + model.set_input(data) # unpack data from dataset and apply preprocessing + model.optimize_parameters() # calculate loss functions, get gradients, update network weights + + if i % opt.update_TB_images_freq == 1: # Update inferences in tensorboard every 'opt.update_TB_images_freq' iterations + with torch.no_grad(): + opt.tbWriter.add_image('Train/RealA', torch.round((model.real_A[0] + 1.) / 2.0 * 255.0).byte(), i) + opt.tbWriter.add_image('Train/RealB', torch.round((model.real_B[0] + 1.) / 2.0 * 255.0).byte(), i) + opt.tbWriter.add_image('Train/Fake_A2B', torch.round((model.fake_A2B[0] + 1.) / 2.0 * 255.0).byte(), i) + opt.tbWriter.add_image('Train/Fake_B2A', torch.round((model.fake_B2A[0] + 1.) / 2.0 * 255.0).byte(), i) + opt.tbWriter.add_image('Train/Recon_A2B2A', torch.round((model.fake_A2B2A[0] + 1.) / 2.0 * 255.0).byte(), i) + opt.tbWriter.add_image('Train/Recon_B2A2B', torch.round((model.fake_B2A2B[0] + 1.) 
/ 2.0 * 255.0).byte(), i) + if opt.use_segm_model: + labelMap = torch.argmax(model.rec_B_Segm[0], 0).byte() * (255 / 7) + opt.tbWriter.add_image('Train/Rec_B2A2B_Seg',torch.stack([labelMap, labelMap, labelMap], dim=0), i) + labelMap = torch.argmax(model.idt_B_Segm[0], 0).byte() * (255 / 7) + opt.tbWriter.add_image('Train/Idt_B2B_Seg',torch.stack([labelMap, labelMap, labelMap], dim=0), i) + + currLossesDict = {} + currLossesDict['G_A'] = model.G_loss_A.item() + currLossesDict['G_B'] = model.G_loss_B.item() + currLossesDict['D_A'] = model.D_loss_A.item() + currLossesDict['D_B'] = model.D_loss_B.item() + opt.tbWriter.add_scalars('Plot/train', currLossesDict, i) + opt.logger.info('[Iteration {}] '.format(i) + printDict(currLossesDict)) + + if i % opt.saveModelEachNIteration == 0: + opt.logger.info('Saving model at iteration {}'.format(str(i))) + for name in model.model_names_save: + if isinstance(name, str): + save_filename = '%s_net_%s.pth' % (str(i), name) + save_path = os.path.join(model.save_dir, save_filename) + net = getattr(model, name) + if len(model.gpu_ids) > 0 and torch.cuda.is_available(): + torch.save(net.cpu().state_dict(), save_path) + net.cuda(model.gpu_ids[0]) + else: + torch.save(net.cpu().state_dict(), save_path) + + if i % opt.print_memory_usage_freq == 0: + # opt.logger.info('[Iteration ' + str(i) + '] ' + parse_nvidia_smi(opt.gpu_ids[0])) + opt.logger.info('[Iteration ' + str(i) + '] ' + parse_RAM_info()) + + model.update_learning_rate(i) # update learning rates at the end of every iteration. + + opt.logger.info('########################## TRAINING DONE ##########################') + + except: + opt.logger.exception('! Exception !') + raise + diff --git a/utils.py b/utils.py new file mode 100644 index 0000000..1596efa --- /dev/null +++ b/utils.py @@ -0,0 +1,793 @@ +from __future__ import print_function +from PIL import Image +from glob import glob + +import numpy as np +import torch +from subprocess import check_output +import os +import psutil +import math + +import matplotlib as mpl +import matplotlib.pyplot as plt + +from torch.utils.data import DataLoader +from torch.utils.data.sampler import SubsetRandomSampler +import torch.nn as nn + +from scipy.ndimage.measurements import label +from scipy.ndimage.morphology import binary_dilation, binary_fill_holes + + +colors = torch.tensor([[ 0, 0, 0], # Black + [255, 0, 0], # Red + [ 0, 255, 0], # Green + [ 0, 0, 255], # Blue + [ 0, 255, 255], # Cyan + [255, 0, 255], # Magenta + [255, 255, 0], # Yellow + [139, 69, 19], # Brown (saddlebrown) + [128, 0, 128], # Purple + [255, 140, 0], # Orange + [255, 255, 255]], dtype=torch.uint8) # White + + + + + + +def tensor2im(input_image, imtype=np.uint8): + """"Converts a Tensor array into a numpy image array. 
+ + Parameters: + input_image (tensor) -- the input image tensor array + imtype (type) -- the desired type of the converted numpy array + """ + if not isinstance(input_image, np.ndarray): + if isinstance(input_image, torch.Tensor): # get the data from a variable + image_tensor = input_image.data + else: + return input_image + image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array + if image_numpy.shape[0] == 1: # grayscale to RGB + image_numpy = np.tile(image_numpy, (3, 1, 1)) + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling + else: # if it is a numpy array, do nothing + image_numpy = input_image + return image_numpy.astype(imtype) + + +def diagnose_network(net, name='network'): + """Calculate and print the mean of average absolute(gradients) + + Parameters: + net (torch network) -- Torch network + name (str) -- the name of the network + """ + mean = 0.0 + count = 0 + for param in net.parameters(): + if param.grad is not None: + mean += torch.mean(torch.abs(param.grad.data)) + count += 1 + if count > 0: + mean = mean / count + print(name) + print(mean) + + +def save_image(image_numpy, image_path): + """Save a numpy image to the disk + + Parameters: + image_numpy (numpy array) -- input numpy array + image_path (str) -- the path of the image + """ + image_pil = Image.fromarray(image_numpy) + image_pil.save(image_path) + + +def print_numpy(x, val=True, shp=False): + """Print the mean, min, max, median, std, and size of a numpy array + + Parameters: + val (bool) -- if print the values of the numpy array + shp (bool) -- if print the shape of the numpy array + """ + x = x.astype(np.float64) + if shp: + print('shape,', x.shape) + if val: + x = x.flatten() + print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( + np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) + + +def mkdirs(paths): + """create empty directories if they don't exist + + Parameters: + paths (str list) -- a list of directory paths + """ + if isinstance(paths, list) and not isinstance(paths, str): + for path in paths: + mkdir(path) + else: + mkdir(paths) + + +def mkdir(path): + """create a single empty directory if it didn't exist + + Parameters: + path (str) -- a single directory path + """ + if not os.path.exists(path): + os.makedirs(path) + +def get_starting_iteration(opt): + model_list = glob(os.path.join(opt.checkpoints_dir,'*.pth')) + if not len(model_list) == 0: + model_list.sort() + start_iter = int(model_list[-1][-13]) + opt.logger.info("Loading former Network at Iteration " + str(start_iter)) + else: + opt.logger.info("Starting new network") + return 1 + return start_iter + + +def visualizeForwardsNoGrad(model, iteration, tbWriter, phase='Train'): + with torch.no_grad(): + for label, image in model.get_current_visuals().items(): + if image is not None: + if label == 'segm_B' or label == 'rec_B_Segm' or label == 'idt_B_Segm': + labelMap = torch.argmax(image[0], 0).byte() * (255 / 7) + tbWriter.add_image(phase + '/' + label, torch.stack([labelMap, labelMap, labelMap], dim=0), iteration) + else: + tbWriter.add_image(phase + '/' + label, torch.round((image[0] + 1.) 
/ 2.0 * 255.0).byte(), iteration) + +def printDict(dict): + s = "" + sum = 0 + for label, key in dict.items(): + s += label + ": {},\t".format(round(key,4)) + sum += key + return s + "Mean: {}".format(round(sum/len(dict),4)) + + +def generate_ball(radius): + structure = np.zeros((3, 3), dtype=np.int) + structure[1, :] = 1 + structure[:, 1] = 1 + + ball = np.zeros((radius * 2 + 1, radius * 2 + 1), dtype=np.uint8) + ball[radius, radius] = 1 + for i in range(radius): + ball = binary_dilation(ball, structure=structure) + return np.asarray(ball, dtype=np.int) + + +def saveIHCTranslation_PredictionOverlay(stain_img, translated_img, network_output, instance_pred, instance_gt, savePath, tubuliInstanceID_StartsWith, saveImageResultsSeparately, figHeight, alpha=0.4): + colorMap = np.array([[0, 0, 0], # Black + [255, 0, 0], # Red + [0, 255, 0], # Green + [0, 0, 255], # Blue + [0, 255, 255], # Cyan + [255, 0, 255], # Magenta + [255, 255, 0], # Yellow + [139, 69, 19], # Brown (saddlebrown) + [128, 0, 128], # Purple + [255, 140, 0], # Orange + [255, 255, 255]], dtype=np.uint8) # White + + newRandomColors = np.random.randint(low=0, high=256, dtype=np.uint8, size=(max(instance_pred.max(), instance_gt.max()), 3)) + colorMap = np.concatenate((colorMap, newRandomColors)) + colorMap = colorMap / 255. + customColorMap = mpl.colors.ListedColormap(colorMap) + max_number_of_labels = len(colorMap) + + instance_pred_Mask = np.ma.masked_where(instance_pred == 0, instance_pred) + + pixelShiftX = (translated_img.shape[0]-instance_pred_Mask.shape[0])//2 + 1 + pixelShiftY = (translated_img.shape[1]-instance_pred_Mask.shape[1])//2 + 1 + + plt.figure(figsize=(figHeight*2.5, figHeight)) + plt.subplot(251) + plt.imshow(stain_img) + plt.axis('off') + plt.subplot(252) + plt.imshow(translated_img) + plt.axis('off') + plt.subplot(253) + plt.imshow(stain_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.axis('off') + plt.subplot(254) + plt.imshow(translated_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.axis('off') + plt.subplot(255) + plt.imshow(network_output, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(256) + finalPredictionNonInstance = instance_pred.copy() + finalPredictionNonInstance[finalPredictionNonInstance >= tubuliInstanceID_StartsWith] = 1 + plt.imshow(finalPredictionNonInstance, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(257) + finalPredictionNonInstance_Mask = np.ma.masked_where(finalPredictionNonInstance == 0, finalPredictionNonInstance) + plt.imshow(translated_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.imshow(finalPredictionNonInstance_Mask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha=alpha) + plt.axis('off') + plt.subplot(258) + plt.imshow(instance_pred, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(259) + plt.imshow(translated_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.imshow(instance_pred_Mask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha=alpha) + plt.axis('off') + plt.subplot(2,5,10) + plt.imshow(instance_gt, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + + plt.subplots_adjust(wspace=0, 
hspace=0) + + plt.savefig(savePath) + plt.close() + + if saveImageResultsSeparately: + fileName = savePath.split('/')[-1][:-4] + savePathName = "/".join(savePath.split('/')[:-1])+"/separateImages" + if not os.path.exists(savePathName): + os.makedirs(savePathName) + + plt.figure(figsize=(10, 10)) + plt.imshow(stain_img) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_1.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(translated_img) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_2.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(stain_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_3.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(translated_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_4.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(network_output, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_5.png', bbox_inches = 'tight', pad_inches = 0) + + finalPredictionNonInstance = instance_pred.copy() + finalPredictionNonInstance[finalPredictionNonInstance >= tubuliInstanceID_StartsWith] = 1 + plt.imshow(finalPredictionNonInstance, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_6.png', bbox_inches = 'tight', pad_inches = 0) + + finalPredictionNonInstance_Mask = np.ma.masked_where(finalPredictionNonInstance == 0, finalPredictionNonInstance) + plt.imshow(translated_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.imshow(finalPredictionNonInstance_Mask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha=alpha) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_7.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(instance_pred, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_8.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(translated_img[pixelShiftX:pixelShiftX+instance_pred_Mask.shape[0], pixelShiftY:pixelShiftY+instance_pred_Mask.shape[1],:3]) + plt.imshow(instance_pred_Mask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha=alpha) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_9.png', bbox_inches = 'tight', pad_inches = 0) + + plt.imshow(instance_gt, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_10.png', bbox_inches = 'tight', pad_inches = 0) + + instance_gt_copy = instance_gt.copy() + instance_gt_copy[instance_gt_copy >= tubuliInstanceID_StartsWith] = 1 + plt.imshow(instance_gt_copy, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.savefig(savePathName + '/' + fileName + '_11.png', bbox_inches = 'tight', pad_inches = 0) + + plt.close() + + + +def convert_labelmap_to_rgb(labelmap): + """ + Method used to generate rgb label maps for tensorboard visualization + :param labelmap: HxW label map tensor containing values from 0 to n_classes + :return: 3xHxW RGB label map containing colors in the following order: Black (background), Red, Green, 
Blue, Cyan, Magenta, Yellow, Brown, Orange, Purple + """ + n_classes = labelmap.max() + + result = torch.zeros(size=(labelmap.size()[0], labelmap.size()[1], 3), dtype=torch.uint8) + for i in range(1, n_classes+1): + result[labelmap == i] = colors[i] + + return result.permute(2, 0, 1) + +def convert_labelmap_to_rgb_with_instance_first_class(labelmap, structure): + """ + Method used to generate rgb label maps for tensorboard visualization + :param labelmap: HxW label map tensor containing values from 0 to n_classes + :return: 3xHxW RGB label map containing colors in the following order: Black (background), Red, Green, Blue, Cyan, Magenta, Yellow, Brown, Orange, Purple + """ + n_classes = labelmap.max() + + result = np.zeros(shape=(labelmap.shape[0], labelmap.shape[1], 3), dtype=np.uint8) + for i in range(2, n_classes+1): + result[labelmap == i] = colors[i].numpy() + + structure = np.ones((3, 3), dtype=np.int) + + labeledTubuli, numberTubuli = label(np.asarray(labelmap == 1, np.uint8), structure) # datatype of 'labeledTubuli': int32 + for i in range(1, numberTubuli + 1): + result[binary_dilation(binary_dilation(binary_dilation(labeledTubuli == i)))] = np.random.randint(low=0, high=256, size=3, dtype=np.uint8) # assign random colors to tubuli + + return result + +def convert_labelmap_to_rgb_except_first_class(labelmap): + """ + Method used to generate rgb label maps for tensorboard visualization + :param labelmap: HxW label map tensor containing values from 0 to n_classes + :return: 3xHxW RGB label map containing colors in the following order: Black (background), Red, Green, Blue, Cyan, Magenta, Yellow, Brown, Orange, Purple + """ + n_classes = labelmap.max() + + result = torch.zeros(size=(labelmap.size()[0], labelmap.size()[1], 3), dtype=torch.uint8) + for i in range(2, n_classes+1): + result[labelmap == i] = colors[i] + + return result.permute(2, 0, 1) + + +def getColorMapForLabelMap(): + return ['black', 'red', 'green', 'blue', 'cyan', 'magenta', 'yellow', 'brown', 'purple', 'orange', 'white'] + +def saveFigureResults(img, outputPrediction, postprocessedPrediction, finalPredictionRGB, GT, preprocessedGT, preprocessedGTrgb, fullResultPath, alpha=0.4): + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert outputPrediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' 
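+    # vmin/vmax in the imshow calls below pin the ListedColormap, so label value i is always
+    # rendered with the i-th color of getColorMapForLabelMap(), independent of which labels
+    # actually occur in this particular image.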
+ customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + # avoid brown color (border visualization) in output for final GT and prediction + postprocessedPrediction[postprocessedPrediction==7] = 0 + preprocessedGT[preprocessedGT==7] = 0 + + # also dilate tubuli here + postprocessedPrediction[binary_dilation(postprocessedPrediction==1)] = 1 + + predictionMask = np.ma.masked_where(postprocessedPrediction == 0, postprocessedPrediction) + + plt.figure(figsize=(16, 8.1)) + plt.subplot(241) + plt.imshow(img) + plt.axis('off') + plt.subplot(242) + plt.imshow(outputPrediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(243) + plt.imshow(postprocessedPrediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(244) + plt.imshow(finalPredictionRGB) + plt.axis('off') + plt.subplot(245) + plt.imshow(img[(img.shape[0]-outputPrediction.shape[0])//2:(img.shape[0]-outputPrediction.shape[0])//2+outputPrediction.shape[0],(img.shape[1]-outputPrediction.shape[1])//2:(img.shape[1]-outputPrediction.shape[1])//2+outputPrediction.shape[1],:]) + plt.imshow(predictionMask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha = alpha) + plt.axis('off') + plt.subplot(246) + plt.imshow(GT, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(247) + plt.imshow(preprocessedGT, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.axis('off') + plt.subplot(248) + plt.imshow(preprocessedGTrgb) + plt.axis('off') + + plt.subplots_adjust(wspace=0, hspace=0) + + plt.savefig(fullResultPath) + plt.close() + +def savePredictionResults(predictionWithoutTubuliDilation, fullResultPath, figSize): + prediction = predictionWithoutTubuliDilation.copy() + prediction[binary_dilation(binary_dilation(binary_dilation(binary_dilation(prediction == 1))))] = 1 + + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' + customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(prediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.savefig(fullResultPath) + plt.close() + +def savePredictionResultsWithoutDilation(prediction, fullResultPath, figSize): + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' 
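+    # As in savePredictionResults, a full-bleed Axes ([0., 0., 1., 1.] with the axis switched off)
+    # is used so that the exported figure contains nothing but the color-coded label map.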
+ customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(prediction, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1) + plt.savefig(fullResultPath) + plt.close() + +def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha=0.4): + predictionMask = np.ma.masked_where(prediction == 0, prediction) + + colorMap = np.array([[0, 0, 0], # Black + [255, 0, 0], # Red + [0, 255, 0], # Green + [0, 0, 255], # Blue + [0, 255, 255], # Cyan + [255, 0, 255], # Magenta + [255, 255, 0], # Yellow + [139, 69, 19], # Brown (saddlebrown) + [128, 0, 128], # Purple + [255, 140, 0], # Orange + [255, 255, 255]], dtype=np.uint8) # White + + newRandomColors = np.random.randint(low=0, high=256, dtype=np.uint8, size=(prediction.max(), 3)) + colorMap = np.concatenate((colorMap, newRandomColors)) + colorMap = colorMap / 255. + + max_number_of_labels = len(colorMap) + assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' + customColorMap = mpl.colors.ListedColormap(colorMap) + + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(img) + ax.imshow(predictionMask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha = alpha) + plt.savefig(fullResultPath) + plt.close() + +def saveOverlayResults(img, seg, fullResultPath, figHeight, alpha=0.4): + segMask = np.ma.masked_where(seg == 0, seg) + + customColors = getColorMapForLabelMap() + max_number_of_labels = len(customColors) + assert seg.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!' + customColorMap = mpl.colors.ListedColormap(getColorMapForLabelMap()) + + fig = plt.figure(figsize=(figHeight*seg.shape[1]/seg.shape[0], figHeight)) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(img) + ax.imshow(segMask, cmap=customColorMap, vmin = 0, vmax = max_number_of_labels-1, alpha = alpha) + plt.savefig(fullResultPath) + plt.close() + +def saveRGBPredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha=0.4): + predictionMask = prediction.sum(2)==0 + predictionCopy = prediction.copy() + predictionCopy[predictionMask] = img[predictionMask] + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(np.asarray(np.round(predictionCopy*alpha+(1-alpha)*img), np.uint8)) + plt.savefig(fullResultPath) + plt.close() + +def saveImage(img, fullResultPath, figSize): + fig = plt.figure(figsize=figSize) + ax = plt.Axes(fig, [0., 0., 1., 1., ]) + ax.set_axis_off() + fig.add_axes(ax) + ax.imshow(img) + plt.savefig(fullResultPath) + plt.close() + + + +def getCrossValSplits(dataIDX, amountFolds, foldNo, setting): + """ + Cross-Validation-Split of indices according to fold number and setting + Usage: + dataIDX = np.arange(dataset.__len__()) + # np.random.shuffle(dataIDX) + for i in range(amountFolds): + train_idx, val_idx, test_idx = getCrossFoldSplits(dataIDX=dataIDX, amountFolds=amountFolds, foldNo=i+1, setting=setting) + :param dataIDX: All data indices stored in numpy array + :param amountFolds: Total amount of folds + :param foldNo: Fold number, # CARE: Fold numbers start with 1 and go up to amountFolds ! 
# + :param setting: Train / Train+Test / Train+Val / Train+Test+Val + :return: tuple consisting of 3 numpy arrays (trainIDX, valIDX, testIDX) containing indices according to split + """ + assert (setting in ['train_val_test', 'train_test', 'train_val', 'train']), 'Given setting >'+setting+'< is incorrect!' + + num_total_data = dataIDX.__len__() + + if setting == 'train': + return dataIDX, None, None + + elif setting == 'train_val': + valIDX = dataIDX[num_total_data * (foldNo - 1) // amountFolds: num_total_data * foldNo // amountFolds] + trainIDX = np.setxor1d(dataIDX, valIDX) + return trainIDX, valIDX, None + + elif setting == 'train_test': + testIDX = dataIDX[num_total_data * (foldNo - 1) // amountFolds: num_total_data * foldNo // amountFolds] + trainIDX = np.setxor1d(dataIDX, testIDX) + return trainIDX, None, testIDX + + elif setting == 'train_val_test': + testIDX = dataIDX[num_total_data * (foldNo - 1) // amountFolds: num_total_data * foldNo // amountFolds] + if foldNo != amountFolds: + valIDX = dataIDX[num_total_data * foldNo // amountFolds: num_total_data * (foldNo+1) // amountFolds] + else: + valIDX = dataIDX[0 : num_total_data // amountFolds] + trainIDX = np.setxor1d(np.setxor1d(dataIDX, testIDX), valIDX) + return trainIDX, valIDX, testIDX + + else: + raise ValueError('Given setting >'+str(setting)+'< is invalid!') + + +def parse_nvidia_smi(unit=0): + result = check_output(["nvidia-smi", "-i", str(unit),]).decode('utf-8').split('\n') + return 'Current GPU usage: ' + result[0] + '\r\n' + result[5] + '\r\n' + result[8] + + +def parse_RAM_info(): + return 'Current RAM usage: '+str(round(psutil.Process(os.getpid()).memory_info().rss / 1E6, 2))+' MB' + + +def countParam(model): + model_parameters = filter(lambda p: p.requires_grad, model.parameters()) + params = sum([np.prod(p.size()) for p in model_parameters]) + return params + + +def getOneHotEncoding(imgBatch, labelBatch): + """ + :param imgBatch: image minibatch (FloatTensor) to extract shape and device info for output + :param labelBatch: label minibatch (LongTensor) to be converted to one-hot encoding + :return: One-hot encoded label minibatch with equal size as imgBatch and stored on same device + """ + if imgBatch.size()[1] != 1: # Multi-label segmentation otherwise binary segmentation + labelBatch = labelBatch.unsqueeze(1) + onehotEncoding = torch.zeros_like(imgBatch) + onehotEncoding.scatter_(1, labelBatch, 1) + return onehotEncoding + return labelBatch + + +def getWeightsForCEloss(dataset, train_idx, areLabelsOnehotEncoded, device, logger): + # Choice 1) Manually set custom weights + weights = torch.tensor([1,2,4,6,2,3], dtype=torch.float32, device=device) + weights = weights / weights.sum() + + # Choice 2) Compute weights as "np.mean(histogram) / histogram" + dataloader = DataLoader(dataset=dataset, batch_size=6, sampler=SubsetRandomSampler(train_idx), num_workers=6) + + if areLabelsOnehotEncoded: + histograms = 0 + for batch in dataloader: + imgBatch, segBatch = batch + amountLabels = segBatch.size()[1] + if amountLabels == 1: # binary segmentation + histograms = histograms + torch.tensor([(segBatch==0).sum(),(segBatch==1).sum()]) + else: # multi-label segmentation + if imgBatch.dim() == 4: #2D data + histograms = histograms + segBatch.sum(3).sum(2).sum(0) + else: #3D data + histograms = histograms + segBatch.sum(4).sum(3).sum(2).sum(0) + + histograms = histograms.numpy() + else: + histograms = np.array([0]) + for batch in dataloader: + _, segBatch = batch + + segHistogram = np.histogram(segBatch.numpy(), 
segBatch.numpy().max()+1)[0] + + if len(histograms) >= len(segHistogram): #(segHistogram could have different size than histograms) + histograms[:len(segHistogram)] += segHistogram + else: + segHistogram[:len(histograms)] += histograms + histograms = segHistogram + + weights = np.mean(histograms) / histograms + weights = torch.from_numpy(weights).float().to(device) + + logger.info('=> Weights for CE-loss: '+str(weights)) + + return weights + + + +def getMeanDiceScores(diceScores, logger): + """ + Compute mean label dice scores of numpy dice score array (2d) (and its mean) + :return: mean label dice scores with '-1' representing totally missing label (meanLabelDiceScores), mean overall dice score (meanOverallDice) + """ + meanLabelDiceScores = np.ma.masked_where(diceScores == -1, diceScores).mean(0).data + label_GT_occurrences = (diceScores != -1).sum(0) + if (label_GT_occurrences == 0).any(): + logger.info('[# WARNING #] Label(s): ' + str(np.argwhere(label_GT_occurrences == 0).flatten() + 1) + ' not present at all in current dataset split!') + meanLabelDiceScores[label_GT_occurrences == 0] = -1 + meanOverallDice = meanLabelDiceScores[meanLabelDiceScores != -1].mean() + + return meanLabelDiceScores, meanOverallDice + + +def getDiceScores(prediction, segBatch): + """ + Compute mean dice scores of predicted foreground labels. + NOTE: Dice scores of missing gt labels will be excluded and are thus represented by -1 value entries in returned dice score matrix! + NOTE: Method changes prediction to 0/1 values in the binary case! + :param prediction: BxCxHxW (if 2D) or BxCxHxWxD (if 3D) FloatTensor (care: prediction has not undergone any final activation!) (note: C=1 for binary segmentation task) + :param segBatch: BxCxHxW (if 2D) or BxCxHxWxD (if 3D) FloatTensor (Onehot-Encoding) or Bx1xHxW (if 2D) or Bx1xHxWxD (if 3D) LongTensor + :return: Numpy array containing BxC-1 (background excluded) dice scores + """ + batchSize, amountClasses = prediction.size()[0], prediction.size()[1] + + if amountClasses == 1: # binary segmentation task => simulate sigmoid to get label results + prediction[prediction >= 0] = 1 + prediction[prediction < 0] = 0 + prediction = prediction.squeeze(1) + segBatch = segBatch.squeeze(1) + amountClasses += 1 + else: # multi-label segmentation task + prediction = prediction.argmax(1) # LongTensor without C-channel + if segBatch.dtype == torch.float32: # segBatch is onehot-encoded + segBatch = segBatch.argmax(1) + else: + segBatch = segBatch.squeeze(1) + + prediction = prediction.view(batchSize, -1) + segBatch = segBatch.view(batchSize, -1) + + labelDiceScores = np.zeros((batchSize, amountClasses-1), dtype=np.float32) - 1 #ignore background class! + for b in range(batchSize): + currPred = prediction[b,:] + currGT = segBatch[b,:] + + for c in range(1,amountClasses): + classPred = (currPred == c).float() + classGT = (currGT == c).float() + + if classGT.sum() != 0: # only evaluate label prediction when is also present in ground-truth + labelDiceScores[b, c-1] = ((2. 
* (classPred * classGT).sum()) / (classGT.sum() + classPred.sum())).item() + + return labelDiceScores + + +def printResultsForDiseaseModel(evaluatorID, allClassEvaluators, logger): + logger.info('########## NOW: Detection (average precision) and segmentation accuracies (object-level dice): ##########') + precisionsTub, avg_precisionTub, avg_dice_scoreTub, std_dice_scoreTub, min_dice_scoreTub, max_dice_scoreTub = allClassEvaluators[evaluatorID][0].score() # tubuliresults + precisionsGlom, avg_precisionGlom, avg_dice_scoreGlom, std_dice_scoreGlom, min_dice_scoreGlom, max_dice_scoreGlom = allClassEvaluators[evaluatorID][1].score() # tubuliresults + precisionsTuft, avg_precisionTuft, avg_dice_scoreTuft, std_dice_scoreTuft, min_dice_scoreTuft, max_dice_scoreTuft = allClassEvaluators[evaluatorID][2].score() # tubuliresults + precisionsVeins, avg_precisionVeins, avg_dice_scoreVeins, std_dice_scoreVeins, min_dice_scoreVeins, max_dice_scoreVeins = allClassEvaluators[evaluatorID][3].score() # tubuliresults + precisionsArtery, avg_precisionArtery, avg_dice_scoreArtery, std_dice_scoreArtery, min_dice_scoreArtery, max_dice_scoreArtery = allClassEvaluators[evaluatorID][4].score() # tubuliresults + precisionsLumen, avg_precisionLumen, avg_dice_scoreLumen, std_dice_scoreLumen, min_dice_scoreLumen, max_dice_scoreLumen = allClassEvaluators[evaluatorID][5].score() # tubuliresults + logger.info('DETECTION RESULTS MEASURED BY AVERAGE PRECISION:') + logger.info('0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 <- Thresholds') + logger.info(str(np.round(precisionsTub, 4)) + ', Mean: ' + str(np.round(avg_precisionTub, 4)) + ' <-- Tubuli') + logger.info(str(np.round(precisionsGlom, 4)) + ', Mean: ' + str(np.round(avg_precisionGlom, 4)) + ' <-- Glomeruli (incl. tuft)') + logger.info(str(np.round(precisionsTuft, 4)) + ', Mean: ' + str(np.round(avg_precisionTuft, 4)) + ' <-- Tuft') + logger.info(str(np.round(precisionsVeins, 4)) + ', Mean: ' + str(np.round(avg_precisionVeins, 4)) + ' <-- Veins') + logger.info(str(np.round(precisionsArtery, 4)) + ', Mean: ' + str(np.round(avg_precisionArtery, 4)) + ' <-- Artery (incl. lumen)') + logger.info(str(np.round(precisionsLumen, 4)) + ', Mean: ' + str(np.round(avg_precisionLumen, 4)) + ' <-- Artery lumen') + logger.info('SEGMENTATION RESULTS MEASURED BY OBJECT-LEVEL DICE SCORES:') + logger.info('Mean: ' + str(np.round(avg_dice_scoreTub, 4)) + ', Std: ' + str(np.round(std_dice_scoreTub, 4)) + ', Min: ' + str(np.round(min_dice_scoreTub, 4)) + ', Max: ' + str(np.round(max_dice_scoreTub, 4)) + ' <-- Tubuli') + logger.info('Mean: ' + str(np.round(avg_dice_scoreGlom, 4)) + ', Std: ' + str(np.round(std_dice_scoreGlom, 4)) + ', Min: ' + str(np.round(min_dice_scoreGlom, 4)) + ', Max: ' + str(np.round(max_dice_scoreGlom, 4)) + ' <-- Glomeruli (incl. tuft)') + logger.info('Mean: ' + str(np.round(avg_dice_scoreTuft, 4)) + ', Std: ' + str(np.round(std_dice_scoreTuft, 4)) + ', Min: ' + str(np.round(min_dice_scoreTuft, 4)) + ', Max: ' + str(np.round(max_dice_scoreTuft, 4)) + ' <-- Tuft') + logger.info('Mean: ' + str(np.round(avg_dice_scoreVeins, 4)) + ', Std: ' + str(np.round(std_dice_scoreVeins, 4)) + ', Min: ' + str(np.round(min_dice_scoreVeins, 4)) + ', Max: ' + str(np.round(max_dice_scoreVeins, 4)) + ' <-- Veins') + logger.info('Mean: ' + str(np.round(avg_dice_scoreArtery, 4)) + ', Std: ' + str(np.round(std_dice_scoreArtery, 4)) + ', Min: ' + str(np.round(min_dice_scoreArtery, 4)) + ', Max: ' + str(np.round(max_dice_scoreArtery, 4)) + ' <-- Artery (incl. 
lumen)') + logger.info('Mean: ' + str(np.round(avg_dice_scoreLumen, 4)) + ', Std: ' + str(np.round(std_dice_scoreLumen, 4)) + ', Min: ' + str(np.round(min_dice_scoreLumen, 4)) + ', Max: ' + str(np.round(max_dice_scoreLumen, 4)) + ' <-- Artery lumen') + + + +import numpy as np +from skimage.util import view_as_windows +from itertools import product +from typing import Tuple + +def patchify(patches: np.ndarray, patch_size: Tuple[int, int], step: int = 1): + return view_as_windows(patches, patch_size, step) + +def unpatchify(patches: np.ndarray, imsize: Tuple[int, int]): + + assert len(patches.shape) == 4 + + i_h, i_w = imsize + image = np.zeros(imsize, dtype=patches.dtype) + divisor = np.zeros(imsize, dtype=patches.dtype) + + n_h, n_w, p_h, p_w = patches.shape + + # Calculat the overlap size in each axis + o_w = (n_w * p_w - i_w) / (n_w - 1) + o_h = (n_h * p_h - i_h) / (n_h - 1) + + # The overlap should be integer, otherwise the patches are unable to reconstruct into a image with given shape + assert int(o_w) == o_w + assert int(o_h) == o_h + + o_w = int(o_w) + o_h = int(o_h) + + s_w = p_w - o_w + s_h = p_h - o_h + + for i, j in product(range(n_h), range(n_w)): + patch = patches[i,j] + image[(i * s_h):(i * s_h) + p_h, (j * s_w):(j * s_w) + p_w] += patch + divisor[(i * s_h):(i * s_h) + p_h, (j * s_w):(j * s_w) + p_w] += 1 + + return image / divisor + +# Examplary use: +# # # # # # # # # +# import numpy as np +# from patchify import patchify, unpatchify +# +# image = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) +# +# patches = patchify(image, (2,2), step=1) # split image into 2*3 small 2*2 patches. +# +# assert patches.shape == (2, 3, 2, 2) +# reconstructed_image = unpatchify(patches, image.shape) +# +# assert (reconstructed_image == image).all() + + + +def getChannelSmootingConvLayer(channels, kernel_size=5, sigma=1.5): + + # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2) + x_cord = torch.arange(kernel_size) + x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size) + y_grid = x_grid.t() + xy_grid = torch.stack([x_grid, y_grid], dim=-1) + + mean = (kernel_size - 1) / 2. + variance = sigma ** 2. + + # Calculate the 2-dimensional gaussian kernel which is + # the product of two gaussian distributions for two different + # variables (in this case called x and y) + gaussian_kernel = (1. / (2. * math.pi * variance)) * \ + torch.exp( + (-torch.sum((xy_grid - mean) ** 2., dim=-1) / \ + (2 * variance)).float() + ) + # Make sure sum of values in gaussian kernel equals 1. + gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel) + + # Reshape to 2d depthwise convolutional weight + gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size) + gaussian_kernel = gaussian_kernel.repeat(channels, 1, 1, 1) + + gaussian_filter = nn.Conv2d(in_channels=channels, out_channels=channels, + kernel_size=kernel_size, groups=channels, bias=False, padding=2) + + gaussian_filter.weight.data = gaussian_kernel + gaussian_filter.weight.requires_grad = False + + return gaussian_filter + + +if __name__ == '__main__': + print() \ No newline at end of file -- GitLab