diff --git a/code/config.py b/code/config.py new file mode 100644 index 0000000..184154c --- /dev/null +++ b/code/config.py @@ -0,0 +1,246 @@ +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# --------------------------------------------------------' + +import os +import yaml +from yacs.config import CfgNode as CN + +_C = CN() + +# Base config files +_C.BASE = [''] + +# ----------------------------------------------------------------------------- +# Data settings +# ----------------------------------------------------------------------------- +_C.DATA = CN() +# Batch size for a single GPU, could be overwritten by command line argument +_C.DATA.BATCH_SIZE = 128 +# Path to dataset, could be overwritten by command line argument +_C.DATA.DATA_PATH = '' +# Dataset name +_C.DATA.DATASET = 'imagenet' +# Input image size +_C.DATA.IMG_SIZE = 224 +# Interpolation to resize image (random, bilinear, bicubic) +_C.DATA.INTERPOLATION = 'bicubic' +# Use zipped dataset instead of folder dataset +# could be overwritten by command line argument +_C.DATA.ZIP_MODE = False +# Cache Data in Memory, could be overwritten by command line argument +_C.DATA.CACHE_MODE = 'part' +# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU. +_C.DATA.PIN_MEMORY = True +# Number of data loading threads +_C.DATA.NUM_WORKERS = 8 + +# ----------------------------------------------------------------------------- +# Model settings +# ----------------------------------------------------------------------------- +_C.MODEL = CN() +# Model type +_C.MODEL.TYPE = 'swin' +# Model name +_C.MODEL.NAME = 'swin_tiny_patch4_window7_224' +# Checkpoint to resume, could be overwritten by command line argument +_C.MODEL.PRETRAIN_CKPT = '/mnt/sdd/tb/WSL4MIS/code/pretrained_ckpt/swin_tiny_patch4_window7_224.pth' +_C.MODEL.RESUME = '' +# Number of classes, overwritten in data preparation +_C.MODEL.NUM_CLASSES = 1000 +# Dropout rate +_C.MODEL.DROP_RATE = 0.0 +# Drop path rate +_C.MODEL.DROP_PATH_RATE = 0.1 +# Label Smoothing +_C.MODEL.LABEL_SMOOTHING = 0.1 + +# Swin Transformer parameters +# _C.MODEL.SWIN = CN() +# _C.MODEL.SWIN.PATCH_SIZE = 4 +# _C.MODEL.SWIN.IN_CHANS = 3 +# _C.MODEL.SWIN.EMBED_DIM = 96 +# _C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2] +# _C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2] +# _C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24] +# _C.MODEL.SWIN.WINDOW_SIZE = 7 +# _C.MODEL.SWIN.MLP_RATIO = 4. +# _C.MODEL.SWIN.QKV_BIAS = True +# _C.MODEL.SWIN.QK_SCALE =False# None +# _C.MODEL.SWIN.APE = False +# _C.MODEL.SWIN.PATCH_NORM = True +# _C.MODEL.SWIN.FINAL_UPSAMPLE= "expand_first" +# Swin Transformer parameters +_C.MODEL.SWIN = CN() +_C.MODEL.SWIN.PATCH_SIZE = 4 +_C.MODEL.SWIN.IN_CHANS = 3 +_C.MODEL.SWIN.EMBED_DIM = 96 +_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24] +_C.MODEL.SWIN.WINDOW_SIZE = 7 +_C.MODEL.SWIN.MLP_RATIO = 4. 
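# NOTE (illustrative sketch, not part of this diff): these yacs defaults are meant to be
# overridden by a YAML config such as configs/swin_tiny_patch4_window7_224_lite.yaml,
# added later in this change. A minimal merge pattern, assuming only yacs is available:
#
#   cfg = _C.clone()                 # keep the module-level defaults untouched
#   cfg.defrost()
#   cfg.merge_from_file("configs/swin_tiny_patch4_window7_224_lite.yaml")
#   cfg.merge_from_list(["TRAIN.BASE_LR", 1e-4])   # optional CLI-style override
#   cfg.freeze()
#   print(cfg.MODEL.SWIN.DEPTHS)     # -> [2, 2, 2, 2] once the "lite" yaml is merged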
+_C.MODEL.SWIN.QKV_BIAS = True +_C.MODEL.SWIN.QK_SCALE = False +_C.MODEL.SWIN.APE = False +_C.MODEL.SWIN.PATCH_NORM = True +_C.MODEL.SWIN.FINAL_UPSAMPLE= "expand_first" + + + +# ----------------------------------------------------------------------------- +# Training settings +# ----------------------------------------------------------------------------- +_C.TRAIN = CN() +_C.TRAIN.START_EPOCH = 0 +_C.TRAIN.EPOCHS = 300 +_C.TRAIN.WARMUP_EPOCHS = 20 +_C.TRAIN.WEIGHT_DECAY = 0.05 +_C.TRAIN.BASE_LR = 5e-4 +_C.TRAIN.WARMUP_LR = 5e-7 +_C.TRAIN.MIN_LR = 5e-6 +# Clip gradient norm +_C.TRAIN.CLIP_GRAD = 5.0 +# Auto resume from latest checkpoint +_C.TRAIN.AUTO_RESUME = True +# Gradient accumulation steps +# could be overwritten by command line argument +_C.TRAIN.ACCUMULATION_STEPS = 0 +# Whether to use gradient checkpointing to save memory +# could be overwritten by command line argument +_C.TRAIN.USE_CHECKPOINT = False + +# LR scheduler +_C.TRAIN.LR_SCHEDULER = CN() +_C.TRAIN.LR_SCHEDULER.NAME = 'cosine' +# Epoch interval to decay LR, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 +# LR decay rate, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 + +# Optimizer +_C.TRAIN.OPTIMIZER = CN() +_C.TRAIN.OPTIMIZER.NAME = 'adamw' +# Optimizer Epsilon +_C.TRAIN.OPTIMIZER.EPS = 1e-8 +# Optimizer Betas +_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) +# SGD momentum +_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9 + +# ----------------------------------------------------------------------------- +# Augmentation settings +# ----------------------------------------------------------------------------- +_C.AUG = CN() +# Color jitter factor +_C.AUG.COLOR_JITTER = 0.4 +# Use AutoAugment policy. "v0" or "original" +_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1' +# Random erase prob +_C.AUG.REPROB = 0.25 +# Random erase mode +_C.AUG.REMODE = 'pixel' +# Random erase count +_C.AUG.RECOUNT = 1 +# Mixup alpha, mixup enabled if > 0 +_C.AUG.MIXUP = 0.8 +# Cutmix alpha, cutmix enabled if > 0 +_C.AUG.CUTMIX = 1.0 +# Cutmix min/max ratio, overrides alpha and enables cutmix if set +_C.AUG.CUTMIX_MINMAX =False #None +# Probability of performing mixup or cutmix when either/both is enabled +_C.AUG.MIXUP_PROB = 1.0 +# Probability of switching to cutmix when both mixup and cutmix enabled +_C.AUG.MIXUP_SWITCH_PROB = 0.5 +# How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem" +_C.AUG.MIXUP_MODE = 'batch' + +# ----------------------------------------------------------------------------- +# Testing settings +# ----------------------------------------------------------------------------- +_C.TEST = CN() +# Whether to use center crop when testing +_C.TEST.CROP = True + +# ----------------------------------------------------------------------------- +# Misc +# ----------------------------------------------------------------------------- +# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2') +# overwritten by command line argument +_C.AMP_OPT_LEVEL = '' +# Path to output folder, overwritten by command line argument +_C.OUTPUT = '' +# Tag of experiment, overwritten by command line argument +_C.TAG = 'default' +# Frequency to save checkpoint +_C.SAVE_FREQ = 1 +# Frequency to logging info +_C.PRINT_FREQ = 10 +# Fixed random seed +_C.SEED = 0 +# Perform evaluation only, overwritten by command line argument +_C.EVAL_MODE = False +# Test throughput only, overwritten by command line argument +_C.THROUGHPUT_MODE = False +# local rank for DistributedDataParallel, given by command line argument +_C.LOCAL_RANK = 0 + + +def _update_config_from_file(config, cfg_file): + config.defrost() + with open(cfg_file, 'r') as f: + yaml_cfg = yaml.load(f, Loader=yaml.FullLoader) + + for cfg in yaml_cfg.setdefault('BASE', ['']): + if cfg: + _update_config_from_file( + config, os.path.join(os.path.dirname(cfg_file), cfg) + ) + print('=> merge config from {}'.format(cfg_file)) + config.merge_from_file(cfg_file) + config.freeze() + + +def update_config(config, args): + _update_config_from_file(config, args.cfg) + + config.defrost() + if args.opts: + config.merge_from_list(args.opts) + + # merge from specific arguments + if args.batch_size: + config.DATA.BATCH_SIZE = args.batch_size + if args.zip: + config.DATA.ZIP_MODE = True + if args.cache_mode: + config.DATA.CACHE_MODE = args.cache_mode + if args.resume: + config.MODEL.RESUME = args.resume + if args.accumulation_steps: + config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps + if args.use_checkpoint: + config.TRAIN.USE_CHECKPOINT = True + if args.amp_opt_level: + config.AMP_OPT_LEVEL = args.amp_opt_level + if args.tag: + config.TAG = args.tag + if args.eval: + config.EVAL_MODE = True + if args.throughput: + config.THROUGHPUT_MODE = True + + config.freeze() + + +def get_config(args): + """Get a yacs CfgNode object with default values.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + config = _C.clone() + update_config(config, args) + + return config diff --git a/code/configs/swin_tiny_patch4_window7_224_lite.yaml b/code/configs/swin_tiny_patch4_window7_224_lite.yaml new file mode 100644 index 0000000..599b4f0 --- /dev/null +++ b/code/configs/swin_tiny_patch4_window7_224_lite.yaml @@ -0,0 +1,12 @@ +MODEL: + TYPE: swin + NAME: swin_tiny_patch4_window7_224 + DROP_PATH_RATE: 0.2 + PRETRAIN_CKPT: "../code/pretrained_ckpt/swin_tiny_patch4_window7_224.pth" + SWIN: + FINAL_UPSAMPLE: "expand_first" + EMBED_DIM: 96 + DEPTHS: [ 2, 2, 2, 2 ] + DECODER_DEPTHS: [ 2, 2, 2, 1] + NUM_HEADS: [ 3, 6, 12, 24 ] + WINDOW_SIZE: 7 \ No newline at end of file diff --git a/code/dataloaders/dataset_semi.py b/code/dataloaders/dataset_semi.py index 22b4eba..70b3cdf 100644 --- a/code/dataloaders/dataset_semi.py +++ b/code/dataloaders/dataset_semi.py @@ -13,7 +13,24 @@ from torch.utils.data import Dataset from torch.utils.data.sampler import Sampler - +def 
pseudo_label_generator_acdc(data, seed, beta=100, mode='bf'): + from skimage.exposure import rescale_intensity + from skimage.segmentation import random_walker + if 1 not in np.unique(seed) or 2 not in np.unique(seed) or 3 not in np.unique(seed): + pseudo_label = np.zeros_like(seed) + else: + markers = np.ones_like(seed) + markers[seed == 4] = 0 + markers[seed == 0] = 1 + markers[seed == 1] = 2 + markers[seed == 2] = 3 + markers[seed == 3] = 4 + sigma = 0.35 + data = rescale_intensity(data, in_range=(-sigma, 1 + sigma), + out_range=(-1, 1)) + segmentation = random_walker(data, markers, beta, mode) + pseudo_label = segmentation - 1 + return pseudo_label class BaseDataSets(Dataset): def __init__(self, base_dir=None, num=4, labeled_type="labeled", split='train', transform=None, fold="fold1", sup_type="label"): self._base_dir = base_dir @@ -23,9 +40,12 @@ def __init__(self, base_dir=None, num=4, labeled_type="labeled", split='train', self.transform = transform self.num = num self.labeled_type = labeled_type + self.input_size = 256 + self.crop_size = 128 + self.patch_num=1 train_ids, test_ids = self._get_fold_ids(fold) - all_labeled_ids = ["patient{:0>3}".format( - 10 * i) for i in range(1, 11)] + + all_labeled_ids = ["patient{:0>3}".format(10 * i) for i in range(1, 11)] if self.split == 'train': self.all_slices = os.listdir( self._base_dir + "/ACDC_training_slices") @@ -104,19 +124,31 @@ def __len__(self): def __getitem__(self, idx): case = self.sample_list[idx] if self.split == "train": - h5f = h5py.File(self._base_dir + - "/ACDC_training_slices/{}".format(case), 'r') + h5f = h5py.File(self._base_dir +"/ACDC_training_slices/{}".format(case), 'r') else: - h5f = h5py.File(self._base_dir + - "/ACDC_training_volumes/{}".format(case), 'r') + h5f = h5py.File(self._base_dir +"/ACDC_training_volumes/{}".format(case), 'r') + boxes = self.box_generation() + image = h5f['image'][:] label = h5f['label'][:] sample = {'image': image, 'label': label} if self.split == "train": image = h5f['image'][:] - label = h5f[self.sup_type][:] - sample = {'image': image, 'label': label} - sample = self.transform(sample) + + label_wr = pseudo_label_generator_acdc(image, h5f["scribble"][:]) + label = h5f['scribble'][:] + sample = {'image': image, 'label': label,'random_walker':label_wr} + sample = self.transform(sample) + + crop_images = [] + for i in range(len(boxes)): + box = boxes[i][1:] + crop_images.append(sample['image'][:, box[1]:box[3], box[0]:box[2]].clone()[None]) + crop_images = torch.cat(crop_images, dim=0) + # crop_images=(sample['image'][:, box[1]:box[3], box[0]:box[2]].clone()[None]) + sample['boxes']=boxes + sample['crop_images']=crop_images + else: image = h5f['image'][:] label = h5f['label'][:] @@ -124,50 +156,67 @@ def __getitem__(self, idx): sample["idx"] = case.split("_")[0] return sample + def box_generation(self): + max_range = self.input_size - self.crop_size + boxes = [] + for i in range(self.patch_num): + ind_h, ind_w = np.random.randint(0, max_range, size=2) + boxes.append(torch.tensor([0, ind_w, ind_h, ind_w + self.crop_size, ind_h + self.crop_size])[None]) + boxes = torch.cat(boxes, dim=0) + + return boxes # K, 5 + + -def random_rot_flip(image, label): +def random_rot_flip(image, label,label_wr): k = np.random.randint(0, 4) image = np.rot90(image, k) label = np.rot90(label, k) + label_wr = np.rot90(label_wr, k) + axis = np.random.randint(0, 2) + image = np.flip(image, axis=axis).copy() label = np.flip(label, axis=axis).copy() - return image, label + label_wr = np.flip(label_wr, 
axis=axis).copy() + return image, label,label_wr -def random_rotate(image, label, cval): + +def random_rotate(image, label,label_wr, cval): angle = np.random.randint(-20, 20) image = ndimage.rotate(image, angle, order=0, reshape=False) - label = ndimage.rotate(label, angle, order=0, - reshape=False, mode="constant", cval=cval) - return image, label + label = ndimage.rotate(label, angle, order=0,reshape=False, mode="constant", cval=cval) + label_wr = ndimage.rotate(label_wr, angle, order=0,reshape=False, mode="constant", cval=cval) + return image, label,label_wr class RandomGenerator(object): def __init__(self, output_size): self.output_size = output_size - + #'random_walker':label_wr def __call__(self, sample): - image, label = sample['image'], sample['label'] + image, label ,label_wr= sample['image'], sample['label'], sample['random_walker'] # ind = random.randrange(0, img.shape[0]) # image = img[ind, ...] # label = lab[ind, ...] if random.random() > 0.5: - image, label = random_rot_flip(image, label) + image, label,label_wr = random_rot_flip(image, label,label_wr) elif random.random() > 0.5: if 4 in np.unique(label): - image, label = random_rotate(image, label, cval=4) + image, label,label_wr = random_rotate(image, label,label_wr, cval=4) else: - image, label = random_rotate(image, label, cval=0) + image, label,label_wr = random_rotate(image, label,label_wr, cval=0) x, y = image.shape - image = zoom( - image, (self.output_size[0] / x, self.output_size[1] / y), order=0) - label = zoom( - label, (self.output_size[0] / x, self.output_size[1] / y), order=0) - image = torch.from_numpy( - image.astype(np.float32)).unsqueeze(0) + image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0) + label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0) + label_wr = zoom(label_wr, (self.output_size[0] / x, self.output_size[1] / y), order=0) + + image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0) label = torch.from_numpy(label.astype(np.uint8)) - sample = {'image': image, 'label': label} + label_wr = torch.from_numpy(label_wr.astype(np.uint8)) + + sample = {'image': image, 'label': label,'random_walker':label_wr} return sample diff --git a/code/dataloaders/dataset_semi_mscmr.py b/code/dataloaders/dataset_semi_mscmr.py new file mode 100644 index 0000000..68e0f38 --- /dev/null +++ b/code/dataloaders/dataset_semi_mscmr.py @@ -0,0 +1,271 @@ +import itertools +import os +import random +import re +from glob import glob +import math +import cv2 +import h5py +import numpy as np +import torch +from scipy import ndimage +from scipy.ndimage.interpolation import zoom +from torch.utils.data import Dataset +from torch.utils.data.sampler import Sampler +import logging + + +def read_txt(list_path): + """读取txt文件中的内容""" + sample_list = [] + with open(list_path, "r") as f: #按行读取txt中的图片名称 + for line in f.readlines(): + line = line.strip('\n') + sample_list.append(line) + return sample_list + + +def pseudo_label_generator_acdc(data, seed, beta=100, mode='bf'): + from skimage.exposure import rescale_intensity + from skimage.segmentation import random_walker + if 1 not in np.unique(seed) or 2 not in np.unique(seed) or 3 not in np.unique(seed): + pseudo_label = np.zeros_like(seed) + else: + markers = np.ones_like(seed) + markers[seed == 4] = 0 + markers[seed == 0] = 1 + markers[seed == 1] = 2 + markers[seed == 2] = 3 + markers[seed == 3] = 4 + sigma = 0.35 + data = rescale_intensity(data, in_range=(-sigma, 1 + sigma), + out_range=(-1, 1)) + segmentation = 
random_walker(data, markers, beta, mode) + pseudo_label = segmentation - 1 + return pseudo_label +class BaseDataSets(Dataset): + def __init__(self, base_dir=None, num=4, labeled_type="labeled", split='train', transform=None, sup_type="label",label_ratio=0.1): + self._base_dir = base_dir + self.sample_list = [] + self.split = split + self.sup_type = sup_type + self.transform = transform + self.num = num + self.labeled_type = labeled_type + self.label_ratio = label_ratio + self.input_size = 256 + self.crop_size = 128 + self.patch_num=1 + + all_train_ids = read_txt("/mnt/sdd/tb/data/MSCMR/train_volumes.list") + val_ids = read_txt("/mnt/sdd/tb/data/MSCMR/val_volumes.list") + test_ids = read_txt("/mnt/sdd/tb/data/MSCMR/test_volumes.list") + all_train_ids.sort() + labeled_ids = all_train_ids[:math.ceil(len(all_train_ids) * self.label_ratio)] + # str_labeled_ids=labeled_ids[:len(labeled_ids)] + + unlabeled_ids = [i for i in all_train_ids if i not in labeled_ids] + # unlabeled_ids = all_train_ids[int(len(all_train_ids) * self.label_ratio):] + + if self.split == 'train': + self.all_slices = os.listdir(self._base_dir + "/MSCMR_training_slices") + self.sample_list = [] + + if self.labeled_type == "labeled": + print("Labeled patients IDs", labeled_ids) + for ids in labeled_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_slices)) + self.sample_list.extend(new_data_list) + print("total labeled {} samples".format(len(self.sample_list))) + logging.info("Unlabeled patients IDs:") + logging.info(str(labeled_ids)) + + + if self.labeled_type == "unlabeled": + print("Unlabeled patients IDs", unlabeled_ids) + for ids in unlabeled_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_slices)) + self.sample_list.extend(new_data_list) + print("total unlabeled {} samples".format(len(self.sample_list))) + logging.info("Unlabeled patients IDs:") + logging.info(str(unlabeled_ids)) + + + if self.split == 'val': + self.all_volumes = os.listdir( + self._base_dir + "/MSCMR_training_volumes") + self.sample_list = [] + for ids in val_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_volumes)) + self.sample_list.extend(new_data_list) + + if self.split == 'test': + self.all_volumes = os.listdir( + self._base_dir + "/MSCMR_training_volumes") + self.sample_list = [] + for ids in test_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_volumes)) + self.sample_list.extend(new_data_list) + + def __len__(self): + return len(self.sample_list) + + def __getitem__(self, idx): + case = self.sample_list[idx] + if self.split == "train": + h5f = h5py.File(self._base_dir +"/MSCMR_training_slices/{}".format(case), 'r') + else: + h5f = h5py.File(self._base_dir +"/MSCMR_training_volumes/{}".format(case), 'r') + boxes = self.box_generation() + + image = h5f['image'][:] + label = h5f['label'][:] + sample = {'image': image, 'label': label} + if self.split == "train": + image = h5f['image'][:] + + label_wr = pseudo_label_generator_acdc(image, h5f["scribble"][:]) + label = h5f['scribble'][:] + sample = {'image': image, 'label': label,'random_walker':label_wr} + sample = self.transform(sample) + + crop_images = [] + for i in range(len(boxes)): + box = boxes[i][1:] + crop_images.append(sample['image'][:, box[1]:box[3], box[0]:box[2]].clone()[None]) + crop_images = torch.cat(crop_images, dim=0) + # crop_images=(sample['image'][:, box[1]:box[3], 
box[0]:box[2]].clone()[None]) + sample['boxes']=boxes + sample['crop_images']=crop_images + + else: + image = h5f['image'][:] + label = h5f['label'][:] + label[label==200]=2 + label[label==500]=3 + label[label==600]=1 + image=image.astype(np.float32) + label=label.astype(np.uint8) + # x, y = image.shape[1],image.shape[2] + # image = zoom(image, (self.input_size[0] / x, self.input_size[1] / y), order=0) + # label = zoom(label, (self.input_size[0] / x, self.input_size[1] / y), order=0) + + sample = {'image': image, 'label': label} + sample["idx"] = case.split("_")[0] + return sample + + def box_generation(self): + max_range = self.input_size - self.crop_size + boxes = [] + for i in range(self.patch_num): + ind_h, ind_w = np.random.randint(0, max_range, size=2) + boxes.append(torch.tensor([0, ind_w, ind_h, ind_w + self.crop_size, ind_h + self.crop_size])[None]) + boxes = torch.cat(boxes, dim=0) + + return boxes # K, 5 + + + +def random_rot_flip(image, label,label_wr): + k = np.random.randint(0, 4) + image = np.rot90(image, k) + label = np.rot90(label, k) + label_wr = np.rot90(label_wr, k) + + axis = np.random.randint(0, 2) + + image = np.flip(image, axis=axis).copy() + label = np.flip(label, axis=axis).copy() + label_wr = np.flip(label_wr, axis=axis).copy() + + return image, label,label_wr + + +def random_rotate(image, label,label_wr, cval): + angle = np.random.randint(-20, 20) + image = ndimage.rotate(image, angle, order=0, reshape=False) + label = ndimage.rotate(label, angle, order=0,reshape=False, mode="constant", cval=cval) + label_wr = ndimage.rotate(label_wr, angle, order=0,reshape=False, mode="constant", cval=cval) + return image, label,label_wr + + +class RandomGenerator(object): + def __init__(self, output_size): + self.output_size = output_size + #'random_walker':label_wr + def __call__(self, sample): + image, label ,label_wr= sample['image'], sample['label'], sample['random_walker'] + # ind = random.randrange(0, img.shape[0]) + # image = img[ind, ...] + # label = lab[ind, ...] + if random.random() > 0.5: + image, label,label_wr = random_rot_flip(image, label,label_wr) + elif random.random() > 0.5: + if 4 in np.unique(label): + image, label,label_wr = random_rotate(image, label,label_wr, cval=4) + else: + image, label,label_wr = random_rotate(image, label,label_wr, cval=0) + x, y = image.shape + image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0) + label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0) + label_wr = zoom(label_wr, (self.output_size[0] / x, self.output_size[1] / y), order=0) + + image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0) + label = torch.from_numpy(label.astype(np.uint8)) + label_wr = torch.from_numpy(label_wr.astype(np.uint8)) + + sample = {'image': image, 'label': label,'random_walker':label_wr} + return sample + + +class TwoStreamBatchSampler(Sampler): + """Iterate two sets of indices + + An 'epoch' is one iteration through the primary indices. + During the epoch, the secondary indices are iterated through + as many times as needed. 
+ """ + + def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size): + self.primary_indices = primary_indices + self.secondary_indices = secondary_indices + self.secondary_batch_size = secondary_batch_size + self.primary_batch_size = batch_size - secondary_batch_size + + assert len(self.primary_indices) >= self.primary_batch_size > 0 + assert len(self.secondary_indices) >= self.secondary_batch_size > 0 + + def __iter__(self): + primary_iter = iterate_once(self.primary_indices) + secondary_iter = iterate_eternally(self.secondary_indices) + return ( + primary_batch + secondary_batch + for (primary_batch, secondary_batch) + in zip(grouper(primary_iter, self.primary_batch_size), + grouper(secondary_iter, self.secondary_batch_size)) + ) + + def __len__(self): + return len(self.primary_indices) // self.primary_batch_size + + +def iterate_once(iterable): + return np.random.permutation(iterable) + + +def iterate_eternally(indices): + def infinite_shuffles(): + while True: + yield np.random.permutation(indices) + return itertools.chain.from_iterable(infinite_shuffles()) + + +def grouper(iterable, n): + "Collect data into fixed-length chunks or blocks" + # grouper('ABCDEFG', 3) --> ABC DEF" + args = [iter(iterable)] * n + return zip(*args) diff --git a/code/dataloaders/dataset_semi_mscmr_v5.py b/code/dataloaders/dataset_semi_mscmr_v5.py new file mode 100644 index 0000000..6f66d30 --- /dev/null +++ b/code/dataloaders/dataset_semi_mscmr_v5.py @@ -0,0 +1,276 @@ +import itertools +import os +import random +import re +from glob import glob + +import cv2 +import h5py +import numpy as np +import torch +from scipy import ndimage +from scipy.ndimage.interpolation import zoom +from torch.utils.data import Dataset +from torch.utils.data.sampler import Sampler + +def read_txt(list_path): + """读取txt文件中的内容""" + sample_list = [] + with open(list_path, "r") as f: #按行读取txt中的图片名称 + for line in f.readlines(): + line = line.strip('\n') + sample_list.append(line) + return sample_list + +def pseudo_label_generator_acdc(data, seed, beta=100, mode='bf'): + from skimage.exposure import rescale_intensity + from skimage.segmentation import random_walker + if 1 not in np.unique(seed) or 2 not in np.unique(seed) or 3 not in np.unique(seed): + pseudo_label = np.zeros_like(seed) + else: + markers = np.ones_like(seed) + markers[seed == 4] = 0 + markers[seed == 0] = 1 + markers[seed == 1] = 2 + markers[seed == 2] = 3 + markers[seed == 3] = 4 + sigma = 0.35 + data = rescale_intensity(data, in_range=(-sigma, 1 + sigma), + out_range=(-1, 1)) + segmentation = random_walker(data, markers, beta, mode) + pseudo_label = segmentation - 1 + return pseudo_label +class BaseDataSets(Dataset): + def __init__(self, base_dir=None, num=4, labeled_type="labeled", split='train', transform=None, fold="fold1", sup_type="label",label_ratio=0.1): + self._base_dir = base_dir + self.sample_list = [] + self.split = split + self.sup_type = sup_type + self.transform = transform + self.num = num + self.labeled_type = labeled_type + self.input_size = 256 + self.crop_size = 128 + self.patch_num=1 + self.label_ratio=label_ratio + random.seed(42) + train_ids, test_ids = self._get_fold_ids(fold) + # all_labeled_ids = ["patient{:0>3}".format(10 * i) for i in range(1, 11)] + random.shuffle(train_ids) + all_labeled_ids = train_ids[:int(len(train_ids) * self.label_ratio)] + if self.split == 'train': + self.all_slices = os.listdir( + self._base_dir + "/MSCMR_training_slices") + self.sample_list = [] + labeled_ids = [i for i in 
all_labeled_ids if i in train_ids] + unlabeled_ids = [i for i in train_ids if i not in labeled_ids] + if self.labeled_type == "labeled": + print("Labeled patients IDs", labeled_ids) + for ids in labeled_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_slices)) + self.sample_list.extend(new_data_list) + print("total labeled {} samples".format(len(self.sample_list))) + else: + print("Unlabeled patients IDs", unlabeled_ids) + for ids in unlabeled_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_slices)) + self.sample_list.extend(new_data_list) + print("total unlabeled {} samples".format(len(self.sample_list))) + + elif self.split == 'val': + self.all_volumes = os.listdir( + self._base_dir + "/MSCMR_training_volumes") + self.sample_list = [] + for ids in test_ids: + new_data_list = list(filter(lambda x: re.match( + '{}.*'.format(ids), x) != None, self.all_volumes)) + self.sample_list.extend(new_data_list) + + # if num is not None and self.split == "train": + # self.sample_list = self.sample_list[:num] + + def _get_fold_ids(self, fold): + all_cases_set = read_txt('/mnt/sdd/tb/data/MSCMR_v5/train_volumes.list') + + fold1_testing_set = read_txt('/mnt/sdd/tb/data/MSCMR_v5/train_volumes_p1.list') + fold2_testing_set = read_txt('/mnt/sdd/tb/data/MSCMR_v5/train_volumes_p2.list') + fold3_testing_set = read_txt('/mnt/sdd/tb/data/MSCMR_v5/train_volumes_p3.list') + fold4_testing_set = read_txt('/mnt/sdd/tb/data/MSCMR_v5/train_volumes_p4.list') + fold5_testing_set = read_txt('/mnt/sdd/tb/data/MSCMR_v5/train_volumes_p5.list') + + + fold1_training_set = [i for i in all_cases_set if i not in fold1_testing_set] + + fold2_training_set = [i for i in all_cases_set if i not in fold2_testing_set] + + fold3_training_set = [i for i in all_cases_set if i not in fold3_testing_set] + + fold4_training_set = [i for i in all_cases_set if i not in fold4_testing_set] + + fold5_training_set = [i for i in all_cases_set if i not in fold5_testing_set] + + + if fold == "fold1": + return [fold1_training_set, fold1_testing_set] + elif fold == "fold2": + return [fold2_training_set, fold2_testing_set] + elif fold == "fold3": + return [fold3_training_set, fold3_testing_set] + elif fold == "fold4": + return [fold4_training_set, fold4_testing_set] + elif fold == "fold5": + return [fold5_training_set, fold5_testing_set] + else: + return "ERROR KEY" + + def __len__(self): + return len(self.sample_list) + + def __getitem__(self, idx): + case = self.sample_list[idx] + if self.split == "train": + h5f = h5py.File(self._base_dir +"/MSCMR_training_slices/{}".format(case), 'r') + else: + h5f = h5py.File(self._base_dir +"/MSCMR_training_volumes/{}".format(case), 'r') + boxes = self.box_generation() + + image = h5f['image'][:] + label = h5f['label'][:] + sample = {'image': image, 'label': label} + if self.split == "train": + image = h5f['image'][:] + + label_wr = pseudo_label_generator_acdc(image, h5f["scribble"][:]) + label = h5f['scribble'][:] + sample = {'image': image, 'label': label,'random_walker':label_wr} + sample = self.transform(sample) + + crop_images = [] + for i in range(len(boxes)): + box = boxes[i][1:] + crop_images.append(sample['image'][:, box[1]:box[3], box[0]:box[2]].clone()[None]) + crop_images = torch.cat(crop_images, dim=0) + # crop_images=(sample['image'][:, box[1]:box[3], box[0]:box[2]].clone()[None]) + sample['boxes']=boxes + sample['crop_images']=crop_images + + else: + image = h5f['image'][:] + label = h5f['label'][:] + 
sample = {'image': image, 'label': label} + sample["idx"] = case.split("_")[0] + return sample + + def box_generation(self): + max_range = self.input_size - self.crop_size + boxes = [] + for i in range(self.patch_num): + ind_h, ind_w = np.random.randint(0, max_range, size=2) + boxes.append(torch.tensor([0, ind_w, ind_h, ind_w + self.crop_size, ind_h + self.crop_size])[None]) + boxes = torch.cat(boxes, dim=0) + + return boxes # K, 5 + + + +def random_rot_flip(image, label,label_wr): + k = np.random.randint(0, 4) + image = np.rot90(image, k) + label = np.rot90(label, k) + label_wr = np.rot90(label_wr, k) + + axis = np.random.randint(0, 2) + + image = np.flip(image, axis=axis).copy() + label = np.flip(label, axis=axis).copy() + label_wr = np.flip(label_wr, axis=axis).copy() + + return image, label,label_wr + + +def random_rotate(image, label,label_wr, cval): + angle = np.random.randint(-20, 20) + image = ndimage.rotate(image, angle, order=0, reshape=False) + label = ndimage.rotate(label, angle, order=0,reshape=False, mode="constant", cval=cval) + label_wr = ndimage.rotate(label_wr, angle, order=0,reshape=False, mode="constant", cval=cval) + return image, label,label_wr + + +class RandomGenerator(object): + def __init__(self, output_size): + self.output_size = output_size + #'random_walker':label_wr + def __call__(self, sample): + image, label ,label_wr= sample['image'], sample['label'], sample['random_walker'] + # ind = random.randrange(0, img.shape[0]) + # image = img[ind, ...] + # label = lab[ind, ...] + if random.random() > 0.5: + image, label,label_wr = random_rot_flip(image, label,label_wr) + elif random.random() > 0.5: + if 4 in np.unique(label): + image, label,label_wr = random_rotate(image, label,label_wr, cval=4) + else: + image, label,label_wr = random_rotate(image, label,label_wr, cval=0) + x, y = image.shape + image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0) + label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0) + label_wr = zoom(label_wr, (self.output_size[0] / x, self.output_size[1] / y), order=0) + + image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0) + label = torch.from_numpy(label.astype(np.uint8)) + label_wr = torch.from_numpy(label_wr.astype(np.uint8)) + + sample = {'image': image, 'label': label,'random_walker':label_wr} + return sample + + +class TwoStreamBatchSampler(Sampler): + """Iterate two sets of indices + + An 'epoch' is one iteration through the primary indices. + During the epoch, the secondary indices are iterated through + as many times as needed. 
+ """ + + def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size): + self.primary_indices = primary_indices + self.secondary_indices = secondary_indices + self.secondary_batch_size = secondary_batch_size + self.primary_batch_size = batch_size - secondary_batch_size + + assert len(self.primary_indices) >= self.primary_batch_size > 0 + assert len(self.secondary_indices) >= self.secondary_batch_size > 0 + + def __iter__(self): + primary_iter = iterate_once(self.primary_indices) + secondary_iter = iterate_eternally(self.secondary_indices) + return ( + primary_batch + secondary_batch + for (primary_batch, secondary_batch) + in zip(grouper(primary_iter, self.primary_batch_size), + grouper(secondary_iter, self.secondary_batch_size)) + ) + + def __len__(self): + return len(self.primary_indices) // self.primary_batch_size + + +def iterate_once(iterable): + return np.random.permutation(iterable) + + +def iterate_eternally(indices): + def infinite_shuffles(): + while True: + yield np.random.permutation(indices) + return itertools.chain.from_iterable(infinite_shuffles()) + + +def grouper(iterable, n): + "Collect data into fixed-length chunks or blocks" + # grouper('ABCDEFG', 3) --> ABC DEF" + args = [iter(iterable)] * n + return zip(*args) diff --git a/code/mscmr.sh b/code/mscmr.sh new file mode 100644 index 0000000..2d72a19 --- /dev/null +++ b/code/mscmr.sh @@ -0,0 +1,15 @@ +CUDA_VISIBLE_DEVICES=0 python -u train_Trans_teacher_21_mscmr.py --base_lr 0.005 --num_classes 4 & +CUDA_VISIBLE_DEVICES=1 python -u train_Trans_teacher_21_mscmr.py --base_lr 0.01 --num_classes 4 & +CUDA_VISIBLE_DEVICES=3 python -u train_Trans_teacher_21_mscmr.py --base_lr 0.001 --num_classes 4 & +CUDA_VISIBLE_DEVICES=4 python -u train_Trans_teacher_21_mscmr.py --base_lr 0.0005 --num_classes 4 & +CUDA_VISIBLE_DEVICES=5 python -u train_Trans_teacher_21_mscmr.py --base_lr 0.0001 --num_classes 4 & +CUDA_VISIBLE_DEVICES=6 python -u train_Trans_teacher_21_mscmr.py --seed 2023 --base_lr 0.005 --num_classes 4 + + + + + + + + + diff --git a/code/networks/attention.py b/code/networks/attention.py index a9e9e6c..9084b8e 100755 --- a/code/networks/attention.py +++ b/code/networks/attention.py @@ -108,3 +108,88 @@ def forward(self, x): class Flatten(nn.Module): def forward(self, x): return x.view(x.shape[0], -1) + + +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +import math +import torch.nn.functional as F + +class Self_Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr_ratio > 1: + x_ = x.permute(0, 2, 1).reshape(B, C, H, W) # [bz, 64, 128, 128] + x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1) # [bz, 64, 16, 16] + x_ = self.norm(x_) + kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + else: + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn_ = (q @ k.transpose(-2, -1)) + + '''if self.sr_ratio == 1: + attn_ = attn_ + attn_.permute(0, 1, 3, 2)''' + + attn_ = (attn_ * self.scale).softmax(dim=-1) + attn = self.attn_drop(attn_) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return attn_ + # # ##### + # # #attn_ = attn_.clone().mean(1).reshape(-1, H, W, attn.shape[-1],) + # # #attn_ = F.avg_pool2d(attn_.permute(0,3,1,2), kernel_size=self.sr_ratio, stride=self.sr_ratio) + # # #attn_ = attn_.reshape(-1, attn.shape[-1], attn.shape[-1]) + # attn_copy = attn_.clone().reshape(B, self.num_heads, H, W) # [bz, 1, 128, 128, 16*16] + # if self.sr_ratio > 1: + # attn_copy = F.avg_pool3d(attn_copy, kernel_size=(self.sr_ratio, self.sr_ratio, 1), stride=(self.sr_ratio, self.sr_ratio, 1)) + # # #attn_copy = attn_copy.reshape(B, self.num_heads, self.sr_ratio, -1, W, attn.shape[-1],).mean(2) + # # #attn_copy = attn_copy.reshape(B, self.num_heads, attn_copy.shape[2], self.sr_ratio, -1, attn.shape[-1],).mean(3) + # # #print(attn_copy.shape) + # # #attn_ = F.avg_pool2d(attn_.permute(0,3,1,2), kernel_size=self.sr_ratio, stride=self.sr_ratio) + # attn_copy = attn_copy.reshape(-1, self.num_heads, attn.shape[-1], attn.shape[-1]) + # # ##### + + # return attn_copy \ No newline at end of file diff --git a/code/networks/config.py b/code/networks/config.py new file mode 100644 index 0000000..3abbbca --- /dev/null +++ b/code/networks/config.py @@ -0,0 +1,229 @@ +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# --------------------------------------------------------' + +import os +import yaml +from yacs.config import CfgNode as CN + +_C = CN() + +# Base config files +_C.BASE = [''] + +# ----------------------------------------------------------------------------- +# Data settings +# 
----------------------------------------------------------------------------- +_C.DATA = CN() +# Batch size for a single GPU, could be overwritten by command line argument +_C.DATA.BATCH_SIZE = 128 +# Path to dataset, could be overwritten by command line argument +_C.DATA.DATA_PATH = '' +# Dataset name +_C.DATA.DATASET = 'imagenet' +# Input image size +_C.DATA.IMG_SIZE = 224 +# Interpolation to resize image (random, bilinear, bicubic) +_C.DATA.INTERPOLATION = 'bicubic' +# Use zipped dataset instead of folder dataset +# could be overwritten by command line argument +_C.DATA.ZIP_MODE = False +# Cache Data in Memory, could be overwritten by command line argument +_C.DATA.CACHE_MODE = 'part' +# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU. +_C.DATA.PIN_MEMORY = True +# Number of data loading threads +_C.DATA.NUM_WORKERS = 8 + +# ----------------------------------------------------------------------------- +# Model settings +# ----------------------------------------------------------------------------- +_C.MODEL = CN() +# Model type +_C.MODEL.TYPE = 'swin' +# Model name +_C.MODEL.NAME = 'swin_tiny_patch4_window7_224' +# Checkpoint to resume, could be overwritten by command line argument +_C.MODEL.PRETRAIN_CKPT = './pretrained_ckpt/swin_tiny_patch4_window7_224.pth' +_C.MODEL.RESUME = '' +# Number of classes, overwritten in data preparation +_C.MODEL.NUM_CLASSES = 1000 +# Dropout rate +_C.MODEL.DROP_RATE = 0.0 +# Drop path rate +_C.MODEL.DROP_PATH_RATE = 0.1 +# Label Smoothing +_C.MODEL.LABEL_SMOOTHING = 0.1 + +# Swin Transformer parameters +_C.MODEL.SWIN = CN() +_C.MODEL.SWIN.PATCH_SIZE = 4 +_C.MODEL.SWIN.IN_CHANS = 3 +_C.MODEL.SWIN.EMBED_DIM = 96 +_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24] +_C.MODEL.SWIN.WINDOW_SIZE = 7 +_C.MODEL.SWIN.MLP_RATIO = 4. 
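# NOTE (illustrative sketch, not part of this diff): get_config(args), defined at the end
# of this file, expects an argparse-style namespace exposing the attributes read in
# update_config(). A hypothetical minimal caller:
#
#   import argparse
#   args = argparse.Namespace(
#       cfg="configs/swin_tiny_patch4_window7_224_lite.yaml",
#       opts=None, batch_size=None, zip=False, cache_mode=None, resume=None,
#       accumulation_steps=None, use_checkpoint=False, amp_opt_level=None,
#       tag=None, eval=False, throughput=False)
#   config = get_config(args)        # clones _C, then merges the yaml on top
#   print(config.MODEL.PRETRAIN_CKPT)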
+_C.MODEL.SWIN.QKV_BIAS = True +_C.MODEL.SWIN.QK_SCALE = False +_C.MODEL.SWIN.APE = False +_C.MODEL.SWIN.PATCH_NORM = True +_C.MODEL.SWIN.FINAL_UPSAMPLE= "expand_first" + +# ----------------------------------------------------------------------------- +# Training settings +# ----------------------------------------------------------------------------- +_C.TRAIN = CN() +_C.TRAIN.START_EPOCH = 0 +_C.TRAIN.EPOCHS = 300 +_C.TRAIN.WARMUP_EPOCHS = 20 +_C.TRAIN.WEIGHT_DECAY = 0.05 +_C.TRAIN.BASE_LR = 5e-4 +_C.TRAIN.WARMUP_LR = 5e-7 +_C.TRAIN.MIN_LR = 5e-6 +# Clip gradient norm +_C.TRAIN.CLIP_GRAD = 5.0 +# Auto resume from latest checkpoint +_C.TRAIN.AUTO_RESUME = True +# Gradient accumulation steps +# could be overwritten by command line argument +_C.TRAIN.ACCUMULATION_STEPS = 0 +# Whether to use gradient checkpointing to save memory +# could be overwritten by command line argument +_C.TRAIN.USE_CHECKPOINT = False + +# LR scheduler +_C.TRAIN.LR_SCHEDULER = CN() +_C.TRAIN.LR_SCHEDULER.NAME = 'cosine' +# Epoch interval to decay LR, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 +# LR decay rate, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 + +# Optimizer +_C.TRAIN.OPTIMIZER = CN() +_C.TRAIN.OPTIMIZER.NAME = 'adamw' +# Optimizer Epsilon +_C.TRAIN.OPTIMIZER.EPS = 1e-8 +# Optimizer Betas +_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) +# SGD momentum +_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9 + +# ----------------------------------------------------------------------------- +# Augmentation settings +# ----------------------------------------------------------------------------- +_C.AUG = CN() +# Color jitter factor +_C.AUG.COLOR_JITTER = 0.4 +# Use AutoAugment policy. "v0" or "original" +_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1' +# Random erase prob +_C.AUG.REPROB = 0.25 +# Random erase mode +_C.AUG.REMODE = 'pixel' +# Random erase count +_C.AUG.RECOUNT = 1 +# Mixup alpha, mixup enabled if > 0 +_C.AUG.MIXUP = 0.8 +# Cutmix alpha, cutmix enabled if > 0 +_C.AUG.CUTMIX = 1.0 +# Cutmix min/max ratio, overrides alpha and enables cutmix if set +_C.AUG.CUTMIX_MINMAX = False +# Probability of performing mixup or cutmix when either/both is enabled +_C.AUG.MIXUP_PROB = 1.0 +# Probability of switching to cutmix when both mixup and cutmix enabled +_C.AUG.MIXUP_SWITCH_PROB = 0.5 +# How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem" +_C.AUG.MIXUP_MODE = 'batch' + +# ----------------------------------------------------------------------------- +# Testing settings +# ----------------------------------------------------------------------------- +_C.TEST = CN() +# Whether to use center crop when testing +_C.TEST.CROP = True + +# ----------------------------------------------------------------------------- +# Misc +# ----------------------------------------------------------------------------- +# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2') +# overwritten by command line argument +_C.AMP_OPT_LEVEL = '' +# Path to output folder, overwritten by command line argument +_C.OUTPUT = '' +# Tag of experiment, overwritten by command line argument +_C.TAG = 'default' +# Frequency to save checkpoint +_C.SAVE_FREQ = 1 +# Frequency to logging info +_C.PRINT_FREQ = 10 +# Fixed random seed +_C.SEED = 0 +# Perform evaluation only, overwritten by command line argument +_C.EVAL_MODE = False +# Test throughput only, overwritten by command line argument +_C.THROUGHPUT_MODE = False +# local rank for DistributedDataParallel, given by command line argument +_C.LOCAL_RANK = 0 + + +def _update_config_from_file(config, cfg_file): + config.defrost() + with open(cfg_file, 'r') as f: + yaml_cfg = yaml.load(f, Loader=yaml.FullLoader) + + for cfg in yaml_cfg.setdefault('BASE', ['']): + if cfg: + _update_config_from_file( + config, os.path.join(os.path.dirname(cfg_file), cfg) + ) + print('=> merge config from {}'.format(cfg_file)) + config.merge_from_file(cfg_file) + config.freeze() + + +def update_config(config, args): + _update_config_from_file(config, args.cfg) + + config.defrost() + if args.opts: + config.merge_from_list(args.opts) + + # merge from specific arguments + if args.batch_size: + config.DATA.BATCH_SIZE = args.batch_size + if args.zip: + config.DATA.ZIP_MODE = True + if args.cache_mode: + config.DATA.CACHE_MODE = args.cache_mode + if args.resume: + config.MODEL.RESUME = args.resume + if args.accumulation_steps: + config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps + if args.use_checkpoint: + config.TRAIN.USE_CHECKPOINT = True + if args.amp_opt_level: + config.AMP_OPT_LEVEL = args.amp_opt_level + if args.tag: + config.TAG = args.tag + if args.eval: + config.EVAL_MODE = True + if args.throughput: + config.THROUGHPUT_MODE = True + + config.freeze() + + +def get_config(args): + """Get a yacs CfgNode object with default values.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + config = _C.clone() + update_config(config, args) + + return config diff --git a/code/networks/decode_head.py b/code/networks/decode_head.py new file mode 100755 index 0000000..f28e2f1 --- /dev/null +++ b/code/networks/decode_head.py @@ -0,0 +1,285 @@ +from abc import ABCMeta, abstractmethod + +import torch +import torch.nn as nn +from mmcv.cnn import normal_init +from mmcv.runner import auto_fp16, force_fp32 + +from mmseg.core import build_pixel_sampler +from mmseg.ops import resize +# from ..builder import build_loss +# from ..losses import accuracy + + +class BaseDecodeHead(nn.Module, metaclass=ABCMeta): + """Base class for BaseDecodeHead. + + Args: + in_channels (int|Sequence[int]): Input channels. + channels (int): Channels after modules, before conv_seg. + num_classes (int): Number of classes. + dropout_ratio (float): Ratio of dropout layer. Default: 0.1. + conv_cfg (dict|None): Config of conv layers. Default: None. 
+ norm_cfg (dict|None): Config of norm layers. Default: None. + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU') + in_index (int|Sequence[int]): Input feature index. Default: -1 + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. + 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. + Default: None. + loss_decode (dict): Config of decode loss. + Default: dict(type='CrossEntropyLoss'). + ignore_index (int | None): The label index to be ignored. When using + masked BCE loss, ignore_index should be set to None. Default: 255 + sampler (dict|None): The config of segmentation map sampler. + Default: None. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + """ + + def __init__(self, + in_channels, + channels, + *, + num_classes, + dropout_ratio=0.1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + in_index=-1, + input_transform=None, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + decoder_params=None, + ignore_index=255, + sampler=None, + align_corners=False): + super(BaseDecodeHead, self).__init__() + self._init_inputs(in_channels, in_index, input_transform) + self.channels = channels + self.num_classes = num_classes + self.dropout_ratio = dropout_ratio + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.in_index = in_index + # self.loss_decode = build_loss(loss_decode) + self.ignore_index = ignore_index + self.align_corners = align_corners + + if sampler is not None: + self.sampler = build_pixel_sampler(sampler, context=self) + else: + self.sampler = None + + self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1) + if dropout_ratio > 0: + self.dropout = nn.Dropout2d(dropout_ratio) + else: + self.dropout = None + self.fp16_enabled = False + + def extra_repr(self): + """Extra repr.""" + s = f'input_transform={self.input_transform}, ' \ + f'ignore_index={self.ignore_index}, ' \ + f'align_corners={self.align_corners}' + return s + + def _init_inputs(self, in_channels, in_index, input_transform): + """Check and initialize input transforms. + + The in_channels, in_index and input_transform must match. + Specifically, when input_transform is None, only single feature map + will be selected. So in_channels and in_index must be of type int. + When input_transform + + Args: + in_channels (int|Sequence[int]): Input channels. + in_index (int|Sequence[int]): Input feature index. + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. + 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. 
+ """ + + if input_transform is not None: + assert input_transform in ['resize_concat', 'multiple_select'] + self.input_transform = input_transform + self.in_index = in_index + if input_transform is not None: + assert isinstance(in_channels, (list, tuple)) + assert isinstance(in_index, (list, tuple)) + assert len(in_channels) == len(in_index) + if input_transform == 'resize_concat': + self.in_channels = sum(in_channels) + else: + self.in_channels = in_channels + else: + assert isinstance(in_channels, int) + assert isinstance(in_index, int) + self.in_channels = in_channels + + def init_weights(self): + """Initialize weights of classification layer.""" + normal_init(self.conv_seg, mean=0, std=0.01) + + def _transform_inputs(self, inputs): + """Transform inputs for decoder. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + Tensor: The transformed inputs + """ + + if self.input_transform == 'resize_concat': + inputs = [inputs[i] for i in self.in_index] + upsampled_inputs = [ + resize( + input=x, + size=inputs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + inputs = torch.cat(upsampled_inputs, dim=1) + elif self.input_transform == 'multiple_select': + inputs = [inputs[i] for i in self.in_index] + else: + inputs = inputs[self.in_index] + + return inputs + + @auto_fp16() + @abstractmethod + def forward(self, inputs): + """Placeholder of forward function.""" + pass + + def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + seg_logits = self.forward(inputs) + losses = self.losses(seg_logits, gt_semantic_seg) + return losses + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. 
+ """ + return self.forward(inputs) + + def cls_seg(self, feat): + """Classify each pixel.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.conv_seg(feat) + return output + + @force_fp32(apply_to=('seg_logit', )) + def losses(self, seg_logit, seg_label): + """Compute segmentation loss.""" + loss = dict() + seg_logit = resize( + input=seg_logit, + size=seg_label.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + if self.sampler is not None: + seg_weight = self.sampler.sample(seg_logit, seg_label) + else: + seg_weight = None + seg_label = seg_label.squeeze(1) + loss['loss_seg'] = self.loss_decode( + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + # loss['acc_seg'] = accuracy(seg_logit, seg_label) + return seg_logit + + +# class Classification_head(nn.Module): + +# def __init__(self, embed_dim,num_classes, ndf=64, out_channel=1): +# super(Classification_head, self).__init__() +# # downsample 16 +# self.conv0 = nn.Conv3d(1, ndf, kernel_size=4, stride=2, padding=1) +# self.conv1 = nn.Conv3d(ndf, ndf*2, kernel_size=4, stride=2, padding=1) +# self.conv2 = nn.Conv3d(ndf*2, ndf*4, kernel_size=4, stride=2, padding=1) +# self.conv3 = nn.Conv3d(ndf*4, ndf*8, kernel_size=4, stride=2, padding=1) +# self.avgpool = nn.AvgPool3d((7, 7, 5)) +# # self.avgpool = nn.AvgPool3d((5, 7, 7)) +# # self.avgpool = nn.AvgPool3d((5, 16, 16)) +# self.fc1 = nn.Linear(ndf*8, 512) +# self.fc2 = nn.Linear(512, num_classes) + +# self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) +# self.dropout = nn.Dropout3d(0.5) +# self.Softmax = nn.Softmax() +# self.out = nn.Conv3d(ndf*2,num_classes,kernel_size=1) + +# self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + +# def forward(self, map): +# batch_size = map.shape[0] +# map_feature = self.conv0(map)#(2,112,112,80)->(64,56,56,40) +# x = self.leaky_relu(map_feature) +# x = self.dropout(x) + +# x = self.conv1(x)#(64,56,56,40)->(128,28,28,20) +# x = self.leaky_relu(x) +# x = self.dropout(x) +# #x = self.out(x) + +# x = self.conv2(x)#(128,28,28,20)->(256,14,14,10) +# x = self.leaky_relu(x) +# x = self.dropout(x) + +# x = self.conv3(x)#(256,14,14,10)->(512,7,7,5) +# x = self.leaky_relu(x) + +# x = self.avgpool(x)#(512) + +# x = x.view(batch_size, -1) +# x = self.fc1(x) +# x = self.fc2(x) + +# return x diff --git a/code/networks/head.py b/code/networks/head.py new file mode 100644 index 0000000..36103fd --- /dev/null +++ b/code/networks/head.py @@ -0,0 +1,365 @@ +from networks.decode_head import BaseDecodeHead +from mmseg.ops import resize +from mmseg.models.utils import * +from mmcv.cnn import ConvModule +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +from torch.distributions.uniform import Uniform +import numpy as np + +class MLP(nn.Module): + """ + Linear Embedding + """ + def __init__(self, input_dim=2048, embed_dim=768): + super().__init__() + self.proj = nn.Linear(input_dim, embed_dim) + + def forward(self, x): + x = x.flatten(2).transpose(1, 2) + x = self.proj(x) + return x + + +class SegFormerHead(BaseDecodeHead): + """ + SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers + """ + def __init__(self, feature_strides, **kwargs): + super(SegFormerHead, self).__init__(input_transform='multiple_select', **kwargs) + assert len(feature_strides) == len(self.in_channels) + assert min(feature_strides) == feature_strides[0] + self.feature_strides = feature_strides + + c1_in_channels, 
c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels + + decoder_params = kwargs['decoder_params'] + embedding_dim = decoder_params['embed_dim'] + + self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim) + self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim) + self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim) + self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim) + + self.linear_fuse = ConvModule( + in_channels=embedding_dim*4, + out_channels=embedding_dim, + kernel_size=1, + norm_cfg=dict(type='BN', requires_grad=True) + # norm_cfg=dict(type='SyncBN', requires_grad=True) + ) + + self.linear_pred = nn.Conv2d(embedding_dim, self.num_classes, kernel_size=1) + self.pre_linear_pred = nn.Conv2d(64, self.num_classes, kernel_size=1) + # add classifiation + self.conv0 = nn.Conv2d(1, embedding_dim, kernel_size=4, stride=2, padding=1) + self.conv1 = nn.Conv2d(embedding_dim, embedding_dim*4, kernel_size=4, stride=2, padding=1) + self.conv2 = nn.Conv2d(embedding_dim*4, embedding_dim*8, kernel_size=4, stride=2, padding=1) + self.conv3 = nn.Conv2d(embedding_dim*8, embedding_dim*16, kernel_size=4, stride=2, padding=1) + self.avgpool = nn.AvgPool2d((16, 16)) + # self.avgpool = nn.AvgPool3d((5, 7, 7)) + # self.64vgpool = nn.AvgPool3d((5, 16, 16)) + self.fc1 = nn.Linear(embedding_dim*4, 512) + self.fc2 = nn.Linear(512, self.num_classes) + + self.fc3 = nn.Linear(embedding_dim, 128) + self.fc4 = nn.Linear(128, self.num_classes) + + self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + self.dropout = nn.Dropout2d(0.5) + self.Softmax = nn.Softmax() + self.out = nn.Conv2d(embedding_dim*8,self.num_classes,kernel_size=1) + + + + + + def forward(self, inputs): + c1, c2, c3, c4 = inputs # [bz,64,128,128], [bz,128,64,64], [bz,320,32,32], [bz,512,32,32] + + ############## MLP decoder on C1-C4 ########### + n, _, h, w = c4.shape + + _c4 = self.linear_c4(c4).permute(0,2,1).reshape(n, -1, c4.shape[2], c4.shape[3]) + _c4 = F.interpolate(_c4, size=c1.size()[2:],mode='bilinear',align_corners=False) # [bz,256,128,128] + + _c3_ = self.linear_c3(c3).permute(0,2,1).reshape(n, -1, c3.shape[2], c3.shape[3]) + _c3 = F.interpolate(_c3_, size=c1.size()[2:],mode='bilinear',align_corners=False) # [bz,256,128,128] + + _c2_ = self.linear_c2(c2).permute(0,2,1).reshape(n, -1, c2.shape[2], c2.shape[3]) + _c2 = F.interpolate(_c2_, size=c1.size()[2:],mode='bilinear',align_corners=False) # [bz,256,128,128] + + _c1 = self.linear_c1(c1).permute(0,2,1).reshape(n, -1, c1.shape[2], c1.shape[3]) # [bz,256,128,128] + + logit = self.linear_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1)) # [bz,256,128,128] + + x = self.dropout(logit) + x = self.linear_pred(x) # [bz,21,128,128] + + x_16 = F.interpolate(x, size=[8,8],mode='bilinear',align_corners=False) + x_32 = F.interpolate(x, size=[16,16],mode='bilinear',align_corners=False) + x_64 = F.interpolate(x, size=[32,32],mode='bilinear',align_corners=False) + + return x, x_16, x_32, x_64 + + # _c3_ = self.linear_pred(_c3_) + # _c2_ = self.linear_pred(_c2_) + # return x,x_16,_c3_,_c2_ + + + + + + +class class_Head(BaseDecodeHead): + + def __init__(self, feature_strides, **kwargs): + super(class_Head, self).__init__(input_transform='multiple_select', **kwargs) + assert len(feature_strides) == len(self.in_channels) + assert min(feature_strides) == feature_strides[0] + self.feature_strides = feature_strides + + c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels + + decoder_params = 
kwargs['decoder_params'] + embedding_dim = decoder_params['embed_dim'] + + self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim) + self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim) + self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim) + self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim) + + self.linear_fuse = ConvModule( + in_channels=embedding_dim*4, + out_channels=embedding_dim, + kernel_size=1, + norm_cfg=dict(type='BN', requires_grad=True) + # norm_cfg=dict(type='SyncBN', requires_grad=True) + ) + + self.linear_pred = nn.Conv2d(embedding_dim, self.num_classes, kernel_size=1) + self.pre_linear_pred = nn.Conv2d(64, self.num_classes, kernel_size=1) + # add classifiation + self.conv0 = nn.Conv2d(256, embedding_dim, kernel_size=4, stride=2, padding=1) + self.conv1 = nn.Conv2d(embedding_dim, embedding_dim*4, kernel_size=4, stride=2, padding=1) + self.conv2 = nn.Conv2d(embedding_dim*4, embedding_dim*8, kernel_size=4, stride=2, padding=1) + self.conv3 = nn.Conv2d(embedding_dim*8, embedding_dim*16, kernel_size=4, stride=2, padding=1) + self.avgpool = nn.AvgPool2d((4, 4)) + # self.avgpool = nn.AvgPool3d((5, 7, 7)) + # self.64vgpool = nn.AvgPool3d((5, 16, 16)) + self.fc1 = nn.Linear(embedding_dim*4, 512) + self.fc2 = nn.Linear(512, self.num_classes) + + self.fc3 = nn.Linear(embedding_dim, 128) + self.fc4 = nn.Linear(128, self.num_classes) + + self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + self.dropout = nn.Dropout2d(0.5) + self.Softmax = nn.Softmax() + self.out = nn.Conv2d(embedding_dim*8,self.num_classes,kernel_size=1) + + + + + + def forward(self, inputs): + # x = self._transform_inputs(inputs) # len=4, 1/4,1/8,1/16,1/32 + + + outs_class = [] + batch_size = inputs.shape[0] + + # for i in (range(0,self.num_classes)): + map_feature = self.conv0(inputs) + + class_1234 = self.leaky_relu(map_feature) + class_1234= self.dropout(class_1234) + + class_1234 = self.conv1(class_1234) + class_1234= self.leaky_relu(class_1234) + class_1234 = self.dropout(class_1234) + + class_1234 = self.avgpool(class_1234) + class_1234 = class_1234.view(batch_size,-1) + class_1234 = self.fc1(class_1234) + class_1234 = self.fc2(class_1234) + + outs_class=class_1234 + + + + + return outs_class + + + +class ConvBlock(nn.Module): + """two convolution layers with batch norm and leaky relu""" + + def __init__(self, in_channels, out_channels, dropout_p): + super(ConvBlock, self).__init__() + self.conv_conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.LeakyReLU(), + nn.Dropout(dropout_p), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.LeakyReLU() + ) + + def forward(self, x): + return self.conv_conv(x) + +class UpBlock(nn.Module): + """Upssampling followed by ConvBlock""" + + def __init__(self, in_channels1, in_channels2, out_channels, dropout_p, + bilinear=True): + super(UpBlock, self).__init__() + self.bilinear = bilinear + if bilinear: + self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1) + self.up = nn.Upsample( + scale_factor=2, mode='bilinear', align_corners=True) + else: + self.up = nn.ConvTranspose2d( + in_channels1, in_channels2, kernel_size=2, stride=2) + self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p) + + def forward(self, x1, x2): + if self.bilinear: + x1 = self.conv1x1(x1) + x1 = self.up(x1) + x = torch.cat([x2, x1], dim=1) + return self.conv(x) + + +class 
UpBlock1(nn.Module): + """Upssampling followed by ConvBlock""" + + def __init__(self, in_channels1, in_channels2, out_channels, dropout_p, + bilinear=True): + super(UpBlock1, self).__init__() + self.bilinear = bilinear + if bilinear: + self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1) + self.up = nn.Upsample( + scale_factor=2, mode='bilinear', align_corners=True) + else: + self.up = nn.ConvTranspose2d( + in_channels1, in_channels2, kernel_size=2, stride=2) + self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p) + + def forward(self, x1, x2): + if self.bilinear: + x1 = self.conv1x1(x1) + # x1 = self.up(x1) + # x2 = self.up(x2) + x = torch.cat([x2, x1], dim=1) + return self.conv(x) + +class Unet_Decoder(nn.Module): + def __init__(self, params): + super(Unet_Decoder, self).__init__() + self.params = params + + self.ft_chns = self.params['feature_chns'] + self.n_class = self.params['class_num'] + self.bilinear = self.params['bilinear'] + assert (len(self.ft_chns) == 4) + + # self.up1 = UpBlock( + # self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0) + self.up2 = UpBlock1( + self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.2) + self.up3 = UpBlock( + self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.4) + self.up4 = UpBlock( + self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.5) + + self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,kernel_size=3, padding=1) + + # self.out_conv_dp4 = nn.Conv2d(self.ft_chns[4], self.n_class,kernel_size=3, padding=1) + # self.out_conv_dp3 = nn.Conv2d(self.ft_chns[3], self.n_class,kernel_size=3, padding=1) + self.out_conv_dp2 = nn.Conv2d(self.ft_chns[2], self.n_class,kernel_size=3, padding=1) + self.out_conv_dp3 = nn.Conv2d(self.ft_chns[1], self.n_class,kernel_size=3, padding=1) + self.feature_noise = FeatureNoise() + + + self.out_conv_x23= nn.Conv2d(self.ft_chns[3]*2, self.ft_chns[3],kernel_size=1, padding=1) + + def forward(self, feature,shape): + x0 = feature[0] + x1 = feature[1] + x2 = feature[2] + x = feature[3] + + + + x = self.up2(x, x2) + if self.training: + dp2_out_seg = self.out_conv_dp2(Dropout(x, p=0.5)) + else: + dp2_out_seg = self.out_conv_dp2(x) + # dp2_out_seg = torch.nn.functional.interpolate(dp2_out_seg, shape) + + x = self.up3(x, x1) + if self.training: + dp3_out_seg = self.out_conv_dp3(FeatureDropout(x)) + else: + dp3_out_seg = self.out_conv_dp3(x) + # dp3_out_seg = torch.nn.functional.interpolate(dp3_out_seg, shape) + + x = self.up4(x, x0) + dp0_out_seg = self.out_conv(x) + + + + + + + return dp0_out_seg, dp2_out_seg, dp3_out_seg + + + # x = self.up2(x, x2) + # x = self.up3(x, x1) + # x = self.up4(x, x0) + # output = self.out_conv(x) + # return output + + + + +def Dropout(x, p=0.3): + x = torch.nn.functional.dropout(x, p) + return x + + +def FeatureDropout(x): + attention = torch.mean(x, dim=1, keepdim=True) + max_val, _ = torch.max(attention.view( + x.size(0), -1), dim=1, keepdim=True) + threshold = max_val * np.random.uniform(0.7, 0.9) + threshold = threshold.view(x.size(0), 1, 1, 1).expand_as(attention) + drop_mask = (attention < threshold).float() + x = x.mul(drop_mask) + return x + + +class FeatureNoise(nn.Module): + def __init__(self, uniform_range=0.3): + super(FeatureNoise, self).__init__() + self.uni_dist = Uniform(-uniform_range, uniform_range) + + def feature_based_noise(self, x): + noise_vector = self.uni_dist.sample( + x.shape[1:]).to(x.device).unsqueeze(0) + x_noise = x.mul(noise_vector) + x + return x_noise + + def forward(self, x): + x = 
self.feature_based_noise(x) + return x \ No newline at end of file diff --git a/code/networks/mix_transformer.py b/code/networks/mix_transformer.py new file mode 100644 index 0000000..065ca93 --- /dev/null +++ b/code/networks/mix_transformer.py @@ -0,0 +1,449 @@ +# --------------------------------------------------------------- +# Copyright (c) 2021, NVIDIA Corporation. All rights reserved. +# +# This work is licensed under the NVIDIA Source Code License +# --------------------------------------------------------------- +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + +#from mmseg.utils import get_root_logger +#from mmcv.runner import load_checkpoint +import math + +import pdb + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.dwconv = DWConv(hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + x = self.fc1(x) + x = self.dwconv(x, H, W) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
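+ # Spatial-reduction attention (the efficient-attention scheme used by this SegFormer/PVT-style backbone):
+ # queries keep the full H*W token resolution, while keys and values are computed from a feature map
+ # downsampled by `sr_ratio` with a strided conv, cutting the attention cost from O((HW)^2) to O(HW * HW / sr_ratio^2).
+ # Illustrative shapes only (assuming a 128x128 token grid and sr_ratio=8):
+ #   q: [B, heads, 16384, C/heads],  k, v: [B, heads, 256, C/heads]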
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr_ratio > 1: + x_ = x.permute(0, 2, 1).reshape(B, C, H, W) # [bz, 64, 128, 128] + x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1) # [bz, 64, 16, 16] + x_ = self.norm(x_) + kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + else: + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn_ = (q @ k.transpose(-2, -1)) + + '''if self.sr_ratio == 1: + attn_ = attn_ + attn_.permute(0, 1, 3, 2)''' + + attn_ = (attn_ * self.scale).softmax(dim=-1) + attn = self.attn_drop(attn_) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + # ##### + # #attn_ = attn_.clone().mean(1).reshape(-1, H, W, attn.shape[-1],) + # #attn_ = F.avg_pool2d(attn_.permute(0,3,1,2), kernel_size=self.sr_ratio, stride=self.sr_ratio) + # #attn_ = attn_.reshape(-1, attn.shape[-1], attn.shape[-1]) + # attn_copy = attn_.clone().reshape(B, self.num_heads, H, W, attn.shape[-1],) # [bz, 1, 128, 128, 16*16] + # if self.sr_ratio > 1: + # attn_copy = F.avg_pool3d(attn_copy, kernel_size=(self.sr_ratio, self.sr_ratio, 1), stride=(self.sr_ratio, self.sr_ratio, 1)) + # #attn_copy = attn_copy.reshape(B, self.num_heads, self.sr_ratio, -1, W, attn.shape[-1],).mean(2) + # #attn_copy = attn_copy.reshape(B, self.num_heads, attn_copy.shape[2], self.sr_ratio, -1, attn.shape[-1],).mean(3) + # #print(attn_copy.shape) + # #attn_ = F.avg_pool2d(attn_.permute(0,3,1,2), kernel_size=self.sr_ratio, stride=self.sr_ratio) + # attn_copy = attn_copy.reshape(-1, self.num_heads, attn.shape[-1], attn.shape[-1]) + # ##### + + return x, attn_ + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + _x, _attn = self.attn(self.norm1(x), H, W) + x = x + self.drop_path(_x) + x = x + self.drop_path(self.mlp(self.norm2(x), H, W)) + + return x, _attn + + +class OverlapPatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2)) + self.norm = nn.LayerNorm(embed_dim) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + x = self.proj(x) + _, _, H, W = x.shape + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + + return x, H, W + + +class MixVisionTransformer(nn.Module): + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512], + num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0., + attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], stride=None): + super().__init__() + self.num_classes = num_classes + self.depths = depths + self.embed_dims = embed_dims + self.stride = stride + + # patch_embed + self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=stride[0], in_chans=in_chans, + embed_dim=embed_dims[0]) + self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=stride[1], in_chans=embed_dims[0], + embed_dim=embed_dims[1]) + self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=stride[2], in_chans=embed_dims[1], + embed_dim=embed_dims[2]) + self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=stride[3], in_chans=embed_dims[2], + embed_dim=embed_dims[3]) + + # transformer encoder + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + cur = 0 + self.block1 = nn.ModuleList([Block( + dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], 
qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[0]) + for i in range(depths[0])]) + self.norm1 = norm_layer(embed_dims[0]) + + cur += depths[0] + self.block2 = nn.ModuleList([Block( + dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[1]) + for i in range(depths[1])]) + self.norm2 = norm_layer(embed_dims[1]) + + cur += depths[1] + self.block3 = nn.ModuleList([Block( + dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[2]) + for i in range(depths[2])]) + self.norm3 = norm_layer(embed_dims[2]) + + cur += depths[2] + self.block4 = nn.ModuleList([Block( + dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[3]) + for i in range(depths[3])]) + self.norm4 = norm_layer(embed_dims[3]) + + # classification head + # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + ''' + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) + ''' + def reset_drop_path(self, drop_path_rate): + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))] + cur = 0 + for i in range(self.depths[0]): + self.block1[i].drop_path.drop_prob = dpr[cur + i] + + cur += self.depths[0] + for i in range(self.depths[1]): + self.block2[i].drop_path.drop_prob = dpr[cur + i] + + cur += self.depths[1] + for i in range(self.depths[2]): + self.block3[i].drop_path.drop_prob = dpr[cur + i] + + cur += self.depths[2] + for i in range(self.depths[3]): + self.block4[i].drop_path.drop_prob = dpr[cur + i] + + def freeze_patch_emb(self): + self.patch_embed1.requires_grad = False + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + outs = [] + attns = [] + + # stage 1 + x, H, W = self.patch_embed1(x) # x-[bz,128*128, 64] + attns1 = [] + for i, blk in enumerate(self.block1): + x, attn = blk(x, H, W) + attns1.append(attn) + x = self.norm1(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + attns.append(attns1) + + # stage 
2 + x, H, W = self.patch_embed2(x) + attns2 = [] + for i, blk in enumerate(self.block2): + x, attn = blk(x, H, W) + attns2.append(attn) + x = self.norm2(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + attns.append(attns2) + + # stage 3 + x, H, W = self.patch_embed3(x) + attns3 = [] + for i, blk in enumerate(self.block3): + x, attn = blk(x, H, W) + attns3.append(attn) + x = self.norm3(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + attns.append(attns3) + + # stage 4 + x, H, W = self.patch_embed4(x) + attns4 = [] + for i, blk in enumerate(self.block4): + x, attn = blk(x, H, W) + attns4.append(attn) + x = self.norm4(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + attns.append(attns4) + + return outs, attns + + def forward(self, x): # x-[bz,3,512,512] + x, attns = self.forward_features(x) + # x = self.head(x) + + return x, attns + + +class DWConv(nn.Module): + def __init__(self, dim=768): + super(DWConv, self).__init__() + self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) + + def forward(self, x, H, W): + B, N, C = x.shape + x = x.transpose(1, 2).view(B, C, H, W) + x = self.dwconv(x) + x = x.flatten(2).transpose(1, 2) + + return x + +class mit_b0(MixVisionTransformer): + def __init__(self, stride=None, **kwargs): + super(mit_b0, self).__init__( + patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, stride=stride) + + +class mit_b1(MixVisionTransformer): + def __init__(self, stride=None, **kwargs): + super(mit_b1, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, stride=stride) + + +class mit_b2(MixVisionTransformer): + def __init__(self, stride=None, **kwargs): + super(mit_b2, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, stride=stride) + + +class mit_b3(MixVisionTransformer): + def __init__(self, stride=None, **kwargs): + super(mit_b3, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, stride=stride) + + +class mit_b4(MixVisionTransformer): + def __init__(self, stride=None, **kwargs): + super(mit_b4, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, stride=stride) + + +class mit_b5(MixVisionTransformer): + def __init__(self, stride=None, **kwargs): + super(mit_b5, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, stride=stride) \ No newline at end of file diff --git 
a/code/networks/net_factory.py b/code/networks/net_factory.py index 38d7daf..8ff6f44 100644 --- a/code/networks/net_factory.py +++ b/code/networks/net_factory.py @@ -1,22 +1,99 @@ -from networks.efficientunet import Effi_UNet +# from networks.efficientunet import Effi_UNet +# from networks.enet import ENet from networks.pnet import PNet2D -from networks.unet import UNet, UNet_DS, UNet_CCT, UNet_CCT_3H +from networks.unet import UNet, UNet_DS, UNet_CCT +from networks.unet_new import UNet_new +import argparse +from networks.vision_transformer import SwinUnet as ViT_seg +from networks.config import get_config +# from networks.nnunet import initialize_network + + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='../data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC/Cross_Supervision_CNN_Trans2D', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=8, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[224, 224], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=1337, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=4, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=7, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.1, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') +args = parser.parse_args() +config = get_config(args) def net_factory(net_type="unet", in_chns=1, class_num=3): if net_type == "unet": - net = UNet(in_chns=in_chns, class_num=class_num).cuda() - elif net_type == "unet_cct": - net = UNet_CCT(in_chns=in_chns, class_num=class_num).cuda() - elif net_type == "unet_cct_3h": - net = UNet_CCT_3H(in_chns=in_chns, class_num=class_num).cuda() + net = UNet(in_chns=in_chns, class_num=class_num) + # elif net_type == "enet": + # net = ENet(in_channels=in_chns, num_classes=class_num).cuda() elif net_type == "unet_ds": - net = UNet_DS(in_chns=in_chns, class_num=class_num).cuda() - elif net_type == "efficient_unet": - net = Effi_UNet('efficientnet-b3', encoder_weights='imagenet', - in_channels=in_chns, classes=class_num).cuda() + net = UNet_DS(in_chns=in_chns, class_num=class_num) + elif net_type == "unet_new": + net = UNet_new(in_chns=in_chns, class_num=class_num) + # elif net_type == "unet_urpc": + # net = UNet_URPC(in_chns=in_chns, class_num=class_num).cuda() + # elif net_type == "efficient_unet": + # net = Effi_UNet('efficientnet-b3', encoder_weights='imagenet', + # in_channels=in_chns, classes=class_num) + elif net_type == "ViT_Seg": + net = ViT_seg(config, img_size=args.patch_size, + num_classes=args.num_classes) elif net_type == "pnet": - net = PNet2D(in_chns, class_num, 64, [1, 2, 4, 8, 16]).cuda() + net = PNet2D(in_chns, class_num, 64, [1, 2, 4, 8, 16]) + # elif net_type == "nnUNet": + # net = initialize_network(num_classes=class_num).cuda() else: net = None return net diff --git a/code/networks/net_factory_w.py b/code/networks/net_factory_w.py new file mode 100644 index 0000000..38d7daf --- /dev/null +++ b/code/networks/net_factory_w.py @@ -0,0 +1,22 @@ +from networks.efficientunet import Effi_UNet +from networks.pnet import PNet2D +from networks.unet import UNet, UNet_DS, UNet_CCT, UNet_CCT_3H + + +def net_factory(net_type="unet", in_chns=1, class_num=3): + if net_type == 
"unet": + net = UNet(in_chns=in_chns, class_num=class_num).cuda() + elif net_type == "unet_cct": + net = UNet_CCT(in_chns=in_chns, class_num=class_num).cuda() + elif net_type == "unet_cct_3h": + net = UNet_CCT_3H(in_chns=in_chns, class_num=class_num).cuda() + elif net_type == "unet_ds": + net = UNet_DS(in_chns=in_chns, class_num=class_num).cuda() + elif net_type == "efficient_unet": + net = Effi_UNet('efficientnet-b3', encoder_weights='imagenet', + in_channels=in_chns, classes=class_num).cuda() + elif net_type == "pnet": + net = PNet2D(in_chns, class_num, 64, [1, 2, 4, 8, 16]).cuda() + else: + net = None + return net diff --git a/code/networks/swin_transformer_unet_skip_expand_decoder_sys.py b/code/networks/swin_transformer_unet_skip_expand_decoder_sys.py new file mode 100644 index 0000000..a85885e --- /dev/null +++ b/code/networks/swin_transformer_unet_skip_expand_decoder_sys.py @@ -0,0 +1,804 @@ +# This file borrowed from Swin-UNet: https://github.com/HuCaoFighting/Swin-Unet +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from einops import rearrange +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, + W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous( + ).view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, + window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - \ + coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute( + 1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - \ + 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", + relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // + self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. 
+ num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, + act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = window_partition(img_mask, self.window_size) + mask_windows = mask_windows.view(-1, + self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill( + attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll( + x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + # nW*B, window_size, window_size, C + x_windows = window_partition(shifted_x, self.window_size) + # nW*B, window_size*window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) + + # W-MSA/SW-MSA + # nW*B, window_size*window_size, C + attn_windows = self.attn(x_windows, mask=self.attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, 
+ self.window_size, self.window_size, C) + shifted_x = window_reverse( + attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=( + self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class PatchExpand(nn.Module): + def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.expand = nn.Linear( + dim, 2*dim, bias=False) if dim_scale == 2 else nn.Identity() + self.norm = norm_layer(dim // dim_scale) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + x = self.expand(x) + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', + p1=2, p2=2, c=C//4) + x = x.view(B, -1, C//4) + x = self.norm(x) + + return x + + +class FinalPatchExpand_X4(nn.Module): + def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.dim_scale = dim_scale + self.expand = nn.Linear(dim, 16*dim, bias=False) + self.output_dim = dim + self.norm = norm_layer(self.output_dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + x = self.expand(x) + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + 
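+ # `self.expand` has already multiplied the channel dim by 16 (= dim_scale**2); the view + rearrange
+ # below fold those channels into a 4x4 spatial patch per token, turning (B, H*W, 16*dim) into
+ # (B, 4H*4W, dim) for the final 4x upsampling.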
x = x.view(B, H, W, C) + x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', + p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2)) + x = x.view(B, -1, self.output_dim) + x = self.norm(x) + + return x + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if ( + i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance( + drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class BasicLayer_up(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if ( + i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance( + drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if upsample is not None: + self.upsample = PatchExpand( + input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer) + else: + self.upsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.upsample is not None: + x = self.upsample(x) + return x + + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // + patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, + kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * \ + (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformerSys(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. 
Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, + embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24], + window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, final_upsample="expand_first", **kwargs): + super().__init__() + + print("SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}".format(depths, + depths_decoder, drop_path_rate, num_classes)) + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.num_features_up = int(embed_dim * 2) + self.mlp_ratio = mlp_ratio + self.final_upsample = final_upsample + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, + sum(depths))] # stochastic depth decay rule + + # build encoder and bottleneck layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum( + depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if ( + i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + # build decoder layers + self.layers_up = nn.ModuleList() + self.concat_back_dim = nn.ModuleList() + for i_layer in range(self.num_layers): + concat_linear = 
nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)), + int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity() + if i_layer == 0: + layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)), + patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer) + else: + layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), + input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)), + patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), + depth=depths[( + self.num_layers-1-i_layer)], + num_heads=num_heads[( + self.num_layers-1-i_layer)], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:( + self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])], + norm_layer=norm_layer, + upsample=PatchExpand if ( + i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers_up.append(layer_up) + self.concat_back_dim.append(concat_linear) + + self.norm = norm_layer(self.num_features) + self.norm_up = norm_layer(self.embed_dim) + + if self.final_upsample == "expand_first": + print("---final upsample expand_first---") + self.up = FinalPatchExpand_X4(input_resolution=( + img_size//patch_size, img_size//patch_size), dim_scale=4, dim=embed_dim) + self.output = nn.Conv2d( + in_channels=embed_dim, out_channels=self.num_classes, kernel_size=1, bias=False) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + #Encoder and Bottleneck + def forward_features(self, x): + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + x_downsample = [] + + for layer in self.layers: + x_downsample.append(x) + x = layer(x) + + x = self.norm(x) # B L C + + return x, x_downsample + + # Dencoder and Skip connection + def forward_up_features(self, x, x_downsample): + for inx, layer_up in enumerate(self.layers_up): + if inx == 0: + x = layer_up(x) + else: + x = torch.cat([x, x_downsample[3-inx]], -1) + x = self.concat_back_dim[inx](x) + x = layer_up(x) + + x = self.norm_up(x) # B L C + + return x + + def up_x4(self, x): + H, W = self.patches_resolution + B, L, C = x.shape + assert L == H*W, "input features has wrong size" + + if self.final_upsample == "expand_first": + x = self.up(x) + x = x.view(B, 4*H, 4*W, -1) + x = x.permute(0, 3, 1, 2) # B,C,H,W + x = self.output(x) + + return x + + def forward(self, x): + x, x_downsample = self.forward_features(x) + x = self.forward_up_features(x, x_downsample) + x = self.up_x4(x) + + return x + + def flops(self): + flops = 0 + flops += self.patch_embed.flops() + for i, layer in enumerate(self.layers): + flops += layer.flops() + flops += self.num_features * \ + self.patches_resolution[0] * \ + self.patches_resolution[1] // (2 ** self.num_layers) + flops += self.num_features * self.num_classes + return flops diff --git 
a/code/networks/unet_new.py b/code/networks/unet_new.py new file mode 100644 index 0000000..41ad6ce --- /dev/null +++ b/code/networks/unet_new.py @@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- +""" +The implementation is borrowed from: https://github.com/HiLab-git/PyMIC + +add attation and calssifiation +""" +from __future__ import division, print_function + +import numpy as np +import torch +import torch.nn as nn +from torch.distributions.uniform import Uniform +from networks.head import SegFormerHead,class_Head +from networks.attention import Self_Attention as Attention + + +class ConvBlock(nn.Module): + """two convolution layers with batch norm and leaky relu""" + + def __init__(self, in_channels, out_channels, dropout_p): + super(ConvBlock, self).__init__() + self.conv_conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.LeakyReLU(), + nn.Dropout(dropout_p), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.LeakyReLU() + ) + + def forward(self, x): + return self.conv_conv(x) + + +class DownBlock(nn.Module): + """Downsampling followed by ConvBlock""" + + def __init__(self, in_channels, out_channels, dropout_p): + super(DownBlock, self).__init__() + self.maxpool_conv = nn.Sequential( + nn.MaxPool2d(2), + ConvBlock(in_channels, out_channels, dropout_p) + + ) + + def forward(self, x): + return self.maxpool_conv(x) + + +class UpBlock(nn.Module): + """Upssampling followed by ConvBlock""" + + def __init__(self, in_channels1, in_channels2, out_channels, dropout_p, + bilinear=True): + super(UpBlock, self).__init__() + self.bilinear = bilinear + if bilinear: + self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1) + self.up = nn.Upsample( + scale_factor=2, mode='bilinear', align_corners=True) + else: + self.up = nn.ConvTranspose2d( + in_channels1, in_channels2, kernel_size=2, stride=2) + self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p) + + def forward(self, x1, x2): + if self.bilinear: + x1 = self.conv1x1(x1) + x1 = self.up(x1) + x = torch.cat([x2, x1], dim=1) + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, params): + super(Encoder, self).__init__() + self.params = params + self.in_chns = self.params['in_chns'] + self.ft_chns = self.params['feature_chns'] + self.n_class = self.params['class_num'] + self.bilinear = self.params['bilinear'] + self.dropout = self.params['dropout'] + assert (len(self.ft_chns) == 5) + self.in_conv = ConvBlock( + self.in_chns, self.ft_chns[0], self.dropout[0]) + self.down1 = DownBlock( + self.ft_chns[0], self.ft_chns[1], self.dropout[1]) + self.down2 = DownBlock( + self.ft_chns[1], self.ft_chns[2], self.dropout[2]) + self.down3 = DownBlock( + self.ft_chns[2], self.ft_chns[3], self.dropout[3]) + self.down4 = DownBlock( + self.ft_chns[3], self.ft_chns[4], self.dropout[4]) + + def forward(self, x): + x0 = self.in_conv(x) + x1 = self.down1(x0) + x2 = self.down2(x1) + x3 = self.down3(x2) + x4 = self.down4(x3) + return [x0, x1, x2, x3, x4] + +class Encoder_self_attention(nn.Module): + def __init__(self, params): + super(Encoder_self_attention, self).__init__() + self.params = params + self.in_chns = self.params['in_chns'] + self.ft_chns = self.params['feature_chns'] + self.n_class = self.params['class_num'] + self.bilinear = self.params['bilinear'] + self.dropout = self.params['dropout'] + assert (len(self.ft_chns) == 5) + self.in_conv = ConvBlock( + self.in_chns, self.ft_chns[0], self.dropout[0]) + 
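+ # Standard UNet encoder pyramid: each DownBlock below halves the spatial size and widens the
+ # channels to the next entry of feature_chns; the deepest (x4) features are additionally run
+ # through the self-attention block defined further down.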
self.down1 = DownBlock( + self.ft_chns[0], self.ft_chns[1], self.dropout[1]) + self.down2 = DownBlock( + self.ft_chns[1], self.ft_chns[2], self.dropout[2]) + self.down3 = DownBlock( + self.ft_chns[2], self.ft_chns[3], self.dropout[3]) + self.down4 = DownBlock( + self.ft_chns[3], self.ft_chns[4], self.dropout[4]) + self.attn = Attention(dim=256, + num_heads=8, qkv_bias=True, qk_scale=None, + attn_drop=0, proj_drop=0.1, sr_ratio=1) + self.norm1 = nn.LayerNorm(256) + + def forward(self, x): + x0 = self.in_conv(x) + x1 = self.down1(x0) + x2 = self.down2(x1) + x3 = self.down3(x2) + x4 = self.down4(x3) + att=x4.view(x4.shape[0],-1,x4.shape[1]) + att=self.attn(att,256,256) + return [x0, x1, x2, x3, x4],att + + + +class Decoder(nn.Module): + def __init__(self, params): + super(Decoder, self).__init__() + self.params = params + self.in_chns = self.params['in_chns'] + self.ft_chns = self.params['feature_chns'] + self.n_class = self.params['class_num'] + self.bilinear = self.params['bilinear'] + assert (len(self.ft_chns) == 5) + + self.up1 = UpBlock( + self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0) + self.up2 = UpBlock( + self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0) + self.up3 = UpBlock( + self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0) + self.up4 = UpBlock( + self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0) + + self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class, + kernel_size=3, padding=1) + + def forward(self, feature): + x0 = feature[0] + x1 = feature[1] + x2 = feature[2] + x3 = feature[3] + x4 = feature[4] + + x = self.up1(x4, x3) + x = self.up2(x, x2) + x = self.up3(x, x1) + x = self.up4(x, x0) + output = self.out_conv(x) + return output + + +class Decoder_DS(nn.Module): + def __init__(self, params): + super(Decoder_DS, self).__init__() + self.params = params + self.in_chns = self.params['in_chns'] + self.ft_chns = self.params['feature_chns'] + self.n_class = self.params['class_num'] + self.bilinear = self.params['bilinear'] + assert (len(self.ft_chns) == 5) + + self.up1 = UpBlock( + self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0) + self.up2 = UpBlock( + self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0) + self.up3 = UpBlock( + self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0) + self.up4 = UpBlock( + self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0) + + self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp4 = nn.Conv2d(self.ft_chns[4], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp3 = nn.Conv2d(self.ft_chns[3], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp2 = nn.Conv2d(self.ft_chns[2], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp1 = nn.Conv2d(self.ft_chns[1], self.n_class, + kernel_size=3, padding=1) + + def forward(self, feature, shape): + x0 = feature[0] + x1 = feature[1] + x2 = feature[2] + x3 = feature[3] + x4 = feature[4] + x = self.up1(x4, x3) + dp3_out_seg = self.out_conv_dp3(x) + dp3_out_seg = torch.nn.functional.interpolate(dp3_out_seg, shape) + + x = self.up2(x, x2) + dp2_out_seg = self.out_conv_dp2(x) + dp2_out_seg = torch.nn.functional.interpolate(dp2_out_seg, shape) + + x = self.up3(x, x1) + dp1_out_seg = self.out_conv_dp1(x) + dp1_out_seg = torch.nn.functional.interpolate(dp1_out_seg, shape) + + x = self.up4(x, x0) + dp0_out_seg = self.out_conv(x) + return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg + + +class Decoder_URDS(nn.Module): + 
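    # Deep-supervision decoder: during training the auxiliary heads (out_conv_dp1..dp3)
    # predict from perturbed features (Dropout, FeatureDropout, FeatureNoise), while the
    # clean features are used at inference; all auxiliary maps are resized to `shape`.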
def __init__(self, params): + super(Decoder_URDS, self).__init__() + self.params = params + self.in_chns = self.params['in_chns'] + self.ft_chns = self.params['feature_chns'] + self.n_class = self.params['class_num'] + self.bilinear = self.params['bilinear'] + assert (len(self.ft_chns) == 5) + + self.up1 = UpBlock( + self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0) + self.up2 = UpBlock( + self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0) + self.up3 = UpBlock( + self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0) + self.up4 = UpBlock( + self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0) + + self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp4 = nn.Conv2d(self.ft_chns[4], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp3 = nn.Conv2d(self.ft_chns[3], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp2 = nn.Conv2d(self.ft_chns[2], self.n_class, + kernel_size=3, padding=1) + self.out_conv_dp1 = nn.Conv2d(self.ft_chns[1], self.n_class, + kernel_size=3, padding=1) + self.feature_noise = FeatureNoise() + + def forward(self, feature, shape): + x0 = feature[0] + x1 = feature[1] + x2 = feature[2] + x3 = feature[3] + x4 = feature[4] + x = self.up1(x4, x3) + if self.training: + dp3_out_seg = self.out_conv_dp3(Dropout(x, p=0.5)) + else: + dp3_out_seg = self.out_conv_dp3(x) + dp3_out_seg = torch.nn.functional.interpolate(dp3_out_seg, shape) + + x = self.up2(x, x2) + if self.training: + dp2_out_seg = self.out_conv_dp2(FeatureDropout(x)) + else: + dp2_out_seg = self.out_conv_dp2(x) + dp2_out_seg = torch.nn.functional.interpolate(dp2_out_seg, shape) + + x = self.up3(x, x1) + if self.training: + dp1_out_seg = self.out_conv_dp1(self.feature_noise(x)) + else: + dp1_out_seg = self.out_conv_dp1(x) + dp1_out_seg = torch.nn.functional.interpolate(dp1_out_seg, shape) + + x = self.up4(x, x0) + dp0_out_seg = self.out_conv(x) + return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg + + +def Dropout(x, p=0.5): + x = torch.nn.functional.dropout2d(x, p) + return x + + +def FeatureDropout(x): + attention = torch.mean(x, dim=1, keepdim=True) + max_val, _ = torch.max(attention.view( + x.size(0), -1), dim=1, keepdim=True) + threshold = max_val * np.random.uniform(0.7, 0.9) + threshold = threshold.view(x.size(0), 1, 1, 1).expand_as(attention) + drop_mask = (attention < threshold).float() + x = x.mul(drop_mask) + return x + + +class FeatureNoise(nn.Module): + def __init__(self, uniform_range=0.3): + super(FeatureNoise, self).__init__() + self.uni_dist = Uniform(-uniform_range, uniform_range) + + def feature_based_noise(self, x): + noise_vector = self.uni_dist.sample( + x.shape[1:]).to(x.device).unsqueeze(0) + x_noise = x.mul(noise_vector) + x + return x_noise + + def forward(self, x): + x = self.feature_based_noise(x) + return x + + +class UNet_new(nn.Module): + def __init__(self, in_chns, class_num): + super(UNet_new, self).__init__() + + params = {'in_chns': in_chns, + 'feature_chns': [16, 32, 64, 128, 256], + 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5], + 'class_num': class_num, + 'bilinear': False, + 'acti_func': 'relu'} + + self.encoder = Encoder_self_attention(params) + self.decoder = Decoder(params) + + self.seg_head=class_Head( + # type='SegFormerHead', + in_channels=[32, 64, 128, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=4, + norm_cfg=dict(type='BN', requires_grad=True), + align_corners=False, + 
decoder_params=dict(embed_dim=256), + loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + + self.attn_proj = nn.Conv2d(in_channels=8, out_channels=4, kernel_size=1, bias=True) + nn.init.kaiming_normal_(self.attn_proj.weight, a=np.sqrt(5), mode="fan_out") + + + + + + + def forward(self, x): + feature,_attns = self.encoder(x) + + # attn_cat = torch.cat(_attns[-2:], dim=1)#.detach() + + attn_cat=_attns + attn_cat = attn_cat + attn_cat.permute(0, 1, 3, 2) + attn_pred = self.attn_proj(attn_cat) + attn_pred = torch.sigmoid(attn_pred)#[:,0,...] + # if feature[4].shape[-1]== 8: + # logits_=0 + # else: + # logits_ =self.seg_head(feature[4]) + # calss=logits_ + + + output = self.decoder(feature) + return output,attn_pred + + + + +class UNet_DS(nn.Module): + def __init__(self, in_chns, class_num): + super(UNet_DS, self).__init__() + + params = {'in_chns': in_chns, + 'feature_chns': [16, 32, 64, 128, 256], + 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5], + 'class_num': class_num, + 'bilinear': False, + 'acti_func': 'relu'} + self.encoder = Encoder(params) + self.decoder = Decoder_DS(params) + + def forward(self, x): + shape = x.shape[2:] + feature = self.encoder(x) + dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg = self.decoder( + feature, shape) + return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg + + +class UNet_CCT(nn.Module): + def __init__(self, in_chns, class_num): + super(UNet_CCT, self).__init__() + + params = {'in_chns': in_chns, + 'feature_chns': [16, 32, 64, 128, 256], + 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5], + 'class_num': class_num, + 'bilinear': False, + 'acti_func': 'relu'} + self.encoder = Encoder(params) + self.main_decoder = Decoder(params) + self.aux_decoder1 = Decoder(params) + + def forward(self, x): + feature = self.encoder(x) + main_seg = self.main_decoder(feature) + aux1_feature = [Dropout(i) for i in feature] + aux_seg1 = self.aux_decoder1(aux1_feature) + return main_seg, aux_seg1 + + +class UNet_CCT_3H(nn.Module): + def __init__(self, in_chns, class_num): + super(UNet_CCT_3H, self).__init__() + + params = {'in_chns': in_chns, + 'feature_chns': [16, 32, 64, 128, 256], + 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5], + 'class_num': class_num, + 'bilinear': False, + 'acti_func': 'relu'} + self.encoder = Encoder(params) + self.main_decoder = Decoder(params) + self.aux_decoder1 = Decoder(params) + self.aux_decoder2 = Decoder(params) + + def forward(self, x): + feature = self.encoder(x) + main_seg = self.main_decoder(feature) + aux1_feature = [Dropout(i) for i in feature] + aux_seg1 = self.aux_decoder1(aux1_feature) + aux2_feature = [FeatureNoise()(i) for i in feature] + aux_seg2 = self.aux_decoder1(aux2_feature) + return main_seg, aux_seg1, aux_seg2 diff --git a/code/networks/vision_transformer.py b/code/networks/vision_transformer.py new file mode 100644 index 0000000..e731171 --- /dev/null +++ b/code/networks/vision_transformer.py @@ -0,0 +1,192 @@ +# coding=utf-8 +# This file borrowed from Swin-UNet: https://github.com/HuCaoFighting/Swin-Unet +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import logging +import math + +from os.path import join as pjoin + +import torch +import torch.nn as nn +import numpy as np +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm +from torch.nn.modules.utils import _pair +from scipy import ndimage +from networks.swin_transformer_unet_skip_expand_decoder_sys import 
SwinTransformerSys +from mmseg.ops import resize + +from networks.mix_transformer import MixVisionTransformer +from functools import partial +from networks.head import SegFormerHead,Unet_Decoder,class_Head +from functools import partial +import pickle +from utils.util import FeatureDropout + +# from networks.decode_head import Classification_head +# model settings +norm_cfg = dict(type='BN', requires_grad=True) +# dict(type='SyncBN', requires_grad=True) + +logger = logging.getLogger(__name__) + + + + +logger = logging.getLogger(__name__) + +class SwinUnet(nn.Module): + def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False): + super(SwinUnet, self).__init__() + self.num_classes = num_classes + self.zero_head = zero_head + self.config = config + + # self.swin_unet = SwinTransformerSys(img_size=config.DATA.IMG_SIZE, + # patch_size=config.MODEL.SWIN.PATCH_SIZE, + # in_chans=config.MODEL.SWIN.IN_CHANS, + # num_classes=self.num_classes, + # embed_dim=config.MODEL.SWIN.EMBED_DIM, + # depths=config.MODEL.SWIN.DEPTHS, + # num_heads=config.MODEL.SWIN.NUM_HEADS, + # window_size=config.MODEL.SWIN.WINDOW_SIZE, + # mlp_ratio=config.MODEL.SWIN.MLP_RATIO, + # qkv_bias=config.MODEL.SWIN.QKV_BIAS, + # qk_scale=config.MODEL.SWIN.QK_SCALE, + # drop_rate=config.MODEL.DROP_RATE, + # drop_path_rate=config.MODEL.DROP_PATH_RATE, + # ape=config.MODEL.SWIN.APE, + # patch_norm=config.MODEL.SWIN.PATCH_NORM, + # use_checkpoint=config.TRAIN.USE_CHECKPOINT) + + self.mix_transformer = MixVisionTransformer(img_size=config.DATA.IMG_SIZE, + patch_size=4, + in_chans=config.MODEL.SWIN.IN_CHANS, + num_classes=self.num_classes, + embed_dims=[32, 64, 160, 256], + depths=[2, 2, 2, 2], + num_heads=[1, 2, 5, 8], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, + qk_scale=config.MODEL.SWIN.QK_SCALE, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + drop_rate=0.0, + drop_path_rate=0.1, + stride=[4,2,2,1], + ) +# class mit_b0(MixVisionTransformer): +# def __init__(self, stride=None, **kwargs): +# super(mit_b0, self).__init__( +# patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], +# qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], +# drop_rate=0.0, drop_path_rate=0.1, stride=stride) + + self.seg_head=SegFormerHead( + # type='SegFormerHead', + in_channels=[32, 64, 160, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=4, + norm_cfg=norm_cfg, + align_corners=False, + decoder_params=dict(embed_dim=256), + loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + + params = { + 'feature_chns': [32, 64, 160, 256], + 'dropout': [0.05, 0.1, 0.2, 0.3], + 'class_num': 4, + 'bilinear': False, + 'acti_func': 'relu'} + self.unet_decoder = Unet_Decoder(params) + + + + self.calss_head=class_Head( + # type='SegFormerHead', + in_channels=[32, 64, 160, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=4, + norm_cfg=norm_cfg, + align_corners=False, + decoder_params=dict(embed_dim=256), + loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + + + + + self.attn_proj = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1, bias=True) + nn.init.kaiming_normal_(self.attn_proj.weight, a=np.sqrt(5), mode="fan_out") + + self.classifier = nn.Conv2d(in_channels=256, out_channels=self.num_classes-1, kernel_size=1, bias=False) + + + + def forward(self, 
x,aux=False): + if x.size()[1] == 1: + x = x.repeat(1,3,1,1) + # pickle.load = partial(pickle.load, encoding="latin1") + # pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1") + + + + state_dict = torch.load('/mnt/sdd/tb/pretrained/'+'mit_b0'+'.pth') + state_dict.pop('head.weight') + state_dict.pop('head.bias') + self.mix_transformer.load_state_dict(state_dict,) + + logits = self.mix_transformer(x) + + _attns =logits[1] + + # attn_cat = torch.cat(_attns[3], dim=1)#.detach() + # attn_cat = attn_cat + attn_cat.permute(0, 1, 3, 2) + # _attns = self.attn_proj(attn_cat) + # _attns = torch.sigmoid(_attns)#[:,0,...] + + attn_cat3 = torch.cat([_attns[3][0],_attns[3][1]] ,dim=1)#.detach() + attn_cat3 = attn_cat3 + attn_cat3.permute(0, 1, 3, 2) + attn_pred3 = self.attn_proj(attn_cat3) + attn_pred3 = torch.sigmoid(attn_pred3)[:,0,...] + + + + + + + mlp_f=self.seg_head(logits[0]) + # calss=self.calss_head(logits[0][3]) + if aux: + return mlp_f ,attn_pred3,_attns + # aux3_feature = [FeatureDropout(i) for i in logits[0]] + else: + shape = x.shape[2:] + dp0_out_seg,dp2_out_seg,dp3_out_seg=self.unet_decoder(logits[0],shape) + # mlp_seg_aux3=self.seg_head(aux3_feature) + + + + # mlp_seg = F.interpolate(input=mlp_seg,size=x.shape[2:], mode='bilinear',align_corners=False) + + + # logits_unethead=self.unet_decoder(logits[0],shape) + logits_unethead = F.interpolate(input=dp0_out_seg,size=x.shape[2:], mode='bilinear',align_corners=False) + + + + return logits_unethead,mlp_f,attn_pred3,_attns + + + + + + \ No newline at end of file diff --git a/code/pretrained_ckpt/readme.txt b/code/pretrained_ckpt/readme.txt new file mode 100644 index 0000000..a1691a6 --- /dev/null +++ b/code/pretrained_ckpt/readme.txt @@ -0,0 +1 @@ +download pre-trained model to this folder, link:https://drive.google.com/drive/folders/1UC3XOoezeum0uck4KBVGa8osahs6rKUY diff --git a/code/test_2D_fully_sps.py b/code/test_2D_fully_sps.py index 22fbb8d..fc2a0f8 100644 --- a/code/test_2D_fully_sps.py +++ b/code/test_2D_fully_sps.py @@ -2,7 +2,7 @@ import os import re import shutil - +import logging import h5py import nibabel as nib import numpy as np @@ -12,6 +12,7 @@ from scipy.ndimage import zoom from scipy.ndimage.interpolation import zoom from tqdm import tqdm +import sys # from networks.efficientunet import UNet from networks.net_factory import net_factory @@ -77,7 +78,10 @@ def calculate_metric_percase(pred, gt, spacing): dice = metric.binary.dc(pred, gt) asd = metric.binary.asd(pred, gt, voxelspacing=spacing) hd95 = metric.binary.hd95(pred, gt, voxelspacing=spacing) - return dice, hd95, asd + assd = metric.binary.assd(pred, gt, voxelspacing=spacing) + sensitivity=metric.binary.sensitivity(pred, gt) + + return dice, hd95, asd,assd,sensitivity def test_single_volume(case, net, test_save_path, FLAGS): @@ -114,15 +118,15 @@ def test_single_volume(case, net, test_save_path, FLAGS): third_metric = calculate_metric_percase( prediction == 3, label == 3, (spacing[2], spacing[0], spacing[1])) - img_itk = sitk.GetImageFromArray(image.astype(np.float32)) - img_itk.CopyInformation(org_img_itk) - prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32)) - prd_itk.CopyInformation(org_img_itk) - lab_itk = sitk.GetImageFromArray(label.astype(np.float32)) - lab_itk.CopyInformation(org_img_itk) - sitk.WriteImage(prd_itk, test_save_path + case + "_pred.nii.gz") - sitk.WriteImage(img_itk, test_save_path + case + "_img.nii.gz") - sitk.WriteImage(lab_itk, test_save_path + case + "_gt.nii.gz") + # img_itk = 
sitk.GetImageFromArray(image.astype(np.float32)) + # img_itk.CopyInformation(org_img_itk) + # prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32)) + # prd_itk.CopyInformation(org_img_itk) + # lab_itk = sitk.GetImageFromArray(label.astype(np.float32)) + # lab_itk.CopyInformation(org_img_itk) + # sitk.WriteImage(prd_itk, test_save_path + case + "_pred.nii.gz") + # sitk.WriteImage(img_itk, test_save_path + case + "_img.nii.gz") + # sitk.WriteImage(lab_itk, test_save_path + case + "_gt.nii.gz") return first_metric, second_metric, third_metric @@ -139,23 +143,26 @@ def Inference(FLAGS): FLAGS.exp, FLAGS.fold, FLAGS.sup_type) test_save_path = "../model/{}_{}/{}/{}_predictions/".format( FLAGS.exp, FLAGS.fold, FLAGS.sup_type, FLAGS.model) + logging.basicConfig(filename=test_save_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + if os.path.exists(test_save_path): shutil.rmtree(test_save_path) os.makedirs(test_save_path) net = net_factory(net_type=FLAGS.model, in_chns=1, class_num=FLAGS.num_classes) - save_mode_path = os.path.join( - snapshot_path, 'iter_60000.pth') + save_mode_path = os.path.join(snapshot_path, 'iter_60000.pth') net.load_state_dict(torch.load(save_mode_path)) - print("init weight from {}".format(save_mode_path)) + logging.info("init weight from {}".format(save_mode_path)) net.eval() first_total = 0.0 second_total = 0.0 third_total = 0.0 for case in tqdm(image_list): - print(case) + logging.info(case) first_metric, second_metric, third_metric = test_single_volume( case, net, test_save_path, FLAGS) first_total += np.asarray(first_metric) @@ -163,18 +170,20 @@ def Inference(FLAGS): third_total += np.asarray(third_metric) avg_metric = [first_total / len(image_list), second_total / len(image_list), third_total / len(image_list)] - print(avg_metric) - print((avg_metric[0] + avg_metric[1] + avg_metric[2]) / 3) + logging.info(avg_metric) + logging.info((avg_metric[0] + avg_metric[1] + avg_metric[2]) / 3) return ((avg_metric[0] + avg_metric[1] + avg_metric[2]) / 3)[0] if __name__ == '__main__': FLAGS = parser.parse_args() + + total = 0.0 for i in [1, 2, 3, 4, 5]: # for i in [5]: FLAGS.fold = "fold{}".format(i) - print("Inference fold{}".format(i)) + logging.info("Inference fold{}".format(i)) mean_dice = Inference(FLAGS) total += mean_dice - print(total/5.0) + logging.info(total/5.0) diff --git a/code/train_Trans_teacher.py b/code/train_Trans_teacher.py new file mode 100644 index 0000000..e3be276 --- /dev/null +++ b/code/train_Trans_teacher.py @@ -0,0 +1,434 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator, + TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from 
networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=24, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=2022, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=12, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/yd2tb/SSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model = create_model() + ema_model = create_model(ema=True) + + model=model.to(device) + ema_model=ema_model.to(device) + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, 
num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + + optimizer = optim.SGD(model.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + # print("Labeled slices: ", sampled_batch_labeled["idx"]) + # print("Unlabeled slices: ", sampled_batch_unlabeled["idx"]) + with torch.autograd.set_detect_anomaly(True): + noise = torch.clamp(torch.randn_like( + unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,logits,logits_ = model(volume_batch) + outputs_soft = torch.softmax(outputs, dim=1) + + # outputs_unlabeled,_,_ = model(volume_batch[args.labeled_bs:,...]) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + with torch.no_grad(): + ema_output,ema_logits,ema_logits_ = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + # out_gatedcrf = gatecrf_loss( + # outputs_soft, + # loss_gatedcrf_kernels_desc, + # loss_gatedcrf_radius, + # volume_batch, + # 256, + # 256, + # )["loss"] + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long())#dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + 
loss_ce) + supervised_loss=loss_ce + consistency_weight = get_current_consistency_weight(iter_num // 300) + # if iter_num < 1000: + # consistency_loss = 0.0 + # else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + + create_center_1_bg = logits[0].unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = logits[1].unsqueeze(1) + create_center_1_b = logits[2].unsqueeze(1) + create_center_1_c = logits[3].unsqueeze(1) + + + + create_center_2_bg = logits_[0].unsqueeze(1) + create_center_2_a = logits_[1].unsqueeze(1) + create_center_2_b = logits_[2].unsqueeze(1) + create_center_2_c = logits_[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + # cosine similarity + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + if args.consistency!=0: + consistency_weight = get_current_consistency_weight(iter_num//150) + loss = supervised_loss + consistency_weight*loss_contrast+ consistency_weight *consistency_loss#+out_gatedcrf*0.1 + else: + loss = supervised_loss + args.my_lambda * loss_contrast+ args.my_lambda * consistency_loss#+out_gatedcrf*0.1 + + + # loss = supervised_loss + optimizer.zero_grad() + + # loss.backward(retain_graph=True) + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, 
loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
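# A minimal, self-contained restatement of the two teacher-student pieces used in
# train() above -- the sigmoid consistency ramp-up and the EMA weight update -- is
# sketched here for clarity. The names `sigmoid_rampup_sketch` and `ema_update_sketch`
# are illustrative only; the ramp-up shape follows the standard exp(-5 * (1 - t)^2)
# schedule from https://arxiv.org/abs/1610.02242, which `ramps.sigmoid_rampup` is
# assumed to implement.
def sigmoid_rampup_sketch(current_step: float, rampup_length: float) -> float:
    # Returns a weight in [0, 1] that rises smoothly to 1 over `rampup_length` steps.
    if rampup_length == 0:
        return 1.0
    current_step = float(np.clip(current_step, 0.0, rampup_length))
    phase = 1.0 - current_step / rampup_length
    return float(np.exp(-5.0 * phase * phase))


def ema_update_sketch(student: nn.Module, teacher: nn.Module, alpha: float, global_step: int) -> None:
    # Mirrors update_ema_variables(): use the true average of the student weights early
    # on, then an exponential moving average into the (gradient-free) teacher.
    alpha = min(1.0 - 1.0 / (global_step + 1), alpha)
    with torch.no_grad():
        for t_param, s_param in zip(teacher.parameters(), student.parameters()):
            t_param.data.mul_(alpha).add_(s_param.data, alpha=1.0 - alpha)

# Usage analogous to get_current_consistency_weight() above:
#   consistency_weight = args.consistency * sigmoid_rampup_sketch(epoch, args.consistency_rampup)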
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + # backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_10.py b/code/train_Trans_teacher_10.py new file mode 100644 index 0000000..39df602 --- /dev/null +++ b/code/train_Trans_teacher_10.py @@ -0,0 +1,459 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + 
default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the 
exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + # model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + # ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + 
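            # Labelled and unlabelled slices are concatenated so a single student
            # forward pass yields both the supervised logits (first labeled_bs items)
            # and the unlabelled logits compared against the noised EMA-teacher output.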
volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred = model(volume_batch) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + + feat_local,logits_local = model(crop_images) + seg_soft_crop = torch.softmax(feat_local, dim=1) + + boxes = boxes.to(device).type_as(outputs) + + # visualize + feat_local_label = feat_local.clone().detach() # 4, 20, 224, 224 + + # # normalize + # ba = logits_local.shape[0] + # feat_local_label[feat_local_label < 0] = 0 + # ll_max = torch.max(torch.max(feat_local_label, dim=3)[0], dim=2)[0] + # feat_local_label = feat_local_label / (ll_max.unsqueeze(2).unsqueeze(3) + 1e-8) + # for i in range(bs): + # ind = torch.nonzero(label_batch[i] == 0) + # feat_local_label[i * bxs:(i + 1) * bxs, ind] = 0 + + # keep max value among all classes + n, c, h, w = feat_local_label.shape + feat_local_label_c = feat_local_label.permute(1, 0, 2, 3).reshape(c, -1) + ind_f = torch.argsort(-feat_local_label_c, axis=0) + pos = torch.eye(c)[ind_f[0]].transpose(0, 1).type_as(feat_local_label_c) + feat_local_label_c = pos * feat_local_label_c + feat_local_label = feat_local_label_c.reshape(c, n, h, w).permute(1, 0, 2, 3) + + + # match the sal label hyper-parameter + feat_local_label = (feat_local_label > 0.35).type_as(feat_local_label) + + + # roi align + feat_aligned = [] + crop_label = [] + att_crop = [] + _, _, h_att, w_att = logits_local.shape + for i in range(n): + feat_aligned_=ops.roi_align(outputs[:args.labeled_bs,...], boxes[i], (h, w), 1 / 8.0) + feat_aligned.append(feat_aligned_.clone()[None]) + + label_aligned_=ops.roi_align(label_batch.unsqueeze(1).type(torch.float32), boxes[i], (h, w), 1 / 8.0) + crop_label.append(label_aligned_.clone()[None]) + + attpred_=ops.roi_align(attpred[:args.labeled_bs,...], boxes[i], (h_att , w_att ), 1 / 8.0) + att_crop.append(attpred_.clone()[None]) + + + feat_aligned = torch.cat(feat_aligned, dim=0) + crop_label = torch.cat(crop_label, dim=0) + att_crop = torch.cat(att_crop, dim=0) + feat_aligned=feat_aligned.squeeze() + crop_label =crop_label.squeeze() + att_crop =att_crop.squeeze() + + loss_kd = criterion(feat_aligned, feat_local_label[:args.labeled_bs,...]) * args.kd_weights + loss_ce_corp = ce_loss(feat_local,crop_label[:].long()) + + + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+ loss_ce_corp #+loss_dice_wr+loss_ce_wr + + + # pseudo_outputs1 = torch.argmax(outputs_seg_soft[args.labeled_bs:].detach(), dim=1, keepdim=False) + + # pseudo_outputs2 = torch.argmax(seg_soft_crop[args.labeled_bs:].detach(), dim=1, keepdim=False) + + # pseudo_supervision1 = dice_loss(outputs_seg_soft[args.labeled_bs:], pseudo_outputs2.unsqueeze(1)) + # pseudo_supervision2 = dice_loss(seg_soft_crop[args.labeled_bs:], pseudo_outputs1.unsqueeze(1)) + + + # with torch.cuda.amp.autocast(): + # # -1: unlabeled pixels (其中60%-70%是没有标注信息的) + # unlabeled_RoIs = (label_batch == 0) + # label_batch[label_batch < 0] = 0 + # affinity_loss = affinityenergyLoss(outputs, attpred, unlabeled_RoIs, 
label_batch) + + # loss = supervised_loss + affinity_loss + + with torch.no_grad(): + ema_output,ema_attpred = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + #aff_loss + + aff_loss = losses.get_aff_loss(att_crop,logits_local) + + loss = supervised_loss+aff_loss+loss_kd+consistency_weight*consistency_loss #+affinity_loss + optimizer.zero_grad() + # optimizer2.zero_grad() + + loss.backward() + + optimizer.step() + # optimizer2.step() + # optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + # writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + # writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
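# The crop-consistency terms in train() pool decoder logits inside per-sample boxes
# with torchvision.ops.roi_align before the MSE/CE crop losses are applied. The
# snippet below is a small, self-contained illustration of that call pattern; the
# tensors, box values and the name `roi_align_example` are illustrative only.
def roi_align_example() -> torch.Tensor:
    feat = torch.randn(2, 4, 32, 32)  # (N, C, H, W), e.g. class logits on a downsampled grid
    # Boxes in (batch_index, x1, y1, x2, y2) format, given in input-image coordinates.
    boxes = torch.tensor([[0.0, 8.0, 8.0, 120.0, 120.0],
                          [1.0, 0.0, 0.0, 64.0, 64.0]])
    # spatial_scale=1/8 maps image-space boxes onto the downsampled feature grid,
    # matching the `1 / 8.0` factor used in train() above.
    return ops.roi_align(feat, boxes, output_size=(16, 16), spatial_scale=1.0 / 8.0)  # (2, 4, 16, 16)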
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_tiaoshi/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + # backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_11.py b/code/train_Trans_teacher_11.py new file mode 100644 index 0000000..c1b2b78 --- /dev/null +++ b/code/train_Trans_teacher_11.py @@ -0,0 +1,393 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + 
default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the 
exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + 
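            # Editorial note (not part of the original file): at this point the
            # teacher (EMA) input `ema_inputs` is the labeled batch concatenated
            # with a noise-perturbed copy of the unlabeled batch (Gaussian noise
            # scaled by 0.1 and clamped to [-0.2, 0.2]), while the student input
            # built on the next line is the clean labeled + unlabeled batch.
            # Both batches keep the labeled samples in the first `labeled_bs`
            # positions, which is what makes the later `[:args.labeled_bs]` /
            # `[args.labeled_bs:]` slicing of student and teacher outputs line up.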
volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred,att,out_feats= model(volume_batch) + # outputs_unlabeled,_,_,_= model(unlabeled_volume_batch) + + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft, label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + # loss=loss_ce + with torch.no_grad(): + ema_output,ema_attpred,_,_ = ema_model(ema_inputs) + + ema_outputs_unlabeled_soft = torch.softmax(ema_output[args.labeled_bs:,...], dim=1) + ema_outputs_seg_soft = torch.softmax(ema_output[:args.labeled_bs,...], dim=1) + + # #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + label_batch[label_batch < 0] = 0 + #aff_loss + outs = [] + outs.append(out_feats[0][:args.labeled_bs,...]) + outs.append(out_feats[1][:args.labeled_bs,...]) + outs.append(out_feats[2][:args.labeled_bs,...]) + outs.append(out_feats[3][:args.labeled_bs,...]) + + + affinityenergyloss = affinityenergyLoss(outs, att, unlabeled_RoIs,label_batch) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + loss = 5*supervised_loss+affinityenergyloss*args.kd_weights+affinity_loss+consistency_weight*consistency_loss#+affinity_loss#+affinityenergyLoss*args.kd_weights + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
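# ---------------------------------------------------------------------------
# Editorial note (not part of the original diff): get_current_consistency_weight
# above scales args.consistency by a sigmoid-shaped ramp-up, which
# utils.ramps.sigmoid_rampup is commonly implemented as
# w(t) = exp(-5 * (1 - t / rampup_length)^2) (the ramp-up used by the
# Pi-model / Mean Teacher papers). A standalone sketch under that assumption;
# the names below are illustrative, not the repository's:
import numpy as np

def sigmoid_rampup(current: float, rampup_length: float) -> float:
    """Exponential ramp-up from ~0 to 1 over `rampup_length` steps."""
    if rampup_length == 0:
        return 1.0
    current = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - current / rampup_length
    return float(np.exp(-5.0 * phase * phase))

def consistency_weight(step: int, consistency: float = 0.5,
                       rampup_length: float = 200.0) -> float:
    """Mirrors get_current_consistency_weight(iter_num // 300) above."""
    return consistency * sigmoid_rampup(step, rampup_length)

# e.g. consistency_weight(0) ~= 0.003 and consistency_weight(200) == 0.5,
# so the unsupervised consistency term is phased in gradually.
# ---------------------------------------------------------------------------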
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_12.py b/code/train_Trans_teacher_12.py new file mode 100644 index 0000000..c06af8e --- /dev/null +++ b/code/train_Trans_teacher_12.py @@ -0,0 +1,431 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math + + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', 
type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred,att,out_feats= 
model(volume_batch) + # outputs_unlabeled,_,_,_= model(unlabeled_volume_batch) + + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft, label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + # loss=loss_ce + with torch.no_grad(): + ema_output,ema_attpred,_,_ = ema_model(ema_inputs) + + ema_outputs_unlabeled_soft = torch.softmax(ema_output[args.labeled_bs:,...], dim=1) + ema_outputs_seg_soft = torch.softmax(ema_output[:args.labeled_bs,...], dim=1) + + # #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + label_batch[label_batch < 0] = 0 + #aff_loss + outs = [] + outs.append(out_feats[0][:args.labeled_bs,...]) + outs.append(out_feats[1][:args.labeled_bs,...]) + outs.append(out_feats[2][:args.labeled_bs,...]) + outs.append(out_feats[3][:args.labeled_bs,...]) + + + affinityenergyloss = affinityenergyLoss(outs, att, unlabeled_RoIs,label_batch) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + loss = 5*supervised_loss+affinityenergyloss*args.kd_weights+affinity_loss+consistency_weight*consistency_loss#+affinity_loss#+affinityenergyLoss*args.kd_weights + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
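# ---------------------------------------------------------------------------
# Editorial note (not part of the original diff): with --lr_scheduler
# warmupCosine the script above builds a LambdaLR whose multiplier rises
# linearly over the first ~10% of `max_epoch` and then follows half a cosine
# down to 0. Note that scheduler_lr.step() is called once per iteration inside
# the batch loop, so the lambda's "epoch" argument is effectively advanced per
# batch rather than per epoch. A minimal runnable sketch of the same schedule
# (dummy parameter/optimizer; `make_warmup_cosine` is an illustrative name):
import math
import torch

def make_warmup_cosine(optimizer: torch.optim.Optimizer,
                       total_steps: int, warmup_steps: int):
    """Linear warm-up followed by cosine decay, as a LambdaLR."""
    def lr_lambda(step: int) -> float:
        if step < warmup_steps:
            return (step + 1) / warmup_steps
        progress = (step - warmup_steps) / (total_steps - warmup_steps)
        return 0.5 * (math.cos(progress * math.pi) + 1.0)
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

# _opt = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=0.01)
# _sched = make_warmup_cosine(_opt, total_steps=1000, warmup_steps=100)
# for _ in range(1000):
#     _opt.step()
#     _sched.step()
# ---------------------------------------------------------------------------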
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_13.py b/code/train_Trans_teacher_13.py new file mode 100644 index 0000000..2ad9366 --- /dev/null +++ b/code/train_Trans_teacher_13.py @@ -0,0 +1,478 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math + + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') 
+parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred,att,out_feats,class_seg= 
model(volume_batch) + # outputs_unlabeled,_,_,_= model(unlabeled_volume_batch) + + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft, label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + # loss=loss_ce + with torch.no_grad(): + ema_output,ema_attpred,_,_,calss_ema = ema_model(ema_inputs) + + ema_outputs_unlabeled_soft = torch.softmax(ema_output[args.labeled_bs:,...], dim=1) + ema_outputs_seg_soft = torch.softmax(ema_output[:args.labeled_bs,...], dim=1) + + # #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + label_batch[label_batch < 0] = 0 + #aff_loss + outs = [] + outs.append(out_feats[0][:args.labeled_bs,...]) + outs.append(out_feats[1][:args.labeled_bs,...]) + outs.append(out_feats[2][:args.labeled_bs,...]) + outs.append(out_feats[3][:args.labeled_bs,...]) + + + affinityenergyloss = affinityenergyLoss(outs, att, unlabeled_RoIs,label_batch) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + + # cosine similarity loss + create_center_1_bg = class_seg[:,0,...].unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = class_seg[:,1,...].unsqueeze(1) + create_center_1_b = class_seg[:,2,...].unsqueeze(1) + create_center_1_c = class_seg[:,3,...].unsqueeze(1) + + + + create_center_2_bg = calss_ema[:,0,...].unsqueeze(1) + create_center_2_a = calss_ema[:,1,...].unsqueeze(1) + create_center_2_b = calss_ema[:,2,...].unsqueeze(1) + create_center_2_c = calss_ema[:,3,...].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = 
torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + + + loss = 5*supervised_loss+affinityenergyloss*args.kd_weights+affinity_loss+consistency_weight*consistency_loss+loss_contrast + #+affinity_loss#+affinityenergyLoss*args.kd_weights + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
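# ---------------------------------------------------------------------------
# Editorial note (not part of the original diff): the teacher network in these
# scripts is maintained as an exponential moving average (EMA) of the student,
# theta_ema <- alpha * theta_ema + (1 - alpha) * theta, with alpha ramped from
# 0 towards args.ema_decay over the first steps. A standalone sketch of the
# same update using the non-deprecated keyword form of add_ (the original
# files use the older positional form add_(1 - alpha, param.data)):
import torch
import torch.nn as nn

@torch.no_grad()
def update_ema(student: nn.Module, teacher: nn.Module,
               decay: float, global_step: int) -> None:
    """EMA update of teacher parameters from the student's parameters."""
    alpha = min(1.0 - 1.0 / (global_step + 1), decay)
    for ema_p, p in zip(teacher.parameters(), student.parameters()):
        ema_p.mul_(alpha).add_(p, alpha=1.0 - alpha)

# _student, _teacher = nn.Linear(4, 2), nn.Linear(4, 2)
# _teacher.load_state_dict(_student.state_dict())
# update_ema(_student, _teacher, decay=0.99, global_step=0)
# ---------------------------------------------------------------------------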
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_14.py b/code/train_Trans_teacher_14.py new file mode 100644 index 0000000..f7972f8 --- /dev/null +++ b/code/train_Trans_teacher_14.py @@ -0,0 +1,442 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math + + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') 
+parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:6' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred,att,out_feats 
=model(volume_batch) + # outputs_unlabeled,_,_,_= model(unlabeled_volume_batch) + + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft, label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + # loss=loss_ce + # with torch.no_grad(): + # ema_output,ema_attpred,_,_,calss_ema = ema_model(ema_inputs) + + # ema_outputs_unlabeled_soft = torch.softmax(ema_output[args.labeled_bs:,...], dim=1) + # ema_outputs_seg_soft = torch.softmax(ema_output[:args.labeled_bs,...], dim=1) + # + # + out_seg_mlp=F.interpolate(out_feats[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=False) + # #consistency loss + consistency_loss = torch.mean((outputs_unlabeled_soft - out_seg_mlp[args.labeled_bs:,...]) ** 2) + consistency_weight = get_current_consistency_weight(iter_num // 300) + + + # if iter_num < 1000: + # consistency_loss = 0.0 + # else: + # consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + label_batch[label_batch < 0] = 0 + #aff_loss + outs = [] + outs.append(out_feats[0][:args.labeled_bs,...]) + outs.append(out_feats[1][:args.labeled_bs,...]) + outs.append(out_feats[2][:args.labeled_bs,...]) + outs.append(out_feats[3][:args.labeled_bs,...]) + + + affinityenergyloss = affinityenergyLoss(outs, att, unlabeled_RoIs,label_batch) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + + # cosine similarity loss + + + + loss = 5*supervised_loss+affinityenergyloss*args.kd_weights+affinity_loss+consistency_weight*consistency_loss + #+affinity_loss#+affinityenergyLoss*args.kd_weights + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
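+
+# Reference sketch of the consistency-weight schedule: get_current_consistency_weight()
+# above scales args.consistency by ramps.sigmoid_rampup(epoch, rampup_length). The
+# ramp-up is assumed to follow Laine & Aila (https://arxiv.org/abs/1610.02242), i.e.
+# exp(-5 * (1 - t)^2) with t clipped to [0, 1]; the authoritative implementation lives
+# in utils/ramps.py and may differ in detail. Illustrative helper only.
+def _sigmoid_rampup_reference(current, rampup_length):
+    if rampup_length == 0:
+        return 1.0
+    current = np.clip(current, 0.0, rampup_length)
+    phase = 1.0 - current / rampup_length
+    return float(np.exp(-5.0 * phase * phase))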
+
+def backup_code(base_dir):
+    ### back up the current training script together with the dataset and network code files
+    code_path = os.path.join(base_dir, 'code')
+    if not os.path.exists(code_path):
+        os.makedirs(code_path)
+    train_name = os.path.basename(__file__)
+    dataset_name = 'dataset_semi.py'
+    # dataset_name2 = 'dataset_semi_weak_newnew_20.py'
+    net_name1 = 'mix_transformer.py'
+    net_name2 = 'net_factory.py'
+    net_name3 = 'vision_transformer.py'
+    net_name4 = 'head.py'
+
+    shutil.copy('networks/' + net_name1, code_path + '/' + net_name1)
+    shutil.copy('networks/' + net_name2, code_path + '/' + net_name2)
+    shutil.copy('networks/' + net_name3, code_path + '/' + net_name3)
+    shutil.copy('networks/' + net_name4, code_path + '/' + net_name4)
+    shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name)
+    shutil.copy(train_name, code_path + '/' + train_name)
+
+if __name__ == "__main__":
+    if not args.deterministic:
+        cudnn.benchmark = True
+        cudnn.deterministic = False
+    else:
+        cudnn.benchmark = False
+        cudnn.deterministic = True
+
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    torch.cuda.manual_seed(args.seed)
+
+    snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now())
+    if not os.path.exists(snapshot_path):
+        os.makedirs(snapshot_path)
+    backup_code(snapshot_path)
+
+    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
+                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
+    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+    logging.info(str(args))
+    train(args, snapshot_path)
diff --git a/code/train_Trans_teacher_15.py b/code/train_Trans_teacher_15.py
new file mode 100644
index 0000000..4d27467
--- /dev/null
+++ b/code/train_Trans_teacher_15.py
@@ -0,0 +1,443 @@
+import argparse
+import logging
+import os
+import random
+import shutil
+import sys
+import time
+from itertools import cycle
+
+import numpy as np
+import torch
+import torch.backends.cudnn as cudnn
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from tensorboardX import SummaryWriter
+from torch.nn import BCEWithLogitsLoss
+from torch.nn.modules.loss import CrossEntropyLoss
+from torch.utils.data import DataLoader
+from torchvision import transforms,ops
+from torchvision.utils import make_grid
+from tqdm import tqdm
+import datetime
+from dataloaders import utils
+from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler)
+from networks.discriminator import FCDiscriminator
+from networks.net_factory import net_factory
+from utils import losses, metrics, ramps
+from val_2D import test_single_volume2
+from networks.vision_transformer import SwinUnet as ViT_seg
+from config import get_config
+from torch.nn import CosineSimilarity
+from torch.utils.data.distributed import DistributedSampler
+import math
+
+
+"""Select GPU ID"""
+# gpu_list = [1,2] #[0,1]
+# gpu_list_str = ','.join(map(str, gpu_list))
+# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str)
+# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+from utils.gate_crf_loss import ModelLossSemsegGatedCRF
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name')
+parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler')
+
+parser.add_argument('--root_path', type=str,
+                    default='/mnt/sdd/tb/data/ACDC', help='path of the dataset')
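+# Note on batch composition: the labeled and unlabeled DataLoaders in train() are each
+# built with batch_size//2 samples and their batches are concatenated along dim 0
+# (labeled slices first), so the slicing by labeled_bs assumes
+# labeled_bs == batch_size // 2 (see --batch_size and --labeled_bs below).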
+parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:5' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred,att,out_feats 
=model(volume_batch) + # outputs_unlabeled,_,_,_= model(unlabeled_volume_batch) + + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + + # loss=loss_ce + # with torch.no_grad(): + # ema_output,ema_attpred,_,_,calss_ema = ema_model(ema_inputs) + + # ema_outputs_unlabeled_soft = torch.softmax(ema_output[args.labeled_bs:,...], dim=1) + # ema_outputs_seg_soft = torch.softmax(ema_output[:args.labeled_bs,...], dim=1) + # + # + out_seg_mlp=F.interpolate(out_feats[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=False) + # #consistency loss + consistency_loss = torch.mean((outputs_unlabeled_soft - out_seg_mlp[args.labeled_bs:,...]) ** 2) + consistency_weight = get_current_consistency_weight(iter_num // 300) + + + # if iter_num < 1000: + # consistency_loss = 0.0 + # else: + # consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + label_batch[label_batch < 0] = 0 + #aff_loss + outs = [] + outs.append(out_feats[0][:args.labeled_bs,...]) + outs.append(out_feats[1][:args.labeled_bs,...]) + outs.append(out_feats[2][:args.labeled_bs,...]) + outs.append(out_feats[3][:args.labeled_bs,...]) + + + affinityenergyloss,pseudo_label = affinityenergyLoss(outs, att, unlabeled_RoIs,label_batch) + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],pseudo_label) + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], pseudo_label[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + # cosine similarity loss + + + + loss = 8*supervised_loss+affinityenergyloss*args.kd_weights+affinity_loss+consistency_weight*consistency_loss + #+affinity_loss#+affinityenergyLoss*args.kd_weights + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
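+
+# Reference sketch of the 'warmupCosine' multiplier handed to LambdaLR above: a linear
+# warm-up over warm_up_epochs followed by a half-cosine decay towards 0 at max_epoch.
+# Note that scheduler_lr.step() is called once per iteration inside the batch loop, so
+# the 'epoch' argument effectively counts iterations. Illustrative helper only; the
+# lambda defined inside train() is what the optimizer actually uses.
+def _warmup_cosine_multiplier_reference(epoch, warm_up_epochs, max_epoch):
+    if epoch < warm_up_epochs:
+        return (epoch + 1) / warm_up_epochs
+    return 0.5 * (math.cos((epoch - warm_up_epochs) / (max_epoch - warm_up_epochs) * math.pi) + 1)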
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_16.py b/code/train_Trans_teacher_16.py new file mode 100644 index 0000000..ee4619a --- /dev/null +++ b/code/train_Trans_teacher_16.py @@ -0,0 +1,476 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', 
help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,out_feats,attpred,att 
=model(volume_batch) + + outputs_unlabeled_soft = torch.softmax(outputs[0][args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[0][:args.labeled_bs,...], dim=1) + + outputs_unlabeled_soft_mlp = torch.softmax(out_feats[0][args.labeled_bs:,...], dim=1) + outputs_seg_soft_mlp = torch.softmax(out_feats[0][:args.labeled_bs,...], dim=1) + + #TODO: pCE loss + loss_ce = ce_loss(outputs[0][:args.labeled_bs,...], label_batch[:].long()) + + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch[:args.labeled_bs,...], scale_factor=scale_factor, mode='bilinear', align_corners=True) + # att2=model(img2,aux=True) + # # outputs_seg_soft_ER = torch.softmax(outputs_ER, dim=1) + # att1 = F.interpolate(att[3][1], scale_factor=scale_factor, mode='bilinear', align_corners=True) + # att2 = F.interpolate(att2, size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean(torch.abs(att1[:args.labeled_bs,...] - att2)) + + + with torch.no_grad(): + ema_output,_,_,_ = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output[0][args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 500: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + #consistency loss + # outputs_unlabeled_soft1 = torch.softmax(outputs[1][args.labeled_bs:,...], dim=1) + # outputs_unlabeled_soft2 = torch.softmax(outputs[2][args.labeled_bs:,...], dim=1) + + # outputs_unlabeled_soft_mlp1 = torch.softmax(out_feats[2][args.labeled_bs:,...], dim=1) + # outputs_unlabeled_soft_mlp2 = torch.softmax(out_feats[3][args.labeled_bs:,...], dim=1) + + # consistency_loss = torch.mean((outputs_unlabeled_soft - outputs_unlabeled_soft_mlp) ** 2) + # consistency_loss1 = torch.mean((outputs_unlabeled_soft1 - outputs_unlabeled_soft_mlp1) ** 2) + # consistency_loss2 = torch.mean((outputs_unlabeled_soft2 - outputs_unlabeled_soft_mlp2) ** 2) + # consistency_loss=consistency_loss#+consistency_loss1+consistency_loss2 + + # consistency_weight = get_current_consistency_weight(iter_num // 300) + + + # if iter_num < 1000: + # consistency_loss = 0.0 + # else: + # consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + label_batch[label_batch < 0] = 0 + #aff_loss + outs = [] + outs.append(out_feats[0][:args.labeled_bs,...]) + outs.append(out_feats[1][:args.labeled_bs,...]) + outs.append(out_feats[2][:args.labeled_bs,...]) + outs.append(out_feats[3][:args.labeled_bs,...]) + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outs, att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label, ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(outputs[0][:args.labeled_bs,...], pseudo_label[:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label.unsqueeze(1)) + + + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + # cosine similarity loss + + + + loss = 
5*supervised_loss+affinity_loss+local_affinity_loss*args.kd_weights+consistency_weight*consistency_loss# +loss_er + #+affinity_loss#+affinityenergyLoss*args.kd_weights + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
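+
+# Reference sketch of the mean-teacher EMA update performed by update_ema_variables()
+# above, written with the keyword form of Tensor.add_ (the script itself uses the
+# legacy positional add_(1 - alpha, param.data) signature). Illustrative only; the
+# behaviour is the same: ema = alpha * ema + (1 - alpha) * student.
+@torch.no_grad()
+def _ema_update_reference(model, ema_model, alpha, global_step):
+    # Use the true average until the exponential average is more correct.
+    alpha = min(1 - 1 / (global_step + 1), alpha)
+    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
+        ema_param.mul_(alpha).add_(param, alpha=1 - alpha)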
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_17.py b/code/train_Trans_teacher_17.py new file mode 100644 index 0000000..a44e72a --- /dev/null +++ b/code/train_Trans_teacher_17.py @@ -0,0 +1,452 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') 
+parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold5', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:5' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + labeled_bs = args.labeled_bs + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred,att =model(volume_batch) + + # 
outputs_unlabeled_soft = torch.softmax(outputs[0][args.labeled_bs:,...], dim=1)
+            # outputs_seg_soft = torch.softmax(outputs[0][:args.labeled_bs,...], dim=1)
+
+            # outputs_unlabeled_soft_mlp = torch.softmax(out_feats[0][args.labeled_bs:,...], dim=1)
+            # outputs_seg_soft_mlp = torch.softmax(out_feats[0][:args.labeled_bs,...], dim=1)
+            outputs_pred = F.interpolate(outputs[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=True)
+
+            #TODO: pCE loss
+            loss_ce = ce_loss(outputs_pred[:args.labeled_bs,...], label_batch[:].long())
+
+
+            #TODO: Equivariant Regularization Loss
+            # scale_factor=0.3
+            # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True)
+            # _,_,att2=model(img2)
+
+            # att1=att[3][1][:args.labeled_bs,...]
+            # att2=att2[3][1][:args.labeled_bs,...]
+            # att1 = F.interpolate(att1, scale_factor=scale_factor, mode='bilinear', align_corners=True)
+            # att2 = F.interpolate(att2, size=img2.shape[2:], mode='bilinear', align_corners=True)
+
+
+            # loss_er = torch.mean(torch.abs(att1 - att2))
+
+
+            # with torch.cuda.amp.autocast():
+            # # -2: padded pixels; -1: unlabeled pixels (roughly 60%-70% of the pixels carry no annotation)
+            unlabeled_RoIs = (label_batch == 4)
+            unlabeled_RoIs = unlabeled_RoIs.type(torch.FloatTensor).to(device)
+            label_batch[label_batch < 0] = 0
+            # affinity loss
+            # outs = []
+            # outs.append(outputs[0]) #[:args.labeled_bs,...]
+            # outs.append(outputs[1])
+            # outs.append(outputs[2])
+            # outs.append(outputs[3])
+
+
+            local_affinity_loss, pseudo_label = aff_2_pseudo_label(outputs, att, unlabeled_RoIs, label_batch)
+
+            pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False)
+            ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4)
+
+            affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...], ref_label)
+
+            # loss_ce_wr = ce_loss(outputs[0][:args.labeled_bs,...], pseudo_label[:].long())
+
+            # loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label.unsqueeze(1))
+
+            unlabel_loss = ce_loss(outputs_pred[args.labeled_bs:,...],
+                                   pseudo_label[args.labeled_bs:,...][:].long()) + dice_loss(outputs_pred[args.labeled_bs:,...], pseudo_label[args.labeled_bs:,...].unsqueeze(1))
+
+            # dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1))
+            # supervised_loss = 0.5 * (loss_dice + loss_ce)
+
+            supervised_loss = loss_ce  # + 0.5 * (loss_ce_wr + loss_dice_wr)
+            consistency_weight = get_current_consistency_weight(iter_num // 150)
+            loss = 5*supervised_loss + affinity_loss + local_affinity_loss*args.kd_weights + consistency_weight*unlabel_loss
+            # +affinity_loss#+affinityenergyLoss*args.kd_weights
+
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+
+            # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
+            # for param_group in optimizer.param_groups:
+            #     param_group['lr'] = lr_
+            ## update the learning rate (the scheduler is stepped once per iteration)
+            scheduler_lr.step()
+            lr_iter = optimizer.param_groups[0]['lr']
+            lr_curve.append(lr_iter)
+
+
+            iter_num = iter_num + 1
+            writer.add_scalar('info/lr', lr_iter, iter_num)
+            writer.add_scalar('info/total_loss', loss, iter_num)
+            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
+            # writer.add_scalar('info/consistency_loss',consistency_loss, iter_num)
+            # writer.add_scalar('info/consistency_weight',consistency_weight, iter_num)
+
+            logging.info(
+                'iteration %d : loss : %f, loss_ce: %f' %
+                (iter_num, loss.item(), loss_ce.item()))
+
+            if iter_num % 20 == 0:
+                image = volume_batch[1, 0:1, :, :]
+                writer.add_image('train/Image', image, 
iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
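+
+# NOTE (illustrative sketch, not wired into this script): update_ema_variables
+# above uses the legacy in-place form Tensor.add_(scalar, tensor), which newer
+# PyTorch releases deprecate in favor of the keyword form, i.e.
+#
+#     ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
+#
+# Also note that this particular script builds ema_model and ema_inputs but
+# does not appear to run the teacher or call update_ema_variables inside its
+# training loop.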
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + val_name = 'val_2D.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + shutil.copy(val_name, code_path + '/' + val_name) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_18.py b/code/train_Trans_teacher_18.py new file mode 100644 index 0000000..26dc912 --- /dev/null +++ b/code/train_Trans_teacher_18.py @@ -0,0 +1,436 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() 
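+# NOTE: the training loop below concatenates a labeled half-batch
+# (batch_size // 2 samples) with an unlabeled half-batch and slices the result
+# with --labeled_bs, so --labeled_bs is expected to equal batch_size // 2
+# (16 with the defaults used here).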
+parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold5', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + labeled_bs = args.labeled_bs + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + outputs,attpred,att =model(volume_batch) + + 
outputs_pred=F.interpolate(outputs[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=True) + + outputs_unlabeled_soft = torch.softmax(outputs_pred[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs_pred[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(outputs_pred[:args.labeled_bs,...], label_batch[:].long()) + + + with torch.no_grad(): + outputs_ema,attpred_ema,att_ema = ema_model(ema_inputs) + outputs_pred_ema=F.interpolate(outputs_ema[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=True) + ema_outputs_unlabeled_soft = torch.softmax(outputs_pred_ema[args.labeled_bs:,...], dim=1) + ema_outputs_seg_soft = torch.softmax(outputs_pred_ema[:args.labeled_bs,...], dim=1) + + + #TODO: consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + # label_batch[label_batch < 0] = 0 + + #aff_loss + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs, att, unlabeled_RoIs,label_batch) + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + _,pseudo_label_ema = aff_2_pseudo_label(outputs_ema, att_ema, unlabeled_RoIs,label_batch) + pseudo_label_ema = torch.argmax(pseudo_label_ema.detach(), dim=1, keepdim=False) + + + unsup_loss = ce_loss(outputs_pred[args.labeled_bs:,...], + pseudo_label_ema[args.labeled_bs:,...][:].long())+dice_loss(outputs_seg_soft, pseudo_label_ema[args.labeled_bs:,...].unsqueeze(1)) + + supervised_loss=loss_ce+affinity_loss+local_affinity_loss + + + loss = 5*supervised_loss+consistency_loss*consistency_weight+unsup_loss + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + # writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + # writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
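+
+# NOTE (illustrative sketch, assuming the usual mean-teacher schedule): the
+# consistency weight above comes from ramps.sigmoid_rampup, which in most
+# mean-teacher implementations follows Laine & Aila's ramp-up, roughly
+#
+#     def sigmoid_rampup(current, rampup_length):
+#         if rampup_length == 0:
+#             return 1.0
+#         current = min(max(current, 0.0), rampup_length)
+#         phase = 1.0 - current / rampup_length
+#         return float(math.exp(-5.0 * phase * phase))
+#
+# so get_current_consistency_weight() rises smoothly from ~0 towards
+# args.consistency over args.consistency_rampup steps of its argument
+# (here it is fed iter_num // 150).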
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + val_name = 'val_2D.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + shutil.copy(val_name, code_path + '/' + val_name) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_19.py b/code/train_Trans_teacher_19.py new file mode 100644 index 0000000..6293063 --- /dev/null +++ b/code/train_Trans_teacher_19.py @@ -0,0 +1,439 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() 
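+# The arguments below configure the optimizer/LR schedule, the ACDC data fold
+# and scribble supervision, the labeled/unlabeled batch split, the
+# mean-teacher hyper-parameters (--ema_decay for the teacher EMA update,
+# --consistency and --consistency_rampup for the unlabeled consistency term),
+# and the Swin-Unet config file forwarded to get_config().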
+parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold5', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:6' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + labeled_bs = args.labeled_bs + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + outputs,attpred,att =model(volume_batch) + + 
outputs_pred=F.interpolate(outputs[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=True) + + outputs_unlabeled_soft = torch.softmax(outputs_pred[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs_pred[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(outputs_pred[:args.labeled_bs,...], label_batch[:].long()) + + + with torch.no_grad(): + outputs_ema,attpred_ema,att_ema = ema_model(ema_inputs) + outputs_pred_ema=F.interpolate(outputs_ema[0], size=volume_batch.shape[2:], mode='bilinear', align_corners=True) + ema_outputs_unlabeled_soft = torch.softmax(outputs_pred_ema[args.labeled_bs:,...], dim=1) + ema_outputs_seg_soft = torch.softmax(outputs_pred_ema[:args.labeled_bs,...], dim=1) + + + #TODO: consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_outputs_unlabeled_soft) ** 2) + + # with torch.cuda.amp.autocast(): + # # -2: padded pixels; -1: unlabeled pixels (其中60%-70%是没有标注信息的) + unlabeled_RoIs = (label_batch == 4) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + # label_batch[label_batch < 0] = 0 + + #aff_loss + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs, att, unlabeled_RoIs,label_batch) + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + _,pseudo_label_ema = aff_2_pseudo_label(outputs_ema, att_ema, unlabeled_RoIs,label_batch) + pseudo_label_ema = torch.argmax(pseudo_label_ema.detach(), dim=1, keepdim=False) + + loss_ce_wr = ce_loss(outputs_pred[:args.labeled_bs,...], pseudo_label_ema[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label_ema[:args.labeled_bs,...].unsqueeze(1)) + + unsup_loss = ce_loss(outputs_pred[args.labeled_bs:,...], + pseudo_label_ema[args.labeled_bs:,...][:].long())+dice_loss(outputs_seg_soft, pseudo_label_ema[args.labeled_bs:,...].unsqueeze(1)) + + supervised_loss=loss_ce+0.5*(loss_ce_wr+loss_dice_wr) + + + loss = 5*supervised_loss+consistency_loss*consistency_weight+affinity_loss+local_affinity_loss*args.kd_weights+unsup_loss + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + # writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + # writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
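+
+# NOTE on the LR schedule above: scheduler_lr.step() is called once per
+# iteration inside the inner loop, while warm_up_epochs and max_epoch are
+# computed in epochs, so the LambdaLR multiplier is effectively driven by an
+# iteration counter (and once that counter passes max_epoch the cosine factor
+# starts rising again). A sketch of the warmupCosine factor, assuming
+# per-epoch stepping:
+#
+#     def warmup_cosine_factor(epoch, warm_up_epochs, max_epoch):
+#         if epoch < warm_up_epochs:
+#             return (epoch + 1) / warm_up_epochs          # linear warm-up
+#         progress = (epoch - warm_up_epochs) / (max_epoch - warm_up_epochs)
+#         return 0.5 * (math.cos(progress * math.pi) + 1)  # cosine decay to 0
+#
+# If per-epoch behavior is intended, step() would need to move to the end of
+# the epoch loop (or the milestones be expressed in iterations).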
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + val_name = 'val_2D.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + shutil.copy(val_name, code_path + '/' + val_name) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_2.py b/code/train_Trans_teacher_2.py new file mode 100644 index 0000000..b9984bf --- /dev/null +++ b/code/train_Trans_teacher_2.py @@ -0,0 +1,436 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/yd2tb/data/ACDC', 
help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=24, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=2022, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=12, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model = create_model() + ema_model = create_model(ema=True) + + model=model.to(device) + ema_model=ema_model.to(device) + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, 
num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + + optimizer = optim.SGD(model.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + with torch.no_grad(): + ema_output,ema_calss,ema_attpred = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr + 
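+            # Consistency branch: the student/teacher softmax predictions on the
+            # unlabeled half-batch are compared with an MSE term whose weight is
+            # ramped up by get_current_consistency_weight (stepped every 300
+            # iterations here); the term is disabled for the first 1000 iterations
+            # while the student warms up.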
+ + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + # cosine similarity loss + create_center_1_bg = calss[0].unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss[1].unsqueeze(1) + create_center_1_b = calss[2].unsqueeze(1) + create_center_1_c = calss[3].unsqueeze(1) + + + + create_center_2_bg = ema_calss[0].unsqueeze(1) + create_center_2_a = ema_calss[1].unsqueeze(1) + create_center_2_b = ema_calss[2].unsqueeze(1) + create_center_2_c = ema_calss[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + loss = supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss + optimizer.zero_grad() + + # loss.backward(retain_graph=True) + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + 
writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
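+
+# Illustrative sketch (not called anywhere in this script) of the mean-teacher
+# schedules used above. update_ema_variables() clamps the EMA decay to
+# 1 - 1/(step + 1), so the teacher starts as a near-copy of the student and
+# only later settles at args.ema_decay. The consistency weight assumes that
+# utils.ramps.sigmoid_rampup follows the usual mean-teacher form
+# exp(-5 * (1 - t)**2); the 0.5 / 200.0 defaults below are taken from the
+# companion scripts in this patch, not from this file, so treat them as
+# placeholders.
+def _sketch_ema_alpha(global_step, alpha=0.99):
+    # decay actually applied at a given step by update_ema_variables()
+    return min(1 - 1 / (global_step + 1), alpha)
+
+def _sketch_consistency_weight(iter_num, consistency=0.5, rampup=200.0):
+    import math  # local import, sketch only
+    # mirrors get_current_consistency_weight(iter_num // 300) used in the loop
+    phase = 1.0 - min(iter_num // 300, rampup) / rampup
+    return consistency * math.exp(-5.0 * phase * phase)
+
+# e.g. _sketch_ema_alpha(0) == 0.0 (teacher == student at the first step),
+# _sketch_ema_alpha(10000) ~= 0.99; _sketch_consistency_weight(0) ~= 0.0034 and
+# it saturates at 0.5 once iter_num // 300 reaches the ramp-up length.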
+
+def backup_code(base_dir):
+    ### Back up the current training script, the dataset code and the network code
+    code_path = os.path.join(base_dir, 'code')
+    if not os.path.exists(code_path):
+        os.makedirs(code_path)
+    train_name = os.path.basename(__file__)
+    dataset_name = 'dataset_semi.py'
+    # dataset_name2 = 'dataset_semi_weak_newnew_20.py'
+    net_name1 = 'mix_transformer.py'
+    net_name2 = 'net_factory.py'
+    net_name3 = 'vision_transformer.py'
+    shutil.copy('networks/' + net_name1, code_path + '/' + net_name1)
+    shutil.copy('networks/' + net_name2, code_path + '/' + net_name2)
+    shutil.copy('networks/' + net_name3, code_path + '/' + net_name3)
+    shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name)
+    # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2)
+    shutil.copy(train_name, code_path + '/' + train_name)
+
+if __name__ == "__main__":
+    if not args.deterministic:
+        cudnn.benchmark = True
+        cudnn.deterministic = False
+    else:
+        cudnn.benchmark = False
+        cudnn.deterministic = True
+
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    torch.cuda.manual_seed(args.seed)
+
+    snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type, datetime.datetime.now())
+    if not os.path.exists(snapshot_path):
+        os.makedirs(snapshot_path)
+    backup_code(snapshot_path)
+
+    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
+                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
+    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+    logging.info(str(args))
+    train(args, snapshot_path)
diff --git a/code/train_Trans_teacher_20.py b/code/train_Trans_teacher_20.py
new file mode 100644
index 0000000..12c80c3
--- /dev/null
+++ b/code/train_Trans_teacher_20.py
@@ -0,0 +1,442 @@
+import argparse
+import logging
+import os
+import random
+import shutil
+import sys
+import time
+from itertools import cycle
+
+import numpy as np
+import torch
+import torch.backends.cudnn as cudnn
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from tensorboardX import SummaryWriter
+from torch.nn import BCEWithLogitsLoss
+from torch.nn.modules.loss import CrossEntropyLoss
+from torch.utils.data import DataLoader
+from torchvision import transforms,ops
+from torchvision.utils import make_grid
+from tqdm import tqdm
+import datetime
+from dataloaders import utils
+from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler)
+from networks.discriminator import FCDiscriminator
+from networks.net_factory import net_factory
+from utils import losses, metrics, ramps,util
+from val_2D import test_single_volume2
+from networks.vision_transformer import SwinUnet as ViT_seg
+from config import get_config
+from torch.nn import CosineSimilarity
+from torch.utils.data.distributed import DistributedSampler
+import math
+from utils.util import cams_to_refine_label
+
+"""Select the GPU ID"""
+# gpu_list = [1,2] #[0,1]
+# gpu_list_str = ','.join(map(str, gpu_list))
+# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str)
+# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+from utils.gate_crf_loss import ModelLossSemsegGatedCRF
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--optim_name', type=str, default='adam', help='optimizer name')
+parser.add_argument('--lr_scheduler', type=str, default='warmupCosine', help='lr scheduler')
+
+parser.add_argument('--root_path', type=str,
+                    default='/mnt/sdd/tb/data/ACDC', help='Name of
Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold2', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + 
seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 200: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + variance_main = torch.sum(kl_distance(torch.log(attpred[args.labeled_bs:]), + ema_attpred[args.labeled_bs:]), dim=1, keepdim=True) + exp_variance_main = torch.exp(-variance_main) + + consistency_dist_main = ( + attpred[args.labeled_bs:] - ema_attpred[args.labeled_bs:]) ** 2 + consistency_loss_main = torch.mean( + consistency_dist_main * exp_variance_main) / (torch.mean(exp_variance_main) + 1e-8) + torch.mean(variance_main) + supervised_loss=loss_ce + + loss = 5*supervised_loss+ 0.5 * (loss_ce_wr + loss_dice_wr)+3*affinity_loss+local_affinity_loss*args.kd_weights+consistency_weight*consistency_loss+consistency_weight*consistency_loss_main #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, 
iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
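+
+# Illustrative sketch (standalone, not used by the script) of the LambdaLR
+# multiplier behind the 'warmupCosine' branch above: linear warm-up for the
+# first warm-up steps, then a half-cosine decay towards zero. Note that
+# scheduler_lr.step() is called inside the inner batch loop, i.e. once per
+# iteration, so the lambda's "epoch" argument effectively counts scheduler
+# steps rather than epochs. warm_up_steps / total_steps below are placeholders.
+def _sketch_warmup_cosine(step, warm_up_steps=10, total_steps=100):
+    import math  # local import, sketch only
+    if step < warm_up_steps:
+        return (step + 1) / warm_up_steps
+    return 0.5 * (math.cos((step - warm_up_steps) / (total_steps - warm_up_steps) * math.pi) + 1)
+
+# e.g. _sketch_warmup_cosine(0) == 0.1 (10% of base_lr),
+# _sketch_warmup_cosine(9) == 1.0 (end of warm-up),
+# _sketch_warmup_cosine(55) == 0.5 (halfway through the cosine),
+# _sketch_warmup_cosine(99) ~= 0.0 (end of the schedule).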
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_21.py b/code/train_Trans_teacher_21.py new file mode 100644 index 0000000..aec1ee9 --- /dev/null +++ b/code/train_Trans_teacher_21.py @@ -0,0 +1,507 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2,test_single_volume_7 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', 
help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + 
seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + # if iter_num < 200: + # consistency_loss = 0.0 + # else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch,ema_att) + + + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + + + loss = loss_kd+5*supervised_loss+3*affinity_loss+local_affinity_loss+consistency_weight*consistency_loss #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, 
loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
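+
+# Illustrative sketch (standalone, not called here) of the box-level
+# distillation used above: the student's full-resolution predictions are
+# pooled with torchvision.ops.roi_align, using boxes in the
+# [batch_idx, x1, y1, x2, y2] format, and matched against the teacher's
+# prediction on the cropped patches with an MSE loss. All shapes below are
+# placeholders; the actual script pools from `seg` with spatial_scale=1/8.0.
+def _sketch_roi_distillation():
+    import torch
+    import torch.nn.functional as F
+    from torchvision import ops
+    student_logits = torch.randn(2, 4, 256, 256)            # (N, C, H, W)
+    teacher_crop_prob = torch.softmax(torch.randn(2, 4, 56, 56), dim=1)
+    boxes = torch.tensor([[0., 10., 10., 200., 200.],       # [batch_idx, x1, y1, x2, y2]
+                          [1., 32., 32., 160., 160.]])
+    pooled = ops.roi_align(student_logits, boxes, output_size=(56, 56),
+                           spatial_scale=1.0)
+    return F.mse_loss(torch.softmax(pooled, dim=1), teacher_crop_prob)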
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdc/tianbiao/work_dirs_baseline/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + # backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) + + +# num_classes=4 + +# save_best_model='/mnt/sdd/tb/work_dirs/model_ours/ACDC_Semi/Mean_Teacher_fold1/scribble-2023-03-28 16:21:28.002836/iter_24800_dice_0.8175.pth' +# logging.info('============= Start Test ==============') +# device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') +# model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) +# # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) +# model.load_state_dict(torch.load(save_best_model)) +# print("init weight from {}".format(save_best_model)) +# model.eval() + +# db_val = BaseDataSets(base_dir=args.root_path,fold=args.fold, split="val", ) +# valloader = DataLoader(db_val, batch_size=1, shuffle=False,num_workers=1) + + +# logging.info("{} iterations per epoch".format(len(valloader))) +# model=model.to(device) +# metric_list = 0.0 +# for i_batch, sampled_batch in enumerate(valloader): +# metric_i = test_single_volume_7( +# sampled_batch["image"], sampled_batch["label"], model, classes=num_classes,device=device) +# metric_list += np.array(metric_i) +# print("metric_list:",metric_list) +# metric_list = metric_list / len(db_val) + + +# performance_test = np.mean(metric_list, axis=0)[0] +# mean_hd95_test = np.mean(metric_list, axis=0)[1] +# ppv = np.mean(metric_list, axis=0)[2] +# sen = np.mean(metric_list, axis=0)[3] +# iou = np.mean(metric_list, axis=0)[4] +# biou = np.mean(metric_list, axis=0)[5] +# asd = np.mean(metric_list, axis=0)[7] + + +# # # dice, hd95,sen,iou,asd +# # #dice, hd95, ppv, sen, iou, boundary_iou, hd + +# logging.info("Mean dice on all patients:{:.4f} ".format(performance_test)) +# logging.info("Mean hd95 on all patients:{:.4f} ".format(mean_hd95_test)) +# 
logging.info("Mean IOU on all patients:{:.4f} ".format(iou)) +# logging.info("Mean PPV on all patients:{:.4f} ".format(ppv)) +# logging.info("Mean SEN on all patients:{:.4f} ".format(sen)) +# logging.info("Mean biou on all patients:{:.4f} ".format(biou)) +# logging.info("Mean asd on all patients:{:.4f} ".format(asd)) + +# os.rename(snapshot_path,snapshot_path+"_DSC_"+str(performance_test)[2:6]+"_SEN_"+str(sen)[2:6]+"_DH95_"+str(mean_hd95_test)[0:6]+"_IOU_"+str(iou)[0:6]) + diff --git a/code/train_Trans_teacher_21_ab1.py b/code/train_Trans_teacher_21_ab1.py new file mode 100644 index 0000000..effc77f --- /dev/null +++ b/code/train_Trans_teacher_21_ab1.py @@ -0,0 +1,455 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, 
+ help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.7, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = 
torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): 
+ sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 200: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + + + loss = loss_kd+5*supervised_loss+3*affinity_loss+local_affinity_loss+consistency_weight*consistency_loss #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, 
args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
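+
+# Illustrative sketch (standalone) of the partial cross-entropy used throughout
+# these scripts. With CrossEntropyLoss(ignore_index=4), pixels labelled 4,
+# which the num_classes=4 / ignore_index=4 pairing suggests are the
+# unannotated regions of the scribble masks, contribute neither to the loss
+# nor to the gradients, so supervision flows only from scribbled pixels.
+def _sketch_partial_ce():
+    import torch
+    from torch.nn.modules.loss import CrossEntropyLoss
+    logits = torch.randn(1, 4, 8, 8, requires_grad=True)   # (N, C, H, W)
+    target = torch.full((1, 8, 8), 4, dtype=torch.long)    # everything "unlabelled"
+    target[0, 2, 2] = 1                                    # a single scribbled pixel
+    loss = CrossEntropyLoss(ignore_index=4)(logits, target)
+    loss.backward()
+    # only the logits at position (2, 2) receive a non-zero gradient
+    return logits.grad.abs().sum(dim=1).nonzero()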
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_ours_ab/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_21_ab2.py b/code/train_Trans_teacher_21_ab2.py new file mode 100644 index 0000000..9878c1a --- /dev/null +++ b/code/train_Trans_teacher_21_ab2.py @@ -0,0 +1,455 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', 
help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + 
seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 200: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + + + loss = loss_kd+5*supervised_loss+3*affinity_loss+consistency_weight*consistency_loss #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image 
= volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
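# --------------------------------------------------------------------------
# Not part of the patch: a minimal, stand-alone sketch of the ROI-align
# distillation term (loss_kd) computed in the loops above, with made-up
# tensor shapes. Boxes follow torchvision's [batch_index, x1, y1, x2, y2]
# convention in input-image coordinates; spatial_scale maps them onto the
# smaller prediction map, mirroring the 1/8.0 used in the scripts.
# --------------------------------------------------------------------------
import torch
import torch.nn.functional as F
from torchvision import ops

B, C, crops_per_image = 2, 4, 4
seg = torch.randn(B, C, 32, 32)          # student logits on 256x256 unlabeled images, stride 8
crop_out = torch.softmax(torch.randn(B * crops_per_image, C, 16, 16), dim=1)  # teacher probs on the crops

# one row per crop: [batch_index, x1, y1, x2, y2]
boxes = torch.tensor([[float(i), 32.0, 32.0, 224.0, 224.0]
                      for i in range(B) for _ in range(crops_per_image)])

feat_aligned = ops.roi_align(seg, boxes, output_size=(16, 16), spatial_scale=1 / 8.0)
loss_kd = F.mse_loss(torch.softmax(feat_aligned, dim=1), crop_out)
print(feat_aligned.shape, loss_kd.item())  # torch.Size([8, 4, 16, 16])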
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_ours_ab/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_21_ab3.py b/code/train_Trans_teacher_21_ab3.py new file mode 100644 index 0000000..b3f1da6 --- /dev/null +++ b/code/train_Trans_teacher_21_ab3.py @@ -0,0 +1,455 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', 
help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + 
seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 200: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + + + loss = loss_kd+5*supervised_loss+consistency_weight*consistency_loss #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = 
volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
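# --------------------------------------------------------------------------
# Not part of the patch: update_ema_variables above uses the legacy
# Tensor.add_(scalar, tensor) calling convention. Below is a minimal
# equivalent written with the current keyword form -- a sketch for reference,
# not a drop-in change to these scripts.
# --------------------------------------------------------------------------
import torch

@torch.no_grad()
def ema_update(student: torch.nn.Module, teacher: torch.nn.Module,
               alpha: float, global_step: int) -> None:
    # Use the true average until the exponential moving average has warmed up,
    # then blend: teacher = alpha * teacher + (1 - alpha) * student.
    alpha = min(1.0 - 1.0 / (global_step + 1), alpha)
    for t_param, s_param in zip(teacher.parameters(), student.parameters()):
        t_param.mul_(alpha).add_(s_param, alpha=1.0 - alpha)

# usage mirrors the call in the training loops:
# ema_update(model, ema_model, args.ema_decay, iter_num)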
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_ours_ab/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_21_ab4.py b/code/train_Trans_teacher_21_ab4.py new file mode 100644 index 0000000..c54b479 --- /dev/null +++ b/code/train_Trans_teacher_21_ab4.py @@ -0,0 +1,455 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', 
help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = 
BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + 
seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 200: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + + + loss = 5*supervised_loss+consistency_weight*consistency_loss #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, 
:, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
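# --------------------------------------------------------------------------
# Not part of the patch: the ablation scripts in this diff differ only in how
# the total loss is assembled; everything else is identical. A compact
# restatement of their `loss = ...` lines (variant names are the file
# suffixes, with "base" standing in for the first script shown above):
# --------------------------------------------------------------------------
def total_loss(loss_kd, supervised_loss, affinity_loss, local_affinity_loss,
               consistency_weight, consistency_loss, variant="base"):
    loss = 5 * supervised_loss + consistency_weight * consistency_loss
    if variant in ("base", "ab2", "ab3"):   # ab4 drops the ROI-align distillation term
        loss = loss + loss_kd
    if variant in ("base", "ab2"):          # ab3 additionally drops the affinity loss
        loss = loss + 3 * affinity_loss
    if variant == "base":                   # ab2 additionally drops the local affinity loss
        loss = loss + local_affinity_loss
    return loss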
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_ours_ab/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_21_mscmr.py b/code/train_Trans_teacher_21_mscmr.py new file mode 100644 index 0000000..e3f355b --- /dev/null +++ b/code/train_Trans_teacher_21_mscmr.py @@ -0,0 +1,512 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi_mscmr_v5 import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2,test_single_volume_7 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() 
+parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--snapshot_path', type=str,default='/mnt/sdc/tianbiao/model_ours', help='Name of results') +parser.add_argument('--label_ratio', type=float, default=0.1,help='label data') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/MSCMR', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='MSCMR_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold2', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.005, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdc/tianbiao/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + # db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + # RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + + # db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + # RandomGenerator(args.patch_size)])) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),label_ratio=args.label_ratio) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled",split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + 
trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path,split="val") + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), 
label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + + seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg[:args.labeled_bs,...], dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True) + # mlp_f,attn_pred3,_attns = model(img2[args.labeled_bs:,...],aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True) + # attn_pred3 = F.interpolate(attn_pred3.unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg[args.labeled_bs:,...], dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + if iter_num < 200: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch) + + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft, pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + + + loss = loss_kd+5*supervised_loss+ 0.5 * (loss_ce_wr + loss_dice_wr)+3*affinity_loss+local_affinity_loss+consistency_weight*consistency_loss #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + 
writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + # if iter_num % 20 == 0: + # image = volume_batch[1, 0:1, :, :] + # writer.add_image('train/Image', image, iter_num) + # outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + # writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + # labs = label_batch[1, ...].unsqueeze(0) * 50 + # writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200== 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
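+# --- Illustrative sketch (not called during training) -------------------------
+# The `warmupCosine` option above builds a LambdaLR multiplier on base_lr: it rises
+# linearly from 1/warm_up_epochs to 1 during warm-up, then decays along a half
+# cosine to ~0 at max_epoch. The standalone helper below mirrors that lambda so its
+# shape can be inspected outside the training loop; the epoch counts passed in are
+# arbitrary example values, not the ones computed from the dataloader length.
+def _demo_warmup_cosine_multiplier(epoch, warm_up_epochs=10, max_epoch=100):
+    import math
+    if epoch < warm_up_epochs:
+        return (epoch + 1) / warm_up_epochs
+    return 0.5 * (math.cos((epoch - warm_up_epochs) / (max_epoch - warm_up_epochs) * math.pi) + 1)
+# e.g. _demo_warmup_cosine_multiplier(0) == 0.1, (9) == 1.0, (55) == 0.5, (99) ~= 0.0
+# ------------------------------------------------------------------------------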
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi_mscmr.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + snapshot_path = args.snapshot_path + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + + snapshot_path =os.path.join(snapshot_path, "{}/{}-{}".format(args.exp,args.sup_type,datetime.datetime.now())) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + # backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) + + +# num_classes=4 + +# save_best_model='/mnt/sdd/tb/work_dirs/model_ours/MSCMR_Semi/Mean_Teacher/scribble-2023-04-04 10:59:13.249873/iter_15800_dice_0.7385.pth' +# logging.info('============= Start Test ==============') +# device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') +# model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) +# # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) +# model.load_state_dict(torch.load(save_best_model)) +# print("init weight from {}".format(save_best_model)) +# model.eval() + +# db_val = BaseDataSets(base_dir=args.root_path,split="val") +# valloader = DataLoader(db_val, batch_size=1, shuffle=False,num_workers=1) + + +# logging.info("{} iterations per epoch".format(len(valloader))) +# model=model.to(device) +# metric_list = 0.0 +# for i_batch, sampled_batch in enumerate(valloader): +# metric_i = test_single_volume_7(sampled_batch["image"], sampled_batch["label"], model, classes=num_classes,device=device) +# metric_list += np.array(metric_i) +# print("metric_list:",metric_list) +# metric_list = metric_list / len(db_val) + + +# performance_test = np.mean(metric_list, axis=0)[0] +# mean_hd95_test = np.mean(metric_list, axis=0)[1] +# ppv = np.mean(metric_list, axis=0)[2] +# sen = np.mean(metric_list, axis=0)[3] +# iou = np.mean(metric_list, axis=0)[4] +# biou = np.mean(metric_list, axis=0)[5] +# asd = np.mean(metric_list, axis=0)[7] + + +# # # dice, hd95,sen,iou,asd +# # #dice, hd95, ppv, sen, iou, boundary_iou, hd + +# logging.info("Mean dice on all patients:{:.4f} ".format(performance_test)) +# logging.info("Mean 
hd95 on all patients:{:.4f} ".format(mean_hd95_test)) +# logging.info("Mean IOU on all patients:{:.4f} ".format(iou)) +# logging.info("Mean PPV on all patients:{:.4f} ".format(ppv)) +# logging.info("Mean SEN on all patients:{:.4f} ".format(sen)) +# logging.info("Mean biou on all patients:{:.4f} ".format(biou)) +# logging.info("Mean asd on all patients:{:.4f} ".format(asd)) + +# os.rename(snapshot_path,snapshot_path+'_label_ratio_'+str(args.label_ratio)+"_DSC_"+str(performance_test)[2:6]+"_SEN_"+str(sen)[2:6]+"_DH95_"+str(mean_hd95_test)[0:6]+"_IOU_"+str(iou)[0:6]) \ No newline at end of file diff --git a/code/train_Trans_teacher_22.py b/code/train_Trans_teacher_22.py new file mode 100644 index 0000000..28a1cc9 --- /dev/null +++ b/code/train_Trans_teacher_22.py @@ -0,0 +1,510 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle +os.environ["CUDA_VISIBLE_DEVICES"]="6" +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps,util +from val_2D import test_single_volume2,test_single_volume_7 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +import math +from utils.util import cams_to_refine_label + +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--optim_name', type=str,default='adam', help='optimizer name') +parser.add_argument('--lr_scheduler', type=str,default='warmupCosine', help='lr scheduler') + +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold5', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.03, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') 
+parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=0.8) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + 
param.detach_() + return model + + + model = create_model() + ema_model = create_model(ema=True) + + + + model=model.to(device) + ema_model =ema_model.to(device) + + num_gpus = torch.cuda.device_count() + + # model = nn.DataParallel(model) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + max_epoch = max_iterations // len(trainloader_labeled) + 1 + warm_up_epochs = int(max_epoch * 0.1) + if args.optim_name=='adam': + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + elif args.optim_name=='sgd': + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,weight_decay=0.0001) + elif args.optim_name=='adamW': + optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + # elif args.optim_name=='Radam': + # optimizer = optim2.RAdam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + # warm_up_with_multistep_lr + if args.lr_scheduler=='warmupMultistep': + lr1,lr2,lr3 = int(max_epoch*0.25) , int(max_epoch*0.4) , int(max_epoch*0.6) + lr_milestones = [lr1,lr2,lr3] + # lr1,lr2,lr3,lr4 = int(max_epoch*0.15) , int(max_epoch*0.35) , int(max_epoch*0.55) , int(max_epoch*0.7) + # lr_milestones = [lr1,lr2,lr3,lr4] + warm_up_with_multistep_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.1**len([m for m in lr_milestones if m <= epoch]) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_multistep_lr) + elif args.lr_scheduler=='warmupCosine': + # warm_up_with_cosine_lr + warm_up_with_cosine_lr = lambda epoch: (epoch+1) / warm_up_epochs if epoch < warm_up_epochs \ + else 0.5 * ( math.cos((epoch - warm_up_epochs) /(max_epoch - warm_up_epochs) * math.pi) + 1) + scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warm_up_with_cosine_lr) + elif args.lr_scheduler=='autoReduce': + scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.5, patience=6, verbose=True, cooldown=2,min_lr=0) + + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + aff_2_pseudo_label=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + kl_distance = nn.KLDivLoss(reduction='none') + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + lr_curve = list() + iter_num = 0 + + best_performance 
= 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_unlabeled['crop_images'] + boxes = sampled_batch_unlabeled['boxes'] + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + noise_crop = torch.clamp(torch.randn_like(crop_images) * 0.3, -0.5, 0.5) + crop_images = crop_images+noise_crop + + + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + seg,outputs,attpred,att =model(volume_batch,aux=False) + + outputs_unlabeled_soft = torch.softmax(seg[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(seg, dim=1) + + + #TODO: pCE loss + loss_ce = ce_loss(seg[:args.labeled_bs,...], label_batch[:].long()) + + #TODO: Equivariant Regularization Loss + # scale_factor=0.3 + # img2 = F.interpolate(volume_batch, scale_factor=scale_factor, mode='bilinear', align_corners=True,recompute_scale_factor=True) + # mlp_f,attn_pred3,_attns = model(img2,aux=True) + + # attn_pred1 = F.interpolate(attpred[args.labeled_bs:,...].unsqueeze(1), scale_factor=scale_factor, mode='bilinear', align_corners=True,recompute_scale_factor=True) + # attn_pred3 = F.interpolate(attn_pred3[args.labeled_bs:,...].unsqueeze(1), size=img2.shape[2:], mode='bilinear', align_corners=True) + # loss_er = torch.mean((attn_pred3 - attn_pred1) ** 2) + + with torch.no_grad(): + ema_seg,ema_output,ema_attpred,ema_att = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_seg, dim=1) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 150) + # if iter_num < 200: + # consistency_loss = 0.0 + # else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + unlabeled_RoIs = (label_batch == 0) + unlabeled_RoIs=unlabeled_RoIs.type(torch.FloatTensor).to(device) + + + + local_affinity_loss,pseudo_label = aff_2_pseudo_label(outputs,att, unlabeled_RoIs,label_batch,ema_att,max_iterations,iter_num) + pseudo_label = torch.argmax(pseudo_label.detach(), dim=1, keepdim=False) + ref_label = cams_to_refine_label(pseudo_label[:args.labeled_bs,...], ignore_index=4) + + affinity_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ref_label) + + loss_ce_wr = ce_loss(seg[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...][:].long()) + + loss_dice_wr= dice_loss(outputs_seg_soft[:args.labeled_bs,...], pseudo_label[:args.labeled_bs,...].unsqueeze(1)) + + supervised_loss=loss_ce + 0.5 * (loss_ce_wr + loss_dice_wr) + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + box_ind = torch.cat([torch.zeros(4).fill_(i) for i in range(bs)]) + boxes = boxes.reshape(bs * bxs, 5) + # boxes[:, 0] = box_ind + + + boxes = boxes.cuda(non_blocking=True).type_as(seg) + + crop_out,_,_,_=ema_model(crop_images) + crop_out=F.softmax(crop_out, dim=1) + n, c, 
h, w = crop_out.shape + + # roi align + feat_aligned = ops.roi_align(seg[args.labeled_bs:,...], boxes, (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, crop_out) * args.kd_weights + + # loss_kd+ + + loss = consistency_weight*loss_kd+8*supervised_loss+5*affinity_loss+local_affinity_loss+consistency_weight*consistency_loss + 0.5 * (loss_ce_wr + loss_dice_wr) #+loss_er + + optimizer.zero_grad() + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + # for param_group in optimizer.param_groups: + # param_group['lr'] = lr_ + ##更新学习率 + scheduler_lr.step() + lr_iter = optimizer.param_groups[0]['lr'] + lr_curve.append(lr_iter) + + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_iter, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_ce, iter_num) + writer.add_scalar('info/consistency_loss',consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight',consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs[0], dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction',outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
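+# --- Illustrative sketch (not called during training) -------------------------
+# The box-distillation term above relies on torchvision.ops.roi_align: `boxes` is a
+# (K, 5) tensor whose first column is the batch index into the feature map and whose
+# remaining columns are (x1, y1, x2, y2) in image coordinates; `spatial_scale`
+# rescales those coordinates to the feature-map resolution (1/8 in the loop above,
+# which presumably matches the stride of the student logits -- an assumption, since
+# the SwinUnet head is not shown here). The toy example below only demonstrates the
+# call convention and output shape; every tensor in it is random.
+def _demo_roi_align_distillation():
+    import torch
+    import torch.nn.functional as F
+    from torchvision import ops
+    student_maps = torch.randn(2, 4, 64, 64)                          # (N, num_classes, H, W)
+    teacher_crops = torch.softmax(torch.randn(4, 4, 32, 32), dim=1)   # teacher outputs on 4 crops
+    # two boxes per image: [batch_idx, x1, y1, x2, y2] in 256x256 image coordinates
+    boxes = torch.tensor([[0.,  16.,  16., 240., 240.],
+                          [0.,  64.,  64., 192., 192.],
+                          [1.,   8.,   8., 248., 248.],
+                          [1., 100., 100., 200., 200.]])
+    # 64-pixel feature map for a 256-pixel image -> spatial_scale = 1/4 in this toy
+    aligned = ops.roi_align(student_maps, boxes, output_size=(32, 32), spatial_scale=1 / 4.0)
+    aligned = F.softmax(aligned, dim=1)                               # (4, 4, 32, 32), matches teacher_crops
+    return F.mse_loss(aligned, teacher_crops)
+# ------------------------------------------------------------------------------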
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + net_name4 = 'head.py' + loss_name = 'losses.py' + util_name = 'util.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('networks/' + net_name4, code_path + '/' + net_name4) + shutil.copy('utils/' + loss_name, code_path + '/' + loss_name) + shutil.copy('utils/' + util_name, code_path + '/' + util_name) + + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model_ours_tiaoshi/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + # backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) + + +# num_classes=4 + +# save_best_model='/mnt/sdd/tb/work_dirs/model_ours/ACDC_Semi/Mean_Teacher_fold1/scribble-2023-03-28 16:21:28.002836/iter_24800_dice_0.8175.pth' +# logging.info('============= Start Test ==============') +# device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu') +# model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) +# # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) +# model.load_state_dict(torch.load(save_best_model)) +# print("init weight from {}".format(save_best_model)) +# model.eval() + +# db_val = BaseDataSets(base_dir=args.root_path,fold=args.fold, split="val", ) +# valloader = DataLoader(db_val, batch_size=1, shuffle=False,num_workers=1) + + +# logging.info("{} iterations per epoch".format(len(valloader))) +# model=model.to(device) +# metric_list = 0.0 +# for i_batch, sampled_batch in enumerate(valloader): +# metric_i = test_single_volume_7( +# sampled_batch["image"], sampled_batch["label"], model, classes=num_classes,device=device) +# metric_list += np.array(metric_i) +# print("metric_list:",metric_list) +# metric_list = metric_list / len(db_val) + + +# performance_test = np.mean(metric_list, axis=0)[0] +# mean_hd95_test = np.mean(metric_list, axis=0)[1] +# ppv = np.mean(metric_list, axis=0)[2] +# sen = np.mean(metric_list, axis=0)[3] +# iou = np.mean(metric_list, axis=0)[4] +# biou = np.mean(metric_list, axis=0)[5] +# asd = np.mean(metric_list, axis=0)[7] + + +# # # dice, hd95,sen,iou,asd +# # #dice, hd95, ppv, sen, iou, boundary_iou, hd + +# logging.info("Mean dice on all patients:{:.4f} ".format(performance_test)) +# logging.info("Mean hd95 on all patients:{:.4f} ".format(mean_hd95_test)) +# 
logging.info("Mean IOU on all patients:{:.4f} ".format(iou)) +# logging.info("Mean PPV on all patients:{:.4f} ".format(ppv)) +# logging.info("Mean SEN on all patients:{:.4f} ".format(sen)) +# logging.info("Mean biou on all patients:{:.4f} ".format(biou)) +# logging.info("Mean asd on all patients:{:.4f} ".format(asd)) + +# os.rename(snapshot_path,snapshot_path+"_DSC_"+str(performance_test)[2:6]+"_SEN_"+str(sen)[2:6]+"_DH95_"+str(mean_hd95_test)[0:6]+"_IOU_"+str(iou)[0:6]) + diff --git a/code/train_Trans_teacher_3.py b/code/train_Trans_teacher_3.py new file mode 100644 index 0000000..1adebe1 --- /dev/null +++ b/code/train_Trans_teacher_3.py @@ -0,0 +1,451 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=2022, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, 
help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model = create_model() + ema_model = create_model(ema=True) + ema_model2 = create_model(ema=True) + + model=model.to(device) + ema_model =ema_model.to(device) + ema_model2=ema_model.to(device) + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # 
device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + + optimizer = optim.SGD(model.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + noise2 = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs2 = unlabeled_volume_batch + noise2 + ema_inputs2 = torch.cat([volume_batch,ema_inputs2],0) + + 
volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + + + + with torch.no_grad(): + ema_output,ema_calss,ema_attpred = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + + ema_output2,ema_calss2,ema_attpred = ema_model(ema_inputs2) + ema_output_soft2 = torch.softmax(ema_output2, dim=1) + ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr + + + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + # cosine similarity loss + create_center_1_bg = calss[0].unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss[1].unsqueeze(1) + create_center_1_b = calss[2].unsqueeze(1) + create_center_1_c = calss[3].unsqueeze(1) + + + + create_center_2_bg = ema_calss[0].unsqueeze(1) + create_center_2_a = ema_calss[1].unsqueeze(1) + create_center_2_b = ema_calss[2].unsqueeze(1) + create_center_2_c = ema_calss[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + 
lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + loss = supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss + optimizer.zero_grad() + + # loss.backward(retain_graph=True) + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + update_ema_variables(model, ema_model, args.ema_decay2, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
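+# --- Illustrative sketch (not called during training) -------------------------
+# update_ema_variables() above clamps the EMA decay to min(1 - 1/(step + 1), alpha),
+# so for roughly the first 1/(1 - alpha) steps the teacher is a plain running average
+# of the student, after which it becomes a fixed exponential moving average
+# (alpha = 0.99 with the default --ema_decay here). The scalar helper below traces
+# that effective decay; the step values are arbitrary examples.
+def _demo_effective_ema_decay(alpha=0.99, steps=(0, 1, 10, 99, 100, 1000)):
+    return {t: min(1 - 1 / (t + 1), alpha) for t in steps}
+# e.g. {0: 0.0, 1: 0.5, 10: ~0.909, 99: 0.99, 100: 0.99, 1000: 0.99}
+# ------------------------------------------------------------------------------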
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_4.py b/code/train_Trans_teacher_4.py new file mode 100644 index 0000000..b4fc3e6 --- /dev/null +++ b/code/train_Trans_teacher_4.py @@ -0,0 +1,453 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', 
help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:5' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), 
alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model = create_model() + ema_model = create_model(ema=True) + ema_model2 = create_model(ema=True) + + model=model.to(device) + ema_model =ema_model.to(device) + ema_model2=ema_model.to(device) + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + + optimizer = optim.SGD(model.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + 
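+    # Training-loop overview (editor's summary of the code below):
+    #  * zip(cycle(trainloader_labeled), trainloader_unlabeled) pairs one labeled
+    #    batch with one unlabeled batch; an "epoch" ends when the unlabeled loader
+    #    is exhausted.
+    #  * The student model sees the labeled and unlabeled images concatenated; the
+    #    EMA teacher sees the labeled images plus a noised copy of the unlabeled ones.
+    #  * supervised_loss = CE on the scribble labels plus CE and pDLoss (Dice with
+    #    ignore_index) on the random-walker pseudo-labels; the tensor named
+    #    loss_dice is actually computed with ce_loss and is only logged.
+    #  * After 1000 iterations an MSE consistency term between the student and
+    #    teacher softmax on the unlabeled half is added; consistency_weight is
+    #    logged but not multiplied into the loss.
+    #  * aff_loss supervises the attention predictions with the random-walker
+    #    labels, and scc_loss contrasts per-class centre vectors built from the
+    #    labeled and unlabeled halves of the student and teacher heads.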
iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + # noise2 = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + # ema_inputs2 = unlabeled_volume_batch + noise2 + # ema_inputs2 = torch.cat([volume_batch,ema_inputs2],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + + + + with torch.no_grad(): + ema_output,ema_calss,ema_attpred = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + + # ema_output2,ema_calss2,ema_attpred = ema_model(ema_inputs2) + # ema_output_soft2 = torch.softmax(ema_output2, dim=1) + # ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr + + # loss_un=torch.sum(torch.square(outputs_soft[:args.labeled_bs,...] 
- torch.mean(outputs_soft[:args.labeled_bs,...]))) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],label_batch_wr) + + # cosine similarity loss + create_center_1_bg = calss[0].unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss[1].unsqueeze(1) + create_center_1_b = calss[2].unsqueeze(1) + create_center_1_c = calss[3].unsqueeze(1) + + + + create_center_2_bg = ema_calss[0].unsqueeze(1) + create_center_2_a = ema_calss[1].unsqueeze(1) + create_center_2_b = ema_calss[2].unsqueeze(1) + create_center_2_c = ema_calss[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + + + loss = supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss #+loss_un + optimizer.zero_grad() + + # loss.backward(retain_graph=True) + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + update_ema_variables(model, ema_model, args.ema_decay2, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, 
loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
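+
+# Editor's sketch: update_ema_variables above implements the mean-teacher update
+# theta_ema <- alpha * theta_ema + (1 - alpha) * theta_student, using the older
+# positional Tensor.add_(scalar, tensor) form that recent PyTorch releases flag
+# as deprecated. An equivalent formulation with the keyword API (hypothetical
+# helper name, shown for reference only):
+@torch.no_grad()
+def update_ema_variables_sketch(model, ema_model, alpha, global_step):
+    # Keep alpha small for the first steps so the teacher tracks the student
+    # instead of its random initialisation.
+    alpha = min(1.0 - 1.0 / (global_step + 1), alpha)
+    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
+        ema_param.mul_(alpha).add_(param, alpha=1.0 - alpha)
+
+# Note: the loop above calls update_ema_variables twice on the same ema_model
+# (first with args.ema_decay, then args.ema_decay2), and ema_model2 is rebound to
+# ema_model right after creation (ema_model2 = ema_model.to(device)), so only a
+# single teacher is actually maintained.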
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + # backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_5.py b/code/train_Trans_teacher_5.py new file mode 100644 index 0000000..d1ab4da --- /dev/null +++ b/code/train_Trans_teacher_5.py @@ -0,0 +1,471 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold3', 
help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=16, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=8, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) 
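+    # The in-place ops below compute theta_ema = alpha*theta_ema + (1-alpha)*theta;
+    # the min() above keeps alpha small during the first steps so the teacher
+    # follows the student rather than its random initialisation. (In this script
+    # the helper is defined but never called: both networks are optimised directly.)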
+ for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + model1 = create_model() + # ema_model = create_model(ema=True) + model2 = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + # ema_model2 = create_model(ema=True) + + model1=model1.to(device) + model2 =model2.to(device) + + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model1.train() + model2.train() + + optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + optimizer2 = optim.SGD(model2.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') 
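+    # Editor's note on this variant: model1 is the CNN built by net_factory
+    # ('unet_new') and model2 is the SwinUnet; each has its own SGD optimizer and
+    # both are trained on the shared loss below, cross-teaching style rather than
+    # an EMA mean teacher, despite the ema_* variable names. In the loop, model2
+    # receives the same concatenated labeled+unlabeled batch as model1, so the
+    # noised ema_inputs tensor is built but never consumed, and the four
+    # create_center_1_* tensors are all taken from the full calss output rather
+    # than per-class slices.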
+ logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + # noise2 = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + # ema_inputs2 = unlabeled_volume_batch + noise2 + # ema_inputs2 = torch.cat([volume_batch,ema_inputs2],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model1(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + + + + + ema_output,ema_calss,ema_attpred = model2(volume_batch) + ema_output_soft = torch.softmax(ema_output, dim=1) + + # ema_output2,ema_calss2,ema_attpred = ema_model(ema_inputs2) + # ema_output_soft2 = torch.softmax(ema_output2, dim=1) + # ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + + loss_ce2 = ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + loss_dice2 =ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr2 = ce_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr2= dice_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr+loss_ce2+loss_dice_wr2+loss_ce_wr2 + + # loss_un=torch.sum(torch.square(outputs_soft[:args.labeled_bs,...] 
- torch.mean(outputs_soft[:args.labeled_bs,...]))) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ema_attpred[:args.labeled_bs,...]) + + # cosine similarity loss + create_center_1_bg = calss.unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss.unsqueeze(1) + create_center_1_b = calss.unsqueeze(1) + create_center_1_c = calss.unsqueeze(1) + + + + create_center_2_bg = ema_calss[0].unsqueeze(1) + create_center_2_a = ema_calss[1].unsqueeze(1) + create_center_2_b = ema_calss[2].unsqueeze(1) + create_center_2_c = ema_calss[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + + + loss = 5*supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss #+loss_un + optimizer1.zero_grad() + optimizer2.zero_grad() + + loss.backward() + + optimizer1.step() + optimizer2.step() + # update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # update_ema_variables(model, ema_model, args.ema_decay2, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer1.param_groups: + param_group['lr'] = lr_ + + for param_group in optimizer2.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, 
iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model1.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model1, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model1.state_dict(), save_mode_path) + torch.save(model1.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model1.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model1.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
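+
+# Editor's sketch (hypothetical helper name): the loop above anneals the SGD
+# learning rate with the common "poly" schedule rather than a torch scheduler.
+# The same rule written as a standalone function, for reference:
+def poly_lr_sketch(base_lr, iter_num, max_iterations, power=0.9):
+    """Polynomial decay from base_lr at iteration 0 towards 0 at max_iterations."""
+    return base_lr * (1.0 - iter_num / max_iterations) ** power
+
+# e.g. with base_lr=0.01 and max_iterations=30000:
+#   iteration 0      -> 0.0100
+#   iteration 15000  -> 0.01 * 0.5**0.9 ~= 0.0054
+#   iteration 29999  -> ~0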
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_6.py b/code/train_Trans_teacher_6.py new file mode 100644 index 0000000..66db67c --- /dev/null +++ b/code/train_Trans_teacher_6.py @@ -0,0 +1,470 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold2', 
help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:6' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), 
alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + model1 = create_model() + model2 = create_model(ema=True) + # model2 = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + # ema_model2 = create_model(ema=True) + + model1=model1.to(device) + model2 =model2.to(device) + + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model1.train() + # model2.train() + + optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + # optimizer2 = optim.SGD(ema_model.parameters(), lr=base_lr, + # momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = 
SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + # noise2 = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + # ema_inputs2 = unlabeled_volume_batch + noise2 + # ema_inputs2 = torch.cat([volume_batch,ema_inputs2],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model1(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + + + + ema_output,ema_calss,ema_attpred = model2(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + + # ema_output2,ema_calss2,ema_attpred = ema_model(ema_inputs2) + # ema_output_soft2 = torch.softmax(ema_output2, dim=1) + # ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + + loss_ce2 = ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + loss_dice2 =ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr2 = ce_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr2= dice_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr#loss_ce+loss_dice_wr+loss_ce_wr+loss_ce2+loss_dice_wr2+loss_ce_wr2 + + # loss_un=torch.sum(torch.square(outputs_soft[:args.labeled_bs,...] 
- torch.mean(outputs_soft[:args.labeled_bs,...]))) + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ema_attpred[:args.labeled_bs,...]) + + # cosine similarity loss + create_center_1_bg = calss.unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss.unsqueeze(1) + create_center_1_b = calss.unsqueeze(1) + create_center_1_c = calss.unsqueeze(1) + + + + create_center_2_bg = ema_calss.unsqueeze(1) + create_center_2_a = ema_calss.unsqueeze(1) + create_center_2_b = ema_calss.unsqueeze(1) + create_center_2_c = ema_calss.unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + + + loss = 5*supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss #+loss_un + optimizer1.zero_grad() + # optimizer2.zero_grad() + + loss.backward() + + optimizer1.step() + # optimizer2.step() + update_ema_variables(model1, model2, args.ema_decay, iter_num) + # update_ema_variables(model, ema_model, args.ema_decay2, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer1.param_groups: + param_group['lr'] = lr_ + + # for param_group in optimizer2.param_groups: + # param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + 
logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model1.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model1, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model1.state_dict(), save_mode_path) + torch.save(model1.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model1.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model1.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
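+
+# Editor's sketch: the unsupervised consistency term used in the loop above,
+# written out as a standalone function (hypothetical name; the script inlines the
+# same expression). It compares the student softmax on the unlabeled half of the
+# batch with the EMA teacher's softmax on the matching slice, and is switched on
+# only after a warm-up number of iterations:
+def consistency_mse_sketch(student_soft, teacher_soft, iter_num, warmup_iters=1000):
+    """Mean squared error between two softmax maps; zero during warm-up."""
+    if iter_num < warmup_iters:
+        return torch.zeros((), device=student_soft.device)
+    return torch.mean((student_soft - teacher_soft) ** 2)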
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_7 copy.py b/code/train_Trans_teacher_7 copy.py new file mode 100644 index 0000000..0ca93a7 --- /dev/null +++ b/code/train_Trans_teacher_7 copy.py @@ -0,0 +1,470 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + 
default='fold3', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=16, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=8, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / 
(global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + model1 = create_model() + # ema_model = create_model(ema=True) + model2 = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + # ema_model2 = create_model(ema=True) + + model1=model1.to(device) + model2 =model2.to(device) + + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model1.train() + model2.train() + + optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + optimizer2 = optim.SGD(model2.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = 
SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + # noise2 = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + # ema_inputs2 = unlabeled_volume_batch + noise2 + # ema_inputs2 = torch.cat([volume_batch,ema_inputs2],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model1(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + + + + + ema_output,ema_calss,ema_attpred = model2(volume_batch) + ema_output_soft = torch.softmax(ema_output, dim=1) + + # ema_output2,ema_calss2,ema_attpred = ema_model(ema_inputs2) + # ema_output_soft2 = torch.softmax(ema_output2, dim=1) + # ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + + loss_ce2 = ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + loss_dice2 =ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr2 = ce_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr2= dice_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr+loss_ce2+loss_dice_wr2+loss_ce_wr2 + + # loss_un=torch.sum(torch.square(outputs_soft[:args.labeled_bs,...] 
- torch.mean(outputs_soft[:args.labeled_bs,...]))) + + #consistency loss + # consistency_weight = get_current_consistency_weight(iter_num // 300) + # if iter_num < 1000: + # consistency_loss = 0.0 + # else: + # consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...],ema_attpred[:args.labeled_bs,...]) + + # cosine similarity loss + create_center_1_bg = calss.unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss.unsqueeze(1) + create_center_1_b = calss.unsqueeze(1) + create_center_1_c = calss.unsqueeze(1) + + + + create_center_2_bg = ema_calss[0].unsqueeze(1) + create_center_2_a = ema_calss[1].unsqueeze(1) + create_center_2_b = ema_calss[2].unsqueeze(1) + create_center_2_c = ema_calss[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + + + loss = 5*supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss #+loss_un + optimizer1.zero_grad() + optimizer2.zero_grad() + + loss.backward() + + optimizer1.step() + optimizer2.step() + # update_ema_variables(model, ema_model, args.ema_decay, iter_num) + # update_ema_variables(model, ema_model, args.ema_decay2, iter_num) + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer1.param_groups: + param_group['lr'] = lr_ + for param_group in optimizer2.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, 
iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model1.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model1, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model1.state_dict(), save_mode_path) + torch.save(model1.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model1.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model1.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
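update_ema_variables in this file (its calls in the loop above are commented out) relies on the positional Tensor.add_(scalar, tensor) overload, which newer PyTorch releases report as deprecated. A behaviourally equivalent sketch of the same exponential-moving-average step written with the alpha keyword; the two toy Linear modules are only there to make the example self-contained:

import torch
import torch.nn as nn

@torch.no_grad()
def ema_update(model, ema_model, alpha, global_step):
    # ramp alpha up from 0 so the EMA tracks the raw weights closely early in training
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param, using the non-deprecated add_ signature
        ema_param.mul_(alpha).add_(param, alpha=1 - alpha)

student, teacher = nn.Linear(4, 2), nn.Linear(4, 2)
teacher.load_state_dict(student.state_dict())
ema_update(student, teacher, alpha=0.99, global_step=0)

The torch.no_grad() wrapper is optional here, since the teacher parameters are detached in create_model(ema=True), but it makes the in-place nature of the update explicit.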
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_7.py b/code/train_Trans_teacher_7.py new file mode 100644 index 0000000..0ca93a7 --- /dev/null +++ b/code/train_Trans_teacher_7.py @@ -0,0 +1,470 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold3', 
help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=16, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=8, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) 
+ for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + # if args.local_rank != -1: + # torch.cuda.set_device(args.local_rank) + # device=torch.device("cuda", args.local_rank) + # torch.distributed.init_process_group(backend="nccl", init_method='env://') + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + model1 = create_model() + # ema_model = create_model(ema=True) + model2 = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + # ema_model2 = create_model(ema=True) + + model1=model1.to(device) + model2 =model2.to(device) + + # model = nn.MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False, + # find_unused_parameters=find_unused_parameters) + + + num_gpus = torch.cuda.device_count() + + # if num_gpus > 1: + # # logger.info('use {} gpus!'.format(num_gpus)) + # model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + # output_device=args.local_rank,broadcast_buffers=False) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + #步骤四:定义数据集 + # train_datasets = ...#自己定义的Dataset子类 + # train_sampler_labeled = DistributedSampler(db_train_labeled) + # train_sampler_unlabeled = DistributedSampler(db_train_unlabeled) + + # trainloader_labeled = DataLoader(db_train_labeled, sampler=train_sampler_labeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + # trainloader_unlabeled = DataLoader(db_train_unlabeled, sampler=train_sampler_unlabeled, batch_size=args.train_batch_size, + # num_workers=args.num_workers, drop_last=True,pin_memory=True) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model1.train() + model2.train() + + optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + optimizer2 = optim.SGD(model2.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') 
+ logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + # noise2 = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + # ema_inputs2 = unlabeled_volume_batch + noise2 + # ema_inputs2 = torch.cat([volume_batch,ema_inputs2],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred = model1(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + + + + + + ema_output,ema_calss,ema_attpred = model2(volume_batch) + ema_output_soft = torch.softmax(ema_output, dim=1) + + # ema_output2,ema_calss2,ema_attpred = ema_model(ema_inputs2) + # ema_output_soft2 = torch.softmax(ema_output2, dim=1) + # ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + + loss_ce2 = ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + loss_dice2 =ce_loss(ema_output[:args.labeled_bs,...], label_batch[:].long()) + + + loss_ce_wr2 = ce_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr2= dice_loss(ema_output_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr+loss_ce2+loss_dice_wr2+loss_ce_wr2 + + # loss_un=torch.sum(torch.square(outputs_soft[:args.labeled_bs,...] 
- torch.mean(outputs_soft[:args.labeled_bs,...])))
+
+            # consistency loss (the MSE term kept commented out below is disabled; define zero /
+            # ramp-up placeholders so the total loss and the TensorBoard calls further down
+            # still have defined values)
+            consistency_weight = get_current_consistency_weight(iter_num // 300)
+            consistency_loss = 0.0
+            # if iter_num < 1000:
+            #     consistency_loss = 0.0
+            # else:
+            #     consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2)
+
+            # aff_loss
+            aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs,...], ema_attpred[:args.labeled_bs,...])
+
+            # cosine similarity loss
+            create_center_1_bg = calss.unsqueeze(1)  # 4,1,x,y,z->4,2
+            create_center_1_a = calss.unsqueeze(1)
+            create_center_1_b = calss.unsqueeze(1)
+            create_center_1_c = calss.unsqueeze(1)
+
+            create_center_2_bg = ema_calss[0].unsqueeze(1)
+            create_center_2_a = ema_calss[1].unsqueeze(1)
+            create_center_2_b = ema_calss[2].unsqueeze(1)
+            create_center_2_c = ema_calss[3].unsqueeze(1)
+
+            create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)  # dims(4,2)
+            create_center_soft_1_a = F.softmax(create_center_1_a, dim=1)
+            create_center_soft_1_b = F.softmax(create_center_1_b, dim=1)
+            create_center_soft_1_c = F.softmax(create_center_1_c, dim=1)
+
+            create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)  # dims(4,2)
+            create_center_soft_2_a = F.softmax(create_center_2_a, dim=1)
+            create_center_soft_2_b = F.softmax(create_center_2_b, dim=1)
+            create_center_soft_2_c = F.softmax(create_center_2_c, dim=1)
+
+            lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]), dim=0)  # 4,2
+            lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]), dim=0)
+            lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]), dim=0)
+            lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]), dim=0)
+
+            un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]), dim=0)
+            un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]), dim=0)
+            un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]), dim=0)
+            un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]), dim=0)
+
+            loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg,
+                                            lb_center_12_a, un_center_12_bg, un_center_12_a,
+                                            lb_center_12_b, lb_center_12_c, un_center_12_b, un_center_12_c)
+
+            loss = 5 * supervised_loss + consistency_loss + loss_contrast * args.my_lambda + aff_loss  # +loss_un
+            optimizer1.zero_grad()
+            optimizer2.zero_grad()
+
+            loss.backward()
+
+            optimizer1.step()
+            optimizer2.step()
+            # update_ema_variables(model, ema_model, args.ema_decay, iter_num)
+            # update_ema_variables(model, ema_model, args.ema_decay2, iter_num)
+
+            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
+            for param_group in optimizer1.param_groups:
+                param_group['lr'] = lr_
+            for param_group in optimizer2.param_groups:
+                param_group['lr'] = lr_
+
+            iter_num = iter_num + 1
+            writer.add_scalar('info/lr', lr_, iter_num)
+            writer.add_scalar('info/total_loss', loss, iter_num)
+            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
+            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
+            writer.add_scalar('info/consistency_loss',
+                              consistency_loss, iter_num)
+            writer.add_scalar('info/consistency_weight',
+                              consistency_weight,
iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model1.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model1, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model1.state_dict(), save_mode_path) + torch.save(model1.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model1.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model1.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
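get_current_consistency_weight above scales args.consistency by ramps.sigmoid_rampup(epoch, args.consistency_rampup); the utils.ramps module itself is not included in this diff. A stand-alone sketch of the sigmoid ramp-up as it is commonly defined in mean-teacher style code (exp(-5 * (1 - t)^2)), assuming that is what utils.ramps implements:

import numpy as np

def sigmoid_rampup(current, rampup_length):
    # Gaussian-shaped ramp: ~0.007 at step 0, reaching 1.0 once current hits rampup_length
    if rampup_length == 0:
        return 1.0
    current = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - current / rampup_length
    return float(np.exp(-5.0 * phase * phase))

# with the argparse defaults consistency=0.5 and consistency_rampup=200.0,
# the weight handed to the consistency term grows roughly like this:
for epoch in (0, 50, 100, 200, 300):
    print(epoch, 0.5 * sigmoid_rampup(epoch, 200.0))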
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_8.py b/code/train_Trans_teacher_8.py new file mode 100644 index 0000000..b8c14b9 --- /dev/null +++ b/code/train_Trans_teacher_8.py @@ -0,0 +1,454 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', 
help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=32, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=16, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), 
alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + + + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001) + # optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + outputs,calss,attpred,attpred_ = model(volume_batch) + + outputs_soft = torch.softmax(outputs, dim=1) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], 
dim=1) + + + + + with torch.no_grad(): + ema_output,ema_calss,ema_attpred ,ema_attpred_ = ema_model(ema_inputs) + ema_output_soft = torch.softmax(ema_output, dim=1) + + # ema_output2,ema_calss2,ema_attpred = ema_model2(ema_inputs2) + # ema_output_soft2 = torch.softmax(ema_output2, dim=1) + # ema_output_soft=(ema_output_soft+ema_output_soft2)/2 + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + loss_dice =ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + + + + + # _,token_b4_n1, token_b4_n2 = attpred_[3][0].size() + bz, _, token_b4_n1, token_b4_n2 = attpred_[3].size() + attn_avg4 = torch.zeros(bz, token_b4_n1, token_b4_n2, dtype=outputs.dtype, device=outputs.device) + for attn in attpred_[3]: + attn = attn.mean(dim=0) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg4 += attn + attn_avg4 = attn_avg4 / len(attpred_[3]) + attn_avg4=attn_avg4.unsqueeze(1) + + + pseudo_labels = torch.zeros_like(ema_output) + + channel_map = ema_attpred + threshold = torch.quantile(channel_map, 0.95) # Set the threshold for each channel + binary_mask = (channel_map > threshold).float() + pseudo_labels= binary_mask * outputs + + pseudo_labels = torch.argmax( + pseudo_labels[:args.labeled_bs].detach(), dim=1, keepdim=False) + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce+loss_dice_wr+loss_ce_wr + + + #consistency loss + consistency_weight = get_current_consistency_weight(iter_num // 300) + if iter_num < 1000: + consistency_loss = 0.0 + else: + consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:,...]) ** 2) + + + #aff_loss + aff_loss = losses.get_aff_loss(attpred[args.labeled_bs:,...],ema_attpred[args.labeled_bs:,...]) + + attn_avg4=F.interpolate(attn_avg4, size=outputs.shape[2:], mode='bilinear', align_corners=False) + affinity_loss_mat = torch.abs(torch.softmax(torch.matmul(attn_avg4[:args.labeled_bs,...] 
+ , outputs[:args.labeled_bs,...]),dim=-1) - outputs_soft[:args.labeled_bs,...]) + affinity_loss=affinity_loss_mat.mean() + # cosine similarity loss + create_center_1_bg = calss[0].unsqueeze(1)# 4,1,x,y,z->4,2 + create_center_1_a = calss[1].unsqueeze(1) + create_center_1_b = calss[2].unsqueeze(1) + create_center_1_c = calss[3].unsqueeze(1) + + + + create_center_2_bg = ema_calss[0].unsqueeze(1) + create_center_2_a = ema_calss[1].unsqueeze(1) + create_center_2_b = ema_calss[2].unsqueeze(1) + create_center_2_c = ema_calss[3].unsqueeze(1) + + create_center_soft_1_bg = F.softmax(create_center_1_bg, dim=1)# dims(4,2) + create_center_soft_1_a = F.softmax(create_center_1_a, dim=1) + create_center_soft_1_b = F.softmax(create_center_1_b, dim=1) + create_center_soft_1_c = F.softmax(create_center_1_c, dim=1) + + + create_center_soft_2_bg = F.softmax(create_center_2_bg, dim=1)# dims(4,2) + create_center_soft_2_a = F.softmax(create_center_2_a, dim=1) + create_center_soft_2_b = F.softmax(create_center_2_b, dim=1) + create_center_soft_2_c = F.softmax(create_center_2_c, dim=1) + + + lb_center_12_bg = torch.cat((create_center_soft_1_bg[:args.labeled_bs,...], create_center_soft_2_bg[:args.labeled_bs,...]),dim=0)# 4,2 + lb_center_12_a = torch.cat((create_center_soft_1_a[:args.labeled_bs,...], create_center_soft_2_a[:args.labeled_bs,...]),dim=0) + lb_center_12_b = torch.cat((create_center_soft_1_b[:args.labeled_bs,...], create_center_soft_2_b[:args.labeled_bs,...]),dim=0) + lb_center_12_c = torch.cat((create_center_soft_1_c[:args.labeled_bs,...], create_center_soft_2_c[:args.labeled_bs,...]),dim=0) + + + un_center_12_bg = torch.cat((create_center_soft_1_bg[args.labeled_bs:,...], create_center_soft_2_bg[args.labeled_bs:,...]),dim=0) + un_center_12_a = torch.cat((create_center_soft_1_a[args.labeled_bs:,...], create_center_soft_2_a[args.labeled_bs:,...]),dim=0) + un_center_12_b = torch.cat((create_center_soft_1_b[args.labeled_bs:,...], create_center_soft_2_b[args.labeled_bs:,...]),dim=0) + un_center_12_c = torch.cat((create_center_soft_1_c[args.labeled_bs:,...], create_center_soft_2_c[args.labeled_bs:,...]),dim=0) + + + + + loss_contrast = losses.scc_loss(cos_sim, args.tau, lb_center_12_bg, + lb_center_12_a,un_center_12_bg, un_center_12_a, + lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c) + + loss = supervised_loss+consistency_loss+loss_contrast*args.my_lambda+aff_loss+affinity_loss + optimizer.zero_grad() + + # loss.backward(retain_graph=True) + loss.backward() + optimizer.step() + update_ema_variables(model, ema_model, args.ema_decay, iter_num) + + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
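In train_Trans_teacher_8.py above, pseudo labels are produced by zeroing the student logits wherever the teacher attention map falls below its 95th percentile and then taking an argmax over classes. Note that torch.quantile without a dim argument, as used there, returns one global threshold for the whole tensor rather than the per-channel threshold the inline comment describes. A small sketch contrasting the two on random tensors; the shapes and the per-channel variant are illustrative assumptions, not what the script computes:

import torch

torch.manual_seed(0)
attn = torch.rand(2, 4, 8, 8)      # illustrative (batch, class, H, W) attention map
logits = torch.randn(2, 4, 8, 8)   # illustrative segmentation logits

# global threshold: a single scalar over the whole tensor (what torch.quantile(x, 0.95) gives)
thr_global = torch.quantile(attn, 0.95)
mask_global = (attn > thr_global).float()

# per-channel threshold: take each channel's own 95th percentile instead
thr_per_ch = torch.quantile(attn.permute(1, 0, 2, 3).reshape(attn.shape[1], -1), 0.95, dim=1)
mask_per_ch = (attn > thr_per_ch.view(1, -1, 1, 1)).float()

# masked logits -> hard pseudo labels, mirroring the argmax step in the script
pseudo_global = (mask_global * logits).argmax(dim=1)
pseudo_per_ch = (mask_per_ch * logits).argmax(dim=1)
print(mask_global.mean().item(), mask_per_ch.mean().item())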
+ +def backup_code(base_dir): + ###备份当前train代码文件及dataset代码文件 + code_path = os.path.join(base_dir, 'code') + if not os.path.exists(code_path): + os.makedirs(code_path) + train_name = os.path.basename(__file__) + dataset_name = 'dataset_semi.py' + # dataset_name2 = 'dataset_semi_weak_newnew_20.py' + net_name1 = 'mix_transformer.py' + net_name2 = 'net_factory.py' + net_name3 = 'vision_transformer.py' + shutil.copy('networks/' + net_name1, code_path + '/' + net_name1) + shutil.copy('networks/' + net_name2, code_path + '/' + net_name2) + shutil.copy('networks/' + net_name3, code_path + '/' + net_name3) + shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name) + # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2) + shutil.copy(train_name, code_path + '/' + train_name) + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/tb/work_dirs/model/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type,datetime.datetime.now()) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + backup_code(snapshot_path) + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_Trans_teacher_9.py b/code/train_Trans_teacher_9.py new file mode 100644 index 0000000..13258ce --- /dev/null +++ b/code/train_Trans_teacher_9.py @@ -0,0 +1,471 @@ +import argparse +import logging +import os +import random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms,ops +from torchvision.utils import make_grid +from tqdm import tqdm +import datetime +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator,TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume2 +from networks.vision_transformer import SwinUnet as ViT_seg +from config import get_config +from torch.nn import CosineSimilarity +from torch.utils.data.distributed import DistributedSampler +"""选择GPU ID""" +# gpu_list = [1,2] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +from utils.gate_crf_loss import ModelLossSemsegGatedCRF + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/Mean_Teacher', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet_new', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', 
help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=40, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=42, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=20, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--ema_decay2', type=float, default=0.8, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.5, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') + +#trans parameters +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/tb/WSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +parser.add_argument('--my_lambda', type=float, default=1, help='balance factor to control contrastive loss') +parser.add_argument('--tau', type=float, default=1, help='temperature of the contrastive loss') + +parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 2), type=int) +parser.add_argument("--kd_weights", type=int, default=15) + +args = parser.parse_args() +config = get_config(args) +# +device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is 
more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + + + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network definition + # model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + + if ema: + for param in model.parameters(): + param.detach_() + return model + + model_aux = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + model = create_model() + ema_model = create_model(ema=True) + + + model=model.to(device) + ema_model =ema_model.to(device) + model_aux = model_aux.to(device) + + + num_gpus = torch.cuda.device_count() + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, drop_last=True,worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + model_aux.train() + + optimizer2 = optim.SGD(model_aux.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001) + optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001) + + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + cos_sim = CosineSimilarity(dim=1,eps=1e-6) + affinityenergyLoss=losses.SegformerAffinityEnergyLoss() + criterion = torch.nn.MSELoss() + + + gatecrf_loss = ModelLossSemsegGatedCRF() + loss_gatedcrf_kernels_desc = [{"weight": 1, "xy": 6, "rgb": 0.1}] + loss_gatedcrf_radius = 5 + + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + # train_sampler_labeled.set_epoch(epoch_num) + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + label_batch_wr = sampled_batch_labeled['random_walker'] + crop_images = sampled_batch_labeled['crop_images'] + boxes = sampled_batch_labeled['boxes'] + + + crop_images = crop_images.to(device) + label_batch_wr = label_batch_wr.to(device) + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + + noise = 
torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2) + ema_inputs = unlabeled_volume_batch + noise + # ema_inputs = torch.cat([volume_batch,ema_inputs],0) + + volume_batch=torch.cat([volume_batch,unlabeled_volume_batch],0) + + + + outputs,attpred = model(volume_batch) + outputs_unlabeled_soft = torch.softmax(outputs[args.labeled_bs:,...], dim=1) + outputs_seg_soft = torch.softmax(outputs[:args.labeled_bs,...], dim=1) + + bs, bxs, c, h, w = crop_images.shape + crop_images = crop_images.reshape(bs * bxs, c, h, w) + + feat_local,logits_local = model_aux(crop_images) + seg_soft_crop = torch.softmax(feat_local, dim=1) + + boxes = boxes.to(device).type_as(outputs) + + # visualize + feat_local_label = feat_local.clone().detach() # 4, 20, 224, 224 + + # # normalize + # ba = logits_local.shape[0] + # feat_local_label[feat_local_label < 0] = 0 + # ll_max = torch.max(torch.max(feat_local_label, dim=3)[0], dim=2)[0] + # feat_local_label = feat_local_label / (ll_max.unsqueeze(2).unsqueeze(3) + 1e-8) + # for i in range(bs): + # ind = torch.nonzero(label_batch[i] == 0) + # feat_local_label[i * bxs:(i + 1) * bxs, ind] = 0 + + # keep max value among all classes + n, c, h, w = feat_local_label.shape + feat_local_label_c = feat_local_label.permute(1, 0, 2, 3).reshape(c, -1) + ind_f = torch.argsort(-feat_local_label_c, axis=0) + pos = torch.eye(c)[ind_f[0]].transpose(0, 1).type_as(feat_local_label_c) + feat_local_label_c = pos * feat_local_label_c + feat_local_label = feat_local_label_c.reshape(c, n, h, w).permute(1, 0, 2, 3) + + + # match the sal label hyper-parameter + feat_local_label = (feat_local_label > 0.35).type_as(feat_local_label) + + + # roi align + feat_aligned = [] + crop_label = [] + + for i in range(n): + feat_aligned_=ops.roi_align(outputs[:args.labeled_bs,...], boxes[i], (h, w), 1 / 8.0) + feat_aligned.append(feat_aligned_.clone()[None]) + + label_aligned_=ops.roi_align(label_batch.unsqueeze(1).type(torch.float32), boxes[i], (h, w), 1 / 8.0) + + crop_label.append(label_aligned_.clone()[None]) + + feat_aligned = torch.cat(feat_aligned, dim=0) + crop_label = torch.cat(crop_label, dim=0) + + feat_aligned=feat_aligned.squeeze() + crop_label =crop_label.squeeze() + + + # feat_aligned = ops.roi_align(outputs, boxes[-1], (h, w), 1 / 8.0) + feat_aligned = F.softmax(feat_aligned, dim=1) + loss_kd = criterion(feat_aligned, feat_local_label[:args.labeled_bs,...]) * args.kd_weights + loss_ce_corp = ce_loss(feat_local,crop_label[:].long())* args.kd_weights + + + loss_ce = ce_loss(outputs[:args.labeled_bs,...], label_batch[:].long()) + # loss_dice =ce_loss(outputs_seg[:args.labeled_bs,...], label_batch[:].long()) + + # pseudo_labels = torch.zeros_like(ema_output) + + # channel_map = ema_attpred + # threshold = torch.quantile(channel_map, 0.95) # Set the threshold for each channel + # binary_mask = (channel_map > threshold).float() + # pseudo_labels= binary_mask * outputs + + # pseudo_labels = torch.argmax(pseudo_labels[:args.labeled_bs].detach(), dim=1, keepdim=False) + + + loss_ce_wr = ce_loss(outputs[:args.labeled_bs,...], label_batch_wr[:].long()) + loss_dice_wr= dice_loss(outputs_seg_soft[:args.labeled_bs,...], label_batch_wr.unsqueeze(1)) + #dice_loss(outputs_soft[:args.labeled_bs,...], label_batch.unsqueeze(1)) + # supervised_loss = 0.5 * (loss_dice + loss_ce) + supervised_loss=loss_ce #+loss_dice_wr+loss_ce_wr + + + pseudo_outputs1 = torch.argmax(outputs_seg_soft[args.labeled_bs:].detach(), dim=1, keepdim=False) + + pseudo_outputs2 = 
torch.argmax(seg_soft_crop[args.labeled_bs:].detach(), dim=1, keepdim=False)
+
+            pseudo_supervision1 = dice_loss(outputs_seg_soft[args.labeled_bs:], pseudo_outputs2.unsqueeze(1))
+            pseudo_supervision2 = dice_loss(seg_soft_crop[args.labeled_bs:], pseudo_outputs1.unsqueeze(1))
+
+            # with torch.cuda.amp.autocast():
+            #     # -1: unlabeled pixels (roughly 60%-70% of the pixels carry no annotation)
+            #     unlabeled_RoIs = (label_batch == 0)
+            #     label_batch[label_batch < 0] = 0
+            #     affinity_loss = affinityenergyLoss(outputs, attpred, unlabeled_RoIs, label_batch)
+            # loss = supervised_loss + affinity_loss
+
+            # with torch.no_grad():
+            #     ema_output, ema_attpred = ema_model(ema_inputs)
+            #     ema_output_soft = torch.softmax(ema_output, dim=1)
+
+            # # consistency loss
+            # consistency_weight = get_current_consistency_weight(iter_num // 300)
+            # if iter_num < 1000:
+            #     consistency_loss = 0.0
+            # else:
+            #     consistency_loss = torch.mean((outputs_unlabeled_soft - ema_output_soft[args.labeled_bs:, ...]) ** 2)
+
+            # affinity (aff) loss between the attention predictions and the random-walker labels
+            aff_loss = losses.get_aff_loss(attpred[:args.labeled_bs, ...], label_batch_wr)
+
+            loss = supervised_loss + aff_loss + loss_kd  # + affinity_loss + consistency_weight * consistency_loss
+            optimizer.zero_grad()
+            optimizer2.zero_grad()
+
+            loss.backward()
+
+            optimizer.step()
+            optimizer2.step()
+            # update_ema_variables(model, ema_model, args.ema_decay, iter_num)
+
+            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
+            for param_group in optimizer.param_groups:
+                param_group['lr'] = lr_
+
+            iter_num = iter_num + 1
+            writer.add_scalar('info/lr', lr_, iter_num)
+            writer.add_scalar('info/total_loss', loss, iter_num)
+            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
+            writer.add_scalar('info/loss_kd', loss_kd, iter_num)
+            writer.add_scalar('info/aff_loss', aff_loss, iter_num)
+            # writer.add_scalar('info/consistency_loss', consistency_loss, iter_num)
+            # writer.add_scalar('info/consistency_weight', consistency_weight, iter_num)
+
+            logging.info(
+                'iteration %d : loss : %f, loss_ce: %f, loss_kd: %f, aff_loss: %f' %
+                (iter_num, loss.item(), loss_ce.item(), loss_kd.item(), aff_loss.item()))
+
+            if iter_num % 20 == 0:
+                image = volume_batch[1, 0:1, :, :]
+                writer.add_image('train/Image', image, iter_num)
+                outputs = torch.argmax(torch.softmax(
+                    outputs, dim=1), dim=1, keepdim=True)
+                writer.add_image('train/Prediction', outputs[1, ...]
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume2( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
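+
+# --- Editor's note: illustrative sketch, not part of the original patch ---
+# The training loop above decays the learning rate with a polynomial ("poly")
+# schedule and ramps the consistency weight with a sigmoid schedule via
+# `get_current_consistency_weight`. The helpers below spell out those formulas;
+# `sigmoid_rampup` mirrors what `utils.ramps.sigmoid_rampup` is assumed to
+# compute (the exp(-5 * (1 - t)^2) schedule from the temporal-ensembling /
+# mean-teacher papers).
+import math  # needed only for this sketch
+
+def poly_lr(base_lr, iter_num, max_iterations, power=0.9):
+    """Poly decay used above: base_lr * (1 - iter / max_iter) ** power."""
+    return base_lr * (1.0 - iter_num / max_iterations) ** power
+
+def sigmoid_rampup(current, rampup_length):
+    """Sigmoid ramp-up in [0, 1]: exp(-5 * (1 - t)^2) with t = current / rampup_length."""
+    if rampup_length == 0:
+        return 1.0
+    current = min(max(float(current), 0.0), rampup_length)
+    phase = 1.0 - current / rampup_length
+    return math.exp(-5.0 * phase * phase)
+
+# Under this assumption, get_current_consistency_weight(epoch) above evaluates to
+#     args.consistency * sigmoid_rampup(epoch, args.consistency_rampup)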
+
+def backup_code(base_dir):
+    ### Back up the current training script plus the dataset and network code files
+    code_path = os.path.join(base_dir, 'code')
+    if not os.path.exists(code_path):
+        os.makedirs(code_path)
+    train_name = os.path.basename(__file__)
+    dataset_name = 'dataset_semi.py'
+    # dataset_name2 = 'dataset_semi_weak_newnew_20.py'
+    net_name1 = 'mix_transformer.py'
+    net_name2 = 'net_factory.py'
+    net_name3 = 'vision_transformer.py'
+    shutil.copy('networks/' + net_name1, code_path + '/' + net_name1)
+    shutil.copy('networks/' + net_name2, code_path + '/' + net_name2)
+    shutil.copy('networks/' + net_name3, code_path + '/' + net_name3)
+    shutil.copy('dataloaders/' + dataset_name, code_path + '/' + dataset_name)
+    # shutil.copy('dataloaders/' + dataset_name2, code_path + '/' + dataset_name2)
+    shutil.copy(train_name, code_path + '/' + train_name)
+
+if __name__ == "__main__":
+    if not args.deterministic:
+        cudnn.benchmark = True
+        cudnn.deterministic = False
+    else:
+        cudnn.benchmark = False
+        cudnn.deterministic = True
+
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    torch.cuda.manual_seed(args.seed)
+
+    snapshot_path = "/mnt/sdd/tb/work_dirs/model_tiaoshi/{}_{}/{}-{}".format(args.exp, args.fold, args.sup_type, datetime.datetime.now())
+    if not os.path.exists(snapshot_path):
+        os.makedirs(snapshot_path)
+    # backup_code(snapshot_path)
+
+    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
+                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
+    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+    logging.info(str(args))
+    train(args, snapshot_path)
diff --git a/code/train_cross_teaching_between_cnn_transformer_2D copy.py b/code/train_cross_teaching_between_cnn_transformer_2D copy.py
new file mode 100644
index 0000000..d84ed23
--- /dev/null
+++ b/code/train_cross_teaching_between_cnn_transformer_2D copy.py
@@ -0,0 +1,418 @@
+# -*- coding: utf-8 -*-
+# Author: Xiangde Luo
+# Date: 16 Dec. 2021
+# Implementation for Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer.
+# # Reference:
+#   @article{luo2021ctbct,
+#   title={Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer},
+#   author={Luo, Xiangde and Hu, Minhao and Song, Tao and Wang, Guotai and Zhang, Shaoting},
+#   journal={arXiv preprint arXiv:2112.04894},
+#   year={2021}}
+# In the original paper, we don't use the validation set to select checkpoints and use the last iteration to inference for all methods.
+# In addition, we combine the validation set and test set to report the results.
+# We found that the random data split has some bias (the validation set is very tough and the test set is very easy).
+# Actually, this setting is also a fair comparison.
+# download pre-trained model to "code/pretrained_ckpt" folder, link:https://drive.google.com/drive/folders/1UC3XOoezeum0uck4KBVGa8osahs6rKUY + +import argparse +import logging +import os +import random +import shutil +import sys +import time + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from tqdm import tqdm + +from config import get_config +from dataloaders import utils +from dataloaders.dataset import (BaseDataSets, RandomGenerator, + TwoStreamBatchSampler) +from networks.net_factory import net_factory +from networks.vision_transformer import SwinUnet as ViT_seg +from utils import losses, metrics, ramps +from val_2D import test_single_volume + + +# """选择GPU ID""" +# gpu_list = [4] #[0,1] +# gpu_list_str = ','.join(map(str, gpu_list)) +# os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC/Cross_Teaching_Between_CNN_Transformer_2', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=8, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=1337, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') +parser.add_argument( + '--cfg', type=str, default="/mnt/sdd/yd2tb/SSL4MIS/code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=4, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=14, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.1, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') +args = parser.parse_args() +config = get_config(args) +device = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu') + +def kaiming_normal_init_weight(model): + for m in model.modules(): + if isinstance(m, nn.Conv2d): + torch.nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + return model + + +def xavier_normal_init_weight(model): + for m in model.modules(): + if isinstance(m, nn.Conv2d): + torch.nn.init.xavier_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + return model + + +def patients_to_slices(dataset, patiens_num): + ref_dict = None + if "ACDC" in dataset: + ref_dict = {"3": 68, "7": 136, + "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312} + elif "Prostate": + ref_dict = {"2": 27, "4": 53, "8": 120, + "12": 179, "16": 256, "21": 312, "42": 623} + else: + print("Error") + return ref_dict[str(patiens_num)] + + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1,class_num=num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model1 = create_model() + + # model1 = ViT_seg(config, 
img_size=args.patch_size,num_classes=args.num_classes) + model2 = ViT_seg(config, img_size=args.patch_size,num_classes=args.num_classes) + # model2.load_from(config) + + model1=model1.to(device) + model2=model2.to(device) + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([ + RandomGenerator(args.patch_size) + ])) + db_val = BaseDataSets(base_dir=args.root_path, split="val") + + total_slices = len(db_train) + labeled_slice = patients_to_slices(args.root_path, args.labeled_num) + print("Total silices is: {}, labeled slices is: {}".format( + total_slices, labeled_slice)) + labeled_idxs = list(range(0, labeled_slice)) + unlabeled_idxs = list(range(labeled_slice, total_slices)) + batch_sampler = TwoStreamBatchSampler( + labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs) + + trainloader = DataLoader(db_train, batch_sampler=batch_sampler, + num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn) + + model1.train() + model2.train() + + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + optimizer2 = optim.SGD(model2.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + # ce_loss = CrossEntropyLoss() + # dice_loss = losses.DiceLoss(num_classes) + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.DiceLoss(num_classes) + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader) + 1 + best_performance1 = 0.0 + best_performance2 = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + for i_batch, sampled_batch in enumerate(trainloader): + + volume_batch, label_batch = sampled_batch['image'], sampled_batch['label'] + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + + outputs1 = model1(volume_batch) + outputs_soft1 = torch.softmax(outputs1, dim=1) + + outputs2 = model2(volume_batch) + outputs_soft2 = torch.softmax(outputs2, dim=1) + consistency_weight = get_current_consistency_weight( + iter_num // 150) + + # loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss( + # outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))) + # loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss( + # outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))) + loss1 = ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + loss2 = ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + + pseudo_outputs1 = torch.argmax( + outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False) + pseudo_outputs2 = torch.argmax( + outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False) + + pseudo_supervision1 = dice_loss( + outputs_soft1[args.labeled_bs:], pseudo_outputs2.unsqueeze(1)) + pseudo_supervision2 = dice_loss( + outputs_soft2[args.labeled_bs:], pseudo_outputs1.unsqueeze(1)) + + model1_loss = loss1 + consistency_weight * pseudo_supervision1 + model2_loss = loss2 + consistency_weight * pseudo_supervision2 + + loss = model1_loss + model2_loss + + optimizer1.zero_grad() + optimizer2.zero_grad() + + loss.backward() + + optimizer1.step() + optimizer2.step() + + iter_num = 
iter_num + 1 + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer1.param_groups: + param_group['lr'] = lr_ + for param_group in optimizer2.param_groups: + param_group['lr'] = lr_ + + writer.add_scalar('lr', lr_, iter_num) + writer.add_scalar( + 'consistency_weight/consistency_weight', consistency_weight, iter_num) + writer.add_scalar('loss/model1_loss', + model1_loss, iter_num) + writer.add_scalar('loss/model2_loss', + model2_loss, iter_num) + logging.info('iteration %d : model1 loss : %f model2 loss : %f' % ( + iter_num, model1_loss.item(), model2_loss.item())) + if iter_num % 50 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs1, dim=1), dim=1, keepdim=True) + writer.add_image('train/model1_Prediction', + outputs[1, ...] * 50, iter_num) + outputs = torch.argmax(torch.softmax( + outputs2, dim=1), dim=1, keepdim=True) + writer.add_image('train/model2_Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model1.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model1, device=device,classes=num_classes, patch_size=args.patch_size) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance1 = np.mean(metric_list, axis=0)[0] + + mean_hd951 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/model1_val_mean_dice', + performance1, iter_num) + writer.add_scalar('info/model1_val_mean_hd95', + mean_hd951, iter_num) + + if performance1 > best_performance1: + best_performance1 = performance1 + save_mode_path = os.path.join(snapshot_path, + 'model1_iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance1, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model1.pth'.format(args.model)) + torch.save(model1.state_dict(), save_mode_path) + torch.save(model1.state_dict(), save_best) + + logging.info( + 'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951)) + model1.train() + + model2.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model2, device=device,classes=num_classes, patch_size=args.patch_size) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance2 = np.mean(metric_list, axis=0)[0] + + mean_hd952 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/model2_val_mean_dice', + performance2, iter_num) + writer.add_scalar('info/model2_val_mean_hd95', + mean_hd952, iter_num) + + if performance2 > best_performance2: + best_performance2 = performance2 + save_mode_path = os.path.join(snapshot_path, + 'model2_iter_{}_dice_{}.pth'.format( + 
iter_num, round(best_performance2, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model2.pth'.format(args.model)) + torch.save(model2.state_dict(), save_mode_path) + torch.save(model2.state_dict(), save_best) + + logging.info( + 'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952)) + model2.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'model1_iter_' + str(iter_num) + '.pth') + torch.save(model1.state_dict(), save_mode_path) + logging.info("save model1 to {}".format(save_mode_path)) + + save_mode_path = os.path.join( + snapshot_path, 'model2_iter_' + str(iter_num) + '.pth') + torch.save(model2.state_dict(), save_mode_path) + logging.info("save model2 to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + time1 = time.time() + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}".format( + args.exp, args.labeled_num, args.model) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + + + logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_cross_teaching_between_cnn_transformer_2D.py b/code/train_cross_teaching_between_cnn_transformer_2D.py new file mode 100644 index 0000000..d35f4dc --- /dev/null +++ b/code/train_cross_teaching_between_cnn_transformer_2D.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +# Author: Xiangde Luo +# Date: 16 Dec. 2021 +# Implementation for Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer. +# # Reference: +# @article{luo2021ctbct, +# title={Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer}, +# author={Luo, Xiangde and Hu, Minhao and Song, Tao and Wang, Guotai and Zhang, Shaoting}, +# journal={arXiv preprint arXiv:2112.04894}, +# year={2021}} +# In the original paper, we don't use the validation set to select checkpoints and use the last iteration to inference for all methods. +# In addition, we combine the validation set and test set to report the results. +# We found that the random data split has some bias (the validation set is very tough and the test set is very easy). +# Actually, this setting is also a fair comparison. 
+# download pre-trained model to "code/pretrained_ckpt" folder, link:https://drive.google.com/drive/folders/1UC3XOoezeum0uck4KBVGa8osahs6rKUY + +import argparse +import logging +import os +import random +import shutil +import sys +import time + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from tqdm import tqdm + +from config import get_config +from dataloaders import utils +from dataloaders.dataset import (BaseDataSets, RandomGenerator, + TwoStreamBatchSampler) +from networks.net_factory import net_factory +from networks.vision_transformer import SwinUnet as ViT_seg +from utils import losses, metrics, ramps +from val_2D import test_single_volume + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='../data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC/Cross_Teaching_Between_CNN_Transformer', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=16, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[224, 224], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=1337, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') +parser.add_argument( + '--cfg', type=str, default="../code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', +) +parser.add_argument('--zip', action='store_true', + help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, + help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', + help='Test throughput only') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=8, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=7, + help='labeled data') +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.1, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') +args = parser.parse_args() +config = get_config(args) + + +def kaiming_normal_init_weight(model): + for m in model.modules(): + if isinstance(m, nn.Conv2d): + torch.nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + return model + + +def xavier_normal_init_weight(model): + for m in model.modules(): + if isinstance(m, nn.Conv2d): + torch.nn.init.xavier_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + return model + + +def patients_to_slices(dataset, patiens_num): + ref_dict = None + if "ACDC" in dataset: + ref_dict = {"3": 68, "7": 136, + "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312} + elif "Prostate": + ref_dict = {"2": 27, "4": 53, "8": 120, + "12": 179, "16": 256, "21": 312, "42": 623} + else: + print("Error") + return ref_dict[str(patiens_num)] + + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def create_model(ema=False): + # Network definition + model = net_factory(net_type=args.model, in_chns=1, + class_num=num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model1 = create_model() + model2 = ViT_seg(config, img_size=args.patch_size, + num_classes=args.num_classes).cuda() + model2.load_from(config) + + def 
worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([ + RandomGenerator(args.patch_size) + ])) + db_val = BaseDataSets(base_dir=args.root_path, split="val") + + total_slices = len(db_train) + labeled_slice = patients_to_slices(args.root_path, args.labeled_num) + print("Total silices is: {}, labeled slices is: {}".format( + total_slices, labeled_slice)) + labeled_idxs = list(range(0, labeled_slice)) + unlabeled_idxs = list(range(labeled_slice, total_slices)) + batch_sampler = TwoStreamBatchSampler( + labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs) + + trainloader = DataLoader(db_train, batch_sampler=batch_sampler, + num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn) + + model1.train() + model2.train() + + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + optimizer1 = optim.SGD(model1.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + optimizer2 = optim.SGD(model2.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + ce_loss = CrossEntropyLoss() + dice_loss = losses.DiceLoss(num_classes) + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader) + 1 + best_performance1 = 0.0 + best_performance2 = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + for i_batch, sampled_batch in enumerate(trainloader): + + volume_batch, label_batch = sampled_batch['image'], sampled_batch['label'] + volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda() + + outputs1 = model1(volume_batch) + outputs_soft1 = torch.softmax(outputs1, dim=1) + + outputs2 = model2(volume_batch) + outputs_soft2 = torch.softmax(outputs2, dim=1) + consistency_weight = get_current_consistency_weight( + iter_num // 150) + + loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss( + outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))) + loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss( + outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))) + + pseudo_outputs1 = torch.argmax( + outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False) + pseudo_outputs2 = torch.argmax( + outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False) + + pseudo_supervision1 = dice_loss( + outputs_soft1[args.labeled_bs:], pseudo_outputs2.unsqueeze(1)) + pseudo_supervision2 = dice_loss( + outputs_soft2[args.labeled_bs:], pseudo_outputs1.unsqueeze(1)) + + model1_loss = loss1 + consistency_weight * pseudo_supervision1 + model2_loss = loss2 + consistency_weight * pseudo_supervision2 + + loss = model1_loss + model2_loss + + optimizer1.zero_grad() + optimizer2.zero_grad() + + loss.backward() + + optimizer1.step() + optimizer2.step() + + iter_num = iter_num + 1 + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer1.param_groups: + param_group['lr'] = lr_ + for param_group in optimizer2.param_groups: + param_group['lr'] = lr_ + + writer.add_scalar('lr', lr_, iter_num) + writer.add_scalar( + 'consistency_weight/consistency_weight', consistency_weight, iter_num) + writer.add_scalar('loss/model1_loss', + model1_loss, iter_num) + writer.add_scalar('loss/model2_loss', + model2_loss, iter_num) + 
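+            # Editor's note (descriptive comment): the cross-teaching objective formed above is
+            #     model1_loss = sup_loss1 + w(t) * Dice(soft1[unlabeled], argmax(soft2[unlabeled]))
+            #     model2_loss = sup_loss2 + w(t) * Dice(soft2[unlabeled], argmax(soft1[unlabeled]))
+            # i.e. on the unlabeled half of the batch each network is supervised by the other
+            # network's hard pseudo-labels, while w(t) = consistency * sigmoid_rampup(iter_num // 150,
+            # consistency_rampup) ramps the cross supervision up slowly from ~0.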
logging.info('iteration %d : model1 loss : %f model2 loss : %f' % ( + iter_num, model1_loss.item(), model2_loss.item())) + if iter_num % 50 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs1, dim=1), dim=1, keepdim=True) + writer.add_image('train/model1_Prediction', + outputs[1, ...] * 50, iter_num) + outputs = torch.argmax(torch.softmax( + outputs2, dim=1), dim=1, keepdim=True) + writer.add_image('train/model2_Prediction', + outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model1.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume( + sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes, patch_size=args.patch_size) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance1 = np.mean(metric_list, axis=0)[0] + + mean_hd951 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/model1_val_mean_dice', + performance1, iter_num) + writer.add_scalar('info/model1_val_mean_hd95', + mean_hd951, iter_num) + + if performance1 > best_performance1: + best_performance1 = performance1 + save_mode_path = os.path.join(snapshot_path, + 'model1_iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance1, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model1.pth'.format(args.model)) + torch.save(model1.state_dict(), save_mode_path) + torch.save(model1.state_dict(), save_best) + + logging.info( + 'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951)) + model1.train() + + model2.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume( + sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes, patch_size=args.patch_size) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance2 = np.mean(metric_list, axis=0)[0] + + mean_hd952 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/model2_val_mean_dice', + performance2, iter_num) + writer.add_scalar('info/model2_val_mean_hd95', + mean_hd952, iter_num) + + if performance2 > best_performance2: + best_performance2 = performance2 + save_mode_path = os.path.join(snapshot_path, + 'model2_iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance2, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model2.pth'.format(args.model)) + torch.save(model2.state_dict(), save_mode_path) + torch.save(model2.state_dict(), save_best) + + logging.info( + 'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952)) + model2.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'model1_iter_' + str(iter_num) + '.pth') + torch.save(model1.state_dict(), save_mode_path) + logging.info("save model1 to 
{}".format(save_mode_path)) + + save_mode_path = os.path.join( + snapshot_path, 'model2_iter_' + str(iter_num) + '.pth') + torch.save(model2.state_dict(), save_mode_path) + logging.info("save model2 to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + time1 = time.time() + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "../model/{}_{}/{}".format( + args.exp, args.labeled_num, args.model) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + if os.path.exists(snapshot_path + '/code'): + shutil.rmtree(snapshot_path + '/code') + shutil.copytree('.', snapshot_path + '/code', + shutil.ignore_patterns(['.git', '__pycache__'])) + + logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_fully_supervised_2D.py b/code/train_fully_supervised_2D.py index ba0669c..6286470 100644 --- a/code/train_fully_supervised_2D.py +++ b/code/train_fully_supervised_2D.py @@ -19,7 +19,7 @@ from torchvision import transforms from torchvision.utils import make_grid from tqdm import tqdm - + from dataloaders import utils from dataloaders.dataset import BaseDataSets, RandomGenerator from networks.net_factory import net_factory @@ -28,11 +28,11 @@ parser = argparse.ArgumentParser() parser.add_argument('--root_path', type=str, - default='../data/ACDC', help='Name of Experiment') + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') parser.add_argument('--exp', type=str, default='ACDC', help='experiment_name') parser.add_argument('--fold', type=str, - default='fold5', help='cross validation') + default='fold1', help='cross validation') parser.add_argument('--sup_type', type=str, default='label', help='supervision type') parser.add_argument('--model', type=str, @@ -98,9 +98,9 @@ def worker_init_fn(worker_id): outputs_soft = torch.softmax(outputs, dim=1) loss_ce = ce_loss(outputs, label_batch[:].long()) - loss = 0.5 * (loss_ce + dice_loss(outputs_soft, - label_batch.unsqueeze(1))) - # loss = loss_ce + # loss = 0.5 * (loss_ce + dice_loss(outputs_soft, + # label_batch.unsqueeze(1))) + loss = loss_ce optimizer.zero_grad() loss.backward() optimizer.step() @@ -191,14 +191,11 @@ def worker_init_fn(worker_id): torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) - snapshot_path = "../model/{}_{}/{}".format( + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}".format( args.exp, args.fold, args.sup_type) if not os.path.exists(snapshot_path): os.makedirs(snapshot_path) - if os.path.exists(snapshot_path + '/code'): - shutil.rmtree(snapshot_path + '/code') - shutil.copytree('.', snapshot_path + '/code', - shutil.ignore_patterns(['.git', '__pycache__'])) + logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') diff --git a/code/train_ict.py b/code/train_ict.py new file mode 100644 index 0000000..147ebf5 --- /dev/null +++ b/code/train_ict.py @@ -0,0 +1,290 @@ +import argparse +import logging +import os +import 
random +import shutil +import sys +import time +from itertools import cycle + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn import BCEWithLogitsLoss +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid +from tqdm import tqdm + +from dataloaders import utils +from dataloaders.dataset_semi import (BaseDataSets, RandomGenerator, + TwoStreamBatchSampler) +from networks.discriminator import FCDiscriminator +from networks.net_factory import net_factory +from utils import losses, metrics, ramps +from val_2D import test_single_volume + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') +parser.add_argument('--exp', type=str, + default='ACDC_Semi/ICT_scribble', help='experiment_name') +parser.add_argument('--model', type=str, + default='unet', help='model_name') +parser.add_argument('--fold', type=str, + default='fold1', help='cross validation') +parser.add_argument('--sup_type', type=str, + default='scribble', help='supervision type') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=12, + help='batch_size per gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--patch_size', type=list, default=[256, 256], + help='patch size of network input') +parser.add_argument('--seed', type=int, default=2022, help='random seed') +parser.add_argument('--num_classes', type=int, default=4, + help='output channel of network') + +# label and unlabel +parser.add_argument('--labeled_bs', type=int, default=6, + help='labeled_batch_size per gpu') +parser.add_argument('--labeled_num', type=int, default=4, + help='labeled data') +parser.add_argument('--ict_alpha', type=int, default=0.2, + help='ict_alpha') + +# costs +parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay') +parser.add_argument('--consistency_type', type=str, + default="mse", help='consistency_type') +parser.add_argument('--consistency', type=float, + default=0.1, help='consistency') +parser.add_argument('--consistency_rampup', type=float, + default=200.0, help='consistency_rampup') +args = parser.parse_args() + +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') + +def get_current_consistency_weight(epoch): + # Consistency ramp-up from https://arxiv.org/abs/1610.02242 + return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) + + +def update_ema_variables(model, ema_model, alpha, global_step): + # Use the true average until the exponential average is more correct + alpha = min(1 - 1 / (global_step + 1), alpha) + for ema_param, param in zip(ema_model.parameters(), model.parameters()): + ema_param.data.mul_(alpha).add_(1 - alpha, param.data) + + +def train(args, snapshot_path): + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size + max_iterations = args.max_iterations + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + def create_model(ema=False): + # Network 
definition + model = net_factory(net_type=args.model, in_chns=1, + class_num=num_classes) + if ema: + for param in model.parameters(): + param.detach_() + return model + + model = create_model() + ema_model = create_model(ema=True) + + model=model.to(device) + ema_model=ema_model.to(device) + + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size) + ])) + db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ + RandomGenerator(args.patch_size)])) + + trainloader_labeled = DataLoader(db_train_labeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn) + trainloader_unlabeled = DataLoader(db_train_unlabeled, batch_size=args.batch_size//2, shuffle=True, + num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn) + + db_val = BaseDataSets(base_dir=args.root_path, + fold=args.fold, split="val", ) + valloader = DataLoader(db_val, batch_size=1, shuffle=False, + num_workers=1) + + model.train() + + optimizer = optim.SGD(model.parameters(), lr=base_lr, + momentum=0.9, weight_decay=0.0001) + + # ce_loss = CrossEntropyLoss() + # dice_loss = losses.DiceLoss(num_classes) + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) + + writer = SummaryWriter(snapshot_path + '/log') + logging.info("{} iterations per epoch".format(len(trainloader_labeled))) + + iter_num = 0 + max_epoch = max_iterations // len(trainloader_labeled) + 1 + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + for i, data in enumerate(zip(cycle(trainloader_labeled), trainloader_unlabeled)): + sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] + + volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) + + # ICT mix factors + ict_mix_factors = np.random.beta( + args.ict_alpha, args.ict_alpha, size=(args.labeled_bs//2, 1, 1, 1)) + ict_mix_factors = torch.tensor( + ict_mix_factors, dtype=torch.float).to(device) + unlabeled_volume_batch_0 = unlabeled_volume_batch[0:args.labeled_bs//2, ...] + unlabeled_volume_batch_1 = unlabeled_volume_batch[args.labeled_bs//2:, ...] 
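+            # Editor's note (descriptive comment): Interpolation Consistency Training (ICT)
+            # mixes the two halves of the unlabeled batch with a factor
+            # lam ~ Beta(ict_alpha, ict_alpha):
+            #     x_mix = (1 - lam) * u0 + lam * u1
+            # The student's softmax prediction on x_mix is then pulled (below) towards the same
+            # convex combination of the EMA teacher's predictions on u0 and u1, scaled by the
+            # ramped consistency weight.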
+ + # Mix images + batch_ux_mixed = unlabeled_volume_batch_0 * \ + (1.0 - ict_mix_factors) + \ + unlabeled_volume_batch_1 * ict_mix_factors + input_volume_batch = torch.cat([volume_batch, batch_ux_mixed], dim=0) + outputs = model(input_volume_batch) + outputs_soft = torch.softmax(outputs, dim=1) + with torch.no_grad(): + ema_output_ux0 = torch.softmax( + ema_model(unlabeled_volume_batch_0), dim=1) + ema_output_ux1 = torch.softmax( + ema_model(unlabeled_volume_batch_1), dim=1) + batch_pred_mixed = ema_output_ux0 * \ + (1.0 - ict_mix_factors) + ema_output_ux1 * ict_mix_factors + + loss_ce = ce_loss(outputs[:args.labeled_bs], + label_batch[:args.labeled_bs][:].long()) + loss_dice = dice_loss( + outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)) + supervised_loss = 0.5 * (loss_dice + loss_ce) + consistency_weight = get_current_consistency_weight(iter_num//150) + consistency_loss = torch.mean( + (outputs_soft[args.labeled_bs:] - batch_pred_mixed) ** 2) + loss = supervised_loss + consistency_weight * consistency_loss + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + writer.add_scalar('info/loss_dice', loss_dice, iter_num) + writer.add_scalar('info/consistency_loss', + consistency_loss, iter_num) + writer.add_scalar('info/consistency_weight', + consistency_weight, iter_num) + + logging.info( + 'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' % + (iter_num, loss.item(), loss_ce.item(), loss_dice.item())) + + if iter_num % 20 == 0: + image = volume_batch[1, 0:1, :, :] + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax( + outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', + outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + if iter_num > 0 and iter_num % 200 == 0: + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in enumerate(valloader): + metric_i = test_single_volume( + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) + metric_list += np.array(metric_i) + metric_list = metric_list / len(db_val) + for class_i in range(num_classes-1): + writer.add_scalar('info/val_{}_dice'.format(class_i+1), + metric_list[class_i, 0], iter_num) + writer.add_scalar('info/val_{}_hd95'.format(class_i+1), + metric_list[class_i, 1], iter_num) + + performance = np.mean(metric_list, axis=0)[0] + + mean_hd95 = np.mean(metric_list, axis=0)[1] + writer.add_scalar('info/val_mean_dice', performance, iter_num) + writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num) + + if performance > best_performance: + best_performance = performance + save_mode_path = os.path.join(snapshot_path, + 'iter_{}_dice_{}.pth'.format( + iter_num, round(best_performance, 4))) + save_best = os.path.join(snapshot_path, + '{}_best_model.pth'.format(args.model)) + torch.save(model.state_dict(), save_mode_path) + torch.save(model.state_dict(), save_best) + + logging.info( + 'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95)) + model.train() + + if iter_num % 3000 == 0: + save_mode_path = os.path.join( + snapshot_path, 'iter_' + str(iter_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if iter_num >= max_iterations: + break + if iter_num >= max_iterations: + iterator.close() + break + writer.close() + return "Training Finished!" 
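+
+# --- Editor's note: illustrative sketch, not part of the original patch ---
+# `update_ema_variables` above relies on the legacy positional signature
+# Tensor.add_(scalar, tensor), which recent PyTorch releases deprecate.
+# A behaviour-equivalent variant using the keyword form (torch is already
+# imported at the top of this file) would look like this:
+def update_ema_variables_v2(model, ema_model, alpha, global_step):
+    # Use the true average until the exponential average is more correct.
+    alpha = min(1 - 1 / (global_step + 1), alpha)
+    with torch.no_grad():
+        for ema_param, param in zip(ema_model.parameters(), model.parameters()):
+            # theta_ema <- alpha * theta_ema + (1 - alpha) * theta_student
+            ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)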
+ + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}".format(args.exp, args.fold, args.sup_type) + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + + + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + train(args, snapshot_path) diff --git a/code/train_mean_teacher_2D.py b/code/train_mean_teacher_2D.py index f3f20e5..7dbfdb2 100644 --- a/code/train_mean_teacher_2D.py +++ b/code/train_mean_teacher_2D.py @@ -31,15 +31,15 @@ parser = argparse.ArgumentParser() parser.add_argument('--root_path', type=str, - default='../data/ACDC', help='Name of Experiment') + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') parser.add_argument('--exp', type=str, - default='ACDC_Semi/Mean_Teacher', help='experiment_name') + default='ACDC_Semi/Mean_Teacher_scribble', help='experiment_name') parser.add_argument('--model', type=str, default='unet', help='model_name') parser.add_argument('--fold', type=str, - default='fold2', help='cross validation') + default='fold1', help='cross validation') parser.add_argument('--sup_type', type=str, - default='label', help='supervision type') + default='scribble', help='supervision type') parser.add_argument('--max_iterations', type=int, default=30000, help='maximum epoch number to train') parser.add_argument('--batch_size', type=int, default=12, @@ -69,6 +69,7 @@ default=200.0, help='consistency_rampup') args = parser.parse_args() +device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu') def get_current_consistency_weight(epoch): # Consistency ramp-up from https://arxiv.org/abs/1610.02242 @@ -103,6 +104,9 @@ def create_model(ema=False): model = create_model() ema_model = create_model(ema=True) + model=model.to(device) + ema_model=ema_model.to(device) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=8, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ RandomGenerator(args.patch_size) ])) @@ -124,8 +128,10 @@ def create_model(ema=False): optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001) - ce_loss = CrossEntropyLoss() - dice_loss = losses.DiceLoss(num_classes) + # ce_loss = CrossEntropyLoss() + # dice_loss = losses.DiceLoss(num_classes) + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) writer = SummaryWriter(snapshot_path + '/log') logging.info("{} iterations per epoch".format(len(trainloader_labeled))) @@ -139,8 +145,8 @@ def create_model(ema=False): sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] - volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda() - unlabeled_volume_batch = sampled_batch_unlabeled['image'].cuda() + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) print("Labeled slices: ", sampled_batch_labeled["idx"]) print("Unlabeled slices: ", sampled_batch_unlabeled["idx"]) @@ 
-159,7 +165,7 @@ def create_model(ema=False): ema_output_soft = torch.softmax(ema_output, dim=1) loss_ce = ce_loss(outputs, label_batch[:].long()) - loss_dice = dice_loss(outputs_soft, label_batch.unsqueeze(1)) + loss_dice = ce_loss(outputs, label_batch[:].long())#dice_loss(outputs_soft, label_batch.unsqueeze(1)) supervised_loss = 0.5 * (loss_dice + loss_ce) consistency_weight = get_current_consistency_weight( iter_num // 300) @@ -206,7 +212,7 @@ def create_model(ema=False): metric_list = 0.0 for i_batch, sampled_batch in enumerate(valloader): metric_i = test_single_volume( - sampled_batch["image"], sampled_batch["label"], model, classes=num_classes) + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) metric_list += np.array(metric_i) metric_list = metric_list / len(db_val) for class_i in range(num_classes-1): @@ -263,14 +269,10 @@ def create_model(ema=False): torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) - snapshot_path = "../model/{}_{}/{}".format( - args.exp, args.fold, args.sup_type) + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}".format(args.exp, args.fold, args.sup_type) if not os.path.exists(snapshot_path): os.makedirs(snapshot_path) - if os.path.exists(snapshot_path + '/code'): - shutil.rmtree(snapshot_path + '/code') - shutil.copytree('.', snapshot_path + '/code', - shutil.ignore_patterns(['.git', '__pycache__'])) + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') diff --git a/code/train_semi.sh b/code/train_semi.sh deleted file mode 100644 index 5fd736d..0000000 --- a/code/train_semi.sh +++ /dev/null @@ -1,20 +0,0 @@ -python -u train_entropy_minimization_2D.py --fold fold1 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Entropy_Minimization --max_iterations 60000 --batch_size 12 & -python -u train_entropy_minimization_2D.py --fold fold2 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Entropy_Minimization --max_iterations 60000 --batch_size 12 & -python -u train_entropy_minimization_2D.py --fold fold3 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Entropy_Minimization --max_iterations 60000 --batch_size 12 & -python -u train_entropy_minimization_2D.py --fold fold4 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Entropy_Minimization --max_iterations 60000 --batch_size 12 & -python -u train_entropy_minimization_2D.py --fold fold5 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Entropy_Minimization --max_iterations 60000 --batch_size 12 & -python -u train_deep_adversarial_network_2D.py --fold fold1 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Deep_Adversarial_Network --max_iterations 60000 --batch_size 12 & -python -u train_deep_adversarial_network_2D.py --fold fold2 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Deep_Adversarial_Network --max_iterations 60000 --batch_size 12 & -python -u train_deep_adversarial_network_2D.py --fold fold3 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Deep_Adversarial_Network --max_iterations 60000 --batch_size 12 & -python -u train_deep_adversarial_network_2D.py --fold fold4 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Deep_Adversarial_Network --max_iterations 60000 --batch_size 12 & -python -u train_deep_adversarial_network_2D.py --fold fold5 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Deep_Adversarial_Network --max_iterations 60000 --batch_size 12 & -python -u 
train_uncertainty_aware_mean_teacher_2D.py --fold fold1 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Uncertainty_Aware_Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_uncertainty_aware_mean_teacher_2D.py --fold fold2 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Uncertainty_Aware_Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_uncertainty_aware_mean_teacher_2D.py --fold fold3 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Uncertainty_Aware_Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_uncertainty_aware_mean_teacher_2D.py --fold fold4 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Uncertainty_Aware_Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_uncertainty_aware_mean_teacher_2D.py --fold fold5 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Uncertainty_Aware_Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_mean_teacher_2D.py --fold fold1 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_mean_teacher_2D.py --fold fold2 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_mean_teacher_2D.py --fold fold3 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_mean_teacher_2D.py --fold fold4 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Mean_Teacher --max_iterations 60000 --batch_size 12 & -python -u train_mean_teacher_2D.py --fold fold5 --num_classes 4 --root_path ../data/ACDC --exp ACDC_Semi/Mean_Teacher --max_iterations 60000 --batch_size 12 \ No newline at end of file diff --git a/code/train_uncertainty_aware_mean_teacher_2D.py b/code/train_uncertainty_aware_mean_teacher_2D.py index bede36a..02246be 100644 --- a/code/train_uncertainty_aware_mean_teacher_2D.py +++ b/code/train_uncertainty_aware_mean_teacher_2D.py @@ -29,20 +29,26 @@ from utils import losses, metrics, ramps from val_2D import test_single_volume +"""Select GPU ID""" +gpu_list = [4] #[0,1] +gpu_list_str = ','.join(map(str, gpu_list)) +os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + parser = argparse.ArgumentParser() parser.add_argument('--root_path', type=str, - default='../data/ACDC', help='Name of Experiment') + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') parser.add_argument('--exp', type=str, - default='ACDC_Semi/Uncertainty_Aware_Mean_Teacher', help='experiment_name') + default='ACDC_Semi/Uncertainty_Aware_Mean_Teacher_scribble', help='experiment_name') parser.add_argument('--model', type=str, default='unet', help='model_name') parser.add_argument('--fold', type=str, - default='fold2', help='cross validation') + default='fold1', help='cross validation') parser.add_argument('--sup_type', type=str, - default='label', help='supervision type') + default='scribble', help='supervision type') parser.add_argument('--max_iterations', type=int, default=30000, help='maximum epoch number to train') -parser.add_argument('--batch_size', type=int, default=12, +parser.add_argument('--batch_size', type=int, default=24, help='batch_size per gpu') parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training') @@ -55,7 +61,7 @@ help='output channel of network') # label and unlabel
-parser.add_argument('--labeled_bs', type=int, default=6, +parser.add_argument('--labeled_bs', type=int, default=12, help='labeled_batch_size per gpu') parser.add_argument('--labeled_num', type=int, default=4, help='labeled data') @@ -69,7 +75,7 @@ default=200.0, help='consistency_rampup') args = parser.parse_args() - +device = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu') def get_current_consistency_weight(epoch): # Consistency ramp-up from https://arxiv.org/abs/1610.02242 return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup) @@ -103,9 +109,11 @@ def create_model(ema=False): model = create_model() ema_model = create_model(ema=True) + model=model.to(device) + ema_model=ema_model.to(device) + db_train_labeled = BaseDataSets(base_dir=args.root_path, num=4, labeled_type="labeled", fold=args.fold, split="train", transform=transforms.Compose([ - RandomGenerator(args.patch_size) - ])) + RandomGenerator(args.patch_size)]),sup_type=args.sup_type) db_train_unlabeled = BaseDataSets(base_dir=args.root_path, num=4, labeled_type="unlabeled", fold=args.fold, split="train", transform=transforms.Compose([ RandomGenerator(args.patch_size)])) @@ -124,9 +132,10 @@ def create_model(ema=False): optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001) - ce_loss = CrossEntropyLoss() - dice_loss = losses.DiceLoss(num_classes) - + # ce_loss = CrossEntropyLoss() + # dice_loss = losses.DiceLoss(num_classes) + ce_loss = CrossEntropyLoss(ignore_index=4) + dice_loss = losses.pDLoss(num_classes, ignore_index=4) writer = SummaryWriter(snapshot_path + '/log') logging.info("{} iterations per epoch".format(len(trainloader_labeled))) @@ -139,8 +148,8 @@ def create_model(ema=False): sampled_batch_labeled, sampled_batch_unlabeled = data[0], data[1] volume_batch, label_batch = sampled_batch_labeled['image'], sampled_batch_labeled['label'] - volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda() - unlabeled_volume_batch = sampled_batch_unlabeled['image'].cuda() + volume_batch, label_batch = volume_batch.to(device), label_batch.to(device) + unlabeled_volume_batch = sampled_batch_unlabeled['image'].to(device) print("Labeled slices: ", sampled_batch_labeled["idx"]) print("Unlabeled slices: ", sampled_batch_unlabeled["idx"]) @@ -160,7 +169,7 @@ def create_model(ema=False): _, _, w, h = unlabeled_volume_batch.shape volume_batch_r = unlabeled_volume_batch.repeat(2, 1, 1, 1) stride = volume_batch_r.shape[0] // 2 - preds = torch.zeros([stride * T, num_classes, w, h]).cuda() + preds = torch.zeros([stride * T, num_classes, w, h]).to(device) for i in range(T // 2): ema_inputs = volume_batch_r + \ torch.clamp(torch.randn_like( @@ -225,7 +234,7 @@ def create_model(ema=False): metric_list = 0.0 for i_batch, sampled_batch in enumerate(valloader): metric_i = test_single_volume( - sampled_batch["image"], sampled_batch["label"], model, classes=num_classes) + sampled_batch["image"].to(device), sampled_batch["label"].to(device), model, device=device,classes=num_classes) metric_list += np.array(metric_i) metric_list = metric_list / len(db_val) for class_i in range(num_classes-1): @@ -282,14 +291,10 @@ def create_model(ema=False): torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) - snapshot_path = "../model/{}_{}/{}".format( - args.exp, args.fold, args.sup_type) + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}".format(args.exp, args.fold, args.sup_type) if not os.path.exists(snapshot_path): os.makedirs(snapshot_path) - if 
os.path.exists(snapshot_path + '/code'): - shutil.rmtree(snapshot_path + '/code') - shutil.copytree('.', snapshot_path + '/code', - shutil.ignore_patterns(['.git', '__pycache__'])) + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') diff --git a/code/train_weakly_supervised_pCE_2D.py b/code/train_weakly_supervised_pCE_2D.py index 64f39bf..f0080b9 100644 --- a/code/train_weakly_supervised_pCE_2D.py +++ b/code/train_weakly_supervised_pCE_2D.py @@ -28,7 +28,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--root_path', type=str, - default='../data/ACDC', help='Name of Experiment') + default='/mnt/sdd/yd2tb/data/ACDC', help='Name of Experiment') parser.add_argument('--exp', type=str, default='ACDC_pCE', help='experiment_name') parser.add_argument('--fold', type=str, @@ -52,6 +52,12 @@ parser.add_argument('--seed', type=int, default=2022, help='random seed') args = parser.parse_args() +"""Select GPU ID""" +gpu_list = [6] #[0,1] +gpu_list_str = ','.join(map(str, gpu_list)) +os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + def train(args, snapshot_path): base_lr = args.base_lr @@ -63,8 +69,7 @@ def train(args, snapshot_path): db_train = BaseDataSets(base_dir=args.root_path, split="train", transform=transforms.Compose([ RandomGenerator(args.patch_size) ]), fold=args.fold, sup_type=args.sup_type) - db_val = BaseDataSets(base_dir=args.root_path, - fold=args.fold, split="val") + db_val = BaseDataSets(base_dir=args.root_path,fold=args.fold, split="val") def worker_init_fn(worker_id): random.seed(args.seed + worker_id) @@ -189,14 +194,10 @@ def worker_init_fn(worker_id): torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) - snapshot_path = "../model/{}_{}/{}".format( - args.exp, args.fold, args.sup_type) + snapshot_path = "/mnt/sdd/yd2tb/work_dirs/model/{}_{}/{}".format(args.exp, args.fold, args.sup_type) if not os.path.exists(snapshot_path): os.makedirs(snapshot_path) - if os.path.exists(snapshot_path + '/code'): - shutil.rmtree(snapshot_path + '/code') - shutil.copytree('.', snapshot_path + '/code', - shutil.ignore_patterns(['.git', '__pycache__'])) + logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') diff --git a/code/train_weakly_supervised_pCE_random_walker_2D.py b/code/train_weakly_supervised_pCE_random_walker_2D.py index a612d91..fb16c35 100644 --- a/code/train_weakly_supervised_pCE_random_walker_2D.py +++ b/code/train_weakly_supervised_pCE_random_walker_2D.py @@ -194,10 +194,7 @@ def worker_init_fn(worker_id): args.exp, args.fold, args.sup_type) if not os.path.exists(snapshot_path): os.makedirs(snapshot_path) - if os.path.exists(snapshot_path + '/code'): - shutil.rmtree(snapshot_path + '/code') - shutil.copytree('.', snapshot_path + '/code', - shutil.ignore_patterns(['.git', '__pycache__'])) + logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') diff --git a/code/utils/distributed_utils.py b/code/utils/distributed_utils.py new file mode 100644 index 0000000..651655b --- /dev/null +++ b/code/utils/distributed_utils.py @@ -0,0 +1,70 @@ +import os + +import torch +import torch.distributed as dist + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank =
int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' # communication backend; NCCL is recommended for NVIDIA GPUs + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + dist.barrier() + + +def cleanup(): + dist.destroy_process_group() + + +def is_dist_avail_and_initialized(): + """Check whether a distributed environment is available and initialized""" + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def reduce_value(value, average=True): + world_size = get_world_size() + if world_size < 2: # single-GPU case + return value + + with torch.no_grad(): + dist.all_reduce(value) + if average: + value /= world_size + + return value diff --git a/code/utils/losses.py b/code/utils/losses.py index fcb0074..1cd3fd8 100644 --- a/code/utils/losses.py +++ b/code/utils/losses.py @@ -3,7 +3,7 @@ import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F - +from utils import losses, metrics, ramps,util def dice_loss(score, target): target = target.float() @@ -307,3 +307,304 @@ def forward(self, image, prediction): loss_level = self.levelsetLoss(image, prediction) loss_tv = self.gradientLoss2d(prediction) return loss_level + loss_tv + +def scc_loss(cos_sim,tau,lb_center_12_bg, + lb_center_12_a,un_center_12_bg, + un_center_12_a,lb_center_12_b,lb_center_12_c,un_center_12_b,un_center_12_c): + + loss_intra_bg = torch.exp((cos_sim(lb_center_12_bg, un_center_12_bg))/tau) + loss_intra_la = torch.exp((cos_sim(lb_center_12_a, un_center_12_a))/tau) + loss_intra_lb = torch.exp((cos_sim(lb_center_12_b, un_center_12_b))/tau) + loss_intra_lc = torch.exp((cos_sim(lb_center_12_c, un_center_12_c))/tau) + + + loss_inter_bg_la = torch.exp((cos_sim(lb_center_12_bg, un_center_12_a))/tau) + loss_inter_bg_lb = torch.exp((cos_sim(lb_center_12_bg, un_center_12_b))/tau) + loss_inter_bg_lc = torch.exp((cos_sim(lb_center_12_bg, un_center_12_c))/tau) + + + loss_inter_la_bg = torch.exp((cos_sim(lb_center_12_a, un_center_12_bg))/tau) + loss_inter_lb_bg = torch.exp((cos_sim(lb_center_12_b, un_center_12_bg))/tau) + loss_inter_lc_bg = torch.exp((cos_sim(lb_center_12_c, un_center_12_bg))/tau) + + + loss_contrast_bg = -torch.log(loss_intra_bg)+torch.log(loss_inter_bg_la)+torch.log(loss_inter_bg_lb)+torch.log(loss_inter_bg_lc) + loss_contrast_la = -torch.log(loss_intra_la)+torch.log(loss_inter_la_bg)+torch.log(loss_inter_lb_bg)+torch.log(loss_inter_lc_bg) + loss_contrast_lb = -torch.log(loss_intra_lb)+torch.log(loss_inter_la_bg)+torch.log(loss_inter_lb_bg)+torch.log(loss_inter_lc_bg) + loss_contrast_lc = -torch.log(loss_intra_lc)+torch.log(loss_inter_la_bg)+torch.log(loss_inter_lb_bg)+torch.log(loss_inter_lc_bg) + + loss_contrast = torch.mean(loss_contrast_bg+loss_contrast_la+loss_contrast_lb+loss_contrast_lc) + return loss_contrast + +def get_aff_loss(inputs,
targets): + + # pos_label = (targets == 1).type(torch.int16) + # pos_label2 = (targets == 2).type(torch.int16) + # pos_label3 = (targets == 3).type(torch.int16) + + # pos_count = pos_label.sum() + 1 + # pos_count2 = pos_label2.sum() + 1 + # pos_count3 = pos_label3.sum() + 1 + + # neg_label = (targets == 0).type(torch.int16) + # neg_count = neg_label.sum() + 1 + # #inputs = torch.sigmoid(input=inputs) + + # pos_loss = torch.sum(pos_label * (1 - inputs)) / pos_count + # pos_loss2 = torch.sum(pos_label2 * (1 - inputs)) / pos_count2 + # pos_loss3 = torch.sum(pos_label3 * (1 - inputs)) / pos_count3 + + # neg_loss = torch.sum(neg_label * (inputs)) / neg_count + + # return 0.5 * (pos_loss+pos_loss2+pos_loss3) + 0.5 * neg_loss + pos_label = (targets == 1).type(torch.int16) + pos_count = pos_label.sum() + 1 + neg_label = (targets == 0).type(torch.int16) + neg_count = neg_label.sum() + 1 + #inputs = torch.sigmoid(input=inputs) + + pos_loss = torch.sum(pos_label * (1 - inputs)) / pos_count + neg_loss = torch.sum(neg_label * (inputs)) / neg_count + + return 0.5 * pos_loss + 0.5 * neg_loss + + +class SegformerAffinityEnergyLoss(nn.Module): + def __init__(self, ): + super(SegformerAffinityEnergyLoss, self).__init__() + + + self.weight = 0.78 + self.class_num = 4 + self.loss_index = 3 + + #self.mst_layers = MinimumSpanningTree(TreeFilter2D.norm2_distance) + #self.tree_filter_layers = TreeFilter2D(groups=1, sigma=self.configer.get('tree_loss', 'sigma')) + + # [bz,21,128,128], [bz,21,16,16], [bz,21,32,32], [bz,21,64,64], _attns- a list of 4 + def forward(self, outputs, low_feats, unlabeled_ROIs, targets,ema_att,max_iterations,iter_num): + + + seg, seg_16, seg_32, seg_64, = outputs + attns =low_feats + bz_label,_,_= targets.size() + + bz, _, token_b1_n1, token_b1_n2 = attns[0][0].size() + + attn_avg1 = torch.zeros(bz, token_b1_n1, token_b1_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[0]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg1 += attn + attn_avg1 = attn_avg1 / len(attns[0]) + + # attn_avg2 [bz, 64*64, 16*16] + bz, _, token_b2_n1, token_b2_n2 = attns[1][0].size() + attn_avg2 = torch.zeros(bz, token_b2_n1, token_b2_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[1]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg2 += attn + attn_avg2 = attn_avg2 / len(attns[1]) + + # attn_avg3 [bz, 32*32, 16*16] + bz, _, token_b3_n1, token_b3_n2 = attns[2][0].size() + attn_avg3 = torch.zeros(bz, token_b3_n1, token_b3_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[2]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg3 += attn + attn_avg3 = attn_avg3 / len(attns[2]) + + # attn_avg4 [bz, 32*32, 32*32] + bz, _, token_b4_n1, token_b4_n2 = attns[3][0].size() + attn_avg4 = torch.zeros(bz, token_b4_n1, token_b4_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[3]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg4 += attn + attn_avg4 = attn_avg4 / len(attns[3]) + + #TODO: uncertrainty + # bz_ema, _, token_b1_n1, token_b1_n2 = ema_att[0][0].size() + + attn_avg1_ema = torch.zeros(bz, token_b1_n1, token_b1_n2, dtype=seg.dtype, device=seg.device) + for attn in ema_att[0]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg1_ema += attn + attn_avg1_ema = attn_avg1_ema / len(attns[0]) + + # attn_avg2 [bz, 64*64, 16*16] + # bz, _, token_b2_n1, token_b2_n2 = attns[1][0].size() + attn_avg2_ema = torch.zeros(bz, 
token_b2_n1, token_b2_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[1]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg2_ema += attn + attn_avg2_ema = attn_avg2_ema / len(attns[1]) + + # attn_avg3 [bz, 32*32, 16*16] + # bz, _, token_b3_n1, token_b3_n2 = attns[2][0].size() + attn_avg3_ema = torch.zeros(bz, token_b3_n1, token_b3_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[2]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg3_ema += attn + attn_avg3_ema = attn_avg3_ema / len(attns[2]) + + # attn_avg4 [bz, 32*32, 32*32] + # bz, _, token_b4_n1, token_b4_n2 = attns[3][0].size() + attn_avg4_ema = torch.zeros(bz, token_b4_n1, token_b4_n2, dtype=seg.dtype, device=seg.device) + for attn in attns[3]: + attn = attn.mean(dim=1) + attn = attn / attn.sum(dim=-1,keepdim=True) + attn_avg4_ema += attn + attn_avg4_ema = attn_avg4_ema / len(attns[3]) + + + + + # soft affinity probability + _, _, h128,w128 = seg.size() + prob128 = torch.softmax(seg, dim=1) # prob-[bz,21,128,128] + prob128 = prob128.view(bz,self.class_num,-1).permute(0,2,1) # [bz, 128*128, 21] + prob128_softmax = torch.softmax(prob128, dim=-1) + + _, _, h16,w16 = seg_16.size() + prob16 = torch.softmax(seg_16, dim=1) + prob16 = prob16.view(bz,self.class_num,-1).permute(0,2,1) + prob16_softmax = torch.softmax(prob16, dim=-1) + + _, _, h32,w32 = seg_32.size() + prob32 = torch.softmax(seg_32, dim=1) + prob32 = prob32.view(bz,self.class_num,-1).permute(0,2,1) + prob32_softmax = torch.softmax(prob32, dim=-1) + + _, _, h64,w64 = seg_64.size() + prob64 = torch.softmax(seg_64, dim=1) + prob64 = prob64.view(bz,self.class_num,-1).permute(0,2,1) + prob64_softmax = torch.softmax(prob64, dim=-1) + + + + # attn_avg1_ema= F.interpolate(attn_avg1_ema, size=targets.shape[1:], mode='bilinear', align_corners=False) + # attn_avg2_ema= F.interpolate(attn_avg2_ema, size=targets.shape[1:], mode='bilinear', align_corners=False) + # attn_avg3_ema= F.interpolate(attn_avg3_ema, size=targets.shape[1:], mode='bilinear', align_corners=False) + # attn_avg1_ema= F.interpolate(attn_avg4_ema, size=targets.shape[1:], mode='bilinear', align_corners=False) + + # attn_avg_ema = torch.mean(torch.stack([F.softmax(attn_avg1_ema, dim=1), F.softmax(attn_avg2_ema, dim=1)]), dim=0) + + threshold = (0.75 + 0.25 * ramps.sigmoid_rampup(iter_num, max_iterations)) * np.log(2) + + uncertainty1 = -1.0 * torch.sum(attn_avg1_ema.permute(0,2,1) * torch.log(attn_avg1_ema.permute(0,2,1) + 1e-6), dim=1, keepdim=True) + uncertainty_mask1 = (uncertainty1 > threshold) + # pusdo_att = torch.argmax(F.softmax(pusdo_label, dim=1).detach(), dim=1, keepdim=True).float() + certainty_att1 = attn_avg1.permute(0,2,1).clone() + uncertainty_mask1 = uncertainty_mask1.repeat(1,64,1) + certainty_att1[uncertainty_mask1] = 0 + + + uncertainty2 = -1.0 * torch.sum(attn_avg2_ema.permute(0,2,1) * torch.log(attn_avg2_ema.permute(0,2,1) + 1e-6), dim=1, keepdim=True) + uncertainty_mask2 = (uncertainty2 > threshold) + # pusdo_att = torch.argmax(F.softmax(pusdo_label, dim=1).detach(), dim=1, keepdim=True).float() + certainty_att2 = attn_avg2.permute(0,2,1).clone() + uncertainty_mask2 = uncertainty_mask2.repeat(1,64,1) + certainty_att2[uncertainty_mask2] = 0 + + + uncertainty3 = -1.0 * torch.sum(attn_avg3_ema.permute(0,2,1) * torch.log(attn_avg3_ema.permute(0,2,1) + 1e-6), dim=1, keepdim=True) + + + uncertainty_mask3 = (uncertainty3 > threshold) + # pusdo_att = torch.argmax(F.softmax(pusdo_label, dim=1).detach(), dim=1, 
keepdim=True).float() + certainty_att3 = attn_avg3.permute(0,2,1).clone() + + uncertainty_mask3 = uncertainty_mask3.repeat(1,64,1) + certainty_att3[uncertainty_mask3] = 0 + + + uncertainty4 = -1.0 * torch.sum(attn_avg4_ema.permute(0,2,1) * torch.log(attn_avg4_ema.permute(0,2,1) + 1e-6), dim=1, keepdim=True) + uncertainty_mask4 = (uncertainty4 > threshold) + # pusdo_att = torch.argmax(F.softmax(pusdo_label, dim=1).detach(), dim=1, keepdim=True).float() + certainty_att4 = attn_avg4.permute(0,2,1).clone() + + uncertainty_mask4 = uncertainty_mask4.repeat(1,256,1) + certainty_att4[uncertainty_mask4] = 0 + + + # loss + # affinity_loss1 = torch.abs(torch.matmul(attn_avg1, prob16) - prob128) # [bz, 128*128, 21] + # affinity_loss2 = torch.abs(torch.matmul(attn_avg2, prob16) - prob64) + # affinity_loss3 = torch.abs(torch.matmul(attn_avg3, prob16) - prob32) + # affinity_loss4 = torch.abs(torch.matmul(attn_avg4, prob32) - prob32) + pusdo_label1= torch.softmax(torch.matmul(certainty_att1.permute(0,2,1), prob16),dim=-1) + pusdo_label1= pusdo_label1.view(bz,self.class_num,h128,w128) + pusdo_label1= F.interpolate(pusdo_label1, size=targets.shape[1:], mode='bilinear', align_corners=False) + + pusdo_label2=torch.softmax(torch.matmul(certainty_att2.permute(0,2,1), prob16),dim=-1) + pusdo_label2= pusdo_label2.view(bz,self.class_num,h64,w64) + pusdo_label2= F.interpolate(pusdo_label2, size=targets.shape[1:], mode='bilinear', align_corners=False) + # return pusdo_label + pusdo_label3=torch.softmax(torch.matmul(certainty_att3.permute(0,2,1), prob16),dim=-1) + pusdo_label3= pusdo_label3.view(bz,self.class_num,h32,w32) + pusdo_label3= F.interpolate(pusdo_label3, size=targets.shape[1:], mode='bilinear', align_corners=False) + + pusdo_label4=torch.softmax(torch.matmul(certainty_att4.permute(0,2,1), prob32),dim=-1) + pusdo_label4= pusdo_label4.view(bz,self.class_num,h32,w32) + pusdo_label4= F.interpolate(pusdo_label4, size=targets.shape[1:], mode='bilinear', align_corners=False) + pusdo_label=(pusdo_label1+pusdo_label2+pusdo_label3+pusdo_label4)/4 + + affinity_loss1 = torch.abs(torch.softmax(torch.matmul(attn_avg1[:bz_label,...], + prob16[:bz_label,...]),dim=-1) - prob128_softmax[:bz_label,...]) # [bz, 128*128, 21] + affinity_loss2 = torch.abs(torch.softmax(torch.matmul(attn_avg2[:bz_label,...], + prob16[:bz_label,...]),dim=-1) - prob64_softmax[:bz_label,...]) + affinity_loss3 = torch.abs(torch.softmax(torch.matmul(attn_avg3[:bz_label,...], + prob16[:bz_label,...]),dim=-1) - prob32_softmax[:bz_label,...]) + affinity_loss4 = torch.abs(torch.softmax(torch.matmul(attn_avg4[:bz_label,...], + prob32[:bz_label,...]),dim=-1) - prob32_softmax[:bz_label,...]) + + # affinity_loss1 = F.kl_div(F.log_softmax(torch.matmul(attn_avg1, prob16),dim=-1) , prob128_softmax) # [bz, 128*128, 21] + # affinity_loss2 = F.kl_div(F.log_softmax(torch.matmul(attn_avg2, prob16),dim=-1) , prob64_softmax) + # affinity_loss3 = F.kl_div(F.log_softmax(torch.matmul(attn_avg3, prob16),dim=-1) , prob32_softmax) + # affinity_loss4 = F.kl_div(F.log_softmax(torch.matmul(attn_avg4, prob32),dim=-1) , prob32_softmax) + + # # affinity loss number + with torch.no_grad(): + unlabeled_ROIs128 = F.interpolate(unlabeled_ROIs.unsqueeze(1), size=(h128, w128), mode='nearest') # [bz, 1, 128, 128] + unlabeled_ROIs128 = unlabeled_ROIs128.view(bz_label, -1).unsqueeze(-1) + N128 = unlabeled_ROIs128.sum() + + unlabeled_ROIs16 = F.interpolate(unlabeled_ROIs.unsqueeze(1).float(), size=(h16, w16), mode='nearest') # [bz, 1, 16, 16] + unlabeled_ROIs16 = unlabeled_ROIs16.view(bz, 
-1).unsqueeze(-1) + N16 = unlabeled_ROIs16.sum() + + unlabeled_ROIs32 = F.interpolate(unlabeled_ROIs.unsqueeze(1), size=(h32, w32), mode='nearest') # [bz, 1, 16, 16] + unlabeled_ROIs32 = unlabeled_ROIs32.view(bz_label, -1).unsqueeze(-1) + N32 = unlabeled_ROIs32.sum() + + unlabeled_ROIs64 = F.interpolate(unlabeled_ROIs.unsqueeze(1), size=(h64, w64), mode='nearest') # [bz, 1, 16, 16] + unlabeled_ROIs64 = unlabeled_ROIs64.view(bz_label, -1).unsqueeze(-1) + N64 = unlabeled_ROIs64.sum() + + if N128>0: + affinity_loss1 = (unlabeled_ROIs128 * affinity_loss1).sum() / N128 + if N64>0: + affinity_loss2 = (unlabeled_ROIs64 * affinity_loss2).sum() / N64 + if N32>0: + affinity_loss3 = (unlabeled_ROIs32 * affinity_loss3).sum() / N32 + if N32>0: + affinity_loss4 = (unlabeled_ROIs32 * affinity_loss4).sum() / N32 + + if self.loss_index == 0: + affinity_loss = affinity_loss1 + elif self.loss_index == 1: + affinity_loss = affinity_loss1 + affinity_loss2 + elif self.loss_index == 2: + affinity_loss = affinity_loss1 + affinity_loss2 + affinity_loss3 + elif self.loss_index == 3: + affinity_loss = affinity_loss1 + affinity_loss2 + affinity_loss3 + affinity_loss4 + else: + affinity_loss = torch.zeros(1, dtype=seg.dtype, device=seg.device) + return affinity_loss,pusdo_label + diff --git a/code/utils/util.py b/code/utils/util.py index c319c68..9640946 100644 --- a/code/utils/util.py +++ b/code/utils/util.py @@ -5,7 +5,7 @@ from skimage import segmentation as skimage_seg import torch from torch.utils.data.sampler import Sampler - +import torch.nn.functional as F import networks def load_model(path): @@ -141,4 +141,36 @@ def compute_sdf(img_gt, out_shape): # assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis)) # assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis)) - return normalized_sdf \ No newline at end of file + return normalized_sdf + + + +def cams_to_refine_label(cam_label, ignore_index=255): + + b,h,w = cam_label.shape + + cam_label_resized = F.interpolate(cam_label.unsqueeze(1).type(torch.float32), size=[h//16, w//16], mode="nearest") + + _cam_label = cam_label_resized.reshape(b, 1, -1) + _cam_label_rep = _cam_label.repeat([1, _cam_label.shape[-1], 1]) + _cam_label_rep_t = _cam_label_rep.permute(0,2,1) + ref_label = (_cam_label_rep == _cam_label_rep_t).type(torch.long) + #ref_label[(_cam_label_rep+_cam_label_rep_t) == 0] = ignore_index + # for i in range(b): + + # ref_label[i, :, _cam_label_rep[i, 0, :]==ignore_index] = ignore_index + # ref_label[i, _cam_label_rep[i, 0, :]==ignore_index, :] = ignore_index + + return ref_label + + + +def FeatureDropout(x): + attention = torch.mean(x, dim=1, keepdim=True) + max_val, _ = torch.max(attention.view( + x.size(0), -1), dim=1, keepdim=True) + threshold = max_val * np.random.uniform(0.7, 0.9) + threshold = threshold.view(x.size(0), 1, 1, 1).expand_as(attention) + drop_mask = (attention < threshold).float() + x = x.mul(drop_mask) + return x \ No newline at end of file diff --git a/code/val_2D.py b/code/val_2D.py index b81fb50..542bd29 100644 --- a/code/val_2D.py +++ b/code/val_2D.py @@ -2,8 +2,8 @@ import torch from medpy import metric from scipy.ndimage import zoom - - +import torch.nn.functional as F +import cv2 def calculate_metric_percase(pred, gt): pred[pred > 0] = 1 gt[gt > 0] = 1 @@ -15,7 +15,7 @@ def calculate_metric_percase(pred, gt): return 0, 0 -def test_single_volume(image, label, net, classes, patch_size=[256, 256]): +def test_single_volume(image, label, net, classes, 
device,patch_size=[256, 256]): image, label = image.squeeze(0).cpu().detach( ).numpy(), label.squeeze(0).cpu().detach().numpy() if len(image.shape) == 3: @@ -26,7 +26,7 @@ def test_single_volume(image, label, net, classes, patch_size=[256, 256]): slice = zoom( slice, (patch_size[0] / x, patch_size[1] / y), order=0) input = torch.from_numpy(slice).unsqueeze( - 0).unsqueeze(0).float().cuda() + 0).unsqueeze(0).float().to(device) net.eval() with torch.no_grad(): out = torch.argmax(torch.softmax( @@ -37,7 +37,7 @@ def test_single_volume(image, label, net, classes, patch_size=[256, 256]): prediction[ind] = pred else: input = torch.from_numpy(image).unsqueeze( - 0).unsqueeze(0).float().cuda() + 0).unsqueeze(0).float().to(device) net.eval() with torch.no_grad(): out = torch.argmax(torch.softmax( @@ -49,8 +49,46 @@ def test_single_volume(image, label, net, classes, patch_size=[256, 256]): prediction == i, label == i)) return metric_list +def test_single_volume2(image, label, net, classes, device,patch_size=[256, 256]): + image, label = image.squeeze(0).cpu().detach( + ).numpy(), label.squeeze(0).cpu().detach().numpy() + if len(image.shape) == 3: + prediction = np.zeros_like(label) + for ind in range(image.shape[0]): + slice = image[ind, :, :] + x, y = slice.shape[0], slice.shape[1] + slice = zoom( + slice, (patch_size[0] / x, patch_size[1] / y), order=0) + input = torch.from_numpy(slice).unsqueeze( + 0).unsqueeze(0).float().to(device) + net.eval() + with torch.no_grad(): + # output=F.interpolate(net(input)[0], size=label.shape[1:], mode='bilinear', align_corners=False) + out=net(input)[0] + # out=F.interpolate(out, size=patch_size, mode='bilinear', align_corners=False) + out = torch.argmax(torch.softmax(out, dim=1), dim=1).squeeze(0) + out = out.cpu().detach().numpy() + + pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0) + prediction[ind] = pred + else: + input = torch.from_numpy(image).unsqueeze( + 0).unsqueeze(0).float().to(device) + net.eval() + with torch.no_grad(): + out = torch.argmax(torch.softmax(net(input)[0], dim=1), dim=1).squeeze(0) + out=F.interpolate(out, size=label.shape[1:], mode='bilinear', align_corners=False) + prediction = out.cpu().detach().numpy() + + metric_list = [] + for i in range(1, classes): + metric_list.append(calculate_metric_percase( + prediction == i, label == i)) + return metric_list + -def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]): + +def test_single_volume_ds(image, label, net, classes, device,patch_size=[256, 256]): image, label = image.squeeze(0).cpu().detach( ).numpy(), label.squeeze(0).cpu().detach().numpy() if len(image.shape) == 3: @@ -61,7 +99,7 @@ def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]): slice = zoom( slice, (patch_size[0] / x, patch_size[1] / y), order=0) input = torch.from_numpy(slice).unsqueeze( - 0).unsqueeze(0).float().cuda() + 0).unsqueeze(0).float().to(device) net.eval() with torch.no_grad(): output_main, _, _, _ = net(input) @@ -73,7 +111,7 @@ def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]): prediction[ind] = pred else: input = torch.from_numpy(image).unsqueeze( - 0).unsqueeze(0).float().cuda() + 0).unsqueeze(0).float().to(device) net.eval() with torch.no_grad(): output_main, _, _, _ = net(input) @@ -87,7 +125,7 @@ def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]): return metric_list -def test_single_volume_cct(image, label, net, classes, patch_size=[256, 256]): +def test_single_volume_cct(image, label, net, 
classes, device,patch_size=[256, 256]): image, label = image.squeeze(0).cpu().detach( ).numpy(), label.squeeze(0).cpu().detach().numpy() if len(image.shape) == 3: @@ -98,7 +136,7 @@ def test_single_volume_cct(image, label, net, classes, patch_size=[256, 256]): slice = zoom( slice, (patch_size[0] / x, patch_size[1] / y), order=0) input = torch.from_numpy(slice).unsqueeze( - 0).unsqueeze(0).float().cuda() + 0).unsqueeze(0).float().to(device) net.eval() with torch.no_grad(): output_main = net(input)[0] @@ -110,7 +148,7 @@ def test_single_volume_cct(image, label, net, classes, patch_size=[256, 256]): prediction[ind] = pred else: input = torch.from_numpy(image).unsqueeze( - 0).unsqueeze(0).float().cuda() + 0).unsqueeze(0).float().to(device) net.eval() with torch.no_grad(): output_main, _, _, _ = net(input) @@ -122,3 +160,150 @@ def test_single_volume_cct(image, label, net, classes, patch_size=[256, 256]): metric_list.append(calculate_metric_percase( prediction == i, label == i)) return metric_list + +def mask_to_boundary(mask, dilation_ratio=0.02): + """ + Convert binary mask to boundary mask. + :param mask (numpy array, uint8): binary mask + :param dilation_ratio (float): ratio to calculate dilation = dilation_ratio * image_diagonal + :return: boundary mask (numpy array) + """ + h, w = mask.shape + img_diag = np.sqrt(h ** 2 + w ** 2) # compute the image diagonal length + dilation = int(round(dilation_ratio * img_diag)) + if dilation < 1: + dilation = 1 + + mask = mask.astype(np.uint8) + # Pad image so mask truncated by the image border is also considered as boundary. + new_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0) + kernel = np.ones((3, 3), dtype=np.uint8) + new_mask_erode = cv2.erode(new_mask, kernel, iterations=dilation) + + # The border was zero-padded above, so crop the padding off again here + mask_erode = new_mask_erode[1 : h + 1, 1 : w + 1] + # G_d intersects G in the paper.
+ return mask - mask_erode + + +def calculate_metric_percase_7(y_pred, y_true): + """ pred[pred > 0] = 1 + gt[gt > 0] = 1 + if pred.sum() > 0: + dice = metric.binary.dc(pred, gt) + hd95 = metric.binary.hd95(pred, gt) + if gt.sum() > 0: + hd = metric.binary.hd(pred, gt) + else: + hd = 0 + + eps = 0.0001 + dilation_ratio=0.005 + c_pred, h_pred, w_pred = pred.shape + y_pred, y_true = np.array(pred), np.array(gt) + y_pred, y_true = np.round(pred).astype(int), np.round(gt).astype(int) + a_unin_b = np.sum(y_pred[y_true == 1]) + a_plus_b = np.sum(y_pred) + np.sum(y_true) + eps + # dice + #dice_value = (a_unin_b * 2.0 + eps) / a_plus_b + # PPV + ppv_value = (a_unin_b * 1.0 + eps) / (np.sum(y_pred) + eps) + + # sensitivity + sen_val = (a_unin_b * 1.0 + eps) / (np.sum(y_true) + eps) + #print('ppv_value and sen_val has been calculated') + iou = a_unin_b / (a_plus_b - a_unin_b) # a_plus_b already includes eps, so no need to add it here + + boundary_iou_all = 0.0 + for i in range(c_pred): + gt_boundary = mask_to_boundary(y_true[i], dilation_ratio) + dt_boundary = mask_to_boundary(y_pred[i], dilation_ratio) + intersection = ((gt_boundary * dt_boundary) > 0).sum() + union = ((gt_boundary + dt_boundary) > 0).sum() + boundary_iou = intersection / (union + eps) + boundary_iou_all += boundary_iou + boundary_iou = boundary_iou_all / c_pred """ + eps = 0.0001 + c_pred, h_pred, w_pred = y_pred.shape + y_pred, y_true = np.array(y_pred), np.array(y_true) + y_pred, y_true = np.round(y_pred).astype(int), np.round(y_true).astype(int) + TP = np.sum(y_pred[y_true == 1]) + + a_plus_b = np.sum(y_pred) + np.sum(y_true) + eps + #dice + dice=(TP * 2.0 + eps) / a_plus_b + denominator1=np.sum(y_pred) + eps + #PPV + ppv=(TP*1.0 + eps) / denominator1 + denominator2 = np.sum(y_true) + eps + #Sen + sen=(TP*1.0 + eps) / denominator2 + + # hd and hd95 + if y_pred.sum() > 0 and y_true.sum() > 0: + hd = metric.binary.hd(y_pred, y_true) + hd95 = metric.binary.hd95(y_pred, y_true) + asd = metric.binary.asd(y_pred, y_true) + else: + hd = 0 + hd95 = 0 + asd = 0 + # iou + a_unin_b = np.sum(y_pred[y_true == 1]) + eps + a_plus_b = np.sum(y_pred) + np.sum(y_true) + eps + iou = a_unin_b / (a_plus_b - a_unin_b) + + # biou + boundary_iou_all = 0.0 + dilation_ratio=0.005 + for i in range(c_pred): + gt_boundary = mask_to_boundary(y_true[i], dilation_ratio) + dt_boundary = mask_to_boundary(y_pred[i], dilation_ratio) + intersection = ((gt_boundary * dt_boundary) > 0).sum() + union = ((gt_boundary + dt_boundary) > 0).sum() + boundary_iou = intersection / (union + eps) + boundary_iou_all += boundary_iou + boundary_iou = boundary_iou_all / c_pred + + return dice, hd95, ppv, sen, iou, boundary_iou, hd,asd + +def test_single_volume_7(image, label, net, classes, device,patch_size=[256, 256],): + image, label = image.squeeze(0).cpu().detach( + ).numpy(), label.squeeze(0).cpu().detach().numpy() + + + if len(image.shape) == 3: + prediction = np.zeros_like(label) + for ind in range(image.shape[0]): + slice = image[ind, :, :] + x, y = slice.shape[0], slice.shape[1] + slice = zoom( + slice, (patch_size[0] / x, patch_size[1] / y), order=0) + input = torch.from_numpy(slice).unsqueeze( + 0).unsqueeze(0).float().to(device) + net.eval() + with torch.no_grad(): + out = torch.argmax(torch.softmax( + net(input)[0], dim=1), dim=1).squeeze(0) + out = out.cpu().detach().numpy() + pred = zoom( + out, (x / patch_size[0], y / patch_size[1]), order=0) + prediction[ind] = pred + else: + input = torch.from_numpy(image).unsqueeze( + 0).unsqueeze(0).float().to(device) + net.eval() + with torch.no_grad(): + out =
torch.argmax(torch.softmax( + net(input)[0], dim=1), dim=1).squeeze(0) + prediction = out.cpu().detach().numpy() + metric_list = [] + for i in range(1, classes): + metric_list.append(calculate_metric_percase_7( + prediction == i, label == i)) + + performance_test = np.mean(metric_list, axis=0)[0] + # if is_save_img==True: + # save_imgs_rgb(out_path,data_name,img_np,mask_np,pred_mask,patientSliceID,performance_test,exp) + + return metric_list diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_0.h5 deleted file mode 100644 index a0c07eb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_1.h5 deleted file mode 100644 index f20b3b2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_2.h5 deleted file mode 100644 index 4eb38e9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_3.h5 deleted file mode 100644 index a0bd9da..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_4.h5 deleted file mode 100644 index bcda772..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_5.h5 deleted file mode 100644 index 6846ef4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_6.h5 deleted file mode 100644 index 71684a7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_7.h5 deleted file mode 100644 index dcd59dd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_8.h5 deleted file mode 100644 index e09bcf4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient001_frame01_slice_9.h5 deleted file mode 100644 index 151df35..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_0.h5 deleted file mode 100644 index d565fc9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_0.h5 and /dev/null differ diff 
--git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_1.h5 deleted file mode 100644 index 56c599e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_2.h5 deleted file mode 100644 index 3c3bd89..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_3.h5 deleted file mode 100644 index 7b4d417..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_4.h5 deleted file mode 100644 index b913082..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_5.h5 deleted file mode 100644 index f6704a8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_6.h5 deleted file mode 100644 index 7d59496..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_7.h5 deleted file mode 100644 index 864a0fb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_8.h5 deleted file mode 100644 index 30edece..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient001_frame12_slice_9.h5 deleted file mode 100644 index a67b0b4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient001_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_0.h5 deleted file mode 100644 index 60c2b9b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_1.h5 deleted file mode 100644 index 3c722a6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_2.h5 deleted file mode 100644 index ca67b97..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_3.h5 deleted file mode 100644 index 
5396aba..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_4.h5 deleted file mode 100644 index 53812b0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_5.h5 deleted file mode 100644 index b897aa8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_6.h5 deleted file mode 100644 index e5aaf7a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_7.h5 deleted file mode 100644 index aa20cb8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_8.h5 deleted file mode 100644 index 7a25ec9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient002_frame01_slice_9.h5 deleted file mode 100644 index ec34feb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_0.h5 deleted file mode 100644 index 520d471..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_1.h5 deleted file mode 100644 index 00dc762..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_2.h5 deleted file mode 100644 index 28adce1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_3.h5 deleted file mode 100644 index 781dfdb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_4.h5 deleted file mode 100644 index ef70721..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_5.h5 deleted file mode 100644 index 1025936..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_5.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient002_frame12_slice_6.h5 deleted file mode 100644 index 43f8d87..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient002_frame12_slice_6.h5 and /dev/null differ
[... further binary deletion records collapsed for readability: the intervening stanzas all follow the same pattern ("diff --git a/<file> b/<file>", "deleted file mode 100644", "index <hash>..0000000", "Binary files a/<file> and /dev/null differ") and remove the HDF5 training slices under data/ACDC/ACDC_training_slices/ for patients 002 through 016, covering their various frames and slice indices; only the file names and blob hashes differ between records ...]
diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_0.h5 deleted file mode 100644 index
deff98d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_1.h5 deleted file mode 100644 index a9b670b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_2.h5 deleted file mode 100644 index 112a955..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_3.h5 deleted file mode 100644 index e675708..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_4.h5 deleted file mode 100644 index 00e27f8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_5.h5 deleted file mode 100644 index eee945e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_6.h5 deleted file mode 100644 index 8ef4700..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_7.h5 deleted file mode 100644 index 8bb2286..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_8.h5 deleted file mode 100644 index 0f3947e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient016_frame01_slice_9.h5 deleted file mode 100644 index 11d93b1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_0.h5 deleted file mode 100644 index 031b0f3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_1.h5 deleted file mode 100644 index 99b387d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_2.h5 deleted file mode 100644 index 2388037..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_3.h5 deleted file mode 100644 index d28034e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_4.h5 deleted file mode 100644 index 20eedbb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_5.h5 deleted file mode 100644 index ec44174..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_6.h5 deleted file mode 100644 index be70a5b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_7.h5 deleted file mode 100644 index 078517f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_8.h5 deleted file mode 100644 index e8668d4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient016_frame12_slice_9.h5 deleted file mode 100644 index 5bd9461..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient016_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_0.h5 deleted file mode 100644 index adf98a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_1.h5 deleted file mode 100644 index a38bfcc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_2.h5 deleted file mode 100644 index da35a08..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_3.h5 deleted file mode 100644 index 3fe0f38..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_4.h5 deleted file mode 100644 index 90916d2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_5.h5 deleted file mode 100644 index 
98c4d5d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_6.h5 deleted file mode 100644 index 492db5c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_7.h5 deleted file mode 100644 index 4dbe77f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient017_frame01_slice_8.h5 deleted file mode 100644 index a22c15e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_0.h5 deleted file mode 100644 index 71d3c60..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_1.h5 deleted file mode 100644 index 71adcec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_2.h5 deleted file mode 100644 index 040ce0d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_3.h5 deleted file mode 100644 index 1ed2c55..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_4.h5 deleted file mode 100644 index 4719182..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_5.h5 deleted file mode 100644 index 1fefbac..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_6.h5 deleted file mode 100644 index 8fdc26c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_7.h5 deleted file mode 100644 index 4205e8c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient017_frame09_slice_8.h5 deleted file mode 100644 index f0115c2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient017_frame09_slice_8.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_0.h5 deleted file mode 100644 index 652835d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_1.h5 deleted file mode 100644 index 7009d50..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_2.h5 deleted file mode 100644 index 38f6cc0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_3.h5 deleted file mode 100644 index 59d9747..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_4.h5 deleted file mode 100644 index 9559ac8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_5.h5 deleted file mode 100644 index 2ab4b9e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_6.h5 deleted file mode 100644 index d1ea049..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient018_frame01_slice_7.h5 deleted file mode 100644 index da58654..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_0.h5 deleted file mode 100644 index db677cb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_1.h5 deleted file mode 100644 index a3baab7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_2.h5 deleted file mode 100644 index 119fecf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_3.h5 deleted file mode 100644 index 7b6049c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_4.h5 deleted file mode 100644 index 
731d51b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_5.h5 deleted file mode 100644 index fc719eb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_6.h5 deleted file mode 100644 index f653cd1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient018_frame10_slice_7.h5 deleted file mode 100644 index 48a1f5e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient018_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_0.h5 deleted file mode 100644 index b3e4ee3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_1.h5 deleted file mode 100644 index 97604f8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_10.h5 deleted file mode 100644 index 0f01ee9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_2.h5 deleted file mode 100644 index 517639a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_3.h5 deleted file mode 100644 index 19aadf2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_4.h5 deleted file mode 100644 index 6302df3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_5.h5 deleted file mode 100644 index ff9a05c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_6.h5 deleted file mode 100644 index 32c07ec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_7.h5 deleted file mode 100644 index a6d1b78..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_7.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_8.h5 deleted file mode 100644 index 66acf40..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient019_frame01_slice_9.h5 deleted file mode 100644 index c627b13..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_0.h5 deleted file mode 100644 index f87ed60..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_1.h5 deleted file mode 100644 index 8ba40f1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_10.h5 deleted file mode 100644 index 72df7d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_2.h5 deleted file mode 100644 index 14d512b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_3.h5 deleted file mode 100644 index d1ad95a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_4.h5 deleted file mode 100644 index 455360f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_5.h5 deleted file mode 100644 index a9f0233..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_6.h5 deleted file mode 100644 index 27824e3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_7.h5 deleted file mode 100644 index 9657ce1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_8.h5 deleted file mode 100644 index c629927..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient019_frame11_slice_9.h5 deleted file mode 100644 index 
984822f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient019_frame11_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_0.h5 deleted file mode 100644 index 0d73158..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_1.h5 deleted file mode 100644 index 74bf05e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_2.h5 deleted file mode 100644 index 788e16e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_3.h5 deleted file mode 100644 index 53ce977..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_4.h5 deleted file mode 100644 index 9841822..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_5.h5 deleted file mode 100644 index b5a7867..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_6.h5 deleted file mode 100644 index bb91712..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient020_frame01_slice_7.h5 deleted file mode 100644 index f223b9f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_0.h5 deleted file mode 100644 index e1fe881..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_1.h5 deleted file mode 100644 index 287d700..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_2.h5 deleted file mode 100644 index d3de479..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_3.h5 deleted file mode 100644 index 10c6ab0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_4.h5 deleted file mode 100644 index 968bdf1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_5.h5 deleted file mode 100644 index 1acf050..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_6.h5 deleted file mode 100644 index 91322e0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient020_frame11_slice_7.h5 deleted file mode 100644 index 9271db4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient020_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_0.h5 deleted file mode 100644 index 08fd7d5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_1.h5 deleted file mode 100644 index f600a13..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_2.h5 deleted file mode 100644 index b67c470..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_3.h5 deleted file mode 100644 index a0bf68f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_4.h5 deleted file mode 100644 index 70a60d4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_5.h5 deleted file mode 100644 index 6012759..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_6.h5 deleted file mode 100644 index a29fdea..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_7.h5 deleted file mode 100644 index 9ac1670..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_8.h5 deleted file mode 100644 index 
d806173..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient021_frame01_slice_9.h5 deleted file mode 100644 index c625ba7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_0.h5 deleted file mode 100644 index 2854ca6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_1.h5 deleted file mode 100644 index 9b25b75..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_2.h5 deleted file mode 100644 index 72619cb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_3.h5 deleted file mode 100644 index 5fc2b47..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_4.h5 deleted file mode 100644 index 662b8d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_5.h5 deleted file mode 100644 index 6485cac..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_6.h5 deleted file mode 100644 index 63c9974..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_7.h5 deleted file mode 100644 index cf1b461..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_8.h5 deleted file mode 100644 index 5eb4095..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient021_frame13_slice_9.h5 deleted file mode 100644 index 051026e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient021_frame13_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_0.h5 deleted file mode 100644 index 2649400..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_1.h5 deleted file mode 100644 index 1c4b0f3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_2.h5 deleted file mode 100644 index 10f4a7f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_3.h5 deleted file mode 100644 index 4b00a6a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_4.h5 deleted file mode 100644 index 46a9aed..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_5.h5 deleted file mode 100644 index 4998fbd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient022_frame01_slice_6.h5 deleted file mode 100644 index 51f43d3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_0.h5 deleted file mode 100644 index db304ae..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_1.h5 deleted file mode 100644 index 75d314f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_2.h5 deleted file mode 100644 index f4ae1e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_3.h5 deleted file mode 100644 index bb23e62..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_4.h5 deleted file mode 100644 index 083bd52..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_5.h5 deleted file mode 100644 index 2783e66..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient022_frame11_slice_6.h5 deleted file mode 100644 index 
6fdcee8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient022_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_0.h5 deleted file mode 100644 index 4d8ea54..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_1.h5 deleted file mode 100644 index 117e85e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_2.h5 deleted file mode 100644 index 87bf868..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_3.h5 deleted file mode 100644 index 33fc5d1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_4.h5 deleted file mode 100644 index 92c7b18..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_5.h5 deleted file mode 100644 index b778ba9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_6.h5 deleted file mode 100644 index c412c62..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_7.h5 deleted file mode 100644 index 12081e3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient023_frame01_slice_8.h5 deleted file mode 100644 index b99627d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_0.h5 deleted file mode 100644 index a83461d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_1.h5 deleted file mode 100644 index 6119435..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_2.h5 deleted file mode 100644 index 5c17312..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_3.h5 deleted file mode 100644 index e9a38a6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_4.h5 deleted file mode 100644 index 46ac072..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_5.h5 deleted file mode 100644 index 4ce3ad8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_6.h5 deleted file mode 100644 index c0d6fec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_7.h5 deleted file mode 100644 index 49068cf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient023_frame09_slice_8.h5 deleted file mode 100644 index 8966313..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient023_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_0.h5 deleted file mode 100644 index 5d147d3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_1.h5 deleted file mode 100644 index b614d07..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_2.h5 deleted file mode 100644 index 30daa93..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_3.h5 deleted file mode 100644 index 5a039f4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_4.h5 deleted file mode 100644 index 36041ff..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_5.h5 deleted file mode 100644 index 8f719e6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_6.h5 deleted file mode 100644 index 
ae0c721..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient024_frame01_slice_7.h5 deleted file mode 100644 index b45c82b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_0.h5 deleted file mode 100644 index 8658dc2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_1.h5 deleted file mode 100644 index 6b361db..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_2.h5 deleted file mode 100644 index 23300df..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_3.h5 deleted file mode 100644 index 3a8fc80..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_4.h5 deleted file mode 100644 index 2d2193c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_5.h5 deleted file mode 100644 index 5cc4ad0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_6.h5 deleted file mode 100644 index 167f2e7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient024_frame09_slice_7.h5 deleted file mode 100644 index f1ef84f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient024_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_0.h5 deleted file mode 100644 index 9978e82..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_1.h5 deleted file mode 100644 index 37ed21e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_2.h5 deleted file mode 100644 index 4f39087..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_3.h5 deleted file mode 100644 index 2174d92..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_4.h5 deleted file mode 100644 index 1219523..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_5.h5 deleted file mode 100644 index 63f23fb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_6.h5 deleted file mode 100644 index 6cb328a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_7.h5 deleted file mode 100644 index 842112b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient025_frame01_slice_8.h5 deleted file mode 100644 index f7c7b8f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_0.h5 deleted file mode 100644 index 49f8c70..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_1.h5 deleted file mode 100644 index 6809d6b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_2.h5 deleted file mode 100644 index 96ae6b9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_3.h5 deleted file mode 100644 index 880ab19..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_4.h5 deleted file mode 100644 index 39c5764..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_5.h5 deleted file mode 100644 index 9f34cb0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_6.h5 deleted file mode 100644 index 
3019825..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_7.h5 deleted file mode 100644 index 5b43f3c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient025_frame09_slice_8.h5 deleted file mode 100644 index e23c226..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient025_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_0.h5 deleted file mode 100644 index 65b408c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_1.h5 deleted file mode 100644 index c919bb3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_2.h5 deleted file mode 100644 index 2d61c01..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_3.h5 deleted file mode 100644 index fff8708..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_4.h5 deleted file mode 100644 index afb6ba5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_5.h5 deleted file mode 100644 index 072cd5f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_6.h5 deleted file mode 100644 index d0cedc4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_7.h5 deleted file mode 100644 index 0e3e7f7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_8.h5 deleted file mode 100644 index bdd7af5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient026_frame01_slice_9.h5 deleted file mode 100644 index bfe51ed..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame01_slice_9.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_0.h5 deleted file mode 100644 index 26e4bfe..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_1.h5 deleted file mode 100644 index aa0913d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_2.h5 deleted file mode 100644 index 8f118be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_3.h5 deleted file mode 100644 index b45ecd4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_4.h5 deleted file mode 100644 index 85fd140..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_5.h5 deleted file mode 100644 index b7da718..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_6.h5 deleted file mode 100644 index 20e1e17..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_7.h5 deleted file mode 100644 index 6cce0e9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_8.h5 deleted file mode 100644 index 3015631..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient026_frame12_slice_9.h5 deleted file mode 100644 index 6082b11..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient026_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_0.h5 deleted file mode 100644 index 1ed755f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_1.h5 deleted file mode 100644 index 0cdbeec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_2.h5 deleted file mode 100644 index 
fd67cd0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_3.h5 deleted file mode 100644 index 785db58..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_4.h5 deleted file mode 100644 index ab84756..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_5.h5 deleted file mode 100644 index 9c61edf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_6.h5 deleted file mode 100644 index 0cdaba1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_7.h5 deleted file mode 100644 index 75b931b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_8.h5 deleted file mode 100644 index 3383ef7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient027_frame01_slice_9.h5 deleted file mode 100644 index e0d65cf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_0.h5 deleted file mode 100644 index 22cfdf1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_1.h5 deleted file mode 100644 index 0ad55b4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_2.h5 deleted file mode 100644 index ff0e714..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_3.h5 deleted file mode 100644 index 0878e1b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_4.h5 deleted file mode 100644 index 2203825..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_4.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_5.h5 deleted file mode 100644 index 93e02c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_6.h5 deleted file mode 100644 index 4c6f6de..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_7.h5 deleted file mode 100644 index a53a8cc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_8.h5 deleted file mode 100644 index 2ce6d23..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient027_frame11_slice_9.h5 deleted file mode 100644 index e5f5d0d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient027_frame11_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_0.h5 deleted file mode 100644 index b9561c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_1.h5 deleted file mode 100644 index 374d89d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_2.h5 deleted file mode 100644 index 760d529..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_3.h5 deleted file mode 100644 index c10fc62..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_4.h5 deleted file mode 100644 index fa1eabe..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_5.h5 deleted file mode 100644 index 1a44ba4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_6.h5 deleted file mode 100644 index fd71180..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_7.h5 deleted file mode 100644 index 
cf615e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_8.h5 deleted file mode 100644 index 856e4a9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient028_frame01_slice_9.h5 deleted file mode 100644 index b2c6409..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_0.h5 deleted file mode 100644 index 2dc21e2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_1.h5 deleted file mode 100644 index 3791f91..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_2.h5 deleted file mode 100644 index 56930f2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_3.h5 deleted file mode 100644 index 0d067e2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_4.h5 deleted file mode 100644 index e33ce20..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_5.h5 deleted file mode 100644 index 97d305c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_6.h5 deleted file mode 100644 index 37a744e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_7.h5 deleted file mode 100644 index 19318f7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_8.h5 deleted file mode 100644 index 7687099..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient028_frame09_slice_9.h5 deleted file mode 100644 index 34c7cec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient028_frame09_slice_9.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_0.h5 deleted file mode 100644 index dc340cb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_1.h5 deleted file mode 100644 index 5c35eb8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_10.h5 deleted file mode 100644 index eeb0151..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_2.h5 deleted file mode 100644 index 2b7ff6e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_3.h5 deleted file mode 100644 index 6fbc016..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_4.h5 deleted file mode 100644 index e52dd67..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_5.h5 deleted file mode 100644 index c978e89..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_6.h5 deleted file mode 100644 index 83d4218..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_7.h5 deleted file mode 100644 index 40112c5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_8.h5 deleted file mode 100644 index b183825..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient029_frame01_slice_9.h5 deleted file mode 100644 index 56fdd18..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_0.h5 deleted file mode 100644 index 3b0d8d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_1.h5 deleted file mode 100644 index 
80ed5ab..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_10.h5 deleted file mode 100644 index 6229a33..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_2.h5 deleted file mode 100644 index 06dfd72..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_3.h5 deleted file mode 100644 index 4118863..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_4.h5 deleted file mode 100644 index 08fc30f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_5.h5 deleted file mode 100644 index 9ad3f32..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_6.h5 deleted file mode 100644 index 6535c01..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_7.h5 deleted file mode 100644 index 2d91711..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_8.h5 deleted file mode 100644 index a9fa718..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient029_frame12_slice_9.h5 deleted file mode 100644 index cf8e90a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient029_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_0.h5 deleted file mode 100644 index a9288a7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_1.h5 deleted file mode 100644 index ce91a7c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_2.h5 deleted file mode 100644 index 2f3fc3b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_3.h5 deleted file mode 100644 index 1ee6b80..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_4.h5 deleted file mode 100644 index 1078c03..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_5.h5 deleted file mode 100644 index 540856f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_6.h5 deleted file mode 100644 index aec6c4b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_7.h5 deleted file mode 100644 index 153bc06..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_8.h5 deleted file mode 100644 index fdbf130..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient030_frame01_slice_9.h5 deleted file mode 100644 index 7af74a9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_0.h5 deleted file mode 100644 index 7f301a8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_1.h5 deleted file mode 100644 index 899333a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_2.h5 deleted file mode 100644 index 02ee7f2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_3.h5 deleted file mode 100644 index 4766013..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_4.h5 deleted file mode 100644 index 0d32777..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_5.h5 deleted file mode 100644 index 
2127e0b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_6.h5 deleted file mode 100644 index 151d819..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_7.h5 deleted file mode 100644 index 5f6e32c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_8.h5 deleted file mode 100644 index f3e5f59..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient030_frame12_slice_9.h5 deleted file mode 100644 index b41a78a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient030_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_0.h5 deleted file mode 100644 index 148d59b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_1.h5 deleted file mode 100644 index bcbd52b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_2.h5 deleted file mode 100644 index a0b7a9f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_3.h5 deleted file mode 100644 index 941de05..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_4.h5 deleted file mode 100644 index 3e04770..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_5.h5 deleted file mode 100644 index 7dd77f1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_6.h5 deleted file mode 100644 index b9244bb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_7.h5 deleted file mode 100644 index 02b79c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_7.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_8.h5 deleted file mode 100644 index 31ef062..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient031_frame01_slice_9.h5 deleted file mode 100644 index d1f61d5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_0.h5 deleted file mode 100644 index 04c96f8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_1.h5 deleted file mode 100644 index 95cfa6f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_2.h5 deleted file mode 100644 index 3999aa6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_3.h5 deleted file mode 100644 index a36e7c8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_4.h5 deleted file mode 100644 index 22900af..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_5.h5 deleted file mode 100644 index 55b2dc2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_6.h5 deleted file mode 100644 index 2705a1a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_7.h5 deleted file mode 100644 index 17d0d54..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_8.h5 deleted file mode 100644 index 021ffdc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient031_frame10_slice_9.h5 deleted file mode 100644 index 1320814..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient031_frame10_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_0.h5 deleted file mode 100644 index 
e1ae028..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_1.h5 deleted file mode 100644 index 179258f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_2.h5 deleted file mode 100644 index d66be35..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_3.h5 deleted file mode 100644 index c84e777..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_4.h5 deleted file mode 100644 index c39558d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_5.h5 deleted file mode 100644 index d05381f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_6.h5 deleted file mode 100644 index 3e46d32..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_7.h5 deleted file mode 100644 index dc6f946..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_8.h5 deleted file mode 100644 index b763513..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient032_frame01_slice_9.h5 deleted file mode 100644 index fb92f59..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_0.h5 deleted file mode 100644 index 12ebd01..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_1.h5 deleted file mode 100644 index b830986..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_2.h5 deleted file mode 100644 index f77d9b2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_3.h5 deleted file mode 100644 index 67c2df2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_4.h5 deleted file mode 100644 index ff95f01..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_5.h5 deleted file mode 100644 index 39c54d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_6.h5 deleted file mode 100644 index 8519e14..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_7.h5 deleted file mode 100644 index d64ebc2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_8.h5 deleted file mode 100644 index 61c69a9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient032_frame12_slice_9.h5 deleted file mode 100644 index 8708cef..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient032_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_0.h5 deleted file mode 100644 index 40c0953..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_1.h5 deleted file mode 100644 index 5fd7e09..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_2.h5 deleted file mode 100644 index 1f94d35..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_3.h5 deleted file mode 100644 index 57f1d2d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_4.h5 deleted file mode 100644 index 4bca247..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_5.h5 deleted file mode 100644 index 
be61ea3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_6.h5 deleted file mode 100644 index 24a3f0b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_7.h5 deleted file mode 100644 index ec206be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_8.h5 deleted file mode 100644 index 843bf6a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient033_frame01_slice_9.h5 deleted file mode 100644 index b84d515..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_0.h5 deleted file mode 100644 index fa6dabb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_1.h5 deleted file mode 100644 index f72f331..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_2.h5 deleted file mode 100644 index 831861f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_3.h5 deleted file mode 100644 index 01a7e4e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_4.h5 deleted file mode 100644 index 91e034c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_5.h5 deleted file mode 100644 index 92b9eeb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_6.h5 deleted file mode 100644 index b80e4be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_7.h5 deleted file mode 100644 index c30d676..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_7.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_8.h5 deleted file mode 100644 index 125ddce..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient033_frame14_slice_9.h5 deleted file mode 100644 index 3ed5e10..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient033_frame14_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_0.h5 deleted file mode 100644 index 85aa66c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_1.h5 deleted file mode 100644 index b0327e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_2.h5 deleted file mode 100644 index e9f62e2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_3.h5 deleted file mode 100644 index 772be37..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_4.h5 deleted file mode 100644 index 543fdae..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_5.h5 deleted file mode 100644 index f8f50d1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_6.h5 deleted file mode 100644 index d6ba3b2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_7.h5 deleted file mode 100644 index 4abd4f7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_8.h5 deleted file mode 100644 index 2793b3e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient034_frame01_slice_9.h5 deleted file mode 100644 index 2fb0af9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_0.h5 deleted file mode 100644 index 
7381791..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_1.h5 deleted file mode 100644 index 7069b73..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_2.h5 deleted file mode 100644 index b6c7031..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_3.h5 deleted file mode 100644 index 856dd67..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_4.h5 deleted file mode 100644 index 1fc98c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_5.h5 deleted file mode 100644 index 077b24f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_6.h5 deleted file mode 100644 index 99efa24..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_7.h5 deleted file mode 100644 index 4aa9269..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_8.h5 deleted file mode 100644 index f465c50..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient034_frame16_slice_9.h5 deleted file mode 100644 index 77d45c8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient034_frame16_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_0.h5 deleted file mode 100644 index 3c26121..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_1.h5 deleted file mode 100644 index 4f01280..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_10.h5 deleted file mode 100644 index 78ea001..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_10.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_11.h5 deleted file mode 100644 index ca51957..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_12.h5 deleted file mode 100644 index fb3392d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_2.h5 deleted file mode 100644 index 5544a2f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_3.h5 deleted file mode 100644 index 7badb34..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_4.h5 deleted file mode 100644 index 9492f25..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_5.h5 deleted file mode 100644 index 2f248a1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_6.h5 deleted file mode 100644 index b004c39..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_7.h5 deleted file mode 100644 index 9889a34..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_8.h5 deleted file mode 100644 index daf33c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient035_frame01_slice_9.h5 deleted file mode 100644 index dcf0d11..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_0.h5 deleted file mode 100644 index 5e10780..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_1.h5 deleted file mode 100644 index c15c08b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_10.h5 deleted file mode 100644 index 
069b34d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_11.h5 deleted file mode 100644 index 4f494df..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_12.h5 deleted file mode 100644 index baf5f7b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_2.h5 deleted file mode 100644 index 0e6b0d3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_3.h5 deleted file mode 100644 index 8395f6c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_4.h5 deleted file mode 100644 index dc9c107..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_5.h5 deleted file mode 100644 index 9eeceec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_6.h5 deleted file mode 100644 index b153015..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_7.h5 deleted file mode 100644 index c734691..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_8.h5 deleted file mode 100644 index 24105ac..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient035_frame11_slice_9.h5 deleted file mode 100644 index b389a76..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient035_frame11_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_0.h5 deleted file mode 100644 index 825e74d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_1.h5 deleted file mode 100644 index fa38e91..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_1.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_2.h5 deleted file mode 100644 index 7fe5048..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_3.h5 deleted file mode 100644 index b5e8a7c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_4.h5 deleted file mode 100644 index e939b90..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_5.h5 deleted file mode 100644 index a34e4da..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_6.h5 deleted file mode 100644 index f952f00..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient036_frame01_slice_7.h5 deleted file mode 100644 index a0e3478..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_0.h5 deleted file mode 100644 index a364153..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_1.h5 deleted file mode 100644 index c2a3afe..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_2.h5 deleted file mode 100644 index cf3d62d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_3.h5 deleted file mode 100644 index 3ad7e21..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_4.h5 deleted file mode 100644 index e5767ce..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_5.h5 deleted file mode 100644 index 66f1548..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_6.h5 deleted file mode 100644 index 
35b2dd3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient036_frame12_slice_7.h5 deleted file mode 100644 index f890981..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient036_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_0.h5 deleted file mode 100644 index 3fa6b88..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_1.h5 deleted file mode 100644 index 996e22c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_2.h5 deleted file mode 100644 index bed85b2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_3.h5 deleted file mode 100644 index b174e15..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_4.h5 deleted file mode 100644 index 169ea31..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_5.h5 deleted file mode 100644 index d126781..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient037_frame01_slice_6.h5 deleted file mode 100644 index 113ce42..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_0.h5 deleted file mode 100644 index 8efe72b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_1.h5 deleted file mode 100644 index e892601..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_2.h5 deleted file mode 100644 index ace8047..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_3.h5 deleted file mode 100644 index e485000..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_4.h5 deleted file mode 100644 index 466ff6e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_5.h5 deleted file mode 100644 index ff5f7b3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient037_frame12_slice_6.h5 deleted file mode 100644 index df5ea22..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient037_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_0.h5 deleted file mode 100644 index e1950a9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_1.h5 deleted file mode 100644 index 0cbec96..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_2.h5 deleted file mode 100644 index ebd6a09..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_3.h5 deleted file mode 100644 index 8f6b271..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_4.h5 deleted file mode 100644 index f63beb2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_5.h5 deleted file mode 100644 index 6ca0923..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_6.h5 deleted file mode 100644 index edd9349..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient038_frame01_slice_7.h5 deleted file mode 100644 index a1311d7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_0.h5 deleted file mode 100644 index d2c5d64..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_1.h5 deleted file mode 100644 index 
af0dd52..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_2.h5 deleted file mode 100644 index 4ecaac3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_3.h5 deleted file mode 100644 index f3ba332..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_4.h5 deleted file mode 100644 index 41f926c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_5.h5 deleted file mode 100644 index 475c94b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_6.h5 deleted file mode 100644 index 114b708..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient038_frame11_slice_7.h5 deleted file mode 100644 index 97bf55e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient038_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_0.h5 deleted file mode 100644 index 60a059a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_1.h5 deleted file mode 100644 index 6c1d715..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_2.h5 deleted file mode 100644 index 1db3874..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_3.h5 deleted file mode 100644 index 66e9d70..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_4.h5 deleted file mode 100644 index 3369e9f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_5.h5 deleted file mode 100644 index e360029..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_5.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_6.h5 deleted file mode 100644 index 706ea66..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_7.h5 deleted file mode 100644 index 33d4e74..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient039_frame01_slice_8.h5 deleted file mode 100644 index 68e8d78..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_0.h5 deleted file mode 100644 index 8185233..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_1.h5 deleted file mode 100644 index 797f1cb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_2.h5 deleted file mode 100644 index cdf4074..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_3.h5 deleted file mode 100644 index b6a6bbd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_4.h5 deleted file mode 100644 index 2bb224a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_5.h5 deleted file mode 100644 index 48946f3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_6.h5 deleted file mode 100644 index 09cf6ca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_7.h5 deleted file mode 100644 index b5db979..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient039_frame10_slice_8.h5 deleted file mode 100644 index daab880..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient039_frame10_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_0.h5 deleted file mode 100644 index 
9e1e018..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_1.h5 deleted file mode 100644 index 41a59d5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_2.h5 deleted file mode 100644 index 7f3e7ff..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_3.h5 deleted file mode 100644 index 79d0b49..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_4.h5 deleted file mode 100644 index deed6ed..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_5.h5 deleted file mode 100644 index cd62bf5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_6.h5 deleted file mode 100644 index be8284f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_7.h5 deleted file mode 100644 index 48fb039..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_8.h5 deleted file mode 100644 index bc42c3d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient040_frame01_slice_9.h5 deleted file mode 100644 index c3c6d03..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_0.h5 deleted file mode 100644 index 3cc9562..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_1.h5 deleted file mode 100644 index 8461234..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_2.h5 deleted file mode 100644 index 173db4c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_3.h5 deleted file mode 100644 index 23f41f8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_4.h5 deleted file mode 100644 index 0b04b34..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_5.h5 deleted file mode 100644 index 09c8ac6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_6.h5 deleted file mode 100644 index 3868b4e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_7.h5 deleted file mode 100644 index 7122423..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_8.h5 deleted file mode 100644 index 06c664d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient040_frame13_slice_9.h5 deleted file mode 100644 index 62df96b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient040_frame13_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient041_frame01_slice_0.h5 deleted file mode 100644 index 3e046d3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient041_frame01_slice_1.h5 deleted file mode 100644 index 841d964..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient041_frame01_slice_2.h5 deleted file mode 100644 index ee35d8b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient041_frame01_slice_3.h5 deleted file mode 100644 index 0521c2b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient041_frame01_slice_4.h5 deleted file mode 100644 index c4bfbbb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient041_frame01_slice_5.h5 deleted file mode 100644 index 
b7c2b7f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient041_frame11_slice_0.h5 deleted file mode 100644 index 1308a37..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient041_frame11_slice_1.h5 deleted file mode 100644 index 2d2def7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient041_frame11_slice_2.h5 deleted file mode 100644 index c1a1947..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient041_frame11_slice_3.h5 deleted file mode 100644 index b451a17..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient041_frame11_slice_4.h5 deleted file mode 100644 index 3d8c069..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient041_frame11_slice_5.h5 deleted file mode 100644 index de3e2be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient041_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_0.h5 deleted file mode 100644 index 805a391..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_1.h5 deleted file mode 100644 index 4c4a852..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_2.h5 deleted file mode 100644 index a610d68..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_3.h5 deleted file mode 100644 index 83a28be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_4.h5 deleted file mode 100644 index bf472b5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_5.h5 deleted file mode 100644 index 71cbb55..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_5.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_6.h5 deleted file mode 100644 index 60993e7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_7.h5 deleted file mode 100644 index d6ccf41..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient042_frame01_slice_8.h5 deleted file mode 100644 index 6c22da7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_0.h5 deleted file mode 100644 index b869747..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_1.h5 deleted file mode 100644 index 89ab88a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_2.h5 deleted file mode 100644 index 7aec017..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_3.h5 deleted file mode 100644 index 20e1b17..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_4.h5 deleted file mode 100644 index 3b4c14f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_5.h5 deleted file mode 100644 index 064bf14..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_6.h5 deleted file mode 100644 index 6abd7fe..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_7.h5 deleted file mode 100644 index c9e3208..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient042_frame16_slice_8.h5 deleted file mode 100644 index 41dc044..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient042_frame16_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_0.h5 deleted file mode 100644 index 
6b65047..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_1.h5 deleted file mode 100644 index 3d99849..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_10.h5 deleted file mode 100644 index 842a1eb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_11.h5 deleted file mode 100644 index 3a44873..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_2.h5 deleted file mode 100644 index 893e788..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_3.h5 deleted file mode 100644 index d10206e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_4.h5 deleted file mode 100644 index 1eecc64..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_5.h5 deleted file mode 100644 index f5b462f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_6.h5 deleted file mode 100644 index 7b1ae32..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_7.h5 deleted file mode 100644 index 0f9390d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_8.h5 deleted file mode 100644 index a17596f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient043_frame01_slice_9.h5 deleted file mode 100644 index 5cf233d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_0.h5 deleted file mode 100644 index 1d7422a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_1.h5 deleted file mode 100644 index 80d33a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_10.h5 deleted file mode 100644 index fb16a74..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_11.h5 deleted file mode 100644 index ea2cd9f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_2.h5 deleted file mode 100644 index 9ffa37d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_3.h5 deleted file mode 100644 index 0c86fce..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_4.h5 deleted file mode 100644 index e9ad501..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_5.h5 deleted file mode 100644 index 40c3283..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_6.h5 deleted file mode 100644 index 05fe9e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_7.h5 deleted file mode 100644 index 15091e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_8.h5 deleted file mode 100644 index b5341ce..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient043_frame07_slice_9.h5 deleted file mode 100644 index 17da0fc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient043_frame07_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_0.h5 deleted file mode 100644 index 609c7c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_1.h5 deleted file mode 100644 index 
9caa3e9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_2.h5 deleted file mode 100644 index 3e7ad2e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_3.h5 deleted file mode 100644 index 28809f6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_4.h5 deleted file mode 100644 index f30f2d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_5.h5 deleted file mode 100644 index 544da7c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_6.h5 deleted file mode 100644 index 3175131..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_7.h5 deleted file mode 100644 index b11f6fd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient044_frame01_slice_8.h5 deleted file mode 100644 index 1d837e6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_0.h5 deleted file mode 100644 index 4e56672..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_1.h5 deleted file mode 100644 index 9bd1dac..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_2.h5 deleted file mode 100644 index 9b8f7df..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_3.h5 deleted file mode 100644 index abe84a4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_4.h5 deleted file mode 100644 index 7a63a96..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_4.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_5.h5 deleted file mode 100644 index b401487..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_6.h5 deleted file mode 100644 index be5f4da..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_7.h5 deleted file mode 100644 index 1a9dfe6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient044_frame11_slice_8.h5 deleted file mode 100644 index 7010c3f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient044_frame11_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_0.h5 deleted file mode 100644 index 9a10714..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_1.h5 deleted file mode 100644 index fada59d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_2.h5 deleted file mode 100644 index e1806c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_3.h5 deleted file mode 100644 index 42d8c62..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_4.h5 deleted file mode 100644 index 4b0c1d1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_5.h5 deleted file mode 100644 index 63509e5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_6.h5 deleted file mode 100644 index c44a48f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient045_frame01_slice_7.h5 deleted file mode 100644 index 22df041..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_0.h5 deleted file mode 100644 index 
04721c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_1.h5 deleted file mode 100644 index 53a41a2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_2.h5 deleted file mode 100644 index c0a6717..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_3.h5 deleted file mode 100644 index 5ea937d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_4.h5 deleted file mode 100644 index dfbef19..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_5.h5 deleted file mode 100644 index df82ee9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_6.h5 deleted file mode 100644 index 0e4da79..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient045_frame13_slice_7.h5 deleted file mode 100644 index 76b2f6c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient045_frame13_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_0.h5 deleted file mode 100644 index ed1580e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_1.h5 deleted file mode 100644 index b6779e6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_2.h5 deleted file mode 100644 index 9eb7eb8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_3.h5 deleted file mode 100644 index 2085919..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_4.h5 deleted file mode 100644 index 1ab7171..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_4.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_5.h5 deleted file mode 100644 index bd515cc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_6.h5 deleted file mode 100644 index 133f905..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_7.h5 deleted file mode 100644 index 1a2a8b4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient046_frame01_slice_8.h5 deleted file mode 100644 index 32063c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_0.h5 deleted file mode 100644 index 290e525..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_1.h5 deleted file mode 100644 index fd3aea0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_2.h5 deleted file mode 100644 index 069e3ad..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_3.h5 deleted file mode 100644 index 6d2dfb8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_4.h5 deleted file mode 100644 index 0dac7e7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_5.h5 deleted file mode 100644 index 7adfd7e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_6.h5 deleted file mode 100644 index 99cfb0b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_7.h5 deleted file mode 100644 index a25540e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient046_frame10_slice_8.h5 deleted file mode 100644 index 
7ee7654..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient046_frame10_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_0.h5 deleted file mode 100644 index da61345..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_1.h5 deleted file mode 100644 index f99767a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_2.h5 deleted file mode 100644 index bd051a9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_3.h5 deleted file mode 100644 index 53d133f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_4.h5 deleted file mode 100644 index 9f20c32..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_5.h5 deleted file mode 100644 index 58ffcda..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_6.h5 deleted file mode 100644 index e4b10c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_7.h5 deleted file mode 100644 index 1dd79a2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient047_frame01_slice_8.h5 deleted file mode 100644 index 6526d2a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_0.h5 deleted file mode 100644 index 8176865..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_1.h5 deleted file mode 100644 index 495dd5b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_2.h5 deleted file mode 100644 index d801d6e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_3.h5 deleted file mode 100644 index abe8099..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_4.h5 deleted file mode 100644 index c7f1d66..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_5.h5 deleted file mode 100644 index 334eb52..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_6.h5 deleted file mode 100644 index 0164c25..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_7.h5 deleted file mode 100644 index e0b0dfa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient047_frame09_slice_8.h5 deleted file mode 100644 index 60de6cb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient047_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_0.h5 deleted file mode 100644 index 829d114..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_1.h5 deleted file mode 100644 index 0811e9f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_2.h5 deleted file mode 100644 index 6a17a5b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_3.h5 deleted file mode 100644 index a087cd1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_4.h5 deleted file mode 100644 index 9720e35..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_5.h5 deleted file mode 100644 index fc13ca3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_6.h5 deleted file mode 100644 index 
532a7b5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient048_frame01_slice_7.h5 deleted file mode 100644 index 73c1c2f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_0.h5 deleted file mode 100644 index 1574ba6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_1.h5 deleted file mode 100644 index 2d28635..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_2.h5 deleted file mode 100644 index 8dbfa81..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_3.h5 deleted file mode 100644 index 8071764..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_4.h5 deleted file mode 100644 index 745c197..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_5.h5 deleted file mode 100644 index 9185558..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_6.h5 deleted file mode 100644 index 8e5cae4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient048_frame08_slice_7.h5 deleted file mode 100644 index a8b4fc4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient048_frame08_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_0.h5 deleted file mode 100644 index f9f7112..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_1.h5 deleted file mode 100644 index 4b37482..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_2.h5 deleted file mode 100644 index dd732dd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_3.h5 deleted file mode 100644 index 6b6a9af..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_4.h5 deleted file mode 100644 index d88db3f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_5.h5 deleted file mode 100644 index 46a262e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient049_frame01_slice_6.h5 deleted file mode 100644 index 3b56e07..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_0.h5 deleted file mode 100644 index 66d3e9d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_1.h5 deleted file mode 100644 index d95e542..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_2.h5 deleted file mode 100644 index ca80b5e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_3.h5 deleted file mode 100644 index fc644f3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_4.h5 deleted file mode 100644 index 9544793..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_5.h5 deleted file mode 100644 index 2343b05..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient049_frame11_slice_6.h5 deleted file mode 100644 index 22557fc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient049_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_0.h5 deleted file mode 100644 index 466710d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_1.h5 deleted file mode 100644 index 
48ac643..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_2.h5 deleted file mode 100644 index 0a85488..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_3.h5 deleted file mode 100644 index 067b340..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_4.h5 deleted file mode 100644 index d43546c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_5.h5 deleted file mode 100644 index f10506f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_6.h5 deleted file mode 100644 index 4901b8d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_7.h5 deleted file mode 100644 index 6d4e940..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_8.h5 deleted file mode 100644 index fe14988..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient050_frame01_slice_9.h5 deleted file mode 100644 index 02183f5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_0.h5 deleted file mode 100644 index d6eba6b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_1.h5 deleted file mode 100644 index 3c5f2f4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_2.h5 deleted file mode 100644 index 47e2b49..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_3.h5 deleted file mode 100644 index e2ed822..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_4.h5 deleted file mode 100644 index 41c9adb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_5.h5 deleted file mode 100644 index 8f54511..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_6.h5 deleted file mode 100644 index f7bd4c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_7.h5 deleted file mode 100644 index 9dfbaa7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_8.h5 deleted file mode 100644 index 041f879..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient050_frame12_slice_9.h5 deleted file mode 100644 index bcd7833..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient050_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_0.h5 deleted file mode 100644 index 86917a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_1.h5 deleted file mode 100644 index f142f7f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_2.h5 deleted file mode 100644 index 5329b65..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_3.h5 deleted file mode 100644 index 1b075f7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_4.h5 deleted file mode 100644 index d7c478b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_5.h5 deleted file mode 100644 index 01ad4b3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_6.h5 deleted file mode 100644 index 
2d1c9ea..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_7.h5 deleted file mode 100644 index 36d9a78..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_8.h5 deleted file mode 100644 index 3276e25..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient051_frame01_slice_9.h5 deleted file mode 100644 index 3cdfa97..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_0.h5 deleted file mode 100644 index 424215d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_1.h5 deleted file mode 100644 index 1e0578d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_2.h5 deleted file mode 100644 index d7eb206..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_3.h5 deleted file mode 100644 index b34c76f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_4.h5 deleted file mode 100644 index d5d1bbf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_5.h5 deleted file mode 100644 index 9ae75db..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_6.h5 deleted file mode 100644 index 275dda2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_7.h5 deleted file mode 100644 index 0645d39..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_8.h5 deleted file mode 100644 index b4c1117..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_8.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient051_frame11_slice_9.h5 deleted file mode 100644 index 307a4c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient051_frame11_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_0.h5 deleted file mode 100644 index 8dc4f96..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_1.h5 deleted file mode 100644 index 4c55176..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_2.h5 deleted file mode 100644 index 76dea89..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_3.h5 deleted file mode 100644 index fcc8f69..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_4.h5 deleted file mode 100644 index 4477dc3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_5.h5 deleted file mode 100644 index 5bf3250..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_6.h5 deleted file mode 100644 index d128fe1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient052_frame01_slice_7.h5 deleted file mode 100644 index 19c4df7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_0.h5 deleted file mode 100644 index 6c6d5fb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_1.h5 deleted file mode 100644 index 6c156a0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_2.h5 deleted file mode 100644 index ffef5d0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_3.h5 deleted file mode 100644 index 
3cab245..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_4.h5 deleted file mode 100644 index 3f0a12b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_5.h5 deleted file mode 100644 index 00060de..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_6.h5 deleted file mode 100644 index 4d9d665..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient052_frame09_slice_7.h5 deleted file mode 100644 index 2368b63..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient052_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_0.h5 deleted file mode 100644 index 10db9c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_1.h5 deleted file mode 100644 index 4bbf328..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_2.h5 deleted file mode 100644 index 69e0397..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_3.h5 deleted file mode 100644 index b73771a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_4.h5 deleted file mode 100644 index d34b2a2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_5.h5 deleted file mode 100644 index 810f022..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient053_frame01_slice_6.h5 deleted file mode 100644 index f0ec69a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_0.h5 deleted file mode 100644 index 2a5a42c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_1.h5 deleted file mode 100644 index 28e3a47..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_2.h5 deleted file mode 100644 index df95a78..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_3.h5 deleted file mode 100644 index 2ff7d5b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_4.h5 deleted file mode 100644 index 6f2a6c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_5.h5 deleted file mode 100644 index d810df4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient053_frame12_slice_6.h5 deleted file mode 100644 index 5a2ba96..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient053_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_0.h5 deleted file mode 100644 index 900e48e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_1.h5 deleted file mode 100644 index 73861bf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_2.h5 deleted file mode 100644 index d717c8e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_3.h5 deleted file mode 100644 index db110a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_4.h5 deleted file mode 100644 index ae386fd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_5.h5 deleted file mode 100644 index 919402c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_6.h5 deleted file mode 100644 index 
c4dfa7d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient054_frame01_slice_7.h5 deleted file mode 100644 index a82402a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_0.h5 deleted file mode 100644 index 55a67b5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_1.h5 deleted file mode 100644 index 1f84dac..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_2.h5 deleted file mode 100644 index d08fb3c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_3.h5 deleted file mode 100644 index 9d1f485..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_4.h5 deleted file mode 100644 index 5a90310..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_5.h5 deleted file mode 100644 index 23e2225..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_6.h5 deleted file mode 100644 index 752ed16..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient054_frame12_slice_7.h5 deleted file mode 100644 index b61d76e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient054_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_0.h5 deleted file mode 100644 index 045c384..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_1.h5 deleted file mode 100644 index 4c2d3d7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_2.h5 deleted file mode 100644 index 285a1a7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_3.h5 deleted file mode 100644 index ce144ba..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_4.h5 deleted file mode 100644 index aa71578..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_5.h5 deleted file mode 100644 index 0ffbf96..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_6.h5 deleted file mode 100644 index 8d40de1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_7.h5 deleted file mode 100644 index 2804609..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient055_frame01_slice_8.h5 deleted file mode 100644 index 3206f30..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_0.h5 deleted file mode 100644 index 25f9344..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_1.h5 deleted file mode 100644 index 279325f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_2.h5 deleted file mode 100644 index b057be4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_3.h5 deleted file mode 100644 index 555c3b5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_4.h5 deleted file mode 100644 index fb15012..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_5.h5 deleted file mode 100644 index 878a08c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_6.h5 deleted file mode 100644 index 
a6e7c5b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_7.h5 deleted file mode 100644 index 7121cab..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient055_frame10_slice_8.h5 deleted file mode 100644 index 66e0eaa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient055_frame10_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_0.h5 deleted file mode 100644 index e885146..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_1.h5 deleted file mode 100644 index 33de719..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_2.h5 deleted file mode 100644 index cfbb543..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_3.h5 deleted file mode 100644 index 6f01be7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_4.h5 deleted file mode 100644 index d4e4687..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_5.h5 deleted file mode 100644 index ddc0041..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_6.h5 deleted file mode 100644 index 343d00c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_7.h5 deleted file mode 100644 index d288f96..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient056_frame01_slice_8.h5 deleted file mode 100644 index 1018939..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_0.h5 deleted file mode 100644 index 34ff93c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_1.h5 deleted file mode 100644 index f553630..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_2.h5 deleted file mode 100644 index 0890c5e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_3.h5 deleted file mode 100644 index 632ce7d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_4.h5 deleted file mode 100644 index 71d134d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_5.h5 deleted file mode 100644 index 9cce511..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_6.h5 deleted file mode 100644 index 536472b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_7.h5 deleted file mode 100644 index 23fbb53..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient056_frame12_slice_8.h5 deleted file mode 100644 index 63242ec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient056_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_0.h5 deleted file mode 100644 index 6847ec1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_1.h5 deleted file mode 100644 index d6991e8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_2.h5 deleted file mode 100644 index 380cab6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_3.h5 deleted file mode 100644 index 356ae29..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_4.h5 deleted file mode 100644 index 
984e933..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_5.h5 deleted file mode 100644 index 49e2fe0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_6.h5 deleted file mode 100644 index 07dc421..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient057_frame01_slice_7.h5 deleted file mode 100644 index 4ef0ad9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_0.h5 deleted file mode 100644 index b477b5d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_1.h5 deleted file mode 100644 index 1c0647b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_2.h5 deleted file mode 100644 index e26b7a4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_3.h5 deleted file mode 100644 index a11202d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_4.h5 deleted file mode 100644 index 7cd7ea9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_5.h5 deleted file mode 100644 index b9dbfb8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_6.h5 deleted file mode 100644 index 857603f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient057_frame09_slice_7.h5 deleted file mode 100644 index ec8a90a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient057_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_0.h5 deleted file mode 100644 index 5ee51bd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_1.h5 deleted file mode 100644 index 2cf21cc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_2.h5 deleted file mode 100644 index 88ced76..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_3.h5 deleted file mode 100644 index f9e1c45..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_4.h5 deleted file mode 100644 index dcb02b9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_5.h5 deleted file mode 100644 index 189fdad..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_6.h5 deleted file mode 100644 index a8100ca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_7.h5 deleted file mode 100644 index 5e34530..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient058_frame01_slice_8.h5 deleted file mode 100644 index 15c60c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_0.h5 deleted file mode 100644 index a58fcf3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_1.h5 deleted file mode 100644 index b259cc1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_2.h5 deleted file mode 100644 index a66053d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_3.h5 deleted file mode 100644 index 613587e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_4.h5 deleted file mode 100644 index 
0d5881c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_5.h5 deleted file mode 100644 index 0a375d3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_6.h5 deleted file mode 100644 index 8422054..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_7.h5 deleted file mode 100644 index f6d3b4b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient058_frame14_slice_8.h5 deleted file mode 100644 index 6f3700a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient058_frame14_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_0.h5 deleted file mode 100644 index 755311a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_1.h5 deleted file mode 100644 index 92dcefa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_2.h5 deleted file mode 100644 index e8e298a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_3.h5 deleted file mode 100644 index 539a75a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_4.h5 deleted file mode 100644 index 8dcb863..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_5.h5 deleted file mode 100644 index 609f37b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_6.h5 deleted file mode 100644 index c5028fc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_7.h5 deleted file mode 100644 index 6894cca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_7.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient059_frame01_slice_8.h5 deleted file mode 100644 index 0a58a34..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_0.h5 deleted file mode 100644 index 76e8152..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_1.h5 deleted file mode 100644 index b1cb843..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_2.h5 deleted file mode 100644 index e661951..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_3.h5 deleted file mode 100644 index 140d263..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_4.h5 deleted file mode 100644 index f441820..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_5.h5 deleted file mode 100644 index 8b17175..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_6.h5 deleted file mode 100644 index 8de9d2e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_7.h5 deleted file mode 100644 index 3350440..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient059_frame09_slice_8.h5 deleted file mode 100644 index 1668e44..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient059_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_0.h5 deleted file mode 100644 index 190a937..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_1.h5 deleted file mode 100644 index 0d66ed3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_2.h5 deleted file mode 100644 index 
3637f54..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_3.h5 deleted file mode 100644 index 59ed53f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_4.h5 deleted file mode 100644 index 941abc7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_5.h5 deleted file mode 100644 index a61627b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_6.h5 deleted file mode 100644 index 90a7002..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_7.h5 deleted file mode 100644 index 542e466..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient060_frame01_slice_8.h5 deleted file mode 100644 index 43ddde2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_0.h5 deleted file mode 100644 index d799ff2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_1.h5 deleted file mode 100644 index 3eadd09..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_2.h5 deleted file mode 100644 index cde9554..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_3.h5 deleted file mode 100644 index 96d5ee2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_4.h5 deleted file mode 100644 index 1cd5ad9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_5.h5 deleted file mode 100644 index a8e11d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_5.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_6.h5 deleted file mode 100644 index 931c917..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_7.h5 deleted file mode 100644 index 8e22135..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient060_frame14_slice_8.h5 deleted file mode 100644 index a87d4b3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient060_frame14_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_0.h5 deleted file mode 100644 index 04e3c4c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_1.h5 deleted file mode 100644 index 1219734..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_2.h5 deleted file mode 100644 index 40c6143..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_3.h5 deleted file mode 100644 index 6cbc44a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_4.h5 deleted file mode 100644 index 2c112fb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_5.h5 deleted file mode 100644 index f5ca8b7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_6.h5 deleted file mode 100644 index 6d181ad..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_7.h5 deleted file mode 100644 index 0dffb50..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient061_frame01_slice_8.h5 deleted file mode 100644 index 265dbee..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_0.h5 deleted file mode 100644 index 
5a6c507..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_1.h5 deleted file mode 100644 index 5b134c2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_2.h5 deleted file mode 100644 index f43c6bb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_3.h5 deleted file mode 100644 index 279b95e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_4.h5 deleted file mode 100644 index b0998fc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_5.h5 deleted file mode 100644 index 72fad86..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_6.h5 deleted file mode 100644 index b7c9b57..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_7.h5 deleted file mode 100644 index 28db466..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient061_frame10_slice_8.h5 deleted file mode 100644 index 405732d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient061_frame10_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_0.h5 deleted file mode 100644 index b93d919..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_1.h5 deleted file mode 100644 index 766c134..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_2.h5 deleted file mode 100644 index c9d0cb1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_3.h5 deleted file mode 100644 index d7e860b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_4.h5 deleted file mode 100644 index bdfdf94..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_5.h5 deleted file mode 100644 index adbc3e3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_6.h5 deleted file mode 100644 index e15014e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_7.h5 deleted file mode 100644 index 65bb8fd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_8.h5 deleted file mode 100644 index b175e5f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient062_frame01_slice_9.h5 deleted file mode 100644 index a446961..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_0.h5 deleted file mode 100644 index a663a46..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_1.h5 deleted file mode 100644 index 626be58..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_2.h5 deleted file mode 100644 index 0d6ae73..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_3.h5 deleted file mode 100644 index df9f03f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_4.h5 deleted file mode 100644 index 1e5539e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_5.h5 deleted file mode 100644 index b22205c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_6.h5 deleted file mode 100644 index 
82b6c35..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_7.h5 deleted file mode 100644 index 56ac70f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_8.h5 deleted file mode 100644 index e7ab391..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient062_frame09_slice_9.h5 deleted file mode 100644 index acbf389..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient062_frame09_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_0.h5 deleted file mode 100644 index 2d9bf9d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_1.h5 deleted file mode 100644 index 6547836..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_2.h5 deleted file mode 100644 index c90a749..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_3.h5 deleted file mode 100644 index 524e871..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_4.h5 deleted file mode 100644 index e699b59..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_5.h5 deleted file mode 100644 index fcf928f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_6.h5 deleted file mode 100644 index b685ce5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient063_frame01_slice_7.h5 deleted file mode 100644 index cb09dc2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_0.h5 deleted file mode 100644 index ee1a326..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_1.h5 deleted file mode 100644 index 9e3b934..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_2.h5 deleted file mode 100644 index 983ba4d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_3.h5 deleted file mode 100644 index 7792f0a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_4.h5 deleted file mode 100644 index ae95091..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_5.h5 deleted file mode 100644 index 0025957..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_6.h5 deleted file mode 100644 index 808eca8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient063_frame16_slice_7.h5 deleted file mode 100644 index 8fca174..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient063_frame16_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_0.h5 deleted file mode 100644 index 23fa39f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_1.h5 deleted file mode 100644 index a5e1d4a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_2.h5 deleted file mode 100644 index 744e77a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_3.h5 deleted file mode 100644 index 6ecc566..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_4.h5 deleted file mode 100644 index dd5a114..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_5.h5 deleted file mode 100644 index 
523bea1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_6.h5 deleted file mode 100644 index 51fd72b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_7.h5 deleted file mode 100644 index bc9677a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_8.h5 deleted file mode 100644 index 45bdb71..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient064_frame01_slice_9.h5 deleted file mode 100644 index 58af798..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_0.h5 deleted file mode 100644 index 223a356..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_1.h5 deleted file mode 100644 index 1fb1acb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_2.h5 deleted file mode 100644 index a0c4bc9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_3.h5 deleted file mode 100644 index 1d38a4d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_4.h5 deleted file mode 100644 index f6e5ea2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_5.h5 deleted file mode 100644 index 8b301a2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_6.h5 deleted file mode 100644 index 6e92623..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_7.h5 deleted file mode 100644 index 9089bc3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_7.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_8.h5 deleted file mode 100644 index a8def06..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient064_frame12_slice_9.h5 deleted file mode 100644 index c3d0bde..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient064_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_0.h5 deleted file mode 100644 index 138a1dd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_1.h5 deleted file mode 100644 index 33fe93a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_2.h5 deleted file mode 100644 index 2f1223c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_3.h5 deleted file mode 100644 index 905d799..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_4.h5 deleted file mode 100644 index e26871c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_5.h5 deleted file mode 100644 index fbe0aa3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_6.h5 deleted file mode 100644 index 33a7c65..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient065_frame01_slice_7.h5 deleted file mode 100644 index 5c413d1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_0.h5 deleted file mode 100644 index 55e84d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_1.h5 deleted file mode 100644 index 612df94..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_2.h5 deleted file mode 100644 index 
01297b1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_3.h5 deleted file mode 100644 index c7545e5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_4.h5 deleted file mode 100644 index 7d17496..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_5.h5 deleted file mode 100644 index 1940bfb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_6.h5 deleted file mode 100644 index f22c684..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient065_frame14_slice_7.h5 deleted file mode 100644 index 7676a3b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient065_frame14_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_0.h5 deleted file mode 100644 index 9112ceb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_1.h5 deleted file mode 100644 index 8a286c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_2.h5 deleted file mode 100644 index 935ea80..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_3.h5 deleted file mode 100644 index 2aa5703..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_4.h5 deleted file mode 100644 index d180b57..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_5.h5 deleted file mode 100644 index d1c422e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_6.h5 deleted file mode 100644 index 59eb333..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_6.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_7.h5 deleted file mode 100644 index 61e90d5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient066_frame01_slice_8.h5 deleted file mode 100644 index 59e71cd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_0.h5 deleted file mode 100644 index bbd1861..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_1.h5 deleted file mode 100644 index 227d073..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_2.h5 deleted file mode 100644 index 5d81233..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_3.h5 deleted file mode 100644 index 4c9bbc6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_4.h5 deleted file mode 100644 index ecded7f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_5.h5 deleted file mode 100644 index 0bdc4a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_6.h5 deleted file mode 100644 index cd26bca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_7.h5 deleted file mode 100644 index 99753d6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient066_frame11_slice_8.h5 deleted file mode 100644 index f870364..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient066_frame11_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_0.h5 deleted file mode 100644 index 39077a6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_1.h5 deleted file mode 100644 index 
6afe675..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_2.h5 deleted file mode 100644 index e6fa7dc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_3.h5 deleted file mode 100644 index a071082..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_4.h5 deleted file mode 100644 index dc068d1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_5.h5 deleted file mode 100644 index a91e8d5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_6.h5 deleted file mode 100644 index 56af799..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_7.h5 deleted file mode 100644 index e8bc0de..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_8.h5 deleted file mode 100644 index 5ca3d1f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient067_frame01_slice_9.h5 deleted file mode 100644 index e069041..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_0.h5 deleted file mode 100644 index 882f355..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_1.h5 deleted file mode 100644 index d269956..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_2.h5 deleted file mode 100644 index 8110e74..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_3.h5 deleted file mode 100644 index b5477dc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_4.h5 deleted file mode 100644 index 5fbb3a8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_5.h5 deleted file mode 100644 index 3c843cc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_6.h5 deleted file mode 100644 index 561c616..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_7.h5 deleted file mode 100644 index b72e08c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_8.h5 deleted file mode 100644 index 6db8697..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient067_frame10_slice_9.h5 deleted file mode 100644 index 56382d0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient067_frame10_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_0.h5 deleted file mode 100644 index 1a2603f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_1.h5 deleted file mode 100644 index d88031d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_2.h5 deleted file mode 100644 index a8ca983..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_3.h5 deleted file mode 100644 index f609be5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_4.h5 deleted file mode 100644 index c0ca4f5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_5.h5 deleted file mode 100644 index 560041a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient068_frame01_slice_6.h5 deleted file mode 100644 index 
fd01851..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_0.h5 deleted file mode 100644 index 518cf55..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_1.h5 deleted file mode 100644 index c9fd45d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_2.h5 deleted file mode 100644 index 0fb2c72..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_3.h5 deleted file mode 100644 index a4cdccb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_4.h5 deleted file mode 100644 index 3958067..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_5.h5 deleted file mode 100644 index 73646f1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient068_frame12_slice_6.h5 deleted file mode 100644 index 76e09e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient068_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_0.h5 deleted file mode 100644 index b3291c5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_1.h5 deleted file mode 100644 index a847d25..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_2.h5 deleted file mode 100644 index b141946..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_3.h5 deleted file mode 100644 index bda64e8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_4.h5 deleted file mode 100644 index a77482e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_4.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_5.h5 deleted file mode 100644 index bf9d2d4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient069_frame01_slice_6.h5 deleted file mode 100644 index 06c46cc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_0.h5 deleted file mode 100644 index 5f58386..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_1.h5 deleted file mode 100644 index 319200b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_2.h5 deleted file mode 100644 index 68ee6d7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_3.h5 deleted file mode 100644 index 677a109..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_4.h5 deleted file mode 100644 index dcdfdce..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_5.h5 deleted file mode 100644 index 8687309..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient069_frame12_slice_6.h5 deleted file mode 100644 index 5055ef9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient069_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient070_frame01_slice_0.h5 deleted file mode 100644 index 8d94278..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient070_frame01_slice_1.h5 deleted file mode 100644 index ffc7f23..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient070_frame01_slice_2.h5 deleted file mode 100644 index 4f2652a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient070_frame01_slice_3.h5 deleted file mode 100644 index 
769bd98..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient070_frame01_slice_4.h5 deleted file mode 100644 index 3fb06a4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient070_frame01_slice_5.h5 deleted file mode 100644 index d70ecc5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient070_frame10_slice_0.h5 deleted file mode 100644 index f3bf837..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient070_frame10_slice_1.h5 deleted file mode 100644 index e50ffa6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient070_frame10_slice_2.h5 deleted file mode 100644 index 0bc008c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient070_frame10_slice_3.h5 deleted file mode 100644 index a118b63..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient070_frame10_slice_4.h5 deleted file mode 100644 index 7e1d0a5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient070_frame10_slice_5.h5 deleted file mode 100644 index 0a5798a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient070_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_0.h5 deleted file mode 100644 index 5a90ee2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_1.h5 deleted file mode 100644 index 2c3d06d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_2.h5 deleted file mode 100644 index 2e44905..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_3.h5 deleted file mode 100644 index 6aace0d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_4.h5 deleted file mode 100644 index 458475d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_5.h5 deleted file mode 100644 index 0631f62..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_6.h5 deleted file mode 100644 index b9f8648..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_7.h5 deleted file mode 100644 index bb1129a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_8.h5 deleted file mode 100644 index 9c5579f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient071_frame01_slice_9.h5 deleted file mode 100644 index 37a8f6a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_0.h5 deleted file mode 100644 index 66633d0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_1.h5 deleted file mode 100644 index ce1feef..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_2.h5 deleted file mode 100644 index 56c43b3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_3.h5 deleted file mode 100644 index b5ae6ca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_4.h5 deleted file mode 100644 index 30d21c2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_5.h5 deleted file mode 100644 index 5dba46c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_6.h5 deleted file mode 100644 index 
6bc81e0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_7.h5 deleted file mode 100644 index 562b1e3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_8.h5 deleted file mode 100644 index 664afa0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient071_frame09_slice_9.h5 deleted file mode 100644 index 5a434d4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient071_frame09_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_0.h5 deleted file mode 100644 index 426d19f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_1.h5 deleted file mode 100644 index 0b59d02..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_2.h5 deleted file mode 100644 index 029bda4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_3.h5 deleted file mode 100644 index efdfb04..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_4.h5 deleted file mode 100644 index 7fec3c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_5.h5 deleted file mode 100644 index c6ca3b8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_6.h5 deleted file mode 100644 index c14f779..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient072_frame01_slice_7.h5 deleted file mode 100644 index f6eec19..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient072_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient072_frame11_slice_0.h5 deleted file mode 100644 index 004fc87..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient072_frame11_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient072_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient072_frame11_slice_1.h5
deleted file mode 100644
index 952bb5b..0000000
Binary files a/data/ACDC/ACDC_training_slices/patient072_frame11_slice_1.h5 and /dev/null differ

[The rest of this block of the diff is further binary-deletion records, one per ACDC training slice. Every record has the same form as the one shown above (diff --git a/<path> b/<path>, deleted file mode 100644, index <blob-hash>..0000000, Binary files a/<path> and /dev/null differ); only the file path and blob hash change from record to record, so the records are summarized as a file list. The files deleted in this block are data/ACDC/ACDC_training_slices/<patient>_<frame>_slice_<n>.h5 for the following patient/frame groups and slice indices:
  patient072_frame11: slices 1-7 (first record shown in full above)
  patient073_frame01: slices 0-6
  patient073_frame10: slices 0-6
  patient074_frame01: slices 0-7
  patient074_frame12: slices 0-7
  patient075_frame01: slices 0-13
  patient075_frame06: slices 0-13
  patient076_frame01: slices 0-7
  patient076_frame12: slices 0-7
  patient077_frame01: slices 0-7
  patient077_frame09: slices 0-7
  patient078_frame01: slices 0-7
  patient078_frame09: slices 0-7
  patient079_frame01: slices 0-8
  patient079_frame11: slices 0-8
  patient080_frame01: slices 0-5
  patient080_frame10: slices 0-5
  patient081_frame01: slices 0-16
  patient081_frame07: slices 0-16
  patient082_frame01: slices 0-15
  patient082_frame07: slices 0-15
  patient083_frame01: slices 0-5
  patient083_frame08: slices 0-5
  patient084_frame01: slices 0-11
  patient084_frame10: slices 0-11
  patient085_frame01: slices 0-14
  patient085_frame09: slices 0-1, with the record for slice 10 beginning below and further slices continuing past this block]
diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_10.h5 deleted file mode 100644 index
5a98d4d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_11.h5 deleted file mode 100644 index 62c0f16..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_12.h5 deleted file mode 100644 index 0679343..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_13.h5 deleted file mode 100644 index 48eab0f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_14.h5 deleted file mode 100644 index d05f8df..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_2.h5 deleted file mode 100644 index 11bdf32..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_3.h5 deleted file mode 100644 index 3175e9e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_4.h5 deleted file mode 100644 index 81fc9c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_5.h5 deleted file mode 100644 index 67e17cc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_6.h5 deleted file mode 100644 index 26ad3f9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_7.h5 deleted file mode 100644 index 3fc5788..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_8.h5 deleted file mode 100644 index e4b3445..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient085_frame09_slice_9.h5 deleted file mode 100644 index 2eb5387..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient085_frame09_slice_9.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_0.h5 deleted file mode 100644 index 290b47a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_1.h5 deleted file mode 100644 index a8fbfc6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_2.h5 deleted file mode 100644 index b992e23..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_3.h5 deleted file mode 100644 index a9765e0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_4.h5 deleted file mode 100644 index 819e8f8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_5.h5 deleted file mode 100644 index 7e70cd4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient086_frame01_slice_6.h5 deleted file mode 100644 index bfa60a4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_0.h5 deleted file mode 100644 index c5b771f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_1.h5 deleted file mode 100644 index ac9a205..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_2.h5 deleted file mode 100644 index 2e79839..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_3.h5 deleted file mode 100644 index 1b9e099..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_4.h5 deleted file mode 100644 index 858cb28..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_5.h5 deleted file mode 100644 index 
f5efd69..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient086_frame08_slice_6.h5 deleted file mode 100644 index 3854b84..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient086_frame08_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_0.h5 deleted file mode 100644 index b660adf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_1.h5 deleted file mode 100644 index 81e0713..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_2.h5 deleted file mode 100644 index d82b557..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_3.h5 deleted file mode 100644 index 71fd018..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_4.h5 deleted file mode 100644 index ed397ca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_5.h5 deleted file mode 100644 index 6c32b5f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_6.h5 deleted file mode 100644 index f216540..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient087_frame01_slice_7.h5 deleted file mode 100644 index 2bc3888..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_0.h5 deleted file mode 100644 index 147dab4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_1.h5 deleted file mode 100644 index af3d86a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_2.h5 deleted file mode 100644 index 001fc68..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_2.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_3.h5 deleted file mode 100644 index a4d81ec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_4.h5 deleted file mode 100644 index 9b60456..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_5.h5 deleted file mode 100644 index a0ecabc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_6.h5 deleted file mode 100644 index 8337f22..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient087_frame10_slice_7.h5 deleted file mode 100644 index dcae42a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient087_frame10_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_0.h5 deleted file mode 100644 index b65fc60..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_1.h5 deleted file mode 100644 index b1af33a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_10.h5 deleted file mode 100644 index 3dfc170..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_11.h5 deleted file mode 100644 index b958eb8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_12.h5 deleted file mode 100644 index b657767..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_13.h5 deleted file mode 100644 index 4ae6b00..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_14.h5 deleted file mode 100644 index 1c2eefc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_15.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_15.h5 deleted file mode 100644 
index 6d7afc8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_2.h5 deleted file mode 100644 index 5e64bc4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_3.h5 deleted file mode 100644 index 480ad7c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_4.h5 deleted file mode 100644 index 0c535eb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_5.h5 deleted file mode 100644 index 22a6cfa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_6.h5 deleted file mode 100644 index f9ed418..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_7.h5 deleted file mode 100644 index f7b0daa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_8.h5 deleted file mode 100644 index be5dac7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient088_frame01_slice_9.h5 deleted file mode 100644 index 9f4a44e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_0.h5 deleted file mode 100644 index 983c91e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_1.h5 deleted file mode 100644 index bbd4a06..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_10.h5 deleted file mode 100644 index 753a036..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_11.h5 deleted file mode 100644 index eaff28b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_11.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_12.h5 deleted file mode 100644 index e582bc4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_13.h5 deleted file mode 100644 index c29f3cd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_14.h5 deleted file mode 100644 index d63692d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_15.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_15.h5 deleted file mode 100644 index b6c5b73..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_2.h5 deleted file mode 100644 index 61f6f64..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_3.h5 deleted file mode 100644 index 6b594a1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_4.h5 deleted file mode 100644 index 5552391..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_5.h5 deleted file mode 100644 index f2c395d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_6.h5 deleted file mode 100644 index 3c52f20..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_7.h5 deleted file mode 100644 index ee56584..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_8.h5 deleted file mode 100644 index b257b5f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient088_frame12_slice_9.h5 deleted file mode 100644 index 47c721a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient088_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient089_frame01_slice_0.h5 deleted file mode 100644 index 
d992d5e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient089_frame01_slice_1.h5 deleted file mode 100644 index c722d73..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient089_frame01_slice_2.h5 deleted file mode 100644 index bdc6692..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient089_frame01_slice_3.h5 deleted file mode 100644 index 56aa262..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient089_frame01_slice_4.h5 deleted file mode 100644 index 9bcf3ed..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient089_frame01_slice_5.h5 deleted file mode 100644 index 9f11bce..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient089_frame10_slice_0.h5 deleted file mode 100644 index 88ae1d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient089_frame10_slice_1.h5 deleted file mode 100644 index 411f01d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient089_frame10_slice_2.h5 deleted file mode 100644 index f0113f3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient089_frame10_slice_3.h5 deleted file mode 100644 index adc03f9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient089_frame10_slice_4.h5 deleted file mode 100644 index 254d1c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient089_frame10_slice_5.h5 deleted file mode 100644 index 0a13c85..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient089_frame10_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_0.h5 deleted file mode 100644 index 97edb38..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_1.h5 deleted file mode 100644 index 218a211..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_2.h5 deleted file mode 100644 index 24871c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_3.h5 deleted file mode 100644 index 62bd839..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_4.h5 deleted file mode 100644 index 55aec7f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_5.h5 deleted file mode 100644 index d393447..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient090_frame04_slice_6.h5 deleted file mode 100644 index d7a94a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame04_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_0.h5 deleted file mode 100644 index 6f0ef31..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_1.h5 deleted file mode 100644 index 708cb4c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_2.h5 deleted file mode 100644 index ccc383d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_3.h5 deleted file mode 100644 index 8bdccdf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_4.h5 deleted file mode 100644 index 3d03690..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_5.h5 deleted file mode 100644 index 82cda35..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient090_frame11_slice_6.h5 deleted file mode 100644 index 
85ea237..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient090_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_0.h5 deleted file mode 100644 index bd03892..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_1.h5 deleted file mode 100644 index e80318d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_2.h5 deleted file mode 100644 index 0a8e8e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_3.h5 deleted file mode 100644 index e45ae3c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_4.h5 deleted file mode 100644 index d949025..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_5.h5 deleted file mode 100644 index fdd5ba2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_6.h5 deleted file mode 100644 index 15abd12..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient091_frame01_slice_7.h5 deleted file mode 100644 index 438fb11..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_0.h5 deleted file mode 100644 index 7f085e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_1.h5 deleted file mode 100644 index a407c2f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_2.h5 deleted file mode 100644 index 6ec67cd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_3.h5 deleted file mode 100644 index 7dbf6c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_4.h5 deleted file mode 100644 index c74f09e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_5.h5 deleted file mode 100644 index 186cec3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_6.h5 deleted file mode 100644 index ea693ef..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient091_frame09_slice_7.h5 deleted file mode 100644 index 7656807..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient091_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_0.h5 deleted file mode 100644 index 0e56163..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_1.h5 deleted file mode 100644 index 34ddb0d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_10.h5 deleted file mode 100644 index 7e377be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_11.h5 deleted file mode 100644 index 3939b89..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_12.h5 deleted file mode 100644 index 85d546c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_13.h5 deleted file mode 100644 index 3c2fb95..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_14.h5 deleted file mode 100644 index 7b8b432..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_2.h5 deleted file mode 100644 index e7657b0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_3.h5 deleted file mode 100644 
index c033506..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_4.h5 deleted file mode 100644 index 2e9fe8c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_5.h5 deleted file mode 100644 index 08864b0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_6.h5 deleted file mode 100644 index a4c03dc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_7.h5 deleted file mode 100644 index d9d123c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_8.h5 deleted file mode 100644 index 66007ca..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient092_frame01_slice_9.h5 deleted file mode 100644 index b1f3fbb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_0.h5 deleted file mode 100644 index e479a5a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_1.h5 deleted file mode 100644 index a3660a0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_10.h5 deleted file mode 100644 index d3eda8a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_11.h5 deleted file mode 100644 index ccb5ac3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_12.h5 deleted file mode 100644 index 05acc95..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_13.h5 deleted file mode 100644 index abfcba9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_13.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_14.h5 deleted file mode 100644 index 6a70bbc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_2.h5 deleted file mode 100644 index 9dce7b8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_3.h5 deleted file mode 100644 index a8c978e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_4.h5 deleted file mode 100644 index 48f5926..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_5.h5 deleted file mode 100644 index d5a50ae..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_6.h5 deleted file mode 100644 index 67099e3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_7.h5 deleted file mode 100644 index 23f673e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_8.h5 deleted file mode 100644 index 32005aa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient092_frame06_slice_9.h5 deleted file mode 100644 index 522377e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient092_frame06_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_0.h5 deleted file mode 100644 index 2520878..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_1.h5 deleted file mode 100644 index 1de6086..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_2.h5 deleted file mode 100644 index 8704061..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_3.h5 deleted file mode 100644 index 
9e5a292..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_4.h5 deleted file mode 100644 index c5ea5d0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_5.h5 deleted file mode 100644 index 9ef7241..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_6.h5 deleted file mode 100644 index 9d60bd1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_7.h5 deleted file mode 100644 index 33112c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_8.h5 deleted file mode 100644 index 1a262d1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient093_frame01_slice_9.h5 deleted file mode 100644 index 00a47c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_0.h5 deleted file mode 100644 index 01bbb2e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_1.h5 deleted file mode 100644 index c826583..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_2.h5 deleted file mode 100644 index 4dab8c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_3.h5 deleted file mode 100644 index 37b292c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_4.h5 deleted file mode 100644 index 3d3efd0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_5.h5 deleted file mode 100644 index b79fc71..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_5.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_6.h5 deleted file mode 100644 index 979177f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_7.h5 deleted file mode 100644 index 861ff4d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_8.h5 deleted file mode 100644 index ed6b982..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient093_frame14_slice_9.h5 deleted file mode 100644 index 60ced54..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient093_frame14_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_0.h5 deleted file mode 100644 index 7475613..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_1.h5 deleted file mode 100644 index 3260df6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_2.h5 deleted file mode 100644 index a495e84..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_3.h5 deleted file mode 100644 index 1a05987..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_4.h5 deleted file mode 100644 index 6100959..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_5.h5 deleted file mode 100644 index c3854de..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_6.h5 deleted file mode 100644 index 31c20e1..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_7.h5 deleted file mode 100644 index 2fef2fe..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_8.h5 deleted file mode 100644 index 
75e7c21..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient094_frame01_slice_9.h5 deleted file mode 100644 index c172302..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_0.h5 deleted file mode 100644 index a8d63ab..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_1.h5 deleted file mode 100644 index 4d3c435..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_2.h5 deleted file mode 100644 index 8572915..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_3.h5 deleted file mode 100644 index 1d45cbd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_4.h5 deleted file mode 100644 index a9d1e4c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_5.h5 deleted file mode 100644 index 2f2f57c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_6.h5 deleted file mode 100644 index 453346e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_7.h5 deleted file mode 100644 index f65b0eb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_8.h5 deleted file mode 100644 index b978aaa..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient094_frame07_slice_9.h5 deleted file mode 100644 index 8d67eeb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient094_frame07_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_0.h5 deleted file mode 100644 index 6ff2e86..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_0.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_1.h5 deleted file mode 100644 index 2157155..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_10.h5 deleted file mode 100644 index 316bbc2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_11.h5 deleted file mode 100644 index 7d095cd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_12.h5 deleted file mode 100644 index 2c7bbc7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_13.h5 deleted file mode 100644 index 5f82ee5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_2.h5 deleted file mode 100644 index e9b956a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_3.h5 deleted file mode 100644 index 6467a63..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_4.h5 deleted file mode 100644 index 7d888e8..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_5.h5 deleted file mode 100644 index 8390130..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_6.h5 deleted file mode 100644 index 6908ef3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_7.h5 deleted file mode 100644 index cd54417..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_8.h5 deleted file mode 100644 index 53fe2ff..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient095_frame01_slice_9.h5 deleted file mode 100644 index 
235c268..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_0.h5 deleted file mode 100644 index 7e3ed46..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_1.h5 deleted file mode 100644 index f923c34..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_10.h5 deleted file mode 100644 index 6d67991..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_11.h5 deleted file mode 100644 index 4dbb6c3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_12.h5 deleted file mode 100644 index 8a61f1d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_13.h5 deleted file mode 100644 index 40a676a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_2.h5 deleted file mode 100644 index 502e3c4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_3.h5 deleted file mode 100644 index 22738c7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_4.h5 deleted file mode 100644 index 239d224..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_5.h5 deleted file mode 100644 index 0ea7d3f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_6.h5 deleted file mode 100644 index 9304ee5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_7.h5 deleted file mode 100644 index 31410da..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_7.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_8.h5 deleted file mode 100644 index 38ee178..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient095_frame12_slice_9.h5 deleted file mode 100644 index 3c8ccc9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient095_frame12_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_0.h5 deleted file mode 100644 index e1ac63d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_1.h5 deleted file mode 100644 index b2d4a25..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_10.h5 deleted file mode 100644 index 79bebb4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_11.h5 deleted file mode 100644 index 8801111..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_12.h5 deleted file mode 100644 index 9ce3d27..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_13.h5 deleted file mode 100644 index cb710d6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_14.h5 deleted file mode 100644 index 50a857e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_15.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_15.h5 deleted file mode 100644 index 66199da..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_16.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_16.h5 deleted file mode 100644 index 64c24fd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_16.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_17.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_17.h5 deleted file mode 100644 index 9b88294..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_17.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_2.h5 deleted file mode 
100644 index 71c055d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_3.h5 deleted file mode 100644 index 7eead1e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_4.h5 deleted file mode 100644 index e735e30..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_5.h5 deleted file mode 100644 index 0d184be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_6.h5 deleted file mode 100644 index 6211ed7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_7.h5 deleted file mode 100644 index 3f95842..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_8.h5 deleted file mode 100644 index c5cb7fe..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient096_frame01_slice_9.h5 deleted file mode 100644 index 1db34b2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_0.h5 deleted file mode 100644 index 522099a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_1.h5 deleted file mode 100644 index 1bdc6a9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_10.h5 deleted file mode 100644 index 8c476ac..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_11.h5 deleted file mode 100644 index 4f0e654..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_12.h5 deleted file mode 100644 index 6d5bec0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_12.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_13.h5 deleted file mode 100644 index 14f16be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_14.h5 deleted file mode 100644 index 96f4960..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_15.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_15.h5 deleted file mode 100644 index 359b6c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_16.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_16.h5 deleted file mode 100644 index 238e28b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_16.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_17.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_17.h5 deleted file mode 100644 index 15d50cb..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_17.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_2.h5 deleted file mode 100644 index 483be0f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_3.h5 deleted file mode 100644 index a86b189..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_4.h5 deleted file mode 100644 index 8211130..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_5.h5 deleted file mode 100644 index 4d6c91e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_6.h5 deleted file mode 100644 index 2e8b823..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_7.h5 deleted file mode 100644 index e48fe42..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_8.h5 deleted file mode 100644 index 7a674b5..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient096_frame08_slice_9.h5 deleted file mode 100644 
index bfbc662..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient096_frame08_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_0.h5 deleted file mode 100644 index 9aff2ba..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_1.h5 deleted file mode 100644 index a4c5538..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_2.h5 deleted file mode 100644 index d78979b..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_3.h5 deleted file mode 100644 index 97c0568..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_4.h5 deleted file mode 100644 index 55b2b1a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_5.h5 deleted file mode 100644 index 0f0f433..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_6.h5 deleted file mode 100644 index 76bc94c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient097_frame01_slice_7.h5 deleted file mode 100644 index 933905f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_0.h5 deleted file mode 100644 index 0daadd0..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_1.h5 deleted file mode 100644 index dcc40c2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_2.h5 deleted file mode 100644 index 5f879f3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_3.h5 deleted file mode 100644 index d03129c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_3.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_4.h5 deleted file mode 100644 index 73cb11c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_5.h5 deleted file mode 100644 index a4bb146..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_6.h5 deleted file mode 100644 index 419107d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient097_frame11_slice_7.h5 deleted file mode 100644 index e235a08..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient097_frame11_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_0.h5 deleted file mode 100644 index 4aef7be..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_1.h5 deleted file mode 100644 index 63b9c63..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_2.h5 deleted file mode 100644 index 45261bd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_3.h5 deleted file mode 100644 index 63ecf23..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_4.h5 deleted file mode 100644 index a8af5bf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_5.h5 deleted file mode 100644 index 9b126a3..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient098_frame01_slice_6.h5 deleted file mode 100644 index 9ab7819..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_0.h5 deleted file mode 100644 index e77d951..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_1.h5 deleted file mode 100644 index 
8fa6f73..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_2.h5 deleted file mode 100644 index 3583faf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_3.h5 deleted file mode 100644 index cc8884a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_4.h5 deleted file mode 100644 index dfa1ec7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_5.h5 deleted file mode 100644 index b9a5c9f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient098_frame09_slice_6.h5 deleted file mode 100644 index 089b061..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient098_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_0.h5 deleted file mode 100644 index 8d17e71..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_1.h5 deleted file mode 100644 index 328fd09..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_10.h5 deleted file mode 100644 index 5fd6837..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_11.h5 deleted file mode 100644 index 2dc962a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_12.h5 deleted file mode 100644 index 3a9b4f7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_13.h5 deleted file mode 100644 index f93a7e7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_14.h5 deleted file mode 100644 index 72f4103..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_14.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_15.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_15.h5 deleted file mode 100644 index d29ec20..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_2.h5 deleted file mode 100644 index 376fd79..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_3.h5 deleted file mode 100644 index 9312b5c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_4.h5 deleted file mode 100644 index c3f615f..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_5.h5 deleted file mode 100644 index f54f9ad..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_6.h5 deleted file mode 100644 index 804d649..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_7.h5 deleted file mode 100644 index 723301c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_8.h5 deleted file mode 100644 index 163eb95..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient099_frame01_slice_9.h5 deleted file mode 100644 index 8f6b9a6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame01_slice_9.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_0.h5 deleted file mode 100644 index d18cbcd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_1.h5 deleted file mode 100644 index a330013..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_10.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_10.h5 deleted file mode 100644 index 98692fd..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_11.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_11.h5 deleted file mode 100644 index 
e3f1035..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_12.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_12.h5 deleted file mode 100644 index b7ccf97..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_13.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_13.h5 deleted file mode 100644 index eb57bec..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_14.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_14.h5 deleted file mode 100644 index b4acbdc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_15.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_15.h5 deleted file mode 100644 index ecd3ae2..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_2.h5 deleted file mode 100644 index 55fab07..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_3.h5 deleted file mode 100644 index ed8f1d9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_4.h5 deleted file mode 100644 index bc3fecf..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_5.h5 deleted file mode 100644 index 6cfeaad..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_6.h5 deleted file mode 100644 index 793f7c9..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_7.h5 deleted file mode 100644 index 2949dee..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_8.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_8.h5 deleted file mode 100644 index 5d1b44e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_8.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_9.h5 b/data/ACDC/ACDC_training_slices/patient099_frame09_slice_9.h5 deleted file mode 100644 index c061e6c..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient099_frame09_slice_9.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_0.h5 deleted file mode 100644 index 7a31612..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_1.h5 deleted file mode 100644 index 4b171f6..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_2.h5 deleted file mode 100644 index 988b565..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_3.h5 deleted file mode 100644 index c22d20a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_4.h5 deleted file mode 100644 index b3e7d53..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_5.h5 deleted file mode 100644 index 85e210e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_6.h5 deleted file mode 100644 index e40c7ee..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient100_frame01_slice_7.h5 deleted file mode 100644 index 1ea0118..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame01_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_0.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_0.h5 deleted file mode 100644 index ed1047a..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_0.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_1.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_1.h5 deleted file mode 100644 index 2302156..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_1.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_2.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_2.h5 deleted file mode 100644 index 44079fc..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_2.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_3.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_3.h5 deleted file mode 100644 index 6970ff4..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_3.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_4.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_4.h5 deleted file mode 100644 index 
c600e02..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_4.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_5.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_5.h5 deleted file mode 100644 index 028a1b7..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_5.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_6.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_6.h5 deleted file mode 100644 index 181da3d..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_6.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_7.h5 b/data/ACDC/ACDC_training_slices/patient100_frame13_slice_7.h5 deleted file mode 100644 index 70dd65e..0000000 Binary files a/data/ACDC/ACDC_training_slices/patient100_frame13_slice_7.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient001_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient001_frame01.h5 deleted file mode 100644 index 7739c1e..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient001_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient001_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient001_frame12.h5 deleted file mode 100644 index 3d9aa6c..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient001_frame12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient002_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient002_frame01.h5 deleted file mode 100644 index c703c4b..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient002_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient002_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient002_frame12.h5 deleted file mode 100644 index d905a6a..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient002_frame12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient003_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient003_frame01.h5 deleted file mode 100644 index fb938c3..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient003_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient003_frame15.h5 b/data/ACDC/ACDC_training_volumes/patient003_frame15.h5 deleted file mode 100644 index b703be0..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient003_frame15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient004_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient004_frame01.h5 deleted file mode 100644 index ac19d2b..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient004_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient004_frame15.h5 b/data/ACDC/ACDC_training_volumes/patient004_frame15.h5 deleted file mode 100644 index 526126f..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient004_frame15.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient005_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient005_frame01.h5 deleted file mode 100644 index 5c317fa..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient005_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient005_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient005_frame13.h5 deleted file mode 100644 index 3a34a8d..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient005_frame13.h5 and 
/dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient006_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient006_frame01.h5 deleted file mode 100644 index e639eaf..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient006_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient006_frame16.h5 b/data/ACDC/ACDC_training_volumes/patient006_frame16.h5 deleted file mode 100644 index a74660b..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient006_frame16.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient007_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient007_frame01.h5 deleted file mode 100644 index 55ac024..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient007_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient007_frame07.h5 b/data/ACDC/ACDC_training_volumes/patient007_frame07.h5 deleted file mode 100644 index a7c9b32..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient007_frame07.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient008_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient008_frame01.h5 deleted file mode 100644 index b4bbe1b..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient008_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient008_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient008_frame13.h5 deleted file mode 100644 index 61b101e..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient008_frame13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient009_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient009_frame01.h5 deleted file mode 100644 index 1ee22a6..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient009_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient009_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient009_frame13.h5 deleted file mode 100644 index fab4f71..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient009_frame13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient010_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient010_frame01.h5 deleted file mode 100644 index 4b4d22f..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient010_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient010_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient010_frame13.h5 deleted file mode 100644 index a1d3003..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient010_frame13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient011_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient011_frame01.h5 deleted file mode 100644 index 5ae5a9c..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient011_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient011_frame08.h5 b/data/ACDC/ACDC_training_volumes/patient011_frame08.h5 deleted file mode 100644 index 2f1b3de..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient011_frame08.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient012_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient012_frame01.h5 deleted file mode 100644 index 98c88b6..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient012_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient012_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient012_frame13.h5 deleted file mode 
100644 index e22e006..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient012_frame13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient013_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient013_frame01.h5 deleted file mode 100644 index 2607310..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient013_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient013_frame14.h5 b/data/ACDC/ACDC_training_volumes/patient013_frame14.h5 deleted file mode 100644 index c3e6307..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient013_frame14.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient014_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient014_frame01.h5 deleted file mode 100644 index e46965c..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient014_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient014_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient014_frame13.h5 deleted file mode 100644 index 6e99a56..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient014_frame13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient015_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient015_frame01.h5 deleted file mode 100644 index 970a2de..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient015_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient015_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient015_frame10.h5 deleted file mode 100644 index afbe278..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient015_frame10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient016_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient016_frame01.h5 deleted file mode 100644 index e0d85f3..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient016_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient016_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient016_frame12.h5 deleted file mode 100644 index 998f008..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient016_frame12.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient017_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient017_frame01.h5 deleted file mode 100644 index fd3a196..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient017_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient017_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient017_frame09.h5 deleted file mode 100644 index a528039..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient017_frame09.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient018_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient018_frame01.h5 deleted file mode 100644 index b869092..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient018_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient018_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient018_frame10.h5 deleted file mode 100644 index f7448f4..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient018_frame10.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient019_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient019_frame01.h5 deleted file mode 100644 index 221d579..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient019_frame01.h5 and /dev/null differ diff --git 
a/data/ACDC/ACDC_training_volumes/patient019_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient019_frame11.h5 deleted file mode 100644 index 6da50d6..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient019_frame11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient020_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient020_frame01.h5 deleted file mode 100644 index c9f9c78..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient020_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient020_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient020_frame11.h5 deleted file mode 100644 index bb15459..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient020_frame11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient021_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient021_frame01.h5 deleted file mode 100644 index d1554c1..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient021_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient021_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient021_frame13.h5 deleted file mode 100644 index cef5a29..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient021_frame13.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient022_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient022_frame01.h5 deleted file mode 100644 index 59cda89..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient022_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient022_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient022_frame11.h5 deleted file mode 100644 index 51bb540..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient022_frame11.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient023_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient023_frame01.h5 deleted file mode 100644 index 93d69ee..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient023_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient023_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient023_frame09.h5 deleted file mode 100644 index 7475b3d..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient023_frame09.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient024_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient024_frame01.h5 deleted file mode 100644 index b6dd49a..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient024_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient024_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient024_frame09.h5 deleted file mode 100644 index 6f6f026..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient024_frame09.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient025_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient025_frame01.h5 deleted file mode 100644 index 1fb23aa..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient025_frame01.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient025_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient025_frame09.h5 deleted file mode 100644 index e52310d..0000000 Binary files a/data/ACDC/ACDC_training_volumes/patient025_frame09.h5 and /dev/null differ diff --git a/data/ACDC/ACDC_training_volumes/patient026_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient026_frame01.h5 deleted file mode 100644 index f7b48b7..0000000 
Binary files a/data/ACDC/ACDC_training_volumes/patient026_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient026_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient026_frame12.h5
deleted file mode 100644
index 870653d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient026_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient027_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient027_frame01.h5
deleted file mode 100644
index 46f9f02..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient027_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient027_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient027_frame11.h5
deleted file mode 100644
index 3eece80..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient027_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient028_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient028_frame01.h5
deleted file mode 100644
index 4d202c4..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient028_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient028_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient028_frame09.h5
deleted file mode 100644
index 4209e79..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient028_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient029_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient029_frame01.h5
deleted file mode 100644
index f114fe8..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient029_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient029_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient029_frame12.h5
deleted file mode 100644
index a5b8188..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient029_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient030_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient030_frame01.h5
deleted file mode 100644
index 2eed744..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient030_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient030_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient030_frame12.h5
deleted file mode 100644
index 381f33b..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient030_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient031_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient031_frame01.h5
deleted file mode 100644
index 8621373..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient031_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient031_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient031_frame10.h5
deleted file mode 100644
index 4cdbcb1..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient031_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient032_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient032_frame01.h5
deleted file mode 100644
index e40dc25..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient032_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient032_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient032_frame12.h5
deleted file mode 100644
index 079ee0e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient032_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient033_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient033_frame01.h5
deleted file mode 100644
index 67a4e78..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient033_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient033_frame14.h5 b/data/ACDC/ACDC_training_volumes/patient033_frame14.h5
deleted file mode 100644
index b973537..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient033_frame14.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient034_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient034_frame01.h5
deleted file mode 100644
index 0615571..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient034_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient034_frame16.h5 b/data/ACDC/ACDC_training_volumes/patient034_frame16.h5
deleted file mode 100644
index a6faa88..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient034_frame16.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient035_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient035_frame01.h5
deleted file mode 100644
index 0825ddf..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient035_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient035_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient035_frame11.h5
deleted file mode 100644
index ec05d89..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient035_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient036_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient036_frame01.h5
deleted file mode 100644
index b37c481..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient036_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient036_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient036_frame12.h5
deleted file mode 100644
index cb339ee..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient036_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient037_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient037_frame01.h5
deleted file mode 100644
index 9a72ab8..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient037_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient037_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient037_frame12.h5
deleted file mode 100644
index 43f16e7..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient037_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient038_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient038_frame01.h5
deleted file mode 100644
index 78f2c91..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient038_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient038_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient038_frame11.h5
deleted file mode 100644
index e08539f..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient038_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient039_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient039_frame01.h5
deleted file mode 100644
index 0e59bf4..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient039_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient039_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient039_frame10.h5
deleted file mode 100644
index f3dcc30..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient039_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient040_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient040_frame01.h5
deleted file mode 100644
index 924a592..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient040_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient040_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient040_frame13.h5
deleted file mode 100644
index c5f021c..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient040_frame13.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient041_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient041_frame01.h5
deleted file mode 100644
index eb7cd58..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient041_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient041_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient041_frame11.h5
deleted file mode 100644
index a408616..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient041_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient042_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient042_frame01.h5
deleted file mode 100644
index 9070e7d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient042_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient042_frame16.h5 b/data/ACDC/ACDC_training_volumes/patient042_frame16.h5
deleted file mode 100644
index 84a6a18..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient042_frame16.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient043_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient043_frame01.h5
deleted file mode 100644
index b1eaf97..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient043_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient043_frame07.h5 b/data/ACDC/ACDC_training_volumes/patient043_frame07.h5
deleted file mode 100644
index d35df4a..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient043_frame07.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient044_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient044_frame01.h5
deleted file mode 100644
index 8fd5811..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient044_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient044_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient044_frame11.h5
deleted file mode 100644
index f6928c7..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient044_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient045_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient045_frame01.h5
deleted file mode 100644
index e4821dc..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient045_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient045_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient045_frame13.h5
deleted file mode 100644
index 4cbd7db..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient045_frame13.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient046_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient046_frame01.h5
deleted file mode 100644
index 3eb3a34..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient046_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient046_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient046_frame10.h5
deleted file mode 100644
index c4555d3..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient046_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient047_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient047_frame01.h5
deleted file mode 100644
index 0229206..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient047_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient047_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient047_frame09.h5
deleted file mode 100644
index 51dad86..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient047_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient048_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient048_frame01.h5
deleted file mode 100644
index 7a47286..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient048_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient048_frame08.h5 b/data/ACDC/ACDC_training_volumes/patient048_frame08.h5
deleted file mode 100644
index 54e4f12..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient048_frame08.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient049_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient049_frame01.h5
deleted file mode 100644
index f84fd41..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient049_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient049_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient049_frame11.h5
deleted file mode 100644
index 3a587a7..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient049_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient050_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient050_frame01.h5
deleted file mode 100644
index 9dbe820..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient050_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient050_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient050_frame12.h5
deleted file mode 100644
index f284d39..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient050_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient051_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient051_frame01.h5
deleted file mode 100644
index 1ea9e01..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient051_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient051_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient051_frame11.h5
deleted file mode 100644
index e2e5f32..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient051_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient052_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient052_frame01.h5
deleted file mode 100644
index 3701183..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient052_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient052_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient052_frame09.h5
deleted file mode 100644
index e56db80..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient052_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient053_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient053_frame01.h5
deleted file mode 100644
index f4092ac..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient053_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient053_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient053_frame12.h5
deleted file mode 100644
index c6f33b6..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient053_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient054_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient054_frame01.h5
deleted file mode 100644
index 62136c6..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient054_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient054_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient054_frame12.h5
deleted file mode 100644
index 9a561f1..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient054_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient055_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient055_frame01.h5
deleted file mode 100644
index 36d57c5..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient055_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient055_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient055_frame10.h5
deleted file mode 100644
index 203ebe4..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient055_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient056_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient056_frame01.h5
deleted file mode 100644
index 0315619..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient056_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient056_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient056_frame12.h5
deleted file mode 100644
index 240c469..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient056_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient057_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient057_frame01.h5
deleted file mode 100644
index 11276d3..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient057_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient057_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient057_frame09.h5
deleted file mode 100644
index 22c414b..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient057_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient058_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient058_frame01.h5
deleted file mode 100644
index d810ca0..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient058_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient058_frame14.h5 b/data/ACDC/ACDC_training_volumes/patient058_frame14.h5
deleted file mode 100644
index 55dc070..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient058_frame14.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient059_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient059_frame01.h5
deleted file mode 100644
index 267a16b..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient059_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient059_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient059_frame09.h5
deleted file mode 100644
index 3a2e615..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient059_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient060_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient060_frame01.h5
deleted file mode 100644
index f8bedc4..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient060_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient060_frame14.h5 b/data/ACDC/ACDC_training_volumes/patient060_frame14.h5
deleted file mode 100644
index 62ffd15..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient060_frame14.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient061_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient061_frame01.h5
deleted file mode 100644
index c93c889..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient061_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient061_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient061_frame10.h5
deleted file mode 100644
index 38fb4cd..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient061_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient062_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient062_frame01.h5
deleted file mode 100644
index 488f3cf..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient062_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient062_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient062_frame09.h5
deleted file mode 100644
index 67f7e18..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient062_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient063_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient063_frame01.h5
deleted file mode 100644
index 4beb2aa..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient063_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient063_frame16.h5 b/data/ACDC/ACDC_training_volumes/patient063_frame16.h5
deleted file mode 100644
index 861c06d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient063_frame16.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient064_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient064_frame01.h5
deleted file mode 100644
index 23795a2..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient064_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient064_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient064_frame12.h5
deleted file mode 100644
index 5f1421e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient064_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient065_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient065_frame01.h5
deleted file mode 100644
index 8f017c3..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient065_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient065_frame14.h5 b/data/ACDC/ACDC_training_volumes/patient065_frame14.h5
deleted file mode 100644
index 4e15d72..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient065_frame14.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient066_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient066_frame01.h5
deleted file mode 100644
index 9ed500a..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient066_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient066_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient066_frame11.h5
deleted file mode 100644
index bccf771..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient066_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient067_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient067_frame01.h5
deleted file mode 100644
index d460782..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient067_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient067_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient067_frame10.h5
deleted file mode 100644
index 2103e54..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient067_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient068_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient068_frame01.h5
deleted file mode 100644
index 5514048..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient068_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient068_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient068_frame12.h5
deleted file mode 100644
index 3e6dd0d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient068_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient069_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient069_frame01.h5
deleted file mode 100644
index 657a3b4..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient069_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient069_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient069_frame12.h5
deleted file mode 100644
index 8b8cb8e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient069_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient070_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient070_frame01.h5
deleted file mode 100644
index 1fbdc6e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient070_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient070_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient070_frame10.h5
deleted file mode 100644
index aeac3e3..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient070_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient071_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient071_frame01.h5
deleted file mode 100644
index 2f81ae6..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient071_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient071_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient071_frame09.h5
deleted file mode 100644
index 81d9511..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient071_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient072_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient072_frame01.h5
deleted file mode 100644
index 35c2d3e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient072_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient072_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient072_frame11.h5
deleted file mode 100644
index 36e103b..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient072_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient073_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient073_frame01.h5
deleted file mode 100644
index 8636aed..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient073_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient073_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient073_frame10.h5
deleted file mode 100644
index 8f3ebd8..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient073_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient074_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient074_frame01.h5
deleted file mode 100644
index 10c8b64..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient074_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient074_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient074_frame12.h5
deleted file mode 100644
index e6c2857..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient074_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient075_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient075_frame01.h5
deleted file mode 100644
index 020c203..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient075_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient075_frame06.h5 b/data/ACDC/ACDC_training_volumes/patient075_frame06.h5
deleted file mode 100644
index 63f3e9f..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient075_frame06.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient076_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient076_frame01.h5
deleted file mode 100644
index 1b09acd..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient076_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient076_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient076_frame12.h5
deleted file mode 100644
index 385e1c9..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient076_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient077_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient077_frame01.h5
deleted file mode 100644
index 98547f9..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient077_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient077_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient077_frame09.h5
deleted file mode 100644
index 3a65025..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient077_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient078_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient078_frame01.h5
deleted file mode 100644
index 7ddb8e9..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient078_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient078_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient078_frame09.h5
deleted file mode 100644
index 35d3a6f..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient078_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient079_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient079_frame01.h5
deleted file mode 100644
index a302d74..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient079_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient079_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient079_frame11.h5
deleted file mode 100644
index c13b417..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient079_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient080_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient080_frame01.h5
deleted file mode 100644
index 36df293..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient080_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient080_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient080_frame10.h5
deleted file mode 100644
index 43c835f..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient080_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient081_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient081_frame01.h5
deleted file mode 100644
index 0e725db..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient081_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient081_frame07.h5 b/data/ACDC/ACDC_training_volumes/patient081_frame07.h5
deleted file mode 100644
index 555cf31..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient081_frame07.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient082_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient082_frame01.h5
deleted file mode 100644
index 9366b5f..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient082_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient082_frame07.h5 b/data/ACDC/ACDC_training_volumes/patient082_frame07.h5
deleted file mode 100644
index 7c6d1bf..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient082_frame07.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient083_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient083_frame01.h5
deleted file mode 100644
index 5bf4eec..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient083_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient083_frame08.h5 b/data/ACDC/ACDC_training_volumes/patient083_frame08.h5
deleted file mode 100644
index b483567..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient083_frame08.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient084_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient084_frame01.h5
deleted file mode 100644
index e4192ff..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient084_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient084_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient084_frame10.h5
deleted file mode 100644
index 826c0fe..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient084_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient085_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient085_frame01.h5
deleted file mode 100644
index b18132e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient085_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient085_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient085_frame09.h5
deleted file mode 100644
index ef246c5..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient085_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient086_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient086_frame01.h5
deleted file mode 100644
index a43a697..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient086_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient086_frame08.h5 b/data/ACDC/ACDC_training_volumes/patient086_frame08.h5
deleted file mode 100644
index 417b320..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient086_frame08.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient087_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient087_frame01.h5
deleted file mode 100644
index 0cefa48..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient087_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient087_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient087_frame10.h5
deleted file mode 100644
index de75307..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient087_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient088_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient088_frame01.h5
deleted file mode 100644
index 7aa240f..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient088_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient088_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient088_frame12.h5
deleted file mode 100644
index 12f8fa6..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient088_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient089_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient089_frame01.h5
deleted file mode 100644
index 2a9ef7b..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient089_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient089_frame10.h5 b/data/ACDC/ACDC_training_volumes/patient089_frame10.h5
deleted file mode 100644
index df9beeb..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient089_frame10.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient090_frame04.h5 b/data/ACDC/ACDC_training_volumes/patient090_frame04.h5
deleted file mode 100644
index 63157fe..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient090_frame04.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient090_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient090_frame11.h5
deleted file mode 100644
index 2efc80a..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient090_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient091_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient091_frame01.h5
deleted file mode 100644
index e955454..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient091_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient091_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient091_frame09.h5
deleted file mode 100644
index ef17454..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient091_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient092_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient092_frame01.h5
deleted file mode 100644
index cdbab6d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient092_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient092_frame06.h5 b/data/ACDC/ACDC_training_volumes/patient092_frame06.h5
deleted file mode 100644
index 806c8de..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient092_frame06.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient093_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient093_frame01.h5
deleted file mode 100644
index 91df96a..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient093_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient093_frame14.h5 b/data/ACDC/ACDC_training_volumes/patient093_frame14.h5
deleted file mode 100644
index afbb46d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient093_frame14.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient094_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient094_frame01.h5
deleted file mode 100644
index d1fc191..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient094_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient094_frame07.h5 b/data/ACDC/ACDC_training_volumes/patient094_frame07.h5
deleted file mode 100644
index fb1c91d..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient094_frame07.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient095_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient095_frame01.h5
deleted file mode 100644
index 2619af0..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient095_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient095_frame12.h5 b/data/ACDC/ACDC_training_volumes/patient095_frame12.h5
deleted file mode 100644
index 61f59e2..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient095_frame12.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient096_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient096_frame01.h5
deleted file mode 100644
index 4cc5e0e..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient096_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient096_frame08.h5 b/data/ACDC/ACDC_training_volumes/patient096_frame08.h5
deleted file mode 100644
index ea17682..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient096_frame08.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient097_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient097_frame01.h5
deleted file mode 100644
index 4d14461..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient097_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient097_frame11.h5 b/data/ACDC/ACDC_training_volumes/patient097_frame11.h5
deleted file mode 100644
index 1d0ab3c..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient097_frame11.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient098_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient098_frame01.h5
deleted file mode 100644
index 2474329..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient098_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient098_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient098_frame09.h5
deleted file mode 100644
index ec67675..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient098_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient099_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient099_frame01.h5
deleted file mode 100644
index 870ae17..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient099_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient099_frame09.h5 b/data/ACDC/ACDC_training_volumes/patient099_frame09.h5
deleted file mode 100644
index 7ad0f22..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient099_frame09.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient100_frame01.h5 b/data/ACDC/ACDC_training_volumes/patient100_frame01.h5
deleted file mode 100644
index 6d77453..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient100_frame01.h5 and /dev/null differ
diff --git a/data/ACDC/ACDC_training_volumes/patient100_frame13.h5 b/data/ACDC/ACDC_training_volumes/patient100_frame13.h5
deleted file mode 100644
index e23c9fd..0000000
Binary files a/data/ACDC/ACDC_training_volumes/patient100_frame13.h5 and /dev/null differ