# --- dataset-export residue (not Python): column header "code",
# "stringlengths", and stats 17 / 6.64M from the corpus dump ---
def dataloader_msrvtt_test(args, tokenizer, subset='test'):
    """Build the MSR-VTT evaluation DataLoader.

    Args:
        args: namespace providing anno_path, video_path, max_words, max_frames,
            video_framerate, batch_size_val, world_size, workers.
        tokenizer: text tokenizer forwarded to the dataset.
        subset: annotation split to load (default 'test').

    Returns:
        (DataLoader, number of samples) tuple.
    """
    msrvtt_testset = MSRVTTDataset(subset=subset, anno_path=args.anno_path,
                                   video_path=args.video_path, max_words=args.max_words,
                                   tokenizer=tokenizer, max_frames=args.max_frames,
                                   video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_testset)
    except Exception:
        # Fix: was a bare `except:` (also swallowed SystemExit/KeyboardInterrupt).
        # DistributedSampler fails when no process group is initialized
        # (single-process evaluation) — fall back to sequential sampling.
        test_sampler = None
    dataloader_msrvtt = DataLoader(msrvtt_testset,
                                   batch_size=(args.batch_size_val // args.world_size),
                                   num_workers=args.workers,
                                   shuffle=False,
                                   sampler=test_sampler,
                                   drop_last=False)
    return (dataloader_msrvtt, len(msrvtt_testset))
def dataloader_lsmdc_train(args, tokenizer):
    """Build the LSMDC training DataLoader plus its DDP sampler."""
    dataset = LsmdcDataset(subset='train', anno_path=args.anno_path,
                           video_path=args.video_path, max_words=args.max_words,
                           tokenizer=tokenizer, max_frames=args.max_frames,
                           video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    # Global batch size is divided evenly across ranks.
    loader = DataLoader(dataset,
                        batch_size=(args.batch_size // args.world_size),
                        num_workers=args.workers,
                        pin_memory=False,
                        shuffle=(sampler is None),
                        sampler=sampler,
                        drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_lsmdc_test(args, tokenizer, subset='test'):
    """Build the LSMDC evaluation DataLoader.

    Returns:
        (DataLoader, number of samples) tuple.
    """
    lsmdc_testset = LsmdcDataset(subset=subset, anno_path=args.anno_path,
                                 video_path=args.video_path, max_words=args.max_words,
                                 tokenizer=tokenizer, max_frames=args.max_frames,
                                 video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_testset)
    except Exception:
        # Fix: was a bare `except:`. No initialized process group means
        # single-process evaluation — use the default sequential sampler.
        test_sampler = None
    dataloader_lsmdc = DataLoader(lsmdc_testset,
                                  batch_size=(args.batch_size_val // args.world_size),
                                  num_workers=args.workers,
                                  shuffle=False,
                                  sampler=test_sampler,
                                  drop_last=False)
    return (dataloader_lsmdc, len(lsmdc_testset))
def dataloader_activity_train(args, tokenizer):
    """Build the ActivityNet training DataLoader plus its DDP sampler."""
    dataset = ActivityNetDataset(subset='train', data_path=args.anno_path,
                                 features_path=args.video_path, max_words=args.max_words,
                                 feature_framerate=args.video_framerate,
                                 tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    # Per-rank batch size; incomplete final batches are dropped for training.
    loader = DataLoader(dataset,
                        batch_size=(args.batch_size // args.world_size),
                        num_workers=args.workers,
                        pin_memory=False,
                        shuffle=(sampler is None),
                        sampler=sampler,
                        drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_activity_test(args, tokenizer, subset='test'):
    """Build the ActivityNet evaluation DataLoader.

    Returns:
        (DataLoader, number of samples) tuple.
    """
    activity_testset = ActivityNetDataset(subset=subset, data_path=args.anno_path,
                                          features_path=args.video_path,
                                          max_words=args.max_words,
                                          feature_framerate=args.video_framerate,
                                          tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(activity_testset)
    except Exception:
        # Fix: was a bare `except:`. Fall back to sequential sampling when
        # no distributed process group is initialized.
        test_sampler = None
    dataloader_activity = DataLoader(activity_testset,
                                     batch_size=(args.batch_size_val // args.world_size),
                                     num_workers=args.workers,
                                     shuffle=False,
                                     sampler=test_sampler,
                                     drop_last=False)
    return (dataloader_activity, len(activity_testset))
def dataloader_msvd_train(args, tokenizer):
    """Build the MSVD training DataLoader plus its DDP sampler."""
    dataset = MsvdDataset(subset='train', anno_path=args.anno_path,
                          video_path=args.video_path, max_words=args.max_words,
                          tokenizer=tokenizer, max_frames=args.max_frames,
                          video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset,
                        batch_size=(args.batch_size // args.world_size),
                        num_workers=args.workers,
                        pin_memory=False,
                        shuffle=(sampler is None),
                        sampler=sampler,
                        drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_msvd_test(args, tokenizer, subset='test'):
    """Build the MSVD evaluation DataLoader.

    Unlike the other test loaders this one uses no distributed sampler:
    MSVD evaluation runs on the full caption set in one process.
    """
    testset = MsvdDataset(subset=subset, anno_path=args.anno_path,
                          video_path=args.video_path, max_words=args.max_words,
                          tokenizer=tokenizer, max_frames=args.max_frames,
                          video_framerate=args.video_framerate, config=args)
    loader = DataLoader(testset,
                        batch_size=args.batch_size_val,
                        num_workers=args.workers,
                        shuffle=False,
                        drop_last=False)
    return (loader, len(testset))
def dataloader_didemo_train(args, tokenizer):
    """Build the DiDeMo training DataLoader plus its DDP sampler."""
    dataset = DiDeMoDataset(subset='train', data_path=args.anno_path,
                            features_path=args.video_path, max_words=args.max_words,
                            feature_framerate=args.video_framerate,
                            tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset,
                        batch_size=(args.batch_size // args.world_size),
                        num_workers=args.workers,
                        pin_memory=False,
                        shuffle=(sampler is None),
                        sampler=sampler,
                        drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """Build the DiDeMo evaluation DataLoader.

    Returns:
        (DataLoader, number of samples) tuple.
    """
    didemo_testset = DiDeMoDataset(subset=subset, data_path=args.anno_path,
                                   features_path=args.video_path,
                                   max_words=args.max_words,
                                   feature_framerate=args.video_framerate,
                                   tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(didemo_testset)
    except Exception:
        # Fix: was a bare `except:`. Fall back to sequential sampling when
        # no distributed process group is initialized.
        test_sampler = None
    dataloader_didemo = DataLoader(didemo_testset,
                                   batch_size=(args.batch_size_val // args.world_size),
                                   num_workers=args.workers,
                                   shuffle=False,
                                   sampler=test_sampler,
                                   drop_last=False)
    return (dataloader_didemo, len(didemo_testset))
class LsmdcDataset(RetrievalDataset):
    """LSMDC dataset.

    Annotation CSVs are tab-separated with six fields per line:
    clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence.
    """

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32,
                 max_frames=12, video_framerate=1, image_resolution=224,
                 mode='all', config=None):
        super(LsmdcDataset, self).__init__(subset, anno_path, video_path, tokenizer,
                                           max_words, max_frames, video_framerate,
                                           image_resolution, mode, config=config)

    def _get_anns(self, subset='train'):
        """
        video_dict: dict: video_id -> video_path
        sentences_dict: list: [(video_id, caption)] , caption (list: [text:, start, end])
        """
        video_json_path_dict = {
            'train': os.path.join(self.anno_path, 'LSMDC16_annos_training.csv'),
            'val': os.path.join(self.anno_path, 'LSMDC16_annos_val.csv'),
            'test': os.path.join(self.anno_path, 'LSMDC16_challenge_1000_publictect.csv'),
        }
        # Clips excluded from the annotation pass (presumably broken/missing
        # in the public release — carried over from the original list).
        excluded_clips = {
            '0017_Pianist_00.23.28.872-00.23.34.843',
            '0017_Pianist_00.30.36.767-00.30.38.009',
            '3064_SPARKLE_2012_01.41.07.000-01.41.11.793',
        }
        video_id_list = []
        caption_dict = {}
        with open(video_json_path_dict[self.subset], 'r') as fp:
            for line in fp:
                line_split = line.strip().split('\t')
                assert (len(line_split) == 6)
                (clip_id, start_aligned, end_aligned,
                 start_extracted, end_extracted, sentence) = line_split
                if (clip_id in excluded_clips):
                    continue
                caption_dict[len(caption_dict)] = (clip_id, (sentence, None, None))
                if (clip_id not in video_id_list):
                    video_id_list.append(clip_id)
        # Map each annotated clip id to the video file found on disk.
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(-1)])
                if (video_id_ not in video_id_list):
                    continue
                video_dict[video_id_] = os.path.join(root, video_file)
        # Keep only captions whose clip actually has a video file.
        for (clip_id, sentence) in caption_dict.values():
            if (clip_id not in video_dict):
                continue
            sentences_dict[len(sentences_dict)] = (clip_id, sentence)
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        # Fix: this log line's format string was split mid-literal in the
        # original; restored to match the MSRVTT dataset's identical message.
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
class MSRVTTDataset(RetrievalDataset):
    """MSRVTT dataset.

    Loads the MSR-VTT split CSVs (9k train / JSFUSION 1k val+test) and builds
    the video-path and caption maps returned by _get_anns.
    """

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32,
                 max_frames=12, video_framerate=1, image_resolution=224,
                 mode='all', config=None):
        super(MSRVTTDataset, self).__init__(subset, anno_path, video_path, tokenizer,
                                            max_words, max_frames, video_framerate,
                                            image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """
        video_dict: dict: video_id -> video_path
        sentences_dict: list: [(video_id, caption)] , caption (list: [text:, start, end])
        """
        # val and test intentionally share the JSFUSION 1k list.
        csv_path = {'train': join(self.anno_path, 'MSRVTT_train.9k.csv'),
                    'val': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'),
                    'test': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv')}[subset]
        if exists(csv_path):
            csv = pd.read_csv(csv_path)
        else:
            raise FileNotFoundError
        video_id_list = list(csv['video_id'].values)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        if (subset == 'train'):
            # Training captions come from the full MSRVTT_data.json; keep only
            # sentences whose video id is in the 9k train list.
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            for itm in data['sentences']:
                if (itm['video_id'] in video_id_list):
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        else:
            # val/test: one sentence per row, straight from the CSV.
            for (_, itm) in csv.iterrows():
                sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['sentence'], None, None))
                video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
class MsvdDataset(RetrievalDataset):
    """MSVD dataset loader.

    MSVD has multiple captions per video; val/test evaluation therefore
    records per-video caption boundaries in `cut_off_points`.
    """

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32,
                 max_frames=12, video_framerate=1, image_resolution=224,
                 mode='all', config=None):
        super(MsvdDataset, self).__init__(subset, anno_path, video_path, tokenizer,
                                          max_words, max_frames, video_framerate,
                                          image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Return (video_dict, sentences_dict) for the given split.

        video_dict: video_id -> video file path
        sentences_dict: index -> (video_id, (caption_text, None, None))
        """
        self.sample_len = 0
        self.cut_off_points = []  # last sentence index (inclusive) of each video
        self.multi_sentence_per_video = True
        video_id_path_dict = {}
        video_id_path_dict['train'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['val'] = os.path.join(self.anno_path, 'val_list.txt')
        video_id_path_dict['test'] = os.path.join(self.anno_path, 'test_list.txt')
        caption_file = os.path.join(self.anno_path, 'raw-captions.pkl')
        with open(video_id_path_dict[subset], 'r') as fp:
            video_ids = [itm.strip() for itm in fp.readlines()]
        with open(caption_file, 'rb') as f:
            captions = pickle.load(f)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Walk the video root, keeping files whose basename is a split video id.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(-1)])
                if (video_id_ not in video_ids):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        for video_id in video_ids:
            assert (video_id in captions)
            for cap in captions[video_id]:
                cap_txt = ' '.join(cap)  # captions are stored as token lists
                sentences_dict[len(sentences_dict)] = (video_id, (cap_txt, None, None))
            self.cut_off_points.append((len(sentences_dict) - 1))
        if ((subset == 'val') or (subset == 'test')):
            # Retrieval eval needs these counts to regroup per-video scores.
            self.sentence_num = len(sentences_dict)
            self.video_num = len(video_ids)
            assert (len(self.cut_off_points) == self.video_num)
            print('For {}, sentence number: {}'.format(subset, self.sentence_num))
            print('For {}, video number: {}'.format(subset, self.video_num))
        print('Video number: {}'.format(len(video_dict)))
        print('Total Paire: {}'.format(len(sentences_dict)))
        self.sample_len = len(sentences_dict)
        return (video_dict, sentences_dict)
def _interpolation(kwargs):
    """Pop and return the 'resample' mode; pick randomly from a sequence."""
    chosen = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(chosen, (list, tuple)):
        chosen = random.choice(chosen)
    return chosen
def _check_args_tf(kwargs):
    """Normalize PIL transform kwargs in place for the installed Pillow version."""
    # 'fillcolor' is unsupported before Pillow 5.0 — drop it there.
    if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
        del kwargs['fillcolor']
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear the image horizontally by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def shear_y(img, factor, **kwargs):
    """Shear the image vertically by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_x_rel(img, pct, **kwargs):
    """Translate horizontally by fraction `pct` of the image width."""
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_y_rel(img, pct, **kwargs):
    """Translate vertically by fraction `pct` of the image height."""
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_x_abs(img, pixels, **kwargs):
    """Translate horizontally by an absolute number of pixels."""
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_y_abs(img, pixels, **kwargs):
    """Translate vertically by an absolute number of pixels."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate `img` by `degrees`, dispatching on the installed Pillow version."""
    _check_args_tf(kwargs)
    if (_PIL_VER >= (5, 2)):
        # Pillow >= 5.2 accepts all transform kwargs on rotate() directly.
        return img.rotate(degrees, **kwargs)
    elif (_PIL_VER >= (5, 0)):
        # Pillow 5.0/5.1: emulate center rotation with an explicit affine matrix.
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = ((w / 2.0), (h / 2.0))
        # Negative angle: PIL's transform maps output -> input coordinates.
        angle = (- math.radians(degrees))
        matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
                  round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]

        def transform(x, y, matrix):
            # Apply affine (a, b, c, d, e, f) to the point (x, y).
            (a, b, c, d, e, f) = matrix
            return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
        # Shift the matrix so rotation happens about the image center.
        (matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        # Pre-5.0: only 'resample' is honoured by rotate().
        return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
    """Normalize (maximize) image contrast; extra kwargs are ignored."""
    return ImageOps.autocontrast(img)
def invert(img, **__):
    """Invert (negate) the image; extra kwargs are ignored."""
    return ImageOps.invert(img)
def equalize(img, **__):
    """Equalize the image histogram; extra kwargs are ignored."""
    return ImageOps.equalize(img)
def solarize(img, thresh, **__):
    """Invert all pixel values above `thresh`; extra kwargs are ignored."""
    return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
    """Add `add` (clamped to 255) to every pixel below `thresh`.

    Only 'L' and 'RGB' images are modified; other modes pass through.
    """
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode not in ('L', 'RGB'):
        return img
    if img.mode == 'RGB' and len(lut) == 256:
        lut = lut * 3  # replicate the table per channel
    return img.point(lut)
def posterize(img, bits_to_keep, **__):
    """Reduce each channel to `bits_to_keep` bits; >= 8 is a no-op."""
    if bits_to_keep < 8:
        return ImageOps.posterize(img, bits_to_keep)
    return img
def contrast(img, factor, **__):
    """Scale image contrast; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
def color(img, factor, **__):
    """Scale color saturation; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)
def brightness(img, factor, **__):
    """Scale brightness; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
def sharpness(img, factor, **__):
    """Scale sharpness; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
def _randomly_negate(v): 'With 50% prob, negate the value' return ((- v) if (random.random() > 0.5) else v)
def _rotate_level_to_arg(level, _hparams):
    """Map magnitude to a rotation angle in [-30, 30] degrees."""
    angle = level / _MAX_LEVEL * 30.0
    return (_randomly_negate(angle),)
def _enhance_level_to_arg(level, _hparams):
    """Map magnitude to an enhancement factor in [0.1, 1.9]."""
    factor = level / _MAX_LEVEL * 1.8 + 0.1
    return (factor,)
def _enhance_increasing_level_to_arg(level, _hparams):
    """Enhancement factor centered at 1.0; deviation grows with magnitude."""
    delta = level / _MAX_LEVEL * 0.9
    return (1.0 + _randomly_negate(delta),)
def _shear_level_to_arg(level, _hparams):
    """Map magnitude to a shear factor in [-0.3, 0.3]."""
    shear = level / _MAX_LEVEL * 0.3
    return (_randomly_negate(shear),)
def _translate_abs_level_to_arg(level, hparams):
    """Map magnitude to an absolute pixel translation with random sign."""
    max_translate = float(hparams['translate_const'])
    offset = level / _MAX_LEVEL * max_translate
    return (_randomly_negate(offset),)
def _translate_rel_level_to_arg(level, hparams):
    """Map magnitude to a relative translation fraction with random sign."""
    max_pct = hparams.get('translate_pct', 0.45)
    pct = level / _MAX_LEVEL * max_pct
    return (_randomly_negate(pct),)
def _posterize_level_to_arg(level, _hparams):
    """Map magnitude to posterize bits-kept in [0, 4]."""
    bits = int(level / _MAX_LEVEL * 4)
    return (bits,)
def _posterize_increasing_level_to_arg(level, hparams):
    """Inverted posterize range: higher magnitude keeps fewer bits."""
    (bits,) = _posterize_level_to_arg(level, hparams)
    return (4 - bits,)
def _posterize_original_level_to_arg(level, _hparams):
    """AutoAugment-style posterize: bits kept in [4, 8]."""
    bits = int(level / _MAX_LEVEL * 4) + 4
    return (bits,)
def _solarize_level_to_arg(level, _hparams):
    """Map magnitude to a solarize threshold in [0, 256]."""
    thresh = int(level / _MAX_LEVEL * 256)
    return (thresh,)
def _solarize_increasing_level_to_arg(level, _hparams):
    """Inverted solarize threshold: higher magnitude solarizes more pixels."""
    (thresh,) = _solarize_level_to_arg(level, _hparams)
    return (256 - thresh,)
def _solarize_add_level_to_arg(level, _hparams):
    """Map magnitude to a solarize-add amount in [0, 110]."""
    amount = int(level / _MAX_LEVEL * 110)
    return (amount,)
class AugmentOp():
    """
    Apply for video.

    Wraps a single named augmentation so it can be applied either to one PIL
    image or uniformly to a list of frames (same op and args for every frame).
    """

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = (hparams or _HPARAMS_DEFAULT)
        self.aug_fn = NAME_TO_OP[name]       # pixel-level op
        self.level_fn = LEVEL_TO_ARG[name]   # magnitude -> op args (may be None)
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # Fill color / resample fall back to module-level defaults.
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL),
                       'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        self.magnitude_std = self.hparams.get('magnitude_std', 0)

    def __call__(self, img_list):
        # Stochastic skip: return the input untouched with prob (1 - self.prob).
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            # Jitter the magnitude with Gaussian noise, then clamp to range.
            magnitude = random.gauss(magnitude, self.magnitude_std)
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        if isinstance(img_list, list):
            # Apply the identical augmentation to every frame of the clip.
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized per-op selection probabilities for weight set 0."""
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one weight set is currently defined
    rand_weights = _RAND_CHOICE_WEIGHTS_0
    probs = [rand_weights[name] for name in transforms]
    probs /= np.sum(probs)  # normalize to a probability distribution
    return probs
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Instantiate one AugmentOp (prob 0.5) per transform name."""
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS
    ops = []
    for name in transforms:
        ops.append(AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
class RandAugment():
    """Apply `num_layers` randomly chosen ops in sequence to an input."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # Sample with replacement only when no explicit weights are given,
        # mirroring the timm reference behaviour.
        chosen = np.random.choice(self.ops, self.num_layers,
                                  replace=self.choice_weights is None,
                                  p=self.choice_weights)
        for op in chosen:
            img = op(img)
        return img
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719

    Create a RandAugment transform.

    :param config_str: String defining configuration of random augmentation. Consists of multiple
        sections separated by dashes ('-'). The first section defines the specific variant of rand
        augment (currently only 'rand'). The remaining sections, not order specific, determine
            'm' - integer magnitude of rand augment
            'n' - integer num layers (number of transform ops selected per image)
            'w' - integer probability weight index (index of a set of weights to influence choice of op)
            'mstd' - float std deviation of magnitude noise applied
            'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
        Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
        'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    :raises ValueError: on an unrecognized config section key. (The original
        `assert NotImplementedError` never fired — an exception class is
        truthy — so typos in the config were silently ignored.)
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert (config[0] == 'rand')
    config = config[1:]
    for c in config:
        # Split section name from its numeric payload, e.g. 'm9' -> ('m', '9').
        cs = re.split('(\\d.*)', c)
        if (len(cs) < 2):
            continue
        (key, val) = cs[:2]
        if (key == 'mstd'):
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            # NOTE: bool(val) is True for any non-empty string (including '0');
            # preserved as-is for backward compatibility with existing configs.
            if bool(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            raise ValueError('Unknown RandAugment config section: {}'.format(key))
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = (None if (weight_idx is None) else _select_rand_weights(weight_idx))
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
class RawVideoExtractorCV2():
    """Decode videos with OpenCV into PIL frames or CLIP-normalized tensors."""

    def __init__(self, centercrop=False, size=224, framerate=(-1), subset='test'):
        self.centercrop = centercrop  # NOTE(review): stored but not read in this class — confirm external use
        self.size = size
        self.framerate = framerate  # frames sampled per second; <= 0 means native fps
        self.transform = self._transform(self.size)
        self.subset = subset
        # CLIP preprocessing (mean/std from the CLIP release); the train
        # variant swaps the deterministic resize/crop for random crop + flip.
        self.tsfm_dict = {
            'clip_test': Compose([
                Resize(size, interpolation=InterpolationMode.BICUBIC),
                CenterCrop(size),
                (lambda image: image.convert('RGB')),
                ToTensor(),
                Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]),
            'clip_train': Compose([
                RandomResizedCrop(size, scale=(0.5, 1.0)),
                RandomHorizontalFlip(),
                (lambda image: image.convert('RGB')),
                ToTensor(),
                Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])}
        # RandAugment applied uniformly to whole frame lists during training.
        self.aug_transform = video_transforms.create_random_augment(
            input_size=(size, size),
            auto_augment='rand-m7-n4-mstd0.5-inc1',
            interpolation='bicubic')

    def _transform(self, n_px):
        """Deterministic CLIP-style preprocessing pipeline (resize/crop/normalize)."""
        return Compose([
            Resize(n_px, interpolation=InterpolationMode.BICUBIC),
            CenterCrop(n_px),
            (lambda image: image.convert('RGB')),
            ToTensor(),
            Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None, _no_process=False):
        """Decode `video_file`, sampling `sample_fp` frames per video second.

        Returns {'video': value} where value is a stacked tensor of
        preprocessed frames, a list of PIL images when `_no_process` is True,
        or a zero scalar tensor when no frame could be decoded.
        """
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (-1)) and (end_time > start_time))
        assert (sample_fp > (-1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if (fps == 0):
            # Corrupt/unreadable file: repeat the path so it stands out in logs.
            # NOTE(review): the duration division below still divides by zero here.
            print(((video_file + '\n') * 10))
        total_duration = (((frameCount + fps) - 1) // fps)  # ceil(frames / fps) seconds
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            # Clamp the requested window to the clip length and seek to it.
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            interval = (fps // sample_fp)  # native frames between samples
        else:
            sample_fp = fps  # <= 0: take every native frame
        if (interval == 0):
            interval = 1
        # Frame offsets sampled within each one-second window.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV decodes BGR
                if _no_process:
                    images.append(Image.fromarray(frame_rgb).convert('RGB'))
                else:
                    images.append(Image.fromarray(frame_rgb))
        cap.release()
        if (len(images) > 0):
            if _no_process:
                video_data = images
            else:
                if (self.subset == 'train'):
                    # Same augmentation ops applied consistently across frames.
                    images = self.aug_transform(images)
                video_data = th.stack([preprocess(img) for img in images])
        else:
            video_data = th.zeros(1)  # sentinel for "nothing decoded"
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None, _no_process=False):
        """Decode with this extractor's default transform and framerate."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time, _no_process=_no_process)
        return image_input

    def process_raw_data(self, raw_video_data):
        """Reshape (..., C, H, W) frames into (N, 1, C, H, W) clips."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((-1), 1, tensor_size[(-3)], tensor_size[(-2)], tensor_size[(-1)])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        """Optionally reorder frames: 0 = keep, 1 = reverse, 2 = random shuffle."""
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (-1), (-1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16: compute in fp32, cast back."""

    def forward(self, x: torch.Tensor):
        original_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(original_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: multi-head self-attention + MLP with residuals.

    forward takes and returns a (x, attn_mask) tuple so that blocks can be
    chained through nn.Sequential while sharing one mask.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask=None):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ('c_fc', nn.Linear(d_model, (d_model * 4))),
            ('gelu', QuickGELU()),
            ('c_proj', nn.Linear((d_model * 4), d_model)),
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.n_head = n_head

    def attention(self, x: torch.Tensor, attn_mask_: torch.Tensor):
        """Self-attend over x; `attn_mask_` may be None.

        Fix: the original called repeat_interleave before its None check, so a
        None mask crashed with AttributeError and the guard was unreachable.
        The guard now comes first; a None mask passes through unexpanded.
        """
        if attn_mask_ is not None:
            # MultiheadAttention expects one mask row per attention head.
            attn_mask_ = attn_mask_.repeat_interleave(self.n_head, dim=0)
            attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]

    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = x + self.attention(self.ln_1(x), attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return (x, attn_mask)
class Transformer(nn.Module):
    """Stack of residual attention blocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask=None):
        super(Transformer, self).__init__()
        self.width = width
        self.layers = layers
        # NOTE: the attn_mask constructor argument is accepted but not used
        # here — the mask is supplied per forward call instead.
        blocks = [ResidualAttentionBlock(width, heads) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
        out, _ = self.resblocks((x, attn_mask))
        return out
def warmup_cosine(x, warmup=0.002):
    """Linear warmup over the first `warmup` fraction, then cosine decay in x."""
    if x < warmup:
        return x / warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Linearly increases learning rate over `warmup`*`t_total` (as provided to
    BertAdam) training steps. Learning rate is 1. afterwards."""
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Triangular schedule: peak at `warmup`*`t_total`-th step (as provided to
    BertAdam), then linear decay to zero at `t_total` (clamped at 0 after)."""
    if x < warmup:
        return x / warmup
    # Linear descent from 1.0 at x == warmup down to 0.0 at x == 1.0.
    return max((x - 1.0) / (warmup - 1.0), 0)
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params, lr=required, warmup=(-1), t_total=(-1), schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyper-parameters up front so bad configs fail loudly.
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (-1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the currently scheduled learning rate per parameter."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    # No optimization step taken yet: report a zero sentinel.
                    return [0]
                if (group['t_total'] != (-1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init: moment buffers start at zero.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                if (group['max_grad_norm'] > 0):
                    # Clip each parameter's gradient individually (not global norm).
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Exponential moving averages of the gradient and squared gradient.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                update = (next_m / (next_v.sqrt() + group['e']))
                # Decoupled weight decay ("weight decay fix"): added straight to
                # the update instead of flowing through the moment estimates.
                if (group['weight_decay'] > 0.0):
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (-1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                # NOTE: no bias correction, matching the original BERT Adam.
                state['step'] += 1
        return loss
@lru_cache()
def default_bpe():
    """Absolute path to the bundled BPE vocab file next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a signficant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    # Printable bytes keep their own codepoint ...
    byte_values = list(range(ord('!'), ord('~') + 1))
    byte_values += list(range(ord('¡'), ord('¬') + 1))
    byte_values += list(range(ord('®'), ord('ÿ') + 1))
    codepoints = byte_values[:]
    # ... every remaining byte gets a fresh codepoint starting at 256.
    extra = 0
    for byte in range(2 ** 8):
        if byte not in byte_values:
            byte_values.append(byte)
            codepoints.append(2 ** 8 + extra)
            extra += 1
    return dict(zip(byte_values, [chr(cp) for cp in codepoints]))
def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    previous = word[0]
    for symbol in word[1:]:
        pairs.add((previous, symbol))
        previous = symbol
    return pairs
def basic_clean(text):
    """Fix mojibake with ftfy and strip doubly-escaped HTML entities."""
    fixed = ftfy.fix_text(text)
    # Unescape twice so doubly-escaped entities like '&amp;amp;' resolve fully.
    return html.unescape(html.unescape(fixed)).strip()
def whitespace_clean(text):
    """Collapse whitespace runs to single spaces and trim the ends."""
    return re.sub('\\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer (CLIP-style), built from a gzipped merges file."""

    def __init__(self, bpe_path: str=default_bpe()):
        # Reversible byte <-> unicode tables so every UTF-8 byte is a valid BPE symbol.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Drop the header line; keep exactly 49152-256-2 merge rules
        # (total vocab minus byte symbols minus the two special tokens).
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocabulary order: byte symbols, byte symbols with the end-of-word
        # marker, one entry per merge, then the two special tokens.
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Lower rank = higher-priority merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes token -> BPE string; the special tokens map to themselves.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # Pre-tokenization pattern. NOTE(review): \p{L}/\p{N} need the
        # third-party `regex` module imported as `re` — confirm at file top.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder

    def bpe(self, token):
        """Apply the learned merges to one pre-token; returns its BPE symbols
        joined by spaces, with '</w>' attached to the final symbol."""
        if (token in self.cache):
            return self.cache[token]
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the best-ranked (most frequent) adjacent pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    # Copy symbols up to the next occurrence of `first`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # `first` no longer occurs: copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Return the list of BPE token ids for `text`."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw UTF-8 bytes into the BPE unicode alphabet first.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def decode(self, tokens):
        """Invert encode(): ids -> text; end-of-word markers become spaces."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text

    def tokenize(self, text):
        """Like encode(), but return BPE token strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens

    def convert_tokens_to_ids(self, tokens):
        """Map BPE token strings to their vocabulary ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
def get_world_size():
    """Number of processes in the distributed group; 1 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of the current process in the distributed group; 0 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True only on the rank-0 process (always True in non-distributed runs)."""
    return get_rank() == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training. A no-op outside multi-process runs.
    """
    if (not dist.is_available()) or (not dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if (world_size == 1):
        return [data]
    # Serialize the object into a CUDA byte tensor.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # Payload sizes can differ per rank, so gather the sizes first.
    local_size = torch.LongTensor([tensor.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # dist.all_gather requires equal-sized tensors: pad everything to max_size.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if (local_size != max_size):
        padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Strip each rank's padding and deserialize its payload.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction. Non-zero ranks receive the (unaveraged) sums,
    since dist.reduce only delivers the result to dst=0.
    """
    world_size = get_world_size()
    if (world_size < 2):
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks the values in the same order.
        names = []
        values = []
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if ((dist.get_rank() == 0) and average):
            values /= world_size
        reduced_dict = {k: v for (k, v) in zip(names, values)}
    return reduced_dict
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
    """Create a logger writing to stdout and optionally to save_dir/filename.

    Non-zero ranks get a quiet (ERROR-level, handler-less) logger so that
    only the main process emits output.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.ERROR)
    if dist_rank > 0:
        return logger

    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')

    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    # Handlers are attached directly; don't also bubble up to the root logger.
    logger.propagate = False

    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
class SmoothedValue(object):
    """Track a stream of scalars and expose smoothed statistics: median/mean
    over a sliding window, plus the running average of the whole series."""

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one observation."""
        self.deque.append(value)
        self.series.append(value)
        self.total += value
        self.count += 1

    @property
    def median(self):
        """Median over the most recent window."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the most recent window."""
        return torch.tensor(list(self.deque)).mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
class MetricLogger(object):
    """Collect named SmoothedValue meters and format them for log lines."""

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one scalar per keyword into the meter of the same name."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Lets `logger.loss` resolve to the meter registered as "loss".
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        parts = ['{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
                 for name, meter in self.meters.items()]
        return self.delimiter.join(parts)
def load_data_table(table, image_dir, corrupt_images=None):
    """Read an ADNI data table, attach image filepaths, and drop corrupt,
    missing and MCI images. Returns the remaining rows as a pandas dataframe."""
    print('Loading dataframe for', table)
    df = pd.read_csv(table)
    print('Found', len(df), 'images in table')
    df['filepath'] = df.apply(lambda row: get_image_filepath(row, image_dir), axis=1)

    # Drop images whose preprocessing failed.
    len_before = len(df)
    if corrupt_images is not None:
        keep = df.apply(lambda row: '{}/{}'.format(row['PTID'], row['Visit']) not in corrupt_images, axis=1)
        df = df[keep]
    print('Filtered out', len_before - len(df), 'of', len_before, 'images because of failed preprocessing')

    # Drop images whose files are missing on disk.
    len_before = len(df)
    df = df[map(os.path.exists, df['filepath'])]
    print('Filtered out', len_before - len(df), 'of', len_before, 'images because of missing files')

    # Keep only AD and CN; this analysis excludes MCI.
    len_before = len(df)
    df = df[df['DX'] != 'MCI']
    print('Filtered out', len_before - len(df), 'of', len_before, 'images that were MCI')

    print('Final dataframe contains', len(df), 'images from', len(df['PTID'].unique()), 'patients')
    print()
    return df
def load_data_table_3T():
    """Load the data table for all 3 Tesla images."""
    return load_data_table(table_3T, image_dir_3T, corrupt_images=corrupt_images_3T)
def load_data_table_15T():
    """Load the data table for all 1.5 Tesla images."""
    return load_data_table(table_15T, image_dir_15T, corrupt_images=corrupt_images_15T)
def load_data_table_both():
    """Load the 1.5 Tesla and 3 Tesla data tables and concatenate them."""
    tables = [load_data_table(table_15T, image_dir_15T, corrupt_images_15T),
              load_data_table(table_3T, image_dir_3T, corrupt_images_3T)]
    return pd.concat(tables)
def get_image_filepath(df_row, root_dir=''):
    """Return the filepath of the image described by one row of the data table."""
    visit = df_row['Visit'].replace(' ', '')
    scan_date = df_row['Scan.Date'].replace('/', '-')
    filename = '{}_{}_{}_{}_{}_Warped.nii.gz'.format(
        df_row['PTID'], scan_date, visit, df_row['Image.ID'], df_row['DX'])
    return os.path.join(root_dir, df_row['PTID'], visit, filename)
class ADNIDataset(Dataset):
    """
    PyTorch dataset that consists of MRI images and labels.

    Args:
        filenames (iterable of strings): The filenames of the MRI images.
        labels (iterable): The labels for the images.
        mask (array): If not None (default), images are masked by multiplying with this array.
        transform: Any transformations to apply to the images.
    """

    def __init__(self, filenames, labels, mask=None, transform=None):
        self.filenames = filenames
        self.labels = torch.LongTensor(labels)
        self.mask = mask
        self.transform = transform
        self.num_inputs = 1
        self.num_targets = 1
        # Identity normalization until fit_normalization() is called.
        self.mean = 0
        self.std = 1

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        """Return the image as a normalized float tensor and the label."""
        label = self.labels[idx]
        struct_arr = utils.load_nifti(self.filenames[idx], mask=self.mask)
        # Epsilon guards against division by zero for constant voxels.
        struct_arr = ((struct_arr - self.mean) / (self.std + 1e-10))
        struct_arr = struct_arr[None]  # add channel dimension
        struct_arr = torch.FloatTensor(struct_arr)
        if (self.transform is not None):
            struct_arr = self.transform(struct_arr)
        return (struct_arr, label)

    def image_shape(self):
        """The shape of the MRI images."""
        # Fix: previously referenced the module-level global `mask`
        # instead of this instance's mask.
        return utils.load_nifti(self.filenames[0], mask=self.mask).shape

    def fit_normalization(self, num_sample=None, show_progress=False):
        """
        Calculate the voxel-wise mean and std across the dataset for normalization.

        Args:
            num_sample (int or None): If None (default), calculate the values across
                the complete dataset, otherwise sample this many images.
            show_progress (bool): Show a progress bar during the calculation.
        """
        if (num_sample is None):
            num_sample = len(self)
        image_shape = self.image_shape()
        all_struct_arr = np.zeros((num_sample, image_shape[0], image_shape[1], image_shape[2]))
        sampled_filenames = np.random.choice(self.filenames, num_sample, replace=False)
        if show_progress:
            sampled_filenames = tqdm_notebook(sampled_filenames)
        for (i, filename) in enumerate(sampled_filenames):
            # Fix: previously referenced the module-level global `mask`
            # instead of this instance's mask.
            all_struct_arr[i] = utils.load_nifti(filename, mask=self.mask)
        self.mean = all_struct_arr.mean(0)
        self.std = all_struct_arr.std(0)

    def get_raw_image(self, idx):
        """Return the raw image at index idx (i.e. not normalized, no color channel, no transform)."""
        return utils.load_nifti(self.filenames[idx], mask=self.mask)
def print_df_stats(df, df_train, df_val):
    """Print image and patient counts (total / AD / CN) for the full, train and val tables."""
    headers = ['Images', '-> AD', '-> CN', 'Patients', '-> AD', '-> CN']

    def get_stats(frame):
        ad = frame[frame['DX'] == 'Dementia']
        cn = frame[frame['DX'] == 'CN']
        return [len(frame), len(ad), len(cn),
                len(frame['PTID'].unique()), len(ad['PTID'].unique()), len(cn['PTID'].unique())]

    rows = [['All'] + get_stats(df),
            ['Train'] + get_stats(df_train),
            ['Val'] + get_stats(df_val)]
    print(tabulate(rows, headers=headers))
    print()
def build_datasets(df, patients_train, patients_val, print_stats=True, normalize=True):
    """
    Build PyTorch datasets based on a data table and a patient-wise train-test split.

    Args:
        df (pandas dataframe): The data table from ADNI.
        patients_train (iterable of strings): The patients to include in the train set.
        patients_val (iterable of strings): The patients to include in the val set.
        print_stats (boolean): Whether to print some statistics about the datasets.
        normalize (boolean): Whether to calculate mean and std across the dataset
            for later normalization.

    Returns:
        The train and val dataset.
    """
    df_train = df[df.apply(lambda row: row['PTID'] in patients_train, axis=1)]
    df_val = df[df.apply(lambda row: row['PTID'] in patients_val, axis=1)]
    if print_stats:
        print_df_stats(df, df_train, df_val)

    def to_dataset(frame):
        # One-line helper: wrap a table slice as an ADNIDataset (AD -> 1, CN -> 0).
        filenames = np.array(frame['filepath'])
        labels = np.array(frame['DX'] == 'Dementia', dtype=int)
        return ADNIDataset(filenames, labels, mask=mask)

    train_dataset = to_dataset(df_train)
    val_dataset = to_dataset(df_val)

    if normalize:
        print('Calculating mean and std for normalization:')
        train_dataset.fit_normalization(200, show_progress=True)
        # Val data reuses the train statistics to avoid leakage.
        val_dataset.mean, val_dataset.std = train_dataset.mean, train_dataset.std
    else:
        print('Dataset is not normalized, this could dramatically decrease performance')
    return (train_dataset, val_dataset)
def build_loaders(train_dataset, val_dataset):
    """Wrap the datasets in DataLoaders: shuffled for train, ordered for val."""
    common = dict(batch_size=5,
                  num_workers=multiprocessing.cpu_count(),
                  pin_memory=torch.cuda.is_available())
    train_loader = DataLoader(train_dataset, shuffle=True, **common)
    val_loader = DataLoader(val_dataset, shuffle=False, **common)
    return (train_loader, val_loader)
class ClassificationModel3D(nn.Module):
    """The model we use in the paper: 4 conv/bn/pool stages and 3 dense layers."""

    def __init__(self, dropout=0, dropout2=0):
        nn.Module.__init__(self)
        self.Conv_1 = nn.Conv3d(1, 8, 3)
        self.Conv_1_bn = nn.BatchNorm3d(8)
        self.Conv_2 = nn.Conv3d(8, 16, 3)
        self.Conv_2_bn = nn.BatchNorm3d(16)
        self.Conv_3 = nn.Conv3d(16, 32, 3)
        self.Conv_3_bn = nn.BatchNorm3d(32)
        self.Conv_4 = nn.Conv3d(32, 64, 3)
        self.Conv_4_bn = nn.BatchNorm3d(64)
        self.dense_1 = nn.Linear(5120, 128)
        self.dense_2 = nn.Linear(128, 64)
        self.dense_3 = nn.Linear(64, 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout2)

    def forward(self, x):
        h = x
        # Four conv -> bn -> relu -> max-pool stages with alternating pool sizes.
        stages = ((self.Conv_1, self.Conv_1_bn, 2),
                  (self.Conv_2, self.Conv_2_bn, 3),
                  (self.Conv_3, self.Conv_3_bn, 2),
                  (self.Conv_4, self.Conv_4_bn, 3))
        for conv, bn, pool in stages:
            h = F.max_pool3d(self.relu(bn(conv(h))), pool)
        h = h.view(h.size(0), -1)
        h = self.dropout(h)
        h = self.relu(self.dense_1(h))
        h = self.dropout2(h)
        h = self.relu(self.dense_2(h))
        return self.dense_3(h)
class KorolevModel(nn.Module):
    """The model used in Korolev et al. 2017 (https://arxiv.org/abs/1701.06643)."""

    def __init__(self):
        nn.Module.__init__(self)
        self.relu = nn.ReLU()
        # Four VGG-like 3D conv stages, built stage by stage.
        layers = []
        layers += [nn.Conv3d(1, 8, 3), self.relu, nn.Conv3d(8, 8, 3), self.relu,
                   nn.BatchNorm3d(8), nn.MaxPool3d(2)]
        layers += [nn.Conv3d(8, 16, 3), self.relu, nn.Conv3d(16, 16, 3), self.relu,
                   nn.BatchNorm3d(16), nn.MaxPool3d(2)]
        layers += [nn.Conv3d(16, 32, 3), self.relu, nn.Conv3d(32, 32, 3), self.relu,
                   nn.Conv3d(32, 32, 3), self.relu, nn.BatchNorm3d(32), nn.MaxPool3d(2)]
        layers += [nn.Conv3d(32, 64, 3), self.relu, nn.Conv3d(64, 64, 3), self.relu,
                   nn.Conv3d(64, 64, 3), self.relu, nn.Conv3d(64, 64, 3), self.relu,
                   nn.BatchNorm3d(64), nn.MaxPool3d(3)]
        self.conv = nn.Sequential(*layers)
        self.fc = nn.Sequential(nn.Linear(2880, 128), self.relu, nn.Dropout(0.7),
                                nn.Linear(128, 64), self.relu, nn.Linear(64, 1))

    def forward(self, x):
        features = self.conv(x)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
def build_model():
    """Build the model as used in the paper, wrap it in a torchsample trainer and move it to cuda."""
    net = ClassificationModel3D(dropout=0.8, dropout2=0)
    optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
    loss_function = nn.CrossEntropyLoss()
    trainer = torchsample.modules.ModuleTrainer(net)
    trainer.compile(loss=loss_function,
                    optimizer=optimizer,
                    metrics=[CategoricalAccuracyWithLogits()],
                    callbacks=[])
    if torch.cuda.is_available():
        net.cuda()
        cuda_device = torch.cuda.current_device()
        print('Moved network to GPU')
    else:
        cuda_device = -1  # torchsample convention for "CPU only"
        print('GPU not available')
    return (net, trainer, cuda_device)
def train_model(trainer, train_loader, val_loader, cuda_device, num_epoch=1):
    """Train and evaluate the model via torchsample's fit_loader."""
    trainer.fit_loader(train_loader,
                       val_loader=val_loader,
                       num_epoch=num_epoch,
                       verbose=1,
                       cuda_device=cuda_device)
def calculate_roc_auc(trainer, val_loader, cuda_device):
    """ROC AUC of the trainer's argmax predictions on the validation loader."""
    logits = trainer.predict_loader(val_loader, cuda_device=cuda_device)
    y_val_pred = F.softmax(logits).data.cpu().numpy()
    y_val_true = torch.cat([y for (x, y) in val_loader]).numpy()
    # predict_loader may return fewer rows than the loader yields labels;
    # truncate the labels so the two line up.
    y_val_true = y_val_true[:len(y_val_pred)]
    return roc_auc_score(y_val_true, y_val_pred.argmax(1))
class BinaryAccuracyWithLogits(torchsample.metrics.BinaryAccuracy):
    """torchsample's BinaryAccuracy with a sigmoid applied to the network output
    first; intended for use together with BCEWithLogitsLoss."""

    def __call__(self, y_pred, y_true):
        probs = F.sigmoid(y_pred)
        return super(BinaryAccuracyWithLogits, self).__call__(probs, y_true)
class CategoricalAccuracyWithLogits(torchsample.metrics.CategoricalAccuracy):
    """torchsample's CategoricalAccuracy with a softmax applied to the network
    output first; intended for use together with CrossEntropyLoss."""

    def __call__(self, y_pred, y_true):
        probs = F.softmax(y_pred)
        return super(CategoricalAccuracyWithLogits, self).__call__(probs, y_true)
def start_instance():
    """Clone the spotty config under a unique timestamp tag, start the cloud
    instance, and launch the worker script in a detached tmux session."""
    print('Starting new instance')
    tag = str(int(time.time()))  # unix timestamp as a unique instance name
    config_name = ((('clone_' + tag) + '_') + 'spotty.yaml')
    shutil.copyfile('spotty.yaml', config_name)
    # Substitute the placeholder instance name inside the cloned config.
    os.system(((("sed -i 's/instancename/" + tag) + "/g' ") + config_name))
    os.system(('spotty start -c ' + config_name))
    os.system(((' spotty exec -c ' + config_name) + " -- tmux new-session -d -s my_session 'bash startworker.sh'"))
def download_rico(tmp_path='tmp', dataset_path='rico'):
    """Download the RICO UI dataset and unpack it into dataset_path/combined."""
    if (not os.path.exists(tmp_path)):
        os.makedirs(tmp_path)
    output_path = os.path.join(tmp_path, 'unique_uis.tar.gz')
    urllib.request.urlretrieve(DATASET_RICO_URL, output_path)
    extract_path = os.path.join(tmp_path, 'extract')
    # Two-stage extraction via 7z: .tar.gz -> .tar, then .tar -> files.
    cmd = ['7z', 'x', output_path, ('-o' + tmp_path)]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    cmd = ['7z', 'x', os.path.join(tmp_path, 'unique_uis.tar'), ('-o' + extract_path)]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    if (not os.path.exists(dataset_path)):
        os.makedirs(dataset_path)
    os.rename(os.path.join(extract_path, 'combined'), os.path.join(dataset_path, 'combined'))
    shutil.rmtree(tmp_path)  # drop the intermediate archives
def download_vins(tmp_path='tmp', dataset_path='vins'):
    """Download the VINS dataset from Google Drive and unpack it into dataset_path."""
    if (not os.path.exists(tmp_path)):
        os.makedirs(tmp_path)
    gdown.download(DATASET_VINS_URL, output=os.path.join(tmp_path, 'VINS Dataset.zip'), fuzzy=True, use_cookies=False)
    extract_path = os.path.join(tmp_path, 'extract')
    cmd = ['7z', 'x', os.path.join(tmp_path, 'VINS Dataset.zip'), ('-o' + str(extract_path))]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    # The extracted folder becomes the dataset root directly.
    os.rename(extract_path, dataset_path)
    shutil.rmtree(tmp_path)
def download_boxes_gdown(tmp_path='tmp', dataset_path='webui-boxes'):
    """Download the WebUI bounding-box archive from Google Drive and unpack it."""
    if (not os.path.exists(tmp_path)):
        os.makedirs(tmp_path)
    gdown.download(DATASET_BOXES_URL, output=os.path.join(tmp_path, 'all_boxes.zip'), fuzzy=True, use_cookies=False)
    extract_path = os.path.join(tmp_path, 'extract')
    cmd = ['7z', 'x', os.path.join(tmp_path, 'all_boxes.zip'), ('-o' + str(extract_path))]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    # The extracted folder becomes the dataset root directly.
    os.rename(extract_path, dataset_path)
    shutil.rmtree(tmp_path)
def download_enrico(tmp_path='tmp', dataset_path='enrico', screenclassification_metadata_path='../metadata/screenclassification'):
    """Download the Enrico screenshots plus the design_topics.csv metadata file."""
    if (not os.path.exists(tmp_path)):
        os.makedirs(tmp_path)
    output_path = os.path.join(tmp_path, 'screenshots.zip')
    urllib.request.urlretrieve(DATASET_ENRICO_URL, output_path)
    extract_path = os.path.join(tmp_path, 'extract')
    cmd = ['7z', 'x', output_path, ('-o' + str(extract_path))]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    if (not os.path.exists(dataset_path)):
        os.makedirs(dataset_path)
    os.rename(os.path.join(extract_path, 'screenshots'), os.path.join(dataset_path, 'screenshots'))
    shutil.rmtree(tmp_path)
    # Screen-classification metadata is fetched separately from its own URL.
    if (not os.path.exists(screenclassification_metadata_path)):
        os.makedirs(screenclassification_metadata_path)
    metadata_output_path = os.path.join(screenclassification_metadata_path, 'design_topics.csv')
    urllib.request.urlretrieve(METADATA_ENRICO_URL, metadata_output_path)
def download_metadata_gdown(metadata_key, metadata_path='../metadata'):
    """Download one metadata folder (looked up by key) from Google Drive."""
    if (not os.path.exists(metadata_path)):
        os.makedirs(metadata_path)
    gdown.download_folder(METADATA_GDRIVE_URLS[metadata_key], output=os.path.join(metadata_path, metadata_key), use_cookies=False)
def download_dataset_gdown(dataset_key, tmp_path='tmp', dataset_path='ds'):
    """Download a split-zip dataset folder from Google Drive, extract it, and
    move the per-id sample folders into dataset_path (idempotent per id)."""
    if (not os.path.exists(tmp_path)):
        os.makedirs(tmp_path)
    if (not os.path.exists(os.path.join(tmp_path, dataset_key))):
        gdown.download_folder(DATASET_GDRIVE_URLS[dataset_key], output=os.path.join(tmp_path, dataset_key), use_cookies=False)
    # The folder holds a multi-part archive (*.zip.001) plus a split json.
    extract_file = glob.glob((os.path.join(tmp_path, dataset_key) + '/*.zip.001'))[0]
    split_json_file = glob.glob((os.path.join(tmp_path, dataset_key) + '/*.json'))[0]
    if (not os.path.exists(os.path.basename(split_json_file))):
        shutil.move(split_json_file, '.')
    extract_path = os.path.join(tmp_path, 'extract')
    if (not os.path.exists(extract_path)):
        os.makedirs(extract_path)
    # 7z reassembles the remaining .zip.00N parts automatically.
    cmd = ['7z', 'x', extract_file, ('-o' + str(extract_path))]
    sp = subprocess.Popen(cmd)
    sp.communicate()
    if (not os.path.exists(dataset_path)):
        os.makedirs(dataset_path)
    dataset_ids = glob.glob((extract_path + '/*/*'))
    for folder in dataset_ids:
        # Skip ids that already exist so reruns don't clobber earlier extracts.
        if (not os.path.exists(os.path.join(dataset_path, os.path.basename(folder)))):
            os.rename(folder, os.path.join(dataset_path, os.path.basename(folder)))
    shutil.rmtree(tmp_path)
def download_model_gdown(model_name, model_key, model_path='checkpoints'):
    """Download one model checkpoint (looked up by name/key) from Google Drive."""
    if (not os.path.exists(model_path)):
        os.makedirs(model_path)
    gdown.download(MODEL_GDRIVE_URLS[model_name][model_key], output=os.path.join(model_path, model_key), fuzzy=True, use_cookies=False)
def makeOneHotVec(idx, num_classes):
    """Return a one-hot list of length num_classes with a 1 at position idx
    (all zeros when idx is out of range, matching the comparison semantics)."""
    return [int(position == idx) for position in range(num_classes)]
def collate_fn_silver(batch):
    """Merge a list of sample dicts into a dict of lists; the 'label' entry
    is converted to a LongTensor for loss computation."""
    merged = defaultdict(list)
    for sample in batch:
        for key in sample:
            merged[key].append(sample[key])
    merged['label'] = torch.tensor(merged['label'], dtype=torch.long)
    return merged
def collate_fn_silver_multi(batch):
    """Merge a list of sample dicts into a dict of lists; multi-label 'label'
    tensors are stacked into one (batch, num_labels) tensor."""
    merged = defaultdict(list)
    for sample in batch:
        for key in sample:
            merged[key].append(sample[key])
    merged['label'] = torch.stack(merged['label'], dim=0)
    return merged