# NOTE(review): removed dataset-export artifact lines ("code / stringlengths /
# 17 / 6.64M") — they were not valid Python and appear to be corpus metadata,
# not part of this module.
def _randomly_negate(v): 'With 50% prob, negate the value' return ((- v) if (random.random() > 0.5) else v)
def _rotate_level_to_arg(level, _hparams):
    """Map magnitude to a rotation angle in [-30, 30] degrees."""
    degrees = 30.0 * (level / _MAX_LEVEL)
    return (_randomly_negate(degrees),)
def _enhance_level_to_arg(level, _hparams):
    """Map magnitude to an enhancement factor in [0.1, 1.9]."""
    factor = 0.1 + 1.8 * (level / _MAX_LEVEL)
    return (factor,)
def _enhance_increasing_level_to_arg(level, _hparams):
    """Map magnitude to a factor around 1.0 whose deviation grows with level."""
    delta = 0.9 * (level / _MAX_LEVEL)
    return (1.0 + _randomly_negate(delta),)
def _shear_level_to_arg(level, _hparams):
    """Map magnitude to a shear amount in [-0.3, 0.3]."""
    amount = 0.3 * (level / _MAX_LEVEL)
    return (_randomly_negate(amount),)
def _translate_abs_level_to_arg(level, hparams):
    """Map magnitude to an absolute pixel translation, randomly signed."""
    max_shift = float(hparams['translate_const'])
    shift = max_shift * (level / _MAX_LEVEL)
    return (_randomly_negate(shift),)
def _translate_rel_level_to_arg(level, hparams):
    """Map magnitude to a relative translation fraction, randomly signed."""
    max_pct = hparams.get('translate_pct', 0.45)
    pct = max_pct * (level / _MAX_LEVEL)
    return (_randomly_negate(pct),)
def _posterize_level_to_arg(level, _hparams):
    """Map magnitude to posterize bits in [0, 4]; higher level keeps fewer bits."""
    bits = int((level / _MAX_LEVEL) * 4)
    return (bits,)
def _posterize_increasing_level_to_arg(level, hparams):
    """Posterize bits in [4, 0]; severity increases with magnitude."""
    bits = _posterize_level_to_arg(level, hparams)[0]
    return (4 - bits,)
def _posterize_original_level_to_arg(level, _hparams):
    """Posterize bits in [4, 8], matching the original AutoAugment range."""
    bits = int((level / _MAX_LEVEL) * 4)
    return (bits + 4,)
def _solarize_level_to_arg(level, _hparams):
    """Map magnitude to a solarize threshold in [0, 256]."""
    threshold = int((level / _MAX_LEVEL) * 256)
    return (threshold,)
def _solarize_increasing_level_to_arg(level, _hparams):
    """Solarize threshold in [256, 0]; severity increases with magnitude."""
    threshold = _solarize_level_to_arg(level, _hparams)[0]
    return (256 - threshold,)
def _solarize_add_level_to_arg(level, _hparams):
    """Map magnitude to a solarize additive amount in [0, 110]."""
    amount = int((level / _MAX_LEVEL) * 110)
    return (amount,)
class AugmentOp():
    """
    Apply for video.

    Wraps a single named augmentation so it can be applied, with probability
    `prob` and strength `magnitude`, to a PIL image or a list of PIL frames.
    """

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        # `name` keys into the module-level op / level-fn tables.
        hparams = (hparams or _HPARAMS_DEFAULT)
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # Fill colour / resampling are forwarded to the underlying PIL op.
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL), 'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        # If > 0, magnitude is jittered with gaussian noise on every call.
        self.magnitude_std = self.hparams.get('magnitude_std', 0)

    def __call__(self, img_list):
        # Skip the op entirely with probability (1 - prob).
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            magnitude = random.gauss(magnitude, self.magnitude_std)
        # Clamp after jitter so level_fn always sees [0, _MAX_LEVEL].
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        # The same sampled strength is applied to every frame of a clip.
        if isinstance(img_list, list):
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized per-op sampling probabilities for `transforms`."""
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one weight set is currently defined
    raw = [_RAND_CHOICE_WEIGHTS_0[name] for name in transforms]
    # Division by a numpy scalar promotes the list to an ndarray.
    return raw / np.sum(raw)
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build the list of AugmentOp instances used by RandAugment."""
    hparams = hparams or _HPARAMS_DEFAULT
    names = transforms or _RAND_TRANSFORMS
    ops = []
    for name in names:
        ops.append(AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
class RandAugment():
    """Apply `num_layers` randomly chosen ops from `ops` to an image (or clip)."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # Without weights: uniform sampling *with* replacement; with weights:
        # weighted sampling without replacement (matches original behaviour).
        chosen = np.random.choice(
            self.ops,
            self.num_layers,
            replace=self.choice_weights is None,
            p=self.choice_weights,
        )
        for op in chosen:
            img = op(img)
        return img
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719

    Create a RandAugment transform.

    :param config_str: String defining configuration of random augmentation. Consists of multiple
        sections separated by dashes ('-'). The first section defines the specific variant of
        rand augment (currently only 'rand'). The remaining sections, not order specific, determine
            'm' - integer magnitude of rand augment
            'n' - integer num layers (number of transform ops selected per image)
            'w' - integer probability weight index (index of a set of weights to influence choice of op)
            'mstd' - float std deviation of magnitude noise applied
            'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
        Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
        'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    :raises NotImplementedError: on an unrecognized config section key
    """
    magnitude = _MAX_LEVEL  # default to max; overridden by 'm'
    num_layers = 2
    weight_idx = None  # None -> uniform op sampling
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert (config[0] == 'rand')
    config = config[1:]
    for c in config:
        # Split e.g. 'm9' -> ('m', '9'); sections without digits are skipped.
        cs = re.split('(\\d.*)', c)
        if (len(cs) < 2):
            continue
        (key, val) = cs[:2]
        if (key == 'mstd'):
            # noise param injected into hparams for AugmentOp
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            # NOTE: any non-empty value (including '0') enables this branch,
            # since bool('0') is True; kept as-is for backward compatibility.
            if bool(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            # BUGFIX: was `assert NotImplementedError`, which is always truthy
            # and silently ignored unknown keys; raise explicitly instead.
            raise NotImplementedError('Unknown RandAugment config section: {}'.format(key))
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = (None if (weight_idx is None) else _select_rand_weights(weight_idx))
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
class RawVideoExtractorCV2():
    """Decode frames from a video file with OpenCV and convert them to
    CLIP-style preprocessed tensors (with RandAugment applied for training).
    """

    def __init__(self, centercrop=False, size=224, framerate=(- 1), subset='test'):
        self.centercrop = centercrop
        self.size = size
        # frames sampled per second; <= 0 means "use the native fps".
        self.framerate = framerate
        self.transform = self._transform(self.size)
        self.subset = subset
        # Alternative preprocessing pipelines keyed by usage; means/stds are
        # the CLIP normalization constants.
        self.tsfm_dict = {'clip_test': Compose([Resize(size, interpolation=InterpolationMode.BICUBIC), CenterCrop(size), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]), 'clip_train': Compose([RandomResizedCrop(size, scale=(0.5, 1.0)), RandomHorizontalFlip(), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])}
        # Clip-level RandAugment applied to the whole frame list in training.
        self.aug_transform = video_transforms.create_random_augment(input_size=(size, size), auto_augment='rand-m7-n4-mstd0.5-inc1', interpolation='bicubic')

    def _transform(self, n_px):
        # Default (test-time) preprocessing: resize, crop, CLIP normalization.
        return Compose([Resize(n_px, interpolation=InterpolationMode.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None, _no_process=False):
        """Sample `sample_fp` frames per second from `video_file` and return
        {'video': tensor} (or {'video': [PIL images]} when `_no_process`).

        `start_time`/`end_time` are optional integer seconds; both must be
        given together.
        """
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (- 1)) and (end_time > start_time))
        assert (sample_fp > (- 1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if (fps == 0):
            # NOTE(review): fps == 0 (unreadable video) is only logged here;
            # the division below will still raise ZeroDivisionError.
            print(((video_file + '\n') * 10))
        # Duration in whole seconds, rounded up.
        total_duration = (((frameCount + fps) - 1) // fps)
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            interval = (fps // sample_fp)
        else:
            sample_fp = fps
        if (interval == 0):
            interval = 1
        # Frame offsets within each second to sample, evenly spaced.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        # NOTE(review): `included` is never used after this point.
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # Seek to the exact frame index before each read.
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                # OpenCV decodes BGR; PIL expects RGB.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if _no_process:
                    images.append(Image.fromarray(frame_rgb).convert('RGB'))
                else:
                    images.append(Image.fromarray(frame_rgb))
        cap.release()
        if (len(images) > 0):
            if _no_process:
                video_data = images
            else:
                if (self.subset == 'train'):
                    # Clip-level RandAugment before per-frame preprocessing.
                    images = self.aug_transform(images)
                video_data = th.stack([preprocess(img) for img in images])
        else:
            # No decodable frames: return a dummy 1-element tensor.
            video_data = th.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None, _no_process=False):
        # Convenience wrapper using the default transform and configured fps.
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time, _no_process=_no_process)
        return image_input

    def process_raw_data(self, raw_video_data):
        # Flatten leading dims into (frames, 1, C, H, W).
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        # 0: keep order, 1: reverse frames, 2: random shuffle of frames.
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (- 1), (- 1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
def url_to_filename(url: str, etag: str=None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = filename + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return filename
def filename_to_url(filename: str, cache_dir: Union[(str, Path)]=None) -> Tuple[(str, str)]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError('file {} not found'.format(cache_path))
    # Metadata lives in a sidecar JSON next to the cached file.
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError('file {} not found'.format(meta_path))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
def cached_path(url_or_filename: Union[(str, Path)], cache_dir: Union[(str, Path)]=None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: download (or reuse) a cached copy.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if scheme == '':
        # Looks like a local path but the file is missing.
        raise FileNotFoundError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
def split_s3_path(url: str) -> Tuple[(str, str)]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError('bad s3 path {}'.format(url))
    # Drop exactly one leading '/' from the key, if present.
    key = parsed.path[1:] if parsed.path.startswith('/') else parsed.path
    return (parsed.netloc, key)
def s3_request(func: Callable):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate a 404 into the more idiomatic FileNotFoundError.
            code = int(exc.response['Error']['Code'])
            if code == 404:
                raise FileNotFoundError('file {} not found'.format(url))
            raise
    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    resource = boto3.resource('s3')
    return resource.Object(bucket_name, s3_path).e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource('s3').Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` into `temp_file`, reporting progress with tqdm."""
    response = requests.get(url, stream=True)
    length = response.headers.get('Content-Length')
    progress = tqdm(unit='B', total=int(length) if length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: Union[(str, Path)]=None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # The ETag is folded into the cache filename, so a changed remote file
    # gets a fresh cache entry.
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if (response.status_code != 200):
            raise IOError('HEAD request failed for url {} with status code {}'.format(url, response.status_code))
        etag = response.headers.get('ETag')
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if (not os.path.exists(cache_path)):
        # Download into a temp file first, then copy into the cache, so a
        # partial download never appears at cache_path.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # Flush and rewind before copying the written bytes out.
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info('creating metadata file for %s', cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    """
    with open(filename, 'r', encoding='utf-8') as file_:
        return {line.rstrip() for line in file_}
def get_file_extension(path: str, dot=True, lower: bool=True):
    """Return the extension of `path`, optionally without the dot / lowercased."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        # Normalize in float32 for numerical stability, then cast back to the
        # caller's dtype (e.g. fp16).
        original_dtype = x.dtype
        out = super().forward(x.type(torch.float32))
        return out.type(original_dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: multi-head self-attention + quick-GELU MLP,
    each with a residual connection.

    ``forward`` takes and returns a ``(x, attn_mask)`` tuple so blocks can be
    chained inside an ``nn.Sequential``.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask=None):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x expansion MLP, as in CLIP.
        self.mlp = nn.Sequential(OrderedDict([
            ('c_fc', nn.Linear(d_model, (d_model * 4))),
            ('gelu', QuickGELU()),
            ('c_proj', nn.Linear((d_model * 4), d_model)),
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.n_head = n_head

    def attention(self, x: torch.Tensor, attn_mask_: torch.Tensor):
        """Self-attention over `x`; `attn_mask_` may be None (no masking).

        BUGFIX: the original called ``repeat_interleave`` on the mask before
        its None-check, so passing ``attn_mask_=None`` raised AttributeError
        and the guard was dead code. The check now runs first.
        """
        if attn_mask_ is not None:
            # Expand a per-sample mask to the (batch * n_head, L, S) layout
            # that nn.MultiheadAttention expects for 3-D masks.
            attn_mask_ = attn_mask_.repeat_interleave(self.n_head, dim=0)
            attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]

    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = (x + self.attention(self.ln_1(x), attn_mask))
        x = (x + self.mlp(self.ln_2(x)))
        return (x, attn_mask)
class Transformer(nn.Module):
    """Stack of ResidualAttentionBlocks; the attention mask is threaded
    through every block via the (x, mask) tuple protocol."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask=None):
        super(Transformer, self).__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
        out, _ = self.resblocks((x, attn_mask))
        return out
def warmup_cosine(x, warmup=0.002):
    """Linear warmup over the first `warmup` fraction of training, then a
    cosine decay of the learning-rate multiplier as progress x goes to 1."""
    if x < warmup:
        return x / warmup
    cosine = math.cos(math.pi * x)
    return (1.0 + cosine) / 2.0
def warmup_constant(x, warmup=0.002):
    """Linearly increases learning rate over `warmup`*`t_total` (as provided
    to BertAdam) training steps. Learning rate is 1. afterwards."""
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Triangular schedule: the multiplier peaks at 1.0 when progress x equals
    `warmup`, decays linearly to zero at x == 1, and is clamped at 0 after."""
    if x < warmup:
        return x / warmup
    decayed = (x - 1.0) / (warmup - 1.0)
    return max(decayed, 0)
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyper-parameters up front so bad configs fail fast.
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        # Current scheduled learning rate(s); returns [0] before any step ran.
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init: step count plus first/second moment buffers.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                if (group['max_grad_norm'] > 0):
                    # NOTE(review): clips each parameter's gradient norm
                    # individually, not the global norm across all parameters.
                    clip_grad_norm_(p, group['max_grad_norm'])
                # In-place exponential moving averages of grad and grad^2.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # No bias correction - intentional in the BERT Adam variant.
                update = (next_m / (next_v.sqrt() + group['e']))
                if (group['weight_decay'] > 0.0):
                    # Decoupled weight decay: added to the update, not the grad.
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
@lru_cache()
def default_bpe():
    """Path to the bundled BPE vocabulary file, located next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """
    Returns a dict mapping every utf-8 byte (0..255) to a unicode character.

    The reversible BPE codes operate on unicode strings, so every byte needs
    a printable stand-in. Printable latin-1 characters map to themselves;
    the remaining bytes are mapped to code points starting at 256, which
    avoids whitespace/control characters that the BPE code barfs on.
    """
    printable = (
        list(range(ord('!'), ord('~') + 1))
        + list(range(ord('¡'), ord('¬') + 1))
        + list(range(ord('®'), ord('ÿ') + 1))
    )
    byte_values = printable[:]
    code_points = printable[:]
    offset = 0
    for b in range(256):
        if b not in byte_values:
            byte_values.append(b)
            code_points.append(256 + offset)
            offset += 1
    return dict(zip(byte_values, (chr(c) for c in code_points)))
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length
    strings).

    Improvement: uses zip over the word and its tail, which also handles
    empty and single-symbol words (returning an empty set) instead of
    raising IndexError on empty input.
    """
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Fix mojibake with ftfy and unescape (possibly doubly) HTML entities."""
    fixed = ftfy.fix_text(text)
    # Unescape twice to handle double-escaped input like '&amp;amp;'.
    return html.unescape(html.unescape(fixed)).strip()
def whitespace_clean(text):
    """Collapse all whitespace runs to single spaces and trim the ends."""
    return re.sub('\\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer, built from a gzipped merge table."""

    def __init__(self, bpe_path: str=default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the header line and cap at the vocab budget
        # (49152 total - 256 byte tokens - 2 special tokens).
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocab: byte symbols, their end-of-word variants, merge results, specials.
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Lower rank = earlier (higher-priority) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): the \p{...} classes require the third-party `regex`
        # module; `re` here is presumably `import regex as re` - confirm.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; returns the
        space-joined BPE symbols (memoized in self.cache)."""
        if (token in self.cache):
            return self.cache[token]
        # Mark the last symbol with the end-of-word suffix.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked bigram until none remain.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # `first` no longer occurs: keep the tail and stop.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Encode `text` to a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw bytes into the reversible unicode alphabet first.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def decode(self, tokens):
        """Decode a list of BPE token ids back to a string."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text

    def tokenize(self, text):
        """Like encode(), but returns BPE token strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens

    def convert_tokens_to_ids(self, tokens):
        """Map BPE token strings (from tokenize) to their vocabulary ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
class PretrainedConfig(object):
    """Base class for model configs resolvable from a local file, the class's
    archive map, or a remote URL (via cached_path)."""
    # Subclasses override these with their model-zoo specifics.
    pretrained_model_archive_map = {}
    config_name = ''
    weights_name = ''

    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        """Resolve and load (config, state_dict) for `pretrained_model_name`.
        Returns None when the archive cannot be found. Logging is done only
        on local_rank 0 (or when no task_config is given)."""
        # Prefer a file shipped next to this module, then the archive map,
        # then treat the name itself as a path or URL.
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if (os.path.exists(archive_file) is False):
            if (pretrained_model_name in cls.pretrained_model_archive_map):
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.error("Model name '{}' was not found in model name list. We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name, archive_file))
            return None
        if (resolved_archive_file == archive_file):
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('loading archive file {}'.format(archive_file))
        elif ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Tar archive: unpack into a temp dir removed at the end.
            tempdir = tempfile.mkdtemp()
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
            # NOTE(review): extractall on an untrusted archive is vulnerable
            # to path traversal - only use with trusted checkpoints.
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('Model config {}'.format(config))
        if (state_dict is None):
            # Only load weights from disk when the caller didn't supply them.
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            elif ((task_config is None) or (task_config.local_rank == 0)):
                logger.info("Weight doesn't exsits. {}".format(weights_path))
        if tempdir:
            shutil.rmtree(tempdir)
        return (config, state_dict)

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = cls(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of the current process; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True only on rank 0 (the process that should log / save checkpoints)."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if (world_size == 1):
        return [data]
    # Serialize the object into a byte tensor on the GPU.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # Exchange per-rank payload sizes so every rank can pad to the max.
    local_size = torch.LongTensor([tensor.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # dist.all_gather requires equal-sized tensors, hence the zero-padding.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if (local_size != max_size):
        padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Strip each rank's padding and unpickle its payload.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks values in the same order.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if average and dist.get_rank() == 0:
            # Only rank 0 holds the reduced sum; turn it into a mean there.
            stacked /= world_size
        return dict(zip(keys, stacked))
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
    """Create a logger that writes to stdout (and optionally a file) on rank 0.

    Non-zero ranks get the logger clamped to ERROR with no handlers attached.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.ERROR)
    if dist_rank > 0:
        return logger
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    # Avoid duplicate lines from the root logger.
    logger.propagate = False
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    return logger
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)  # recent window
        self.series = []                        # full history
        self.total = 0.0
        self.count = 0

    def update(self, value):
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    @property
    def median(self):
        window = torch.tensor(list(self.deque))
        return window.median().item()

    @property
    def avg(self):
        window = torch.tensor(list(self.deque))
        return window.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count
class MetricLogger(object):
    """Collects named SmoothedValue meters and renders them as one status line."""

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails: fall back to the
        # meters, then the instance dict.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        parts = [
            '{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(parts)
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in `topk`.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
    Returns:
        List of 1-element tensors, one percentage per requested k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Top-maxk class indices per sample, transposed to (maxk, batch).
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # correct[i, j] is True iff the (i+1)-th guess for sample j is right.
        correct = pred.eq(target.view(1, -1).expand_as(pred)).contiguous()
        res = []
        for k in topk:
            hits = correct[:k].view(-1).float().sum(0, keepdim=True)
            res.append(hits.mul_(100.0 / batch_size))
        return res
def get_a_var(obj):
    """Return the first torch.Tensor found in `obj`, searching recursively
    through lists, tuples and dict items; None if no tensor is present."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for candidate in map(get_a_var, obj):
            if isinstance(candidate, torch.Tensor):
                return candidate
    if isinstance(obj, dict):
        # items() yields (key, value) tuples; recursion inspects both.
        for candidate in map(get_a_var, obj.items()):
            if isinstance(candidate, torch.Tensor):
                return candidate
    return None
def parallel_apply(fct, model, inputs, device_ids):
    # Replicate `model` onto each device and run `fct(replica, *input)` on one
    # thread per replica, mirroring torch.nn.parallel.parallel_apply but with
    # a caller-supplied function instead of module.forward.
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input):
        # Propagate the caller's grad mode into this worker thread
        # (grad mode is thread-local in PyTorch).
        torch.set_grad_enabled(grad_enabled)
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception so it can be re-raised in the main thread
            # with replica/device context attached.
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))

    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no threading overhead.
        _worker(0, modules[0], inputs[0])
    # Collect outputs in input order, re-raising the first captured error.
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
def get_logger(filename=None):
    """Return the shared 'logger' logger, optionally adding a file handler.

    basicConfig wires a stdout handler onto the root logger; records from
    this named logger propagate up to it.
    """
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is not None:
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        # File handler goes on the root logger so every logger is captured.
        logging.getLogger().addHandler(fh)
    return log
def get_args(description='Video-Text as Game Players: Hierarchical Banzhaf Interaction for Cross-Modal Representation Learning'):
    """Parse command-line arguments for retrieval fine-tuning (HBI)."""
    parser = argparse.ArgumentParser(description=description)
    # Run modes (ints used as booleans: 0/1).
    parser.add_argument('--do_train', type=int, default=0, help='Whether to run training.')
    parser.add_argument('--do_eval', type=int, default=0, help='Whether to run evaluation.')
    # Dataset locations.
    parser.add_argument('--datatype', default='msrvtt', type=str, help='Point the dataset to finetune.')
    parser.add_argument('--anno_path', type=str, default='data/MSR-VTT/anns', help='annotation path')
    parser.add_argument('--video_path', type=str, default='data/MSR-VTT/videos', help='video path')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--workers', default=4, type=int, help='number of data loading workers (default: 4)')
    # Optimization.
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--coef_lr', type=float, default=0.001, help='coefficient for bert branch.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% of training.')
    parser.add_argument('--weight_decay', type=float, default=0.2, help='weight decay')
    parser.add_argument('--epochs', type=int, default=5, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=128, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=128, help='batch size eval')
    # Input clipping.
    parser.add_argument('--max_words', type=int, default=32, help='max text token number')
    parser.add_argument('--max_frames', type=int, default=12, help='max key frames')
    parser.add_argument('--video_framerate', type=int, default=1, help='framerate to sample video frame')
    # Distributed setup.
    parser.add_argument('--device', default='cpu', type=str, help='cpu/cuda')
    parser.add_argument('--world_size', default=1, type=int, help='distribted training')
    parser.add_argument('--local_rank', default=0, type=int, help='distribted training')
    parser.add_argument('--distributed', default=0, type=int, help='multi machine DDP')
    parser.add_argument('--n_display', type=int, default=50, help='Information display frequence')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    # Model architecture.
    parser.add_argument('--base_encoder', default='ViT-B/32', type=str, help='Choose a CLIP version')
    parser.add_argument('--agg_module', type=str, default='seqTransf', choices=['None', 'seqLSTM', 'seqTransf'], help='choice a feature aggregation module for video.')
    parser.add_argument('--interaction', type=str, default='wti', help='interaction type for retrieval.')
    parser.add_argument('--num_hidden_layers', type=int, default=4)
    parser.add_argument('--init_model', default=None, type=str, required=False, help='Initial model.')
    args = parser.parse_args()
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility and initialize distributed state.

    Mutates `args` in place (device, world_size) and returns it.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Deterministic cuDNN kernels; benchmark off to avoid nondeterministic
    # algorithm selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    if torch.cuda.is_available():
        # Assumes launch via torchrun/torch.distributed.launch (NCCL env vars set).
        torch.distributed.init_process_group(backend='nccl')
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.world_size = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    if torch.cuda.is_available():
        torch.distributed.barrier()
    logger.info('local_rank: {} world_size: {}'.format(args.local_rank, args.world_size))
    # Batch sizes must split evenly across ranks.
    if (((args.batch_size % args.world_size) != 0) or ((args.batch_size_val % args.world_size) != 0)):
        raise ValueError('Invalid batch_size/batch_size_val and world_size parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.world_size, args.batch_size_val, args.world_size))
    logger.info('Effective parameters:')
    for key in sorted(args.__dict__):
        logger.info(' <<< {}: {}'.format(key, args.__dict__[key]))
    return args
def build_model(args):
    """Construct the HBI model, optionally warm-started from `args.init_model`."""
    model = HBI(args)
    if args.init_model:
        if (not exists(args.init_model)):
            raise FileNotFoundError
        model_state_dict = torch.load(args.init_model, map_location='cpu')
        # strict=False: the checkpoint may cover only a subset of parameters.
        model.load_state_dict(model_state_dict, strict=False)
    model.to(args.device)
    return model
def build_dataloader(args):
    """Build test/val/train dataloaders for `args.datatype`.

    Falls back to the test split when no val split exists (and vice versa).
    Returns (test_dataloader, val_dataloader, train_dataloader, train_sampler);
    the last two are None when `args.do_train` is false.
    """
    tokenizer = ClipTokenizer()
    assert (args.datatype in DATALOADER_DICT)
    # At least one evaluation split must be registered for this dataset.
    assert ((DATALOADER_DICT[args.datatype]['test'] is not None) or (DATALOADER_DICT[args.datatype]['val'] is not None))
    (test_dataloader, test_length) = (None, 0)
    if (DATALOADER_DICT[args.datatype]['test'] is not None):
        (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (test_dataloader is None):
        (test_dataloader, test_length) = (val_dataloader, val_length)
    # Some datasets report a (text_count, video_count) pair instead of an int.
    if isinstance(test_length, int):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', test_length)
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info(' Num examples = %d', val_length)
    elif (len(test_length) == 2):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %dt %dv', test_length[0], test_length[1])
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d %d', len(test_dataloader[0]), len(test_dataloader[1]))
        logger.info('***** Running val *****')
        logger.info(' Num examples = %dt %dv', val_length[0], val_length[1])
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        logger.info('***** Running training *****')
        logger.info(' Num examples = %d', train_length)
        logger.info(' Batch size = %d', args.batch_size)
        logger.info(' Num steps = %d', (len(train_dataloader) * args.epochs))
    else:
        (train_dataloader, train_sampler) = (None, None)
    return (test_dataloader, val_dataloader, train_dataloader, train_sampler)
def prep_optimizer(args, model, num_train_optimization_steps, local_rank):
    """Build a BertAdam optimizer with four parameter groups and wrap the
    model in DistributedDataParallel.

    CLIP-branch parameters (name contains 'clip.') use a reduced lr
    (lr * coef_lr); bias/LayerNorm parameters get no weight decay.
    """
    if hasattr(model, 'module'):
        model = model.module
    lr = args.lr
    coef_lr = args.coef_lr
    weight_decay = args.weight_decay
    warmup_proportion = args.warmup_proportion
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # Split by decay eligibility, then by CLIP vs non-CLIP branch.
    decay_param_tp = [(n, p) for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))]
    no_decay_param_tp = [(n, p) for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))]
    decay_clip_param_tp = [(n, p) for (n, p) in decay_param_tp if ('clip.' in n)]
    decay_noclip_param_tp = [(n, p) for (n, p) in decay_param_tp if ('clip.' not in n)]
    no_decay_clip_param_tp = [(n, p) for (n, p) in no_decay_param_tp if ('clip.' in n)]
    no_decay_noclip_param_tp = [(n, p) for (n, p) in no_decay_param_tp if ('clip.' not in n)]
    optimizer_grouped_parameters = [
        {'params': [p for (n, p) in decay_clip_param_tp], 'weight_decay': weight_decay, 'lr': (lr * coef_lr)},
        {'params': [p for (n, p) in decay_noclip_param_tp], 'weight_decay': weight_decay},
        {'params': [p for (n, p) in no_decay_clip_param_tp], 'weight_decay': 0.0, 'lr': (lr * coef_lr)},
        {'params': [p for (n, p) in no_decay_noclip_param_tp], 'weight_decay': 0.0}]
    scheduler = None  # lr schedule is handled inside BertAdam (warmup_cosine)
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=warmup_proportion, schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-06, t_total=num_train_optimization_steps, weight_decay=weight_decay, max_grad_norm=1.0)
    if torch.cuda.is_available():
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    return (optimizer, scheduler, model)
def save_model(epoch, args, model, type_name=''):
    """Save a checkpoint to `output_dir/pytorch_model.bin.[type_name.]<epoch>`."""
    # NOTE(review): only the `banzhafteacher` submodule is saved, not the full
    # model — presumably only the teacher is needed downstream; confirm.
    model_to_save = (model.module.banzhafteacher if hasattr(model, 'module') else model.banzhafteacher)
    output_model_file = join(args.output_dir, 'pytorch_model.bin.{}{}'.format(('' if (type_name == '') else (type_name + '.')), epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
def reduce_loss(loss, args):
    """Sum `loss` across ranks onto rank 0 and average it there.

    Non-zero ranks return their local (unreduced) loss. No-op when running
    single-process.
    """
    world_size = args.world_size
    if (world_size < 2):
        return loss
    with torch.no_grad():
        torch.distributed.reduce(loss, dst=0)
        # Only rank 0 holds the cross-rank sum; average it there.
        if (torch.distributed.get_rank() == 0):
            loss /= world_size
    return loss
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, max_steps, val_dataloader):
    """Train `model` for one epoch.

    Args:
        epoch: zero-based epoch index (for logging only).
        global_step: running step counter across epochs.
        max_steps: total steps over all epochs (for the ETA estimate).
    Returns:
        (mean_loss, global_step): average per-batch loss of this epoch and
        the updated step counter.
    """
    global logger
    global best_score
    global meters
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    total_loss = 0
    end = time.time()
    logit_scale = 0
    for step, batch in enumerate(train_dataloader, start=1):
        global_step += 1
        data_time = time.time() - end
        if n_gpu == 1:
            # DDP moves data itself in the multi-gpu path; a single device
            # needs an explicit non-blocking transfer.
            batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        text_ids, text_mask, video, video_mask, inds, idx = batch
        loss = model(text_ids, text_mask, video, video_mask, idx, global_step)
        if n_gpu > 1:
            loss = loss.mean()
        # NOTE(review): detect_anomaly is a debugging aid and slows the
        # backward pass considerably; kept to preserve existing behavior.
        with torch.autograd.detect_anomaly():
            loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        if scheduler is not None:
            scheduler.step()
        optimizer.zero_grad()
        # Fix: accumulate the per-batch loss — previously total_loss stayed 0
        # and the returned epoch average was always 0.0 (cf. the sibling
        # train_epoch, which accumulates float(loss)).
        total_loss += float(loss)
        batch_time = time.time() - end
        end = time.time()
        reduced_l = reduce_loss(loss, args)
        meters.update(time=batch_time, data=data_time, loss=float(reduced_l))
        eta_seconds = meters.time.global_avg * (max_steps - global_step)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if (((global_step % log_step) == 0) or (global_step == 1)) and is_main_process():
            logger.info(meters.delimiter.join(['eta: {eta}', 'epoch: {epoch}/{max_epoch}', 'iteration: {iteration}/{max_iteration}', '{meters}', 'lr: {lr}', 'logit_scale: {logit_scale:.2f}max mem: {memory:.0f}']).format(eta=eta_string, epoch=epoch, max_epoch=args.epochs, iteration=global_step, max_iteration=max_steps, meters=str(meters), lr='/'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), logit_scale=logit_scale, memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
    total_loss = total_loss / len(train_dataloader)
    return (total_loss, global_step)
def main():
    """Entry point: parse args, build model and dataloaders, then train."""
    global logger
    global best_score
    global meters
    meters = MetricLogger(delimiter='  ')
    args = get_args()
    if (not exists(args.output_dir)):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = setup_logger('tvr', args.output_dir, args.local_rank)
    args = set_seed_logger(args)
    model = build_model(args)
    (test_dataloader, val_dataloader, train_dataloader, train_sampler) = build_dataloader(args)
    if args.do_train:
        tic = time.time()
        max_steps = (len(train_dataloader) * args.epochs)
        # NOTE(review): the optimizer's schedule horizon is fixed to 5 epochs
        # regardless of args.epochs — presumably intentional to keep the lr
        # schedule constant across runs; confirm.
        _max_steps = (len(train_dataloader) * 5)
        (optimizer, scheduler, model) = prep_optimizer(args, model, _max_steps, args.local_rank)
        best_score = 1e-05
        best_output_model_file = 'None'
        global_step = 0
        for epoch in range(args.epochs):
            if (train_sampler is not None):
                # Reshuffle shards per epoch under DDP.
                train_sampler.set_epoch(epoch)
            synchronize()
            torch.cuda.empty_cache()
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, args.device, args.world_size, optimizer, scheduler, global_step, max_steps, val_dataloader)
            if (args.local_rank == 0):
                output_model_file = save_model(epoch, args, model, type_name='')
            synchronize()
        toc = (time.time() - tic)
        training_time = time.strftime('%Hh %Mmin %Ss', time.gmtime(toc))
        logger.info(((((('*' * 20) + '\n') + f'training finished with {training_time}') + ('*' * 20)) + '\n'))
def get_args(description='Video-Text as Game Players: Hierarchical Banzhaf Interaction for Cross-Modal Representation Learning'):
    """Parse command-line arguments for the VideoQA/retrieval pipeline.

    Also post-validates: tightTransf forces loose_type off, at least one of
    do_train/do_eval must be set, and batch_size is divided by
    gradient_accumulation_steps.
    """
    parser = argparse.ArgumentParser(description=description)
    # Run modes.
    parser.add_argument('--do_pretrain', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    # Data locations.
    parser.add_argument('--train_csv', type=str, default='data/.train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/.val.csv', help='')
    parser.add_argument('--data_path', type=str, default='train_ans2label.json', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='MSRVTT_Videos', help='feature path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    # Optimization.
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--cross_model', default='cross-base', type=str, required=False, help='Cross module')
    parser.add_argument('--init_model', default=None, type=str, required=False, help='Initial model.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--n_gpu', type=int, default=1, help='Changed in the execute process.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    # Mixed precision (apex).
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--task_type', default='retrieval', type=str, help='Point the task `retrieval` to finetune.')
    parser.add_argument('--datatype', default='msrvtt', type=str, help='Point the dataset to finetune.')
    # Distributed setup.
    parser.add_argument('--world_size', default=0, type=int, help='distribted training')
    parser.add_argument('--local_rank', default=0, type=int, help='distribted training')
    parser.add_argument('--rank', default=0, type=int, help='distribted training')
    parser.add_argument('--coef_lr', type=float, default=0.001, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help='Whether use MIL as Miech et. al. (2020).')
    parser.add_argument('--sampled_use_mil', action='store_true', help='Whether MIL, has a high priority than use_mil.')
    # Architecture.
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help='Layer NO. of text.')
    parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help='Layer NO. of visual.')
    parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help='Layer NO. of cross.')
    parser.add_argument('--loose_type', action='store_true', help='Default using tight type for retrieval.')
    parser.add_argument('--expand_msrvtt_sentences', action='store_true', help='')
    parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2], help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2], help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--freeze_layer_num', type=int, default=0, help='Layer NO. of CLIP need to freeze.')
    parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2], help='0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.')
    parser.add_argument('--linear_patch', type=str, default='2d', choices=['2d', '3d'], help='linear projection of flattened patches.')
    parser.add_argument('--sim_header', type=str, default='meanP', choices=['meanP', 'seqLSTM', 'seqTransf', 'tightTransf', 'BTransf', 'denseTransf'], help='choice a similarity header.')
    parser.add_argument('--loss', type=str, default='CrossEn', choices=['CrossEn', 'DualLoss', 'CrossCLR', 'MarginLoss', 'DCWLoss'])
    parser.add_argument('--estimator', default='None', type=str, required=False, help=' Banzhaf Interaction Estimator.')
    parser.add_argument('--kl', type=float, default=0.2)
    parser.add_argument('--skl', type=float, default=1)
    parser.add_argument('--num_labels', type=int, default=1000)
    # NOTE(review): the default is a list while type=float — type= only applies
    # to values parsed from the command line (which would yield a single float,
    # not a list); confirm --rate is ever passed on the CLI.
    parser.add_argument('--rate', type=float, default=[0.4, 0.4, 0.2])
    args = parser.parse_args()
    # tightTransf is incompatible with the loose retrieval head.
    if (args.sim_header == 'tightTransf'):
        args.loose_type = False
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    # batch_size becomes the per-step micro-batch size.
    args.batch_size = int((args.batch_size / args.gradient_accumulation_steps))
    return args
def set_seed_logger(args):
    """Seed all RNGs, read distributed rank/world size, and create the logger.

    Mutates `args` in place (world_size, rank) and returns it.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Deterministic cuDNN for reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # NOTE(review): assumes the process group is already initialized
    # (e.g. launched via torch.distributed.launch) — confirm.
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    rank = torch.distributed.get_rank()
    args.rank = rank
    if (not os.path.exists(args.output_dir)):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, 'log.txt'))
    if (args.local_rank == 0):
        logger.info('Effective parameters:')
        for key in sorted(args.__dict__):
            logger.info(' <<< {}: {}'.format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Select cuda/cpu device, record the gpu count, and validate batch sizes."""
    global logger
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'), local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info('device: {} n_gpu: {}'.format(device, n_gpu))
    args.n_gpu = n_gpu
    # Per-gpu batch splitting requires both batch sizes to divide evenly.
    if (((args.batch_size % args.n_gpu) != 0) or ((args.batch_size_val % args.n_gpu) != 0)):
        raise ValueError('Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return (device, n_gpu)
def init_model(args, device, n_gpu, local_rank):
    """Instantiate HBI from the cross-module config, optionally warm-started
    from `args.init_model`, and move it to `device`."""
    if args.init_model:
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    cache_dir = (args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed'))
    model = HBI.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.0):
    """Build a BertAdam optimizer (four parameter groups) and wrap the model
    in DistributedDataParallel.

    CLIP-branch parameters (name contains 'clip.') use lr * coef_lr;
    bias/LayerNorm parameters get no weight decay.
    """
    if hasattr(model, 'module'):
        model = model.module
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # Split by decay eligibility, then by CLIP vs non-CLIP branch.
    decay_param_tp = [(n, p) for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))]
    no_decay_param_tp = [(n, p) for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))]
    decay_clip_param_tp = [(n, p) for (n, p) in decay_param_tp if ('clip.' in n)]
    decay_noclip_param_tp = [(n, p) for (n, p) in decay_param_tp if ('clip.' not in n)]
    no_decay_clip_param_tp = [(n, p) for (n, p) in no_decay_param_tp if ('clip.' in n)]
    no_decay_noclip_param_tp = [(n, p) for (n, p) in no_decay_param_tp if ('clip.' not in n)]
    weight_decay = 0.2
    optimizer_grouped_parameters = [
        {'params': [p for (n, p) in decay_clip_param_tp], 'weight_decay': weight_decay, 'lr': (args.lr * coef_lr)},
        {'params': [p for (n, p) in decay_noclip_param_tp], 'weight_decay': weight_decay},
        {'params': [p for (n, p) in no_decay_clip_param_tp], 'weight_decay': 0.0, 'lr': (args.lr * coef_lr)},
        {'params': [p for (n, p) in no_decay_noclip_param_tp], 'weight_decay': 0.0}]
    scheduler = None  # lr schedule handled inside BertAdam (warmup_cosine)
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion, schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-06, t_total=num_train_optimization_steps, weight_decay=weight_decay, max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    return (optimizer, scheduler, model)
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training DataLoader (distributed sampler, per-gpu batch).

    Returns (dataloader, dataset_size, sampler).
    """
    msrvtt_dataset = MSRVTT_TrainDataLoader(jsonl_path=args.train_csv, ans2label_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames, unfold_sentences=args.expand_msrvtt_sentences, frame_order=args.train_frame_order, slice_framepos=args.slice_framepos, use_num=args.num_labels)
    train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    # batch_size is global; each rank loads batch_size // n_gpu samples.
    # shuffle is left to the sampler; drop_last keeps batch shapes constant.
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.n_gpu), num_workers=args.num_thread_reader, pin_memory=True, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
def dataloader_msrvtt_test(args, tokenizer):
    """Build the MSR-VTT evaluation DataLoader (no sampler, no shuffling).

    Returns (dataloader, dataset_size).
    """
    msrvtt_testset = MSRVTT_DataLoader(jsonl_path=args.val_csv, train_jsonl=args.train_csv, ans2label_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames, unfold_sentences=args.expand_msrvtt_sentences, frame_order=args.train_frame_order, slice_framepos=args.slice_framepos, use_num=args.num_labels)
    dataloader_msrvtt = DataLoader(msrvtt_testset, batch_size=args.batch_size_val, num_workers=args.num_thread_reader, shuffle=False, drop_last=False)
    return (dataloader_msrvtt, len(msrvtt_testset))
def save_model(epoch, args, model, type_name=''):
    """Save the full model state dict to
    `output_dir/pytorch_model.bin.[type_name.]<epoch>` and return the path."""
    # Unwrap DDP so keys are saved without the 'module.' prefix.
    model_to_save = (model.module if hasattr(model, 'module') else model)
    output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}{}'.format(('' if (type_name == '') else (type_name + '.')), epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Load a checkpoint produced by `save_model`.

    Args:
        epoch: suffix used to build the default checkpoint name when
            `model_file` is not given.
        model_file: explicit checkpoint path; falls back to
            `output_dir/pytorch_model.bin.<epoch>`.
    Returns:
        The model on `device`, or None when the checkpoint file is missing.
    """
    if ((model_file is None) or (len(model_file) == 0)):
        model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}'.format(epoch))
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if (args.local_rank == 0):
            logger.info('Model loaded from %s', model_file)
        cache_dir = (args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed'))
        # Fix: instantiate HBI — the model this file trains and saves
        # (see init_model/save_model) — instead of EMCL4QA, a leftover from
        # another codebase whose state dict would not match the checkpoint.
        model = HBI.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    else:
        model = None
    return model
# NOTE(review): the default `tokenizer=ClipTokenizer()` is constructed once at
# function-definition time and shared across calls — confirm this is intended.
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0, tokenizer=ClipTokenizer()):
    """Train for one epoch with gradient accumulation; returns
    (mean_loss, global_step)."""
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for (step, batch) in enumerate(train_dataloader):
        if (n_gpu == 1):
            # Multi-gpu paths move data themselves; single device needs it here.
            batch = tuple((t.to(device=device, non_blocking=True) for t in batch))
        (input_ids, input_mask, segment_ids, video, video_mask, labels) = batch
        ce_loss = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
        if (n_gpu > 1):
            ce_loss = ce_loss.mean()
        if (args.gradient_accumulation_steps > 1):
            # Scale so accumulated gradients average over the micro-batches.
            ce_loss = (ce_loss / args.gradient_accumulation_steps)
        loss = ce_loss
        loss.backward()
        total_loss += float(loss)
        # Apply the optimizer only every gradient_accumulation_steps batches.
        if (((step + 1) % args.gradient_accumulation_steps) == 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if (scheduler is not None):
                scheduler.step()
            optimizer.step()
            optimizer.zero_grad()
            # Clamp CLIP's temperature as in the original CLIP training.
            if hasattr(model, 'module'):
                torch.clamp_(model.module.clip.logit_scale.data, max=np.log(100))
            else:
                torch.clamp_(model.clip.logit_scale.data, max=np.log(100))
            global_step += 1
            if (((global_step % log_step) == 0) and (local_rank == 0)):
                logger.info('Epoch: %d/%s, Step: %d/%d, Lr: %s, CeLoss: %f, Time/step: %f', (epoch + 1), args.epochs, (step + 1), len(train_dataloader), '-'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), float(ce_loss), ((time.time() - start_time) / (log_step * args.gradient_accumulation_steps)))
                start_time = time.time()
    total_loss = (total_loss / len(train_dataloader))
    return (total_loss, global_step)
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate VideoQA accuracy over `test_dataloader`; returns Prec@1."""
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Unwrap DDP and evaluate on a single device.
    if hasattr(model, 'module'):
        model = model.module.to(device)
    else:
        model = model.to(device)
    model.eval()
    with torch.no_grad():
        for (bid, batch) in enumerate(test_dataloader):
            batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, video, video_mask, labels) = batch
            output = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
            (prec1, prec5) = accuracy(output, labels, topk=(1, 5))
            # Weight each batch by its sample count.
            top1.update(prec1[0], input_ids.size(0))
            top5.update(prec5[0], input_ids.size(0))
            # Lightweight same-line progress indicator.
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
    logger.info('Video QA:')
    logger.info('\t>>> Prec@1: {top1.avg:.3f} - Prec@5: {top5.avg:.3f}'.format(top1=top1, top5=top5))
    R1 = top1.avg
    return R1
def main():
    """Entry point: set up devices/model, optionally freeze CLIP layers,
    then train and/or evaluate on the selected dataset."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    (device, n_gpu) = init_device(args, args.local_rank)
    tokenizer = ClipTokenizer()
    assert (args.task_type == 'retrieval')
    args.num_labels = 1500
    model = init_model(args, device, n_gpu, args.local_rank)
    # Freeze the bottom `freeze_layer_num` CLIP layers; -1 freezes nothing,
    # projection/scale/post-norm parameters always stay trainable.
    assert ((args.freeze_layer_num <= 12) and (args.freeze_layer_num >= (- 1)))
    if (hasattr(model, 'clip') and (args.freeze_layer_num > (- 1))):
        for (name, param) in model.clip.named_parameters():
            if ((name.find('ln_final.') == 0) or (name.find('text_projection') == 0) or (name.find('logit_scale') == 0) or (name.find('visual.ln_post.') == 0) or (name.find('visual.proj') == 0)):
                continue
            elif ((name.find('visual.transformer.resblocks.') == 0) or (name.find('transformer.resblocks.') == 0)):
                layer_num = int(name.split('.resblocks.')[1].split('.')[0])
                if (layer_num >= args.freeze_layer_num):
                    continue
            # NOTE(review): `name.find('conv2.')` is truthy for any index != 0
            # (including -1 when absent), so this keeps trainable every 3d-patch
            # parameter whose name does not START with 'conv2.' — confirm this
            # matches the intended "skip conv2 when linear_patch == 3d" logic.
            if ((args.linear_patch == '3d') and name.find('conv2.')):
                continue
            else:
                param.requires_grad = False
    assert (args.datatype in DATALOADER_DICT)
    (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        # Fall back to the test split when no dedicated val split exists.
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (args.local_rank == 0):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', test_length)
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info(' Num examples = %d', val_length)
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        # Optimizer steps per epoch, rounded up for gradient accumulation.
        num_train_optimization_steps = ((int(((len(train_dataloader) + args.gradient_accumulation_steps) - 1)) / args.gradient_accumulation_steps) * args.epochs)
        coef_lr = args.coef_lr
        (optimizer, scheduler, model) = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if (args.local_rank == 0):
            logger.info('***** Running training *****')
            logger.info(' Num examples = %d', train_length)
            logger.info(' Batch size = %d', args.batch_size)
            logger.info(' Num steps = %d', (num_train_optimization_steps * args.gradient_accumulation_steps))
        best_score = 1e-05
        best_output_model_file = 'None'
        global_step = 0
        for epoch in range(args.epochs):
            train_sampler.set_epoch(epoch)
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=args.local_rank, tokenizer=tokenizer)
            if (args.local_rank == 0):
                logger.info('Epoch %d/%s Finished, Train Loss: %f', (epoch + 1), args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, type_name='')
                logger.info('Eval on val dataset')
                R1 = eval_epoch(args, model, val_dataloader, device, n_gpu)
                # Track the checkpoint with the best val Prec@1.
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the Top1 Acc is: {:.4f}'.format(best_output_model_file, best_score))
        if (args.local_rank == 0):
            # Final test pass with the best validated checkpoint.
            model = load_model((- 1), args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, device, n_gpu)
    elif args.do_eval:
        if (args.local_rank == 0):
            eval_epoch(args, model, test_dataloader, device, n_gpu)
def compress(paras): (input_video_path, output_video_path) = paras try: command = ['ffmpeg', '-y', '-i', input_video_path, '-filter:v', "scale='if(gt(a,1),trunc(oh*a/2)*2,224)':'if(gt(a,1),224,trunc(ow*a/2)*2)'", '-map', '0:v', '-r', '3', output_video_path] ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = ffmpeg.communicate() retcode = ffmpeg.poll() except Exception as e: raise e
def prepare_input_output_pairs(input_root, output_root): input_video_path_list = [] output_video_path_list = [] for (root, dirs, files) in os.walk(input_root): for file_name in files: input_video_path = os.path.join(root, file_name) output_video_path = os.path.join(output_root, file_name) if (os.path.exists(output_video_path) and (os.path.getsize(output_video_path) > 0)): pass else: input_video_path_list.append(input_video_path) output_video_path_list.append(output_video_path) return (input_video_path_list, output_video_path_list)
def perturb(V, word): print(model[word].predict(asarray(V).reshape(1, (- 1)))[0][1]) phonemes = ((len(V) - 1) // 4) pbs = [] for n in range(phonemes): Z = list(V) Z[((n * 4) + 1)] *= 1.5 Z[((n * 4) + 2)] *= 1.5 Z[((n * 4) + 3)] *= 1.5 p_i = model[word].predict(asarray(Z).reshape(1, (- 1)))[0][1] print(p_i) pbs.append(p_i) return [int(i) for i in rankdata(pbs)]
class SepConvGRU(nn.Module): def __init__(self): super(SepConvGRU, self).__init__() hidden_dim = 128 catt = 256 self.convz1 = nn.Conv2d(catt, hidden_dim, (1, 3), padding=(0, 1)) self.convr1 = nn.Conv2d(catt, hidden_dim, (1, 3), padding=(0, 1)) self.convq1 = nn.Conv2d(catt, hidden_dim, (1, 3), padding=(0, 1)) self.convz2 = nn.Conv2d(catt, hidden_dim, (3, 1), padding=(1, 0)) self.convr2 = nn.Conv2d(catt, hidden_dim, (3, 1), padding=(1, 0)) self.convq2 = nn.Conv2d(catt, hidden_dim, (3, 1), padding=(1, 0)) def forward(self, h, x): hx = torch.cat([h, x], dim=1) z = torch.sigmoid(self.convz1(hx)) r = torch.sigmoid(self.convr1(hx)) q = torch.tanh(self.convq1(torch.cat([(r * h), x], dim=1))) h = (((1 - z) * h) + (z * q)) hx = torch.cat([h, x], dim=1) z = torch.sigmoid(self.convz2(hx)) r = torch.sigmoid(self.convr2(hx)) q = torch.tanh(self.convq2(torch.cat([(r * h), x], dim=1))) h = (((1 - z) * h) + (z * q)) return h
class R_MSFM3(nn.Module):
    """Recurrent multi-scale feature modulation depth decoder, 3 iterations.

    ``x`` selects the encoder width: truthy for a wide (Bottleneck-style,
    256/512-channel) encoder, falsy for a narrow (64/128-channel) one.
    Each iteration injects one encoder scale (coarse x3 first, then x2,
    then x1) and refines a 1/8-resolution disparity field.
    """

    def __init__(self, x):
        super(R_MSFM3, self).__init__()
        # Project the finest feature x1 (64 ch) down to 1/8 resolution, 128 ch
        # (two stride-2 convs with reflection padding).
        self.convX11 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.LeakyReLU(inplace=True), nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
        if x:
            # wide encoder: x2 has 256 ch, x3 has 512 ch
            self.convX21 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True), torch.nn.Tanh())
        else:
            # narrow encoder: x2 has 64 ch, x3 has 128 ch
            self.convX21 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True), torch.nn.Tanh())
        self.sigmoid = nn.Sigmoid()
        self.update_block = BasicUpdateBlock()
        self.gruc = SepConvGRU()

    def upsample_depth(self, flow, mask):
        """Convex 8x upsampling of a 1-channel map [N,1,H,W] -> [N,1,8H,8W].

        ``mask`` holds, per output sub-pixel (8x8 grid), softmax weights over
        the 9 neighbouring coarse values (RAFT-style convex combination).
        """
        (N, _, H, W) = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        # gather the 3x3 neighbourhood of every coarse pixel
        up_flow = F.unfold(flow, [3, 3], padding=1)
        up_flow = up_flow.view(N, 1, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        # interleave the 8x8 sub-pixel grids into full resolution
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 1, (8 * H), (8 * W))

    def forward(self, features, iters=3):
        """Iteratively estimate disparity from a 3-scale feature pyramid.

        Returns a dict keyed by ``('disp_up', itr)``: every iteration during
        training, only the final one during inference.
        """
        (x1, x2, x3) = features
        disp_predictions = {}
        # assumes x3 is at 1/8 input resolution — TODO confirm against encoder
        (b, c, h, w) = x3.shape
        dispFea = torch.zeros([b, 1, h, w], requires_grad=True).to(x1.device)
        net = torch.zeros([b, 256, h, w], requires_grad=True).to(x1.device)
        for itr in range(iters):
            # one encoder scale per iteration, fused with the previous
            # correlation state through the separable conv-GRU
            if (itr in [0]):
                corr = self.convX31(x3)
            elif (itr in [1]):
                corrh = corr
                corr = self.convX21(x2)
                corr = self.gruc(corrh, corr)
            elif (itr in [2]):
                corrh = corr
                corr = self.convX11(x1)
                corr = self.gruc(corrh, corr)
            (net, up_mask, delta_disp) = self.update_block(net, corr, dispFea)
            dispFea = (dispFea + delta_disp)  # residual disparity update
            disp = self.sigmoid(dispFea)      # squash to (0, 1)
            if self.training:
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
            elif ((iters - 1) == itr):
                # inference: only upsample the final iteration
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
        return disp_predictions
class R_MSFM6(nn.Module):
    """Recurrent multi-scale feature modulation depth decoder, 6 iterations.

    Like :class:`R_MSFM3` but with an extra refinement convolution
    (``convX?2``) interleaved after each scale injection, giving the
    schedule x3, refine, x2, refine, x1, refine.
    ``x`` selects the encoder width (truthy = wide 256/512-channel encoder).
    """

    def __init__(self, x):
        super(R_MSFM6, self).__init__()
        # Project the finest feature x1 (64 ch) down to 1/8 resolution, 128 ch.
        self.convX11 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.LeakyReLU(inplace=True), nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
        # Separable (1x3 then 3x1) refinement applied to the x1-scale state.
        self.convX12 = torch.nn.Sequential(nn.Conv2d(128, 128, (1, 3), padding=(0, 1)), torch.nn.Tanh(), nn.Conv2d(128, 128, (3, 1), padding=(1, 0)), torch.nn.Tanh())
        if x:
            # wide encoder: x2 has 256 ch, x3 has 512 ch
            self.convX21 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True), torch.nn.Tanh())
        else:
            # narrow encoder: x2 has 64 ch, x3 has 128 ch
            self.convX21 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=0, dilation=1, bias=True), torch.nn.Tanh())
        # Separable refinements for the x2- and x3-scale states.
        self.convX22 = torch.nn.Sequential(nn.Conv2d(128, 128, (1, 3), padding=(0, 1)), torch.nn.Tanh(), nn.Conv2d(128, 128, (3, 1), padding=(1, 0)), torch.nn.Tanh())
        self.convX32 = torch.nn.Sequential(nn.Conv2d(128, 128, (1, 3), padding=(0, 1)), torch.nn.Tanh(), nn.Conv2d(128, 128, (3, 1), padding=(1, 0)), torch.nn.Tanh())
        self.sigmoid = nn.Sigmoid()
        self.gruc = SepConvGRU()
        self.update_block = BasicUpdateBlock()

    def upsample_depth(self, flow, mask):
        """Convex 8x upsampling of a 1-channel map [N,1,H,W] -> [N,1,8H,8W].

        ``mask`` holds, per output sub-pixel (8x8 grid), softmax weights over
        the 9 neighbouring coarse values (RAFT-style convex combination).
        """
        (N, _, H, W) = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        # gather the 3x3 neighbourhood of every coarse pixel
        up_flow = F.unfold(flow, [3, 3], padding=1)
        up_flow = up_flow.view(N, 1, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        # interleave the 8x8 sub-pixel grids into full resolution
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 1, (8 * H), (8 * W))

    def forward(self, features, iters=6):
        """Iteratively estimate disparity from a 3-scale feature pyramid.

        Returns a dict keyed by ``('disp_up', itr)``: every iteration during
        training, only the final one during inference.
        """
        (x1, x2, x3) = features
        disp_predictions = {}
        # assumes x3 is at 1/8 input resolution — TODO confirm against encoder
        (b, c, h, w) = x3.shape
        dispFea = torch.zeros([b, 1, h, w], requires_grad=True).to(x1.device)
        net = torch.zeros([b, 256, h, w], requires_grad=True).to(x1.device)
        for itr in range(iters):
            # even iterations inject a new encoder scale, odd iterations
            # refine the current state; all fused via the conv-GRU
            if (itr in [0]):
                corr = self.convX31(x3)
            elif (itr in [1]):
                corrh = corr
                corr = self.convX32(corr)
                corr = self.gruc(corrh, corr)
            elif (itr in [2]):
                corrh = corr
                corr = self.convX21(x2)
                corr = self.gruc(corrh, corr)
            elif (itr in [3]):
                corrh = corr
                corr = self.convX22(corr)
                corr = self.gruc(corrh, corr)
            elif (itr in [4]):
                corrh = corr
                corr = self.convX11(x1)
                corr = self.gruc(corrh, corr)
            elif (itr in [5]):
                corrh = corr
                corr = self.convX12(corr)
                corr = self.gruc(corrh, corr)
            (net, up_mask, delta_disp) = self.update_block(net, corr, dispFea)
            dispFea = (dispFea + delta_disp)  # residual disparity update
            disp = self.sigmoid(dispFea)      # squash to (0, 1)
            if self.training:
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
            elif ((iters - 1) == itr):
                # inference: only upsample the final iteration
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
        return disp_predictions
class ConvBlock(nn.Module): 'Layer to perform a convolution followed by LeakyReLU\n ' def __init__(self, in_channels, out_channels): super(ConvBlock, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.LeakyReLU(inplace=True) def forward(self, x): out = self.conv(x) out = self.nonlin(out) return out
class Conv3x3(nn.Module): 'Layer to pad and convolve input\n ' def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out
class dispHead(nn.Module): def __init__(self): super(dispHead, self).__init__() outD = 1 self.covd1 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=192, out_channels=256, kernel_size=3, stride=1, padding=0, bias=True), torch.nn.LeakyReLU(inplace=True)) self.covd2 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=256, out_channels=outD, kernel_size=3, stride=1, padding=0, bias=True)) def forward(self, x): return self.covd2(self.covd1(x))
class BasicMotionEncoder(nn.Module): def __init__(self): super(BasicMotionEncoder, self).__init__() self.convc1 = ConvBlock(128, 160) self.convc2 = ConvBlock(160, 128) self.convf1 = torch.nn.Sequential(nn.ReflectionPad2d(3), torch.nn.Conv2d(in_channels=1, out_channels=64, kernel_size=7, padding=0, bias=True), torch.nn.LeakyReLU(inplace=True)) self.convf2 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=0, bias=True), torch.nn.LeakyReLU(inplace=True)) self.conv = ConvBlock((128 + 32), (192 - 1)) def forward(self, depth, corr): cor = self.convc1(corr) cor = self.convc2(cor) dep = self.convf1(depth) dep = self.convf2(dep) cor_depth = torch.cat([cor, dep], dim=1) out = self.conv(cor_depth) return torch.cat([out, depth], dim=1)
class BasicUpdateBlock(nn.Module): def __init__(self): super(BasicUpdateBlock, self).__init__() self.encoder = BasicMotionEncoder() self.flow_head = dispHead() self.mask = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(192, 324, 3), nn.LeakyReLU(inplace=True), nn.Conv2d(324, (64 * 9), 1, padding=0)) def forward(self, net, corr, depth): net = self.encoder(depth, corr) delta_depth = self.flow_head(net) mask = (0.25 * self.mask(net)) return (net, mask, delta_depth)
class ResNetMultiImageInput(models.ResNet): 'Constructs a resnet model with varying number of input images.\n Adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n ' def __init__(self, block, layers, num_classes=1000, num_input_images=1): super(ResNetMultiImageInput, self).__init__(block, layers) self.inplanes = 64 self.conv1 = nn.Conv2d((num_input_images * 3), 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)
def resnet_multiimage_input(num_layers, pretrained=False, num_input_images=1): 'Constructs a ResNet model.\n Args:\n num_layers (int): Number of resnet layers. Must be 18 or 50\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n num_input_images (int): Number of frames stacked as input\n ' assert (num_layers in [18, 50]), 'Can only run with 18 or 50 layer resnet' blocks = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers] block_type = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers] model = ResNetMultiImageInput(block_type, blocks, num_input_images=num_input_images) if pretrained: loaded = model_zoo.load_url(models.resnet.model_urls['resnet{}'.format(num_layers)]) loaded['conv1.weight'] = (torch.cat(([loaded['conv1.weight']] * num_input_images), 1) / num_input_images) model.load_state_dict(loaded) return model
class ResnetEncoder(nn.Module): 'Pytorch module for a resnet encoder\n ' def __init__(self, num_layers, pretrained, num_input_images=1): super(ResnetEncoder, self).__init__() self.num_ch_enc = np.array([64, 64, 128, 256, 512]) resnets = {18: models.resnet18, 34: models.resnet34, 50: models.resnet50, 101: models.resnet101, 152: models.resnet152} if (num_layers not in resnets): raise ValueError('{} is not a valid number of resnet layers'.format(num_layers)) if (num_input_images > 1): self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images) else: self.encoder = resnets[num_layers](pretrained) if (num_layers > 34): self.num_ch_enc[1:] *= 4 def forward(self, input_image): self.features = [] x = ((input_image - 0.45) / 0.225) x = self.encoder.conv1(x) x = self.encoder.bn1(x) self.features.append(self.encoder.relu(x)) self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[(- 1)]))) self.features.append(self.encoder.layer2(self.features[(- 1)])) return self.features
class ResnetEncoder2(nn.Module): 'Pytorch module for a resnet encoder\n ' def __init__(self, num_layers, pretrained, num_input_images=1): super(ResnetEncoder2, self).__init__() self.num_ch_enc = np.array([64, 64, 128, 256, 512]) resnets = {18: models.resnet18, 34: models.resnet34, 50: models.resnet50, 101: models.resnet101, 152: models.resnet152} if (num_layers not in resnets): raise ValueError('{} is not a valid number of resnet layers'.format(num_layers)) if (num_input_images > 1): self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images) else: self.encoder = resnets[num_layers](pretrained) if (num_layers > 34): self.num_ch_enc[1:] *= 4 def forward(self, input_image): self.features = [] x = ((input_image - 0.45) / 0.225) x = self.encoder.conv1(x) x = self.encoder.bn1(x) self.features.append(self.encoder.relu(x)) self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[(- 1)]))) self.features.append(self.encoder.layer2(self.features[(- 1)])) self.features.append(self.encoder.layer3(self.features[(- 1)])) self.features.append(self.encoder.layer4(self.features[(- 1)])) return self.features
def start_instance(): print('Starting new instance') tag = str(int(time.time())) config_name = ((('clone_' + tag) + '_') + 'spotty.yaml') shutil.copyfile('spotty.yaml', config_name) os.system(((("sed -i 's/instancename/" + tag) + "/g' ") + config_name)) os.system(('spotty start -c ' + config_name)) os.system(((' spotty exec -c ' + config_name) + " -- tmux new-session -d -s my_session 'bash startworker.sh'"))
def download_rico(tmp_path='tmp', dataset_path='rico'): if (not os.path.exists(tmp_path)): os.makedirs(tmp_path) output_path = os.path.join(tmp_path, 'unique_uis.tar.gz') urllib.request.urlretrieve(DATASET_RICO_URL, output_path) extract_path = os.path.join(tmp_path, 'extract') cmd = ['7z', 'x', output_path, ('-o' + tmp_path)] sp = subprocess.Popen(cmd) sp.communicate() cmd = ['7z', 'x', os.path.join(tmp_path, 'unique_uis.tar'), ('-o' + extract_path)] sp = subprocess.Popen(cmd) sp.communicate() if (not os.path.exists(dataset_path)): os.makedirs(dataset_path) os.rename(os.path.join(extract_path, 'combined'), os.path.join(dataset_path, 'combined')) shutil.rmtree(tmp_path)
def download_vins(tmp_path='tmp', dataset_path='vins'): if (not os.path.exists(tmp_path)): os.makedirs(tmp_path) gdown.download(DATASET_VINS_URL, output=os.path.join(tmp_path, 'VINS Dataset.zip'), fuzzy=True, use_cookies=False) extract_path = os.path.join(tmp_path, 'extract') cmd = ['7z', 'x', os.path.join(tmp_path, 'VINS Dataset.zip'), ('-o' + str(extract_path))] sp = subprocess.Popen(cmd) sp.communicate() os.rename(extract_path, dataset_path) shutil.rmtree(tmp_path)
def download_boxes_gdown(tmp_path='tmp', dataset_path='webui-boxes'): if (not os.path.exists(tmp_path)): os.makedirs(tmp_path) gdown.download(DATASET_BOXES_URL, output=os.path.join(tmp_path, 'all_boxes.zip'), fuzzy=True, use_cookies=False) extract_path = os.path.join(tmp_path, 'extract') cmd = ['7z', 'x', os.path.join(tmp_path, 'all_boxes.zip'), ('-o' + str(extract_path))] sp = subprocess.Popen(cmd) sp.communicate() os.rename(extract_path, dataset_path) shutil.rmtree(tmp_path)
def download_enrico(tmp_path='tmp', dataset_path='enrico', screenclassification_metadata_path='../metadata/screenclassification'): if (not os.path.exists(tmp_path)): os.makedirs(tmp_path) output_path = os.path.join(tmp_path, 'screenshots.zip') urllib.request.urlretrieve(DATASET_ENRICO_URL, output_path) extract_path = os.path.join(tmp_path, 'extract') cmd = ['7z', 'x', output_path, ('-o' + str(extract_path))] sp = subprocess.Popen(cmd) sp.communicate() if (not os.path.exists(dataset_path)): os.makedirs(dataset_path) os.rename(os.path.join(extract_path, 'screenshots'), os.path.join(dataset_path, 'screenshots')) shutil.rmtree(tmp_path) if (not os.path.exists(screenclassification_metadata_path)): os.makedirs(screenclassification_metadata_path) metadata_output_path = os.path.join(screenclassification_metadata_path, 'design_topics.csv') urllib.request.urlretrieve(METADATA_ENRICO_URL, metadata_output_path)