# NOTE: removed dataset-export table-header artifact ("code stringlengths ... |---|")
def warmup_constant(x, warmup=0.002):
    """Linearly ramp the LR multiplier from 0 to 1 over the first `warmup`
    fraction of training (x is progress in [0, 1], as given to BertAdam),
    then hold it constant at 1.0.
    """
    return x / warmup if x < warmup else 1.0
|
def warmup_linear(x, warmup=0.002):
    """Triangular LR schedule: ramp up to 1.0 at x == warmup, then decay
    linearly to 0.0 at x == 1.0 (clamped at 0 beyond that).
    """
    if x >= warmup:
        # (x - 1) / (warmup - 1) rewritten with both signs flipped.
        return max((1.0 - x) / (1.0 - warmup), 0)
    return x / warmup
|
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    As implemented below, this differs from standard Adam in that:
      * no bias correction is applied to the moment estimates,
      * weight decay is added to the update itself (decoupled) rather than
        folded into the gradient,
      * gradients are optionally clipped per parameter and the LR can follow
        a warmup schedule from SCHEDULES.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyper-parameters eagerly so misconfiguration fails at
        # construction time instead of on the first step().
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current scheduled learning rate per parameter.

        Returns [0] as soon as any parameter with a gradient has empty state,
        i.e. before the first call to step().
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    # No optimization step has been taken yet.
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy per-parameter state initialization on first use.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                if (group['max_grad_norm'] > 0):
                    # Clips each parameter's gradient norm individually.
                    clip_grad_norm_(p, group['max_grad_norm'])
                # In-place exponential moving averages of grad and grad^2.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # NOTE: no bias correction here - this is the BERT Adam variant.
                update = (next_m / (next_v.sqrt() + group['e']))
                if (group['weight_decay'] > 0.0):
                    # Decoupled weight decay: applied to the update, not the gradient.
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
|
@lru_cache()
def default_bpe():
    """Path to the bundled BPE vocabulary archive, located next to this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings.

    The reversible BPE codes operate on unicode strings, so every byte needs a
    printable, non-whitespace, non-control character representative. Printable
    latin ranges map to themselves; the remaining bytes are assigned code
    points starting at 256.
    """
    keep = (list(range(ord('!'), ord('~') + 1))
            + list(range(ord('¡'), ord('¬') + 1))
            + list(range(ord('®'), ord('ÿ') + 1)))
    byte_values = list(keep)
    char_codes = list(keep)
    offset = 0
    for byte in range(2 ** 8):
        if byte not in keep:
            byte_values.append(byte)
            char_codes.append((2 ** 8) + offset)
            offset += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}
|
def get_pairs(word):
    """Return the set of adjacent symbol bigrams of `word`.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).
    """
    prev = word[0]
    bigrams = set()
    for symbol in word[1:]:
        bigrams.add((prev, symbol))
        prev = symbol
    return bigrams
|
def basic_clean(text):
    """Fix mojibake via ftfy, undo (possibly double) HTML escaping, and trim."""
    fixed = ftfy.fix_text(text)
    return html.unescape(html.unescape(fixed)).strip()
|
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and strip the ends."""
    return re.sub('\\s+', ' ', text).strip()
|
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer (CLIP style) built from a gzipped merge list."""
    def __init__(self, bpe_path: str = None):
        # Fix: resolve the vocabulary path lazily. The previous default
        # (`bpe_path: str = default_bpe()`) evaluated default_bpe() when the
        # class statement executed, i.e. at import time.
        if bpe_path is None:
            bpe_path = default_bpe()
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the header line and keep exactly 49152 - 256 - 2 merge rules
        # (total vocab budget minus byte tokens minus the two specials).
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): the \p{...} classes require `re` to be the third-party
        # `regex` module (stdlib re rejects them) - presumably the module does
        # `import regex as re`; confirm.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder
    def bpe(self, token):
        """Return the BPE segmentation of `token` as a space-joined string,
        with '</w>' marking the end-of-word symbol. Results are memoized.
        """
        if (token in self.cache):
            return self.cache[token]
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the best-ranked adjacent pair until none remains.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # Fix: was a bare `except:`. tuple.index raises ValueError
                    # when `first` is absent; catching everything also swallowed
                    # KeyboardInterrupt/SystemExit.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Clean `text`, split it with the BPE pattern, and return token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw bytes to their printable unicode stand-ins before BPE.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def decode(self, tokens):
        """Invert encode(): ids -> text, replacing '</w>' with spaces."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
    def tokenize(self, text):
        """Like encode() but returns the BPE token strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens
    def convert_tokens_to_ids(self, tokens):
        """Look up each BPE token string in the encoder; raises KeyError on OOV."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
|
class PretrainedConfig(object):
    """Base class for model configurations loadable from a pretrained archive
    (a directory or a .tar.gz holding a config JSON and, optionally, weights).

    Subclasses are expected to override the three class attributes below.
    """
    # Shortcut model name -> archive location (path or URL).
    pretrained_model_archive_map = {}
    # Config JSON file name inside an archive.
    config_name = ''
    # Weights file name inside an archive.
    weights_name = ''
    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        """Resolve `pretrained_model_name` to a (config, state_dict) tuple.

        Lookup order: a file next to this module, then the archive map, then
        the raw name treated as a path/URL. Returns None when the archive
        cannot be located. When `state_dict` is None, the weights file found
        in the archive (if any) is loaded onto CPU. Logging is restricted to
        local_rank 0 when `task_config` is provided.
        """
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if (os.path.exists(archive_file) is False):
            if (pretrained_model_name in cls.pretrained_model_archive_map):
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name
        try:
            # cached_path downloads URLs into cache_dir and passes local paths through.
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.error("Model name '{}' was not found in model name list. We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name, archive_file))
            return None
        if (resolved_archive_file == archive_file):
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('loading archive file {}'.format(archive_file))
        elif ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Unpack the tarball into a temp dir that is removed before returning.
            tempdir = tempfile.mkdtemp()
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                # NOTE(review): extractall on an untrusted archive permits path
                # traversal outside tempdir - only use trusted model archives.
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('Model config {}'.format(config))
        if (state_dict is None):
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            elif ((task_config is None) or (task_config.local_rank == 0)):
                logger.info("Weight doesn't exsits. {}".format(weights_path))
        if tempdir:
            shutil.rmtree(tempdir)
        return (config, state_dict)
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = cls(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
|
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    half_x = x * 0.5
    return half_x * (1.0 + torch.erf(x / math.sqrt(2.0)))
|
def swish(x):
    """Swish (SiLU) activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
|
class LayerNorm(nn.Module):
    """Layer normalization in the TF style: epsilon goes inside the square root."""
    def __init__(self, hidden_size, eps=1e-12):
        super(LayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps
    def forward(self, x):
        # Normalize over the last dimension, then apply learned scale and shift.
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normalized + self.bias
|
class PreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for dowloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedModel, self).__init__()
        if (not isinstance(config, PretrainedConfig)):
            raise ValueError('Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. To create a model from a Google pretrained model use `model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`'.format(self.__class__.__name__, self.__class__.__name__))
        self.config = config
    def init_weights(self, module):
        """Initialize the weights of a single submodule (for use with .apply())."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Normal(0, initializer_range) init for linear/embedding weights.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            # Some LayerNorm variants expose beta/gamma instead of bias/weight.
            if (('beta' in dir(module)) and ('gamma' in dir(module))):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if (isinstance(module, nn.Linear) and (module.bias is not None)):
            module.bias.data.zero_()
    def resize_token_embeddings(self, new_num_tokens=None):
        # Subclasses must implement embedding resizing.
        raise NotImplementedError
    @classmethod
    def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
        """Load `state_dict` into `model`, tolerating partial matches.

        Renames legacy TF-style keys (gamma -> weight, beta -> bias),
        optionally prepends `prefix` to every key, then loads recursively via
        _load_from_state_dict. Missing/unexpected keys are logged (only on
        local_rank 0 when `task_config` is given) but do not raise.
        """
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if ('gamma' in key):
                new_key = key.replace('gamma', 'weight')
            if ('beta' in key):
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for (old_key, new_key) in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        if (prefix is not None):
            # Re-key every entry under the given prefix.
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                old_keys.append(key)
                new_keys.append((prefix + key))
            for (old_key, new_key) in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # Shallow-copy so the load does not mutate the caller's dict; keep
        # the _metadata attribute that load_state_dict machinery relies on.
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if (metadata is not None):
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursive descent mirroring nn.Module.load_state_dict.
            local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
            module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for (name, child) in module._modules.items():
                if (child is not None):
                    load(child, ((prefix + name) + '.'))
        load(model, prefix='')
        if ((prefix is None) and ((task_config is None) or (task_config.local_rank == 0))):
            logger.info(('-' * 20))
            if (len(missing_keys) > 0):
                logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ('\n   ' + '\n   '.join(missing_keys))))
            if (len(unexpected_keys) > 0):
                logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, ('\n   ' + '\n   '.join(unexpected_keys))))
            if (len(error_msgs) > 0):
                logger.error('Weights from pretrained model cause errors in {}: {}'.format(model.__class__.__name__, ('\n   ' + '\n   '.join(error_msgs))))
        return model
    @property
    def dtype(self):
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # Parameter-less module: fall back to the first tensor attribute.
            def find_tensor_attributes(module: nn.Module):
                tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)]
                return tuples
            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype
    @classmethod
    def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        """
        model = cls(config, *inputs, **kwargs)
        if (state_dict is None):
            # No weights supplied: return the freshly constructed model.
            return model
        model = cls.init_preweight(model, state_dict)
        return model
|
class CrossEn(nn.Module):
    """Cross-entropy loss over a similarity matrix.

    NOTE(review): index_select picks the `target` COLUMNS for every row
    (yielding a matrix), not one entry per row - confirm this is intended
    rather than a per-row gather.
    """
    def __init__(self):
        super(CrossEn, self).__init__()
    def forward(self, sim_matrix, target):
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        selected = torch.index_select(log_probs, -1, target)
        return (-selected).mean()
|
class MILNCELoss(nn.Module):
    """MIL-NCE loss over a (batch*n_pair) x (batch*n_pair) similarity matrix.

    Positive bags are the n_pair x n_pair blocks on the block diagonal; one
    anchor (the middle pair) per video contributes to the final mean.
    """
    def __init__(self, batch_size=1, n_pair=1):
        super(MILNCELoss, self).__init__()
        self.batch_size = batch_size
        self.n_pair = n_pair
        # Fix: compare (major, minor) as an integer tuple. The previous
        # float('.'.join(...)) compare parsed e.g. "1.10" as 1.1 < 1.3 and
        # wrongly fell back to uint8 masks on torch >= 1.10.
        torch_version = tuple(int(part) for part in torch.__version__.split('.')[:2])
        self.bool_dtype = (torch.bool if torch_version >= (1, 3) else torch.uint8)
    def forward(self, sim_matrix):
        # Block-diagonal mask marking same-video (positive-bag) entries.
        mm_mask = np.eye(self.batch_size)
        mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
        mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
        # Suppress positives on the text side, keep the transposed video side,
        # and softmax over the concatenation of both views.
        from_text_matrix = (sim_matrix + (mm_mask * (- 1000000000000.0)))
        from_video_matrix = sim_matrix.transpose(1, 0)
        new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=(- 1))
        logpt = F.log_softmax(new_sim_matrix, dim=(- 1))
        # Keep only the positive-bag log-probabilities and sum them (logsumexp).
        mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=(- 1))
        masked_logpt = (logpt + ((torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * (- 1000000000000.0)))
        new_logpt = (- torch.logsumexp(masked_logpt, dim=(- 1)))
        logpt_choice = torch.zeros_like(new_logpt)
        # One anchor row per video: the middle pair of each n_pair group.
        mark_ind = ((torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair) + (self.n_pair // 2))
        logpt_choice[mark_ind] = 1
        sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
        return sim_loss
|
class MaxMarginRankingLoss(nn.Module):
    """Max-margin ranking loss over a similarity matrix whose diagonal holds
    the positive pairs; optional re-weighting of easy vs. hard negatives.
    """
    def __init__(self, margin=1.0, negative_weighting=False, batch_size=1, n_pair=1, hard_negative_rate=0.5):
        super(MaxMarginRankingLoss, self).__init__()
        self.margin = margin
        self.n_pair = n_pair
        self.batch_size = batch_size
        easy_negative_rate = 1 - hard_negative_rate
        self.easy_negative_rate = easy_negative_rate
        self.negative_weighting = negative_weighting
        if n_pair > 1 and batch_size > 1:
            # Precompute the negative-weighting mask (block structure via kron).
            alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
            weight = (1 - alpha) * np.eye(self.batch_size) + alpha
            weight = np.kron(weight, np.ones((n_pair, n_pair)))
            weight = torch.tensor(weight) * (batch_size * (1 - easy_negative_rate))
            self.mm_mask = weight.float()
    def forward(self, x):
        positives = torch.diag(x)
        # Hinge penalty against both the row-wise and column-wise positives.
        row_cost = F.relu(self.margin + x - positives.view(-1, 1))
        col_cost = F.relu(self.margin + x - positives.view(1, -1))
        penalty = row_cost + col_cost
        if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
            penalty = penalty * self.mm_mask.to(penalty.device)
        return penalty.mean()
|
class Emcl(object):
    """Expectation-maximization style codebook over embeddings.

    Maintains `k` basis vectors `mu` that are refined by EM at every call and
    (when training) updated across calls with momentum.
    """
    def __init__(self, k=32, stage_num=9, momentum=0.9, lamd=1, beta=3):
        # k: number of bases; stage_num: EM iterations per call;
        # lamd: softmax temperature divisor; beta: residual mixing weight;
        # momentum: EMA factor for the persistent bases.
        self.k = k
        self.lamd = lamd
        self.stage_num = stage_num
        self.beta = beta
        self.momentum = momentum
        self.mu = torch.Tensor(1, self.k)
        self.mu.normal_(0, math.sqrt((2.0 / self.k)))
        # L2-normalize along dim 0.
        self.mu = (self.mu / (1e-06 + self.mu.norm(dim=0, keepdim=True)))
    def __call__(self, embds, if_train=True):
        """Refine `embds` (shape (b, n)) via EM and return beta * refined + embds.

        NOTE(review): `.cuda(embds.device)` assumes CUDA input tensors; this
        fails for CPU tensors - confirm callers always run on GPU.
        """
        (b, n) = embds.size()
        mu = self.mu.repeat(b, 1).cuda(embds.device)
        _embds = embds
        with torch.no_grad():
            # Alternate E step (responsibilities z) and M step (bases mu),
            # then reconstruct the embeddings from the final bases.
            for i in range(self.stage_num):
                _embds_t = _embds.permute(1, 0)
                z = torch.mm(_embds_t, mu)
                z = (z / self.lamd)
                z = F.softmax(z, dim=1)
                # Normalize responsibilities per basis.
                z = (z / (1e-06 + z.sum(dim=0, keepdim=True)))
                mu = torch.mm(_embds, z)
                mu = (mu / (1e-06 + mu.norm(dim=0, keepdim=True)))
            z_t = z.permute(1, 0)
            _embds = torch.mm(mu, z_t)
        if if_train:
            # Momentum update of the persistent bases, kept on CPU.
            mu = mu.cpu()
            self.mu = ((self.momentum * self.mu) + ((1 - self.momentum) * mu.mean(dim=0, keepdim=True)))
        return ((self.beta * _embds) + embds)
|
class AllGather(torch.autograd.Function):
    """An autograd function that performs allgather on a tensor.

    Forward concatenates the tensor from every rank along dim 0; backward
    returns only this rank's slice of the incoming gradient (the other
    slices are produced by the other ranks' own backward passes).
    """
    @staticmethod
    def forward(ctx, tensor, args):
        # `args` must provide world_size and rank; it receives no gradient
        # (hence the trailing None in backward).
        output = [torch.empty_like(tensor) for _ in range(args.world_size)]
        torch.distributed.all_gather(output, tensor)
        ctx.rank = args.rank
        ctx.batch_size = tensor.shape[0]
        return torch.cat(output, dim=0)
    @staticmethod
    def backward(ctx, grad_output):
        # Slice out the rows that this rank contributed in forward.
        return (grad_output[(ctx.batch_size * ctx.rank):(ctx.batch_size * (ctx.rank + 1))], None)
|
def get_a_var(obj):
    """Depth-first search `obj` (tensor, list/tuple, or dict) and return the
    first torch.Tensor found, or None if there is none.
    """
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for candidate in map(get_a_var, obj):
            if isinstance(candidate, torch.Tensor):
                return candidate
    if isinstance(obj, dict):
        for candidate in map(get_a_var, obj.items()):
            if isinstance(candidate, torch.Tensor):
                return candidate
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` onto `device_ids` and run `fct(replica, *input)` on
    each (replica, input) pair, one thread per replica; returns the outputs
    in input order. Exceptions raised inside a worker are captured and
    re-raised in the calling thread.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Propagate the caller's grad mode into the worker threads.
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # Run on the device of the first tensor found in the input.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception with context; re-raised by the caller below.
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline without thread overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Return the shared 'logger' logger at DEBUG level; when `filename` is
    given, also attach a file handler to the root logger.
    """
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return log
|
def compress(paras):
    """Re-encode one video with ffmpeg: scale the short side to 224 (keeping
    aspect ratio, even dimensions) and resample to 3 fps.

    `paras` is an (input_video_path, output_video_path) tuple. Blocks until
    ffmpeg exits; stdout/stderr are captured and discarded.
    """
    (input_video_path, output_video_path) = paras
    # Fix: dropped the pointless `except Exception as e: raise e` (a no-op
    # re-raise) and the unused out/err/retcode locals.
    command = ['ffmpeg', '-y', '-i', input_video_path, '-filter:v', "scale='if(gt(a,1),trunc(oh*a/2)*2,224)':'if(gt(a,1),224,trunc(ow*a/2)*2)'", '-map', '0:v', '-r', '3', output_video_path]
    ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    ffmpeg.communicate()
    # NOTE(review): the exit code is intentionally not checked (best-effort
    # behavior preserved) - failed encodes are silent.
|
def prepare_input_output_pairs(input_root, output_root):
    """Walk `input_root` and pair every file with its target path directly
    under `output_root` (flattened - subdirectory structure is not mirrored).
    Files whose output already exists and is non-empty are skipped.

    Returns (input_paths, output_paths) as two parallel lists.
    """
    inputs = []
    outputs = []
    for root, _dirs, files in os.walk(input_root):
        for file_name in files:
            candidate_out = os.path.join(output_root, file_name)
            already_done = os.path.exists(candidate_out) and os.path.getsize(candidate_out) > 0
            if not already_done:
                inputs.append(os.path.join(root, file_name))
                outputs.append(candidate_out)
    return (inputs, outputs)
|
def get_a_var(obj):
    """Recursively locate a torch.Tensor inside `obj` (tensor, list/tuple, or
    dict); returns None when no tensor is present.

    NOTE: duplicate of the earlier get_a_var definition in this file.
    """
    if isinstance(obj, torch.Tensor):
        return obj
    children = ()
    if isinstance(obj, (list, tuple)):
        children = obj
    elif isinstance(obj, dict):
        children = obj.items()
    for child in children:
        found = get_a_var(child)
        if isinstance(found, torch.Tensor):
            return found
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Run `fct(replica, *input)` for each replica of `model` across
    `device_ids`, using one thread per replica; outputs are returned in input
    order and worker exceptions are re-raised in the caller.

    NOTE: duplicate of the earlier parallel_apply definition in this file.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Workers inherit the caller's grad mode.
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # Use the device of the first tensor found in the input.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Captured here; re-raised below via ExceptionWrapper.reraise().
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica runs inline.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Return the shared 'logger' logger (DEBUG level), optionally wiring a
    file handler onto the root logger.

    NOTE: duplicate of the earlier get_logger definition in this file.
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=fmt, datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is None:
        return logger
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)
    return logger
|
class BaseDataLoader(DataLoader):
    """Base class for all data loaders.

    Optionally splits off a validation subset: `validation_split` may be an
    int (absolute sample count) or a float (fraction of the dataset).
    """
    def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
        self.validation_split = validation_split
        self.shuffle = shuffle
        self.batch_idx = 0
        self.n_samples = len(dataset)
        # May reset self.shuffle and self.n_samples when a split is requested,
        # so it must run before init_kwargs is built.
        (self.sampler, self.valid_sampler) = self._split_sampler(self.validation_split)
        self.init_kwargs = {'dataset': dataset, 'batch_size': batch_size, 'shuffle': self.shuffle, 'collate_fn': collate_fn, 'num_workers': num_workers}
        super().__init__(sampler=self.sampler, **self.init_kwargs)
    def _split_sampler(self, split):
        """Return (train_sampler, valid_sampler), or (None, None) when split == 0."""
        if (split == 0.0):
            return (None, None)
        idx_full = np.arange(self.n_samples)
        # NOTE: reseeds numpy's GLOBAL RNG for a reproducible split - this
        # affects all subsequent np.random calls in the process.
        np.random.seed(0)
        np.random.shuffle(idx_full)
        if isinstance(split, int):
            # Integer split = absolute number of validation samples.
            assert (split > 0)
            assert (split < self.n_samples), 'validation set size is configured to be larger than entire dataset.'
            len_valid = split
        else:
            len_valid = int((self.n_samples * split))
        valid_idx = idx_full[0:len_valid]
        train_idx = np.delete(idx_full, np.arange(0, len_valid))
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        # DataLoader forbids passing both a sampler and shuffle=True.
        self.shuffle = False
        self.n_samples = len(train_idx)
        return (train_sampler, valid_sampler)
    def split_validation(self):
        """Return a DataLoader over the validation subset, or None if no split."""
        if (self.valid_sampler is None):
            return None
        else:
            return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
|
class BaseModel(nn.Module):
    """Base class for all models."""
    @abc.abstractmethod
    def forward(self, *inputs):
        """Forward pass logic; subclasses must implement."""
        raise NotImplementedError
    def __str__(self):
        """Model prints with number of trainable parameters."""
        trainable = [p for p in self.parameters() if p.requires_grad]
        n_params = sum(np.prod(p.size()) for p in trainable)
        return super().__str__() + f'\nTrainable parameters: {n_params}'
|
class BaseTrainer():
'Base class for all trainers.'
    def __init__(self, model, loss, metrics, optimizer, lr_scheduler, config):
        """Wire up model, loss, optimizer and experiment bookkeeping from `config`."""
        self.config = config
        self.hparams = get_hparams_from_config(self.config)
        (self.device, device_ids) = self._prepare_device(config['n_gpu'])
        self.model = model.to(self.device)
        if (len(device_ids) > 1):
            # Multi-GPU: self.model keeps the DataParallel wrapper.
            self.model = torch.nn.DataParallel(model, device_ids=device_ids)
        self.loss = loss
        self.metrics = metrics
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        # All experiment artifacts live under config.save_dir.
        self.exp_dir = config.save_dir
        self.checkpoint_dir = config.save_dir
        self.perf_log_path = os.path.join(config.save_dir, 'perf_log.txt')
        self.info_checkpoint_path = os.path.join(config.save_dir, 'info_checkpoint.txt')
        self.monitoring_path = os.path.join(config.save_dir, 'monitoring.json')
        cfg_trainer = config['trainer']
        self.epochs = cfg_trainer['epochs']
        self.save_period = cfg_trainer['save_period']
        # monitor is 'off', 'given_epoch <N>', or '<min|max> <metric_name>'.
        self.monitor = cfg_trainer.get('monitor', 'off')
        self.timer = AverageMeter()
        if (self.monitor == 'off'):
            self.mnt_mode = 'off'
            self.mnt_best = 0
        elif self.monitor.startswith('given_epoch'):
            (self.mnt_mode, self.given_epoch) = self.monitor.split()
            assert (self.mnt_mode in ['given_epoch'])
            self.mnt_best = 0
            self.given_epoch = int(self.given_epoch)
        else:
            (self.mnt_mode, self.mnt_metric) = self.monitor.split()
            assert (self.mnt_mode in ['min', 'max'])
            # Start from the worst possible value for the chosen direction.
            self.mnt_best = (inf if (self.mnt_mode == 'min') else (- inf))
        self.early_stop = cfg_trainer.get('early_stop', inf)
        self.start_epoch = 0
        self.epoch = 0
        self.n_samples = 0
        self.n_steps = 0
        self.writer = SummaryWriter(config.log_dir)
        self.include_optim_in_ckpts = config['trainer'].get('include_optim_in_ckpts', False)
        # NOTE(review): train() also reads self.skip_first_n_saves and
        # self.num_keep_ckpts, which are never assigned here - presumably a
        # subclass or _resume_checkpoint provides them; confirm.
        if (config.resume is not None):
            self._resume_checkpoint(config.resume)
    @abc.abstractmethod
    def _train_epoch(self, epoch):
        """Training logic for an epoch. Must be implemented by subclasses."""
        raise NotImplementedError
    @abc.abstractmethod
    def _valid_epoch(self, epoch, sets):
        """Validation logic for an epoch. Must be implemented by subclasses."""
        raise NotImplementedError
    def train(self):
        """Full training logic.

        Per epoch: train, validate, flatten the nested metrics, update the
        monitored metric (with early stopping), checkpoint, and write
        TensorBoard + perf-log output.
        """
        not_improved_count = 0
        for epoch in range(self.start_epoch, (self.epochs + 1)):
            self.epoch = epoch
            epoch_start = time.time()
            logger.debug('Starting training epoch %s ...', str(epoch))
            train_start = time.time()
            result = self._train_epoch(epoch)
            for (key, val) in result.items():
                self.writer.add_scalar(f'{key}', val, epoch)
            self.timer.update('epoch.train', (time.time() - train_start))
            logger.debug('Starting evaluating epoch %s ...', str(epoch))
            valid_start = time.time()
            val_log = self._valid_epoch(epoch, sets='continuous_eval')
            logger.debug('Updating val log with results ...')
            result.update(val_log)
            self.timer.update('epoch.valid', (time.time() - valid_start))
            checkpoint_start = time.time()
            # Flatten nested metrics into 'dataset/type/name' keys.
            log = {'epoch': epoch}
            for (key, value) in result.items():
                if (key == 'metrics'):
                    for (dataset_name, dataset_metrics) in value.items():
                        for (metric_type, metric_dict) in dataset_metrics.items():
                            for (metric_name, metric_value) in metric_dict.items():
                                log[f'{dataset_name}/{metric_type}/{metric_name}'] = metric_value
                else:
                    log[key] = value
            best = False
            if (self.mnt_mode in ['min', 'max']):
                try:
                    # <= / >= so that an equal score still counts as improved.
                    lower = (log[self.mnt_metric] <= self.mnt_best)
                    higher = (log[self.mnt_metric] >= self.mnt_best)
                    improved = (((self.mnt_mode == 'min') and lower) or ((self.mnt_mode == 'max') and higher))
                except KeyError:
                    # Metric missing from the logs: disable monitoring for good.
                    logger.warning('Warning: Metric %s not found, perf monitoring is disabled.', self.mnt_metric)
                    self.mnt_mode = 'off'
                    improved = False
                    not_improved_count = 0
                if improved:
                    self.mnt_best = log[self.mnt_metric]
                    not_improved_count = 0
                    best = True
                else:
                    not_improved_count += 1
                if (not_improved_count > self.early_stop):
                    logger.info("Val performance didn't improve for %s epochs. Training stops.", self.early_stop)
                    break
            save_best = (best and (self.mnt_metric != 'epoch'))
            if ((self.mnt_mode in ['given_epoch']) and (epoch == self.given_epoch)):
                save_best = True
            # NOTE(review): self.skip_first_n_saves / self.num_keep_ckpts are
            # not set in __init__ - confirm a subclass provides them.
            if (epoch < self.skip_first_n_saves):
                msg = f'Skipping ckpt save at epoch {epoch} < {self.skip_first_n_saves}'
                logger.info(msg)
            elif (((epoch % self.save_period) == 0) or save_best):
                self._save_checkpoint(epoch, save_best=best)
                if (epoch > self.num_keep_ckpts):
                    self.purge_stale_checkpoints()
            self.timer.update('epoch.checkpoint', (time.time() - checkpoint_start))
            self.timer.update('epoch.total', (time.time() - epoch_start))
            for (key, val) in self.timer.dic.items():
                for metric in ['avg', 'sum']:
                    log[f'timer.{key}.{metric}'] = self.timer.dic[key][metric]
                self.writer.add_scalar(f'timer_epoch/{key}', self.timer.dic[key]['sum'], epoch)
            self.writer.add_text('exp_dir', str(self.exp_dir), epoch)
            self.timer.reset()
            log['best'] = self.mnt_best
            log['not_improved_count'] = not_improved_count
            self.writer.add_scalar('best', self.mnt_best, epoch)
            # 'timer.' entries go to debug, everything else to info.
            for (metric_name, metric_value) in log.items():
                if ('/cols' in metric_name):
                    continue
                if ('timer.' in metric_name):
                    logger.debug(' {:15s}: {}'.format(str(metric_name), metric_value))
                else:
                    logger.info(' {:15s}: {}'.format(str(metric_name), metric_value))
            # Persist everything except the bulky '...cols' entries.
            log_light = {}
            for (key, value) in log.items():
                if (not key.endswith('cols')):
                    log_light[key] = value
            update_perf_log(log_light, self.perf_log_path)
        # NOTE(review): reads log[self.mnt_metric] after the loop - raises if
        # monitoring got disabled or no epoch ran; confirm intended.
        self.writer.add_hparams(self.hparams, {'hparam/accuracy': log[self.mnt_metric], 'hparam/mnt_best': self.mnt_best, 'hparam/epoch': epoch}, name='hparams')
    def evaluate(self):
        """Final evaluation.

        Resumes the best checkpoint ('trained_model.pth') when it exists,
        runs one validation epoch over every dataset, logs the flattened
        metrics, merges them into 'exp_results.json' in the save dir, and
        finally touches 'exp_completed_flag.txt' so external tooling can
        detect that the experiment finished.
        """
        sets = 'final_eval'
        ckpt_path = (self.config.save_dir / 'trained_model.pth')
        if os.path.exists(ckpt_path):
            self._resume_checkpoint(ckpt_path)
        else:
            # Best-effort: evaluate the current in-memory weights instead.
            msg = f'The checkpoint {ckpt_path} does not exist and cannot be loaded. The model will not be resumed to that checkpoint.'
            logger.info(msg)
        final_result = self._valid_epoch(epoch=self.epoch, sets=sets)
        nested_metrics = final_result['metrics']
        # Flatten nested metrics into '{metric_type}/{metric_name}/{set}'
        # keys, grouped per dataset.
        log = {}
        for (dataset_name, dataset_metrics) in nested_metrics.items():
            log[dataset_name] = {}
            for (metric_type, metric_dict) in dataset_metrics.items():
                for (metric_name, metric_value) in metric_dict.items():
                    log[dataset_name][f'{metric_type}/{metric_name}/{sets}'] = metric_value
        for (dataset_name, metric_dict) in log.items():
            logger.info('%s:', dataset_name)
            for (metric_name, metric_value) in metric_dict.items():
                # Column-level breakdowns are too verbose to log.
                if ('/cols' in metric_name):
                    continue
                # Timer metrics go to debug, everything else to info.
                if ('timer.' in metric_name):
                    logger.debug(' {:15s}: {}'.format(str(metric_name), metric_value))
                else:
                    logger.info(' {:15s}: {}'.format(str(metric_name), metric_value))
        save_dir = self.config.save_dir
        results_on_datasets_log_path = os.path.join(save_dir, 'exp_results.json')
        # Merge into any existing results file rather than overwriting it.
        if os.path.exists(results_on_datasets_log_path):
            with open(results_on_datasets_log_path) as json_file:
                res = json.load(json_file)
        else:
            res = collections.OrderedDict({})
        if ('perfs' not in res.keys()):
            res['perfs'] = {}
        res['perfs'] = log
        res['checkpoint_epoch'] = self.loaded_epoch
        logger.info('Best epoch for the monitored metric: %s', self.loaded_epoch)
        with open(results_on_datasets_log_path, 'w') as fp:
            json.dump(res, fp, indent=4)
        # Touch the completion flag file (create if missing, update mtime).
        exp_completed_flag_path = os.path.join(save_dir, 'exp_completed_flag.txt')
        with open(exp_completed_flag_path, 'a'):
            os.utime(exp_completed_flag_path, None)
def purge_stale_checkpoints(self):
'Remove checkpoints that are no longer neededself.\n\n NOTE: This function assumes that the `best` checkpoint has already been\n renamed\n to have a format that differs from `checkpoint-epoch<num>.pth`\n '
found_epoch_ckpts = list(self.checkpoint_dir.glob('checkpoint-epoch*.pth'))
if (len(found_epoch_ckpts) <= self.num_keep_ckpts):
return
regex = '.*checkpoint-epoch(\\d+)[.]pth$'
epochs = [int(re.search(regex, str(x)).groups()[0]) for x in found_epoch_ckpts]
sorted_ckpts = sorted(list(zip(epochs, found_epoch_ckpts)), key=(lambda x: (- x[0])))
for (epoch, stale_ckpt) in sorted_ckpts[self.num_keep_ckpts:]:
tic = time.time()
stale_ckpt.unlink()
msg = f'removing stale ckpt [epoch {epoch}] [took {(time.time() - tic):.2f}s]'
logger.info(msg)
def _prepare_device(self, n_gpu_use):
'Setup GPU device if available, move model into configured device.'
n_gpu = torch.cuda.device_count()
msg = f'n_gpu = torch.cuda.device_count(): {n_gpu} (nb of gpus available)'
logger.debug(msg)
if ((n_gpu_use > 0) and (n_gpu == 0)):
logger.warning("Warning: There's no GPU available on this machine,training will be performed on CPU.")
n_gpu_use = 0
if (n_gpu_use > n_gpu):
msg = "Warning: The number of GPU's configured to use is {}, but only {} are available on this machine.".format(n_gpu_use, n_gpu)
logger.warning(msg)
n_gpu_use = n_gpu
device = torch.device(('cuda:0' if (n_gpu_use > 0) else 'cpu'))
logger.debug('device: %s', device)
list_ids = list(range(n_gpu_use))
logger.debug('list_ids: %s', list_ids)
return (device, list_ids)
    def _save_checkpoint(self, epoch, save_best=False):
        """Saving checkpoints.

        Writes 'checkpoint-epoch<epoch>.pth' and, when `save_best` is True,
        also refreshes 'trained_model.pth'. Each file is first written under a
        temporary name and then renamed, so readers never observe a partially
        written checkpoint.

        Args:
            epoch: current epoch number, stored inside the checkpoint.
            save_best: when True, additionally save as 'trained_model.pth'.
        """
        arch = type(self.model).__name__
        # Unwrap DataParallel-style wrappers if present (they expose .module).
        try:
            state_dict = self.model.module.state_dict()
        except AttributeError:
            state_dict = self.model.state_dict()
        state = {'arch': arch, 'epoch': epoch, 'state_dict': state_dict, 'monitor_best': self.mnt_best, 'config': self.config, 'n_samples': self.n_samples, 'n_steps': self.n_steps}
        if self.include_optim_in_ckpts:
            state['optimizer'] = self.optimizer.state_dict()
            state['lr_scheduler'] = self.lr_scheduler.state_dict()
        filename = str((self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch)))
        # Write-to-temp-then-rename keeps the save effectively atomic.
        filename_tmp = (filename + '_')
        tic = time.time()
        logger.info('Saving checkpoint: %s ...', filename)
        torch.save(state, filename_tmp)
        os.rename(filename_tmp, filename)
        msg = f'Done in {(time.time() - tic):.3f}s'
        logger.info(msg)
        if save_best:
            logger.info("Updating 'best' checkpoint: %s ...", filename)
            best_path = str((self.checkpoint_dir / 'trained_model.pth'))
            best_path_tmp = (best_path + '_')
            torch.save(state, best_path_tmp)
            os.rename(best_path_tmp, best_path)
            msg = f'Done in {(time.time() - tic):.3f}s'
            logger.info(msg)
    def _resume_last_checkpoint(self):
        """Resume from the most recent checkpoint found in the experiment dir."""
        checkpoint_path = get_last_checkpoint_path(self.exp_dir)
        self._resume_checkpoint(checkpoint_path)
def match_checkpoint_to_model(self, checkpoint, model):
'Adapt the loaded checkpoint so that is fits the current architecture.'
modules = ['vid_bert.embeddings.position_embeddings.weight']
for module in modules:
if ((module in model) and (checkpoint[module].shape != model[module].shape)):
padding = (model[module].shape[0] - checkpoint[module].shape[0])
padding_shape = list(model[module].shape)
padding_shape[0] = padding
device = checkpoint[module].device
checkpoint[module] = torch.cat([checkpoint[module], torch.zeros(padding_shape, device=device)], 0)
logger.warning('Size mismatch for module %s fixed by zero padding', module)
modules = []
for module in modules:
if ((module in model) and (module not in checkpoint)):
padding_shape = model[module].shape
checkpoint[module] = torch.Tensor(padding_shape).cuda()
logger.warning('Size mismatch for module %s', module)
elif ((module in model) and (checkpoint[module].shape != model[module].shape)):
padding_shape = model[module].shape
checkpoint[module] = torch.Tensor(padding_shape).cuda()
logger.warning('Size mismatch for module %s', module)
    def _resume_checkpoint(self, resume_path):
        """Resume from saved checkpoints.

        When the checkpoint was produced by this very experiment directory
        (a "restart"), optimizer/lr_scheduler states and the best monitored
        value are restored too, and the weights are loaded strictly.
        Otherwise (warm start from another experiment) only the shape-adapted
        weights are loaded and all progress counters are reset to zero.

        Args:
            resume_path: path to the .pth checkpoint to load.
        """
        resume_path = str(resume_path)
        logger.info('Loading checkpoint from: %s ...', resume_path)
        checkpoint = torch.load(resume_path, map_location=self.device)
        self.loaded_epoch = checkpoint['epoch']
        self.epoch = checkpoint['epoch']
        self.start_epoch = (checkpoint['epoch'] + 1)
        self.n_samples = checkpoint['n_samples']
        self.n_steps = checkpoint['n_steps']
        # A checkpoint located in our own exp dir means we are restarting.
        exp_dir_src = os.path.dirname(resume_path)
        restart = (exp_dir_src == str(self.exp_dir))
        if (checkpoint['config']['arch'] != self.config['arch']):
            msg = 'Warning: Architecture configuration given in config file isdifferent from that of checkpoint. This may yield an exception while state_dict is being loaded.'
            logger.warning(msg)
            logger.warning('Created model conf: %s', self.config['arch'])
            logger.warning('Loaded model conf: %s', checkpoint['config']['arch'])
        # Zero-pad mismatched embedding tables before loading the weights.
        self.match_checkpoint_to_model(checkpoint['state_dict'], self.model.state_dict())
        self.model.load_state_dict(checkpoint['state_dict'], strict=restart)
        if restart:
            # Only resume optimizer/scheduler state when their types match
            # the current config; otherwise warn and skip.
            optim_args = checkpoint['config']['optimizer']
            if (optim_args['type'] != self.config['optimizer']['type']):
                msg = 'Warning: Optimizer type given in config file differs from that of checkpoint. Optimizer parameters not being resumed.'
                logger.warning(msg)
            else:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler_args = checkpoint['config']['lr_scheduler']
            if (lr_scheduler_args['type'] != self.config['lr_scheduler']['type']):
                msg = 'Warning: Lr_scheduler type given in config file differs from that of checkpoint. Lr_scheduler parameters not being resumed.'
                logger.warning(msg)
            else:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            self.mnt_best = checkpoint['monitor_best']
        else:
            # Warm start from an external checkpoint: reset all counters.
            self.loaded_epoch = 0
            self.epoch = 0
            self.start_epoch = 0
            self.n_samples = 0
            self.n_steps = 0
            with open(self.info_checkpoint_path, 'a') as f:
                # NOTE(review): there is a missing space before 'loaded' in the
                # recorded message below; kept as-is to avoid changing output.
                f.write(f"This experiment is based on the checkpoint {resume_path}loaded at epoch {checkpoint['epoch']}")
        logger.info('Ckpt loaded at epoch %s.', str(checkpoint['epoch']))
|
class ActivityNet(BaseDataset):
    """ActivityNet captions dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: dataset cut; 'val1' (official lists) or 'c'
                (train+val pool, with public-server test lists).
            split_name: train/trn/val/trainval/test for 'val1';
                train/trn/val/trainval/test1/test2 for 'c'.

        Raises:
            ValueError: on an unrecognised cut name.
        """
        if (cut_name in ['val1']):
            train_list_path = 'train_list.txt'
            test_list_path = 'val_1_list.txt'
            test_list_path = os.path.join(self.data_dir, test_list_path)
            with open(test_list_path) as f:
                test_vid_list = f.readlines()
            nb_test_samples = len(test_vid_list)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                cross_vid_list = train_vid_list
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # Deterministic shuffle so the train/val carve-out is reproducible.
                rng = np.random.RandomState(self.cross_seed)
                rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        # The first `nb_test_samples` videos are reserved for 'val'.
                        self.vid_list = cross_vid_list[nb_test_samples:]
                        if (split_name in ['trn']):
                            # 'trn' is a small train subset used for monitoring.
                            self.vid_list = self.vid_list[:nb_test_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[:nb_test_samples]
            elif (split_name == 'test'):
                self.vid_list = test_vid_list
            self.vid_list = [x.strip() for x in self.vid_list]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val and re-split; seed 0 keeps the original order.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            # Deterministic val-sized subset of train.
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out lists used for the public evaluation server.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'ActivityNet_{cut_name}_{split_name}'
|
class ExpertDataLoader():
    """Bundles a MixDataset with the torch DataLoader that serves it."""

    def __init__(self, mix, num_workers, batch_size, raw_input_dims, until_epoch=float('inf'), pin_memory=False, n_pairs=1, training=False, tokenizer=None, loaded_data=None, cross_seed=0):
        self.n_pairs = n_pairs
        self.until_epoch = until_epoch
        self.batch_size = batch_size
        mixed_dataset = MixDataset(mix=mix, raw_input_dims=raw_input_dims, training=training, tokenizer=tokenizer, n_pairs=n_pairs, loaded_data=loaded_data, cross_seed=cross_seed)
        # Shuffle and drop the last partial batch only while training.
        torch_loader = DataLoader(dataset=mixed_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=mixed_dataset.collate_data, drop_last=training, shuffle=training, pin_memory=pin_memory)
        self.dataloaders = {'loader': torch_loader, 'dataset': mixed_dataset}
        logger.debug('Loading data with %d workers', num_workers)

    def __getitem__(self, key):
        """Expose 'loader' and 'dataset' by subscript."""
        return self.dataloaders[key]
|
class DiDeMo(BaseDataset):
    """DiDeMo dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: dataset cut; 'full' (official lists) or 'c'
                (train+val pool, with public-server test lists).
            split_name: train/trn/val/test for 'full';
                train/trn/val/trainval/test1/test2 for 'c'.

        Raises:
            ValueError: on an unrecognised cut or (for 'full') split name.
        """
        if (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised DiDeMo split: {split_name}')
            list_path = os.path.join(self.root_feat, list_path)
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            if (split_name in ['trn']):
                # 'trn' is a fixed-size, deterministically shuffled train subset.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                # NOTE(review): cut_name is 'full' in this branch, so the 'c'
                # case below looks unreachable — confirm the intended sizes.
                if (cut_name in ['c']):
                    self.vid_list = self.vid_list[:840]
                else:
                    self.vid_list = self.vid_list[:1065]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val and re-split; seed 0 keeps the original order.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            # Deterministic val-sized subset of train.
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out lists used for the public evaluation server.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'DiDeMo_{cut_name}_{split_name}'
        self.expert_timings = {}
|
class HowTo100M(BaseDataset):
    """HowTo100M dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: dataset cut; only 'full' is supported.
            split_name: 'train', 'trn', one of the val variants
                ('val', 'valong', 'val3-30') or one of the test variants
                ('test', 'testlong', 'test3-30').

        Raises:
            ValueError: on an unrecognised cut or split name.
        """
        self.restrict_test_captions = None
        list_path = None
        if (cut_name in ['full']):
            if (split_name in ['train']):
                list_path = 'train_list_full.txt'
            elif (split_name in ['trn']):
                list_path = 'trn_list_full.txt'
            elif (split_name in ['val', 'valong', 'val3-30']):
                list_path = 'val_list_full.txt'
            elif (split_name in ['test', 'testlong', 'test3-30']):
                list_path = 'test_list_full.txt'
            else:
                # Fix: this branch rejects an unknown *split*; the previous
                # message wrongly reported an unrecognised cut.
                msg = 'unrecognised HowTo100M split: {}'
                raise ValueError(msg.format(split_name))
            list_path = os.path.join(self.root_feat, list_path)
            print('loading training/val splits....')
            tic = time.time()
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            print('done in {:.3f}s'.format((time.time() - tic)))
        else:
            # Fix: an unknown cut previously fell through silently, leaving
            # `self.vid_list` unset; fail loudly like the sibling datasets.
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'HowTo100M_{cut_name}_{split_name}'
|
class LSMDC(BaseDataset):
    """LSMDC dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: dataset cut; only 'full' is handled here.
            split_name: train/trn/val/trainval/test.

        NOTE(review): unlike the sibling datasets, an unrecognised cut is
        silently ignored here (no ValueError) — confirm this is intended.
        """
        if (cut_name in ['full']):
            train_list_path = 'LSMDC16_annos_training.csv'
            test_list_path = 'LSMDC16_challenge_1000_publictect.csv'
            test_list_path = os.path.join(self.data_dir, test_list_path)
            # Video ids live in the first (unnamed) column of the csv.
            df = pd.read_csv(test_list_path, delimiter='\t', header=None)
            test_vid_list = list(df[0])
            nb_test_samples = len(test_vid_list)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = os.path.join(self.data_dir, train_list_path)
                df = pd.read_csv(train_list_path, delimiter='\t', header=None)
                train_vid_list = list(df[0])
                cross_vid_list = train_vid_list
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # Deterministic shuffle so the train/val carve-out is reproducible.
                rng = np.random.RandomState(self.cross_seed)
                rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        # The first `nb_test_samples` videos are reserved for 'val'.
                        self.vid_list = cross_vid_list[nb_test_samples:]
                        if (split_name in ['trn']):
                            # 'trn' is a small train subset used for monitoring.
                            self.vid_list = self.vid_list[:nb_test_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[:nb_test_samples]
            elif (split_name == 'test'):
                self.vid_list = test_vid_list
            self.vid_list = [x.strip() for x in self.vid_list]
            # Clips excluded from every split (presumably missing/broken
            # features — TODO confirm the original reason).
            movies = ['0024_THE_LORD_OF_THE_RINGS_THE_FELLOWSHIP_OF_THE_RING_00.31.10.217-00.31.10.706', '1014_2012_00.01.21.399-00.01.23.997', '1014_2012_00.27.58.174-00.27.59.021', '1018_Body_Of_Lies_00.42.15.677-00.42.18.534', '1037_The_Curious_Case_Of_Benjamin_Button_02.25.14.743-02.25.17.312']
            for movie in movies:
                if (movie in self.vid_list):
                    self.vid_list.remove(movie)
        self.split_name = split_name
        self.dataset_name = f'LSMDC_{cut_name}_{split_name}'
|
class MixDataset(Dataset):
    """Dataset composed of a mix of different datasets.

    Samples are drawn from the constituent datasets with probabilities
    proportional to their (normalised) `mix_weight` entries.
    """

    @abc.abstractmethod
    def configure_train_test_splits(self, split_name):
        'Partition the datset into train/val/test splits.'
        raise NotImplementedError

    @abc.abstractmethod
    def sanity_checks(self):
        'Run sanity checks on loaded data.'
        raise NotImplementedError

    @abc.abstractmethod
    def load_features(self):
        'Load features from disk.'
        raise NotImplementedError

    def __init__(self, mix, raw_input_dims, training=False, tokenizer=None, n_pairs=1, loaded_data=None, cross_seed=0):
        # NOTE(review): this assignment shadows the abstract `sanity_checks`
        # method above with a plain flag — confirm which one is intended.
        self.sanity_checks = False
        self.mix = mix
        self.experts = set(raw_input_dims.keys())
        self.train = training
        self.tokenizer = tokenizer
        self.n_pairs = n_pairs
        # A single-dataset mix inherits that dataset's name and split.
        if (len(mix) == 1):
            self.dataset_name = '_'.join([mix[0]['dataset_name'], mix[0]['cut_name'], mix[0]['split_name']])
            self.split_name = mix[0]['split_name']
        else:
            self.dataset_name = 'Mix'
            self.split_name = 'mic'
        dataset_classes = {'MSVD': MSVD, 'LSMDC': LSMDC, 'MSRVTT': MSRVTT, 'DiDeMo': DiDeMo, 'ActivityNet': ActivityNet, 'YouCook2': YouCook2, 'HowTo100M': HowTo100M}
        self.datasets = []
        self.mix_weights = []
        self.dataset_names = []
        for config in mix:
            # Copy so that popping keys does not mutate the caller's config.
            dataset_config = config.copy()
            # Optional per-dataset sampling weight (defaults to 1).
            if ('mix_weight' in dataset_config.keys()):
                self.mix_weights.append(dataset_config['mix_weight'])
                dataset_config.pop('mix_weight')
            else:
                self.mix_weights.append(1)
            dataset_name = dataset_config['dataset_name']
            self.dataset_names.append(dataset_name)
            dataset_config.pop('dataset_name')
            dataset = dataset_classes[dataset_name](**dataset_config, raw_input_dims=raw_input_dims, training=training, tokenizer=tokenizer, n_pairs=n_pairs, loaded_data=loaded_data, cross_seed=cross_seed)
            self.datasets.append(dataset)
        # Normalise the weights into sampling probabilities.
        self.mix_weights = [(float(i) / sum(self.mix_weights)) for i in self.mix_weights]
        logger.debug('Datasets: %s', self.dataset_names)
        logger.debug('mix_weights: %s', self.mix_weights)

    def collate_data(self, data):
        """Collate per-video sample dicts into one flat minibatch dict.

        Text and video tensors are concatenated along axis 0 per key (and per
        expert for video); entries under 'lists' are simply extended.
        """
        text_keys = data[0]['text_tensors'].keys()
        text_tensors = {key: [] for key in text_keys}
        vid_keys = data[0]['vid_tensors'].keys()
        vid_tensors = {key: {expert: [] for expert in self.experts} for key in vid_keys}
        l_keys = data[0]['lists'].keys()
        lists = {key: [] for key in l_keys}
        for (_, vid) in enumerate(data):
            for key in text_keys:
                text_tensors[key].append(vid['text_tensors'][key])
            for key in vid_keys:
                for expert in self.experts:
                    vid_tensors[key][expert].append(vid['vid_tensors'][key][expert])
            for key in l_keys:
                lists[key].extend(vid['lists'][key])
        for key in text_keys:
            text_tensors[key] = np.concatenate(text_tensors[key], axis=0).astype(np.int32)
        for key in vid_keys:
            for expert in self.experts:
                vid_tensors[key][expert] = np.concatenate(vid_tensors[key][expert], axis=0).astype(np.float32)
        minibatch = {**text_tensors, **vid_tensors, **lists}
        return minibatch

    def __len__(self):
        # While training, the epoch length is effectively unbounded; the
        # trainer decides when to stop.
        if (len(self.mix) == 1):
            if self.train:
                return int(10000000.0)
            else:
                return len(self.datasets[0])
        elif self.train:
            return int(10000000.0)
        else:
            return 1000

    def __getitem__(self, idx):
        # Free-running RNG in training; deterministic per-index RNG at eval.
        if self.train:
            rng = np.random
        else:
            rng = np.random.RandomState(idx)
        dataset_nb = rng.choice(len(self.mix), p=self.mix_weights)
        dataset = self.datasets[dataset_nb]
        return dataset[idx]
|
class MSRVTT(BaseDataset):
    """MSR-VTT dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: one of 'miech', 'jsfusion', 'full' or 'c'; each cut has
                its own list files and split conventions.
            split_name: train/trn/val/trainval/test (or test1/test2 for 'c').

        Raises:
            ValueError: on an unrecognised cut or (for 'full') split name.
        """
        self.restrict_test_captions = None
        if (cut_name in ['miech', 'jsfusion']):
            if (cut_name in ['miech']):
                train_list_path = 'train_list_miech.txt'
                test_list_path = 'test_list_miech.txt'
            elif (cut_name in ['jsfusion']):
                train_list_path = 'train_list_jsfusion.txt'
                test_list_path = 'val_list_jsfusion.txt'
                # jsfusion evaluation restricts each test video to a fixed
                # caption index, loaded from a cached pickle.
                test_cap_idx_path = os.path.join(self.data_dir, 'jsfusion_val_caption_idx.pkl')
                self.restrict_test_captions = memcache(test_cap_idx_path)
            test_list_path = os.path.join(self.data_dir, test_list_path)
            with open(test_list_path) as f:
                test_vid_list = f.readlines()
            nb_test_samples = len(test_vid_list)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                cross_vid_list = train_vid_list
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # Deterministic shuffle so the train/val carve-out is reproducible.
                rng = np.random.RandomState(self.cross_seed)
                rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        # The first `nb_test_samples` videos are reserved for 'val'.
                        self.vid_list = cross_vid_list[nb_test_samples:]
                        if (split_name in ['trn']):
                            # 'trn' is a small train subset used for monitoring.
                            self.vid_list = self.vid_list[:nb_test_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[:nb_test_samples]
            elif (split_name == 'test'):
                self.vid_list = test_vid_list
            self.vid_list = [x.strip() for x in self.vid_list]
        elif (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised split: {split_name}')
            list_path = os.path.join(self.data_dir, list_path)
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            if (split_name in ['trn']):
                # Fixed-size, deterministically shuffled train subset.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                self.vid_list = self.vid_list[:497]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val and re-split; seed 0 keeps the original order.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            # Deterministic val-sized subset of train.
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out lists used for the public evaluation server.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'MSRVTT_{cut_name}_{split_name}'
|
class MSVD(BaseDataset):
    """MSVD dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: dataset cut; 'full' (official lists) or 'c'
                (train+val pool, with public-server test lists).
            split_name: train/trn/val/test for 'full';
                train/trn/val/trainval/test1/test2 for 'c'.

        Raises:
            ValueError: on an unrecognised cut or (for 'full') split name.
        """
        if (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised MSVD split: {split_name}')
            list_path = os.path.join(self.root_feat, list_path)
            print('loading split ...')
            tic = time.time()
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            print('done in {:.3f}s'.format((time.time() - tic)))
            if (split_name in ['trn']):
                # 'trn' is a fixed-size, deterministically shuffled train subset.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                # NOTE(review): cut_name is 'full' in this branch, so the 'c'
                # case below looks unreachable — confirm the intended sizes.
                if (cut_name in ['c']):
                    self.vid_list = self.vid_list[:120]
                else:
                    self.vid_list = self.vid_list[:670]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val and re-split; seed 0 keeps the original order.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            # Deterministic val-sized subset of train.
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out lists used for the public evaluation server.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'MSVD_{cut_name}_{split_name}'
|
class YouCook2(BaseDataset):
    """YouCook2 dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate `self.vid_list` for the requested cut and split.

        Args:
            cut_name: dataset cut; 'full' (official lists) or 'c'
                (train+val pool, with public-server test lists).
            split_name: train/trn/val/test for 'full';
                train/trn/val/trainval/test1/test2 for 'c'.

        Raises:
            ValueError: on an unrecognised cut or (for 'full') split name.
        """
        if (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised split: {split_name}')
            list_path = os.path.join(self.root_feat, list_path)
            print('loading split ...')
            tic = time.time()
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            print('done in {:.3f}s'.format((time.time() - tic)))
            if (split_name in ['trn']):
                # 'trn' is a fixed-size, deterministically shuffled train subset.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                self.vid_list = self.vid_list[:3310]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val and re-split; seed 0 keeps the original order.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            # Deterministic val-sized subset of train.
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out lists used for the public evaluation server.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'YouCook2_{cut_name}_{split_name}'
|
class MaxMarginRankingLoss(nn.Module):
    """Implementation of the Max-margin ranking loss.

    Treats the diagonal of a square similarity matrix as positive pairs and
    every other entry (row- and column-wise) as a negative, then averages
    relu(margin - (positive - negative)).

    Fix: the original forward computed the unfiltered `max_margin` tensor
    unconditionally and then discarded and recomputed it on the `fix_norm`
    path; the computation is now done exactly once (results are unchanged).
    """

    def __init__(self, margin=1, fix_norm=True):
        """Args:
            margin: ranking margin.
            fix_norm: when True, exclude the diagonal (positive) entries from
                the negative set before averaging.
        """
        super().__init__()
        self.fix_norm = fix_norm
        self.loss = th.nn.MarginRankingLoss(margin)
        self.margin = margin

    def forward(self, x):
        """Compute the loss from a square (n, n) similarity matrix `x`."""
        n = x.size()[0]
        # Positive scores: the diagonal, tiled to align with every negative
        # (both row-wise and column-wise comparisons -> length 2*n*n).
        x1 = th.diag(x)
        x1 = x1.unsqueeze(1)
        x1 = x1.expand(n, n)
        x1 = x1.contiguous().view((- 1), 1)
        x1 = th.cat((x1, x1), 0)
        # Negative scores: all entries of x and of its transpose.
        x2 = x.view((- 1), 1)
        x3 = x.transpose(0, 1).contiguous().view((- 1), 1)
        x2 = th.cat((x2, x3), 0)
        if self.fix_norm:
            # Drop the diagonal pairs so positives do not also act as negatives.
            keep = (th.ones(x.shape) - th.eye(x.shape[0]))
            keep1 = keep.view((- 1), 1)
            keep2 = keep.transpose(0, 1).contiguous().view((- 1), 1)
            keep_idx = th.nonzero(th.cat((keep1, keep2), 0).flatten()).flatten()
            if x1.is_cuda:
                keep_idx = keep_idx.cuda()
            x1 = th.index_select(x1, dim=0, index=keep_idx)
            x2 = th.index_select(x2, dim=0, index=keep_idx)
        max_margin = F.relu((self.margin - (x1 - x2)))
        return max_margin.mean()
|
class TripletLoss(object):
    """Triplet loss with a configurable example-mining strategy."""

    def __init__(self, margin=None, mining_type='hard', topk=1):
        """Args:
            margin: positive value -> MarginRankingLoss; None/0 -> SoftMarginLoss.
            mining_type: one of 'hard', 'topk', 'weighted', 'topk2', 'topk3'.
            topk: number of negatives mined per anchor for the topk variants.
        """
        self.margin = margin
        if ((margin is not None) and (margin > 0)):
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            self.ranking_loss = nn.SoftMarginLoss()
        self.mining_type = mining_type
        # NOTE(review): stores the builtin `type`; looks like a leftover.
        self.type = type
        self.topk = topk

    def __call__(self, mat_dist):
        """Mine (positive, negative) pairs from `mat_dist` and return the loss."""
        mining = self.mining_type
        if (mining == 'hard'):
            (dist_ap, dist_an) = hard_example_mining(mat_dist)
        elif (mining == 'topk'):
            (dist_ap, dist_an) = topk_example_mining(mat_dist, self.topk)
        elif (mining == 'weighted'):
            (dist_ap, dist_an) = batch_weight(mat_dist)
        elif (mining == 'topk2'):
            (dist_ap, dist_an) = topk_example_mining2(mat_dist, self.topk)
        elif (mining == 'topk3'):
            # Like 'topk', but the mined distances are softmax-normalised.
            (raw_ap, raw_an) = topk_example_mining(mat_dist, self.topk)
            dist_ap = F.softmax(raw_ap, dim=1)
            dist_an = F.softmax(raw_an, dim=1)
        target = dist_ap.new().resize_as_(dist_an).fill_(1)
        if ((self.margin is not None) and (self.margin > 0)):
            return self.ranking_loss(dist_ap, dist_an, target)
        return self.ranking_loss((dist_ap - dist_an), target)
|
def hard_example_mining(dist_mat):
    """Mine the hardest positive and hardest negative per anchor.

    The diagonal entries of the square matrix are the positive pairs and all
    off-diagonal entries are negatives. Mining runs over both the matrix and
    its transpose, so the outputs have length 2N.

    Fix: masks are now created on `dist_mat`'s device instead of being forced
    onto CUDA, so the function also works on CPU-only machines (behavior on
    GPU inputs is unchanged).

    Args:
        dist_mat: (N, N) tensor of pairwise similarities/distances.

    Returns:
        Tuple (dist_ap, dist_an), each a 1-D tensor of shape (2N,).
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    dist_ap = th.mul(dist_mat, is_pos)
    # Sentinel so masked-out entries never win the min.
    dist_ap[(dist_ap == 0.0)] = 100000000.0
    (dist_ap, relative_p_inds) = th.min(dist_ap, dim=1, keepdim=True)
    dist_ap2 = th.mul(dist_mat.t(), is_pos)
    dist_ap2[(dist_ap2 == 0.0)] = 100000000.0
    (dist_ap2, relative_p_inds) = th.min(dist_ap2, dim=1, keepdim=True)
    dist_ap = th.cat((dist_ap, dist_ap2), dim=0)
    dist_an = th.mul(dist_mat, is_neg)
    # Sentinel so masked-out entries never win the max.
    dist_an[(dist_an == 0.0)] = (- 100000000.0)
    (dist_an, relative_n_inds) = th.max(dist_an, dim=1, keepdim=True)
    dist_an2 = th.mul(dist_mat.t(), is_neg)
    dist_an2[(dist_an2 == 0.0)] = (- 100000000.0)
    (dist_an2, relative_n_inds) = th.max(dist_an2, dim=1, keepdim=True)
    dist_an = th.cat((dist_an, dist_an2), dim=0)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    return (dist_ap, dist_an)
|
def topk_example_mining(dist_mat, topk):
    """Mine the positive and the top-k negatives per anchor.

    Diagonal entries are positives (mined with k=1, then tiled to `topk`
    columns); the `topk` largest off-diagonal entries per row are negatives.
    Mining runs over both the matrix and its transpose (2N rows in total).

    Fix: masks are now created on `dist_mat`'s device instead of being forced
    onto CUDA, so the function also works on CPU-only machines (behavior on
    GPU inputs is unchanged).

    Args:
        dist_mat: (N, N) tensor.
        topk: number of negatives mined per anchor.

    Returns:
        Tuple (dist_ap, dist_an); shape (2N,) when topk == 1, else (2N, topk).
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    dist_ap = th.mul(dist_mat, is_pos)
    # Sentinel so masked-out entries never win the (smallest) top-k.
    dist_ap[(dist_ap == 0.0)] = 100000000.0
    (dist_ap, relative_p_inds) = th.topk(dist_ap, k=1, dim=1, largest=False)
    dist_ap2 = th.mul(dist_mat.t(), is_pos)
    dist_ap2[(dist_ap2 == 0.0)] = 100000000.0
    (dist_ap2, relative_p_inds) = th.topk(dist_ap2, k=1, dim=1, largest=False)
    dist_ap = th.cat((dist_ap, dist_ap2), dim=0)
    # Tile the single positive so it lines up with each of the k negatives.
    temp = dist_ap
    for i in range((topk - 1)):
        dist_ap = th.cat((dist_ap, temp), dim=1)
    dist_an = th.mul(dist_mat, is_neg)
    # Sentinel so masked-out entries never win the (largest) top-k.
    dist_an[(dist_an == 0.0)] = (- 100000000.0)
    (dist_an, relative_n_inds) = th.topk(dist_an, k=topk, dim=1, largest=True)
    dist_an2 = th.mul(dist_mat.t(), is_neg)
    dist_an2[(dist_an2 == 0.0)] = (- 100000000.0)
    (dist_an2, relative_n_inds) = th.topk(dist_an2, k=topk, dim=1, largest=True)
    dist_an = th.cat((dist_an, dist_an2), dim=0)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    return (dist_ap, dist_an)
|
def topk_example_mining2(dist_mat, topk):
assert (len(dist_mat.size()) == 2)
assert (dist_mat.size(0) == dist_mat.size(1))
N = dist_mat.size(0)
is_pos = th.eye(N)
is_neg = (th.ones(dist_mat.shape) - th.eye(N))
_dist_mat = (F.softmax(dist_mat, dim=1) * dist_mat)
_dist_mat_t = (F.softmax(dist_mat, dim=0) * dist_mat)
is_pos = is_pos.cuda()
is_neg = is_neg.cuda()
dist_ap = th.mul(_dist_mat, is_pos)
dist_ap[(dist_ap == 0.0)] = 100000000.0
(dist_ap, relative_p_inds) = th.topk(dist_ap, k=1, dim=1, largest=False)
dist_ap2 = th.mul(_dist_mat_t.t(), is_pos)
dist_ap2[(dist_ap2 == 0.0)] = 100000000.0
(dist_ap2, relative_p_inds) = th.topk(dist_ap2, k=1, dim=1, largest=False)
dist_ap = th.cat((dist_ap, dist_ap2), dim=0)
temp = dist_ap
for i in range((topk - 1)):
dist_ap = th.cat((dist_ap, temp), dim=1)
dist_an = th.mul(_dist_mat, is_neg)
dist_an[(dist_an == 0.0)] = (- 100000000.0)
(dist_an, relative_n_inds) = th.topk(dist_an, k=topk, dim=1, largest=True)
dist_an2 = th.mul(_dist_mat_t.t(), is_neg)
dist_an2[(dist_an2 == 0.0)] = (- 100000000.0)
(dist_an2, relative_n_inds) = th.topk(dist_an2, k=topk, dim=1, largest=True)
dist_an = th.cat((dist_an, dist_an2), dim=0)
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
return (dist_ap, dist_an)
|
def batch_all(dist_mat):
    """Return every positive/negative distance pair ("batch-all" mining).

    The diagonal of `dist_mat` holds the positive-pair distances and the
    off-diagonal entries the negative-pair distances. Each anchor's positive
    distance is tiled against each of its negative distances.

    Args:
        dist_mat: square (N, N) pairwise distance tensor with strictly
            positive entries (zeros are treated as masked-out).

    Returns:
        (dist_ap_re, dist_an_re): flat tensors of equal length where index i
        pairs one anchor-positive distance with one anchor-negative distance.
    """
    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)
    device = dist_mat.device
    # Fix: build the masks on dist_mat's device instead of the hard-coded
    # .cuda() calls, so the function also works with CPU tensors.
    is_pos = th.eye(N, device=device)
    is_neg = th.ones(dist_mat.shape, device=device) - th.eye(N, device=device)
    dist_ap = th.mul(dist_mat, is_pos)
    dist_an = th.mul(dist_mat, is_neg)
    # Keep only the unmasked (non-zero) entries, row-major per anchor.
    dist_ap_pos = dist_ap[dist_ap > 0]
    dist_an_pos = dist_an[dist_an > 0]
    ap_num = int(dist_ap_pos.size()[0] / N)
    an_num = int(dist_an_pos.size()[0] / N)
    all_num = (N * ap_num) * an_num
    # Tile positives against negatives so the flat indices line up.
    dist_ap_re = dist_ap_pos.reshape(N, ap_num, 1)
    dist_ap_re = dist_ap_re.expand(N, ap_num, an_num)
    dist_ap_re = dist_ap_re.reshape(all_num)
    dist_an_re = dist_an_pos.reshape(N, an_num, 1)
    dist_an_re = dist_an_re.expand(N, an_num, ap_num)
    dist_an_re = th.transpose(dist_an_re, 1, 2)
    dist_an_re = dist_an_re.reshape(all_num)
    return (dist_ap_re, dist_an_re)
|
def batch_weight(dist_mat):
assert (len(dist_mat.size()) == 2)
assert (dist_mat.size(0) == dist_mat.size(1))
N = dist_mat.size(0)
is_pos = th.eye(N)
is_neg = (th.ones(dist_mat.shape) - th.eye(N))
is_pos = is_pos.cuda()
is_neg = is_neg.cuda()
dist_ap = th.mul(dist_mat, is_pos)
dist_an = th.mul(dist_mat, is_neg)
dist_ap_weighted = F.softmax(dist_ap, dim=1)
dist_an_weighted = F.softmax((- dist_an), dim=1)
dist_ap_w = (dist_ap * dist_ap_weighted)
dist_an_w = (dist_an * dist_an_weighted)
dist_ap_pos = dist_ap_w[(dist_ap_w > 0)]
dist_an_pos = dist_an_w[(dist_an_w > 0)]
ap_num = int((dist_ap_pos.size()[0] / N))
an_num = int((dist_an_pos.size()[0] / N))
all_num = ((N * ap_num) * an_num)
dist_ap_re = dist_ap_pos.reshape(N, ap_num, 1)
dist_ap_re = dist_ap_re.expand(N, ap_num, an_num)
dist_ap_re = dist_ap_re.reshape(all_num)
dist_an_re = dist_an_pos.reshape(N, an_num, 1)
dist_an_re = dist_an_re.expand(N, an_num, ap_num)
dist_an_re = th.transpose(dist_an_re, 1, 2)
dist_an_re = dist_an_re.reshape(all_num)
return (dist_ap_re, dist_an_re)
|
class LSTMModel(nn.Module):
'Long Short-Term memory network.'
def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
super(LSTMModel, self).__init__()
self.hidden_dim = hidden_dim
self.layer_dim = layer_dim
self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, x, x_lengths):
device = x.device
h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_()
h0 = h0.to(device)
c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_()
c0 = c0.to(device)
x = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, enforce_sorted=False, batch_first=True)
(out, (hn, _)) = self.lstm(x, (h0.detach(), c0.detach()))
(out, _) = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
res = self.fc(hn[(- 1)])
return res
|
class NetVLAD(nn.Module):
'Net Vlad module.'
def __init__(self, cluster_size, feature_size, add_batch_norm=True):
super().__init__()
self.feature_size = feature_size
self.cluster_size = cluster_size
init_sc = (1 / math.sqrt(feature_size))
self.clusters = nn.Parameter((init_sc * th.randn(feature_size, cluster_size)))
self.clusters2 = nn.Parameter((init_sc * th.randn(1, feature_size, cluster_size)))
self.add_batch_norm = add_batch_norm
self.batch_norm = nn.BatchNorm1d(cluster_size)
self.out_dim = (cluster_size * feature_size)
def forward(self, x):
self.sanity_checks(x)
max_sample = x.size()[1]
x = x.view((- 1), self.feature_size)
if (x.device != self.clusters.device):
ipdb.set_trace()
assignment = th.matmul(x, self.clusters)
if self.add_batch_norm:
assignment = self.batch_norm(assignment)
assignment = F.softmax(assignment, dim=1)
assignment = assignment.view((- 1), max_sample, self.cluster_size)
a_sum = th.sum(assignment, dim=1, keepdim=True)
a = (a_sum * self.clusters2)
assignment = assignment.transpose(1, 2)
x = x.view((- 1), max_sample, self.feature_size)
vlad = th.matmul(assignment, x)
vlad = vlad.transpose(1, 2)
vlad = (vlad - a)
vlad = F.normalize(vlad)
vlad = vlad.reshape((- 1), (self.cluster_size * self.feature_size))
vlad = F.normalize(vlad)
return vlad
def sanity_checks(self, x):
'Catch any nans in the inputs/clusters.'
if th.isnan(th.sum(x)):
print('nan inputs')
ipdb.set_trace()
if th.isnan(self.clusters[0][0]):
print('nan clusters')
ipdb.set_trace()
|
class TxtEmbeddings(nn.Module):
    'Word-embedding lookup table, optionally initialised from a pretrained checkpoint.'

    def __init__(self, vocab_size=None, emb_dim=None, ckpt=None, freeze=False):
        """
        Args:
            vocab_size: vocabulary size (used only when ckpt is None).
            emb_dim: embedding dimension (used only when ckpt is None).
            ckpt: path to a BERT checkpoint, or a ready-made weight tensor.
            freeze: if True the embedding weights are not trained.
        """
        super(TxtEmbeddings, self).__init__()
        if ckpt is not None:
            if isinstance(ckpt, str):
                logger.debug('Loading the pretrained word embeddings from %s ...', ckpt)
                pretrained_dict = torch.load(ckpt)
                weight = pretrained_dict['bert.embeddings.word_embeddings.weight']
            elif isinstance(ckpt, torch.Tensor):
                # Fix: accept any tensor; the original matched only
                # torch.FloatTensor, leaving `weight` unbound (NameError)
                # for e.g. CUDA or half-precision tensors.
                weight = ckpt
            else:
                raise TypeError('ckpt must be a path or a tensor, got {}'.format(type(ckpt)))
            self.nb_words = weight.size()[0]
            logger.debug('Nb of words in the embedding table: %d', self.nb_words)
            self.text_dim = weight.size()[1]
            self.word_embeddings = nn.Embedding.from_pretrained(weight, freeze=freeze, padding_idx=0)
        else:
            self.word_embeddings = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
            self.text_dim = emb_dim
        if freeze:
            # from_pretrained(..., freeze=True) already freezes its weights;
            # this also covers the freshly-initialised table.
            for param in self.word_embeddings.parameters():
                param.requires_grad = False

    def forward(self, input_ids=None):
        'Look up the embeddings for a batch of token-id tensors.'
        return self.word_embeddings(input_ids)
|
class WeTokenizer():
    """Word-embeddings tokenizer backed by a word2vec/GloVe vocabulary.

    Index 0 is reserved for '[PAD]' and index 1 for '[UNK]'; the pretrained
    vectors start at index 2 (the two special rows are all-zero).
    """

    def __init__(self, we_filepath, freeze=False):
        """
        Args:
            we_filepath: path to a .bin (word2vec) or .txt (GloVe) embedding file.
            freeze: if True the embedding table is not fine-tuned.
        """
        if we_filepath.endswith('.bin'):
            self.we = KeyedVectors.load_word2vec_format(we_filepath, binary=True)
        elif we_filepath.endswith('.txt'):
            # GloVe text files need a one-off conversion to word2vec format.
            w2v_format_path = we_filepath.replace('.txt', '.w2v')
            if not os.path.exists(w2v_format_path):
                glove2word2vec(we_filepath, w2v_format_path)
            self.we = KeyedVectors.load_word2vec_format(w2v_format_path, binary=False)
        self.text_dim = self.we.vectors.shape[1]
        # Two zero rows for the special [PAD] and [UNK] tokens.
        pad_vec = torch.zeros((2, self.text_dim))
        raw_table = torch.FloatTensor(self.we.vectors)
        self.weights = torch.cat((pad_vec, raw_table))
        self.words = ['[PAD]', '[UNK]'] + list(self.we.vocab.keys())
        # Fix: O(1) token lookups; the original used list membership and
        # list.index, which are O(vocab) per token.
        self.word_to_idx = {word: idx for (idx, word) in enumerate(self.words)}
        self.we_model = TxtEmbeddings(ckpt=self.weights, freeze=freeze)

    def tokenize(self, text):
        'Lower-case, strip non-alphanumerics and keep only in-vocabulary words.'
        text = text.lower()
        words = [''.join(ch for ch in word if ch.isalnum()) for word in text.split(' ')]
        words = [word for word in words if word in self.word_to_idx]
        if not words:
            words = ['[UNK]']
        return words

    def convert_tokens_to_ids(self, tokens):
        'Map tokens to their vocabulary indices.'
        return [self.word_to_idx[token] for token in tokens]

    def convert_ids_to_tokens(self, ids):
        'Map vocabulary indices back to tokens.'
        return [self.words[idx] for idx in ids]
|
class ConfigParser():
    """Parses the experiment config plus command-line overrides and sets up
    the experiment directory, checkpoint resuming and logging."""

    def __init__(self, args, options=''):
        """
        Args:
            args: parsed argparse namespace; config/resume/load_checkpoint/
                only_eval/device/verbose attributes are read.
            options: optional iterable of option objects (with `flags` and
                `target`) used to override config entries from the CLI.
        """
        if args.resume:
            # Resuming: read the config from the experiment dir and restore
            # the most recent checkpoint.
            msg_cfg = 'If resuming experiment then no config should be provided'
            assert (args.config is None), msg_cfg
            msg_cfg = 'If resuming experiment then no checkpoint should be provided'
            assert (args.load_checkpoint is None), msg_cfg
            exp_dir = pathlib.Path(args.resume)
            checkpoint_path = get_last_checkpoint_path(exp_dir)
            self.resume = checkpoint_path
            self.cfg_fname = (exp_dir / 'config.json')
        else:
            msg_no_cfg = 'Config file must be specified'
            assert (args.config is not None), msg_no_cfg
            self.resume = None
            self.cfg_fname = pathlib.Path(args.config)
        if args.load_checkpoint:
            checkpoint_path = args.load_checkpoint
            self.resume = checkpoint_path
        if args.only_eval:
            self.only_eval = True
        else:
            self.only_eval = False
        config = read_json(self.cfg_fname)
        self._config = _update_config(config, options, args)
        # Experiment name: explicit 'exp_name' entry or the config file stem.
        if ('exp_name' in self.config.keys()):
            exper_name = self.config['exp_name']
        else:
            exper_name = pathlib.Path(args.config).stem
        self._config['exp_name'] = exper_name
        if ('save_dir' in self.config['trainer'].keys()):
            save_dir = pathlib.Path(self.config['trainer']['save_dir'])
        else:
            save_dir = ((pathlib.Path.cwd() / 'exps') / exper_name)
        self._config['trainer']['save_dir'] = str(save_dir)
        self._save_dir = save_dir
        self._log_dir = save_dir
        self._web_dirs = [(save_dir / 'visualisations')]
        self._exper_name = exper_name
        self._args = args
        if ('external_save_dir' in self.config['trainer'].keys()):
            external_save_dir = pathlib.Path(self.config['trainer']['external_save_dir'])
            self._web_dirs.append((external_save_dir / 'visualisations'))
        else:
            external_save_root = (pathlib.Path.cwd() / 'external_save_dir')
            if external_save_root.exists():
                external_save_dir = ((external_save_root / 'exps') / exper_name)
                # Fix: record the *external* save dir here; the original
                # stored `save_dir`, pointing the config at the wrong path.
                self._config['trainer']['external_save_dir'] = str(external_save_dir)
                self._web_dirs.append((external_save_dir / 'visualisations'))
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        logpath = (save_dir / 'log.txt')
        if args.verbose:
            logging.basicConfig(level=os.environ.get('LOGLEVEL', 'DEBUG'), format='%(message)s')
        else:
            logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'), handlers=[logging.FileHandler(logpath), logging.StreamHandler()], format='%(message)s')
        logger.info('Experiment directory: %s', save_dir)
        if (args.device == 'cpu'):
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        elif args.device:
            os.environ['CUDA_VISIBLE_DEVICES'] = args.device
        # Robustness: don't KeyError when the variable was never set.
        logger.debug('CUDA_VISIBLE_DEVICES: %s', os.environ.get('CUDA_VISIBLE_DEVICES', ''))
        n_gpu = torch.cuda.device_count()
        logger.debug('n_gpu = torch.cuda.device_count(): %d (nb of gpus available)', n_gpu)
        # Persist the fully-resolved config so the run can be resumed later.
        write_json(self.config, (self.save_dir / 'config.json'))
        logging.debug(pprint.pformat(self.config))

    def init(self, name, module, *args, **kwargs):
        "Finds a function handle with the name given as 'type' in config and instantiates it with the config 'args' plus any extra kwargs."
        module_name = self[name]['type']
        module_args = dict(self[name]['args'])
        msg = 'Overwriting kwargs given in config file is not allowed'
        assert all([(k not in module_args) for k in kwargs]), msg
        module_args.update(kwargs)
        return getattr(module, module_name)(*args, **module_args)

    def __getitem__(self, name):
        return self.config[name]

    def get(self, name, default):
        return self.config.get(name, default)

    @property
    def config(self):
        return self._config

    @property
    def save_dir(self):
        return self._save_dir

    @property
    def log_dir(self):
        return self._log_dir

    @property
    def exper_name(self):
        return self._exper_name

    @property
    def web_dirs(self):
        return self._web_dirs

    def __repr__(self):
        # Fix: the original called `pprint.PrettyPrinter().pprint.pformat`,
        # which raises AttributeError; use the module-level pformat.
        return pprint.pformat(self.__dict__)
|
def _update_config(config, options, args):
for opt in options:
value = getattr(args, _get_opt_name(opt.flags))
if (value is not None):
_set_by_path(config, opt.target, value)
return config
|
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
|
def _set_by_path(tree, keys, value):
    'Set a value in a nested object in tree by sequence of keys.'
    parent = _get_by_path(tree, keys[:(- 1)])
    parent[keys[(- 1)]] = value
|
def _get_by_path(tree, keys):
'Access a nested object in tree by sequence of keys.'
return functools.reduce(operator.getitem, keys, tree)
|
def train(config):
    """Build the dataloaders, model, loss and optimizer from `config`, run
    training (unless config.only_eval) and a final evaluation.

    Args:
        config: ConfigParser-like object exposing __getitem__/get/init plus
            only_eval, exper_name, web_dirs and save_dir.
    """
    # Per-modality ("expert") dimensionalities shared by loaders and model.
    expert_dims = compute_dims(config)
    raw_input_dims = {}
    for (expert, expert_dic) in expert_dims.items():
        raw_input_dims[expert] = expert_dic['dim']
    tic = time.time()
    # Seed python/numpy/torch for reproducibility; `cross_seed` (defaulting
    # to `seed`) is forwarded to the dataloaders.
    seed = config['seed']
    cross_seed = config.get('cross_seed', seed)
    logger.debug('Setting experiment random seed to %d', seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    tokenizer = create_tokenizer(config['arch']['args']['txt_inp'])
    logger.info('Preparing the dataloaders ...')
    dataset_types = ['train_sets', 'continuous_eval_sets', 'final_eval_sets']
    data_loaders = {}
    loaded_data = {}  # shared cache so datasets can reuse loaded features
    for dataset_type in dataset_types:
        training = (dataset_type == 'train_sets')
        # Dataset groups absent from the config are simply skipped.
        if (not config.get(dataset_type, False)):
            continue
        data_loaders[dataset_type] = []
        for (_, data_loader) in enumerate(config[dataset_type]):
            data_loaders[dataset_type].append(getattr(module_data, data_loader['type'])(**data_loader['args'], raw_input_dims=raw_input_dims, training=training, tokenizer=tokenizer, loaded_data=loaded_data, cross_seed=cross_seed))
    model = config.init(name='arch', module=module_arch, expert_dims=expert_dims, tokenizer=tokenizer)
    loss = config.init(name='loss', module=module_loss)
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    trainable_params = filter((lambda p: p.requires_grad), model.parameters())
    # Split parameters so BERT text-encoder weights can use their own
    # learning rate under the 'Adam_' optimizer option below.
    txt_bert_params = []
    params = []
    for (name, param) in model.named_parameters():
        if param.requires_grad:
            if ('txt_bert' in name):
                txt_bert_params.append(param)
            else:
                params.append(param)
    if (config['optimizer']['type'] == 'Ranger'):
        optimizer = config.init('optimizer', ranger, trainable_params)
    elif (config['optimizer']['type'] == 'Adam_'):
        # Two parameter groups: video-side params and BERT params.
        optimizer = torch.optim.Adam([{'params': params, 'lr': config['optimizer']['vid']['lr']}, {'params': txt_bert_params, 'lr': config['optimizer']['txt']['lr']}], lr=config['optimizer']['args']['lr'], weight_decay=config['optimizer']['args']['weight_decay'])
    else:
        optimizer = config.init('optimizer', torch.optim, trainable_params)
    lr_scheduler = config.init('lr_scheduler', torch.optim.lr_scheduler, optimizer)
    if ('warmup_iterations' in config['optimizer']):
        warmup_iterations = config['optimizer']['warmup_iterations']
    else:
        warmup_iterations = (- 1)  # -1 disables LR warmup in the Trainer
    visualizer = config.init(name='visualizer', module=module_vis, exp_name=config.exper_name, web_dirs=config.web_dirs)
    trainer = Trainer(model, loss, metrics, optimizer, config=config, data_loaders=data_loaders, lr_scheduler=lr_scheduler, visualizer=visualizer, skip_first_n_saves=config['trainer'].get('skip_first_n_saves', 0), include_optim_in_ckpts=config['trainer'].get('include_optim_in_ckpts', False), expert_dims=expert_dims, tokenizer=tokenizer, warmup_iterations=warmup_iterations)
    if (not config.only_eval):
        logger.info('Training ...')
        trainer.train()
    # Final evaluation runs in both train and eval-only modes.
    logger.info('Final evaluation ...')
    trainer.evaluate()
    duration = time.strftime('%Hh%Mm%Ss', time.gmtime((time.time() - tic)))
    logger.info('Script took %s', duration)
    best_ckpt_path = (config.save_dir / 'trained_model.pth')
    if os.path.exists(best_ckpt_path):
        logger.info('The best performing ckpt can be found at %s', str(best_ckpt_path))
|
def main_train(raw_args=None):
    """Entry point: parse CLI arguments, build the ConfigParser and launch training.

    Args:
        raw_args: optional list of CLI arguments (defaults to sys.argv).
    """
    parser = argparse.ArgumentParser(description='PyTorch Template')
    parser.add_argument('--config', default=None, type=str, help='config file path (default: None)')
    parser.add_argument('--resume', default=None, type=str, help='path to the experiment dir to resume (default: None)')
    parser.add_argument('--load_checkpoint', default=None, type=str, help='path to the checkpoint to load (default: None)')
    parser.add_argument('--device', type=str, help='indices of GPUs to enable')
    parser.add_argument('--only_eval', action='store_true')
    parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
    args = parser.parse_args(raw_args)
    args = ConfigParser(args)
    # Fix: the original message ran two words together ("(...)to exceed").
    msg = f"Expected the number of training epochs ({args['trainer']['epochs']}) to exceed the save period ({args['trainer']['save_period']}), otherwise no checkpoints will be saved."
    assert (args['trainer']['epochs'] >= args['trainer']['save_period']), msg
    train(config=args)
|
class HTML():
    """Minimal helper for building an HTML page (via dominate) with headers,
    image grids and video grids, saved under `web_dir`."""

    def __init__(self, web_dir, title, refresh=0):
        """
        Args:
            web_dir: output directory (an 'images' subdir is created in it).
            title: page title.
            refresh: if > 0, auto-refresh interval in seconds.
        """
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv='refresh', content=str(refresh))

    def get_image_dir(self):
        'Return the directory that stores images.'
        return self.img_dir

    def add_header(self, text):
        'Append an <h3> header to the page.'
        with self.doc:
            h3(text)

    def add_videos(self, vids, txts, links, width=400, hidden_tag='hidden'):
        'Append a table row of <video> cells with per-line coloured captions.'
        self.t = table(border=1, style='table-layout: fixed;')
        self.doc.add(self.t)
        # Caption lines cycle through this palette.
        # Fix: 'salman' is not a CSS colour keyword; 'salmon' was intended.
        colors = ['red', 'blue', 'gold', 'salmon']
        with self.t:
            with tr():
                for (vid, txt, link) in zip(vids, txts, links):
                    td_style = 'word-wrap: break-word; width:{}px'.format(width)
                    with td(style=td_style, halign='center', valign='top'):
                        with p():
                            vid_path = str(vid)
                            if (vid_path == hidden_tag):
                                # Placeholder cell for videos that must not be shown.
                                p_style = 'font-weight: bold; width:{}px;'
                                p_style = p_style.format((width * 3))
                                p('hidden video', style=p_style)
                            else:
                                with a(href=str(link)):
                                    with video():
                                        attr(controls='controls', width=width)
                                        source(src=vid_path, type='video/mp4')
                            br()
                            rows = txt.split('<br>')
                            for (idx, row) in enumerate(rows):
                                color = colors[(idx % len(colors))]
                                bold_tag = '<b>'
                                if (not row.startswith(bold_tag)):
                                    s_style = 'color:{};'.format(color)
                                else:
                                    # Bold lines: strip the tag and render black/bold.
                                    s_style = 'color:black; font-weight: bold;'
                                    row = row[len(bold_tag):]
                                span(row, style=s_style)
                                br()

    def add_images(self, ims, txts, links, width=400):
        'Append a table row of <img> cells with captions.'
        self.t = table(border=1, style='table-layout: fixed;')
        self.doc.add(self.t)
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    td_style = 'word-wrap: break-word;'
                    with td(style=td_style, halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style=('width:%dpx' % width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        'Save the current content to the HTML file.'
        html_file = ('%s/index.html' % self.web_dir)
        # Fix: context manager ensures the handle is closed even on error.
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
|
def create_tokenizer(tokenizer_type):
    """Creates a tokenizer given a tokenizer type string.

    The suffix selects whether embeddings are frozen ('frz') or fine-tuned
    ('ftn'); the prefix selects the backend ('bert', 'wo2v' for word2vec,
    'grvl' for GrOVLE). Unknown prefixes yield None.
    """
    # Fix: `freeze` was left unbound (NameError) when the type ended with
    # neither 'frz' nor 'ftn'; default to fine-tuning in that case.
    if tokenizer_type.endswith('frz'):
        freeze = True
    elif tokenizer_type.endswith('ftn'):
        freeze = False
    else:
        freeze = False
    if tokenizer_type.startswith('bert'):
        model_name_or_path = 'bert-base-cased'
        do_lower_case = True
        cache_dir = 'data/cache_dir'
        tokenizer_class = BertTokenizer
        tokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case=do_lower_case, cache_dir=cache_dir)
    elif tokenizer_type.startswith('wo2v'):
        we_filepath = 'data/word_embeddings/word2vec/GoogleNews-vectors-negative300.bin'
        tokenizer = WeTokenizer(we_filepath, freeze=freeze)
    elif tokenizer_type.startswith('grvl'):
        we_filepath = 'data/word_embeddings/GrOVLE/mt_grovle.txt'
        tokenizer = WeTokenizer(we_filepath, freeze=freeze)
    else:
        tokenizer = None
    return tokenizer
|
def update_perf_log(epoch_perf, perf_log_path):
    'Append one line with a timestamp and all epoch metrics to the perf log.'
    timestamp = time.strftime('%c')
    fields = ['t: {}, '.format(timestamp)]
    fields.extend('{}: {}, '.format(metric, epoch_perf[metric]) for metric in epoch_perf)
    with open(perf_log_path, 'a') as log_file:
        log_file.write(''.join(fields) + '\n')
|
class Ranger(Optimizer):
    """Ranger optimizer: RAdam (rectified Adam) combined with Lookahead.

    Every `k` steps the fast weights are pulled towards a slow-moving copy
    with interpolation factor `alpha`.
    """

    def __init__(self, params, lr=0.001, alpha=0.5, k=6, n_sma_threshhold=5, betas=(0.95, 0.999), eps=1e-05, weight_decay=0):
        """
        Args:
            params: iterable of parameters (or parameter groups).
            lr: learning rate.
            alpha: Lookahead interpolation factor in [0, 1].
            k: Lookahead synchronisation period (in steps).
            n_sma_threshhold: SMA length below which RAdam falls back to the
                un-rectified update (spelling kept for compatibility).
            betas: Adam (beta1, beta2) coefficients.
            eps: numerical-stability term added to the denominator.
            weight_decay: weight-decay coefficient.
        """
        if not (0.0 <= alpha <= 1.0):
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not (1 <= k):
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not (lr > 0):
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not (eps > 0):
            raise ValueError(f'Invalid eps: {eps}')
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, n_sma_threshhold=n_sma_threshhold, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self.n_sma_threshhold = n_sma_threshhold
        self.alpha = alpha
        self.k = k
        # Per-step cache of (step, n_sma, step_size), keyed by step % 10.
        self.radam_buffer = [[None, None, None] for ind in range(10)]

    def __setstate__(self, state):
        print('set state called')
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by `closure`, or None.
        """
        loss = None
        # Fix: the original silently ignored `closure`; honour the standard
        # torch.optim.Optimizer contract.
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state initialisation on the first step for this param.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                # Fix: use the keyword (value=/alpha=) overloads; the
                # positional (scalar, tensor) forms are deprecated and have
                # been removed from recent torch releases.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                state['step'] += 1
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    (n_sma, step_size) = (buffered[1], buffered[2])
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    n_sma_max = (2 / (1 - beta2)) - 1
                    # Length of the SMA: RAdam's variance-rectification term.
                    n_sma = n_sma_max - (((2 * state['step']) * beta2_t) / (1 - beta2_t))
                    buffered[1] = n_sma
                    if n_sma > self.n_sma_threshhold:
                        step_size = (math.sqrt((((((((1 - beta2_t) * (n_sma - 4)) / (n_sma_max - 4)) * (n_sma - 2)) / n_sma) * n_sma_max) / (n_sma_max - 2))) / (1 - (beta1 ** state['step'])))
                    else:
                        # Variance not yet tractable: un-rectified SGD-with-momentum step.
                        step_size = 1.0 / (1 - (beta1 ** state['step']))
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                if n_sma > self.n_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=((- step_size) * group['lr']))
                else:
                    p_data_fp32.add_(exp_avg, alpha=((- step_size) * group['lr']))
                p.data.copy_(p_data_fp32)
                # Lookahead: every k steps interpolate towards the slow weights.
                if (state['step'] % group['k']) == 0:
                    slow_p = state['slow_buffer']
                    slow_p.add_((p.data - slow_p), alpha=self.alpha)
                    p.data.copy_(slow_p)
        return loss
|
class AverageMeter(object):
    'Tracks the latest value, sum, count and running average per metric key.'

    def __init__(self):
        self.dic = {}
        self.reset()

    def reset(self):
        'Zero every statistic of every tracked key (keys themselves are kept).'
        for stats in self.dic.values():
            for field in stats:
                stats[field] = 0

    def update(self, key, val, n=1):
        'Record `n` observations with (mean) value `val` for `key`.'
        stats = self.dic.setdefault(key, {'val': 0, 'sum': 0, 'count': 0, 'avg': 0})
        stats['val'] = val
        stats['sum'] += val * n
        stats['count'] += n
        stats['avg'] = stats['sum'] / stats['count']
|
class RawFrameExtractor():
    """Frame extractor for a directory of pre-extracted video frames.

    Attributes:
        centercrop: whether to center-crop during pre-processing.
        size: output image resolution.
        framerate: sampling interval forwarded as `sample_fp`.
        transform: torchvision pre-processing pipeline.
        train: when True, jitter the uniformly spaced sample indices.
    """

    def __init__(self, centercrop=False, size=224, framerate=(- 1), train='subset'):
        self.centercrop = centercrop
        self.size = size
        self.framerate = framerate
        self.transform = self._transform(self.size)
        self.train = (train == 'train')

    def _transform(self, n_px):
        'CLIP-style preprocessing: resize, center-crop, tensorize, normalize.'
        return Compose([Resize(n_px, interpolation=Image.BICUBIC), CenterCrop(n_px), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, max_frame, preprocess, sample_fp=0):
        """Sample frames from a directory of images into a tensor.

        Args:
            video_file: directory containing the per-frame image files.
            max_frame: maximum number of frames to keep.
            preprocess: callable applied to each PIL image.
            sample_fp: sampling interval; must be a positive integer.

        Returns:
            Dict with key 'video' holding a (num_frames, C, H, W) tensor
            (or a zero tensor when no frame could be read).
        """
        # Fix: the original only asserted sample_fp > -1, so sample_fp == 0
        # crashed below with a ZeroDivisionError.
        assert sample_fp > 0
        video_name = os.listdir(video_file)
        video_name.sort()
        current_frame = (len(video_name) // sample_fp)
        current_sample_indx = np.linspace(0, (len(video_name) - 1), num=current_frame, dtype=int)
        if (max_frame >= current_sample_indx.shape[0]):
            frame_index = np.arange(0, current_sample_indx.shape[0])
        else:
            frame_index = np.linspace(0, (current_sample_indx.shape[0] - 1), num=max_frame, dtype=int)
        # Training-time jitter: shift interior indices by a random offset
        # within half the sampling stride (first/last stay fixed).
        # Fix: guard clips with fewer than two sampled frames, which used to
        # raise an IndexError on frame_index[1].
        if self.train and (frame_index.shape[0] > 1):
            step_len = ((frame_index[1] - frame_index[0]) // 2)
            if (step_len > 2):
                random_index = np.random.randint(((- 1) * step_len), step_len, (frame_index.shape[0] - 2))
                zero_index = np.zeros(1)
                index = np.concatenate((zero_index, random_index, zero_index))
                frame_index = (frame_index + index)
        images = []
        for index in frame_index:
            image_path = os.path.join(video_file, video_name[current_sample_indx[int(index)]])
            images.append(preprocess(Image.open(image_path).convert('RGB')))
        if (len(images) > 0):
            video_data = torch.tensor(np.stack(images))
        else:
            video_data = torch.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, max_frame):
        'Sample up to `max_frame` preprocessed frames from `video_path`.'
        return self.video_to_tensor(video_path, max_frame, self.transform, sample_fp=self.framerate)

    def process_raw_data(self, raw_video_data):
        'Reshape sampled frames to (num_frames, 1, C, H, W).'
        tensor_size = raw_video_data.size()
        return raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
|
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):
'run similarity in one single gpu\n Args:\n model: CLIP2Video\n batch_list_t: id of text embedding\n batch_list_v: id of visual embedding\n batch_sequence_output_list: batch text embedding\n batch_visual_output_list: batch visual embedding\n Returns:\n sim_matrix: similarity\n\n '
sim_matrix = []
for (idx1, b1) in enumerate(batch_list_t):
(input_mask, segment_ids, *_tmp) = b1
sequence_output = batch_sequence_output_list[idx1]
each_row = []
for (idx2, b2) in enumerate(batch_list_v):
(video_mask, *_tmp) = b2
visual_output = batch_visual_output_list[idx2]
(b1b2_logits, *_tmp) = model.get_inference_logits(sequence_output, visual_output, input_mask, video_mask)
b1b2_logits = b1b2_logits.cpu().detach().numpy()
each_row.append(b1b2_logits)
each_row = np.concatenate(tuple(each_row), axis=(- 1))
sim_matrix.append(each_row)
return sim_matrix
|
def eval_epoch(model, test_dataloader, device, n_gpu, logger):
    """Run retrieval evaluation for one epoch.

    Encodes every batch of the test loader, computes the full text-video
    similarity matrix (optionally sharded across several GPUs) and logs the
    ranking metrics.

    Args:
        model: CLIP2Video-style model.
        test_dataloader: loader yielding (input_ids, input_mask,
            segment_ids, video, video_mask) tuples.
        device: device the model is moved to.
        n_gpu: number of GPUs available for the similarity computation.
        logger: logger used to report the metrics.

    Returns:
        R@1 of text-to-video retrieval.
    """
    # Unwrap DataParallel-style wrappers before moving to the device.
    if hasattr(model, 'module'):
        model = model.module.to(device)
    else:
        model = model.to(device)
    # Multi-sentence mode: several captions per video; `cut_off_points_`
    # marks the (0-based, after the -1 shift) index of the last caption
    # belonging to each video.
    multi_sentence_ = False
    (cut_off_points_, sentence_num_, video_num_) = ([], (- 1), (- 1))
    if (hasattr(test_dataloader.dataset, 'multi_sentence_per_video') and test_dataloader.dataset.multi_sentence_per_video):
        multi_sentence_ = True
        cut_off_points_ = test_dataloader.dataset.cut_off_points
        sentence_num_ = test_dataloader.dataset.sentence_num
        video_num_ = test_dataloader.dataset.video_num
        cut_off_points_ = [(itm - 1) for itm in cut_off_points_]
    if multi_sentence_:
        logger.warning('Eval under the multi-sentence per video clip setting.')
        logger.warning('sentence num: {}, video num: {}'.format(sentence_num_, video_num_))
    model.eval()
    with torch.no_grad():
        batch_list_t = []
        batch_list_v = []
        (batch_sequence_output_list, batch_visual_output_list) = ([], [])
        total_video_num = 0
        for (bid, batch) in enumerate(test_dataloader):
            batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, video, video_mask) = batch
            if multi_sentence_:
                # Texts are encoded for every sample, but each video is
                # encoded only once (at its cut-off position).
                (b, *_t) = video.shape
                sequence_output = model.get_sequence_output(input_ids, segment_ids, input_mask)
                batch_sequence_output_list.append(sequence_output)
                batch_list_t.append((input_mask, segment_ids))
                (s_, e_) = (total_video_num, (total_video_num + b))
                filter_inds = [(itm - s_) for itm in cut_off_points_ if ((itm >= s_) and (itm < e_))]
                if (len(filter_inds) > 0):
                    (video, video_mask) = (video[(filter_inds, ...)], video_mask[(filter_inds, ...)])
                    visual_output = model.get_visual_output(video, video_mask)
                    batch_visual_output_list.append(visual_output)
                    batch_list_v.append((video_mask,))
                total_video_num += b
            else:
                # One caption per video: encode both modalities jointly.
                (sequence_output, visual_output) = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
                batch_sequence_output_list.append(sequence_output)
                batch_list_t.append((input_mask, segment_ids))
                batch_visual_output_list.append(visual_output)
                batch_list_v.append((video_mask,))
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
        if (n_gpu > 1):
            # Shard the text batches across GPUs; every shard gets a full
            # (device-local) copy of all visual outputs so it can compute
            # complete similarity rows.
            device_ids = list(range(n_gpu))
            batch_list_t_splits = []
            batch_list_v_splits = []
            batch_t_output_splits = []
            batch_v_output_splits = []
            bacth_len = len(batch_list_t)
            split_len = (((bacth_len + n_gpu) - 1) // n_gpu)
            for dev_id in device_ids:
                (s_, e_) = ((dev_id * split_len), ((dev_id + 1) * split_len))
                if (dev_id == 0):
                    # GPU 0 already holds the tensors; no transfer needed.
                    batch_list_t_splits.append(batch_list_t[s_:e_])
                    batch_list_v_splits.append(batch_list_v)
                    batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
                    batch_v_output_splits.append(batch_visual_output_list)
                else:
                    devc = torch.device('cuda:{}'.format(str(dev_id)))
                    devc_batch_list = [tuple((t.to(devc) for t in b)) for b in batch_list_t[s_:e_]]
                    batch_list_t_splits.append(devc_batch_list)
                    devc_batch_list = [tuple((t.to(devc) for t in b)) for b in batch_list_v]
                    batch_list_v_splits.append(devc_batch_list)
                    # Sequence outputs may be (tensor, tensor) pairs or bare tensors.
                    if isinstance(batch_sequence_output_list[s_], tuple):
                        devc_batch_list = [(b[0].to(devc), b[1].to(devc)) for b in batch_sequence_output_list[s_:e_]]
                    else:
                        devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
                    batch_t_output_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
                    batch_v_output_splits.append(devc_batch_list)
            parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id], batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
            parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
            sim_matrix = []
            for idx in range(len(parallel_outputs)):
                sim_matrix += parallel_outputs[idx]
            sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
        else:
            sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list)
            sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
    # Rank the similarity matrix and log the retrieval metrics.
    R1 = logging_rank(sim_matrix, multi_sentence_, cut_off_points_, logger)
    return R1
|
def logging_rank(sim_matrix, multi_sentence_, cut_off_points_, logger):
    """Compute and log retrieval metrics from a similarity matrix.

    Args:
        sim_matrix: (num_texts, num_videos) similarity scores.
        multi_sentence_: whether several captions map to one video.
        cut_off_points_: last-caption indices per video (multi-sentence mode).
        logger: logger used for reporting.

    Returns:
        R@1 of text-to-video retrieval.
    """
    if multi_sentence_:
        # Regroup caption rows per video and pad with -inf so every video
        # contributes the same number of rows.
        logger.info('before reshape, sim matrix size: {} x {}'.format(sim_matrix.shape[0], sim_matrix.shape[1]))
        cut_off_points2len_ = [(itm + 1) for itm in cut_off_points_]
        starts = ([0] + cut_off_points2len_[:(- 1)])
        ends = cut_off_points2len_
        max_length = max((e_ - s_) for (s_, e_) in zip(starts, ends))
        padded_groups = []
        for (s_, e_) in zip(starts, ends):
            pad = np.full((((max_length - e_) + s_), sim_matrix.shape[1]), (- np.inf))
            padded_groups.append(np.concatenate((sim_matrix[s_:e_], pad), axis=0))
        sim_matrix = np.stack(tuple(padded_groups), axis=0)
        logger.info('after reshape, sim matrix size: {} x {} x {}'.format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2]))
        tv_metrics = tensor_text_to_video_metrics(sim_matrix)
        vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix))
    else:
        logger.info('sim matrix size: {}, {}'.format(sim_matrix.shape[0], sim_matrix.shape[1]))
        tv_metrics = compute_metrics(sim_matrix)
        vt_metrics = compute_metrics(sim_matrix.T)
        logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))
    logger.info('Text-to-Video:')
    logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR']))
    logger.info('Video-to-Text:')
    logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.format(vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR']))
    return tv_metrics['R1']
|
def set_seed_logger(args):
    """Seed all RNGs for reproducibility and install the global logger.

    Args:
        args: the hyper-parameters; reads args.seed and args.output_dir.

    Returns:
        args: the hyper-parameters, unchanged, for call-chaining.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    # Fix: benchmark=True lets cuDNN autotune kernels per input shape, which
    # selects potentially non-deterministic algorithms and defeats the
    # deterministic=True setting above. It must be False for reproducibility.
    torch.backends.cudnn.benchmark = False
    logger = get_logger(os.path.join(args.output_dir))
    return args
|
def init_device(args, local_rank):
    """Select the compute device (GPU when available, else CPU).

    Args:
        args: the hyper-parameters; args.n_gpu is updated in place.
        local_rank: GPU id for this process.

    Returns:
        (device, n_gpu): torch device and the number of visible GPUs.
    """
    global logger
    use_cuda = torch.cuda.is_available()
    device = torch.device(('cuda' if use_cuda else 'cpu'), local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info('device: {} n_gpu: {}'.format(device, n_gpu))
    args.n_gpu = n_gpu
    # The eval batch must divide evenly across GPUs.
    # NOTE(review): on a CPU-only host n_gpu is 0 and this modulo raises
    # ZeroDivisionError — confirm CPU runs are out of scope.
    if ((args.batch_size_val % args.n_gpu) != 0):
        raise ValueError('Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return (device, n_gpu)
|
def init_model(args, device):
    """Build the CLIP2Video model, loading finetuned weights when present.

    If '<checkpoint>/pytorch_model.bin.<model_num>' exists, the model is
    initialized from that state dict; otherwise the state dict is None and
    CLIP2Video.from_pretrained falls back to CLIP's parameters.

    Args:
        args: the hyper-parameters.
        device: target device for the model.

    Returns:
        model: the initialized model, moved to `device`.
    """
    model_file = os.path.join(args.checkpoint, 'pytorch_model.bin.{}'.format(args.model_num))
    model_state_dict = None
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if (args.local_rank == 0):
            logger.info('Model loaded from %s', model_file)
    elif (args.local_rank == 0):
        logger.info('Model loaded fail %s', model_file)
    model = CLIP2Video.from_pretrained(args.cross_model, cache_dir=None, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
|
def main():
    """Evaluation entry point: seed RNGs, pick the device, build the
    tokenizer, model and test dataloader, then run evaluation on rank 0."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    (device, n_gpu) = init_device(args, args.local_rank)
    tokenizer = ClipTokenizer()
    model = init_model(args, device)
    # Fail fast if the requested dataset has no registered dataloaders.
    assert (args.datatype in DATALOADER_DICT)
    (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (args.local_rank == 0):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', test_length)
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d', len(test_dataloader))
    # Only the main process evaluates, to avoid duplicated work/logging.
    if (args.local_rank == 0):
        eval_epoch(model, test_dataloader, device, n_gpu, logger)
|
class CrossConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `CrossModel`."""
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME

    def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02):
        """Constructs CrossConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids`
                in `CrossModel` (int), or the path of a JSON config file
                (str) whose fields are loaded verbatim.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Non-linear activation (function or one of
                "gelu", "relu", "swish").
            hidden_dropout_prob: Dropout probability for fully connected layers.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum sequence length the model supports.
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated-normal initializer.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # JSON path: every field from the file becomes an attribute,
            # bypassing the keyword defaults entirely.
            with open(vocab_size_or_config_json_file, 'r', encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for (key, value) in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fix: original message lacked the space between '(int)' and 'or'.
            raise ValueError('First argument must be either a vocabulary size (int) or the path to a pretrained model config file (str)')
|
class QuickGELU(nn.Module):
    """Fast sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # 1.702 is the standard scaling constant for this approximation.
        return torch.sigmoid(x * 1.702) * x
|
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: multi-head self-attention then an MLP,
    each wrapped in a residual connection.

    forward accepts and returns an (x, attn_mask) tuple so that blocks can
    be chained inside nn.Sequential while sharing one attention mask.
    """

    def __init__(self, d_model: int, n_head: int):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        hidden = d_model * 4
        # Standard transformer MLP with a 4x expansion and QuickGELU.
        self.mlp = nn.Sequential(OrderedDict([
            ('c_fc', nn.Linear(d_model, hidden)),
            ('gelu', QuickGELU()),
            ('c_proj', nn.Linear(hidden, d_model)),
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.n_head = n_head

    def attention(self, x: torch.Tensor, attn_mask: torch.Tensor):
        # MultiheadAttention expects one mask copy per head.
        per_head_mask = attn_mask.repeat(self.n_head, 1, 1)
        return self.attn(x, x, x, need_weights=False, attn_mask=per_head_mask)[0]

    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = x + self.attention(self.ln_1(x), attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return (x, attn_mask)
|
class Transformer(nn.Module):
    """Stack of ResidualAttentionBlock layers sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
        # Each block passes (x, mask) through; return only the features.
        out, _ = self.resblocks((x, attn_mask))
        return out
|
@lru_cache()
def default_bpe():
    """Return the absolute path of the bundled BPE vocabulary file,
    located next to this module. Cached after the first call."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
    """Return a dict mapping every utf-8 byte (0..255) to a unicode string.

    The reversible BPE codes operate on unicode strings, so every byte
    needs a printable character representation. Printable bytes map to
    themselves; the remaining bytes are shifted into the 256+ codepoint
    range to avoid whitespace/control characters that the BPE code
    cannot handle.
    """
    # Bytes that already have safe printable characters.
    byte_values = (
        list(range(ord('!'), (ord('~') + 1)))
        + list(range(ord('¡'), (ord('¬') + 1)))
        + list(range(ord('®'), (ord('ÿ') + 1)))
    )
    codepoints = byte_values[:]
    shift = 0
    for byte in range((2 ** 8)):
        if (byte not in byte_values):
            # Unsafe byte: assign the next free codepoint above 255.
            byte_values.append(byte)
            codepoints.append((2 ** 8) + shift)
            shift += 1
    return dict(zip(byte_values, (chr(cp) for cp in codepoints)))
|
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being
    variable-length strings).

    Args:
        word: tuple of symbol strings.

    Returns:
        Set of (symbol, next_symbol) tuples. Empty for words with fewer
        than two symbols — the original implementation raised IndexError
        on an empty word.
    """
    # zip pairs each symbol with its successor; it naturally yields
    # nothing for words shorter than two symbols.
    return set(zip(word, word[1:]))
|
def basic_clean(text):
    """Repair encoding artifacts and HTML escapes, then strip whitespace.

    Uses ftfy to fix mojibake/mis-encoded characters, then unescapes HTML
    entities twice (handles double-escaped input such as '&amp;amp;').
    """
    text = ftfy.fix_text(text)
    # Double unescape: source captions may be HTML-escaped twice.
    text = html.unescape(html.unescape(text))
    return text.strip()
|
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    collapsed = re.sub('\\s+', ' ', text)
    return collapsed.strip()
|
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer.

    Text is cleaned and lower-cased, split by a regex into words/numerals/
    punctuation, mapped byte-by-byte to printable unicode characters, and
    then merged according to learned BPE ranks. End-of-word is marked by
    the '</w>' suffix.
    """
    def __init__(self, bpe_path: str=default_bpe()):
        # NOTE(review): the default bpe_path is evaluated once at class
        # definition time; fine as long as default_bpe() is pure.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the header line and keep only the merges used by CLIP's
        # 49152-token vocabulary (minus 256 byte tokens and 2 specials).
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocabulary: 256 byte characters, their '</w>' variants, one entry
        # per merge, then the two special tokens.
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Lower rank = higher merge priority in bpe().
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): the \p{L}/\p{N} classes are not supported by the
        # stdlib 're' module — presumably this file imports 'regex as re';
        # confirm at the imports.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder
    def bpe(self, token):
        """Apply BPE merges to one token; return space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        # Mark the last character as end-of-word before merging.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the pair with the best (lowest) learned rank.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # 'first' no longer occurs: copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    # Found the bigram occurrence: merge the two symbols.
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Encode text to a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw bytes to their printable unicode stand-ins first.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def decode(self, tokens):
        """Decode a list of token ids back into a text string."""
        text = ''.join([self.decoder[token] for token in tokens])
        # Undo the byte-to-unicode mapping, then turn '</w>' into spaces.
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
    def tokenize(self, text):
        """Tokenize text into BPE subword strings (not ids)."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens
    def convert_tokens_to_ids(self, tokens):
        """Map BPE subword strings to their vocabulary ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
|
class PretrainedConfig(object):
    """Base class for model configurations: resolves an archive/config on
    disk, loads a JSON config and (optionally) pretrained weights, and
    provides dict/JSON (de)serialization helpers for subclasses."""
    # Subclasses override these three class attributes.
    pretrained_model_archive_map = {}
    config_name = ''
    weights_name = ''
    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        """Resolve `pretrained_model_name` to a config (and maybe weights).

        Looks next to this module first, then in the archive map, else
        treats the name as a path. Returns (config, state_dict); the
        state_dict is loaded from disk only when none was passed in.
        """
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if (os.path.exists(archive_file) is False):
            if (pretrained_model_name in cls.pretrained_model_archive_map):
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name
        # NOTE(review): no download/caching happens here, so resolved and
        # original paths are always equal and the elif below never runs.
        resolved_archive_file = archive_file
        if (resolved_archive_file == archive_file):
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('loading archive file {}'.format(archive_file))
        elif ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Archive is a tarball: unpack it into a temp dir first.
            tempdir = tempfile.mkdtemp()
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
            # NOTE(review): extractall on an untrusted tarball can write
            # outside tempdir (path traversal) — confirm archives are trusted.
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('Model config {}'.format(config))
        if (state_dict is None):
            # No weights provided by the caller: try loading from the archive.
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            elif ((task_config is None) or (task_config.local_rank == 0)):
                logger.info("Weight doesn't exsits. {}".format(weights_path))
        if tempdir:
            # Clean up the extracted archive.
            shutil.rmtree(tempdir)
        return (config, state_dict)
    @classmethod
    def from_dict(cls, json_object):
        'Constructs a `BertConfig` from a Python dictionary of parameters.'
        config = cls(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        'Constructs a `BertConfig` from a json file of parameters.'
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        'Serializes this instance to a Python dictionary.'
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        'Serializes this instance to a JSON string.'
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
|
def gelu(x):
    """Exact GELU activation using the Gaussian CDF via erf.

    For information: OpenAI GPT's gelu is a slightly different (tanh)
    approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
|
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
|
class LayerNorm(nn.Module):
    """Layer normalization in the TF style (epsilon inside the square root),
    with learnable per-feature scale (weight) and shift (bias)."""

    def __init__(self, hidden_size, eps=1e-12):
        super(LayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last dimension.
        mean = x.mean((- 1), keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean((- 1), keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return (self.weight * normalized) + self.bias
|
class PreTrainedModel(nn.Module):
    ' An abstract class to handle weights initialization and\n        a simple interface for dowloading and loading pretrained models.\n    '
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedModel, self).__init__()
        if (not isinstance(config, PretrainedConfig)):
            raise ValueError('Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. To create a model from a Google pretrained model use `model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`'.format(self.__class__.__name__, self.__class__.__name__))
        self.config = config
    def init_weights(self, module):
        """Initialize one submodule's weights (BERT-style): truncated-normal
        for Linear/Embedding, ones/zeros for LayerNorm, zero biases."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            # Some LayerNorm variants name their parameters beta/gamma
            # instead of bias/weight; handle both conventions.
            if (('beta' in dir(module)) and ('gamma' in dir(module))):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if (isinstance(module, nn.Linear) and (module.bias is not None)):
            module.bias.data.zero_()
    def resize_token_embeddings(self, new_num_tokens=None):
        # Subclasses must implement embedding resizing themselves.
        raise NotImplementedError
    @classmethod
    def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
        """Load `state_dict` into `model`, renaming legacy TF-style keys
        (gamma->weight, beta->bias), optionally prepending `prefix` to every
        key, and logging missing/unexpected/erroring keys on rank 0."""
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if ('gamma' in key):
                new_key = key.replace('gamma', 'weight')
            if ('beta' in key):
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for (old_key, new_key) in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        if (prefix is not None):
            # Re-root every key under `prefix` (e.g. loading a submodule's
            # weights into a larger wrapper model).
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                old_keys.append(key)
                new_keys.append((prefix + key))
            for (old_key, new_key) in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # Preserve the state_dict's metadata across the shallow copy; it is
        # consumed per-submodule by _load_from_state_dict.
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if (metadata is not None):
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load each submodule with its dotted-name prefix.
            local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
            module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for (name, child) in module._modules.items():
                if (child is not None):
                    load(child, ((prefix + name) + '.'))
        load(model, prefix='')
        if ((prefix is None) and ((task_config is None) or (task_config.local_rank == 0))):
            logger.info(('-' * 20))
            if (len(missing_keys) > 0):
                logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ('\n   ' + '\n   '.join(missing_keys))))
            if (len(unexpected_keys) > 0):
                logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, ('\n   ' + '\n   '.join(unexpected_keys))))
            if (len(error_msgs) > 0):
                logger.error('Weights from pretrained model cause errors in {}: {}'.format(model.__class__.__name__, ('\n   ' + '\n   '.join(error_msgs))))
        return model
    @property
    def dtype(self):
        '\n        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).\n        '
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # Module has no registered parameters: fall back to scanning
            # raw tensor attributes for a dtype.
            def find_tensor_attributes(module: nn.Module):
                tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)]
                return tuples
            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype
    @classmethod
    def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
        '\n        Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.\n        Download and cache the pre-trained model file if needed.\n        '
        model = cls(config, *inputs, **kwargs)
        if (state_dict is None):
            # No weights to load: return the freshly initialized model.
            return model
        model = cls.init_preweight(model, state_dict)
        return model
|
class CrossEn(nn.Module):
    """Cross entropy loss for retrieval: the similarity matrix's diagonal
    holds the matched text-video pairs, which should dominate each row."""

    def __init__(self):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix):
        # Row-wise log-probabilities over all candidates.
        log_probs = F.log_softmax(sim_matrix, dim=(- 1))
        # Matched pairs sit on the diagonal.
        matched_logpt = torch.diag(log_probs)
        # Mean negative log-likelihood of the matched pairs.
        return (- matched_logpt).mean()
|
def extract_frames(video_name, out_folder, fps=5):
    """Extract frames from a video into out_folder as numbered JPEGs.

    Any existing out_folder is deleted first so stale frames never mix
    with the new extraction.

    Args:
        video_name: path of the input video file.
        out_folder: directory that will receive the extracted frames
            (named 00000001.jpg, 00000002.jpg, ...).
        fps: number of frames extracted per second of video.
    """
    # shutil.rmtree replaces the fragile 'rm -rf <dir>/*' + 'rm -rf <dir>'
    # shell pair; it is portable and handles paths with spaces.
    if os.path.exists(out_folder):
        shutil.rmtree(out_folder)
    os.makedirs(out_folder)
    # NOTE(review): paths are interpolated into a shell command — do not
    # call this with untrusted input (shell injection), and paths containing
    # spaces still require quoting.
    cmd = ('ffmpeg -v 0 -i %s -r %d -q 0 %s/%s.jpg' % (video_name, fps, out_folder, '%08d'))
    os.system(cmd)
|
def process(line):
    """Worker entry point: unpack a (video_path, frame_dir) pair and
    extract that video's frames."""
    print(line)
    (video_path, frame_dir) = line
    extract_frames(video_path, frame_dir)
|
def get_args(description='CLIP2Video on Video-Text Retrieval Task'):
    """Parse command-line hyper-parameters for CLIP2Video evaluation.

    Args:
        description: argparse description shown in --help. (Fix: the
            original default misspelled "Video" as "Dideo".)

    Returns:
        args: the parsed argparse.Namespace of hyper-parameters.
    """
    parser = argparse.ArgumentParser(description=description)
    # Data / evaluation options.
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--val_csv', type=str, default='data/.val.csv', help='')
    parser.add_argument('--data_path', type=str, default='data/caption.pickle', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='data/videos_feature.pickle', help='feature path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=32, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='frame rate for uniformly sampling the video')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    # Model architecture options.
    parser.add_argument('--cross_model', default='cross-base', type=str, required=False, help='Cross module')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--n_gpu', type=int, default=1, help='Changed in the execute process.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help='Layer NO. of cross.')
    parser.add_argument('--sim_type', type=str, default='meanP', choices=['meanP', 'seqTransf'], help='choice a similarity header.')
    # Checkpoint / runtime options.
    parser.add_argument('--checkpoint', type=str, default='', help='checkpoint dir')
    parser.add_argument('--model_num', type=str, default='', help='model id')
    parser.add_argument('--local_rank', default=0, type=int, help='shard_id: node rank for distributed training')
    parser.add_argument('--datatype', default='msrvtt', type=str, help='msvd | msrvtt | vatexEnglish | msrvttfull')
    parser.add_argument('--vocab_size', type=int, default=49408, help='the number of vocab size')
    # Temporal difference block (TDB) / temporal alignment block (TAB) options.
    parser.add_argument('--temporal_type', type=str, default='', help='TDB type')
    parser.add_argument('--temporal_proj', type=str, default='', help='sigmoid_mlp | sigmoid_selfA')
    parser.add_argument('--center_type', type=str, default='', help='TAB')
    parser.add_argument('--centerK', type=int, default=5, help='center number for clustering.')
    parser.add_argument('--center_weight', type=float, default=0.5, help='the weight to adopt the main simiarility')
    parser.add_argument('--center_proj', type=str, default='', help='TAB | TAB_TDB')
    parser.add_argument('--clip_path', type=str, default='/data/ceph_11015/ssd/howiefang/videoCLIP/CLIP2Clip/ViT-B-32.pt', help='model path of CLIP')
    # EMCL options.
    parser.add_argument('--EMCL', type=int, default=0)
    parser.add_argument('--K', type=int, default=16)
    parser.add_argument('--stage_num', type=int, default=5)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--lamd', type=float, default=1)
    parser.add_argument('--beta', type=float, default=1)
    args = parser.parse_args()
    return args
|
def dataloader_vatexEnglish_train(args, tokenizer):
    """Build the training dataloader for VATEX with English annotations.

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.

    Returns:
        Tuple of (dataloader, dataset length, distributed train sampler).
    """
    train_set = VATEXENGLISH_multi_sentence_dataLoader(subset='train', data_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(train_set)
    # The per-process batch size is the global batch split across GPUs.
    loader = DataLoader(train_set, batch_size=(args.batch_size // args.n_gpu), num_workers=args.num_thread_reader, pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(train_set), sampler)
|
def dataloader_vatexEnglish_test(args, tokenizer, subset='test'):
    """Build the evaluation dataloader for VATEX (English, multi-sentence).

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.
        subset: which split to evaluate (default 'test').

    Returns:
        Tuple of (dataloader, dataset length).
    """
    eval_set = VATEXENGLISH_multi_sentence_dataLoader(subset=subset, data_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    # Evaluation keeps the original order and processes every sample.
    loader = DataLoader(eval_set, batch_size=args.batch_size_val, num_workers=args.num_thread_reader, shuffle=False, drop_last=False)
    return (loader, len(eval_set))
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the training dataloader for MSR-VTT (9k split).

    NOTE(review): a second `dataloader_msrvtt_train` is defined later in
    this module; at import time that later definition shadows this one.

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.

    Returns:
        Tuple of (dataloader, dataset length, distributed train sampler).
    """
    msrvtt_train_set = MSRVTT_multi_sentence_dataLoader(csv_path=args.train_csv, json_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_train_set)
    # Global batch size is split evenly across GPUs.
    dataloader = DataLoader(msrvtt_train_set, batch_size=(args.batch_size // args.n_gpu), num_workers=args.num_thread_reader, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_train_set), train_sampler)
|
def dataloader_msrvtt_test(args, tokenizer):
    """Build the evaluation dataloader for the MSR-VTT 1k-A protocol.

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.

    Returns:
        Tuple of (dataloader, dataset length).
    """
    test_set = MSRVTT_single_sentence_dataLoader(csv_path=args.val_csv, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    # Evaluation keeps the original order and processes every sample.
    loader = DataLoader(test_set, batch_size=args.batch_size_val, num_workers=args.num_thread_reader, shuffle=False, drop_last=False)
    return (loader, len(test_set))
|
def dataloader_msrvttfull_test(args, tokenizer):
    """Build the evaluation dataloader for the MSR-VTT full protocol.

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.

    Returns:
        Tuple of (dataloader, dataset length).
    """
    test_set = MSRVTTFULL_multi_sentence_dataLoader(subset='test', csv_path=args.val_csv, json_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    # Evaluation keeps the original order and processes every sample.
    loader = DataLoader(test_set, batch_size=args.batch_size_val, num_workers=args.num_thread_reader, shuffle=False, drop_last=False)
    return (loader, len(test_set))
|
def dataloader_msvd_train(args, tokenizer):
    """Build the training dataloader for MSVD.

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.

    Returns:
        Tuple of (dataloader, dataset length, distributed train sampler).
    """
    train_set = MSVD_multi_sentence_dataLoader(subset='train', data_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(train_set)
    # The per-process batch size is the global batch split across GPUs.
    loader = DataLoader(train_set, batch_size=(args.batch_size // args.n_gpu), num_workers=args.num_thread_reader, pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(train_set), sampler)
|
def dataloader_msvd_test(args, tokenizer, subset='test'):
    """Build the evaluation dataloader for MSVD (multi-sentence captions).

    Args:
        args: hyper-parameters.
        tokenizer: tokenizer.
        subset: which split to evaluate (default 'test').

    Returns:
        Tuple of (dataloader, dataset length).
    """
    eval_set = MSVD_multi_sentence_dataLoader(subset=subset, data_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    # Evaluation keeps the original order and processes every sample.
    loader = DataLoader(eval_set, batch_size=args.batch_size_val, num_workers=args.num_thread_reader, shuffle=False, drop_last=False)
    return (loader, len(eval_set))
|
def get_a_var(obj):
    """Return the first torch.Tensor found in `obj`, searching recursively
    through lists, tuples and dict items; None if no tensor is present."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        children = obj
    elif isinstance(obj, dict):
        # Each item is a (key, value) tuple; the recursion inspects both.
        children = obj.items()
    else:
        return None
    for child in children:
        found = get_a_var(child)
        if isinstance(found, torch.Tensor):
            return found
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` over `device_ids` and run `fct(replica, *input)`
    for each replica in its own thread (mirrors nn.parallel.parallel_apply).

    Args:
        fct: callable invoked as fct(module, *input) on each replica.
        model: the module to replicate.
        inputs: one input (tuple) per device.
        device_ids: CUDA device ids, one per input.

    Returns:
        List of per-replica outputs, in input order.

    Raises:
        Re-raises any exception captured inside a worker thread.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Grad mode is thread-local in PyTorch, so capture the caller's mode
    # and re-apply it inside each worker thread.
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # Run on the device where this replica's input lives.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception (with traceback) so the main thread
            # can re-raise it after all workers have finished.
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline and skip the thread overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Return the shared 'logger' instance, optionally adding file output.

    Args:
        filename: optional log-file path; when given, a FileHandler is
            attached to the root logger (once per file).

    Returns:
        The logging.Logger named 'logger', set to DEBUG level.
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if (filename is not None):
        root = logging.getLogger()
        # Fix: calling get_logger repeatedly used to stack duplicate
        # FileHandlers on the root logger, duplicating every log line.
        target = os.path.abspath(filename)
        already_attached = any((isinstance(h, logging.FileHandler) and (getattr(h, 'baseFilename', None) == target)) for h in root.handlers)
        if (not already_attached):
            handler = logging.FileHandler(filename)
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
            root.addHandler(handler)
    return logger
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training dataloader (config-driven variant).

    NOTE(review): this redefines `dataloader_msrvtt_train` from earlier in
    the module; at import time this later definition wins.

    Args:
        args: hyper-parameters / config object.
        tokenizer: tokenizer.

    Returns:
        Tuple of (dataloader, dataset length, sampler or None).
    """
    msrvtt_dataset = MSRVTTDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except Exception:
        # Fix: the original bare 'except:' also swallowed KeyboardInterrupt
        # and SystemExit. Fall back to a plain shuffled loader when
        # torch.distributed is not initialized.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.