# code stringlengths 17 6.64M (dataset-export artifact, kept as a comment)
# |---|
def set_comm(comm):
    """Attach an MPI-style communicator to the currently active logger."""
    get_current().set_comm(comm)
|
def get_dir():
    """
    Get directory that log files are being written to.

    Will be None if there is no output directory (i.e., if you didn't
    call start).
    """
    return get_current().get_dir()
|
@contextmanager
def profile_kv(scopename):
    """Context manager that accumulates elapsed wall time under 'wait_<scopename>'."""
    key = 'wait_' + scopename
    started = time.time()
    try:
        yield
    finally:
        get_current().name2val[key] += time.time() - started
|
def profile(n):
    """
    Decorator that records the wall time of each call under 'wait_<n>'.

    Usage:
        @profile("my_func")
        def my_func(): code
    """
    def decorator_with_name(func):
        # Fix: preserve the wrapped function's __name__/__doc__ (the file
        # already uses functools.wraps elsewhere, e.g. in `timing`).
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
|
def get_current():
    """Return the active Logger, lazily configuring a default one on first use."""
    if Logger.CURRENT is None:
        _configure_default_logger()
    return Logger.CURRENT
|
class Logger(object):
    """Key/value + free-text logger.

    Scalar diagnostics accumulate in ``name2val``/``name2cnt`` and are
    flushed through a list of output-format writers by ``dumpkvs``.
    ``DEFAULT``/``CURRENT`` hold the process-wide logger instances used by
    the module-level helper functions.
    """
    DEFAULT = None  # default logger, kept so reset() can restore it
    CURRENT = None  # logger currently used by the module-level helpers
    def __init__(self, dir, output_formats, comm=None):
        # dir: output directory (may be None); output_formats: writer objects;
        # comm: optional MPI-style communicator for cross-process averaging.
        self.name2val = defaultdict(float)
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm
    def logkv(self, key, val):
        # Record a value; overwrites any earlier value for this key.
        self.name2val[key] = val
    def logkv_mean(self, key, val):
        # Incrementally fold `val` into the running mean for `key`.
        (oldval, cnt) = (self.name2val[key], self.name2cnt[key])
        self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
        self.name2cnt[key] = (cnt + 1)
    def dumpkvs(self):
        # Flush accumulated values to every KVWriter, then clear the buffers.
        if (self.comm is None):
            d = self.name2val
        else:
            # Weighted-average values across the communicator; only rank 0
            # receives the real result (others get an empty dict).
            d = mpi_weighted_mean(self.comm, {name: (val, self.name2cnt.get(name, 1)) for (name, val) in self.name2val.items()})
            if (self.comm.rank != 0):
                # NOTE(review): presumably keeps writers from choking on an
                # empty dict on non-root ranks — confirm against the writers.
                d['dummy'] = 1
        out = d.copy()
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out
    def log(self, *args, level=INFO):
        # Emit free-form text when `level` meets the current threshold.
        if (self.level <= level):
            self._do_log(args)
    def set_level(self, level):
        self.level = level
    def set_comm(self, comm):
        self.comm = comm
    def get_dir(self):
        return self.dir
    def close(self):
        for fmt in self.output_formats:
            fmt.close()
    def _do_log(self, args):
        # Forward text output to every sequence writer.
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
|
def get_rank_without_mpi_import():
    """Infer this process's MPI rank from the environment (no mpi4py import)."""
    for varname in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(varname)
        if value is not None:
            return int(value)
    return 0
|
def mpi_weighted_mean(comm, local_name2valcount):
    """
    Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
    Perform a weighted average over dicts that are each on a different node.

    Input: local_name2valcount: dict mapping key -> (value, count)
    Returns: key -> mean (on rank 0; an empty dict on every other rank)
    """
    all_name2valcount = comm.gather(local_name2valcount)
    if comm.rank == 0:
        name2sum = defaultdict(float)
        name2count = defaultdict(float)
        for n2vc in all_name2valcount:
            for (name, (val, count)) in n2vc.items():
                try:
                    val = float(val)
                except ValueError:
                    # Fix: the original re-checked `comm.rank == 0` here, but
                    # this branch is already rank-0 only — warn unconditionally.
                    warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
                else:
                    name2sum[name] += val * count
                    name2count[name] += count
        return {name: name2sum[name] / name2count[name] for name in name2sum}
    return {}
|
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    Install a fresh global logger.

    If comm is provided, average all numerical stats across that comm.
    """
    if dir is None:
        dir = os.getenv('OPENAI_LOGDIR')
    if dir is None:
        stamp = datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f')
        dir = osp.join(tempfile.gettempdir(), stamp)
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)
    rank = get_rank_without_mpi_import()
    if rank > 0:
        # Non-root ranks get their own log files.
        log_suffix = log_suffix + ('-rank%03i' % rank)
    if format_strs is None:
        if rank == 0:
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    format_strs = [s for s in format_strs if s]
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log('Logging to %s' % dir)
|
def _configure_default_logger():
    """Configure logging from env defaults and remember it as the fallback logger."""
    configure()
    Logger.DEFAULT = Logger.CURRENT
|
def reset():
    """Close the current logger (if it is not the default) and restore the default."""
    if Logger.CURRENT is not Logger.DEFAULT:
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
|
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Temporarily install a freshly configured logger; restore the previous one on exit."""
    previous = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        yield
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = previous
|
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = next(
        (obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)),
        None,
    )
    assert tensor is not None, 'at least one argument must be a Tensor'
    # Only the log-variances are forced to tensors; the means may stay
    # scalars and rely on broadcasting.
    logvar1, logvar2 = (
        x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    )
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
|
def approx_standard_normal_cdf(x):
    """
    A fast tanh-based approximation of the cumulative distribution function
    of the standard normal.
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
|
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
        rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    centered = x - means
    inv_stdv = th.exp(-log_scales)
    # CDF evaluated half a pixel bin (1/255) above and below the target.
    cdf_plus = approx_standard_normal_cdf(inv_stdv * (centered + 1.0 / 255.0))
    cdf_min = approx_standard_normal_cdf(inv_stdv * (centered - 1.0 / 255.0))
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    log_cdf_delta = th.log((cdf_plus - cdf_min).clamp(min=1e-12))
    # The extreme bins absorb the full tail probability mass.
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, log_cdf_delta),
    )
    assert log_probs.shape == x.shape
    return log_probs
|
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith('ddim'):
            desired_count = int(section_counts[len('ddim'):])
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Bug fix: report the requested step count, not the total number
            # of original timesteps.
            raise ValueError(f'cannot create exactly {desired_count} steps with an integer stride')
        section_counts = [int(x) for x in section_counts.split(',')]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # Spread the remainder one extra step over the first `extra` sections.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(f'cannot divide section of {size} steps into {section_count}')
        if section_count <= 1:
            frac_stride = 1
        else:
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
|
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # timestep_map[j] = index in the ORIGINAL process of spaced step j.
        self.timestep_map = []
        self.original_num_steps = len(kwargs['betas'])
        base_diffusion = GaussianDiffusion(**kwargs)
        # Recompute betas so the retained steps reproduce the base process's
        # cumulative alpha products: beta_new = 1 - acp_i / acp_prev_kept.
        last_alpha_cumprod = 1.0
        new_betas = []
        for (i, alpha_cumprod) in enumerate(base_diffusion.alphas_cumprod):
            if (i in self.use_timesteps):
                new_betas.append((1 - (alpha_cumprod / last_alpha_cumprod)))
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs['betas'] = np.array(new_betas)
        super().__init__(**kwargs)
    def p_mean_variance(self, model, *args, **kwargs):
        # Wrap the model so it sees original-process timesteps.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(self, model, *args, **kwargs):
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
    def _wrap_model(self, model):
        # Idempotent: already-wrapped models are passed through.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.rescale_timesteps, self.original_num_steps)
    def _scale_timesteps(self, t):
        # Scaling is handled by _WrappedModel instead.
        return t
|
class _WrappedModel():
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = (new_ts.float() * (1000.0 / self.original_num_steps))
return self.model(x, new_ts, **kwargs)
|
def compress(paras):
    """
    Re-encode a video with ffmpeg: scale the short side to 224 px (keeping
    even dimensions) and resample to 3 fps.

    :param paras: tuple (input_video_path, output_video_path).
    :return: ffmpeg's exit code (0 on success). Like the original, a nonzero
        exit is NOT raised (best-effort); exceptions such as a missing
        ffmpeg binary still propagate.
    """
    input_video_path, output_video_path = paras
    command = [
        'ffmpeg', '-y',
        '-i', input_video_path,
        '-filter:v',
        "scale='if(gt(a,1),trunc(oh*a/2)*2,224)':'if(gt(a,1),224,trunc(ow*a/2)*2)'",
        '-map', '0:v',
        '-r', '3',
        output_video_path,
    ]
    # Fix: the original wrapped this in `except Exception as e: raise e`
    # (a no-op) and discarded out/err/retcode; subprocess.run has the same
    # behavior while surfacing the exit code to the caller.
    proc = subprocess.run(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return proc.returncode
|
def prepare_input_output_pairs(input_root, output_root):
    """Collect (input, output) video path lists for files under `input_root`,
    skipping any whose output already exists and is non-empty."""
    input_paths = []
    output_paths = []
    for root, _dirs, files in os.walk(input_root):
        for file_name in files:
            src = os.path.join(root, file_name)
            dst = os.path.join(output_root, file_name)
            already_done = os.path.exists(dst) and os.path.getsize(dst) > 0
            if not already_done:
                input_paths.append(src)
                output_paths.append(dst)
    return (input_paths, output_paths)
|
class Data():
    """Standard data container for train/test splits.

    ``X_train``/``y_train``/``X_test``/``y_test`` may be numpy arrays, torch
    tensors, or lists/tuples/dicts of them (handled via ``map_elementwise``).
    The `device` and `dtype` setters convert everything in place.
    """
    def __init__(self, X_train=None, y_train=None, X_test=None, y_test=None):
        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.y_test = y_test
        # Name-mangled backing fields, exposed via the properties below.
        self.__device = None
        self.__dtype = None
    def get_batch(self, batch_size):
        """Sample `batch_size` training pairs without replacement."""
        @map_elementwise
        def batch_mask(X, num):
            # assumes X is a torch tensor (uses .size(0)) — TODO confirm
            return np.random.choice(X.size(0), num, replace=False)
        @map_elementwise
        def batch(X, mask):
            return X[mask]
        mask = batch_mask(self.y_train, batch_size)
        return (batch(self.X_train, mask), batch(self.y_train, mask))
    def save(self, path):
        """Save all four splits as .npy/.npz files under `path`."""
        if (not os.path.isdir(path)):
            os.makedirs(path)
        def save_data(fname, data):
            # dict -> named npz, list/tuple -> positional npz, else plain npy.
            if isinstance(data, dict):
                np.savez_compressed(((path + '/') + fname), **data)
            elif (isinstance(data, list) or isinstance(data, tuple)):
                np.savez_compressed(((path + '/') + fname), *data)
            else:
                np.save(((path + '/') + fname), data)
        save_data('X_train', self.X_train_np)
        save_data('y_train', self.y_train_np)
        save_data('X_test', self.X_test_np)
        save_data('y_test', self.y_test_np)
    @property
    def device(self):
        return self.__device
    @property
    def dtype(self):
        return self.__dtype
    @device.setter
    def device(self, d):
        # Accepts 'cpu' or 'gpu'; converts all stored arrays to torch tensors
        # on that device.
        if (d == 'cpu'):
            self.__to_cpu()
            self.__device = torch.device('cpu')
        elif (d == 'gpu'):
            self.__to_gpu()
            self.__device = torch.device('cuda')
        else:
            raise ValueError
    @dtype.setter
    def dtype(self, d):
        # Accepts 'float' (float32) or 'double' (float64); requires that
        # `device` has been set first.
        if (d == 'float'):
            self.__to_float()
            self.__dtype = torch.float32
        elif (d == 'double'):
            self.__to_double()
            self.__dtype = torch.float64
        else:
            raise ValueError
    @property
    def dim(self):
        # Input feature dimension (size of X_train's last axis).
        if isinstance(self.X_train, np.ndarray):
            return self.X_train.shape[(- 1)]
        elif isinstance(self.X_train, torch.Tensor):
            return self.X_train.size((- 1))
    @property
    def K(self):
        # Output dimension (size of y_train's last axis).
        if isinstance(self.y_train, np.ndarray):
            return self.y_train.shape[(- 1)]
        elif isinstance(self.y_train, torch.Tensor):
            return self.y_train.size((- 1))
    @property
    def X_train_np(self):
        return Data.tc_to_np(self.X_train)
    @property
    def y_train_np(self):
        return Data.tc_to_np(self.y_train)
    @property
    def X_test_np(self):
        return Data.tc_to_np(self.X_test)
    @property
    def y_test_np(self):
        return Data.tc_to_np(self.y_test)
    @staticmethod
    @map_elementwise
    def tc_to_np(d):
        # Torch tensor -> numpy array; anything else passes through untouched.
        if isinstance(d, torch.Tensor):
            return d.cpu().detach().numpy()
        else:
            return d
    def __to_cpu(self):
        @map_elementwise
        def trans(d):
            # numpy arrays become float64 tensors (DoubleTensor), regardless
            # of their original numpy dtype.
            if isinstance(d, np.ndarray):
                return torch.DoubleTensor(d)
            elif isinstance(d, torch.Tensor):
                return d.cpu()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
    def __to_gpu(self):
        @map_elementwise
        def trans(d):
            if isinstance(d, np.ndarray):
                return torch.cuda.DoubleTensor(d)
            elif isinstance(d, torch.Tensor):
                return d.cuda()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
    def __to_float(self):
        if (self.device is None):
            raise RuntimeError('device is not set')
        @map_elementwise
        def trans(d):
            if isinstance(d, torch.Tensor):
                return d.float()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
    def __to_double(self):
        if (self.device is None):
            raise RuntimeError('device is not set')
        @map_elementwise
        def trans(d):
            if isinstance(d, torch.Tensor):
                return d.double()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
|
class Data_MIONet_Cartesian(Data):
    """Data format for MIONet (Cartesian product version).

    The last entry of ``X_train`` is the shared location grid and is passed
    through unbatched; the remaining inputs and the targets are
    row-subsampled together.
    """

    def __init__(self, X_train=None, y_train=None, X_test=None, y_test=None):
        super(Data_MIONet_Cartesian, self).__init__(X_train, y_train, X_test, y_test)

    def get_batch(self, batch_size):
        @map_elementwise
        def sample_rows(X, num):
            return np.random.choice(X.size(0), num, replace=False)

        @map_elementwise
        def take(X, mask):
            return X[mask]

        mask = sample_rows(self.y_train, batch_size)
        inputs = (*take(self.X_train[:-1], mask), self.X_train[-1])
        return (inputs, take(self.y_train, mask))
|
class AE(Map):
    """Autoencoder composed of two FNNs (an encoder and a decoder)."""

    def __init__(self, encoder_size, decoder_size, activation='sigmoid', initializer='default'):
        super(AE, self).__init__()
        self.encoder_size = encoder_size
        self.decoder_size = decoder_size
        self.activation = activation
        self.initializer = initializer
        self.ms = self.__init_modules()

    def forward(self, x):
        """Encode then decode."""
        return self.ms['decoder'](self.ms['encoder'](x))

    def encode(self, x, returnnp=False):
        """Map inputs to latent codes; return numpy when `returnnp` is True."""
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x, dtype=self.dtype, device=self.device)
        code = self.ms['encoder'](x)
        return code.cpu().detach().numpy() if returnnp else code

    def decode(self, x, returnnp=False):
        """Map latent codes back to inputs; return numpy when `returnnp` is True."""
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x, dtype=self.dtype, device=self.device)
        recon = self.ms['decoder'](x)
        return recon.cpu().detach().numpy() if returnnp else recon

    def __init_modules(self):
        return torch.nn.ModuleDict({
            'encoder': FNN(self.encoder_size, self.activation, self.initializer),
            'decoder': FNN(self.decoder_size, self.activation, self.initializer),
        })
|
class DeepONet(Map):
    """Deep operator network.

    Input: ([batch size, branch_dim], [batch size, trunk_dim])
    Output: [batch size, 1]
    """

    def __init__(self, branch_size, trunk_size, activation='relu', initializer='Glorot normal'):
        super(DeepONet, self).__init__()
        self.branch_size = branch_size
        self.trunk_size = trunk_size
        self.activation = activation
        self.initializer = initializer
        self.ms = self.__init_modules()
        self.ps = self.__init_params()

    def forward(self, x):
        branch_out = self.ms['Branch'](x[0])
        trunk_out = self.ms['Trunk'](x[1])
        # Inner product of branch and trunk features, plus a scalar bias.
        dot = torch.sum(branch_out * trunk_out, dim=-1, keepdim=True)
        return dot + self.ps['bias']

    def __init_modules(self):
        return nn.ModuleDict({
            'Branch': FNN(self.branch_size, self.activation, self.initializer),
            'Trunk': FNN(self.trunk_size, self.activation, self.initializer),
        })

    def __init_params(self):
        return nn.ParameterDict({'bias': nn.Parameter(torch.zeros([1]))})
|
class FNN(Map):
    """Fully-connected neural network.

    Note that
        len(size) >= 2,
        [..., N1, -N2, ...] denotes a linear layer from dim N1 to N2 without bias,
        [..., N, 0] denotes an identity map (as output linear layer).
    """

    def __init__(self, size, activation='relu', initializer='default'):
        super(FNN, self).__init__()
        self.size = size
        self.activation = activation
        self.initializer = initializer
        self.ms = self.__init_modules()
        self.__initialize()

    def forward(self, x):
        depth = len(self.size)
        # Hidden layers: linear followed by the activation.
        for i in range(1, depth - 1):
            x = self.act(self.ms[f'LinM{i}'](x))
        # A trailing 0 in `size` marks an identity output layer.
        if self.size[-1] == 0:
            return x
        return self.ms[f'LinM{depth - 1}'](x)

    def __init_modules(self):
        modules = nn.ModuleDict()
        for i in range(1, len(self.size)):
            if self.size[i] == 0:
                continue
            has_bias = self.size[i] > 0  # negative width => bias-free layer
            modules[f'LinM{i}'] = nn.Linear(abs(self.size[i - 1]), abs(self.size[i]), has_bias)
        return modules

    def __initialize(self):
        for i in range(1, len(self.size)):
            if self.size[i] == 0:
                continue
            self.weight_init_(self.ms[f'LinM{i}'].weight)
            if self.size[i] > 0:
                self.bias_init_(self.ms[f'LinM{i}'].bias)
|
class MIONet(Map):
    """Multiple-input operator network.

    Input: ([batch, sensors1], [batch, sensors2], [batch, dim_loc])
    Output: [batch, 1]
    """

    def __init__(self, sizes, activation='relu', initializer='default', bias=True):
        super(MIONet, self).__init__()
        self.sizes = sizes
        self.activation = activation
        self.initializer = initializer
        self.bias = bias
        self.ms = self.__init_modules()
        self.ps = self.__init_parameters()

    def forward(self, x):
        # Elementwise product of every subnet's features, summed over the
        # feature dimension.
        features = torch.stack([self.ms[f'Net{i + 1}'](x[i]) for i in range(len(self.sizes))])
        out = torch.sum(torch.prod(features, dim=0), dim=-1, keepdim=True)
        if self.bias:
            out = out + self.ps['bias']
        return out

    def __init_modules(self):
        modules = torch.nn.ModuleDict()
        for i, size in enumerate(self.sizes):
            modules[f'Net{i + 1}'] = FNN(size, self.activation, self.initializer)
        return modules

    def __init_parameters(self):
        parameters = torch.nn.ParameterDict()
        if self.bias:
            parameters['bias'] = torch.nn.Parameter(torch.zeros([1]))
        return parameters
|
class MIONet_Cartesian(Map):
    """Multiple-input operator network (Cartesian product version).

    Input: ([batch, sensors1], [batch, sensors2], [num_loc, dim_loc])
    Output: [batch, num_loc]
    """

    def __init__(self, sizes, activation='relu', initializer='default', bias=True):
        super(MIONet_Cartesian, self).__init__()
        self.sizes = sizes
        self.activation = activation
        self.initializer = initializer
        self.bias = bias
        self.ms = self.__init_modules()
        self.ps = self.__init_parameters()

    def forward(self, x):
        n_nets = len(self.sizes)
        # All but the last subnet consume batched inputs; their features are
        # multiplied elementwise.
        branch = torch.stack([self.ms[f'Net{i + 1}'](x[i]) for i in range(n_nets - 1)])
        branch = torch.prod(branch, dim=0)
        # The last subnet embeds the location grid; combine via matmul to get
        # one output per (batch, location) pair.
        trunk = self.ms[f'Net{n_nets}'](x[-1])
        out = branch @ trunk.t()
        if self.bias:
            out = out + self.ps['bias']
        return out

    def __init_modules(self):
        modules = torch.nn.ModuleDict()
        for i, size in enumerate(self.sizes):
            modules[f'Net{i + 1}'] = FNN(size, self.activation, self.initializer)
        return modules

    def __init_parameters(self):
        parameters = torch.nn.ParameterDict()
        if self.bias:
            parameters['bias'] = torch.nn.Parameter(torch.zeros([1]))
        return parameters
|
class Module(torch.nn.Module):
    """Standard module format.

    Extends ``torch.nn.Module`` with string-driven device/dtype switching and
    name-based activation/initializer lookup used by the Map/Algorithm
    subclasses.
    """
    def __init__(self):
        super(Module, self).__init__()
        self.activation = None   # activation name ('relu', ...) or a callable
        self.initializer = None  # initializer name ('He normal', ...)
        self.__device = None
        self.__dtype = None
    @property
    def device(self):
        return self.__device
    @property
    def dtype(self):
        return self.__dtype
    @device.setter
    def device(self, d):
        # Accepts 'cpu' or 'gpu'; moves parameters and records the device on
        # every nested Module (name mangling writes _Module__device).
        if (d == 'cpu'):
            self.cpu()
            for module in self.modules():
                if isinstance(module, Module):
                    module.__device = torch.device('cpu')
        elif (d == 'gpu'):
            self.cuda()
            for module in self.modules():
                if isinstance(module, Module):
                    module.__device = torch.device('cuda')
        else:
            raise ValueError
    @dtype.setter
    def dtype(self, d):
        # Accepts 'float' (float32) or 'double' (float64).
        if (d == 'float'):
            self.to(torch.float32)
            for module in self.modules():
                if isinstance(module, Module):
                    module.__dtype = torch.float32
        elif (d == 'double'):
            self.to(torch.float64)
            for module in self.modules():
                if isinstance(module, Module):
                    module.__dtype = torch.float64
        else:
            raise ValueError
    @property
    def act(self):
        """Resolve `self.activation` to a callable."""
        if callable(self.activation):
            return self.activation
        elif (self.activation == 'sigmoid'):
            return torch.sigmoid
        elif (self.activation == 'relu'):
            return torch.relu
        elif (self.activation == 'tanh'):
            return torch.tanh
        elif (self.activation == 'elu'):
            # Bug fix: `torch.elu` is not part of torch's public namespace
            # (the original would raise AttributeError); use the supported
            # functional form instead.
            return torch.nn.functional.elu
        else:
            raise NotImplementedError
    @property
    def weight_init_(self):
        """Resolve `self.initializer` to an in-place weight initializer."""
        if (self.initializer == 'He normal'):
            return torch.nn.init.kaiming_normal_
        elif (self.initializer == 'He uniform'):
            return torch.nn.init.kaiming_uniform_
        elif (self.initializer == 'Glorot normal'):
            return torch.nn.init.xavier_normal_
        elif (self.initializer == 'Glorot uniform'):
            return torch.nn.init.xavier_uniform_
        elif (self.initializer == 'orthogonal'):
            return torch.nn.init.orthogonal_
        elif (self.initializer == 'default'):
            return (lambda x: None)
        else:
            raise NotImplementedError
    @property
    def bias_init_(self):
        """Resolve `self.initializer` to an in-place bias initializer
        (zeros for every named scheme, no-op for 'default')."""
        if (self.initializer == 'He normal'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'He uniform'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'Glorot normal'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'Glorot uniform'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'orthogonal'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'default'):
            return (lambda x: None)
        else:
            raise NotImplementedError
    @map_elementwise
    def _to_tensor(self, x):
        # Convert array-likes to tensors on this module's device/dtype.
        if (not isinstance(x, torch.Tensor)):
            x = torch.tensor(x, dtype=self.dtype, device=self.device)
        return x
|
class Map(Module):
    """Structure-oriented neural network used as a general map based on
    designing architecture."""

    def __init__(self):
        super(Map, self).__init__()

    def predict(self, x, returnnp=False):
        """Apply the map to `x`; optionally convert the result to numpy."""
        x = self._to_tensor(x)
        out = self(x)
        return out.cpu().detach().numpy() if returnnp else out
|
class Algorithm(Module, abc.ABC):
    """Loss-oriented neural network used as an algorithm based on designing
    loss.

    Subclasses must implement `criterion` (the training loss) and `predict`.
    """
    def __init__(self):
        super(Algorithm, self).__init__()
    def forward(self, x):
        # Identity; the interesting computation lives in `criterion`.
        return x
    @abc.abstractmethod
    def criterion(self, X, y):
        """Return the training loss for a batch (X, y)."""
        pass
    @abc.abstractmethod
    def predict(self):
        pass
|
class PNN(Map):
    """INN-based Poisson neural network."""

    def __init__(self, inn, sympnet, recurrent=1):
        super(PNN, self).__init__()
        self.inn = inn
        self.sympnet = sympnet
        self.recurrent = recurrent
        self.dim = sympnet.dim

    def forward(self, x):
        # Transform to latent coordinates, apply the symplectic map
        # `recurrent` times, then transform back.
        latent = self.inn(x)
        for _ in range(self.recurrent):
            latent = self.sympnet(latent)
        return self.inn.inverse(latent)

    def predict(self, x, steps=1, keepinitx=False, returnnp=False):
        """Roll the dynamics forward `steps` times from initial state `x`."""
        x = self._to_tensor(x)
        ndim = len(x.size())
        trajectory = [self.inn(x)]
        for _ in range(steps):
            trajectory.append(self.sympnet(trajectory[-1]))
        trajectory = [self.inn.inverse(state) for state in trajectory]
        if keepinitx:
            steps = steps + 1
        else:
            trajectory = trajectory[1:]
        res = torch.cat(trajectory, dim=-1)
        if steps > 1:
            # Reshape to [..., steps, dim], dropping the batch axis for
            # unbatched inputs.
            res = res.view([-1, steps, self.dim][(2 - ndim):])
        return res.cpu().detach().numpy() if returnnp else res
|
class AEPNN(Algorithm):
    """Autoencoder-based Poisson neural network."""

    def __init__(self, ae, sympnet, lam=1, recurrent=1):
        super(AEPNN, self).__init__()
        self.ae = ae
        self.sympnet = sympnet
        self.lam = lam
        self.recurrent = recurrent
        self.dim = ae.encoder_size[0]

    def criterion(self, X, y):
        """Symplectic prediction loss in latent space plus `lam`-weighted
        autoencoder reconstruction loss."""
        mse_fn = torch.nn.MSELoss()
        X_latent = self.ae.encode(X)
        y_latent = self.ae.encode(y)
        stepped = X_latent
        for _ in range(self.recurrent):
            stepped = self.sympnet(stepped)
        symp_loss = mse_fn(stepped, y_latent)
        ae_loss = mse_fn(self.ae.decode(X_latent), X) + mse_fn(self.ae.decode(y_latent), y)
        return symp_loss + self.lam * ae_loss

    def predict(self, x, steps=1, keepinitx=False, returnnp=False):
        """Roll the latent dynamics forward `steps` times from state `x`."""
        x = self._to_tensor(x)
        ndim = len(x.size())
        trajectory = [self.ae.encode(x)]
        for _ in range(steps):
            trajectory.append(self.sympnet(trajectory[-1]))
        trajectory = [self.ae.decode(state) for state in trajectory]
        if keepinitx:
            steps = steps + 1
        else:
            trajectory = trajectory[1:]
        res = torch.cat(trajectory, dim=-1)
        if steps > 1:
            # Reshape to [..., steps, dim], dropping the batch axis for
            # unbatched inputs.
            res = res.view([-1, steps, self.dim][(2 - ndim):])
        return res.cpu().detach().numpy() if returnnp else res
|
class S2S(Map):
    """Seq2seq model.

    Input: [batch_size, len_in, dim_in]
    Output: [batch_size, len_out, dim_out]
    """

    def __init__(self, dim_in, len_in, dim_out, len_out, hidden_size=10, cell='LSTM'):
        super(S2S, self).__init__()
        self.dim_in = dim_in
        self.len_in = len_in
        self.dim_out = dim_out
        self.len_out = len_out
        self.hidden_size = hidden_size
        self.cell = cell
        self.encoder = self.__init_encoder()
        self.decoder = self.__init_decoder()
        self.att_weights = self.__init_att_weights()
        self.out = self.__init_out()

    def forward(self, x):
        # Accept a single unbatched sequence as well.
        to_squeeze = len(x.size()) == 2
        if to_squeeze:
            x = x.view(1, self.len_in, self.dim_in)
        zeros = torch.zeros([1, x.size(0), self.hidden_size], dtype=x.dtype, device=x.device)
        init_state = (zeros, zeros) if self.cell == 'LSTM' else zeros
        x, _ = self.encoder(x, init_state)
        # Learned attention: mix len_in encoder states into len_out decoder inputs.
        x = torch.softmax(self.att_weights, dim=1) @ x
        x, _ = self.decoder(x, init_state)
        x = self.out(x)
        return x.squeeze(0) if to_squeeze else x

    def __make_rnn(self, input_size):
        # One recurrent layer of the configured cell type.
        cells = {'RNN': torch.nn.RNN, 'LSTM': torch.nn.LSTM, 'GRU': torch.nn.GRU}
        if self.cell not in cells:
            raise NotImplementedError
        return cells[self.cell](input_size, self.hidden_size, batch_first=True)

    def __init_encoder(self):
        return self.__make_rnn(self.dim_in)

    def __init_decoder(self):
        return self.__make_rnn(self.hidden_size)

    def __init_att_weights(self):
        return torch.nn.Parameter(torch.zeros([self.len_out, self.len_in]))

    def __init_out(self):
        return torch.nn.Linear(self.hidden_size, self.dim_out)
|
def timing(func):
    """Decorator that prints how long each call to `func` takes."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        print("'" + func.__name__ + "'" + ' took {} s'.format(elapsed))
        return result
    return wrapper
|
def str_current_time():
    """Current local time formatted as 'YYYY-mm-dd-HH-MM-SS'."""
    now = time.localtime(time.time())
    return time.strftime('%Y-%m-%d-%H-%M-%S', now)
|
def map_elementwise(func):
    """Decorator that maps `func` over the elements of any list/tuple/dict
    arguments, recursing into nested containers.

    The first container found (positional args scanned before keyword
    values) determines the result structure; non-container arguments are
    broadcast unchanged to every element.  With no container arguments,
    `func` is called directly.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Locate the first container argument; remember its type and index
        # space (keys for dicts, length for sequences).
        (container, idx) = (None, None)
        for arg in args:
            if (type(arg) in (list, tuple, dict)):
                (container, idx) = (type(arg), (arg.keys() if (type(arg) == dict) else len(arg)))
                break
        if (container is None):
            for value in kwargs.values():
                if (type(value) in (list, tuple, dict)):
                    (container, idx) = (type(value), (value.keys() if (type(value) == dict) else len(value)))
                    break
        if (container is None):
            return func(*args, **kwargs)
        elif (container in (list, tuple)):
            # Pick element i from matching containers, broadcast the rest.
            get = (lambda element, i: (element[i] if (type(element) is container) else element))
            return container((wrapper(*[get(arg, i) for arg in args], **{key: get(value, i) for (key, value) in kwargs.items()}) for i in range(idx)))
        elif (container is dict):
            get = (lambda element, key: (element[key] if (type(element) is dict) else element))
            return {key: wrapper(*[get(arg, key) for arg in args], **{key_: get(value_, key) for (key_, value_) in kwargs.items()}) for key in idx}
    return wrapper
|
class lazy_property():
    """Descriptor that computes a value on first access and caches it on the
    instance, so later accesses bypass ``__get__`` entirely."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls):
        # Fix: class-level access (instance is None) used to crash by calling
        # func(None); return the descriptor itself, like `property` does.
        if instance is None:
            return self
        val = self.func(instance)
        # Shadow the descriptor with the computed value on the instance.
        setattr(instance, self.func.__name__, val)
        return val
|
def softmax(x):
    """Numerically stable softmax over the last axis of a numpy array."""
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)
|
def mse(x, y):
    """Mean squared error between two tensors."""
    loss_fn = torch.nn.MSELoss()
    return loss_fn(x, y)
|
def cross_entropy_loss(y_pred, y_label):
    """Cross entropy supporting both soft labels (when shapes match) and
    hard integer class labels."""
    if y_pred.size() == y_label.size():
        log_probs = torch.log_softmax(y_pred, dim=-1)
        return torch.mean(-torch.sum(log_probs * y_label, dim=-1))
    return torch.nn.CrossEntropyLoss()(y_pred, y_label.long())
|
def grad(y, x, create_graph=True, keepdim=False):
    """
    y: [N, Ny] or [Ny]
    x: [N, Nx] or [Nx]
    Return dy/dx ([N, Ny, Nx] or [Ny, Nx]).
    """
    N = y.size(0) if len(y.size()) == 2 else 1
    Ny = y.size(-1)
    Nx = x.size(-1)
    ones = torch.ones_like(y[..., 0])
    # Differentiate each output component separately, then stack the rows
    # along the last axis.
    rows = [
        torch.autograd.grad(y[..., i], x, grad_outputs=ones, create_graph=create_graph)[0]
        for i in range(Ny)
    ]
    shape = np.array([N, Ny])[(2 - len(y.size())):]
    shape = list(shape) if keepdim else list(shape[shape > 1])
    return torch.cat(rows, dim=-1).view(shape + [Nx])
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training dataloader.

    Uses a DistributedSampler when a distributed process group is available,
    falling back to single-process shuffling otherwise.

    :return: (dataloader, dataset_size, sampler_or_None)
    """
    msrvtt_dataset = MSRVTTDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except Exception:
        # Fix: was a bare `except:` (also swallowed SystemExit/KeyboardInterrupt).
        # DistributedSampler raises when no process group is initialized.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
|
def dataloader_msrvtt_test(args, tokenizer, subset='test'):
    """Build an MSR-VTT evaluation dataloader (no shuffling, keeps last batch).

    :return: (dataloader, dataset_size)
    """
    msrvtt_testset = MSRVTTDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_testset)
    except Exception:
        # Fix: was a bare `except:`; fall back when distributed is uninitialized.
        test_sampler = None
    dataloader_msrvtt = DataLoader(msrvtt_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_msrvtt, len(msrvtt_testset))
|
def dataloader_msrvtt_train_test(args, tokenizer):
    """Build the MSR-VTT 'train_test' dataloader (train-style batching).

    :return: (dataloader, dataset_size, sampler_or_None)
    """
    msrvtt_dataset = MSRVTTDataset(subset='train_test', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except Exception:
        # Fix: was a bare `except:`; fall back when distributed is uninitialized.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
|
def dataloader_lsmdc_train(args, tokenizer):
    """Build the LSMDC training dataloader.

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs, in which case the DataLoader shuffles by itself.
    """
    lsmdc_dataset = LsmdcDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: the MSR-VTT loaders tolerate single-process runs
        # (DistributedSampler raises without an initialized process group);
        # do the same here instead of crashing.
        train_sampler = None
    dataloader = DataLoader(lsmdc_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(lsmdc_dataset), train_sampler)
|
def dataloader_lsmdc_train_test(args, tokenizer):
    """Build the LSMDC 'train_test' dataloader (evaluation on training clips).

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs.
    """
    lsmdc_dataset = LsmdcDataset(subset='train_test', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(lsmdc_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(lsmdc_dataset), train_sampler)
|
def dataloader_lsmdc_test(args, tokenizer, subset='test'):
    """Build the LSMDC evaluation dataloader for `subset`.

    Returns (dataloader, dataset_size). No shuffling, no batch dropping.
    """
    lsmdc_testset = LsmdcDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_testset)
    except (RuntimeError, ValueError):
        # BUG FIX: was a bare `except:`; only catch the expected failure
        # when torch.distributed is not initialized.
        test_sampler = None
    dataloader_lsmdc = DataLoader(lsmdc_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_lsmdc, len(lsmdc_testset))
|
def dataloader_activity_train(args, tokenizer):
    """Build the ActivityNet training dataloader.

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs, in which case the DataLoader shuffles by itself.
    """
    activity_dataset = ActivityNetDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(activity_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(activity_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(activity_dataset), train_sampler)
|
def dataloader_activity_train_test(args, tokenizer):
    """Build the ActivityNet 'train_test' dataloader (evaluation on training clips).

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs.
    """
    activity_dataset = ActivityNetDataset(subset='train_test', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(activity_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(activity_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(activity_dataset), train_sampler)
|
def dataloader_activity_test(args, tokenizer, subset='test'):
    """Build the ActivityNet evaluation dataloader for `subset`.

    Returns (dataloader, dataset_size). No shuffling, no batch dropping.
    """
    activity_testset = ActivityNetDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(activity_testset)
    except (RuntimeError, ValueError):
        # BUG FIX: was a bare `except:`; only catch the expected failure
        # when torch.distributed is not initialized.
        test_sampler = None
    dataloader_activity = DataLoader(activity_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_activity, len(activity_testset))
|
def dataloader_msvd_train(args, tokenizer):
    """Build the MSVD training dataloader.

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs, in which case the DataLoader shuffles by itself.
    """
    msvd_dataset = MsvdDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msvd_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(msvd_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msvd_dataset), train_sampler)
|
def dataloader_msvd_train_test(args, tokenizer):
    """Build the MSVD 'train_test' dataloader (evaluation on training clips).

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs.
    """
    msvd_dataset = MsvdDataset(subset='train_test', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msvd_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(msvd_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msvd_dataset), train_sampler)
|
def dataloader_msvd_test(args, tokenizer, subset='test'):
    """Build the MSVD evaluation dataloader for `subset`.

    Returns (dataloader, dataset_size). No shuffling, no batch dropping.
    """
    msvd_testset = MsvdDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msvd_testset)
    except (RuntimeError, ValueError):
        # BUG FIX: was a bare `except:`; only catch the expected failure
        # when torch.distributed is not initialized.
        test_sampler = None
    dataloader_msvd = DataLoader(msvd_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_msvd, len(msvd_testset))
|
def dataloader_didemo_train(args, tokenizer):
    """Build the DiDeMo training dataloader.

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs, in which case the DataLoader shuffles by itself.
    """
    didemo_dataset = DiDeMoDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(didemo_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(didemo_dataset), train_sampler)
|
def dataloader_didemo_train_test(args, tokenizer):
    """Build the DiDeMo 'train_test' dataloader (evaluation on training clips).

    Returns (dataloader, dataset_size, sampler); sampler is None outside
    distributed runs.
    """
    didemo_dataset = DiDeMoDataset(subset='train_test', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset)
    except (RuntimeError, ValueError):
        # Consistency fix: tolerate single-process runs like the MSR-VTT loaders.
        train_sampler = None
    dataloader = DataLoader(didemo_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(didemo_dataset), train_sampler)
|
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """Build the DiDeMo evaluation dataloader for `subset`.

    Returns (dataloader, dataset_size). No shuffling, no batch dropping.
    """
    didemo_testset = DiDeMoDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(didemo_testset)
    except (RuntimeError, ValueError):
        # BUG FIX: was a bare `except:`; only catch the expected failure
        # when torch.distributed is not initialized.
        test_sampler = None
    dataloader_didemo = DataLoader(didemo_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_didemo, len(didemo_testset))
|
class LsmdcDataset(RetrievalDataset):
    """LSMDC video-text retrieval dataset (LSMDC16 tab-separated annotation files)."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(LsmdcDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Parse LSMDC annotations for the configured split.

        Returns:
            video_dict: dict: video_id -> video file path
            sentences_dict: dict: index -> (video_id, (sentence, start, end))
                start/end are None here (whole-clip captions).
        """
        # One annotation file per split; 'train_test' reuses the val file.
        video_json_path_dict = {}
        video_json_path_dict['train'] = os.path.join(self.anno_path, 'LSMDC16_annos_training.csv')
        video_json_path_dict['train_test'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['val'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['test'] = os.path.join(self.anno_path, 'LSMDC16_challenge_1000_publictect.csv')
        video_id_list = []
        caption_dict = {}
        # Each line: <clip_id> <start_aligned> <end_aligned> <start_extracted> <end_extracted> <sentence>
        # separated by tabs (6 fields).
        with open(video_json_path_dict[self.subset], 'r') as fp:
            for line in fp:
                line = line.strip()
                line_split = line.split('\t')
                assert (len(line_split) == 6)
                (clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence) = line_split
                # Skip a small blacklist of clips (presumably corrupt/missing files
                # — TODO confirm).
                if (clip_id not in ['0017_Pianist_00.23.28.872-00.23.34.843', '0017_Pianist_00.30.36.767-00.30.38.009', '3064_SPARKLE_2012_01.41.07.000-01.41.11.793']):
                    caption_dict[len(caption_dict)] = (clip_id, (sentence, None, None))
                    if (clip_id not in video_id_list):
                        video_id_list.append(clip_id)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Walk the video directory and keep only files whose basename (without
        # extension) appears in the annotation list.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])
                if (video_id_ not in video_id_list):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        # Keep only captions whose video file was actually found on disk.
        for (clip_id, sentence) in caption_dict.values():
            if (clip_id not in video_dict):
                continue
            sentences_dict[len(sentences_dict)] = (clip_id, sentence)
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
|
class MSRVTTDataset(RetrievalDataset):
    """MSR-VTT video-text retrieval dataset (9k train split / JSFUSION 1k test)."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MSRVTTDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Parse MSR-VTT annotations for the configured split.

        Returns:
            video_dict: dict: video_id -> video file path (.mp4)
            sentences_dict: dict: index -> (video_id, (caption, start, end))
                start/end are None here (whole-clip captions).
        """
        # The csv only lists the video ids of the split; captions for train
        # come from MSRVTT_data.json, captions for val/test from the csv itself.
        csv_path = {'train': join(self.anno_path, 'MSRVTT_train.9k.csv'), 'val': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'test': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'train_test': join(self.anno_path, 'MSRVTT_train.9k.csv')}[subset]
        if exists(csv_path):
            csv = pd.read_csv(csv_path)
        else:
            raise FileNotFoundError
        video_id_list = list(csv['video_id'].values)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        if (subset == 'train'):
            # All ~20 captions per training video become separate samples.
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            for itm in data['sentences']:
                if (itm['video_id'] in video_id_list):
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        elif (subset == 'train_test'):
            # Like 'train', but keep only the first caption per video so
            # evaluation on the training set uses one pair per clip.
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            used = []
            for itm in data['sentences']:
                if ((itm['video_id'] in video_id_list) and (itm['video_id'] not in used)):
                    used.append(itm['video_id'])
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        else:
            # val/test: one sentence per row directly from the JSFUSION csv.
            for (_, itm) in csv.iterrows():
                sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['sentence'], None, None))
                video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
|
class MsvdDataset(RetrievalDataset):
    """MSVD video-text retrieval dataset (multi-sentence-per-video evaluation)."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MsvdDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Parse MSVD annotations for the configured split.

        Returns (video_dict, sentences_dict) as in the other datasets.
        Also populates multi-sentence bookkeeping on self:
        cut_off_points marks, per video, the index of its last sentence in
        sentences_dict (used to regroup scores at evaluation time).
        """
        self.sample_len = 0
        self.cut_off_points = []
        # MSVD evaluates many captions against each video.
        self.multi_sentence_per_video = True
        video_id_path_dict = {}
        video_id_path_dict['train'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['train_test'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['val'] = os.path.join(self.anno_path, 'val_list.txt')
        video_id_path_dict['test'] = os.path.join(self.anno_path, 'test_list.txt')
        caption_file = os.path.join(self.anno_path, 'raw-captions.pkl')
        with open(video_id_path_dict[subset], 'r') as fp:
            video_ids = [itm.strip() for itm in fp.readlines()]
        # raw-captions.pkl: video_id -> list of tokenized captions (list of words).
        with open(caption_file, 'rb') as f:
            captions = pickle.load(f)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Resolve each listed video id to an actual file under video_path.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])
                if (video_id_ not in video_ids):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        # Iteration order matters: cut_off_points must align with the order
        # sentences are appended per video.
        for video_id in video_ids:
            assert (video_id in captions)
            for cap in captions[video_id]:
                cap_txt = ' '.join(cap)
                sentences_dict[len(sentences_dict)] = (video_id, (cap_txt, None, None))
            self.cut_off_points.append((len(sentences_dict) - 1))
        if ((subset == 'val') or (subset == 'test')):
            self.sentence_num = len(sentences_dict)
            self.video_num = len(video_ids)
            assert (len(self.cut_off_points) == self.video_num)
            print('For {}, sentence number: {}'.format(subset, self.sentence_num))
            print('For {}, video number: {}'.format(subset, self.video_num))
        print('Video number: {}'.format(len(video_dict)))
        print('Total Paire: {}'.format(len(sentences_dict)))
        self.sample_len = len(sentences_dict)
        return (video_dict, sentences_dict)
|
def _interpolation(kwargs):
    """Pop and return the 'resample' filter from kwargs.

    A list/tuple value means "pick one at random"; default is bilinear.
    """
    resample = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(resample, (list, tuple)):
        resample = random.choice(resample)
    return resample
|
def _check_args_tf(kwargs):
    """Normalize affine-transform kwargs in place.

    Old PIL (< 5.0) does not support 'fillcolor', so drop it there; always
    resolve 'resample' to a concrete filter via _interpolation.
    """
    if _PIL_VER < (5, 0):
        kwargs.pop('fillcolor', None)
    kwargs['resample'] = _interpolation(kwargs)
|
def shear_x(img, factor, **kwargs):
    """Shear the image horizontally by `factor`."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def shear_y(img, factor, **kwargs):
    """Shear the image vertically by `factor`."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def translate_x_rel(img, pct, **kwargs):
    """Translate horizontally by a fraction `pct` of the image width."""
    _check_args_tf(kwargs)
    offset = pct * img.size[0]
    return img.transform(img.size, Image.AFFINE, (1, 0, offset, 0, 1, 0), **kwargs)
|
def translate_y_rel(img, pct, **kwargs):
    """Translate vertically by a fraction `pct` of the image height."""
    _check_args_tf(kwargs)
    offset = pct * img.size[1]
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, offset), **kwargs)
|
def translate_x_abs(img, pixels, **kwargs):
    """Translate horizontally by an absolute number of pixels."""
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def translate_y_abs(img, pixels, **kwargs):
    """Translate vertically by an absolute number of pixels."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def rotate(img, degrees, **kwargs):
    """Rotate the image by `degrees`, dispatching on the installed PIL version.

    PIL >= 5.2 supports kwargs (e.g. fillcolor) on Image.rotate directly;
    5.0-5.1 need a manual center-rotation affine via Image.transform;
    older versions fall back to plain rotate with only the resample filter.
    """
    _check_args_tf(kwargs)
    if (_PIL_VER >= (5, 2)):
        return img.rotate(degrees, **kwargs)
    elif (_PIL_VER >= (5, 0)):
        # Build an affine matrix that rotates about the image center
        # (Image.transform alone rotates about the origin).
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = ((w / 2.0), (h / 2.0))
        angle = (- math.radians(degrees))
        matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]
        def transform(x, y, matrix):
            # Apply the 2x3 affine matrix (a, b, c, d, e, f) to point (x, y).
            (a, b, c, d, e, f) = matrix
            return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
        # Shift so rotation is centered: translate center to origin, rotate,
        # translate back.
        (matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        return img.rotate(degrees, resample=kwargs['resample'])
|
def auto_contrast(img, **__):
    """Maximize image contrast via ImageOps.autocontrast; extra kwargs ignored."""
    return ImageOps.autocontrast(img)
|
def invert(img, **__):
    """Invert (negate) the image; extra kwargs ignored."""
    return ImageOps.invert(img)
|
def equalize(img, **__):
    """Equalize the image histogram; extra kwargs ignored."""
    return ImageOps.equalize(img)
|
def solarize(img, thresh, **__):
    """Invert all pixel values above `thresh`; extra kwargs ignored."""
    return ImageOps.solarize(img, thresh)
|
def solarize_add(img, add, thresh=128, **__):
    """Add `add` to each pixel value below `thresh` (clamped at 255).

    Only applied to 'L' and 'RGB' images; any other mode is returned as-is.
    """
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode not in ('L', 'RGB'):
        return img
    if img.mode == 'RGB' and len(lut) == 256:
        # Image.point expects one 256-entry table per band for RGB.
        lut = lut * 3
    return img.point(lut)
|
def posterize(img, bits_to_keep, **__):
    """Reduce each channel to `bits_to_keep` bits; no-op for 8 bits or more."""
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)
|
def contrast(img, factor, **__):
    """Adjust contrast by `factor` (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Contrast(img).enhance(factor)
|
def color(img, factor, **__):
    """Adjust color saturation by `factor` (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Color(img).enhance(factor)
|
def brightness(img, factor, **__):
    """Adjust brightness by `factor` (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Brightness(img).enhance(factor)
|
def sharpness(img, factor, **__):
    """Adjust sharpness by `factor` (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Sharpness(img).enhance(factor)
|
def _randomly_negate(v):
'With 50% prob, negate the value'
return ((- v) if (random.random() > 0.5) else v)
|
def _rotate_level_to_arg(level, _hparams):
    """Map magnitude to a rotation angle in [-30, 30] degrees (random sign)."""
    degrees = _randomly_negate(level / _MAX_LEVEL * 30.0)
    return (degrees,)
|
def _enhance_level_to_arg(level, _hparams):
    """Map magnitude to an ImageEnhance factor in [0.1, 1.9]."""
    factor = level / _MAX_LEVEL * 1.8 + 0.1
    return (factor,)
|
def _enhance_increasing_level_to_arg(level, _hparams):
    """Map magnitude to a factor in [0.1, 1.9] centered at 1.0 (random sign)."""
    delta = _randomly_negate(level / _MAX_LEVEL * 0.9)
    return (1.0 + delta,)
|
def _shear_level_to_arg(level, _hparams):
    """Map magnitude to a shear factor in [-0.3, 0.3] (random sign)."""
    factor = _randomly_negate(level / _MAX_LEVEL * 0.3)
    return (factor,)
|
def _translate_abs_level_to_arg(level, hparams):
    """Map magnitude to an absolute pixel translation, scaled by
    hparams['translate_const'] (random sign)."""
    translate_const = hparams['translate_const']
    pixels = _randomly_negate(level / _MAX_LEVEL * float(translate_const))
    return (pixels,)
|
def _translate_rel_level_to_arg(level, hparams):
    """Map magnitude to a relative translation fraction, up to
    hparams['translate_pct'] (default 0.45), with random sign."""
    max_pct = hparams.get('translate_pct', 0.45)
    pct = _randomly_negate(level / _MAX_LEVEL * max_pct)
    return (pct,)
|
def _posterize_level_to_arg(level, _hparams):
    """Map magnitude to bits-to-keep in [0, 4] (severity increases with level)."""
    bits = int(level / _MAX_LEVEL * 4)
    return (bits,)
|
def _posterize_increasing_level_to_arg(level, hparams):
    """Map magnitude to bits-to-keep in [4, 0]: higher level keeps fewer bits."""
    bits = _posterize_level_to_arg(level, hparams)[0]
    return (4 - bits,)
|
def _posterize_original_level_to_arg(level, _hparams):
    """Map magnitude to bits-to-keep in [4, 8] (original AutoAugment range)."""
    bits = int(level / _MAX_LEVEL * 4) + 4
    return (bits,)
|
def _solarize_level_to_arg(level, _hparams):
    """Map magnitude to a solarize threshold in [0, 256]."""
    thresh = int(level / _MAX_LEVEL * 256)
    return (thresh,)
|
def _solarize_increasing_level_to_arg(level, _hparams):
    """Map magnitude to a threshold in [256, 0]: higher level solarizes more."""
    thresh = _solarize_level_to_arg(level, _hparams)[0]
    return (256 - thresh,)
|
def _solarize_add_level_to_arg(level, _hparams):
    """Map magnitude to a solarize_add amount in [0, 110]."""
    amount = int(level / _MAX_LEVEL * 110)
    return (amount,)
|
class AugmentOp():
    """A single augmentation op applied with probability `prob` at `magnitude`.

    Extends the image-level op to videos: a list of PIL frames gets the same
    op (with the same sampled arguments) applied to every frame.
    """

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = (hparams or _HPARAMS_DEFAULT)
        # Look up the PIL transform and its magnitude->argument mapper by name.
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # Fill color and resample filter forwarded to the PIL geometric ops.
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL), 'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        # If > 0, jitter the magnitude with gaussian noise at call time.
        self.magnitude_std = self.hparams.get('magnitude_std', 0)

    def __call__(self, img_list):
        # Skip the op entirely with probability (1 - prob).
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            magnitude = random.gauss(magnitude, self.magnitude_std)
        # Clamp jittered magnitude into the valid [0, _MAX_LEVEL] range.
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        if isinstance(img_list, list):
            # Video: same op and arguments for every frame.
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
|
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized per-op sampling probabilities for RandAugment.

    Only weight set 0 is defined; returns a numpy array summing to 1.
    """
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one weight set exists
    weights = [_RAND_CHOICE_WEIGHTS_0[name] for name in transforms]
    return weights / np.sum(weights)
|
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build the list of AugmentOp instances (prob=0.5 each) for RandAugment."""
    hparams = hparams or _HPARAMS_DEFAULT
    names = transforms or _RAND_TRANSFORMS
    ops = []
    for name in names:
        ops.append(AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
|
class RandAugment():
    """Apply `num_layers` ops sampled from `ops` in sequence.

    Sampling is with replacement unless explicit choice weights are given.
    """

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        with_replacement = self.choice_weights is None
        chosen = np.random.choice(self.ops, self.num_layers, replace=with_replacement, p=self.choice_weights)
        for op in chosen:
            img = op(img)
        return img
|
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719

    Create a RandAugment transform.

    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
    dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, not order specific, determine
        'm' - integer magnitude of rand augment
        'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probability weight index (index of a set of weights to influence choice of op)
        'mstd' - float std deviation of magnitude noise applied
        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
    Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
    'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert (config[0] == 'rand')
    config = config[1:]
    for c in config:
        # Split 'm9' -> ('m', '9...'); sections without a digit are ignored.
        cs = re.split('(\\d.*)', c)
        if (len(cs) < 2):
            continue
        (key, val) = cs[:2]
        if (key == 'mstd'):
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            # BUG FIX: was `if bool(val)`, which is True for any non-empty
            # string including '0'; interpret the numeric value instead.
            if int(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            # BUG FIX: was `assert NotImplementedError`, which always passes
            # (the exception class is truthy) and silently ignored unknown keys.
            raise NotImplementedError('Unknown RandAugment config section: {}'.format(key))
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = (None if (weight_idx is None) else _select_rand_weights(weight_idx))
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
|
class RawVideoExtractorCV2():
    """Decode frames from a video file with OpenCV and apply CLIP-style
    preprocessing; in 'train' mode frames additionally go through RandAugment.
    """

    def __init__(self, centercrop=False, size=224, framerate=(- 1), subset='test'):
        self.centercrop = centercrop
        self.size = size
        # Target sampling rate in frames per second; <= 0 means "use video fps".
        self.framerate = framerate
        self.transform = self._transform(self.size)
        self.subset = subset
        # Alternative train/test pipelines with CLIP's normalization constants.
        self.tsfm_dict = {'clip_test': Compose([Resize(size, interpolation=InterpolationMode.BICUBIC), CenterCrop(size), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]), 'clip_train': Compose([RandomResizedCrop(size, scale=(0.5, 1.0)), RandomHorizontalFlip(), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])}
        # Frame-level RandAugment used only when subset == 'train'.
        self.aug_transform = video_transforms.create_random_augment(input_size=(size, size), auto_augment='rand-m7-n4-mstd0.5-inc1', interpolation='bicubic')

    def _transform(self, n_px):
        """CLIP preprocessing: resize, center-crop, to-tensor, normalize."""
        return Compose([Resize(n_px, interpolation=InterpolationMode.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None, _no_process=False):
        """Decode `sample_fp` frames per second from [start_time, end_time].

        Returns {'video': tensor of stacked preprocessed frames}, or
        {'video': list of PIL images} when _no_process, or a zero tensor
        when no frame could be decoded.
        """
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (- 1)) and (end_time > start_time))
        assert (sample_fp > (- 1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if (fps == 0):
            # NOTE(review): this only logs the bad file — the division below
            # will still raise ZeroDivisionError when fps == 0; confirm
            # upstream guarantees decodable videos.
            print(((video_file + '\n') * 10))
        # Duration in whole seconds, rounded up.
        total_duration = (((frameCount + fps) - 1) // fps)
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            interval = (fps // sample_fp)
        else:
            sample_fp = fps
        if (interval == 0):
            interval = 1
        # Frame offsets within each second to grab (sample_fp of them).
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # Random-access seek to each sampled frame, then decode it.
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                # OpenCV decodes BGR; convert to RGB for PIL/CLIP.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if _no_process:
                    images.append(Image.fromarray(frame_rgb).convert('RGB'))
                else:
                    images.append(Image.fromarray(frame_rgb))
        cap.release()
        if (len(images) > 0):
            if _no_process:
                video_data = images
            else:
                if (self.subset == 'train'):
                    # Clip-level RandAugment before the per-frame preprocess.
                    images = self.aug_transform(images)
                video_data = th.stack([preprocess(img) for img in images])
        else:
            # Decoding failed entirely; caller must handle the dummy tensor.
            video_data = th.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None, _no_process=False):
        """Convenience wrapper: decode with this extractor's framerate/transform."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time, _no_process=_no_process)
        return image_input

    def process_raw_data(self, raw_video_data):
        """Reshape (..., C, H, W) frames to (num_frames, 1, C, H, W)."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        """Reorder frames: 0 = keep, 1 = reverse, 2 = random shuffle."""
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (- 1), (- 1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
|
class LayerNorm(nn.LayerNorm):
    """LayerNorm that upcasts fp16 inputs to fp32 for the computation,
    then casts the result back to the input dtype."""

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        out = super().forward(x.to(torch.float32))
        return out.to(input_dtype)
|
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation used by CLIP: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
|
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block (CLIP-style): self-attention + MLP residuals.

    forward takes and returns a (x, attn_mask) tuple so blocks can be chained
    in nn.Sequential while threading the mask through.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask=None):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model, (d_model * 4))), ('gelu', QuickGELU()), ('c_proj', nn.Linear((d_model * 4), d_model))]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.n_head = n_head

    def attention(self, x: torch.Tensor, attn_mask_: torch.Tensor):
        """Self-attention over x with a per-sample mask expanded to all heads.

        attn_mask_ may be None (no masking).
        """
        # BUG FIX: the original called attn_mask_.repeat_interleave BEFORE its
        # None check, so the guard was dead code and a None mask crashed.
        if attn_mask_ is not None:
            # nn.MultiheadAttention expects a (batch*n_head, L, S) mask when a
            # per-sample 3-D mask is supplied: replicate per head.
            attn_mask_ = attn_mask_.repeat_interleave(self.n_head, dim=0)
            attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]

    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = (x + self.attention(self.ln_1(x), attn_mask))
        x = (x + self.mlp(self.ln_2(x)))
        return (x, attn_mask)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.