code stringlengths 17 6.64M |
|---|
def version_greaterorequal(l1, l2):
    """Return True when version l1 >= version l2.

    Versions are sequences of integer components (e.g. [2, 17, 1]). The
    shorter version is padded with zeros, so [2] == [2, 0] and [2] < [2, 17].

    Fixes the original recursive comparison, which assumed both inputs had
    the same length: it returned True for [2] vs [2, 17] and raised
    IndexError for [2, 17] vs [2].
    """
    width = max(len(l1), len(l2))
    padded1 = tuple(l1) + (0,) * (width - len(l1))
    padded2 = tuple(l2) + (0,) * (width - len(l2))
    # Tuples compare lexicographically, which is exactly version ordering.
    return padded1 >= padded2
|
def get_git_version():
    """Return the installed git version as a list of ints, e.g. [2, 30, 1]."""
    out = subprocess.run(['git', '--version'], stdout=subprocess.PIPE).stdout
    text = out.decode('utf-8').replace('git version ', '').replace('\n', '')
    return [int(piece) for piece in text.split('.')]
|
def run_command(command, commit_sha, specific_path_underscore='0', git_path=None, pass_logdir_sha=None, old_new_flag=None):
    """Run `command` via os.system, optionally inside a temporary git worktree
    checked out at `commit_sha`, and return the wall-clock duration.

    Parameters:
        command: shell command to execute.
        commit_sha: commit to check out, or the literal 'CURRENT' to run in
            the current working tree (no worktree is created then).
        specific_path_underscore: suffix making the temp worktree path unique.
        git_path: path inside the git repository; its parent directory hosts
            the temporary worktree (only used when commit_sha != 'CURRENT').
        pass_logdir_sha: None, or a tuple (arg_name, fn) where fn maps the
            commit sha to the argument value; both are appended to the command.
        old_new_flag: 'old'/'new' label embedded in the temp worktree path.

    Returns:
        datetime.timedelta: duration of the os.system call.
    """
    # NOTE(review): pass_logdir_sha is documented as a (name, fn) tuple, yet
    # it is handed directly to shutil.rmtree, which expects a path — confirm
    # the intended argument shape with callers.
    if pass_logdir_sha:
        shutil.rmtree(pass_logdir_sha, ignore_errors=True)
    if (commit_sha == 'CURRENT'):
        if (not pass_logdir_sha):
            simulation_start_time = dt.datetime.now()
            os.system(command)
            simulation_end_time = dt.datetime.now()
        else:
            simulation_start_time = dt.datetime.now()
            # Append the extra CLI argument derived from the commit sha.
            os.system(f'{command} {pass_logdir_sha[0]} {pass_logdir_sha[1](commit_sha)}')
            simulation_end_time = dt.datetime.now()
    else:
        # The worktree workflow below requires a sufficiently recent git.
        assert version_greaterorequal(get_git_version(), [2, 17]), 'git version needs to be >= 2.17'
        orig_pwd = os.getcwd()
        # Worktree lives next to the repo: <repo parent>/tmp_<flag>_<sha>_<suffix>.
        path_tmp_worktree = (((('/'.join(git_path.split('/')[:(- 1)]) + f'/tmp_{old_new_flag}_') + commit_sha) + '_') + specific_path_underscore)
        subprocess.run(['git', 'worktree', 'add', '--detach', path_tmp_worktree, commit_sha], stdout=subprocess.DEVNULL)
        os.chdir(path_tmp_worktree)
        if (not pass_logdir_sha):
            simulation_start_time = dt.datetime.now()
            os.system(command)
            simulation_end_time = dt.datetime.now()
        else:
            simulation_start_time = dt.datetime.now()
            os.system(f'{command} {pass_logdir_sha[0]} {pass_logdir_sha[1](commit_sha)}')
            simulation_end_time = dt.datetime.now()
        # Clean up the throwaway worktree and restore the original cwd.
        subprocess.run(['git', 'worktree', 'remove', path_tmp_worktree], stdout=subprocess.DEVNULL)
        os.chdir(orig_pwd)
    return (simulation_end_time - simulation_start_time)
|
def get_path(level):
    """Absolute path of this file's directory, trimmed by `level` components.

    level == 0 returns the directory itself; level == k drops the last k
    path components.
    """
    here = str(pathlib.Path(__file__).parent.absolute())
    if level == 0:
        return here
    return '/'.join(here.split('/')[:-level])
|
def get_paths(parameters):
    """Build ('cfg/end-time/seed', 'cfg_end-time_seed') identifiers.

    Colons in the end-time are replaced with dashes so the strings are
    filesystem-safe.
    """
    config = parameters['new']['config']
    end_time = parameters['shared']['end-time'].replace(':', '-')
    seed = parameters['shared']['seed']
    return (f'{config}/{end_time}/{seed}', f'{config}_{end_time}_{seed}')
|
def run_test(test_):
    """Execute one (parameters, flag) test case and collect its results.

    Relies on module-level globals: runasof, root_path_abides, root_path_ec2.
    Returns a dict with sha/config/end-time/seed/time/path_to_ob/flag keys.
    """
    parameters, old_new_flag = test_
    specific_path, specific_path_underscore = get_paths(parameters)
    stamp = dt.datetime.now().strftime('%Y%m%d%H%M%S')

    def log_dir_for(x):
        # Per-run log directory: <ec2 root>/tmp/<flag>_<stamp>/<sha>/<specific path>
        return root_path_ec2 + f'/tmp/{old_new_flag}_{stamp}/' + x + '/' + specific_path

    elapsed = runasof.run_command(
        parameters['command'][old_new_flag],
        commit_sha=parameters[old_new_flag]['sha'],
        specific_path_underscore=specific_path_underscore,
        git_path=root_path_abides,
        old_new_flag=old_new_flag,
        pass_logdir_sha=('--log_dir', log_dir_for),
    )
    if parameters['with_log']:
        path_to_ob = root_path_ec2 + f"/tmp/{old_new_flag}_{stamp}/{parameters[old_new_flag]['sha']}/{specific_path}/ORDERBOOK_ABM_FULL.bz2"
    else:
        path_to_ob = 'no_log'
    return {
        'sha': parameters[old_new_flag]['sha'],
        'config': parameters[old_new_flag]['config'],
        'end-time': parameters['shared']['end-time'],
        'seed': parameters['shared']['seed'],
        'time': elapsed,
        'path_to_ob': path_to_ob,
        'flag': old_new_flag,
    }
|
def compute_ob(path_old, path_new):
    """Compare two pickled order books: 0 when identical, 1 otherwise."""
    ob_old = pd.read_pickle(path_old)
    ob_new = pd.read_pickle(path_new)
    return 0 if ob_old.equals(ob_new) else 1
|
def run_tests(LIST_PARAMETERS, varying_parameters):
    """Run every (parameters, old/new) combination in parallel and print reports.

    Parameters:
        LIST_PARAMETERS: list of parameter dicts (see generate_parameter_dict).
        varying_parameters: column names (e.g. ['config', 'end-time']) used to
            index and group the timing results.

    Side effects: prints order-book comparison results (when logging was
    enabled) and old/new timing summaries. Returns None.
    """
    old_new_flags = ['old', 'new']
    # Each parameter set runs once per flag; p_map executes them in parallel.
    tests = list(itertools.product(LIST_PARAMETERS, old_new_flags))
    outputs = p_map(run_test, tests)
    df = pd.DataFrame(outputs)
    df_old = df[(df['flag'] == 'old')]
    df_new = df[(df['flag'] == 'new')]
    print(f'THERE ARE {len(df_new)} TESTS RESULTS.')
    # Order books exist only when logging was on; assumes all parameter dicts
    # share the same 'with_log' setting as the first one.
    if LIST_PARAMETERS[0]['with_log']:
        path_olds = list(df_old['path_to_ob'])
        path_news = list(df_new['path_to_ob'])
        ob_comps = p_map(compute_ob, path_olds, path_news)
        if (sum(ob_comps) == 0):
            print('ALL TESTS ARE SUCCESS!')
        else:
            # NOTE(review): message is missing a space before 'TEST'.
            print(f'ALERT: {sum(ob_comps)}TEST FAILURE')
    # Index old/new rows identically so the subtraction aligns row-by-row.
    df_old = df_old[(varying_parameters + ['seed', 'time'])].set_index((varying_parameters + ['seed']))
    df_new = df_new[(varying_parameters + ['seed', 'time'])].set_index((varying_parameters + ['seed']))
    df_diff = (df_old - df_new)
    df_results = df_diff.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']]
    df_diff_pct = ((100 * (df_old - df_new)) / df_old)
    df_results_pct = df_diff_pct.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']]
    print('*********************************************')
    print('*********************************************')
    print('OLD RUNNING TIME')
    print(df_old.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']])
    print('*********************************************')
    print('*********************************************')
    print('NEW RUNNING TIME')
    with pd.option_context('display.float_format', '{:0.2f}'.format):
        print(df_new.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']])
    print('*********************************************')
    print('*********************************************')
    print('TIME DIFFERENCE in seconds')
    with pd.option_context('display.float_format', '{:0.2f}'.format):
        # Timedelta summary columns are converted to float seconds for display.
        df_results['mean'] = df_results['mean'].dt.total_seconds()
        df_results['std'] = df_results['std'].dt.total_seconds()
        print(df_results)
    print('*********************************************')
    print('*********************************************')
    print('TIME DIFFERENCE in %')
    with pd.option_context('display.float_format', '{:0.2f}'.format):
        print(df_results_pct)
|
def get_path(level):
    """Return this file's directory path with the last `level` components removed."""
    directory = str(pathlib.Path(__file__).parent.absolute())
    return directory if level == 0 else '/'.join(directory.split('/')[:-level])
|
def generate_parameter_dict(seed, config, end_time, with_log):
    """Assemble the parameter dict for one old-vs-new comparison run.

    Logging-related settings (log_orders/exchange_log_orders/book_freq) are
    only enabled when `with_log` is truthy; otherwise they stay None.
    """
    log_orders = True if with_log else None
    exchange_log_orders = True if with_log else None
    book_freq = 0 if with_log else None
    side = {'sha': 'f1968a56fdb55fd7c70be1db052be07cb701a5fb', 'script': 'abides_cmd.py', 'config': config}
    parameters = {
        'old': dict(side),
        'new': dict(side),
        'config_new': config,
        'end-time': end_time,
        'with_log': with_log,
        'shared': {
            'end-time': end_time,
            'end_time': end_time,
            'seed': seed,
            'verbose': 0,
            'log_orders': log_orders,
            'exchange_log_orders': exchange_log_orders,
            'book_freq': book_freq,
        },
    }
    parameters['command'] = generate_command(parameters)
    return parameters
|
def generate_command(parameters):
    """Build the 'old' and 'new' python launch commands from `parameters`.

    Every key/value in parameters['shared'] becomes a '--key value' option.
    """
    shared = ' '.join(f'--{key} {val}' for key, val in parameters['shared'].items())
    commands = {}
    for flag in ('old', 'new'):
        spec = f"{parameters[flag]['script']} -config {parameters[flag]['config']}"
        commands[flag] = 'python3 -W ignore -u ' + spec + ' ' + shared
    return commands
|
def get_path(level):
    """Path of this file's directory, optionally trimmed by `level` trailing components."""
    parts = str(pathlib.Path(__file__).parent.absolute()).split('/')
    if level == 0:
        return '/'.join(parts)
    return '/'.join(parts[:-level])
|
def generate_parameter_dict(seed):
    """Return the fixed old/new commit comparison settings for a given seed."""
    parameters = {
        'sha_old': '8ab374e8d7c9f6fa6ab522502259e94e550e81b5',
        'sha_new': 'ccdb7b3b0b099b89b86a6500e4f8f731a5dc6410',
        'script_old': 'abides.py',
        'script_new': 'abides_cmd.py',
        'config_old': 'rmsc03',
        'config_new': 'rmsc03_function',
        'end-time': '10',
        'seed': seed,
    }
    return parameters
|
class Data():
    """Container for train/test arrays with device ('cpu'/'gpu') and dtype
    ('float'/'double') management across numpy and torch.

    Attributes X_train/y_train/X_test/y_test hold numpy arrays or torch
    tensors; assigning `device`/`dtype` converts all four in place.
    """
    def __init__(self):
        # Populated by subclasses/callers; all splits start empty.
        self.X_train = None
        self.y_train = None
        self.X_test = None
        self.y_test = None
        # Private state backing the device/dtype properties.
        self.__device = None
        self.__dtype = None
    @property
    def device(self):
        return self.__device
    @property
    def dtype(self):
        return self.__dtype
    @device.setter
    def device(self, d):
        # Accepts only 'cpu' or 'gpu'; converts all four splits.
        if (d == 'cpu'):
            self.__to_cpu()
        elif (d == 'gpu'):
            self.__to_gpu()
        else:
            raise ValueError
        self.__device = d
    @dtype.setter
    def dtype(self, d):
        # Accepts only 'float' or 'double'; requires device to be set first.
        if (d == 'float'):
            self.__to_float()
        elif (d == 'double'):
            self.__to_double()
        else:
            raise ValueError
        self.__dtype = d
    @property
    def Device(self):
        # torch.device counterpart of the stored device string (None if unset).
        if (self.__device == 'cpu'):
            return torch.device('cpu')
        elif (self.__device == 'gpu'):
            return torch.device('cuda')
    @property
    def Dtype(self):
        # torch dtype counterpart of the stored dtype string (None if unset).
        if (self.__dtype == 'float'):
            return torch.float32
        elif (self.__dtype == 'double'):
            return torch.float64
    @property
    def dim(self):
        # Last-axis size of X_train (input dimension).
        if isinstance(self.X_train, np.ndarray):
            return self.X_train.shape[(- 1)]
        elif isinstance(self.X_train, torch.Tensor):
            return self.X_train.size((- 1))
    @property
    def K(self):
        # Last-axis size of y_train (output dimension).
        if isinstance(self.y_train, np.ndarray):
            return self.y_train.shape[(- 1)]
        elif isinstance(self.y_train, torch.Tensor):
            return self.y_train.size((- 1))
    @property
    def X_train_np(self):
        return Data.to_np(self.X_train)
    @property
    def y_train_np(self):
        return Data.to_np(self.y_train)
    @property
    def X_test_np(self):
        return Data.to_np(self.X_test)
    @property
    def y_test_np(self):
        return Data.to_np(self.y_test)
    @staticmethod
    def to_np(d):
        """Return `d` as numpy (tensors are detached and moved to CPU); None passes through."""
        if (isinstance(d, np.ndarray) or (d is None)):
            return d
        elif isinstance(d, torch.Tensor):
            return d.cpu().detach().numpy()
        else:
            raise ValueError
    def __to_cpu(self):
        # numpy arrays become double-precision CPU tensors; tensors just move.
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), np.ndarray):
                setattr(self, d, torch.DoubleTensor(getattr(self, d)))
            elif isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).cpu())
    def __to_gpu(self):
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), np.ndarray):
                setattr(self, d, torch.cuda.DoubleTensor(getattr(self, d)))
            elif isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).cuda())
    def __to_float(self):
        # dtype conversion only touches tensors, hence the device guard.
        if (self.device is None):
            raise RuntimeError('device is not set')
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).float())
    def __to_double(self):
        if (self.device is None):
            raise RuntimeError('device is not set')
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).double())
class FNN(StructureNN):
    """Fully connected feed-forward network.

    `layers` counts linear layers: layers - 1 hidden (linear + activation)
    pairs followed by the output layer 'LinMout'; `width` is the hidden size.
    Activation and initializer names are resolved by the Module base class
    via `self.Act` and `self.weight_init_`.
    """
    def __init__(self, ind, outd, layers=2, width=50, activation='relu', initializer='default', softmax=False):
        super(FNN, self).__init__()
        self.ind = ind          # input dimension
        self.outd = outd        # output dimension
        self.layers = layers
        self.width = width
        self.activation = activation
        self.initializer = initializer
        self.softmax = softmax  # apply softmax over the last output axis
        self.modus = self.__init_modules()
        self.__initialize()
    def forward(self, x):
        for i in range(1, self.layers):
            LinM = self.modus['LinM{}'.format(i)]
            NonM = self.modus['NonM{}'.format(i)]
            x = NonM(LinM(x))
        x = self.modus['LinMout'](x)
        if self.softmax:
            x = nn.functional.softmax(x, dim=(- 1))
        return x
    def __init_modules(self):
        modules = nn.ModuleDict()
        if (self.layers > 1):
            modules['LinM1'] = nn.Linear(self.ind, self.width)
            modules['NonM1'] = self.Act
            for i in range(2, self.layers):
                modules['LinM{}'.format(i)] = nn.Linear(self.width, self.width)
                modules['NonM{}'.format(i)] = self.Act
            modules['LinMout'] = nn.Linear(self.width, self.outd)
        else:
            # Single-layer case: direct input -> output linear map.
            modules['LinMout'] = nn.Linear(self.ind, self.outd)
        return modules
    def __initialize(self):
        # Weights use the configured initializer; all biases start at zero.
        for i in range(1, self.layers):
            self.weight_init_(self.modus['LinM{}'.format(i)].weight)
            nn.init.constant_(self.modus['LinM{}'.format(i)].bias, 0)
        self.weight_init_(self.modus['LinMout'].weight)
        nn.init.constant_(self.modus['LinMout'].bias, 0)
|
class Module(torch.nn.Module):
    """torch.nn.Module extension with string-based device/dtype switching and
    name-based activation / weight-initializer lookup.
    """
    def __init__(self):
        super(Module, self).__init__()
        self.activation = None   # activation name, resolved by `act` / `Act`
        self.initializer = None  # initializer name, resolved by `weight_init_`
        self.__device = None
        self.__dtype = None
    @property
    def device(self):
        return self.__device
    @property
    def dtype(self):
        return self.__dtype
    @device.setter
    def device(self, d):
        # 'cpu' or 'gpu' only; moves all parameters accordingly.
        if (d == 'cpu'):
            self.cpu()
        elif (d == 'gpu'):
            self.cuda()
        else:
            raise ValueError
        self.__device = d
    @dtype.setter
    def dtype(self, d):
        # 'float' or 'double' only.
        if (d == 'float'):
            self.to(torch.float)
        elif (d == 'double'):
            self.to(torch.double)
        else:
            raise ValueError
        self.__dtype = d
    @property
    def Device(self):
        # torch.device counterpart of the stored device string (None if unset).
        if (self.__device == 'cpu'):
            return torch.device('cpu')
        elif (self.__device == 'gpu'):
            return torch.device('cuda')
    @property
    def Dtype(self):
        # torch dtype counterpart of the stored dtype string (None if unset).
        if (self.__dtype == 'float'):
            return torch.float32
        elif (self.__dtype == 'double'):
            return torch.float64
    @property
    def act(self):
        # Functional activation chosen by name.
        if (self.activation == 'sigmoid'):
            return torch.sigmoid
        elif (self.activation == 'relu'):
            return torch.relu
        elif (self.activation == 'tanh'):
            return torch.tanh
        elif (self.activation == 'elu'):
            # NOTE(review): torch.elu is not in torch's documented API —
            # torch.nn.functional.elu is; verify on the torch version in use.
            return torch.elu
        else:
            raise NotImplementedError
    @property
    def Act(self):
        # Activation as a fresh nn.Module instance (one per call).
        if (self.activation == 'sigmoid'):
            return torch.nn.Sigmoid()
        elif (self.activation == 'relu'):
            return torch.nn.ReLU()
        elif (self.activation == 'tanh'):
            return torch.nn.Tanh()
        elif (self.activation == 'elu'):
            return torch.nn.ELU()
        else:
            raise NotImplementedError
    @property
    def weight_init_(self):
        # In-place weight initializer chosen by name; 'default' picks a scheme
        # from the activation and is a no-op for unknown activations.
        if (self.initializer == 'He normal'):
            return torch.nn.init.kaiming_normal_
        elif (self.initializer == 'He uniform'):
            return torch.nn.init.kaiming_uniform_
        elif (self.initializer == 'Glorot normal'):
            return torch.nn.init.xavier_normal_
        elif (self.initializer == 'Glorot uniform'):
            return torch.nn.init.xavier_uniform_
        elif (self.initializer == 'orthogonal'):
            return torch.nn.init.orthogonal_
        elif (self.initializer == 'default'):
            if (self.activation == 'relu'):
                return torch.nn.init.kaiming_normal_
            elif (self.activation == 'tanh'):
                return torch.nn.init.orthogonal_
            else:
                return (lambda x: None)
        else:
            raise NotImplementedError
|
class StructureNN(Module):
    """Structure-oriented network: a general map defined by its architecture."""

    def __init__(self):
        super(StructureNN, self).__init__()

    def predict(self, x, returnnp=False):
        """Evaluate the network; return numpy when `returnnp` is True."""
        out = self(x)
        if returnnp:
            return out.cpu().detach().numpy()
        return out
|
class LossNN(Module, abc.ABC):
    """Loss-oriented network: an algorithm defined by its training loss."""

    def __init__(self):
        super(LossNN, self).__init__()

    def forward(self, x):
        # Identity pass-through; behavior lives in criterion/predict.
        return x

    @abc.abstractmethod
    def criterion(self, X, y):
        pass

    @abc.abstractmethod
    def predict(self):
        pass
|
def timing(func):
    """Decorator printing the wall-clock duration of each call to `func`."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - start
        print("'" + func.__name__ + "'" + ' took {} s'.format(elapsed))
        return result
    return wrapper
|
class lazy_property():
    """Descriptor that computes a value once and caches it on the instance.

    After the first access the computed value shadows this (non-data)
    descriptor via the instance __dict__, so `func` runs at most once.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls):
        result = self.func(instance)
        # Replace the descriptor with the computed value on the instance.
        setattr(instance, self.func.__name__, result)
        return result
|
def softmax(x):
    """Numerically stable softmax over the last axis of `x`."""
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=-1, keepdims=True)
|
def cross_entropy_loss(y_pred, y_label):
    """Cross entropy for soft (same-shape) labels or hard class indices."""
    if y_pred.size() == y_label.size():
        # Soft labels: mean of -sum(label * log_softmax(pred)) per sample.
        log_probs = torch.log_softmax(y_pred, dim=-1)
        return torch.mean(-torch.sum(log_probs * y_label, dim=-1))
    # Hard labels: indices are cast to long for torch's CrossEntropyLoss.
    return torch.nn.CrossEntropyLoss()(y_pred, y_label.long())
|
def grad(y, x, create_graph=True, keepdim=False):
    """Batched Jacobian dy/dx.

    y: [N, Ny] or [Ny]; x: [N, Nx] or [Nx].
    Returns [N, Ny, Nx] (size-1 leading axes are squeezed unless keepdim).
    """
    N = y.size(0) if len(y.size()) == 2 else 1
    Ny, Nx = y.size(-1), x.size(-1)
    ones = torch.ones_like(y[..., 0])
    # One backward pass per output component.
    cols = [
        torch.autograd.grad(y[..., i], x, grad_outputs=ones, create_graph=create_graph)[0]
        for i in range(Ny)
    ]
    shape = np.array([N, Ny])[2 - len(y.size()):]
    shape = list(shape) if keepdim else list(shape[shape > 1])
    return torch.cat(cols, dim=-1).view(shape + [Nx])
|
class Data():
    """Container for train/test data with device/dtype management.

    Unlike the plain array version, each of the four splits may be a
    tensor/array or a list/tuple/dict of them — the map_elementwise-decorated
    helpers apply conversions to every element.
    """
    def __init__(self, X_train=None, y_train=None, X_test=None, y_test=None):
        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.y_test = y_test
        # Private state backing the device/dtype properties.
        self.__device = None
        self.__dtype = None
    def get_batch(self, batch_size):
        """Sample a random training batch; the mask derived from y_train is
        shared across all elements of X_train and y_train."""
        @map_elementwise
        def batch_mask(X, num):
            return np.random.choice(X.size(0), num, replace=False)
        @map_elementwise
        def batch(X, mask):
            return X[mask]
        mask = batch_mask(self.y_train, batch_size)
        return (batch(self.X_train, mask), batch(self.y_train, mask))
    def save(self, path):
        """Save the four splits under `path` (created if missing):
        dict -> named .npz, list/tuple -> positional .npz, else .npy."""
        if (not os.path.isdir(path)):
            os.makedirs(path)
        def save_data(fname, data):
            if isinstance(data, dict):
                np.savez_compressed(((path + '/') + fname), **data)
            elif (isinstance(data, list) or isinstance(data, tuple)):
                np.savez_compressed(((path + '/') + fname), *data)
            else:
                np.save(((path + '/') + fname), data)
        save_data('X_train', self.X_train_np)
        save_data('y_train', self.y_train_np)
        save_data('X_test', self.X_test_np)
        save_data('y_test', self.y_test_np)
    @property
    def device(self):
        return self.__device
    @property
    def dtype(self):
        return self.__dtype
    @device.setter
    def device(self, d):
        # Accepts 'cpu'/'gpu' strings but stores the torch.device object.
        if (d == 'cpu'):
            self.__to_cpu()
            self.__device = torch.device('cpu')
        elif (d == 'gpu'):
            self.__to_gpu()
            self.__device = torch.device('cuda')
        else:
            raise ValueError
    @dtype.setter
    def dtype(self, d):
        # Accepts 'float'/'double' strings but stores the torch dtype object.
        if (d == 'float'):
            self.__to_float()
            self.__dtype = torch.float32
        elif (d == 'double'):
            self.__to_double()
            self.__dtype = torch.float64
        else:
            raise ValueError
    @property
    def dim(self):
        # Last-axis size of X_train (input dimension).
        if isinstance(self.X_train, np.ndarray):
            return self.X_train.shape[(- 1)]
        elif isinstance(self.X_train, torch.Tensor):
            return self.X_train.size((- 1))
    @property
    def K(self):
        # Last-axis size of y_train (output dimension).
        if isinstance(self.y_train, np.ndarray):
            return self.y_train.shape[(- 1)]
        elif isinstance(self.y_train, torch.Tensor):
            return self.y_train.size((- 1))
    @property
    def X_train_np(self):
        return Data.tc_to_np(self.X_train)
    @property
    def y_train_np(self):
        return Data.tc_to_np(self.y_train)
    @property
    def X_test_np(self):
        return Data.tc_to_np(self.X_test)
    @property
    def y_test_np(self):
        return Data.tc_to_np(self.y_test)
    @staticmethod
    @map_elementwise
    def tc_to_np(d):
        # Tensors (or every tensor inside a container) become numpy arrays;
        # anything else passes through unchanged.
        if isinstance(d, torch.Tensor):
            return d.cpu().detach().numpy()
        else:
            return d
    def __to_cpu(self):
        # numpy arrays become double-precision CPU tensors; tensors just move.
        @map_elementwise
        def trans(d):
            if isinstance(d, np.ndarray):
                return torch.DoubleTensor(d)
            elif isinstance(d, torch.Tensor):
                return d.cpu()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
    def __to_gpu(self):
        @map_elementwise
        def trans(d):
            if isinstance(d, np.ndarray):
                return torch.cuda.DoubleTensor(d)
            elif isinstance(d, torch.Tensor):
                return d.cuda()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
    def __to_float(self):
        # dtype conversion only touches tensors, hence the device guard.
        if (self.device is None):
            raise RuntimeError('device is not set')
        @map_elementwise
        def trans(d):
            if isinstance(d, torch.Tensor):
                return d.float()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
    def __to_double(self):
        if (self.device is None):
            raise RuntimeError('device is not set')
        @map_elementwise
        def trans(d):
            if isinstance(d, torch.Tensor):
                return d.double()
            else:
                return d
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            setattr(self, d, trans(getattr(self, d)))
|
class Data_MIONet_Cartesian(Data):
    """Data container for MIONet in its Cartesian-product formulation.

    X_train is a sequence whose last element is passed through unbatched;
    only the preceding elements (and y_train) are subsampled per batch.
    """

    def __init__(self, X_train=None, y_train=None, X_test=None, y_test=None):
        super(Data_MIONet_Cartesian, self).__init__(X_train, y_train, X_test, y_test)

    def get_batch(self, batch_size):
        """Sample a training batch, keeping the last X_train element whole."""
        @map_elementwise
        def batch_mask(X, num):
            return np.random.choice(X.size(0), num, replace=False)

        @map_elementwise
        def batch(X, mask):
            return X[mask]

        mask = batch_mask(self.y_train, batch_size)
        inputs = (*batch(self.X_train[:-1], mask), self.X_train[-1])
        return (inputs, batch(self.y_train, mask))
|
class AE(Map):
    """Autoencoder: an FNN encoder composed with an FNN decoder."""

    def __init__(self, encoder_size, decoder_size, activation='sigmoid', initializer='default'):
        super(AE, self).__init__()
        self.encoder_size = encoder_size
        self.decoder_size = decoder_size
        self.activation = activation
        self.initializer = initializer
        self.ms = self.__init_modules()

    def forward(self, x):
        return self.ms['decoder'](self.ms['encoder'](x))

    def encode(self, x, returnnp=False):
        """Map `x` to the latent space (numpy output when `returnnp`)."""
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x, dtype=self.dtype, device=self.device)
        latent = self.ms['encoder'](x)
        return latent.cpu().detach().numpy() if returnnp else latent

    def decode(self, x, returnnp=False):
        """Map a latent `x` back to data space (numpy output when `returnnp`)."""
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x, dtype=self.dtype, device=self.device)
        recon = self.ms['decoder'](x)
        return recon.cpu().detach().numpy() if returnnp else recon

    def __init_modules(self):
        modules = torch.nn.ModuleDict()
        modules['encoder'] = FNN(self.encoder_size, self.activation, self.initializer)
        modules['decoder'] = FNN(self.decoder_size, self.activation, self.initializer)
        return modules
|
class DeepONet(Map):
    """Deep operator network.

    Input: ([batch, branch_dim], [batch, trunk_dim]); output: [batch, 1],
    the inner product of branch and trunk features plus a scalar bias.
    """

    def __init__(self, branch_size, trunk_size, activation='relu', initializer='Glorot normal'):
        super(DeepONet, self).__init__()
        self.branch_size = branch_size
        self.trunk_size = trunk_size
        self.activation = activation
        self.initializer = initializer
        self.ms = self.__init_modules()
        self.ps = self.__init_params()

    def forward(self, x):
        features_branch = self.ms['Branch'](x[0])
        features_trunk = self.ms['Trunk'](x[1])
        inner = torch.sum(features_branch * features_trunk, dim=-1, keepdim=True)
        return inner + self.ps['bias']

    def __init_modules(self):
        modules = nn.ModuleDict()
        modules['Branch'] = FNN(self.branch_size, self.activation, self.initializer)
        modules['Trunk'] = FNN(self.trunk_size, self.activation, self.initializer)
        return modules

    def __init_params(self):
        params = nn.ParameterDict()
        params['bias'] = nn.Parameter(torch.zeros([1]))
        return params
|
class FNN(Map):
    """Fully-connected network described by a size list.

    len(size) >= 2; an entry -N denotes a linear layer into dim N without
    bias, and a trailing 0 denotes an identity output layer.
    """

    def __init__(self, size, activation='relu', initializer='default'):
        super(FNN, self).__init__()
        self.size = size
        self.activation = activation
        self.initializer = initializer
        self.ms = self.__init_modules()
        self.__initialize()

    def forward(self, x):
        depth = len(self.size) - 1
        for i in range(1, depth):
            x = self.act(self.ms['LinM{}'.format(i)](x))
        if self.size[-1] == 0:
            # Identity output layer: the last hidden activation is returned.
            return x
        return self.ms['LinM{}'.format(depth)](x)

    def __init_modules(self):
        modules = nn.ModuleDict()
        for i in range(1, len(self.size)):
            if self.size[i] == 0:
                continue  # 0 marks an identity layer: no module needed.
            has_bias = self.size[i] > 0
            modules['LinM{}'.format(i)] = nn.Linear(abs(self.size[i - 1]), abs(self.size[i]), has_bias)
        return modules

    def __initialize(self):
        for i in range(1, len(self.size)):
            if self.size[i] == 0:
                continue
            self.weight_init_(self.ms['LinM{}'.format(i)].weight)
            if self.size[i] > 0:
                self.bias_init_(self.ms['LinM{}'.format(i)].bias)
|
class MIONet(Map):
    """Multiple-input operator network.

    Input: one [batch, sensors_i] tensor per entry of `sizes` (the last is
    the location input); output: [batch, 1] — the elementwise product of
    all sub-network outputs, summed over features, plus an optional bias.
    """

    def __init__(self, sizes, activation='relu', initializer='default', bias=True):
        super(MIONet, self).__init__()
        self.sizes = sizes
        self.activation = activation
        self.initializer = initializer
        self.bias = bias
        self.ms = self.__init_modules()
        self.ps = self.__init_parameters()

    def forward(self, x):
        outs = [self.ms['Net{}'.format(i + 1)](x[i]) for i in range(len(self.sizes))]
        fused = torch.sum(torch.prod(torch.stack(outs), dim=0), dim=-1, keepdim=True)
        if self.bias:
            return fused + self.ps['bias']
        return fused

    def __init_modules(self):
        modules = torch.nn.ModuleDict()
        for i, size in enumerate(self.sizes):
            modules['Net{}'.format(i + 1)] = FNN(size, self.activation, self.initializer)
        return modules

    def __init_parameters(self):
        parameters = torch.nn.ParameterDict()
        if self.bias:
            parameters['bias'] = torch.nn.Parameter(torch.zeros([1]))
        return parameters
|
class MIONet_Cartesian(Map):
    """Multiple-input operator network, Cartesian-product version.

    Input: one [batch, sensors_i] tensor per branch plus a shared
    [num_loc, dim_loc] final input; output: [batch, num_loc].
    """

    def __init__(self, sizes, activation='relu', initializer='default', bias=True):
        super(MIONet_Cartesian, self).__init__()
        self.sizes = sizes
        self.activation = activation
        self.initializer = initializer
        self.bias = bias
        self.ms = self.__init_modules()
        self.ps = self.__init_parameters()

    def forward(self, x):
        n_branches = len(self.sizes) - 1
        branch = torch.stack([self.ms['Net{}'.format(i + 1)](x[i]) for i in range(n_branches)])
        branch = torch.prod(branch, dim=0)
        # The last sub-network processes the shared location input.
        trunk = self.ms['Net{}'.format(len(self.sizes))](x[-1])
        out = branch @ trunk.t()
        if self.bias:
            return out + self.ps['bias']
        return out

    def __init_modules(self):
        modules = torch.nn.ModuleDict()
        for i, size in enumerate(self.sizes):
            modules['Net{}'.format(i + 1)] = FNN(size, self.activation, self.initializer)
        return modules

    def __init_parameters(self):
        parameters = torch.nn.ParameterDict()
        if self.bias:
            parameters['bias'] = torch.nn.Parameter(torch.zeros([1]))
        return parameters
|
class Module(torch.nn.Module):
    """torch.nn.Module extension with string-based device/dtype switching,
    name-based activation/initializer lookup, and elementwise tensor
    conversion for structured inputs.
    """
    def __init__(self):
        super(Module, self).__init__()
        self.activation = None   # name (or callable) resolved by `act`
        self.initializer = None  # name resolved by `weight_init_` / `bias_init_`
        self.__device = None
        self.__dtype = None
    @property
    def device(self):
        return self.__device
    @property
    def dtype(self):
        return self.__dtype
    @device.setter
    def device(self, d):
        # Moves parameters and records the torch.device on every sub-Module.
        # NOTE: `module.__device` mangles to `module._Module__device`, which
        # is exactly the attribute backing this property — intentional here.
        if (d == 'cpu'):
            self.cpu()
            for module in self.modules():
                if isinstance(module, Module):
                    module.__device = torch.device('cpu')
        elif (d == 'gpu'):
            self.cuda()
            for module in self.modules():
                if isinstance(module, Module):
                    module.__device = torch.device('cuda')
        else:
            raise ValueError
    @dtype.setter
    def dtype(self, d):
        # Same pattern as the device setter, for float32/float64.
        if (d == 'float'):
            self.to(torch.float32)
            for module in self.modules():
                if isinstance(module, Module):
                    module.__dtype = torch.float32
        elif (d == 'double'):
            self.to(torch.float64)
            for module in self.modules():
                if isinstance(module, Module):
                    module.__dtype = torch.float64
        else:
            raise ValueError
    @property
    def act(self):
        # `activation` may be a callable or one of the known names.
        if callable(self.activation):
            return self.activation
        elif (self.activation == 'sigmoid'):
            return torch.sigmoid
        elif (self.activation == 'relu'):
            return torch.relu
        elif (self.activation == 'tanh'):
            return torch.tanh
        elif (self.activation == 'elu'):
            # NOTE(review): torch.elu is not in torch's documented API —
            # torch.nn.functional.elu is; verify on the torch version in use.
            return torch.elu
        else:
            raise NotImplementedError
    @property
    def weight_init_(self):
        # In-place weight initializer chosen by name; 'default' is a no-op.
        if (self.initializer == 'He normal'):
            return torch.nn.init.kaiming_normal_
        elif (self.initializer == 'He uniform'):
            return torch.nn.init.kaiming_uniform_
        elif (self.initializer == 'Glorot normal'):
            return torch.nn.init.xavier_normal_
        elif (self.initializer == 'Glorot uniform'):
            return torch.nn.init.xavier_uniform_
        elif (self.initializer == 'orthogonal'):
            return torch.nn.init.orthogonal_
        elif (self.initializer == 'default'):
            return (lambda x: None)
        else:
            raise NotImplementedError
    @property
    def bias_init_(self):
        # Biases are zero-initialized for every named scheme; 'default' is a no-op.
        if (self.initializer == 'He normal'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'He uniform'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'Glorot normal'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'Glorot uniform'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'orthogonal'):
            return torch.nn.init.zeros_
        elif (self.initializer == 'default'):
            return (lambda x: None)
        else:
            raise NotImplementedError
    @map_elementwise
    def _to_tensor(self, x):
        # Converts x (or, via map_elementwise, each element of a
        # list/tuple/dict x) to a tensor on this module's device/dtype.
        if (not isinstance(x, torch.Tensor)):
            x = torch.tensor(x, dtype=self.dtype, device=self.device)
        return x
|
class Map(Module):
    """Structure-oriented neural network used as a general map."""

    def __init__(self):
        super(Map, self).__init__()

    def predict(self, x, returnnp=False):
        """Convert `x` to tensor(s), evaluate, optionally return numpy."""
        x = self._to_tensor(x)
        out = self(x)
        return out.cpu().detach().numpy() if returnnp else out
|
class Algorithm(Module, abc.ABC):
    """Loss-oriented network: subclasses define training via `criterion`."""

    def __init__(self):
        super(Algorithm, self).__init__()

    def forward(self, x):
        # Identity pass-through; behavior lives in criterion/predict.
        return x

    @abc.abstractmethod
    def criterion(self, X, y):
        pass

    @abc.abstractmethod
    def predict(self):
        pass
|
class PNN(Map):
    """INN-based Poisson neural network.

    Transforms through the invertible net, advances `recurrent` steps with
    the SympNet in latent coordinates, then maps back through the inverse.
    """

    def __init__(self, inn, sympnet, recurrent=1):
        super(PNN, self).__init__()
        self.inn = inn
        self.sympnet = sympnet
        self.recurrent = recurrent
        self.dim = sympnet.dim

    def forward(self, x):
        z = self.inn(x)
        for _ in range(self.recurrent):
            z = self.sympnet(z)
        return self.inn.inverse(z)

    def predict(self, x, steps=1, keepinitx=False, returnnp=False):
        """Roll the latent dynamics forward `steps` times from state(s) x."""
        x = self._to_tensor(x)
        size = len(x.size())
        latent = [self.inn(x)]
        for _ in range(steps):
            latent.append(self.sympnet(latent[-1]))
        pred = [self.inn.inverse(z) for z in latent]
        if keepinitx:
            steps = steps + 1
        else:
            pred = pred[1:]
        res = torch.cat(pred, dim=-1)
        if steps > 1:
            # Reshape to [-1, steps, dim]; the batch axis is dropped when the
            # input was a single unbatched state.
            res = res.view([-1, steps, self.dim][2 - size:])
        return res.cpu().detach().numpy() if returnnp else res
|
class AEPNN(Algorithm):
    """Autoencoder-based Poisson neural network.

    Trains a SympNet in the autoencoder's latent space; the loss combines
    the latent dynamics error with `lam` times the reconstruction error.
    """

    def __init__(self, ae, sympnet, lam=1, recurrent=1):
        super(AEPNN, self).__init__()
        self.ae = ae
        self.sympnet = sympnet
        self.lam = lam
        self.recurrent = recurrent
        self.dim = ae.encoder_size[0]

    def criterion(self, X, y):
        X_latent = self.ae.encode(X)
        y_latent = self.ae.encode(y)
        advanced = X_latent
        for _ in range(self.recurrent):
            advanced = self.sympnet(advanced)
        symp_loss = torch.nn.MSELoss()(advanced, y_latent)
        ae_loss = (torch.nn.MSELoss()(self.ae.decode(X_latent), X)
                   + torch.nn.MSELoss()(self.ae.decode(y_latent), y))
        return symp_loss + (self.lam * ae_loss)

    def predict(self, x, steps=1, keepinitx=False, returnnp=False):
        """Advance `x` for `steps` latent steps and decode the trajectory."""
        x = self._to_tensor(x)
        size = len(x.size())
        latent = [self.ae.encode(x)]
        for _ in range(steps):
            latent.append(self.sympnet(latent[-1]))
        decoded = [self.ae.decode(z) for z in latent]
        if keepinitx:
            steps = steps + 1
        else:
            decoded = decoded[1:]
        res = torch.cat(decoded, dim=-1)
        if steps > 1:
            # [-1, steps, dim] for batched input; batch axis dropped otherwise.
            res = res.view([-1, steps, self.dim][2 - size:])
        return res.cpu().detach().numpy() if returnnp else res
|
class S2S(Map):
    """Seq2seq model with a learned attention mixing matrix.

    Input: [batch_size, len_in, dim_in]; output: [batch_size, len_out, dim_out].
    Encoder outputs are mixed into len_out steps via softmax(att_weights)
    before being fed to the decoder.
    """
    def __init__(self, dim_in, len_in, dim_out, len_out, hidden_size=10, cell='LSTM'):
        super(S2S, self).__init__()
        self.dim_in = dim_in
        self.len_in = len_in
        self.dim_out = dim_out
        self.len_out = len_out
        self.hidden_size = hidden_size
        self.cell = cell  # one of 'RNN', 'LSTM', 'GRU'
        self.encoder = self.__init_encoder()
        self.decoder = self.__init_decoder()
        self.att_weights = self.__init_att_weights()
        self.out = self.__init_out()
    def forward(self, x):
        # A single unbatched sequence [len_in, dim_in] is also accepted.
        to_squeeze = (True if (len(x.size()) == 2) else False)
        if to_squeeze:
            x = x.view(1, self.len_in, self.dim_in)
        # Zero initial hidden state; LSTM additionally needs a cell state.
        zeros = torch.zeros([1, x.size(0), self.hidden_size], dtype=x.dtype, device=x.device)
        init_state = ((zeros, zeros) if (self.cell == 'LSTM') else zeros)
        (x, _) = self.encoder(x, init_state)
        # [len_out, len_in] attention weights mix encoder outputs into len_out steps.
        x = (torch.softmax(self.att_weights, dim=1) @ x)
        # The decoder restarts from the same zero initial state.
        (x, _) = self.decoder(x, init_state)
        x = self.out(x)
        return (x.squeeze(0) if to_squeeze else x)
    def __init_encoder(self):
        if (self.cell == 'RNN'):
            return torch.nn.RNN(self.dim_in, self.hidden_size, batch_first=True)
        elif (self.cell == 'LSTM'):
            return torch.nn.LSTM(self.dim_in, self.hidden_size, batch_first=True)
        elif (self.cell == 'GRU'):
            return torch.nn.GRU(self.dim_in, self.hidden_size, batch_first=True)
        else:
            raise NotImplementedError
    def __init_decoder(self):
        # Decoder consumes the encoder's hidden-size features.
        if (self.cell == 'RNN'):
            return torch.nn.RNN(self.hidden_size, self.hidden_size, batch_first=True)
        elif (self.cell == 'LSTM'):
            return torch.nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True)
        elif (self.cell == 'GRU'):
            return torch.nn.GRU(self.hidden_size, self.hidden_size, batch_first=True)
        else:
            raise NotImplementedError
    def __init_att_weights(self):
        return torch.nn.Parameter(torch.zeros([self.len_out, self.len_in]))
    def __init_out(self):
        return torch.nn.Linear(self.hidden_size, self.dim_out)
|
def timing(func):
    """Decorator that reports how long each call to `func` takes."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        begin = time.time()
        out = func(*args, **kwargs)
        duration = time.time() - begin
        print("'{}'".format(func.__name__) + ' took {} s'.format(duration))
        return out
    return wrapper
|
def str_current_time():
    """Current local time formatted as YYYY-MM-DD-HH-MM-SS."""
    now = time.localtime(time.time())
    return time.strftime('%Y-%m-%d-%H-%M-%S', now)
|
def map_elementwise(func):
    """Decorator that maps `func` over list/tuple/dict arguments elementwise.

    The first list/tuple/dict found among args (then kwargs) determines the
    container type and index set; non-container arguments are broadcast
    unchanged to every element. Applied recursively, so nested containers
    are mapped too.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        (container, idx) = (None, None)
        # Find the first container argument to drive the mapping.
        for arg in args:
            if (type(arg) in (list, tuple, dict)):
                (container, idx) = (type(arg), (arg.keys() if (type(arg) == dict) else len(arg)))
                break
        if (container is None):
            for value in kwargs.values():
                if (type(value) in (list, tuple, dict)):
                    (container, idx) = (type(value), (value.keys() if (type(value) == dict) else len(value)))
                    break
        if (container is None):
            # No container anywhere: plain call.
            return func(*args, **kwargs)
        elif (container in (list, tuple)):
            # `get` broadcasts scalars and indexes matching containers.
            get = (lambda element, i: (element[i] if (type(element) is container) else element))
            return container((wrapper(*[get(arg, i) for arg in args], **{key: get(value, i) for (key, value) in kwargs.items()}) for i in range(idx)))
        elif (container is dict):
            get = (lambda element, key: (element[key] if (type(element) is dict) else element))
            return {key: wrapper(*[get(arg, key) for arg in args], **{key_: get(value_, key) for (key_, value_) in kwargs.items()}) for key in idx}
    return wrapper
|
class lazy_property():
    """Descriptor: compute a value on first access, then cache it on the instance.

    On first instance access the wrapped function runs once; its result is
    stored on the instance under the same name, shadowing the descriptor so
    subsequent accesses are plain attribute lookups (the function never runs
    again for that instance).
    """
    def __init__(self, func):
        self.func = func
        # Keep the wrapped function's docstring visible via help()/inspect.
        self.__doc__ = getattr(func, '__doc__', None)
    def __get__(self, instance, cls):
        # BUG FIX: class-level access (instance is None, e.g. Cls.attr or
        # help(Cls)) previously called self.func(None) and crashed.
        if instance is None:
            return self
        val = self.func(instance)
        setattr(instance, self.func.__name__, val)
        return val
|
def softmax(x):
    """Numerically stable softmax over the last axis of array *x*."""
    # Subtracting the row max prevents overflow in exp for large inputs.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)
|
def mse(x, y):
    """Mean squared error between two tensors (mean reduction)."""
    loss_fn = torch.nn.MSELoss()
    return loss_fn(x, y)
|
def cross_entropy_loss(y_pred, y_label):
    """Cross-entropy supporting both soft and hard labels.

    If y_label has the same shape as y_pred it is treated as a probability
    distribution (soft labels); otherwise it is cast to class indices and
    standard CrossEntropyLoss is used.
    """
    if y_pred.size() != y_label.size():
        return torch.nn.CrossEntropyLoss()(y_pred, y_label.long())
    log_probs = torch.log_softmax(y_pred, dim=-1)
    per_sample = -torch.sum(log_probs * y_label, dim=-1)
    return torch.mean(per_sample)
|
def grad(y, x, create_graph=True, keepdim=False):
    """Jacobian dy/dx computed column-by-column with torch.autograd.

    y: [N, Ny] or [Ny]
    x: [N, Nx] or [Nx]
    Returns dy/dx shaped [N, Ny, Nx] / [Ny, Nx]; unless keepdim is True,
    leading dims of size 1 are dropped from the result shape.
    """
    N = (y.size(0) if (len(y.size()) == 2) else 1)  # batch size; 1 when y is unbatched
    Ny = y.size((- 1))
    Nx = x.size((- 1))
    # grad_outputs of ones selects one output column at a time across the batch.
    z = torch.ones_like(y[(..., 0)])
    dy = []
    for i in range(Ny):
        # Gradient of y[..., i] w.r.t. x, same shape as x.
        dy.append(torch.autograd.grad(y[(..., i)], x, grad_outputs=z, create_graph=create_graph)[0])
    # Leading target shape: keep [N, Ny] for batched y, [Ny] otherwise;
    # without keepdim, size-1 entries are filtered out.
    shape = np.array([N, Ny])[(2 - len(y.size())):]
    shape = (list(shape) if keepdim else list(shape[(shape > 1)]))
    return torch.cat(dy, dim=(- 1)).view((shape + [Nx]))
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training DataLoader.

    Returns (dataloader, dataset_length, sampler); sampler is None when the
    distributed process group is not initialized (single-process training).
    """
    msrvtt_dataset = MSRVTTDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. DistributedSampler raises when the
        # process group is uninitialized; fall back to plain shuffling.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
|
def dataloader_msrvtt_test(args, tokenizer, subset='test'):
    """Build the MSR-VTT evaluation DataLoader for *subset*.

    Returns (dataloader, dataset_length). The distributed sampler is used
    when a process group is initialized, otherwise plain sequential loading.
    """
    msrvtt_testset = MSRVTTDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_testset)
    except Exception:
        # BUG FIX: was a bare `except:` (swallowed KeyboardInterrupt too).
        test_sampler = None
    dataloader_msrvtt = DataLoader(msrvtt_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_msrvtt, len(msrvtt_testset))
|
def dataloader_activity_train(args, tokenizer):
    """ActivityNet training DataLoader; returns (loader, dataset length, sampler)."""
    dataset = ActivityNetDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=args.batch_size // args.world_size, num_workers=args.workers, pin_memory=False, shuffle=sampler is None, sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
|
def dataloader_activity_test(args, tokenizer, subset='test'):
    """ActivityNet evaluation DataLoader for *subset*; returns (loader, length)."""
    activity_testset = ActivityNetDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(activity_testset)
    except Exception:
        # BUG FIX: was a bare `except:` (swallowed KeyboardInterrupt too).
        test_sampler = None
    dataloader_activity = DataLoader(activity_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_activity, len(activity_testset))
|
def dataloader_didemo_train(args, tokenizer):
    """DiDeMo training DataLoader; returns (loader, dataset length, sampler)."""
    dataset = DiDeMoDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=args.batch_size // args.world_size, num_workers=args.workers, pin_memory=False, shuffle=sampler is None, sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
|
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """DiDeMo evaluation DataLoader for *subset*; returns (loader, length)."""
    didemo_testset = DiDeMoDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(didemo_testset)
    except Exception:
        # BUG FIX: was a bare `except:` (swallowed KeyboardInterrupt too).
        test_sampler = None
    dataloader_didemo = DataLoader(didemo_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_didemo, len(didemo_testset))
|
def dataloader_lsmdc_train(args, tokenizer):
    """LSMDC training DataLoader; returns (loader, dataset length, sampler)."""
    dataset = LsmdcDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=args.batch_size // args.world_size, num_workers=args.workers, pin_memory=False, shuffle=sampler is None, sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
|
def dataloader_lsmdc_test(args, tokenizer, subset='test'):
    """LSMDC evaluation DataLoader for *subset*; returns (loader, length)."""
    lsmdc_testset = LsmdcDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_testset)
    except Exception:
        # BUG FIX: was a bare `except:` (swallowed KeyboardInterrupt too).
        test_sampler = None
    dataloader_lsmdc = DataLoader(lsmdc_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_lsmdc, len(lsmdc_testset))
|
def dataloader_msvd_train(args, tokenizer):
    """MSVD training DataLoader; returns (loader, dataset length, sampler)."""
    dataset = MsvdDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=args.batch_size // args.world_size, num_workers=args.workers, pin_memory=False, shuffle=sampler is None, sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
|
def dataloader_msvd_test(args, tokenizer, subset='test'):
    """MSVD evaluation DataLoader (no distributed sampler); returns (loader, length)."""
    dataset = MsvdDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    loader = DataLoader(dataset, batch_size=args.batch_size_val, num_workers=args.workers, shuffle=False, drop_last=False)
    return (loader, len(dataset))
|
class LsmdcDataset(RetrievalDataset):
    """LSMDC video-text retrieval dataset (tab-separated annotation CSVs)."""
    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(LsmdcDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass
    def _get_anns(self, subset='train'):
        """Parse the LSMDC annotation files for the current subset.

        Returns:
            video_dict: OrderedDict video_id -> video file path (discovered by
                walking self.video_path and matching filename stems).
            sentences_dict: OrderedDict index -> (video_id, (sentence, None, None));
                the two None slots are unused start/end times.

        NOTE(review): the annotation file is selected with self.subset but the
        log line below uses the *subset* parameter — confirm callers always
        pass subset == self.subset.
        """
        video_json_path_dict = {}
        video_json_path_dict['train'] = os.path.join(self.anno_path, 'LSMDC16_annos_training.csv')
        video_json_path_dict['train_test'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['val'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['test'] = os.path.join(self.anno_path, 'LSMDC16_challenge_1000_publictect.csv')
        video_id_list = []
        caption_dict = {}
        with open(video_json_path_dict[self.subset], 'r') as fp:
            for line in fp:
                line = line.strip()
                line_split = line.split('\t')
                assert (len(line_split) == 6)
                (clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence) = line_split
                # Hard-coded blacklist of clip ids — presumably missing or
                # corrupt files; TODO confirm.
                if (clip_id not in ['0017_Pianist_00.23.28.872-00.23.34.843', '0017_Pianist_00.30.36.767-00.30.38.009', '3064_SPARKLE_2012_01.41.07.000-01.41.11.793', '3087_WE_BOUGHT_A_ZOO_01.37.34.502-01.37.39.361', '3044_KNOCKED_UP_00.45.19.000-00.45.23.549', '3023_DISTRICT_9_01.12.44.778-01.12.48.729']):
                    caption_dict[len(caption_dict)] = (clip_id, (sentence, None, None))
                    if (clip_id not in video_id_list):
                        video_id_list.append(clip_id)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Discover actual video files on disk; the filename stem is the clip id.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])
                if (video_id_ not in video_id_list):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        # Keep only captions whose clip was actually found on disk.
        for (clip_id, sentence) in caption_dict.values():
            if (clip_id not in video_dict):
                continue
            sentences_dict[len(sentences_dict)] = (clip_id, sentence)
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
|
class MSRVTTDataset(RetrievalDataset):
    """MSR-VTT video-text retrieval dataset (9k train split, JSFUSION eval split)."""
    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MSRVTTDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass
    def _get_anns(self, subset='train'):
        """Load annotations for *subset*.

        Returns:
            video_dict: OrderedDict video_id -> '<video_path>/<video_id>.mp4'
            sentences_dict: OrderedDict index -> (video_id, (caption, None, None))

        Raises FileNotFoundError when the split CSV is missing.
        """
        # Train uses the 9k split; val and test both use the JSFUSION 1k split.
        csv_path = {'train': join(self.anno_path, 'MSRVTT_train.9k.csv'), 'val': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'test': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv')}[subset]
        if exists(csv_path):
            csv = pd.read_csv(csv_path)
        else:
            raise FileNotFoundError
        video_id_list = list(csv['video_id'].values)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        if (subset == 'train'):
            # Training captions come from the full MSRVTT_data.json, filtered
            # to the ids present in the 9k split (many captions per video).
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            for itm in data['sentences']:
                if (itm['video_id'] in video_id_list):
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        else:
            # Eval: one sentence per row, straight from the CSV.
            for (_, itm) in csv.iterrows():
                sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['sentence'], None, None))
                video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
|
class MsvdDataset(RetrievalDataset):
    """MSVD video-text retrieval dataset (multiple captions per video)."""
    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MsvdDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass
    def _get_anns(self, subset='train'):
        """Load MSVD annotations for *subset*.

        Side effects: sets self.sample_len, self.cut_off_points (index of the
        last sentence per video, for multi-sentence evaluation),
        self.multi_sentence_per_video, and — for val/test —
        self.sentence_num / self.video_num.

        Returns (video_dict, sentences_dict) in the same format as the other
        retrieval datasets.
        """
        self.sample_len = 0
        self.cut_off_points = []
        self.multi_sentence_per_video = True
        # Split files list one video id per line.
        video_id_path_dict = {}
        video_id_path_dict['train'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['val'] = os.path.join(self.anno_path, 'val_list.txt')
        video_id_path_dict['test'] = os.path.join(self.anno_path, 'test_list.txt')
        caption_file = os.path.join(self.anno_path, 'raw-captions.pkl')
        with open(video_id_path_dict[subset], 'r') as fp:
            video_ids = [itm.strip() for itm in fp.readlines()]
        # NOTE(review): pickle.load on the captions file — only load trusted data.
        with open(caption_file, 'rb') as f:
            captions = pickle.load(f)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Discover video files on disk; the filename stem is the video id.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])
                if (video_id_ not in video_ids):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        for video_id in video_ids:
            assert (video_id in captions)
            # Captions are stored as token lists; join back into a sentence.
            for cap in captions[video_id]:
                cap_txt = ' '.join(cap)
                sentences_dict[len(sentences_dict)] = (video_id, (cap_txt, None, None))
            # Record where this video's sentence block ends.
            self.cut_off_points.append((len(sentences_dict) - 1))
        if ((subset == 'val') or (subset == 'test')):
            self.sentence_num = len(sentences_dict)
            self.video_num = len(video_ids)
            assert (len(self.cut_off_points) == self.video_num)
            print('For {}, sentence number: {}'.format(subset, self.sentence_num))
            print('For {}, video number: {}'.format(subset, self.video_num))
        print('Video number: {}'.format(len(video_dict)))
        print('Total Paire: {}'.format(len(sentences_dict)))
        self.sample_len = len(sentences_dict)
        return (video_dict, sentences_dict)
|
def _interpolation(kwargs):
    """Pop 'resample' from *kwargs* (default BILINEAR); pick randomly from a sequence."""
    interp = kwargs.pop('resample', Image.BILINEAR)
    if not isinstance(interp, (list, tuple)):
        return interp
    return random.choice(interp)
|
def _check_args_tf(kwargs):
    """Normalize transform kwargs in place for the running Pillow version."""
    # Pillow < 5.0 does not accept fillcolor; drop it silently.
    if _PIL_VER < (5, 0):
        kwargs.pop('fillcolor', None)
    kwargs['resample'] = _interpolation(kwargs)
|
def shear_x(img, factor, **kwargs):
    """Shear *img* horizontally by *factor* via an affine transform."""
    _check_args_tf(kwargs)
    coeffs = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, coeffs, **kwargs)
|
def shear_y(img, factor, **kwargs):
    """Shear *img* vertically by *factor* via an affine transform."""
    _check_args_tf(kwargs)
    coeffs = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, coeffs, **kwargs)
|
def translate_x_rel(img, pct, **kwargs):
    """Translate *img* horizontally by *pct* of its width."""
    _check_args_tf(kwargs)
    offset = pct * img.size[0]
    return img.transform(img.size, Image.AFFINE, (1, 0, offset, 0, 1, 0), **kwargs)
|
def translate_y_rel(img, pct, **kwargs):
    """Translate *img* vertically by *pct* of its height."""
    _check_args_tf(kwargs)
    offset = pct * img.size[1]
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, offset), **kwargs)
|
def translate_x_abs(img, pixels, **kwargs):
    """Translate *img* horizontally by an absolute pixel offset."""
    _check_args_tf(kwargs)
    coeffs = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, coeffs, **kwargs)
|
def translate_y_abs(img, pixels, **kwargs):
    """Translate *img* vertically by an absolute pixel offset."""
    _check_args_tf(kwargs)
    coeffs = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, coeffs, **kwargs)
|
def rotate(img, degrees, **kwargs):
    """Rotate *img* by *degrees*, degrading gracefully across Pillow versions.

    Pillow >= 5.2: Image.rotate accepts all kwargs directly.
    Pillow >= 5.0: build the center-rotation affine matrix by hand and use
        Image.transform so fillcolor/resample kwargs still apply.
    Older: plain rotate with resample only.
    """
    _check_args_tf(kwargs)
    if (_PIL_VER >= (5, 2)):
        return img.rotate(degrees, **kwargs)
    elif (_PIL_VER >= (5, 0)):
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = ((w / 2.0), (h / 2.0))
        # Negative angle: PIL's transform matrix maps output -> input coords.
        angle = (- math.radians(degrees))
        # 2x3 affine [a, b, c, d, e, f]; rounding trims FP noise in sin/cos.
        matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]
        def transform(x, y, matrix):
            (a, b, c, d, e, f) = matrix
            return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
        # Shift translation terms so rotation happens about the image center.
        (matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        return img.rotate(degrees, resample=kwargs['resample'])
|
def auto_contrast(img, **__):
    """Stretch the image histogram to the full range; extra kwargs are ignored."""
    result = ImageOps.autocontrast(img)
    return result
|
def invert(img, **__):
    """Invert all pixel values; extra kwargs are ignored."""
    result = ImageOps.invert(img)
    return result
|
def equalize(img, **__):
    """Histogram-equalize the image; extra kwargs are ignored."""
    result = ImageOps.equalize(img)
    return result
|
def solarize(img, thresh, **__):
    """Invert all pixels above *thresh*; extra kwargs are ignored."""
    result = ImageOps.solarize(img, thresh)
    return result
|
def solarize_add(img, add, thresh=128, **__):
    """Add *add* (clamped at 255) to every pixel below *thresh*.

    Only L and RGB images are modified; any other mode is returned unchanged.
    """
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode not in ('L', 'RGB'):
        return img
    if img.mode == 'RGB':
        # point() needs one 256-entry table per band for RGB.
        lut = lut * 3
    return img.point(lut)
|
def posterize(img, bits_to_keep, **__):
    """Reduce each channel to *bits_to_keep* bits; a no-op for 8 bits or more."""
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)
|
def contrast(img, factor, **__):
    """Scale image contrast by *factor* (1.0 means unchanged)."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
|
def color(img, factor, **__):
    """Scale color saturation by *factor* (1.0 means unchanged)."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)
|
def brightness(img, factor, **__):
    """Scale brightness by *factor* (1.0 means unchanged)."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
|
def sharpness(img, factor, **__):
    """Scale sharpness by *factor* (1.0 means unchanged)."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
|
def _randomly_negate(v):
'With 50% prob, negate the value'
return ((- v) if (random.random() > 0.5) else v)
|
def _rotate_level_to_arg(level, _hparams):
    """Map *level* to a rotation angle in [0, 30] degrees, randomly negated."""
    degrees = (level / _MAX_LEVEL) * 30.0
    return (_randomly_negate(degrees),)
|
def _enhance_level_to_arg(level, _hparams):
    """Map *level* to an enhancement factor in [0.1, 1.9]."""
    factor = 0.1 + (level / _MAX_LEVEL) * 1.8
    return (factor,)
|
def _enhance_increasing_level_to_arg(level, _hparams):
    """Enhancement factor centered at 1.0 with deviation up to ±0.9 by *level*."""
    delta = _randomly_negate((level / _MAX_LEVEL) * 0.9)
    return (1.0 + delta,)
|
def _shear_level_to_arg(level, _hparams):
    """Shear amount in [0, 0.3], randomly negated."""
    amount = (level / _MAX_LEVEL) * 0.3
    return (_randomly_negate(amount),)
|
def _translate_abs_level_to_arg(level, hparams):
    """Absolute translation in pixels, scaled by hparams['translate_const']."""
    max_px = float(hparams['translate_const'])
    pixels = (level / _MAX_LEVEL) * max_px
    return (_randomly_negate(pixels),)
|
def _translate_rel_level_to_arg(level, hparams):
    """Relative translation fraction (default max 0.45), randomly negated."""
    max_pct = hparams.get('translate_pct', 0.45)
    pct = (level / _MAX_LEVEL) * max_pct
    return (_randomly_negate(pct),)
|
def _posterize_level_to_arg(level, _hparams):
    """Bits to keep in [0, 4]; augmentation severity *decreases* with level."""
    bits = int((level / _MAX_LEVEL) * 4)
    return (bits,)
|
def _posterize_increasing_level_to_arg(level, hparams):
    """Bits to keep in [0, 4]; augmentation severity *increases* with level."""
    base = _posterize_level_to_arg(level, hparams)[0]
    return (4 - base,)
|
def _posterize_original_level_to_arg(level, _hparams):
    """Bits to keep in [4, 8], as in the original AutoAugment policy."""
    bits = 4 + int((level / _MAX_LEVEL) * 4)
    return (bits,)
|
def _solarize_level_to_arg(level, _hparams):
    """Solarize threshold in [0, 256]; severity *decreases* with level."""
    thresh = int((level / _MAX_LEVEL) * 256)
    return (thresh,)
|
def _solarize_increasing_level_to_arg(level, _hparams):
    """Solarize threshold in [0, 256]; severity *increases* with level."""
    base = _solarize_level_to_arg(level, _hparams)[0]
    return (256 - base,)
|
def _solarize_add_level_to_arg(level, _hparams):
    """Additive amount in [0, 110] for solarize_add."""
    amount = int((level / _MAX_LEVEL) * 110)
    return (amount,)
|
class AugmentOp():
    """A single augmentation op applied to a frame or a list of frames (video).

    The op name is resolved through the module-level NAME_TO_OP / LEVEL_TO_ARG
    tables; the op fires with probability *prob* at strength *magnitude*.
    """
    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = (hparams or _HPARAMS_DEFAULT)
        self.aug_fn = NAME_TO_OP[name]      # PIL-level transform function
        self.level_fn = LEVEL_TO_ARG[name]  # magnitude -> op args mapping (may be None)
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # fillcolor/resample fall back to module-level defaults (_FILL,
        # _RANDOM_INTERPOLATION) when not provided via hparams.
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL), 'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        # Std-dev of per-call gaussian jitter applied to magnitude (0 = none).
        self.magnitude_std = self.hparams.get('magnitude_std', 0)
    def __call__(self, img_list):
        # Stochastic skip according to prob.
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            magnitude = random.gauss(magnitude, self.magnitude_std)
        # Clamp to the valid [0, _MAX_LEVEL] range after jitter.
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        # Apply the same op with the same sampled args to every frame so a
        # video clip stays temporally consistent.
        if isinstance(img_list, list):
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
|
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized op-selection probabilities for *transforms*.

    Only weight set 0 (_RAND_CHOICE_WEIGHTS_0) is currently defined.
    """
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one weight table exists so far
    weights = _RAND_CHOICE_WEIGHTS_0
    probs = [weights[name] for name in transforms]
    return probs / np.sum(probs)
|
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Instantiate an AugmentOp (prob 0.5) for every transform name."""
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS
    ops = []
    for name in transforms:
        ops.append(AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
|
class RandAugment():
    """Apply *num_layers* ops drawn at random from *ops* to an image.

    choice_weights, when given, biases the draw and disables replacement.
    """
    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights
    def __call__(self, img):
        with_replacement = self.choice_weights is None
        chosen = np.random.choice(self.ops, self.num_layers, replace=with_replacement, p=self.choice_weights)
        for op in chosen:
            img = op(img)
        return img
|
def rand_augment_transform(config_str, hparams):
    """Create a RandAugment transform from a config string.

    RandAugment: Practical automated data augmentation
    (https://arxiv.org/abs/1909.13719)

    config_str: dash-separated; the first section must be 'rand'. Remaining
    sections, in any order: 'm<int>' magnitude, 'n<int>' num layers,
    'w<int>' probability-weight index, 'mstd<float>' magnitude noise std,
    'inc<bool>' use severity-increasing transforms.
    E.g. 'rand-m9-n3-mstd0.5' -> magnitude 9, 3 layers, magnitude_std 0.5.
    hparams: extra kwargs forwarded to the augmentation ops.
    Returns a RandAugment instance; raises ValueError on an unknown section.
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert (config[0] == 'rand')
    config = config[1:]
    for c in config:
        # Split the key prefix from the first digit onward, e.g. 'm9' -> ('m', '9').
        cs = re.split('(\\d.*)', c)
        if (len(cs) < 2):
            continue
        (key, val) = cs[:2]
        if (key == 'mstd'):
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            # NOTE: bool('0') is True — any non-empty value enables this.
            if bool(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            # BUG FIX: was `assert NotImplementedError`, which asserts a truthy
            # class and therefore always passes, silently ignoring bad sections.
            raise ValueError('unknown RandAugment config section: {}'.format(c))
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = (None if (weight_idx is None) else _select_rand_weights(weight_idx))
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
|
class RawVideoExtractorCV2():
    """Decode videos with OpenCV and produce CLIP-style frame tensors.

    At most *framerate* frames per second are sampled; when subset == 'train'
    a RandAugment pipeline is applied to the PIL frames before the tensor
    transform.
    """
    def __init__(self, centercrop=False, size=224, framerate=(- 1), subset='test'):
        # NOTE(review): centercrop is stored but never read in this class.
        self.centercrop = centercrop
        self.size = size
        # Frames sampled per second (sample_fp); <= 0 means every frame.
        self.framerate = framerate
        self.transform = self._transform(self.size)
        self.subset = subset
        # Alternative CLIP preprocessing pipelines keyed by phase
        # (CLIP's published mean/std normalization constants).
        self.tsfm_dict = {'clip_test': Compose([Resize(size, interpolation=InterpolationMode.BICUBIC), CenterCrop(size), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]), 'clip_train': Compose([RandomResizedCrop(size, scale=(0.5, 1.0)), RandomHorizontalFlip(), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])}
        # RandAugment applied to lists of PIL frames during training.
        self.aug_transform = video_transforms.create_random_augment(input_size=(size, size), auto_augment='rand-m7-n4-mstd0.5-inc1', interpolation='bicubic')
    def _transform(self, n_px):
        """Default CLIP preprocessing: resize, center-crop, to-tensor, normalize."""
        return Compose([Resize(n_px, interpolation=InterpolationMode.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])
    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None, _no_process=False):
        """Decode *video_file*, sampling *sample_fp* frames per second.

        start_time/end_time (ints, seconds) optionally restrict the range.
        Returns {'video': stacked tensor of preprocessed frames}; when
        _no_process is True the value is a list of PIL images instead, and
        when no frame could be decoded it is a single zero tensor.
        """
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (- 1)) and (end_time > start_time))
        assert (sample_fp > (- 1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if (fps == 0):
            # Decoder failed to report fps; the division below will raise.
            # The repeated filename makes the culprit obvious in logs.
            print(((video_file + '\n') * 10))
        total_duration = (((frameCount + fps) - 1) // fps)  # ceil(frames/fps), seconds
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            interval = (fps // sample_fp)  # frame step within each second
        else:
            sample_fp = fps  # <= 0 means take every frame
        if (interval == 0):
            interval = 1
        # Per-second frame offsets to grab, truncated to sample_fp entries.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # Random-access seek per sampled frame (simple but slow).
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                # OpenCV decodes BGR; convert for PIL/CLIP which expect RGB.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if _no_process:
                    images.append(Image.fromarray(frame_rgb).convert('RGB'))
                else:
                    images.append(Image.fromarray(frame_rgb))
        cap.release()
        if (len(images) > 0):
            if _no_process:
                video_data = images
            else:
                if (self.subset == 'train'):
                    images = self.aug_transform(images)
                video_data = th.stack([preprocess(img) for img in images])
        else:
            # Decoding failed entirely; caller must handle the sentinel.
            video_data = th.zeros(1)
        return {'video': video_data}
    def get_video_data(self, video_path, start_time=None, end_time=None, _no_process=False):
        """Convenience wrapper: video_to_tensor with the default transform and self.framerate."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time, _no_process=_no_process)
        return image_input
    def process_raw_data(self, raw_video_data):
        """Reshape stacked frames into [-1, 1, C, H, W] (one clip per frame)."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
        return tensor
    def process_frame_order(self, raw_video_data, frame_order=0):
        """Reorder frames along dim 0: 0 = keep, 1 = reverse, 2 = random shuffle."""
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (- 1), (- 1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
|
def url_to_filename(url: str, etag: str=None) -> str:
    """Hash *url* into a repeatable cache filename.

    When *etag* is given, its hash is appended after a '.' separator.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = name + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
|
def filename_to_url(filename: str, cache_dir: Union[(str, Path)]=None) -> Tuple[(str, str)]:
    """Return the (url, etag) metadata stored beside cached file *filename*.

    Raises FileNotFoundError when the cached file or its '.json' sidecar
    does not exist. etag may be None.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    cache_path = os.path.join(str(cache_dir), filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError('file {} not found'.format(cache_path))
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError('file {} not found'.format(meta_path))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
|
def cached_path(url_or_filename: Union[(str, Path)], cache_dir: Union[(str, Path)]=None) -> str:
    """Resolve *url_or_filename* to a local file path.

    http/https/s3 URLs are downloaded into the cache and the cached path is
    returned; an existing local path is returned unchanged. Raises
    FileNotFoundError for a missing local path and ValueError for anything
    that is neither a URL nor a path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    url_or_filename = str(url_or_filename)
    cache_dir = str(cache_dir)
    parsed = urlparse(url_or_filename)
    if parsed.scheme in ('http', 'https', 's3'):
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if parsed.scheme == '':
        raise FileNotFoundError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
|
def split_s3_path(url: str) -> Tuple[(str, str)]:
    """Split 's3://bucket/key' into (bucket, key); raise ValueError when malformed."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError('bad s3 path {}'.format(url))
    key = parsed.path
    # Strip only the single leading '/' that urlparse leaves on the path.
    if key.startswith('/'):
        key = key[1:]
    return (parsed.netloc, key)
|
def s3_request(func: Callable):
    """Decorator translating S3 404 ClientErrors into FileNotFoundError.

    All other ClientErrors are re-raised unchanged so the caller still sees
    the original boto error.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            code = int(exc.response['Error']['Code'])
            if code == 404:
                raise FileNotFoundError('file {} not found'.format(url))
            raise
    return wrapper
|
# NOTE: removed trailing non-Python residue ("Subsets and Splits" dataset-viewer
# boilerplate) that was captured during scraping and would be a syntax error.