code stringlengths 17 6.64M |
|---|
def image2C3(image):
    """Return a 3-channel view of *image*: HxWxC arrays pass through,
    HxW grayscale arrays are replicated along a new channel axis."""
    ndim = image.ndim
    if ndim == 3:
        return image
    if ndim == 2:
        # Stack the single channel three times along a trailing channel axis.
        return np.repeat(image[..., np.newaxis], 3, axis=2)
    raise ValueError('image.ndim = {}, invalid image.'.format(ndim))
|
def resize_height(image, height):
    """Resize *image* to the given pixel height, keeping the aspect ratio
    (width is scaled proportionally with integer division)."""
    if image.shape[0] == height:
        return image
    h, w = image.shape[:2]
    new_width = (height * w) // h
    return cv2.resize(image, (new_width, height))
|
def resize_width(image, width):
    """Resize *image* to the given pixel width, keeping the aspect ratio
    (height is scaled proportionally with integer division)."""
    if image.shape[1] == width:
        return image
    h, w = image.shape[:2]
    new_height = (width * h) // w
    return cv2.resize(image, (width, new_height))
|
def imtext(image, text, space=(3, 3), color=(0, 0, 0), thickness=1, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0):
    """Draw *text* in the top-left corner of *image* with an (x, y) margin of
    *space* pixels and return the image.

    BUG FIX: cv2.getTextSize returns ((width, height), baseline); the original
    used size[1] -- the baseline offset -- as the vertical text extent, which
    placed the text partially above the visible area. putText's org is the
    text baseline, so the correct y offset is the text *height* plus margin.
    """
    assert isinstance(text, str), type(text)
    (text_size, _baseline) = cv2.getTextSize(text, fontFace, fontScale, thickness)
    org = (space[0], text_size[1] + space[1])
    image = cv2.putText(image, text, org, fontFace, fontScale, color, thickness)
    return image
|
def setGPU(gpus):
    """Expose *gpus* (comma-separated device ids) via CUDA_VISIBLE_DEVICES and
    return the remapped local id string '0,1,...' of the same length."""
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    count = gpus.count(',') + 1
    return ','.join(str(i) for i in range(count))
|
def getTime():
    """Current local time formatted as 'MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime('%m-%d %H:%M:%S')
|
class Timer(object):
    """Class-level stopwatch: record() stamps the current time and interval()
    returns the gap between the two most recent stamps."""
    curr_record = None  # most recent timestamp
    prev_record = None  # timestamp taken before the most recent one

    @classmethod
    def record(cls):
        """Shift the current stamp into prev and take a fresh one."""
        cls.prev_record, cls.curr_record = cls.curr_record, time.time()

    @classmethod
    def interval(cls):
        """Seconds between the last two record() calls; 0 before two stamps exist."""
        if cls.prev_record is None:
            return 0
        return cls.curr_record - cls.prev_record
|
def wrapColor(string, color):
    """Wrap *string* in the ANSI escape codes for *color* (case-insensitive);
    raises ValueError for an unknown color name."""
    codes = {'red': '\x1b[91m', 'green': '\x1b[92m', 'yellow': '\x1b[93m', 'blue': '\x1b[94m', 'purple': '\x1b[95m', 'cyan': '\x1b[96m', 'darkcyan': '\x1b[36m', 'bold': '\x1b[1m', 'underline': '\x1b[4m'}
    header = codes.get(color.lower())
    if header is None:
        raise ValueError('Unknown color: {}'.format(color))
    return header + string + '\x1b[0m'
|
def info(logger, msg, color=None):
    """Prefix *msg* with a timestamp, forward it to *logger* (if any), and
    print it to stdout, optionally ANSI-colored."""
    stamped = '[{}]'.format(getTime()) + msg
    if logger is not None:
        # The log file always receives the uncolored message.
        logger.info(stamped)
    if color is None:
        print(stamped)
    else:
        print(wrapColor(stamped, color))
|
def summaryArgs(logger, args, color=None):
    """Pretty-print the non-dunder entries of a module or dict, one aligned
    'key: value' line each, via info()."""
    if isinstance(args, ModuleType):
        args = vars(args)
    keys = sorted(k for k in args.keys() if k[:2] != '__')
    width = max(len(k) for k in keys)
    lines = ['{:<{}}: {}'.format(k, width, args[k]) for k in keys]
    info(logger, '\n' + '\n'.join(lines), color)
|
def loadParams(filename):
    """Load an MXNet .params file and split its entries into
    (arg_params, aux_params) by their 'arg'/'aux' key prefix (the first four
    characters are stripped from the stored names). Either dict becomes None
    when no entries of that kind exist."""
    arg_params = {}
    aux_params = {}
    for name, value in mx.nd.load(filename).items():
        prefix = name[:3]
        if prefix == 'arg':
            arg_params[name[4:]] = value
        elif prefix == 'aux':
            aux_params[name[4:]] = value
    return (arg_params or None, aux_params or None)
|
class SaveParams(object):
    """Rolling checkpoint saver: writes (params, states) file pairs for an
    MXNet module and keeps only the most recent *num_save* pairs on disk."""

    def __init__(self, model, snapshot, model_name, num_save=5):
        self.model = model          # object exposing save_params / save_optimizer_states
        self.snapshot = snapshot    # output directory
        self.model_name = model_name
        self.num_save = num_save    # number of epoch pairs to retain
        self.save_params = []       # flat list: [params_0, states_0, params_1, states_1, ...]

    def save(self, n_epoch):
        """Write the params/states files for *n_epoch*, delete the oldest pair
        when over budget, and return the two newest paths."""
        params_file = os.path.join(self.snapshot, '{}-{:04d}.params'.format(self.model_name, n_epoch))
        states_file = os.path.join(self.snapshot, '{}-{:04d}.states'.format(self.model_name, n_epoch))
        self.save_params.extend([params_file, states_file])
        self.model.save_params(params_file)
        self.model.save_optimizer_states(states_file)
        if len(self.save_params) > 2 * self.num_save:
            # Drop the oldest pair both from disk and from the bookkeeping list.
            call(['rm', self.save_params[0], self.save_params[1]])
            self.save_params = self.save_params[2:]
        return self.save_params[-2:]

    def __call__(self, n_epoch):
        return self.save(n_epoch)
|
def getLogger(snapshot, model_name):
    """Create the *snapshot* directory if needed and return the root logger
    configured to write '<snapshot>/<model_name>.log' at INFO level."""
    if not os.path.exists(snapshot):
        os.makedirs(snapshot)
    log_file = os.path.join(snapshot, model_name + '.log')
    logging.basicConfig(filename=log_file, level=logging.INFO)
    return logging.getLogger()
|
class LrScheduler(object):
    """Learning-rate schedule dispatcher.

    Supported methods (parameters pulled from *kwargs*):
      - 'step': multiply init_lr by 'factor' at each epoch in 'step_list'
      - 'poly': polynomial decay init_lr * (1 - epoch/num_epoch) ** power
      - 'ramp': exponential ramp-up over the first 'ramp_up' epochs and
        ramp-down over the last 'ramp_down' epochs, flat in between

    self.get(epoch) returns the learning rate for that epoch.
    """

    def __init__(self, method, init_lr, kwargs):
        self.method = method
        self.init_lr = init_lr
        if method == 'step':
            self.step_list = kwargs['step_list']  # milestone epochs
            self.factor = kwargs['factor']
            self.get = self._step
        elif method == 'poly':
            self.num_epoch = kwargs['num_epoch']
            self.power = kwargs['power']
            self.get = self._poly
        elif method == 'ramp':
            self.ramp_up = kwargs['ramp_up']
            self.ramp_down = kwargs['ramp_down']
            self.num_epoch = kwargs['num_epoch']
            self.scale = kwargs['scale']
            self.get = self._ramp
        else:
            raise ValueError(method)

    def _step(self, current_epoch):
        """Apply the decay factor once per milestone already reached, scanning
        step_list in order and stopping at the first future milestone."""
        lr = self.init_lr
        for milestone in self.step_list:
            if current_epoch < milestone:
                break
            lr *= self.factor
        return lr

    def _poly(self, current_epoch):
        """Polynomial decay toward zero at num_epoch."""
        remaining = 1.0 - float(current_epoch) / self.num_epoch
        return self.init_lr * (remaining ** self.power)

    def _ramp(self, current_epoch):
        """Gaussian-shaped ramp in/out; full init_lr in the middle phase."""
        if current_epoch < self.ramp_up:
            frac = 1 - float(current_epoch) / self.ramp_up
        elif current_epoch > self.num_epoch - self.ramp_down:
            frac = float(current_epoch + self.ramp_down - self.num_epoch) / self.ramp_down
        else:
            return self.init_lr * 1.0
        return self.init_lr * np.exp(-(frac ** 2) * self.scale)
|
class GradBuffer(object):
    """Snapshot buffer for an MXNet module's gradient arrays: write() copies
    the live gradients into a cache, read_add() adds the cache back into the
    live gradients (useful for gradient accumulation schemes)."""

    def __init__(self, model):
        self.model = model
        self.cache = None  # mirrors the nested layout of model._exec_group.grad_arrays

    def write(self):
        """Copy current gradients into the cache, allocating it on first use.
        None entries (params without gradients) are preserved/skipped."""
        grads = self.model._exec_group.grad_arrays
        if self.cache is None:
            self.cache = [[None if g is None else g.copyto(g.context) for g in g_list]
                          for g_list in grads]
            return
        for live_list, cached_list in zip(grads, self.cache):
            for live, cached in zip(live_list, cached_list):
                if live is not None:
                    live.copyto(cached)

    def read_add(self):
        """Add the cached gradients into the module's live gradient arrays."""
        assert self.cache is not None
        for live_list, cached_list in zip(self.model._exec_group.grad_arrays, self.cache):
            for live, cached in zip(live_list, cached_list):
                if live is not None:
                    live += cached
|
def initNormal(mean, std, name, shape):
    """Initialize a parameter array by its name suffix: weights are drawn from
    N(mean, std); gamma and moving_var start at one; bias, beta and
    moving_mean start at zero. Unknown suffixes raise ValueError."""
    if name.endswith('_weight'):
        return mx.nd.normal(mean, std, shape)
    if name.endswith(('_gamma', '_moving_var')):
        return mx.nd.ones(shape)
    if name.endswith(('_bias', '_beta', '_moving_mean')):
        return mx.nd.zeros(shape)
    raise ValueError('Unknown name type for `{}`'.format(name))
|
def checkParams(mod, arg_params, aux_params, auto_fix=True, initializer=mx.init.Normal(0.01), logger=None):
    """Validate loaded arg/aux params against a bound MXNet module.

    Reports (via info/logger, in red) params that are extra, missing, or
    shape-mismatched relative to what the module expects. When auto_fix is
    True, unusable entries are dropped and missing/mismatched ones are
    re-initialized with *initializer*. Returns the (possibly repaired)
    (arg_params, aux_params) pair.
    """
    arg_params = ({} if (arg_params is None) else arg_params)
    aux_params = ({} if (aux_params is None) else aux_params)
    # Expected shapes, read from the first device's arrays of the bound module.
    arg_shapes = {name: array[0].shape for (name, array) in zip(mod._exec_group.param_names, mod._exec_group.param_arrays)}
    aux_shapes = {name: array[0].shape for (name, array) in zip(mod._exec_group.aux_names, mod._exec_group.aux_arrays)}
    # Params present in the loaded dicts but unknown to the module.
    (extra_arg_params, extra_aux_params) = ([], [])
    for name in arg_params.keys():
        if (name not in arg_shapes):
            extra_arg_params.append(name)
    for name in aux_params.keys():
        if (name not in aux_shapes):
            extra_aux_params.append(name)
    # Params the module needs but the loaded dicts lack.
    (miss_arg_params, miss_aux_params) = ([], [])
    for name in arg_shapes.keys():
        if (name not in arg_params):
            miss_arg_params.append(name)
    for name in aux_shapes.keys():
        if (name not in aux_params):
            miss_aux_params.append(name)
    # Params present on both sides but with different shapes.
    (mismatch_arg_params, mismatch_aux_params) = ([], [])
    for name in arg_params.keys():
        if ((name in arg_shapes) and (arg_shapes[name] != arg_params[name].shape)):
            mismatch_arg_params.append(name)
    for name in aux_params.keys():
        if ((name in aux_shapes) and (aux_shapes[name] != aux_params[name].shape)):
            mismatch_aux_params.append(name)
    # Report every problem found.
    for name in extra_arg_params:
        info(logger, 'Find extra arg_params: {}: given {}'.format(name, arg_params[name].shape), 'red')
    for name in extra_aux_params:
        info(logger, 'Find extra aux_params: {}: given {}'.format(name, aux_params[name].shape), 'red')
    for name in miss_arg_params:
        info(logger, 'Find missing arg_params: {}: target {}'.format(name, arg_shapes[name]), 'red')
    for name in miss_aux_params:
        info(logger, 'Find missing aux_params: {}: target {}'.format(name, aux_shapes[name]), 'red')
    for name in mismatch_arg_params:
        info(logger, 'Find mismatch arg_params: {}: given {}, target {}'.format(name, arg_params[name].shape, arg_shapes[name]), 'red')
    for name in mismatch_aux_params:
        info(logger, 'Find mismatch aux_params: {}: given {}, target {}'.format(name, aux_params[name].shape, aux_shapes[name]), 'red')
    # Nothing wrong: return the params untouched.
    if (len((((((extra_arg_params + extra_aux_params) + miss_arg_params) + miss_aux_params) + mismatch_arg_params) + mismatch_aux_params)) == 0):
        return (arg_params, aux_params)
    if (not auto_fix):
        info(logger, 'Bad params not fixed.', 'red')
        return (arg_params, aux_params)
    # Drop entries the module cannot use (mismatched ones are re-created below).
    for name in (extra_arg_params + mismatch_arg_params):
        del arg_params[name]
    for name in (extra_aux_params + mismatch_aux_params):
        del aux_params[name]
    attrs = mod._symbol.attr_dict()
    # Re-initialize everything missing or mismatched at the module's shape.
    for name in (miss_arg_params + mismatch_arg_params):
        arg_params[name] = mx.nd.zeros(arg_shapes[name])
        try:
            # Newer MXNet initializers expect an InitDesc...
            initializer(mx.init.InitDesc(name, attrs.get(name, None)), arg_params[name])
        except ValueError:
            # ...older ones accept the plain name string.
            initializer(name, arg_params[name])
    for name in (miss_aux_params + mismatch_aux_params):
        aux_params[name] = mx.nd.zeros(aux_shapes[name])
        try:
            initializer(mx.init.InitDesc(name, attrs.get(name, None)), aux_params[name])
        except ValueError:
            initializer(name, aux_params[name])
    info(logger, 'Bad params auto fixed successfully.', 'red')
    return (arg_params, aux_params)
|
def compute_embeddings(dataset: str, architecture: str, seed: int, step: int, layer: int) -> np.ndarray:
    """Compute the representations of a layer specified by the arguments and
    save them to a npy file.

    :param dataset: Dataset to compute embeddings for
    :param architecture: Model weights to load
    :param seed: Random seed used in the model pretraining
    :param step: Checkpoint during pretraining to use
    :param layer: Layer of the model to load
    :return: embedding (i.e. representation) just computed
    """
    # Map each supported dataset name to its input file path.
    datapaths = {
        'ptb_dev': PTB_PATH,
        'mnli_matched': MNLI_MATCHED_PATH,
        'mnli_matched_100': MNLI_MATCHED_100_PATH,
        'mnli_mismatched': MNLI_MISMATCHED_PATH,
        'hans_evaluation': HANS_PATH,
        'hans_evaluation_100': HANS_100_PATH,
    }
    assert (dataset in datapaths)
    datapath = datapaths[dataset]
    output_path = get_embedding_folder(dataset, architecture, seed, step, layer)
    json_output = output_path / pathlib.Path('rep.json')
    npy_output = output_path / pathlib.Path('rep.npy')
    if architecture == 'feather':
        # feather checkpoints are stored as bert_00 ... (zero-padded to 2 digits).
        model_path = '{head}/feather/bert_{number:02d}'.format(head=BERT_CHECKPOINT_PATH, number=seed)
        command_outline = 'python extract_features.py --input_file={data} --output_file={output} --vocab_file={bertbase}/vocab.txt --bert_config_file={bertbase}/bert_config.json --init_checkpoint={model}/model.ckpt-36815 --layers={layer} --max_seq_length=128 --batch_size=8'
        command = command_outline.format(data=datapath, output=str(json_output), bertbase=BERT_BASE_DIR, model=model_path, layer=layer)
    else:
        model_path = '{head}/{architecture}/pretrain_seed{seed}step{step}'.format(head=EMBEDDING_PATH, architecture=architecture, seed=seed, step=step)
        command_outline = 'python extract_features.py --input_file={data} --output_file={output} --vocab_file={model}/vocab.txt --bert_config_file={model}/bert_config.json --init_checkpoint={model}/bert_model.ckpt --layers={layer} --max_seq_length=128 --batch_size=8'
        command = command_outline.format(data=datapath, output=str(json_output), model=model_path, layer=layer)
    os.system('echo {}'.format(command))
    # BUG FIX: the original issued os.system('cd {}') as a separate call, but
    # each os.system spawns its own shell, so the directory change never
    # applied to the extraction command. Run cd and the command in one shell.
    os.system('cd {} && {}'.format(BERT_PATH, command))
    # The extractor emits one JSON line per input; collect every token's
    # vector for the requested layer.
    representation = []
    with open(json_output) as f:
        for line in f:
            data = json.loads(line)
            for token in data['features']:
                representation.append(token['layers'][0]['values'])
    # Transpose to (features, tokens) as downstream metrics expect.
    representation = np.array(representation).T
    print('Saving representations at {}'.format(npy_output))
    np.save(npy_output, representation)
    # The JSON intermediate is large; keep only the npy cache.
    os.system('rm {}'.format(str(json_output)))
    return representation
|
def get_filepath(dataset, architecture, seed, step, layer, folder=False):
    """Filepath for the embedding of interest (used to check whether it has
    already been computed): the containing folder when *folder* is True,
    otherwise the 'rep.npy' file inside it."""
    base = os.path.join(EMBEDDING_PATH, dataset, architecture, str(seed), str(step), str(layer))
    return base if folder else os.path.join(base, 'rep.npy')
|
def get_string_filepath(dataset, architecture, seed, step, layer):
    """'<EMBEDDING_PATH>/<dataset>/<architecture>/<seed>/<step>/<layer>' as a string."""
    parts = (EMBEDDING_PATH, dataset, architecture, seed, step, layer)
    return '/'.join(str(p) for p in parts)
|
def get_embedding_folderpath(dataset: str, architecture: str, seed: int, step: int) -> pathlib.Path:
    """Return the folder holding embedding arrays for one model on one dataset.

    Args:
        dataset (str): name of the dataset on which to compute embeddings, eg "tiny_imagenet"
        architecture (str): name of model architecture, eg "resnet18"
        seed (int): seed used to train the model
        step (int): number of training steps used to train the model

    Returns:
        pathlib.Path: path to the embedding folder under SCRATCH_PATH
    """
    suffix = f'embeddings/{dataset}/{architecture}/{seed}/{step}/'
    return SCRATCH_PATH / pathlib.Path(suffix)
|
def get_checkpoint_filepath(architecture: str, seed: int, step: int) -> pathlib.Path:
    """Return the model checkpoint path for (architecture, seed, step).

    Args:
        architecture (str): name of model architecture, eg "resnet18"
        seed (int): seed used to train the model
        step (int): number of training steps used to train the model

    Returns:
        pathlib.Path: path to the .pt checkpoint under DATA_PATH
    """
    suffix = f'checkpoints/{architecture}/seed_{seed}_step_{step}.pt'
    return DATA_PATH / pathlib.Path(suffix)
|
def initialise_model(architecture: str) -> nn.Module:
    """Return an ImageNet-pretrained torchvision network for *architecture*.

    Only the resnet family (18/34/50/101/152) is supported here; inceptionv1
    is explicitly rejected.
    """
    assert architecture in ARCHITECTURES
    assert architecture != 'inceptionv1'
    if architecture == 'resnet18':
        blocked_model = models.resnet18(pretrained=True)
    elif architecture == 'resnet34':
        blocked_model = models.resnet34(pretrained=True)
    elif architecture == 'resnet50':
        blocked_model = models.resnet50(pretrained=True)
    elif architecture == 'resnet101':
        blocked_model = models.resnet101(pretrained=True)
    elif architecture == 'resnet152':
        blocked_model = models.resnet152(pretrained=True)
    return blocked_model
|
def initialise_dataset(dataset: str, sample_size: int, sample_seed: int, normalize=True):
    """Return a subsampled Dataset object corresponding to a dataset name.

    Args:
        dataset (str): name of dataset ('tiny_imagenet' or 'imagenet'; any
            other value leaves ds unbound and raises NameError, as before)
        sample_size (int): number of inputs to subsample
        sample_seed (int): seed to use when subsampling inputs (None -> 0)
        normalize (bool): apply standard ImageNet normalization ('imagenet' only)

    Returns:
        tuple: (torch.utils.data.Subset, tensor of the sampled indices)
    """
    dataset_folderpath = DATA_PATH / pathlib.Path('datasets/')
    if dataset == 'tiny_imagenet':
        ds = datasets.ImageFolder(root=(dataset_folderpath / pathlib.Path('tiny-imagenet-200/val/')), transform=transforms.ToTensor())
    if dataset == 'imagenet':
        if normalize:
            # NOTE(review): rebinds the boolean parameter to a transform object
            # (harmless here, but easy to misread) — kept for compatibility.
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
        else:
            transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])
        ds = datasets.ImageFolder(root=(dataset_folderpath / pathlib.Path('imagenet/val/')), transform=transform)
    # Deterministic subsampling: fall back to seed 0 when none is given.
    # (Was `sample_seed == None`; identity comparison is the correct idiom.)
    torch.manual_seed(0 if sample_seed is None else sample_seed)
    random_indices = torch.randperm(len(ds))[:sample_size]
    ds = torch.utils.data.Subset(ds, indices=random_indices)
    return (ds, random_indices)
|
def get_embedding_folder(dataset, architecture, seed, step, layer):
    """Folder holding the cached embedding of one specific layer."""
    relative = f'embeddings/{dataset}/{architecture}/{seed}/{step}/{layer}'
    return resources_path / pathlib.Path(relative)
|
def load_embedding(dataset: str, architecture: str, seed: int, step: int, layer: int) -> np.ndarray:
    """Load a cached representation from disk; if its folder does not exist
    yet, create it and compute (and cache) the representation first."""
    folder_path = get_embedding_folder(dataset, architecture, seed, step, layer)
    if os.path.exists(folder_path):
        print('Representation already exists...loading...')
        return np.load(folder_path / pathlib.Path('rep.npy'))
    print('Computing representations for model')
    os.makedirs(folder_path)
    return compute_embeddings(dataset, architecture, seed, step, layer)
|
def score_pair_to_csv(rep1_dict: dict, rep2_dict: dict, filename: str, metrics: list) -> None:
    """Compute metric distances between two stored representations and append
    them (with their configuration metadata) to a csv file.

    Args:
        rep1_dict (dict): configuration of representation 1 (dataset,
            architecture, seed, step, layer) used to load it from disk
        rep2_dict (dict): configuration of representation 2
        filename (str): output filename to save results to
        metrics (list): list of metrics to apply, eg CCA and/or CKA and/or GLD
    """
    keys = ('dataset', 'architecture', 'seed', 'step', 'layer')
    rep1 = load_embedding(*(rep1_dict[k] for k in keys))
    rep2 = load_embedding(*(rep2_dict[k] for k in keys))
    logging.info(f'representation 1 shape: {rep1.shape}')
    logging.info(f'representation 2 shape: {rep2.shape}')
    # Metadata columns: all '...1' keys first, then all '...2' keys.
    results = {f'{k}1': rep1_dict[k] for k in keys}
    results.update({f'{k}2': rep2_dict[k] for k in keys})
    score_local_pair(rep1=rep1, rep2=rep2, metrics=metrics, filename=filename, metadata=results)
|
def score_local_pair(rep1: np.ndarray, rep2: np.ndarray, filename: str, metrics: list, metadata: dict=None) -> None:
    """Compute metric distances between two representations (in numpy array
    format) and append the results to a csv file.

    Args:
        rep1 (np.ndarray): representation 1 to compare
        rep2 (np.ndarray): representation 2 to compare
        filename (str): file name for output csv
        metrics (list): list of metrics to apply
        metadata (dict, optional): metadata for the representations to print
            to the csv (by default empty). When supplied, results are written
            into it in place (preserving the original behavior).
    """
    # BUG FIX: the original default was the mutable `metadata: dict={}`, a
    # single dict shared across calls, so results accumulated between
    # invocations. A None sentinel gives each call a fresh dict.
    if metadata is None:
        metadata = {}
    # Center each neuron (row) and scale each matrix by its Frobenius norm.
    rep1 = (rep1 - rep1.mean(axis=1, keepdims=True))
    rep2 = (rep2 - rep2.mean(axis=1, keepdims=True))
    rep1 = (rep1 / np.linalg.norm(rep1))
    rep2 = (rep2 / np.linalg.norm(rep2))
    results = metadata
    # The CCA decomposition is shared by the three CCA-based metrics.
    if (('PWCCA' in metrics) or ('mean_sq_cca_corr' in metrics) or ('mean_cca_corr' in metrics)):
        logging.info('Computing CCA decomposition...')
        (cca_u, cca_rho, cca_vh, transformed_rep1, transformed_rep2) = cca_decomp(rep1, rep2)
        if ('PWCCA' in metrics):
            logging.info('Computing PWCCA distance...')
            results['PWCCA'] = pwcca_dist(rep1, cca_rho, transformed_rep1)
        if ('mean_sq_cca_corr' in metrics):
            logging.info('Computing mean square CCA corelation...')
            results['mean_sq_cca_corr'] = mean_sq_cca_corr(cca_rho)
        if ('mean_cca_corr' in metrics):
            logging.info('Computing mean CCA corelation...')
            results['mean_cca_corr'] = mean_cca_corr(cca_rho)
    if ('CKA' in metrics):
        logging.info('Computing Linear CKA dist...')
        results['CKA'] = lin_cka_dist(rep1, rep2)
    if ("CKA'" in metrics):
        logging.info("Computing Linear CKA' dist...")
        results["CKA'"] = lin_cka_prime_dist(rep1, rep2)
    if ('Procrustes' in metrics):
        logging.info('Computing GLD dist...')
        results['Procrustes'] = procrustes(rep1, rep2)
    # Append one row; write the header only when the file is new/empty.
    with open(filename, mode='a') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=results.keys())
        if (csv_file.tell() == 0):
            writer.writeheader()
        writer.writerow(results)
|
def cca_decomp(A, B):
    """Computes CCA vectors, correlations, and transformed matrices.

    Requires a < n and b < n.

    Args:
        A: np.array of size a x n where a is the number of neurons and n is the dataset size
        B: np.array of size b x n where b is the number of neurons and n is the dataset size
    Returns:
        u: left singular vectors for the inner SVD problem
        s: canonical correlation coefficients
        vh: right singular vectors for the inner SVD problem
        transformed_a: canonical vectors for matrix A, n x a array
        transformed_b: canonical vectors for matrix B, n x b array
    """
    assert A.shape[0] < A.shape[1]
    assert B.shape[0] < B.shape[1]
    # Whitening transforms (A A^T)^{-1/2}, (B B^T)^{-1/2} via eigendecomposition;
    # tiny negative eigenvalues (numerical noise from eigh) are clamped to zero
    # and zero eigenvalues are pseudo-inverted.
    (evals_a, evecs_a) = np.linalg.eigh(A @ A.T)
    evals_a = (evals_a + np.abs(evals_a)) / 2
    inv_a = np.array([(1 / np.sqrt(x)) if x > 0 else 0 for x in evals_a])
    (evals_b, evecs_b) = np.linalg.eigh(B @ B.T)
    evals_b = (evals_b + np.abs(evals_b)) / 2
    inv_b = np.array([(1 / np.sqrt(x)) if x > 0 else 0 for x in evals_b])
    cov_ab = A @ B.T
    temp = ((evecs_a @ np.diag(inv_a)) @ evecs_a.T) @ cov_ab @ ((evecs_b @ np.diag(inv_b)) @ evecs_b.T)
    try:
        (u, s, vh) = np.linalg.svd(temp)
    except np.linalg.LinAlgError:
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit. Only SVD convergence failures should trigger the
        # rescaling retry (scaling can help convergence; undo it afterwards).
        (u, s, vh) = np.linalg.svd(temp * 100)
        s = s / 100
    transformed_a = ((u.T @ ((evecs_a @ np.diag(inv_a)) @ evecs_a.T)) @ A).T
    transformed_b = ((vh @ ((evecs_b @ np.diag(inv_b)) @ evecs_b.T)) @ B).T
    return (u, s, vh, transformed_a, transformed_b)
|
def mean_sq_cca_corr(rho):
    """Mean of the squared CCA correlations.

    :param rho: canonical correlation coefficients returned by cca_decomp(A,B)
    """
    return np.sum(np.square(rho)) / len(rho)
|
def mean_cca_corr(rho):
    """Mean of the CCA correlations.

    :param rho: canonical correlation coefficients returned by cca_decomp(A,B)
    """
    total = np.sum(rho)
    return total / len(rho)
|
def pwcca_dist(A, rho, transformed_a):
    """Projection-weighted CCA distance.

    :param A: np.array of size a x n (neurons x dataset size)
    :param rho: canonical correlation coefficients from cca_decomp(A,B)
    :param transformed_a: canonical vectors for A from cca_decomp(A,B)
    :return: PWCCA distance (1 minus the weighted mean correlation)
    """
    # Weight each canonical direction by how much of A it captures.
    projections = transformed_a.T @ A.T
    weights = np.abs(projections).sum(axis=1)
    weights = weights / weights.sum()
    dim = min(len(weights), len(rho))
    return 1 - np.dot(weights[:dim], rho[:dim])
|
def lin_cka_dist(A, B):
    """Computes Linear CKA distance between representations A and B."""
    cross = np.linalg.norm(B @ A.T, ord='fro') ** 2
    norm_a = np.linalg.norm(A @ A.T, ord='fro')
    norm_b = np.linalg.norm(B @ B.T, ord='fro')
    return 1 - cross / (norm_a * norm_b)
|
def lin_cka_prime_dist(A, B):
    """Computes Linear CKA prime distance between representations A and B.

    The first branch is suited to a, b >> n: it works with the smaller
    n x n Gram matrices instead of the a x a / b x b ones.
    """
    denominator = np.sum(A ** 2) ** 2 + np.sum(B ** 2) ** 2
    if A.shape[0] > A.shape[1]:
        gram_diff = A.T @ A - B.T @ B
        return np.sum(gram_diff ** 2) / denominator
    similarity = np.linalg.norm(B @ A.T, ord='fro') ** 2
    return 1 - 2 * similarity / denominator
|
def procrustes(A, B):
    """Computes Procrustes distance between representations A and B:
    ||A||_F^2 + ||B||_F^2 - 2 * ||A B^T||_*  (nuclear norm)."""
    nuclear = np.linalg.norm(A @ B.T, ord='nuc')
    return np.sum(A ** 2) + np.sum(B ** 2) - 2 * nuclear
|
def get_acc_diff(row, scores_df, task_list):
    """Add a '<task>_diff' entry per task (absolute accuracy gap between the
    two seeds' score rows) to *row* and return it."""
    row1 = scores_df.iloc[row['seed1']]
    row2 = scores_df.iloc[row['seed2']]
    for task in task_list:
        row[f'{task}_diff'] = abs(row1[task] - row2[task])
    return row
|
def rename_scores(scores_df):
    """Rename the human-readable score columns to snake_case identifiers."""
    column_map = {
        'MNLI dev acc.': 'mnli_dev_acc',
        'Lexical (entailed)': 'lex_ent',
        'Subseq (entailed)': 'sub_ent',
        'Constituent (entailed)': 'const_ent',
        'Lexical (nonent)': 'lex_nonent',
        'Subseq (nonent)': 'sub_nonent',
        'Constituent (nonent)': 'const_nonent',
        'Overall accuracy': 'overall_accuracy',
    }
    return scores_df.rename(columns=column_map)
|
def get_full_df(scores_path, dists_path, full_df_path):
    """Join distance rows with per-seed accuracy differences for the first
    eight score columns, cache the result at *full_df_path*, and return it."""
    # Only the first 100 score rows are used (one per seed).
    scores_df = rename_scores(pd.read_csv(scores_path)[0:100])
    task_list = list(scores_df.columns[1:9])
    print('got scores_df')
    dists_df = pd.read_csv(dists_path)
    print('got dists_df')
    print('getting full_df, will take a while')
    add_diffs = lambda row: get_acc_diff(row, scores_df, task_list)
    full_df = dists_df.apply(add_diffs, axis=1)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
|
def feather_sub_df(df, task, ref_depth):
    """Rows of *df* where both layers equal *ref_depth* and either side uses
    the seed with the best *task* score (looked up in the global scores_df)."""
    seeds = list(df.seed1.unique())
    acc_dict = {seed: scores_df.iloc[seed][task] for seed in seeds}
    best_seed = max(acc_dict, key=acc_dict.get)
    at_depth = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    with_best = (df.seed1 == best_seed) | (df.seed2 == best_seed)
    return df[at_depth & with_best]
|
def feather_sub_df(df, task, ref_depth):
    """Rows of *df* where both layers equal *ref_depth* and either side uses
    the seed with the best *task* score (looked up in the global scores_df).

    NOTE(review): duplicate of an earlier identical definition in this file.
    """
    seeds = list(df.seed1.unique())
    best_seed = max(seeds, key=lambda seed: scores_df.iloc[seed][task])
    at_depth = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    with_best = (df.seed1 == best_seed) | (df.seed2 == best_seed)
    return df[at_depth & with_best]
|
def get_probing_accuracy(data_dict, task, seed, depth):
    """Average accuracy of the model finetuned with seed *seed* on mnli when
    probing layer *depth* (stored 1-indexed in data_dict) on *task*."""
    accs = data_dict[task][seed][depth + 1][0][0]
    return np.mean(accs)
|
def get_full_df(scores_path, dists_path, full_df_path):
    """Attach '<task>_diff' probing-accuracy gap columns to the distances csv
    and cache the result at *full_df_path*.

    NOTE(review): relies on a module-level `task_list` — confirm it is defined
    wherever this version of get_full_df is used.
    """
    dists_df = pd.read_csv(dists_path)
    print('got dists_df')
    print('adding probing scores to get full_df')
    full_df = dists_df
    data_dict = pkl.load(open(scores_path, 'rb'))
    for task in task_list:
        diffs = []
        for _, row in dists_df.iterrows():
            acc1 = get_probing_accuracy(data_dict, task, row['seed1'], row['layer1'])
            acc2 = get_probing_accuracy(data_dict, task, row['seed2'], row['layer2'])
            diffs.append(np.abs(acc1 - acc2))
        full_df[f'{task}_diff'] = np.array(diffs)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
|
def best_probing_seed(task, ref_depth, list_ref_seeds):
    """Seed from *list_ref_seeds* with the highest mean probing accuracy on
    *task* at layer *ref_depth* (scores loaded from the global scores_path);
    ties resolve to the first best seed."""
    data_dict = pkl.load(open(scores_path, 'rb'))
    scores = [np.mean(data_dict[task][seed][ref_depth + 1][0][0]) for seed in list_ref_seeds]
    best_idx = max(range(len(scores)), key=scores.__getitem__)
    return list_ref_seeds[best_idx]
|
def layer_sub_df(df, ref_depth, ref_seed):
    """Rows of *df* where either side is (ref_seed, ref_depth); asserts the
    expected row count of 12 layers x 10 seeds."""
    side1 = (df['seed1'] == ref_seed) & (df['layer1'] == ref_depth)
    side2 = (df['seed2'] == ref_seed) & (df['layer2'] == ref_depth)
    sub_df = df.loc[side1 | side2].reset_index()
    num_layers = 12
    assert len(sub_df) == num_layers * 10
    return sub_df
|
def aggregate_rank_corrs(df, task, layer_depths, list_ref_seeds, METRICS, sub_df_fn):
    """Collect rank correlations (Spearman rho, Kendall tau), their one-sided
    p-values, and 'bad fractions' per metric across all reference depths.

    For each depth the best-probing reference seed is chosen, the matching
    sub-dataframe extracted via *sub_df_fn*, and get_rank_corrs applied per
    metric. Returns (rho, rho_p, tau, tau_p, bad_fracs), each a dict of lists
    keyed by metric.
    """
    rho = {metric: [] for metric in METRICS}
    rho_p = {metric: [] for metric in METRICS}
    tau = {metric: [] for metric in METRICS}
    tau_p = {metric: [] for metric in METRICS}
    bad_fracs = {metric: [] for metric in METRICS}
    for ref_depth in layer_depths:
        ref_seed = best_probing_seed(task, ref_depth, list_ref_seeds)
        sub_df = sub_df_fn(df, ref_depth, ref_seed)
        for metric in METRICS:
            corrs = get_rank_corrs(sub_df, metric, task)
            rho[metric].append(corrs[0])
            rho_p[metric].append(corrs[1])
            tau[metric].append(corrs[2])
            tau_p[metric].append(corrs[3])
            bad_fracs[metric].append(corrs[4])
    return (rho, rho_p, tau, tau_p, bad_fracs)
|
def best_probing_seed(task, ref_depth, list_ref_seeds):
    """Seed from *list_ref_seeds* with the highest mean probing accuracy on
    *task* at layer *ref_depth*; ties resolve to the first best seed.

    NOTE(review): duplicate of an earlier identical definition in this file.
    """
    data_dict = pkl.load(open(scores_path, 'rb'))
    scores = [np.mean(data_dict[task][seed][ref_depth + 1][0][0]) for seed in list_ref_seeds]
    best_idx = max(range(len(scores)), key=scores.__getitem__)
    return list_ref_seeds[best_idx]
|
def layer_sub_df(df, ref_depth, ref_seed):
    """Rows of *df* where either side is (ref_seed, ref_depth); asserts the
    expected 12 layers x 10 seeds row count.

    NOTE(review): duplicate of an earlier identical definition in this file.
    """
    side1 = (df['seed1'] == ref_seed) & (df['layer1'] == ref_depth)
    side2 = (df['seed2'] == ref_seed) & (df['layer2'] == ref_depth)
    sub_df = df.loc[side1 | side2].reset_index()
    num_layers = 12
    assert len(sub_df) == num_layers * 10
    return sub_df
|
def aggregate_rank_corrs(df, task, layer_depths, list_ref_seeds, METRICS, sub_df_fn):
    """Collect rank correlations, p-values, and bad fractions per metric
    across all reference depths (see get_rank_corrs for the per-call values).

    NOTE(review): duplicate of an earlier identical definition in this file.
    """
    rho = {metric: [] for metric in METRICS}
    rho_p = {metric: [] for metric in METRICS}
    tau = {metric: [] for metric in METRICS}
    tau_p = {metric: [] for metric in METRICS}
    bad_fracs = {metric: [] for metric in METRICS}
    for ref_depth in layer_depths:
        ref_seed = best_probing_seed(task, ref_depth, list_ref_seeds)
        sub_df = sub_df_fn(df, ref_depth, ref_seed)
        for metric in METRICS:
            corrs = get_rank_corrs(sub_df, metric, task)
            rho[metric].append(corrs[0])
            rho_p[metric].append(corrs[1])
            tau[metric].append(corrs[2])
            tau_p[metric].append(corrs[3])
            bad_fracs[metric].append(corrs[4])
    return (rho, rho_p, tau, tau_p, bad_fracs)
|
def get_acc(data_dict, task, seed, layer, dims, run='average'):
    """Probing accuracy at (task, seed, layer, dims): the mean or std across
    runs, or a single run's value when *run* is an integer index. Layers are
    stored 1-indexed in data_dict."""
    runs = data_dict[task][seed][layer + 1][dims]
    if run == 'average':
        return np.mean(runs)
    if run == 'std':
        return np.std(runs)
    return runs[run]
|
def get_acc_diff(data_dict, row):
    """Absolute gap between the average probing accuracy of row's side 1
    (dims=0, i.e. no dims deleted) and side 2 (row['dims_deleted'] dims
    removed) on the global probe_task."""
    spec1 = dict(seed=row['seed1'], layer=row['layer1'], dims=0)
    spec2 = dict(seed=row['seed2'], layer=row['layer2'], dims=row['dims_deleted'])
    acc1 = get_acc(data_dict, task=probe_task, run='average', **spec1)
    acc2 = get_acc(data_dict, task=probe_task, run='average', **spec2)
    return np.abs(acc1 - acc2)
|
def get_full_df(scores_path, dists_path, full_df_path):
    """Filter the distances csv to the reference seeds/layers (globals
    REF_SEEDS / LAYERS), attach the probing-accuracy gap column for the
    global probe_task, cache the result at *full_df_path*, and return it."""
    dists_df = pd.read_csv(dists_path)
    print('got dists_df')
    keep = (dists_df['seed1'].isin(REF_SEEDS) & dists_df['seed2'].isin(REF_SEEDS)
            & dists_df['layer1'].isin(LAYERS) & dists_df['layer2'].isin(LAYERS))
    full_df = pd.DataFrame(dists_df[keep])
    print('filtered full_df layers and seeds')
    print('adding probing scores to get full_df')
    data_dict = pkl.load(open(scores_path, 'rb'))
    full_df[f'{probe_task}_diff'] = full_df.apply(lambda row: get_acc_diff(data_dict, row), axis=1)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
|
def pca_sub_df(df, task, ref_depth):
    """Rows of *df* at layer *ref_depth* (both sides) involving the reference
    seed with the best average probing accuracy on the global probe_task."""
    data_dict = pkl.load(open(scores_path, 'rb'))
    acc_dict = {seed: get_acc(data_dict, probe_task, seed, layer=ref_depth, dims=0, run='average')
                for seed in REF_SEEDS}
    best_seed = max(acc_dict, key=acc_dict.get)
    at_depth = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    with_best = (df.seed1 == best_seed) | (df.seed2 == best_seed)
    return df[at_depth & with_best]
|
def pca_sub_df(df, task, ref_depth):
    """Rows of *df* at layer *ref_depth* (both sides) involving the reference
    seed with the best average probing accuracy on the global probe_task.

    NOTE(review): duplicate of an earlier identical definition in this file.
    """
    data_dict = pkl.load(open(scores_path, 'rb'))
    best_seed = max(REF_SEEDS,
                    key=lambda seed: get_acc(data_dict, probe_task, seed, layer=ref_depth, dims=0, run='average'))
    at_depth = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    with_best = (df.seed1 == best_seed) | (df.seed2 == best_seed)
    return df[at_depth & with_best]
|
def collect_scores(scores_path):
    """Build per-test-set accuracy matrices from a pickled correctness tensor.

    Each test set (identified by the guid prefix before '-') gets a 10x10
    matrix of mean correctness indexed by (pretraining seed, finetuning seed).
    A 'lex_nonent' entry for the HANS lexical-overlap non-entailment subset is
    added as well. Returns (guid_set, acc_dict).
    """
    model2correctness_tensor, data_dict = pkl.load(open(scores_path, 'rb'))

    def acc_matrix(idxes):
        # 10x10 mean-correctness matrix over pretraining x finetuning seeds 1..10.
        rows = []
        for pretraining_seed in range(1, 11):
            rows.append([np.mean(model2correctness_tensor[pretraining_seed][finetuning_seed][idxes])
                         for finetuning_seed in range(1, 11)])
        return np.array(rows)

    guid_set = {d['guid'].split('-')[0] for d in data_dict}
    acc_dict = {}
    for test_set in guid_set:
        idxes = [i for i, d in enumerate(data_dict) if d['guid'].split('-')[0] == test_set]
        acc_dict[test_set] = acc_matrix(idxes)
    lex_nonent_idxes = [i for i, d in enumerate(data_dict)
                        if ('HANS' in d['guid']) and (d['heuristic'] == 'lexical_overlap')
                        and (d['label'] == 'non-entailment')]
    acc_dict['lex_nonent'] = acc_matrix(lex_nonent_idxes)
    guid_set.add('lex_nonent')
    return (guid_set, acc_dict)
|
def get_accuracy(acc_dict, stress_test, pretraining_seed, finetuning_seed):
    """Look up the accuracy-grid entry for one (pretraining, finetuning) seed pair."""
    grid = acc_dict[stress_test]
    return grid[pretraining_seed][finetuning_seed]
|
def get_acc_diff(acc_dict, stress_test, pre_seed1, pre_seed2, fine_seed1, fine_seed2):
    """Absolute accuracy gap between two (pretraining, finetuning) seed pairs."""
    grid = acc_dict[stress_test]
    first = grid[pre_seed1][fine_seed1]
    second = grid[pre_seed2][fine_seed2]
    return np.abs(second - first)
|
def add_acc_diff_cols(dists_df, acc_dict, guid_set):
    """Append one ``<test_set>_diff`` column per test set to ``dists_df``.

    For every unordered pair of (pretraining, finetuning) seed combinations
    (seeds 1..10), the absolute accuracy difference is repeated ``num_layers``
    times so the column lines up with the per-layer rows of ``dists_df``.

    NOTE(review): relies on the module-level ``num_layers`` and on the row
    ordering of ``dists_df`` matching this exact nested-loop order — confirm
    against the code that produced the distances CSV.
    """
    for stress_test in guid_set:
        new_column = []
        # Enumerate unordered pairs; the guard below skips the mirrored half
        # when both pretraining seeds coincide.
        for pre_seed1 in range(1, 11):
            for fine_seed1 in range(1, 11):
                for pre_seed2 in range(pre_seed1, 11):
                    for fine_seed2 in range(1, 11):
                        if ((pre_seed2 == pre_seed1) and (fine_seed2 < fine_seed1)):
                            continue
                        else:
                            # Seeds are 1-based here but 0-based in acc_dict.
                            new_column += (num_layers * [get_acc_diff(acc_dict, stress_test, (pre_seed1 - 1), (pre_seed2 - 1), (fine_seed1 - 1), (fine_seed2 - 1))])
        dists_df[f'{stress_test}_diff'] = np.array(new_column)
    return dists_df
|
def get_full_df(scores_path, dists_path, full_df_path):
    """Join distance rows with accuracy-difference columns and cache the result to CSV."""
    dists_df = pd.read_csv(dists_path)
    renames = {'step1': 'fine_seed1', 'step2': 'fine_seed2',
               'seed1': 'pre_seed1', 'seed2': 'pre_seed2'}
    dists_df = dists_df.rename(columns=renames)
    print('got dists_df')
    print('adding probing scores to get full_df')
    (guid_set, acc_dict) = collect_scores(scores_path)
    full_df = add_acc_diff_cols(dists_df, acc_dict, guid_set)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
|
def best_seed_pair(task):
    """Return the 0-based (pretraining, finetuning) seed pair with top accuracy.

    Reads the 10x10 score grid for ``task`` via the module-level ``scores_path``.
    NOTE: uses ``argmax`` (first maximum on ties) instead of the old
    ``argsort()[-1:]`` slice-and-loop, which only ever used its single entry
    and relied on unspecified tie ordering.
    """
    (_, acc_dict) = collect_scores(scores_path)
    acc_array = acc_dict[task].flatten()
    best = int(acc_array.argmax())
    # Grid is flattened row-major: row = pretraining seed, col = finetuning seed.
    return (best // 10, best % 10)
|
def ftvft_sub_df(df, task, ref_depth):
    """Rows of ``df`` at layer ``ref_depth`` where either endpoint is the best seed pair."""
    (best_pre, best_fine) = best_seed_pair(task)
    same_layer = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    first_is_best = (df.pre_seed1 == best_pre) & (df.fine_seed1 == best_fine)
    second_is_best = (df.pre_seed2 == best_pre) & (df.fine_seed2 == best_fine)
    return df[same_layer & (first_is_best | second_is_best)]
|
def best_seed_pair(task):
    """Return the 0-based (pretraining, finetuning) seed pair with top accuracy.

    Reads the 10x10 score grid for ``task`` via the module-level ``scores_path``.
    NOTE: uses ``argmax`` (first maximum on ties) instead of the old
    ``argsort()[-1:]`` slice-and-loop, which only ever used its single entry.
    This definition duplicates an earlier identical one in the file.
    """
    (_, acc_dict) = collect_scores(scores_path)
    acc_array = acc_dict[task].flatten()
    best = int(acc_array.argmax())
    # Grid is flattened row-major: row = pretraining seed, col = finetuning seed.
    return (best // 10, best % 10)
|
def ftvft_sub_df(df, task, ref_depth):
    """Rows of ``df`` at layer ``ref_depth`` where either endpoint is the best seed pair."""
    (best_pre, best_fine) = best_seed_pair(task)
    same_layer = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    first_is_best = (df.pre_seed1 == best_pre) & (df.fine_seed1 == best_fine)
    second_is_best = (df.pre_seed2 == best_pre) & (df.fine_seed2 == best_fine)
    return df[same_layer & (first_is_best | second_is_best)]
|
def qs(xs):
    """Map each value of ``xs`` to its in-sample quantile (rank percentile / 100)."""
    return np.array([pc(xs, x, 'rank') / 100 for x in xs])
|
def plot_rank_corrs(rho, rho_p, tau, tau_p, METRICS, scatter=False, title=''):
    """Plot Spearman/Kendall correlations and their p-values in a 2x2 grid.

    Each of ``rho``/``rho_p``/``tau``/``tau_p`` maps a metric name to a list of
    per-layer values. With ``scatter=True`` the raw per-layer values are shown
    with their means overlaid; otherwise only mean bars are drawn. The p-value
    panels use a log y-scale.

    Refactor: the four previously copy-pasted panel sections are folded into
    one helper, preserving the exact drawing calls per panel.
    """
    def _panel(axis, data, panel_title, log_scale=False):
        # One quadrant: scatter-with-means or mean bars, shared tick labelling.
        positions = list(range(len(METRICS)))
        means = [np.mean(data[metric]) for metric in METRICS]
        if scatter:
            (x, y) = ([], [])
            for (i, metric) in enumerate(METRICS):
                x += (len(data[metric]) * [i])
                y += data[metric]
            axis.scatter(x, y)
            axis.scatter(positions, means)
        else:
            axis.bar(x=positions, height=means)
        axis.set_title(panel_title)
        axis.set_xticks(positions)
        axis.set_xticklabels(METRICS)
        if log_scale:
            axis.set_yscale('log')

    (fig, ax) = plt.subplots(2, 2, figsize=(10, 10))
    fig.suptitle(title)
    _panel(ax[(0, 0)], rho, "Spearman's rho")
    _panel(ax[(0, 1)], rho_p, "Spearman's rho: p-values", log_scale=True)
    _panel(ax[(1, 0)], tau, "Kendall's tau")
    _panel(ax[(1, 1)], tau_p, "Kendall's tau: p-values", log_scale=True)
    plt.show()
|
def get_rank_corrs(sub_df, metric, task):
    """Rank correlations between a distance metric and per-task accuracy differences.

    Returns ``(spearman_r, spearman_one_sided_p, kendall_tau,
    kendall_one_sided_p, bad_frac)`` where ``bad_frac`` is the fraction of
    points with a low metric quantile (< 0.2) but a high accuracy-difference
    quantile (> 0.8).
    """
    xs = sub_df[metric]
    ys = sub_df[f'{task}_diff']

    def _one_sided(corr, two_sided_p):
        # Halve the p-value when the correlation is positive; flip otherwise.
        return two_sided_p / 2 if corr > 0 else 1 - two_sided_p / 2

    rho = spearmanr(xs, ys)
    tau = kendalltau(xs, ys)
    q_x = qs(xs)
    q_y = qs(ys)
    bad_frac = np.mean((q_x < 0.2) * (q_y > 0.8))
    return (rho.correlation, _one_sided(rho.correlation, rho.pvalue),
            tau.correlation, _one_sided(tau.correlation, tau.pvalue), bad_frac)
|
def aggregate_rank_corrs(full_df, task, num_layers, METRICS, sub_df_fn, list_layers=None):
    """Collect per-layer rank correlations of every metric against ``task`` accuracy diffs.

    ``sub_df_fn(full_df, task, ref_depth)`` selects the relevant rows for each
    layer; results are accumulated per metric. Returns the five dicts
    ``(rho, rho_p, tau, tau_p, bad_fracs)``, each mapping metric -> list over layers.
    """
    if list_layers is None:  # fix: compare to None with `is`, not `==`
        list_layers = list(range(num_layers))
    rho = {metric: [] for metric in METRICS}
    rho_p = {metric: [] for metric in METRICS}
    tau = {metric: [] for metric in METRICS}
    tau_p = {metric: [] for metric in METRICS}
    bad_fracs = {metric: [] for metric in METRICS}
    for ref_depth in list_layers:
        sub_df = sub_df_fn(full_df, task, ref_depth)
        for metric in METRICS:
            (rho_corr, rho_os_p, tau_corr, tau_os_p, bad_frac) = get_rank_corrs(sub_df, metric, task)
            rho[metric].append(rho_corr)
            rho_p[metric].append(rho_os_p)
            tau[metric].append(tau_corr)
            tau_p[metric].append(tau_os_p)
            bad_fracs[metric].append(bad_frac)
    return (rho, rho_p, tau, tau_p, bad_fracs)
|
def init_fourier_(tensor, norm='ortho'):
    """Initialise a convolution weight with the Inverse Fourier Transform.

    Expects ``tensor`` of shape (nc_out, nc_in, N, kernel_size) with
    nc_out == 2*N and nc_in == 2 (real/imaginary channels). The IDFT
    coefficients are written into the centre tap of the kernel; with
    ``norm='ortho'`` the whole tensor is scaled by 1/sqrt(N). Returns the
    tensor (modified in place).
    """
    with torch.no_grad():
        (nc_out, nc_in, N, kernel_size) = tensor.shape
        centre = kernel_size // 2
        for k in range(N):
            for n in range(N):
                # Hoisted: the same angle's cos/sin were computed four times.
                angle = 2 * np.pi * n * k / N
                c = np.cos(angle)
                s = np.sin(angle)
                tensor.data[(k, 0, n, centre)] = c
                tensor.data[(k, 1, n, centre)] = -s
                tensor.data[((k + N), 0, n, centre)] = s
                tensor.data[((k + N), 1, n, centre)] = c
        if (norm == 'ortho'):
            tensor.data[...] = (tensor.data[...] / np.sqrt(N))
    return tensor
|
def init_fourier_2d(N, M, inverse=True, norm='ortho', out_tensor=None, complex_type=np.complex64):
    """Build the real-valued block matrix of a 2D (inverse) Fourier transform.

    Parameters
    ----------
    N, M : number of rows and columns.
    inverse : if True (default), build the inverse transform (positive exponent).
    norm : 'ortho' or None.
    out_tensor : optional torch.Tensor receiving the matrix in place.
    complex_type : numpy complex dtype of the intermediate DFT matrices.

    Returns the filled ``out_tensor`` when given, otherwise the numpy matrix of
    shape (2*N*M, 2*N*M) laid out as [[Re, -Im], [Im, Re]].
    """
    sign = 1 if inverse else -1

    def _one_dim_dft(size):
        # Dense 1D (i)DFT matrix: exp(sign * 2*pi*i * row*col / size).
        mat = np.zeros((size, size), dtype=complex_type)
        for (row, col) in itertools.product(range(size), range(size)):
            mat[(row, col)] = np.exp(sign * 2 * np.pi * 1j * (row * col / size))
        return mat

    # The 2D transform is the Kronecker product of the two 1D transforms.
    mat_kron = np.kron(_one_dim_dft(N), _one_dim_dft(M))
    real_part = np.real(mat_kron)
    imag_part = np.imag(mat_kron)
    mat_split = np.block([[real_part, -imag_part], [imag_part, real_part]])
    if norm == 'ortho':
        mat_split /= np.sqrt(N * M)
    elif inverse:
        mat_split /= N * M
    if out_tensor is not None:
        out_tensor.data[...] = torch.Tensor(mat_split)
        return out_tensor
    return mat_split
|
def init_noise_(tensor, init):
    """Apply the named ``torch.nn.init`` scheme in place, or zero the tensor when ``init`` is falsy."""
    with torch.no_grad():
        if init:
            return getattr(torch.nn.init, init)(tensor)
        return tensor.zero_()
|
class GeneralisedIFT2Layer(nn.Module):
    # Generalised 2D inverse-Fourier-transform layer built from two learnable
    # "1D" transforms (each implemented as a Conv2d consuming one full axis).

    def __init__(self, nrow, ncol, nch_in, nch_int=None, nch_out=None, kernel_size=1, nl=None, init_fourier=True, init=None, bias=False, batch_norm=False, share_tfxs=False, learnable=True):
        """Generalised domain transform layer.

        The layer can be initialised as a Fourier transform if
        nch_in == nch_int == nch_out == 2 and ``init_fourier`` is True. It can
        also be initialised as Fourier transform plus noise by combining
        ``init_fourier=True`` with e.g. ``init='kaiming'``. If a nonlinearity
        ``nl`` is used, it is recommended to set ``bias=True``. Setting
        nch_in == nch_int == nch_out == 2 with ``learnable=False`` gives a
        fixed 2D Fourier transform.

        Parameters
        ----------
        nrow, ncol : int - number of input rows / columns
        nch_in : int - input channels (e.g. real & complex, coils, frames, ...)
        nch_int : int - intermediate channels after the first 1D transform
            (defaults to nch_in)
        nch_out : int - output channels (defaults to nch_in)
        kernel_size : int - kernel size along the second axis of each 1D transform
        nl : {'tanh', 'relu', 'sigmoid'} or None - nonlinearity between the two transforms
        init_fourier : bool - initialise kernels with the inverse Fourier transform
        init : str or None - name of a torch.nn.init scheme (e.g. 'kaiming')
        bias : bool - add bias to each 1D transform
        batch_norm : bool - batch-normalise after each transform
        share_tfxs : bool - share the two transforms (only applied when nrow == ncol)
        learnable : bool - whether the kernels receive gradients
        """
        super(GeneralisedIFT2Layer, self).__init__()
        self.nrow = nrow
        self.ncol = ncol
        self.nch_in = nch_in
        self.nch_int = nch_int
        self.nch_out = nch_out
        self.kernel_size = kernel_size
        self.init_fourier = init_fourier
        self.init = init
        self.nl = nl
        # Default the intermediate/output channel counts to the input's.
        if (not self.nch_int):
            self.nch_int = self.nch_in
        if (not self.nch_out):
            self.nch_out = self.nch_in
        # Each "1D transform" is a conv whose kernel spans one whole spatial
        # axis; its output channels encode (channels x axis length) and are
        # reshaped back to a spatial axis in forward().
        idft1 = torch.nn.Conv2d(self.nch_in, (self.nch_int * self.nrow), (self.nrow, kernel_size), padding=(0, (kernel_size // 2)), bias=bias)
        idft2 = torch.nn.Conv2d(self.nch_int, (self.nch_out * self.ncol), (self.ncol, kernel_size), padding=(0, (kernel_size // 2)), bias=bias)
        # Noise (or zero) init first; optionally overlaid with Fourier weights below.
        init_noise_(idft1.weight, self.init)
        init_noise_(idft2.weight, self.init)
        if self.init_fourier:
            # Fourier initialisation only makes sense for 2-channel (re/im) data.
            if (not (self.nch_in == self.nch_int == self.nch_out == 2)):
                raise ValueError
            if self.init:
                # Normalise the noise before writing the Fourier structure on top.
                idft1.weight.data = F.normalize(idft1.weight.data, dim=2)
                idft2.weight.data = F.normalize(idft2.weight.data, dim=2)
            init_fourier_(idft1.weight)
            init_fourier_(idft2.weight)
        self.idft1 = idft1
        self.idft2 = idft2
        # Optionally share the two transforms (square inputs only).
        if (share_tfxs and (nrow == ncol)):
            self.idft2 = self.idft1
        self.learnable = learnable
        self.set_learnable(self.learnable)
        self.batch_norm = batch_norm
        if self.batch_norm:
            self.bn1 = torch.nn.BatchNorm2d(self.nch_int)
            self.bn2 = torch.nn.BatchNorm2d(self.nch_out)

    def forward(self, X):
        """Apply the two 1D transforms (with optional BN / nonlinearity).

        Expects X of shape (batch, nch_in, nrow, ncol); returns a tensor of
        shape (batch, nch_out, nrow, ncol).
        """
        batch_size = len(X)
        # First transform collapses the row axis into channels; restore the
        # spatial layout, then swap axes so the second transform acts on the
        # other dimension.
        x_t = self.idft1(X)
        x_t = x_t.reshape([batch_size, self.nch_int, self.nrow, self.ncol]).permute(0, 1, 3, 2)
        if self.batch_norm:
            x_t = self.bn1(x_t.contiguous())
        if self.nl:
            if (self.nl == 'tanh'):
                x_t = F.tanh(x_t)
            elif (self.nl == 'relu'):
                x_t = F.relu(x_t)
            elif (self.nl == 'sigmoid'):
                x_t = F.sigmoid(x_t)
            else:
                raise ValueError
        x_t = self.idft2(x_t)
        # Permute back to (batch, ch, nrow, ncol).
        x_t = x_t.reshape([batch_size, self.nch_out, self.ncol, self.nrow]).permute(0, 1, 3, 2)
        if self.batch_norm:
            x_t = self.bn2(x_t.contiguous())
        return x_t

    def set_learnable(self, flag=True):
        # Freeze or unfreeze both transform kernels.
        self.learnable = flag
        self.idft1.weight.requires_grad = flag
        self.idft2.weight.requires_grad = flag
|
def get_refinement_block(model='automap_scae', in_channel=1, out_channel=1):
    """Build the CNN refinement head applied after the domain transform.

    ``'automap_scae'`` is the sparse convolutional autoencoder from the AUTOMAP
    paper; ``'simple'`` is a plain 5-conv stack. Raises NotImplementedError for
    any other name.
    """
    if model == 'automap_scae':
        layers = [
            nn.Conv2d(in_channel, 64, 5, 1, 2), nn.ReLU(True),
            nn.Conv2d(64, 64, 5, 1, 2), nn.ReLU(True),
            nn.ConvTranspose2d(64, out_channel, 7, 1, 3),
        ]
    elif model == 'simple':
        layers = [nn.Conv2d(in_channel, 64, 3, 1, 1), nn.ReLU(True)]
        # Three more 64->64 conv+ReLU stages, then the output projection.
        for _ in range(3):
            layers += [nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True)]
        layers.append(nn.Conv2d(64, out_channel, 3, 1, 1))
    else:
        raise NotImplementedError
    return nn.Sequential(*layers)
|
class AUTOMAP(nn.Module):
    """Pytorch implementation of AUTOMAP [1].

    Reference:
    ----------
    [1] Zhu et al., AUTOMAP, Nature 2018. <url:https://www.nature.com/articles/nature25988.pdf>
    """

    def __init__(self, input_shape, output_shape, init_fc2_fourier=False, init_fc3_fourier=False):
        super(AUTOMAP, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.ndim = input_shape[(- 1)]
        self.input_reshape = int(np.prod(self.input_shape))
        self.output_reshape = int(np.prod(self.output_shape))
        # Two fully connected layers implementing the learned domain transform.
        self.domain_transform = nn.Linear(self.input_reshape, self.output_reshape)
        self.domain_transform2 = nn.Linear(self.output_reshape, self.output_reshape)
        if (init_fc2_fourier or init_fc3_fourier):
            if (input_shape != output_shape):
                raise ValueError('To initialise the kernels with Fourier transform, the input and output shapes must be the same')
        # BUGFIX: the weight tensor must be passed as ``out_tensor=``.
        # Previously it was passed positionally into the ``inverse`` parameter,
        # so init_fourier_2d never touched the weights and its result was discarded.
        if init_fc2_fourier:
            init_fourier_2d(input_shape[(- 2)], input_shape[(- 1)], out_tensor=self.domain_transform.weight)
        if init_fc3_fourier:
            init_fourier_2d(input_shape[(- 2)], input_shape[(- 1)], out_tensor=self.domain_transform2.weight)
        self.sparse_convolutional_autoencoder = get_refinement_block('automap_scae', output_shape[0], output_shape[0])

    def forward(self, x):
        """Expects input_shape (batch_size, 2, ndim, ndim)."""
        batch_size = len(x)
        # Flatten, run the two tanh-activated FC transforms, then refine as an image.
        x = x.reshape(batch_size, int(np.prod(self.input_shape)))
        x = F.tanh(self.domain_transform(x))
        x = F.tanh(self.domain_transform2(x))
        x = x.reshape((- 1), *self.output_shape)
        x = self.sparse_convolutional_autoencoder(x)
        return x
|
class dAUTOMAP(nn.Module):
    """Pytorch implementation of dAUTOMAP.

    Decomposes the automap kernel into 2 Generalised "1D" transforms to make it scalable.
    """

    def __init__(self, input_shape, output_shape, tfx_params, tfx_params2=None):
        super(dAUTOMAP, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        # The second transform defaults to the same configuration as the first.
        second_params = tfx_params if tfx_params2 is None else tfx_params2
        self.domain_transform = GeneralisedIFT2Layer(**tfx_params)
        self.domain_transform2 = GeneralisedIFT2Layer(**second_params)
        self.refinement_block = get_refinement_block('automap_scae', input_shape[0], output_shape[0])

    def forward(self, x):
        """Assumes input to be (batch_size, 2, nrow, ncol)."""
        hidden = F.tanh(self.domain_transform(x))
        hidden = F.tanh(self.domain_transform2(hidden))
        return self.refinement_block(hidden)
|
class dAUTOMAPExt(nn.Module):
    """dAUTOMAP with adjustable depth and nonlinearity.

    Decomposes the automap kernel into generalised "1D" transforms to make it
    scalable.

    Parameters
    ----------
    input_shape: tuple (n_channel, nx, ny)
    output_shape: tuple (n_channel, nx, ny)
    tfx_params: dict or list of dict. A list must provide one parameter dict
        per layer; a single dict is shared across all ``depth`` layers.
    depth: int (default: 2)
    nl: name of the torch.nn.functional nonlinearity applied after each layer.
    """

    def __init__(self, input_shape, output_shape, tfx_params=None, depth=2, nl='tanh'):
        super(dAUTOMAPExt, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.depth = depth
        self.nl = nl
        if isinstance(tfx_params, list):
            if self.depth and self.depth != len(tfx_params):
                raise ValueError('Depth and the length of tfx_params must be the same')
        else:
            # Replicate the shared configuration for every layer.
            tfx_params = [tfx_params] * self.depth
        self.domain_transforms = nn.ModuleList(
            [GeneralisedIFT2Layer(**params) for params in tfx_params])
        self.refinement_block = get_refinement_block('automap_scae', input_shape[0], output_shape[0])

    def forward(self, x):
        """Assumes input to be (batch_size, 2, nrow, ncol)."""
        for layer_index in range(self.depth):
            x = self.domain_transforms[layer_index](x)
            x = getattr(F, self.nl)(x)
        return self.refinement_block(x)
|
class ProgressLogger(Callback):
    """Lightning callback that logs selected metrics at the end of every train epoch."""

    def __init__(self, metric_monitor: dict, precision: int=3):
        # metric_monitor maps display names -> keys in trainer.callback_metrics.
        self.metric_monitor = metric_monitor
        self.precision = precision

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        logger.info('Training started')

    def on_train_end(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        logger.info('Training done')

    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        if trainer.sanity_checking:
            logger.info('Sanity checking ok.')

    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule, padding=False, **kwargs) -> None:
        template = f'{{:.{self.precision}e}}'
        line = f'Epoch {trainer.current_epoch}'
        if padding:
            # Right-align so consecutive epoch lines stay visually aligned.
            line = f"{line:>{len('Epoch xxxx')}}"
        available = trainer.callback_metrics
        rendered = [
            f'{display} {template.format(available[key].item())}'
            for (display, key) in self.metric_monitor.items()
            if key in available
        ]
        if not rendered:
            return
        memory = f'Memory {psutil.virtual_memory().percent}%'
        logger.info(line + ': ' + ' '.join(rendered) + ' ' + memory)
|
def get_module_config(cfg_model, path='modules'):
    """Merge every YAML file found under ./configs/<path>/ into ``cfg_model``."""
    config_dir = f'./configs/{path}/'
    for entry in os.listdir(config_dir):
        if not entry.endswith('.yaml'):
            continue
        with open(config_dir + entry, 'r') as handle:
            cfg_model.merge_with(OmegaConf.load(handle))
    return cfg_model
|
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path ``'pkg.mod.Name'`` to the attribute it names.

    When ``reload`` is True the containing module is re-imported first.
    """
    (module_path, attr_name) = string.rsplit('.', 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
|
def instantiate_from_config(config):
    """Instantiate the object named by ``config['target']`` with ``config['params']``.

    The two sentinel strings used for first-stage / unconditional models yield
    None; any other config without a 'target' key raises KeyError.
    """
    if 'target' not in config:
        if config in ('__is_first_stage__', '__is_unconditional__'):
            return None
        raise KeyError('Expected key `target` to instantiate.')
    target_cls = get_obj_from_str(config['target'])
    return target_cls(**config.get('params', dict()))
|
def parse_args(phase='train'):
    """Parse CLI arguments for ``phase`` and merge them into the OmegaConf config.

    Phases 'train'/'test'/'demo' share the base option group; 'demo' and
    'render' add their own flags. Returns the merged config
    (base <- experiment <- per-module <- assets, then CLI overrides).
    """
    parser = ArgumentParser()
    group = parser.add_argument_group('Training options')
    if (phase in ['train', 'test', 'demo']):
        group.add_argument('--cfg', type=str, required=False, default='./configs/config.yaml', help='config file')
        group.add_argument('--cfg_assets', type=str, required=False, default='./configs/assets.yaml', help='config file for asset paths')
        group.add_argument('--batch_size', type=int, required=False, help='training batch size')
        group.add_argument('--device', type=int, nargs='+', required=False, help='training device')
        group.add_argument('--nodebug', action='store_true', required=False, help='debug or not')
        group.add_argument('--dir', type=str, required=False, help='evaluate existing npys')
    if (phase == 'demo'):
        group.add_argument('--render', action='store_true', help='Render visulizaed figures')
        group.add_argument('--render_mode', type=str, help='video or sequence')
        group.add_argument('--frame_rate', type=float, default=12.5, help='the frame rate for the input/output motion')
        group.add_argument('--replication', type=int, default=1, help='the frame rate for the input/output motion')
        group.add_argument('--example', type=str, required=False, help='input text and lengths with txt format')
        group.add_argument('--task', type=str, required=False, help='random_sampling, reconstrucion or text_motion')
        group.add_argument('--out_dir', type=str, required=False, help='output dir')
        group.add_argument('--allinone', action='store_true', required=False, help='output seperate or combined npy file')
    if (phase == 'render'):
        group.add_argument('--cfg', type=str, required=False, default='./configs/render.yaml', help='config file')
        group.add_argument('--cfg_assets', type=str, required=False, default='./configs/assets.yaml', help='config file for asset paths')
        group.add_argument('--npy', type=str, required=False, default=None, help='npy motion files')
        group.add_argument('--dir', type=str, required=False, default=None, help='npy motion folder')
        group.add_argument('--mode', type=str, required=False, default='sequence', help='render target: video, sequence, frame')
        group.add_argument('--joint_type', type=str, required=False, default=None, help='mmm or vertices for skeleton')
    params = parser.parse_args()
    # Layer the configs: base <- experiment <- per-module <- assets.
    cfg_base = OmegaConf.load('./configs/base.yaml')
    cfg_exp = OmegaConf.merge(cfg_base, OmegaConf.load(params.cfg))
    cfg_model = get_module_config(cfg_exp.model, cfg_exp.model.target)
    cfg_assets = OmegaConf.load(params.cfg_assets)
    cfg = OmegaConf.merge(cfg_exp, cfg_model, cfg_assets)
    if (phase in ['train', 'test']):
        cfg.TRAIN.BATCH_SIZE = (params.batch_size if params.batch_size else cfg.TRAIN.BATCH_SIZE)
        cfg.DEVICE = (params.device if params.device else cfg.DEVICE)
        # BUGFIX: --nodebug is a store_true flag, so it is never None; the old
        # `is not None` test always overrode the config value. Only override
        # DEBUG when the flag was actually passed.
        if params.nodebug:
            cfg.DEBUG = False
        if (phase == 'test'):
            # (Folds in the previously duplicated `cfg.DEBUG = False` line.)
            cfg.DEBUG = False
            cfg.DEVICE = [0]
            print('Force no debugging and one gpu when testing')
        cfg.TEST.TEST_DIR = (params.dir if params.dir else cfg.TEST.TEST_DIR)
    if (phase == 'demo'):
        cfg.DEMO.RENDER = params.render
        cfg.DEMO.FRAME_RATE = params.frame_rate
        cfg.DEMO.EXAMPLE = params.example
        cfg.DEMO.TASK = params.task
        # BUGFIX: the condition tested params.dir but assigned params.out_dir.
        cfg.TEST.FOLDER = (params.out_dir if params.out_dir else cfg.TEST.FOLDER)
        cfg.DEMO.REPLICATION = params.replication
        cfg.DEMO.OUTALL = params.allinone
    if (phase == 'render'):
        if params.npy:
            cfg.RENDER.NPY = params.npy
            cfg.RENDER.INPUT_MODE = 'npy'
        if params.dir:
            cfg.RENDER.DIR = params.dir
            cfg.RENDER.INPUT_MODE = 'dir'
        cfg.RENDER.JOINT_TYPE = params.joint_type
        cfg.RENDER.MODE = params.mode
    if cfg.DEBUG:
        # Debug runs get a tagged name, offline W&B logging and frequent validation.
        cfg.NAME = ('debug--' + cfg.NAME)
        cfg.LOGGER.WANDB.OFFLINE = True
        cfg.LOGGER.VAL_EVERY_STEPS = 1
    return cfg
|
class HumanML3DDataModule(BASEDataModule):
    """Data module for the HumanML3D text-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'humanml3d'
        self.njoints = 22
        # Text-only phase swaps in the lighter dataset class.
        self.Dataset = TextOnlyDataset if phase == 'text_only' else Text2MotionDatasetV2
        self.cfg = cfg
        # Tiny validation sample used only to discover the feature dimensionality.
        sample_overrides = {'split': 'val', 'tiny': True, 'progress_bar': False}
        self._sample_set = self.get_sample_set(overrides=sample_overrides)
        self.nfeats = self._sample_set.nfeats

    def feats2joints(self, features):
        """De-normalise features and recover joint positions."""
        mean = torch.tensor(self.hparams.mean).to(features)
        std = torch.tensor(self.hparams.std).to(features)
        return recover_from_ric(features * std + mean, self.njoints)

    def joints2feats(self, features):
        """Convert raw joints into the HumanML3D feature representation."""
        return process_file(features, self.njoints)[0]

    def renorm4t2m(self, features):
        """Re-normalise features from training statistics to evaluator statistics."""
        ori_mean = torch.tensor(self.hparams.mean).to(features)
        ori_std = torch.tensor(self.hparams.std).to(features)
        eval_mean = torch.tensor(self.hparams.mean_eval).to(features)
        eval_std = torch.tensor(self.hparams.std_eval).to(features)
        denormed = features * ori_std + ori_mean
        return (denormed - eval_mean) / eval_std

    def mm_mode(self, mm_on=True):
        """Toggle multimodality evaluation: restrict the test set to a random subset."""
        if mm_on:
            self.is_mm = True
            self.name_list = self.test_dataset.name_list
            self.mm_list = np.random.choice(self.name_list, self.cfg.TEST.MM_NUM_SAMPLES, replace=False)
            self.test_dataset.name_list = self.mm_list
        else:
            self.is_mm = False
            self.test_dataset.name_list = self.name_list
|
class Humanact12DataModule(BASEDataModule):
    """Data module for the HumanAct12 action-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'HumanAct12'
        self.Dataset = HumanAct12Poses
        self.cfg = cfg
        # NOTE(review): these overrides are currently unused — no
        # get_sample_set call follows, unlike the other data modules.
        sample_overrides = {'num_seq_max': 2, 'split': 'test', 'tiny': True, 'progress_bar': False}
        # Fixed dataset geometry: 25 joints -> 150 features, 12 action classes.
        self.nfeats = 150
        self.njoints = 25
        self.nclasses = 12
|
class KitDataModule(BASEDataModule):
    """Data module for the KIT Motion-Language dataset."""

    def __init__(self, cfg, phase='train', collate_fn=all_collate, batch_size: int=32, num_workers: int=16, **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'kit'
        self.njoints = 21
        # Text-only phase swaps in the lighter dataset class.
        self.Dataset = TextOnlyDataset if phase == 'text_only' else Text2MotionDatasetV2
        self.cfg = cfg
        # Tiny validation sample used only to discover the feature dimensionality.
        sample_overrides = {'split': 'val', 'tiny': True, 'progress_bar': False}
        self._sample_set = self.get_sample_set(overrides=sample_overrides)
        self.nfeats = self._sample_set.nfeats

    def feats2joints(self, features):
        """De-normalise features and recover joint positions."""
        mean = torch.tensor(self.hparams.mean).to(features)
        std = torch.tensor(self.hparams.std).to(features)
        return recover_from_ric(features * std + mean, self.njoints)

    def renorm4t2m(self, features):
        """Re-normalise features from training statistics to evaluator statistics."""
        ori_mean = torch.tensor(self.hparams.mean).to(features)
        ori_std = torch.tensor(self.hparams.std).to(features)
        eval_mean = torch.tensor(self.hparams.mean_eval).to(features)
        eval_std = torch.tensor(self.hparams.std_eval).to(features)
        denormed = features * ori_std + ori_mean
        return (denormed - eval_mean) / eval_std

    def mm_mode(self, mm_on=True):
        """Toggle multimodality evaluation: restrict the test set to a random subset."""
        if mm_on:
            self.is_mm = True
            self.name_list = self.test_dataset.name_list
            self.mm_list = np.random.choice(self.name_list, self.cfg.TEST.MM_NUM_SAMPLES, replace=False)
            self.test_dataset.name_list = self.mm_list
        else:
            self.is_mm = False
            self.test_dataset.name_list = self.name_list
|
class UestcDataModule(BASEDataModule):
    """Data module for the UESTC action-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, method_name='vibe', phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'Uestc'
        self.Dataset = UESTC
        self.cfg = cfg
        # Fixed dataset geometry: 25 joints -> 150 features, 40 action classes.
        self.nfeats = 150
        self.njoints = 25
        self.nclasses = 40
|
class HumanAct12Poses(Dataset):
    """HumanAct12 dataset of SMPL pose sequences with coarse action labels."""

    dataname = 'humanact12'

    def __init__(self, datapath='data/HumanAct12Poses', **kargs):
        self.datapath = datapath
        super().__init__(**kargs)
        pkldatafilepath = os.path.join(datapath, 'humanact12poses.pkl')
        with rich.progress.open(pkldatafilepath, 'rb', description='loading humanact12 pkl') as f:
            data = pkl.load(f)
        self._pose = list(data['poses'])
        self._num_frames_in_video = [p.shape[0] for p in self._pose]
        self._joints = list(data['joints3D'])
        self._actions = list(data['y'])
        total_num_actions = 12
        self.num_classes = total_num_actions
        self._train = list(range(len(self._pose)))
        keep_actions = np.arange(0, total_num_actions)
        # Identity mappings between raw action ids and class labels.
        self._action_to_label = {action: label for (label, action) in enumerate(keep_actions)}
        self._label_to_action = dict(enumerate(keep_actions))
        self._action_classes = humanact12_coarse_action_enumerator

    def _load_joints3D(self, ind, frame_ix):
        # 3D joint positions for the selected frames of sequence ``ind``.
        return self._joints[ind][frame_ix]

    def _load_rotvec(self, ind, frame_ix):
        # Axis-angle pose reshaped to (n_frames, 24 joints, 3).
        return self._pose[ind][frame_ix].reshape(-1, 24, 3)
|
def parse_info_name(path):
    """Parse a filename stem like ``a12b3`` into ``{'a': '12', 'b': '3'}``.

    Each ASCII letter starts a new key; subsequent non-letter characters are
    accumulated as that key's value.
    """
    stem = os.path.splitext(os.path.split(path)[-1])[0]
    collected = {}
    current_key = None
    for ch in stem:
        if ch in string.ascii_letters:
            current_key = ch
            collected[current_key] = []
        else:
            collected[current_key].append(ch)
    return {key: ''.join(chars) for (key, chars) in collected.items()}
|
def to_numpy(tensor):
    """Convert a torch tensor to numpy; numpy arrays pass through unchanged."""
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ == 'numpy':
        return tensor
    raise ValueError('Cannot convert {} to numpy array'.format(type(tensor)))
|
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; tensors pass through unchanged."""
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if torch.is_tensor(ndarray):
        return ndarray
    raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
|
def cleanexit():
    """Terminate the process, forcing exit even if SystemExit is intercepted upstream."""
    import os
    import sys
    try:
        sys.exit(0)
    except SystemExit:
        # sys.exit raises SystemExit; os._exit bypasses any remaining handlers.
        os._exit(0)
|
def lengths_to_mask(lengths):
    """Boolean mask of shape (batch, max_len): True where position < sequence length."""
    max_len = max(lengths)
    positions = torch.arange(max_len, device=lengths.device)
    return positions.expand(len(lengths), max_len) < lengths.unsqueeze(1)
|
def collate_tensors(batch):
    """Stack variably-sized tensors into one zero-padded batch tensor."""
    dims = batch[0].dim()
    # The per-dimension maximum across the batch sets the canvas size.
    max_size = [max(b.size(d) for b in batch) for d in range(dims)]
    canvas = batch[0].new_zeros(size=(len(batch),) + tuple(max_size))
    for (idx, item) in enumerate(batch):
        view = canvas[idx]
        # Narrow down to this item's extent in every dimension, then write it.
        for d in range(dims):
            view = view.narrow(d, 0, item.size(d))
        view.add_(item)
    return canvas
|
def collate(batch):
    """Collate (data, label) pairs into padded tensors plus masks and lengths."""
    data_tensor = collate_tensors([item[0] for item in batch])
    label_tensor = torch.as_tensor([item[1] for item in batch])
    # Sequence length is read from the innermost axis of each datum.
    length_tensor = torch.as_tensor([len(item[0][0][0]) for item in batch])
    return {'x': data_tensor,
            'y': label_tensor,
            'mask': lengths_to_mask(length_tensor),
            'lengths': length_tensor}
|
class BASEDataModule(pl.LightningDataModule):
    """Shared Lightning data-module plumbing: lazy datasets and dataloaders."""

    def __init__(self, collate_fn, batch_size: int, num_workers: int):
        super().__init__()
        self.dataloader_options = {'batch_size': batch_size, 'num_workers': num_workers, 'collate_fn': collate_fn}
        self.persistent_workers = True
        self.is_mm = False

    def _cfg_lookup(self, dotted):
        """Resolve a dotted attribute path on ``self.cfg`` (replaces eval() on f-strings)."""
        node = self.cfg
        for part in dotted.split('.'):
            node = getattr(node, part)
        return node

    def get_sample_set(self, overrides=None):
        """Build a small sample dataset; ``overrides`` update the hyperparameters.

        ``overrides`` defaults to None (fix: was a shared mutable default dict).
        """
        sample_params = self.hparams.copy()
        sample_params.update(overrides or {})
        split_file = pjoin(self._cfg_lookup(f'DATASET.{self.name.upper()}.SPLIT_ROOT'),
                           (self.cfg.EVAL.SPLIT + '.txt'))
        return self.Dataset(split_file=split_file, **sample_params)

    def __getattr__(self, item):
        # Lazily construct `<subset>_dataset` attributes (train/val/test/...),
        # caching them under a leading-underscore name.
        if (item.endswith('_dataset') and (not item.startswith('_'))):
            subset = item[:(- len('_dataset'))]
            item_c = ('_' + item)
            if (item_c not in self.__dict__):
                # The validation split is configured under cfg.EVAL.
                subset = (subset.upper() if (subset != 'val') else 'EVAL')
                split = self._cfg_lookup(f'{subset}.SPLIT')
                split_file = pjoin(self._cfg_lookup(f'DATASET.{self.name.upper()}.SPLIT_ROOT'),
                                   (split + '.txt'))
                self.__dict__[item_c] = self.Dataset(split_file=split_file, split=split, **self.hparams)
            return getattr(self, item_c)
        classname = self.__class__.__name__
        raise AttributeError(f"'{classname}' object has no attribute '{item}'")

    def setup(self, stage=None):
        # Touch the lazy dataset attributes so they are built at the right time.
        self.stage = stage
        if (stage in (None, 'fit')):
            _ = self.train_dataset
            _ = self.val_dataset
        if (stage in (None, 'test')):
            _ = self.test_dataset

    def _eval_options(self, cfg_section, mm_aware):
        """Dataloader options for evaluation-style loaders (never shuffled)."""
        options = self.dataloader_options.copy()
        # Multimodality evaluation runs with batch size 1.
        options['batch_size'] = 1 if (mm_aware and self.is_mm) else cfg_section.BATCH_SIZE
        options['num_workers'] = cfg_section.NUM_WORKERS
        options['shuffle'] = False
        return options

    def train_dataloader(self):
        return DataLoader(self.train_dataset, shuffle=True, persistent_workers=True, **self.dataloader_options)

    def predict_dataloader(self):
        return DataLoader(self.test_dataset, persistent_workers=True,
                          **self._eval_options(self.cfg.TEST, mm_aware=True))

    def val_dataloader(self):
        return DataLoader(self.val_dataset, persistent_workers=True,
                          **self._eval_options(self.cfg.EVAL, mm_aware=False))

    def test_dataloader(self):
        return DataLoader(self.test_dataset, persistent_workers=True,
                          **self._eval_options(self.cfg.TEST, mm_aware=True))
|
def get_mean_std(phase, cfg, dataset_name):
    """Load normalisation mean/std arrays for a dataset.

    The 'val' phase loads the text-to-motion evaluator's statistics; other
    phases load the dataset's own statistics from its configured root.
    Returns ``(mean, std)`` as numpy arrays.
    """
    # HumanML3D reuses the 't2m' evaluator directory name.
    name = ('t2m' if (dataset_name == 'humanml3d') else dataset_name)
    assert (name in ['t2m', 'kit'])
    if (phase in ['val']):
        if (name == 't2m'):
            data_root = pjoin(cfg.model.t2m_path, name, 'Comp_v6_KLD01', 'meta')
        elif (name == 'kit'):
            data_root = pjoin(cfg.model.t2m_path, name, 'Comp_v6_KLD005', 'meta')
        else:
            raise ValueError('Only support t2m and kit')
        mean = np.load(pjoin(data_root, 'mean.npy'))
        std = np.load(pjoin(data_root, 'std.npy'))
    else:
        # Fix: attribute lookup replaces eval() on an f-string (same result, no eval).
        data_root = getattr(cfg.DATASET, dataset_name.upper()).ROOT
        mean = np.load(pjoin(data_root, 'Mean.npy'))
        std = np.load(pjoin(data_root, 'Std.npy'))
    return (mean, std)
|
def get_WordVectorizer(cfg, phase, dataset_name):
    """Return a WordVectorizer for the dataset, or None in text-only phase.

    Raises:
        ValueError: if the dataset is not HumanML3D or KIT.
    """
    if phase in ['text_only']:
        return None
    if dataset_name.lower() not in ['humanml3d', 'kit']:
        raise ValueError('Only support WordVectorizer for HumanML3D')
    return WordVectorizer(cfg.DATASET.WORD_VERTILIZER_PATH, 'our_vab')
|
def get_collate_fn(name, phase='train'):
    """Select the batch collate function for a dataset by (case-insensitive) name.

    NOTE: unrecognized names fall through and return None, matching the
    original behavior — callers are expected to pass a known dataset.
    """
    key = name.lower()
    if key in ('humanml3d', 'kit'):
        return mld_collate
    if key in ('humanact12', 'uestc'):
        return a2m_collate
|
def get_datasets(cfg, logger=None, phase='train'):
    """Instantiate every dataset listed in cfg.<PHASE>.DATASETS.

    Args:
        cfg: global config; also mutated with NFEATS/NJOINTS (and NCLASSES
            for action datasets) discovered from the first dataset.
        logger: unused here; kept for interface compatibility.
        phase: which phase's dataset list to read ('train', 'test', ...).

    Returns:
        List of constructed dataset modules.

    Raises:
        NotImplementedError: for 'amass' or any unknown dataset name.
    """
    # getattr instead of eval(): plain attribute access, no string execution.
    dataset_names = getattr(cfg, phase.upper()).DATASETS
    datasets = []
    for dataset_name in dataset_names:
        key = dataset_name.lower()
        if key in ['humanml3d', 'kit']:
            dataset_cfg = getattr(cfg.DATASET, dataset_name.upper())
            data_root = dataset_cfg.ROOT
            (mean, std) = get_mean_std(phase, cfg, dataset_name)
            (mean_eval, std_eval) = get_mean_std('val', cfg, dataset_name)
            wordVectorizer = get_WordVectorizer(cfg, phase, dataset_name)
            collate_fn = get_collate_fn(dataset_name, phase)
            # KIT uses a hard-coded minimum clip length of 24 frames; the
            # other datasets take it from the sampler config.
            min_motion_length = (24 if key == 'kit'
                                 else cfg.DATASET.SAMPLER.MIN_LEN)
            dataset = dataset_module_map[key](
                cfg=cfg,
                batch_size=cfg.TRAIN.BATCH_SIZE,
                num_workers=cfg.TRAIN.NUM_WORKERS,
                debug=cfg.DEBUG,
                collate_fn=collate_fn,
                mean=mean,
                std=std,
                mean_eval=mean_eval,
                std_eval=std_eval,
                w_vectorizer=wordVectorizer,
                text_dir=pjoin(data_root, 'texts'),
                motion_dir=pjoin(data_root, motion_subdir[dataset_name]),
                max_motion_length=cfg.DATASET.SAMPLER.MAX_LEN,
                min_motion_length=min_motion_length,
                max_text_len=cfg.DATASET.SAMPLER.MAX_TEXT_LEN,
                unit_length=dataset_cfg.UNIT_LEN,
            )
            datasets.append(dataset)
        elif key in ['humanact12', 'uestc']:
            collate_fn = get_collate_fn(dataset_name, phase)
            dataset = dataset_module_map[key](
                datapath=getattr(cfg.DATASET, dataset_name.upper()).ROOT,
                cfg=cfg,
                batch_size=cfg.TRAIN.BATCH_SIZE,
                num_workers=cfg.TRAIN.NUM_WORKERS,
                debug=cfg.DEBUG,
                collate_fn=collate_fn,
                num_frames=cfg.DATASET.HUMANACT12.NUM_FRAMES,
                sampling=cfg.DATASET.SAMPLER.SAMPLING,
                sampling_step=cfg.DATASET.SAMPLER.SAMPLING_STEP,
                pose_rep=cfg.DATASET.HUMANACT12.POSE_REP,
                max_len=cfg.DATASET.SAMPLER.MAX_LEN,
                min_len=cfg.DATASET.SAMPLER.MIN_LEN,
                # Debug runs cap the number of sequences for speed.
                num_seq_max=(cfg.DATASET.SAMPLER.MAX_SQE
                             if not cfg.DEBUG else 100),
                glob=cfg.DATASET.HUMANACT12.GLOB,
                translation=cfg.DATASET.HUMANACT12.TRANSLATION,
            )
            # Action-conditioned datasets expose their class count.
            cfg.DATASET.NCLASSES = dataset.nclasses
            datasets.append(dataset)
        elif key in ['amass']:
            raise NotImplementedError
        else:
            raise NotImplementedError
    # Feature/joint counts are assumed identical across configured datasets;
    # an empty DATASETS list raises IndexError here (as before).
    cfg.DATASET.NFEATS = datasets[0].nfeats
    cfg.DATASET.NJOINTS = datasets[0].njoints
    return datasets
|
def is_float(numStr):
    """Return True iff *numStr* is a signed decimal literal like '-1.25'.

    Digits are required on both sides of the dot, with at most one leading
    sign. Fixes the old lstrip-based sign handling, which stripped repeated
    signs and therefore accepted invalid strings such as '--1.5' or '+-2.0'.
    Surrounding whitespace is ignored; non-string inputs are str()-converted.
    """
    s = str(numStr).strip()
    # fullmatch anchors both ends; no need for a try/except around matching.
    return re.fullmatch(r'[-+]?[0-9]+\.[0-9]+', s) is not None
|
def is_number(numStr):
    """Return True iff *numStr* is an integer literal with at most one sign.

    Fixes the old lstrip-based sign handling, which stripped every leading
    sign character and therefore accepted invalid strings such as '--7'.
    Surrounding whitespace is ignored; non-string inputs are str()-converted.
    """
    s = str(numStr).strip()
    # Remove at most one leading sign, then require all-digit content.
    if s[:1] in ('+', '-'):
        s = s[1:]
    return s.isdigit()
|
def get_opt(opt_path, device):
    """Parse a saved T2M options dump into a Namespace and derive paths.

    Each non-separator line is 'key: value'; values are coerced to
    bool/float/int where they look like one, else kept as strings.

    Args:
        opt_path: path to the options text file.
        device: device object stored on the returned options.

    Returns:
        Namespace with parsed keys plus derived directories and
        dataset-specific constants (joints_num, dim_pose, ...).

    Raises:
        KeyError: if dataset_name is neither 't2m' nor 'kit'.
    """
    opt = Namespace()
    opt_dict = vars(opt)
    skip = ('-------------- End ----------------',
            '------------ Options -------------', '\n')
    print('Reading', opt_path)
    with open(opt_path) as f:
        for line in f:
            if line.strip() not in skip:
                # maxsplit=1 keeps values that themselves contain ': ' intact.
                (key, value) = line.strip().split(': ', 1)
                if value in ('True', 'False'):
                    # BUGFIX: bool('False') is True (non-empty string);
                    # compare the text instead of calling bool().
                    opt_dict[key] = (value == 'True')
                elif is_float(value):
                    opt_dict[key] = float(value)
                elif is_number(value):
                    opt_dict[key] = int(value)
                else:
                    opt_dict[key] = str(value)
    opt_dict['which_epoch'] = 'latest'
    opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    opt.model_dir = pjoin(opt.save_root, 'model')
    opt.meta_dir = pjoin(opt.save_root, 'meta')
    if opt.dataset_name == 't2m':
        opt.data_root = './dataset/HumanML3D'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.text_dir = pjoin(opt.data_root, 'texts')
        opt.joints_num = 22
        opt.dim_pose = 263
        opt.max_motion_length = 196
    elif opt.dataset_name == 'kit':
        opt.data_root = './dataset/KIT-ML'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.text_dir = pjoin(opt.data_root, 'texts')
        opt.joints_num = 21
        opt.dim_pose = 251
        opt.max_motion_length = 196
    else:
        raise KeyError('Dataset not recognized')
    opt.dim_word = 300
    opt.num_classes = (200 // opt.unit_length)
    opt.dim_pos_ohot = len(POS_enumerator)
    opt.is_train = False
    opt.is_continue = False
    opt.device = device
    return opt
|
def save_json(save_path, data):
    """Serialize *data* to *save_path* as compact JSON."""
    serialized = json.dumps(data)
    with open(save_path, 'w') as fp:
        fp.write(serialized)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.