code stringlengths 17 6.64M |
|---|
def remove_chumpy_dep(dico):
    """Return a copy of *dico* with chumpy-backed values converted to numpy arrays.

    Detection is done by name-matching on the value's type string, so it
    works even when chumpy itself is not importable here.
    """
    return {
        key: np.array(val) if 'chumpy' in str(type(val)) else val
        for key, val in dico.items()
    }
|
def load_and_remove_chumpy_dep(path):
    """Load a pickle file at *path* and return it as a chumpy-free dict.

    The pickle is read with latin-1 encoding (needed for Python-2-era SMPL
    model files) and any chumpy arrays are converted to plain numpy arrays.
    """
    import warnings
    with open(path, 'rb') as pkl_file:
        # Suppress chumpy's DeprecationWarnings only for this load instead of
        # mutating the process-wide warning filters (the previous
        # filterwarnings call leaked past this function).
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=DeprecationWarning)
            data = pickle.load(pkl_file, encoding='latin1')
    return remove_chumpy_dep(data)
|
def load_npz_into_dict(path):
    """Load the .npz archive at *path* into a plain dict, stripping chumpy types."""
    # Fix: the original called np.load(smplh_fn), silently ignoring the
    # `path` argument and depending on a global of that name.
    data = {key: val for key, val in np.load(path).items()}
    data = remove_chumpy_dep(data)
    return data
|
def load_and_clean_data(path):
    """Load a model file (.npz or .pkl) and return it as a chumpy-free dict.

    Raises:
        TypeError: if the file extension is neither '.pkl' nor '.npz'.
    """
    extension = os.path.splitext(path)[-1]
    if extension == '.npz':
        return load_npz_into_dict(path)
    if extension == '.pkl':
        return load_and_remove_chumpy_dep(path)
    raise TypeError('The format should be pkl or npz')
|
def merge_models(smplh_fn, mano_left_fn, mano_right_fn, output_folder='output'):
    """Merge a SMPL+H body model with MANO left/right hand models into one .npz.

    The output filename is chosen from the gender substring found in the
    parent folder name plus the (lowercased) model filename; if no gender
    substring is present the original model filename is reused.

    Args:
        smplh_fn: path to the SMPL+H body model (.pkl or .npz).
        mano_left_fn: path to the left-hand MANO model.
        mano_right_fn: path to the right-hand MANO model.
        output_folder: destination directory (created if missing).
    """
    body_data = load_and_clean_data(smplh_fn)
    lhand_data = load_and_clean_data(mano_left_fn)
    rhand_data = load_and_clean_data(mano_right_fn)
    modelname = osp.split(smplh_fn)[1]
    parent_folder = osp.split(osp.split(smplh_fn)[0])[1]
    tag = (parent_folder + '_') + modelname.lower()
    # 'female' must be tested before 'male': the string 'female' contains
    # 'male', so reversing the order would misclassify female models.
    if 'female' in tag:
        out_fn = 'SMPLH_FEMALE.npz'
    elif 'male' in tag:
        out_fn = 'SMPLH_MALE.npz'
    elif 'neutral' in tag:
        out_fn = 'SMPLH_NEUTRAL.npz'
    else:
        out_fn = modelname
    output_data = body_data.copy()
    output_data['hands_componentsl'] = lhand_data['hands_components']
    output_data['hands_componentsr'] = rhand_data['hands_components']
    output_data['hands_coeffsl'] = lhand_data['hands_coeffs']
    output_data['hands_coeffsr'] = rhand_data['hands_coeffs']
    output_data['hands_meanl'] = lhand_data['hands_mean']
    output_data['hands_meanr'] = rhand_data['hands_mean']
    output_data = remove_chumpy_dep(output_data)
    # np.savez_compressed does not create missing directories; do it here so
    # a fresh checkout with no 'output' folder does not crash.
    os.makedirs(output_folder, exist_ok=True)
    out_path = osp.join(output_folder, out_fn)
    print('Saving to {}'.format(out_path))
    np.savez_compressed(out_path, **output_data)
|
def save_json(save_path, data):
    """Serialize *data* as JSON into the file at *save_path*."""
    with open(save_path, 'w') as handle:
        json.dump(data, handle)
|
def load_json(file_path):
    """Read the file at *file_path* and return its parsed JSON content."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
|
def process(graph):
    """Parse bracket-annotated SRL verb descriptions into verb/entity/relation tables.

    Each entry of ``graph['verbs']`` carries a ``description`` string of the
    form ``"[ARG0: the man] [V: walks] ..."`` (assumed format -- confirm with
    the SRL producer).  Returns ``{'V': ..., 'entities': ..., 'relations': ...}``
    where V/entity indices are global across all verbs of the graph and each
    relation is a ``(verb_index, entity_index, role)`` triple.
    """
    (V, entities, relations) = ({}, {}, [])
    # NOTE(review): the outer loop variable ``i`` is shadowed by the inner
    # token loop; this works only because ``description`` is read first.
    for i in graph['verbs']:
        description = i['description']
        pos = 0    # running word position; '[ROLE:' tag tokens are not counted
        flag = 0   # 1 while inside an open bracket span
        (_words, _spans) = ([], [])
        (tags, verb) = ({}, 0)
        for i in description.split():
            if ('[' in i):
                # Opening tag token such as '[ARG0:' -- start a new span.
                # NOTE(review): if a description ever closed a span before
                # opening one, ``_role`` below would be unbound.
                _role = i[1:(- 1)]
                flag = 1
                _spans = [pos]
                _words = []
            elif (']' in i):
                # Closing token, e.g. 'walks]': last word of the span.
                _words.append(i[:(- 1)])
                pos += 1
                flag = 0
                if (_role == 'V'):
                    V[len(V)] = {'role': _role, 'spans': _spans, 'words': _words}
                    verb = (len(V) - 1)
                else:
                    entities[len(entities)] = {'role': _role, 'spans': _spans, 'words': _words}
                    tags[(len(entities) - 1)] = _role
            else:
                # Plain word: count it, and record it if inside a span.
                pos += 1
                if flag:
                    _words.append(i)
                    _spans.append(pos)
        # Link every entity found in this description to its verb.
        for i in tags:
            relations.append((verb, i, tags[i]))
    output = {'V': V, 'entities': entities, 'relations': relations}
    return output
|
def extend_paths(path, keyids, *, onesample=True, number_of_samples=1):
    """Expand a directory into one output .npy path per keyid.

    With ``onesample=False`` each keyid gets ``number_of_samples`` paths of
    the form ``<keyid>_<index>.npy``; otherwise a single ``<keyid>.npy``.

    Args:
        path: base directory (a pathlib.Path).
        keyids: iterable of key identifiers substituted into the template.
        onesample: keyword-only; emit one path per keyid when True.
        number_of_samples: number of indexed variants when onesample is False.

    Returns:
        List of path strings, grouped by sample index, then by keyid.
    """
    if not onesample:
        template_path = str(path / 'KEYID_INDEX.npy')
        # Fix: the original comprehension iterated over ``i`` but substituted
        # ``index``, raising NameError (or silently reusing an outer binding).
        templates = [template_path.replace('INDEX', str(index))
                     for index in range(number_of_samples)]
    else:
        templates = [str(path / 'KEYID.npy')]
    all_paths = []
    # Renamed loop variable: the original reused (and clobbered) ``path``.
    for template in templates:
        all_paths.extend(template.replace('KEYID', keyid) for keyid in keyids)
    return all_paths
|
def render_cli() -> None:
    """Render motion .npy files to a video or still image through Blender.

    Input is either a single file (``RENDER.INPUT_MODE == 'npy'``) or every
    .npy in a directory (``'dir'``), with ``*_mesh.npy`` files queued first.
    Already-rendered inputs are skipped.
    """
    cfg = parse_args(phase='render')
    cfg.FOLDER = cfg.RENDER.FOLDER
    if (cfg.RENDER.INPUT_MODE.lower() == 'npy'):
        output_dir = Path(os.path.dirname(cfg.RENDER.NPY))
        paths = [cfg.RENDER.NPY]
    elif (cfg.RENDER.INPUT_MODE.lower() == 'dir'):
        output_dir = Path(cfg.RENDER.DIR)
        paths = []
        file_list = natsort.natsorted(os.listdir(cfg.RENDER.DIR))
        # Rotate the file list by a random offset -- presumably so several
        # concurrent render jobs start on different files (confirm intent).
        begin_id = random.randrange(0, len(file_list))
        file_list = (file_list[begin_id:] + file_list[:begin_id])
        # Mesh files first, then joint files.
        for item in file_list:
            if item.endswith('_mesh.npy'):
                paths.append(os.path.join(cfg.RENDER.DIR, item))
        for item in file_list:
            if (item.endswith('.npy') and (not item.endswith('_mesh.npy'))):
                paths.append(os.path.join(cfg.RENDER.DIR, item))
    print(f'begin to render for {paths[0]}')
    # Imported lazily: these modules are only importable inside Blender's
    # Python environment.
    import numpy as np
    from GraphMotion.render.blender import render
    from GraphMotion.render.blender.tools import mesh_detect
    from GraphMotion.render.video import Video
    init = True
    for path in paths:
        # Skip inputs that already have a rendered artifact (or whose frames
        # folder exists, i.e. another process is rendering them).
        if (cfg.RENDER.MODE == 'video'):
            if (os.path.exists(path.replace('.npy', '.mp4')) or os.path.exists(path.replace('.npy', '_frames'))):
                print(f'npy is rendered or under rendering {path}')
                continue
        elif os.path.exists(path.replace('.npy', '.png')):
            print(f'npy is rendered or under rendering {path}')
            continue
        if (cfg.RENDER.MODE == 'video'):
            frames_folder = os.path.join(output_dir, path.replace('.npy', '_frames').split('/')[(- 1)])
            os.makedirs(frames_folder, exist_ok=True)
        else:
            frames_folder = os.path.join(output_dir, path.replace('.npy', '.png').split('/')[(- 1)])
        try:
            data = np.load(path)
            if (cfg.RENDER.JOINT_TYPE.lower() == 'humanml3d'):
                is_mesh = mesh_detect(data)
                if (not is_mesh):
                    # Joint data is rescaled from the SMPL-H to the MMM
                    # convention before rendering.
                    data = (data * smplh_to_mmm_scaling_factor)
        except FileNotFoundError:
            print(f'{path} not found')
            continue
        # NOTE(review): frames_folder is recomputed here with exactly the
        # same expressions as above (minus the makedirs) -- redundant.
        if (cfg.RENDER.MODE == 'video'):
            frames_folder = os.path.join(output_dir, path.replace('.npy', '_frames').split('/')[(- 1)])
        else:
            frames_folder = os.path.join(output_dir, path.replace('.npy', '.png').split('/')[(- 1)])
        out = render(data, frames_folder, canonicalize=cfg.RENDER.CANONICALIZE, exact_frame=cfg.RENDER.EXACT_FRAME, num=cfg.RENDER.NUM, mode=cfg.RENDER.MODE, faces_path=cfg.RENDER.FACES_PATH, downsample=cfg.RENDER.DOWNSAMPLE, always_on_floor=cfg.RENDER.ALWAYS_ON_FLOOR, oldrender=cfg.RENDER.OLDRENDER, jointstype=cfg.RENDER.JOINT_TYPE.lower(), res=cfg.RENDER.RES, init=init, gt=cfg.RENDER.GT, accelerator=cfg.ACCELERATOR, device=cfg.DEVICE)
        # Blender scene setup only happens on the first render call.
        init = False
        if (cfg.RENDER.MODE == 'video'):
            # NOTE(review): both branches are identical -- DOWNSAMPLE does not
            # affect the fps passed to Video; confirm whether that is intended.
            if cfg.RENDER.DOWNSAMPLE:
                video = Video(frames_folder, fps=cfg.RENDER.FPS)
            else:
                video = Video(frames_folder, fps=cfg.RENDER.FPS)
            vid_path = frames_folder.replace('_frames', '.mp4')
            video.save(out_path=vid_path)
            # Frames are temporary; only the encoded video is kept.
            shutil.rmtree(frames_folder)
            print(f'remove tmp fig folder and save video in {vid_path}')
        else:
            print(f'Frame generated at: {out}')
|
def Rodrigues(rotvec):
    """Convert an axis-angle vector of shape (3,) into a 3x3 rotation matrix.

    Implements Rodrigues' formula
        R = cos(t) I + (1 - cos(t)) r r^T + sin(t) [r]_x
    where t is the vector norm and r the unit axis.  Returns the identity
    for a zero rotation vector.
    """
    theta = np.linalg.norm(rotvec)
    if not theta > 0.0:
        # Zero rotation: the formula degenerates to the identity.
        return np.eye(3)
    r = (rotvec / theta).reshape(3, 1)
    # Build the skew-symmetric matrix from scalar components.  Indexing the
    # (3, 1) column directly yields (1,)-shaped entries, which made the
    # original skew matrix (3, 3, 1) and the returned sum broadcast to a
    # non-3x3 array instead of a proper rotation matrix.
    x, y, z = float(r[0]), float(r[1]), float(r[2])
    skew = np.array([[0.0, -z, y], [z, 0.0, -x], [-y, x, 0.0]])
    cost = np.cos(theta)
    return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * skew
|
def setup_scene(model_path, fps_target):
    """Prepare the Blender scene: set render fps, drop the default Cube, import the FBX model."""
    scene = bpy.data.scenes['Scene']
    scene.render.fps = fps_target
    # Remove Blender's default cube object if it is still in the scene.
    if ('Cube' in bpy.data.objects):
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()
    bpy.ops.import_scene.fbx(filepath=model_path)
|
def process_pose(current_frame, pose, trans, pelvis_position):
    """Insert keyframes for one SMPL pose frame on the imported armature.

    Args:
        current_frame: Blender frame index to key.
        pose: flat axis-angle vector, 72 (24 joints x 3) or 78 (26 x 3)
            values; joints beyond index 23 are ignored below.
        trans: root translation for this frame.
        pelvis_position: rest-pose pelvis head, subtracted so the keyed
            location is relative to the armature's rest pose.
    """
    if (pose.shape[0] == 72):
        rod_rots = pose.reshape(24, 3)
    else:
        rod_rots = pose.reshape(26, 3)
    # Convert each axis-angle joint vector to a rotation matrix.
    mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots]
    armature = bpy.data.objects['Armature']
    bones = armature.pose.bones
    # Axis reorder (y, z, x) with a x100 scale -- presumably metres to
    # centimetres for the FBX rig; confirm against the model's units.
    bones[bone_name_from_index[0]].location = (Vector(((100 * trans[1]), (100 * trans[2]), (100 * trans[0]))) - pelvis_position)
    bones[bone_name_from_index[0]].keyframe_insert('location', frame=current_frame)
    for (index, mat_rot) in enumerate(mat_rots, 0):
        if (index >= 24):
            # Only the 24 named SMPL body joints are keyed.
            continue
        bone = bones[bone_name_from_index[index]]
        bone_rotation = Matrix(mat_rot).to_quaternion()
        quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians((- 90)))
        quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians((- 90)))
        if (index == 0):
            # Root joint: pre-rotate to map the SMPL frame into Blender's.
            bone.rotation_quaternion = ((quat_x_90_cw @ quat_z_90_cw) @ bone_rotation)
        else:
            bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    return
|
def process_poses(input_path, gender, fps_source, fps_target, start_origin, person_id=1):
    """Load a joblib pose dump and keyframe every (subsampled) frame.

    Returns the next free Blender frame index after the animation.

    NOTE(review): the ``person_id`` parameter is ignored -- the first key of
    the loaded file is always used.
    """
    print(('Processing: ' + input_path))
    data = joblib.load(input_path)
    person_id = list(data.keys())[0]
    poses = data[person_id]['pose']
    # Fall back to zero translation when the dump carries no 'trans' track.
    if ('trans' not in data[person_id].keys()):
        trans = np.zeros((poses.shape[0], 3))
    else:
        trans = data[person_id]['trans']
    if (gender == 'female'):
        model_path = female_model_path
        # NOTE(review): this mutates the module-level bone name table in
        # place, so calling process_poses twice stacks 'f_avg_'/'m_avg_'
        # prefixes -- confirm this is only ever called once per process.
        for (k, v) in bone_name_from_index.items():
            bone_name_from_index[k] = ('f_avg_' + v)
    elif (gender == 'male'):
        model_path = male_model_path
        for (k, v) in bone_name_from_index.items():
            bone_name_from_index[k] = ('m_avg_' + v)
    else:
        print(('ERROR: Unsupported gender: ' + gender))
        sys.exit(1)
    # Never upsample: cap the target fps at the source fps.
    if (fps_target > fps_source):
        fps_target = fps_source
    print(f'Gender: {gender}')
    print(f'Number of source poses: {str(poses.shape[0])}')
    print(f'Source frames-per-second: {str(fps_source)}')
    print(f'Target frames-per-second: {str(fps_target)}')
    print('--------------------------------------------------')
    setup_scene(model_path, fps_target)
    scene = bpy.data.scenes['Scene']
    # Subsample by taking every sample_rate-th source pose.
    sample_rate = int((fps_source / fps_target))
    scene.frame_end = int((poses.shape[0] / sample_rate))
    # Read the rest-pose pelvis head in edit mode; used to re-center keys.
    bpy.ops.object.mode_set(mode='EDIT')
    pelvis_position = Vector(bpy.data.armatures[0].edit_bones[bone_name_from_index[0]].head)
    bpy.ops.object.mode_set(mode='OBJECT')
    source_index = 0
    frame = 1
    offset = np.array([0.0, 0.0, 0.0])
    while (source_index < poses.shape[0]):
        print(('Adding pose: ' + str(source_index)))
        if start_origin:
            # Anchor the animation at the origin: subtract the first frame's
            # horizontal translation from every frame.
            if (source_index == 0):
                offset = np.array([trans[source_index][0], trans[source_index][1], 0])
        scene.frame_set(frame)
        process_pose(frame, poses[source_index], (trans[source_index] - offset), pelvis_position)
        source_index += sample_rate
        frame += 1
    return frame
|
def export_animated_mesh(output_path):
    """Export the armature plus its first child mesh to .glb or .fbx.

    Exits the process on an unsupported file extension.
    """
    output_dir = os.path.dirname(output_path)
    if (not os.path.isdir(output_dir)):
        os.makedirs(output_dir, exist_ok=True)
    # Select only the armature and its mesh child so nothing else exports.
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['Armature'].select_set(True)
    bpy.data.objects['Armature'].children[0].select_set(True)
    if output_path.endswith('.glb'):
        print('Exporting to glTF binary (.glb)')
        # NOTE(review): export_selected/export_morph are legacy glTF exporter
        # keywords; newer Blender releases renamed them (use_selection,
        # export_morph_*) -- confirm the targeted Blender version.
        bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
    elif output_path.endswith('.fbx'):
        print('Exporting to FBX binary (.fbx)')
        bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
    else:
        print(('ERROR: Unsupported export format: ' + output_path))
        sys.exit(1)
    return
|
def Rodrigues(rotvec):
    """Convert an axis-angle vector of shape (3,) into a 3x3 rotation matrix.

    Implements Rodrigues' formula
        R = cos(t) I + (1 - cos(t)) r r^T + sin(t) [r]_x
    where t is the vector norm and r the unit axis.  Returns the identity
    for a zero rotation vector.
    """
    theta = np.linalg.norm(rotvec)
    if not theta > 0.0:
        # Zero rotation: the formula degenerates to the identity.
        return np.eye(3)
    r = (rotvec / theta).reshape(3, 1)
    # Build the skew-symmetric matrix from scalar components.  Indexing the
    # (3, 1) column directly yields (1,)-shaped entries, which made the
    # original skew matrix (3, 3, 1) and the returned sum broadcast to a
    # non-3x3 array instead of a proper rotation matrix.
    x, y, z = float(r[0]), float(r[1]), float(r[2])
    skew = np.array([[0.0, -z, y], [z, 0.0, -x], [-y, x, 0.0]])
    cost = np.cos(theta)
    return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * skew
|
def setup_scene(model_path, fps_target):
    """Prepare the Blender scene: set render fps, drop the default Cube, import the FBX model."""
    scene = bpy.data.scenes['Scene']
    scene.render.fps = fps_target
    # Delete Blender's default cube object if present.
    if ('Cube' in bpy.data.objects):
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()
    bpy.ops.import_scene.fbx(filepath=model_path)
|
def process_pose(current_frame, pose, lhandpose, rhandpose, trans, pelvis_position):
    """Key one body+hands frame: 24 body, 15 left-hand and 15 right-hand quaternions.

    Unlike the axis-angle variant above, every rotation here is already a
    quaternion (4 values per joint).
    """
    rod_rots = pose.reshape(24, 4)
    lhrod_rots = lhandpose.reshape(15, 4)
    rhrod_rots = rhandpose.reshape(15, 4)
    armature = bpy.data.objects[ROOT_NAME]
    bones = armature.pose.bones
    # Axis reorder (y, z, x) with a x100 scale -- presumably metres to
    # centimetres; confirm against the rig's units.
    bones[BODY_JOINT_NAMES[0]].location = (Vector(((100 * trans[1]), (100 * trans[2]), (100 * trans[0]))) - pelvis_position)
    bones[BODY_JOINT_NAMES[0]].keyframe_insert('location', frame=current_frame)
    for (index, mat_rot) in enumerate(rod_rots, 0):
        if (index >= 24):
            continue
        bone = bones[BODY_JOINT_NAMES[index]]
        bone_rotation = Quaternion(mat_rot)
        quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians((- 90)))
        quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians((- 90)))
        if (index == 0):
            # Root joint: pre-rotate into Blender's coordinate frame.
            bone.rotation_quaternion = ((quat_x_90_cw @ quat_z_90_cw) @ bone_rotation)
        else:
            bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    # Hand joints take their quaternions directly, with no frame correction.
    for (index, mat_rot) in enumerate(lhrod_rots, 0):
        if (index >= 15):
            continue
        bone = bones[LHAND_JOINT_NAMES[index]]
        bone_rotation = Quaternion(mat_rot)
        bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    for (index, mat_rot) in enumerate(rhrod_rots, 0):
        if (index >= 15):
            continue
        bone = bones[RHAND_JOINT_NAMES[index]]
        bone_rotation = Quaternion(mat_rot)
        bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    return
|
def process_poses(input_path, gender, fps_source, fps_target, start_origin, person_id=1):
    """Load per-frame SMPL params (body rot + hand quaternions) and keyframe them.

    Returns the next free Blender frame index after the animation.

    NOTE(review): both ``gender`` and ``person_id`` are effectively unused
    here -- the model path is always ``neural_smplx_path`` and frames are
    iterated directly.
    """
    print(('Processing: ' + input_path))
    smpl_params = joblib.load(input_path)
    (poses, lhposes, rhposes) = ([], [], [])
    for iframe in smpl_params.keys():
        poses.append(smpl_params[iframe]['rot'])
        # Hand quaternion layout: slices [4:64] and [68:128] hold the 15
        # left/right finger joints (15 x 4 values each) -- assumed layout,
        # confirm against the producer of 'hand_quaternions'.
        lhposes.append(smpl_params[iframe]['hand_quaternions'][4:64].copy().reshape((- 1), 4))
        rhposes.append(smpl_params[iframe]['hand_quaternions'][68:128].copy().reshape((- 1), 4))
    poses = np.vstack(poses)
    lhposes = np.stack(lhposes)
    rhposes = np.stack(rhposes)
    # No translation track in this format: keep the root at the origin.
    trans = np.zeros((poses.shape[0], 3))
    model_path = neural_smplx_path
    # Never upsample: cap the target fps at the source fps.
    if (fps_target > fps_source):
        fps_target = fps_source
    print(f'Gender: {gender}')
    print(f'Number of source poses: {str(poses.shape[0])}')
    print(f'Source frames-per-second: {str(fps_source)}')
    print(f'Target frames-per-second: {str(fps_target)}')
    print('--------------------------------------------------')
    setup_scene(model_path, fps_target)
    scene = bpy.data.scenes['Scene']
    # Subsample by taking every sample_rate-th source pose.
    sample_rate = int((fps_source / fps_target))
    scene.frame_end = int((poses.shape[0] / sample_rate))
    # Read the rest-pose pelvis head in edit mode; used to re-center keys.
    bpy.ops.object.mode_set(mode='EDIT')
    pelvis_position = Vector(bpy.data.armatures[0].edit_bones[BODY_JOINT_NAMES[0]].head)
    bpy.ops.object.mode_set(mode='OBJECT')
    source_index = 0
    frame = 1
    offset = np.array([0.0, 0.0, 0.0])
    while (source_index < poses.shape[0]):
        if start_origin:
            # Anchor the animation at the origin using the first frame's
            # horizontal translation (always zero here since trans is zero).
            if (source_index == 0):
                offset = np.array([trans[source_index][0], trans[source_index][1], 0])
        scene.frame_set(frame)
        process_pose(frame, poses[source_index], lhposes[source_index], rhposes[source_index], (trans[source_index] - offset), pelvis_position)
        source_index += sample_rate
        frame += 1
    return frame
|
def export_animated_mesh(output_path):
    """Export the ROOT_NAME armature plus its first child mesh to .glb or .fbx.

    Exits the process on an unsupported file extension.
    """
    output_dir = os.path.dirname(output_path)
    if (not os.path.isdir(output_dir)):
        os.makedirs(output_dir, exist_ok=True)
    # Select only the armature and its mesh child so nothing else exports.
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects[ROOT_NAME].select_set(True)
    bpy.data.objects[ROOT_NAME].children[0].select_set(True)
    if output_path.endswith('.glb'):
        print('Exporting to glTF binary (.glb)')
        # NOTE(review): export_selected/export_morph are legacy glTF exporter
        # keywords; newer Blender releases renamed them -- confirm the
        # targeted Blender version.
        bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
    elif output_path.endswith('.fbx'):
        print('Exporting to FBX binary (.fbx)')
        bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
    else:
        print(('ERROR: Unsupported export format: ' + output_path))
        sys.exit(1)
    return
|
def print_table(title, metrics):
    """Pretty-print a metrics mapping as a two-column table on the console."""
    table = Table(title=title)
    table.add_column('Metrics', style='cyan', no_wrap=True)
    table.add_column('Value', style='magenta')
    for metric_name, metric_value in metrics.items():
        table.add_row(metric_name, str(metric_value))
    get_console().print(table, justify='center')
|
def get_metric_statistics(values, replication_times):
    """Return per-metric mean and 95% confidence interval across replications.

    Rows of *values* are replications; statistics are taken along axis 0.
    """
    mean = np.mean(values, axis=0)
    # 1.96 is the z-score for a two-sided 95% confidence level.
    conf_interval = 1.96 * np.std(values, axis=0) / np.sqrt(replication_times)
    return mean, conf_interval
|
def main():
    """Profile the model's MACs/params on one test batch.

    NOTE(review): the bare ``return`` inside the loop exits after profiling
    the first batch, making the accumulation lists and the summary prints
    below it unreachable -- this looks like leftover debugging; confirm
    intent before removing.
    """
    cfg = parse_args(phase='test')
    cfg.FOLDER = cfg.TEST.FOLDER
    logger = create_logger(cfg, phase='test')
    output_dir = Path(os.path.join(cfg.FOLDER, str(cfg.model.model_type), str(cfg.NAME), ('samples_' + cfg.TIME)))
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(OmegaConf.to_yaml(cfg))
    pl.seed_everything(cfg.SEED_VALUE)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    datasets = get_datasets(cfg, logger=logger, phase='test')[0]
    logger.info('datasets module {} initialized'.format(''.join(cfg.TRAIN.DATASETS)))
    model = get_model(cfg, datasets)
    logger.info('model {} loaded'.format(cfg.model.model_type))
    metric_monitor = {'Train_jf': 'recons/text2jfeats/train', 'Val_jf': 'recons/text2jfeats/val', 'Train_rf': 'recons/text2rfeats/train', 'Val_rf': 'recons/text2rfeats/val', 'APE root': 'Metrics/APE_root', 'APE mean pose': 'Metrics/APE_mean_pose', 'AVE root': 'Metrics/AVE_root', 'AVE mean pose': 'Metrics/AVE_mean_pose'}
    callbacks = [pl.callbacks.RichProgressBar(), ProgressLogger(metric_monitor=metric_monitor)]
    logger.info('Callbacks initialized')
    trainer = pl.Trainer(benchmark=False, max_epochs=cfg.TRAIN.END_EPOCH, accelerator=cfg.ACCELERATOR, devices=list(range(len(cfg.DEVICE))), default_root_dir=cfg.FOLDER_EXP, reload_dataloaders_every_n_epochs=1, log_every_n_steps=cfg.LOGGER.LOG_EVERY_STEPS, deterministic=False, detect_anomaly=False, enable_progress_bar=True, logger=None, callbacks=callbacks)
    logger.info('Loading checkpoints from {}'.format(cfg.TEST.CHECKPOINTS))
    state_dict = torch.load(cfg.TEST.CHECKPOINTS, map_location='cpu')['state_dict']
    model.load_state_dict(state_dict)
    macs_lst = []
    params_lst = []
    flops_lst = []
    for (i, batch) in enumerate(datasets.test_dataloader()):
        print('batch size', len(batch['text']))
        # thop-style profiling: returns multiply-accumulate count and params.
        (macs, params) = profile(model, (batch,))
        print('macs', (macs / 1000000000.0), 'G')
        return
        # --- unreachable from here down (see docstring) ---
        macs_lst.append(macs)
        params_lst.append(params)
        if (len(flops_lst) == 1):
            break
    print(macs_lst)
    print(params_lst)
    print((np.mean(macs_lst) / 1000000000.0))
    print((np.mean(params_lst) / 1000000.0))
|
def main():
    """CLI entry point: convert a folder of .ply meshes into one .npy file."""
    parser = ArgumentParser()
    options = parser.add_argument_group('Params')
    options.add_argument('--ply_dir', type=str, required=True, help='ply set')
    options.add_argument('--out_dir', type=str, required=True, help='output folder')
    args = parser.parse_args()
    plys2npy(args.ply_dir, args.out_dir)
|
def plys2npy(ply_dir, out_dir):
    """Stack every non-ground-truth .ply mesh in *ply_dir* into an (N, 6890, 3) .npy.

    Files are taken in natural sort order; '*_gt.ply' files are skipped.
    The output is named after the folder (minus any 'SMPLFit_' prefix).
    """
    ply_dir = Path(ply_dir)
    mesh_paths = [
        os.path.join(ply_dir, name)
        for name in natsort.natsorted(os.listdir(ply_dir))
        if name.endswith('.ply') and not name.endswith('_gt.ply')
    ]
    vertices = np.zeros((len(mesh_paths), 6890, 3))
    for idx, mesh_path in enumerate(mesh_paths):
        mesh = trimesh.load_mesh(mesh_path, process=False)
        # Every mesh must be a full-resolution SMPL body (6890 vertices).
        assert (mesh.vertices.shape == (6890, 3))
        vertices[idx] = mesh.vertices
    basename = os.path.basename(ply_dir)
    if basename.startswith('SMPLFit_'):
        basename = basename[len('SMPLFit_'):]
    np.save(os.path.join(out_dir, (basename + '_mesh.npy')), vertices)
|
def print_table(title, metrics):
    """Render a metrics mapping as a centered two-column console table."""
    table = Table(title=title)
    table.add_column('Metrics', style='cyan', no_wrap=True)
    table.add_column('Value', style='magenta')
    for label, val in metrics.items():
        table.add_row(label, str(val))
    console = get_console()
    console.print(table, justify='center')
|
def get_metric_statistics(values, replication_times):
    """Compute the mean and 95% confidence interval of metrics over replications."""
    std = np.std(values, axis=0)
    # z = 1.96 corresponds to the two-sided 95% confidence level.
    half_width = (1.96 * std) / np.sqrt(replication_times)
    return np.mean(values, axis=0), half_width
|
def main():
    """Evaluate a checkpoint over several replications and save mean metrics.

    Runs ``trainer.test`` REPLICATION_TIMES times (plus a multimodality pass
    when TM2TMetrics is requested), aggregates mean and 95% confidence
    interval per metric, prints a summary table, and writes everything to a
    timestamped JSON file next to the sample output directory.
    """
    cfg = parse_args(phase='test')
    cfg.FOLDER = cfg.TEST.FOLDER
    logger = create_logger(cfg, phase='test')
    output_dir = Path(os.path.join(cfg.FOLDER, str(cfg.model.model_type), str(cfg.NAME), ('samples_' + cfg.TIME)))
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(OmegaConf.to_yaml(cfg))
    pl.seed_everything(cfg.SEED_VALUE)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    # Only the first dataset is evaluated.
    datasets = get_datasets(cfg, logger=logger, phase='test')[0]
    logger.info('datasets module {} initialized'.format(''.join(cfg.TRAIN.DATASETS)))
    model = get_model(cfg, datasets)
    logger.info('model {} loaded'.format(cfg.model.model_type))
    metric_monitor = {'Train_jf': 'recons/text2jfeats/train', 'Val_jf': 'recons/text2jfeats/val', 'Train_rf': 'recons/text2rfeats/train', 'Val_rf': 'recons/text2rfeats/val', 'APE root': 'Metrics/APE_root', 'APE mean pose': 'Metrics/APE_mean_pose', 'AVE root': 'Metrics/AVE_root', 'AVE mean pose': 'Metrics/AVE_mean_pose'}
    callbacks = [pl.callbacks.RichProgressBar(), ProgressLogger(metric_monitor=metric_monitor)]
    logger.info('Callbacks initialized')
    trainer = pl.Trainer(benchmark=False, max_epochs=cfg.TRAIN.END_EPOCH, accelerator=cfg.ACCELERATOR, devices=list(range(len(cfg.DEVICE))), default_root_dir=cfg.FOLDER_EXP, reload_dataloaders_every_n_epochs=1, log_every_n_steps=cfg.LOGGER.LOG_EVERY_STEPS, deterministic=False, detect_anomaly=False, enable_progress_bar=True, logger=None, callbacks=callbacks)
    logger.info('Loading checkpoints from {}'.format(cfg.TEST.CHECKPOINTS))
    state_dict = torch.load(cfg.TEST.CHECKPOINTS, map_location='cpu')['state_dict']
    model.load_state_dict(state_dict)
    all_metrics = {}
    replication_times = cfg.TEST.REPLICATION_TIMES
    for i in range(replication_times):
        metrics_type = ', '.join(cfg.METRIC.TYPE)
        logger.info(f'Evaluating {metrics_type} - Replication {i}')
        metrics = trainer.test(model, datamodule=datasets)[0]
        if ('TM2TMetrics' in metrics_type):
            # Extra pass with the datamodule switched into multimodality mode.
            logger.info(f'Evaluating MultiModality - Replication {i}')
            datasets.mm_mode(True)
            mm_metrics = trainer.test(model, datamodule=datasets)[0]
            metrics.update(mm_metrics)
            datasets.mm_mode(False)
        # Accumulate each metric's value across replications.
        for (key, item) in metrics.items():
            if (key not in all_metrics):
                all_metrics[key] = [item]
            else:
                all_metrics[key] += [item]
    all_metrics_new = {}
    for (key, item) in all_metrics.items():
        (mean, conf_interval) = get_metric_statistics(np.array(item), replication_times)
        all_metrics_new[(key + '/mean')] = mean
        all_metrics_new[(key + '/conf_interval')] = conf_interval
    print_table(f'Mean Metrics', all_metrics_new)
    # Keep the raw per-replication values alongside the aggregates.
    all_metrics_new.update(all_metrics)
    metric_file = (output_dir.parent / f'metrics_{cfg.TIME}.json')
    with open(metric_file, 'w', encoding='utf-8') as f:
        json.dump(all_metrics_new, f, indent=4)
    logger.info(f'Testing done, the metrics are saved to {str(metric_file)}')
|
def main():
    """Train the model, optionally resuming and/or warm-starting from checkpoints.

    Handles four loading paths: (1) resuming a previous run directory
    (config + latest checkpoint + wandb run id), (2) loading a pretrained
    VAE, (3) loading a full pretrained model, (4) fresh training.
    """
    cfg = parse_args()
    logger = create_logger(cfg, phase='train')
    if cfg.TRAIN.RESUME:
        resume = cfg.TRAIN.RESUME
        # Keep the CLI-provided TRAIN section; it overrides the resumed YAML.
        backcfg = cfg.TRAIN.copy()
        if os.path.exists(resume):
            # Load the newest YAML config from the resume directory.
            file_list = sorted(os.listdir(resume), reverse=True)
            for item in file_list:
                if item.endswith('.yaml'):
                    cfg = OmegaConf.load(os.path.join(resume, item))
                    cfg.TRAIN = backcfg
                    break
            # Pick the checkpoint with the highest epoch number (filenames
            # are '{epoch}.ckpt'; x[6:-5] slices the epoch digits).
            checkpoints = sorted(os.listdir(os.path.join(resume, 'checkpoints')), key=(lambda x: int(x[6:(- 5)])), reverse=True)
            for checkpoint in checkpoints:
                if ('epoch=' in checkpoint):
                    cfg.TRAIN.PRETRAINED = os.path.join(resume, 'checkpoints', checkpoint)
                    break
            # Recover the wandb run id so logging continues the same run.
            # NOTE(review): unlike the loops above there is no ``break``
            # here, so the LAST matching 'run-*' entry wins -- confirm.
            wandb_list = sorted(os.listdir(os.path.join(resume, 'wandb')), reverse=True)
            for item in wandb_list:
                if ('run-' in item):
                    cfg.LOGGER.WANDB.RESUME_ID = item.split('-')[(- 1)]
        else:
            raise ValueError('Resume path is not right.')
    pl.seed_everything(cfg.SEED_VALUE)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    loggers = []
    if cfg.LOGGER.WANDB.PROJECT:
        wandb_logger = pl_loggers.WandbLogger(project=cfg.LOGGER.WANDB.PROJECT, offline=cfg.LOGGER.WANDB.OFFLINE, id=cfg.LOGGER.WANDB.RESUME_ID, save_dir=cfg.FOLDER_EXP, version='', name=cfg.NAME, anonymous=False, log_model=False)
        loggers.append(wandb_logger)
    if cfg.LOGGER.TENSORBOARD:
        tb_logger = pl_loggers.TensorBoardLogger(save_dir=cfg.FOLDER_EXP, sub_dir='tensorboard', version='', name='')
        loggers.append(tb_logger)
    logger.info(OmegaConf.to_yaml(cfg))
    datasets = get_datasets(cfg, logger=logger)
    logger.info('datasets module {} initialized'.format(''.join(cfg.TRAIN.DATASETS)))
    # The model is built against the first dataset only.
    model = get_model(cfg, datasets[0])
    logger.info('model {} loaded'.format(cfg.model.model_type))
    # Mapping of display names -> logged metric keys for the progress logger.
    metric_monitor = {'Train_jf': 'recons/text2jfeats/train', 'Val_jf': 'recons/text2jfeats/val', 'Train_rf': 'recons/text2rfeats/train', 'Val_rf': 'recons/text2rfeats/val', 'APE root': 'Metrics/APE_root', 'APE mean pose': 'Metrics/APE_mean_pose', 'AVE root': 'Metrics/AVE_root', 'AVE mean pose': 'Metrics/AVE_mean_pose', 'R_TOP_1': 'Metrics/R_precision_top_1', 'R_TOP_2': 'Metrics/R_precision_top_2', 'R_TOP_3': 'Metrics/R_precision_top_3', '1_R_TOP_1': 'Metrics/s1_R_precision_top_1', '1_R_TOP_2': 'Metrics/s1_R_precision_top_2', '1_R_TOP_3': 'Metrics/s1_R_precision_top_3', '2_R_TOP_1': 'Metrics/s2_R_precision_top_1', '2_R_TOP_2': 'Metrics/s2_R_precision_top_2', '2_R_TOP_3': 'Metrics/s2_R_precision_top_3', '3_R_TOP_1': 'Metrics/s3_R_precision_top_1', '3_R_TOP_2': 'Metrics/s3_R_precision_top_2', '3_R_TOP_3': 'Metrics/s3_R_precision_top_3', 'gt_R_TOP_1': 'Metrics/gt_R_precision_top_1', 'gt_R_TOP_2': 'Metrics/gt_R_precision_top_2', 'gt_R_TOP_3': 'Metrics/gt_R_precision_top_3', 'FID': 'Metrics/FID', '1_FID': 'Metrics/1_FID', '2_FID': 'Metrics/2_FID', '3_FID': 'Metrics/3_FID', 'gt_FID': 'Metrics/gt_FID', 'Diversity': 'Metrics/Diversity', '1_Diversity': 'Metrics/1_Diversity', '2_Diversity': 'Metrics/2_Diversity', '3_Diversity': 'Metrics/3_Diversity', 'gt_Diversity': 'Metrics/gt_Diversity', 'MM dist': 'Metrics/Matching_score', '1_MM dist': 'Metrics/s1_Matching_score', '2_MM dist': 'Metrics/s2_Matching_score', '3_MM dist': 'Metrics/s3_Matching_score', 'MultiModality': 'Metrics/MultiModality', '1_MultiModality': 'Metrics/s1_MultiModality', '2_MultiModality': 'Metrics/s2_MultiModality', '3_MultiModality': 'Metrics/s3_MultiModality', 'Accuracy': 'Metrics/accuracy', 'gt_Accuracy': 'Metrics/gt_accuracy'}
    # NOTE(review): 'SACE_CHECKPOINT_EPOCH' looks like a typo for
    # 'SAVE_CHECKPOINT_EPOCH', but it must match the config schema.
    callbacks = [pl.callbacks.RichProgressBar(), ProgressLogger(metric_monitor=metric_monitor), ModelCheckpoint(dirpath=os.path.join(cfg.FOLDER_EXP, 'checkpoints'), filename='{epoch}', monitor='step', mode='max', every_n_epochs=cfg.LOGGER.SACE_CHECKPOINT_EPOCH, save_top_k=(- 1), save_last=False, save_on_train_epoch_end=True)]
    logger.info('Callbacks initialized')
    # DDP only when more than one device is configured.
    if (len(cfg.DEVICE) > 1):
        ddp_strategy = DDPStrategy(find_unused_parameters=True)
    else:
        ddp_strategy = 'auto'
    trainer = pl.Trainer(benchmark=False, max_epochs=cfg.TRAIN.END_EPOCH, accelerator=cfg.ACCELERATOR, devices=cfg.DEVICE, strategy=ddp_strategy, default_root_dir=cfg.FOLDER_EXP, log_every_n_steps=cfg.LOGGER.VAL_EVERY_STEPS, deterministic=False, detect_anomaly=False, enable_progress_bar=True, logger=loggers, callbacks=callbacks, check_val_every_n_epoch=cfg.LOGGER.VAL_EVERY_STEPS)
    logger.info('Trainer initialized')
    # e.g. 'module.path.ActorVae' -> 'actor'; selects the loading scheme below.
    vae_type = cfg.model.motion_vae.target.split('.')[(- 1)].lower().replace('vae', '')
    if cfg.TRAIN.PRETRAINED_VAE:
        logger.info('Loading pretrain vae from {}'.format(cfg.TRAIN.PRETRAINED_VAE))
        state_dict = torch.load(cfg.TRAIN.PRETRAINED_VAE, map_location='cpu')['state_dict']
        from collections import OrderedDict
        if (vae_type in ['actor']):
            # Actor-style VAE: split the checkpoint into encoder/decoder dicts.
            (encoder_dict, decoder_dict) = (OrderedDict(), OrderedDict())
            for (k, v) in state_dict.items():
                if (k.split('.')[0] == 'motion_encoder'):
                    name = k.replace('motion_encoder.', '')
                    encoder_dict[name] = v
                elif (k.split('.')[0] == 'motion_decoder'):
                    name = k.replace('motion_decoder.', '')
                    decoder_dict[name] = v
            model.motion_encoder.load_state_dict(encoder_dict, strict=True)
            model.motion_decoder.load_state_dict(decoder_dict, strict=True)
        elif (vae_type in ['GraphMotion']):
            # NOTE(review): vae_type is lowercased above, so it can never
            # equal 'GraphMotion' -- this branch looks dead; confirm.
            vae_dict = OrderedDict()
            for (k, v) in state_dict.items():
                if (k.split('.')[0] == 'vae'):
                    name = k.replace('vae.', '')
                    vae_dict[name] = v
            model.vae.load_state_dict(vae_dict, strict=True)
    if cfg.TRAIN.PRETRAINED:
        logger.info('Loading pretrain mode from {}'.format(cfg.TRAIN.PRETRAINED))
        logger.info('Attention! VAE will be recovered')
        state_dict = torch.load(cfg.TRAIN.PRETRAINED, map_location='cpu')['state_dict']
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        # Drop the positional-encoding buffer so sequence-length changes
        # between checkpoints do not cause a shape mismatch.
        for (k, v) in state_dict.items():
            if (k not in ['denoiser.sequence_pos_encoding.pe']):
                new_state_dict[k] = v
        model.load_state_dict(new_state_dict, strict=False)
    if cfg.TRAIN.RESUME:
        trainer.fit(model, datamodule=datasets[0], ckpt_path=cfg.TRAIN.PRETRAINED)
    else:
        trainer.fit(model, datamodule=datasets[0])
    checkpoint_folder = trainer.checkpoint_callback.dirpath
    logger.info(f'The checkpoints are stored in {checkpoint_folder}')
    logger.info(f'The outputs of this experiment are stored in {cfg.FOLDER_EXP}')
    logger.info('Training ends!')
|
def get_gaussian_dataset(role, size, dim, std):
    """Build an isotropic-Gaussian SupervisedDataset with all-zero labels."""
    samples = std * torch.randn(size, dim)
    labels = torch.zeros(size).long()
    return SupervisedDataset(f'gaussian-dim{dim}-std{std}', role, samples, labels)
|
def get_well_conditioned_gaussian_datasets(dim, std, oos_std):
    """Return train/valid datasets plus in- and out-of-scale test datasets.

    The second test set uses *oos_std* to provide an out-of-distribution split.
    """
    train_dset = get_gaussian_dataset(role='train', size=50000, dim=dim, std=std)
    valid_dset = get_gaussian_dataset(role='valid', size=5000, dim=dim, std=std)
    test_dsets = [
        get_gaussian_dataset(role='test', size=10000, dim=dim, std=std),
        get_gaussian_dataset(role='test', size=10000, dim=dim, std=oos_std),
    ]
    return train_dset, valid_dset, test_dsets
|
def get_linear_gaussian_dataset(role, size):
    """Sample an unlabeled dataset from x = A z + b + noise with z ~ N(0, I)."""
    A = torch.tensor([[(- 4.0)], [1.0]])
    b = torch.tensor([1.0, (- 3.0)])
    noise_scale = 0.1
    z = torch.randn(size, A.shape[1], 1)
    # Project the 1-D latent into 2-D observation space.
    Az = torch.matmul(A, z).view(size, A.shape[0])
    x = Az + b + noise_scale * torch.randn_like(Az)
    return SupervisedDataset(name='linear-gaussian', role=role, x=x)
|
def get_linear_gaussian_datasets():
    """Return the standard train/valid/test splits of the linear-Gaussian data."""
    sizes = {'train': 100000, 'valid': 10000, 'test': 10000}
    train_dset, valid_dset, test_dset = (
        get_linear_gaussian_dataset(role=role, size=size)
        for role, size in sizes.items()
    )
    return train_dset, valid_dset, test_dset
|
class NotMNIST(Dataset):
    """Test-only NotMNIST dataset cached as data.pt / targets.pt tensors.

    On the first construction the raw 'notMNIST_small' letter folders are
    read (via imageio) and the resulting tensors cached under *root*; later
    constructions load the cache directly.
    """
    def __init__(self, root, train=False, download=False):
        # Only a test split exists; `train`/`download` are accepted for API
        # parity with torchvision-style datasets but `download` is ignored.
        assert (not train), 'Only test set available for NotMNIST'
        (self.data, self.targets) = self._load_tensors(root)
    def _load_tensors(self, root):
        """Load cached tensors, building them from raw images on a cache miss."""
        data_path = os.path.join(root, 'data.pt')
        targets_path = os.path.join(root, 'targets.pt')
        try:
            with open(data_path, 'rb') as f:
                data = torch.load(f)
            with open(targets_path, 'rb') as f:
                targets = torch.load(f)
        except FileNotFoundError:
            # Cache miss: parse the raw image folders once, then cache.
            (data, targets) = self._load_raw_images(os.path.join(root, 'notMNIST_small'))
            torch.save(data, data_path)
            torch.save(targets, targets_path)
        return (data, targets)
    def _load_raw_images(self, root):
        """Read per-letter image folders into stacked image and label tensors."""
        data = []
        targets = []
        for letter in os.listdir(root):
            folder_path = os.path.join(root, letter)
            for basename in os.listdir(folder_path):
                try:
                    img_path = os.path.join(folder_path, basename)
                    data.append(np.array(imageio.imread(img_path)))
                    # Class index is the letter's position in A..J.
                    targets.append('ABCDEFGHIJ'.index(letter))
                except ValueError:
                    # Raised by unreadable/corrupt images (and by letters
                    # outside A..J); the file is skipped either way.
                    print('File {}/{} is broken'.format(letter, basename), flush=True)
        data = torch.tensor(data)
        targets = torch.tensor(targets)
        return (data, targets)
|
def get_raw_image_tensors(dataset_name, train, data_root):
    """Download (if needed) a dataset and return (uint8 NCHW images, uint8 labels).

    Supported names: 'cifar10', 'svhn', 'mnist', 'fashion-mnist'.

    Raises:
        ValueError: for an unrecognized dataset name.
    """
    data_dir = os.path.join(data_root, dataset_name)
    if (dataset_name == 'cifar10'):
        dataset = torchvision.datasets.CIFAR10(root=data_dir, train=train, download=True)
        # CIFAR stores HWC numpy arrays; convert to NCHW tensors.
        images = torch.tensor(dataset.data).permute((0, 3, 1, 2))
        labels = torch.tensor(dataset.targets)
    elif (dataset_name == 'svhn'):
        split = 'train' if train else 'test'
        dataset = torchvision.datasets.SVHN(root=data_dir, split=split, download=True)
        images = torch.tensor(dataset.data)
        labels = torch.tensor(dataset.labels)
    elif (dataset_name in ('mnist', 'fashion-mnist')):
        dataset_class = torchvision.datasets.MNIST if dataset_name == 'mnist' else torchvision.datasets.FashionMNIST
        dataset = dataset_class(root=data_dir, train=train, download=True)
        # Add the singleton channel dimension: (N, H, W) -> (N, 1, H, W).
        images = dataset.data.unsqueeze(1)
        labels = dataset.targets
    else:
        raise ValueError(f'Unknown dataset {dataset_name}')
    return (images.to(torch.uint8), labels.to(torch.uint8))
|
def image_tensors_to_supervised_dataset(dataset_name, dataset_role, images, labels):
    """Wrap raw image/label tensors in a SupervisedDataset.

    Images are cast to the default float dtype, labels to int64.
    """
    float_images = images.to(dtype=torch.get_default_dtype())
    long_labels = labels.long()
    return SupervisedDataset(dataset_name, dataset_role, float_images, long_labels)
|
def get_train_valid_image_datasets(dataset_name, data_root, valid_fraction, add_train_hflips):
    """Split the training images into train/valid datasets.

    The split is random (shuffled before slicing); when *add_train_hflips*
    is true a horizontally flipped copy of every training image is appended.
    """
    (images, labels) = get_raw_image_tensors(dataset_name, train=True, data_root=data_root)
    # Shuffle so the validation slice is a random sample.
    order = torch.randperm(images.shape[0])
    images, labels = images[order], labels[order]
    n_valid = int(valid_fraction * images.shape[0])
    valid_images, train_images = images[:n_valid], images[n_valid:]
    valid_labels, train_labels = labels[:n_valid], labels[n_valid:]
    if add_train_hflips:
        # Augment: concatenate a width-flipped copy of the training set.
        train_images = torch.cat((train_images, train_images.flip([3])))
        train_labels = torch.cat((train_labels, train_labels))
    train_dset = image_tensors_to_supervised_dataset(dataset_name, 'train', train_images, train_labels)
    valid_dset = image_tensors_to_supervised_dataset(dataset_name, 'valid', valid_images, valid_labels)
    return (train_dset, valid_dset)
|
def get_test_image_dataset(dataset_name, data_root):
    """Load the held-out test split as a SupervisedDataset."""
    return image_tensors_to_supervised_dataset(
        dataset_name,
        'test',
        *get_raw_image_tensors(dataset_name, train=False, data_root=data_root),
    )
|
def get_image_datasets(dataset_name, data_root, make_valid_dset):
    """Build (train, valid, test) image datasets.

    The validation set is empty (fraction 0) unless `make_valid_dset` is truthy,
    in which case 10% of the training split is held out.
    """
    train_dset, valid_dset = get_train_valid_image_datasets(
        dataset_name,
        data_root,
        valid_fraction=0.1 if make_valid_dset else 0,
        add_train_hflips=False,  # horizontal-flip augmentation disabled here
    )
    return (train_dset, valid_dset, get_test_image_dataset(dataset_name, data_root))
|
def get_loader(dset, device, batch_size, drop_last):
    """Create a shuffling DataLoader over a dataset moved onto `device`.

    Data already lives on the target device, so no workers or pinned memory.
    """
    return torch.utils.data.DataLoader(
        dset.to(device),
        batch_size=batch_size,
        shuffle=True,
        drop_last=drop_last,
        num_workers=0,
        pin_memory=False,
    )
|
def get_loaders(dataset, device, data_root, make_valid_loader, train_batch_size, valid_batch_size, test_batch_size):
    """Build train/valid/test DataLoaders for any supported dataset family.

    `valid_loader` is None when `make_valid_loader` is falsy.
    """
    print('Loading data...', end='', flush=True, file=sys.stderr)
    image_dataset_names = ['cifar10', 'svhn', 'mnist', 'fashion-mnist']
    tabular_dataset_names = ['miniboone', 'hepmass', 'power', 'gas', 'bsds300']
    if dataset in image_dataset_names:
        dsets = get_image_datasets(dataset, data_root, make_valid_loader)
    elif dataset in tabular_dataset_names:
        dsets = get_tabular_datasets(dataset, data_root)
    elif dataset == 'linear-gaussian':
        dsets = get_linear_gaussian_datasets()
    else:
        # Anything unrecognised is treated as a synthetic 2-D dataset name.
        dsets = get_2d_datasets(dataset)
    train_dset, valid_dset, test_dset = dsets
    print('Done.', file=sys.stderr)
    # Training drops partial batches; evaluation loaders keep them.
    train_loader = get_loader(train_dset, device, train_batch_size, drop_last=True)
    valid_loader = get_loader(valid_dset, device, valid_batch_size, drop_last=False) if make_valid_loader else None
    test_loader = get_loader(test_dset, device, test_batch_size, drop_last=False)
    return (train_loader, valid_loader, test_loader)
|
class SupervisedDataset(torch.utils.data.Dataset):
    """An in-memory (x, y) dataset tagged with a name and a train/valid/test role."""

    def __init__(self, name, role, x, y=None):
        if y is None:
            # Unlabelled data gets dummy zero labels so __getitem__ stays uniform.
            y = torch.zeros(x.shape[0]).long()
        assert x.shape[0] == y.shape[0]
        assert role in ['train', 'valid', 'test']
        self.name = name
        self.role = role
        self.x = x
        self.y = y

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, index):
        return (self.x[index], self.y[index])

    def to(self, device):
        """Return a copy of this dataset with its tensors on `device`."""
        return SupervisedDataset(self.name, self.role, self.x.to(device), self.y.to(device))
|
def train(config, load_dir):
    """Set up an experiment from `config`, log run metadata, and train it.

    Persists everything needed to reproduce the run: the config, the model
    schema/size, and the current git commit plus working-tree diff.
    """
    density, trainer, writer = setup_experiment(config=config, load_dir=load_dir, checkpoint_to_load='latest')
    writer.write_json('config', config)
    writer.write_json('model', {'num_params': num_params(density), 'schema': get_schema(config)})
    for name, cmd in [('git-head', ['git', 'rev-parse', 'HEAD']), ('git-diff', ['git', 'diff'])]:
        writer.write_textfile(name, subprocess.check_output(cmd).decode('ascii'))
    print('\nConfig:')
    print(json.dumps(config, indent=4))
    print(f'''
Number of parameters: {num_params(density):,}
''')
    trainer.train()
|
def print_test_metrics(config, load_dir):
    """Evaluate the best-valid checkpoint on the test set and print metrics as JSON."""
    # Disable disk writes: this is a read-only evaluation run.
    _, trainer, _ = setup_experiment(config={**config, 'write_to_disk': False}, load_dir=load_dir, checkpoint_to_load='best_valid')
    with torch.no_grad():
        raw_metrics = trainer.test()
    print(json.dumps({name: value.item() for name, value in raw_metrics.items()}, indent=4))
|
def print_model(config):
    """Instantiate the density on CPU (without disk writes) and print its structure."""
    density = setup_density_and_loaders(config={**config, 'write_to_disk': False}, device=torch.device('cpu'))[0]
    print(density)
|
def print_num_params(config):
    """Instantiate the density on CPU and print its total parameter count."""
    density = setup_density_and_loaders(config={**config, 'write_to_disk': False}, device=torch.device('cpu'))[0]
    print(f'Number of parameters: {num_params(density):,}')
|
def setup_density_and_loaders(config, device):
    """Construct the data loaders and the density model, moved to `device`."""
    train_loader, valid_loader, test_loader = get_loaders(
        dataset=config['dataset'],
        device=device,
        data_root=config['data_root'],
        # A validation loader is only needed when early stopping is enabled.
        make_valid_loader=config['early_stopping'],
        train_batch_size=config['train_batch_size'],
        valid_batch_size=config['valid_batch_size'],
        test_batch_size=config['test_batch_size'],
    )
    # The density gets the raw training tensor; presumably for data-dependent
    # initialisation/normalisation — confirm against get_density.
    density = get_density(schema=get_schema(config=config), x_train=train_loader.dataset.x)
    density.to(device)
    return (density, train_loader, valid_loader, test_loader)
|
def load_run(run_dir, device):
    """Restore a saved run: rebuild its model/loaders and load a checkpoint.

    Prefers the best-valid checkpoint, falling back to the latest one when no
    best-valid checkpoint file exists.
    """
    run_dir = Path(run_dir)
    with open(run_dir / 'config.json', 'r') as f:
        config = json.load(f)
    density, train_loader, valid_loader, test_loader = setup_density_and_loaders(config=config, device=device)
    checkpoints_dir = run_dir / 'checkpoints'
    try:
        checkpoint = torch.load(checkpoints_dir / 'best_valid.pt', map_location=device)
    except FileNotFoundError:
        checkpoint = torch.load(checkpoints_dir / 'latest.pt', map_location=device)
    print('Loaded checkpoint after epoch', checkpoint['epoch'])
    density.load_state_dict(checkpoint['module_state_dict'])
    return (density, train_loader, valid_loader, test_loader, config, checkpoint)
|
def setup_experiment(config, load_dir, checkpoint_to_load):
    """Build the density, visualizer, metrics and Trainer for one run.

    Returns (density, trainer, writer). `load_dir` is an existing run directory
    to resume from (None for a fresh run); `checkpoint_to_load` names which
    checkpoint the Trainer should restore.
    """
    # Seed all three RNG sources (with distinct offsets) for reproducibility.
    torch.manual_seed(config['seed'])
    np.random.seed((config['seed'] + 1))
    random.seed((config['seed'] + 2))
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    (density, train_loader, valid_loader, test_loader) = setup_density_and_loaders(config=config, device=device)
    if config['write_to_disk']:
        if (load_dir is None):
            # Fresh run: create a new subdirectory under the configured log root.
            logdir = config['logdir_root']
            make_subdir = True
        else:
            # Resuming: write directly into the existing run directory.
            logdir = load_dir
            make_subdir = False
        writer = Writer(logdir=logdir, make_subdir=make_subdir, tag_group=config['dataset'])
    else:
        writer = DummyWriter(logdir=load_dir)
    # Pick a visualizer matching the data: images, 2-D point clouds, or a no-op.
    if (config['dataset'] in ['cifar10', 'svhn', 'fashion-mnist', 'mnist']):
        visualizer = ImageDensityVisualizer(writer=writer)
    elif (train_loader.dataset.x.shape[1:] == (2,)):
        visualizer = TwoDimensionalDensityVisualizer(writer=writer, x_train=train_loader.dataset.x, num_importance_samples=config['num_test_importance_samples'], device=device)
    else:
        visualizer = DummyDensityVisualizer(writer=writer)
    (train_metrics, opts) = get_train_metrics(density, config)
    # One LR scheduler per optimiser; schedules are sized in batches
    # (see get_lr_scheduler's T_max = max_epochs * num_train_batches).
    lr_schedulers = {param_name: get_lr_scheduler(opt, len(train_loader), config) for (param_name, opt) in opts.items()}
    def valid_loss(density, x):
        # Early stopping minimises the negative IWAE bound on validation data.
        key = f"iwae-{config['num_valid_importance_samples']}"
        return (- metrics(density, x, config['num_valid_importance_samples'])[key])
    def test_metrics(density, x):
        return metrics(density, x, config['num_test_importance_samples'])
    trainer = Trainer(module=density, train_metrics=train_metrics, valid_loss=valid_loss, test_metrics=test_metrics, train_loader=train_loader, valid_loader=valid_loader, test_loader=test_loader, opts=opts, lr_schedulers=lr_schedulers, max_epochs=config['max_epochs'], max_grad_norm=config['max_grad_norm'], early_stopping=config['early_stopping'], max_bad_valid_epochs=config['max_bad_valid_epochs'], visualizer=visualizer, writer=writer, epochs_per_test=config['epochs_per_test'], should_checkpoint_latest=config['should_checkpoint_latest'], should_checkpoint_best_valid=config['should_checkpoint_best_valid'], checkpoint_to_load=checkpoint_to_load, device=device)
    return (density, trainer, writer)
|
def get_lr_scheduler(opt, num_train_batches, config):
    """Return a learning-rate scheduler for `opt` per config['lr_schedule'].

    'cosine' anneals to zero over all training batches; 'none' keeps the LR
    fixed via a constant LambdaLR multiplier.
    """
    schedule = config['lr_schedule']
    if schedule == 'cosine':
        total_steps = config['max_epochs'] * num_train_batches
        return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=opt, T_max=total_steps, eta_min=0.0)
    if schedule == 'none':
        # A constant multiplier of 1 leaves the optimiser's LR untouched.
        return torch.optim.lr_scheduler.LambdaLR(optimizer=opt, lr_lambda=lambda epoch: 1.0)
    assert False, f"Invalid learning rate schedule `{schedule}'"
|
def get_train_metrics(density, config):
    """Build the per-batch training loss function(s) and matching optimisers.

    'iwae' trains p and q jointly with one optimiser; any other objective
    trains p with IWAE (q detached) and q with a separate objective/optimiser.
    Returns (train_metrics_fn, {loss_name: optimiser}).
    """
    objective = config['train_objective']
    if objective == 'iwae':
        def train_metric(density, x):
            return {'losses': {'pq-loss': iwae(density, x, config['num_train_importance_samples'], detach_q=False)}}
        return (train_metric, {'pq-loss': get_opt(density.parameters(), config)})
    # Separate p/q training only makes sense for stochastic (u-channel) models.
    assert config['num_u_channels'] > 0, f"Invalid training objective `{objective}' for a deterministic model"
    q_loss = get_q_loss(config)
    def train_metrics(density, x):
        return {'losses': {
            'p-loss': iwae(density, x, config['num_train_importance_samples'], detach_q=True),
            'q-loss': q_loss(density, x),
        }}
    return (train_metrics, {
        'p-loss': get_opt(density.p_parameters(), config),
        'q-loss': get_opt(density.q_parameters(), config),
    })
|
def get_q_loss(config):
    """Return the loss function used to train the approximate posterior q.

    Supported objectives: 'rws', 'rws-dreg', 'iwae-stl' and 'iwae-dreg'.
    """
    objective = config['train_objective']
    if objective == 'rws':
        def q_loss(density, x):
            return rws(density, x, config['num_train_importance_samples'])
    elif objective == 'rws-dreg':
        def q_loss(density, x):
            return rws_dreg(density, x, config['num_train_importance_samples'])
    elif objective in ['iwae-stl', 'iwae-dreg']:
        # STL weights gradients by w/Z; DReG squares that weight.
        weight_pow = 1 if objective == 'iwae-stl' else 2
        def q_loss(density, x):
            return iwae_alt(density, x, config['num_train_importance_samples'], weight_pow)
    else:
        assert False, f"Invalid training objective `{objective}'"
    return q_loss
|
def get_opt(parameters, config):
    """Instantiate the optimiser named by config['opt'] over `parameters`."""
    opt_classes = {'sgd': optim.SGD, 'adam': optim.Adam, 'adamax': optim.Adamax}
    assert config['opt'] in opt_classes, f"Invalid optimiser type {config['opt']}"
    return opt_classes[config['opt']](parameters, lr=config['lr'], weight_decay=config['weight_decay'])
|
def num_params(module):
    """Return the total number of scalar parameters in `module`."""
    # numel() counts a tensor's elements directly; the original
    # view(-1).shape[0] round-trip was an indirect way to do the same.
    return sum(p.numel() for p in module.parameters())
|
def metrics(density, x, num_importance_samples):
    """Compute evaluation metrics for one batch.

    Returns a dict with the ELBO, the K-sample IWAE bound (key
    'iwae-<K>'), bits per dimension, and the ELBO gap (iwae - elbo).
    """
    result = density.elbo(x, num_importance_samples, detach_q_params=False, detach_q_samples=False)
    log_w = result['log-w']
    elbo = log_w.mean(dim=1)
    # IWAE bound: log of the average importance weight over the K samples.
    # (Renamed from `iwae` to avoid shadowing the module-level function.)
    iwae_bound = log_w.logsumexp(dim=1) - np.log(num_importance_samples)
    num_dims = int(np.prod(x.shape[1:]))
    # Bits per dimension: negative log-likelihood per dim, in base 2.
    bpd = -iwae_bound / num_dims / np.log(2)
    return {
        'elbo': elbo,
        f'iwae-{num_importance_samples}': iwae_bound,
        'bpd': bpd,
        'elbo-gap': iwae_bound - elbo,
    }
|
def iwae(density, x, num_importance_samples, detach_q):
    """Negative IWAE objective (unnormalised: the log-K constant is omitted).

    `detach_q` cuts gradients through both q's parameters and its samples.
    """
    result = density.elbo(x=x, num_importance_samples=num_importance_samples, detach_q_params=detach_q, detach_q_samples=detach_q)
    return -result['log-w'].logsumexp(dim=1).mean()
|
def iwae_alt(density, x, num_importance_samples, grad_weight_pow):
    """IWAE surrogate with detached normalised-weight factors (STL/DReG style).

    `grad_weight_pow` = 1 gives sticking-the-landing weights w/Z; 2 gives the
    squared (DReG) weights. Gradients flow only through log_w.
    """
    log_w = density.elbo(x=x, num_importance_samples=num_importance_samples, detach_q_params=True, detach_q_samples=False)['log-w']
    log_Z = log_w.logsumexp(dim=1).view(x.shape[0], 1, 1)
    # Normalised importance weights raised to the requested power, detached.
    weights = ((log_w - log_Z).exp() ** grad_weight_pow).detach()
    return -(weights * log_w).sum(dim=1).mean()
|
def rws(density, x, num_importance_samples):
    """Reweighted wake-sleep objective for q (samples detached, params live)."""
    log_w = density.elbo(x=x, num_importance_samples=num_importance_samples, detach_q_params=False, detach_q_samples=True)['log-w']
    log_Z = log_w.logsumexp(dim=1).view(x.shape[0], 1, 1)
    # Self-normalised importance weights; no gradient through the weights.
    weights = (log_w - log_Z).exp().detach()
    return (weights * log_w).sum(dim=1).mean()
|
def rws_dreg(density, x, num_importance_samples):
    """Doubly-reparameterised RWS objective (q params detached, samples live)."""
    log_w = density.elbo(x=x, num_importance_samples=num_importance_samples, detach_q_params=True, detach_q_samples=False)['log-w']
    log_Z = log_w.logsumexp(dim=1).view(x.shape[0], 1, 1)
    w_norm = (log_w - log_Z).exp().detach()
    # DReG weighting per sample: (w/Z) - (w/Z)^2.
    return -((w_norm - w_norm ** 2) * log_w).sum(dim=1).mean()
|
class ActNormBijection(Bijection):
    """Activation normalisation as an invertible layer (wraps ActNormNd)."""

    def __init__(self, x_shape):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        self.actnorm = ActNormNd(num_features=x_shape[0])
        # Broadcast shape: per-channel parameters, singletons over spatial dims.
        self.actnorm.shape = (1, -1) + (1,) * len(x_shape[1:])

    def _x_to_z(self, x, **kwargs):
        # The wrapped layer returns the negated log-Jacobian, hence the flip.
        z, neg_log_jac = self.actnorm(x=x, logpx=0.0)
        return {'z': z, 'log-jac': -neg_log_jac}

    def _z_to_x(self, z, **kwargs):
        x, neg_log_jac = self.actnorm.inverse(y=z, logpy=0.0)
        return {'x': x, 'log-jac': -neg_log_jac}
|
class AffineBijection(Bijection):
    """Learnable elementwise affine map z = x * exp(log_scale) + shift.

    With `per_channel`, a single (shift, log_scale) pair is shared across each
    channel's spatial positions; otherwise every element has its own pair.
    """

    def __init__(self, x_shape, per_channel):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        if per_channel:
            param_shape = (x_shape[0], *(1 for _ in x_shape[1:]))
            # Each channel's log-scale is counted once per spatial position.
            self.log_jac_factor = np.prod(x_shape[1:])
        else:
            param_shape = x_shape
            self.log_jac_factor = 1
        self.shift = nn.Parameter(torch.zeros(param_shape))
        self.log_scale = nn.Parameter(torch.zeros(param_shape))

    def _x_to_z(self, x, **kwargs):
        z = x * torch.exp(self.log_scale) + self.shift
        return {'z': z, 'log-jac': self._log_jac_x_to_z(x.shape[0])}

    def _z_to_x(self, z, **kwargs):
        x = (z - self.shift) * torch.exp(-self.log_scale)
        return {'x': x, 'log-jac': -self._log_jac_x_to_z(z.shape[0])}

    def _log_jac_x_to_z(self, batch_size):
        # log|det J| is identical for every batch element.
        total = self.log_jac_factor * torch.sum(self.log_scale)
        return total.view(1, 1).expand(batch_size, 1)
|
class ConditionalAffineBijection(Bijection):
    """Affine bijection z = (x + shift) * exp(log_scale), whose parameters are
    produced from the conditioning input `u` by a coupler network."""

    def __init__(self, x_shape, coupler):
        super().__init__(x_shape, x_shape)
        self.coupler = coupler

    def _x_to_z(self, x, **kwargs):
        shift, log_scale = self._shift_log_scale(kwargs['u'])
        return {'z': (x + shift) * torch.exp(log_scale), 'log-jac': self._log_jac_x_to_z(log_scale)}

    def _z_to_x(self, z, **kwargs):
        shift, log_scale = self._shift_log_scale(kwargs['u'])
        return {'x': z * torch.exp(-log_scale) - shift, 'log-jac': self._log_jac_z_to_x(log_scale)}

    def _shift_log_scale(self, u):
        params = self.coupler(u)
        return (params['shift'], params['log-scale'])

    def _log_jac_x_to_z(self, log_scale):
        # Diagonal Jacobian: sum per-element log-scales over non-batch dims.
        return log_scale.flatten(start_dim=1).sum(dim=1, keepdim=True)

    def _log_jac_z_to_x(self, log_scale):
        return -self._log_jac_x_to_z(log_scale)
|
class BatchNormBijection(Bijection):
    """Batch normalisation as an invertible layer (as in RealNVP-style flows).

    In training mode, normalises with batch statistics and updates running
    statistics; in eval mode, uses the running statistics. `_z_to_x` requires
    eval mode, since the forward statistics are batch-dependent.
    """
    def __init__(self, x_shape, per_channel, apply_affine, momentum, eps=1e-05):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        assert (0 <= momentum <= 1)
        self.momentum = momentum
        assert (eps > 0)
        self.eps = eps
        if per_channel:
            # One statistic per channel, broadcast over spatial dims.
            param_shape = (x_shape[0], *[1 for _ in x_shape[1:]])
            # Average over the batch axis (0) and the spatial axes of the
            # *batched* input (2 .. len(x_shape)).
            self.average_dims = ([0] + list(range(2, (len(x_shape) + 1))))
            # Each channel's term is counted once per spatial position.
            self.log_jac_factor = np.prod(x_shape[1:])
        else:
            param_shape = x_shape
            self.average_dims = [0]
            self.log_jac_factor = 1
        self.register_buffer('running_mean', torch.zeros(param_shape))
        self.register_buffer('running_var', torch.ones(param_shape))
        self.apply_affine = apply_affine
        if apply_affine:
            # Optional learnable post-normalisation affine transform.
            self.shift = nn.Parameter(torch.zeros(param_shape))
            self.log_scale = nn.Parameter(torch.zeros(param_shape))
    def _x_to_z(self, x, **kwargs):
        if self.training:
            mean = self._average(x)
            var = self._average(((x - mean) ** 2))
            if (self.momentum == 1):
                # momentum == 1: running stats are replaced by this batch's.
                self.running_mean = mean
                self.running_var = var
            elif (self.momentum > 0):
                # Exponential moving average of the batch statistics
                # (in-place, on .data, so no gradients flow into the buffers).
                self.running_mean.mul_((1 - self.momentum)).add_((self.momentum * mean.data))
                self.running_var.mul_((1 - self.momentum)).add_((self.momentum * var.data))
        else:
            mean = self.running_mean
            var = self.running_var
        z = ((x - mean) / torch.sqrt((var + self.eps)))
        if self.apply_affine:
            z = ((z * torch.exp(self.log_scale)) + self.shift)
        return {'z': z, 'log-jac': self._log_jac_x_to_z(var, x.shape[0])}
    def _z_to_x(self, z, **kwargs):
        # Inversion needs fixed statistics, hence eval mode only.
        assert (not self.training)
        if self.apply_affine:
            z = ((z - self.shift) * torch.exp((- self.log_scale)))
        x = ((z * torch.sqrt((self.running_var + self.eps))) + self.running_mean)
        return {'x': x, 'log-jac': (- self._log_jac_x_to_z(self.running_var, z.shape[0]))}
    def _average(self, data):
        # keepdim then squeeze(0) yields a result broadcastable to param_shape.
        return torch.mean(data, dim=self.average_dims, keepdim=True).squeeze(0)
    def _log_jac_x_to_z(self, var, batch_size):
        # Per-element contribution: log(1/sqrt(var + eps)), plus log_scale
        # when the affine part is enabled.
        summands = ((- 0.5) * torch.log((var + self.eps)))
        if self.apply_affine:
            summands = (self.log_scale + summands)
        log_jac_single = (self.log_jac_factor * torch.sum(summands))
        return log_jac_single.view(1, 1).expand(batch_size, 1)
|
class Bijection(nn.Module):
    """Base class for invertible maps between x-space and z-space.

    Subclasses implement `_x_to_z` / `_z_to_x`, each returning a dict holding
    the transformed tensor plus (by convention) a 'log-jac' entry.
    """

    def __init__(self, x_shape, z_shape):
        super().__init__()
        self.x_shape = x_shape
        self.z_shape = z_shape

    def forward(self, inputs, direction, **kwargs):
        if direction == 'x-to-z':
            assert inputs.shape[1:] == self.x_shape, f'Expected shape {self.x_shape}; received {inputs.shape[1:]}'
            result = self._x_to_z(inputs, **kwargs)
            assert result['z'].shape[1:] == self.z_shape
            return result
        if direction == 'z-to-x':
            assert inputs.shape[1:] == self.z_shape
            result = self._z_to_x(inputs, **kwargs)
            assert result['x'].shape[1:] == self.x_shape
            return result
        assert False, f'Invalid direction {direction}'

    def x_to_z(self, x, **kwargs):
        """Apply the forward (x -> z) transform."""
        return self(x, 'x-to-z', **kwargs)

    def z_to_x(self, z, **kwargs):
        """Apply the inverse (z -> x) transform."""
        return self(z, 'z-to-x', **kwargs)

    def inverse(self):
        """Return a bijection whose x->z is this one's z->x."""
        return InverseBijection(self)

    def condition(self, u):
        """Bind a fixed conditioning tensor `u` to this bijection."""
        return ConditionedBijection(bijection=self, u=u)

    def _x_to_z(self, x, **kwargs):
        raise NotImplementedError

    def _z_to_x(self, z, **kwargs):
        raise NotImplementedError
|
class ConditionedBijection(Bijection):
    """Wraps a conditional bijection with a fixed conditioning input `u`.

    `u` is registered as a buffer and broadcast across the batch dimension on
    every call.
    """

    def __init__(self, bijection, u):
        super().__init__(x_shape=bijection.x_shape, z_shape=bijection.z_shape)
        self.bijection = bijection
        self.register_buffer('u', u)

    def _x_to_z(self, x, **kwargs):
        return self.bijection.x_to_z(x, u=self._expand_u(x))

    def _z_to_x(self, z, **kwargs):
        return self.bijection.z_to_x(z, u=self._expand_u(z))

    def _expand_u(self, inputs):
        # Tile the stored u along a new leading batch axis (view, no copy).
        batch_size = inputs.shape[0]
        return self.u.unsqueeze(0).expand(batch_size, *(-1 for _ in self.u.shape))
|
class InverseBijection(Bijection):
    """The inverse of another bijection: x and z swap roles."""

    def __init__(self, bijection):
        super().__init__(x_shape=bijection.z_shape, z_shape=bijection.x_shape)
        self.bijection = bijection

    def _x_to_z(self, x, **kwargs):
        result = self.bijection.z_to_x(x, **kwargs)
        # Relabel the output key; auxiliary entries (e.g. log-jac) pass through.
        return {'z': result.pop('x'), **result}

    def _z_to_x(self, z, **kwargs):
        result = self.bijection.x_to_z(z, **kwargs)
        return {'x': result.pop('z'), **result}
|
class IdentityBijection(Bijection):
    """The identity map; its log-Jacobian is identically zero."""

    def __init__(self, x_shape):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

    def _x_to_z(self, x, **kwargs):
        return {'z': x, 'log-jac': self._log_jac_like(x)}

    def _z_to_x(self, z, **kwargs):
        return {'x': z, 'log-jac': self._log_jac_like(z)}

    def _log_jac_like(self, inputs):
        # One zero per batch element, matching the input's dtype and device.
        return torch.zeros(inputs.shape[0], 1, dtype=inputs.dtype, device=inputs.device)
|
class CompositeBijection(Bijection):
    """Composition of several bijections applied in sequence.

    `direction` states the order in which `layers` is given: 'x-to-z' means
    layers[0] acts on x first; 'z-to-x' means layers[0] acts on z first.
    Internally the layers are always stored in x-to-z order.
    """

    def __init__(self, layers, direction):
        if direction == 'x-to-z':
            x_shape = layers[0].x_shape
            z_shape = layers[-1].z_shape
        elif direction == 'z-to-x':
            x_shape = layers[-1].x_shape
            z_shape = layers[0].z_shape
        else:
            assert False, f'Invalid direction {direction}'
        super().__init__(x_shape, z_shape)
        if direction == 'z-to-x':
            # Normalise storage order to x-to-z.
            layers = reversed(layers)
        self._x_to_z_layers = nn.ModuleList(layers)

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self._pass_through(x, 'x-to-z', **kwargs)
        return {'z': z, 'log-jac': log_jac}

    def _z_to_x(self, z, **kwargs):
        x, log_jac = self._pass_through(z, 'z-to-x', **kwargs)
        return {'x': x, 'log-jac': log_jac}

    def _pass_through(self, inputs, direction, **kwargs):
        assert direction in ['z-to-x', 'x-to-z']
        if direction == 'x-to-z':
            output_name = 'z'
            layer_order = self._x_to_z_layers
        else:
            output_name = 'x'
            layer_order = reversed(self._x_to_z_layers)
        outputs = inputs
        log_jac = None
        # Chain the data through each layer, accumulating log-Jacobians.
        for layer in layer_order:
            result = layer(outputs, direction, **kwargs)
            outputs = result[output_name]
            log_jac = result['log-jac'] if log_jac is None else log_jac + result['log-jac']
        return (outputs, log_jac)
|
class BlockNeuralAutoregressiveBijection(Bijection):
    """Block neural autoregressive flow (BNAF) over a flat input vector.

    Forward-only: BNAF has no analytic inverse, so `_z_to_x` is inherited
    unimplemented from the base class.
    """

    def __init__(self, num_input_channels, num_hidden_layers, hidden_channels_factor, activation, residual):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)
        if activation == 'tanh':
            warnings.warn('BNAF with tanh nonlinearities is not surjective')
            act_class = Tanh
        elif activation == 'leaky-relu':
            act_class = LeakyReLU
        elif activation == 'soft-leaky-relu':
            act_class = SoftLeakyReLU
        else:
            assert False, f'Invalid activation {activation}'
        hidden_size = num_input_channels * hidden_channels_factor
        # Input layer, `num_hidden_layers` hidden layers, then a projection back
        # to the input width; each MaskedWeight keeps the autoregressive order.
        layers = [MaskedWeight(in_features=num_input_channels, out_features=hidden_size, dim=num_input_channels), act_class()]
        for _ in range(num_hidden_layers):
            layers.append(MaskedWeight(in_features=hidden_size, out_features=hidden_size, dim=num_input_channels))
            layers.append(act_class())
        layers.append(MaskedWeight(in_features=hidden_size, out_features=num_input_channels, dim=num_input_channels))
        self.bnaf = BNAF(*layers, res=residual)

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self.bnaf(x)
        return {'z': z, 'log-jac': log_jac.view(x.shape[0], 1)}
|
class Nonlinearity(nn.Module):
    """Elementwise activation that also propagates a running log-Jacobian term.

    Subclasses implement `_do_forward`, returning (outputs, elementwise
    log-Jacobian); `forward` accumulates that onto an optional incoming `grad`.
    """

    def forward(self, inputs, grad=None):
        outputs, log_jac = self._do_forward(inputs)
        if grad is None:
            new_grad = log_jac
        else:
            # Accumulate onto the incoming term, matching its shape.
            new_grad = log_jac.view(grad.shape) + grad
        return (outputs, new_grad)
|
class LeakyReLU(Nonlinearity):
    """Leaky-ReLU activation together with its elementwise log-Jacobian."""

    def __init__(self, negative_slope=0.01):
        # Bug fix: _do_forward reads self.negative_slope, but no constructor
        # ever set it (Nonlinearity has no __init__ of its own), so calling
        # this module raised AttributeError. The default mirrors both
        # SoftLeakyReLU below and F.leaky_relu's own default.
        super().__init__()
        self.negative_slope = negative_slope

    def _do_forward(self, inputs):
        outputs = F.leaky_relu(inputs, negative_slope=self.negative_slope)
        # Derivative is 1 for inputs >= 0 (log 1 = 0) and negative_slope below.
        log_jac = torch.zeros_like(inputs)
        log_jac[(inputs < 0)] = np.log(self.negative_slope)
        return (outputs, log_jac)
|
class SoftLeakyReLU(Nonlinearity):
    """Smooth leaky-ReLU: eps*x + (1-eps)*softplus(x), with its log-Jacobian."""

    def __init__(self, negative_slope=0.01):
        super().__init__()
        self.negative_slope = negative_slope

    def _do_forward(self, inputs):
        slope = self.negative_slope
        outputs = slope * inputs + (1 - slope) * F.softplus(inputs)
        # Derivative: slope + (1-slope)*sigmoid(x), strictly positive.
        log_jac = torch.log(slope + (1 - slope) * torch.sigmoid(inputs))
        return (outputs, log_jac)
|
class Invertible1x1ConvBijection(Bijection):
    """Base class for Glow-style invertible 1x1 convolutions.

    Works on flat vectors (matmul) or 3-D image tensors (1x1 conv). Subclasses
    supply the weight parameterisation via `_get_weights` and the per-element
    log-determinant via `_log_jac_single`. With `num_u_channels` > 0, a learned
    linear map of the conditioning input `u` is added to the output.
    """
    def __init__(self, x_shape, num_u_channels=0):
        assert ((len(x_shape) == 1) or (len(x_shape) == 3))
        super().__init__(x_shape, x_shape)
        num_channels = x_shape[0]
        self.weight_shape = [num_channels, num_channels]
        # Conv kernels are the square weights with trailing 1x1 spatial dims.
        self.conv_weights_shape = (self.weight_shape + [1 for _ in x_shape[1:]])
        # log|det W| is counted once per spatial position.
        self.num_non_channel_elements = np.prod(x_shape[1:])
        # Random orthogonal initialisation (the Q factor of a QR decomposition).
        # NOTE: deliberately a plain attribute, not a Parameter/buffer — each
        # subclass decides how to turn it into trainable state.
        self.weights_init = torch.qr(torch.randn(*self.weight_shape))[0]
        self.num_u_channels = num_u_channels
        if (num_u_channels > 0):
            self.u_weights = nn.Parameter(torch.zeros(num_channels, num_u_channels))
            self.u_conv_weights_shape = ([num_channels, num_u_channels] + [1 for _ in x_shape[1:]])
    def _convolve(self, inputs, weights, weights_shape):
        # Vector inputs: a plain linear map; image inputs: a 1x1 convolution.
        if (len(weights_shape) < 3):
            return torch.matmul(inputs, weights.t())
        else:
            return F.conv2d(inputs, weights.view(*weights_shape))
    def _log_jac_single(self):
        # log|det J| for a single batch element; implemented by subclasses.
        raise NotImplementedError
    def _get_weights(self):
        # The (num_channels, num_channels) weight matrix; subclass-defined.
        raise NotImplementedError
    def _get_Vu(self, **kwargs):
        # Conditioning term V @ u, or 0 when used unconditionally.
        if ('u' in kwargs):
            Vu = self._convolve(kwargs['u'], self.u_weights, self.u_conv_weights_shape)
        else:
            Vu = 0
            assert (self.num_u_channels == 0)
        return Vu
    def _x_to_z(self, x, **kwargs):
        # z = Wx + Vu; the additive term does not affect the Jacobian.
        Vu = self._get_Vu(**kwargs)
        Wx = self._convolve(x, self._get_weights(), self.conv_weights_shape)
        z = (Wx + Vu)
        log_jac = self._log_jac_single().expand(x.shape[0], 1)
        return {'z': z, 'log-jac': log_jac}
    def _z_to_x(self, z, **kwargs):
        # x = W^{-1}(z - Vu); the dense inverse is recomputed on each call.
        Vu = self._get_Vu(**kwargs)
        x = self._convolve((z - Vu), torch.inverse(self._get_weights()), self.conv_weights_shape)
        neg_log_jac = self._log_jac_single().expand(z.shape[0], 1)
        return {'x': x, 'log-jac': (- neg_log_jac)}
|
class BruteForceInvertible1x1ConvBijection(Invertible1x1ConvBijection):
    """Invertible 1x1 convolution with an unconstrained weight matrix.

    The log-determinant is computed directly via slogdet on every call.
    """

    def __init__(self, x_shape, num_u_channels=0):
        super().__init__(x_shape, num_u_channels)
        self.weights = nn.Parameter(self.weights_init)

    def _get_weights(self):
        return self.weights

    def _log_jac_single(self):
        # |det W| contributes once per spatial position.
        _, log_abs_det = torch.slogdet(self.weights)
        return log_abs_det * self.num_non_channel_elements
|
class LUInvertible1x1ConvBijection(Invertible1x1ConvBijection):
    """Invertible 1x1 convolution with LU-decomposed weights (as in Glow).

    W = P @ L @ U, with P a fixed permutation, L unit-lower-triangular, and U
    upper-triangular whose diagonal is stored as sign_s * exp(log_s) — making
    the log-determinant a simple sum of log_s.
    """
    def __init__(self, x_shape, num_u_channels=0):
        super().__init__(x_shape, num_u_channels)
        # Decompose the random orthogonal init into P, L, U factors.
        (P, lower, upper) = torch.lu_unpack(*torch.lu(self.weights_init))
        s = torch.diag(upper)
        log_s = torch.log(torch.abs(s))
        # Keep only the strictly-upper part; the diagonal lives in log_s/sign_s.
        upper = torch.triu(upper, 1)
        # Fixed (non-trainable) pieces are buffers; the factors are parameters.
        self.register_buffer('P', P)
        self.register_buffer('sign_s', torch.sign(s))
        self.register_buffer('l_mask', torch.tril(torch.ones(self.weight_shape), (- 1)))
        self.register_buffer('eye', torch.eye(*self.weight_shape))
        self.lower = nn.Parameter(lower)
        self.log_s = nn.Parameter(log_s)
        self.upper = nn.Parameter(upper)
        # NOTE(review): this bias parameter is registered but never used in the
        # methods visible here — confirm whether anything else relies on it.
        self.bias = nn.Parameter(torch.zeros(x_shape[0], *x_shape[1:]))
    def _get_weights(self):
        # Re-impose the triangular structure on every call so gradient steps
        # cannot push the stored factors off the LU parameterisation.
        L = ((self.lower * self.l_mask) + self.eye)
        U = (self.upper * self.l_mask.transpose(0, 1).contiguous())
        U += torch.diag((self.sign_s * torch.exp(self.log_s)))
        W = torch.matmul(self.P, torch.matmul(L, U))
        return W
    def _log_jac_single(self):
        # log|det W| = sum(log_s), counted once per spatial position.
        return (torch.sum(self.log_s) * self.num_non_channel_elements)
|
class LULinearBijection(Bijection):
    """Invertible linear layer parameterised by an LU decomposition.

    Wraps nflows' LULinear, whose forward and inverse calls both return
    (outputs, logabsdet) for the direction actually applied.
    """

    def __init__(self, num_input_channels):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)
        # identity_init starts the map off as the identity transform.
        self.linear = LULinear(features=num_input_channels, identity_init=True)

    def _x_to_z(self, x, **kwargs):
        (z, log_jac) = self.linear(x)
        return {'z': z, 'log-jac': log_jac.view(x.shape[0], 1)}

    def _z_to_x(self, z, **kwargs):
        # Bug fix: this previously called self.linear(z), applying the
        # *forward* map (and its log-jac) in the inverse direction, so
        # x_to_z followed by z_to_x did not recover the input. Use the
        # transform's inverse, as RationalQuadraticSplineBijection does.
        (x, log_jac) = self.linear.inverse(z)
        return {'x': x, 'log-jac': log_jac.view(z.shape[0], 1)}
|
class ElementwiseBijection(Bijection):
    """Base class for bijections applying the same scalar map F elementwise.

    Subclasses provide `_F`, `_F_inv`, and `_log_dF` — the elementwise
    log-derivative of F, evaluated at a point in x-space.
    """

    def __init__(self, x_shape):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

    def _x_to_z(self, x, **kwargs):
        return {'z': self._F(x), 'log-jac': self._log_jac_x_to_z(x)}

    def _z_to_x(self, z, **kwargs):
        return {'x': self._F_inv(z), 'log-jac': self._log_jac_z_to_x(z)}

    def _log_jac_x_to_z(self, x):
        # Diagonal Jacobian: sum elementwise log-derivatives per batch element.
        return self._log_dF(x).flatten(start_dim=1).sum(dim=1, keepdim=True)

    def _log_jac_z_to_x(self, z):
        # Bug fix: the inverse's log-Jacobian is -log dF evaluated at
        # x = F^{-1}(z). Previously _log_dF was evaluated at z itself, which
        # lives in z-space — only coincidentally correct for subclasses with
        # a constant derivative (scalar shift/scale), and wrong for
        # nonlinear maps such as logit and tanh.
        return (- self._log_jac_x_to_z(self._F_inv(z)))

    def _F(self, x):
        raise NotImplementedError

    def _F_inv(self, z):
        raise NotImplementedError

    def _log_dF(self, x):
        raise NotImplementedError
|
class LogitBijection(ElementwiseBijection):
    """Elementwise logit transform z = log(x / (1 - x)) for x in (0, 1)."""

    # Clamp bound keeping the logs finite as x approaches 0 or 1.
    _EPS = 1e-07

    def _F(self, x):
        return torch.log(x) - torch.log(1 - x)

    def _F_inv(self, z):
        return torch.sigmoid(z)

    def _log_dF(self, x):
        # d/dx logit(x) = 1 / (x * (1 - x)); clamp away from the endpoints.
        x_safe = x.clamp(self._EPS, 1 - self._EPS)
        return -torch.log(x_safe) - torch.log(1 - x_safe)
|
class TanhBijection(ElementwiseBijection):
    """Elementwise tanh transform z = tanh(x), with z in (-1, 1)."""

    # Clamp bound keeping the inverse's logs finite as z approaches +-1.
    _EPS = 1e-07

    def _F(self, x):
        return torch.tanh(x)

    def _F_inv(self, z):
        # atanh via its log formulation, clamped away from the endpoints.
        z_clamped = z.clamp((- 1) + self._EPS, 1 - self._EPS)
        return 0.5 * (torch.log(1 + z_clamped) - torch.log(1 - z_clamped))

    def _log_dF(self, x):
        # log tanh'(x) = log sech^2(x) = log(4) + 2x - 2*softplus(2x).
        # Bug fix: the original body referenced an undefined name `y`
        # (NameError at runtime); the identity above requires y = 2 * x.
        y = 2 * x
        return (y - (2 * F.softplus(y))) + np.log(4)
|
class ScalarMultiplicationBijection(ElementwiseBijection):
    """Multiply every element by a fixed nonzero scalar."""

    def __init__(self, x_shape, value):
        assert np.isscalar(value)
        assert value != 0.0, 'Scalar multiplication by zero is not a bijection'
        super().__init__(x_shape=x_shape)
        self.value = value

    def _F(self, x):
        return self.value * x

    def _F_inv(self, z):
        return z / self.value

    def _log_dF(self, x):
        # Constant derivative: every element contributes log|value|.
        return torch.full_like(x, np.log(np.abs(self.value)))
|
class ScalarAdditionBijection(ElementwiseBijection):
    """Add a fixed scalar to every element; volume-preserving (zero log-jac)."""

    def __init__(self, x_shape, value):
        assert np.isscalar(value)
        super().__init__(x_shape=x_shape)
        self.value = value

    def _F(self, x):
        return x + self.value

    def _F_inv(self, z):
        return z - self.value

    def _log_dF(self, x):
        # A pure shift has unit derivative everywhere.
        return torch.zeros_like(x)
|
class RationalQuadraticSplineBijection(Bijection):
    """Wraps an nflows rational-quadratic spline transform as a Bijection.

    Fix: `_x_to_z` / `_z_to_x` now accept (and ignore) extra keyword
    arguments. Bijection.forward always forwards **kwargs to these hooks, and
    every sibling bijection accepts them; without this, any conditioning
    kwarg (e.g. `u`) raised a TypeError here.
    """

    def __init__(self, num_input_channels, flow):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)
        self.flow = flow

    def _x_to_z(self, x, **kwargs):
        (z, log_jac) = self.flow(x)
        return {'z': z, 'log-jac': log_jac.view(x.shape[0], 1)}

    def _z_to_x(self, z, **kwargs):
        (x, log_jac) = self.flow.inverse(z)
        return {'x': x, 'log-jac': log_jac.view(z.shape[0], 1)}
|
class CoupledRationalQuadraticSplineBijection(RationalQuadraticSplineBijection):
    """Coupling-layer rational-quadratic spline flow (Neural Spline Flows)."""

    def __init__(self, num_input_channels, num_hidden_layers, num_hidden_channels, num_bins, tail_bound, activation, dropout_probability, reverse_mask):
        def transform_net_create_fn(in_features, out_features):
            # Residual MLP mapping the conditioned half to spline parameters.
            return ResidualNet(
                in_features=in_features,
                out_features=out_features,
                context_features=None,
                hidden_features=num_hidden_channels,
                num_blocks=num_hidden_layers,
                activation=activation(),
                dropout_probability=dropout_probability,
                use_batch_norm=False,
            )
        coupling_flow = PiecewiseRationalQuadraticCouplingTransform(
            mask=create_alternating_binary_mask(num_input_channels, even=reverse_mask),
            transform_net_create_fn=transform_net_create_fn,
            num_bins=num_bins,
            tails='linear',
            tail_bound=tail_bound,
            apply_unconditional_transform=True,
        )
        super().__init__(num_input_channels=num_input_channels, flow=coupling_flow)
|
class AutoregressiveRationalQuadraticSplineBijection(RationalQuadraticSplineBijection):
    """Masked-autoregressive rational-quadratic spline flow."""

    def __init__(self, num_input_channels, num_hidden_layers, num_hidden_channels, num_bins, tail_bound, activation, dropout_probability):
        autoregressive_flow = MaskedPiecewiseRationalQuadraticAutoregressiveTransform(
            features=num_input_channels,
            hidden_features=num_hidden_channels,
            context_features=None,
            num_bins=num_bins,
            tails='linear',
            tail_bound=tail_bound,
            num_blocks=num_hidden_layers,
            use_residual_blocks=True,
            random_mask=False,
            activation=activation(),
            dropout_probability=dropout_probability,
            use_batch_norm=False,
        )
        super().__init__(num_input_channels=num_input_channels, flow=autoregressive_flow)
|
class ODEVelocityFunction(ODEnet):
    """ODE dynamics network, optionally conditioned on extra channels `u`.

    When `num_u_channels` > 0, the first layer is rebuilt to accept the input
    concatenated with u, and `set_u` must be called before the forward pass.
    """

    def __init__(self, hidden_dims, x_input_shape, nonlinearity, num_u_channels=0, strides=None, conv=False, layer_type='concatsquash'):
        super().__init__(hidden_dims=hidden_dims, input_shape=x_input_shape, strides=strides, conv=conv, layer_type=layer_type, nonlinearity=nonlinearity)
        if num_u_channels > 0:
            # Widen the first layer, reusing whatever layer class ODEnet built.
            first_layer_class = self.layers[0].__class__
            self.layers[0] = first_layer_class(x_input_shape[0] + num_u_channels, hidden_dims[0])
        self.num_u_channels = num_u_channels

    def set_u(self, u):
        """Store the conditioning tensor used by subsequent forward calls."""
        assert self.num_u_channels > 0
        self._u = u

    def forward(self, t, y):
        if self.num_u_channels > 0:
            # Append the conditioning channels along dim 1.
            y = torch.cat((y, self._u), 1)
        return super().forward(t, y)
|
class FFJORDBijection(Bijection):
    """Continuous normalising flow (FFJORD), integrating a learned ODE.

    The log-Jacobian comes out of the CNF's divergence accumulator; with
    conditioning channels, `u` is passed to the velocity network via `set_u`.
    """
    # Velocity-net nonlinearity and divergence estimator are fixed here.
    _VELOCITY_NONLINEARITY = 'tanh'
    _DIVERGENCE_METHOD = 'brute_force'
    # Adaptive Runge-Kutta solver and integration horizon.
    _SOLVER = 'dopri5'
    _INTEGRATION_TIME = 0.5
    def __init__(self, x_shape, velocity_hidden_channels, num_u_channels, relative_tolerance, absolute_tolerance):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        self.diffeq = ODEVelocityFunction(hidden_dims=tuple(velocity_hidden_channels), x_input_shape=x_shape, nonlinearity=self._VELOCITY_NONLINEARITY, num_u_channels=num_u_channels)
        odefunc = ODEfunc(diffeq=self.diffeq, divergence_fn=self._DIVERGENCE_METHOD, residual=False, rademacher=False)
        self.cnf = CNF(odefunc=odefunc, T=self._INTEGRATION_TIME, train_T=False, regularization_fns=None, solver=self._SOLVER, atol=absolute_tolerance, rtol=relative_tolerance)
    def _get_nfes(self):
        # Number of ODE function evaluations in the last solve (a cost metric).
        return torch.tensor(self.cnf.odefunc.num_evals())
    def _evolve_ODE(self, input_state, reverse, **kwargs):
        if ('u' in kwargs):
            # Make the conditioning input available to the velocity network.
            self.diffeq.set_u(kwargs['u'])
        init_log_jac = input_state.new_zeros(input_state.shape[0], 1)
        # The CNF's second output is the negated log-Jacobian, hence the flip.
        (output_state, neg_log_jac) = self.cnf(input_state, init_log_jac, reverse=reverse)
        return (output_state, (- neg_log_jac))
    def _x_to_z(self, x, **kwargs):
        (z, log_jac) = self._evolve_ODE(input_state=x, reverse=False, **kwargs)
        # 'nfes' is reported alongside the transform for monitoring.
        return {'z': z, 'log-jac': log_jac, 'nfes': self._get_nfes()}
    def _z_to_x(self, z, **kwargs):
        (x, log_jac) = self._evolve_ODE(input_state=z, reverse=True, **kwargs)
        return {'x': x, 'log-jac': log_jac}
|
class ResidualFlowBijection(Bijection):
    """Invertible residual block (Residual Flows), wrapping iResBlock."""

    def __init__(self, x_shape, lipschitz_net, reduce_memory):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        self.block = self._get_iresblock(net=lipschitz_net, reduce_memory=reduce_memory)

    def _x_to_z(self, x, **kwargs):
        # The wrapped block returns the negated log-Jacobian, hence the flip.
        z, neg_log_jac = self.block(x=x, logpx=0.0)
        return {'z': z, 'log-jac': -neg_log_jac}

    def _z_to_x(self, z, **kwargs):
        x, neg_log_jac = self.block.inverse(y=z, logpy=0.0)
        return {'x': x, 'log-jac': -neg_log_jac}

    def _get_iresblock(self, net, reduce_memory):
        # reduce_memory toggles the Neumann-series gradient estimator together
        # with computing gradients inside the forward pass.
        return iResBlock(nnet=net, brute_force=False, n_power_series=None, neumann_grad=reduce_memory, n_dist='geometric', geom_p=0.5, lamb=-1.0, n_exact_terms=2, n_samples=1, exact_trace=False, grad_in_forward=reduce_memory)
|
class SumOfSquaresPolynomialBijection(Bijection):
    """Sum-of-squares polynomial autoregressive flow (wraps Pyro's Polynomial).

    Forward-only: `_z_to_x` falls through to the base class's
    NotImplementedError.
    """

    def __init__(self, num_input_channels, hidden_channels, activation, num_polynomials, polynomial_degree):
        super().__init__(x_shape=(num_input_channels,), z_shape=(num_input_channels,))
        # One (degree+1)-coefficient block per polynomial, per dimension.
        arn = AutoRegressiveNN(input_dim=int(num_input_channels), hidden_dims=hidden_channels, param_dims=[((polynomial_degree + 1) * num_polynomials)], nonlinearity=activation())
        self.flow = Polynomial(autoregressive_nn=arn, input_dim=int(num_input_channels), count_degree=polynomial_degree, count_sum=num_polynomials)

    def _x_to_z(self, x, **kwargs):
        # Fix: accept (and ignore) **kwargs for compatibility with
        # Bijection.forward, which always forwards keyword arguments here;
        # every sibling bijection already does this.
        z = self.flow._call(x)
        # The (None, None) arguments appear unused — this presumably relies on
        # the Jacobian cached by the preceding _call; confirm against Pyro.
        log_jac = self.flow.log_abs_det_jacobian(None, None).view(x.shape[0], 1)
        return {'z': z, 'log-jac': log_jac}
|
class BernoulliConditionalDensity(ConditionalDensity):
    """Factorised Bernoulli density whose logits come from a network applied
    to the conditioning input."""

    def __init__(self, logit_net):
        super().__init__()
        self.logit_net = logit_net

    def _log_prob(self, inputs, cond_inputs):
        logits = self.logit_net(cond_inputs)
        log_probs = dist.bernoulli.Bernoulli(logits=logits).log_prob(inputs)
        # Sum over all event dimensions, keeping a (batch, 1) shape.
        return {'log-prob': log_probs.flatten(start_dim=1).sum(dim=1, keepdim=True)}

    def _sample(self, cond_inputs, detach_params, detach_samples):
        logits = self.logit_net(cond_inputs)
        bernoulli = dist.bernoulli.Bernoulli(logits=logits)
        samples = bernoulli.sample()
        if detach_params:
            # Bug fix: previously only the local `logits` variable was
            # detached, while log_prob was still evaluated through the
            # original distribution — so detach_params had no effect on the
            # returned log-probs. Rebuild the distribution from detached
            # logits, mirroring how ConcreteConditionalDensity detaches
            # its alphas before scoring.
            bernoulli = dist.bernoulli.Bernoulli(logits=logits.detach())
        if detach_samples:
            samples = samples.detach()
        log_probs = bernoulli.log_prob(samples).flatten(start_dim=1).sum(dim=1, keepdim=True)
        return {'sample': samples, 'log-prob': log_probs}
|
def concrete_log_prob(u, alphas, lam):
    """Log-density of the Concrete (Gumbel-softmax) distribution.

    `u` is a batch of points on the simplex (flattened over non-batch
    dims), `alphas` the matching positive concentrations, `lam` the
    temperature. Returns shape (batch, 1).
    """
    assert alphas.shape == u.shape
    log_u = torch.log(u.flatten(start_dim=1))
    log_alphas = torch.log(alphas.flatten(start_dim=1))
    dim = log_u.shape[1]
    # Normalising constant: log((dim-1)!) + (dim-1) * log(lam).
    normaliser = np.sum(np.log(np.arange(1, dim))) + (dim - 1) * np.log(lam)
    # Shared log-partition term: log sum_k alpha_k * u_k^{-lam}.
    log_partition = torch.logsumexp(log_alphas - lam * log_u, dim=1, keepdim=True)
    per_coordinate = log_alphas - (lam + 1) * log_u
    return normaliser + torch.sum(per_coordinate - log_partition, dim=1, keepdim=True)
|
def concrete_sample(alphas, lam):
    """Draw a Concrete sample via the Gumbel-softmax trick.

    Perturbs log-concentrations with standard Gumbel noise, scales by the
    temperature `lam`, and normalises over dim 1 — i.e. a softmax, which
    is exactly exp(logits - logsumexp(logits)).
    """
    gumbel_noise = torch.distributions.gumbel.Gumbel(
        torch.zeros_like(alphas), torch.ones_like(alphas)
    ).sample()
    logits = (torch.log(alphas) + gumbel_noise) / lam
    return torch.softmax(logits, dim=1)
|
class ConcreteConditionalDensity(ConditionalDensity):
    """Conditional Concrete (Gumbel-softmax) density with temperature `lam`.

    `log_alpha_map` maps conditioning inputs to log-concentrations;
    exponentiation keeps the concentrations strictly positive.
    """

    def __init__(self, log_alpha_map, lam):
        super().__init__()
        self.log_alpha_map = log_alpha_map
        self.lam = lam

    def _log_prob(self, inputs, cond_inputs):
        alphas = self._alphas(cond_inputs)
        return {'log-prob': concrete_log_prob(inputs, alphas, self.lam)}

    def _sample(self, cond_inputs, detach_params, detach_samples):
        alphas = self._alphas(cond_inputs)
        samples = concrete_sample(alphas, self.lam)
        if detach_params:
            alphas = alphas.detach()
        if detach_samples:
            samples = samples.detach()
        # Log-prob is computed *after* detaching so the detach flags
        # control gradient flow through the returned value.
        return {
            'log-prob': concrete_log_prob(samples, alphas, self.lam),
            'sample': samples,
        }

    def _alphas(self, cond_inputs):
        return torch.exp(self.log_alpha_map(cond_inputs))
|
class ConditionalDensity(nn.Module):
def forward(self, mode, *args, **kwargs):
if (mode == 'log-prob'):
return self._log_prob(*args, **kwargs)
elif (mode == 'sample'):
return self._sample(*args, **kwargs)
else:
assert False, f'Invalid mode {mode}'
def log_prob(self, inputs, cond_inputs):
return self('log-prob', inputs, cond_inputs)
def sample(self, cond_inputs, detach_params=False, detach_samples=False):
return self('sample', cond_inputs, detach_params=detach_params, detach_samples=detach_samples)
def _log_prob(self, inputs, cond_inputs):
raise NotImplementedError
def _sample(self, cond_inputs, detach_params, detach_samples):
raise NotImplementedError
|
class DiagonalGaussianConditionalDensity(ConditionalDensity):
    """Conditional diagonal Gaussian whose mean and scale come from a coupler.

    The coupler returns a dict with 'shift' (the mean) and 'log-scale'
    (exponentiated to obtain the stddev).
    """

    def __init__(self, coupler):
        super().__init__()
        self.coupler = coupler

    def _log_prob(self, inputs, cond_inputs):
        means, stddevs = self._means_and_stddevs(cond_inputs)
        return {'log-prob': diagonal_gaussian_log_prob(inputs, means, stddevs)}

    def _sample(self, cond_inputs, detach_params, detach_samples):
        means, stddevs = self._means_and_stddevs(cond_inputs)
        samples = diagonal_gaussian_sample(means, stddevs)
        if detach_params:
            means, stddevs = means.detach(), stddevs.detach()
        if detach_samples:
            samples = samples.detach()
        # Recomputed after detaching so the detach flags really cut the
        # corresponding gradient paths of the returned log-prob.
        log_probs = diagonal_gaussian_log_prob(samples, means, stddevs)
        return {'sample': samples, 'log-prob': log_probs}

    def _means_and_stddevs(self, cond_inputs):
        coupler_out = self.coupler(cond_inputs)
        return coupler_out['shift'], torch.exp(coupler_out['log-scale'])
|
class CIFDensity(Density):
    """Continuously Indexed Flow (CIF) density.

    Augments `bijection` with a continuous index u:
      generative: z ~ prior, u ~ p(u | z), x = bijection^{-1}(z; u)
      inference:  u ~ q(u | x), z = bijection(x; u)
    The ELBO combines the bijection's log-Jacobian, log p(u | z) - log q(u | x),
    and the prior's own ELBO terms.
    """

    def __init__(self, prior, p_u_density, bijection, q_u_density):
        super().__init__()
        # Assignment order preserved (module registration order).
        self.bijection = bijection
        self.prior = prior
        self.p_u_density = p_u_density
        self.q_u_density = q_u_density

    def p_parameters(self):
        """Generative-path parameters: bijection, p(u|z), and the prior's p-params."""
        return [*self.bijection.parameters(), *self.p_u_density.parameters(), *self.prior.p_parameters()]

    def q_parameters(self):
        """Inference-path parameters: q(u|x), the prior's q-params, and —
        only when the prior itself has q-params — the bijection (it then
        sits on the path from x to the prior's conditioning input)."""
        result = list(self.q_u_density.parameters())
        prior_q_params = list(self.prior.q_parameters())
        result += prior_q_params
        if prior_q_params:
            result += list(self.bijection.parameters())
        return result

    def _elbo(self, x, detach_q_params, detach_q_samples):
        q_result = self.q_u_density.sample(cond_inputs=x, detach_params=detach_q_params, detach_samples=detach_q_samples)
        u = q_result['sample']
        log_q_u = q_result['log-prob']
        bijection_result = self.bijection.x_to_z(x, u=u)
        z = bijection_result['z']
        log_jac = bijection_result['log-jac']
        log_p_u = self.p_u_density.log_prob(inputs=u, cond_inputs=z)['log-prob']
        prior_dict = self.prior('elbo', z, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        return {
            'log-p': (log_jac + log_p_u) + prior_dict['log-p'],
            'log-q': log_q_u + prior_dict['log-q'],
            # Bug fix: `result` had been reassigned to the p_u_density
            # log-prob dict before being returned here, so 'bijection-info'
            # carried the wrong dict. It now holds the x_to_z output,
            # matching the contract in FlowDensity._elbo.
            'bijection-info': bijection_result,
            'prior-dict': prior_dict,
        }

    def _fix_random_u(self):
        """Fix the prior recursively, draw u ~ p(u | z), and return a plain
        FlowDensity with the bijection conditioned on that u."""
        fixed_prior, z = self.prior._fix_random_u()
        z = z.unsqueeze(0)
        u = self.p_u_density.sample(z)['sample']
        fixed_bijection = self.bijection.condition(u.squeeze(0))
        new_z = fixed_bijection.z_to_x(z)['x'].squeeze(0)
        return (FlowDensity(prior=fixed_prior, bijection=fixed_bijection), new_z)

    def fix_u(self, u):
        """Condition on an explicit index sequence: u[0] for this layer,
        the remainder for the prior."""
        fixed_prior = self.prior.fix_u(u=u[1:])
        fixed_bijection = self.bijection.condition(u[0])
        return FlowDensity(prior=fixed_prior, bijection=fixed_bijection)

    def _sample(self, num_samples):
        z = self.prior.sample(num_samples)
        u = self.p_u_density.sample(z)['sample']
        return self.bijection.z_to_x(z, u=u)['x']

    def _fixed_sample(self, noise):
        z = self.prior.fixed_sample(noise=noise)
        u = self.p_u_density.sample(z)['sample']
        return self.bijection.z_to_x(z, u=u)['x']
|
class Density(nn.Module):
    """Base class for densities trained by maximising an ELBO.

    Subclasses implement the underscore hooks; public entry points route
    through `forward` so nn.Module hooks apply uniformly.
    """

    def forward(self, mode, *args, **kwargs):
        handlers = {
            'elbo': self._elbo,
            'sample': self._sample,
            'fixed-sample': self._fixed_sample,
        }
        assert mode in handlers, f'Invalid mode {mode}'
        return handlers[mode](*args, **kwargs)

    def p_parameters(self):
        raise NotImplementedError

    def q_parameters(self):
        raise NotImplementedError

    def fix_random_u(self):
        """Return a copy of the density with any continuous indices fixed."""
        fixed_density, _ = self._fix_random_u()
        return fixed_density

    def fix_u(self, u):
        raise NotImplementedError

    def elbo(self, x, num_importance_samples, detach_q_params=False, detach_q_samples=False):
        """Importance-weighted ELBO terms for `x`.

        Each input row is repeated `num_importance_samples` times, the
        per-sample log-p / log-q are reshaped to (batch, K, 1), and the
        importance log-weights are their difference.
        """
        expanded_x = x.repeat_interleave(num_importance_samples, dim=0)
        result = self('elbo', expanded_x, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        shape = (x.shape[0], num_importance_samples, 1)
        log_p = result['log-p'].view(shape)
        log_q = result['log-q'].view(shape)
        return {'log-p': log_p, 'log-q': log_q, 'log-w': log_p - log_q}

    def sample(self, num_samples):
        return self('sample', num_samples)

    def fixed_sample(self, noise=None):
        return self('fixed-sample', noise)

    def _fix_random_u(self):
        raise NotImplementedError

    def _elbo(self, x, detach_q_params, detach_q_samples):
        raise NotImplementedError

    def _sample(self, num_samples):
        raise NotImplementedError

    def _fixed_sample(self, noise):
        raise NotImplementedError
|
class FlowDensity(Density):
    """Density obtained by pushing `prior` samples through `bijection`.

    log p(x) = log p_prior(z) + log|det dz/dx| with z = bijection(x).
    """

    def __init__(self, prior, bijection):
        super().__init__()
        # Assignment order preserved (module registration order).
        self.bijection = bijection
        self.prior = prior

    def p_parameters(self):
        return [*self.bijection.parameters(), *self.prior.p_parameters()]

    def q_parameters(self):
        return self.prior.q_parameters()

    def _fix_random_u(self):
        fixed_prior, prior_z = self.prior._fix_random_u()
        # Push the prior's representative point through the bijection so the
        # caller receives it in this density's data space.
        pushed = self.bijection.z_to_x(prior_z.unsqueeze(0))['x'].squeeze(0)
        return (FlowDensity(bijection=self.bijection, prior=fixed_prior), pushed)

    def fix_u(self, u):
        return FlowDensity(bijection=self.bijection, prior=self.prior.fix_u(u=u))

    def _elbo(self, x, detach_q_params, detach_q_samples):
        bijection_result = self.bijection.x_to_z(x)
        prior_dict = self.prior(
            'elbo', bijection_result['z'],
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples,
        )
        return {
            'log-p': prior_dict['log-p'] + bijection_result['log-jac'],
            'log-q': prior_dict['log-q'],
            'bijection-info': bijection_result,
            'prior-dict': prior_dict,
        }

    def _sample(self, num_samples):
        return self.bijection.z_to_x(self.prior.sample(num_samples))['x']

    def _fixed_sample(self, noise):
        return self.bijection.z_to_x(self.prior.fixed_sample(noise=noise))['x']
|
def diagonal_gaussian_log_prob(w, means, stddevs):
    """Log-density of w under N(means, diag(stddevs**2)).

    All non-batch dimensions are flattened and summed; returns (batch, 1).
    """
    assert means.shape == stddevs.shape == w.shape
    flat_w = w.flatten(start_dim=1)
    flat_means = means.flatten(start_dim=1)
    flat_vars = stddevs.flatten(start_dim=1) ** 2
    dim = flat_w.shape[1]
    normaliser = -0.5 * dim * np.log(2 * np.pi)
    log_det = -0.5 * torch.sum(torch.log(flat_vars), dim=1, keepdim=True)
    mahalanobis = -0.5 * torch.sum((flat_w - flat_means) ** 2 / flat_vars, dim=1, keepdim=True)
    return normaliser + log_det + mahalanobis
|
def diagonal_gaussian_sample(means, stddevs):
    """Reparameterised draw from N(means, diag(stddevs**2))."""
    noise = torch.randn_like(means)
    return means + noise * stddevs
|
def diagonal_gaussian_entropy(stddevs):
    """Entropy of N(mu, diag(stddevs**2)): sum log sigma + d/2 (1 + log 2pi).

    Returns shape (batch, 1); non-batch dimensions are flattened.
    """
    flat_stddevs = stddevs.flatten(start_dim=1)
    dim = flat_stddevs.shape[1]
    const = 0.5 * dim * (1 + np.log(2 * np.pi))
    return torch.sum(torch.log(flat_stddevs), dim=1, keepdim=True) + const
|
# NOTE(review): the following trailing text is a page-scrape artifact (web UI
# text, not Python) — commented out to keep the module importable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.