def get_most_similar_str_to_a_from_b(a, b):
    """Return the most similar string to a in b.

    Args:
        a (str): probe string.
        b (list): a list of candidate strings.
    """
highest_sim = 0
chosen = None
for candidate in b:
sim = SequenceMatcher(None, a, candidate).ratio()
if (sim >= highest_sim):
highest_sim = sim
chosen = candidate
return chosen
|
def check_availability(requested, available):
    """Check if an element is available in a list.

    Args:
        requested (str): probe string.
        available (list): a list of available strings.
    """
    if requested not in available:
        psb_ans = get_most_similar_str_to_a_from_b(requested, available)
        raise ValueError('The requested value must belong to {}, but got [{}] (did you mean [{}]?)'.format(available, requested, psb_ans))
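
# A minimal usage sketch of the two helpers above. The model names are
# illustrative only; the helpers just need `from difflib import SequenceMatcher`
# at module top, which the original file implies.
#
# >>> candidates = ['resnet18', 'resnet50', 'vgg16']
# >>> get_most_similar_str_to_a_from_b('resnet51', candidates)
# 'resnet50'
# >>> check_availability('resnet51', candidates)
# ValueError: ... but got [resnet51] (did you mean [resnet50]?)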
|
def tolist_if_not(x):
'Convert to a list.'
if (not isinstance(x, list)):
x = [x]
return x
|
def save_checkpoint(state, save_dir, is_best=False, remove_module_from_keys=True, model_name=''):
    """Save checkpoint.

    Args:
        state (dict): dictionary.
        save_dir (str): directory to save checkpoint.
        is_best (bool, optional): if True, this checkpoint will be copied and named
            ``model-best.pth.tar``. Default is False.
        remove_module_from_keys (bool, optional): whether to remove "module."
            from layer names. Default is True.
        model_name (str, optional): model name to save.

    Examples::
        >>> state = {
        >>>     'state_dict': model.state_dict(),
        >>>     'epoch': 10,
        >>>     'optimizer': optimizer.state_dict()
        >>> }
        >>> save_checkpoint(state, 'log/my_model')
    """
mkdir_if_missing(save_dir)
if remove_module_from_keys:
state_dict = state['state_dict']
new_state_dict = OrderedDict()
for (k, v) in state_dict.items():
if k.startswith('module.'):
k = k[7:]
new_state_dict[k] = v
state['state_dict'] = new_state_dict
epoch = state['epoch']
if (not model_name):
model_name = ('model.pth.tar-' + str(epoch))
fpath = osp.join(save_dir, model_name)
torch.save(state, fpath)
print('Checkpoint saved to "{}"'.format(fpath))
checkpoint_file = osp.join(save_dir, 'checkpoint')
    with open(checkpoint_file, 'w+') as checkpoint:
        checkpoint.write('{}\n'.format(osp.basename(fpath)))
if is_best:
best_fpath = osp.join(osp.dirname(fpath), 'model-best.pth.tar')
shutil.copy(fpath, best_fpath)
print('Best checkpoint saved to "{}"'.format(best_fpath))
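
# Note on the layout produced above: besides the weights file, save_checkpoint
# writes a plain-text file named "checkpoint" holding the basename of the most
# recent weights file; resume_from_checkpoint (below) reads that pointer to
# decide which file to load. A sketch of the resulting directory:
#
#   log/my_model/
#       model.pth.tar-10     # torch.save'd state dict
#       model-best.pth.tar   # copy made when is_best=True
#       checkpoint           # text file containing "model.pth.tar-10"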
|
def load_checkpoint(fpath):
"Load checkpoint.\n\n ``UnicodeDecodeError`` can be well handled, which means\n python2-saved files can be read from python3.\n\n Args:\n fpath (str): path to checkpoint.\n\n Returns:\n dict\n\n Examples::\n >>> fpath = 'log/my_model/model.pth.tar-10'\n >>> checkpoint = load_checkpoint(fpath)\n "
if (fpath is None):
raise ValueError('File path is None')
if (not osp.exists(fpath)):
raise FileNotFoundError('File is not found at "{}"'.format(fpath))
map_location = (None if torch.cuda.is_available() else 'cpu')
try:
checkpoint = torch.load(fpath, map_location=map_location)
except UnicodeDecodeError:
pickle.load = partial(pickle.load, encoding='latin1')
pickle.Unpickler = partial(pickle.Unpickler, encoding='latin1')
checkpoint = torch.load(fpath, pickle_module=pickle, map_location=map_location)
except Exception:
print('Unable to load checkpoint from "{}"'.format(fpath))
raise
return checkpoint
|
def resume_from_checkpoint(fdir, model, optimizer=None, scheduler=None):
"Resume training from a checkpoint.\n\n This will load (1) model weights and (2) ``state_dict``\n of optimizer if ``optimizer`` is not None.\n\n Args:\n fdir (str): directory where the model was saved.\n model (nn.Module): model.\n optimizer (Optimizer, optional): an Optimizer.\n scheduler (Scheduler, optional): an Scheduler.\n\n Returns:\n int: start_epoch.\n\n Examples::\n >>> fdir = 'log/my_model'\n >>> start_epoch = resume_from_checkpoint(fdir, model, optimizer, scheduler)\n "
with open(osp.join(fdir, 'checkpoint'), 'r') as checkpoint:
model_name = checkpoint.readlines()[0].strip('\n')
fpath = osp.join(fdir, model_name)
print('Loading checkpoint from "{}"'.format(fpath))
checkpoint = load_checkpoint(fpath)
model.load_state_dict(checkpoint['state_dict'])
print('Loaded model weights')
if ((optimizer is not None) and ('optimizer' in checkpoint.keys())):
optimizer.load_state_dict(checkpoint['optimizer'])
print('Loaded optimizer')
if ((scheduler is not None) and ('scheduler' in checkpoint.keys())):
scheduler.load_state_dict(checkpoint['scheduler'])
print('Loaded scheduler')
start_epoch = checkpoint['epoch']
print('Previous epoch: {}'.format(start_epoch))
return start_epoch
|
def adjust_learning_rate(optimizer, base_lr, epoch, stepsize=20, gamma=0.1, linear_decay=False, final_lr=0, max_epoch=100):
    """Adjust learning rate.

    Deprecated.
    """
if linear_decay:
frac_done = (epoch / max_epoch)
lr = ((frac_done * final_lr) + ((1.0 - frac_done) * base_lr))
else:
lr = (base_lr * (gamma ** (epoch // stepsize)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
|
def set_bn_to_eval(m):
'Set BatchNorm layers to eval mode.'
classname = m.__class__.__name__
if (classname.find('BatchNorm') != (- 1)):
m.eval()
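
# set_bn_to_eval is written for nn.Module.apply, which visits every submodule
# recursively, so nested BatchNorm layers are reached too. A hedged sketch
# (the tiny model is illustrative only):
#
# >>> import torch.nn as nn
# >>> model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
# >>> model.train()
# >>> model.apply(set_bn_to_eval)  # BN stops updating running stats; conv still trains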
|
def open_all_layers(model):
    """Open all layers in model for training.

    Examples::
        >>> open_all_layers(model)
    """
model.train()
for p in model.parameters():
p.requires_grad = True
|
def open_specified_layers(model, open_layers):
"Open specified layers in model for training while keeping\n other layers frozen.\n\n Args:\n model (nn.Module): neural net model.\n open_layers (str or list): layers open for training.\n\n Examples::\n >>> # Only model.classifier will be updated.\n >>> open_layers = 'classifier'\n >>> open_specified_layers(model, open_layers)\n >>> # Only model.fc and model.classifier will be updated.\n >>> open_layers = ['fc', 'classifier']\n >>> open_specified_layers(model, open_layers)\n "
if isinstance(model, nn.DataParallel):
model = model.module
if isinstance(open_layers, str):
open_layers = [open_layers]
for layer in open_layers:
assert hasattr(model, layer), '"{}" is not an attribute of the model, please provide the correct name'.format(layer)
for (name, module) in model.named_children():
if (name in open_layers):
module.train()
for p in module.parameters():
p.requires_grad = True
else:
module.eval()
for p in module.parameters():
p.requires_grad = False
|
def count_num_param(model):
    """Count number of parameters in a model.

    Args:
        model (nn.Module): network model.

    Examples::
        >>> model_size = count_num_param(model)
    """
return sum((p.numel() for p in model.parameters()))
|
def load_pretrained_weights(model, weight_path):
    """Load pretrained weights to model.

    Features::
        - Incompatible layers (unmatched in name or size) will be ignored.
        - Can automatically deal with keys containing "module.".

    Args:
        model (nn.Module): network model.
        weight_path (str): path to pretrained weights.

    Examples::
        >>> weight_path = 'log/my_model/model-best.pth.tar'
        >>> load_pretrained_weights(model, weight_path)
    """
checkpoint = load_checkpoint(weight_path)
if ('state_dict' in checkpoint):
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model_dict = model.state_dict()
new_state_dict = OrderedDict()
(matched_layers, discarded_layers) = ([], [])
for (k, v) in state_dict.items():
if k.startswith('module.'):
k = k[7:]
if ((k in model_dict) and (model_dict[k].size() == v.size())):
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if (len(matched_layers) == 0):
        warnings.warn('The pretrained weights "{}" cannot be loaded; please check the key names manually (** ignored and continuing **)'.format(weight_path))
else:
print('Successfully loaded pretrained weights from "{}"'.format(weight_path))
if (len(discarded_layers) > 0):
print('** The following layers are discarded due to unmatched keys or layer size: {}'.format(discarded_layers))
|
def init_network_weights(model, init_type='normal', gain=0.02):
def _init_func(m):
classname = m.__class__.__name__
if (hasattr(m, 'weight') and ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1)))):
if (init_type == 'normal'):
nn.init.normal_(m.weight.data, 0.0, gain)
elif (init_type == 'xavier'):
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif (init_type == 'kaiming'):
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif (init_type == 'orthogonal'):
nn.init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method {} is not implemented'.format(init_type))
if (hasattr(m, 'bias') and (m.bias is not None)):
nn.init.constant_(m.bias.data, 0.0)
elif (classname.find('BatchNorm') != (- 1)):
nn.init.constant_(m.weight.data, 1.0)
nn.init.constant_(m.bias.data, 0.0)
elif (classname.find('InstanceNorm') != (- 1)):
if ((m.weight is not None) and (m.bias is not None)):
nn.init.constant_(m.weight.data, 1.0)
nn.init.constant_(m.bias.data, 0.0)
model.apply(_init_func)
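
# Usage sketch for init_network_weights. It relies on nn.Module.apply, so one
# call initializes every Conv/Linear/BatchNorm/InstanceNorm submodule; `gain`
# is the stddev for 'normal' and the gain for 'xavier' and 'orthogonal'
# (it is unused by 'kaiming'):
#
# >>> init_network_weights(model, init_type='kaiming')
# >>> init_network_weights(model, init_type='normal', gain=0.02)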
|
def extract_and_save_image(dataset, save_dir, discard, label2name):
if osp.exists(save_dir):
print('Folder "{}" already exists'.format(save_dir))
return
print('Extracting images to "{}" ...'.format(save_dir))
mkdir_if_missing(save_dir)
for i in range(len(dataset)):
(img, label) = dataset[i]
if (label == discard):
continue
class_name = label2name[label]
label_new = new_name2label[class_name]
class_dir = osp.join(save_dir, ((str(label_new).zfill(3) + '_') + class_name))
mkdir_if_missing(class_dir)
impath = osp.join(class_dir, (str((i + 1)).zfill(5) + '.jpg'))
img.save(impath)
|
def download_and_prepare(name, root, discarded_label, label2name):
print('Dataset: {}'.format(name))
print('Root: {}'.format(root))
print('Old labels:')
pp.pprint(label2name)
print('Discarded label: {}'.format(discarded_label))
print('New labels:')
pp.pprint(new_name2label)
if (name == 'cifar'):
train = CIFAR10(root, train=True, download=True)
test = CIFAR10(root, train=False)
else:
train = STL10(root, split='train', download=True)
test = STL10(root, split='test')
train_dir = osp.join(root, name, 'train')
test_dir = osp.join(root, name, 'test')
extract_and_save_image(train, train_dir, discarded_label, label2name)
extract_and_save_image(test, test_dir, discarded_label, label2name)
|
def extract_and_save(images, labels, level, dst):
assert (0 <= level <= 4)
for i in range(10000):
real_i = (i + (level * 10000))
im = Image.fromarray(images[real_i])
label = int(labels[real_i])
category_dir = osp.join(dst, str(label).zfill(3))
mkdir_if_missing(category_dir)
save_path = osp.join(category_dir, (str((i + 1)).zfill(5) + '.jpg'))
im.save(save_path)
|
def main(npy_folder):
npy_folder = osp.abspath(osp.expanduser(npy_folder))
dataset_cap = osp.basename(npy_folder)
assert (dataset_cap in ['CIFAR-10-C', 'CIFAR-100-C'])
if (dataset_cap == 'CIFAR-10-C'):
dataset = 'cifar10_c'
else:
dataset = 'cifar100_c'
    if not osp.exists(npy_folder):
        print('The given folder "{}" does not exist'.format(npy_folder))
        return
root = osp.dirname(npy_folder)
im_folder = osp.join(root, dataset)
mkdir_if_missing(im_folder)
dirnames = os.listdir(npy_folder)
dirnames.remove('labels.npy')
if ('README.txt' in dirnames):
dirnames.remove('README.txt')
assert (len(dirnames) == 19)
labels = np.load(osp.join(npy_folder, 'labels.npy'))
for dirname in dirnames:
corruption = dirname.split('.')[0]
corruption_folder = osp.join(im_folder, corruption)
mkdir_if_missing(corruption_folder)
npy_filename = osp.join(npy_folder, dirname)
images = np.load(npy_filename)
assert (images.shape[0] == 50000)
for level in range(5):
dst = osp.join(corruption_folder, str((level + 1)))
mkdir_if_missing(dst)
print('Saving images to "{}"'.format(dst))
extract_and_save(images, labels, level, dst)
|
def extract_and_save_image(dataset, save_dir):
if osp.exists(save_dir):
print('Folder "{}" already exists'.format(save_dir))
return
print('Extracting images to "{}" ...'.format(save_dir))
mkdir_if_missing(save_dir)
for i in range(len(dataset)):
(img, label) = dataset[i]
class_dir = osp.join(save_dir, str(label).zfill(3))
mkdir_if_missing(class_dir)
impath = osp.join(class_dir, (str((i + 1)).zfill(5) + '.jpg'))
img.save(impath)
|
def download_and_prepare(name, root):
print('Dataset: {}'.format(name))
print('Root: {}'.format(root))
if (name == 'cifar10'):
train = CIFAR10(root, train=True, download=True)
test = CIFAR10(root, train=False)
elif (name == 'cifar100'):
train = CIFAR100(root, train=True, download=True)
test = CIFAR100(root, train=False)
elif (name == 'svhn'):
train = SVHN(root, split='train', download=True)
test = SVHN(root, split='test', download=True)
else:
raise ValueError
train_dir = osp.join(root, name, 'train')
test_dir = osp.join(root, name, 'test')
extract_and_save_image(train, train_dir)
extract_and_save_image(test, test_dir)
|
def extract_and_save_image(dataset, save_dir):
if osp.exists(save_dir):
print('Folder "{}" already exists'.format(save_dir))
return
print('Extracting images to "{}" ...'.format(save_dir))
mkdir_if_missing(save_dir)
for i in range(len(dataset)):
(img, label) = dataset[i]
if (label == (- 1)):
label_name = 'none'
else:
label_name = str(label)
imname = (((str(i).zfill(6) + '_') + label_name) + '.jpg')
impath = osp.join(save_dir, imname)
img.save(impath)
|
def download_and_prepare(root):
train = STL10(root, split='train', download=True)
test = STL10(root, split='test')
unlabeled = STL10(root, split='unlabeled')
train_dir = osp.join(root, 'train')
test_dir = osp.join(root, 'test')
unlabeled_dir = osp.join(root, 'unlabeled')
extract_and_save_image(train, train_dir)
extract_and_save_image(test, test_dir)
extract_and_save_image(unlabeled, unlabeled_dir)
|
def readme():
with open('README.md') as f:
content = f.read()
return content
|
def find_version():
version_file = 'dassl/__init__.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
|
def numpy_include():
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
return numpy_include
|
def get_requirements(filename='requirements.txt'):
here = osp.dirname(osp.realpath(__file__))
with open(osp.join(here, filename), 'r') as f:
requires = [line.replace('\n', '') for line in f.readlines()]
return requires
|
def compute_ci95(res):
return ((1.96 * np.std(res)) / np.sqrt(len(res)))
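
# compute_ci95 returns the half-width of a 95% confidence interval under a
# normal approximation: 1.96 * std / sqrt(n). Note that np.std defaults to the
# population standard deviation (ddof=0). A worked example (values illustrative):
#
# >>> res = [70.1, 71.3, 69.8, 70.6, 70.2]  # e.g. accuracies over 5 seeds
# >>> compute_ci95(res)                     # 1.96 * np.std(res) / sqrt(5)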
|
def parse_function(*metrics, directory='', args=None, end_signal=None):
print(f'Parsing files in {directory}')
subdirs = listdir_nohidden(directory, sort=True)
outputs = []
for subdir in subdirs:
fpath = osp.join(directory, subdir, 'log.txt')
assert check_isfile(fpath)
good_to_go = False
output = OrderedDict()
with open(fpath, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if (line == end_signal):
good_to_go = True
for metric in metrics:
match = metric['regex'].search(line)
if (match and good_to_go):
if ('file' not in output):
output['file'] = fpath
num = float(match.group(1))
name = metric['name']
output[name] = num
if output:
outputs.append(output)
assert (len(outputs) > 0), f'Nothing found in {directory}'
metrics_results = defaultdict(list)
for output in outputs:
msg = ''
for (key, value) in output.items():
if isinstance(value, float):
msg += f'{key}: {value:.2f}%. '
else:
msg += f'{key}: {value}. '
if (key != 'file'):
metrics_results[key].append(value)
print(msg)
output_results = OrderedDict()
print('===')
print(f'Summary of directory: {directory}')
for (key, values) in metrics_results.items():
avg = np.mean(values)
std = (compute_ci95(values) if args.ci95 else np.std(values))
print(f'* {key}: {avg:.2f}% +- {std:.2f}%')
output_results[key] = avg
print('===')
return output_results
|
def main(args, end_signal):
metric1 = {'name': 'accuracy', 'regex': re.compile('\\* accuracy: ([\\.\\deE+-]+)%')}
metric2 = {'name': 'error', 'regex': re.compile('\\* error: ([\\.\\deE+-]+)%')}
if args.multi_exp:
final_results = defaultdict(list)
for directory in listdir_nohidden(args.directory, sort=True):
directory = osp.join(args.directory, directory)
results = parse_function(metric1, metric2, directory=directory, args=args, end_signal=end_signal)
for (key, value) in results.items():
final_results[key].append(value)
print('Average performance')
for (key, values) in final_results.items():
avg = np.mean(values)
print(f'* {key}: {avg:.2f}%')
else:
parse_function(metric1, metric2, directory=args.directory, args=args, end_signal=end_signal)
|
def is_python_file(filename):
ext = osp.splitext(filename)[1]
return (ext == EXTENSION)
|
def update_file(filename, text_to_search, replacement_text):
print('Processing {}'.format(filename))
with fileinput.FileInput(filename, inplace=True, backup='') as file:
for line in file:
print(line.replace(text_to_search, replacement_text), end='')
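
# update_file uses fileinput.FileInput with inplace=True, which redirects
# stdout into the file being read, so the `print` above rewrites each line
# in place. A hedged sketch (the filename is illustrative):
#
# >>> update_file('trainer.py', 'OldClassName', 'NewClassName')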
|
def recursive_update(directory, text_to_search, replacement_text):
filenames = glob.glob(osp.join(directory, '*'))
for filename in filenames:
if osp.isfile(filename):
if (not is_python_file(filename)):
continue
update_file(filename, text_to_search, replacement_text)
elif osp.isdir(filename):
recursive_update(filename, text_to_search, replacement_text)
else:
raise NotImplementedError
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file_or_dir', type=str, help='path to file or directory')
parser.add_argument('text_to_search', type=str, help='name to be replaced')
parser.add_argument('replacement_text', type=str, help='new name')
parser.add_argument('--ext', type=str, default='.py', help='file extension')
args = parser.parse_args()
file_or_dir = args.file_or_dir
text_to_search = args.text_to_search
replacement_text = args.replacement_text
extension = args.ext
global EXTENSION
EXTENSION = extension
if osp.isfile(file_or_dir):
if (not is_python_file(file_or_dir)):
return
update_file(file_or_dir, text_to_search, replacement_text)
elif osp.isdir(file_or_dir):
recursive_update(file_or_dir, text_to_search, replacement_text)
else:
raise NotImplementedError
|
def print_args(args, cfg):
print('***************')
print('** Arguments **')
print('***************')
optkeys = list(args.__dict__.keys())
optkeys.sort()
for key in optkeys:
print('{}: {}'.format(key, args.__dict__[key]))
print('************')
print('** Config **')
print('************')
print(cfg)
|
def reset_cfg(cfg, args):
if args.root:
cfg.DATASET.ROOT = args.root
if args.output_dir:
cfg.OUTPUT_DIR = args.output_dir
if args.resume:
cfg.RESUME = args.resume
if args.seed:
cfg.SEED = args.seed
if args.source_domains:
cfg.DATASET.SOURCE_DOMAINS = args.source_domains
if args.target_domains:
cfg.DATASET.TARGET_DOMAINS = args.target_domains
if args.transforms:
cfg.INPUT.TRANSFORMS = args.transforms
if args.trainer:
cfg.TRAINER.NAME = args.trainer
if args.backbone:
cfg.MODEL.BACKBONE.NAME = args.backbone
if args.head:
cfg.MODEL.HEAD.NAME = args.head
|
def extend_cfg(cfg):
    """Add new config variables.

    E.g.
        from yacs.config import CfgNode as CN
        cfg.TRAINER.MY_MODEL = CN()
        cfg.TRAINER.MY_MODEL.PARAM_A = 1.
        cfg.TRAINER.MY_MODEL.PARAM_B = 0.5
        cfg.TRAINER.MY_MODEL.PARAM_C = False
    """
pass
|
def setup_cfg(args):
cfg = get_cfg_default()
extend_cfg(cfg)
if args.dataset_config_file:
cfg.merge_from_file(args.dataset_config_file)
if args.config_file:
cfg.merge_from_file(args.config_file)
reset_cfg(cfg, args)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
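
# The merge order in setup_cfg determines precedence, lowest to highest:
# defaults from get_cfg_default(), extend_cfg() additions, the dataset config
# file, the method config file, CLI flags applied by reset_cfg(), and finally
# the free-form `--opts KEY VALUE ...` list. For example (hypothetical paths):
#
# >>> # args.dataset_config_file = 'configs/datasets/office31.yaml'
# >>> # args.config_file = 'configs/trainers/source_only.yaml'
# >>> # args.opts = ['OPTIM.MAX_EPOCH', '50']
# >>> cfg = setup_cfg(args)  # OPTIM.MAX_EPOCH ends up as 50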
|
def main(args):
cfg = setup_cfg(args)
if (cfg.SEED >= 0):
print('Setting fixed seed: {}'.format(cfg.SEED))
set_random_seed(cfg.SEED)
setup_logger(cfg.OUTPUT_DIR)
if (torch.cuda.is_available() and cfg.USE_CUDA):
torch.backends.cudnn.benchmark = True
print_args(args, cfg)
print('Collecting env info ...')
print('** System info **\n{}\n'.format(collect_env_info()))
trainer = build_trainer(cfg)
if args.eval_only:
trainer.load_model(args.model_dir, epoch=args.load_epoch)
trainer.test()
return
if (not args.no_train):
trainer.train()
|
def verify_token(headers, path):
token = headers.get('authorization', '')[7:]
if (os.environ['SYSTEM_TOKEN'] == token):
return True
elif ((not path.startswith('/upload_video')) and (os.environ['USER_TOKEN'] == token)):
return True
else:
return False
|
@app.get('/jobid/{task_id}')
def check_job(task_id: str) -> dict:
res = celery_workers.AsyncResult(task_id)
if (res.state == states.PENDING):
reserved_tasks = celery_workers.control.inspect().reserved()
tasks = []
if reserved_tasks:
tasks_per_worker = reserved_tasks.values()
tasks = [item for sublist in tasks_per_worker for item in sublist]
found = False
for task in tasks:
if (task['id'] == task_id):
found = True
result = {'jobs_in_queue': len(tasks)}
elif (res.state == states.FAILURE):
result = str(res.result)
else:
result = res.result
return {'state': res.state, 'result': result}
|
def fix_obj(parent_obj):
for obj in parent_obj.children:
fix_obj(obj)
parent_obj.rotation_euler.x = 0
if (parent_obj.name in ['pCube0', 'pCube1', 'pCube2']):
parent_obj.location.y = (- 13)
if (parent_obj.name == 'pCube3'):
parent_obj.location.y = (- 10)
if (parent_obj.name == 'pCube5'):
parent_obj.location.y = (- 9.5)
if ('materials' in dir(parent_obj.data)):
if parent_obj.data.materials:
parent_obj.data.materials[0] = mat
else:
parent_obj.data.materials.append(mat)
|
class TaskFailure(Exception):
pass
|
def validate_bvh_file(bvh_file):
MAX_NUMBER_FRAMES = int(os.environ['MAX_NUMBER_FRAMES'])
FRAME_TIME = (1.0 / float(os.environ['RENDER_FPS']))
file_content = bvh_file.decode('utf-8')
mocap = Bvh(file_content)
counter = None
for line in file_content.split('\n'):
if ((counter is not None) and line.strip()):
counter += 1
if (line.strip() == 'MOTION'):
counter = (- 2)
if (mocap.nframes != counter):
raise TaskFailure(f'The number of rows with motion data ({counter}) does not match the Frames field ({mocap.nframes})')
if ((MAX_NUMBER_FRAMES != (- 1)) and (mocap.nframes > MAX_NUMBER_FRAMES)):
raise TaskFailure(f'The supplied number of frames ({mocap.nframes}) is bigger than {MAX_NUMBER_FRAMES}')
if (mocap.frame_time != FRAME_TIME):
raise TaskFailure(f'The supplied frame time ({mocap.frame_time}) differs from the required {FRAME_TIME}')
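
# A hedged sketch of validate_bvh_file on a minimal BVH payload. The header
# below is roughly the smallest hierarchy the `bvh` package will parse, and a
# frame time of 0.05 corresponds to RENDER_FPS=20; all values are illustrative.
#
# >>> os.environ['MAX_NUMBER_FRAMES'] = '100'
# >>> os.environ['RENDER_FPS'] = '20'
# >>> data = ('HIERARCHY\nROOT Hips\n{\n\tOFFSET 0 0 0\n'
# ...         '\tCHANNELS 3 Xposition Yposition Zposition\n'
# ...         '\tEnd Site\n\t{\n\t\tOFFSET 0 1 0\n\t}\n}\n'
# ...         'MOTION\nFrames: 2\nFrame Time: 0.05\n0 0 0\n0 0 1\n')
# >>> validate_bvh_file(data.encode('utf-8'))  # passes silently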
|
@celery.task(name='tasks.render', bind=True, hard_time_limit=WORKER_TIMEOUT)
def render(self, bvh_file_uri: str) -> str:
    HEADERS = {'Authorization': 'Bearer ' + os.environ['SYSTEM_TOKEN']}
API_SERVER = os.environ['API_SERVER']
logger.info('rendering..')
self.update_state(state='PROCESSING')
bvh_file = requests.get((API_SERVER + bvh_file_uri), headers=HEADERS).content
validate_bvh_file(bvh_file)
    with tempfile.NamedTemporaryFile(suffix='.bvh') as tmpf:
tmpf.write(bvh_file)
tmpf.seek(0)
process = subprocess.Popen(['/blender/blender-2.83.0-linux64/blender', '-noaudio', '-b', '--python', 'blender_render.py', '--', tmpf.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
total = None
current_frame = None
for line in process.stdout:
line = line.decode('utf-8').strip()
if line.startswith('total_frames '):
(_, total) = line.split(' ')
total = int(float(total))
elif line.startswith('Append frame '):
(*_, current_frame) = line.split(' ')
current_frame = int(current_frame)
elif line.startswith('output_file'):
(_, file_name) = line.split(' ')
files = {'file': (os.path.basename(file_name), open(file_name, 'rb'))}
return requests.post((API_SERVER + '/upload_video'), files=files, headers=HEADERS).text
if (total and current_frame):
self.update_state(state='RENDERING', meta={'current': current_frame, 'total': total})
    process.wait()
    if process.returncode != 0:
        raise TaskFailure(process.stderr.read().decode('utf-8'))
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append_channels', append_channels_div=1), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 4, 3: 4, 4: 4, 5: 4, 6: 4, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 8, 3: 8, 4: 8, 5: 8, 6: 8, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3, 9: 2}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=(30 * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0, 9: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, consistency_loss=30.0, consistency_temporal=False, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3, 9: 2}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=(30 * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0, 9: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, consistency_loss=300.0, consistency_temporal=False, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_loss_phase_to_active_coords={2: range(0, 16), 3: range(16, 32), 4: range(32, 48), 5: range(48, 64), 6: range(64, 80), 7: [], 8: []}, infogan_cont_depth_to_num_vars={2: 80, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_unmask_depth_to_vars={2: range(0, 16), 3: range(16, 32), 4: range(32, 48), 5: range(48, 64), 6: range(64, 80), 7: [], 8: []}, infogan_cont_loss_phase_to_active_coords={2: range(0, 16), 3: range(16, 32), 4: range(32, 48), 5: range(48, 64), 6: range(64, 80), 7: [], 8: []}, infogan_cont_depth_to_num_vars={2: 80, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def training_params(is_gcloud=False, output_dir=None):
if (not output_dir):
output_dir = util.construct_experiment_output_dir(__file__)
num_gpus = 1
stop_after = 7
dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
imgs_per_phase = 384000
dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
dynamic_steps_per_phase[7] *= 2
return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 80, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
|
def main(*args):
train.run_training(training_params())
|
def nchw_to_nhwc(images):
return tf.transpose(images, [0, 2, 3, 1])
|
def nhwc_to_nchw(images):
return tf.transpose(images, [0, 3, 1, 2])
|
def nchw_to_nhwc_single(images):
return tf.transpose(images, [1, 2, 0])
|
def nhwc_to_nchw_single(images):
return tf.transpose(images, [2, 0, 1])
|
class CelebAHQDatasetParams(util.Params):
def get_allowed_params_with_defaults(self):
return dict(values_range=((- 1.0), 1.0), img_side=128, data_dir=None, train_shuffle=True, gcs_bucket=None, tfrecord_dir=constants.NVIDIA_CELEBA_HQ_DATASET_PATH, random_flip=False, crop_at_center=False, restrict_to_num_imgs=None, max_tfrecord_res_available=10)
def validate(self):
assert (self.img_side in [4, 8, 16, 32, 64, 128, 256, 512, 1024])
|
def get_dataset_params(is_gcloud=False, tfrecord_dir=constants.NVIDIA_CELEBA_HQ_DATASET_PATH_GCLOUD, **kwargs):
if is_gcloud:
return CelebAHQDatasetParams(gcs_bucket=constants.GCLOUD_BUCKET, tfrecord_dir=tfrecord_dir, **kwargs)
else:
return CelebAHQDatasetParams(**kwargs)
|
def process(img, params, input_img_size):
img = tf.parse_single_example(img, {'data': tf.FixedLenFeature([], tf.string)})['data']
img = tf.decode_raw(img, tf.uint8)
img = tf.cast(img, tf.float32)
img = tf.reshape(img, [3, input_img_size, input_img_size])
img = tensor_util.nchw_to_nhwc_single(img)
img = (img / 256.0)
(left, right) = params.values_range
img = ((img * (right - left)) + left)
if params.random_flip:
img = tf.image.random_flip_left_right(img)
if params.crop_at_center:
img_side = params.img_side
img = tf.image.resize_images([img], [input_img_size, input_img_size])[0]
            img = tf.image.crop_to_bounding_box(img, input_img_size // 12, input_img_size // 12, input_img_size * 10 // 12, input_img_size * 10 // 12)
img = tf.image.resize_images([img], [img_side, img_side])[0]
img.shape.assert_is_compatible_with([params.img_side, params.img_side, 3])
return img
|
def get_train_input_fn(params, batch_size=128):
def train_input_fn():
log_img_side = int(math.log(params.img_side, 2))
assert ((2 ** log_img_side) == params.img_side), str((log_img_side, params.img_side))
tfrecord_id = log_img_side
if params.crop_at_center:
tfrecord_id += 1
tfrecord_id = min(tfrecord_id, params.max_tfrecord_res_available)
full_path = os.path.join(params.tfrecord_dir, ('celeba_hq_tfrecord-r%02d.tfrecords' % tfrecord_id))
tf.logging.info('Using dataset at %s', full_path)
if params.gcs_bucket:
full_path = os.path.join('gs://', params.gcs_bucket, full_path)
d = tf.data.TFRecordDataset([full_path])
if (params.restrict_to_num_imgs is not None):
d = d.take(params.restrict_to_num_imgs)
d = d.map((lambda img: process(img, params, input_img_size=(2 ** tfrecord_id))), num_parallel_calls=16)
d = d.repeat()
if params.train_shuffle:
d = d.shuffle((batch_size * 10))
d = d.batch(batch_size)
d = d.prefetch(buffer_size=64)
iterator = d.make_one_shot_iterator()
imgs = iterator.get_next()
return ({'images': imgs}, tf.constant(1, shape=[batch_size, 1]))
return train_input_fn
|
class InfoGanSummary(object):
def __init__(self, training_params, generator_inputs, sample_t, reps=3):
self.training_params = training_params
self.generator_inputs = generator_inputs
self.sample_t = sample_t
self.cont_placeholder_map = {}
self.cat_placeholder_map = {}
self.cont_grid_side = 5
self.cat_grid_side = 1
while ((self.cat_grid_side ** 2) < training_params.infogan_cat_dim):
self.cat_grid_side += 1
self.reps = reps
if training_params.infogan_cont_num_vars:
self._infogan_images_summary(num_coords=training_params.infogan_cont_num_vars, prefix='infogan_cont_', grid_side=self.cont_grid_side, placeholder_map=self.cont_placeholder_map)
if training_params.infogan_cat_num_vars:
self._infogan_images_summary(num_coords=training_params.infogan_cat_num_vars, prefix='infogan_cat_', grid_side=self.cat_grid_side, placeholder_map=self.cat_placeholder_map)
def _feed_dict_from_gen_inputs(self, cur_generator_inputs):
feed_dict = {}
for (key, val) in cur_generator_inputs.items():
if (val is not None):
feed_dict[self.generator_inputs[key]] = val
return feed_dict
def _construct_cont_infogan_images(self, sess, coord, cur_generator_inputs):
all_imgs = []
        cur_generator_inputs = copy.deepcopy(cur_generator_inputs)
for (i, val) in enumerate(np.linspace((- 1.25), 1.25, (self.cont_grid_side ** 2))):
cur_generator_inputs['structured_continuous_input'][0][coord] = val
cur_imgs = sess.run(self.sample_t, feed_dict=self._feed_dict_from_gen_inputs(cur_generator_inputs))
cur_img = cur_imgs[0]
all_imgs.append(cur_img)
return all_imgs
def _construct_cat_infogan_images(self, sess, coord, cur_generator_inputs):
all_imgs = []
        cur_generator_inputs = copy.deepcopy(cur_generator_inputs)
for val in range(self.training_params.infogan_cat_dim):
cur_generator_inputs['structured_categorical_input'][0][coord] = val
cur_imgs = sess.run(self.sample_t, feed_dict=self._feed_dict_from_gen_inputs(cur_generator_inputs))
cur_img = cur_imgs[0]
all_imgs.append(cur_img)
black_img = np.full([self.training_params.image_side, self.training_params.image_side, 3], 0.0)
all_imgs += ([black_img] * ((self.cat_grid_side ** 2) - len(all_imgs)))
return all_imgs
def _fetch_generator_inputs(self, sess, generator_inputs):
result = {}
to_fetch = {}
for (key, val) in generator_inputs.items():
if (val is None):
result[key] = val
else:
to_fetch[key] = val
(keys, vals) = zip(*to_fetch.items())
fetched = sess.run(vals)
for (key, val) in zip(keys, fetched):
result[key] = val
return result
def construct_feed_dict(self, sess):
feed_dict = {}
generator_inputs_per_rep = [self._fetch_generator_inputs(sess, self.generator_inputs) for rep in range(self.reps)]
for ((coord, rep), placeholder) in self.cont_placeholder_map.items():
generator_inputs = generator_inputs_per_rep[rep].copy()
imgs = self._construct_cont_infogan_images(sess, coord, generator_inputs)
feed_dict[placeholder] = imgs
for ((coord, rep), placeholder) in self.cat_placeholder_map.items():
generator_inputs = generator_inputs_per_rep[rep].copy()
imgs = self._construct_cat_infogan_images(sess, coord, generator_inputs)
feed_dict[placeholder] = imgs
return feed_dict
def _infogan_images_summary(self, num_coords, prefix, grid_side, placeholder_map):
img_side = self.training_params.image_side
for coord in range(num_coords):
for rep in range(self.reps):
imgs = tf.placeholder(dtype=tf.float32, shape=[(grid_side ** 2), img_side, img_side, 3])
placeholder_map[(coord, rep)] = imgs
tf.summary.image((prefix + ('coord_%d_rep_%d' % (coord, rep))), tfgan.eval.eval_utils.image_grid(imgs[:(grid_side * grid_side)], grid_shape=(grid_side, grid_side), image_shape=(img_side, img_side)), family=(prefix + 'interpolation'))
tf.summary.image((prefix + ('coord_%d_rep_%d_diff' % (coord, rep))), tfgan.eval.eval_utils.image_grid((imgs[:(grid_side * grid_side)] - imgs[0:1]), grid_shape=(grid_side, grid_side), image_shape=(img_side, img_side)), family=(prefix + 'diffs'))
|
def upscale2d(x, factor=2, data_format='NCHW'):
assert (isinstance(factor, int) and (factor >= 1)), factor
if (factor == 1):
return x
with tf.variable_scope('Upscale2D'):
if (data_format == 'NHWC'):
x = tensor_util.nhwc_to_nchw(x)
s = x.shape
x = tf.reshape(x, [(- 1), s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [(- 1), s[1], (s[2] * factor), (s[3] * factor)])
if (data_format == 'NHWC'):
x = tensor_util.nchw_to_nhwc(x)
return x
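
def _upscale2d_numpy_demo():
    """Illustrative NumPy analogue of the reshape/tile trick in upscale2d
    (not part of the original module): nearest-neighbour upsampling where
    each pixel becomes a factor x factor block."""
    import numpy as np
    x = np.arange(4).reshape(1, 1, 2, 2)  # NCHW
    y = x.reshape(1, 1, 2, 1, 2, 1)
    y = np.tile(y, (1, 1, 1, 2, 1, 2)).reshape(1, 1, 4, 4)
    # each pixel of x is repeated into a 2x2 block, matching upscale2d(x, 2)
    return y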
|
def downscale2d(x, factor=2, data_format='NCHW'):
assert (isinstance(factor, int) and (factor >= 1))
if (factor == 1):
return x
with tf.variable_scope('Downscale2D'):
if (data_format == 'NCHW'):
ksize = [1, 1, factor, factor]
else:
ksize = [1, factor, factor, 1]
res = tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format=data_format)
return res
|
def downgrade2d(x, factor=2, data_format='NCHW'):
x = downscale2d(x, factor, data_format)
x = upscale2d(x, factor, data_format)
return x
|
def concat_features_stddev(layer):
tf.logging.info('Adding minibatch features stddev')
(_, stddev) = tf.nn.moments(layer, [0])
tf.logging.info('MBS layer shape: %s', layer.shape.as_list())
tf.logging.info('MBS stddev shape: %s', stddev.shape.as_list())
stddev_mean = tf.reduce_mean(stddev)
batch_size = layer.shape.as_list()[0]
stddev_feature = (tf.zeros([batch_size, 1, 4, 4]) + stddev_mean)
layer = tf.concat([layer, stddev_feature], axis=1)
tf.logging.info('MBS new layer shape %s:', layer.shape.as_list())
tf.summary.scalar('minibatch_stddev_mean', stddev_mean)
return layer
|
def pixel_norm(x, axis, epsilon=1e-08, scope='pixel_norm'):
    with tf.variable_scope(scope):
        return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=axis, keepdims=True) + epsilon)
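
# PixelNorm, as used in progressive GANs: each feature vector is divided by
# its root-mean-square over the given axis,
#
#   y = x / sqrt(mean(x ** 2, axis) + epsilon)
#
# so every position ends up with approximately unit-norm features. For NCHW
# activations, callers pass the channel axis (axis=1).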
|
def append_one_hot_to_tensor(tensor, one_hot):
batch_size = tensor.shape.as_list()[0]
shape_len = len(tensor.shape)
if (shape_len == 2):
return tf.concat([tensor, one_hot], axis=1)
elif (shape_len == 4):
one_hot = tf.expand_dims(tf.expand_dims(one_hot, 2), 3)
zeros = tf.zeros([batch_size, one_hot.shape[1], tensor.shape[2], tensor.shape[3]])
one_hot_cube = (zeros + one_hot)
return tf.concat([tensor, one_hot_cube], axis=1)
else:
assert False, ('Unsupported shape_len: %s' % shape_len)
|
def batch_norm(net, axis, scope='batch_norm'):
with tf.variable_scope(scope):
net = tf.layers.batch_normalization(inputs=net, axis=axis, training=True, fused=True)
del tf.get_collection_ref(tf.GraphKeys.UPDATE_OPS)[(- 2):]
return net
|
def batch_norm_in_place(net, axis, scope='batch_norm_in_place', is_training=None):
assert (is_training is not None)
assert (axis in [1, 3])
data_format = ('NCHW' if (axis == 1) else 'NHWC')
with tf.variable_scope(scope):
net = tf.contrib.layers.batch_norm(inputs=net, is_training=is_training, data_format=data_format, fused=True, updates_collections=None)
return net
|
def layer_norm(net, axis, scope='layer_norm'):
    assert False, 'layer norm only works for NHWC; the implementation below is broken'
with tf.variable_scope(scope):
net = tf.contrib.layers.layer_norm(inputs=net, center=True, scale=True, activation_fn=None, reuse=False, begin_params_axis=axis)
return net
|
def dense(inp, units=None, scope='linear', weight_norm=None):
with tf.variable_scope(scope):
filters_in = inp.shape.as_list()[1]
W = get_weights(shape=[filters_in, units], weight_norm=weight_norm)
b = tf.get_variable(name='biases', shape=[units], initializer=tf.constant_initializer(0.0))
return (tf.matmul(inp, W) + b)
|
def _prod(s):
if (s == []):
return 1
return (s[0] * _prod(s[1:]))
|
def get_weights(shape, weight_norm):
if (weight_norm == 'dynamic'):
assert False, 'something is broken here'
W = tf.get_variable(name='weights', shape=shape, initializer=tf.glorot_uniform_initializer())
v = tf.get_variable(name='dynamic_weights_norm', shape=[], initializer=tf.constant_initializer(1.0))
W = ((W / tf.norm(W)) * v)
elif (weight_norm == 'equalized'):
W = tf.get_variable(name='weights', shape=shape, initializer=tf.random_normal_initializer())
W = ((W * np.sqrt(2)) / np.sqrt(_prod(shape[:(- 1)])))
else:
assert (weight_norm is None)
W = tf.get_variable(name='weights', shape=shape, initializer=tf.glorot_uniform_initializer())
return W
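
# The 'equalized' branch implements the equalized learning rate trick from
# progressive GANs: weights are drawn from N(0, 1) and rescaled at graph-build
# time by the He constant sqrt(2 / fan_in), with fan_in = prod(shape[:-1]).
# This keeps the effective update magnitude comparable across layers of very
# different widths. Illustrative arithmetic (not part of the module):
#
#   # 3x3 conv with 64 input channels: fan_in = 3 * 3 * 64 = 576
#   # scale = sqrt(2) / sqrt(576) = 1.4142 / 24 ~= 0.0589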
|
def conv(net, kernel_size=3, strides=1, filters=None, scope='conv', weight_norm=None):
assert (strides == 1)
with tf.variable_scope(scope):
filters_in = net.shape.as_list()[1]
W = get_weights(shape=[kernel_size, kernel_size, filters_in, filters], weight_norm=weight_norm)
conv = tf.nn.conv2d(input=net, filter=W, strides=[1, 1, strides, strides], padding='SAME', data_format='NCHW')
b = tf.get_variable(name='biases', shape=[filters], initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, b, data_format='NCHW')
return conv
|
def conv_trans(*args, **kwargs):
assert False
|
def _norm(net, axis, version, scope='norm', is_training=None):
assert (is_training is not None)
with tf.variable_scope(scope):
if (version is None):
return net
elif (version == 'batch_norm'):
return batch_norm(net, axis=axis)
elif (version == 'layer_norm'):
return layer_norm(net, axis=axis)
elif (version == 'pixel_norm'):
return pixel_norm(net, axis=axis)
elif (version == 'batch_norm_in_place'):
return batch_norm_in_place(net, axis=axis, is_training=is_training)
else:
assert False, ('Unknown normalization: %s' % version)
|
def norm(net, axis, version, scope='norm', is_training=None, gpu_id=None, per_gpu=None):
assert (gpu_id is not None)
assert (per_gpu is not None)
if per_gpu:
with tf.variable_scope(('norm_gpu_%d' % gpu_id)):
with tf.device(('/gpu:%d' % gpu_id)):
return _norm(net, axis, version, scope, is_training)
else:
return _norm(net, axis, version, scope, is_training)
|
def _get_shape(tensor):
tensor_shape = tf.shape(tensor)
static_tensor_shape = tf_tensor_util.constant_value(tensor_shape)
return (static_tensor_shape if (static_tensor_shape is not None) else tensor_shape)
|
def condition_tensor(tensor, conditioning, act=None):
tensor.shape[1:].assert_is_fully_defined()
num_features = tensor.shape[1:].num_elements()
mapped_conditioning = tf.contrib.layers.linear(tf.contrib.layers.flatten(conditioning), num_features)
if (act is not None):
        mapped_conditioning = act(mapped_conditioning)
if (not mapped_conditioning.shape.is_compatible_with(tensor.shape)):
mapped_conditioning = tf.reshape(mapped_conditioning, _get_shape(tensor))
return (tensor + mapped_conditioning)
|
class Generator(nn.Module):
def __init__(self, params):
super().__init__()
self.noise_dim = params.noise_dims
self.gkernel = gkern1D(params.gkernlen, params.gkernsig)
self.FC = nn.Sequential(nn.Linear(self.noise_dim, 256), nn.LeakyReLU(0.2), nn.Dropout(p=0.2), nn.Linear(256, (32 * 16), bias=False), nn.BatchNorm1d((32 * 16)), nn.LeakyReLU(0.2))
self.CONV = nn.Sequential(ConvTranspose1d_meta(16, 16, 5, stride=2, bias=False), nn.BatchNorm1d(16), nn.LeakyReLU(0.2), ConvTranspose1d_meta(16, 8, 5, stride=2, bias=False), nn.BatchNorm1d(8), nn.LeakyReLU(0.2), ConvTranspose1d_meta(8, 4, 5, stride=2, bias=False), nn.BatchNorm1d(4), nn.LeakyReLU(0.2), ConvTranspose1d_meta(4, 1, 5))
def forward(self, noise, params):
net = self.FC(noise)
net = net.view((- 1), 16, 32)
net = self.CONV(net)
net = conv1d_meta((net + noise.unsqueeze(1)), self.gkernel)
net = (torch.tanh((net * params.binary_amp)) * 1.05)
return net
|
class Params():
    """Class that loads hyperparameters from a json file.

    Example:
    ```
    params = Params(json_path)
    print(params.learning_rate)
    params.learning_rate = 0.5  # change the value of learning_rate in params
    ```
    """
def __init__(self, json_path):
self.update(json_path)
def save(self, json_path):
'Saves parameters to json file'
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
'Loads parameters from json file'
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
@property
def dict(self):
"Gives dict-like access to Params instance by `params.dict['learning_rate']`"
return self.__dict__
|
def set_logger(log_path):
    """Sets the logger to log info in terminal and file `log_path`.

    In general, it is useful to have a logger so that every output to the terminal is saved
    in a permanent file. Here we save it to `model_dir/train.log`.

    Example:
    ```
    logging.info("Starting training...")
    ```

    Args:
        log_path: (string) where to log
    """
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (not logger.handlers):
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
|
def save_dict_to_json(d, json_path):
    """Saves dict of floats in json file

    Args:
        d: (dict) of float-castable values (np.float, int, float, etc.)
        json_path: (string) path to json file
    """
with open(json_path, 'w') as f:
d = {k: float(v) for (k, v) in d.items()}
json.dump(d, f, indent=4)
|
def row_csv2dict(csv_file):
dict_club = {}
with open(csv_file) as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
dict_club[(row[0], row[1])] = row[2]
return dict_club
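
# row_csv2dict keys the result on the first two columns and maps to the third;
# rows with fewer than three columns would raise IndexError. A sketch with
# illustrative contents:
#
# >>> # clubs.csv:
# >>> #   alice,chess,captain
# >>> #   bob,chess,member
# >>> row_csv2dict('clubs.csv')
# {('alice', 'chess'): 'captain', ('bob', 'chess'): 'member'}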
|
def save_checkpoint(state, checkpoint):
"Saves model and training parameters at checkpoint + 'last.pth.tar'. If is_best==True, also saves\n checkpoint + 'best.pth.tar'\n Args:\n state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict\n is_best: (bool) True if it is the best model seen till now\n checkpoint: (string) folder where parameters are to be saved\n "
filepath = os.path.join(checkpoint, 'model.pth.tar')
if (not os.path.exists(checkpoint)):
print('Checkpoint Directory does not exist! Making directory {}'.format(checkpoint))
os.mkdir(checkpoint)
else:
print('Checkpoint Directory exists! ')
torch.save(state, filepath)
|
def load_checkpoint(checkpoint, model, optimizer=None, scheduler=None):
    """Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of
    optimizer assuming it is present in checkpoint; likewise for scheduler.

    Args:
        checkpoint: (string) filename which needs to be loaded
        model: (torch.nn.Module) model for which the parameters are loaded
        optimizer: (torch.optim) optional: resume optimizer from checkpoint
        scheduler: optional: resume learning-rate scheduler from checkpoint
    """
if (not os.path.exists(checkpoint)):
raise "File doesn't exist {}".format(checkpoint)
checkpoint = torch.load(checkpoint)
model.load_state_dict(checkpoint['gen_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optim_state_dict'])
if scheduler:
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
return checkpoint
|
def plot_loss_history(loss_history, params):
(effs_mean_history, diversity_history, binarization_history) = loss_history
iterations = [(i * params.plot_iter) for i in range(len(effs_mean_history))]
plt.figure()
plt.plot(iterations, effs_mean_history)
plt.plot(iterations, diversity_history)
plt.plot(iterations, binarization_history)
plt.xlabel('iteration')
    plt.legend(('Average Efficiency', 'Pattern diversity', 'Binarization'))
plt.axis([0, (len(effs_mean_history) * params.plot_iter), 0, 1.05])
plt.savefig((params.output_dir + '/figures/Train_history.png'))
history_path = os.path.join(params.output_dir, 'history.mat')
io.savemat(history_path, mdict={'effs_mean_history': np.asarray(effs_mean_history), 'diversity_history': np.asarray(diversity_history), 'binarization_history': np.asarray(binarization_history)})
|