code stringlengths 17 6.64M |
|---|
class LinearSchedule(BaseSchedule):
    """Variance schedule with linearly spaced betas."""

    def __init__(self, timesteps: int, device: Optional[torch.device]=None, beta_start: float=0.0001, beta_end: float=0.02, *args, **kwargs) -> None:
        """Initialize linear beta schedule."""
        # Endpoints must be stored before the base class constructor,
        # which is expected to call _get_betas().
        self.beta_start = beta_start
        self.beta_end = beta_end
        super().__init__(timesteps, device, *args, **kwargs)

    def _get_betas(self, timesteps: int) -> Tensor:
        """Return `timesteps` evenly spaced betas from beta_start to beta_end."""
        return torch.linspace(self.beta_start, self.beta_end, timesteps)
|
class CosineSchedule(BaseSchedule):
    """Variance schedule derived from a squared-cosine alpha-bar curve."""

    def __init__(self, timesteps: int, device: Optional[torch.device]=None, s: float=0.008, *args, **kwargs) -> None:
        """Initialize cosine beta schedule."""
        # Offset s must be available before the base class asks for betas.
        self.s = s
        super().__init__(timesteps, device, *args, **kwargs)

    def _get_betas(self, timesteps: int) -> Tensor:
        """Derive betas from successive ratios of cumulative alpha products."""
        grid = torch.linspace(0, timesteps, timesteps + 1)
        abar = torch.cos(((grid / timesteps) + self.s) / (1 + self.s) * torch.pi * 0.5) ** 2
        abar = abar / abar[0]
        betas = 1 - (abar[1:] / abar[:-1])
        # Clip to keep every step strictly inside (0, 1).
        return torch.clip(betas, 0.0001, 0.9999)
|
class QuadraticSchedule(BaseSchedule):
    """Variance schedule whose betas grow quadratically between the endpoints."""

    def __init__(self, timesteps: int, device: Optional[torch.device]=None, beta_start: float=0.0001, beta_end: float=0.02, *args, **kwargs) -> None:
        """Initialize quadratic beta schedule."""
        # Endpoints must be set before the base class constructor runs.
        self.beta_start = beta_start
        self.beta_end = beta_end
        super().__init__(timesteps, device, *args, **kwargs)

    def _get_betas(self, timesteps: int) -> Tensor:
        """Space the square roots linearly, then square to get the betas."""
        start_root = self.beta_start ** 0.5
        end_root = self.beta_end ** 0.5
        return torch.linspace(start_root, end_root, timesteps) ** 2
|
class SigmoidSchedule(BaseSchedule):
    """Variance schedule that follows a sigmoid curve between the endpoints."""

    def __init__(self, timesteps: int, device: Optional[torch.device]=None, beta_start: float=0.0001, beta_end: float=0.02, *args, **kwargs) -> None:
        """Initialize sigmoid beta schedule."""
        # Endpoints must be set before the base class constructor runs.
        self.beta_start = beta_start
        self.beta_end = beta_end
        super().__init__(timesteps, device, *args, **kwargs)

    def _get_betas(self, timesteps: int) -> Tensor:
        """Map a sigmoid over [-6, 6] onto [beta_start, beta_end]."""
        raw = torch.linspace(-6, 6, timesteps)
        span = self.beta_end - self.beta_start
        return torch.sigmoid(raw) * span + self.beta_start
|
class ScheduleFactory():
    """Factory wrapper for variance schedules."""

    @staticmethod
    def get_schedule(name: str, timesteps: int, *args, **kwargs) -> BaseSchedule:
        """Initialize a scheduler by name.

        Raises ValueError when `name` is not a known schedule.
        """
        registry = {
            'linear': LinearSchedule,
            'cosine': CosineSchedule,
            'quadratic': QuadraticSchedule,
            'sigmoid': SigmoidSchedule,
        }
        cls: Any
        try:
            cls = registry[name]
        except KeyError:
            raise ValueError('There is no matching schedule for name "{}".'.format(name)) from None
        return cls(timesteps, *args, **kwargs)
|
def transform_tensor_to_img() -> Compose:
    """Transform a tensor with a single element to a PIL image."""
    steps = [
        Lambda(lambda t: t.detach().cpu()),            # detach from graph, move to host
        Lambda(lambda t: (t + 1) / 2),                 # [-1, 1] -> [0, 1]
        Lambda(lambda t: t.permute(1, 2, 0)),          # CHW -> HWC
        Lambda(lambda t: t * 255.0),                   # [0, 1] -> [0, 255]
        Lambda(lambda t: t.numpy().astype(np.uint8)),  # to uint8 array
        ToPILImage(),
    ]
    return Compose(steps)
|
def plot_image(img: Tensor, fig_size: Any=None, ncols: Optional[int]=None, show: bool=True, save_path: Optional[str]=None) -> None:
    """Plot a tensor containing image data."""
    img = img.detach().cpu()
    # A 4D tensor is a batch: tile it into one grid image first.
    if len(img.shape) == 4:
        nrow = ncols if ncols is not None else len(img)
        img = make_grid(img, nrow=nrow)
    plt.figure(figsize=fig_size)
    plt.imshow(img.permute(1, 2, 0))
    plt.axis('off')
    if save_path is not None:
        plt.savefig(save_path, bbox_inches='tight')
    if show:
        plt.show()
    plt.close()
|
def make_gif(img_arr: Tensor, save_path: str) -> None:
    """Create a GIF with the output of DiffusionController.generate()."""
    assert (len(img_arr) == 5), 'Array has wrong shape.'
    img_arr = img_arr.detach().cpu()
    fig = plt.figure(frameon=False)
    frames = []
    # One animation frame per snapshot in the array.
    for snapshot in img_arr:
        grid = make_grid(snapshot, nrow=snapshot.shape[0] // 2)
        artist = plt.imshow(grid.permute(1, 2, 0), animated=True)
        plt.axis('off')
        plt.tight_layout()
        frames.append([artist])
    fig.tight_layout()
    anim = animation.ArtistAnimation(fig, frames, interval=100, blit=True, repeat_delay=2000)
    anim.save(save_path)
|
def get_parser(**parser_kwargs):
    """Build the command-line argument parser for training runs."""

    def str2bool(v):
        # argparse type hook: accept real booleans plus common yes/no spellings.
        if isinstance(v, bool):
            return v
        lowered = v.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument('-n', '--name', type=str, const=True, default='', nargs='?', help='postfix for logdir')
    parser.add_argument('-r', '--resume', type=str, const=True, default='', nargs='?', help='resume from logdir or checkpoint in logdir')
    parser.add_argument('-b', '--base', nargs='*', metavar='base_config.yaml', help='paths to base configs. Loaded from left-to-right. Parameters can be overwritten or added with command-line options of the form `--key value`.', default=list())
    parser.add_argument('-t', '--train', type=str2bool, const=True, default=False, nargs='?', help='train')
    parser.add_argument('--no-test', type=str2bool, const=True, default=False, nargs='?', help='disable test')
    parser.add_argument('-p', '--project', help='name of new or path to existing project')
    parser.add_argument('-d', '--debug', type=str2bool, nargs='?', const=True, default=False, help='enable post-mortem debugging')
    parser.add_argument('-s', '--seed', type=int, default=23, help='seed for seed_everything')
    parser.add_argument('-f', '--postfix', type=str, default='', help='post-postfix for default name')
    parser.add_argument('-l', '--logdir', type=str, default='logs', help='directory for logging')
    return parser
|
def nondefault_trainer_args(opt):
    """Return sorted names of trainer options in `opt` that differ from Trainer defaults."""
    # Parse an empty argv to obtain the Trainer's default namespace.
    defaults = Trainer.add_argparse_args(argparse.ArgumentParser()).parse_args([])
    changed = [k for k in vars(defaults) if getattr(opt, k) != getattr(defaults, k)]
    return sorted(changed)
|
def worker_init_fn(_):
    """Seed numpy differently in each DataLoader worker.

    Without this, forked workers inherit the parent's RNG state and
    produce identical random streams.
    """
    worker_info = torch.utils.data.get_worker_info()
    # Offset the current base seed by the worker id so the streams diverge.
    # (The previous version also bound worker_info.dataset to an unused
    # local and returned the None result of np.random.seed.)
    np.random.seed(np.random.get_state()[1][0] + worker_info.id)
|
class DataModuleFromConfig(pl.LightningDataModule):
    """Lightning data module wrapping the MaCheX / MIMIC chest X-ray datasets."""

    def __init__(self, batch_size, machex_path, test_size, num_workers, mimic=False, *args, **kwargs):
        super().__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        # Resize, convert to tensor, and map pixel values to [-1, 1].
        self.transforms = Compose([Resize(256), ToTensor(), Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        if mimic:
            self.machex = MimicT2IDataset(machex_path, self.transforms)
        else:
            self.machex = MaCheXDataset(machex_path, self.transforms)
        # Fixed-seed split keeps train/val membership stable across runs.
        train_size = len(self.machex) - test_size
        self.train_dataset, self.test_dataset = random_split(
            self.machex,
            (train_size, test_size),
            generator=torch.Generator().manual_seed(1337),
        )

    def train_dataloader(self):
        """Shuffled training loader."""
        return DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, shuffle=True)

    def val_dataloader(self):
        """Deterministic validation loader."""
        return DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, shuffle=False)
|
class SetupCallback(Callback):
    # Lightning callback that prepares the run's directory layout and
    # persists the resolved configs.  On rank 0 it creates log/ckpt/cfg
    # directories and dumps both configs as YAML; on keyboard interrupt
    # it saves an emergency "last.ckpt".
    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume  # resume flag/path passed on the CLI
        self.now = now  # timestamp string used to name the saved config files
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config  # project OmegaConf config
        self.lightning_config = lightning_config  # trainer OmegaConf config
    def on_keyboard_interrupt(self, trainer, pl_module):
        # Save a final checkpoint when training is interrupted (rank 0 only).
        if (trainer.global_rank == 0):
            print('Summoning checkpoint.')
            ckpt_path = os.path.join(self.ckptdir, 'last.ckpt')
            trainer.save_checkpoint(ckpt_path)
    def on_pretrain_routine_start(self, trainer, pl_module):
        if (trainer.global_rank == 0):
            # Rank 0 owns the run directory: create it and dump the configs.
            os.makedirs(self.logdir, exist_ok=True)
            os.makedirs(self.ckptdir, exist_ok=True)
            os.makedirs(self.cfgdir, exist_ok=True)
            if ('callbacks' in self.lightning_config):
                # Extra subdirectory for step-based checkpointing, if configured.
                if ('metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']):
                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
            print('Project config')
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config, os.path.join(self.cfgdir, '{}-project.yaml'.format(self.now)))
            print('Lightning config')
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({'lightning': self.lightning_config}), os.path.join(self.cfgdir, '{}-lightning.yaml'.format(self.now)))
        elif ((not self.resume) and os.path.exists(self.logdir)):
            # Non-zero ranks of a fresh (non-resumed) run: move the logdir
            # aside into "child_runs" so only rank 0 keeps the primary path.
            (dst, name) = os.path.split(self.logdir)
            dst = os.path.join(dst, 'child_runs', name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            try:
                os.rename(self.logdir, dst)
            except FileNotFoundError:
                # Another rank may have already moved it; best-effort.
                pass
|
class ImageLogger(Callback):
    # Lightning callback that periodically asks the module for sample
    # images (via its `log_images` method) and writes them to disk and,
    # where supported, to the experiment logger.
    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True, rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, log_images_kwargs=None):
        # Args:
        #   batch_frequency:    log every N steps (or batch indices).
        #   max_images:         cap on how many images per key to keep.
        #   clamp:              clamp image tensors into [-1, 1] before saving.
        #   increase_log_steps: additionally log at powers of two below batch_frequency.
        #   rescale:            map [-1, 1] to [0, 1] before writing PNGs.
        #   disabled:           turn logging off entirely.
        #   log_on_batch_idx:   use batch_idx instead of global_step for the frequency check.
        #   log_first_step:     also log at step/index 0.
        #   log_images_kwargs:  extra kwargs forwarded to pl_module.log_images().
        super().__init__()
        self.rescale = rescale
        self.batch_freq = batch_frequency
        self.max_images = max_images
        # Dispatch table: per-logger-class image logging hooks.
        self.logger_log_images = {pl.loggers.TestTubeLogger: self._testtube}
        # Powers of two up to batch_freq for denser early logging.
        self.log_steps = [(2 ** n) for n in range((int(np.log2(self.batch_freq)) + 1))]
        if (not increase_log_steps):
            self.log_steps = [self.batch_freq]
        self.clamp = clamp
        self.disabled = disabled
        self.log_on_batch_idx = log_on_batch_idx
        self.log_images_kwargs = (log_images_kwargs if log_images_kwargs else {})
        self.log_first_step = log_first_step
    @rank_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        # Push each image grid to the TestTube/TensorBoard experiment.
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = ((grid + 1.0) / 2.0)
            tag = f'{split}/{k}'
            pl_module.logger.experiment.add_image(tag, grid, global_step=pl_module.global_step)
    @rank_zero_only
    def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
        # Write each image grid as a PNG under <save_dir>/images/<split>/.
        root = os.path.join(save_dir, 'images', split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            if self.rescale:
                grid = ((grid + 1.0) / 2.0)
            # CHW -> HWC for PIL.
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze((- 1))
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = '{}_gs-{:06}_e-{:06}_b-{:06}.png'.format(k, global_step, current_epoch, batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)
    def log_img(self, pl_module, batch, batch_idx, split='train'):
        # Sample images from the module at the configured frequency.
        check_idx = (batch_idx if self.log_on_batch_idx else pl_module.global_step)
        if (self.check_frequency(check_idx) and hasattr(pl_module, 'log_images') and callable(pl_module.log_images) and (self.max_images > 0)):
            logger = type(pl_module.logger)
            is_train = pl_module.training
            # Switch to eval mode while sampling; restored below.
            if is_train:
                pl_module.eval()
            with torch.no_grad():
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
            for k in images:
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], (- 1.0), 1.0)
            self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch, batch_idx)
            # Fall back to a no-op for logger classes without a registered hook.
            logger_log_images = self.logger_log_images.get(logger, (lambda *args, **kwargs: None))
            logger_log_images(pl_module, images, pl_module.global_step, split)
            if is_train:
                pl_module.train()
    def check_frequency(self, check_idx):
        # True when check_idx hits the regular frequency or one of the
        # early power-of-two steps; consumes log_steps as it goes.
        if ((((check_idx % self.batch_freq) == 0) or (check_idx in self.log_steps)) and ((check_idx > 0) or self.log_first_step)):
            try:
                self.log_steps.pop(0)
            except IndexError as e:
                print(e)
                pass
            return True
        return False
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        if ((not self.disabled) and ((pl_module.global_step > 0) or self.log_first_step)):
            self.log_img(pl_module, batch, batch_idx, split='train')
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if ((not self.disabled) and (pl_module.global_step > 0)):
            self.log_img(pl_module, batch, batch_idx, split='val')
        if hasattr(pl_module, 'calibrate_grad_norm'):
            if ((pl_module.calibrate_grad_norm and ((batch_idx % 25) == 0)) and (batch_idx > 0)):
                # NOTE(review): self.log_gradients is not defined on this class —
                # this path raises AttributeError if ever taken; confirm intent.
                self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
|
class CUDACallback(Callback):
    # Logs per-epoch wall time and peak CUDA memory, averaged across ranks.
    def on_train_epoch_start(self, trainer, pl_module):
        # Reset the CUDA peak-memory counter and start the epoch timer.
        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
        torch.cuda.synchronize(trainer.root_gpu)
        self.start_time = time.time()  # read back in on_train_epoch_end
    def on_train_epoch_end(self, trainer, pl_module, *args, **kwargs):
        # Synchronize so timing and memory stats reflect finished GPU work.
        torch.cuda.synchronize(trainer.root_gpu)
        # Bytes -> MiB.
        max_memory = (torch.cuda.max_memory_allocated(trainer.root_gpu) / (2 ** 20))
        epoch_time = (time.time() - self.start_time)
        try:
            # Average the metrics across distributed ranks; strategies
            # without training_type_plugin.reduce() simply skip reporting.
            max_memory = trainer.training_type_plugin.reduce(max_memory)
            epoch_time = trainer.training_type_plugin.reduce(epoch_time)
            rank_zero_info(f'Average Epoch time: {epoch_time:.2f} seconds')
            rank_zero_info(f'Average Peak memory {max_memory:.2f}MiB')
        except AttributeError:
            pass
|
class EasyDict(dict):
    """Convenience class that behaves like a dict but allows access with the attribute syntax."""

    def __getattr__(self, name: str) -> Any:
        # Missing keys must surface as AttributeError so hasattr()
        # and friends behave as expected.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
|
class Logger(object):
    """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""

    def __init__(self, file_name: Optional[str]=None, file_mode: str='w', should_flush: bool=True):
        self.file = None
        if file_name is not None:
            self.file = open(file_name, file_mode)
        self.should_flush = should_flush
        # Keep the real streams so close() can restore them later.
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        sys.stdout = self
        sys.stderr = self

    def __enter__(self) -> 'Logger':
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()

    def write(self, text: Union[(str, bytes)]) -> None:
        """Write text to stdout (and a file) and optionally flush."""
        if isinstance(text, bytes):
            text = text.decode()
        if not text:
            # Nothing to mirror for empty writes.
            return
        if self.file is not None:
            self.file.write(text)
        self.stdout.write(text)
        if self.should_flush:
            self.flush()

    def flush(self) -> None:
        """Flush written text to both stdout and a file, if open."""
        if self.file is not None:
            self.file.flush()
        self.stdout.flush()

    def close(self) -> None:
        """Flush, close possible files, and remove stdout/stderr mirroring."""
        self.flush()
        # Only undo the redirection if we are still the active stream.
        if sys.stdout is self:
            sys.stdout = self.stdout
        if sys.stderr is self:
            sys.stderr = self.stderr
        if self.file is not None:
            self.file.close()
            self.file = None
|
def set_cache_dir(path: str) -> None:
    # Override the module-level cache directory consulted first by
    # make_cache_dir_path().
    global _dnnlib_cache_dir
    _dnnlib_cache_dir = path
|
def make_cache_dir_path(*paths: str) -> str:
    """Resolve a path inside the dnnlib cache directory.

    Resolution order: explicit set_cache_dir() override, the
    DNNLIB_CACHE_DIR environment variable, a per-user cache under
    HOME/USERPROFILE, then the system temp directory.
    """
    if _dnnlib_cache_dir is not None:
        return os.path.join(_dnnlib_cache_dir, *paths)
    env = os.environ
    if 'DNNLIB_CACHE_DIR' in env:
        return os.path.join(env['DNNLIB_CACHE_DIR'], *paths)
    for home_var in ('HOME', 'USERPROFILE'):
        if home_var in env:
            return os.path.join(env[home_var], '.cache', 'dnnlib', *paths)
    return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
|
def format_time(seconds: Union[(int, float)]) -> str:
    """Convert the seconds to human readable string with days, hours, minutes and seconds."""
    s = int(np.rint(seconds))
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60
    if s < minute:
        return '{0}s'.format(s)
    if s < hour:
        return '{0}m {1:02}s'.format(s // minute, s % minute)
    if s < day:
        return '{0}h {1:02}m {2:02}s'.format(s // hour, (s // minute) % 60, s % minute)
    # Seconds are dropped once the duration reaches a full day.
    return '{0}d {1:02}h {2:02}m'.format(s // day, (s // hour) % 24, (s // minute) % 60)
|
def format_time_brief(seconds: Union[(int, float)]) -> str:
    """Convert the seconds to human readable string with days, hours, minutes and seconds."""
    s = int(np.rint(seconds))
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60
    if s < minute:
        return '{0}s'.format(s)
    if s < hour:
        return '{0}m {1:02}s'.format(s // minute, s % minute)
    if s < day:
        # Brief form: only the two largest units are shown.
        return '{0}h {1:02}m'.format(s // hour, (s // minute) % 60)
    return '{0}d {1:02}h'.format(s // day, (s // hour) % 24)
|
def ask_yes_no(question: str) -> bool:
    """Ask the user the question until the user inputs a valid answer."""
    while True:
        print('{0} [y/n]'.format(question))
        try:
            return strtobool(input().lower())
        except ValueError:
            # Unrecognized answer: prompt again.
            pass
|
def tuple_product(t: Tuple) -> Any:
    """Calculate the product of the tuple elements.

    Returns 1 (the multiplicative identity) for an empty tuple,
    matching the behavior of the previous hand-rolled loop.
    """
    import math
    # math.prod replaces the manual accumulate loop and runs in C.
    return math.prod(t)
|
def get_dtype_and_ctype(type_obj: Any) -> Tuple[(np.dtype, Any)]:
    """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
    # Accept a plain string, a class (__name__), or a dtype-like object (name).
    if isinstance(type_obj, str):
        type_str = type_obj
    elif hasattr(type_obj, '__name__'):
        type_str = type_obj.__name__
    elif hasattr(type_obj, 'name'):
        type_str = type_obj.name
    else:
        raise RuntimeError('Cannot infer type name from input')
    assert type_str in _str_to_ctype
    my_dtype = np.dtype(type_str)
    my_ctype = _str_to_ctype[type_str]
    # Sanity check: the numpy and ctypes representations must agree in size.
    assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
    return (my_dtype, my_ctype)
|
def is_pickleable(obj: Any) -> bool:
    """Return True if `obj` can be serialized with pickle, False otherwise."""
    try:
        with io.BytesIO() as stream:
            pickle.dump(obj, stream)
        return True
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; all pickling failures
        # derive from Exception.
        return False
|
def get_module_from_obj_name(obj_name: str) -> Tuple[(types.ModuleType, str)]:
    """Searches for the underlying module behind the name to some python object.
    Returns the module and the object name (original name with module part removed)."""
    # Canonicalize common aliases. The dots are escaped: the previous
    # patterns '^np.' / '^tf.' matched ANY character after the prefix,
    # mangling names like "npx..." into "numpy....".
    obj_name = re.sub(r'^np\.', 'numpy.', obj_name)
    obj_name = re.sub(r'^tf\.', 'tensorflow.', obj_name)
    # Try the longest module prefix first: "a.b.c" -> ("a.b.c", ""), ("a.b", "c"), ...
    parts = obj_name.split('.')
    name_pairs = [('.'.join(parts[:i]), '.'.join(parts[i:])) for i in range(len(parts), 0, -1)]
    for (module_name, local_obj_name) in name_pairs:
        try:
            module = importlib.import_module(module_name)
            get_obj_from_module(module, local_obj_name)  # probe: raises if missing
            return (module, local_obj_name)
        except Exception:
            # Narrowed from bare `except:` so KeyboardInterrupt propagates.
            pass
    # Nothing matched: re-run the imports to surface a more informative error.
    for (module_name, _local_obj_name) in name_pairs:
        try:
            importlib.import_module(module_name)
        except ImportError:
            # Re-raise unless the failure is simply "module_name itself is missing".
            if (not str(sys.exc_info()[1]).startswith((("No module named '" + module_name) + "'"))):
                raise
    # Maybe the module exists but the attribute lookup failed.
    for (module_name, local_obj_name) in name_pairs:
        try:
            module = importlib.import_module(module_name)
            get_obj_from_module(module, local_obj_name)
        except ImportError:
            pass
    raise ImportError(obj_name)
|
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
    """Traverses the object name and returns the last (rightmost) python object."""
    target = module
    # An empty name addresses the module itself; splitting '' would
    # yield [''] and fail the attribute walk.
    if obj_name:
        for attr in obj_name.split('.'):
            target = getattr(target, attr)
    return target
|
def get_obj_by_name(name: str) -> Any:
    """Finds the python object with the given name."""
    # Resolve the module prefix, then walk the remaining attribute path.
    return get_obj_from_module(*get_module_from_obj_name(name))
|
def call_func_by_name(*args, func_name: str=None, **kwargs) -> Any:
    """Finds the python object with the given name and calls it as a function."""
    assert func_name is not None
    target = get_obj_by_name(func_name)
    assert callable(target)
    return target(*args, **kwargs)
|
def construct_class_by_name(*args, class_name: str=None, **kwargs) -> Any:
    """Finds the python class with the given name and constructs it with the given arguments."""
    # A class is just a callable; delegate to the generic call helper.
    return call_func_by_name(*args, func_name=class_name, **kwargs)
|
def get_module_dir_by_obj_name(obj_name: str) -> str:
    """Get the directory path of the module containing the given object name."""
    module = get_module_from_obj_name(obj_name)[0]
    return os.path.dirname(inspect.getfile(module))
|
def is_top_level_function(obj: Any) -> bool:
    "Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."
    if not callable(obj):
        return False
    # Top-level defs appear under their own name in their module's dict.
    return obj.__name__ in sys.modules[obj.__module__].__dict__
|
def get_top_level_function_name(obj: Any) -> str:
    """Return the fully-qualified name of a top-level function."""
    assert is_top_level_function(obj)
    module = obj.__module__
    # Functions defined in a script report '__main__'; recover the real
    # module name from the script's file name instead.
    if module == '__main__':
        module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
    return module + '.' + obj.__name__
|
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str]=None, add_base_to_relative: bool=False) -> List[Tuple[(str, str)]]:
    """List all files recursively in a given directory while ignoring given file and directory names.
    Returns list of tuples containing both absolute and relative paths."""
    assert os.path.isdir(dir_path)
    base_name = os.path.basename(os.path.normpath(dir_path))
    patterns = ignores if ignores is not None else []
    result = []
    for root, dirs, files in os.walk(dir_path, topdown=True):
        for pattern in patterns:
            # Prune matching directories in place so os.walk skips them.
            for skipped in [d for d in dirs if fnmatch.fnmatch(d, pattern)]:
                dirs.remove(skipped)
            files = [f for f in files if not fnmatch.fnmatch(f, pattern)]
        absolute_paths = [os.path.join(root, f) for f in files]
        relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
        if add_base_to_relative:
            relative_paths = [os.path.join(base_name, p) for p in relative_paths]
        assert len(absolute_paths) == len(relative_paths)
        result.extend(zip(absolute_paths, relative_paths))
    return result
|
def copy_files_and_create_dirs(files: List[Tuple[(str, str)]]) -> None:
    """Takes in a list of tuples of (src, dst) paths and copies files.
    Will create all necessary directories."""
    for src, dst in files:
        target_dir_name = os.path.dirname(dst)
        # exist_ok=True replaces the previous check-then-create pattern,
        # which raced when several processes copied into the same tree.
        # Skip creation when dst has no directory component at all.
        if target_dir_name:
            os.makedirs(target_dir_name, exist_ok=True)
        shutil.copyfile(src, dst)
|
def is_url(obj: Any, allow_file_urls: bool=False) -> bool:
    """Determine whether the given object is a valid URL string."""
    # requests.compat.urlparse/urljoin are aliases of urllib.parse;
    # using the stdlib directly drops the third-party dependency here.
    from urllib.parse import urlparse, urljoin
    if (not isinstance(obj, str)) or ('://' not in obj):
        return False
    if allow_file_urls and obj.startswith('file://'):
        return True
    try:
        res = urlparse(obj)
        # Require a scheme and a dotted host name.
        if (not res.scheme) or (not res.netloc) or ('.' not in res.netloc):
            return False
        # Re-validate the URL's root, catching some malformed inputs.
        res = urlparse(urljoin(obj, '/'))
        if (not res.scheme) or (not res.netloc) or ('.' not in res.netloc):
            return False
    except Exception:
        # urlparse can raise ValueError on malformed input; narrowed
        # from a bare `except:` so KeyboardInterrupt propagates.
        return False
    return True
|
def open_url(url: str, cache_dir: str=None, num_attempts: int=10, verbose: bool=True, return_filename: bool=False, cache: bool=True) -> Any:
    'Download the given URL and return a binary-mode file object to access the data.'
    # Args:
    #   url:             URL (or plain local path) to open.
    #   cache_dir:       where cached downloads live; defaults to the dnnlib cache dir.
    #   num_attempts:    how many times to retry the download before giving up.
    #   verbose:         print progress to stdout.
    #   return_filename: return the cached file path instead of a file object
    #                    (only valid together with cache=True).
    #   cache:           reuse/store downloads keyed by the MD5 of the URL.
    assert (num_attempts >= 1)
    assert (not (return_filename and (not cache)))
    # No scheme prefix: treat the input as a plain local path.
    if (not re.match('^[a-z]+://', url)):
        return (url if return_filename else open(url, 'rb'))
    # file:// URLs map directly to the filesystem without caching.
    if url.startswith('file://'):
        filename = urllib.parse.urlparse(url).path
        # Strip the leading slash of Windows drive paths like "/C:/...".
        if re.match('^/[a-zA-Z]:', filename):
            filename = filename[1:]
        return (filename if return_filename else open(filename, 'rb'))
    assert is_url(url)
    if (cache_dir is None):
        cache_dir = make_cache_dir_path('downloads')
    # Cache entries are named "<md5-of-url>_<sanitized-server-filename>".
    url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
    if cache:
        cache_files = glob.glob(os.path.join(cache_dir, (url_md5 + '_*')))
        if (len(cache_files) == 1):
            filename = cache_files[0]
            return (filename if return_filename else open(filename, 'rb'))
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print(('Downloading %s ...' % url), end='', flush=True)
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if (len(res.content) == 0):
                        raise IOError('No data received')
                    # Small responses may be Google Drive interstitial pages
                    # rather than the payload; detect and handle them.
                    if (len(res.content) < 8192):
                        content_str = res.content.decode('utf-8')
                        if ('download_warning' in res.headers.get('Set-Cookie', '')):
                            # Extract the "download anyway" link and retry with it.
                            links = [html.unescape(link) for link in content_str.split('"') if ('export=download' in link)]
                            if (len(links) == 1):
                                url = requests.compat.urljoin(url, links[0])
                            raise IOError('Google Drive virus checker nag')
                        if ('Google Drive - Quota exceeded' in content_str):
                            raise IOError('Google Drive download quota exceeded -- please try again later')
                    # Prefer the server-provided filename for the cache entry.
                    match = re.search('filename="([^"]*)"', res.headers.get('Content-Disposition', ''))
                    url_name = (match[1] if match else url)
                    url_data = res.content
                    if verbose:
                        print(' done')
                    break
            except KeyboardInterrupt:
                raise
            except:
                # Out of retries: report and re-raise the last error.
                if (not attempts_left):
                    if verbose:
                        print(' failed')
                    raise
                if verbose:
                    print('.', end='', flush=True)
    if cache:
        # Write to a uniquely named temp file first and then atomically
        # replace, so concurrent processes never see a partial cache file.
        safe_name = re.sub('[^0-9a-zA-Z-._]', '_', url_name)
        safe_name = safe_name[:min(len(safe_name), 128)]
        cache_file = os.path.join(cache_dir, ((url_md5 + '_') + safe_name))
        temp_file = os.path.join(cache_dir, ((((('tmp_' + uuid.uuid4().hex) + '_') + url_md5) + '_') + safe_name))
        os.makedirs(cache_dir, exist_ok=True)
        with open(temp_file, 'wb') as f:
            f.write(url_data)
        os.replace(temp_file, cache_file)
        if return_filename:
            return cache_file
    assert (not return_filename)
    return io.BytesIO(url_data)
|
class NoiseScheduleEDM():
    # Noise-schedule object in the EDM parameterization (alpha_t = 1,
    # sigma_t = t — see the marginal_* methods), exposing the same
    # interface DPM-Solver expects from a NoiseScheduleVP-style object.
    def __init__(self, schedule='linear', betas=None, alphas_cumprod=None, continuous_beta_0=0.1, continuous_beta_1=20.0, dtype=torch.float32):
        # Args:
        #   schedule:            'discrete' (tabulated log-alphas) or 'linear' (continuous).
        #   betas:               discrete beta sequence (schedule='discrete').
        #   alphas_cumprod:      alternative to betas: cumulative alpha products.
        #   continuous_beta_0/1: endpoints of the continuous linear beta schedule.
        #   dtype:               dtype of the stored schedule tensors.
        if (schedule not in ['discrete', 'linear']):
            raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear'".format(schedule))
        self.schedule = schedule
        if (schedule == 'discrete'):
            if (betas is not None):
                # log(alpha_bar_t) accumulated from betas: 0.5 * cumsum(log(1 - beta)).
                log_alphas = (0.5 * torch.log((1 - betas)).cumsum(dim=0))
            else:
                assert (alphas_cumprod is not None)
                log_alphas = (0.5 * torch.log(alphas_cumprod))
            self.T = 1.0
            # Clip numerically unstable entries near t=T, then store as a row vector.
            self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, (- 1))).to(dtype=dtype)
            self.total_N = self.log_alpha_array.shape[1]
            self.t_array = torch.linspace(0.0, 1.0, (self.total_N + 1))[1:].reshape((1, (- 1))).to(dtype=dtype)
        else:
            # Continuous schedule: time runs up to T = 80 (EDM sigma_max).
            self.T = 80.0
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
    def numerical_clip_alpha(self, log_alphas, clipped_lambda=(- 5.1)):
        '\n For some beta schedules such as cosine schedule, the log-SNR has numerical isssues. \n We clip the log-SNR near t=T within -5.1 to ensure the stability.\n Such a trick is very useful for diffusion models with the cosine schedule, such as i-DDPM, guided-diffusion and GLIDE.\n '
        log_sigmas = (0.5 * torch.log((1.0 - torch.exp((2.0 * log_alphas)))))
        lambs = (log_alphas - log_sigmas)
        # Count trailing entries whose log-SNR fell below the clip value
        # (lambs decreases with t, so search its reversed copy) and drop them.
        idx = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)
        if (idx > 0):
            log_alphas = log_alphas[:(- idx)]
        return log_alphas
    def marginal_log_mean_coeff(self, t):
        '\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n '
        # EDM parameterization: alpha_t == 1, hence log(alpha_t) == 0.
        return torch.zeros_like(t)
    def marginal_alpha(self, t):
        '\n Compute alpha_t of a given continuous-time label t in [0, T].\n '
        # EDM parameterization: alpha_t == 1.
        return torch.ones_like(t)
    def marginal_std(self, t):
        '\n Compute sigma_t of a given continuous-time label t in [0, T].\n '
        # EDM parameterization: sigma_t == t.
        return t
    def marginal_lambda(self, t):
        '\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n '
        # With alpha_t == 1 and sigma_t == t: lambda_t = -log(t).
        return (- torch.log(t))
    def inverse_lambda(self, lamb):
        '\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n '
        # Inverse of marginal_lambda: t = exp(-lambda), flattened to 1-D.
        return (- lamb).exp().reshape(((- 1),))
|
def model_wrapper(model, noise_schedule, model_type='noise', model_kwargs={}, guidance_type='uncond', condition=None, unconditional_condition=None, guidance_scale=1.0, classifier_fn=None, classifier_kwargs={}):
    'Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. "noise": noise prediction model. (Trained by predicting noise).\n\n 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. "v": velocity prediction model. (Trained by predicting the velocity).\n The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."\n arXiv preprint arXiv:2210.02303 (2022).\n \n 4. "score": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. "uncond": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n `` \n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 
    8780-8794.\n\n 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n `` \n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."\n arXiv preprint arXiv:2207.12598 (2022).\n \n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs) \n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n "noise" or "x_start" or "v" or "score".\n model_kwargs: A `dict`. A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n "uncond" or "classifier" or "classifier-free".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for "classifier" or "classifier-free" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for "classifier-free" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. 
    A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n '
    # NOTE(review): model_kwargs/classifier_kwargs are mutable default
    # arguments — shared across calls if ever mutated; confirm they are
    # only read, never written, by the wrapped closures below.
    def get_model_input_time(t_continuous):
        '\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n '
        if (noise_schedule.schedule == 'discrete'):
            return ((t_continuous - (1.0 / noise_schedule.total_N)) * 1000.0)
        else:
            return t_continuous
    def noise_pred_fn(x, t_continuous, cond=None):
        # Call the raw model, then convert its output (per model_type)
        # into an epsilon/noise prediction.
        t_input = get_model_input_time(t_continuous)
        if (cond is None):
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if (model_type == 'noise'):
            return output
        elif (model_type == 'x_start'):
            # eps = (x - alpha_t * x0) / sigma_t
            (alpha_t, sigma_t) = (noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous))
            return ((x - (expand_dims(alpha_t, x.dim()) * output)) / expand_dims(sigma_t, x.dim()))
        elif (model_type == 'v'):
            # eps = alpha_t * v + sigma_t * x
            (alpha_t, sigma_t) = (noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous))
            return ((expand_dims(alpha_t, x.dim()) * output) + (expand_dims(sigma_t, x.dim()) * x))
        elif (model_type == 'score'):
            # eps = -sigma_t * score
            sigma_t = noise_schedule.marginal_std(t_continuous)
            return ((- expand_dims(sigma_t, x.dim())) * output)
    def cond_grad_fn(x, t_input):
        '\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n '
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]
    def model_fn(x, t_continuous):
        '\n The noise predicition model function that is used for DPM-Solver.\n '
        if (guidance_type == 'uncond'):
            return noise_pred_fn(x, t_continuous)
        elif (guidance_type == 'classifier'):
            # Classifier guidance: shift the noise prediction along the
            # classifier's gradient, scaled by guidance_scale * sigma_t.
            assert (classifier_fn is not None)
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return (noise - ((guidance_scale * expand_dims(sigma_t, x.dim())) * cond_grad))
        elif (guidance_type == 'classifier-free'):
            if ((guidance_scale == 1.0) or (unconditional_condition is None)):
                # No guidance needed: a single conditional forward pass suffices.
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                # Batch the unconditional and conditional passes together,
                # then combine: eps_uncond + s * (eps_cond - eps_uncond).
                x_in = torch.cat(([x] * 2))
                t_in = torch.cat(([t_continuous] * 2))
                c_in = torch.cat([unconditional_condition, condition])
                (noise_uncond, noise) = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return (noise_uncond + (guidance_scale * (noise - noise_uncond)))
    # Validate configuration once, at wrapper-construction time.
    assert (model_type in ['noise', 'x_start', 'v', 'score'])
    assert (guidance_type in ['uncond', 'classifier', 'classifier-free'])
    return model_fn
|
class DPM_Solver():
def __init__(self, model_fn, noise_schedule, algorithm_type='dpmsolver++', correcting_x0_fn=None, correcting_xt_fn=None, thresholding_max_val=1.0, dynamic_thresholding_ratio=0.995):
'Construct a DPM-Solver. \n\n We support both DPM-Solver (`algorithm_type="dpmsolver"`) and DPM-Solver++ (`algorithm_type="dpmsolver++"`).\n\n We also support the "dynamic thresholding" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type="dpmsolver++"` and `correcting_x0_fn="dynamic_thresholding"` to use the\n dynamic thresholding. The "dynamic thresholding" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either "dpmsolver" or "dpmsolver++".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn="dynamic_thresholding"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. 
The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.\n '
self.model = (lambda x, t: model_fn(x, t))
self.noise_schedule = noise_schedule
assert (algorithm_type in ['dpmsolver', 'dpmsolver++'])
self.algorithm_type = algorithm_type
if (correcting_x0_fn == 'dynamic_thresholding'):
self.correcting_x0_fn = self.dynamic_thresholding_fn
else:
self.correcting_x0_fn = correcting_x0_fn
self.correcting_xt_fn = correcting_xt_fn
self.dynamic_thresholding_ratio = dynamic_thresholding_ratio
self.thresholding_max_val = thresholding_max_val
    def dynamic_thresholding_fn(self, x0, t):
        """Dynamic thresholding (Imagen, arXiv:2205.11487).

        Computes the `dynamic_thresholding_ratio` quantile s of |x0| per
        batch element, floors it at `thresholding_max_val`, then clamps x0
        to [-s, s] and rescales by s.

        Args:
            x0: Predicted clean sample, shape (batch_size, *shape).
            t: Time step; unused here, kept for the corrector interface.
        Returns:
            The thresholded and rescaled x0.
        """
        dims = x0.dim()
        p = self.dynamic_thresholding_ratio
        # Per-sample quantile of |x0| over all non-batch dimensions.
        s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], (- 1))), p, dim=1)
        # Floor the threshold at thresholding_max_val so well-behaved samples
        # (all values within the max range) are left untouched.
        s = expand_dims(torch.maximum(s, (self.thresholding_max_val * torch.ones_like(s).to(s.device))), dims)
        x0 = (torch.clamp(x0, (- s), s) / s)
        return x0
    def noise_prediction_fn(self, x, t):
        """Return the noise prediction of the wrapped model at (x, t)."""
        return self.model(x, t)
def data_prediction_fn(self, x, t):
'\n Return the data prediction model (with corrector).\n '
noise = self.noise_prediction_fn(x, t)
(alpha_t, sigma_t) = (self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t))
x0 = ((x - (sigma_t * noise)) / alpha_t)
if (self.correcting_x0_fn is not None):
x0 = self.correcting_x0_fn(x0, t)
return x0
def model_fn(self, x, t):
'\n Convert the model to the noise prediction model or the data prediction model. \n '
if (self.algorithm_type == 'dpmsolver++'):
return self.data_prediction_fn(x, t)
else:
return self.noise_prediction_fn(x, t)
def get_time_steps(self, skip_type, t_T, t_0, N, device):
"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n "
if (skip_type == 'logSNR'):
lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), (N + 1)).to(device)
return self.noise_schedule.inverse_lambda(logSNR_steps)
elif (skip_type == 'time_uniform'):
return torch.linspace(t_T, t_0, (N + 1)).to(device)
elif (skip_type == 'time_quadratic'):
t_order = 2
t = torch.linspace((t_T ** (1.0 / t_order)), (t_0 ** (1.0 / t_order)), (N + 1)).pow(t_order).to(device)
return t
else:
raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
'\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - \'logSNR\': uniform logSNR for the time steps.\n - \'time_uniform\': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - \'time_quadratic\': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n '
if (order == 3):
K = ((steps // 3) + 1)
if ((steps % 3) == 0):
orders = (([3] * (K - 2)) + [2, 1])
elif ((steps % 3) == 1):
orders = (([3] * (K - 1)) + [1])
else:
orders = (([3] * (K - 1)) + [2])
elif (order == 2):
if ((steps % 2) == 0):
K = (steps // 2)
orders = ([2] * K)
else:
K = ((steps // 2) + 1)
orders = (([2] * (K - 1)) + [1])
elif (order == 1):
K = 1
orders = ([1] * steps)
else:
raise ValueError("'order' must be '1' or '2' or '3'.")
if (skip_type == 'logSNR'):
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
else:
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor(([0] + orders)), 0).to(device)]
return (timesteps_outer, orders)
    def denoise_to_zero_fn(self, x, s):
        """Denoise at the final step.

        Equivalent to solving the ODE from lambda_s to infinity by
        first-order discretization, which reduces to returning the
        predicted x0 at time `s`.
        """
        return self.data_prediction_fn(x, s)
def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
'\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n '
ns = self.noise_schedule
dims = x.dim()
(lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t))
h = (lambda_t - lambda_s)
(log_alpha_s, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t))
(sigma_s, sigma_t) = (ns.marginal_std(s), ns.marginal_std(t))
alpha_t = torch.exp(log_alpha_t)
if (self.algorithm_type == 'dpmsolver++'):
phi_1 = torch.expm1((- h))
if (model_s is None):
model_s = self.model_fn(x, s)
x_t = (((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s))
if return_intermediate:
return (x_t, {'model_s': model_s})
else:
return x_t
else:
phi_1 = torch.expm1(h)
if (model_s is None):
model_s = self.model_fn(x, s)
x_t = ((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s))
if return_intermediate:
return (x_t, {'model_s': model_s})
else:
return x_t
    def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpmsolver'):
        """Singlestep DPM-Solver-2 from time `s` to time `t`.

        Takes one intermediate model evaluation at s1 (placed at relative
        position `r1` in lambda-space) to form a second-order update.

        Args:
            x: Initial value at time `s`.
            s: Starting time, shape (1,).
            t: Ending time, shape (1,).
            r1: Relative position of the intermediate point in lambda-space.
            model_s: Optional model output at `s`; evaluated when None.
            return_intermediate: If True, also return the model outputs at
                `s` and `s1`.
            solver_type: 'dpmsolver' or 'taylor'; slightly different
                second-order correction terms.
        Returns:
            x_t, or (x_t, intermediates) when `return_intermediate` is True.
        Raises:
            ValueError: If `solver_type` is not 'dpmsolver' or 'taylor'.
        """
        if (solver_type not in ['dpmsolver', 'taylor']):
            raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
        if (r1 is None):
            r1 = 0.5
        ns = self.noise_schedule
        (lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t))
        h = (lambda_t - lambda_s)
        # Intermediate time s1 at lambda_s + r1 * h.
        lambda_s1 = (lambda_s + (r1 * h))
        s1 = ns.inverse_lambda(lambda_s1)
        (log_alpha_s, log_alpha_s1, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t))
        (sigma_s, sigma_s1, sigma_t) = (ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t))
        (alpha_s1, alpha_t) = (torch.exp(log_alpha_s1), torch.exp(log_alpha_t))
        if (self.algorithm_type == 'dpmsolver++'):
            # Data-prediction (DPM-Solver++) coefficients.
            phi_11 = torch.expm1(((- r1) * h))
            phi_1 = torch.expm1((- h))
            if (model_s is None):
                model_s = self.model_fn(x, s)
            # First-order step to the intermediate point s1.
            x_s1 = (((sigma_s1 / sigma_s) * x) - ((alpha_s1 * phi_11) * model_s))
            model_s1 = self.model_fn(x_s1, s1)
            if (solver_type == 'dpmsolver'):
                x_t = ((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) - (((0.5 / r1) * (alpha_t * phi_1)) * (model_s1 - model_s)))
            elif (solver_type == 'taylor'):
                x_t = ((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) + (((1.0 / r1) * (alpha_t * ((phi_1 / h) + 1.0))) * (model_s1 - model_s)))
        else:
            # Noise-prediction (DPM-Solver) coefficients.
            phi_11 = torch.expm1((r1 * h))
            phi_1 = torch.expm1(h)
            if (model_s is None):
                model_s = self.model_fn(x, s)
            # First-order step to the intermediate point s1.
            x_s1 = ((torch.exp((log_alpha_s1 - log_alpha_s)) * x) - ((sigma_s1 * phi_11) * model_s))
            model_s1 = self.model_fn(x_s1, s1)
            if (solver_type == 'dpmsolver'):
                x_t = (((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - (((0.5 / r1) * (sigma_t * phi_1)) * (model_s1 - model_s)))
            elif (solver_type == 'taylor'):
                x_t = (((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - (((1.0 / r1) * (sigma_t * ((phi_1 / h) - 1.0))) * (model_s1 - model_s)))
        if return_intermediate:
            return (x_t, {'model_s': model_s, 'model_s1': model_s1})
        else:
            return x_t
    def singlestep_dpm_solver_third_update(self, x, s, t, r1=(1.0 / 3.0), r2=(2.0 / 3.0), model_s=None, model_s1=None, return_intermediate=False, solver_type='dpmsolver'):
        """Singlestep DPM-Solver-3 from time `s` to time `t`.

        Takes two intermediate model evaluations at s1 and s2 (relative
        positions `r1` and `r2` in lambda-space) to form a third-order update.

        Args:
            x: Initial value at time `s`.
            s: Starting time, shape (1,).
            t: Ending time, shape (1,).
            r1: Relative position of the first intermediate point.
            r2: Relative position of the second intermediate point.
            model_s: Optional model output at `s`; evaluated when None.
            model_s1: Optional model output at `s1`; evaluated when None.
            return_intermediate: If True, also return the model outputs at
                `s`, `s1` and `s2`.
            solver_type: 'dpmsolver' or 'taylor'; slightly different
                high-order correction terms.
        Returns:
            x_t, or (x_t, intermediates) when `return_intermediate` is True.
        Raises:
            ValueError: If `solver_type` is not 'dpmsolver' or 'taylor'.
        """
        if (solver_type not in ['dpmsolver', 'taylor']):
            raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
        if (r1 is None):
            r1 = (1.0 / 3.0)
        if (r2 is None):
            r2 = (2.0 / 3.0)
        ns = self.noise_schedule
        (lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t))
        h = (lambda_t - lambda_s)
        # Intermediate times at lambda_s + r1*h and lambda_s + r2*h.
        lambda_s1 = (lambda_s + (r1 * h))
        lambda_s2 = (lambda_s + (r2 * h))
        s1 = ns.inverse_lambda(lambda_s1)
        s2 = ns.inverse_lambda(lambda_s2)
        (log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t))
        (sigma_s, sigma_s1, sigma_s2, sigma_t) = (ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t))
        (alpha_s1, alpha_s2, alpha_t) = (torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t))
        if (self.algorithm_type == 'dpmsolver++'):
            # Data-prediction (DPM-Solver++) phi coefficients.
            phi_11 = torch.expm1(((- r1) * h))
            phi_12 = torch.expm1(((- r2) * h))
            phi_1 = torch.expm1((- h))
            phi_22 = ((torch.expm1(((- r2) * h)) / (r2 * h)) + 1.0)
            phi_2 = ((phi_1 / h) + 1.0)
            phi_3 = ((phi_2 / h) - 0.5)
            if (model_s is None):
                model_s = self.model_fn(x, s)
            if (model_s1 is None):
                # First-order step to s1 to get the first intermediate evaluation.
                x_s1 = (((sigma_s1 / sigma_s) * x) - ((alpha_s1 * phi_11) * model_s))
                model_s1 = self.model_fn(x_s1, s1)
            # Second-order step to s2 using the difference at s1.
            x_s2 = ((((sigma_s2 / sigma_s) * x) - ((alpha_s2 * phi_12) * model_s)) + (((r2 / r1) * (alpha_s2 * phi_22)) * (model_s1 - model_s)))
            model_s2 = self.model_fn(x_s2, s2)
            if (solver_type == 'dpmsolver'):
                x_t = ((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) + (((1.0 / r2) * (alpha_t * phi_2)) * (model_s2 - model_s)))
            elif (solver_type == 'taylor'):
                # Finite-difference estimates of the first and second
                # derivatives of the model output w.r.t. lambda.
                D1_0 = ((1.0 / r1) * (model_s1 - model_s))
                D1_1 = ((1.0 / r2) * (model_s2 - model_s))
                D1 = (((r2 * D1_0) - (r1 * D1_1)) / (r2 - r1))
                D2 = ((2.0 * (D1_1 - D1_0)) / (r2 - r1))
                x_t = (((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) + ((alpha_t * phi_2) * D1)) - ((alpha_t * phi_3) * D2))
        else:
            # Noise-prediction (DPM-Solver) phi coefficients.
            phi_11 = torch.expm1((r1 * h))
            phi_12 = torch.expm1((r2 * h))
            phi_1 = torch.expm1(h)
            phi_22 = ((torch.expm1((r2 * h)) / (r2 * h)) - 1.0)
            phi_2 = ((phi_1 / h) - 1.0)
            phi_3 = ((phi_2 / h) - 0.5)
            if (model_s is None):
                model_s = self.model_fn(x, s)
            if (model_s1 is None):
                # First-order step to s1 to get the first intermediate evaluation.
                x_s1 = ((torch.exp((log_alpha_s1 - log_alpha_s)) * x) - ((sigma_s1 * phi_11) * model_s))
                model_s1 = self.model_fn(x_s1, s1)
            # Second-order step to s2 using the difference at s1.
            x_s2 = (((torch.exp((log_alpha_s2 - log_alpha_s)) * x) - ((sigma_s2 * phi_12) * model_s)) - (((r2 / r1) * (sigma_s2 * phi_22)) * (model_s1 - model_s)))
            model_s2 = self.model_fn(x_s2, s2)
            if (solver_type == 'dpmsolver'):
                x_t = (((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - (((1.0 / r2) * (sigma_t * phi_2)) * (model_s2 - model_s)))
            elif (solver_type == 'taylor'):
                # Finite-difference estimates of the first and second
                # derivatives of the model output w.r.t. lambda.
                D1_0 = ((1.0 / r1) * (model_s1 - model_s))
                D1_1 = ((1.0 / r2) * (model_s2 - model_s))
                D1 = (((r2 * D1_0) - (r1 * D1_1)) / (r2 - r1))
                D2 = ((2.0 * (D1_1 - D1_0)) / (r2 - r1))
                x_t = ((((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - ((sigma_t * phi_2) * D1)) - ((sigma_t * phi_3) * D2))
        if return_intermediate:
            return (x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2})
        else:
            return x_t
    def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):
        """Multistep DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.

        Reuses the two most recent model evaluations instead of taking an
        extra intermediate evaluation.

        Args:
            x: Current sample at time `t_prev_list[-1]`.
            model_prev_list: Previously computed model outputs (last two used).
            t_prev_list: Previous times, each with shape (1,) (last two used).
            t: Ending time, shape (1,).
            solver_type: 'dpmsolver' or 'taylor'; slightly different
                second-order correction terms.
        Returns:
            x_t: The approximated solution at time `t`.
        Raises:
            ValueError: If `solver_type` is not 'dpmsolver' or 'taylor'.
        """
        if (solver_type not in ['dpmsolver', 'taylor']):
            raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
        ns = self.noise_schedule
        (model_prev_1, model_prev_0) = (model_prev_list[(- 2)], model_prev_list[(- 1)])
        (t_prev_1, t_prev_0) = (t_prev_list[(- 2)], t_prev_list[(- 1)])
        (lambda_prev_1, lambda_prev_0, lambda_t) = (ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t))
        (log_alpha_prev_0, log_alpha_t) = (ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t))
        (sigma_prev_0, sigma_t) = (ns.marginal_std(t_prev_0), ns.marginal_std(t))
        alpha_t = torch.exp(log_alpha_t)
        # Previous and current step sizes in lambda-space, and their ratio.
        h_0 = (lambda_prev_0 - lambda_prev_1)
        h = (lambda_t - lambda_prev_0)
        r0 = (h_0 / h)
        # Backward finite difference of the model output w.r.t. lambda.
        D1_0 = ((1.0 / r0) * (model_prev_0 - model_prev_1))
        if (self.algorithm_type == 'dpmsolver++'):
            phi_1 = torch.expm1((- h))
            if (solver_type == 'dpmsolver'):
                x_t = ((((sigma_t / sigma_prev_0) * x) - ((alpha_t * phi_1) * model_prev_0)) - ((0.5 * (alpha_t * phi_1)) * D1_0))
            elif (solver_type == 'taylor'):
                x_t = ((((sigma_t / sigma_prev_0) * x) - ((alpha_t * phi_1) * model_prev_0)) + ((alpha_t * ((phi_1 / h) + 1.0)) * D1_0))
        else:
            phi_1 = torch.expm1(h)
            if (solver_type == 'dpmsolver'):
                x_t = (((torch.exp((log_alpha_t - log_alpha_prev_0)) * x) - ((sigma_t * phi_1) * model_prev_0)) - ((0.5 * (sigma_t * phi_1)) * D1_0))
            elif (solver_type == 'taylor'):
                x_t = (((torch.exp((log_alpha_t - log_alpha_prev_0)) * x) - ((sigma_t * phi_1) * model_prev_0)) - ((sigma_t * ((phi_1 / h) - 1.0)) * D1_0))
        return x_t
    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):
        """Multistep DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.

        Reuses the three most recent model evaluations to form a
        third-order update.

        Args:
            x: Current sample at time `t_prev_list[-1]`.
            model_prev_list: The three previously computed model outputs.
            t_prev_list: The three previous times, each with shape (1,).
            t: Ending time, shape (1,).
            solver_type: Accepted for interface symmetry with the
                second-order update. NOTE(review): unlike
                `multistep_dpm_solver_second_update`, this argument is
                neither validated nor used here — both branches apply the
                same formula.
        Returns:
            x_t: The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        (model_prev_2, model_prev_1, model_prev_0) = model_prev_list
        (t_prev_2, t_prev_1, t_prev_0) = t_prev_list
        (lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t) = (ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t))
        (log_alpha_prev_0, log_alpha_t) = (ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t))
        (sigma_prev_0, sigma_t) = (ns.marginal_std(t_prev_0), ns.marginal_std(t))
        alpha_t = torch.exp(log_alpha_t)
        # Previous two step sizes and the current step size in lambda-space.
        h_1 = (lambda_prev_1 - lambda_prev_2)
        h_0 = (lambda_prev_0 - lambda_prev_1)
        h = (lambda_t - lambda_prev_0)
        (r0, r1) = ((h_0 / h), (h_1 / h))
        # Backward finite differences of the model output w.r.t. lambda.
        D1_0 = ((1.0 / r0) * (model_prev_0 - model_prev_1))
        D1_1 = ((1.0 / r1) * (model_prev_1 - model_prev_2))
        D1 = (D1_0 + ((r0 / (r0 + r1)) * (D1_0 - D1_1)))
        D2 = ((1.0 / (r0 + r1)) * (D1_0 - D1_1))
        if (self.algorithm_type == 'dpmsolver++'):
            # Data-prediction (DPM-Solver++) phi coefficients.
            phi_1 = torch.expm1((- h))
            phi_2 = ((phi_1 / h) + 1.0)
            phi_3 = ((phi_2 / h) - 0.5)
            x_t = (((((sigma_t / sigma_prev_0) * x) - ((alpha_t * phi_1) * model_prev_0)) + ((alpha_t * phi_2) * D1)) - ((alpha_t * phi_3) * D2))
        else:
            # Noise-prediction (DPM-Solver) phi coefficients.
            phi_1 = torch.expm1(h)
            phi_2 = ((phi_1 / h) - 1.0)
            phi_3 = ((phi_2 / h) - 0.5)
            x_t = ((((torch.exp((log_alpha_t - log_alpha_prev_0)) * x) - ((sigma_t * phi_1) * model_prev_0)) - ((sigma_t * phi_2) * D1)) - ((sigma_t * phi_3) * D2))
        return x_t
def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None, r2=None):
"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n "
if (order == 1):
return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
elif (order == 2):
return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)
elif (order == 3):
return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)
else:
raise ValueError('Solver order must be 1 or 2 or 3, got {}'.format(order))
def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):
"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n "
if (order == 1):
return self.dpm_solver_first_update(x, t_prev_list[(- 1)], t, model_s=model_prev_list[(- 1)])
elif (order == 2):
return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
elif (order == 3):
return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
else:
raise ValueError('Solver order must be 1 or 2 or 3, got {}'.format(order))
    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-05, solver_type='dpmsolver'):
        """Adaptive step size solver based on singlestep DPM-Solver.

        Embedded pair: a lower-order and a higher-order update share model
        evaluations; their difference drives the step-size control, following
        Jolicoeur-Martineau et al., "Gotta go fast when generating data with
        score-based models" (arXiv:2105.14080).

        Args:
            x: Initial value at time `t_T`.
            order: The higher order of the embedded pair; 2 or 3.
            t_T: Starting time of the sampling.
            t_0: Ending time of the sampling.
            h_init: Initial step size in lambda (logSNR) space.
            atol: Absolute tolerance (0.0078 for image data, per the paper).
            rtol: Relative tolerance.
            theta: Safety factor for the step-size update.
            t_err: Stop when |s - t_0| falls below this tolerance.
            solver_type: 'dpmsolver' or 'taylor' for the high-order updates.
        Returns:
            x_0: The approximated solution at time `t_0`.
        Raises:
            ValueError: If `order` is not 2 or 3.
        """
        ns = self.noise_schedule
        s = (t_T * torch.ones((1,)).to(x))
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda((t_0 * torch.ones_like(s).to(x)))
        h = (h_init * torch.ones_like(s).to(x))
        x_prev = x
        nfe = 0  # number of function evaluations, for reporting only
        if (order == 2):
            r1 = 0.5
            # Embedded pair: first-order (lower) / second-order (higher);
            # the lower update's model output is reused by the higher one.
            lower_update = (lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True))
            higher_update = (lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs))
        elif (order == 3):
            (r1, r2) = ((1.0 / 3.0), (2.0 / 3.0))
            # Embedded pair: second-order (lower) / third-order (higher).
            lower_update = (lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type))
            higher_update = (lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs))
        else:
            raise ValueError('For adaptive step size solver, order must be 2 or 3, got {}'.format(order))
        while (torch.abs((s - t_0)).mean() > t_err):
            t = ns.inverse_lambda((lambda_s + h))
            (x_lower, lower_noise_kwargs) = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            # Mixed absolute/relative error tolerance per element.
            delta = torch.max((torch.ones_like(x).to(x) * atol), (rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))))
            norm_fn = (lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], (- 1)))).mean(dim=(- 1), keepdim=True)))
            E = norm_fn(((x_higher - x_lower) / delta)).max()
            if torch.all((E <= 1.0)):
                # Step accepted: advance with the higher-order solution.
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            # Shrink/grow the step by the standard E^(-1/order) rule, capped
            # so we never step past lambda_0.
            h = torch.min(((theta * h) * torch.float_power(E, ((- 1.0) / order)).float()), (lambda_0 - lambda_s))
            nfe += order
        # NOTE(review): debug print left in library code; consider routing
        # through the logging module instead.
        print('adaptive solver nfe', nfe)
        return x
    def add_noise(self, x, t, noise=None):
        """Compute the noised input xt = alpha_t * x + sigma_t * noise.

        Args:
            x: A `torch.Tensor` with shape (batch_size, *shape).
            t: A `torch.Tensor` with shape (t_size,).
            noise: Optional noise tensor; sampled from a standard normal of
                shape (t_size, batch_size, *shape) when None.
        Returns:
            xt with shape (t_size, batch_size, *shape), squeezed to
            (batch_size, *shape) when t_size == 1.
        """
        (alpha_t, sigma_t) = (self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t))
        if (noise is None):
            noise = torch.randn((t.shape[0], *x.shape), device=x.device)
        # Prepend a singleton t-dimension so x broadcasts against the
        # (t_size, batch_size, *shape) noise tensor.
        x = x.reshape(((- 1), *x.shape))
        # expand_dims aligns the per-time coefficients with the sample rank.
        xt = ((expand_dims(alpha_t, x.dim()) * x) + (expand_dims(sigma_t, x.dim()) * noise))
        if (t.shape[0] == 1):
            # Single time step: drop the leading t-dimension.
            return xt.squeeze(0)
        else:
            return xt
def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform', method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver', atol=0.0078, rtol=0.05, return_intermediate=False):
'\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n '
t_0 = ((1.0 / self.noise_schedule.total_N) if (t_start is None) else t_start)
t_T = (self.noise_schedule.T if (t_end is None) else t_end)
assert ((t_0 > 0) and (t_T > 0)), 'Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array'
return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type, method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero, solver_type=solver_type, atol=atol, rtol=rtol, return_intermediate=return_intermediate)
def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform', method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver', atol=0.0078, rtol=0.05, return_intermediate=False):
    """
    Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.

    Supported algorithms (selected via `method`):
        - 'singlestep': singlestep DPM-Solver ("DPM-Solver-fast" in the paper); combines
          all singlestep solvers of order <= `order` so the total NFE equals `steps`.
        - 'multistep': multistep DPM-Solver of the given `order`; the first `order`
          values are initialized by lower-order multistep solvers. Total NFE == `steps`.
        - 'singlestep_fixed': fixed-order singlestep solver, using
          (`steps` // `order`) * `order` NFE in total.
        - 'adaptive': adaptive step-size solver ("DPM-Solver-12" / "DPM-Solver-23" in the
          paper); `steps` is ignored and `atol` / `rtol` trade off cost vs. quality.

    Supported `skip_type` values for spacing the time steps:
        - 'logSNR': uniform logSNR (recommended for low-resolution images).
        - 'time_uniform': uniform time (recommended for high-resolution images).
        - 'time_quadratic': quadratic time.

    Args:
        x: A pytorch tensor. The initial value at time `t_start`
            (e.g. a standard-normal sample when `t_start` == T).
        steps: An `int`. The total number of function evaluations (NFE).
        t_start: A `float`. The starting time; defaults to `self.noise_schedule.T`.
        t_end: A `float`. The ending time; defaults to `1. / self.noise_schedule.total_N`.
            For discrete-time DPMs, `1. / total_N` is recommended; for continuous-time
            DPMs, 1e-3 when `steps` <= 15 and 1e-4 when `steps` > 15.
        order: An `int`. The order of DPM-Solver.
        skip_type: A `str`. 'time_uniform' or 'logSNR' or 'time_quadratic'.
        method: A `str`. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
        lower_order_final: A `bool`. Use lower-order solvers at the final steps
            (multistep only; stabilizes sampling with very few steps, especially <= 10).
        denoise_to_zero: A `bool`. Denoise to time 0 at the final step (one extra NFE).
        solver_type: A `str`. Taylor expansion type, 'dpmsolver' or 'taylor'.
        atol: A `float`. Absolute tolerance of the adaptive solver.
        rtol: A `float`. Relative tolerance of the adaptive solver.
        return_intermediate: A `bool`. Also return the xt saved at each step.
    Returns:
        x_end: the approximated solution at time `t_end`, or `(x_end, intermediates)`
        when `return_intermediate` is True.
    """
    # Note: despite the names, t_0 is the small *end* time here and t_T the start time.
    t_0 = ((1.0 / self.noise_schedule.total_N) if (t_end is None) else t_end)
    t_T = (self.noise_schedule.T if (t_start is None) else t_start)
    assert ((t_0 > 0) and (t_T > 0)), 'Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array'
    if return_intermediate:
        assert (method in ['multistep', 'singlestep', 'singlestep_fixed']), 'Cannot use adaptive solver when saving intermediate values'
    if (self.correcting_xt_fn is not None):
        assert (method in ['multistep', 'singlestep', 'singlestep_fixed']), 'Cannot use adaptive solver when correcting_xt_fn is not None'
    device = x.device
    intermediates = []
    with torch.no_grad():
        if (method == 'adaptive'):
            x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)
        elif (method == 'multistep'):
            assert (steps >= order)
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert ((timesteps.shape[0] - 1) == steps)
            # Initialize with a single model evaluation at the first time step.
            step = 0
            t = timesteps[step]
            t_prev_list = [t]
            model_prev_list = [self.model_fn(x, t)]
            if (self.correcting_xt_fn is not None):
                x = self.correcting_xt_fn(x, t, step)
            if return_intermediate:
                intermediates.append(x)
            # Warm up the first `order` values with lower-order multistep updates.
            for step in range(1, order):
                t = timesteps[step]
                x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step, solver_type=solver_type)
                if (self.correcting_xt_fn is not None):
                    x = self.correcting_xt_fn(x, t, step)
                if return_intermediate:
                    intermediates.append(x)
                t_prev_list.append(t)
                model_prev_list.append(self.model_fn(x, t))
            # Remaining steps use the full `order`-th order multistep solver.
            for step in range(order, (steps + 1)):
                t = timesteps[step]
                # Optionally drop to lower order near the end (only for few-step runs).
                if (lower_order_final and (steps < 10)):
                    step_order = min(order, ((steps + 1) - step))
                else:
                    step_order = order
                x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order, solver_type=solver_type)
                if (self.correcting_xt_fn is not None):
                    x = self.correcting_xt_fn(x, t, step)
                if return_intermediate:
                    intermediates.append(x)
                # Shift the cached times / model outputs one slot to the left.
                for i in range((order - 1)):
                    t_prev_list[i] = t_prev_list[(i + 1)]
                    model_prev_list[i] = model_prev_list[(i + 1)]
                t_prev_list[(- 1)] = t
                # The model value at the final time is never used — skip the evaluation.
                if (step < steps):
                    model_prev_list[(- 1)] = self.model_fn(x, t)
        elif (method in ['singlestep', 'singlestep_fixed']):
            if (method == 'singlestep'):
                (timesteps_outer, orders) = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)
            elif (method == 'singlestep_fixed'):
                K = (steps // order)
                orders = ([order] * K)
                timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
            for (step, order) in enumerate(orders):
                (s, t) = (timesteps_outer[step], timesteps_outer[(step + 1)])
                # Intermediate ratios r1/r2 are computed in logSNR (lambda) space.
                timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order, device=device)
                lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
                h = (lambda_inner[(- 1)] - lambda_inner[0])
                r1 = (None if (order <= 1) else ((lambda_inner[1] - lambda_inner[0]) / h))
                r2 = (None if (order <= 2) else ((lambda_inner[2] - lambda_inner[0]) / h))
                x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)
                if (self.correcting_xt_fn is not None):
                    x = self.correcting_xt_fn(x, t, step)
                if return_intermediate:
                    intermediates.append(x)
        else:
            raise ValueError('Got wrong method {}'.format(method))
        if denoise_to_zero:
            # One extra model evaluation to map the sample to time 0.
            t = (torch.ones((1,)).to(device) * t_0)
            x = self.denoise_to_zero_fn(x, t)
            if (self.correcting_xt_fn is not None):
                x = self.correcting_xt_fn(x, t, (step + 1))
            if return_intermediate:
                intermediates.append(x)
    if return_intermediate:
        return (x, intermediates)
    else:
        return x
|
def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x-axis. (For x beyond the bounds
    of xp, we use the outmost points of xp to define the linear function.)

    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the
            number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    (N, K) = (x.shape[0], xp.shape[1])
    # Sort each query point together with the keypoints to find where x falls.
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    (sorted_all_x, x_indices) = torch.sort(all_x, dim=2)
    # Position of the query x inside the sorted array (the query was element 0).
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = (x_idx - 1)
    # Clamp the segment start so out-of-range queries extrapolate from the
    # outermost segment (x_idx == 0 below range, x_idx == K above range).
    start_idx = torch.where(torch.eq(x_idx, 0), torch.tensor(1, device=x.device), torch.where(torch.eq(x_idx, K), torch.tensor((K - 2), device=x.device), cand_start_idx))
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), (start_idx + 2), (start_idx + 1))
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    # Matching keypoint index into yp for the selected segment.
    start_idx2 = torch.where(torch.eq(x_idx, 0), torch.tensor(0, device=x.device), torch.where(torch.eq(x_idx, K), torch.tensor((K - 2), device=x.device), cand_start_idx))
    y_positions_expanded = yp.unsqueeze(0).expand(N, (- 1), (- 1))
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    # Linear interpolation inside the selected segment.
    cand = (start_y + (((x - start_x) * (end_y - start_y)) / (end_x - start_x)))
    return cand
|
def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims` by appending trailing singleton axes.

    Args:
        v: a PyTorch tensor with shape [N].
        dims: an `int`, the desired total number of dimensions.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and `dims` total dimensions.
    """
    trailing = (None,) * (dims - 1)
    return v[(Ellipsis,) + trailing]
|
def calculate_inception_stats(image_path, num_expected=None, seed=0, max_batch_size=64, num_workers=3, prefetch_factor=2, device=torch.device('cuda')):
    """Compute Inception-v3 feature mean and covariance for a set of images.

    Runs under torch.distributed: each rank processes an interleaved subset of
    batches and the partial sums are all-reduced at the end.

    Args:
        image_path: Path (directory or ZIP) containing the images.
        num_expected: If not None, sub-sample to exactly this many images
            (raises ClickException if fewer are available).
        seed: Random seed used when sub-sampling the dataset.
        max_batch_size: Upper bound on the per-rank batch size.
        num_workers: DataLoader worker process count.
        prefetch_factor: DataLoader prefetch factor.
        device: Device on which to run the Inception network.
    Returns:
        Tuple `(mu, sigma)` of numpy float64 arrays of shape (2048,) and (2048, 2048).
    """
    # Rank 0 goes first: it downloads the detector and scans the dataset while
    # the other ranks wait here; the matching rank-0 barrier is further below.
    if (dist.get_rank() != 0):
        torch.distributed.barrier()
    dist.print0('Loading Inception-v3 model...')
    detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl'
    detector_kwargs = dict(return_features=True)
    feature_dim = 2048
    # NOTE(review): pickle.load on a downloaded file — trusted NVIDIA URL assumed.
    with dnnlib.util.open_url(detector_url, verbose=(dist.get_rank() == 0)) as f:
        detector_net = pickle.load(f).to(device)
    dist.print0(f'Loading images from "{image_path}"...')
    dataset_obj = dataset.ImageFolderDataset(path=image_path, max_size=num_expected, random_seed=seed)
    if ((num_expected is not None) and (len(dataset_obj) < num_expected)):
        raise click.ClickException(f'Found {len(dataset_obj)} images, but expected at least {num_expected}')
    if (len(dataset_obj) < 2):
        raise click.ClickException(f'Found {len(dataset_obj)} images, but need at least 2 to compute statistics')
    # Release the other ranks now that rank 0 has finished the setup.
    if (dist.get_rank() == 0):
        torch.distributed.barrier()
    # Divide the images into batches, interleaving the batches across ranks.
    num_batches = ((((len(dataset_obj) - 1) // (max_batch_size * dist.get_world_size())) + 1) * dist.get_world_size())
    all_batches = torch.arange(len(dataset_obj)).tensor_split(num_batches)
    rank_batches = all_batches[dist.get_rank()::dist.get_world_size()]
    data_loader = torch.utils.data.DataLoader(dataset_obj, batch_sampler=rank_batches, num_workers=num_workers, prefetch_factor=prefetch_factor)
    dist.print0(f'Calculating statistics for {len(dataset_obj)} images...')
    # Accumulate first and second moments in float64 for numerical stability.
    mu = torch.zeros([feature_dim], dtype=torch.float64, device=device)
    sigma = torch.zeros([feature_dim, feature_dim], dtype=torch.float64, device=device)
    for (images, _labels) in tqdm.tqdm(data_loader, unit='batch', disable=(dist.get_rank() != 0)):
        torch.distributed.barrier()
        if (images.shape[0] == 0):
            continue
        # Replicate grayscale images to 3 channels for the detector.
        if (images.shape[1] == 1):
            images = images.repeat([1, 3, 1, 1])
        features = detector_net(images.to(device), **detector_kwargs).to(torch.float64)
        mu += features.sum(0)
        sigma += (features.T @ features)
    # Combine the partial sums across ranks, then finish mean / (unbiased) covariance.
    torch.distributed.all_reduce(mu)
    torch.distributed.all_reduce(sigma)
    mu /= len(dataset_obj)
    sigma -= (mu.ger(mu) * len(dataset_obj))
    sigma /= (len(dataset_obj) - 1)
    return (mu.cpu().numpy(), sigma.cpu().numpy())
|
def calculate_fid_from_inception_stats(mu, sigma, mu_ref, sigma_ref):
    """Compute the Frechet Inception Distance between two Gaussians given by
    their means and covariances: ||mu - mu_ref||^2 + Tr(S + S_ref - 2*sqrt(S S_ref)).

    Args:
        mu, sigma: Mean vector and covariance matrix of the generated features.
        mu_ref, sigma_ref: Reference mean vector and covariance matrix.
    Returns:
        The FID value as a Python float (imaginary round-off from sqrtm discarded).
    """
    mean_term = float(np.sum((mu - mu_ref) ** 2))
    # Matrix square root of the covariance product; disp=False returns (result, error).
    covmean, _ = scipy.linalg.sqrtm(np.dot(sigma, sigma_ref), disp=False)
    trace_term = np.trace(sigma + sigma_ref - 2 * covmean)
    return float(np.real(mean_term + trace_term))
|
# Top-level click command group for the FID tool. The docstring below is
# rendered verbatim as the CLI help text, so it is kept byte-identical
# (\x08 markers stop click from re-wrapping the example blocks).
@click.group()
def main():
    'Calculate Frechet Inception Distance (FID).\n\n Examples:\n\n \x08\n # Generate 50000 images and save them as fid-tmp/*/*.png\n torchrun --standalone --nproc_per_node=1 generate.py --outdir=fid-tmp --seeds=0-49999 --subdirs \\\n --network=https://nvlabs-fi-cdn.nvidia.com/edm/pretrained/edm-cifar10-32x32-cond-vp.pkl\n\n \x08\n # Calculate FID\n torchrun --standalone --nproc_per_node=1 fid.py calc --images=fid-tmp \\\n --ref=https://nvlabs-fi-cdn.nvidia.com/edm/fid-refs/cifar10-32x32.npz\n\n \x08\n # Compute dataset reference statistics\n python fid.py ref --data=datasets/my-dataset.zip --dest=fid-refs/my-dataset.npz\n '
|
# CLI entry point: compute FID of a generated image set against reference stats.
@main.command()
@click.option('--images', 'image_path', help='Path to the images', metavar='PATH|ZIP', type=str, required=True)
@click.option('--ref', 'ref_path', help='Dataset reference statistics ', metavar='NPZ|URL', type=str, required=True)
@click.option('--num', 'num_expected', help='Number of images to use', metavar='INT', type=click.IntRange(min=2), default=50000, show_default=True)
@click.option('--seed', help='Random seed for selecting the images', metavar='INT', type=int, default=0, show_default=True)
@click.option('--batch', help='Maximum batch size', metavar='INT', type=click.IntRange(min=1), default=64, show_default=True)
def calc(image_path, ref_path, num_expected, seed, batch):
    'Calculate FID for a given set of images.'
    # NOTE(review): 'spawn' presumably required for CUDA-safe DataLoader workers — confirm.
    torch.multiprocessing.set_start_method('spawn')
    dist.init()
    dist.print0(f'Loading dataset reference statistics from "{ref_path}"...')
    ref = None
    # Only rank 0 loads the reference statistics; other ranks keep ref = None.
    if (dist.get_rank() == 0):
        with dnnlib.util.open_url(ref_path) as f:
            ref = dict(np.load(f))
    # All ranks participate in the statistics computation (reduced internally).
    (mu, sigma) = calculate_inception_stats(image_path=image_path, num_expected=num_expected, seed=seed, max_batch_size=batch)
    dist.print0('Calculating FID...')
    # Only rank 0 (which holds `ref`) computes and prints the final FID.
    if (dist.get_rank() == 0):
        fid = calculate_fid_from_inception_stats(mu, sigma, ref['mu'], ref['sigma'])
        print(f'{fid:g}')
    torch.distributed.barrier()
|
# CLI entry point: precompute the reference statistics consumed by 'calc'.
@main.command()
@click.option('--data', 'dataset_path', help='Path to the dataset', metavar='PATH|ZIP', type=str, required=True)
@click.option('--dest', 'dest_path', help='Destination .npz file', metavar='NPZ', type=str, required=True)
@click.option('--batch', help='Maximum batch size', metavar='INT', type=click.IntRange(min=1), default=64, show_default=True)
def ref(dataset_path, dest_path, batch):
    "Calculate dataset reference statistics needed by 'calc'."
    # NOTE(review): 'spawn' presumably required for CUDA-safe DataLoader workers — confirm.
    torch.multiprocessing.set_start_method('spawn')
    dist.init()
    (mu, sigma) = calculate_inception_stats(image_path=dataset_path, max_batch_size=batch)
    dist.print0(f'Saving dataset reference statistics to "{dest_path}"...')
    # Only rank 0 writes the output file; other ranks wait at the barrier.
    if (dist.get_rank() == 0):
        if os.path.dirname(dest_path):
            os.makedirs(os.path.dirname(dest_path), exist_ok=True)
        np.savez(dest_path, mu=mu, sigma=sigma)
    torch.distributed.barrier()
    dist.print0('Done.')
|
def init():
    """Initialize torch.distributed from environment variables, filling in
    single-process defaults for any variable that is absent, then configure
    the CUDA device and multi-process statistics collection.
    """
    defaults = {
        'MASTER_ADDR': 'localhost',
        'MASTER_PORT': '29500',
        'RANK': '0',
        'LOCAL_RANK': '0',
        'WORLD_SIZE': '1',
    }
    for key, value in defaults.items():
        os.environ.setdefault(key, value)
    # Windows has no NCCL support, so fall back to gloo there.
    backend = 'gloo' if os.name == 'nt' else 'nccl'
    torch.distributed.init_process_group(backend=backend, init_method='env://')
    torch.cuda.set_device(int(os.environ.get('LOCAL_RANK', '0')))
    # Only enable cross-process stat syncing when there is more than one process.
    sync_device = torch.device('cuda') if get_world_size() > 1 else None
    training_stats.init_multiprocessing(rank=get_rank(), sync_device=sync_device)
|
def get_rank():
    """Return this process's distributed rank, or 0 when torch.distributed
    has not been initialized."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    return 0
|
def get_world_size():
    """Return the distributed world size, or 1 when torch.distributed
    has not been initialized."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_world_size()
    return 1
|
def should_stop():
    """Hook polled by callers to check for an external stop request; this
    implementation never requests a stop."""
    return False
|
def update_progress(cur, total):
    """Progress-reporting hook; this implementation intentionally ignores
    its arguments and does nothing."""
    del cur, total
|
def print0(*args, **kwargs):
    """Print, but only on rank 0; all other ranks stay silent."""
    if get_rank() != 0:
        return
    print(*args, **kwargs)
|
def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a cached constant tensor for `value` with the requested shape,
    dtype, device, and memory format. Identical requests return the same
    tensor object from the module-level `_constant_cache`.
    """
    value = np.asarray(value)
    shape = tuple(shape) if shape is not None else None
    dtype = dtype if dtype is not None else torch.get_default_dtype()
    device = device if device is not None else torch.device('cpu')
    memory_format = memory_format if memory_format is not None else torch.contiguous_format
    # The raw bytes participate in the key so equal-shaped but different-valued
    # arrays get distinct cache entries.
    key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
    tensor = _constant_cache.get(key)
    if tensor is None:
        tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
        if shape is not None:
            tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
        tensor = tensor.contiguous(memory_format=memory_format)
        _constant_cache[key] = tensor
    return tensor
|
@contextlib.contextmanager
def suppress_tracer_warnings():
    """Context manager that temporarily prepends an 'ignore' filter for
    torch.jit.TracerWarning to `warnings.filters`.

    Fix: the filter is now removed in a `finally` clause, so it no longer
    leaks (permanently suppressing TracerWarning) when the body raises.
    """
    flt = ('ignore', None, torch.jit.TracerWarning, None, 0)
    warnings.filters.insert(0, flt)
    try:
        yield
    finally:
        warnings.filters.remove(flt)
|
def assert_shape(tensor, ref_shape):
    """Assert that `tensor` matches the shape given by `ref_shape`.

    Entries of `ref_shape` may be None (any size accepted for that dimension)
    or tensors (compared symbolically, e.g. while jit tracing).
    Raises AssertionError on any mismatch.
    """
    if tensor.ndim != len(ref_shape):
        raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
        if ref_size is None:
            continue
        if isinstance(ref_size, torch.Tensor):
            # Symbolic reference size: compare via torch so tracing stays valid.
            with suppress_tracer_warnings():
                symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
        elif isinstance(size, torch.Tensor):
            # Symbolic actual size (e.g. produced during jit tracing).
            with suppress_tracer_warnings():
                symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
        elif size != ref_size:
            raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
|
def profiled_function(fn):
    """Decorator that wraps `fn` so each call is recorded as a named range in
    the torch autograd profiler.

    Fix: use functools.wraps so the wrapper preserves `fn`'s full metadata
    (__doc__, __module__, __qualname__, signature), not just __name__ as before.
    """
    import functools
    @functools.wraps(fn)
    def decorator(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    return decorator
|
class InfiniteSampler(torch.utils.data.Sampler):
    """Sampler that yields an endless stream of dataset indices, sharded
    across `num_replicas` processes. When shuffling, indices are permuted
    once up front and then kept "locally shuffled" by randomly swapping each
    visited index with another inside a sliding window.
    """

    def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
        assert len(dataset) > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super().__init__(dataset)
        self.dataset = dataset
        self.rank = rank
        self.num_replicas = num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.window_size = window_size

    def __iter__(self):
        indices = np.arange(len(self.dataset))
        rng = None
        window = 0
        if self.shuffle:
            rng = np.random.RandomState(self.seed)
            rng.shuffle(indices)
            window = int(np.rint(indices.size * self.window_size))
        counter = 0
        while True:
            pos = counter % indices.size
            # Interleave yields across replicas.
            if counter % self.num_replicas == self.rank:
                yield indices[pos]
            # Swap the visited slot with a random earlier slot inside the window.
            if window >= 2:
                swap = (pos - rng.randint(window)) % indices.size
                indices[pos], indices[swap] = indices[swap], indices[pos]
            counter += 1
|
def params_and_buffers(module):
    """Return all parameters followed by all buffers of `module` as one list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.parameters(), *module.buffers()]
|
def named_params_and_buffers(module):
    """Return all (name, parameter) pairs followed by all (name, buffer)
    pairs of `module` as one list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.named_parameters(), *module.named_buffers()]
|
@torch.no_grad()
def copy_params_and_buffers(src_module, dst_module, require_all=False):
    """Copy parameters and buffers from `src_module` into `dst_module`,
    matching entries by name. With `require_all=True`, every destination
    tensor must have a source counterpart."""
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    source = {name: value for name, value in named_params_and_buffers(src_module)}
    for name, target in named_params_and_buffers(dst_module):
        if name in source:
            target.copy_(source[name])
        else:
            assert not require_all
|
@contextlib.contextmanager
def ddp_sync(module, sync):
    """Context manager that suppresses DistributedDataParallel gradient
    synchronization when `sync` is False; a no-op wrapper for non-DDP
    modules or when `sync` is True."""
    assert isinstance(module, torch.nn.Module)
    if not sync and isinstance(module, torch.nn.parallel.DistributedDataParallel):
        with module.no_sync():
            yield
    else:
        yield
|
def check_ddp_consistency(module, ignore_regex=None):
    """Assert that every parameter/buffer of `module` is identical across all
    distributed ranks by broadcasting rank 0's copy and comparing.
    Names fully matching `ignore_regex` are skipped."""
    assert isinstance(module, torch.nn.Module)
    for name, value in named_params_and_buffers(module):
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        value = value.detach()
        # Map NaN/Inf to finite values so the equality check is well-defined.
        if value.is_floating_point():
            value = nan_to_num(value)
        reference = value.clone()
        torch.distributed.broadcast(tensor=reference, src=0)
        assert (value == reference).all(), fullname
|
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module(*inputs)` while printing a table of per-submodule parameter
    counts, buffer counts, and output shapes/dtypes; returns the module outputs.

    Args:
        module: The torch.nn.Module to summarize (must not be a ScriptModule).
        inputs: Tuple/list of positional inputs forwarded to the module.
        max_nesting: Maximum submodule nesting depth included in the table.
        skip_redundant: Drop entries contributing no unique params/buffers/outputs.
    Returns:
        Whatever `module(*inputs)` returns.
    """
    assert isinstance(module, torch.nn.Module)
    assert (not isinstance(module, torch.jit.ScriptModule))
    assert isinstance(inputs, (tuple, list))
    # Register hooks that record the tensor outputs of each submodule,
    # tracking nesting depth so only modules within max_nesting are kept.
    entries = []
    nesting = [0]
    def pre_hook(_mod, _inputs):
        nesting[0] += 1
    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if (nesting[0] <= max_nesting):
            outputs = (list(outputs) if isinstance(outputs, (tuple, list)) else [outputs])
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()
    # Attribute each tensor to the first entry that saw it, so shared
    # parameters/buffers/outputs are only counted once.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if (id(t) not in tensors_seen)]
        e.unique_buffers = [t for t in e.mod.buffers() if (id(t) not in tensors_seen)]
        e.unique_outputs = [t for t in e.outputs if (id(t) not in tensors_seen)]
        tensors_seen |= {id(t) for t in ((e.unique_params + e.unique_buffers) + e.unique_outputs)}
    # Optionally drop entries that contribute nothing unique.
    if skip_redundant:
        entries = [e for e in entries if (len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs))]
    # Build the table rows: header, separator, one row per entry (plus extra
    # rows for additional outputs), separator, totals.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [(['---'] * len(rows[0]))]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for (name, mod) in module.named_modules()}
    for e in entries:
        name = ('<top-level>' if (e.mod is module) else submodule_names[e.mod])
        param_size = sum((t.numel() for t in e.unique_params))
        buffer_size = sum((t.numel() for t in e.unique_buffers))
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[(- 1)] for t in e.outputs]
        rows += [[(name + (':0' if (len(e.outputs) >= 2) else '')), (str(param_size) if param_size else '-'), (str(buffer_size) if buffer_size else '-'), (output_shapes + ['-'])[0], (output_dtypes + ['-'])[0]]]
        for idx in range(1, len(e.outputs)):
            rows += [[(name + f':{idx}'), '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [(['---'] * len(rows[0]))]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
    # Print the table with each column padded to its widest cell.
    widths = [max((len(cell) for cell in column)) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(((cell + (' ' * (width - len(cell)))) for (cell, width) in zip(row, widths))))
    print()
    print(vars(module))
    print()
    return outputs
|
def persistent_class(orig_class):
    """Class decorator that extends a given class to save its source code
    when pickled.

    When pickled, any instance of the decorated class saves the source code of
    the *entire Python module* containing the class alongside its internal
    state (e.g. parameters, buffers, submodules). Previously exported pickles
    therefore remain usable even if the class definition is later modified or
    removed. Imported modules are NOT saved — they must be available at
    unpickling time, including `torch_utils.persistence` itself.

    Other classes from the same module that the decorated class depends on
    must be decorated as well. The decorator may also be applied just-in-time
    before calling the constructor:

        cls = MyLayer
        if want_to_make_it_persistent:
            cls = persistence.persistent_class(cls)
        layer = cls(num_inputs, num_outputs)

    As an additional feature, the constructor arguments of each instance are
    recorded and can be queried via `obj.init_args` / `obj.init_kwargs`; they
    are pickled alongside other object state. This can be disabled per
    instance by setting `self._record_init_args = False` in the constructor.
    A typical use case is unpickling an old instance and rebuilding it with
    the latest source:

        with open('old_pickle.pkl', 'rb') as f:
            old_net = pickle.load(f)
        new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
        misc.copy_params_and_buffers(old_net, new_net, require_all=True)
    """
    assert isinstance(orig_class, type)
    if is_persistent(orig_class):
        return orig_class
    assert (orig_class.__module__ in sys.modules)
    orig_module = sys.modules[orig_class.__module__]
    # Capture the defining module's source at decoration time.
    orig_module_src = _module_to_src(orig_module)
    class Decorator(orig_class):
        # Source code of the module that defined the original class.
        _orig_module_src = orig_module_src
        _orig_class_name = orig_class.__name__
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Record constructor arguments unless the instance opts out.
            record_init_args = getattr(self, '_record_init_args', True)
            self._init_args = (copy.deepcopy(args) if record_init_args else None)
            self._init_kwargs = (copy.deepcopy(kwargs) if record_init_args else None)
            assert (orig_class.__name__ in orig_module.__dict__)
            # Fail fast if the instance would not survive pickling.
            _check_pickleable(self.__reduce__())
        @property
        def init_args(self):
            # Positional constructor arguments (deep-copied).
            assert (self._init_args is not None)
            return copy.deepcopy(self._init_args)
        @property
        def init_kwargs(self):
            # Keyword constructor arguments (deep-copied, as an EasyDict).
            assert (self._init_kwargs is not None)
            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
        def __reduce__(self):
            fields = list(super().__reduce__())
            fields += ([None] * max((3 - len(fields)), 0))
            # Swap in our reconstructor so unpickling goes through
            # _reconstruct_persistent_obj with the saved module source.
            if (fields[0] is not _reconstruct_persistent_obj):
                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
                fields[0] = _reconstruct_persistent_obj
                fields[1] = (meta,)
                fields[2] = None
            return tuple(fields)
    Decorator.__name__ = orig_class.__name__
    Decorator.__module__ = orig_class.__module__
    _decorators.add(Decorator)
    return Decorator
|
def is_persistent(obj):
    """Test whether the given object or class is persistent, i.e.,
    whether it will save its source code when pickled.
    """
    # Either the object's class is a registered decorator, or the object
    # itself is one (when `obj` is a class).
    if type(obj) in _decorators:
        return True
    try:
        return obj in _decorators
    except TypeError:
        # Unhashable objects cannot be registered decorator classes.
        return False
|
def import_hook(hook):
    """Register an import hook that is called whenever a persistent object
    is being unpickled. A typical use case is to patch the pickled source
    code to avoid errors and inconsistencies when the API of some imported
    module has changed.

    The hook should have the following signature:

        hook(meta) -> modified meta

    `meta` is an instance of `dnnlib.EasyDict` with the following fields:

        type:        Type of the persistent object, e.g. `'class'`.
        version:     Internal version number of `torch_utils.persistence`.
        module_src:  Original source code of the Python module.
        class_name:  Class name in the original Python module.
        state:       Internal state of the object.

    Example:

        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    _import_hooks.append(hook)
|
def _reconstruct_persistent_obj(meta):
    """Hook that is called internally by the `pickle` module to unpickle
    a persistent object from its saved metadata (see `persistent_class`).
    """
    meta = dnnlib.EasyDict(meta)
    meta.state = dnnlib.EasyDict(meta.state)
    # Give each registered import hook a chance to patch the metadata.
    for hook in _import_hooks:
        meta = hook(meta)
        assert (meta is not None)
    assert (meta.version == _version)
    # Re-create the defining module from the pickled source, then re-apply
    # the persistence decorator to the original class.
    module = _src_to_module(meta.module_src)
    assert (meta.type == 'class')
    orig_class = module.__dict__[meta.class_name]
    decorator_class = persistent_class(orig_class)
    # Bypass __init__ and restore state via __setstate__ when available.
    obj = decorator_class.__new__(decorator_class)
    setstate = getattr(obj, '__setstate__', None)
    if callable(setstate):
        setstate(meta.state)
    else:
        obj.__dict__.update(meta.state)
    return obj
|
def _module_to_src(module):
    """Query (and cache) the source code of a given Python module."""
    cached = _module_to_src_dict.get(module)
    if cached is not None:
        return cached
    src = inspect.getsource(module)
    # Populate both direction caches so round-trips reuse the same objects.
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    return src
|
def _src_to_module(src):
    """Get or create a Python module object for the given source code."""
    existing = _src_to_module_dict.get(src)
    if existing is not None:
        return existing
    module_name = '_imported_module_' + uuid.uuid4().hex
    module = types.ModuleType(module_name)
    # Register in sys.modules and the caches before executing the source,
    # so re-entrant lookups during exec resolve to this module.
    sys.modules[module_name] = module
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    exec(src, module.__dict__)
    return module
|
def _check_pickleable(obj):
    """Check that the given object is pickleable, raising an exception if
    it is not. This function is expected to be considerably more efficient
    than actually pickling the object.
    """
    def recurse(x):
        # Recurse into containers, replacing known-pickleable leaves with
        # None so the final pickle.dump only exercises the unknown parts.
        if isinstance(x, (list, tuple, set)):
            return [recurse(item) for item in x]
        if isinstance(x, dict):
            return [[recurse(key), recurse(val)] for (key, val) in x.items()]
        if isinstance(x, (str, int, float, bool, bytes, bytearray)):
            return None
        qualname = f'{type(x).__module__}.{type(x).__name__}'
        if (qualname in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']):
            return None
        if is_persistent(x):
            return None
        return x
    with io.BytesIO() as sink:
        pickle.dump(recurse(obj), sink)
|
def init_multiprocessing(rank, sync_device):
    """Initializes `torch_utils.training_stats` for collecting statistics
    across multiple processes.

    This function must be called after
    `torch.distributed.init_process_group()` and before `Collector.update()`.
    The call is not necessary if multi-process collection is not needed.

    Args:
        rank:        Rank of the current process.
        sync_device: PyTorch device to use for inter-process communication,
                     or None to disable multi-process collection.
                     Typically `torch.device('cuda', rank)`.
    """
    global _rank, _sync_device
    # Changing the configuration after the first sync would desynchronize
    # the processes, so forbid it.
    assert (not _sync_called)
    _sync_device = sync_device
    _rank = rank
|
@misc.profiled_function
def report(name, value):
    """Broadcasts the given set of scalars to all interested instances of
    `Collector`, across device and process boundaries.

    This function is expected to be extremely cheap and can be safely
    called from anywhere in the training loop, loss function, or inside a
    `torch.nn.Module`.

    Warning: The current implementation expects the set of unique names to
    be consistent across processes. Please make sure that `report()` is
    called at least once for each unique name by each process, and in the
    same order. If a given process has no scalars to broadcast, it can do
    `report(name, [])` (empty list).

    Args:
        name:  Arbitrary string specifying the name of the statistic.
               Averages are accumulated separately for each unique name.
        value: Arbitrary set of scalars. Can be a list, tuple, NumPy array,
               PyTorch tensor, or Python scalar.

    Returns:
        The same `value` that was passed in.
    """
    _counters.setdefault(name, dict())
    elems = torch.as_tensor(value)
    if (elems.numel() == 0):
        return value
    elems = elems.detach().flatten().to(_reduce_dtype)
    # Raw moments of the batch: [count, sum, sum of squares].
    moments = torch.stack([torch.ones_like(elems).sum(), elems.sum(), elems.square().sum()])
    assert ((moments.ndim == 1) and (moments.shape[0] == _num_moments))
    moments = moments.to(_counter_dtype)
    # Accumulate into the per-device counter, creating it on first use.
    per_device = _counters[name]
    if (moments.device not in per_device):
        per_device[moments.device] = torch.zeros_like(moments)
    per_device[moments.device].add_(moments)
    return value
|
def report0(name, value):
    """Broadcasts the given set of scalars by the first process (`rank = 0`),
    but ignores any scalars provided by the other processes.
    See `report()` for further details.
    """
    if (_rank == 0):
        report(name, value)
    else:
        # Non-zero ranks still call report() with an empty list so that the
        # set of names stays consistent across processes.
        report(name, [])
    return value
|
class Collector():
    """Collects the scalars broadcasted by `report()` and `report0()` and
    computes their long-term averages (mean and standard deviation) over
    user-defined periods of time.

    The averages are first collected into internal counters that are not
    directly visible to the user. They are then copied to the user-visible
    state as a result of calling `update()` and can then be queried using
    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
    internal counters for the next round, so that the user-visible state
    effectively reflects averages collected between the last two calls to
    `update()`.

    Args:
        regex:          Regular expression defining which statistics to
                        collect. The default is to collect everything.
        keep_previous:  Whether to retain the previous averages if no
                        scalars were collected on a given round
                        (default: True).
    """
    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        self._cumulative = dict()  # name -> cumulative moments seen so far.
        self._moments = dict()  # name -> moments accumulated in the last round.
        # Flush any counters accumulated before this collector was created,
        # then discard the resulting deltas so the first real update() only
        # covers scalars reported after construction.
        self.update()
        self._moments.clear()
    def names(self):
        """Returns the names of all statistics broadcasted so far that
        match the regular expression specified at construction time.
        """
        return [name for name in _counters if self._regex.fullmatch(name)]
    def update(self):
        """Copies current values of the internal counters to the
        user-visible state and resets them for the next round.

        If `keep_previous=True` was specified at construction time, the
        operation is skipped for statistics that have received no scalars
        since the last update, retaining their previous averages.

        This method performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`. It is intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if (not self._keep_previous):
            self._moments.clear()
        # _sync() returns the synchronized cumulative moments per name and
        # resets the per-device counters as a side effect.
        for (name, cumulative) in _sync(self.names()):
            if (name not in self._cumulative):
                self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
            # Moments accumulated since the previous update().
            delta = (cumulative - self._cumulative[name])
            self._cumulative[name].copy_(cumulative)
            # delta[0] is the scalar count; only overwrite the visible
            # moments if at least one scalar was collected.
            if (float(delta[0]) != 0):
                self._moments[name] = delta
    def _get_delta(self, name):
        """Returns the raw moments that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        assert self._regex.fullmatch(name)
        if (name not in self._moments):
            self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        return self._moments[name]
    def num(self, name):
        """Returns the number of scalars that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        return int(delta[0])
    def mean(self, name):
        """Returns the mean of the scalars that were accumulated for the
        given statistic between the last two calls to `update()`, or NaN if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        if (int(delta[0]) == 0):
            return float('nan')
        return float((delta[1] / delta[0]))
    def std(self, name):
        """Returns the standard deviation of the scalars that were
        accumulated for the given statistic between the last two calls to
        `update()`, or NaN if no scalars were collected.
        """
        delta = self._get_delta(name)
        if ((int(delta[0]) == 0) or (not np.isfinite(float(delta[1])))):
            return float('nan')
        if (int(delta[0]) == 1):
            return float(0)
        # Population std from raw moments: sqrt(E[x^2] - E[x]^2), clamped at
        # zero to guard against negative values from rounding.
        mean = float((delta[1] / delta[0]))
        raw_var = float((delta[2] / delta[0]))
        return np.sqrt(max((raw_var - np.square(mean)), 0))
    def as_dict(self):
        """Returns the averages accumulated between the last two calls to
        `update()` as an `dnnlib.EasyDict`. The contents are as follows:

            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
        return stats
    def __getitem__(self, name):
        """Convenience getter.
        `collector[name]` is a synonym for `collector.mean(name)`.
        """
        return self.mean(name)
|
def _sync(names):
    """Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.
    """
    if (len(names) == 0):
        return []
    global _sync_called
    _sync_called = True
    deltas = []
    device = (_sync_device if (_sync_device is not None) else torch.device('cpu'))
    for name in names:
        # Gather the per-device counters for this statistic onto a single
        # device and zero them for the next round.
        delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            delta.add_(counter.to(device))
            counter.copy_(torch.zeros_like(counter))
        deltas.append(delta)
    deltas = torch.stack(deltas)
    # Sum the deltas across processes. This requires every process to
    # contribute the same set of names in the same order.
    if (_sync_device is not None):
        torch.distributed.all_reduce(deltas)
    deltas = deltas.cpu()
    # Fold the synchronized deltas into the process-local cumulative totals.
    for (idx, name) in enumerate(names):
        if (name not in _cumulative):
            _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])
    return [(name, _cumulative[name]) for name in names]
|
def parse_int_list(s):
    """Parse a comma-separated list of integers with optional 'a-b' ranges
    (inclusive on both ends) into a flat list of ints.

    A value that is already a list is returned unchanged.
    """
    if isinstance(s, list):
        return s
    result = []
    range_pattern = re.compile('^(\\d+)-(\\d+)$')
    for token in s.split(','):
        match = range_pattern.match(token)
        if match is None:
            result.append(int(token))
        else:
            lo, hi = int(match.group(1)), int(match.group(2))
            result.extend(range(lo, (hi + 1)))
    return result
|
@click.command()
@click.option('--outdir', help='Where to save the results', metavar='DIR', type=str, required=True)
@click.option('--data', help='Path to the dataset', metavar='ZIP|DIR', type=str, required=True)
@click.option('--cond', help='Train class-conditional model', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--arch', help='Network architecture', metavar='ddpmpp|ncsnpp', type=click.Choice(['ddpmpp', 'ncsnpp']), default='ddpmpp', show_default=True)
@click.option('--precond', help='Preconditioning & loss function', metavar='vp|ve|edm', type=click.Choice(['vp', 've', 'edm', 'fdm_edm', 'fdm_vp', 'fdm_ve']), default='fdm_edm', show_default=True)
@click.option('--duration', help='Training duration', metavar='MIMG', type=click.FloatRange(min=0, min_open=True), default=200, show_default=True)
@click.option('--batch', help='Total batch size', metavar='INT', type=click.IntRange(min=1), default=512, show_default=True)
@click.option('--batch-gpu', help='Limit batch size per GPU', metavar='INT', type=click.IntRange(min=1))
@click.option('--cbase', help='Channel multiplier [default: varies]', metavar='INT', type=int)
@click.option('--cres', help='Channels per resolution [default: varies]', metavar='LIST', type=parse_int_list)
@click.option('--lr', help='Learning rate', metavar='FLOAT', type=click.FloatRange(min=0, min_open=True), default=0.001, show_default=True)
@click.option('--lr_rampup', help='Learning rate rampup', metavar='FLOAT', type=click.FloatRange(min=0, max=1000), default=10, show_default=True)
@click.option('--ema', help='EMA half-life', metavar='MIMG', type=click.FloatRange(min=0), default=0.5, show_default=True)
@click.option('--dropout', help='Dropout probability', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.13, show_default=True)
@click.option('--augment', help='Augment probability', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.12, show_default=True)
@click.option('--xflip', help='Enable dataset x-flips', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--warmup_ite', help='Loss weight warmup iteration', metavar='FLOAT', type=float, default=None, show_default=True)
@click.option('--fdm_multiplier', help='FDM multiplier', metavar='FLOAT', type=float, default=2.0)
@click.option('--fp16', help='Enable mixed-precision training', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--ls', help='Loss scaling', metavar='FLOAT', type=click.FloatRange(min=0, min_open=True), default=1, show_default=True)
@click.option('--bench', help='Enable cuDNN benchmarking', metavar='BOOL', type=bool, default=True, show_default=True)
@click.option('--cache', help='Cache dataset in CPU memory', metavar='BOOL', type=bool, default=True, show_default=True)
@click.option('--workers', help='DataLoader worker processes', metavar='INT', type=click.IntRange(min=1), default=8, show_default=True)
@click.option('--desc', help='String to include in result dir name', metavar='STR', type=str)
@click.option('--nosubdir', help='Do not create a subdirectory for results', is_flag=True)
@click.option('--tick', help='How often to print progress', metavar='KIMG', type=click.IntRange(min=1), default=50, show_default=True)
@click.option('--snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=200, show_default=True)
@click.option('--dump', help='How often to dump state', metavar='TICKS', type=click.IntRange(min=1), default=500, show_default=True)
@click.option('--seed', help='Random seed [default: random]', metavar='INT', type=int)
@click.option('--transfer', help='Transfer learning from network pickle', metavar='PKL|URL', type=str)
@click.option('--resume', help='Resume from previous training state', metavar='PT', type=str)
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)
def main(**kwargs):
    """Parse the command-line options, assemble the training configuration,
    pick an output directory, and launch `training_loop.training_loop()`.

    Raises:
        click.ClickException: On invalid option combinations, inaccessible
            dataset, or a malformed --resume path.
    """
    opts = dnnlib.EasyDict(kwargs)
    torch.multiprocessing.set_start_method('spawn')
    dist.init()

    # Base configuration.
    c = dnnlib.EasyDict()
    c.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=opts.data, use_labels=opts.cond, xflip=opts.xflip, cache=opts.cache)
    c.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=opts.workers, prefetch_factor=2)
    c.network_kwargs = dnnlib.EasyDict()
    c.loss_kwargs = dnnlib.EasyDict()
    c.optimizer_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=opts.lr, betas=[0.9, 0.999], eps=1e-08)

    # Validate dataset options by constructing the dataset once.
    try:
        dataset_obj = dnnlib.util.construct_class_by_name(**c.dataset_kwargs)
        dataset_name = dataset_obj.name
        c.dataset_kwargs.resolution = dataset_obj.resolution
        c.dataset_kwargs.max_size = len(dataset_obj)
        if (opts.cond and (not dataset_obj.has_labels)):
            raise click.ClickException('--cond=True requires labels specified in dataset.json')
        del dataset_obj
    except IOError as err:
        raise click.ClickException(f'--data: {err}')

    # Network architecture.
    if (opts.arch == 'ddpmpp'):
        c.network_kwargs.update(model_type='SongUNet', embedding_type='positional', encoder_type='standard', decoder_type='standard')
        c.network_kwargs.update(channel_mult_noise=1, resample_filter=[1, 1], model_channels=128, channel_mult=[2, 2, 2])
    elif (opts.arch == 'ncsnpp'):
        c.network_kwargs.update(model_type='SongUNet', embedding_type='fourier', encoder_type='residual', decoder_type='standard')
        c.network_kwargs.update(channel_mult_noise=2, resample_filter=[1, 3, 3, 1], model_channels=128, channel_mult=[2, 2, 2])
    else:
        # BUGFIX: raising a plain string is a TypeError in Python 3.
        # Raise a proper exception, consistent with the other errors above.
        # (Unreachable in practice because click.Choice validates --arch.)
        raise click.ClickException(f'Unknown architecture: {opts.arch}')

    # Preconditioning & loss function.
    if (opts.precond == 'vp'):
        c.network_kwargs.class_name = 'training.networks.VPPrecond'
        c.loss_kwargs.class_name = 'training.loss.VPLoss'
    elif (opts.precond == 've'):
        c.network_kwargs.class_name = 'training.networks.VEPrecond'
        c.loss_kwargs.class_name = 'training.loss.VELoss'
    elif (opts.precond == 'edm'):
        c.network_kwargs.class_name = 'training.networks.EDMPrecond'
        c.loss_kwargs.class_name = 'training.loss.EDMLoss'
    elif (opts.precond == 'fdm_vp'):
        c.network_kwargs.class_name = 'training.networks.FDM_VPPrecond'
        c.loss_kwargs.class_name = 'training.loss.VPLoss'
        c.network_kwargs.update(fdm_multiplier=opts.fdm_multiplier)
        c.loss_kwargs.update(warmup_ite=opts.warmup_ite)
    elif (opts.precond == 'fdm_ve'):
        c.network_kwargs.class_name = 'training.networks.FDM_VEPrecond'
        c.loss_kwargs.class_name = 'training.loss.VELoss'
        c.network_kwargs.update(fdm_multiplier=opts.fdm_multiplier)
        c.loss_kwargs.update(warmup_ite=opts.warmup_ite)
    else:
        assert (opts.precond == 'fdm_edm')
        c.network_kwargs.class_name = 'training.networks.FDM_EDMPrecond'
        c.loss_kwargs.class_name = 'training.loss.EDMLoss'
        c.network_kwargs.update(fdm_multiplier=opts.fdm_multiplier)
        c.loss_kwargs.update(warmup_ite=opts.warmup_ite)

    # Network options.
    if (opts.cbase is not None):
        c.network_kwargs.model_channels = opts.cbase
    if (opts.cres is not None):
        c.network_kwargs.channel_mult = opts.cres
    if opts.augment:
        c.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', p=opts.augment)
        c.augment_kwargs.update(xflip=100000000.0, yflip=1, scale=1, rotate_frac=1, aniso=1, translate_frac=1)
        c.network_kwargs.augment_dim = 9
    c.network_kwargs.update(dropout=opts.dropout, use_fp16=opts.fp16)

    # Training options.
    c.total_kimg = max(int((opts.duration * 1000)), 1)
    c.ema_halflife_kimg = int((opts.ema * 1000))
    c.update(batch_size=opts.batch, batch_gpu=opts.batch_gpu)
    c.update(loss_scaling=opts.ls, cudnn_benchmark=opts.bench)
    c.update(kimg_per_tick=opts.tick, snapshot_ticks=opts.snap, state_dump_ticks=opts.dump)

    # Random seed: pick one on rank 0 and broadcast so all ranks agree.
    if (opts.seed is not None):
        c.seed = opts.seed
    else:
        seed = torch.randint((1 << 31), size=[], device=torch.device('cuda'))
        torch.distributed.broadcast(seed, src=0)
        c.seed = int(seed)

    # Transfer learning and resume.
    if (opts.transfer is not None):
        if (opts.resume is not None):
            raise click.ClickException('--transfer and --resume cannot be specified at the same time')
        c.resume_pkl = opts.transfer
        c.ema_rampup_ratio = None
    elif (opts.resume is not None):
        match = re.fullmatch('training-state-(\\d+).pt', os.path.basename(opts.resume))
        if ((not match) or (not os.path.isfile(opts.resume))):
            raise click.ClickException('--resume must point to training-state-*.pt from a previous training run')
        c.resume_pkl = os.path.join(os.path.dirname(opts.resume), f'network-snapshot-{match.group(1)}.pkl')
        c.resume_kimg = int(match.group(1))
        c.resume_state_dump = opts.resume

    # Description string and output directory.
    cond_str = ('cond' if c.dataset_kwargs.use_labels else 'uncond')
    dtype_str = ('fp16' if c.network_kwargs.use_fp16 else 'fp32')
    desc = f'{dataset_name:s}-{cond_str:s}-{opts.arch:s}-{opts.precond:s}-gpus{dist.get_world_size():d}-batch{c.batch_size:d}-{dtype_str:s}'
    if (opts.desc is not None):
        desc += f'-{opts.desc}'
    if (dist.get_rank() == (- 1)):
        c.run_dir = None
    elif opts.nosubdir:
        c.run_dir = opts.outdir
    else:
        # Pick the next sequential run id under outdir.
        prev_run_dirs = []
        if os.path.isdir(opts.outdir):
            prev_run_dirs = [x for x in os.listdir(opts.outdir) if os.path.isdir(os.path.join(opts.outdir, x))]
        prev_run_ids = [re.match('^\\d+', x) for x in prev_run_dirs]
        prev_run_ids = [int(x.group()) for x in prev_run_ids if (x is not None)]
        cur_run_id = (max(prev_run_ids, default=(- 1)) + 1)
        c.run_dir = os.path.join(opts.outdir, f'{cur_run_id:05d}-{desc}')
        assert (not os.path.exists(c.run_dir))

    # Print options.
    dist.print0()
    dist.print0('Training options:')
    dist.print0(json.dumps(c, indent=2))
    dist.print0()
    dist.print0(f'Output directory: {c.run_dir}')
    dist.print0(f'Dataset path: {c.dataset_kwargs.path}')
    dist.print0(f'Class-conditional: {c.dataset_kwargs.use_labels}')
    dist.print0(f'Network architecture: {opts.arch}')
    dist.print0(f'Preconditioning & loss: {opts.precond}')
    dist.print0(f'Number of GPUs: {dist.get_world_size()}')
    dist.print0(f'Batch size: {c.batch_size}')
    dist.print0(f'Mixed-precision: {c.network_kwargs.use_fp16}')
    dist.print0(f'network_kwargs: {c.network_kwargs}')
    dist.print0(f'loss_kwargs: {c.loss_kwargs}')
    dist.print0()

    # Dry run?
    if opts.dry_run:
        dist.print0('Dry run; exiting.')
        return

    # Create output directory and launch the training loop.
    dist.print0('Creating output directory...')
    if (dist.get_rank() == 0):
        os.makedirs(c.run_dir, exist_ok=True)
        with open(os.path.join(c.run_dir, 'training_options.json'), 'wt') as f:
            json.dump(c, f, indent=2)
        dnnlib.util.Logger(file_name=os.path.join(c.run_dir, 'log.txt'), file_mode='a', should_flush=True)
    training_loop.training_loop(**c, lr_rampup_kimg=(opts.lr_rampup * 1000))
|
@persistence.persistent_class
class VPLoss():
    """Loss function for the VP (variance preserving) formulation, with an
    optional warmup schedule that gradually relaxes a clamp on the
    per-sample loss weight.
    """
    def __init__(self, beta_d=19.9, beta_min=0.1, epsilon_t=1e-05, warmup_ite=None):
        self.beta_d = beta_d
        self.beta_min = beta_min
        self.epsilon_t = epsilon_t
        self.warmup_ite = warmup_ite
        # Loss-weight clamp: starts at clamp_cur and grows geometrically on
        # each call until it reaches clamp_max.
        self.clamp_cur = 5.0
        self.clamp_max = 500.0
        if self.warmup_ite:
            # Growth factor chosen so the clamp increases 100x over warmup_ite calls.
            self.warmup_step = np.exp((np.log(100) / self.warmup_ite))
    def __call__(self, net, images, labels, augment_pipe=None):
        """Return the per-pixel weighted denoising loss for one batch."""
        # Sample a diffusion time uniformly in [epsilon_t, 1] per image.
        rnd = torch.rand([images.shape[0], 1, 1, 1], device=images.device)
        sigma = self.sigma((1 + (rnd * (self.epsilon_t - 1))))
        weight = (1 / (sigma ** 2))
        if self.warmup_ite:
            if (self.clamp_cur < self.clamp_max):
                weight.clamp_max_(self.clamp_cur)
                self.clamp_cur *= self.warmup_step
        if (augment_pipe is not None):
            (y, augment_labels) = augment_pipe(images)
        else:
            (y, augment_labels) = (images, None)
        noise = (torch.randn_like(y) * sigma)
        denoised = net((y + noise), sigma, labels, augment_labels=augment_labels)
        return (weight * ((denoised - y) ** 2))
    def sigma(self, t):
        """Map diffusion time t to the VP noise level sigma(t)."""
        t = torch.as_tensor(t)
        return ((((0.5 * self.beta_d) * (t ** 2)) + (self.beta_min * t)).exp() - 1).sqrt()
|
@persistence.persistent_class
class VELoss():
    """Loss function for the VE (variance exploding) formulation, with an
    optional warmup schedule that gradually relaxes a clamp on the
    per-sample loss weight.
    """
    def __init__(self, sigma_min=0.02, sigma_max=100, warmup_ite=None):
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max
        self.warmup_ite = warmup_ite
        # Loss-weight clamp: starts at clamp_cur and grows geometrically on
        # each call until it reaches clamp_max.
        self.clamp_cur = 5.0
        self.clamp_max = 500.0
        if self.warmup_ite:
            # Growth factor chosen so the clamp increases 100x over warmup_ite calls.
            self.warmup_step = np.exp((np.log(100) / self.warmup_ite))
    def __call__(self, net, images, labels, augment_pipe=None):
        """Return the per-pixel weighted denoising loss for one batch."""
        # Sample sigma log-uniformly in [sigma_min, sigma_max] per image.
        rnd = torch.rand([images.shape[0], 1, 1, 1], device=images.device)
        sigma = (self.sigma_min * ((self.sigma_max / self.sigma_min) ** rnd))
        weight = (1 / (sigma ** 2))
        if self.warmup_ite:
            if (self.clamp_cur < self.clamp_max):
                weight.clamp_max_(self.clamp_cur)
                self.clamp_cur *= self.warmup_step
        if (augment_pipe is not None):
            (y, augment_labels) = augment_pipe(images)
        else:
            (y, augment_labels) = (images, None)
        noise = (torch.randn_like(y) * sigma)
        denoised = net((y + noise), sigma, labels, augment_labels=augment_labels)
        return (weight * ((denoised - y) ** 2))
|
@persistence.persistent_class
class EDMLoss():
    """EDM loss with log-normal noise-level sampling, with an optional
    warmup schedule that gradually relaxes a clamp on the per-sample
    loss weight.
    """
    def __init__(self, P_mean=(- 1.2), P_std=1.2, sigma_data=0.5, warmup_ite=None):
        self.P_mean = P_mean
        self.P_std = P_std
        self.sigma_data = sigma_data
        self.warmup_ite = warmup_ite
        # Loss-weight clamp: starts at clamp_cur and grows geometrically on
        # each call until it reaches clamp_max.
        self.clamp_cur = 5.0
        self.clamp_max = 500.0
        if self.warmup_ite:
            # Growth factor chosen so the clamp increases 100x over warmup_ite calls.
            self.warmup_step = np.exp((np.log(100) / self.warmup_ite))
    def __call__(self, net, images, labels=None, augment_pipe=None):
        """Return the per-pixel weighted denoising loss for one batch."""
        # Sample sigma from a log-normal distribution per image.
        rnd = torch.randn([images.shape[0], 1, 1, 1], device=images.device)
        sigma = ((rnd * self.P_std) + self.P_mean).exp()
        weight = (((sigma ** 2) + (self.sigma_data ** 2)) / ((sigma * self.sigma_data) ** 2))
        if self.warmup_ite:
            if (self.clamp_cur < self.clamp_max):
                weight.clamp_max_(self.clamp_cur)
                self.clamp_cur *= self.warmup_step
        if (augment_pipe is not None):
            (y, augment_labels) = augment_pipe(images)
        else:
            (y, augment_labels) = (images, None)
        noise = (torch.randn_like(y) * sigma)
        denoised = net((y + noise), sigma, labels, augment_labels=augment_labels)
        return (weight * ((denoised - y) ** 2))
|
def weight_init(shape, mode, fan_in, fan_out):
    """Sample an initial weight tensor of the given shape.

    Supported modes: 'xavier_uniform', 'xavier_normal', 'kaiming_uniform',
    'kaiming_normal'. Raises ValueError for anything else.
    """
    # Uniform variants draw from [-1, 1) scaled by the mode's gain; normal
    # variants draw from a standard normal scaled by the gain.
    if (mode == 'xavier_uniform'):
        gain = np.sqrt((6 / (fan_in + fan_out)))
        return (gain * ((torch.rand(*shape) * 2) - 1))
    if (mode == 'xavier_normal'):
        gain = np.sqrt((2 / (fan_in + fan_out)))
        return (gain * torch.randn(*shape))
    if (mode == 'kaiming_uniform'):
        gain = np.sqrt((3 / fan_in))
        return (gain * ((torch.rand(*shape) * 2) - 1))
    if (mode == 'kaiming_normal'):
        gain = np.sqrt((1 / fan_in))
        return (gain * torch.randn(*shape))
    raise ValueError(f'Invalid init mode "{mode}"')
|
@persistence.persistent_class
class Linear(torch.nn.Module):
    """Fully-connected layer with configurable weight/bias initialization."""
    def __init__(self, in_features, out_features, bias=True, init_mode='kaiming_normal', init_weight=1, init_bias=0):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        init_kwargs = dict(mode=init_mode, fan_in=in_features, fan_out=out_features)
        self.weight = torch.nn.Parameter((weight_init([out_features, in_features], **init_kwargs) * init_weight))
        if bias:
            self.bias = torch.nn.Parameter((weight_init([out_features], **init_kwargs) * init_bias))
        else:
            self.bias = None
    def forward(self, x):
        # Cast parameters to the input dtype (supports mixed precision).
        out = (x @ self.weight.to(x.dtype).t())
        if (self.bias is not None):
            out = out.add_(self.bias.to(x.dtype))
        return out
|
@persistence.persistent_class
class Conv2d(torch.nn.Module):
    """2D convolution with optional up/downsampling.

    `kernel=0` disables the convolution entirely (filter-only resampling).
    When `fused_resample` is True and a weight is present, the resampling
    filter and the convolution are applied as one fused pair of ops with
    combined padding; otherwise they run as separate steps.
    """
    def __init__(self, in_channels, out_channels, kernel, bias=True, up=False, down=False, resample_filter=[1, 1], fused_resample=False, init_mode='kaiming_normal', init_weight=1, init_bias=0):
        assert (not (up and down))
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.up = up
        self.down = down
        self.fused_resample = fused_resample
        init_kwargs = dict(mode=init_mode, fan_in=((in_channels * kernel) * kernel), fan_out=((out_channels * kernel) * kernel))
        # kernel=0 yields weight=None (and bias=None), i.e. no convolution.
        self.weight = (torch.nn.Parameter((weight_init([out_channels, in_channels, kernel, kernel], **init_kwargs) * init_weight)) if kernel else None)
        self.bias = (torch.nn.Parameter((weight_init([out_channels], **init_kwargs) * init_bias)) if (kernel and bias) else None)
        # Separable 2D resampling filter: outer product of the 1D filter,
        # normalized by the square of its sum. Only kept when resampling.
        f = torch.as_tensor(resample_filter, dtype=torch.float32)
        f = (f.ger(f).unsqueeze(0).unsqueeze(1) / f.sum().square())
        self.register_buffer('resample_filter', (f if (up or down) else None))
    def forward(self, x):
        # Cast parameters/buffers to the input dtype (mixed-precision support).
        w = (self.weight.to(x.dtype) if (self.weight is not None) else None)
        b = (self.bias.to(x.dtype) if (self.bias is not None) else None)
        f = (self.resample_filter.to(x.dtype) if (self.resample_filter is not None) else None)
        # "Same" padding amounts for the conv weight and the resample filter.
        w_pad = ((w.shape[(- 1)] // 2) if (w is not None) else 0)
        f_pad = (((f.shape[(- 1)] - 1) // 2) if (f is not None) else 0)
        if (self.fused_resample and self.up and (w is not None)):
            # Fused upsample + conv: transposed grouped conv with the filter
            # (scaled by 4 to preserve magnitude), then the weight conv.
            x = torch.nn.functional.conv_transpose2d(x, f.mul(4).tile([self.in_channels, 1, 1, 1]), groups=self.in_channels, stride=2, padding=max((f_pad - w_pad), 0))
            x = torch.nn.functional.conv2d(x, w, padding=max((w_pad - f_pad), 0))
        elif (self.fused_resample and self.down and (w is not None)):
            # Fused conv + downsample: weight conv with combined padding,
            # then strided grouped conv with the filter.
            x = torch.nn.functional.conv2d(x, w, padding=(w_pad + f_pad))
            x = torch.nn.functional.conv2d(x, f.tile([self.out_channels, 1, 1, 1]), groups=self.out_channels, stride=2)
        else:
            # Unfused path: resample first (if any), then convolve (if any).
            if self.up:
                x = torch.nn.functional.conv_transpose2d(x, f.mul(4).tile([self.in_channels, 1, 1, 1]), groups=self.in_channels, stride=2, padding=f_pad)
            if self.down:
                x = torch.nn.functional.conv2d(x, f.tile([self.in_channels, 1, 1, 1]), groups=self.in_channels, stride=2, padding=f_pad)
            if (w is not None):
                x = torch.nn.functional.conv2d(x, w, padding=w_pad)
        if (b is not None):
            x = x.add_(b.reshape(1, (- 1), 1, 1))
        return x
|
@persistence.persistent_class
class GroupNorm(torch.nn.Module):
    """Group normalization with a learnable per-channel affine transform.

    The requested number of groups is reduced if necessary so that each
    group contains at least `min_channels_per_group` channels.
    """
    def __init__(self, num_channels, num_groups=32, min_channels_per_group=4, eps=1e-05):
        super().__init__()
        max_groups = (num_channels // min_channels_per_group)
        self.num_groups = min(num_groups, max_groups)
        self.eps = eps
        self.weight = torch.nn.Parameter(torch.ones(num_channels))
        self.bias = torch.nn.Parameter(torch.zeros(num_channels))
    def forward(self, x):
        # Cast the affine parameters to the input dtype (mixed precision).
        return torch.nn.functional.group_norm(x, num_groups=self.num_groups, weight=self.weight.to(x.dtype), bias=self.bias.to(x.dtype), eps=self.eps)
|
class AttentionOp(torch.autograd.Function):
    """Attention-weight computation w = softmax(q^T k / sqrt(C)) as a custom
    autograd function. Internals run in float32 regardless of input dtype;
    the result is cast back to the dtype of `q`.
    """
    @staticmethod
    def forward(ctx, q, k):
        qf = q.to(torch.float32)
        kf = (k / np.sqrt(k.shape[1])).to(torch.float32)
        w = torch.einsum('ncq,nck->nqk', qf, kf).softmax(dim=2).to(q.dtype)
        ctx.save_for_backward(q, k, w)
        return w
    @staticmethod
    def backward(ctx, dw):
        (q, k, w) = ctx.saved_tensors
        # Gradient of the softmax, computed in float32 via the private
        # fused kernel, then propagated through the scaled dot product.
        db = torch._softmax_backward_data(grad_output=dw.to(torch.float32), output=w.to(torch.float32), dim=2, input_dtype=torch.float32)
        scale = np.sqrt(k.shape[1])
        dq = (torch.einsum('nck,nqk->ncq', k.to(torch.float32), db).to(q.dtype) / scale)
        dk = (torch.einsum('ncq,nqk->nck', q.to(torch.float32), db).to(k.dtype) / scale)
        return (dq, dk)
|
@persistence.persistent_class
class UNetBlock(torch.nn.Module):
    """Unified U-Net block with optional up/downsampling and self-attention.

    Combines a residual path (GroupNorm -> SiLU -> Conv, twice), an
    embedding-conditioned modulation of the second normalization, a learned
    or projected skip connection, and an optional self-attention stage.
    """
    def __init__(self, in_channels, out_channels, emb_channels, up=False, down=False, attention=False, num_heads=None, channels_per_head=64, dropout=0, skip_scale=1, eps=1e-05, resample_filter=[1, 1], resample_proj=False, adaptive_scale=True, init=dict(), init_zero=dict(init_weight=0), init_attn=None):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.emb_channels = emb_channels
        # 0 heads disables attention; otherwise default to
        # out_channels // channels_per_head when num_heads is not given.
        self.num_heads = (0 if (not attention) else (num_heads if (num_heads is not None) else (out_channels // channels_per_head)))
        self.dropout = dropout
        self.skip_scale = skip_scale
        self.adaptive_scale = adaptive_scale
        self.norm0 = GroupNorm(num_channels=in_channels, eps=eps)
        self.conv0 = Conv2d(in_channels=in_channels, out_channels=out_channels, kernel=3, up=up, down=down, resample_filter=resample_filter, **init)
        # The affine layer produces 2*out_channels values (scale and shift)
        # when adaptive_scale is enabled, otherwise a single shift.
        self.affine = Linear(in_features=emb_channels, out_features=(out_channels * (2 if adaptive_scale else 1)), **init)
        self.norm1 = GroupNorm(num_channels=out_channels, eps=eps)
        self.conv1 = Conv2d(in_channels=out_channels, out_channels=out_channels, kernel=3, **init_zero)
        # Skip connection: needed when channels change or the block resamples.
        # kernel=0 means resample-only (no learned projection).
        self.skip = None
        if ((out_channels != in_channels) or up or down):
            kernel = (1 if (resample_proj or (out_channels != in_channels)) else 0)
            self.skip = Conv2d(in_channels=in_channels, out_channels=out_channels, kernel=kernel, up=up, down=down, resample_filter=resample_filter, **init)
        if self.num_heads:
            self.norm2 = GroupNorm(num_channels=out_channels, eps=eps)
            self.qkv = Conv2d(in_channels=out_channels, out_channels=(out_channels * 3), kernel=1, **(init_attn if (init_attn is not None) else init))
            self.proj = Conv2d(in_channels=out_channels, out_channels=out_channels, kernel=1, **init_zero)
    def forward(self, x, emb):
        orig = x
        x = self.conv0(silu(self.norm0(x)))
        # Per-channel modulation parameters derived from the embedding,
        # broadcast over the spatial dimensions.
        params = self.affine(emb).unsqueeze(2).unsqueeze(3).to(x.dtype)
        if self.adaptive_scale:
            # Adaptive scale/shift: shift + (scale + 1) * norm(x).
            (scale, shift) = params.chunk(chunks=2, dim=1)
            x = silu(torch.addcmul(shift, self.norm1(x), (scale + 1)))
        else:
            x = silu(self.norm1(x.add_(params)))
        x = self.conv1(torch.nn.functional.dropout(x, p=self.dropout, training=self.training))
        x = x.add_((self.skip(orig) if (self.skip is not None) else orig))
        x = (x * self.skip_scale)
        if self.num_heads:
            # Self-attention: fold heads into the batch dimension, split the
            # qkv projection into its three parts, and attend over pixels.
            (q, k, v) = self.qkv(self.norm2(x)).reshape((x.shape[0] * self.num_heads), (x.shape[1] // self.num_heads), 3, (- 1)).unbind(2)
            w = AttentionOp.apply(q, k)
            a = torch.einsum('nqk,nck->ncq', w, v)
            x = self.proj(a.reshape(*x.shape)).add_(x)
            x = (x * self.skip_scale)
        return x
|
@persistence.persistent_class
class PositionalEmbedding(torch.nn.Module):
    """Sinusoidal embedding of a scalar input (DDPM++ style).

    Maps each input value to `num_channels` features: cosines of the scaled
    input over the first half, sines over the second half.
    """
    def __init__(self, num_channels, max_positions=10000, endpoint=False):
        super().__init__()
        self.num_channels = num_channels
        self.max_positions = max_positions
        self.endpoint = endpoint
    def forward(self, x):
        half = (self.num_channels // 2)
        # Geometric frequency ladder from 1 down to 1/max_positions.
        freqs = torch.arange(start=0, end=half, dtype=torch.float32, device=x.device)
        freqs = (freqs / (half - (1 if self.endpoint else 0)))
        freqs = ((1 / self.max_positions) ** freqs)
        angles = x.ger(freqs.to(x.dtype))
        return torch.cat([angles.cos(), angles.sin()], dim=1)
|
@persistence.persistent_class
class FourierEmbedding(torch.nn.Module):
    """Random Fourier feature embedding of a scalar input (NCSN++ style).

    Frequencies are sampled once at construction time and stored as a
    non-trainable buffer.
    """
    def __init__(self, num_channels, scale=16):
        super().__init__()
        self.register_buffer('freqs', (torch.randn((num_channels // 2)) * scale))
    def forward(self, x):
        angles = x.ger(((2 * np.pi) * self.freqs).to(x.dtype))
        return torch.cat([angles.cos(), angles.sin()], dim=1)
|
@persistence.persistent_class
class SongUNet(torch.nn.Module):
    """U-Net backbone with a noise-embedding mapping network.

    The encoder halves the resolution at each level while the decoder
    mirrors it with skip connections; self-attention is inserted at the
    resolutions listed in attn_resolutions. Conditioning (noise level,
    optional class labels and augmentation labels) is embedded and fed to
    every UNetBlock. encoder_type/decoder_type select optional auxiliary
    skip/residual paths operating directly on the image.

    Fix: the list-valued defaults (channel_mult, attn_resolutions,
    resample_filter) were mutable default arguments; they are now tuples.
    They are only read (len/enumerate/membership), so behavior and the
    call interface are unchanged.
    """

    def __init__(self, img_resolution, in_channels, out_channels, label_dim=0, augment_dim=0,
                 model_channels=128, channel_mult=(1, 2, 2, 2), channel_mult_emb=4, num_blocks=4,
                 attn_resolutions=(16,), dropout=0.1, label_dropout=0, embedding_type='positional',
                 channel_mult_noise=1, encoder_type='standard', decoder_type='standard',
                 resample_filter=(1, 1)):
        # Validate configuration early (kept as asserts to match file style).
        assert embedding_type in ['fourier', 'positional']
        assert encoder_type in ['standard', 'skip', 'residual']
        assert decoder_type in ['standard', 'skip']
        super().__init__()
        self.label_dropout = label_dropout
        emb_channels = model_channels * channel_mult_emb
        noise_channels = model_channels * channel_mult_noise
        init = dict(init_mode='xavier_uniform')
        init_zero = dict(init_mode='xavier_uniform', init_weight=1e-05)
        init_attn = dict(init_mode='xavier_uniform', init_weight=np.sqrt(0.2))
        block_kwargs = dict(emb_channels=emb_channels, num_heads=1, dropout=dropout,
                            skip_scale=np.sqrt(0.5), eps=1e-06, resample_filter=resample_filter,
                            resample_proj=True, adaptive_scale=False,
                            init=init, init_zero=init_zero, init_attn=init_attn)

        # Mapping network: noise-level embedding plus optional label/augment inputs.
        self.map_noise = PositionalEmbedding(num_channels=noise_channels, endpoint=True) if embedding_type == 'positional' else FourierEmbedding(num_channels=noise_channels)
        self.map_label = Linear(in_features=label_dim, out_features=noise_channels, **init) if label_dim else None
        self.map_augment = Linear(in_features=augment_dim, out_features=noise_channels, bias=False, **init) if augment_dim else None
        self.map_layer0 = Linear(in_features=noise_channels, out_features=emb_channels, **init)
        self.map_layer1 = Linear(in_features=emb_channels, out_features=emb_channels, **init)

        # Encoder.
        self.enc = torch.nn.ModuleDict()
        cout = in_channels
        caux = in_channels
        for level, mult in enumerate(channel_mult):
            res = img_resolution >> level
            if level == 0:
                cin = cout
                cout = model_channels
                self.enc[f'{res}x{res}_conv'] = Conv2d(in_channels=cin, out_channels=cout, kernel=3, **init)
            else:
                self.enc[f'{res}x{res}_down'] = UNetBlock(in_channels=cout, out_channels=cout, down=True, **block_kwargs)
                if encoder_type == 'skip':
                    self.enc[f'{res}x{res}_aux_down'] = Conv2d(in_channels=caux, out_channels=caux, kernel=0, down=True, resample_filter=resample_filter)
                    self.enc[f'{res}x{res}_aux_skip'] = Conv2d(in_channels=caux, out_channels=cout, kernel=1, **init)
                if encoder_type == 'residual':
                    self.enc[f'{res}x{res}_aux_residual'] = Conv2d(in_channels=caux, out_channels=cout, kernel=3, down=True, resample_filter=resample_filter, fused_resample=True, **init)
                    caux = cout
            for idx in range(num_blocks):
                cin = cout
                cout = model_channels * mult
                attn = res in attn_resolutions
                self.enc[f'{res}x{res}_block{idx}'] = UNetBlock(in_channels=cin, out_channels=cout, attention=attn, **block_kwargs)
        # Channel counts of the encoder outputs consumed by the decoder.
        skips = [block.out_channels for name, block in self.enc.items() if 'aux' not in name]

        # Decoder.
        self.dec = torch.nn.ModuleDict()
        for level, mult in reversed(list(enumerate(channel_mult))):
            res = img_resolution >> level
            if level == len(channel_mult) - 1:
                # Bottleneck: attention block followed by a plain block.
                self.dec[f'{res}x{res}_in0'] = UNetBlock(in_channels=cout, out_channels=cout, attention=True, **block_kwargs)
                self.dec[f'{res}x{res}_in1'] = UNetBlock(in_channels=cout, out_channels=cout, **block_kwargs)
            else:
                self.dec[f'{res}x{res}_up'] = UNetBlock(in_channels=cout, out_channels=cout, up=True, **block_kwargs)
            for idx in range(num_blocks + 1):
                cin = cout + skips.pop()
                cout = model_channels * mult
                attn = (idx == num_blocks) and (res in attn_resolutions)
                self.dec[f'{res}x{res}_block{idx}'] = UNetBlock(in_channels=cin, out_channels=cout, attention=attn, **block_kwargs)
            if (decoder_type == 'skip') or (level == 0):
                if (decoder_type == 'skip') and (level < len(channel_mult) - 1):
                    self.dec[f'{res}x{res}_aux_up'] = Conv2d(in_channels=out_channels, out_channels=out_channels, kernel=0, up=True, resample_filter=resample_filter)
                self.dec[f'{res}x{res}_aux_norm'] = GroupNorm(num_channels=cout, eps=1e-06)
                self.dec[f'{res}x{res}_aux_conv'] = Conv2d(in_channels=cout, out_channels=out_channels, kernel=3, **init_zero)

    def forward(self, x, noise_labels, class_labels, augment_labels=None):
        """Run the U-Net; returns the out_channels-channel output image."""
        # Mapping network.
        emb = self.map_noise(noise_labels)
        emb = emb.reshape(emb.shape[0], 2, -1).flip(1).reshape(*emb.shape)  # swap sin/cos halves
        if self.map_label is not None:
            tmp = class_labels
            if self.training and self.label_dropout:
                # Randomly drop labels per sample during training.
                tmp = tmp * (torch.rand([x.shape[0], 1], device=x.device) >= self.label_dropout).to(tmp.dtype)
            emb = emb + self.map_label(tmp * np.sqrt(self.map_label.in_features))
        if (self.map_augment is not None) and (augment_labels is not None):
            emb = emb + self.map_augment(augment_labels)
        emb = silu(self.map_layer0(emb))
        emb = silu(self.map_layer1(emb))

        # Encoder; aux carries the image-space auxiliary path.
        skips = []
        aux = x
        for name, block in self.enc.items():
            if 'aux_down' in name:
                aux = block(aux)
            elif 'aux_skip' in name:
                x = skips[-1] = x + block(aux)
            elif 'aux_residual' in name:
                x = skips[-1] = aux = (x + block(aux)) / np.sqrt(2)
            else:
                x = block(x, emb) if isinstance(block, UNetBlock) else block(x)
                skips.append(x)

        # Decoder; aux accumulates the (optionally multi-scale) output image.
        aux = None
        tmp = None
        for name, block in self.dec.items():
            if 'aux_up' in name:
                aux = block(aux)
            elif 'aux_norm' in name:
                tmp = block(x)
            elif 'aux_conv' in name:
                tmp = block(silu(tmp))
                aux = tmp if aux is None else tmp + aux
            else:
                # Concatenate the matching encoder skip when channels differ.
                if x.shape[1] != block.in_channels:
                    x = torch.cat([x, skips.pop()], dim=1)
                x = block(x, emb)
        return aux
|
@persistence.persistent_class
class VPPrecond(torch.nn.Module):
    """Variance-preserving (VP) preconditioning wrapper.

    Wraps the raw network F into a denoiser
        D(x; sigma) = c_skip * x + c_out * F(c_in * x; c_noise)
    with c_skip = 1, c_out = -sigma, c_in = 1/sqrt(sigma^2 + 1) and
    c_noise = (M - 1) * sigma_inv(sigma), where sigma(t) is derived from
    the linear beta schedule (beta_min, beta_d) over t in [epsilon_t, 1].
    """

    def __init__(self, img_resolution, img_channels, label_dim=0, use_fp16=False, beta_d=19.9, beta_min=0.1, M=1000, epsilon_t=1e-05, model_type='SongUNet', **model_kwargs):
        super().__init__()
        self.img_resolution = img_resolution    # spatial resolution of the images
        self.img_channels = img_channels        # number of image channels
        self.label_dim = label_dim              # 0 = unconditional
        self.use_fp16 = use_fp16                # run the inner model in fp16 on CUDA
        self.beta_d = beta_d                    # slope of the beta schedule
        self.beta_min = beta_min                # offset of the beta schedule
        self.M = M                              # number of discrete timesteps for c_noise
        self.epsilon_t = epsilon_t              # smallest supported t
        # Noise-level range implied by the schedule endpoints.
        self.sigma_min = float(self.sigma(epsilon_t))
        self.sigma_max = float(self.sigma(1))
        # Resolve the architecture class by name from this module's globals.
        self.model = globals()[model_type](img_resolution=img_resolution, in_channels=img_channels, out_channels=img_channels, label_dim=label_dim, **model_kwargs)

    def forward(self, x, sigma, class_labels=None, force_fp32=False, **model_kwargs):
        """Denoise x at noise level sigma; returns D_x of the same shape as x."""
        x = x.to(torch.float32)
        sigma = sigma.to(torch.float32).reshape((- 1), 1, 1, 1)
        # Drop labels if unconditional; default to zeros if conditional but absent.
        class_labels = (None if (self.label_dim == 0) else (torch.zeros([1, self.label_dim], device=x.device) if (class_labels is None) else class_labels.to(torch.float32).reshape((- 1), self.label_dim)))
        dtype = (torch.float16 if (self.use_fp16 and (not force_fp32) and (x.device.type == 'cuda')) else torch.float32)
        # VP preconditioning coefficients.
        c_skip = 1
        c_out = (- sigma)
        c_in = (1 / ((sigma ** 2) + 1).sqrt())
        c_noise = ((self.M - 1) * self.sigma_inv(sigma))  # continuous index into M steps
        F_x = self.model((c_in * x).to(dtype), c_noise.flatten(), class_labels=class_labels, **model_kwargs)
        assert (F_x.dtype == dtype)
        D_x = ((c_skip * x) + (c_out * F_x.to(torch.float32)))
        return D_x

    def sigma(self, t):
        """sigma(t) = sqrt(exp(0.5*beta_d*t^2 + beta_min*t) - 1)."""
        t = torch.as_tensor(t)
        return ((((0.5 * self.beta_d) * (t ** 2)) + (self.beta_min * t)).exp() - 1).sqrt()

    def sigma_inv(self, sigma):
        """Closed-form inverse of sigma(t): returns t such that sigma(t) == sigma."""
        sigma = torch.as_tensor(sigma)
        return ((((self.beta_min ** 2) + ((2 * self.beta_d) * (1 + (sigma ** 2)).log())).sqrt() - self.beta_min) / self.beta_d)

    def round_sigma(self, sigma):
        """Convert sigma to a tensor without quantization."""
        return torch.as_tensor(sigma)
|
@persistence.persistent_class
class VEPrecond(torch.nn.Module):
    """Variance-exploding (VE) preconditioning wrapper.

    Wraps the raw network F into a denoiser
        D(x; sigma) = x + sigma * F(x; log(sigma / 2))
    i.e. c_skip = 1, c_out = sigma, c_in = 1, c_noise = log(0.5 * sigma).
    """

    def __init__(self, img_resolution, img_channels, label_dim=0, use_fp16=False, sigma_min=0.02, sigma_max=100, model_type='SongUNet', **model_kwargs):
        super().__init__()
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.label_dim = label_dim
        self.use_fp16 = use_fp16
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max
        # Architecture class resolved by name from this module's globals.
        self.model = globals()[model_type](img_resolution=img_resolution, in_channels=img_channels, out_channels=img_channels, label_dim=label_dim, **model_kwargs)

    def forward(self, x, sigma, class_labels=None, force_fp32=False, **model_kwargs):
        """Denoise x at noise level sigma; returns D_x of the same shape as x."""
        x = x.to(torch.float32)
        sigma = sigma.to(torch.float32).reshape(-1, 1, 1, 1)
        # Normalize the label argument: None if unconditional, zeros if absent.
        if self.label_dim == 0:
            class_labels = None
        elif class_labels is None:
            class_labels = torch.zeros([1, self.label_dim], device=x.device)
        else:
            class_labels = class_labels.to(torch.float32).reshape(-1, self.label_dim)
        run_fp16 = self.use_fp16 and not force_fp32 and x.device.type == 'cuda'
        dtype = torch.float16 if run_fp16 else torch.float32
        # VE preconditioning coefficients.
        c_skip = 1
        c_out = sigma
        c_in = 1
        c_noise = (0.5 * sigma).log()
        F_x = self.model((c_in * x).to(dtype), c_noise.flatten(), class_labels=class_labels, **model_kwargs)
        assert F_x.dtype == dtype
        D_x = c_skip * x + c_out * F_x.to(torch.float32)
        return D_x

    def round_sigma(self, sigma):
        """Convert sigma to a tensor without quantization."""
        return torch.as_tensor(sigma)
|
@persistence.persistent_class
class EDMPrecond(torch.nn.Module):
    """EDM preconditioning wrapper.

    Wraps the raw network F into a denoiser
        D(x; sigma) = c_skip * x + c_out * F(c_in * x; c_noise)
    with c_skip = sigma_data^2 / (sigma^2 + sigma_data^2),
    c_out = sigma * sigma_data / sqrt(sigma^2 + sigma_data^2),
    c_in = 1 / sqrt(sigma_data^2 + sigma^2) and c_noise = log(sigma) / 4.
    """

    def __init__(self, img_resolution, img_channels, label_dim=0, use_fp16=False, sigma_min=0, sigma_max=float('inf'), sigma_data=0.5, model_type='SongUNet', **model_kwargs):
        super().__init__()
        self.img_resolution = img_resolution    # spatial resolution of the images
        self.img_channels = img_channels        # number of image channels
        self.label_dim = label_dim              # 0 = unconditional
        self.use_fp16 = use_fp16                # run the inner model in fp16 on CUDA
        self.sigma_min = sigma_min              # lowest supported noise level
        self.sigma_max = sigma_max              # highest supported noise level
        self.sigma_data = sigma_data            # assumed std of the data distribution
        # Resolve the architecture class by name from this module's globals.
        self.model = globals()[model_type](img_resolution=img_resolution, in_channels=img_channels, out_channels=img_channels, label_dim=label_dim, **model_kwargs)

    def forward(self, x, sigma, class_labels=None, force_fp32=False, **model_kwargs):
        """Denoise x at noise level sigma; returns D_x of the same shape as x."""
        x = x.to(torch.float32)
        sigma = sigma.to(torch.float32).reshape((- 1), 1, 1, 1)
        # Drop labels if unconditional; default to zeros if conditional but absent.
        class_labels = (None if (self.label_dim == 0) else (torch.zeros([1, self.label_dim], device=x.device) if (class_labels is None) else class_labels.to(torch.float32).reshape((- 1), self.label_dim)))
        dtype = (torch.float16 if (self.use_fp16 and (not force_fp32) and (x.device.type == 'cuda')) else torch.float32)
        # EDM preconditioning coefficients.
        c_skip = ((self.sigma_data ** 2) / ((sigma ** 2) + (self.sigma_data ** 2)))
        c_out = ((sigma * self.sigma_data) / ((sigma ** 2) + (self.sigma_data ** 2)).sqrt())
        c_in = (1 / ((self.sigma_data ** 2) + (sigma ** 2)).sqrt())
        c_noise = (sigma.log() / 4)
        F_x = self.model((c_in * x).to(dtype), c_noise.flatten(), class_labels=class_labels, **model_kwargs)
        assert (F_x.dtype == dtype)
        D_x = ((c_skip * x) + (c_out * F_x.to(torch.float32)))
        return D_x

    def round_sigma(self, sigma):
        """Convert sigma to a tensor without quantization."""
        return torch.as_tensor(sigma)
|
@persistence.persistent_class
class FDM_EDMPrecond(torch.nn.Module):
    """EDM preconditioning variant with a modified input scaling.

    Identical to EDMPrecond except that c_in is replaced by
    s(sigma) = exp(-m*beta(t)) * (1 + m*beta(t)), where t maps sigma
    linearly onto [0, 1] (see fdm_sigma_inv), beta is a linear-quadratic
    schedule (fdm_beta_min, fdm_beta_d) and m = fdm_multiplier.
    """

    def __init__(self, img_resolution, img_channels, label_dim=0, use_fp16=False, sigma_min=0.002, sigma_max=80.0, sigma_data=0.5, model_type='SongUNet', fdm_beta_d=19.9, fdm_beta_min=0.1, fdm_multiplier=1.0, **model_kwargs):
        super().__init__()
        self.img_resolution = img_resolution    # spatial resolution of the images
        self.img_channels = img_channels        # number of image channels
        self.label_dim = label_dim              # 0 = unconditional
        self.use_fp16 = use_fp16                # run the inner model in fp16 on CUDA
        self.sigma_min = sigma_min              # lowest supported noise level
        self.sigma_max = sigma_max              # highest supported noise level
        self.sigma_data = sigma_data            # assumed std of the data distribution
        self.model = globals()[model_type](img_resolution=img_resolution, in_channels=img_channels, out_channels=img_channels, label_dim=label_dim, **model_kwargs)
        self.fdm_beta_d = fdm_beta_d            # quadratic coefficient of beta(t)
        self.fdm_beta_min = fdm_beta_min        # linear coefficient of beta(t)
        self.fdm_multiplier = fdm_multiplier    # strength m of the s(sigma) attenuation

    def forward(self, x, sigma, class_labels=None, force_fp32=False, **model_kwargs):
        """Denoise x at noise level sigma; returns D_x of the same shape as x."""
        x = x.to(torch.float32)
        sigma = sigma.to(torch.float32).reshape((- 1), 1, 1, 1)
        class_labels = (None if (self.label_dim == 0) else (torch.zeros([1, self.label_dim], device=x.device) if (class_labels is None) else class_labels.to(torch.float32).reshape((- 1), self.label_dim)))
        dtype = (torch.float16 if (self.use_fp16 and (not force_fp32) and (x.device.type == 'cuda')) else torch.float32)
        c_skip = ((self.sigma_data ** 2) / ((sigma ** 2) + (self.sigma_data ** 2)))
        c_out = ((sigma * self.sigma_data) / ((sigma ** 2) + (self.sigma_data ** 2)).sqrt())
        c_in = self.s(sigma)  # only difference w.r.t. EDMPrecond
        c_noise = (sigma.log() / 4)
        F_x = self.model((c_in * x).to(dtype), c_noise.flatten(), class_labels=class_labels, **model_kwargs)
        assert (F_x.dtype == dtype)
        D_x = ((c_skip * x) + (c_out * F_x.to(torch.float32)))
        return D_x

    def round_sigma(self, sigma):
        """Convert sigma to a tensor without quantization."""
        return torch.as_tensor(sigma)

    def fdm_sigma_inv(self, sigma):
        # NOTE(review): despite the name, this is a linear remapping of sigma
        # from [sigma_min, sigma_max] to [0, 1], not the inverse of a VP
        # sigma(t) schedule -- confirm this is the intended parameterization.
        sigma = torch.as_tensor(sigma)
        return ((sigma - self.sigma_min) / (self.sigma_max - self.sigma_min))

    def fdm_beta_fn(self, t):
        """Integrated schedule beta(t) = beta_min*t + 0.5*beta_d*t^2."""
        return ((self.fdm_beta_min * t) + ((0.5 * self.fdm_beta_d) * (t ** 2)))

    def s(self, sigma):
        """Input scaling s(sigma) = exp(-m*beta(t)) * (1 + m*beta(t))."""
        t = self.fdm_sigma_inv(sigma)
        beta = self.fdm_beta_fn(t)
        return (torch.exp(((- self.fdm_multiplier) * beta)) * (1.0 + (self.fdm_multiplier * beta)))
|
@persistence.persistent_class
class FDM_VPPrecond(torch.nn.Module):
    """Variance-preserving (VP) preconditioning variant with modified input scaling.

    Identical to VPPrecond except that c_in = 1/sqrt(sigma^2 + 1) is
    replaced by s(sigma) = exp(-m*beta(t)) * (1 + m*beta(t)), where t maps
    sigma linearly onto [0, 1] (see fdm_sigma_inv), beta is a
    linear-quadratic schedule (fdm_beta_min, fdm_beta_d) and
    m = fdm_multiplier.
    """

    def __init__(self, img_resolution, img_channels, label_dim=0, use_fp16=False, beta_d=19.9, beta_min=0.1, M=1000, epsilon_t=1e-05, model_type='SongUNet', fdm_beta_d=19.9, fdm_beta_min=0.1, fdm_multiplier=1.0, **model_kwargs):
        super().__init__()
        self.img_resolution = img_resolution    # spatial resolution of the images
        self.img_channels = img_channels        # number of image channels
        self.label_dim = label_dim              # 0 = unconditional
        self.use_fp16 = use_fp16                # run the inner model in fp16 on CUDA
        self.beta_d = beta_d                    # slope of the VP beta schedule
        self.beta_min = beta_min                # offset of the VP beta schedule
        self.M = M                              # number of discrete timesteps for c_noise
        self.epsilon_t = epsilon_t              # smallest supported t
        # Noise-level range implied by the VP schedule endpoints.
        self.sigma_min = float(self.sigma(epsilon_t))
        self.sigma_max = float(self.sigma(1))
        self.model = globals()[model_type](img_resolution=img_resolution, in_channels=img_channels, out_channels=img_channels, label_dim=label_dim, **model_kwargs)
        self.fdm_beta_d = fdm_beta_d            # quadratic coefficient of the FDM beta(t)
        self.fdm_beta_min = fdm_beta_min        # linear coefficient of the FDM beta(t)
        self.fdm_multiplier = fdm_multiplier    # strength m of the s(sigma) attenuation

    def forward(self, x, sigma, class_labels=None, force_fp32=False, **model_kwargs):
        """Denoise x at noise level sigma; returns D_x of the same shape as x."""
        x = x.to(torch.float32)
        sigma = sigma.to(torch.float32).reshape((- 1), 1, 1, 1)
        class_labels = (None if (self.label_dim == 0) else (torch.zeros([1, self.label_dim], device=x.device) if (class_labels is None) else class_labels.to(torch.float32).reshape((- 1), self.label_dim)))
        dtype = (torch.float16 if (self.use_fp16 and (not force_fp32) and (x.device.type == 'cuda')) else torch.float32)
        c_skip = 1
        c_out = (- sigma)
        c_in = self.s(sigma)  # only difference w.r.t. VPPrecond
        c_noise = ((self.M - 1) * self.sigma_inv(sigma))
        F_x = self.model((c_in * x).to(dtype), c_noise.flatten(), class_labels=class_labels, **model_kwargs)
        assert (F_x.dtype == dtype)
        D_x = ((c_skip * x) + (c_out * F_x.to(torch.float32)))
        return D_x

    def sigma(self, t):
        """sigma(t) = sqrt(exp(0.5*beta_d*t^2 + beta_min*t) - 1)."""
        t = torch.as_tensor(t)
        return ((((0.5 * self.beta_d) * (t ** 2)) + (self.beta_min * t)).exp() - 1).sqrt()

    def sigma_inv(self, sigma):
        """Closed-form inverse of sigma(t): returns t such that sigma(t) == sigma."""
        sigma = torch.as_tensor(sigma)
        return ((((self.beta_min ** 2) + ((2 * self.beta_d) * (1 + (sigma ** 2)).log())).sqrt() - self.beta_min) / self.beta_d)

    def round_sigma(self, sigma):
        """Convert sigma to a tensor without quantization."""
        return torch.as_tensor(sigma)

    def fdm_sigma_inv(self, sigma):
        # NOTE(review): a linear remapping of sigma onto [0, 1], distinct from
        # sigma_inv above -- confirm this is the intended parameterization.
        sigma = torch.as_tensor(sigma)
        return ((sigma - self.sigma_min) / (self.sigma_max - self.sigma_min))

    def fdm_beta_fn(self, t):
        """Integrated schedule beta(t) = beta_min*t + 0.5*beta_d*t^2."""
        return ((self.fdm_beta_min * t) + ((0.5 * self.fdm_beta_d) * (t ** 2)))

    def s(self, sigma):
        """Input scaling s(sigma) = exp(-m*beta(t)) * (1 + m*beta(t))."""
        t = self.fdm_sigma_inv(sigma)
        beta = self.fdm_beta_fn(t)
        return (torch.exp(((- self.fdm_multiplier) * beta)) * (1.0 + (self.fdm_multiplier * beta)))
|
@persistence.persistent_class
class FDM_VEPrecond(torch.nn.Module):
    """Variance-exploding (VE) preconditioning variant with modified input scaling.

    Identical to VEPrecond except that c_in = 1 is replaced by
    s(sigma) = exp(-m*beta(t)) * (1 + m*beta(t)), where t maps sigma
    linearly onto [0, 1] (see fdm_sigma_inv), beta is a linear-quadratic
    schedule (fdm_beta_min, fdm_beta_d) and m = fdm_multiplier.

    Fix: s() now honors self.fdm_multiplier, consistent with
    FDM_EDMPrecond.s and FDM_VPPrecond.s; previously the attribute was
    stored but silently ignored here. Behavior is unchanged for the
    default fdm_multiplier=1.0.
    """

    def __init__(self, img_resolution, img_channels, label_dim=0, use_fp16=False, sigma_min=0.02, sigma_max=100, model_type='SongUNet', fdm_beta_d=19.9, fdm_beta_min=0.1, fdm_multiplier=1.0, **model_kwargs):
        super().__init__()
        self.img_resolution = img_resolution    # spatial resolution of the images
        self.img_channels = img_channels        # number of image channels
        self.label_dim = label_dim              # 0 = unconditional
        self.use_fp16 = use_fp16                # run the inner model in fp16 on CUDA
        self.sigma_min = sigma_min              # lowest supported noise level
        self.sigma_max = sigma_max              # highest supported noise level
        self.model = globals()[model_type](img_resolution=img_resolution, in_channels=img_channels, out_channels=img_channels, label_dim=label_dim, **model_kwargs)
        self.fdm_beta_d = fdm_beta_d            # quadratic coefficient of beta(t)
        self.fdm_beta_min = fdm_beta_min        # linear coefficient of beta(t)
        self.fdm_multiplier = fdm_multiplier    # strength m of the s(sigma) attenuation

    def forward(self, x, sigma, class_labels=None, force_fp32=False, **model_kwargs):
        """Denoise x at noise level sigma; returns D_x of the same shape as x."""
        x = x.to(torch.float32)
        sigma = sigma.to(torch.float32).reshape(-1, 1, 1, 1)
        # Drop labels if unconditional; default to zeros if conditional but absent.
        class_labels = None if self.label_dim == 0 else (torch.zeros([1, self.label_dim], device=x.device) if class_labels is None else class_labels.to(torch.float32).reshape(-1, self.label_dim))
        dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
        # VE preconditioning coefficients, with the FDM input scaling.
        c_skip = 1
        c_out = sigma
        c_in = self.s(sigma)  # only difference w.r.t. VEPrecond
        c_noise = (0.5 * sigma).log()
        F_x = self.model((c_in * x).to(dtype), c_noise.flatten(), class_labels=class_labels, **model_kwargs)
        assert F_x.dtype == dtype
        D_x = c_skip * x + c_out * F_x.to(torch.float32)
        return D_x

    def round_sigma(self, sigma):
        """Convert sigma to a tensor without quantization."""
        return torch.as_tensor(sigma)

    def fdm_sigma_inv(self, sigma):
        """Map sigma linearly from [sigma_min, sigma_max] onto t in [0, 1]."""
        sigma = torch.as_tensor(sigma)
        return (sigma - self.sigma_min) / (self.sigma_max - self.sigma_min)

    def fdm_beta_fn(self, t):
        """Integrated schedule beta(t) = beta_min*t + 0.5*beta_d*t^2."""
        return (self.fdm_beta_min * t) + ((0.5 * self.fdm_beta_d) * (t ** 2))

    def s(self, sigma):
        """Input scaling s(sigma) = exp(-m*beta(t)) * (1 + m*beta(t)), m = fdm_multiplier."""
        t = self.fdm_sigma_inv(sigma)
        beta = self.fdm_beta_fn(t)
        return torch.exp((- self.fdm_multiplier) * beta) * (1.0 + (self.fdm_multiplier * beta))
|
def training_loop(run_dir='.', dataset_kwargs={}, data_loader_kwargs={}, network_kwargs={}, loss_kwargs={}, optimizer_kwargs={}, augment_kwargs=None, seed=0, batch_size=512, batch_gpu=None, total_kimg=200000, ema_halflife_kimg=500, ema_rampup_ratio=0.05, lr_rampup_kimg=10000, loss_scaling=1, kimg_per_tick=50, snapshot_ticks=50, state_dump_ticks=500, resume_pkl=None, resume_state_dump=None, resume_kimg=0, cudnn_benchmark=True, device=torch.device('cuda')):
    """Distributed training loop for the diffusion denoiser.

    Builds the dataset, network, optimizer, loss and EMA copy from the
    given *_kwargs via dnnlib.util.construct_class_by_name, then trains
    under DistributedDataParallel with gradient accumulation, printing
    per-tick statistics and writing network snapshots, training-state
    dumps and stats.jsonl into run_dir.

    Progress is measured in "kimg" (thousands of training images seen);
    a "tick" is the maintenance interval (kimg_per_tick) at which status
    is printed and snapshots/dumps are considered.

    NOTE(review): the dict-valued defaults are mutable default arguments;
    they are only read here, but None-defaults would be safer. The
    device=torch.device('cuda') default is evaluated at import time.
    """
    # Initialize: per-rank RNG seeding and deterministic matmul settings.
    start_time = time.time()
    np.random.seed((((seed * dist.get_world_size()) + dist.get_rank()) % (1 << 31)))
    torch.manual_seed(np.random.randint((1 << 31)))
    torch.backends.cudnn.benchmark = cudnn_benchmark
    torch.backends.cudnn.allow_tf32 = False
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False

    # Split the global batch across ranks and gradient-accumulation rounds.
    batch_gpu_total = (batch_size // dist.get_world_size())
    if ((batch_gpu is None) or (batch_gpu > batch_gpu_total)):
        batch_gpu = batch_gpu_total
    num_accumulation_rounds = (batch_gpu_total // batch_gpu)
    assert (batch_size == ((batch_gpu * num_accumulation_rounds) * dist.get_world_size()))

    # Load dataset (infinite sampler => the iterator never exhausts).
    dist.print0('Loading dataset...')
    dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs)
    dataset_sampler = misc.InfiniteSampler(dataset=dataset_obj, rank=dist.get_rank(), num_replicas=dist.get_world_size(), seed=seed)
    dataset_iterator = iter(torch.utils.data.DataLoader(dataset=dataset_obj, sampler=dataset_sampler, batch_size=batch_gpu, **data_loader_kwargs))

    # Construct network; resolution/channels/labels come from the dataset.
    dist.print0('Constructing network...')
    interface_kwargs = dict(img_resolution=dataset_obj.resolution, img_channels=dataset_obj.num_channels, label_dim=dataset_obj.label_dim)
    net = dnnlib.util.construct_class_by_name(**network_kwargs, **interface_kwargs)
    net.train().requires_grad_(True).to(device)
    if (dist.get_rank() == 0):
        # Print a module summary once, using a dummy forward pass.
        with torch.no_grad():
            images = torch.zeros([batch_gpu, net.img_channels, net.img_resolution, net.img_resolution], device=device)
            sigma = torch.ones([batch_gpu], device=device)
            labels = torch.zeros([batch_gpu, net.label_dim], device=device)
            misc.print_module_summary(net, [images, sigma, labels], max_nesting=2)

    # Set up optimizer, loss, optional augmentation, DDP wrapper and EMA copy.
    dist.print0('Setting up optimizer...')
    loss_fn = dnnlib.util.construct_class_by_name(**loss_kwargs)
    optimizer = dnnlib.util.construct_class_by_name(params=net.parameters(), **optimizer_kwargs)
    augment_pipe = (dnnlib.util.construct_class_by_name(**augment_kwargs) if (augment_kwargs is not None) else None)
    ddp = torch.nn.parallel.DistributedDataParallel(net, device_ids=[device], broadcast_buffers=False)
    ema = copy.deepcopy(net).eval().requires_grad_(False)

    # Resume network weights from a snapshot pickle, if given. Rank 0 reads
    # first; the barriers stagger the other ranks around its download.
    if (resume_pkl is not None):
        dist.print0(f'Loading network weights from "{resume_pkl}"...')
        if (dist.get_rank() != 0):
            torch.distributed.barrier()
        with dnnlib.util.open_url(resume_pkl, verbose=(dist.get_rank() == 0)) as f:
            data = pickle.load(f)
        if (dist.get_rank() == 0):
            torch.distributed.barrier()
        misc.copy_params_and_buffers(src_module=data['ema'], dst_module=net, require_all=False)
        misc.copy_params_and_buffers(src_module=data['ema'], dst_module=ema, require_all=False)
        del data
    # Resume the full training state (net weights + optimizer), if given.
    if resume_state_dump:
        dist.print0(f'Loading training state from "{resume_state_dump}"...')
        data = torch.load(resume_state_dump, map_location=torch.device('cpu'))
        misc.copy_params_and_buffers(src_module=data['net'], dst_module=net, require_all=True)
        optimizer.load_state_dict(data['optimizer_state'])
        del data

    # Main-loop bookkeeping.
    dist.print0(f'Training for {total_kimg} kimg...')
    dist.print0()
    cur_nimg = (resume_kimg * 1000)
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = (tick_start_time - start_time)
    dist.update_progress((cur_nimg // 1000), total_kimg)
    stats_jsonl = None
    while True:
        # Accumulate gradients over num_accumulation_rounds micro-batches;
        # gradients are synchronized across ranks only on the last round.
        optimizer.zero_grad(set_to_none=True)
        for round_idx in range(num_accumulation_rounds):
            with misc.ddp_sync(ddp, (round_idx == (num_accumulation_rounds - 1))):
                (images, labels) = next(dataset_iterator)
                images = ((images.to(device).to(torch.float32) / 127.5) - 1)  # uint8 [0, 255] -> [-1, 1]
                labels = labels.to(device)
                loss = loss_fn(net=ddp, images=images, labels=labels, augment_pipe=augment_pipe)
                training_stats.report('Loss/loss', loss)
                loss.sum().mul((loss_scaling / batch_gpu_total)).backward()

        # Linear learning-rate warmup, gradient sanitation, optimizer step.
        for g in optimizer.param_groups:
            g['lr'] = (optimizer_kwargs['lr'] * min((cur_nimg / max((lr_rampup_kimg * 1000), 1e-08)), 1))
        for param in net.parameters():
            if (param.grad is not None):
                # Replace NaN/inf gradients in place instead of skipping the step.
                torch.nan_to_num(param.grad, nan=0, posinf=100000.0, neginf=(- 100000.0), out=param.grad)
        optimizer.step()

        # Update the EMA copy; the half-life ramps up early in training.
        ema_halflife_nimg = (ema_halflife_kimg * 1000)
        if (ema_rampup_ratio is not None):
            ema_halflife_nimg = min(ema_halflife_nimg, (cur_nimg * ema_rampup_ratio))
        ema_beta = (0.5 ** (batch_size / max(ema_halflife_nimg, 1e-08)))
        for (p_ema, p_net) in zip(ema.parameters(), net.parameters()):
            p_ema.copy_(p_net.detach().lerp(p_ema, ema_beta))

        # Advance progress; skip maintenance until the next tick boundary.
        cur_nimg += batch_size
        done = (cur_nimg >= (total_kimg * 1000))
        if ((not done) and (cur_tick != 0) and (cur_nimg < (tick_start_nimg + (kimg_per_tick * 1000)))):
            continue

        # Print one status line per tick.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', (cur_nimg / 1000.0)):<9.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', (tick_end_time - start_time))):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', (tick_end_time - tick_start_time)):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (((tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg)) * 1000.0)):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', (psutil.Process(os.getpid()).memory_info().rss / (2 ** 30))):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', (torch.cuda.max_memory_allocated(device) / (2 ** 30))):<6.2f}"]
        fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', (torch.cuda.max_memory_reserved(device) / (2 ** 30))):<6.2f}"]
        fields += [f'loss {loss.sum().item():<10.2f}']
        torch.cuda.reset_peak_memory_stats()
        dist.print0(' '.join(fields))

        # Honor an external stop request.
        if ((not done) and dist.should_stop()):
            done = True
            dist.print0()
            dist.print0('Aborting...')

        # Save a network snapshot (EMA weights + loss/augment configuration).
        if ((snapshot_ticks is not None) and (done or ((cur_tick % snapshot_ticks) == 0))):
            data = dict(ema=ema, loss_fn=loss_fn, augment_pipe=augment_pipe, dataset_kwargs=dict(dataset_kwargs))
            for (key, value) in data.items():
                if isinstance(value, torch.nn.Module):
                    # Deep-copy to CPU so the pickle is self-contained.
                    value = copy.deepcopy(value).eval().requires_grad_(False)
                    misc.check_ddp_consistency(value)
                    data[key] = value.cpu()
                del value  # conserve memory
            if (dist.get_rank() == 0):
                with open(os.path.join(run_dir, f'network-snapshot-{(cur_nimg // 1000):06d}.pkl'), 'wb') as f:
                    pickle.dump(data, f)
            del data  # conserve memory

        # Save the full training state for exact resumption (rank 0 only).
        if ((state_dump_ticks is not None) and (done or ((cur_tick % state_dump_ticks) == 0)) and (cur_tick != 0) and (dist.get_rank() == 0)):
            torch.save(dict(net=net, optimizer_state=optimizer.state_dict()), os.path.join(run_dir, f'training-state-{(cur_nimg // 1000):06d}.pt'))

        # Append aggregated statistics to stats.jsonl (rank 0 only).
        training_stats.default_collector.update()
        if (dist.get_rank() == 0):
            if (stats_jsonl is None):
                stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'at')
            stats_jsonl.write((json.dumps(dict(training_stats.default_collector.as_dict(), timestamp=time.time())) + '\n'))
            stats_jsonl.flush()
        dist.update_progress((cur_nimg // 1000), total_kimg)

        # Roll over to the next tick.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = (tick_start_time - tick_end_time)
        if done:
            break

    # Done.
    dist.print0()
    dist.print0('Exiting...')
|
def normalize(v):
    """Return *v* scaled to unit Euclidean norm (NaN/inf if the norm is zero)."""
    length = np.linalg.norm(v)
    return v / length
|
def get_r_matrix(ax_, angle):
    """3x3 rotation matrix about axis ax_ by angle radians (Rodrigues' formula)."""
    axis = normalize(ax_)
    if np.abs(angle) <= ANGLE_EPS:
        # Angle below threshold: identity rotation.
        return np.eye(3)
    # Skew-symmetric cross-product matrix of the unit axis.
    skew = np.array([
        [0.0, -axis[2], axis[1]],
        [axis[2], 0.0, -axis[0]],
        [-axis[1], axis[0], 0.0],
    ], dtype=np.float32)
    return np.eye(3) + np.sin(angle) * skew + (1 - np.cos(angle)) * (skew @ skew)
|
def r_between(v_from_, v_to_):
    """Return the 3x3 rotation matrix that rotates direction v_from_ onto v_to_.

    Both inputs are normalized first; the rotation axis is their cross
    product and the angle the arccos of their dot product.

    NOTE(review): if the inputs are parallel or anti-parallel, the cross
    product is the zero vector and normalize() divides by zero, producing
    NaNs -- confirm callers never pass (anti-)parallel directions.
    """
    v_from = normalize(v_from_)
    v_to = normalize(v_to_)
    ax = normalize(np.cross(v_from, v_to))
    angle = np.arccos(np.dot(v_from, v_to))
    return get_r_matrix(ax, angle)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.