code stringlengths 17 6.64M |
|---|
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Return the ETag of the S3 object addressed by ``url``."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource('s3').Object(bucket_name, s3_path)
    return s3_object.e_tag
|
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Download an S3 object directly into the open file ``temp_file``."""
    bucket_name, s3_path = split_s3_path(url)
    bucket = boto3.resource('s3').Bucket(bucket_name)
    bucket.download_fileobj(s3_path, temp_file)
|
def http_get(url: str, temp_file: IO) -> None:
    """Stream ``url`` over HTTP into ``temp_file``, updating a byte progress bar."""
    req = requests.get(url, stream=True)
    length_header = req.headers.get('Content-Length')
    total_bytes = None if length_header is None else int(length_header)
    progress = tqdm(unit='B', total=total_bytes)
    for chunk in req.iter_content(chunk_size=1024):
        # Skip keep-alive chunks, which arrive empty.
        if not chunk:
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
|
def get_from_cache(url: str, cache_dir: Optional[Union[str, Path]] = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # The cache key combines the URL with the server-reported ETag, so a
    # changed remote file yields a fresh cache entry.
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if (response.status_code != 200):
            raise IOError('HEAD request failed for url {} with status code {}'.format(url, response.status_code))
        etag = response.headers.get('ETag')
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if (not os.path.exists(cache_path)):
        # Download to a temp file first and copy into the cache afterwards, so a
        # partially-downloaded file never appears at cache_path.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # Flush buffered writes and rewind before copying out.
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info('creating metadata file for %s', cache_path)
            # Sidecar .json records where the cached file came from.
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
|
def read_set_from_file(filename: str) -> Set[str]:
    """
    Read a file with one item per line and return the de-duplicated
    items as a set (trailing whitespace stripped).
    """
    with open(filename, 'r', encoding='utf-8') as source:
        return {line.rstrip() for line in source}
|
def get_file_extension(path: str, dot=True, lower: bool=True):
    """Return the extension of ``path``; optionally drop the leading dot and/or lowercase it."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
|
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in float32 and casts back, so fp16 inputs stay stable."""
    def forward(self, x: torch.Tensor):
        source_dtype = x.dtype
        # Do the normalization in full precision, then restore the input dtype.
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(source_dtype)
|
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    def forward(self, x: torch.Tensor):
        return torch.sigmoid(x * 1.702) * x
|
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: multi-head self-attention + QuickGELU MLP with residuals.

    forward() takes and returns a ``(x, attn_mask)`` tuple so blocks can be chained
    through ``nn.Sequential``.
    """
    def __init__(self, d_model: int, n_head: int, attn_mask=None):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x expansion MLP, matching the standard transformer feed-forward.
        self.mlp = nn.Sequential(OrderedDict([
            ('c_fc', nn.Linear(d_model, (d_model * 4))),
            ('gelu', QuickGELU()),
            ('c_proj', nn.Linear((d_model * 4), d_model)),
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.n_head = n_head

    def attention(self, x: torch.Tensor, attn_mask_: torch.Tensor):
        # BUG FIX: the original called repeat_interleave on the mask BEFORE the
        # None check, so a None mask raised AttributeError and the None-check on
        # the following line was dead code. Check for None first.
        if attn_mask_ is not None:
            # Expand the per-sample mask to one copy per attention head.
            attn_mask_ = attn_mask_.repeat_interleave(self.n_head, dim=0)
            attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]

    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = (x + self.attention(self.ln_1(x), attn_mask))
        x = (x + self.mlp(self.ln_2(x)))
        return (x, attn_mask)
|
class Transformer(nn.Module):
    """A stack of ResidualAttentionBlocks applied in sequence."""
    def __init__(self, width: int, layers: int, heads: int, attn_mask=None):
        super(Transformer, self).__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)
    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
        # Each block passes (x, mask) along; only the activations are returned.
        out, _mask = self.resblocks((x, attn_mask))
        return out
|
def warmup_cosine(x, warmup=0.002):
    """Linear warmup to 1 over the first ``warmup`` fraction, then cosine decay in x."""
    if x < warmup:
        return x / warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
|
def warmup_constant(x, warmup=0.002):
    """
    Linearly increases learning rate over `warmup`*`t_total` (as provided to
    BertAdam) training steps. Learning rate is 1. afterwards.
    """
    return x / warmup if x < warmup else 1.0
|
def warmup_linear(x, warmup=0.002):
    """
    Triangular learning rate schedule: peak 1.0 at the `warmup`*`t_total`-th
    step (as provided to BertAdam), then linear decay to zero at `t_total`.
    """
    if x < warmup:
        return x / warmup
    # Linear descent from 1 at x == warmup to 0 at x == 1, clamped below at 0.
    return max((x - 1.0) / (warmup - 1.0), 0)
|
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Unlike standard Adam, there is no bias correction, and weight decay is
    added directly to the update (decoupled from the moment estimates).

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyper-parameters eagerly so a bad config fails at
        # construction time rather than mid-training.
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the currently scheduled learning rate per parameter.

        Returns the sentinel [0] if any parameter has no optimizer state yet
        (i.e. step() has never run for it).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    # No step taken yet for this parameter.
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state initialization: step counter plus first and
                    # second moment buffers.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                if (group['max_grad_norm'] > 0):
                    # Clip this parameter's gradient norm in place.
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Exponential moving averages of the gradient and its square.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # No bias correction, by design (BERT variant of Adam).
                update = (next_m / (next_v.sqrt() + group['e']))
                if (group['weight_decay'] > 0.0):
                    # Decoupled weight decay: applied to the update directly,
                    # not folded into the gradient moments.
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    # 'step' is incremented after use, so the first update is
                    # scheduled at progress 0.
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
|
@lru_cache()
def default_bpe():
    """Absolute path of the default BPE vocabulary file shipped next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
    """
    Build the reversible byte -> unicode-character table used by the BPE codec.

    Printable latin-1 bytes map to themselves; every remaining byte is assigned
    a character from code point 256 upward, so no byte ever maps to a
    whitespace or control character that the BPE code would choke on.
    Insertion order (printable bytes first, then the remapped ones) is
    significant: the tokenizer vocabulary is built from ``.values()``.
    """
    printable = (list(range(ord('!'), ord('~') + 1))
                 + list(range(ord('¡'), ord('¬') + 1))
                 + list(range(ord('®'), ord('ÿ') + 1)))
    table = {b: chr(b) for b in printable}
    next_slot = 0
    for byte in range(2 ** 8):
        if byte not in table:
            table[byte] = chr((2 ** 8) + next_slot)
            next_slot += 1
    return table
|
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in *word*.
    Word is represented as a tuple of symbols (variable-length strings).
    """
    word[0]  # preserve the original IndexError on empty input
    return set(zip(word, word[1:]))
|
def basic_clean(text):
    """Fix mojibake with ftfy, undo (possibly double) HTML escaping, and trim whitespace."""
    fixed = ftfy.fix_text(text)
    # Unescape twice to handle double-encoded entities like '&amp;amp;'.
    fixed = html.unescape(html.unescape(fixed))
    return fixed.strip()
|
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and strip the ends."""
    return re.sub('\\s+', ' ', text).strip()
|
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer (CLIP vocabulary).

    Text is byte-encoded via bytes_to_unicode(), pre-tokenized with a regex,
    and each pre-token is merged greedily by BPE rank.

    NOTE(review): the ``\\p{...}`` classes in ``self.pat`` require the
    third-party ``regex`` module; stdlib ``re`` does not support them.
    """
    def __init__(self, bpe_path: str=default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # First line of the merges file is a header; only the first
        # 49152 - 256 - 2 merges fit the vocab budget (bytes + specials reserved).
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocabulary: 256 byte symbols, their end-of-word variants, one entry
        # per merge, then the two special tokens.
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Pre-seed the cache so special tokens are never split by BPE.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder
    def bpe(self, token):
        """Apply BPE merges to one pre-token; returns space-joined subword symbols."""
        if (token in self.cache):
            return self.cache[token]
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked (most frequent) adjacent pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                # BUG FIX: was a bare `except:`; tuple.index only raises
                # ValueError here (when `first` is absent from the remainder).
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Clean, pre-tokenize, BPE-merge, and map *text* to vocabulary ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def decode(self, tokens):
        """Invert encode(): ids -> subwords -> bytes -> text (end-of-word becomes a space)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
    def tokenize(self, text):
        """Like encode() but returns subword strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens
    def convert_tokens_to_ids(self, tokens):
        """Map subword strings (from tokenize()) to vocabulary ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
|
class PretrainedConfig(object):
    """Base class for model configurations resolvable from a name, path, or URL.

    Subclasses are expected to populate `pretrained_model_archive_map`,
    `config_name`, and `weights_name`.
    """
    # Mapping from well-known model names to downloadable archive URLs.
    pretrained_model_archive_map = {}
    # File name of the config inside the archive.
    config_name = ''
    # File name of the weights inside the archive.
    weights_name = ''
    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        """Resolve `pretrained_model_name` to a (config, state_dict) tuple.

        Resolution order: a file next to this module, then the archive map,
        finally the name itself treated as a path/URL. Returns None when
        nothing can be resolved. Logging is restricted to local_rank 0 to
        avoid duplicated lines in distributed runs.
        """
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if (os.path.exists(archive_file) is False):
            if (pretrained_model_name in cls.pretrained_model_archive_map):
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name
        try:
            # Downloads (or hits the local cache) when archive_file is a URL.
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.error("Model name '{}' was not found in model name list. We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name, archive_file))
            return None
        if (resolved_archive_file == archive_file):
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('loading archive file {}'.format(archive_file))
        elif ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Archive is a tarball: unpack into a temp dir removed at the end.
            tempdir = tempfile.mkdtemp()
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
            # SECURITY NOTE(review): extractall() trusts archive member paths;
            # a malicious archive could write outside tempdir (path traversal).
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('Model config {}'.format(config))
        if (state_dict is None):
            # Only load weights from the archive when none were supplied.
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            elif ((task_config is None) or (task_config.local_rank == 0)):
                logger.info("Weight doesn't exsits. {}".format(weights_path))
        if tempdir:
            # Clean up the extracted archive.
            shutil.rmtree(tempdir)
        return (config, state_dict)
    @classmethod
    def from_dict(cls, json_object):
        'Constructs a `BertConfig` from a Python dictionary of parameters.'
        config = cls(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        'Constructs a `BertConfig` from a json file of parameters.'
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        'Serializes this instance to a Python dictionary.'
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        'Serializes this instance to a JSON string.'
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
|
def get_world_size():
    """Number of distributed processes; 1 when distributed is unavailable or uninitialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank():
    """Rank of this process; 0 when distributed is unavailable or uninitialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
|
def is_main_process():
    """True only on the rank-0 (main) process."""
    return get_rank() == 0
|
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training. A no-op when distributed is unavailable,
    uninitialized, or running single-process.
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
|
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if (world_size == 1):
        return [data]
    # Serialize the object into a CUDA byte tensor.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # dist.all_gather requires equal-sized tensors, so exchange sizes first...
    local_size = torch.LongTensor([tensor.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # ...then pad every payload to the largest size before gathering.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if (local_size != max_size):
        padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Truncate each gathered buffer back to its true size and unpickle.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
|
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if (world_size < 2):
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # Sort keys so every rank stacks the values in the same order.
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        # Sum across ranks; only rank 0 receives the reduced result.
        dist.reduce(values, dst=0)
        if ((dist.get_rank() == 0) and average):
            # Turn the sum into a mean (only meaningful on rank 0).
            values /= world_size
        reduced_dict = {k: v for (k, v) in zip(names, values)}
    return reduced_dict
|
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
    """
    Build a named logger that emits DEBUG output to stdout (and to
    `save_dir/filename` when `save_dir` is truthy). Non-zero distributed
    ranks get a quiet logger (ERROR level, no handlers attached).
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.ERROR)
    if dist_rank > 0:
        return logger
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    # Keep records from also reaching the root logger's handlers.
    logger.propagate = False
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
|
class SmoothedValue(object):
    """
    Track a series of scalar values, exposing the median/mean over a sliding
    window plus the global average over everything seen so far.
    """
    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)  # last `window_size` values
        self.series = []                        # complete history
        self.total = 0.0
        self.count = 0
    def update(self, value):
        """Record one new observation."""
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value
    @property
    def median(self):
        window = torch.tensor(list(self.deque))
        return window.median().item()
    @property
    def avg(self):
        window = torch.tensor(list(self.deque))
        return window.mean().item()
    @property
    def global_avg(self):
        return self.total / self.count
|
class MetricLogger(object):
    """Collect named SmoothedValue meters and render them as one delimited log line."""
    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Feed scalar (or 0-d tensor) values into the meters named by keyword."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)
    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails: expose meters
        # as attributes (e.g. logger.loss).
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
    def __str__(self):
        parts = [
            '{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(parts)
|
class AverageMeter(object):
    """Track the most recent value together with a running sum, count, and mean."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val` as having been observed `n` times."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (as a percentage) for each k in `topk`."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the maxk highest-scoring classes, one column per sample.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        hits = pred.eq(target.view(1, -1).expand_as(pred)).contiguous()
        results = []
        for k in topk:
            num_correct = hits[:k].view(-1).float().sum(0, keepdim=True)
            results.append(num_correct.mul_(100.0 / batch_size))
        return results
|
def get_a_var(obj):
    """Depth-first search `obj` (tensor / list / tuple / dict) for the first torch.Tensor."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        children = obj
    elif isinstance(obj, dict):
        children = obj.items()
    else:
        return None
    for child in children:
        found = get_a_var(child)
        if isinstance(found, torch.Tensor):
            return found
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` over `device_ids` and run `fct(replica, *input)` on each
    replica in its own thread, one input per device.

    Returns outputs in input order; an exception raised inside a worker is
    re-raised in the calling thread.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Capture the caller's grad mode so worker threads match it.
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # Run on whichever device this input's tensors live on.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Store the exception; it is re-raised in the parent thread below.
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no thread overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """
    Return the shared 'logger' logger at DEBUG level. When `filename` is
    given, also attach a file handler (to the root logger, so all loggers
    propagate into the file).
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return logger
|
def convert_module_to_f16(l):
    """
    Convert primitive modules to float16.
    Only conv layers are touched; any other module is left unchanged.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    if l.bias is not None:
        l.bias.data = l.bias.data.half()
|
def convert_module_to_f32(l):
    """
    Convert primitive modules to float32, undoing convert_module_to_f16().
    Only conv layers are touched; any other module is left unchanged.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.float()
    if l.bias is not None:
        l.bias.data = l.bias.data.float()
|
def make_master_params(param_groups_and_shapes):
    """
    Copy model parameters into a (differently-shaped) list of full-precision
    parameters — one flat fp32 master parameter per group.
    """
    master_params = []
    for group, shape in param_groups_and_shapes:
        detached = [param.detach().float() for _, param in group]
        master = nn.Parameter(_flatten_dense_tensors(detached).view(shape))
        master.requires_grad = True
        master_params.append(master)
    return master_params
|
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
    """
    Copy the gradients from the model parameters into the master parameters
    created by make_master_params(); missing grads contribute zeros.
    """
    for master, (group, shape) in zip(master_params, param_groups_and_shapes):
        grads = [param_grad_or_zeros(param) for _, param in group]
        master.grad = _flatten_dense_tensors(grads).view(shape)
|
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """
    Copy the master parameter data back into the corresponding model parameters.
    """
    for master, (group, _) in zip(master_params, param_groups_and_shapes):
        unflat = unflatten_master_params(group, master.view(-1))
        for (_, param), new_value in zip(group, unflat):
            param.detach().copy_(new_value)
|
def unflatten_master_params(param_group, master_param):
    """Split a flat master parameter back into tensors shaped like those in `param_group`."""
    templates = [param for _, param in param_group]
    return _unflatten_dense_tensors(master_param, templates)
|
def get_param_groups_and_shapes(named_model_params):
    """
    Split named parameters into two groups with their flattened master shapes:
    scalars/vectors (flattened to -1) and matrices (flattened to (1, -1)).
    """
    named_model_params = list(named_model_params)
    low_dim = [(name, param) for name, param in named_model_params if param.ndim <= 1]
    high_dim = [(name, param) for name, param in named_model_params if param.ndim > 1]
    return [(low_dim, (- 1)), (high_dim, (1, (- 1)))]
|
def master_params_to_state_dict(model, param_groups_and_shapes, master_params, use_fp16):
    """
    Build a state dict for `model` whose parameter entries come from
    `master_params` (unflattened per group when `use_fp16`).
    """
    state_dict = model.state_dict()
    if use_fp16:
        for master, (group, _) in zip(master_params, param_groups_and_shapes):
            unflat = unflatten_master_params(group, master.view(-1))
            for (name, _), value in zip(group, unflat):
                assert name in state_dict
                state_dict[name] = value
    else:
        # fp32 path: master params mirror named_parameters() one-to-one.
        for i, (name, _value) in enumerate(model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
    return state_dict
|
def state_dict_to_master_params(model, state_dict, use_fp16):
    """
    Convert a state dict back into master-parameter form: flat fp32 groups
    when `use_fp16`, otherwise the raw tensors in named_parameters() order.
    """
    if use_fp16:
        named = [(name, state_dict[name]) for name, _ in model.named_parameters()]
        return make_master_params(get_param_groups_and_shapes(named))
    return [state_dict[name] for name, _ in model.named_parameters()]
|
def zero_master_grads(master_params):
    """Drop (set to None) the gradient of every master parameter."""
    for parameter in master_params:
        parameter.grad = None
|
def zero_grad(model_params):
    """Detach and zero existing gradients in place; params without grads are skipped."""
    for parameter in model_params:
        if parameter.grad is None:
            continue
        parameter.grad.detach_()
        parameter.grad.zero_()
|
def param_grad_or_zeros(param):
    """Return the parameter's detached gradient data, or zeros when it has no grad."""
    grad = param.grad
    if grad is None:
        return th.zeros_like(param)
    return grad.data.detach()
|
class MixedPrecisionTrainer():
    """Wraps a model for (optional) fp16 training with dynamic loss scaling.

    In fp16 mode the model's parameters are mirrored by flat fp32 "master"
    parameters that the optimizer updates; gradients are computed on a scaled
    loss and un-scaled before the optimizer step.
    """
    def __init__(self, *, model, use_fp16=False, fp16_scale_growth=0.001, initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE):
        self.model = model
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.model_params = list(self.model.parameters())
        self.master_params = self.model_params
        self.param_groups_and_shapes = None
        # Loss scale is tracked as log2: the loss is multiplied by 2**lg_loss_scale.
        self.lg_loss_scale = initial_lg_loss_scale
        if self.use_fp16:
            self.param_groups_and_shapes = get_param_groups_and_shapes(self.model.named_parameters())
            self.master_params = make_master_params(self.param_groups_and_shapes)
            self.model.convert_to_fp16()
    def zero_grad(self):
        # Zeroes the *model* grads; master grads are dropped in the fp16 optimize path.
        zero_grad(self.model_params)
    def backward(self, loss: th.Tensor):
        if self.use_fp16:
            # Scale the loss so small fp16 gradients do not underflow to zero.
            loss_scale = (2 ** self.lg_loss_scale)
            (loss * loss_scale).backward()
        else:
            loss.backward()
    def optimize(self, opt: th.optim.Optimizer):
        if self.use_fp16:
            return self._optimize_fp16(opt)
        else:
            return self._optimize_normal(opt)
    def _optimize_fp16(self, opt: th.optim.Optimizer):
        """One optimizer step under dynamic loss scaling.

        Returns False (and shrinks the loss scale) when gradients overflowed;
        True after a successful step, which also grows the loss scale slightly.
        """
        logger.logkv_mean('lg_loss_scale', self.lg_loss_scale)
        model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
        (grad_norm, param_norm) = self._compute_norms(grad_scale=(2 ** self.lg_loss_scale))
        if check_overflow(grad_norm):
            # NaN/inf gradients: back off the loss scale and skip this step.
            self.lg_loss_scale -= 1
            logger.log(f'Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}')
            zero_master_grads(self.master_params)
            return False
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        # NOTE(review): only master_params[0] is un-scaled here, but there are
        # two master param groups (scalar/vector and matrix). Upstream
        # implementations divide every master grad by the loss scale — this
        # looks like a bug; confirm before relying on fp16 training.
        self.master_params[0].grad.mul_((1.0 / (2 ** self.lg_loss_scale)))
        opt.step()
        zero_master_grads(self.master_params)
        master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
        # Gradually grow the loss scale after each successful step.
        self.lg_loss_scale += self.fp16_scale_growth
        return True
    def _optimize_normal(self, opt: th.optim.Optimizer):
        (grad_norm, param_norm) = self._compute_norms()
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        opt.step()
        return True
    def _compute_norms(self, grad_scale=1.0):
        # Global L2 norms over all master params; the grad norm is reported in
        # un-scaled units (divided by grad_scale).
        grad_norm = 0.0
        param_norm = 0.0
        for p in self.master_params:
            with th.no_grad():
                param_norm += (th.norm(p, p=2, dtype=th.float32).item() ** 2)
                if (p.grad is not None):
                    grad_norm += (th.norm(p.grad, p=2, dtype=th.float32).item() ** 2)
        return ((np.sqrt(grad_norm) / grad_scale), np.sqrt(param_norm))
    def master_params_to_state_dict(self, master_params):
        # Delegates to the module-level helper of the same name.
        return master_params_to_state_dict(self.model, self.param_groups_and_shapes, master_params, self.use_fp16)
    def state_dict_to_master_params(self, state_dict):
        # Delegates to the module-level helper of the same name.
        return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
|
def check_overflow(value):
    """True when `value` is +/-infinity or NaN (NaN compares unequal to itself)."""
    return value == float('inf') or value == -float('inf') or value != value
|
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name ('linear' or 'cosine').

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps. Beta schedules may be added, but
    should not be removed or changed once they are committed, to maintain
    backwards compatibility.
    """
    if schedule_name == 'linear':
        # Scale the DDPM endpoints so the schedule stays comparable at any T.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64)
    if schedule_name == 'cosine':
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos(((t + 0.008) / 1.008) * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f'unknown beta schedule: {schedule_name}')
|
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    betas = [
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ]
    return np.array(betas)
|
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """
    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()     # the model predicts x_0
    EPSILON = enum.auto()     # the model predicts the added noise
|
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    LEARNED = enum.auto()
    FIXED_SMALL = enum.auto()
    FIXED_LARGE = enum.auto()
    LEARNED_RANGE = enum.auto()
|
class LossType(enum.Enum):
    """Diffusion training loss variants."""
    MSE = enum.auto()
    RESCALED_MSE = enum.auto()
    KL = enum.auto()
    RESCALED_KL = enum.auto()
    def is_vb(self):
        """True for the variational-bound (KL-based) losses."""
        return self in (LossType.KL, LossType.RESCALED_KL)
|
class GaussianDiffusion():
    '\n    Utilities for training and sampling diffusion models.\n\n    Ported directly from here, and then adapted over time to further experimentation.\n    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42\n\n    :param betas: a 1-D numpy array of betas for each diffusion timestep,\n                  starting at T and going to 1.\n    :param model_mean_type: a ModelMeanType determining what the model outputs.\n    :param model_var_type: a ModelVarType determining how variance is output.\n    :param loss_type: a LossType determining the loss function to use.\n    :param rescale_timesteps: if True, pass floating point timesteps into the\n                              model so that they are always scaled like in the\n                              original paper (0 to 1000).\n    '
    def __init__(self, *, betas, model_mean_type, model_var_type, loss_type, rescale_timesteps=False):
        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type
        self.rescale_timesteps = rescale_timesteps
        # float64 throughout the schedule math for numerical accuracy.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert (len(betas.shape) == 1), 'betas must be 1-D'
        assert ((betas > 0).all() and (betas <= 1).all())
        self.num_timesteps = int(betas.shape[0])
        alphas = (1.0 - betas)
        # alpha_bar_t for every t, plus shifted copies for t-1 and t+1.
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:(- 1)])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert (self.alphas_cumprod_prev.shape == (self.num_timesteps,))
        # Precomputed coefficients for q(x_t | x_0) and for recovering x_0/eps.
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt((1.0 - self.alphas_cumprod))
        self.log_one_minus_alphas_cumprod = np.log((1.0 - self.alphas_cumprod))
        self.sqrt_recip_alphas_cumprod = np.sqrt((1.0 / self.alphas_cumprod))
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(((1.0 / self.alphas_cumprod) - 1))
        # Coefficients of the true posterior q(x_{t-1} | x_t, x_0).
        self.posterior_variance = ((betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        # The posterior variance is 0 at t=0; reuse the t=1 value so the log
        # below does not produce -inf at the start of the chain.
        self.posterior_log_variance_clipped = np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))
        self.posterior_mean_coef1 = ((betas * np.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        self.posterior_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - self.alphas_cumprod))
    def q_mean_variance(self, x_start, t):
        "\n        Get the distribution q(x_t | x_0).\n\n        :param x_start: the [N x C x ...] tensor of noiseless inputs.\n        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n        :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n        "
        mean = (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = _extract_into_tensor((1.0 - self.alphas_cumprod), t, x_start.shape)
        log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return (mean, variance, log_variance)
    def q_sample(self, x_start, t, noise=None):
        '\n        Diffuse the data for a given number of diffusion steps.\n\n        In other words, sample from q(x_t | x_0).\n\n        :param x_start: the initial data batch.\n        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n        :param noise: if specified, the split-out normal noise.\n        :return: A noisy version of x_start.\n        '
        if (noise is None):
            noise = th.randn_like(x_start)
        assert (noise.shape == x_start.shape)
        # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
        return ((_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))
    def q_posterior_mean_variance(self, x_start, x_t, t):
        '\n        Compute the mean and variance of the diffusion posterior:\n\n            q(x_{t-1} | x_t, x_0)\n\n        '
        assert (x_start.shape == x_t.shape)
        posterior_mean = ((_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (_extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t))
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        assert (posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0])
        return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        "\n        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n        the initial x, x_0.\n\n        :param model: the model, which takes a signal and a batch of timesteps\n                      as input.\n        :param x: the [N x C x ...] tensor at time t.\n        :param t: a 1-D Tensor of timesteps.\n        :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n        :param denoised_fn: if not None, a function which applies to the\n            x_start prediction before it is used to sample. Applies before\n            clip_denoised.\n        :param model_kwargs: if not None, a dict of extra keyword arguments to\n            pass to the model. This can be used for conditioning.\n        :return: a dict with the following keys:\n                 - 'mean': the model mean output.\n                 - 'variance': the model variance output.\n                 - 'log_variance': the log of 'variance'.\n                 - 'pred_xstart': the prediction for x_0.\n        "
        if (model_kwargs is None):
            model_kwargs = {}
        (B, C) = x.shape[:2]
        assert (t.shape == (B,))
        model_output = model(x, self._scale_timesteps(t), **model_kwargs)
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            # The model emits 2C channels: mean prediction + variance values.
            assert (model_output.shape == (B, (C * 2), *x.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            if (self.model_var_type == ModelVarType.LEARNED):
                model_log_variance = model_var_values
                model_variance = th.exp(model_log_variance)
            else:
                # LEARNED_RANGE: model_var_values in [-1, 1] interpolates between
                # the clipped posterior log-variance (min) and log(beta) (max).
                min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
                max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                frac = ((model_var_values + 1) / 2)
                model_log_variance = ((frac * max_log) + ((1 - frac) * min_log))
                model_variance = th.exp(model_log_variance)
        else:
            # FIXED_LARGE reuses the clipped t=0 trick for log-variance at t=0.
            (model_variance, model_log_variance) = {ModelVarType.FIXED_LARGE: (np.append(self.posterior_variance[1], self.betas[1:]), np.log(np.append(self.posterior_variance[1], self.betas[1:]))), ModelVarType.FIXED_SMALL: (self.posterior_variance, self.posterior_log_variance_clipped)}[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
        def process_xstart(x):
            # Apply denoised_fn (if any) before optional clamping to [-1, 1].
            if (denoised_fn is not None):
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp((- 1), 1)
            return x
        if (self.model_mean_type == ModelMeanType.PREVIOUS_X):
            pred_xstart = process_xstart(self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output))
            model_mean = model_output
        elif (self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]):
            if (self.model_mean_type == ModelMeanType.START_X):
                pred_xstart = process_xstart(model_output)
            else:
                pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
            # Mean of q(x_{t-1} | x_t, x_0) evaluated at the predicted x_0.
            (model_mean, _, _) = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
        else:
            raise NotImplementedError(self.model_mean_type)
        assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
        return {'mean': model_mean, 'variance': model_variance, 'log_variance': model_log_variance, 'pred_xstart': pred_xstart}
    def _predict_xstart_from_eps(self, x_t, t, eps):
        # Invert q_sample: x_0 = x_t / sqrt(alpha_bar) - sqrt(1/alpha_bar - 1) * eps
        assert (x_t.shape == eps.shape)
        return ((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (_extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps))
    def _predict_xstart_from_xprev(self, x_t, t, xprev):
        # Solve the posterior-mean equation for x_0 given x_{t-1} (xprev).
        assert (x_t.shape == xprev.shape)
        return ((_extract_into_tensor((1.0 / self.posterior_mean_coef1), t, x_t.shape) * xprev) - (_extract_into_tensor((self.posterior_mean_coef2 / self.posterior_mean_coef1), t, x_t.shape) * x_t))
    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        # Invert q_sample for the noise instead of x_0.
        return (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - pred_xstart) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))
    def _scale_timesteps(self, t):
        # Optionally rescale integer timesteps into the 0..1000 float range.
        if self.rescale_timesteps:
            return (t.float() * (1000.0 / self.num_timesteps))
        return t
    def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        '\n        Compute the mean for the previous step, given a function cond_fn that\n        computes the gradient of a conditional log probability with respect to\n        x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n        condition on y.\n\n        This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n        '
        gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
        new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
        return new_mean
    def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        "\n        Compute what the p_mean_variance output would have been, should the\n        model's score function be conditioned by cond_fn.\n\n        See condition_mean() for details on cond_fn.\n\n        Unlike condition_mean(), this instead uses the conditioning strategy\n        from Song et al (2020).\n        "
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        # Shift the implied eps by the (scaled) conditioning gradient, then
        # recompute x_0 and the posterior mean from the new eps.
        eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
        eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs)))
        out = p_mean_var.copy()
        out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
        (out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
        return out
    def p_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
        "\n        Sample x_{t-1} from the model at the given timestep.\n\n        :param model: the model to sample from.\n        :param x: the current tensor at x_{t-1}.\n        :param t: the value of t, starting at 0 for the first diffusion step.\n        :param clip_denoised: if True, clip the x_start prediction to [-1, 1].\n        :param denoised_fn: if not None, a function which applies to the\n            x_start prediction before it is used to sample.\n        :param cond_fn: if not None, this is a gradient function that acts\n                        similarly to the model.\n        :param model_kwargs: if not None, a dict of extra keyword arguments to\n            pass to the model. This can be used for conditioning.\n        :return: a dict containing the following keys:\n                 - 'sample': a random sample from the model.\n                 - 'pred_xstart': a prediction of x_0.\n        "
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        noise = th.randn_like(x)
        # Zero out the noise term when t == 0 (last denoising step).
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        if (cond_fn is not None):
            out['mean'] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
        sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}
    def p_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
        "\n        Generate samples from the model.\n\n        :param model: the model module.\n        :param shape: the shape of the samples, (N, C, H, W).\n        :param noise: if specified, the noise from the encoder to sample.\n                      Should be of the same shape as `shape`.\n        :param clip_denoised: if True, clip x_start predictions to [-1, 1].\n        :param denoised_fn: if not None, a function which applies to the\n            x_start prediction before it is used to sample.\n        :param cond_fn: if not None, this is a gradient function that acts\n                        similarly to the model.\n        :param model_kwargs: if not None, a dict of extra keyword arguments to\n            pass to the model. This can be used for conditioning.\n        :param device: if specified, the device to create the samples on.\n                       If not specified, use a model parameter's device.\n        :param progress: if True, show a tqdm progress bar.\n        :return: a non-differentiable batch of samples.\n        "
        final = None
        # Iterate the progressive sampler to completion; keep only the last step.
        for sample in self.p_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress):
            final = sample
        return final['sample']
    def p_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
        '\n        Generate samples from the model and yield intermediate samples from\n        each timestep of diffusion.\n\n        Arguments are the same as p_sample_loop().\n        Returns a generator over dicts, where each dict is the return value of\n        p_sample().\n        '
        if (device is None):
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if (noise is not None):
            img = noise
        else:
            img = th.randn(*shape, device=device)
        # Walk timesteps from T-1 down to 0.
        indices = list(range(self.num_timesteps))[::(- 1)]
        if progress:
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor(([i] * shape[0]), device=device)
            with th.no_grad():
                out = self.p_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs)
                (yield out)
                img = out['sample']
    def ddim_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
        '\n        Sample x_{t-1} from the model using DDIM.\n\n        Same usage as p_sample().\n        '
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        if (cond_fn is not None):
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
        # eta scales the stochastic part; eta == 0 gives deterministic DDIM.
        sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
        noise = th.randn_like(x)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
        # No noise on the final step (t == 0).
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        sample = (mean_pred + ((nonzero_mask * sigma) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}
    def ddim_reverse_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, eta=0.0):
        '\n        Sample x_{t+1} from the model using DDIM reverse ODE.\n        '
        assert (eta == 0.0), 'Reverse ODE only for deterministic path'
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        eps = (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x) - out['pred_xstart']) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape))
        alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_next)) + (th.sqrt((1 - alpha_bar_next)) * eps))
        return {'sample': mean_pred, 'pred_xstart': out['pred_xstart']}
    def ddim_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
        '\n        Generate samples from the model using DDIM.\n\n        Same usage as p_sample_loop().\n        '
        final = None
        for sample in self.ddim_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, eta=eta):
            final = sample
        return final['sample']
    def ddim_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
        '\n        Use DDIM to sample from the model and yield intermediate samples from\n        each timestep of DDIM.\n\n        Same usage as p_sample_loop_progressive().\n        '
        if (device is None):
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if (noise is not None):
            img = noise
        else:
            img = th.randn(*shape, device=device)
        indices = list(range(self.num_timesteps))[::(- 1)]
        if progress:
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor(([i] * shape[0]), device=device)
            with th.no_grad():
                out = self.ddim_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, eta=eta)
                (yield out)
                img = out['sample']
    def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
        "\n        Get a term for the variational lower-bound.\n\n        The resulting units are bits (rather than nats, as one might expect).\n        This allows for comparison to other papers.\n\n        :return: a dict with the following keys:\n                 - 'output': a shape [N] tensor of NLLs or KLs.\n                 - 'pred_xstart': the x_0 predictions.\n        "
        (true_mean, _, true_log_variance_clipped) = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)
        out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
        # Divide by log(2) to convert nats -> bits.
        kl = normal_kl(true_mean, true_log_variance_clipped, out['mean'], out['log_variance'])
        kl = (mean_flat(kl) / np.log(2.0))
        decoder_nll = (- discretized_gaussian_log_likelihood(x_start, means=out['mean'], log_scales=(0.5 * out['log_variance'])))
        assert (decoder_nll.shape == x_start.shape)
        decoder_nll = (mean_flat(decoder_nll) / np.log(2.0))
        # At t == 0 use the discretized decoder NLL; otherwise the KL term.
        output = th.where((t == 0), decoder_nll, kl)
        return {'output': output, 'pred_xstart': out['pred_xstart']}
    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, temp=1):
        '\n        Compute training losses for a single timestep.\n\n        :param model: the model to evaluate loss on.\n        :param x_start: the [N x C x ...] tensor of inputs.\n        :param t: a batch of timestep indices.\n        :param model_kwargs: if not None, a dict of extra keyword arguments to\n            pass to the model. This can be used for conditioning.\n        :param noise: if specified, the specific Gaussian noise to try to remove.\n        :param temp: temperature multiplier applied to the model output in the\n            cross-entropy branch used for the KL loss types.\n        :return: a dict with the key "loss" containing a tensor of shape [N].\n                 Some mean or variance settings may also have other keys.\n        '
        if (model_kwargs is None):
            model_kwargs = {}
        if (noise is None):
            noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start, t, noise=noise)
        terms = {}
        model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            (B, C) = x_t.shape[:2]
            assert (model_output.shape == (B, (C * 2), *x_t.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            # Learn the variance via the VB term, but freeze the mean prediction
            # so this term does not affect the mean's gradient.
            frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
            terms['vb'] = self._vb_terms_bpd(model=(lambda *args, r=frozen_out: r), x_start=x_start, x_t=x_t, t=t, clip_denoised=False)['output']
            if (self.loss_type == LossType.RESCALED_MSE):
                terms['vb'] *= (self.num_timesteps / 1000.0)
        target = {ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0], ModelMeanType.START_X: x_start, ModelMeanType.EPSILON: noise}[self.model_mean_type]
        assert (model_output.shape == target.shape == x_start.shape)
        if ((self.loss_type == LossType.KL) or (self.loss_type == LossType.RESCALED_KL)):
            # NOTE(review): this branch sets only 'kl_loss' and never 'loss';
            # callers reading terms['loss'] will KeyError here — confirm intended.
            terms['kl_loss'] = F.cross_entropy((model_output * temp), th.zeros(model_output.size(0), dtype=th.long).to(model_output.device))
        elif ((self.loss_type == LossType.MSE) or (self.loss_type == LossType.RESCALED_MSE)):
            terms['mse'] = mean_flat(((target - model_output) ** 2))
            if ('vb' in terms):
                terms['loss'] = (terms['mse'] + terms['vb'])
            else:
                terms['loss'] = terms['mse']
        else:
            raise NotImplementedError(self.loss_type)
        return terms
    def _prior_bpd(self, x_start):
        "\n        Get the prior KL term for the variational lower-bound, measured in\n        bits-per-dim.\n\n        This term can't be optimized, as it only depends on the encoder.\n\n        :param x_start: the [N x C x ...] tensor of inputs.\n        :return: a batch of [N] KL values (in bits), one per batch element.\n        "
        batch_size = x_start.shape[0]
        t = th.tensor(([(self.num_timesteps - 1)] * batch_size), device=x_start.device)
        (qt_mean, _, qt_log_variance) = self.q_mean_variance(x_start, t)
        # KL against the standard normal prior, converted to bits.
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return (mean_flat(kl_prior) / np.log(2.0))
    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
        '\n        Compute the entire variational lower-bound, measured in bits-per-dim,\n        as well as other related quantities.\n\n        :param model: the model to evaluate loss on.\n        :param x_start: the [N x C x ...] tensor of inputs.\n        :param clip_denoised: if True, clip denoised samples.\n        :param model_kwargs: if not None, a dict of extra keyword arguments to\n            pass to the model. This can be used for conditioning.\n\n        :return: a dict containing the following keys:\n                 - total_bpd: the total variational lower-bound, per batch element.\n                 - prior_bpd: the prior term in the lower-bound.\n                 - vb: an [N x T] tensor of terms in the lower-bound.\n                 - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.\n                 - mse: an [N x T] tensor of epsilon MSEs for each timestep.\n        '
        device = x_start.device
        batch_size = x_start.shape[0]
        vb = []
        xstart_mse = []
        mse = []
        # Evaluate every timestep from T-1 down to 0 with fresh noise.
        for t in list(range(self.num_timesteps))[::(- 1)]:
            t_batch = th.tensor(([t] * batch_size), device=device)
            noise = th.randn_like(x_start)
            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
            with th.no_grad():
                out = self._vb_terms_bpd(model, x_start=x_start, x_t=x_t, t=t_batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
            vb.append(out['output'])
            xstart_mse.append(mean_flat(((out['pred_xstart'] - x_start) ** 2)))
            eps = self._predict_eps_from_xstart(x_t, t_batch, out['pred_xstart'])
            mse.append(mean_flat(((eps - noise) ** 2)))
        vb = th.stack(vb, dim=1)
        xstart_mse = th.stack(xstart_mse, dim=1)
        mse = th.stack(mse, dim=1)
        prior_bpd = self._prior_bpd(x_start)
        total_bpd = (vb.sum(dim=1) + prior_bpd)
        return {'total_bpd': total_bpd, 'prior_bpd': prior_bpd, 'vb': vb, 'xstart_mse': xstart_mse, 'mse': mse}
|
def _extract_into_tensor(arr, timesteps, broadcast_shape):
'\n Extract values from a 1-D numpy array for a batch of indices.\n\n :param arr: the 1-D numpy array.\n :param timesteps: a tensor of indices into the array to extract.\n :param broadcast_shape: a larger shape of K dimensions with the batch\n dimension equal to the length of timesteps.\n :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.\n '
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while (len(res.shape) < len(broadcast_shape)):
res = res[(..., None)]
return res.expand(broadcast_shape)
|
class KVWriter(object):
    """Interface for sinks that can write a dict of key/value diagnostics."""
    def writekvs(self, kvs):
        """Write the key/value mapping ``kvs``; subclasses must override."""
        raise NotImplementedError
|
class SeqWriter(object):
    """Interface for sinks that can write a sequence of strings."""
    def writeseq(self, seq):
        """Write the sequence of strings ``seq``; subclasses must override."""
        raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
    """Writes diagnostics as a human-readable aligned table (and raw sequences)."""

    def __init__(self, filename_or_file):
        # Accept either a path (we own and must close the handle) or an
        # already-open file-like object (caller owns it).
        if not isinstance(filename_or_file, str):
            assert hasattr(filename_or_file, 'read'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False
        else:
            self.file = open(filename_or_file, 'wt')
            self.own_file = True

    def writekvs(self, kvs):
        """Render ``kvs`` as a boxed two-column table."""
        # Render each value, truncating long keys/values for alignment.
        key2str = {}
        for key, val in sorted(kvs.items()):
            rendered = ('%-8.3g' % val) if hasattr(val, '__float__') else str(val)
            key2str[self._truncate(key)] = self._truncate(rendered)
        if not key2str:
            print('WARNING: tried to write empty key-value dict')
            return
        keywidth = max(map(len, key2str.keys()))
        valwidth = max(map(len, key2str.values()))
        dashes = '-' * (keywidth + valwidth + 7)
        lines = [dashes]
        for key, val in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
            lines.append('| %s%s | %s%s |' % (key, ' ' * (keywidth - len(key)), val, ' ' * (valwidth - len(val))))
        lines.append(dashes)
        self.file.write('\n'.join(lines) + '\n')
        self.file.flush()

    def _truncate(self, s):
        # Cap strings at 30 characters, marking elision with '...'.
        maxlen = 30
        if len(s) <= maxlen:
            return s
        return s[:maxlen - 3] + '...'

    def writeseq(self, seq):
        """Write the elements of ``seq`` space-separated on one line."""
        elems = list(seq)
        last = len(elems) - 1
        for idx, elem in enumerate(elems):
            self.file.write(elem)
            if idx < last:
                self.file.write(' ')
        self.file.write('\n')
        self.file.flush()

    def close(self):
        # Only close handles this writer opened itself.
        if self.own_file:
            self.file.close()
|
class JSONOutputFormat(KVWriter):
    """Writes each diagnostics dict as a single JSON object per line."""

    def __init__(self, filename):
        self.file = open(filename, 'wt')

    def writekvs(self, kvs):
        """Serialize ``kvs`` to one JSON line, coercing numpy scalars to floats."""
        for key in sorted(kvs):
            value = kvs[key]
            # Anything with a dtype (numpy scalar) is not JSON-serializable as-is.
            if hasattr(value, 'dtype'):
                kvs[key] = float(value)
        self.file.write(json.dumps(kvs) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()
|
class CSVOutputFormat(KVWriter):
    """Appends diagnostics rows to a CSV file, rewriting the header (and
    padding existing rows) whenever previously-unseen keys appear."""

    def __init__(self, filename):
        # 'w+t' so the file can be re-read when the header must be rewritten.
        self.file = open(filename, 'w+t')
        self.keys = []
        self.sep = ','

    def writekvs(self, kvs):
        """Write one CSV row for ``kvs``, extending the schema if needed."""
        new_keys = sorted(kvs.keys() - self.keys)
        if new_keys:
            self.keys.extend(new_keys)
            # Re-read the whole file, rewrite the header, and pad old rows
            # with empty trailing columns for the new keys.
            self.file.seek(0)
            old_lines = self.file.readlines()
            self.file.seek(0)
            self.file.write(','.join(self.keys))
            self.file.write('\n')
            for line in old_lines[1:]:
                self.file.write(line[:-1])
                self.file.write(self.sep * len(new_keys))
                self.file.write('\n')
        # Emit the current row in schema order; missing keys become blanks.
        cells = []
        for key in self.keys:
            value = kvs.get(key)
            cells.append('' if value is None else str(value))
        self.file.write(','.join(cells))
        self.file.write('\n')
        self.file.flush()

    def close(self):
        self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
    "\n    Dumps key/value pairs into TensorBoard's numeric format.\n    "
    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        # Monotonically increasing global step attached to every event.
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # Deferred imports so tensorflow is only required when this format is used.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
    def writekvs(self, kvs):
        """Write all values in ``kvs`` as scalar summaries at the current step."""
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1
    def close(self):
        # Idempotent: subsequent calls are no-ops once the writer is released.
        if self.writer:
            self.writer.Close()
            self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
    """
    Build the output writer for the given format string, logging under ev_dir.

    :param format: one of 'stdout', 'log', 'json', 'csv', 'tensorboard'.
    :param ev_dir: directory that file-backed writers log into (created if absent).
    :param log_suffix: suffix appended to each output file's base name.
    :raises ValueError: if the format string is not recognized.
    """
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    if format == 'log':
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
    if format == 'json':
        return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
    if format == 'csv':
        return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
    if format == 'tensorboard':
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
    raise ValueError('Unknown format specified: %s' % (format,))
|
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)
|
def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    get_current().logkv_mean(key, val)
|
def logkvs(d):
    """
    Log a dictionary of key-value pairs
    """
    for (k, v) in d.items():
        logkv(k, v)
|
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration
    """
    return get_current().dumpkvs()
|
def getkvs():
    """Return the current logger's accumulated key/value diagnostics."""
    return get_current().name2val
|
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
|
def debug(*args):
    """Log ``args`` at DEBUG verbosity."""
    log(*args, level=DEBUG)
|
def info(*args):
    """Log ``args`` at INFO verbosity."""
    log(*args, level=INFO)
|
def warn(*args):
    """Log ``args`` at WARN verbosity."""
    log(*args, level=WARN)
|
def error(*args):
    """Log ``args`` at ERROR verbosity."""
    log(*args, level=ERROR)
|
def set_level(level):
    """
    Set logging threshold on current logger.
    """
    get_current().set_level(level)
|
def set_comm(comm):
    """Set the MPI-style communicator used to average stats across processes."""
    get_current().set_comm(comm)
|
def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return get_current().get_dir()
|
@contextmanager
def profile_kv(scopename):
    """
    Context manager that adds the elapsed wall-clock time of its body to the
    current logger's ``wait_<scopename>`` diagnostic (accumulating across uses).
    """
    logkey = ('wait_' + scopename)
    tstart = time.time()
    try:
        (yield)
    finally:
        # Accumulate even if the body raised; name2val is a defaultdict(float).
        get_current().name2val[logkey] += (time.time() - tstart)
|
def profile(n):
    """
    Decorator that records the wall-clock time spent inside the decorated
    function under the ``wait_<n>`` diagnostic (via profile_kv).

    Usage:
    @profile("my_func")
    def my_func(): code
    """
    def decorator_with_name(func):
        # functools.wraps preserves the wrapped function's __name__/__doc__,
        # which the original wrapper lost.
        from functools import wraps

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
|
def get_current():
    """Return the active Logger, lazily configuring a default one on first use."""
    if (Logger.CURRENT is None):
        _configure_default_logger()
    return Logger.CURRENT
|
class Logger(object):
    """Accumulates key/value diagnostics and dispatches them to output writers."""
    # DEFAULT: logger created on first use; restored by reset().
    DEFAULT = None
    # CURRENT: the logger that module-level helpers (logkv, dumpkvs, ...) use.
    CURRENT = None
    def __init__(self, dir, output_formats, comm=None):
        self.name2val = defaultdict(float)
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm
    def logkv(self, key, val):
        """Record a value; repeated calls for the same key keep the last value."""
        self.name2val[key] = val
    def logkv_mean(self, key, val):
        """Record a value, maintaining a running mean across calls for the key."""
        (oldval, cnt) = (self.name2val[key], self.name2cnt[key])
        self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
        self.name2cnt[key] = (cnt + 1)
    def dumpkvs(self):
        """Flush accumulated diagnostics to all KV writers and clear them."""
        if (self.comm is None):
            d = self.name2val
        else:
            # Average values across the communicator, weighted by their counts.
            d = mpi_weighted_mean(self.comm, {name: (val, self.name2cnt.get(name, 1)) for (name, val) in self.name2val.items()})
            if (self.comm.rank != 0):
                d['dummy'] = 1
        out = d.copy()
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out
    def log(self, *args, level=INFO):
        """Write args to all sequence writers if level passes the threshold."""
        if (self.level <= level):
            self._do_log(args)
    def set_level(self, level):
        self.level = level
    def set_comm(self, comm):
        self.comm = comm
    def get_dir(self):
        return self.dir
    def close(self):
        for fmt in self.output_formats:
            fmt.close()
    def _do_log(self, args):
        # Only SeqWriter-capable formats receive free-form log lines.
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
|
def get_rank_without_mpi_import():
    """
    Return this process's MPI rank from launcher environment variables,
    without importing mpi4py. Falls back to 0 when no launcher is detected.
    """
    # PMI_RANK is set by MPICH-style launchers, OMPI_COMM_WORLD_RANK by OpenMPI.
    for varname in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(varname)
        if value is not None:
            return int(value)
    return 0
|
def mpi_weighted_mean(comm, local_name2valcount):
    """
    Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
    Perform a weighted average over dicts that are each on a different node
    Input: local_name2valcount: dict mapping key -> (value, count)
    Returns: key -> mean (non-empty only on rank 0; other ranks get {})
    """
    all_name2valcount = comm.gather(local_name2valcount)
    if comm.rank != 0:
        return {}
    name2sum = defaultdict(float)
    name2count = defaultdict(float)
    for n2vc in all_name2valcount:
        for name, (val, count) in n2vc.items():
            try:
                numeric = float(val)
            except ValueError:
                # Non-numeric values are skipped (with a warning) rather than
                # aborting the whole reduction.
                if comm.rank == 0:
                    warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
            else:
                name2sum[name] += numeric * count
                name2count[name] += count
    return {name: name2sum[name] / name2count[name] for name in name2sum}
|
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    If comm is provided, average all numerical stats across that comm
    """
    # Resolution order for the log dir: explicit arg > $OPENAI_LOGDIR > temp dir.
    if (dir is None):
        dir = os.getenv('OPENAI_LOGDIR')
    if (dir is None):
        dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)
    rank = get_rank_without_mpi_import()
    # Non-zero ranks log to per-rank files so outputs do not collide.
    if (rank > 0):
        log_suffix = (log_suffix + ('-rank%03i' % rank))
    if (format_strs is None):
        if (rank == 0):
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    # Drop empty entries (e.g. from a trailing comma in the env var).
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log(('Logging to %s' % dir))
|
def _configure_default_logger():
    """Create the default logger and remember it so reset() can restore it."""
    configure()
    Logger.DEFAULT = Logger.CURRENT
|
def reset():
    """Close the current logger (if it is not the default) and restore the default."""
    if (Logger.CURRENT is not Logger.DEFAULT):
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
|
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """
    Context manager that installs a freshly configured logger for the duration
    of the block, then closes it and restores the previous logger.
    """
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        (yield)
    finally:
        # Always restore, even if the body raised.
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
|
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Locate any tensor argument so scalar log-variances can be promoted
    # onto the same device/dtype.
    tensor = next((obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)), None)
    assert tensor is not None, 'at least one argument must be a Tensor'
    logvar1, logvar2 = [x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) for x in (logvar1, logvar2)]
    var_ratio = th.exp(logvar1 - logvar2)
    scaled_sq_diff = ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    return 0.5 * (-1.0 + logvar2 - logvar1 + var_ratio + scaled_sq_diff)
|
def approx_standard_normal_cdf(x):
    """A fast tanh-based approximation of the cumulative distribution
    function of the standard normal.
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
|
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
        rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    inv_stdv = th.exp(-log_scales)
    centered = x - means
    # CDF evaluated at the upper and lower edges of each 1/255-wide bin.
    cdf_plus = approx_standard_normal_cdf(inv_stdv * (centered + 1.0 / 255.0))
    cdf_min = approx_standard_normal_cdf(inv_stdv * (centered - 1.0 / 255.0))
    # Clamp before log to avoid -inf from numerically-zero probabilities.
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    log_cdf_delta = th.log((cdf_plus - cdf_min).clamp(min=1e-12))
    # The edge bins absorb the entire tail mass below -1 / above +1.
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, log_cdf_delta),
    )
    assert log_probs.shape == x.shape
    return log_probs
|
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    :raises ValueError: if no integer stride yields the requested DDIM step
                        count, or a section is smaller than its step count.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith('ddim'):
            desired_count = int(section_counts[len('ddim'):])
            for stride in range(1, num_timesteps):
                if len(range(0, num_timesteps, stride)) == desired_count:
                    return set(range(0, num_timesteps, stride))
            # BUG FIX: report the requested DDIM step count, not the total
            # number of original timesteps, in the error message.
            raise ValueError(
                f'cannot create exactly {desired_count} steps with an integer stride'
            )
        section_counts = [int(x) for x in section_counts.split(',')]
    # Split the original process into len(section_counts) near-equal sections;
    # the first `extra` sections get one extra timestep.
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(f'cannot divide section of {size} steps into {section_count}')
        # Fractional stride so the first and last steps of the section are
        # always included (when section_count > 1).
        if section_count <= 1:
            frac_stride = 1
        else:
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
|
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
        original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # Maps each new (compacted) timestep index to its index in the
        # original process; filled below in increasing order of i.
        self.timestep_map = []
        self.original_num_steps = len(kwargs['betas'])
        # Build the base process once just to read its alphas_cumprod.
        base_diffusion = GaussianDiffusion(**kwargs)
        last_alpha_cumprod = 1.0
        new_betas = []
        for (i, alpha_cumprod) in enumerate(base_diffusion.alphas_cumprod):
            if (i in self.use_timesteps):
                # Pick beta so the retained steps reproduce the same
                # cumulative alpha products as the base process.
                new_betas.append((1 - (alpha_cumprod / last_alpha_cumprod)))
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs['betas'] = np.array(new_betas)
        # NOTE(review): super().__init__ presumably sets rescale_timesteps
        # (read in _wrap_model below) from kwargs — confirm in base class.
        super().__init__(**kwargs)
    def p_mean_variance(self, model, *args, **kwargs):
        # Delegate to the base class with the model wrapped so spaced
        # timestep indices are mapped back onto the original process.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(self, model, *args, **kwargs):
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
    def _wrap_model(self, model):
        # Avoid double-wrapping if the model was already wrapped.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.rescale_timesteps, self.original_num_steps)
    def _scale_timesteps(self, t):
        # Scaling is handled by _WrappedModel instead, so pass through.
        return t
|
class _WrappedModel():
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = (new_ts.float() * (1000.0 / self.original_num_steps))
return self.model(x, new_ts, **kwargs)
|
def compress(paras):
    """Re-encode one video with ffmpeg: scale the short side to 224 (keeping
    aspect ratio, even dimensions) and resample to 3 fps.

    Takes a single tuple argument so it can be used with multiprocessing map.

    :param paras: (input_video_path, output_video_path) tuple.
    :return: the ffmpeg return code (0 on success). Failures are not raised,
        matching the original best-effort behavior for batch jobs.
    """
    input_video_path, output_video_path = paras
    command = [
        'ffmpeg', '-y',
        '-i', input_video_path,
        '-filter:v',
        "scale='if(gt(a,1),trunc(oh*a/2)*2,224)':'if(gt(a,1),224,trunc(ow*a/2)*2)'",
        '-map', '0:v',
        '-r', '3',
        output_video_path,
    ]
    # subprocess.run replaces the Popen/communicate/poll dance; the original
    # `except Exception as e: raise e` was a no-op and has been removed.
    proc = subprocess.run(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return proc.returncode
|
def prepare_input_output_pairs(input_root, output_root):
    """Collect the (input, output) video path pairs still needing processing.

    Walks input_root recursively; a file is skipped when a non-empty output
    with the same basename already exists directly under output_root.

    :param input_root: directory tree containing source videos.
    :param output_root: flat directory for processed videos.
    :returns: two parallel lists (input paths, output paths).
    """
    pending_inputs = []
    pending_outputs = []
    for root, _dirs, file_names in os.walk(input_root):
        for name in file_names:
            destination = os.path.join(output_root, name)
            already_done = os.path.exists(destination) and os.path.getsize(destination) > 0
            if not already_done:
                pending_inputs.append(os.path.join(root, name))
                pending_outputs.append(destination)
    return (pending_inputs, pending_outputs)
|
class CosEMA(nn.Module):
    """Exponential moving average with cosine-annealed decay, as in BYOL.

    :param total_steps: number of steps over which the decay anneals.
    :param base_decay: the base ema decay used to modulate.
    """

    def __init__(self, total_steps, base_decay=0.996):
        super(CosEMA, self).__init__()
        self.step = 0
        self.total_steps = total_steps
        self.base_decay = base_decay
        # Lazily initialized to zeros on the first forward pass.
        self.register_buffer('mean', None)

    def forward(self, x):
        """Update the internal running mean from x (training mode only) and
        return x itself unchanged.

        :param x: input tensor
        :returns: the same input tensor [tracked internally]
        :rtype: torch.Tensor
        """
        if self.mean is None:
            self.mean = torch.zeros_like(x)
        if self.training:
            # Anneal decay from base_decay toward 1 over total_steps.
            cosine_term = np.cos((np.pi * self.step) / self.total_steps) + 1
            decay = 1 - ((1 - self.base_decay) * cosine_term) / 2.0
            self.mean = (1 - decay) * x.detach() + decay * self.mean
            self.step += 1
        return x
|
class BYOL(nn.Module):
    """Simple BYOL implementation.

    NOTE(review): construction reads the module-level ``args`` namespace
    (``args.arch``, ``args.head_latent_size``) — confirm it is set first.
    """
    def __init__(self, base_network_output_size, projection_output_size, classifier_output_size, total_training_steps, base_decay=0.996):
        """BYOL model.

        :param base_network_output_size: output-size of resnet50 embedding
        :param projection_output_size: output size of projection and prediction heads
        :param classifier_output_size: number of classes in classifier problem
        :param total_training_steps: total steps for a single training epoch
        :param base_decay: the decay for the target network
        :returns: BYOL object
        :rtype: nn.Module

        """
        super(BYOL, self).__init__()
        self.base_network_output_size = base_network_output_size
        # Backbone looked up by name (presumably torchvision models — TODO
        # confirm) with its final classification layer stripped off.
        model_fn = models.__dict__[args.arch]
        self.base_network = nn.Sequential(*list(model_fn(pretrained=False).children())[:(- 1)])
        # Projection MLP applied to backbone features.
        self.head = nn.Sequential(nn.Linear(base_network_output_size, args.head_latent_size), nn.BatchNorm1d(args.head_latent_size), nn.ReLU(), nn.Linear(args.head_latent_size, projection_output_size))
        # Predictor MLP applied on top of the projection (online network only).
        self.predictor = nn.Sequential(nn.Linear(projection_output_size, args.head_latent_size), nn.BatchNorm1d(args.head_latent_size), nn.ReLU(), nn.Linear(args.head_latent_size, projection_output_size))
        # Linear probe trained on detached representations (see forward).
        self.linear_classifier = nn.Linear(base_network_output_size, classifier_output_size)
        # Target network is an EMA over this module's flattened parameter
        # vector (backbone + heads + classifier), not a separate copy.
        self.target_network = CosEMA(total_training_steps, base_decay)
        # Prime the EMA buffer with the initial parameter vector.
        self.target_network(nn.utils.parameters_to_vector(self.parameters()))
    def target_prediction(self, augmentation2):
        """Produce a prediction using the target network.

        Temporarily swaps the EMA parameter vector into this module, runs
        ``prediction``, then restores the original (online) parameters.

        :param augmentation2: the second augmentation
        :returns: the same outputs as prediction
        :rtype: torch.Tensor, torch.Tensor, torch.Tensor

        """
        mean = self.target_network.mean
        original_params = nn.utils.parameters_to_vector(self.parameters())
        nn.utils.vector_to_parameters(mean, self.parameters())
        preds = self.prediction(augmentation2)
        nn.utils.vector_to_parameters(original_params, self.parameters())
        return preds
    def prediction(self, augmentation):
        """Simple helper to project a single augmentation

        :param augmentation: a single data augmentation
        :returns: representation, projection and prediction
        :rtype: torch.Tensor, torch.Tensor, torch.Tensor

        """
        representation = self.base_network(augmentation).view((- 1), self.base_network_output_size)
        projection = self.head(representation)
        prediction = self.predictor(projection)
        return (representation, projection, prediction)
    def forward(self, augmentation1, augmentation2):
        """Returns the online and target network representations, projections and predictions."""
        (online_representation1, online_projection1, online_prediction1) = self.prediction(augmentation1)
        (online_representation2, online_projection2, online_prediction2) = self.prediction(augmentation2)
        (target_representation1, target_projection1, target_prediction1) = self.target_prediction(augmentation1)
        (target_representation2, target_projection2, target_prediction2) = self.target_prediction(augmentation2)
        # During training both views feed the linear probe; at eval only view 1.
        repr_to_classifier = (torch.cat([online_representation1, online_representation2], 0) if self.training else online_representation1)
        # detach() keeps the probe from backpropagating into the backbone.
        linear_preds = self.linear_classifier(repr_to_classifier.clone().detach())
        # Advance the EMA of the parameter vector (CosEMA only updates when
        # self.training is True).
        self.target_network(nn.utils.parameters_to_vector(self.parameters()))
        return {'linear_preds': linear_preds, 'online_representation1': online_representation1, 'online_projection1': online_projection1, 'online_prediction1': online_prediction1, 'online_representation2': online_representation2, 'online_projection2': online_projection2, 'online_prediction2': online_prediction2, 'target_representation1': target_representation1, 'target_projection1': target_projection1, 'target_prediction1': target_prediction1, 'target_representation2': target_representation2, 'target_projection2': target_projection2, 'target_prediction2': target_prediction2}
|
def build_lr_schedule(optimizer, last_epoch=(- 1)):
    """Attach a learning-rate scheduler (selected via global args) to the optimizer.

    :param optimizer: nn.Optimizer
    :param last_epoch: index of the last epoch when resuming (-1 for fresh runs)
    :returns: scheduler
    :rtype: optim.lr_scheduler
    """
    schedule_name = args.lr_update_schedule
    if schedule_name == 'fixed':
        sched = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 1.0, last_epoch=last_epoch)
    elif schedule_name == 'cosine':
        # Anneal over the post-warmup portion of training only.
        sched = optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.epochs - args.warmup, last_epoch=last_epoch)
    else:
        raise NotImplementedError('lr scheduler {} not implemented'.format(schedule_name))
    if args.warmup > 0:
        # Chain a linear warmup phase in front of the base schedule.
        warmup = scheduler.LinearWarmup(optimizer, warmup_steps=args.warmup, last_epoch=last_epoch)
        sched = scheduler.Scheduler(sched, warmup)
    return sched
|
def build_optimizer(model, last_epoch=(- 1)):
    """Build the optimizer (and its lr schedule) for a model, driven by global args.

    :param model: the model whose parameters will be optimized
    :param last_epoch: index of the last epoch when resuming (-1 for fresh runs)
    :returns: (optimizer, scheduler) pair wrapping the model's parameters
    :rtype: tuple
    """
    optim_map = {
        'rmsprop': optim.RMSprop,
        'adam': optim.Adam,
        'adadelta': optim.Adadelta,
        'sgd': optim.SGD,
        'momentum': functools.partial(optim.SGD, momentum=0.9),
        'lbfgs': optim.LBFGS,
    }
    params_to_optimize = layers.add_weight_decay(model, args.weight_decay)
    full_opt_name = args.optimizer.lower().strip()
    is_lars = 'lars' in full_opt_name
    if full_opt_name == 'lamb':
        assert args.half, 'Need fp16 precision to use Apex FusedLAMB.'
        optim_map['lamb'] = optimizers.fused_lamb.FusedLAMB
    # LARS variants are named e.g. 'lars_sgd'; the inner optimizer follows '_'.
    opt_name = full_opt_name.split('_')[-1] if is_lars else full_opt_name
    print('using {} optimizer {} lars.'.format(opt_name, 'with' if is_lars else 'without'))
    lr = args.lr
    if opt_name in ['momentum', 'sgd']:
        # Linear scaling rule: lr * (global batch size / 256).
        lr = args.lr * ((args.batch_size * args.num_replicas) / 256)
    opt = optim_map[opt_name](params_to_optimize, lr=lr)
    if is_lars:
        opt = LARS(opt, eps=0.0)
    sched = build_lr_schedule(opt, last_epoch=last_epoch)
    return (opt, sched)
|
def build_train_and_test_transforms():
    """Returns torchvision OR nvidia-dali transforms.

    :returns: train_transforms, test_transforms
    :rtype: list, list
    """
    resize_shape = (args.image_size_override, args.image_size_override)
    if 'dali' in args.task:
        import nvidia.dali.ops as ops
        import nvidia.dali.types as types
        from datasets.dali_imagefolder import ColorJitter, RandomHorizontalFlip, RandomGrayScale
        device = 'gpu' if args.cuda else 'cpu'
        train_transform = [
            ops.RandomResizedCrop(device=device, size=resize_shape,
                                  random_area=(0.08, 1.0),
                                  random_aspect_ratio=((3.0 / 4), (4.0 / 3))),
            RandomHorizontalFlip(prob=0.2, cuda=args.cuda),
            # NOTE(review): saturation uses 0.2x here but 0.8x in the
            # torchvision branch below — possibly intentional, confirm.
            ColorJitter(brightness=(0.8 * args.color_jitter_strength),
                        contrast=(0.8 * args.color_jitter_strength),
                        saturation=(0.2 * args.color_jitter_strength),
                        hue=(0.2 * args.color_jitter_strength),
                        prob=0.8, cuda=args.cuda),
            RandomGrayScale(prob=0.2, cuda=args.cuda),
        ]
        test_transform = [
            ops.Resize(resize_x=resize_shape[0], resize_y=resize_shape[1],
                       device=device, image_type=types.RGB,
                       interp_type=types.INTERP_LINEAR),
        ]
    else:
        from datasets.utils import GaussianBlur
        train_transform = [
            transforms.RandomResizedCrop(resize_shape),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomApply([transforms.ColorJitter(
                brightness=(0.8 * args.color_jitter_strength),
                contrast=(0.8 * args.color_jitter_strength),
                saturation=(0.8 * args.color_jitter_strength),
                hue=(0.2 * args.color_jitter_strength))], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            GaussianBlur(kernel_size=int(0.1 * args.image_size_override), p=0.5),
        ]
        test_transform = [transforms.Resize(resize_shape)]
    return (train_transform, test_transform)
|
def build_loader_model_grapher(args):
    """builds a model, a dataloader and a grapher

    :param args: argparse namespace; mutated in place with dataset-derived
        fields (input_shape, sample counts, step counts).
    :returns: a dataloader, a grapher and a model
    :rtype: list

    """
    (train_transform, test_transform) = build_train_and_test_transforms()
    loader_dict = {'train_transform': train_transform, 'test_transform': test_transform, **vars(args)}
    loader = get_loader(**loader_dict)
    # Stash dataset-derived sizes on args; train/valid counts are per-replica.
    args.input_shape = loader.input_shape
    args.num_train_samples = (loader.num_train_samples // args.num_replicas)
    args.num_test_samples = loader.num_test_samples
    args.num_valid_samples = (loader.num_valid_samples // args.num_replicas)
    args.steps_per_train_epoch = (args.num_train_samples // args.batch_size)
    # total_train_steps must be computed before building the model: it drives
    # the cosine EMA decay schedule inside BYOL's target network.
    args.total_train_steps = (args.epochs * args.steps_per_train_epoch)
    network = BYOL(base_network_output_size=args.representation_size, projection_output_size=args.projection_size, classifier_output_size=loader.output_size, total_training_steps=args.total_train_steps, base_decay=args.base_decay)
    network = (nn.SyncBatchNorm.convert_sync_batchnorm(network) if args.convert_to_sync_bn else network)
    network = (network.cuda() if args.cuda else network)
    # NOTE(review): presumably materializes lazily-built modules by running a
    # batch through the network before init/DDP wrapping — confirm in helper.
    lazy_generate_modules(network, loader.train_loader)
    network = layers.init_weights(network, init=args.weight_initialization)
    if (args.num_replicas > 1):
        print('wrapping model with DDP...')
        network = layers.DistributedDataParallelPassthrough(network, device_ids=[0], output_device=0, find_unused_parameters=True)
    print(network)
    print('model has {} million parameters.'.format((utils.number_of_parameters(network) / 1000000.0)))
    # Only rank 0 gets a grapher: visdom if a URL was given, else tensorboard.
    grapher = None
    if ((args.visdom_url is not None) and (args.distributed_rank == 0)):
        grapher = Grapher('visdom', env=utils.get_name(args), server=args.visdom_url, port=args.visdom_port, log_folder=args.log_dir)
    elif (args.distributed_rank == 0):
        grapher = Grapher('tensorboard', logdir=os.path.join(args.log_dir, utils.get_name(args)))
    return (loader, network, grapher)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.