code stringlengths 17 6.64M |
|---|
def brightness(img, factor, **__):
    """Scale image brightness by `factor` (1.0 leaves the image unchanged)."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
|
def sharpness(img, factor, **__):
    """Scale image sharpness by `factor` (1.0 leaves the image unchanged)."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
|
def _randomly_negate(v):
    """With 50% probability, return -v; otherwise return v unchanged."""
    flip_sign = random.random() > 0.5
    return -v if flip_sign else v
|
def _rotate_level_to_arg(level, _hparams):
    """Map magnitude `level` to a rotation angle in [-30, 30] degrees."""
    angle = level / _MAX_LEVEL * 30.0
    return (_randomly_negate(angle),)
|
def _enhance_level_to_arg(level, _hparams):
    """Map magnitude `level` to an enhancement factor in [0.1, 1.9]."""
    factor = 0.1 + level / _MAX_LEVEL * 1.8
    return (factor,)
|
def _enhance_increasing_level_to_arg(level, _hparams):
    """Map magnitude to a factor centered at 1.0 whose deviation grows with `level`."""
    delta = level / _MAX_LEVEL * 0.9
    return (1.0 + _randomly_negate(delta),)
|
def _shear_level_to_arg(level, _hparams):
    """Map magnitude `level` to a shear amount in [-0.3, 0.3]."""
    shear = level / _MAX_LEVEL * 0.3
    return (_randomly_negate(shear),)
|
def _translate_abs_level_to_arg(level, hparams):
    """Map magnitude to an absolute pixel shift in [-translate_const, translate_const]."""
    max_shift = float(hparams['translate_const'])
    shift = level / _MAX_LEVEL * max_shift
    return (_randomly_negate(shift),)
|
def _translate_rel_level_to_arg(level, hparams):
    """Map magnitude to a relative shift in [-pct, pct]; pct defaults to 0.45."""
    pct = hparams.get('translate_pct', 0.45)
    shift = level / _MAX_LEVEL * pct
    return (_randomly_negate(shift),)
|
def _posterize_level_to_arg(level, _hparams):
    """Map magnitude `level` to an integer posterize bits value in [0, 4]."""
    bits = int(level / _MAX_LEVEL * 4)
    return (bits,)
|
def _posterize_increasing_level_to_arg(level, hparams):
    """Posterize bits shrinking from 4 toward 0 as `level` grows (severity increases)."""
    base_bits = _posterize_level_to_arg(level, hparams)[0]
    return (4 - base_bits,)
|
def _posterize_original_level_to_arg(level, _hparams):
    """Map magnitude `level` to a posterize bits value in [4, 8]."""
    bits = int(level / _MAX_LEVEL * 4) + 4
    return (bits,)
|
def _solarize_level_to_arg(level, _hparams):
    """Map magnitude `level` to an integer solarize threshold in [0, 256]."""
    threshold = int(level / _MAX_LEVEL * 256)
    return (threshold,)
|
def _solarize_increasing_level_to_arg(level, _hparams):
    """Solarize threshold shrinking from 256 as `level` grows (severity increases)."""
    base_threshold = _solarize_level_to_arg(level, _hparams)[0]
    return (256 - base_threshold,)
|
def _solarize_add_level_to_arg(level, _hparams):
    """Map magnitude `level` to an integer solarize additive amount in [0, 110]."""
    amount = int(level / _MAX_LEVEL * 110)
    return (amount,)
|
class AugmentOp():
    """A single augmentation op applied to one PIL image or a list of images (video frames).

    `name` selects the op from the module-level NAME_TO_OP / LEVEL_TO_ARG
    registries; `prob` is the per-call probability of applying it, and
    `magnitude` (0.._MAX_LEVEL) controls its strength.
    """
    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = (hparams or _HPARAMS_DEFAULT)
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # fill colour / resample mode forwarded as kwargs to the underlying PIL op
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL), 'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        # std-dev of gaussian noise added to magnitude per call (0 disables noise)
        self.magnitude_std = self.hparams.get('magnitude_std', 0)
    def __call__(self, img_list):
        # skip this op entirely with probability (1 - prob)
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            magnitude = random.gauss(magnitude, self.magnitude_std)
        # clamp the (possibly noisy) magnitude back into [0, _MAX_LEVEL]
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        # translate the scalar magnitude into the op's positional args
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        # a list is treated as video frames: apply the same op to each frame
        if isinstance(img_list, list):
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
|
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized sampling probabilities over `transforms`.

    Only weight set 0 (_RAND_CHOICE_WEIGHTS_0) is currently defined.
    """
    transforms = transforms or _RAND_TRANSFORMS
    assert (weight_idx == 0)
    raw_weights = [_RAND_CHOICE_WEIGHTS_0[name] for name in transforms]
    # dividing the list by the numpy scalar yields a normalized ndarray
    return raw_weights / np.sum(raw_weights)
|
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build the list of AugmentOp instances RandAugment samples from (prob fixed at 0.5)."""
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS
    ops = []
    for name in transforms:
        ops.append(AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
|
class RandAugment():
    """Apply `num_layers` randomly chosen ops from `ops` to an image (or frame list)."""
    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights
    def __call__(self, img):
        # unweighted sampling draws with replacement; weighted sampling without
        chosen = np.random.choice(
            self.ops,
            self.num_layers,
            replace=self.choice_weights is None,
            p=self.choice_weights,
        )
        for op in chosen:
            img = op(img)
        return img
|
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719

    Create a RandAugment transform.

    :param config_str: String defining configuration of random augmentation. Consists of
        multiple sections separated by dashes ('-'). The first section defines the variant
        (currently only 'rand'). The remaining sections, not order specific, determine:
        'm' - integer magnitude of rand augment
        'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probability weight index (index of a set of weights to influence choice of op)
        'mstd' - float std deviation of magnitude noise applied
        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
        Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
        'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    :raises NotImplementedError: on an unrecognised config section key
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert (config[0] == 'rand')
    for c in config[1:]:
        # split each section into its alphabetic key and numeric value
        cs = re.split('(\\d.*)', c)
        if (len(cs) < 2):
            continue
        (key, val) = cs[:2]
        if (key == 'mstd'):
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            # BUGFIX: was `bool(val)`, which is True for ANY non-empty string,
            # so 'inc0' still enabled increasing transforms; parse the digit.
            if int(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            # BUGFIX: was `assert NotImplementedError`, which always passes
            # (it asserts the truthy exception class) and silently ignored
            # unknown keys; now fail loudly on a bad config section.
            raise NotImplementedError('Unknown RandAugment config section: {}'.format(key))
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = (None if (weight_idx is None) else _select_rand_weights(weight_idx))
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
|
class RawVideoExtractorCV2():
    """Extract frames from a video file with OpenCV and convert them to tensors.

    Frames are sampled per-second at `framerate` fps (see `video_to_tensor`);
    for the 'train' subset a RandAugment pipeline runs before the CLIP-style
    normalization transform.
    """
    def __init__(self, centercrop=False, size=224, framerate=(- 1), subset='test'):
        self.centercrop = centercrop
        self.size = size
        # frames per second to sample; passed as sample_fp to video_to_tensor
        self.framerate = framerate
        self.transform = self._transform(self.size)
        self.subset = subset
        # alternative CLIP preprocessing pipelines keyed by phase
        # (not referenced inside video_to_tensor itself)
        self.tsfm_dict = {'clip_test': Compose([Resize(size, interpolation=InterpolationMode.BICUBIC), CenterCrop(size), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]), 'clip_train': Compose([RandomResizedCrop(size, scale=(0.5, 1.0)), RandomHorizontalFlip(), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])}
        # frame-level RandAugment applied only when subset == 'train'
        self.aug_transform = video_transforms.create_random_augment(input_size=(size, size), auto_augment='rand-m7-n4-mstd0.5-inc1', interpolation='bicubic')
    def _transform(self, n_px):
        # CLIP-style preprocessing: bicubic resize, center crop, RGB, tensor, normalize
        return Compose([Resize(n_px, interpolation=InterpolationMode.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])
    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None, _no_process=False):
        """Decode `video_file` and return {'video': data}.

        `sample_fp` frames are taken from each second between start_time and
        end_time (whole video when both are None). With _no_process=True the
        raw PIL frames are returned; otherwise frames go through `preprocess`
        (plus aug_transform when subset == 'train') and are stacked into a
        tensor. Returns a zero tensor when no frame could be read.
        """
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (- 1)) and (end_time > start_time))
        assert (sample_fp > (- 1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if (fps == 0):
            # diagnostic only: NOTE(review) the next line still raises
            # ZeroDivisionError when fps == 0
            print(((video_file + '\n') * 10))
        # duration in whole seconds, rounded up
        total_duration = (((frameCount + fps) - 1) // fps)
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            # spacing between sampled frames within one second of video
            interval = (fps // sample_fp)
        else:
            # sample_fp == 0 means take every frame
            sample_fp = fps
        if (interval == 0):
            interval = 1
        # frame offsets inside a second; trimmed to exactly sample_fp entries
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # seek to the absolute frame index and grab it
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                # OpenCV decodes BGR; convert for PIL/torch consumers
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if _no_process:
                    images.append(Image.fromarray(frame_rgb).convert('RGB'))
                else:
                    images.append(Image.fromarray(frame_rgb))
        cap.release()
        if (len(images) > 0):
            if _no_process:
                video_data = images
            else:
                if (self.subset == 'train'):
                    # RandAugment on the whole frame list before per-frame preprocess
                    images = self.aug_transform(images)
                video_data = th.stack([preprocess(img) for img in images])
        else:
            # no decodable frames: signal with a single zero tensor
            video_data = th.zeros(1)
        return {'video': video_data}
    def get_video_data(self, video_path, start_time=None, end_time=None, _no_process=False):
        """Convenience wrapper: extract with this instance's transform and framerate."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time, _no_process=_no_process)
        return image_input
    def process_raw_data(self, raw_video_data):
        """Reshape (..., C, H, W) frames to (num_frames, 1, C, H, W)."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
        return tensor
    def process_frame_order(self, raw_video_data, frame_order=0):
        """Reorder frames along dim 0: 0 = keep, 1 = reverse, 2 = random shuffle."""
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (- 1), (- 1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
|
def url_to_filename(url: str, etag: str=None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    parts = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(parts)
|
def filename_to_url(filename: str, cache_dir: Union[(str, Path)]=None) -> Tuple[(str, str)]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if (not os.path.exists(cache_path)):
        raise FileNotFoundError('file {} not found'.format(cache_path))
    # provenance lives in a sidecar .json written by get_from_cache
    meta_path = cache_path + '.json'
    if (not os.path.exists(meta_path)):
        raise FileNotFoundError('file {} not found'.format(meta_path))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
|
def cached_path(url_or_filename: Union[(str, Path)], cache_dir: Union[(str, Path)]=None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # remote resource: download (or reuse) a cached copy
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if scheme == '':
        # looks like a plain path but nothing exists there
        raise FileNotFoundError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
|
def split_s3_path(url: str) -> Tuple[(str, str)]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError('bad s3 path {}'.format(url))
    bucket_name = parsed.netloc
    s3_path = parsed.path
    # drop the single leading slash that urlparse keeps on the key
    if s3_path.startswith('/'):
        s3_path = s3_path[1:]
    return (bucket_name, s3_path)
|
def s3_request(func: Callable):
    """
    Decorator for S3 request functions that converts a 404 ClientError into a
    FileNotFoundError naming the missing url; other errors propagate unchanged.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            code = int(exc.response['Error']['Code'])
            if code == 404:
                raise FileNotFoundError('file {} not found'.format(url))
            raise
    return wrapper
|
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource('s3').Object(bucket_name, s3_path)
    return s3_object.e_tag
|
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3 into the writable `temp_file`."""
    bucket_name, s3_path = split_s3_path(url)
    bucket = boto3.resource('s3').Bucket(bucket_name)
    bucket.download_fileobj(s3_path, temp_file)
|
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` into `temp_file`, reporting progress in bytes via tqdm."""
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    total = None if content_length is None else int(content_length)
    progress = tqdm(unit='B', total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if not chunk:
            continue  # skip keep-alive chunks
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
|
def get_from_cache(url: str, cache_dir: Union[(str, Path)]=None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # the cache filename hashes url + remote ETag, so a changed remote file
    # produces a fresh cache entry
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if (response.status_code != 200):
            raise IOError('HEAD request failed for url {} with status code {}'.format(url, response.status_code))
        etag = response.headers.get('ETag')
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if (not os.path.exists(cache_path)):
        # download to a temp file first, then copy, so a failed download never
        # leaves a partial file at cache_path
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # flush and rewind so copyfileobj sees everything just written
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info('creating metadata file for %s', cache_path)
            # sidecar .json records provenance for filename_to_url()
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
|
def read_set_from_file(filename: str) -> Set[str]:
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    """
    with open(filename, 'r', encoding='utf-8') as file_:
        return {line.rstrip() for line in file_}
|
def get_file_extension(path: str, dot=True, lower: bool=True):
    """Return the extension of `path`, optionally without the dot and/or lowercased."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
|
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""
    def forward(self, x: torch.Tensor):
        # normalize in fp32 for numerical stability, then cast back to the input dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(x.dtype)
|
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
|
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block (multi-head self-attention + GELU MLP).

    forward() takes and returns an (x, attn_mask) tuple so blocks can be
    stacked inside nn.Sequential while threading the mask through.
    """
    def __init__(self, d_model: int, n_head: int, attn_mask=None):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ('c_fc', nn.Linear(d_model, d_model * 4)),
            ('gelu', QuickGELU()),
            ('c_proj', nn.Linear(d_model * 4, d_model)),
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.n_head = n_head
    def attention(self, x: torch.Tensor, attn_mask_: torch.Tensor):
        """Self-attention over x with an optional per-sample mask.

        BUGFIX: the None check previously ran *after* repeat_interleave, so a
        None mask crashed with AttributeError before it could be handled; the
        check now guards the mask preparation.
        """
        if attn_mask_ is not None:
            # expand a (batch, L, L) mask to (batch * n_head, L, L), the 3-D
            # mask layout nn.MultiheadAttention expects
            attn_mask_ = attn_mask_.repeat_interleave(self.n_head, dim=0)
            attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]
    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = x + self.attention(self.ln_1(x), attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return (x, attn_mask)
|
class Transformer(nn.Module):
    """A stack of `layers` ResidualAttentionBlocks of width `width`."""
    def __init__(self, width: int, layers: int, heads: int, attn_mask=None):
        super(Transformer, self).__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)
    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
        # blocks pass (x, mask) tuples through; return only the features
        out, _ = self.resblocks((x, attn_mask))
        return out
|
def warmup_cosine(x, warmup=0.002):
    """Linear warmup over the first `warmup` fraction of progress x, then cosine decay."""
    if x < warmup:
        return x / warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
|
def warmup_constant(x, warmup=0.002):
    """Linearly increases learning rate over `warmup`*`t_total` (as provided to
    BertAdam) training steps. Learning rate is 1. afterwards."""
    return x / warmup if x < warmup else 1.0
|
def warmup_linear(x, warmup=0.002):
    """Specifies a triangular learning rate schedule where peak is reached at
    `warmup`*`t_total`-th (as provided to BertAdam) training step.
    After `t_total`-th training step, learning rate is zero."""
    if x < warmup:
        return x / warmup
    # linear decay from 1 at x == warmup down to 0 at x == 1, clamped below at 0
    decay = (x - 1.0) / (warmup - 1.0)
    return max(decay, 0)
|
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # validate hyper-parameters up front so misconfiguration fails loudly
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the currently scheduled learning rate for each parameter with a
        gradient; returns [0] if no optimization step has been taken yet."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    # no step taken yet for this param: state is uninitialized
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # lazy state initialization on first step for this parameter
                if (len(state) == 0):
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                # per-parameter gradient clipping before the moment updates
                if (group['max_grad_norm'] > 0):
                    clip_grad_norm_(p, group['max_grad_norm'])
                # exponential moving averages of the gradient and its square
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # NOTE: no Adam bias-correction terms here (the "BERT version")
                update = (next_m / (next_v.sqrt() + group['e']))
                # decoupled weight decay: added to the update, not the gradient
                if (group['weight_decay'] > 0.0):
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                # step counter is advanced after applying the update, so the lr
                # schedule is evaluated at the pre-update step index
                state['step'] += 1
        return loss
|
@lru_cache()
def default_bpe():
    """Path to the bundled BPE vocab archive, resolved relative to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
    """
    Returns a dict mapping each utf-8 byte value to a unicode string.
    The reversible bpe codes work on unicode strings, so every byte needs a
    printable character: the printable latin-1 ranges map to themselves, and
    the remaining bytes get fresh codepoints starting at 256 so that nothing
    maps to whitespace/control characters the bpe code barfs on.
    """
    kept_bytes = list(range(ord('!'), ord('~') + 1))
    kept_bytes += list(range(ord('¡'), ord('¬') + 1))
    kept_bytes += list(range(ord('®'), ord('ÿ') + 1))
    codepoints = kept_bytes[:]
    offset = 0
    for byte in range(2 ** 8):
        if byte not in kept_bytes:
            kept_bytes.append(byte)
            codepoints.append(2 ** 8 + offset)
            offset += 1
    return dict(zip(kept_bytes, (chr(cp) for cp in codepoints)))
|
def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    previous = word[0]
    pairs = set()
    for symbol in word[1:]:
        pairs.add((previous, symbol))
        previous = symbol
    return pairs
|
def basic_clean(text):
    """Fix mojibake with ftfy, unescape doubly HTML-escaped entities, and trim."""
    fixed = ftfy.fix_text(text)
    fixed = html.unescape(html.unescape(fixed))
    return fixed.strip()
|
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub('\\s+', ' ', text).strip()
|
class SimpleTokenizer(object):
    """CLIP-style byte-level BPE tokenizer.

    Text is mapped through the reversible byte->unicode table
    (bytes_to_unicode), greedily merged using ranked BPE merges loaded from
    `bpe_path`, then looked up in a vocab built from 256 byte symbols, their
    '</w>' variants, the merge products, and the two special tokens.
    """
    def __init__(self, bpe_path: str=default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # skip the header line and keep only the merges the vocab size allows
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # lower rank = higher-priority merge in bpe()
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): the \p{L}/\p{N} classes require the third-party `regex`
        # module — this presumes the module-level `re` is `import regex as re`.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder
    def bpe(self, token):
        """Return the BPE-merged form of `token` as a space-joined string (memoized)."""
        if (token in self.cache):
            return self.cache[token]
        # the final character carries the end-of-word marker
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # repeatedly merge the best-ranked (lowest rank) adjacent pair
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:  # NOTE(review): bare except; index() raises ValueError when `first` is absent
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Return the list of integer BPE token ids for `text`."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # map the raw utf-8 bytes through the reversible byte->unicode table
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def decode(self, tokens):
        """Inverse of encode(): token ids back to text ('</w>' becomes a space)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
    def tokenize(self, text):
        """Like encode() but returns the BPE token strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens
    def convert_tokens_to_ids(self, tokens):
        """Map BPE token strings (from tokenize()) to their integer ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
|
class PretrainedConfig(object):
    """Base class for model configs resolvable from a name, path, url, or
    tar.gz archive; subclasses override pretrained_model_archive_map,
    config_name, and weights_name.
    """
    pretrained_model_archive_map = {}
    config_name = ''
    weights_name = ''
    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        """Resolve `pretrained_model_name` to a (config, state_dict) pair.

        Lookup order: file beside this module -> archive map entry -> the name
        itself as a path/url. Returns None when nothing could be resolved.
        Only rank 0 logs, to avoid duplicated output in distributed runs.
        """
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if (os.path.exists(archive_file) is False):
            if (pretrained_model_name in cls.pretrained_model_archive_map):
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.error("Model name '{}' was not found in model name list. We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name, archive_file))
            return None
        if (resolved_archive_file == archive_file):
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('loading archive file {}'.format(archive_file))
        elif ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # a .tar.gz archive: unpack into a temp dir removed at the end
            tempdir = tempfile.mkdtemp()
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                # NOTE(review): extractall on an untrusted archive can write
                # outside tempdir (path traversal) — acceptable only for
                # trusted checkpoints; confirm the archives are trusted.
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('Model config {}'.format(config))
        if (state_dict is None):
            # a caller-supplied state_dict takes precedence over the archive's
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            elif ((task_config is None) or (task_config.local_rank == 0)):
                logger.info("Weight doesn't exsits. {}".format(weights_path))
        if tempdir:
            # clean up the extraction directory
            shutil.rmtree(tempdir)
        return (config, state_dict)
    @classmethod
    def from_dict(cls, json_object):
        'Constructs a `BertConfig` from a Python dictionary of parameters.'
        config = cls(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        'Constructs a `BertConfig` from a json file of parameters.'
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        'Serializes this instance to a Python dictionary.'
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        'Serializes this instance to a JSON string (sorted keys, 2-space indent).'
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
|
def get_world_size():
    """Number of distributed processes; 1 when torch.distributed is unavailable or uninitialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank():
    """Distributed rank of this process; 0 when torch.distributed is unavailable or uninitialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
|
def is_main_process():
    """True when this process has distributed rank 0."""
    return get_rank() == 0
|
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when using
    distributed training; a no-op for single-process or uninitialized runs.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
|
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if (world_size == 1):
        return [data]
    # serialize the object into a byte tensor on the GPU
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # exchange per-rank payload sizes first: all_gather needs equal shapes
    local_size = torch.LongTensor([tensor.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # pad every payload to max_size so the byte tensors are gatherable
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if (local_size != max_size):
        padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # trim the padding back off and unpickle each rank's object
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
|
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process
    with rank 0 has the reduced results. Returns a dict with the same fields
    as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if (dist.get_rank() == 0) and average:
            # only rank 0 holds the true sum after reduce
            stacked /= world_size
        reduced_dict = dict(zip(keys, stacked))
    return reduced_dict
|
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
    """Create a logger that writes to stdout (and optionally a file) on rank 0;
    non-zero ranks get a quiet ERROR-level logger with no handlers added."""
    log = logging.getLogger(name)
    log.setLevel(logging.ERROR)
    if dist_rank > 0:
        return log
    log.setLevel(logging.DEBUG)
    fmt = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(fmt)
    log.addHandler(stream_handler)
    # stop records from propagating to the root logger (avoids double printing)
    log.propagate = False
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        log.addHandler(file_handler)
    return log
|
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """
    def __init__(self, window_size=20):
        # bounded window for median/avg; unbounded series + totals for global_avg
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0
    def update(self, value):
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value
    @property
    def median(self):
        """Median over the current window."""
        return torch.tensor(list(self.deque)).median().item()
    @property
    def avg(self):
        """Mean over the current window."""
        return torch.tensor(list(self.deque)).mean().item()
    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
|
class MetricLogger(object):
    """Collects named SmoothedValue meters and renders them as a delimited string."""
    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)
    def __getattr__(self, attr):
        # only reached when normal lookup fails; expose meters as attributes
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
    def __str__(self):
        parts = [
            '{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(parts)
|
class AverageMeter(object):
    """Track the latest value, running sum, count, and mean of a scalar."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (as a percentage) for each k in `topk`.

    :param output: [N, C] tensor of per-class scores.
    :param target: [N] tensor of ground-truth class indices.
    :return: list of 1-element tensors, one per requested k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # Each column holds at most one True: whether the true label appears
        # at each rank of the top-maxk predictions.
        correct = pred.eq(target.view(1, -1).expand_as(pred)).contiguous()
        results = []
        for k in topk:
            hits = correct[:k].view(-1).float().sum(0, keepdim=True)
            results.append(hits.mul_(100.0 / batch_size))
        return results
|
def get_a_var(obj):
    """Return the first torch.Tensor found in `obj`, searching recursively
    through lists, tuples, and dict items; None if there is no tensor.
    """
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for candidate in map(get_a_var, obj):
            if isinstance(candidate, torch.Tensor):
                return candidate
    if isinstance(obj, dict):
        for candidate in map(get_a_var, obj.items()):
            if isinstance(candidate, torch.Tensor):
                return candidate
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    # Replicate `model` across `device_ids` and run `fct(replica, *input)` on
    # each (replica, input) pair, one thread per replica. Mirrors
    # torch.nn.parallel.parallel_apply but applies an arbitrary callable.
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        # Grad mode is thread-local in PyTorch: propagate the caller's mode
        # into this worker thread.
        torch.set_grad_enabled(grad_enabled)
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception so it can be re-raised in the main thread.
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline and skip the thread overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Return the shared 'logger' logger at DEBUG level, configuring basic
    console output and, when `filename` is given, attaching an extra file
    handler to the root logger.
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is None:
        return logger
    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(file_handler)
    return logger
|
def convert_module_to_f16(l):
    """Cast the weights (and bias, if any) of primitive conv modules to fp16.

    Modules other than Conv1d/2d/3d are left untouched.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    if l.bias is not None:
        l.bias.data = l.bias.data.half()
|
def convert_module_to_f32(l):
    """Cast the weights (and bias, if any) of primitive conv modules back to
    fp32, undoing convert_module_to_f16(). Other modules are left untouched.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.float()
    if l.bias is not None:
        l.bias.data = l.bias.data.float()
|
def make_master_params(param_groups_and_shapes):
    """Build one flat fp32 master parameter per (param_group, shape) pair,
    copying the current model parameter values into it.
    """
    master_params = []
    for param_group, shape in param_groups_and_shapes:
        flat = _flatten_dense_tensors([param.detach().float() for (_, param) in param_group])
        master = nn.Parameter(flat.view(shape))
        master.requires_grad = True
        master_params.append(master)
    return master_params
|
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
    """Flatten each group's model gradients (zeros where a grad is missing)
    into the matching master parameter's .grad.
    """
    for master_param, (param_group, shape) in zip(master_params, param_groups_and_shapes):
        group_grads = [param_grad_or_zeros(param) for (_, param) in param_group]
        master_param.grad = _flatten_dense_tensors(group_grads).view(shape)
|
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """Copy master parameter data back into the individual model parameters."""
    for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
        unflattened = unflatten_master_params(param_group, master_param.view(-1))
        for (_, param), master_chunk in zip(param_group, unflattened):
            param.detach().copy_(master_chunk)
|
def unflatten_master_params(param_group, master_param):
    """Split the flat `master_param` back into tensors shaped like the
    parameters in `param_group`.
    """
    reference_tensors = [param for (_, param) in param_group]
    return _unflatten_dense_tensors(master_param, reference_tensors)
|
def get_param_groups_and_shapes(named_model_params):
    """Split named parameters into two groups for flat master-param handling:
    scalars/vectors (flattened to -1) and matrices (flattened to (1, -1)).
    """
    named_model_params = list(named_model_params)
    scalars_and_vectors = [(n, p) for (n, p) in named_model_params if p.ndim <= 1]
    matrices = [(n, p) for (n, p) in named_model_params if p.ndim > 1]
    return [(scalars_and_vectors, -1), (matrices, (1, -1))]
|
def master_params_to_state_dict(model, param_groups_and_shapes, master_params, use_fp16):
    """Build a state dict whose parameter entries come from `master_params`
    rather than from the model's own (possibly fp16) parameters.
    """
    state_dict = model.state_dict()
    if use_fp16:
        # Unflatten each flat master param into per-parameter tensors.
        for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
            unflattened = unflatten_master_params(param_group, master_param.view(-1))
            for (name, _), master_chunk in zip(param_group, unflattened):
                assert name in state_dict
                state_dict[name] = master_chunk
    else:
        # fp32: master params line up one-to-one with named parameters.
        for i, (name, _value) in enumerate(model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
    return state_dict
|
def state_dict_to_master_params(model, state_dict, use_fp16):
    """Rebuild master params from a state dict (inverse of
    master_params_to_state_dict).
    """
    if not use_fp16:
        return [state_dict[name] for (name, _) in model.named_parameters()]
    named_params = [(name, state_dict[name]) for (name, _) in model.named_parameters()]
    groups_and_shapes = get_param_groups_and_shapes(named_params)
    return make_master_params(groups_and_shapes)
|
def zero_master_grads(master_params):
    """Drop the .grad of every master parameter."""
    for master_param in master_params:
        master_param.grad = None
|
def zero_grad(model_params):
    """Detach and zero existing gradients in place; parameters without a
    gradient are left untouched.
    """
    for param in model_params:
        if param.grad is None:
            continue
        param.grad.detach_()
        param.grad.zero_()
|
def param_grad_or_zeros(param):
    """Return the parameter's detached gradient data, or a zeros tensor of the
    same shape when the parameter has no gradient.
    """
    grad = param.grad
    if grad is None:
        return th.zeros_like(param)
    return grad.data.detach()
|
class MixedPrecisionTrainer():
    """Wrap a model for training with optional fp16 + dynamic loss scaling.

    With use_fp16=False this is a thin pass-through around ordinary fp32
    training. With use_fp16=True, gradients flow through flat fp32 master
    parameters (one per group from get_param_groups_and_shapes) while the
    model itself runs in fp16, and the loss scale adapts to overflows.
    """
    def __init__(self, *, model, use_fp16=False, fp16_scale_growth=0.001, initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE):
        self.model = model
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.model_params = list(self.model.parameters())
        self.master_params = self.model_params
        self.param_groups_and_shapes = None
        # lg_loss_scale is the base-2 log of the loss scale.
        self.lg_loss_scale = initial_lg_loss_scale
        if self.use_fp16:
            # Build flat fp32 master copies, then cast the model to fp16.
            self.param_groups_and_shapes = get_param_groups_and_shapes(self.model.named_parameters())
            self.master_params = make_master_params(self.param_groups_and_shapes)
            self.model.convert_to_fp16()
    def zero_grad(self):
        """Zero the gradients of the (fp16) model parameters."""
        zero_grad(self.model_params)
    def backward(self, loss: th.Tensor):
        """Backpropagate `loss`, scaling it first when training in fp16."""
        if self.use_fp16:
            # Scale the loss up so fp16 gradients do not underflow.
            loss_scale = (2 ** self.lg_loss_scale)
            (loss * loss_scale).backward()
        else:
            loss.backward()
    def optimize(self, opt: th.optim.Optimizer):
        """Take one optimizer step; returns False if the step was skipped."""
        if self.use_fp16:
            return self._optimize_fp16(opt)
        else:
            return self._optimize_normal(opt)
    def _optimize_fp16(self, opt: th.optim.Optimizer):
        logger.logkv_mean('lg_loss_scale', self.lg_loss_scale)
        model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
        (grad_norm, param_norm) = self._compute_norms(grad_scale=(2 ** self.lg_loss_scale))
        if check_overflow(grad_norm):
            # Overflow: shrink the loss scale and drop this step's gradients.
            self.lg_loss_scale -= 1
            logger.log(f'Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}')
            zero_master_grads(self.master_params)
            return False
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        # Undo the loss scaling on EVERY master param before stepping.
        # (Previously only master_params[0] was unscaled, leaving the other
        # group's gradients 2**lg_loss_scale times too large.)
        for master_param in self.master_params:
            master_param.grad.mul_((1.0 / (2 ** self.lg_loss_scale)))
        opt.step()
        zero_master_grads(self.master_params)
        master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
        # Successful step: gently grow the loss scale.
        self.lg_loss_scale += self.fp16_scale_growth
        return True
    def _optimize_normal(self, opt: th.optim.Optimizer):
        (grad_norm, param_norm) = self._compute_norms()
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        opt.step()
        return True
    def _compute_norms(self, grad_scale=1.0):
        """L2 norms over all master params; grad norm is reported unscaled
        (divided by `grad_scale`).
        """
        grad_norm = 0.0
        param_norm = 0.0
        for p in self.master_params:
            with th.no_grad():
                param_norm += (th.norm(p, p=2, dtype=th.float32).item() ** 2)
                if (p.grad is not None):
                    grad_norm += (th.norm(p.grad, p=2, dtype=th.float32).item() ** 2)
        return ((np.sqrt(grad_norm) / grad_scale), np.sqrt(param_norm))
    def master_params_to_state_dict(self, master_params):
        return master_params_to_state_dict(self.model, self.param_groups_and_shapes, master_params, self.use_fp16)
    def state_dict_to_master_params(self, state_dict):
        return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
|
def check_overflow(value):
    """True if `value` is +inf, -inf, or NaN (NaN compares unequal to itself)."""
    if value == float('inf') or value == -float('inf'):
        return True
    return value != value
|
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """Get a pre-defined beta schedule for the given name.

    The schedules remain similar in the limit of num_diffusion_timesteps;
    existing names must keep their behavior for backwards compatibility.
    """
    if schedule_name == 'linear':
        # Scaled so the schedule matches the original 1000-step linear one.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64)
    if schedule_name == 'cosine':
        return betas_for_alpha_bar(num_diffusion_timesteps, (lambda t: (math.cos(((((t + 0.008) / 1.008) * math.pi) / 2)) ** 2)))
    raise NotImplementedError(f'unknown beta schedule: {schedule_name}')
|
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda mapping t in [0, 1] to the cumulative product
                      of (1-beta) up to that part of the diffusion process.
    :param max_beta: cap on each beta; values below 1 prevent singularities.
    """
    def beta_at(i):
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        return min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)
    return np.array([beta_at(i) for i in range(num_diffusion_timesteps)])
|
class ModelMeanType(enum.Enum):
    """Which type of output the model predicts."""
    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts the noise epsilon
|
class ModelVarType(enum.Enum):
    """What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    LEARNED = enum.auto()  # model outputs the log-variance directly
    FIXED_SMALL = enum.auto()  # use the clipped posterior variance
    FIXED_LARGE = enum.auto()  # use betas as the variance
    LEARNED_RANGE = enum.auto()  # model outputs an interpolation weight between the fixed bounds
|
class LossType(enum.Enum):
    """Which training loss to use."""
    MSE = enum.auto()
    RESCALED_MSE = enum.auto()
    KL = enum.auto()
    RESCALED_KL = enum.auto()

    def is_vb(self):
        """True for the variational-bound (KL-based) loss types."""
        return self in (LossType.KL, LossType.RESCALED_KL)
|
class GaussianDiffusion():
'\n Utilities for training and sampling diffusion models.\n\n Ported directly from here, and then adapted over time to further experimentation.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42\n\n :param betas: a 1-D numpy array of betas for each diffusion timestep,\n starting at T and going to 1.\n :param model_mean_type: a ModelMeanType determining what the model outputs.\n :param model_var_type: a ModelVarType determining how variance is output.\n :param loss_type: a LossType determining the loss function to use.\n :param rescale_timesteps: if True, pass floating point timesteps into the\n model so that they are always scaled like in the\n original paper (0 to 1000).\n '
def __init__(self, *, betas, model_mean_type, model_var_type, loss_type, rescale_timesteps=False):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert (len(betas.shape) == 1), 'betas must be 1-D'
assert ((betas > 0).all() and (betas <= 1).all())
self.num_timesteps = int(betas.shape[0])
alphas = (1.0 - betas)
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:(- 1)])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert (self.alphas_cumprod_prev.shape == (self.num_timesteps,))
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt((1.0 - self.alphas_cumprod))
self.log_one_minus_alphas_cumprod = np.log((1.0 - self.alphas_cumprod))
self.sqrt_recip_alphas_cumprod = np.sqrt((1.0 / self.alphas_cumprod))
self.sqrt_recipm1_alphas_cumprod = np.sqrt(((1.0 / self.alphas_cumprod) - 1))
self.posterior_variance = ((betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
self.posterior_log_variance_clipped = np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))
self.posterior_mean_coef1 = ((betas * np.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
self.posterior_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - self.alphas_cumprod))
def q_mean_variance(self, x_start, t):
"\n Get the distribution q(x_t | x_0).\n\n :param x_start: the [N x C x ...] tensor of noiseless inputs.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n "
mean = (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
variance = _extract_into_tensor((1.0 - self.alphas_cumprod), t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return (mean, variance, log_variance)
def q_sample(self, x_start, t, noise=None):
'\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n '
if (noise is None):
noise = th.randn_like(x_start)
assert (noise.shape == x_start.shape)
return ((_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))
def q_posterior_mean_variance(self, x_start, x_t, t):
'\n Compute the mean and variance of the diffusion posterior:\n\n q(x_{t-1} | x_t, x_0)\n\n '
assert (x_start.shape == x_t.shape)
posterior_mean = ((_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (_extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t))
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
assert (posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0])
return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n "
if (model_kwargs is None):
model_kwargs = {}
(B, C) = x.shape[:2]
assert (t.shape == (B,))
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
assert (model_output.shape == (B, (C * 2), *x.shape[2:]))
(model_output, model_var_values) = th.split(model_output, C, dim=1)
if (self.model_var_type == ModelVarType.LEARNED):
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
frac = ((model_var_values + 1) / 2)
model_log_variance = ((frac * max_log) + ((1 - frac) * min_log))
model_variance = th.exp(model_log_variance)
else:
(model_variance, model_log_variance) = {ModelVarType.FIXED_LARGE: (np.append(self.posterior_variance[1], self.betas[1:]), np.log(np.append(self.posterior_variance[1], self.betas[1:]))), ModelVarType.FIXED_SMALL: (self.posterior_variance, self.posterior_log_variance_clipped)}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if (denoised_fn is not None):
x = denoised_fn(x)
if clip_denoised:
return x.clamp((- 1), 1)
return x
if (self.model_mean_type == ModelMeanType.PREVIOUS_X):
pred_xstart = process_xstart(self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output))
model_mean = model_output
elif (self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]):
if (self.model_mean_type == ModelMeanType.START_X):
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
(model_mean, _, _) = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
else:
raise NotImplementedError(self.model_mean_type)
assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
return {'mean': model_mean, 'variance': model_variance, 'log_variance': model_log_variance, 'pred_xstart': pred_xstart}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert (x_t.shape == eps.shape)
return ((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (_extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps))
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert (x_t.shape == xprev.shape)
return ((_extract_into_tensor((1.0 / self.posterior_mean_coef1), t, x_t.shape) * xprev) - (_extract_into_tensor((self.posterior_mean_coef2 / self.posterior_mean_coef1), t, x_t.shape) * x_t))
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - pred_xstart) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return (t.float() * (1000.0 / self.num_timesteps))
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
'\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n '
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n "
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs)))
out = p_mean_var.copy()
out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
(out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
return out
def p_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
"\n Sample x_{t-1} from the model at the given timestep.\n\n :param model: the model to sample from.\n :param x: the current tensor at x_{t-1}.\n :param t: the value of t, starting at 0 for the first diffusion step.\n :param clip_denoised: if True, clip the x_start prediction to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict containing the following keys:\n - 'sample': a random sample from the model.\n - 'pred_xstart': a prediction of x_0.\n "
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
noise = th.randn_like(x)
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
if (cond_fn is not None):
out['mean'] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
return {'sample': sample, 'pred_xstart': out['pred_xstart']}
def p_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
"\n Generate samples from the model.\n\n :param model: the model module.\n :param shape: the shape of the samples, (N, C, H, W).\n :param noise: if specified, the noise from the encoder to sample.\n Should be of the same shape as `shape`.\n :param clip_denoised: if True, clip x_start predictions to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param device: if specified, the device to create the samples on.\n If not specified, use a model parameter's device.\n :param progress: if True, show a tqdm progress bar.\n :return: a non-differentiable batch of samples.\n "
final = None
for sample in self.p_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress):
final = sample
return final['sample']
def p_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
'\n Generate samples from the model and yield intermediate samples from\n each timestep of diffusion.\n\n Arguments are the same as p_sample_loop().\n Returns a generator over dicts, where each dict is the return value of\n p_sample().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::(- 1)]
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
with th.no_grad():
out = self.p_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs)
(yield out)
img = out['sample']
def ddim_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
'\n Sample x_{t-1} from the model using DDIM.\n\n Same usage as p_sample().\n '
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
if (cond_fn is not None):
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
noise = th.randn_like(x)
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
sample = (mean_pred + ((nonzero_mask * sigma) * noise))
return {'sample': sample, 'pred_xstart': out['pred_xstart']}
def ddim_reverse_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, eta=0.0):
'\n Sample x_{t+1} from the model using DDIM reverse ODE.\n '
assert (eta == 0.0), 'Reverse ODE only for deterministic path'
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
eps = (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x) - out['pred_xstart']) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape))
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_next)) + (th.sqrt((1 - alpha_bar_next)) * eps))
return {'sample': mean_pred, 'pred_xstart': out['pred_xstart']}
def ddim_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
'\n Generate samples from the model using DDIM.\n\n Same usage as p_sample_loop().\n '
final = None
for sample in self.ddim_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, eta=eta):
final = sample
return final['sample']
def ddim_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
'\n Use DDIM to sample from the model and yield intermediate samples from\n each timestep of DDIM.\n\n Same usage as p_sample_loop_progressive().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::(- 1)]
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
with th.no_grad():
out = self.ddim_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, eta=eta)
(yield out)
img = out['sample']
def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
"\n Get a term for the variational lower-bound.\n\n The resulting units are bits (rather than nats, as one might expect).\n This allows for comparison to other papers.\n\n :return: a dict with the following keys:\n - 'output': a shape [N] tensor of NLLs or KLs.\n - 'pred_xstart': the x_0 predictions.\n "
(true_mean, _, true_log_variance_clipped) = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)
out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
kl = normal_kl(true_mean, true_log_variance_clipped, out['mean'], out['log_variance'])
kl = (mean_flat(kl) / np.log(2.0))
decoder_nll = (- discretized_gaussian_log_likelihood(x_start, means=out['mean'], log_scales=(0.5 * out['log_variance'])))
assert (decoder_nll.shape == x_start.shape)
decoder_nll = (mean_flat(decoder_nll) / np.log(2.0))
output = th.where((t == 0), decoder_nll, kl)
return {'output': output, 'pred_xstart': out['pred_xstart']}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, temp=1):
'\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key "loss" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n '
if (model_kwargs is None):
model_kwargs = {}
if (noise is None):
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
(B, C) = x_t.shape[:2]
assert (model_output.shape == (B, (C * 2), *x_t.shape[2:]))
(model_output, model_var_values) = th.split(model_output, C, dim=1)
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms['vb'] = self._vb_terms_bpd(model=(lambda *args, r=frozen_out: r), x_start=x_start, x_t=x_t, t=t, clip_denoised=False)['output']
if (self.loss_type == LossType.RESCALED_MSE):
terms['vb'] *= (self.num_timesteps / 1000.0)
target = {ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0], ModelMeanType.START_X: x_start, ModelMeanType.EPSILON: noise}[self.model_mean_type]
assert (model_output.shape == target.shape == x_start.shape)
if ((self.loss_type == LossType.KL) or (self.loss_type == LossType.RESCALED_KL)):
terms['kl_loss'] = F.cross_entropy((model_output * temp), th.zeros(model_output.size(0), dtype=th.long).to(model_output.device))
elif ((self.loss_type == LossType.MSE) or (self.loss_type == LossType.RESCALED_MSE)):
terms['mse'] = mean_flat(((target - model_output) ** 2))
if ('vb' in terms):
terms['loss'] = (terms['mse'] + terms['vb'])
else:
terms['loss'] = terms['mse']
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n\n This term can't be optimized, as it only depends on the encoder.\n\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n "
batch_size = x_start.shape[0]
t = th.tensor(([(self.num_timesteps - 1)] * batch_size), device=x_start.device)
(qt_mean, _, qt_log_variance) = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return (mean_flat(kl_prior) / np.log(2.0))
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
    """
    Compute the entire variational lower-bound, measured in bits-per-dim,
    as well as other related quantities.

    :param model: the model to evaluate loss on.
    :param x_start: the [N x C x ...] tensor of inputs.
    :param clip_denoised: if True, clip denoised samples.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.

    :return: a dict containing the following keys:
             - total_bpd: the total variational lower-bound, per batch element.
             - prior_bpd: the prior term in the lower-bound.
             - vb: an [N x T] tensor of terms in the lower-bound.
             - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
             - mse: an [N x T] tensor of epsilon MSEs for each timestep.
    """
    device = x_start.device
    n = x_start.shape[0]
    vb_terms = []
    x0_mses = []
    eps_mses = []
    # Walk the diffusion chain backwards, from t = T-1 down to t = 0.
    for step in reversed(range(self.num_timesteps)):
        t_batch = th.tensor([step] * n, device=device)
        noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
        # Evaluation only — no gradients are needed for the bpd terms.
        with th.no_grad():
            out = self._vb_terms_bpd(
                model,
                x_start=x_start,
                x_t=x_t,
                t=t_batch,
                clip_denoised=clip_denoised,
                model_kwargs=model_kwargs,
            )
        vb_terms.append(out['output'])
        x0_mses.append(mean_flat((out['pred_xstart'] - x_start) ** 2))
        eps = self._predict_eps_from_xstart(x_t, t_batch, out['pred_xstart'])
        eps_mses.append(mean_flat((eps - noise) ** 2))
    vb = th.stack(vb_terms, dim=1)
    xstart_mse = th.stack(x0_mses, dim=1)
    mse = th.stack(eps_mses, dim=1)
    prior_bpd = self._prior_bpd(x_start)
    total_bpd = vb.sum(dim=1) + prior_bpd
    return {
        'total_bpd': total_bpd,
        'prior_bpd': prior_bpd,
        'vb': vb,
        'xstart_mse': xstart_mse,
        'mse': mse,
    }
|
def _extract_into_tensor(arr, timesteps, broadcast_shape):
'\n Extract values from a 1-D numpy array for a batch of indices.\n\n :param arr: the 1-D numpy array.\n :param timesteps: a tensor of indices into the array to extract.\n :param broadcast_shape: a larger shape of K dimensions with the batch\n dimension equal to the length of timesteps.\n :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.\n '
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while (len(res.shape) < len(broadcast_shape)):
res = res[(..., None)]
return res.expand(broadcast_shape)
|
class KVWriter(object):
    """Abstract interface for sinks that record key/value diagnostics."""

    def writekvs(self, kvs):
        """Write the mapping ``kvs`` to the underlying sink; subclasses must override."""
        raise NotImplementedError
|
class SeqWriter(object):
    """Abstract interface for sinks that record a plain sequence of strings."""

    def writeseq(self, seq):
        """Write the iterable of strings ``seq``; subclasses must override."""
        raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
    """
    Writes diagnostics as human-readable ASCII tables (key/value pairs)
    and raw space-separated sequences to a file or file-like stream.
    """

    def __init__(self, filename_or_file):
        """
        :param filename_or_file: either a path to open for writing, or an
            already-open writable file-like object (e.g. sys.stdout).
        """
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True  # we opened it, so close() should close it
        else:
            # Fix: this class only ever writes to the stream, so require a
            # `write` method (the previous check asserted `read`, which is
            # never used anywhere in this class).
            assert hasattr(filename_or_file, 'write'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        """Render ``kvs`` as a dash-boxed two-column table and write it."""
        # Format values first: anything float-convertible gets %-8.3g,
        # everything else falls back to str(); both key and value are
        # truncated so the table stays a sane width.
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if hasattr(val, '__float__'):
                valstr = ('%-8.3g' % val)
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)
        if (len(key2str) == 0):
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))
        # 7 = the fixed characters per row: '| ', ' | ', ' |'.
        dashes = ('-' * ((keywidth + valwidth) + 7))
        lines = [dashes]
        # Case-insensitive sort keeps the table order stable and readable.
        for (key, val) in sorted(key2str.items(), key=(lambda kv: kv[0].lower())):
            lines.append(('| %s%s | %s%s |' % (key, (' ' * (keywidth - len(key))), val, (' ' * (valwidth - len(val))))))
        lines.append(dashes)
        self.file.write(('\n'.join(lines) + '\n'))
        self.file.flush()

    def _truncate(self, s):
        """Shorten ``s`` to at most 30 characters, marking cuts with '...'."""
        maxlen = 30
        return ((s[:(maxlen - 3)] + '...') if (len(s) > maxlen) else s)

    def writeseq(self, seq):
        """Write the elements of ``seq`` separated by single spaces, then a newline."""
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if (i < (len(seq) - 1)):
                self.file.write(' ')
        self.file.write('\n')
        self.file.flush()

    def close(self):
        """Close the stream, but only if this instance opened it."""
        if self.own_file:
            self.file.close()
|
class JSONOutputFormat(KVWriter):
    """Writes each diagnostics dict as one JSON object per line (JSON Lines)."""

    def __init__(self, filename):
        """:param filename: path of the file to create/truncate for output."""
        self.file = open(filename, 'wt')

    def writekvs(self, kvs):
        """
        Serialize ``kvs`` as a single JSON line and flush.

        Values carrying a ``dtype`` attribute (e.g. numpy scalars) are
        converted to plain floats so they are JSON-serializable.
        Fix: the conversion is performed on a copy, so the caller's dict
        is no longer mutated as a side effect of logging.
        """
        row = {k: (float(v) if hasattr(v, 'dtype') else v) for (k, v) in kvs.items()}
        self.file.write((json.dumps(row) + '\n'))
        self.file.flush()

    def close(self):
        """Close the underlying file."""
        self.file.close()
|
class CSVOutputFormat(KVWriter):
    """
    Writes diagnostics as rows of a CSV file whose header grows as new
    keys appear: when a previously-unseen key arrives, the whole file is
    rewritten in place with a widened header and padded historical rows.
    """

    def __init__(self, filename):
        # 'w+t': read/write text mode — reading the file back is required
        # when the header has to be rewritten with extra columns.
        self.file = open(filename, 'w+t')
        self.keys = []   # column order; fixed once a key is first seen
        self.sep = ','
    def writekvs(self, kvs):
        # Keys present in this row but not yet in the header.
        extra_keys = list((kvs.keys() - self.keys))
        extra_keys.sort()
        if extra_keys:
            self.keys.extend(extra_keys)
            # Rewrite from the top: new header, then every existing row
            # padded with one empty cell per new column. The rewritten
            # content is never shorter than the original (only commas are
            # added), so no truncate() is needed.
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if (i > 0):
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            for line in lines[1:]:
                # Drop the old trailing newline, append one separator per
                # new column, then re-terminate the row.
                self.file.write(line[:(- 1)])
                self.file.write((self.sep * len(extra_keys)))
                self.file.write('\n')
        # Append the current row in header order; keys missing from this
        # row (or explicitly None) become empty cells.
        for (i, k) in enumerate(self.keys):
            if (i > 0):
                self.file.write(',')
            v = kvs.get(k)
            if (v is not None):
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()
    def close(self):
        # Close the underlying file.
        self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """

    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # Imported lazily so tensorflow is only required when this output
        # format is actually instantiated.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))

    def writekvs(self, kvs):
        """Emit one scalar summary per key at the current step, then advance the step."""
        def scalar(tag, value):
            return self.tf.Summary.Value(tag=tag, simple_value=float(value))

        summary = self.tf.Summary(value=[scalar(k, v) for (k, v) in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1

    def close(self):
        """Close the events writer exactly once."""
        if self.writer:
            self.writer.Close()
            self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
    """
    Construct the writer object for one named output format.

    :param format: one of 'stdout', 'log', 'json', 'csv', 'tensorboard'.
    :param ev_dir: directory in which file-backed writers place their files.
    :param log_suffix: suffix inserted into generated file names.
    :raises ValueError: if ``format`` is not a known format name.
    """
    os.makedirs(ev_dir, exist_ok=True)
    # Lazy factories: the writer (and its file) is only created for the
    # format actually requested.
    factories = {
        'stdout': lambda: HumanOutputFormat(sys.stdout),
        'log': lambda: HumanOutputFormat(osp.join(ev_dir, ('log%s.txt' % log_suffix))),
        'json': lambda: JSONOutputFormat(osp.join(ev_dir, ('progress%s.json' % log_suffix))),
        'csv': lambda: CSVOutputFormat(osp.join(ev_dir, ('progress%s.csv' % log_suffix))),
        'tensorboard': lambda: TensorBoardOutputFormat(osp.join(ev_dir, ('tb%s' % log_suffix))),
    }
    if format not in factories:
        raise ValueError(('Unknown format specified: %s' % (format,)))
    return factories[format]()
|
def logkv(key, val):
    """
    Log a value of some diagnostic.

    Call this once for each diagnostic quantity, each iteration.
    If called multiple times for the same key, the last value wins.
    """
    get_current().logkv(key, val)
|
def logkv_mean(key, val):
    """Like :func:`logkv`, but repeated calls for a key are averaged."""
    get_current().logkv_mean(key, val)
|
def logkvs(d):
    """Log every key-value pair in the dictionary ``d``."""
    for key, value in d.items():
        logkv(key, value)
|
def dumpkvs():
    """Write out all diagnostics accumulated for the current iteration."""
    return get_current().dumpkvs()
|
def getkvs():
    """Return the current logger's mapping of logged key-value pairs."""
    return get_current().name2val
|
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and
    output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
|
def debug(*args):
    """Log ``args`` at DEBUG level."""
    log(*args, level=DEBUG)
|
def info(*args):
    """Log ``args`` at INFO level."""
    log(*args, level=INFO)
|
def warn(*args):
    """Log ``args`` at WARN level."""
    log(*args, level=WARN)
|
def error(*args):
    """Log ``args`` at ERROR level."""
    log(*args, level=ERROR)
|
def set_level(level):
    """Set the logging threshold on the current logger."""
    get_current().set_level(level)
|
# NOTE(review): the following lines are dataset-viewer page scaffolding that
# leaked into this extraction, not source code; commented out so the file
# remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.