code stringlengths 17 6.64M |
|---|
def basic_clean(text):
    """Repair mojibake via ftfy and HTML entities, then trim surrounding whitespace."""
    # Entities can arrive double-escaped upstream, hence the double unescape.
    return html.unescape(html.unescape(ftfy.fix_text(text))).strip()
|
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and strip the ends."""
    return re.sub(r'\s+', ' ', text).strip()
|
class SimpleTokenizer(object):
    """Byte-pair-encoding tokenizer matching OpenAI CLIP's 49152-entry vocabulary."""

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # Merge rules: skip the header line; the vocab budget is 49152 tokens,
        # minus 256*2 byte symbols (plain + '</w>' variants) and 2 specials.
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [(v + '</w>') for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Lower rank == merged earlier (more frequent pair).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Cache token -> BPE string; the special tokens map to themselves.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)

    def bpe(self, token):
        """Apply BPE merges to one token; returns its subwords joined by spaces."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Greedily merge the lowest-ranked adjacent pair present in the word.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:  # fix: was a bare except; only index() can raise here
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean *text* and return its list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw UTF-8 bytes onto the printable-unicode alphabet of the vocab.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Invert encode(): ids -> text, restoring word boundaries from '</w>'."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
|
@DATASET_REGISTRY.register()
class Bamboo(DatasetBase):
    """Bamboo dataset: images under <root>/images, classnames from a JSON id map.

    The 80/20 train/test split is created once and cached in preprocessed.pkl;
    few-shot subsets are cached per (num_shots, seed).
    """

    dataset_dir = 'bamboo'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.image_dir = root + '/images'
        self.dataset_dir = root
        self.preprocessed = os.path.join(self.dataset_dir, 'preprocessed.pkl')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.preprocessed):
            # Reuse the cached split so the random partition stays stable across runs.
            with open(self.preprocessed, 'rb') as f:
                preprocessed = pickle.load(f)
            train = preprocessed['train']
            test = preprocessed['test']
        else:
            json_file = root + '/bamboo_id_map_sample.json'
            classnames = self.read_classnames(json_file)
            train, test, _ = self.read_and_split_data(self.image_dir, p_trn=0.8, new_cnames=classnames)
            preprocessed = {'train': train, 'test': test}
            with open(self.preprocessed, 'wb') as f:
                pickle.dump(preprocessed, f, protocol=pickle.HIGHEST_PROTOCOL)
        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                train = data['train']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                data = {'train': train}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        # No val split for Bamboo: test doubles as val.
        train, test = OxfordPets.subsample_classes(train, test, subsample=subsample)
        super().__init__(train_x=train, val=test, test=test)

    @staticmethod
    def read_classnames(text_file):
        """Return an OrderedDict of <folder name>: <class name>.

        JSON values may be lists of synonyms; the first entry wins.
        """
        classnames = OrderedDict()
        import json  # local import: json is only needed by this helper
        # Fix: use a context manager instead of leaking the file handle.
        with open(text_file, 'r') as f:
            classnames_origin = json.load(f)
        for k, v in classnames_origin.items():
            classnames[k] = v[0] if isinstance(v, list) else v
        return classnames

    def read_data(self, classnames, split_dir):
        """Scan <image_dir>/<split_dir>/<class folder>/* into Datum items."""
        split_dir = os.path.join(self.image_dir, split_dir)
        folders = sorted(f.name for f in os.scandir(split_dir) if f.is_dir())
        items = []
        for label, folder in enumerate(folders):
            imnames = listdir_nohidden(os.path.join(split_dir, folder))
            classname = classnames[folder]
            for imname in imnames:
                impath = os.path.join(split_dir, folder, imname)
                items.append(Datum(impath=impath, label=label, classname=classname))
        return items

    @staticmethod
    def read_and_split_data(image_dir, p_trn=0.5, p_val=0.2, ignored=(), new_cnames=None):
        """Randomly split each category folder into train/val/test.

        Args:
            image_dir: directory with one sub-folder per category.
            p_trn, p_val: train/val fractions; test takes the remainder.
            ignored: category folder names to skip (fix: the default used to be
                a mutable list shared across calls).
            new_cnames: optional mapping folder name -> display classname.
        """
        categories = listdir_nohidden(image_dir)
        categories = [c for c in categories if c not in ignored]
        categories.sort()
        p_tst = 1 - p_trn - p_val
        print(f'Splitting into {p_trn:.0%} train, {p_val:.0%} val, and {p_tst:.0%} test')

        def _collate(ims, y, c):
            # Wrap each image path into a Datum with the given label/classname.
            return [Datum(impath=im, label=y, classname=c) for im in ims]

        train, val, test = [], [], []
        for label, category in enumerate(categories):
            category_dir = os.path.join(image_dir, category)
            images = [os.path.join(category_dir, im) for im in listdir_nohidden(category_dir)]
            random.shuffle(images)
            n_total = len(images)
            n_train = round(n_total * p_trn)
            n_val = round(n_total * p_val)
            n_test = n_total - n_train - n_val
            assert n_train > 0
            if new_cnames is not None and category in new_cnames:
                category = new_cnames[category]
            train.extend(_collate(images[:n_train], label, category))
            # val/test may legitimately be empty (e.g. p_trn=0.8 on tiny classes).
            if n_val > 0:
                val.extend(_collate(images[n_train:n_train + n_val], label, category))
            if n_test > 0:
                test.extend(_collate(images[n_train + n_val:], label, category))
        return train, val, test
|
@DATASET_REGISTRY.register()
class Caltech101(DatasetBase):
    """Caltech-101 with the Zhou et al. fixed split and cached few-shot subsets."""

    dataset_dir = 'caltech-101'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, '101_ObjectCategories')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_Caltech101.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)

        if not os.path.exists(self.split_path):
            # First run: create a random split and persist it for reproducibility.
            train, val, test = DTD.read_and_split_data(self.image_dir, ignored=IGNORED, new_cnames=NEW_CNAMES)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        else:
            train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)

        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            fewshot_path = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(fewshot_path):
                print(f'Loading preprocessed few-shot data from {fewshot_path}')
                with open(fewshot_path, 'rb') as file:
                    cached = pickle.load(file)
                train, val = cached['train'], cached['val']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                cached = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {fewshot_path}')
                with open(fewshot_path, 'wb') as file:
                    pickle.dump(cached, file, protocol=pickle.HIGHEST_PROTOCOL)

        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        train, val, test = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)
|
@DATASET_REGISTRY.register()
class DescribableTextures(DatasetBase):
    """Describable Textures Dataset (DTD).

    Also provides the read_and_split_data() helper reused by other dataset
    classes in this module.
    """

    dataset_dir = 'dtd'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'images')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_DescribableTextures.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.split_path):
            train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
        else:
            # First run: build a random split and persist it for reproducibility.
            train, val, test = self.read_and_split_data(self.image_dir)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                train, val = data['train'], data['val']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        train, val, test = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    @staticmethod
    def read_and_split_data(image_dir, p_trn=0.5, p_val=0.2, ignored=(), new_cnames=None):
        """Randomly split each category folder into train/val/test.

        Args:
            image_dir: directory containing one sub-folder per category.
            p_trn, p_val: train/val fractions; test gets the remainder.
            ignored: category folder names to skip (fix: the default used to
                be a mutable list shared across calls).
            new_cnames: optional mapping folder name -> display classname.
        """
        categories = listdir_nohidden(image_dir)
        categories = [c for c in categories if c not in ignored]
        categories.sort()
        p_tst = 1 - p_trn - p_val
        print(f'Splitting into {p_trn:.0%} train, {p_val:.0%} val, and {p_tst:.0%} test')

        def _collate(ims, y, c):
            # Wrap each image path into a Datum with the given label/classname.
            return [Datum(impath=im, label=y, classname=c) for im in ims]

        train, val, test = [], [], []
        for label, category in enumerate(categories):
            category_dir = os.path.join(image_dir, category)
            images = [os.path.join(category_dir, im) for im in listdir_nohidden(category_dir)]
            random.shuffle(images)
            n_total = len(images)
            n_train = round(n_total * p_trn)
            n_val = round(n_total * p_val)
            n_test = n_total - n_train - n_val
            assert n_train > 0 and n_val > 0 and n_test > 0
            if new_cnames is not None and category in new_cnames:
                category = new_cnames[category]
            train.extend(_collate(images[:n_train], label, category))
            val.extend(_collate(images[n_train:n_train + n_val], label, category))
            test.extend(_collate(images[n_train + n_val:], label, category))
        return train, val, test
|
@DATASET_REGISTRY.register()
class EuroSAT(DatasetBase):
    """EuroSAT satellite-image dataset (images under <dataset_dir>/2750)."""

    dataset_dir = 'eurosat'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, '2750')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_EuroSAT.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.split_path):
            train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
        else:
            train, val, test = DTD.read_and_split_data(self.image_dir, new_cnames=NEW_CNAMES)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                train, val = data['train'], data['val']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        train, val, test = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    def update_classname(self, dataset_old):
        """Return a copy of *dataset_old* with classnames remapped via NEW_CNAMES.

        Fix: this previously referenced NEW_CLASSNAMES, which does not appear
        anywhere else in the visible code (the split above uses NEW_CNAMES),
        so calling it would raise NameError — assumed typo; confirm that
        NEW_CLASSNAMES is not defined at module level.
        """
        dataset_new = []
        for item_old in dataset_old:
            cname_new = NEW_CNAMES[item_old.classname]
            item_new = Datum(impath=item_old.impath, label=item_old.label, classname=cname_new)
            dataset_new.append(item_new)
        return dataset_new
|
@DATASET_REGISTRY.register()
class FGVCAircraft(DatasetBase):
    """FGVC-Aircraft variant classification using the official split files."""

    dataset_dir = 'fgvc_aircraft'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'images')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)

        # variants.txt lists one class (aircraft variant) per line.
        with open(os.path.join(self.dataset_dir, 'variants.txt'), 'r') as f:
            classnames = [line.strip() for line in f.readlines()]
        cname2lab = {c: i for i, c in enumerate(classnames)}

        train = self.read_data(cname2lab, 'images_variant_train.txt')
        val = self.read_data(cname2lab, 'images_variant_val.txt')
        test = self.read_data(cname2lab, 'images_variant_test.txt')

        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            fewshot_path = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(fewshot_path):
                print(f'Loading preprocessed few-shot data from {fewshot_path}')
                with open(fewshot_path, 'rb') as file:
                    cached = pickle.load(file)
                train, val = cached['train'], cached['val']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                cached = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {fewshot_path}')
                with open(fewshot_path, 'wb') as file:
                    pickle.dump(cached, file, protocol=pickle.HIGHEST_PROTOCOL)

        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        train, val, test = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    def read_data(self, cname2lab, split_file):
        """Parse an 'images_variant_*.txt' file of '<image id> <variant name...>' lines."""
        filepath = os.path.join(self.dataset_dir, split_file)
        items = []
        with open(filepath, 'r') as f:
            for raw in f.readlines():
                parts = raw.strip().split(' ')
                classname = ' '.join(parts[1:])
                items.append(Datum(
                    impath=os.path.join(self.image_dir, parts[0] + '.jpg'),
                    label=cname2lab[classname],
                    classname=classname,
                ))
        return items
|
@DATASET_REGISTRY.register()
class Food101(DatasetBase):
    """Food-101 with the Zhou et al. fixed split and cached few-shot subsets."""

    dataset_dir = 'food-101'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'images')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_Food101.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)

        if not os.path.exists(self.split_path):
            # First run: create a random split and persist it for reproducibility.
            train, val, test = DTD.read_and_split_data(self.image_dir)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        else:
            train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)

        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            fewshot_path = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(fewshot_path):
                print(f'Loading preprocessed few-shot data from {fewshot_path}')
                with open(fewshot_path, 'rb') as file:
                    cached = pickle.load(file)
                train, val = cached['train'], cached['val']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                cached = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {fewshot_path}')
                with open(fewshot_path, 'wb') as file:
                    pickle.dump(cached, file, protocol=pickle.HIGHEST_PROTOCOL)

        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        train, val, test = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)
|
@DATASET_REGISTRY.register()
class ImageNet(DatasetBase):
    # ImageNet-1k loaded from train/ and val/ folder hierarchies; the scanned
    # Datum lists are cached in preprocessed.pkl to avoid re-walking the tree.
    dataset_dir = 'imagenet'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = self.dataset_dir
        # NOTE(review): replacing 'group' with 'sheng' redirects the cache and
        # few-shot files to a different (presumably user-writable) volume —
        # site-specific path hack; confirm before reusing this class elsewhere.
        self.preprocessed = os.path.join(self.dataset_dir.replace('group', 'sheng'), 'preprocessed.pkl')
        self.split_fewshot_dir = os.path.join(self.dataset_dir.replace('group', 'sheng'), 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.preprocessed):
            # Fast path: reuse the cached Datum lists.
            with open(self.preprocessed, 'rb') as f:
                preprocessed = pickle.load(f)
            train = preprocessed['train']
            test = preprocessed['test']
        else:
            text_file = './scripts/classnames.txt'
            classnames = self.read_classnames(text_file)
            train = self.read_data(classnames, 'train')
            # ImageNet's official 'val' split serves as the test set here.
            test = self.read_data(classnames, 'val')
            preprocessed = {'train': train, 'test': test}
            with open(self.preprocessed, 'wb') as f:
                pickle.dump(preprocessed, f, protocol=pickle.HIGHEST_PROTOCOL)
        num_shots = cfg.DATASET.NUM_SHOTS
        if (num_shots >= 1):
            # Few-shot subsets are cached per (num_shots, seed) combination.
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                train = data['train']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                data = {'train': train}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        # No separate val split: test doubles as val below.
        (train, test) = OxfordPets.subsample_classes(train, test, subsample=subsample)
        super().__init__(train_x=train, val=test, test=test)

    @staticmethod
    def read_classnames(text_file):
        """Return a dictionary containing
        key-value pairs of <folder name>: <class name>.

        Each line of *text_file* is '<wnid folder> <classname words...>'.
        """
        classnames = OrderedDict()
        with open(text_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip().split(' ')
                folder = line[0]
                # Classnames may contain spaces; rejoin everything after the folder.
                classname = ' '.join(line[1:])
                classnames[folder] = classname
        return classnames

    def read_data(self, classnames, split_dir):
        """Scan <image_dir>/<split_dir>/<wnid folder>/* into Datum items.

        Labels are assigned by the sorted order of the class folders.
        """
        split_dir = os.path.join(self.image_dir, split_dir)
        folders = sorted((f.name for f in os.scandir(split_dir) if f.is_dir()))
        items = []
        for (label, folder) in enumerate(folders):
            imnames = listdir_nohidden(os.path.join(split_dir, folder))
            classname = classnames[folder]
            for imname in imnames:
                impath = os.path.join(split_dir, folder, imname)
                item = Datum(impath=impath, label=label, classname=classname)
                items.append(item)
        return items
|
@DATASET_REGISTRY.register()
class ImageNet21k(DatasetBase):
    """ImageNet-21k loaded directly from <root>.

    The 80/20 train/test split is generated once and cached in
    preprocessed.pkl; few-shot subsets are cached per (num_shots, seed).
    """

    dataset_dir = 'imagenet21k'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.image_dir = root
        self.dataset_dir = root
        # NOTE(review): 'group' -> 'sheng' redirects caches to another volume;
        # site-specific path hack — confirm before reuse.
        self.preprocessed = os.path.join(self.dataset_dir.replace('group', 'sheng'), 'preprocessed.pkl')
        self.split_fewshot_dir = os.path.join(self.dataset_dir.replace('group', 'sheng'), 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.preprocessed):
            # Reuse the cached split so the random partition stays stable.
            with open(self.preprocessed, 'rb') as f:
                preprocessed = pickle.load(f)
            train = preprocessed['train']
            test = preprocessed['test']
        else:
            text_file = './scripts/imagenet21k_classnames.txt'
            classnames = self.read_classnames(text_file)
            train, test, _ = self.read_and_split_data(self.image_dir, p_trn=0.8, new_cnames=classnames)
            preprocessed = {'train': train, 'test': test}
            with open(self.preprocessed, 'wb') as f:
                pickle.dump(preprocessed, f, protocol=pickle.HIGHEST_PROTOCOL)
        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                train = data['train']
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                data = {'train': train}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        # No val split: test doubles as val.
        train, test = OxfordPets.subsample_classes(train, test, subsample=subsample)
        super().__init__(train_x=train, val=test, test=test)

    @staticmethod
    def read_classnames(text_file):
        """Return an OrderedDict of <folder name>: <class name>.

        Each line of *text_file* is '<folder> <classname words...>'.
        """
        classnames = OrderedDict()
        with open(text_file, 'r') as f:
            for line in f.readlines():
                parts = line.strip().split(' ')
                classnames[parts[0]] = ' '.join(parts[1:])
        return classnames

    def read_data(self, classnames, split_dir):
        """Scan <image_dir>/<split_dir>/<class folder>/* into Datum items."""
        split_dir = os.path.join(self.image_dir, split_dir)
        folders = sorted(f.name for f in os.scandir(split_dir) if f.is_dir())
        items = []
        for label, folder in enumerate(folders):
            imnames = listdir_nohidden(os.path.join(split_dir, folder))
            classname = classnames[folder]
            for imname in imnames:
                impath = os.path.join(split_dir, folder, imname)
                items.append(Datum(impath=impath, label=label, classname=classname))
        return items

    @staticmethod
    def read_and_split_data(image_dir, p_trn=0.5, p_val=0.2, ignored=(), new_cnames=None):
        """Randomly split each category folder into train/val/test.

        Args:
            image_dir: directory with one sub-folder per category.
            p_trn, p_val: train/val fractions; test takes the remainder.
            ignored: category folder names to skip (fix: the default used to
                be a mutable list shared across calls).
            new_cnames: optional mapping folder name -> display classname.
        """
        categories = listdir_nohidden(image_dir)
        categories = [c for c in categories if c not in ignored]
        categories.sort()
        p_tst = 1 - p_trn - p_val
        print(f'Splitting into {p_trn:.0%} train, {p_val:.0%} val, and {p_tst:.0%} test')

        def _collate(ims, y, c):
            return [Datum(impath=im, label=y, classname=c) for im in ims]

        train, val, test = [], [], []
        for label, category in enumerate(categories):
            category_dir = os.path.join(image_dir, category)
            images = [os.path.join(category_dir, im) for im in listdir_nohidden(category_dir)]
            random.shuffle(images)
            n_total = len(images)
            n_train = round(n_total * p_trn)
            n_val = round(n_total * p_val)
            n_test = n_total - n_train - n_val
            assert n_train > 0
            if new_cnames is not None and category in new_cnames:
                category = new_cnames[category]
            train.extend(_collate(images[:n_train], label, category))
            # val/test may be empty when the fractions leave no images over.
            if n_val > 0:
                val.extend(_collate(images[n_train:n_train + n_val], label, category))
            if n_test > 0:
                test.extend(_collate(images[n_train + n_val:], label, category))
        return train, val, test
|
@DATASET_REGISTRY.register()
class ImageNetA(DatasetBase):
    """ImageNet-A(dversarial).

    This dataset is used for testing only.
    """

    dataset_dir = 'imagenet-adversarial'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'imagenet-a')
        wnid_to_name = ImageNet.read_classnames(os.path.join(self.dataset_dir, 'classnames.txt'))
        data = self.read_data(wnid_to_name)
        # Test-only dataset: the same items fill both train_x and test.
        super().__init__(train_x=data, test=data)

    def read_data(self, classnames):
        """One Datum per image; labels follow the sorted order of kept folders."""
        kept = [f for f in listdir_nohidden(self.image_dir, sort=True) if f not in TO_BE_IGNORED]
        items = []
        for label, folder in enumerate(kept):
            classname = classnames[folder]
            folder_dir = os.path.join(self.image_dir, folder)
            for imname in listdir_nohidden(folder_dir):
                items.append(Datum(impath=os.path.join(folder_dir, imname), label=label, classname=classname))
        return items
|
@DATASET_REGISTRY.register()
class ImageNetR(DatasetBase):
    """ImageNet-R(endition).

    This dataset is used for testing only.
    """

    dataset_dir = 'imagenet-rendition'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'imagenet-r')
        wnid_to_name = ImageNet.read_classnames(os.path.join(self.dataset_dir, 'classnames.txt'))
        data = self.read_data(wnid_to_name)
        # Test-only dataset: the same items fill both train_x and test.
        super().__init__(train_x=data, test=data)

    def read_data(self, classnames):
        """One Datum per image; labels follow the sorted order of kept folders."""
        kept = [f for f in listdir_nohidden(self.image_dir, sort=True) if f not in TO_BE_IGNORED]
        items = []
        for label, folder in enumerate(kept):
            classname = classnames[folder]
            folder_dir = os.path.join(self.image_dir, folder)
            for imname in listdir_nohidden(folder_dir):
                items.append(Datum(impath=os.path.join(folder_dir, imname), label=label, classname=classname))
        return items
|
@DATASET_REGISTRY.register()
class ImageNetSketch(DatasetBase):
    """ImageNet-Sketch.

    This dataset is used for testing only.
    """

    dataset_dir = 'imagenet-sketch'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'images')
        wnid_to_name = ImageNet.read_classnames(os.path.join(self.dataset_dir, 'classnames.txt'))
        data = self.read_data(wnid_to_name)
        # Test-only dataset: the same items fill both train_x and test.
        super().__init__(train_x=data, test=data)

    def read_data(self, classnames):
        """One Datum per sketch image; labels assigned by sorted folder order."""
        folders = listdir_nohidden(self.image_dir, sort=True)
        items = []
        for label, folder in enumerate(folders):
            classname = classnames[folder]
            folder_dir = os.path.join(self.image_dir, folder)
            for imname in listdir_nohidden(folder_dir):
                items.append(Datum(impath=os.path.join(folder_dir, imname), label=label, classname=classname))
        return items
|
@DATASET_REGISTRY.register()
class ImageNetV2(DatasetBase):
    """ImageNetV2.

    This dataset is used for testing only.
    """

    dataset_dir = 'imagenetv2'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        image_dir = 'imagenetv2-matched-frequency-format-val'
        self.image_dir = os.path.join(self.dataset_dir, image_dir)
        text_file = os.path.join(self.dataset_dir, 'classnames.txt')
        classnames = ImageNet.read_classnames(text_file)
        data = self.read_data(classnames)
        super().__init__(train_x=data, test=data)

    def read_data(self, classnames):
        """Build Datum items; class folders are named by numeric label ('0', '1', ...).

        Generalized: the class count is derived from *classnames* instead of
        being hard-coded to 1000, so class subsets also work (unchanged
        behavior for the full 1000-class file).
        """
        image_dir = self.image_dir
        folders = list(classnames.keys())
        items = []
        for label in range(len(folders)):  # was hard-coded range(1000)
            class_dir = os.path.join(image_dir, str(label))
            imnames = listdir_nohidden(class_dir)
            # The i-th classnames entry corresponds to numeric folder str(i).
            classname = classnames[folders[label]]
            for imname in imnames:
                impath = os.path.join(class_dir, imname)
                items.append(Datum(impath=impath, label=label, classname=classname))
        return items
|
@DATASET_REGISTRY.register()
class OxfordFlowers(DatasetBase):
    # Oxford Flowers-102: labels come from imagelabels.mat (1-indexed),
    # class names from cat_to_name.json (keyed by the 1-indexed label string).
    dataset_dir = 'oxford_flowers'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'jpg')
        self.label_file = os.path.join(self.dataset_dir, 'imagelabels.mat')
        self.lab2cname_file = os.path.join(self.dataset_dir, 'cat_to_name.json')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_OxfordFlowers.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.split_path):
            # Reuse the persisted split for reproducibility.
            (train, val, test) = OxfordPets.read_split(self.split_path, self.image_dir)
        else:
            (train, val, test) = self.read_data()
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if (num_shots >= 1):
            # Few-shot subsets are cached per (num_shots, seed) combination.
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                (train, val) = (data['train'], data['val'])
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                # Validation is capped at 4 shots regardless of num_shots.
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        (train, val, test) = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    def read_data(self):
        """Split images 50/20/30 into train/val/test, stratified per class.

        Labels in imagelabels.mat are 1-indexed; Datum labels are shifted
        to 0-indexed via (y - 1) inside _collate.
        """
        tracker = defaultdict(list)
        label_file = loadmat(self.label_file)['labels'][0]
        for (i, label) in enumerate(label_file):
            # Images are named image_00001.jpg, image_00002.jpg, ...
            imname = f'image_{str((i + 1)).zfill(5)}.jpg'
            impath = os.path.join(self.image_dir, imname)
            label = int(label)
            tracker[label].append(impath)
        print('Splitting data into 50% train, 20% val, and 30% test')

        def _collate(ims, y, c):
            items = []
            for im in ims:
                # y is the 1-indexed .mat label; Datum stores 0-indexed labels.
                item = Datum(impath=im, label=(y - 1), classname=c)
                items.append(item)
            return items
        lab2cname = read_json(self.lab2cname_file)
        (train, val, test) = ([], [], [])
        for (label, impaths) in tracker.items():
            random.shuffle(impaths)
            n_total = len(impaths)
            n_train = round((n_total * 0.5))
            n_val = round((n_total * 0.2))
            n_test = ((n_total - n_train) - n_val)
            assert ((n_train > 0) and (n_val > 0) and (n_test > 0))
            # cat_to_name.json is keyed by the 1-indexed label as a string.
            cname = lab2cname[str(label)]
            train.extend(_collate(impaths[:n_train], label, cname))
            val.extend(_collate(impaths[n_train:(n_train + n_val)], label, cname))
            test.extend(_collate(impaths[(n_train + n_val):], label, cname))
        return (train, val, test)
|
@DATASET_REGISTRY.register()
class OxfordPets(DatasetBase):
dataset_dir = 'oxford_pets'
def __init__(self, cfg):
root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = os.path.join(root, self.dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, 'images')
self.anno_dir = os.path.join(self.dataset_dir, 'annotations')
self.split_path = os.path.join(self.dataset_dir, 'split_zhou_OxfordPets.json')
self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
mkdir_if_missing(self.split_fewshot_dir)
if os.path.exists(self.split_path):
(train, val, test) = self.read_split(self.split_path, self.image_dir)
else:
trainval = self.read_data(split_file='trainval.txt')
test = self.read_data(split_file='test.txt')
(train, val) = self.split_trainval(trainval)
self.save_split(train, val, test, self.split_path, self.image_dir)
num_shots = cfg.DATASET.NUM_SHOTS
if (num_shots >= 1):
seed = cfg.SEED
preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
if os.path.exists(preprocessed):
print(f'Loading preprocessed few-shot data from {preprocessed}')
with open(preprocessed, 'rb') as file:
data = pickle.load(file)
(train, val) = (data['train'], data['val'])
else:
train = self.generate_fewshot_dataset(train, num_shots=num_shots)
val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
data = {'train': train, 'val': val}
print(f'Saving preprocessed few-shot data to {preprocessed}')
with open(preprocessed, 'wb') as file:
pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
subsample = cfg.DATASET.SUBSAMPLE_CLASSES
(train, val, test) = self.subsample_classes(train, val, test, subsample=subsample)
super().__init__(train_x=train, val=val, test=test)
def read_data(self, split_file):
filepath = os.path.join(self.anno_dir, split_file)
items = []
with open(filepath, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
(imname, label, species, _) = line.split(' ')
breed = imname.split('_')[:(- 1)]
breed = '_'.join(breed)
breed = breed.lower()
imname += '.jpg'
impath = os.path.join(self.image_dir, imname)
label = (int(label) - 1)
item = Datum(impath=impath, label=label, classname=breed)
items.append(item)
return items
@staticmethod
def split_trainval(trainval, p_val=0.2):
p_trn = (1 - p_val)
print(f'Splitting trainval into {p_trn:.0%} train and {p_val:.0%} val')
tracker = defaultdict(list)
for (idx, item) in enumerate(trainval):
label = item.label
tracker[label].append(idx)
(train, val) = ([], [])
for (label, idxs) in tracker.items():
n_val = round((len(idxs) * p_val))
assert (n_val > 0)
random.shuffle(idxs)
for (n, idx) in enumerate(idxs):
item = trainval[idx]
if (n < n_val):
val.append(item)
else:
train.append(item)
return (train, val)
@staticmethod
def save_split(train, val, test, filepath, path_prefix):
def _extract(items):
out = []
for item in items:
impath = item.impath
label = item.label
classname = item.classname
impath = impath.replace(path_prefix, '')
if impath.startswith('/'):
impath = impath[1:]
out.append((impath, label, classname))
return out
train = _extract(train)
val = _extract(val)
test = _extract(test)
split = {'train': train, 'val': val, 'test': test}
write_json(split, filepath)
print(f'Saved split to {filepath}')
@staticmethod
def read_split(filepath, path_prefix):
def _convert(items):
out = []
for (impath, label, classname) in items:
impath = os.path.join(path_prefix, impath)
item = Datum(impath=impath, label=int(label), classname=classname)
out.append(item)
return out
print(f'Reading split from {filepath}')
split = read_json(filepath)
train = _convert(split['train'])
val = _convert(split['val'])
test = _convert(split['test'])
return (train, val, test)
@staticmethod
def subsample_classes(*args, subsample='all'):
'Divide classes into two groups. The first group\n represents base classes while the second group represents\n new classes.\n\n Args:\n args: a list of datasets, e.g. train, val and test.\n subsample (str): what classes to subsample.\n '
assert (subsample in ['all', 'base', 'new'])
if (subsample == 'all'):
return args
dataset = args[0]
labels = set()
for item in dataset:
labels.add(item.label)
labels = list(labels)
labels.sort()
n = len(labels)
m = math.ceil((n / 2))
print(f'SUBSAMPLE {subsample.upper()} CLASSES!')
if (subsample == 'base'):
selected = labels[:m]
else:
selected = labels[m:]
relabeler = {y: y_new for (y_new, y) in enumerate(selected)}
output = []
for dataset in args:
dataset_new = []
for item in dataset:
if (item.label not in selected):
continue
item_new = Datum(impath=item.impath, label=relabeler[item.label], classname=item.classname)
dataset_new.append(item_new)
output.append(dataset_new)
return output
|
@DATASET_REGISTRY.register()
class StanfordCars(DatasetBase):
    """Stanford Cars few-shot dataset loader.

    Reuses the OxfordPets static helpers for split reading/saving,
    train/val splitting, and base/new class subsampling.
    """

    # Folder name under cfg.DATASET.ROOT that holds this dataset.
    dataset_dir = 'stanford_cars'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_StanfordCars.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        # Prefer the pre-computed split; otherwise build one from the devkit
        # .mat annotation files and persist it for future runs.
        if os.path.exists(self.split_path):
            (train, val, test) = OxfordPets.read_split(self.split_path, self.dataset_dir)
        else:
            trainval_file = os.path.join(self.dataset_dir, 'devkit', 'cars_train_annos.mat')
            test_file = os.path.join(self.dataset_dir, 'cars_test_annos_withlabels.mat')
            meta_file = os.path.join(self.dataset_dir, 'devkit', 'cars_meta.mat')
            trainval = self.read_data('cars_train', trainval_file, meta_file)
            test = self.read_data('cars_test', test_file, meta_file)
            (train, val) = OxfordPets.split_trainval(trainval)
            OxfordPets.save_split(train, val, test, self.split_path, self.dataset_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if (num_shots >= 1):
            # Few-shot subsets are cached per (shots, seed) so sampling is
            # reproducible across runs.
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                    (train, val) = (data['train'], data['val'])
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                # Validation uses at most 4 shots regardless of num_shots.
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        (train, val, test) = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    def read_data(self, image_dir, anno_file, meta_file):
        """Parse a Stanford Cars .mat annotation file into Datum items.

        Args:
            image_dir (str): image sub-folder ('cars_train' or 'cars_test').
            anno_file (str): .mat file with per-image filename and class.
            meta_file (str): .mat file mapping class index to class name.
        """
        anno_file = loadmat(anno_file)['annotations'][0]
        meta_file = loadmat(meta_file)['class_names'][0]
        items = []
        for i in range(len(anno_file)):
            imname = anno_file[i]['fname'][0]
            impath = os.path.join(self.dataset_dir, image_dir, imname)
            label = anno_file[i]['class'][(0, 0)]
            label = (int(label) - 1)  # .mat labels are 1-based
            classname = meta_file[label][0]
            names = classname.split(' ')
            # Move the trailing year to the front, e.g.
            # 'Audi TT 2011' -> '2011 Audi TT'.
            year = names.pop((- 1))
            names.insert(0, year)
            classname = ' '.join(names)
            item = Datum(impath=impath, label=label, classname=classname)
            items.append(item)
        return items
|
@DATASET_REGISTRY.register()
class SUN397(DatasetBase):
    """SUN397 scene-recognition dataset loader.

    Split handling, few-shot caching and class subsampling reuse the
    OxfordPets static helpers.
    """

    # Folder name under cfg.DATASET.ROOT that holds this dataset.
    dataset_dir = 'sun397'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'SUN397')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_SUN397.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        # Prefer the pre-computed split; otherwise rebuild it from the
        # official class list and partition files, then persist it.
        if os.path.exists(self.split_path):
            (train, val, test) = OxfordPets.read_split(self.split_path, self.image_dir)
        else:
            classnames = []
            with open(os.path.join(self.dataset_dir, 'ClassName.txt'), 'r') as f:
                lines = f.readlines()
                for line in lines:
                    line = line.strip()[1:]  # drop the leading '/'
                    classnames.append(line)
            cname2lab = {c: i for (i, c) in enumerate(classnames)}
            trainval = self.read_data(cname2lab, 'Training_01.txt')
            test = self.read_data(cname2lab, 'Testing_01.txt')
            (train, val) = OxfordPets.split_trainval(trainval)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if (num_shots >= 1):
            # Few-shot subsets are cached per (shots, seed) for reproducibility.
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                    (train, val) = (data['train'], data['val'])
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                # Validation uses at most 4 shots regardless of num_shots.
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        (train, val, test) = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    def read_data(self, cname2lab, text_file):
        """Parse a SUN397 partition file into Datum items.

        Lines look like '/a/abbey/sun_xxx.jpg'; the directory part is the
        class key used for label lookup.
        """
        text_file = os.path.join(self.dataset_dir, text_file)
        items = []
        with open(text_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                imname = line.strip()[1:]  # drop the leading '/'
                classname = os.path.dirname(imname)
                label = cname2lab[classname]
                impath = os.path.join(self.image_dir, imname)
                # Build a readable class name: drop the single-letter bucket
                # (e.g. 'a/abbey' -> ['abbey']) and reverse nested parts so
                # the most specific part comes first.
                names = classname.split('/')[1:]
                names = names[::(- 1)]
                classname = ' '.join(names)
                item = Datum(impath=impath, label=label, classname=classname)
                items.append(item)
        return items
|
@DATASET_REGISTRY.register()
class UCF101(DatasetBase):
    """UCF101 action-recognition dataset, using one middle frame per video.

    Split handling, few-shot caching and class subsampling reuse the
    OxfordPets static helpers.
    """

    # Folder name under cfg.DATASET.ROOT that holds this dataset.
    dataset_dir = 'ucf101'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'UCF-101-midframes')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_UCF101.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        # Prefer the pre-computed split; otherwise rebuild it from the
        # official train/test lists and persist it.
        if os.path.exists(self.split_path):
            (train, val, test) = OxfordPets.read_split(self.split_path, self.image_dir)
        else:
            cname2lab = {}
            filepath = os.path.join(self.dataset_dir, 'ucfTrainTestlist/classInd.txt')
            with open(filepath, 'r') as f:
                lines = f.readlines()
                for line in lines:
                    (label, classname) = line.strip().split(' ')
                    label = (int(label) - 1)  # classInd.txt labels are 1-based
                    cname2lab[classname] = label
            trainval = self.read_data(cname2lab, 'ucfTrainTestlist/trainlist01.txt')
            test = self.read_data(cname2lab, 'ucfTrainTestlist/testlist01.txt')
            (train, val) = OxfordPets.split_trainval(trainval)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if (num_shots >= 1):
            # Few-shot subsets are cached per (shots, seed) for reproducibility.
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                    (train, val) = (data['train'], data['val'])
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                # Validation uses at most 4 shots regardless of num_shots.
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        (train, val, test) = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    def read_data(self, cname2lab, text_file):
        """Parse a UCF train/test list into Datum items.

        Only the first whitespace-separated token of each line is used; it
        has the form 'Action/filename.avi'. The mid-frame .jpg stands in
        for the video file.
        """
        text_file = os.path.join(self.dataset_dir, text_file)
        items = []
        with open(text_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip().split(' ')[0]
                (action, filename) = line.split('/')
                label = cname2lab[action]
                # Convert CamelCase action names to underscore-separated,
                # e.g. 'ApplyEyeMakeup' -> 'Apply_Eye_Makeup', matching the
                # extracted-frame directory layout.
                elements = re.findall('[A-Z][^A-Z]*', action)
                renamed_action = '_'.join(elements)
                filename = filename.replace('.avi', '.jpg')
                impath = os.path.join(self.image_dir, renamed_action, filename)
                item = Datum(impath=impath, label=label, classname=renamed_action)
                items.append(item)
        return items
|
def print_args(args, cfg):
    """Print CLI arguments (sorted by name) followed by the full config."""
    print('***************')
    print('** Arguments **')
    print('***************')
    for key in sorted(args.__dict__.keys()):
        print('{}: {}'.format(key, args.__dict__[key]))
    print('************')
    print('** Config **')
    print('************')
    print(cfg)
|
def reset_cfg(cfg, args):
    """Override selected config entries with any truthy CLI arguments."""
    overrides = (
        ('root', cfg.DATASET, 'ROOT'),
        ('output_dir', cfg, 'OUTPUT_DIR'),
        ('trainer', cfg.TRAINER, 'NAME'),
        ('backbone', cfg.MODEL.BACKBONE, 'NAME'),
        ('head', cfg.MODEL.HEAD, 'NAME'),
    )
    for arg_name, node, field in overrides:
        value = getattr(args, arg_name)
        if value:
            setattr(node, field, value)
|
def extend_cfg(cfg):
    """
    Add new config variables.

    E.g.
        from yacs.config import CfgNode as CN
        cfg.TRAINER.MY_MODEL = CN()
        cfg.TRAINER.MY_MODEL.PARAM_A = 1.
        cfg.TRAINER.MY_MODEL.PARAM_B = 0.5
        cfg.TRAINER.MY_MODEL.PARAM_C = False
    """
    from yacs.config import CfgNode as CN
    ours = CN()
    ours.N_CTX = 10
    ours.CSC = False
    ours.CTX_INIT = ''
    ours.WEIGHT_U = 0.1
    cfg.TRAINER.OURS = ours
|
def setup_cfg(args):
    """Assemble the run config: defaults, extensions, files, CLI overrides."""
    cfg = get_cfg_default()
    extend_cfg(cfg)
    # Dataset config first, then the trainer/method config on top of it.
    for cfg_file in (args.dataset_config_file, args.config_file):
        if cfg_file:
            cfg.merge_from_file(cfg_file)
    reset_cfg(cfg, args)
    cfg.freeze()
    return cfg
|
def main(args):
    """Extract CLIP RN50 visual features for one dataset split and save
    them, together with labels, to an .npz under OUTPUT_DIR/DATASET_NAME.
    """
    cfg = setup_cfg(args)
    if (cfg.SEED >= 0):
        print('Setting fixed seed: {}'.format(cfg.SEED))
        set_random_seed(cfg.SEED)
    setup_logger(cfg.OUTPUT_DIR)
    if (torch.cuda.is_available() and cfg.USE_CUDA):
        torch.backends.cudnn.benchmark = True
    print_args(args, cfg)
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    # NOTE(review): eval() resolves the registered dataset class by name;
    # cfg.DATASET.NAME must be trusted input.
    dataset = eval(cfg.DATASET.NAME)(cfg)
    if (args.split == 'train'):
        dataset_input = dataset.train_x
    elif (args.split == 'val'):
        dataset_input = dataset.val
    else:
        dataset_input = dataset.test
    # is_train=False even for the train split: deterministic test-time
    # transforms, no shuffling, no sample dropping.
    tfm_train = build_transform(cfg, is_train=False)
    data_loader = torch.utils.data.DataLoader(DatasetWrapper(cfg, dataset_input, transform=tfm_train, is_train=False), batch_size=cfg.DATALOADER.TRAIN_X.BATCH_SIZE, sampler=None, shuffle=False, num_workers=cfg.DATALOADER.NUM_WORKERS, drop_last=False, pin_memory=(torch.cuda.is_available() and cfg.USE_CUDA))
    (clip_model, _) = clip.load('RN50', 'cuda', jit=False)
    clip_model.eval()
    feature_list = []
    label_list = []
    train_dataiter = iter(data_loader)
    for train_step in range(1, (len(train_dataiter) + 1)):
        batch = next(train_dataiter)
        data = batch['img'].cuda()
        # NOTE(review): no torch.no_grad() here — autograd bookkeeping is
        # kept during extraction; consider wrapping if memory is a concern.
        feature = clip_model.visual(data)
        feature = feature.cpu()
        for idx in range(len(data)):
            feature_list.append(feature[idx].tolist())
        label_list.extend(batch['label'].tolist())
    save_dir = os.path.join(cfg.OUTPUT_DIR, cfg.DATASET.NAME)
    os.makedirs(save_dir, exist_ok=True)
    save_filename = f'{args.split}'
    np.savez(os.path.join(save_dir, save_filename), feature_list=feature_list, label_list=label_list)
|
def average_ckpt(state_dict, ignore=('optimizer', 'scheduler')):
    """Average the per-checkpoint entries of a collated checkpoint dict.

    Each value of ``state_dict`` is expected to be a list with one entry per
    checkpoint. Entries whose first element is an int are averaged and cast
    back to int, floats are averaged, and dicts (e.g. parameter name ->
    tensor) are averaged tensor-wise via torch.stack(...).mean(dim=0).
    Keys listed in ``ignore`` and keys of any other element type are
    dropped from the result.

    Args:
        state_dict: mapping from key to a list of per-checkpoint values;
            must contain 'val_result' and 'epoch' (they are logged below).
        ignore: keys to skip entirely (default was a mutable list — now an
            immutable tuple; membership semantics are unchanged).

    Returns:
        dict with the averaged values.
    """
    new_dict = dict()
    # Log which checkpoints are being merged.
    print(state_dict['val_result'], state_dict['epoch'])
    for key, values in state_dict.items():
        if key in ignore:
            continue
        first = values[0]
        if isinstance(first, int):
            new_dict[key] = int(np.average(values))
        elif isinstance(first, float):
            new_dict[key] = np.average(values)
        elif isinstance(first, dict):
            # Gather each parameter's tensors across checkpoints, then
            # average element-wise.
            collected = dict()
            for ckpt in values:
                for param_key, param in ckpt.items():
                    collected.setdefault(param_key, []).append(param)
            new_dict[key] = {
                param_key: torch.stack(params).mean(dim=0)
                for param_key, params in collected.items()
            }
    return new_dict
|
def compute_ci95(res):
    """Return the half-width of a 95% confidence interval for the mean of res.

    Uses the normal approximation 1.96 * std / sqrt(n) with the population
    standard deviation (numpy's default, ddof=0).
    """
    n = len(res)
    return 1.96 * np.std(res) / np.sqrt(n)
|
def parse_function(*metrics, directory='', args=None, end_signal=None):
    """Scan each run sub-directory's log.txt under `directory`, collect
    metric values appearing after `end_signal`, and print per-run and
    aggregate (mean +- std or CI95) numbers.

    Args:
        metrics: dicts with keys 'name' and 'regex'; group(1) of the regex
            must capture the numeric value.
        directory: parent folder with one sub-directory per run.
        args: parsed CLI args; only args.ci95 is read here.
        end_signal: log line marking the start of the final-result section.

    Returns:
        OrderedDict mapping metric name (plus 'file') to the mean value.
    """
    print(f'Parsing files in {directory}')
    subdirs = listdir_nohidden(directory, sort=True)
    outputs = []
    for subdir in subdirs:
        fpath = osp.join(directory, subdir, 'log.txt')
        assert check_isfile(fpath)
        good_to_go = False
        output = OrderedDict()
        with open(fpath, 'r') as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip()
                if (line == end_signal):
                    good_to_go = True
                for metric in metrics:
                    match = metric['regex'].search(line)
                    # Only matches after the end signal are trusted; later
                    # matches overwrite earlier ones for the same metric.
                    if (match and good_to_go):
                        if ('file' not in output):
                            output['file'] = fpath
                        num = float(match.group(1))
                        name = metric['name']
                        output[name] = num
        if output:
            outputs.append(output)
    assert (len(outputs) > 0), f'Nothing found in {directory}'
    metrics_results = defaultdict(list)
    for output in outputs:
        msg = ''
        for (key, value) in output.items():
            if isinstance(value, float):
                msg += f'{key}: {value:.2f}%. '
            else:
                msg += f'{key}: {value}. '
            # 'file' is printed but excluded from aggregation.
            if (key != 'file'):
                metrics_results[key].append(value)
        print(msg)
    output_results = OrderedDict()
    print('===')
    print(f'Summary of directory: {directory}')
    for (key, values) in metrics_results.items():
        avg = np.mean(values)
        std = (compute_ci95(values) if args.ci95 else np.std(values))
        print(f'* {key}: {avg:.2f}% +- {std:.2f}%')
        output_results[key] = avg
    print('===')
    return output_results
|
def main(args, end_signal):
    """Aggregate one keyword metric parsed from experiment log files.

    With --multi-exp, each child directory of args.directory is parsed as a
    separate experiment and per-metric averages are printed at the end;
    otherwise args.directory itself is parsed once.
    """
    # Raw f-string: '\*' and '\d' are regex escapes, not string escapes.
    # The original non-raw literal relied on invalid string escapes, which
    # raise warnings today and are slated to become errors in Python.
    metric = {'name': args.keyword, 'regex': re.compile(rf'\* {args.keyword}: ([\.\deE+-]+)%')}
    if args.multi_exp:
        final_results = defaultdict(list)
        for directory in listdir_nohidden(args.directory, sort=True):
            directory = osp.join(args.directory, directory)
            results = parse_function(metric, directory=directory, args=args, end_signal=end_signal)
            for (key, value) in results.items():
                final_results[key].append(value)
        print('Average performance')
        for (key, values) in final_results.items():
            avg = np.mean(values)
            print(f'* {key}: {avg:.2f}%')
    else:
        parse_function(metric, directory=args.directory, args=args, end_signal=end_signal)
|
def main():
    """Collect per-seed test accuracies from experiment logs into a CSV.

    Relies on module-level globals defined elsewhere in this script:
    out_name, seeds, ckpt_folder, ckpt_setting, accuracy_index and
    COOP_ELEVATER_DATASET.
    """
    with open(f'./scripts/{out_name}.csv', 'w', encoding='UTF8') as f:
        writer = csv.writer(f)
        dataset = COOP_ELEVATER_DATASET
        # Header row: blank corner cell + one column per dataset.
        writer.writerow(([' '] + dataset))
        missed = 0
        for seed in seeds:
            temp_row = []
            temp_row.append(f'seed {seed}')
            for data1 in dataset:
                missed_ = True
                log_files = glob.glob(f'{ckpt_folder}/{data1}/{ckpt_setting}/seed{seed}/log.txt*')
                for log_file in log_files:
                    with open(log_file) as open_file:
                        lines = open_file.readlines()
                        # Accuracy is expected on a fixed line of the log.
                        number = re.findall('([+-]?[0-9]*\\.[0-9]*)', lines[accuracy_index])
                        # Sanity-check surrounding lines before trusting the
                        # parsed number; first parsable log file wins.
                        if (('results' in lines[accuracy_index]) and ('test' in lines[(accuracy_index - 2)])):
                            try:
                                temp_row.append(float(number[0]))
                                missed_ = False
                                break
                            except Exception as e:
                                continue
                if missed_:
                    # No parsable accuracy for this dataset/seed: leave the
                    # cell blank and report it.
                    temp_row.append(' ')
                    missed += 1
                    print('missed', data1, 'seed', seed)
            writer.writerow(temp_row)
        print(f'okay we missed {missed} entries')
|
def print_args(args, cfg):
    """Print the parsed CLI arguments (sorted) and then the full config."""
    print('***************')
    print('** Arguments **')
    print('***************')
    arg_dict = args.__dict__
    for name in sorted(arg_dict):
        print('{}: {}'.format(name, arg_dict[name]))
    print('************')
    print('** Config **')
    print('************')
    print(cfg)
|
def reset_cfg(cfg, args):
    """Override config entries with any truthy command-line arguments.

    NOTE(review): falsy values (0, '', None, False) are treated as "not
    provided" and silently skipped — e.g. --seed 0 would not be applied.
    """
    if args.root:
        cfg.DATASET.ROOT = args.root
    if args.output_dir:
        cfg.OUTPUT_DIR = args.output_dir
    if args.resume:
        cfg.RESUME = args.resume
    if args.seed:
        cfg.SEED = args.seed
        # Keep the few-shot sampling seed in sync with the global seed.
        cfg.DATASET.RANDOM_SEED_SAMPLING = args.seed
    if args.source_domains:
        cfg.DATASET.SOURCE_DOMAINS = args.source_domains
    if args.target_domains:
        cfg.DATASET.TARGET_DOMAINS = args.target_domains
    if args.transforms:
        cfg.INPUT.TRANSFORMS = args.transforms
    if args.trainer:
        cfg.TRAINER.NAME = args.trainer
    if args.backbone:
        cfg.MODEL.BACKBONE.NAME = args.backbone
    if args.head:
        cfg.MODEL.HEAD.NAME = args.head
    if args.dataset:
        cfg.DATASET.DATASET = args.dataset
    if args.shots:
        # Two config keys track the shot count; keep both in sync.
        cfg.DATASET.NUM_SAMPLES_PER_CLASS = args.shots
        cfg.DATASET.NUM_SHOTS = args.shots
    if args.multi_task:
        cfg.DATASET.MULTITASK = args.multi_task
    if args.multi_task_label_pertask:
        cfg.DATASET.MULTITASK_LABEL_PERTASK = args.multi_task_label_pertask
    if args.dataset_coop:
        cfg.DATASET.COOP = args.dataset_coop
    if args.cut_contextlen:
        cfg.TRAINER.CUT_CONTEXTLEN = args.cut_contextlen
    if args.act_ckpt:
        cfg.TRAINER.ACT_CKPT = args.act_ckpt
    # 'average' is the default, so only a non-default value is copied over.
    if (args.multi_task_evalkey != 'average'):
        cfg.DATASET.MULTITASK_EVALKEY = args.multi_task_evalkey
|
def extend_cfg(cfg):
    """
    Add new config variables.

    E.g.
        from yacs.config import CfgNode as CN
        cfg.TRAINER.MY_MODEL = CN()
        cfg.TRAINER.MY_MODEL.PARAM_A = 1.
        cfg.TRAINER.MY_MODEL.PARAM_B = 0.5
        cfg.TRAINER.MY_MODEL.PARAM_C = False
    """
    from yacs.config import CfgNode as CN
    # --- CoOp trainer options ---
    cfg.TRAINER.COOP = CN()
    cfg.TRAINER.COOP.N_CTX = 16
    cfg.TRAINER.COOP.CSC = False
    cfg.TRAINER.COOP.CTX_INIT = ''
    cfg.TRAINER.COOP.PREC = 'fp16'
    cfg.TRAINER.COOP.CLASS_TOKEN_POSITION = 'end'
    # --- CoCoOp trainer options ---
    cfg.TRAINER.COCOOP = CN()
    cfg.TRAINER.COCOOP.N_CTX = 16
    cfg.TRAINER.COCOOP.CTX_INIT = ''
    cfg.TRAINER.COCOOP.PREC = 'fp16'
    # --- MVLPT trainer options (with nested VPT / COOP / COCOOP branches) ---
    cfg.TRAINER.MVLPT = CN()
    cfg.TRAINER.MVLPT.PREC = 'fp16'
    cfg.TRAINER.MVLPT.PROJECT_METHOD = 'transformer'
    cfg.TRAINER.MVLPT.PROJECT_DIM = 128
    cfg.TRAINER.MVLPT.VPT = CN()
    cfg.TRAINER.MVLPT.VPT.N_CTX = 0
    cfg.TRAINER.MVLPT.VPT.CSC = False
    cfg.TRAINER.MVLPT.VPT.CTX_INIT = ''
    cfg.TRAINER.MVLPT.VPT.DROPOUT = 0.0
    cfg.TRAINER.MVLPT.VPT.PROJECT = (- 1)
    cfg.TRAINER.MVLPT.VPT.DEEP = True
    cfg.TRAINER.MVLPT.COOP = CN()
    cfg.TRAINER.MVLPT.COOP.N_CTX = 0
    cfg.TRAINER.MVLPT.COOP.CSC = False
    cfg.TRAINER.MVLPT.COOP.CTX_INIT = ''
    cfg.TRAINER.MVLPT.COOP.CLASS_TOKEN_POSITION = 'middle'
    cfg.TRAINER.MVLPT.COCOOP = CN()
    cfg.TRAINER.MVLPT.COCOOP.N_CTX = 0
    cfg.TRAINER.MVLPT.COCOOP.CTX_INIT = ''
    cfg.TRAINER.MVLPT.COCOOP.PREC = 'fp16'
    # --- Dataset / sampling options ---
    cfg.DATASET.SUBSAMPLE_CLASSES = 'all'
    cfg.DATASET.NUM_SAMPLES_PER_CLASS = 20
    cfg.DATASET.DATASET = ''
    cfg.DATASET.RANDOM_SEED_SAMPLING = 1
    cfg.DATASET.VAL_SET = ''
    cfg.DATASET.TRAIN_SET = 'train'
    cfg.DATASET.TEST_SET = 'val'
    cfg.DATASET.CENTER_CROP = False
    # --- Misc trainer switches ---
    cfg.TRAINER.CUT_CONTEXTLEN = False
    cfg.TRAINER.ACT_CKPT = 1
    # --- Multi-task flags ---
    cfg.DATASET.COOP = False
    cfg.DATASET.MULTITASK = False
    cfg.DATASET.MULTITASK_LABEL_PERTASK = False
    cfg.DATASET.MULTITASK_EVALKEY = 'average'
|
def setup_cfg(args):
    """Assemble the run config: defaults, extensions, files, CLI overrides."""
    cfg = get_cfg_default()
    extend_cfg(cfg)
    # Dataset config first, then the trainer/method config on top of it.
    for cfg_file in (args.dataset_config_file, args.config_file):
        if cfg_file:
            cfg.merge_from_file(cfg_file)
    reset_cfg(cfg, args)
    # Free-form KEY VALUE overrides from the command line take precedence.
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
|
def main(args):
    """Build the trainer from config and run training or evaluation."""
    cfg = setup_cfg(args)
    if (cfg.SEED >= 0):
        print('Setting fixed seed: {}'.format(cfg.SEED))
        set_random_seed(cfg.SEED)
    setup_logger(cfg.OUTPUT_DIR)
    if (torch.cuda.is_available() and cfg.USE_CUDA):
        # Autotune conv kernels for fixed-size inputs (non-deterministic).
        torch.backends.cudnn.benchmark = True
    print_args(args, cfg)
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    trainer = build_trainer(cfg)
    if args.eval_only:
        # Evaluation-only path: load a checkpoint and test, then stop.
        trainer.load_model(args.model_dir, epoch=args.load_epoch)
        trainer.test()
        return
    if args.model_dir:
        # Warm-start from a pretrained checkpoint before training.
        trainer.load_model(args.model_dir)
    if (not args.no_train):
        trainer.train()
|
def add_finetuning_args(parser):
    """Register the CLI options for the full-model finetuning script."""
    parser.add_argument('--ds', required=False, help='Evaluation dataset configure file name.', type=str)
    parser.add_argument('--model', required=True, help='Evaluation model configure file name', type=str)
    parser.add_argument('--submit-predictions', help='submit predictions and model info to leaderboard.', default=False, action='store_true')
    parser.add_argument('--submit-by', help='Person who submits the results.', type=str)
    # The lambda parses 'true'/'True' strings into real booleans.
    parser.add_argument('--no-tuning', help='No hyperparameter-tuning.', default=False, type=(lambda x: (x.lower() == 'true')))
    parser.add_argument('--l2', help='(Inverse) L2 regularization strength. This option is only useful when option --no-tuning is True.', default=0.316, type=float)
    parser.add_argument('--lr', help='Test with a specific learning rate. This option is only useful when option --no-tuning is True.', default=0.001, type=float)
    parser.add_argument('--run', help='Run id', default=1, type=int)
    parser.add_argument('--fix_seed', help='Fix the random seed. [-1] not fixing the seeds', default=0, type=int)
    # NOTE(review): default=True combined with action='store_true' means
    # this flag can never be turned off from the command line.
    parser.add_argument('--save-predictions', help='save predictions logits for analysis.', default=True, action='store_true')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
|
def main():
    """Entry point for full-model finetuning on an ELEVATER-style dataset.

    Builds the config from the --ds and --model files, fixes seeds, runs
    (optionally hyperparameter-tuned) finetuning, and dumps prediction
    logits to a JSON file when --save-predictions is set.
    """
    parser = argparse.ArgumentParser(description='Test a classification model, with finetuning.')
    add_finetuning_args(parser)
    args = parser.parse_args()
    # update_config reads args.cfg, so point it at each config file in turn.
    args.cfg = args.ds
    update_config(config, args)
    args.cfg = args.model
    update_config(config, args)
    config.defrost()
    config.NAME = ''
    config.freeze()
    if args.submit_predictions:
        assert args.submit_by
    # fix_seed == -1 means "do not fix the seeds".
    if (args.fix_seed != (- 1)):
        random.seed(args.fix_seed)
        np.random.seed(args.fix_seed)
        torch.manual_seed(args.fix_seed)
        torch.cuda.manual_seed_all(args.fix_seed)
    n_samples = (str(config.DATASET.NUM_SAMPLES_PER_CLASS) if (config.DATASET.NUM_SAMPLES_PER_CLASS > 0) else 'full')
    exp_name = ('finetuning_' + n_samples)
    if config.TRAIN.TWO_LR:
        exp_name += '_two_lr'
    final_output_dir = create_logger(config, exp_name)
    # 1-shot runs are bumped to 2 samples per class so a train/val split is
    # still possible; the final train/val merge is disabled to match.
    if (config.DATASET.NUM_SAMPLES_PER_CLASS == 1):
        config.defrost()
        config.DATASET.NUM_SAMPLES_PER_CLASS = 2
        config.DATASET.MERGE_TRAIN_VAL_FINAL_RUN = False
        config.freeze()
    if comm.is_main_process():
        log_arg_env_config(args, config, final_output_dir)
    # Full-data patch-camelyon runs are capped at a 10000-shot subset.
    if ((config.DATASET.DATASET == 'patch-camelyon') and (config.DATASET.NUM_SAMPLES_PER_CLASS == (- 1))):
        logging.info(f'Detecting large dataset with {config.DATASET.NUM_SAMPLES_PER_CLASS}-shot.')
        config.defrost()
        config.DATASET.NUM_SAMPLES_PER_CLASS = 10000
        config.freeze()
        logging.info(f'Used the subset ({config.DATASET.NUM_SAMPLES_PER_CLASS}-shot) to train the model.')
    logging.info(f'{config.DATASET.DATASET} is a dataset.')
    (train_dataloader, val_dataloader, test_dataloader) = construct_dataloader(config)
    logging.info('Finetuning with full model. This may take several minutes to hours depending on the size of your data.')
    (best_acc, model_info) = full_model_finetune(train_dataloader, val_dataloader, test_dataloader, args.no_tuning, args.lr, args.l2, config)
    test_predictions = model_info['best_logits']
    if args.save_predictions:
        import json

        # Round floats to limit the size of the dumped logits.
        def json_prec_dump(data, prec=6):
            return json.dumps(json.loads(json.dumps(data), parse_float=(lambda x: round(float(x), prec))))
        results_dict = {'model_name': config.MODEL.NAME, 'dataset_name': config.DATASET.DATASET, 'num_trainable_params': model_info.get('n_trainable_params', None), 'num_params': model_info.get('n_params', None), 'num_visual_params': model_info.get('n_visual_params', None), 'num_backbone_params': model_info.get('n_backbone_params', None), 'n_shot': config.DATASET.NUM_SAMPLES_PER_CLASS, 'rnd_seeds': [config.DATASET.RANDOM_SEED_SAMPLING], 'predictions': [test_predictions.tolist()]}
        json_string = json_prec_dump(results_dict)
        prediction_folder = os.path.join(config.OUTPUT_DIR, 'predictions', exp_name)
        os.makedirs(prediction_folder, exist_ok=True)
        with open(os.path.join(prediction_folder, f'seed{config.DATASET.RANDOM_SEED_SAMPLING}_{config.DATASET.DATASET}.json'), 'w') as outfile:
            outfile.write(json_string)
|
def add_linear_probing_args(parser):
    """Register the CLI options for the linear-probing script."""
    parser.add_argument('--ds', required=False, help='Evaluation dataset configure file name.', type=str)
    parser.add_argument('--model', required=True, help='Evaluation model configure file name', type=str)
    parser.add_argument('--submit-predictions', help='submit predictions and model info to leaderboard.', default=False, action='store_true')
    parser.add_argument('--submit-by', help='Person who submits the results.', type=str)
    # The lambda parses 'true'/'True' strings into real booleans.
    parser.add_argument('--no-tuning', help='No hyperparameter-tuning.', default=False, type=(lambda x: (x.lower() == 'true')))
    # FIX: was type=str with default=False, so any value — including the
    # string 'False' — was truthy downstream. Use the same bool parser as
    # --no-tuning for consistent semantics.
    parser.add_argument('--emulate-zeroshot', help='Emulate zero shot learning.', default=False, type=(lambda x: (x.lower() == 'true')))
    parser.add_argument('--l2', help='(Inverse) L2 regularization strength. This option is only useful when option --no-tuning is True.', default=0.316, type=float)
    parser.add_argument('--lr', help='Test with a specific learning rate. This option is only useful when option --no-tuning is True.', default=0.001, type=float)
    parser.add_argument('--run', help='Run id', default=1, type=int)
    parser.add_argument('--fix_seed', help='Fix the random seed. [-1] not fixing the seeds', default=0, type=int)
    # NOTE(review): default=True combined with action='store_true' means
    # this flag can never be turned off from the command line.
    parser.add_argument('--save-predictions', help='save predictions logits for analysis.', default=True, action='store_true')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
|
def main():
    """Entry point for linear probing on an ELEVATER-style dataset.

    Builds the config from the --ds and --model files, fixes seeds,
    optionally emulates zero-shot (no training epochs), runs the probe,
    and dumps prediction logits to JSON when --save-predictions is set.
    """
    parser = argparse.ArgumentParser(description='Test a classification model, with linear probing.')
    add_linear_probing_args(parser)
    args = parser.parse_args()
    # update_config reads args.cfg, so point it at each config file in turn.
    args.cfg = args.ds
    update_config(config, args)
    args.cfg = args.model
    update_config(config, args)
    config.defrost()
    config.NAME = ''
    config.freeze()
    if args.submit_predictions:
        assert args.submit_by
    # fix_seed == -1 means "do not fix the seeds".
    if (args.fix_seed != (- 1)):
        random.seed(args.fix_seed)
        np.random.seed(args.fix_seed)
        torch.manual_seed(args.fix_seed)
        torch.cuda.manual_seed_all(args.fix_seed)
    # Zero-shot emulation: one epoch, no extra training, no samples.
    if args.emulate_zeroshot:
        args.no_tuning = True
        config.defrost()
        config.TRAIN.END_EPOCH = 1
        config.TRAIN.EXTRA_FINAL_TRAIN_EPOCH = 0
        config.DATASET.NUM_SAMPLES_PER_CLASS = 0
        config.TRAIN.EMULATE_ZERO_SHOT = True
        config.freeze()
    n_samples = (str(config.DATASET.NUM_SAMPLES_PER_CLASS) if (config.DATASET.NUM_SAMPLES_PER_CLASS >= 0) else 'full')
    exp_name = ('linear_probe_' + n_samples)
    # 1-shot runs are bumped to 2 samples per class so a train/val split is
    # still possible; the final train/val merge is disabled to match.
    if (config.DATASET.NUM_SAMPLES_PER_CLASS == 1):
        config.defrost()
        config.DATASET.NUM_SAMPLES_PER_CLASS = 2
        config.DATASET.MERGE_TRAIN_VAL_FINAL_RUN = False
        config.freeze()
    # MAE backbones are probed without global pooling.
    if config.MODEL.NAME.startswith('mae_'):
        config.defrost()
        config.MODEL.SPEC.GLOBAL_POOL = False
        config.freeze()
    final_output_dir = create_logger(config, exp_name)
    if comm.is_main_process():
        log_arg_env_config(args, config, final_output_dir)
    # Full-data patch-camelyon runs are capped at a 10000-shot subset.
    if ((config.DATASET.DATASET == 'patch-camelyon') and (config.DATASET.NUM_SAMPLES_PER_CLASS == (- 1))):
        logging.info(f'Detecting large dataset with {config.DATASET.NUM_SAMPLES_PER_CLASS}-shot.')
        config.defrost()
        config.DATASET.NUM_SAMPLES_PER_CLASS = 10000
        config.freeze()
        logging.info(f'Used the subset ({config.DATASET.NUM_SAMPLES_PER_CLASS}-shot) to train the model.')
    (train_dataloader, val_dataloader, test_dataloader) = construct_dataloader(config)
    (best_acc, model_info) = full_model_finetune(train_dataloader, val_dataloader, test_dataloader, args.no_tuning, args.lr, args.l2, config)
    test_predictions = model_info['best_logits']
    if args.save_predictions:
        import json

        # Round floats to limit the size of the dumped logits.
        def json_prec_dump(data, prec=6):
            return json.dumps(json.loads(json.dumps(data), parse_float=(lambda x: round(float(x), prec))))
        results_dict = {'model_name': config.MODEL.NAME, 'dataset_name': config.DATASET.DATASET, 'num_trainable_params': model_info.get('n_trainable_params', None), 'num_params': model_info.get('n_params', None), 'num_visual_params': model_info.get('n_visual_params', None), 'num_backbone_params': model_info.get('n_backbone_params', None), 'n_shot': config.DATASET.NUM_SAMPLES_PER_CLASS, 'rnd_seeds': [config.DATASET.RANDOM_SEED_SAMPLING], 'predictions': [test_predictions.tolist()]}
        json_string = json_prec_dump(results_dict)
        prediction_folder = os.path.join(config.OUTPUT_DIR, 'predictions', exp_name)
        os.makedirs(prediction_folder, exist_ok=True)
        with open(os.path.join(prediction_folder, f'seed{config.DATASET.RANDOM_SEED_SAMPLING}_{config.DATASET.DATASET}.json'), 'w') as outfile:
            outfile.write(json_string)
|
def parse_args():
    """Parse the CLI arguments for the prediction-combining script."""
    parser = argparse.ArgumentParser(description='Submit predictions to leaderboard service.')
    parser.add_argument('--combine_path', required=True, help='Prediction json file path.', type=pathlib.Path)
    parser.add_argument('--combine_name', default='all_predictions', required=False, help='Output file name.', type=str)
    return parser.parse_args()
|
def json_prec_dump(data, prec=6):
    """Serialize `data` to JSON with all floats rounded to `prec` decimals.

    Implemented by round-tripping through json so rounding applies at any
    nesting depth; non-float values are left untouched.
    """
    def _round_float(text):
        return round(float(text), prec)

    reparsed = json.loads(json.dumps(data), parse_float=_round_float)
    return json.dumps(reparsed)
|
def main():
    """Merge per-seed prediction JSON files into one zipped submission.

    Files under `combine_path` are grouped by dataset name; fields in
    KNOWN_AVERAGE_KEYS are averaged, fields in KNOWN_MERGE_KEYS are
    concatenated, and every other field must be identical across seeds.
    """
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    all_predictions = defaultdict(list)
    for prediction_file in args.combine_path.iterdir():
        if (prediction_file.suffix != '.json'):
            print(f'Ignoring file {prediction_file.name} by suffix.')
            continue
        prediction_data = json.loads(prediction_file.read_text())
        all_predictions[prediction_data['dataset_name']].append(prediction_data)
    all_combine_predictions = []
    KNOWN_AVERAGE_KEYS = ['num_trainable_params']
    KNOWN_MERGE_KEYS = ['rnd_seeds', 'predictions']
    KNOWN_DIFF_KEYS = (KNOWN_AVERAGE_KEYS + KNOWN_MERGE_KEYS)
    for (ds, prediction_data) in all_predictions.items():
        # Keys are taken from the first file; all files for a dataset are
        # expected to share the same schema.
        prediction_keys = list(prediction_data[0])
        combined_dict = dict()
        for key in prediction_keys:
            values = [x[key] for x in prediction_data]
            if (key not in KNOWN_DIFF_KEYS):
                # Metadata must agree across all seeds.
                assert all(((x == values[0]) for x in values))
                values = values[0]
            elif (key in KNOWN_MERGE_KEYS):
                values = list(itertools.chain.from_iterable(values))
            elif (key in KNOWN_AVERAGE_KEYS):
                values = np.asarray(values).mean()
            else:
                assert False
            combined_dict[key] = values
        all_combine_predictions.append(combined_dict)
    all_predictions = {'data': all_combine_predictions}
    all_predictions = json_prec_dump(all_predictions)
    save_path = (args.combine_path / f'{args.combine_name}.zip')
    zf = zipfile.ZipFile(save_path, 'w', zipfile.ZIP_DEFLATED)
    zf.writestr('all_predictions.json', all_predictions)
    zf.close()
|
def add_zero_shot_args(parser):
    """Register the CLI options for the zero-shot evaluation script."""
    parser.add_argument('--ds', required=False, help='Evaluation dataset configure file name.', type=str)
    parser.add_argument('--model', required=True, help='Clip model configure file name', type=str)
    parser.add_argument('--text_feature_only', help='consider text feature or not.', default=False, action='store_true')
    # NOTE(review): default=True combined with action='store_true' means
    # this flag can never be turned off from the command line.
    parser.add_argument('--save-predictions', help='save predictions logits for analysis.', default=True, action='store_true')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
|
def load_or_extract_features(args, cfg):
    """Load cached image/text features from disk, or extract them fresh.

    Returns a tuple ``(image_features, text_features, image_labels)``.
    """
    # Choose the text tokenizer matching the configured text encoder.
    if (cfg.MODEL.SPEC.TEXT.TOKENIZER == 'clip'):
        tokenizer = SimpleTokenizer()
    elif ('hf_' in cfg.MODEL.SPEC.TEXT.TOKENIZER):
        # NOTE(review): substring test, not a prefix test; anything
        # containing 'hf_' matches and chars [3:] become the HF model name.
        tokenizer = HFPTTokenizer(pt_name=cfg.MODEL.SPEC.TEXT.TOKENIZER[3:])
    else:
        tokenizer = None
    # Cache filename encodes the model and knowledge-source switches.
    feature_file = os.path.join(cfg.DATASET.ROOT, (((('zeroshot_features_' + cfg.MODEL.NAME.replace('/', '')) + f'_wiki_{cfg.KNOWLEDGE.WIKITIONARY.USE_DEFINITION}') + f'_gpt3_{cfg.KNOWLEDGE.GPT3.USE_GPT3}') + '.npy'))
    logging.info(f'feature_file: {feature_file}')
    if os.path.exists(feature_file):
        logging.info('Loading features from existing files.')
        # Three arrays were saved back-to-back into the same .npy stream.
        with open(feature_file, 'rb') as fread:
            image_features = np.load(fread)
            text_features = np.load(fread)
            image_labels = np.load(fread)
    else:
        (image_features, image_labels) = extract_features(cfg, test_split_only=True)
        text_features = extract_text_features(cfg, tokenizer, args)
        # NOTE(review): freshly extracted features are not written back to
        # `feature_file` here -- confirm the caching/saving happens elsewhere.
    logging.info(f'Test size is {image_features.shape[0]}.')
    return (image_features, text_features, image_labels)
|
def load_or_extract_text_features(args, cfg):
    """Extract knowledge-augmented text features (wiki/GPT-3 dictionaries).

    NOTE(review): real defect on the cache-hit path -- `text_features` is
    loaded from disk but never returned; the function then returns
    `(wiki_dict, gpt3_dict)`, which are unbound in that branch, raising
    NameError. Confirm the intended cached return value before fixing.
    """
    # Choose the text tokenizer matching the configured text encoder.
    if (cfg.MODEL.SPEC.TEXT.TOKENIZER == 'clip'):
        tokenizer = SimpleTokenizer()
    elif ('hf_' in cfg.MODEL.SPEC.TEXT.TOKENIZER):
        # NOTE(review): substring test, not a prefix test (see above).
        tokenizer = HFPTTokenizer(pt_name=cfg.MODEL.SPEC.TEXT.TOKENIZER[3:])
    else:
        tokenizer = None
    feature_file = os.path.join(cfg.DATASET.ROOT, (((('zeroshot_text_features_' + cfg.MODEL.NAME.replace('/', '')) + f'_wiki_{cfg.KNOWLEDGE.WIKITIONARY.USE_DEFINITION}') + f'_gpt3_{cfg.KNOWLEDGE.GPT3.USE_GPT3}') + '.npy'))
    logging.info(f'feature_file: {feature_file}')
    if os.path.exists(feature_file):
        logging.info('Loading features from existing files.')
        with open(feature_file, 'rb') as fread:
            text_features = np.load(fread)
    else:
        (wiki_dict, gpt3_dict) = extract_text_features(cfg, tokenizer, args)
        logging.info(f'Test size is {len(wiki_dict)}.')
    return (wiki_dict, gpt3_dict)
|
def main():
    """Zero-shot evaluation entry point: merge configs, build features,
    run the CLIP zero-shot evaluator and optionally dump predictions."""
    parser = argparse.ArgumentParser(description='Zero-shot evaluation script.')
    add_zero_shot_args(parser)
    args = parser.parse_args()
    # update_config() reads args.cfg, so merge the dataset config first and
    # the model config second by swapping args.cfg in between.
    args.cfg = args.ds
    update_config(config, args)
    args.cfg = args.model
    update_config(config, args)
    config.defrost()
    config.NAME = ''
    config.freeze()
    exp_name = ('zeroshot_eval_' + f'wiki_{config.KNOWLEDGE.WIKITIONARY.USE_DEFINITION}_wnh_{config.KNOWLEDGE.WORDNET.USE_HIERARCHY}_wnd_{config.KNOWLEDGE.WORDNET.USE_DEFINITION}_gpt3_{config.KNOWLEDGE.GPT3.USE_GPT3}')
    # NOTE(review): 'MEHTOD' mirrors the (misspelled) config key as declared.
    exp_name += f'agg_{config.KNOWLEDGE.AGGREGATION.MEHTOD}_gpt3count_{config.KNOWLEDGE.AGGREGATION.NUM_GPT3_ITEMS}'
    final_output_dir = create_logger(config, exp_name)
    if comm.is_main_process():
        log_arg_env_config(args, config, final_output_dir)
    if args.text_feature_only:
        (wiki_dict, gpt3_dict) = load_or_extract_text_features(args, config)
    else:
        (image_features, text_features, image_labels) = load_or_extract_features(args, config)
    # NOTE(review): this call is outside the else-branch, so with
    # --text_feature_only it still runs and hits a NameError on
    # image_features -- confirm whether it should be under `else`.
    (result, test_predictions, metric) = clip_zeroshot_evaluator(image_features, text_features, image_labels, config)
    msg = f'=> TEST: {metric} {(100 * result):.3f}% '
    logging.info(msg)
    if args.save_predictions:
        import json
        # Shadows the module-level json_prec_dump with a local copy.
        def json_prec_dump(data, prec=6):
            """Serialize *data* to JSON with floats rounded to *prec* digits."""
            return json.dumps(json.loads(json.dumps(data), parse_float=(lambda x: round(float(x), prec))))
        results_dict = {'model_name': f'CLIP-{config.MODEL.NAME}', 'dataset_name': config.DATASET.DATASET, 'num_trainable_params': 0, 'num_params': config.MODEL.STATS.get('n_params', None), 'num_visual_params': config.MODEL.STATS.get('n_visual_params', None), 'num_backbone_params': config.MODEL.STATS.get('n_backbone_params', None), 'n_shot': 0, 'rnd_seeds': [0], 'predictions': [test_predictions.cpu().data.numpy().tolist()]}
        json_string = json_prec_dump(results_dict)
        prediction_folder = os.path.join(config.OUTPUT_DIR, 'predictions', exp_name)
        os.makedirs(prediction_folder, exist_ok=True)
        with open(os.path.join(prediction_folder, f'{config.DATASET.DATASET}.json'), 'w') as outfile:
            outfile.write(json_string)
|
def get_dataset_hub():
    """Build a DatasetHub from the packaged vision_datasets.json resource."""
    resource_file = pathlib.Path(__file__).resolve().parents[1] / 'resources' / 'datasets' / 'vision_datasets.json'
    return DatasetHub(resource_file.read_text())
|
class DataClassBase():
    """Base class adding dict (de)serialization and field validation to dataclasses."""
    def __post_init__(self):
        # Validate field types as soon as the dataclass is constructed.
        self.validate()
    @classmethod
    def from_dict(cls, data_content):
        """Construct an instance from a plain dict, recursing into nested dataclasses.

        Raises AssertionError if *data_content* contains keys that are not fields.
        """
        c = {}
        for field in dataclasses.fields(cls):
            d_type = DataClassBase._get_dataclass_type(field.type)
            if field.name in data_content:
                c[field.name] = d_type.from_dict(data_content[field.name]) if d_type else data_content[field.name]
        # Every key in the input must have mapped to a known field.
        assert len(data_content) == len(c), f'{data_content.keys()} vs {c.keys()}'
        return cls(**c)
    def to_dict(self, skip_default=True):
        """Serialize to a plain dict, recursing into nested dataclasses.

        Fields equal to their declared default are omitted unless
        skip_default=False.
        """
        result = {}
        for f in dataclasses.fields(self):
            value = getattr(self, f.name)
            if dataclasses.is_dataclass(value):
                value = value.to_dict()
            elif isinstance(value, (list, tuple)):
                value = type(value)((v.to_dict() if dataclasses.is_dataclass(v) else v) for v in value)
            if (not skip_default) or (value != f.default):
                result[f.name] = value
        return result
    def validate(self):
        """Check each field's runtime type against its annotation; raise TypeError on mismatch."""
        for field in dataclasses.fields(self):
            if hasattr(field.type, '__origin__') and (field.type.__origin__ in (tuple, collections.abc.Sequence)):
                # Generic sequence annotation: only check the container type.
                expected_types = field.type.__origin__
            elif hasattr(field.type, '__args__'):
                # Union / Optional annotation: accept any member type.
                expected_types = field.type.__args__
            else:
                expected_types = field.type
            if not isinstance(self.__dict__[field.name], expected_types):
                raise TypeError(f'Unexpected field type for {field.name}: Expected: {expected_types}. Actual: {type(self.__dict__[field.name])}')
    def _raise_value_error(self, config_name, msg=None):
        """Raise ValueError for an invalid config attribute, optionally appending *msg*."""
        error_msg = f'Invalid {config_name}: {getattr(self, config_name)}.'
        if msg:
            error_msg += (' ' + msg)
        raise ValueError(error_msg)
    def _check_value(self, value_name, checker):
        """Raise ValueError when checker(value) is falsy for attribute *value_name*."""
        value = getattr(self, value_name)
        if not checker(value):
            raise ValueError(f'Invalid {value_name}: {value}.')
    @staticmethod
    def _get_dataclass_type(field_type):
        """Return the dataclass type if *field_type* is a dataclass or Optional[dataclass], else None."""
        # Was a bare function in the class body; @staticmethod keeps the
        # existing `DataClassBase._get_dataclass_type(...)` call working and
        # also makes it safe to call through an instance.
        if dataclasses.is_dataclass(field_type):
            return field_type
        if hasattr(field_type, '__args__'):
            args = field_type.__args__
            if (len(args) == 2) and (type(None) in args):
                return next((t for t in args if dataclasses.is_dataclass(t)), None)
        return None
|
class Tasks():
    """Supported benchmark task types, aliased from vision_datasets' DatasetTypes."""
    IC_MULTILABEL = DatasetTypes.IC_MULTILABEL
    IC_MULTICLASS = DatasetTypes.IC_MULTICLASS
    OBJECT_DETECTION = DatasetTypes.OD
    VALID_TYPES = [IC_MULTILABEL, IC_MULTICLASS, OBJECT_DETECTION]
    @staticmethod
    def is_valid(task):
        """Return True iff *task* is one of the supported task types."""
        return (task in Tasks.VALID_TYPES)
|
class Tracks():
    """Benchmark evaluation tracks plus the task/track compatibility rule."""
    LINEAR_PROBING = 'linear_probing'
    TRANSFER_LEARNING = 'transfer_learning'
    ZERO_SHOT = 'zero_shot'
    VALID_TYPES = [LINEAR_PROBING, TRANSFER_LEARNING, ZERO_SHOT]
    @staticmethod
    def is_valid(task, track):
        """Return True iff *track* is known and *task* may be evaluated on it."""
        if track not in Tracks.VALID_TYPES:
            return False
        if task == Tasks.OBJECT_DETECTION:
            # Detection supports every track except linear probing.
            return track != Tracks.LINEAR_PROBING
        # Classification tasks support every track; anything else is invalid.
        return task in (Tasks.IC_MULTICLASS, Tasks.IC_MULTILABEL)
|
@dataclasses.dataclass(frozen=True)
class PredictionSubmission(DataClassBase):
    """Leaderboard prediction submission: metadata plus per-fold predictions."""
    dataset_name: str
    model_name: str
    created_by: str
    task: str
    track: str
    predictions: List
    def validate(self):
        """Validate metadata against the dataset hub and every fold's predictions.

        Raises ValueError for bad metadata; AssertionError for bad predictions.
        """
        vision_dataset_json = (((pathlib.Path(__file__).resolve().parents[1] / 'resources') / 'datasets') / 'vision_datasets.json').read_text()
        hub = DatasetHub(vision_dataset_json)
        dataset_names = set([x['name'] for x in hub.list_data_version_and_types()])
        self._check_value('dataset_name', (lambda x: (x and (x in dataset_names))))
        self._check_value('model_name', (lambda x: x))
        self._check_value('created_by', (lambda x: x))
        self._check_value('task', (lambda x: Tasks.is_valid(x)))
        self._check_value('track', (lambda x: Tracks.is_valid(self.task, x)))
        self._check_value('predictions', (lambda x: x))
        # Test-split manifest is the ground truth for image count / labels.
        dataset_manifest = hub.create_dataset_manifest(VISION_DATASET_STORAGE, None, self.dataset_name, usage=Usages.TEST_PURPOSE)[0]
        logging.info(f'Created test set manifest for {self.dataset_name}')
        for (fold_idx, predictions) in enumerate(self.predictions):
            PredictionSubmission.validate_predictions(dataset_manifest, predictions, fold_idx)
    @staticmethod
    def validate_predictions(dataset_manifest: DatasetManifest, predictions, fold_idx):
        """Check one fold of predictions against the test-set manifest."""
        assert predictions, f'fold {fold_idx}, empty predictions.'
        assert (len(predictions) == len(dataset_manifest.images)), f'fold {fold_idx}, Number of predictions does not match number of images.'
        if (dataset_manifest.data_type in [DatasetTypes.IC_MULTICLASS, DatasetTypes.IC_MULTILABEL]):
            for (i, probs) in enumerate(predictions):
                if (dataset_manifest.data_type == DatasetTypes.IC_MULTICLASS):
                    # Multiclass probabilities must form a distribution.
                    sum_probs = sum(probs)
                    assert math.isclose(sum_probs, 1.0, rel_tol=0.001), f'fold {fold_idx}, Sum of predicted prob vector for image {i}: {sum_probs}, should be 1.0.'
                assert all([(0.0 <= prob <= 1.0) for prob in probs]), f'fold {fold_idx}, Predicted prob for image {i} not in [0, 1]: {probs}'
        if (dataset_manifest.data_type == DatasetTypes.OD):
            for (i, img_wise_bboxes) in enumerate(predictions):
                for bbox_pred in img_wise_bboxes:
                    assert PredictionSubmission.is_valid_box(bbox_pred, len(dataset_manifest.labelmap)), f'fold {fold_idx}, Invalid predicted bbox for image {i}: {bbox_pred}'
    @staticmethod
    def is_valid_box(bbox_pred, num_classes):
        """Validate one detection: 6 values, presumably
        [class_id, score, left, top, right, bottom] -- class in range,
        score in [0, 1], coords non-negative with left<=right, top<=bottom.
        """
        return ((len(bbox_pred) == 6) and (0 <= bbox_pred[0] < num_classes) and (0.0 <= bbox_pred[1] <= 1.0) and all([(x >= 0) for x in bbox_pred[2:]]) and (bbox_pred[2] <= bbox_pred[4]) and (bbox_pred[3] <= bbox_pred[5]))
|
@dataclasses.dataclass(frozen=True)
class ModelInfoSubmission(DataClassBase):
    """Leaderboard model-metadata submission record."""
    name: str
    author: str
    num_params_in_millions: int
    pretrained_data: str
    creation_time: str  # expected format 'YYYY-MM-DD'
    def validate(self):
        """Validate all fields; raises ValueError at the first invalid one."""
        self._check_value('name', (lambda x: x))
        self._check_value('author', (lambda x: x))
        self._check_value('num_params_in_millions', (lambda x: (x > 0)))
        self._check_value('pretrained_data', (lambda x: x))
        # strptime raises ValueError itself if the date string is malformed.
        self._check_value('creation_time', (lambda x: datetime.datetime.strptime(x, '%Y-%m-%d')))
|
def log_arg_env_config(args, config, output_dir):
    """Log the environment report, the parsed CLI args and the resolved config."""
    info = logging.info
    info('=> collecting env info (might take some time)')
    info('\n' + get_pretty_env_info())
    info(pprint.pformat(args))
    info(config)
    info(f'=> saving logging info into: {output_dir}')
|
def submit_predictions(prediction_list, submit_by, config, track, task):
    """Submit one fold of predictions plus the model metadata to the leaderboard."""
    from vision_benchmark.commands.submit_predictions import submit_predictions_to_leaderboard, submit_model_to_leaderboard
    submission = {
        'dataset_name': config.DATASET.DATASET,
        'model_name': config.MODEL.NAME,
        'track': track,
        'task': task,
        'created_by': submit_by,
        'predictions': [prediction_list],
    }
    logging.info('Submit model and predictions to leaderboard.')
    submit_predictions_to_leaderboard(submission)
    model_info = {
        'name': config.MODEL.NAME,
        'author': config.MODEL.AUTHOR,
        'num_params_in_millions': config.MODEL.NUM_PARAMS_IN_M,
        'pretrained_data': config.MODEL.PRETRAINED_DATA,
        'creation_time': config.MODEL.CREATION_TIME,
    }
    submit_model_to_leaderboard(model_info)
|
def _update_config_from_file(config, cfg_file):
    """Merge *cfg_file* into *config*, first recursing into its 'BASE' configs."""
    config.defrost()
    with open(cfg_file, 'r') as fin:
        yaml_cfg = yaml.load(fin, Loader=yaml.FullLoader)
    # Base configs are resolved relative to the current file's directory.
    for base_cfg in yaml_cfg.setdefault('BASE', ['']):
        if base_cfg:
            _update_config_from_file(config, op.join(op.dirname(cfg_file), base_cfg))
    print('=> merge config from {}'.format(cfg_file))
    config.merge_from_file(cfg_file)
    config.freeze()
|
def update_config(config, args):
    """Merge args.cfg (plus CLI overrides) into *config* and derive settings in place."""
    _update_config_from_file(config, args.cfg)
    config.defrost()
    config.merge_from_list(args.opts)
    # Linear LR scaling with the number of distributed workers.
    config.TRAIN.LR *= comm.world_size
    # Prepend the config file's basename to the experiment name.
    (file_name, _) = op.splitext(op.basename(args.cfg))
    config.NAME = (file_name + config.NAME)
    config.RANK = comm.rank
    if hasattr(config.TRAIN.LR_SCHEDULER, 'METHOD'):
        if ('timm' == config.TRAIN.LR_SCHEDULER.METHOD):
            # timm schedulers read the epoch count from their own args.
            config.TRAIN.LR_SCHEDULER.ARGS.epochs = config.TRAIN.END_EPOCH
    if ('timm' == config.TRAIN.OPTIMIZER):
        # Propagate the (scaled) LR into the timm optimizer args.
        config.TRAIN.OPTIMIZER_ARGS.lr = config.TRAIN.LR
    aug = config.AUG
    if ((aug.MIXUP > 0.0) or (aug.MIXCUT > 0.0) or aug.MIXCUT_MINMAX):
        # Any mix-style augmentation enabled => always apply it.
        aug.MIXUP_PROB = 1.0
    config.freeze()
|
class HFPTTokenizer(object):
    """Wrapper giving HuggingFace tokenizers a CLIP-style tokenize() interface.

    NOTE(review): a second, near-identical HFPTTokenizer definition appears
    later in this file and shadows this one at import time.
    """
    def __init__(self, pt_name=None):
        self.pt_name = pt_name
        # 0/1 flags for special tokens this wrapper itself added.
        self.added_sep_token = 0
        self.added_cls_token = 0
        self.enable_add_tokens = False
        # GPT-style tokenizers lack pad/sep tokens; EOS is reused for both.
        self.gpt_special_case = ((not self.enable_add_tokens) and ('gpt' in self.pt_name))
        if (pt_name is None):
            self.tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
        else:
            self.tokenizer = AutoTokenizer.from_pretrained(pt_name)
        if self.enable_add_tokens:
            # Dead branch by default: enable_add_tokens is hardcoded False above.
            if (self.tokenizer.sep_token is None):
                self.tokenizer.add_special_tokens({'sep_token': '<SEP>'})
                self.added_sep_token = 1
            if (self.tokenizer.cls_token is None):
                self.tokenizer.add_special_tokens({'cls_token': '<CLS>'})
                self.added_cls_token = 1
        if self.gpt_special_case:
            self.tokenizer.pad_token = self.tokenizer.eos_token
            self.tokenizer.sep_token = self.tokenizer.eos_token
    def get_eot_token(self):
        # First id of the encoded sep token (used as end-of-text).
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)[0]
    def get_sot_token(self):
        # First id of the encoded cls token (used as start-of-text).
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)[0]
    def get_eot_token_list(self):
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)
    def get_sot_token_list(self):
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)
    def get_tokenizer_obj(self):
        return self.tokenizer
    def check_added_tokens(self):
        # How many special tokens this wrapper added (0-2).
        return (self.added_sep_token + self.added_cls_token)
    def tokenize(self, texts: Union[(str, List[str])], context_length: int=77):
        """Tokenize *texts* into a (batch, <=context_length) LongTensor of ids."""
        if isinstance(texts, str):
            texts = [texts]
        padding = 'max_length'
        seqstart = []
        seqend = []
        # Reserve room for manually prepended/appended special tokens.
        max_length = context_length
        if (self.added_cls_token > 0):
            seqstart = self.get_sot_token_list()
            max_length = (max_length - 1)
        if (self.added_sep_token > 0):
            seqend = self.get_eot_token_list()
            max_length = (max_length - 1)
        tokens = self.tokenizer(texts, padding=padding, truncation=True, max_length=max_length)['input_ids']
        for i in range(len(tokens)):
            tokens[i] = ((seqstart + tokens[i]) + seqend)
        if self.gpt_special_case:
            # Force the final position to hold the EOT token id.
            for i in range(len(tokens)):
                tokens[i][(- 1)] = self.get_eot_token()
        result = torch.Tensor(tokens).type(torch.LongTensor)
        return result
    def get_vocab_size(self):
        return self.tokenizer.vocab_size
    def __call__(self, texts: Union[(str, List[str])], context_length: int=77):
        return self.tokenize(texts, context_length)
|
def build_tokenizer(tokenizer_name):
    """Build a text tokenizer from its config name.

    'clip' -> SimpleTokenizer; 'hf_<name>' / 'hfc_<name>' -> HuggingFace
    tokenizer for model ``<name>``.

    Raises:
        ValueError: if *tokenizer_name* matches none of the known schemes.
    """
    if tokenizer_name == 'clip':
        return SimpleTokenizer()
    # Prefix checks: the previous substring tests (`'hf_' in name`) would
    # also match names that merely contain the marker and then mis-slice
    # the HuggingFace model name.
    if tokenizer_name.startswith('hf_'):
        return HFPTTokenizer(pt_name=tokenizer_name[len('hf_'):])
    if tokenizer_name.startswith('hfc_'):
        return HFPTTokenizer(pt_name=tokenizer_name[len('hfc_'):])
    raise ValueError('Unknown tokenizer')
|
class HFPTTokenizer(object):
    """Wrapper giving HuggingFace tokenizers a CLIP-style tokenize() interface.

    NOTE(review): this is a duplicate of an earlier HFPTTokenizer class in
    this file (it shadows the earlier one at import time); the only textual
    difference is an unused `seqtok` local in tokenize(). Consider removing
    one copy.
    """
    def __init__(self, pt_name=None):
        self.pt_name = pt_name
        # 0/1 flags for special tokens this wrapper itself added.
        self.added_sep_token = 0
        self.added_cls_token = 0
        self.enable_add_tokens = False
        # GPT-style tokenizers lack pad/sep tokens; EOS is reused for both.
        self.gpt_special_case = ((not self.enable_add_tokens) and ('gpt' in self.pt_name))
        if (pt_name is None):
            self.tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
        else:
            self.tokenizer = AutoTokenizer.from_pretrained(pt_name)
        if self.enable_add_tokens:
            # Dead branch by default: enable_add_tokens is hardcoded False above.
            if (self.tokenizer.sep_token is None):
                self.tokenizer.add_special_tokens({'sep_token': '<SEP>'})
                self.added_sep_token = 1
            if (self.tokenizer.cls_token is None):
                self.tokenizer.add_special_tokens({'cls_token': '<CLS>'})
                self.added_cls_token = 1
        if self.gpt_special_case:
            self.tokenizer.pad_token = self.tokenizer.eos_token
            self.tokenizer.sep_token = self.tokenizer.eos_token
    def get_eot_token(self):
        # First id of the encoded sep token (used as end-of-text).
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)[0]
    def get_sot_token(self):
        # First id of the encoded cls token (used as start-of-text).
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)[0]
    def get_eot_token_list(self):
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)
    def get_sot_token_list(self):
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)
    def get_tokenizer_obj(self):
        return self.tokenizer
    def check_added_tokens(self):
        # How many special tokens this wrapper added (0-2).
        return (self.added_sep_token + self.added_cls_token)
    def tokenize(self, texts: Union[(str, List[str])], context_length: int=77):
        """Tokenize *texts* into a (batch, <=context_length) LongTensor of ids."""
        if isinstance(texts, str):
            texts = [texts]
        padding = 'max_length'
        seqstart = []
        seqtok = []  # NOTE(review): unused; absent from the earlier duplicate.
        seqend = []
        # Reserve room for manually prepended/appended special tokens.
        max_length = context_length
        if (self.added_cls_token > 0):
            seqstart = self.get_sot_token_list()
            max_length = (max_length - 1)
        if (self.added_sep_token > 0):
            seqend = self.get_eot_token_list()
            max_length = (max_length - 1)
        tokens = self.tokenizer(texts, padding=padding, truncation=True, max_length=max_length)['input_ids']
        for i in range(len(tokens)):
            tokens[i] = ((seqstart + tokens[i]) + seqend)
        if self.gpt_special_case:
            # Force the final position to hold the EOT token id.
            for i in range(len(tokens)):
                tokens[i][(- 1)] = self.get_eot_token()
        result = torch.Tensor(tokens).type(torch.LongTensor)
        return result
    def get_vocab_size(self):
        return self.tokenizer.vocab_size
    def __call__(self, texts: Union[(str, List[str])], context_length: int=77):
        return self.tokenize(texts, context_length)
|
def get_prompt_templates():
    """Return the CLIP-style prompt template strings ('{}' is the class name)."""
    prompt_templates = ['{}.', 'a photo of a {}.', 'a bad photo of a {}.', 'a photo of many {}.', 'a sculpture of a {}.', 'a photo of the hard to see {}.', 'a low resolution photo of the {}.', 'a rendering of a {}.', 'graffiti of a {}.', 'a bad photo of the {}.', 'a cropped photo of the {}.', 'a tattoo of a {}.', 'the embroidered {}.', 'a photo of a hard to see {}.', 'a bright photo of a {}.', 'a photo of a clean {}.', 'a photo of a dirty {}.', 'a dark photo of the {}.', 'a drawing of a {}.', 'a photo of my {}.', 'the plastic {}.', 'a photo of the cool {}.', 'a close-up photo of a {}.', 'a black and white photo of the {}.', 'a painting of the {}.', 'a painting of a {}.', 'a pixelated photo of the {}.', 'a sculpture of the {}.', 'a bright photo of the {}.', 'a cropped photo of a {}.', 'a plastic {}.', 'a photo of the dirty {}.', 'a jpeg corrupted photo of a {}.', 'a blurry photo of the {}.', 'a photo of the {}.', 'a good photo of the {}.', 'a rendering of the {}.', 'a {} in a video game.', 'a photo of one {}.', 'a doodle of a {}.', 'a close-up photo of the {}.', 'the origami {}.', 'the {} in a video game.', 'a sketch of a {}.', 'a doodle of the {}.', 'a origami {}.', 'a low resolution photo of a {}.', 'the toy {}.', 'a rendition of the {}.', 'a photo of the clean {}.', 'a photo of a large {}.', 'a rendition of a {}.', 'a photo of a nice {}.', 'a photo of a weird {}.', 'a blurry photo of a {}.', 'a cartoon {}.', 'art of a {}.', 'a sketch of the {}.', 'a embroidered {}.', 'a pixelated photo of a {}.', 'itap of the {}.', 'a jpeg corrupted photo of the {}.', 'a good photo of a {}.', 'a plushie {}.', 'a photo of the nice {}.', 'a photo of the small {}.', 'a photo of the weird {}.', 'the cartoon {}.', 'art of the {}.', 'a drawing of the {}.', 'a photo of the large {}.', 'a black and white photo of a {}.', 'the plushie {}.', 'a dark photo of a {}.', 'itap of a {}.', 'graffiti of the {}.', 'a toy {}.', 'itap of my {}.', 'a photo of a cool {}.', 'a photo of a small {}.', 'a 
tattoo of the {}.']
    return prompt_templates
|
def prompt_engineering(classnames):
    """Fill a randomly chosen prompt template with one of *classnames*.

    *classnames* may be a single name or a list (one is picked at random).
    Commas are stripped and '+' becomes a space before substitution.
    """
    templates = get_prompt_templates()
    # Keep the original RNG call order: numpy picks the template first,
    # then (only for lists) stdlib random picks the class name.
    template_idx = np.random.randint(len(templates))
    classname = random.choice(classnames) if isinstance(classnames, list) else classnames
    cleaned = classname.replace(',', '').replace('+', ' ')
    return templates[template_idx].replace('{}', cleaned)
|
class Voc2007Classification(torch.utils.data.Dataset):
    """Multi-label classification dataset over Pascal VOC 2007."""
    def __init__(self, data_root, image_set='train', transform=None):
        '\n Pascal voc2007 training/validation data: http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar\n test data: http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar\n '
        self.data_root = self._update_path(data_root, image_set)
        self.transform = transform
        # Maps image id -> multi-hot label vector (one entry per category).
        self.labels = self._read_annotation(image_set)
        self.images = list(self.labels.keys())
    @staticmethod
    def _update_path(data_root, image_set):
        """Append the split-specific VOC subdirectory to *data_root*.

        NOTE(review): plain string concatenation -- data_root must end with
        a path separator for the result to be valid; confirm callers do so.
        """
        if ((image_set == 'train') or (image_set == 'val')):
            data_root += 'train/VOCdevkit/VOC2007'
        elif (image_set == 'test'):
            data_root += 'test/VOCdevkit 2/VOC2007'
        else:
            raise Exception('Incorrect image set!')
        return data_root
    def __getitem__(self, index):
        """Return (image_tensor, LongTensor multi-hot label) for *index*."""
        img_path = os.path.join(self.data_root, (('JPEGImages/' + self.images[index]) + '.jpg'))
        image = Image.open(img_path).convert('RGB')
        if (self.transform is not None):
            image = self.transform(image)
        else:
            image = transforms.ToTensor()(image)
        label = self.labels[self.images[index]]
        label = torch.LongTensor(label)
        return (image, label)
    def __len__(self):
        return len(self.images)
    def _read_annotation(self, image_set='train'):
        '\n Annotation interpolation, refer to:\n http://host.robots.ox.ac.uk/pascal/VOC/voc2007/htmldoc/voc.html#SECTION00093000000000000000\n '
        object_categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
        annotation_folder = os.path.join(self.data_root, 'ImageSets/Main/')
        # One '<category>_<split>.txt' file per category.
        files = [file_name for file_name in os.listdir(annotation_folder) if file_name.endswith((('_' + image_set) + '.txt'))]
        labels_all = dict()
        for file_name in files:
            label_str = file_name.split('_')[0]
            label_int = object_categories.index(label_str)
            with open(((annotation_folder + '/') + file_name), 'r') as fread:
                for line in fread.readlines():
                    # Fixed-offset parse: chars [0:6] are the image id,
                    # chars [7:9] the presence flag (1 / -1 / 0).
                    # NOTE(review): assumes 6-char ids -- VOC2007 format.
                    index = line[:6]
                    if (index not in labels_all.keys()):
                        labels_all[index] = ([0] * len(object_categories))
                    flag = 1
                    if (line[7:9] and (int(line[7:9]) != 1)):
                        flag = (- 1)
                    if (flag == 1):
                        # Treats only exact flag 1 as positive; 0 ("difficult")
                        # and -1 both remain negative in the multi-hot vector.
                        labels_all[index][label_int] = 1
        return labels_all
|
def _is_depthwise(m):
return (isinstance(m, nn.Conv2d) and (m.groups == m.in_channels) and (m.groups == m.out_channels))
|
def _set_wd(cfg, model):
    """Split model parameters into weight-decay and no-weight-decay groups.

    Exclusions come from cfg.TRAIN.WITHOUT_WD_LIST (supports 'depthwise',
    'bn', 'gn', 'ln', 'bias') plus anything listed by model.no_weight_decay().
    Returns a two-entry param-group list for torch optimizers, where the
    second group has weight_decay=0.0.
    """
    without_decay_list = cfg.TRAIN.WITHOUT_WD_LIST
    without_decay_depthwise = []
    without_decay_norm = []
    # Pass 1: collect tensors of modules whose params should skip decay.
    for m in model.modules():
        if (_is_depthwise(m) and ('depthwise' in without_decay_list)):
            without_decay_depthwise.append(m.weight)
        elif (isinstance(m, nn.BatchNorm2d) and ('bn' in without_decay_list)):
            without_decay_norm.append(m.weight)
            without_decay_norm.append(m.bias)
        elif (isinstance(m, nn.GroupNorm) and ('gn' in without_decay_list)):
            without_decay_norm.append(m.weight)
            without_decay_norm.append(m.bias)
        elif (isinstance(m, nn.LayerNorm) and ('ln' in without_decay_list)):
            without_decay_norm.append(m.weight)
            without_decay_norm.append(m.bias)
    with_decay = []
    without_decay = []
    skip = {}
    if hasattr(model, 'no_weight_decay'):
        # Model-provided parameter names that must never be decayed.
        skip = model.no_weight_decay()
    # Pass 2: route every trainable parameter into exactly one group.
    for (n, p) in model.named_parameters():
        ever_set = False
        if (p.requires_grad is False):
            # Frozen parameters are left out of both groups.
            continue
        if (n in skip):
            print('=> set {} wd to 0'.format(n))
            without_decay.append(p)
            continue
        # Identity (`is`) comparison against the tensors collected in pass 1.
        for pp in without_decay_depthwise:
            if (p is pp):
                if cfg.VERBOSE:
                    print('=> set depthwise({}) wd to 0'.format(n))
                without_decay.append(p)
                ever_set = True
                break
        for pp in without_decay_norm:
            if (p is pp):
                if cfg.VERBOSE:
                    print('=> set norm({}) wd to 0'.format(n))
                without_decay.append(p)
                ever_set = True
                break
        if ((not ever_set) and ('bias' in without_decay_list) and n.endswith('.bias')):
            if cfg.VERBOSE:
                print('=> set bias({}) wd to 0'.format(n))
            without_decay.append(p)
        elif (not ever_set):
            with_decay.append(p)
    # First group keeps the optimizer's default weight decay.
    params = [{'params': with_decay}, {'params': without_decay, 'weight_decay': 0.0}]
    return params
|
def build_optimizer(cfg, model):
    """Build the optimizer selected by cfg.TRAIN.OPTIMIZER.

    Supports 'timm' (delegates to timm's create_optimizer), 'sgd', 'adam',
    'adamW' and 'rmsprop'. With cfg.TRAIN.TWO_LR, 'sgd'/'adam' use a 10x
    smaller LR for parameters whose names contain 'backbone'.
    Returns None for an unrecognized optimizer name.
    """
    if (cfg.TRAIN.OPTIMIZER == 'timm'):
        args = cfg.TRAIN.OPTIMIZER_ARGS
        print(f'=> usage timm optimizer args: {cfg.TRAIN.OPTIMIZER_ARGS}')
        optimizer = create_optimizer(args, model)
        return optimizer
    optimizer = None
    # Weight-decay grouping (see _set_wd); ignored on the TWO_LR branches.
    params = _set_wd(cfg, model)
    if (cfg.TRAIN.OPTIMIZER == 'sgd'):
        if cfg.TRAIN.TWO_LR:
            trunk_parameters = []
            head_parameters = []
            for (name, param) in model.named_parameters():
                if ('backbone' in name):
                    trunk_parameters.append(param)
                else:
                    head_parameters.append(param)
            # Trunk trains at LR * 0.1, head at full LR.
            optimizer = optim.SGD([{'params': trunk_parameters}, {'params': head_parameters, 'lr': cfg.TRAIN.LR}], lr=(cfg.TRAIN.LR * 0.1), momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WD, nesterov=cfg.TRAIN.NESTEROV)
        else:
            optimizer = optim.SGD(params, lr=cfg.TRAIN.LR, momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WD, nesterov=cfg.TRAIN.NESTEROV)
    elif (cfg.TRAIN.OPTIMIZER == 'adam'):
        if cfg.TRAIN.TWO_LR:
            trunk_parameters = []
            head_parameters = []
            for (name, param) in model.named_parameters():
                if ('backbone' in name):
                    trunk_parameters.append(param)
                else:
                    head_parameters.append(param)
            optimizer = optim.Adam([{'params': trunk_parameters}, {'params': head_parameters, 'lr': cfg.TRAIN.LR}], lr=(cfg.TRAIN.LR * 0.1), weight_decay=cfg.TRAIN.WD)
        else:
            optimizer = optim.Adam(params, lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WD)
    elif (cfg.TRAIN.OPTIMIZER == 'adamW'):
        optimizer = optim.AdamW(params, lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WD)
    elif (cfg.TRAIN.OPTIMIZER == 'rmsprop'):
        optimizer = optim.RMSprop(params, lr=cfg.TRAIN.LR, momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WD, alpha=cfg.TRAIN.RMSPROP_ALPHA, centered=cfg.TRAIN.RMSPROP_CENTERED)
    return optimizer
|
class Comm(object):
    """Thin helper around torch.distributed that degrades gracefully to a
    single-process view (world_size 1, rank 0) when distributed training is
    unavailable or the process group is not initialized."""
    def __init__(self):
        self.local_rank = 0
    @property
    def world_size(self):
        """Number of processes; 1 when not running distributed."""
        if not dist.is_available():
            return 1
        if not dist.is_initialized():
            return 1
        return dist.get_world_size()
    @property
    def rank(self):
        """Global rank of this process; 0 when not running distributed."""
        if not dist.is_available():
            return 0
        if not dist.is_initialized():
            return 0
        return dist.get_rank()
    @property
    def local_rank(self):
        """Node-local rank; reported as 0 unless the process group is live."""
        if not dist.is_available():
            return 0
        if not dist.is_initialized():
            return 0
        return self._local_rank
    @local_rank.setter
    def local_rank(self, value):
        # The original setter first stored 0 for the unavailable/uninitialized
        # cases and then unconditionally overwrote that with `value`; those
        # dead stores are removed here (behavior is identical -- the getter
        # already masks `_local_rank` in the non-distributed cases).
        self._local_rank = value
    @property
    def head(self):
        """'Rank[r/w]' prefix used in log messages."""
        return 'Rank[{}/{}]'.format(self.rank, self.world_size)
    def is_main_process(self):
        """True on rank 0 (always true in single-process runs)."""
        return self.rank == 0
    def synchronize(self):
        """Barrier across all processes; no-op in single-process runs."""
        if self.world_size == 1:
            return
        dist.barrier()
|
def all_gather(data):
    '\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n '
    world_size = comm.world_size
    if (world_size == 1):
        # Single process: nothing to gather.
        return [data]
    # Serialize the object into a CUDA byte tensor.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # Exchange buffer sizes first, since dist.all_gather requires all
    # participating tensors to have identical shapes.
    local_size = torch.LongTensor([tensor.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # Pad the local buffer up to the maximum size across ranks.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if (local_size != max_size):
        padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Strip each rank's padding and unpickle.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
|
def reduce_dict(input_dict, average=True):
    '\n Args:\n input_dict (dict): all the values will be reduced\n average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that process with rank\n 0 has the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n '
    world_size = comm.world_size
    if (world_size < 2):
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks its values in the same order.
        names = []
        values = []
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if ((dist.get_rank() == 0) and average):
            # Only rank 0 holds the reduced sum; turn it into a mean there.
            values /= world_size
        reduced_dict = {k: v for (k, v) in zip(names, values)}
    return reduced_dict
|
def gather_tensors(tensor):
    '\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n '
    tensors_gather = [torch.ones_like(tensor) for _ in range(comm.world_size)]
    dist.all_gather(tensors_gather, tensor, async_op=False)
    # Re-insert the local tensor at this rank's slot so the local shard of
    # the concatenated output still carries its autograd history.
    tensors_gather[comm.rank] = tensor
    output = torch.cat(tensors_gather, dim=0)
    return output
|
def setup_logger(final_output_dir, rank, phase):
    """Route root logging to a per-rank, timestamped file and to the console."""
    timestamp = time.strftime('%Y-%m-%d-%H-%M')
    final_log_file = os.path.join(final_output_dir, f'{phase}_{timestamp}_rank{rank}.txt')
    # Include the process id and the distributed-rank header in every record.
    fmt = ('%(asctime)-15s:[P:%(process)d]:' + comm.head) + ' %(message)s'
    logging.basicConfig(filename=str(final_log_file), format=fmt)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter(fmt))
    logging.getLogger('').addHandler(console)
|
def create_logger(cfg, phase='train'):
    """Create <OUTPUT_DIR>/<dataset>/<cfg.NAME>, configure logging, return the dir."""
    root_output_dir = Path(cfg.OUTPUT_DIR)
    final_output_dir = root_output_dir / cfg.DATASET.DATASET / cfg.NAME
    # Create the root first, then the experiment directory beneath it.
    for directory in (root_output_dir, final_output_dir):
        print('=> creating {} ...'.format(directory))
        directory.mkdir(parents=True, exist_ok=True)
    print('=> setup logger ...')
    setup_logger(final_output_dir, cfg.RANK, phase)
    return str(final_output_dir)
|
@TRAINER_REGISTRY.register()
class ZeroshotCLIP(TrainerX):
    """Zero-shot CLIP trainer: precomputes normalized text features for every
    class name using the dataset-specific prompt template."""
    def build_model(self):
        """Load CLIP and cache L2-normalized text features for all classes."""
        cfg = self.cfg
        classnames = self.dm.dataset.classnames
        print(f'Loading CLIP (backbone: {cfg.MODEL.BACKBONE.NAME})')
        clip_model = load_clip_to_cpu(cfg)
        clip_model.to(self.device)
        # One prompt per class, from the dataset's custom template.
        temp = CUSTOM_TEMPLATES[cfg.DATASET.NAME]
        prompts = [temp.format(c.replace('_', ' ')) for c in classnames]
        print(f'Prompts: {prompts}')
        prompts = torch.cat([clip.tokenize(p) for p in prompts])
        prompts = prompts.to(self.device)
        with torch.no_grad():
            text_features = clip_model.encode_text(prompts)
            text_features = (text_features / text_features.norm(dim=(- 1), keepdim=True))
        self.text_features = text_features
        self.clip_model = clip_model
    def model_inference(self, image):
        """Return scaled cosine-similarity logits between *image* and all classes."""
        image_features = self.clip_model.encode_image(image)
        image_features = (image_features / image_features.norm(dim=(- 1), keepdim=True))
        logit_scale = self.clip_model.logit_scale.exp()
        logits = ((logit_scale * image_features) @ self.text_features.t())
        return logits
|
@TRAINER_REGISTRY.register()
class ZeroshotCLIP2(ZeroshotCLIP):
    """Zero-shot CLIP with prompt ensembling: text features are averaged over
    IMAGENET_TEMPLATES_SELECT plus the dataset's custom template."""
    templates = IMAGENET_TEMPLATES_SELECT
    def build_model(self):
        """Load frozen CLIP and cache template-ensembled, normalized text features."""
        cfg = self.cfg
        classnames = self.dm.dataset.classnames
        print(f'Loading CLIP (backbone: {cfg.MODEL.BACKBONE.NAME})')
        clip_model = load_clip_to_cpu(cfg)
        clip_model.to(self.device)
        for params in clip_model.parameters():
            params.requires_grad_(False)
        if cfg.DATASET.NAME != 'ImageNet':
            # Rebind with `+` instead of `+=`: the original
            # `self.templates += [...]` extended the *class-level* list in
            # place, so templates accumulated across instances and across
            # repeated build_model() calls. `+` creates a fresh
            # instance-level list and leaves the class attribute untouched.
            self.templates = self.templates + [CUSTOM_TEMPLATES[cfg.DATASET.NAME]]
        num_temp = len(self.templates)
        print(f'Prompt ensembling (n={num_temp})')
        mean_text_features = 0
        for i, temp in enumerate(self.templates):
            prompts = [temp.format(c.replace('_', ' ')) for c in classnames]
            prompts = torch.cat([clip.tokenize(p) for p in prompts]).to(self.device)
            text_features = clip_model.encode_text(prompts)
            text_features = text_features / text_features.norm(dim=(- 1), keepdim=True)
            mean_text_features = mean_text_features + text_features
        # Average over templates, then re-normalize the ensemble.
        mean_text_features = mean_text_features / num_temp
        mean_text_features = mean_text_features / mean_text_features.norm(dim=(- 1), keepdim=True)
        self.text_features = mean_text_features
        self.clip_model = clip_model
|
def create_config(model_name='u256', timestep_rp=50):
    """Return (sampling config, model config, classifier config) for a
    pretrained guided-diffusion checkpoint.

    :param model_name: one of 'c64', 'c128', 'c256', 'u256', 'c512'
        ('c' = class-conditional, 'u' = unconditional; the number is the
        image resolution).
    :param timestep_rp: timestep respacing (number of sampling steps),
        stored as a string in the model config.
    :return: a (config, model_config, class_config) tuple of dicts.
    :raises ValueError: for an unknown model_name (the original fell
        through the if/elif chain and crashed with NameError at the return).
    """
    if model_name == 'c64':
        config = {'model_path': 'symlink/pretrained/64x64_diffusion.pt', 'classifier_path': 'symlink/pretrained/64x64_classifier.pt', 'image_size': 64, 'batch_size': 64, 'use_ddim': False, 'clip_denoised': True, 'classifier_scale': 0.1}
        model_config = {'attention_resolutions': '32, 16, 8', 'class_cond': True, 'diffusion_steps': 1000, 'rescale_timesteps': True, 'timestep_respacing': str(timestep_rp), 'dropout': 0.1, 'image_size': 64, 'learn_sigma': True, 'noise_schedule': 'cosine', 'num_channels': 192, 'num_head_channels': 64, 'num_res_blocks': 3, 'resblock_updown': True, 'use_new_attention_order': True, 'use_fp16': True, 'use_scale_shift_norm': True}
        class_config = {'classifier_depth': 4}
    elif model_name == 'c128':
        config = {'model_path': 'symlink/pretrained/128x128_diffusion.pt', 'classifier_path': 'symlink/pretrained/128x128_classifier.pt', 'image_size': 128, 'batch_size': 25, 'use_ddim': False, 'clip_denoised': True, 'classifier_scale': 1.25}
        model_config = {'attention_resolutions': '32, 16, 8', 'class_cond': True, 'diffusion_steps': 1000, 'rescale_timesteps': True, 'timestep_respacing': str(timestep_rp), 'image_size': 128, 'learn_sigma': True, 'noise_schedule': 'linear', 'num_channels': 256, 'num_heads': 4, 'num_res_blocks': 2, 'resblock_updown': True, 'use_fp16': True, 'use_scale_shift_norm': True}
        class_config = {'image_size': 128}
    elif model_name == 'c256':
        config = {'model_path': 'symlink/pretrained/256x256_diffusion.pt', 'classifier_path': 'symlink/pretrained/256x256_classifier.pt', 'image_size': 256, 'batch_size': 6, 'use_ddim': False, 'clip_denoised': True, 'classifier_scale': 2.5}
        model_config = {'attention_resolutions': '32, 16, 8', 'class_cond': True, 'diffusion_steps': 1000, 'rescale_timesteps': True, 'timestep_respacing': str(timestep_rp), 'image_size': 256, 'learn_sigma': True, 'noise_schedule': 'linear', 'num_channels': 256, 'num_head_channels': 64, 'num_res_blocks': 2, 'resblock_updown': True, 'use_fp16': True, 'use_scale_shift_norm': True}
        class_config = {'image_size': 256}
    elif model_name == 'u256':
        config = {'model_path': 'symlink/pretrained/256x256_diffusion_uncond.pt', 'classifier_path': 'symlink/pretrained/256x256_classifier.pt', 'image_size': 256, 'batch_size': 20, 'use_ddim': False, 'clip_denoised': True, 'classifier_scale': 10.0}
        model_config = {'attention_resolutions': '32, 16, 8', 'class_cond': False, 'diffusion_steps': 1000, 'rescale_timesteps': True, 'timestep_respacing': str(timestep_rp), 'image_size': 256, 'learn_sigma': True, 'noise_schedule': 'linear', 'num_channels': 256, 'num_head_channels': 64, 'num_res_blocks': 2, 'resblock_updown': True, 'use_fp16': True, 'use_scale_shift_norm': True}
        class_config = {'image_size': 256}
    elif model_name == 'c512':
        config = {'model_path': 'symlink/pretrained/512x512_diffusion.pt', 'classifier_path': 'symlink/pretrained/512x512_classifier.pt', 'image_size': 512, 'batch_size': 9, 'use_ddim': False, 'clip_denoised': True, 'classifier_scale': 9.0}
        model_config = {'attention_resolutions': '32, 16, 8', 'class_cond': True, 'diffusion_steps': 1000, 'rescale_timesteps': True, 'timestep_respacing': str(timestep_rp), 'image_size': 512, 'learn_sigma': True, 'noise_schedule': 'linear', 'num_channels': 256, 'num_head_channels': 64, 'num_res_blocks': 2, 'resblock_updown': True, 'use_fp16': False, 'use_scale_shift_norm': True}
        class_config = {'image_size': 512}
    else:
        raise ValueError(f'unknown model name: {model_name}')
    return (config, model_config, class_config)
|
def setup_dist():
    """
    Set up a torch.distributed process group, coordinating via MPI.

    Rank 0 broadcasts the master hostname and a free TCP port to all MPI
    ranks, which then initialize a 'gloo' process group through the standard
    env:// rendezvous. The bcast calls are collectives and must execute in
    the same order on every rank.
    """
    if dist.is_initialized():
        return
    # Pin each rank to one GPU before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = f'{(MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE)}'
    comm = MPI.COMM_WORLD
    backend = 'gloo'
    if (backend == 'gloo'):
        hostname = 'localhost'
    else:
        # Non-gloo backends need a routable address for the master.
        hostname = socket.gethostbyname(socket.getfqdn())
    os.environ['MASTER_ADDR'] = comm.bcast(hostname, root=0)
    os.environ['RANK'] = str(comm.rank)
    os.environ['WORLD_SIZE'] = str(comm.size)
    # NOTE(review): _find_free_port() runs on every rank, but only rank 0's
    # result survives the bcast; the other ranks' ports are discarded.
    port = comm.bcast(_find_free_port(), root=0)
    os.environ['MASTER_PORT'] = str(port)
    dist.init_process_group(backend=backend, init_method='env://')
|
def dev():
    """
    Get the device to use for torch.distributed.

    :return: the current CUDA device when available, otherwise the CPU.
    """
    if th.cuda.is_available():
        # Plain string constant; the original wrapped it in a pointless
        # f-string with no placeholders.
        return th.device('cuda')
    return th.device('cpu')
|
def load_state_dict(path, **kwargs):
    """
    Load a PyTorch file without redundant fetches across MPI ranks.

    Rank 0 reads the file once and broadcasts its bytes to all other ranks
    in 2**30-byte chunks, so the (possibly remote) blob store is only hit by
    a single process. The bcast calls are collectives: both branches must
    issue the same number of them in the same order.

    :param path: a path readable by blobfile.
    :param kwargs: forwarded to ``th.load`` (e.g. ``map_location``).
    :return: the deserialized object.
    """
    # Keep each MPI message comfortably under the 32-bit count limit.
    chunk_size = (2 ** 30)
    if (MPI.COMM_WORLD.Get_rank() == 0):
        with bf.BlobFile(path, 'rb') as f:
            data = f.read()
        num_chunks = (len(data) // chunk_size)
        if (len(data) % chunk_size):
            num_chunks += 1
        MPI.COMM_WORLD.bcast(num_chunks)
        for i in range(0, len(data), chunk_size):
            MPI.COMM_WORLD.bcast(data[i:(i + chunk_size)])
    else:
        # Receive the chunk count first, then reassemble the byte string in
        # the same collective order that rank 0 sends it.
        num_chunks = MPI.COMM_WORLD.bcast(None)
        data = bytes()
        for _ in range(num_chunks):
            data += MPI.COMM_WORLD.bcast(None)
    return th.load(io.BytesIO(data), **kwargs)
|
def sync_params(params):
    """
    Synchronize a sequence of Tensors across ranks from rank 0.
    """
    # A single no_grad scope around the loop; broadcasting overwrites each
    # tensor in place with rank 0's values.
    with th.no_grad():
        for tensor in params:
            dist.broadcast(tensor, 0)
|
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
|
def convert_module_to_f16(l):
    """
    Convert primitive modules to float16.

    Only conv layers are touched; other module types pass through untouched.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    if l.bias is not None:
        l.bias.data = l.bias.data.half()
|
def convert_module_to_f32(l):
    """
    Convert primitive modules to float32, undoing convert_module_to_f16().
    """
    conv_types = (nn.Conv1d, nn.Conv2d, nn.Conv3d)
    if isinstance(l, conv_types):
        l.weight.data = l.weight.data.float()
        bias = l.bias
        if bias is not None:
            bias.data = bias.data.float()
|
def make_master_params(param_groups_and_shapes):
    """
    Copy model parameters into a (differently-shaped) list of full-precision
    parameters: one flattened fp32 Parameter per group.
    """
    masters = []
    for group, shape in param_groups_and_shapes:
        flat = _flatten_dense_tensors([p.detach().float() for _, p in group])
        master = nn.Parameter(flat.view(shape))
        master.requires_grad = True
        masters.append(master)
    return masters
|
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
    """
    Copy the gradients from the model parameters into the master parameters
    from make_master_params(): one flattened grad tensor per group.
    """
    for master, (group, shape) in zip(master_params, param_groups_and_shapes):
        grads = [param_grad_or_zeros(p) for _, p in group]
        master.grad = _flatten_dense_tensors(grads).view(shape)
|
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """
    Copy the master parameter data back into the model parameters.
    """
    for master, (group, _) in zip(master_params, param_groups_and_shapes):
        unflat = unflatten_master_params(group, master.view(-1))
        for (_, model_param), new_value in zip(group, unflat):
            # copy_ through detach(): write data without touching autograd.
            model_param.detach().copy_(new_value)
|
def unflatten_master_params(param_group, master_param):
    """Split a flat master tensor back into tensors shaped like the
    (name, param) entries of `param_group`."""
    reference_tensors = [param for (_, param) in param_group]
    return _unflatten_dense_tensors(master_param, reference_tensors)
|
def get_param_groups_and_shapes(named_model_params):
    """Partition named parameters into two flattenable groups.

    Scalars/vectors (ndim <= 1) flatten to shape -1; matrices and
    higher-rank tensors flatten to shape (1, -1).
    """
    named_model_params = list(named_model_params)
    scalars_and_vectors = [(n, p) for n, p in named_model_params if p.ndim <= 1]
    matrices = [(n, p) for n, p in named_model_params if p.ndim > 1]
    return [(scalars_and_vectors, -1), (matrices, (1, -1))]
|
def master_params_to_state_dict(model, param_groups_and_shapes, master_params, use_fp16):
    """Build a state dict for `model` whose parameter entries come from
    `master_params` (flattened fp32 groups when use_fp16, otherwise a plain
    list in named_parameters() order)."""
    state_dict = model.state_dict()
    if use_fp16:
        for master, (group, _) in zip(master_params, param_groups_and_shapes):
            unflat = unflatten_master_params(group, master.view(-1))
            for (name, _), value in zip(group, unflat):
                assert name in state_dict
                state_dict[name] = value
    else:
        for i, (name, _value) in enumerate(model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
    return state_dict
|
def state_dict_to_master_params(model, state_dict, use_fp16):
    """Inverse of master_params_to_state_dict(): pull parameter tensors out
    of `state_dict` in the layout the trainer expects."""
    if not use_fp16:
        return [state_dict[name] for name, _ in model.named_parameters()]
    named = [(name, state_dict[name]) for name, _ in model.named_parameters()]
    groups = get_param_groups_and_shapes(named)
    return make_master_params(groups)
|
def zero_master_grads(master_params):
    """Drop (not merely zero) the gradient of every master parameter."""
    for tensor in master_params:
        tensor.grad = None
|
def zero_grad(model_params):
    """Zero existing gradients in place, detaching them from their graphs."""
    for p in model_params:
        if p.grad is None:
            continue
        p.grad.detach_()
        p.grad.zero_()
|
def param_grad_or_zeros(param):
    """Return the parameter's detached gradient, or zeros if it has none."""
    grad = param.grad
    return th.zeros_like(param) if grad is None else grad.data.detach()
|
class MixedPrecisionTrainer():
    """
    Owns the fp16/fp32 parameter bookkeeping for training.

    In fp16 mode the model weights are converted to half precision while a
    flattened fp32 copy ("master params") receives the optimizer updates.
    Losses are scaled by 2**lg_loss_scale so half-precision gradients stay
    representable; the scale shrinks on overflow and grows slowly otherwise.
    """

    def __init__(self, *, model, use_fp16=False, fp16_scale_growth=0.001, initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE):
        self.model = model
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.model_params = list(self.model.parameters())
        self.master_params = self.model_params
        self.param_groups_and_shapes = None
        # log2 of the dynamic loss scale.
        self.lg_loss_scale = initial_lg_loss_scale
        if self.use_fp16:
            self.param_groups_and_shapes = get_param_groups_and_shapes(self.model.named_parameters())
            self.master_params = make_master_params(self.param_groups_and_shapes)
            self.model.convert_to_fp16()

    def zero_grad(self):
        """Zero the gradients on the model parameters."""
        zero_grad(self.model_params)

    def backward(self, loss: th.Tensor):
        """Backpropagate `loss`, scaling it first in fp16 mode."""
        if self.use_fp16:
            loss_scale = 2 ** self.lg_loss_scale
            (loss * loss_scale).backward()
        else:
            loss.backward()

    def optimize(self, opt: th.optim.Optimizer):
        """Take an optimizer step. Returns False if the step was skipped."""
        if self.use_fp16:
            return self._optimize_fp16(opt)
        else:
            return self._optimize_normal(opt)

    def _optimize_fp16(self, opt: th.optim.Optimizer):
        logger.logkv_mean('lg_loss_scale', self.lg_loss_scale)
        model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
        grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
        if check_overflow(grad_norm):
            # Overflow: shrink the loss scale and skip this step entirely.
            self.lg_loss_scale -= 1
            logger.log(f'Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}')
            zero_master_grads(self.master_params)
            return False
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        # Un-scale the gradients of *every* master group before stepping.
        # The original only rescaled master_params[0], leaving the second
        # group's gradients still multiplied by 2**lg_loss_scale.
        for master_param in self.master_params:
            master_param.grad.mul_(1.0 / (2 ** self.lg_loss_scale))
        opt.step()
        zero_master_grads(self.master_params)
        master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
        self.lg_loss_scale += self.fp16_scale_growth
        return True

    def _optimize_normal(self, opt: th.optim.Optimizer):
        grad_norm, param_norm = self._compute_norms()
        logger.logkv_mean('grad_norm', grad_norm)
        logger.logkv_mean('param_norm', param_norm)
        opt.step()
        return True

    def _compute_norms(self, grad_scale=1.0):
        """Return (grad_norm, param_norm); grads are divided by grad_scale."""
        grad_norm = 0.0
        param_norm = 0.0
        for p in self.master_params:
            with th.no_grad():
                param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
                if p.grad is not None:
                    grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
        return (np.sqrt(grad_norm) / grad_scale), np.sqrt(param_norm)

    def master_params_to_state_dict(self, master_params):
        return master_params_to_state_dict(self.model, self.param_groups_and_shapes, master_params, self.use_fp16)

    def state_dict_to_master_params(self, state_dict):
        return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
|
def check_overflow(value):
    """True when `value` is +/-inf or NaN (NaN compares unequal to itself)."""
    inf = float('inf')
    return value == inf or value == -inf or value != value
|
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps. Beta schedules may be added, but
    should not be removed or changed once they are committed to maintain
    backwards compatibility.
    """
    if schedule_name == 'linear':
        # Scaled so the endpoints match the 1000-step reference schedule.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64)
    if schedule_name == 'cosine':
        def alpha_bar(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
        return betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar)
    raise NotImplementedError(f'unknown beta schedule: {schedule_name}')
|
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - abar(t_{i+1}) / abar(t_i), capped at max_beta.
    return np.array([
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ])
|
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """
    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts the injected noise epsilon
|
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    LEARNED = enum.auto()  # model outputs the log-variance directly
    FIXED_SMALL = enum.auto()  # use the clipped posterior variance
    FIXED_LARGE = enum.auto()  # use beta_t
    LEARNED_RANGE = enum.auto()  # model outputs a fraction between min/max log-variance
|
class LossType(enum.Enum):
    """Which training objective the diffusion process optimizes."""
    MSE = enum.auto()  # plain mean-squared error on the predicted target
    RESCALED_MSE = enum.auto()  # MSE with rescaled VLB terms (learned variances)
    KL = enum.auto()  # the variational lower bound
    RESCALED_KL = enum.auto()  # VLB rescaled to estimate the full bound

    def is_vb(self):
        """Return True for the variational-bound loss types."""
        return self in (LossType.KL, LossType.RESCALED_KL)
|
class GaussianDiffusion():
'\n Utilities for training and sampling diffusion models.\n\n Ported directly from here, and then adapted over time to further experimentation.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42\n\n :param betas: a 1-D numpy array of betas for each diffusion timestep,\n starting at T and going to 1.\n :param model_mean_type: a ModelMeanType determining what the model outputs.\n :param model_var_type: a ModelVarType determining how variance is output.\n :param loss_type: a LossType determining the loss function to use.\n :param rescale_timesteps: if True, pass floating point timesteps into the\n model so that they are always scaled like in the\n original paper (0 to 1000).\n '
    def __init__(self, *, betas, model_mean_type, model_var_type, loss_type, rescale_timesteps=False):
        """Precompute every per-timestep coefficient needed for sampling
        and training from the given beta schedule."""
        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type
        self.rescale_timesteps = rescale_timesteps
        # Use float64 throughout for accuracy of the cumulative products.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert (len(betas.shape) == 1), 'betas must be 1-D'
        assert ((betas > 0).all() and (betas <= 1).all())
        self.num_timesteps = int(betas.shape[0])
        alphas = (1.0 - betas)
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        # Shifted cumulative products: prev[0] == 1 (no noise before t=0),
        # next[-1] == 0 (fully noised beyond the last step).
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:(- 1)])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert (self.alphas_cumprod_prev.shape == (self.num_timesteps,))
        # Coefficients for the forward process q(x_t | x_0).
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt((1.0 - self.alphas_cumprod))
        self.log_one_minus_alphas_cumprod = np.log((1.0 - self.alphas_cumprod))
        self.sqrt_recip_alphas_cumprod = np.sqrt((1.0 / self.alphas_cumprod))
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(((1.0 / self.alphas_cumprod) - 1))
        # Coefficients for the posterior q(x_{t-1} | x_t, x_0).
        self.posterior_variance = ((betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        # Log calculation clipped because the posterior variance is 0 at the
        # beginning of the diffusion chain (replace entry 0 with entry 1).
        self.posterior_log_variance_clipped = np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))
        self.posterior_mean_coef1 = ((betas * np.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        self.posterior_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - self.alphas_cumprod))
        # g = sqrt(1 - abar) / sqrt(abar): per-step noise-to-signal ratio.
        # g_full is computed on the full 1000-step linear schedule regardless
        # of this instance's own (possibly respaced) schedule; note `betas`
        # and `alphas` are deliberately rebound here.
        betas = get_named_beta_schedule('linear', 1000)
        alphas = (1.0 - betas)
        alphas_cumprod = np.cumprod(alphas, axis=0)
        g_full = (np.sqrt((1 - alphas_cumprod)) / np.sqrt(alphas_cumprod))
        # NOTE(review): hard-codes CUDA; construction fails on CPU-only
        # hosts — confirm a GPU is always present where this is built.
        self.g_full = th.tensor(g_full).cuda()
        self.g = (np.sqrt((1 - self.alphas_cumprod)) / np.sqrt(self.alphas_cumprod))
        # g at the previous timestep; entry 0 is 0 since alphas_cumprod_prev[0] == 1.
        self.g_prev = (np.sqrt((1 - self.alphas_cumprod_prev)) / np.sqrt(self.alphas_cumprod_prev))
def q_mean_variance(self, x_start, t):
"\n Get the distribution q(x_t | x_0).\n\n :param x_start: the [N x C x ...] tensor of noiseless inputs.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n "
mean = (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
variance = _extract_into_tensor((1.0 - self.alphas_cumprod), t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return (mean, variance, log_variance)
def q_sample(self, x_start, t, noise=None):
'\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n '
if (noise is None):
noise = th.randn_like(x_start)
assert (noise.shape == x_start.shape)
return ((_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))
def q_posterior_mean_variance(self, x_start, x_t, t):
'\n Compute the mean and variance of the diffusion posterior:\n\n q(x_{t-1} | x_t, x_0)\n\n '
assert (x_start.shape == x_t.shape)
posterior_mean = ((_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (_extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t))
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
assert (posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0])
return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """
        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
        the initial x, x_0.

        :param model: the model, which takes a signal and a batch of timesteps
                      as input.
        :param x: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param clip_denoised: if True, clip the denoised signal into [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample. Applies before
            clip_denoised.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict with the following keys:
                 - 'mean': the model mean output.
                 - 'variance': the model variance output.
                 - 'log_variance': the log of 'variance'.
                 - 'pred_xstart': the prediction for x_0.
        """
        if (model_kwargs is None):
            model_kwargs = {}
        (B, C) = x.shape[:2]
        assert (t.shape == (B,))
        model_output = model(x, self._scale_timesteps(t), **model_kwargs)
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            # Model predicts mean target and variance values stacked along
            # the channel axis; split them apart.
            assert (model_output.shape == (B, (C * 2), *x.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            if (self.model_var_type == ModelVarType.LEARNED):
                model_log_variance = model_var_values
                model_variance = th.exp(model_log_variance)
            else:
                # LEARNED_RANGE: model_var_values is in [-1, 1] and
                # interpolates between the posterior (min) and beta (max)
                # log-variances.
                min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
                max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                frac = ((model_var_values + 1) / 2)
                model_log_variance = ((frac * max_log) + ((1 - frac) * min_log))
                model_variance = th.exp(model_log_variance)
        else:
            # Fixed variances. FIXED_LARGE uses beta_t with the first entry
            # replaced by the posterior variance for a better decoder
            # log-likelihood; FIXED_SMALL uses the true posterior variance.
            (model_variance, model_log_variance) = {ModelVarType.FIXED_LARGE: (np.append(self.posterior_variance[1], self.betas[1:]), np.log(np.append(self.posterior_variance[1], self.betas[1:]))), ModelVarType.FIXED_SMALL: (self.posterior_variance, self.posterior_log_variance_clipped)}[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)

        def process_xstart(x):
            # Optional denoising hook, then optional clamp to [-1, 1].
            if (denoised_fn is not None):
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp((- 1), 1)
            return x
        if (self.model_mean_type == ModelMeanType.PREVIOUS_X):
            # The model output *is* the previous-step mean; invert it to
            # recover x_0.
            pred_xstart = process_xstart(self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output))
            model_mean = model_output
        elif (self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]):
            if (self.model_mean_type == ModelMeanType.START_X):
                pred_xstart = process_xstart(model_output)
            else:
                pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
            # Mean of q(x_{t-1} | x_t, x_0) evaluated at the predicted x_0.
            (model_mean, _, _) = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
        else:
            raise NotImplementedError(self.model_mean_type)
        assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
        return {'mean': model_mean, 'variance': model_variance, 'log_variance': model_log_variance, 'pred_xstart': pred_xstart}
def p_mean_variance_non_warp(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n "
if (model_kwargs is None):
model_kwargs = {}
(B, C) = x.shape[:2]
assert (t.shape == (B,))
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
'\n t = t.long()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n \n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n 
)\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n pdb.set_trace()\n '
return {'eps': model_output}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert (x_t.shape == eps.shape)
return ((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (_extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps))
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert (x_t.shape == xprev.shape)
return ((_extract_into_tensor((1.0 / self.posterior_mean_coef1), t, x_t.shape) * xprev) - (_extract_into_tensor((self.posterior_mean_coef2 / self.posterior_mean_coef1), t, x_t.shape) * x_t))
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - pred_xstart) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return (t.float() * (1000.0 / self.num_timesteps))
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
'\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n '
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
return new_mean
def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
'\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n '
gradient = cond_fn(x, t, p_mean_var, **model_kwargs)
new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n "
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs)))
out = p_mean_var.copy()
out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
(out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
return out
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n "
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, t, p_mean_var, **model_kwargs)))
out = p_mean_var.copy()
out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
(out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
return out
def p_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
"\n Sample x_{t-1} from the model at the given timestep.\n\n :param model: the model to sample from.\n :param x: the current tensor at x_{t-1}.\n :param t: the value of t, starting at 0 for the first diffusion step.\n :param clip_denoised: if True, clip the x_start prediction to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict containing the following keys:\n - 'sample': a random sample from the model.\n - 'pred_xstart': a prediction of x_0.\n "
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
noise = th.randn_like(x)
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
if (cond_fn is not None):
out['mean'] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
return {'sample': sample, 'pred_xstart': out['pred_xstart']}
def p_sample_with_grad(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
"\n Sample x_{t-1} from the model at the given timestep.\n\n :param model: the model to sample from.\n :param x: the current tensor at x_{t-1}.\n :param t: the value of t, starting at 0 for the first diffusion step.\n :param clip_denoised: if True, clip the x_start prediction to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict containing the following keys:\n - 'sample': a random sample from the model.\n - 'pred_xstart': a prediction of x_0.\n "
with th.enable_grad():
x = x.detach().requires_grad_()
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
noise = th.randn_like(x)
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
if (cond_fn is not None):
out['mean'] = self.condition_mean_with_grad(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
return {'sample': sample, 'pred_xstart': out['pred_xstart'].detach()}
def p_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False):
"\n Generate samples from the model.\n\n :param model: the model module.\n :param shape: the shape of the samples, (N, C, H, W).\n :param noise: if specified, the noise from the encoder to sample.\n Should be of the same shape as `shape`.\n :param clip_denoised: if True, clip x_start predictions to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param device: if specified, the device to create the samples on.\n If not specified, use a model parameter's device.\n :param progress: if True, show a tqdm progress bar.\n :return: a non-differentiable batch of samples.\n "
final = None
for sample in self.p_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, skip_timesteps=skip_timesteps, init_image=init_image, randomize_class=randomize_class, cond_fn_with_grad=cond_fn_with_grad):
final = sample
return final['sample']
def p_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False):
'\n Generate samples from the model and yield intermediate samples from\n each timestep of diffusion.\n\n Arguments are the same as p_sample_loop().\n Returns a generator over dicts, where each dict is the return value of\n p_sample().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
if (skip_timesteps and (init_image is None)):
init_image = th.zeros_like(img)
indices = list(range((self.num_timesteps - skip_timesteps)))[::(- 1)]
if (init_image is not None):
my_t = (th.ones([shape[0]], device=device, dtype=th.long) * indices[0])
img = self.q_sample(init_image, my_t, img)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
if (randomize_class and ('y' in model_kwargs)):
model_kwargs['y'] = th.randint(low=0, high=model.num_classes, size=model_kwargs['y'].shape, device=model_kwargs['y'].device)
with th.no_grad():
sample_fn = (self.p_sample_with_grad if cond_fn_with_grad else self.p_sample)
out = sample_fn(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs)
(yield out)
img = out['sample']
def ddim_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
'\n Sample x_{t-1} from the model using DDIM.\n\n Same usage as p_sample().\n '
out_orig = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
if (cond_fn is not None):
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
noise = th.randn_like(x)
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
sample = (mean_pred + ((nonzero_mask * sigma) * noise))
return {'sample': sample, 'pred_xstart': out_orig['pred_xstart']}
def ddim_sample_with_grad(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
'\n Sample x_{t-1} from the model using DDIM.\n\n Same usage as p_sample().\n '
with th.enable_grad():
x = x.detach().requires_grad_()
out_orig = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
if (cond_fn is not None):
out = self.condition_score_with_grad(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
out['pred_xstart'] = out['pred_xstart'].detach()
eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
noise = th.randn_like(x)
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
sample = (mean_pred + ((nonzero_mask * sigma) * noise))
return {'sample': sample, 'pred_xstart': out_orig['pred_xstart'].detach()}
def ddim_reverse_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, eta=0.0):
'\n Sample x_{t+1} from the model using DDIM reverse ODE.\n '
assert (eta == 0.0), 'Reverse ODE only for deterministic path'
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
eps = (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x) - out['pred_xstart']) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape))
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_next)) + (th.sqrt((1 - alpha_bar_next)) * eps))
return {'sample': mean_pred, 'pred_xstart': out['pred_xstart']}
def ddim_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False):
'\n Generate samples from the model using DDIM.\n\n Same usage as p_sample_loop().\n '
final = None
for sample in self.ddim_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, eta=eta, skip_timesteps=skip_timesteps, init_image=init_image, randomize_class=randomize_class, cond_fn_with_grad=cond_fn_with_grad):
final = sample
return final['sample']
def ddim_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False):
'\n Use DDIM to sample from the model and yield intermediate samples from\n each timestep of DDIM.\n\n Same usage as p_sample_loop_progressive().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
if (skip_timesteps and (init_image is None)):
init_image = th.zeros_like(img)
indices = list(range((self.num_timesteps - skip_timesteps)))[::(- 1)]
if (init_image is not None):
my_t = (th.ones([shape[0]], device=device, dtype=th.long) * indices[0])
img = self.q_sample(init_image, my_t, img)
if progress:
from fastprogress import progress_bar
if isinstance(progress, bool):
indices = progress_bar(indices)
else:
indices = progress_bar(indices, parent=progress)
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
if (randomize_class and ('y' in model_kwargs)):
model_kwargs['y'] = th.randint(low=0, high=model.num_classes, size=model_kwargs['y'].shape, device=model_kwargs['y'].device)
with th.no_grad():
sample_fn = (self.ddim_sample_with_grad if cond_fn_with_grad else self.ddim_sample)
out = sample_fn(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, eta=eta)
(yield out)
img = out['sample']
def plms_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, cond_fn_with_grad=False, order=2, old_out=None):
'\n Sample x_{t-1} from the model using Pseudo Linear Multistep.\n\n Same usage as p_sample().\n '
if ((not int(order)) or (not (1 <= order <= 4))):
raise ValueError('order is invalid (should be int from 1-4).')
def get_model_output(x, t):
with th.set_grad_enabled((cond_fn_with_grad and (cond_fn is not None))):
x = (x.detach().requires_grad_() if cond_fn_with_grad else x)
out_orig = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
if (cond_fn is not None):
if cond_fn_with_grad:
out = self.condition_score_with_grad(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
x = x.detach()
else:
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
return (eps, out, out_orig)
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
(eps, out, out_orig) = get_model_output(x, t)
if ((order >= 1) and (old_out is None)):
old_eps = [eps]
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt((1 - alpha_bar_prev)) * eps))
(eps_2, _, _) = get_model_output(mean_pred, (t - 1))
eps_prime = ((eps + eps_2) / 2)
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = ((pred_prime * th.sqrt(alpha_bar_prev)) + (th.sqrt((1 - alpha_bar_prev)) * eps_prime))
else:
old_eps = old_out['old_eps']
old_eps.append(eps)
cur_order = min(order, len(old_eps))
if (cur_order == 1):
eps_prime = old_eps[(- 1)]
elif (cur_order == 2):
eps_prime = (((3 * old_eps[(- 1)]) - old_eps[(- 2)]) / 2)
elif (cur_order == 3):
eps_prime = ((((23 * old_eps[(- 1)]) - (16 * old_eps[(- 2)])) + (5 * old_eps[(- 3)])) / 12)
elif (cur_order == 4):
eps_prime = (((((55 * old_eps[(- 1)]) - (59 * old_eps[(- 2)])) + (37 * old_eps[(- 3)])) - (9 * old_eps[(- 4)])) / 24)
else:
raise RuntimeError('cur_order is invalid.')
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = ((pred_prime * th.sqrt(alpha_bar_prev)) + (th.sqrt((1 - alpha_bar_prev)) * eps_prime))
if (len(old_eps) >= order):
old_eps.pop(0)
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
sample = ((mean_pred * nonzero_mask) + (out['pred_xstart'] * (1 - nonzero_mask)))
if (impu_fn is not None):
sample = self.condition_score3(impu_fn, None, sample, t, model_kwargs=model_kwargs)
return {'sample': sample, 'pred_xstart': out_orig['pred_xstart'], 'old_eps': old_eps}
def plms_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False, order=2):
'\n Generate samples from the model using Pseudo Linear Multistep.\n\n Same usage as p_sample_loop().\n '
final = None
for sample in self.plms_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, impu_fn=inpu_fn, model_kwargs=model_kwargs, device=device, progress=progress, skip_timesteps=skip_timesteps, init_image=init_image, randomize_class=randomize_class, cond_fn_with_grad=cond_fn_with_grad, order=order):
final = sample
return final['sample']
def plms_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False, order=2):
'\n Use PLMS to sample from the model and yield intermediate samples from each\n timestep of PLMS.\n\n Same usage as p_sample_loop_progressive().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
if (skip_timesteps and (init_image is None)):
init_image = th.zeros_like(img)
indices = list(range((self.num_timesteps - skip_timesteps)))[::(- 1)]
if (init_image is not None):
my_t = (th.ones([shape[0]], device=device, dtype=th.long) * indices[0])
img = self.q_sample(init_image, my_t, img)
if progress:
from fastprogress import progress_bar
if isinstance(progress, bool):
indices = progress_bar(indices)
else:
indices = progress_bar(indices, parent=progress)
old_out = None
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
if (randomize_class and ('y' in model_kwargs)):
model_kwargs['y'] = th.randint(low=0, high=model.num_classes, size=model_kwargs['y'].shape, device=model_kwargs['y'].device)
with th.no_grad():
out = self.plms_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, impu_fn=impu_fn, model_kwargs=model_kwargs, cond_fn_with_grad=cond_fn_with_grad, order=order, old_out=old_out)
(yield out)
old_out = out
img = out['sample']
def condition_score2(self, cond_fn, p_mean_var, x, t, del_g, s0=0, s_1=0, model_kwargs=None):
'\n Unlike condition_score(), this function output only grad value.\n Note that p_mean_var and s_1 is never used in this version.\n '
grad = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
return (((- grad) * del_g) * (1 - (s0 ** 2)).sqrt())
def condition_score3(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
x = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
return x
def stsp_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, cond_fn_with_grad=False, order=2, old_out=None):
'\n Sample x_{t-1} from the model using Pseudo Linear Multistep\n and Strange Splitting for conditioning.\n Same usage as p_sample().\n '
g0 = _extract_into_tensor(self.g, t[0], (1,))
g_1 = _extract_into_tensor(self.g_prev, t[0], (1,))
s0 = (1 / th.sqrt(((g0 ** 2) + 1)))
s_1 = (1 / th.sqrt(((g_1 ** 2) + 1)))
del_g = (g_1 - g0)
if (cond_fn is not None):
alpha_half = (1 / ((((g0 + g_1) ** 2) / 4) + 1))
s_half = th.sqrt(alpha_half)
grad = self.condition_score2(cond_fn, None, x, t, (del_g / 2), s0, s_half, model_kwargs=model_kwargs)
x = (x + (grad * s0))
out_orig = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
eps = self._predict_eps_from_xstart(x, t, out_orig['pred_xstart'])
if (old_out is None):
old_out = []
old_eps = [eps]
else:
old_eps = old_out['old_eps']
old_eps.append(eps)
eps_prime = plms_mixer(old_eps, order)
sample = (((x / s0) + (del_g * eps_prime)) * s_1)
if (cond_fn is not None):
if (t[0].long() < 1):
t = (t + 1)
grad = self.condition_score2(cond_fn, out_orig, sample, (t - 1), (del_g / 2), s_half, s_1, model_kwargs=model_kwargs)
sample = (sample + (grad * s_1))
if (impu_fn is not None):
sample = self.condition_score3(impu_fn, None, sample, t, model_kwargs=model_kwargs)
return {'sample': sample, 'pred_xstart': out_orig['pred_xstart'], 'old_eps': old_eps}
def stsp_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False, order=2):
'\n Generate samples from the model using Strang Splitting \n and Pseudo Linear Multistep.\n\n Same usage as p_sample_loop().\n '
final = None
for sample in self.stsp_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, impu_fn=impu_fn, model_kwargs=model_kwargs, device=device, progress=progress, skip_timesteps=skip_timesteps, init_image=init_image, randomize_class=randomize_class, cond_fn_with_grad=cond_fn_with_grad, order=order):
final = sample
return final['sample']
def stsp_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False, order=2):
'\n Use STSP to sample from the model and yield intermediate samples from each\n timestep of STSP.\n\n Same usage as p_sample_loop_progressive().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
if (skip_timesteps and (init_image is None)):
init_image = th.zeros_like(img)
indices = list(range((self.num_timesteps - skip_timesteps)))[::(- 1)]
if (init_image is not None):
my_t = (th.ones([shape[0]], device=device, dtype=th.long) * indices[0])
img = self.q_sample(init_image, my_t, img)
if progress:
from fastprogress import progress_bar
if isinstance(progress, bool):
indices = progress_bar(indices)
else:
indices = progress_bar(indices, parent=progress)
old_out = None
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
if (randomize_class and ('y' in model_kwargs)):
model_kwargs['y'] = th.randint(low=0, high=model.num_classes, size=model_kwargs['y'].shape, device=model_kwargs['y'].device)
with th.no_grad():
out = self.stsp_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, impu_fn=impu_fn, model_kwargs=model_kwargs, order=order, old_out=old_out)
(yield out)
old_out = out
img = out['sample']
def ltsp_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, cond_fn_with_grad=False, order=2, old_out=None):
'\n Sample x_{t-1} from the model using Lie-Trotter Splitting \n and Pseudo Linear Multistep.\n\n Same usage as p_sample().\n '
g0 = _extract_into_tensor(self.g, t[0], (1,))
g_1 = _extract_into_tensor(self.g_prev, t[0], (1,))
s0 = (1 / th.sqrt(((g0 ** 2) + 1)))
s_1 = (1 / th.sqrt(((g_1 ** 2) + 1)))
del_g = (g_1 - g0)
if ((cond_fn is not None) and True):
grad = self.condition_score2(cond_fn, None, x, t, del_g, s0, s_1, model_kwargs=model_kwargs)
x = (x + (grad * s0))
out_orig = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
eps = self._predict_eps_from_xstart(x, t, out_orig['pred_xstart'])
if (old_out is None):
old_out = []
old_eps = [eps]
else:
old_eps = old_out['old_eps']
old_eps.append(eps)
eps_prime = plms_mixer(old_eps, order)
sample = (((x / s0) + (del_g * eps_prime)) * s_1)
if ((cond_fn is not None) and False):
grad = self.condition_score2(cond_fn, None, sample, t, del_g, s0, s_1, model_kwargs=model_kwargs)
sample = (sample + (grad * s_1))
if (impu_fn is not None):
sample = self.condition_score3(impu_fn, None, sample, t, model_kwargs=model_kwargs)
return {'sample': sample, 'pred_xstart': out_orig['pred_xstart'], 'old_eps': old_eps}
def ltsp_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False, order=2):
'\n Generate samples from the model using Lie-Trotter Splitting \n and Pseudo Linear Multistep.\n\n Same usage as p_sample_loop().\n '
final = None
for sample in self.ltsp_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, impu_fn=impu_fn, model_kwargs=model_kwargs, device=device, progress=progress, skip_timesteps=skip_timesteps, init_image=init_image, randomize_class=randomize_class, cond_fn_with_grad=cond_fn_with_grad, order=order):
final = sample
return final['sample']
def ltsp_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, impu_fn=None, model_kwargs=None, device=None, progress=False, skip_timesteps=0, init_image=None, randomize_class=False, cond_fn_with_grad=False, order=2):
'\n Use LTPS to sample from the model and yield intermediate samples from each\n timestep of LTSP.\n\n Same usage as p_sample_loop_progressive().\n '
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
if (skip_timesteps and (init_image is None)):
init_image = th.zeros_like(img)
indices = list(range((self.num_timesteps - skip_timesteps)))[::(- 1)]
if (init_image is not None):
my_t = (th.ones([shape[0]], device=device, dtype=th.long) * indices[0])
img = self.q_sample(init_image, my_t, img)
if progress:
from fastprogress import progress_bar
if isinstance(progress, bool):
indices = progress_bar(indices)
else:
indices = progress_bar(indices, parent=progress)
old_out = None
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
if (randomize_class and ('y' in model_kwargs)):
model_kwargs['y'] = th.randint(low=0, high=model.num_classes, size=model_kwargs['y'].shape, device=model_kwargs['y'].device)
with th.no_grad():
out = self.ltsp_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, impu_fn=impu_fn, model_kwargs=model_kwargs, order=order, old_out=old_out)
(yield out)
old_out = out
img = out['sample']
def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
"\n Get a term for the variational lower-bound.\n\n The resulting units are bits (rather than nats, as one might expect).\n This allows for comparison to other papers.\n\n :return: a dict with the following keys:\n - 'output': a shape [N] tensor of NLLs or KLs.\n - 'pred_xstart': the x_0 predictions.\n "
(true_mean, _, true_log_variance_clipped) = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)
out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
kl = normal_kl(true_mean, true_log_variance_clipped, out['mean'], out['log_variance'])
kl = (mean_flat(kl) / np.log(2.0))
decoder_nll = (- discretized_gaussian_log_likelihood(x_start, means=out['mean'], log_scales=(0.5 * out['log_variance'])))
assert (decoder_nll.shape == x_start.shape)
decoder_nll = (mean_flat(decoder_nll) / np.log(2.0))
output = th.where((t == 0), decoder_nll, kl)
return {'output': output, 'pred_xstart': out['pred_xstart']}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
'\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key "loss" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n '
if (model_kwargs is None):
model_kwargs = {}
if (noise is None):
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if ((self.loss_type == LossType.KL) or (self.loss_type == LossType.RESCALED_KL)):
terms['loss'] = self._vb_terms_bpd(model=model, x_start=x_start, x_t=x_t, t=t, clip_denoised=False, model_kwargs=model_kwargs)['output']
if (self.loss_type == LossType.RESCALED_KL):
terms['loss'] *= self.num_timesteps
elif ((self.loss_type == LossType.MSE) or (self.loss_type == LossType.RESCALED_MSE)):
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
(B, C) = x_t.shape[:2]
assert (model_output.shape == (B, (C * 2), *x_t.shape[2:]))
(model_output, model_var_values) = th.split(model_output, C, dim=1)
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms['vb'] = self._vb_terms_bpd(model=(lambda *args, r=frozen_out: r), x_start=x_start, x_t=x_t, t=t, clip_denoised=False)['output']
if (self.loss_type == LossType.RESCALED_MSE):
terms['vb'] *= (self.num_timesteps / 1000.0)
target = {ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0], ModelMeanType.START_X: x_start, ModelMeanType.EPSILON: noise}[self.model_mean_type]
assert (model_output.shape == target.shape == x_start.shape)
terms['mse'] = mean_flat(((target - model_output) ** 2))
if ('vb' in terms):
terms['loss'] = (terms['mse'] + terms['vb'])
else:
terms['loss'] = terms['mse']
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n\n This term can't be optimized, as it only depends on the encoder.\n\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n "
batch_size = x_start.shape[0]
t = th.tensor(([(self.num_timesteps - 1)] * batch_size), device=x_start.device)
(qt_mean, _, qt_log_variance) = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return (mean_flat(kl_prior) / np.log(2.0))
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
'\n Compute the entire variational lower-bound, measured in bits-per-dim,\n as well as other related quantities.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param clip_denoised: if True, clip denoised samples.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n\n :return: a dict containing the following keys:\n - total_bpd: the total variational lower-bound, per batch element.\n - prior_bpd: the prior term in the lower-bound.\n - vb: an [N x T] tensor of terms in the lower-bound.\n - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.\n - mse: an [N x T] tensor of epsilon MSEs for each timestep.\n '
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::(- 1)]:
t_batch = th.tensor(([t] * batch_size), device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
with th.no_grad():
out = self._vb_terms_bpd(model, x_start=x_start, x_t=x_t, t=t_batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
vb.append(out['output'])
xstart_mse.append(mean_flat(((out['pred_xstart'] - x_start) ** 2)))
eps = self._predict_eps_from_xstart(x_t, t_batch, out['pred_xstart'])
mse.append(mean_flat(((eps - noise) ** 2)))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = (vb.sum(dim=1) + prior_bpd)
return {'total_bpd': total_bpd, 'prior_bpd': prior_bpd, 'vb': vb, 'xstart_mse': xstart_mse, 'mse': mse}
|
def _extract_into_tensor(arr, timesteps, broadcast_shape):
'\n Extract values from a 1-D numpy array for a batch of indices.\n\n :param arr: the 1-D numpy array.\n :param timesteps: a tensor of indices into the array to extract.\n :param broadcast_shape: a larger shape of K dimensions with the batch\n dimension equal to the length of timesteps.\n :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.\n '
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while (len(res.shape) < len(broadcast_shape)):
res = res[(..., None)]
return res.expand(broadcast_shape)
|
# NOTE(review): removed stray dataset-viewer boilerplate ("Subsets and Splits" /
# "No community queries yet" / "The top public SQL queries...") that was
# accidentally appended to this Python file and broke its syntax.