code stringlengths 17 6.64M |
|---|
class LeaveOneOutSelectionMethod(SelectionMethod):
    """Picks (hparams, step) by leave-one-domain-out cross validation.

    For a run whose single test env is T, validation accuracy is the mean,
    over every other env V, of the in-split accuracy on V taken from the
    companion run that held out both T and V.
    """
    name = 'leave-one-domain-out cross-validation'

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self`; it is bound to the class, not an instance.
    @classmethod
    def _step_acc(self, records):
        """Return the {val_acc, test_acc} for a group of records corresponding
        to a single step, or None when the group is incomplete."""
        # Exactly one record in the group should hold out only the test env.
        test_records = get_test_records(records)
        if (len(test_records) != 1):
            return None
        test_env = test_records[0]['args']['test_envs'][0]
        # Count environments by probing which env{i}_out_acc keys exist.
        n_envs = 0
        for i in itertools.count():
            if (f'env{i}_out_acc' not in records[0]):
                break
            n_envs += 1
        # -1 marks validation envs for which no record was found.
        val_accs = (np.zeros(n_envs) - 1)
        # Companion runs hold out two envs: {test_env, val_env}.
        for r in records.filter((lambda r: (len(r['args']['test_envs']) == 2))):
            val_env = (set(r['args']['test_envs']) - set([test_env])).pop()
            val_accs[val_env] = r['env{}_in_acc'.format(val_env)]
        # Drop the test env's own slot, then require full coverage.
        val_accs = (list(val_accs[:test_env]) + list(val_accs[(test_env + 1):]))
        if any([(v == (- 1)) for v in val_accs]):
            return None
        val_acc = (np.sum(val_accs) / (n_envs - 1))
        return {'val_acc': val_acc, 'test_acc': test_records[0]['env{}_in_acc'.format(test_env)]}

    @classmethod
    def run_acc(self, records):
        """Return the per-step accuracy dict with the best val_acc, or None
        when no step has a complete set of records."""
        step_accs = records.group('step').map((lambda step, step_records: self._step_acc(step_records))).filter_not_none()
        if len(step_accs):
            return step_accs.argmax('val_acc')
        else:
            return None
|
def format_mean(data, latex):
    """Return (mean, standard error, text) describing a list of datapoints.

    Args:
        data: iterable of values (fractions in [0, 1] in this file's usage).
        latex: if True, format with a LaTeX ``$\\pm$``; otherwise ``+/-``.

    Returns:
        (mean, err, text) with mean/err scaled to percent, or
        (None, None, 'X') when data is empty.
    """
    data = list(data)
    if not data:
        return (None, None, 'X')
    mean = 100 * np.mean(data)
    # Standard error of the mean: std / sqrt(n). The original computed
    # np.std(data / sqrt(n)), which is numerically identical (std is
    # scale-equivariant) but hid the intent behind a misplaced parenthesis.
    err = 100 * np.std(data) / np.sqrt(len(data))
    if latex:
        return (mean, err, '{:.1f} $\\pm$ {:.1f}'.format(mean, err))
    else:
        return (mean, err, '{:.1f} +/- {:.1f}'.format(mean, err))
|
def print_table(table, header_text, row_labels, col_labels, colwidth=10, latex=True):
    """Pretty-print a 2D array of data, optionally with row/col labels.

    NOTE: mutates its arguments in place — each row of `table` gets its label
    inserted at position 0, and `col_labels` is inserted as the first row.
    """
    print('')
    if latex:
        num_cols = len(table[0])
        print('\\begin{center}')
        print('\\adjustbox{max width=\\textwidth}{%')
        print((('\\begin{tabular}{l' + ('c' * num_cols)) + '}'))
        print('\\toprule')
    else:
        print('--------', header_text)
    # Prepend each row's label to the row itself.
    for (row, label) in zip(table, row_labels):
        row.insert(0, label)
    if latex:
        # Bold column headers; escape literal '%' for LaTeX.
        col_labels = [(('\\textbf{' + str(col_label).replace('%', '\\%')) + '}') for col_label in col_labels]
    table.insert(0, col_labels)
    for (r, row) in enumerate(table):
        misc.print_row(row, colwidth=colwidth, latex=latex)
        # Rule between the header row and the data rows.
        if (latex and (r == 0)):
            print('\\midrule')
    if latex:
        print('\\bottomrule')
        print('\\end{tabular}}')
        print('\\end{center}')
|
def print_results_tables(records, task, selection_method, latex):
    """Given all records, print a results table for each dataset, then a
    final table of per-dataset averages."""
    # Attach the selection method's sweep accuracy to each group; drop groups
    # where no accuracy could be computed.
    grouped_records = reporting.get_grouped_records(records, group_test_envs=(task != 'unsupervised_domain_generalization')).map((lambda group: {**group, 'sweep_acc': selection_method.sweep_acc(group['records'])})).filter((lambda g: (g['sweep_acc'] is not None)))
    # Order algorithms: known ones first (in canonical order), then the rest.
    alg_names = Q(records).select('args.algorithm').unique()
    alg_names = ([n for n in algorithms.ALGORITHMS if (n in alg_names)] + [n for n in alg_names if (n not in algorithms.ALGORITHMS)])
    # Keep only datasets the codebase knows about, in canonical order.
    dataset_names = Q(records).select('args.dataset').unique().sorted()
    dataset_names = [d for d in datasets.DATASETS if (d in dataset_names)]
    for dataset in dataset_names:
        if latex:
            print()
            print('\\subsubsection{{{}}}'.format(dataset))
        test_envs = range(datasets.num_environments(dataset))
        # Per-env tables are only meaningful when envs were held out.
        if (task != 'unsupervised_domain_generalization'):
            table = [[None for _ in [*test_envs, 'Avg']] for _ in alg_names]
            for (i, algorithm) in enumerate(alg_names):
                means = []
                for (j, test_env) in enumerate(test_envs):
                    trial_accs = grouped_records.filter_equals('dataset, algorithm, test_env', (dataset, algorithm, test_env)).select('sweep_acc')
                    (mean, err, table[i][j]) = format_mean(trial_accs, latex)
                    means.append(mean)
                # 'X' marks an algorithm with at least one missing env.
                if (None in means):
                    table[i][(- 1)] = 'X'
                else:
                    table[i][(- 1)] = '{:.1f}'.format((sum(means) / len(means)))
            col_labels = ['Algorithm', *datasets.get_dataset_class(dataset).ENVIRONMENTS, 'Avg']
            header_text = f'Dataset: {dataset}, model selection method: {selection_method.name}'
            print_table(table, header_text, alg_names, list(col_labels), colwidth=20, latex=latex)
    if latex:
        print()
        print('\\subsubsection{Averages}')
    # Second table: per-dataset averages (mean over trial seeds).
    table = [[None for _ in [*dataset_names, 'Avg']] for _ in alg_names]
    for (i, algorithm) in enumerate(alg_names):
        means = []
        for (j, dataset) in enumerate(dataset_names):
            trial_averages = grouped_records.filter_equals('algorithm, dataset', (algorithm, dataset)).group('trial_seed').map((lambda trial_seed, group: group.select('sweep_acc').mean()))
            (mean, err, table[i][j]) = format_mean(trial_averages, latex)
            means.append(mean)
        if (None in means):
            table[i][(- 1)] = 'X'
        else:
            table[i][(- 1)] = '{:.1f}'.format((sum(means) / len(means)))
    col_labels = ['Algorithm', *dataset_names, 'Avg']
    header_text = f'Averages, model selection method: {selection_method.name}'
    print_table(table, header_text, alg_names, col_labels, colwidth=25, latex=latex)
|
def stage_path(data_dir, name):
    """Return data_dir/name, creating the directory if it does not exist.

    Args:
        data_dir: base directory for staged datasets.
        name: subdirectory name for this dataset.

    Returns:
        The joined path (always an existing directory on return).
    """
    full_path = os.path.join(data_dir, name)
    # exist_ok avoids the TOCTOU race between an exists() check and makedirs().
    os.makedirs(full_path, exist_ok=True)
    return full_path
|
def download_and_extract(url, dst, remove=True):
    """Download `url` to `dst` with gdown, extract known archive types
    (.tar.gz, .tar, .zip) into dst's directory, and optionally delete the
    archive afterwards.

    Args:
        url: source URL (Google Drive links supported via gdown).
        dst: local archive path; its extension selects the extractor.
        remove: delete the downloaded archive after extraction.
    """
    gdown.download(url, dst, quiet=False)
    extract_dir = os.path.dirname(dst)
    # Context managers close the archive handle even if extraction raises
    # (the original leaked the handle on error). elif is safe because
    # '.tar.gz' does not also match the '.tar' suffix test.
    if dst.endswith('.tar.gz'):
        with tarfile.open(dst, 'r:gz') as tar:
            tar.extractall(extract_dir)
    elif dst.endswith('.tar'):
        with tarfile.open(dst, 'r:') as tar:
            tar.extractall(extract_dir)
    elif dst.endswith('.zip'):
        with ZipFile(dst, 'r') as zf:
            zf.extractall(extract_dir)
    if remove:
        os.remove(dst)
|
def download_vlcs(data_dir):
    """Download and extract the VLCS dataset under data_dir."""
    full_path = stage_path(data_dir, 'VLCS')
    # The archive is downloaded next to (not inside) full_path; extraction is
    # expected to populate the VLCS folder. full_path itself is otherwise
    # unused here.
    download_and_extract('https://drive.google.com/uc?id=1skwblH1_okBwxWxmRsp9_qi15hyPpxg8', os.path.join(data_dir, 'VLCS.tar.gz'))
|
def download_mnist(data_dir):
    """Download MNIST into data_dir/MNIST via the MNIST dataset class
    (presumably torchvision's — confirm the module's imports)."""
    full_path = stage_path(data_dir, 'MNIST')
    MNIST(full_path, download=True)
|
def download_pacs(data_dir):
    """Download and extract PACS, renaming the extracted 'kfold' folder to
    data_dir/PACS."""
    full_path = stage_path(data_dir, 'PACS')
    download_and_extract('https://drive.google.com/uc?id=0B6x7gtvErXgfbF9CSk53UkRxVzg', os.path.join(data_dir, 'PACS.zip'))
    # The archive extracts to a 'kfold' directory; move it into place.
    os.rename(os.path.join(data_dir, 'kfold'), full_path)
|
def download_office_home(data_dir):
    """Download and extract Office-Home, renaming the extracted folder to
    data_dir/office_home."""
    full_path = stage_path(data_dir, 'office_home')
    download_and_extract('https://drive.google.com/uc?id=0B81rNlvomiwed0V1YUxQdC1uOTg', os.path.join(data_dir, 'office_home.zip'))
    os.rename(os.path.join(data_dir, 'OfficeHomeDataset_10072016'), full_path)
|
def download_domain_net(data_dir):
    """Download and extract the six DomainNet domain archives, then delete the
    known duplicate images listed in domainbed/misc/domain_net_duplicates.txt."""
    full_path = stage_path(data_dir, 'domain_net')
    urls = ['http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/clipart.zip', 'http://csr.bu.edu/ftp/visda/2019/multi-source/infograph.zip', 'http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/painting.zip', 'http://csr.bu.edu/ftp/visda/2019/multi-source/quickdraw.zip', 'http://csr.bu.edu/ftp/visda/2019/multi-source/real.zip', 'http://csr.bu.edu/ftp/visda/2019/multi-source/sketch.zip']
    for url in urls:
        download_and_extract(url, os.path.join(full_path, url.split('/')[(- 1)]))
    with open('domainbed/misc/domain_net_duplicates.txt', 'r') as f:
        for line in f.readlines():
            # Best-effort removal: a duplicate may already be absent.
            try:
                os.remove(os.path.join(full_path, line.strip()))
            except OSError:
                pass
|
def download_terra_incognita(data_dir):
    """Download the Terra Incognita subset of Caltech Camera Traps and
    re-organize it into location_<id>/<category>/ folders under
    data_dir/terra_incognita.

    Only the four include_locations and ten include_categories are kept; the
    raw image dump and the annotations file are removed afterwards.
    """
    full_path = stage_path(data_dir, 'terra_incognita')
    download_and_extract('https://lilablobssc.blob.core.windows.net/caltechcameratraps/eccv_18_all_images_sm.tar.gz', os.path.join(full_path, 'terra_incognita_images.tar.gz'))
    download_and_extract('https://lilablobssc.blob.core.windows.net/caltechcameratraps/labels/caltech_camera_traps.json.zip', os.path.join(full_path, 'caltech_camera_traps.json.zip'))
    include_locations = ['38', '46', '100', '43']
    include_categories = ['bird', 'bobcat', 'cat', 'coyote', 'dog', 'empty', 'opossum', 'rabbit', 'raccoon', 'squirrel']
    images_folder = os.path.join(full_path, 'eccv_18_all_images_sm/')
    annotations_file = os.path.join(full_path, 'caltech_images_20210113.json')
    destination_folder = full_path
    stats = {}
    if not os.path.exists(destination_folder):
        os.mkdir(destination_folder)
    with open(annotations_file, 'r') as f:
        data = json.load(f)
    category_dict = {item['id']: item['name'] for item in data['categories']}
    # Index annotations by image id once; the original rescanned the full
    # annotation list for every image, i.e. O(images * annotations).
    annotations_by_image = {}
    for annotation in data['annotations']:
        annotations_by_image.setdefault(annotation['image_id'], []).append(annotation)
    for image in data['images']:
        image_location = image['location']
        if image_location not in include_locations:
            continue
        loc_folder = os.path.join(destination_folder, 'location_' + str(image_location) + '/')
        if not os.path.exists(loc_folder):
            os.mkdir(loc_folder)
        image_id = image['id']
        image_fname = image['file_name']
        for annotation in annotations_by_image.get(image_id, []):
            stats.setdefault(image_location, {})
            category = category_dict[annotation['category_id']]
            if category not in include_categories:
                continue
            # Bug fix: the original set first-seen categories to 0 and only
            # incremented on later hits, undercounting each category by one.
            stats[image_location][category] = stats[image_location].get(category, 0) + 1
            loc_cat_folder = os.path.join(loc_folder, category + '/')
            if not os.path.exists(loc_cat_folder):
                os.mkdir(loc_cat_folder)
            dst_path = os.path.join(loc_cat_folder, image_fname)
            src_path = os.path.join(images_folder, image_fname)
            shutil.copyfile(src_path, dst_path)
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
|
def download_sviro(data_dir):
    """Download and extract SVIRO, renaming the extracted folder to
    data_dir/sviro."""
    full_path = stage_path(data_dir, 'sviro')
    download_and_extract('https://sviro.kl.dfki.de/?wpdmdl=1731', os.path.join(data_dir, 'sviro_grayscale_rectangle_classification.zip'))
    os.rename(os.path.join(data_dir, 'SVIRO_DOMAINBED'), full_path)
|
def todo_rename(records, selection_method, latex):
    """Print a per-dataset results table and a per-dataset-averages table.

    NOTE(review): near-duplicate of print_results_tables (without the `task`
    parameter); as the placeholder name suggests, it likely predates it —
    consider consolidating.
    """
    # Attach sweep accuracy per group; drop groups with no computable acc.
    grouped_records = reporting.get_grouped_records(records).map((lambda group: {**group, 'sweep_acc': selection_method.sweep_acc(group['records'])})).filter((lambda g: (g['sweep_acc'] is not None)))
    # Known algorithms first (canonical order), then the rest.
    alg_names = Q(records).select('args.algorithm').unique()
    alg_names = ([n for n in algorithms.ALGORITHMS if (n in alg_names)] + [n for n in alg_names if (n not in algorithms.ALGORITHMS)])
    dataset_names = Q(records).select('args.dataset').unique().sorted()
    dataset_names = [d for d in datasets.DATASETS if (d in dataset_names)]
    for dataset in dataset_names:
        if latex:
            print()
            print('\\subsubsection{{{}}}'.format(dataset))
        test_envs = range(datasets.num_environments(dataset))
        table = [[None for _ in [*test_envs, 'Avg']] for _ in alg_names]
        for (i, algorithm) in enumerate(alg_names):
            means = []
            for (j, test_env) in enumerate(test_envs):
                trial_accs = grouped_records.filter_equals('dataset, algorithm, test_env', (dataset, algorithm, test_env)).select('sweep_acc')
                (mean, err, table[i][j]) = format_mean(trial_accs, latex)
                means.append(mean)
            # 'X' marks an algorithm missing results for some env.
            if (None in means):
                table[i][(- 1)] = 'X'
            else:
                table[i][(- 1)] = '{:.1f}'.format((sum(means) / len(means)))
        col_labels = ['Algorithm', *datasets.get_dataset_class(dataset).ENVIRONMENTS, 'Avg']
        header_text = f'Dataset: {dataset}, model selection method: {selection_method.name}'
        print_table(table, header_text, alg_names, list(col_labels), colwidth=20, latex=latex)
    if latex:
        print()
        print('\\subsubsection{Averages}')
    # Final table: per-dataset averages, averaged over trial seeds.
    table = [[None for _ in [*dataset_names, 'Avg']] for _ in alg_names]
    for (i, algorithm) in enumerate(alg_names):
        means = []
        for (j, dataset) in enumerate(dataset_names):
            trial_averages = grouped_records.filter_equals('algorithm, dataset', (algorithm, dataset)).group('trial_seed').map((lambda trial_seed, group: group.select('sweep_acc').mean()))
            (mean, err, table[i][j]) = format_mean(trial_averages, latex)
            means.append(mean)
        if (None in means):
            table[i][(- 1)] = 'X'
        else:
            table[i][(- 1)] = '{:.1f}'.format((sum(means) / len(means)))
    col_labels = ['Algorithm', *dataset_names, 'Avg']
    header_text = f'Averages, model selection method: {selection_method.name}'
    print_table(table, header_text, alg_names, col_labels, colwidth=25, latex=latex)
|
class Job():
    """One training job of a sweep: a unique output dir derived from the hash
    of its args, the shell command that launches it, and its completion state.
    """
    # Possible values of self.state.
    NOT_LAUNCHED = 'Not launched'
    INCOMPLETE = 'Incomplete'
    DONE = 'Done'

    def __init__(self, train_args, sweep_output_dir):
        # Hash the sorted args so identical args always map to the same dir.
        args_str = json.dumps(train_args, sort_keys=True)
        args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
        self.output_dir = os.path.join(sweep_output_dir, args_hash)
        self.train_args = copy.deepcopy(train_args)
        self.train_args['output_dir'] = self.output_dir
        # Build the launch command; lists become space-separated values and
        # strings are shell-quoted.
        command = ['python', '-m', 'domainbed.scripts.train']
        for (k, v) in sorted(self.train_args.items()):
            if isinstance(v, list):
                v = ' '.join([str(v_) for v_ in v])
            elif isinstance(v, str):
                v = shlex.quote(v)
            command.append(f'--{k} {v}')
        self.command_str = ' '.join(command)
        # A 'done' marker file means the run finished; an existing dir without
        # it means the run started but did not complete.
        if os.path.exists(os.path.join(self.output_dir, 'done')):
            self.state = Job.DONE
        elif os.path.exists(self.output_dir):
            self.state = Job.INCOMPLETE
        else:
            self.state = Job.NOT_LAUNCHED

    def __str__(self):
        job_info = (self.train_args['dataset'], self.train_args['algorithm'], self.train_args['test_envs'], self.train_args['hparams_seed'])
        return '{}: {} {}'.format(self.state, self.output_dir, job_info)

    @staticmethod
    def launch(jobs, launcher_fn):
        """Shuffle the jobs, create their output dirs, and hand the launch
        commands to launcher_fn."""
        print('Launching...')
        jobs = jobs.copy()
        np.random.shuffle(jobs)
        print('Making job directories:')
        for job in tqdm.tqdm(jobs, leave=False):
            os.makedirs(job.output_dir, exist_ok=True)
        commands = [job.command_str for job in jobs]
        launcher_fn(commands)
        print(f'Launched {len(jobs)} jobs!')

    @staticmethod
    def delete(jobs):
        """Remove each job's output directory (and thus its results)."""
        print('Deleting...')
        for job in jobs:
            shutil.rmtree(job.output_dir)
        print(f'Deleted {len(jobs)} jobs!')
|
def all_test_env_combinations(n):
    """Yield every choice of one or two test envs for a dataset with n >= 3
    environments: [i] for each env i, then [i, j] for each j > i."""
    assert (n >= 3)
    for first in range(n):
        yield [first]
        for second in range(first + 1, n):
            yield [first, second]
|
def make_args_list(n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps, data_dir, task, holdout_fraction, single_test_envs, hparams):
    """Enumerate one train-args dict per combination of trial seed, dataset,
    algorithm, test-env choice, and hparams seed."""
    args_list = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                # Either every single env alone, or all 1- and 2-env choices.
                if single_test_envs:
                    env_choices = [[i] for i in range(datasets.num_environments(dataset))]
                else:
                    env_choices = all_test_env_combinations(datasets.num_environments(dataset))
                for test_envs in env_choices:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {
                            'dataset': dataset,
                            'algorithm': algorithm,
                            'test_envs': test_envs,
                            'holdout_fraction': holdout_fraction,
                            'hparams_seed': hparams_seed,
                            'data_dir': data_dir,
                            'task': task,
                            'trial_seed': trial_seed,
                            # Deterministic per-combination seed.
                            'seed': misc.seed_hash(dataset, algorithm, test_envs, hparams_seed, trial_seed),
                        }
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
|
def ask_for_confirmation():
    """Prompt the user with a y/n question on stdin and exit(0) unless the
    (trimmed, lowercased) answer starts with 'y'."""
    response = input('Are you sure? (y/n) ')
    if (not (response.lower().strip()[:1] == 'y')):
        print('Nevermind!')
        exit(0)
|
class Job():
    """One training job of a sweep (variant with a configurable train script):
    a unique output dir derived from the hash of its args, the launch command,
    and the job's completion state."""
    # Possible values of self.state.
    NOT_LAUNCHED = 'Not launched'
    INCOMPLETE = 'Incomplete'
    DONE = 'Done'

    def __init__(self, train_args, sweep_output_dir, train_script='domainbed.scripts.train'):
        # Hash the sorted args so identical args always map to the same dir.
        args_str = json.dumps(train_args, sort_keys=True)
        args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
        self.output_dir = os.path.join(sweep_output_dir, args_hash)
        self.train_args = copy.deepcopy(train_args)
        self.train_args['output_dir'] = self.output_dir
        # Build the launch command; lists become space-separated values and
        # strings are shell-quoted.
        command = ['python', '-m', train_script]
        for (k, v) in sorted(self.train_args.items()):
            if isinstance(v, list):
                v = ' '.join([str(v_) for v_ in v])
            elif isinstance(v, str):
                v = shlex.quote(v)
            command.append(f'--{k} {v}')
        self.command_str = ' '.join(command)
        # A 'done' marker file means finished; a dir without it means started
        # but incomplete.
        if os.path.exists(os.path.join(self.output_dir, 'done')):
            self.state = Job.DONE
        elif os.path.exists(self.output_dir):
            self.state = Job.INCOMPLETE
        else:
            self.state = Job.NOT_LAUNCHED

    def __str__(self):
        job_info = (self.train_args['dataset'], self.train_args['algorithm'], self.train_args['test_envs'], self.train_args['hparams_seed'])
        return '{}: {} {}'.format(self.state, self.output_dir, job_info)

    @staticmethod
    def launch(jobs, launcher_fn):
        """Shuffle the jobs, create their output dirs, and hand the launch
        commands to launcher_fn."""
        print('Launching...')
        jobs = jobs.copy()
        np.random.shuffle(jobs)
        print('Making job directories:')
        for job in tqdm.tqdm(jobs, leave=False):
            os.makedirs(job.output_dir, exist_ok=True)
        commands = [job.command_str for job in jobs]
        launcher_fn(commands)
        print(f'Launched {len(jobs)} jobs!')

    @staticmethod
    def delete(jobs):
        """Remove each job's output directory (and thus its results)."""
        print('Deleting...')
        for job in jobs:
            shutil.rmtree(job.output_dir)
        print(f'Deleted {len(jobs)} jobs!')
|
def all_test_env_combinations(n):
    """Yield all 1- and 2-element test-env choices for n >= 3 environments,
    interleaved: each singleton [i] followed by its pairs [i, j] with j > i."""
    assert (n >= 3)
    for lo in range(n):
        yield [lo]
        for hi in range(lo + 1, n):
            yield [lo, hi]
|
def make_args_list(n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps, data_dir, task, holdout_fraction, single_test_envs, wandb_proj, wandb_group, only_eval, always_rerun, warmstart_model_ckpt, hparams):
    """Enumerate one train-args dict per combination of trial seed, dataset,
    algorithm, test-env choice, and hparams seed (wandb-aware variant)."""
    args_list = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                # Unsupervised DG uses the sentinel test env -1; otherwise
                # either every env alone or all 1- and 2-env choices.
                if task == 'unsupervised_domain_generalization':
                    env_choices = [[(- 1)]]
                elif single_test_envs:
                    env_choices = [[i] for i in range(datasets.num_environments(dataset))]
                else:
                    env_choices = all_test_env_combinations(datasets.num_environments(dataset))
                for test_envs in env_choices:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {
                            'dataset': dataset,
                            'algorithm': algorithm,
                            'test_envs': test_envs,
                            'holdout_fraction': holdout_fraction,
                            'hparams_seed': hparams_seed,
                            'data_dir': data_dir,
                            'task': task,
                            'trial_seed': trial_seed,
                            # Deterministic per-combination seed.
                            'seed': misc.seed_hash(dataset, algorithm, test_envs, hparams_seed, trial_seed),
                            'wandb_proj': wandb_proj,
                            'wandb_group': wandb_group,
                        }
                        # Optional flags are only emitted when set.
                        if only_eval:
                            train_args['only_eval'] = only_eval
                        if always_rerun:
                            train_args['always_rerun'] = always_rerun
                        if warmstart_model_ckpt is not None:
                            train_args['warmstart_model_ckpt'] = warmstart_model_ckpt
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
|
def ask_for_confirmation():
    """Prompt the user with a y/n question on stdin and exit(0) unless the
    (trimmed, lowercased) answer starts with 'y'."""
    response = input('Are you sure? (y/n) ')
    if (not (response.lower().strip()[:1] == 'y')):
        print('Nevermind!')
        exit(0)
|
class CLIPConLoss(nn.Module):
    """CLIP text-image contrastive loss.

    Args:
        feature_dim: dimensionality of the image features (used by the
            optional projection head).
        temperature: initial softmax temperature; logits are scaled by
            exp(logit_scale) with logit_scale initialized to log(1/temperature).
        learnable_temperature: if True, logit_scale is a trainable Parameter.
        is_project: if True, pass image features through a 2-layer MLP first.
        is_symmetric: if True, sum the image->text and text->image CE losses.
    """

    def __init__(self, feature_dim, temperature=0.07, learnable_temperature=True, is_project=False, is_symmetric=True):
        super(CLIPConLoss, self).__init__()
        self.temperature = temperature
        init_scale = torch.ones([]) * np.log(1 / self.temperature)
        if learnable_temperature:
            self.logit_scale = nn.Parameter(init_scale)
        else:
            # Bug fix: a plain tensor attribute does not follow the module
            # across .to(device)/.cuda(), causing a device mismatch in
            # forward(). A non-persistent buffer moves with the module while
            # keeping the state_dict unchanged.
            self.register_buffer('logit_scale', init_scale, persistent=False)
        self.is_project = is_project
        self.is_symmetric = is_symmetric
        if self.is_project:
            self.project = nn.Sequential(nn.Linear(feature_dim, feature_dim), nn.ReLU(inplace=True), nn.Linear(feature_dim, feature_dim))

    def forward(self, z, text_features):
        """Return the contrastive loss for a batch of paired image features
        `z` and `text_features`; pair i is the positive for row/column i."""
        if self.is_project:
            z = self.project(z)
        # Cosine-similarity logits: normalize both modalities to unit norm.
        image_features = z / z.norm(dim=(- 1), keepdim=True)
        text_features = text_features / text_features.norm(dim=(- 1), keepdim=True)
        logit_scale = self.logit_scale.exp()
        # Cap the scale (as in the CLIP paper) to keep logits bounded.
        logit_scale = torch.clamp(logit_scale, max=100)
        logits_per_image = (logit_scale * image_features) @ text_features.t()
        logits_per_text = logits_per_image.t()
        # The matching pair sits on the diagonal.
        y = torch.arange(z.shape[0]).to(z.device)
        if not self.is_symmetric:
            return F.cross_entropy(logits_per_image, y)
        loss_i = F.cross_entropy(logits_per_image, y)
        loss_t = F.cross_entropy(logits_per_text, y)
        return loss_t + loss_i
|
def finite_mean(x):
    """Mean of the finite entries of tensor x, or 0.0 (on x's device/dtype)
    when no entry is finite."""
    finite_mask = torch.isfinite(x)
    n_finite = finite_mask.float().sum()
    if n_finite == 0:
        return torch.tensor(0.0).to(x)
    finite_sum = torch.where(finite_mask, x, torch.tensor(0.0).to(x)).sum()
    return finite_sum / n_finite
|
class PLLogisticRegression(pl.LightningModule):
    """
    Logistic regression model (optionally a 1-hidden-layer MLP) as a
    PyTorch Lightning module.
    """
    def __init__(self, input_dim: int, num_classes: int, bias: bool=True, learning_rate: float=0.0001, optimizer: Optimizer=Adam, l1_strength: float=0.0, l2_strength: float=0.0, is_nonlinear: bool=False, **kwargs):
        """
        Args:
            input_dim: number of dimensions of the input (at least 1)
            num_classes: number of class labels (binary: 2, multi-class: >2)
            bias: specifies if a constant or intercept should be fitted (equivalent to fit_intercept in sklearn)
            learning_rate: learning_rate for the optimizer
            optimizer: the optimizer to use (default='Adam')
            l1_strength: L1 regularization strength (default=None)
            l2_strength: L2 regularization strength (default=None)
            is_nonlinear: whether use a nonlinear classifier
        """
        super().__init__()
        # save_hyperparameters() exposes the ctor args as self.hparams.*.
        self.save_hyperparameters()
        self.optimizer = optimizer
        if (not self.hparams.is_nonlinear):
            self.linear = nn.Linear(in_features=self.hparams.input_dim, out_features=self.hparams.num_classes, bias=bias)
        else:
            # Nonlinear variant: 2x-width hidden layer with ReLU.
            hidden_dim = (self.hparams.input_dim * 2)
            self.linear = nn.Sequential(nn.Linear(self.hparams.input_dim, hidden_dim), nn.ReLU(inplace=True), nn.Linear(hidden_dim, self.hparams.num_classes))
    def forward(self, x):
        """Return raw (unnormalized) class logits for x."""
        logits = self.linear(x)
        return logits
    def training_step(self, batch, batch_idx):
        """Cross-entropy loss with optional L1/L2 penalties, averaged over
        the batch; logs train_ce_loss and train_acc."""
        (x, y) = batch
        x = x.view(x.size(0), (- 1))
        y_hat = self(x)
        # Summed here; divided by batch size after the penalties are added.
        loss = F.cross_entropy(y_hat, y, reduction='sum')
        if (self.hparams.l1_strength > 0):
            l1_reg = sum((param.abs().sum() for param in self.parameters()))
            loss += (self.hparams.l1_strength * l1_reg)
        if (self.hparams.l2_strength > 0):
            l2_reg = sum((param.pow(2).sum() for param in self.parameters()))
            loss += (self.hparams.l2_strength * l2_reg)
        loss /= x.size(0)
        acc = accuracy(y_hat, y)
        tensorboard_logs = {'train_ce_loss': loss, 'train_acc': acc}
        progress_bar_metrics = tensorboard_logs
        self.log_dict(tensorboard_logs, prog_bar=True)
        return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}
    def validation_step(self, batch, batch_idx):
        """Per-batch validation loss/accuracy; aggregated in
        validation_epoch_end."""
        (x, y) = batch
        x = x.view(x.size(0), (- 1))
        y_hat = self(x)
        acc = accuracy(y_hat, y)
        tensorboard_logs = {'val_loss': F.cross_entropy(y_hat, y), 'acc': acc}
        self.log_dict(tensorboard_logs, prog_bar=True)
        return tensorboard_logs
    def validation_epoch_end(self, outputs):
        """Average the per-batch validation metrics over the epoch."""
        acc = torch.stack([x['acc'] for x in outputs]).mean()
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_ce_loss': val_loss, 'val_acc': acc}
        progress_bar_metrics = tensorboard_logs
        self.log_dict(tensorboard_logs)
        return {'val_loss': val_loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}
    def configure_optimizers(self):
        """Instantiate the configured optimizer over all parameters."""
        return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)
    @staticmethod
    def add_model_specific_args(parent_parser):
        """Extend parent_parser with this model's CLI arguments."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--learning_rate', type=float, default=0.0001)
        parser.add_argument('--input_dim', type=int, default=None)
        parser.add_argument('--num_classes', type=int, default=None)
        # NOTE(review): default='store_true' stores the literal string
        # 'store_true' (always truthy); this was almost certainly meant to be
        # action='store_true'. Left as-is since fixing it changes the CLI.
        parser.add_argument('--bias', default='store_true')
        parser.add_argument('--batch_size', type=int, default=16)
        return parser
|
def pretty(d, indent=0):
    """Recursively print a nested dict as a tab-indented tree: keys at the
    current depth, leaf values one level deeper."""
    prefix = '\t' * indent
    for key, value in d.items():
        print(prefix + str(key))
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            print(prefix + '\t' + str(value))
|
def get_record(output_dir):
    """Load sweep result records from output_dir (via reporting.load_records)
    and print how many were found."""
    print('Loading records from:', output_dir)
    records = reporting.load_records(output_dir)
    print('Total records:', len(records))
    return records
|
def get_results(out_dir, selection_method):
    """Summarize a single-algorithm, single-dataset sweep.

    Returns ((tgt_mean, src_mean, tgt_in_mean), (tgt_std, src_std, tgt_in_std))
    in percent, averaged first within each trial seed and then across seeds.
    Judging by the variable names, the three components appear to be target
    acc, source acc, and target in-split acc — confirm against sweep_acc's
    return_extra=True contract.
    """
    records = get_record(out_dir)
    # sweep_acc is called with return_extra=True, so each entry is a tuple of
    # metrics rather than a scalar.
    grouped_records = reporting.get_grouped_records(records, group_test_envs=True).map((lambda group: {**group, 'sweep_acc': selection_method.sweep_acc(group['records'], return_extra=True)})).filter((lambda g: (g['sweep_acc'] is not None)))
    alg_names = Q(records).select('args.algorithm').unique()
    assert (len(alg_names) == 1)
    algorithm = alg_names[0]
    dataset_names = Q(records).select('args.dataset').unique().sorted()
    assert (len(dataset_names) == 1)
    dataset = dataset_names[0]
    # Per trial seed, average each tuple component across the seed's groups.
    trial_averages = grouped_records.filter_equals('algorithm, dataset', (algorithm, dataset)).group('trial_seed').map((lambda trial_seed, group: tuple(map((lambda y: (sum(y) / float(len(y)))), zip(*group.select('sweep_acc'))))))
    (tgt_all, src_all, tgt_in_all) = zip(*trial_averages)
    (tgt_mean, src_mean, tgt_in_mean) = ((100 * np.mean(list(tgt_all))), (100 * np.mean(list(src_all))), (100 * np.mean(list(tgt_in_all))))
    (tgt_std, src_std, tgt_in_std) = ((100 * np.std(list(tgt_all))), (100 * np.std(list(src_all))), (100 * np.std(list(tgt_in_all))))
    return ((tgt_mean, src_mean, tgt_in_mean), (tgt_std, src_std, tgt_in_std))
|
def plot_result(result_dict, plot_dataset, plot_y='acc_tgt', include=None, exclude=None, plot_std=False):
    """Bar-plot one metric for every method of one dataset.

    Args:
        result_dict: {dataset: {method: {metric: value, metric+'_std': value}}}.
        plot_dataset: key into result_dict selecting the dataset to plot.
        plot_y: metric key to plot; its '<plot_y>_std' sibling is the error.
        include: if given, only methods in this collection are plotted.
        exclude: if given, methods in this collection are skipped.
        plot_std: whether to draw error bars on the bars.
    """
    plt.figure()
    sub_result_dict = result_dict[plot_dataset]
    plt_xs = []
    plt_ys = []
    plt_errs = []
    # Iterate items directly (the original looked each value up again by key
    # and left the loop's value variable unused).
    for method, method_results in sub_result_dict.items():
        if include is not None and method not in include:
            continue
        if exclude is not None and method in exclude:
            continue
        # Wrap/shorten method names so the x tick labels stay readable.
        plt_xs.append(method.replace('_', '\n').replace('SupConOutCLIPBottleneckBase', 'SupConOut'))
        plt_ys.append(method_results[plot_y])
        plt_errs.append(method_results[plot_y + '_std'])
    if plot_std:
        plt.bar(plt_xs, plt_ys, yerr=plt_errs)
    else:
        plt.bar(plt_xs, plt_ys)
    # Annotate each bar with "mean +/- std".
    for plt_x, plt_y, plt_err in zip(plt_xs, plt_ys, plt_errs):
        plt.text(plt_x, plt_y + 0.25, '{:.1f} +/- {:.1f}'.format(plt_y, plt_err), color='blue', fontweight='bold')
    plt.xlabel('Method')
    plt.ylabel('Accuracy')
    plt.ylim([np.min(plt_ys) - 1.0, np.max(plt_ys) + 0.5])
    plt.title(plot_dataset)
    plt.show()
|
def is_image_file(filename):
    """Return True when filename's extension (case-insensitive) is one of the
    module-level IMG_EXTENSIONS."""
    lowered = filename.lower()
    return lowered.endswith(IMG_EXTENSIONS)
|
def make_dataset(dir, class_to_idx):
    """Walk dir/<class>/ subtrees and return (image_path, class_index)
    samples, in sorted class / directory / filename order."""
    dir = os.path.expanduser(dir)
    samples = []
    for target in sorted(class_to_idx.keys()):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            # Skip classes with no corresponding directory.
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                path = os.path.join(root, fname)
                if is_image_file(path):
                    samples.append((path, class_to_idx[target]))
    return samples
|
def pil_loader(path):
    """Open the image at `path` and return it as an RGB PIL image.

    Opening through an explicit file handle (instead of Image.open(path))
    ties the handle's lifetime to the `with` block.
    """
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
|
class CustomImageFolder(VisionDataset):
    """An ImageFolder-style dataset whose __getitem__ also returns the sample
    index and relative path, with optional per-sample perturbation and index
    subsampling."""
    def __init__(self, root, transform, perturbation_fn=None, idx_subsample_list=None):
        super().__init__(root, transform=transform, target_transform=None)
        (classes, class_to_idx) = self._find_classes(self.root)
        samples = make_dataset(self.root, class_to_idx)
        if (len(samples) == 0):
            raise RuntimeError(('Found 0 images in subfolders of: ' + self.root))
        # Optionally keep only the given sample indices.
        if (idx_subsample_list is not None):
            samples = [samples[i] for i in idx_subsample_list]
        self.loader = pil_loader
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]
        self.perturbation_fn = perturbation_fn
    def _find_classes(self, dir):
        """Return (sorted class names, {class_name: index}) from dir's
        immediate subdirectories."""
        if (sys.version_info >= (3, 5)):
            # os.scandir is faster than listdir + isdir.
            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        else:
            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return (classes, class_to_idx)
    def __getitem__(self, index):
        """Return [index, path relative to root, transformed image, target]."""
        (path, target) = self.samples[index]
        return [index, os.path.relpath(path, self.root), self.load_img(path), target]
    def load_img(self, path):
        """Load, transform, and (optionally) perturb the image at path."""
        sample = self.loader(path)
        sample = self.transform(sample)
        if (self.perturbation_fn is not None):
            sample = self.perturbation_fn(sample)
        return sample
    def __len__(self):
        return len(self.samples)
|
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.
    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.
    .. note::
        Dataset is assumed to be of constant size.
    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): If true (default), sampler will shuffle the indices
    """
    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        # Fall back to the process group for replica count / rank if not given.
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(math.ceil(((len(self.dataset) * 1.0) / self.num_replicas)))
        # NOTE(review): total_size is computed but never used — unlike
        # torch's DistributedSampler, __iter__ does no padding, so when
        # len(dataset) is not divisible by num_replicas some ranks yield
        # fewer than __len__() == num_samples indices.
        self.total_size = (self.num_samples * self.num_replicas)
        self.shuffle = shuffle
    def __iter__(self):
        # Seed by epoch so all ranks shuffle identically each epoch.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))
        # Each rank takes every num_replicas-th index, offset by its rank.
        indices = indices[self.rank::self.num_replicas]
        return iter(indices)
    def __len__(self):
        return self.num_samples
    def set_epoch(self, epoch):
        """Set the epoch used to seed shuffling (call once per epoch)."""
        self.epoch = epoch
|
class EvalSetting():
    """Configuration for one evaluation run: a dataset plus optional CPU/GPU
    perturbations, an adversarial attack, class/index subsetting, and a
    metrics function (defaults to accuracy_topk)."""
    def __init__(self, name, dataset, size, perturbation_fn_cpu=None, perturbation_fn_gpu=None, metrics_fn=None, adversarial_attack=None, class_sublist=None, idx_subsample_list=None, parent_eval_setting=None, transform=None):
        super().__init__()
        self.name = name
        self.dataset = dataset
        self.size = size
        self.perturbation_fn_cpu = perturbation_fn_cpu
        self.perturbation_fn_gpu = perturbation_fn_gpu
        self.class_sublist = class_sublist
        self.adversarial_attack = adversarial_attack
        self.idx_subsample_list = idx_subsample_list
        # Default metric is top-k accuracy.
        self.metrics_fn = (metrics_fn if (metrics_fn is not None) else accuracy_topk)
        self.parent_eval_setting = parent_eval_setting
        self.transform = transform
    def get_dataset_root(self):
        return self.dataset.get_root()
    def get_metrics(self, logits, targets, image_paths, py_model):
        """Invoke metrics_fn, passing only the keyword arguments its
        signature declares (image_paths/py_model are optional)."""
        sig = signature(self.metrics_fn).parameters.keys()
        assert (('logits' in sig) and ('targets' in sig)), ('Unrecognized metrics function ' + 'definition. Make sure function takes arguments "logits" and "targets"')
        kwargs = {'logits': logits, 'targets': targets}
        if ('image_paths' in sig):
            kwargs['image_paths'] = image_paths
        if ('py_model' in sig):
            kwargs['py_model'] = py_model
        return self.metrics_fn(**kwargs)
    def get_perturbation_fn_gpu(self, py_model):
        """Return the GPU perturbation, unless the model handles the
        adversarial attack itself (then None to avoid double-applying)."""
        if (self.adversarial_attack and ('adversarial_attack' in py_model.classify_fn_args)):
            return None
        else:
            return self.perturbation_fn_gpu
    def get_perturbation_fn_cpu(self, py_model):
        return self.perturbation_fn_cpu
    def get_idx_subsample_list(self, py_model):
        return self.idx_subsample_list
|
def accuracy_topk(logits, targets, topk=(1, 5)):
    """Return {'top{k}': percent-correct} for each k in topk.

    A sample counts as correct for k when its target appears among the k
    highest-scoring classes of its logits row.
    """
    largest_k = max(topk)
    batch_size = targets.size(0)
    # (largest_k, batch): predicted class ids, best first.
    pred = logits.topk(largest_k, 1, True, True)[1].t()
    correct = pred.eq(targets.view(1, (- 1)).expand_as(pred))
    result = {}
    for k in topk:
        hits = correct[:k].contiguous().view((- 1)).float().sum(0, keepdim=True)
        result[f'top{k}'] = hits.mul_((100.0 / batch_size)).item()
    return result
|
class StandardDataset():
    """A dataset referenced either by a known server name or by a local path
    (exactly one of the two)."""
    def __init__(self, name=None, path=None):
        super().__init__()
        # XOR: exactly one of name/path must be provided.
        # NOTE(review): validation via assert is stripped under python -O;
        # raising ValueError would be more robust.
        assert (bool(name) ^ bool(path)), 'Please specify one (and exactly one) of name or path'
        if (name is not None):
            assert (name in DATASET_NAMES), f'Dataset {name} is not recognized as an existing dataset in the server.'
        self.name = name
        self.path = path
    def get_root(self):
        """Return the dataset's root folder: download by name, or the path."""
        if (self.name is not None):
            return download_dataset(self.name)
        return self.path
|
def corrupt_greyscale(image):
    """Perturbation wrapper around the module-level `greyscale` helper."""
    return greyscale(image)
|
def accuracy_topk_subselected(logits, targets):
    """Top-k accuracy with targets remapped into the subselected class space.

    NOTE(review): depends on a module-level `class_sublist` global; targets
    are presumably original class ids present in that list — confirm with
    callers.
    """
    targets = torch.tensor([class_sublist.index(x) for x in targets])
    return accuracy_topk(logits, targets)
|
# One named wrapper per brightness severity level (names use the 1-5 scale,
# corruption_dict takes the 0-based index).
def corr_brightness_sev_1(image):
    return corruption_dict['brightness'](image, 0)
|
def corr_brightness_sev_2(image):
    return corruption_dict['brightness'](image, 1)
|
def corr_brightness_sev_3(image):
    return corruption_dict['brightness'](image, 2)
|
def corr_brightness_sev_4(image):
    return corruption_dict['brightness'](image, 3)
|
def corr_brightness_sev_5(image):
    return corruption_dict['brightness'](image, 4)
|
def corr_contrast_sev_1(image):
    """'contrast' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['contrast']
    return apply_corruption(image, 0)

def corr_contrast_sev_2(image):
    """'contrast' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['contrast']
    return apply_corruption(image, 1)

def corr_contrast_sev_3(image):
    """'contrast' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['contrast']
    return apply_corruption(image, 2)

def corr_contrast_sev_4(image):
    """'contrast' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['contrast']
    return apply_corruption(image, 3)

def corr_contrast_sev_5(image):
    """'contrast' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['contrast']
    return apply_corruption(image, 4)
|
def corr_fog_sev_1(image):
    """'fog' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['fog']
    return apply_corruption(image, 0)

def corr_fog_sev_2(image):
    """'fog' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['fog']
    return apply_corruption(image, 1)

def corr_fog_sev_3(image):
    """'fog' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['fog']
    return apply_corruption(image, 2)

def corr_fog_sev_4(image):
    """'fog' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['fog']
    return apply_corruption(image, 3)

def corr_fog_sev_5(image):
    """'fog' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['fog']
    return apply_corruption(image, 4)
|
def corr_frost_sev_1(image):
    """'frost' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['frost']
    return apply_corruption(image, 0)

def corr_frost_sev_2(image):
    """'frost' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['frost']
    return apply_corruption(image, 1)

def corr_frost_sev_3(image):
    """'frost' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['frost']
    return apply_corruption(image, 2)

def corr_frost_sev_4(image):
    """'frost' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['frost']
    return apply_corruption(image, 3)

def corr_frost_sev_5(image):
    """'frost' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['frost']
    return apply_corruption(image, 4)
|
def corr_gaussian_blur_sev_1(image):
    """'gaussian_blur' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['gaussian_blur']
    return apply_corruption(image, 0)

def corr_gaussian_blur_sev_2(image):
    """'gaussian_blur' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['gaussian_blur']
    return apply_corruption(image, 1)

def corr_gaussian_blur_sev_3(image):
    """'gaussian_blur' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['gaussian_blur']
    return apply_corruption(image, 2)

def corr_gaussian_blur_sev_4(image):
    """'gaussian_blur' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['gaussian_blur']
    return apply_corruption(image, 3)

def corr_gaussian_blur_sev_5(image):
    """'gaussian_blur' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['gaussian_blur']
    return apply_corruption(image, 4)
|
def corr_gaussian_noise_sev_1(image):
    """'gaussian_noise' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['gaussian_noise']
    return apply_corruption(image, 0)

def corr_gaussian_noise_sev_2(image):
    """'gaussian_noise' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['gaussian_noise']
    return apply_corruption(image, 1)

def corr_gaussian_noise_sev_3(image):
    """'gaussian_noise' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['gaussian_noise']
    return apply_corruption(image, 2)

def corr_gaussian_noise_sev_4(image):
    """'gaussian_noise' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['gaussian_noise']
    return apply_corruption(image, 3)

def corr_gaussian_noise_sev_5(image):
    """'gaussian_noise' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['gaussian_noise']
    return apply_corruption(image, 4)
|
def corr_impulse_noise_sev_1(image):
    """'impulse_noise' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['impulse_noise']
    return apply_corruption(image, 0)

def corr_impulse_noise_sev_2(image):
    """'impulse_noise' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['impulse_noise']
    return apply_corruption(image, 1)

def corr_impulse_noise_sev_3(image):
    """'impulse_noise' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['impulse_noise']
    return apply_corruption(image, 2)

def corr_impulse_noise_sev_4(image):
    """'impulse_noise' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['impulse_noise']
    return apply_corruption(image, 3)

def corr_impulse_noise_sev_5(image):
    """'impulse_noise' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['impulse_noise']
    return apply_corruption(image, 4)
|
def corr_jpeg_compression_sev_1(image):
    """'jpeg_compression' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['jpeg_compression']
    return apply_corruption(image, 0)

def corr_jpeg_compression_sev_2(image):
    """'jpeg_compression' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['jpeg_compression']
    return apply_corruption(image, 1)

def corr_jpeg_compression_sev_3(image):
    """'jpeg_compression' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['jpeg_compression']
    return apply_corruption(image, 2)

def corr_jpeg_compression_sev_4(image):
    """'jpeg_compression' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['jpeg_compression']
    return apply_corruption(image, 3)

def corr_jpeg_compression_sev_5(image):
    """'jpeg_compression' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['jpeg_compression']
    return apply_corruption(image, 4)
|
def corr_pixelate_sev_1(image):
    """'pixelate' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['pixelate']
    return apply_corruption(image, 0)

def corr_pixelate_sev_2(image):
    """'pixelate' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['pixelate']
    return apply_corruption(image, 1)

def corr_pixelate_sev_3(image):
    """'pixelate' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['pixelate']
    return apply_corruption(image, 2)

def corr_pixelate_sev_4(image):
    """'pixelate' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['pixelate']
    return apply_corruption(image, 3)

def corr_pixelate_sev_5(image):
    """'pixelate' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['pixelate']
    return apply_corruption(image, 4)
|
def corr_saturate_sev_1(image):
    """'saturate' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['saturate']
    return apply_corruption(image, 0)

def corr_saturate_sev_2(image):
    """'saturate' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['saturate']
    return apply_corruption(image, 1)

def corr_saturate_sev_3(image):
    """'saturate' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['saturate']
    return apply_corruption(image, 2)

def corr_saturate_sev_4(image):
    """'saturate' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['saturate']
    return apply_corruption(image, 3)

def corr_saturate_sev_5(image):
    """'saturate' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['saturate']
    return apply_corruption(image, 4)
|
def corr_shot_noise_sev_1(image):
    """'shot_noise' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['shot_noise']
    return apply_corruption(image, 0)

def corr_shot_noise_sev_2(image):
    """'shot_noise' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['shot_noise']
    return apply_corruption(image, 1)

def corr_shot_noise_sev_3(image):
    """'shot_noise' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['shot_noise']
    return apply_corruption(image, 2)

def corr_shot_noise_sev_4(image):
    """'shot_noise' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['shot_noise']
    return apply_corruption(image, 3)

def corr_shot_noise_sev_5(image):
    """'shot_noise' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['shot_noise']
    return apply_corruption(image, 4)
|
def corr_spatter_sev_1(image):
    """'spatter' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['spatter']
    return apply_corruption(image, 0)

def corr_spatter_sev_2(image):
    """'spatter' corruption at severity 2 of 5."""
    apply_corruption = corruption_dict['spatter']
    return apply_corruption(image, 1)

def corr_spatter_sev_3(image):
    """'spatter' corruption at severity 3 of 5."""
    apply_corruption = corruption_dict['spatter']
    return apply_corruption(image, 2)

def corr_spatter_sev_4(image):
    """'spatter' corruption at severity 4 of 5."""
    apply_corruption = corruption_dict['spatter']
    return apply_corruption(image, 3)

def corr_spatter_sev_5(image):
    """'spatter' corruption at severity 5 of 5."""
    apply_corruption = corruption_dict['spatter']
    return apply_corruption(image, 4)
|
def corr_speckle_noise_sev_1(image):
    """'speckle_noise' corruption at severity 1 of 5 (0-indexed level 0)."""
    apply_corruption = corruption_dict['speckle_noise']
    return apply_corruption(image, 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.