code stringlengths 17 6.64M |
|---|
def corr_speckle_noise_sev_2(image):
    """Apply speckle-noise corruption at severity 2 (0-based level 1)."""
    apply_noise = corruption_dict['speckle_noise']
    return apply_noise(image, 1)
|
def corr_speckle_noise_sev_3(image):
    """Apply speckle-noise corruption at severity 3 (0-based level 2)."""
    apply_noise = corruption_dict['speckle_noise']
    return apply_noise(image, 2)
|
def corr_speckle_noise_sev_4(image):
    """Apply speckle-noise corruption at severity 4 (0-based level 3)."""
    apply_noise = corruption_dict['speckle_noise']
    return apply_noise(image, 3)
|
def corr_speckle_noise_sev_5(image):
    """Apply speckle-noise corruption at severity 5 (0-based level 4)."""
    apply_noise = corruption_dict['speckle_noise']
    return apply_noise(image, 4)
|
def corr_zoom_blur_sev_1(image):
    """Apply zoom-blur corruption at severity 1 (0-based level 0)."""
    apply_blur = corruption_dict['zoom_blur']
    return apply_blur(image, 0)
|
def corr_zoom_blur_sev_2(image):
    """Apply zoom-blur corruption at severity 2 (0-based level 1)."""
    apply_blur = corruption_dict['zoom_blur']
    return apply_blur(image, 1)
|
def corr_zoom_blur_sev_3(image):
    """Apply zoom-blur corruption at severity 3 (0-based level 2)."""
    apply_blur = corruption_dict['zoom_blur']
    return apply_blur(image, 2)
|
def corr_zoom_blur_sev_4(image):
    """Apply zoom-blur corruption at severity 4 (0-based level 3)."""
    apply_blur = corruption_dict['zoom_blur']
    return apply_blur(image, 3)
|
def corr_zoom_blur_sev_5(image):
    """Apply zoom-blur corruption at severity 5 (0-based level 4)."""
    apply_blur = corruption_dict['zoom_blur']
    return apply_blur(image, 4)
|
def gen_corrupt_batch_gpu(corruption, severity):
    """Build a GPU batch-corruption function for the given corruption name/severity.

    The returned callable has the (images, model) perturbation-fn interface;
    `model` is accepted but unused. Images are corrupted in place, one at a
    time, and the (mutated) batch tensor is returned.
    """
    def corrupt_batch_gpu(images, model):
        # Hoisted out of the loop: the dict lookup is invariant per batch.
        corr_func = corruption_dict[corruption]
        for i in range(images.size(0)):
            images[i] = corr_func(images[i], severity, gpu=True)
        return images
    return corrupt_batch_gpu
|
def accuracy_topk_subselected(logits, targets):
    """Remap targets to their positions in class_sublist, then score top-k accuracy."""
    remapped = [class_sublist.index(label) for label in targets]
    return accuracy_topk(logits, torch.tensor(remapped))
|
def accuracy_topk_subselected(logits, targets):
    """Remap targets to their positions in class_sublist_1_8, then score top-k accuracy."""
    remapped = [class_sublist_1_8.index(label) for label in targets]
    return accuracy_topk(logits, torch.tensor(remapped))
|
def crop_image(image, border=2):
    """Trim `border` pixels from every side of a PIL image."""
    return PIL.ImageOps.crop(image, border=border)
|
def objectnet_accuracy(logits, targets, image_paths, using_class_sublist=True):
    """Compute ObjectNet-style top-1 accuracy.

    A prediction counts as correct if it falls in the set of class ids mapped
    to the image's top-level folder (first path component). Images whose
    folder has no entry in the mapping are excluded from the denominator.

    Returns a dict {'top1': percentage}; 0.0 if no image was scorable
    (previously this raised ZeroDivisionError).
    """
    if using_class_sublist:
        # Translate each folder's class ids into sublist-index space to match logits.
        folder_map = {k: [class_sublist.index(x) for x in v] for (k, v) in folder_to_ids.items()}
    else:
        folder_map = folder_to_ids
    preds = logits.argmax(dim=1)
    num_correct, num_total = 0, 0
    for pred, image_path in zip(preds, image_paths):
        folder = image_path.split('/')[0]
        if folder not in folder_map:
            continue  # no ground-truth mapping for this folder
        num_total += 1
        if pred in folder_map[folder]:
            num_correct += 1
    if num_total == 0:
        # Robustness fix: avoid ZeroDivisionError when nothing was scorable.
        return {'top1': 0.0}
    return {'top1': (num_correct / num_total) * 100}
|
def accuracy_topk_subselected(logits, targets):
    """Map each target label to its index within class_sublist and delegate to accuracy_topk."""
    index_of = class_sublist.index
    return accuracy_topk(logits, torch.tensor([index_of(t) for t in targets]))
|
def pgd_style_attack(d, images, model):
    """Run a PGD attack on `images` using the configuration dict `d`.

    For the L2 norm, epsilon is rescaled by images.size(2) / 224 — presumably
    the configured eps is specified for 224-pixel inputs (TODO confirm).
    """
    norm = d['norm']
    eps = d['eps']
    if norm == 2:
        eps = eps * images.size(2) / 224
    return pgd(model, images, eps, d['step_size'], d['num_steps'], norm, targeted=d['targeted'])
|
def gen_attack_fn(d):
    """Bind attack configuration `d` into an (images, model) -> attacked-images callable."""
    def attack_fn(images, model):
        return pgd_style_attack(d, images, model)
    return attack_fn
|
def accuracy_topk_subselected(logits, targets):
    """Map each target label to its index within class_sublist_1_8 and delegate to accuracy_topk."""
    remapped = list(map(class_sublist_1_8.index, targets))
    return accuracy_topk(logits, torch.tensor(remapped))
|
def main(args):
    """Evaluate one model on one eval setting across all local GPUs and persist metrics.

    Spawns one worker per GPU, gathers each worker's shard of (logits, targets,
    image_paths), restores the original dataset order, computes the setting's
    metrics, writes metrics.json to args.logdir, and optionally uploads to the db.
    """
    py_model = registry.get_model(args.model)
    py_eval_setting = registry.get_eval_setting(args.eval_setting)
    # Skip re-running an evaluation that the db already has (only when db mode is on).
    if (args.db and utils.evaluation_completed(py_model, py_eval_setting)):
        print(f'Evaluation for {py_model.name} x {py_eval_setting.name} already found. Skipping...')
        return
    args.num_gpus = torch.cuda.device_count()
    # Manager dict lets each spawned worker hand its shard back to this process.
    results_dict = mp.Manager().dict()
    mp.spawn(main_worker, nprocs=args.num_gpus, args=(args, results_dict))
    # Tensor.sort returns (sorted values, permutation indices); idx_map reorders
    # the concatenated shards back into original dataset order.
    (idx_sorted, idx_map) = torch.cat([results_dict[i]['idxs'] for i in range(args.num_gpus)]).sort()
    # Sanity-check the shards cover every dataset index exactly once (0..N-1).
    assert idx_sorted.eq(idx_sorted.unique()).all(), 'Error collecting results'
    assert idx_sorted.eq(torch.tensor(list(range(idx_sorted.size(0))))).all(), 'Error collecting results'
    logits = torch.cat([results_dict[i]['logits'] for i in range(args.num_gpus)])[idx_map]
    targets = torch.cat([results_dict[i]['targets'] for i in range(args.num_gpus)])[idx_map]
    image_paths = np.concatenate([results_dict[i]['image_paths'] for i in range(args.num_gpus)])[idx_map]
    metrics = py_eval_setting.get_metrics(logits, targets, image_paths, py_model)
    with open(join(args.logdir, 'metrics.json'), 'w') as outfile:
        json.dump(metrics, outfile)
    if args.db:
        utils.store_evaluation(py_model, py_eval_setting, metrics, logits)
        print('Uploaded to db')
    utils.close_db_connection()
    print('************************************')
    print(f'RESULT {args.model} on {args.eval_setting} - {metrics}')
    print('************************************')
|
def main_worker(gpu, args, results_dict):
    """Per-GPU worker: build model + dataloader shard, validate, report results.

    Runs under mp.spawn with rank == gpu index; stores this shard's outputs
    into the shared results_dict under key `gpu`.
    """
    dist.init_process_group(backend=args.backend, init_method=args.dist_url, world_size=args.num_gpus, rank=gpu)
    torch.cuda.set_device(gpu)
    # Re-load the registry in the spawned process (spawn start method does not inherit it).
    registry.load_full_registry()
    py_model = registry.get_model(args.model)
    py_eval_setting = registry.get_eval_setting(args.eval_setting)
    model = py_model.generate_classifier(py_eval_setting)
    model = model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    batch_size = py_model.get_batch_size(py_eval_setting)
    gpu_perturbation_fn = py_eval_setting.get_perturbation_fn_gpu(py_model)
    # Gradients are only needed when the setting runs an adversarial attack.
    torch.set_grad_enabled((py_eval_setting.adversarial_attack is not None))
    # Setting-specific transform (if any) is applied before the model's own transform.
    setting_transform = ([py_eval_setting.transform] if (py_eval_setting.transform is not None) else [])
    val_dataset = CustomImageFolder(root=py_eval_setting.get_dataset_root(), transform=transforms.Compose((setting_transform + [py_model.transform])), perturbation_fn=py_eval_setting.get_perturbation_fn_cpu(py_model), idx_subsample_list=py_eval_setting.get_idx_subsample_list(py_model))
    # shuffle=False so each rank's shard keeps deterministic dataset indices.
    val_sampler = DistributedSampler(val_dataset, num_replicas=args.num_gpus, rank=gpu, shuffle=False)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=val_sampler)
    (logits, targets, image_paths, idxs) = validate(gpu, args, val_loader, model, gpu_perturbation_fn)
    results_dict[gpu] = {'logits': logits, 'targets': targets, 'image_paths': image_paths, 'idxs': idxs}
|
def validate(gpu, args, val_loader, model, gpu_perturbation_fn):
    """Run the model over a loader shard and collect per-example outputs.

    Returns (logits, targets, image_paths, idxs); idxs are the dataset indices
    used by the caller to restore global ordering. Only rank 0 shows a
    progress bar. Note: grad mode is whatever the caller set (needed for
    attack-based perturbation fns), but logits are detached before storage.
    """
    model.eval()
    (all_logits, all_targets, all_idxs, all_image_paths) = ([], [], [], [])
    if (gpu == 0):
        val_loader = tqdm(val_loader, desc='Validating')
    for (idxs, image_paths, images, target) in val_loader:
        images = images.cuda()
        if (gpu_perturbation_fn is not None):
            # e.g. GPU corruption or adversarial attack; may use the model itself.
            images = gpu_perturbation_fn(images, model)
        output = model(images)
        # Move off-GPU immediately to keep device memory bounded.
        all_logits.append(output.detach().cpu())
        all_targets.append(target)
        all_idxs.append(idxs)
        all_image_paths.append(image_paths)
    all_logits = torch.cat(all_logits, dim=0)
    all_targets = torch.cat(all_targets, dim=0)
    all_idxs = torch.cat(all_idxs, dim=0)
    # Flatten the list of per-batch path tuples into one flat list.
    all_image_paths = [image_path for batch in all_image_paths for image_path in batch]
    return (all_logits, all_targets, all_image_paths, all_idxs)
|
def download_db():
    """Download the SQLite database dump into the local cache if not already there."""
    local_path = join(s3_utils.default_cache_root_path, 'robustness_evaluation.db')
    if exists(local_path):
        return
    print('downloading database dump...')
    subprocess.run(['wget', '-P', s3_utils.default_cache_root_path, DB_DUMP_URL, '--no-check-certificate'], check=True)
|
def gen_short_uuid(num_chars=None):
    """Generate a random short id by encoding a uuid4 in a 57-char alphabet.

    The alphabet omits visually ambiguous characters (0/O, 1/l/I). When
    num_chars is given, the encoded string is truncated to that length.
    """
    alphabet = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    base = len(alphabet)
    remaining = uuid.uuid4().int
    digits = []
    while remaining > 0:
        remaining, digit = divmod(remaining, base)
        digits.append(alphabet[digit])
    encoded = ''.join(reversed(digits))
    return encoded if num_chars is None else encoded[:num_chars]
|
def get_logdir_key(model_id):
    """S3 key prefix for a model's logdir files."""
    return f'logdir/{model_id}'
|
def get_checkpoint_data_key(checkpoint_id):
    """S3 key for a checkpoint's serialized weights."""
    return f'checkpoints/{checkpoint_id}_data.bytes'
|
def get_dataset_data_key(dataset_id):
    """S3 key for a dataset's serialized data blob."""
    return f'datasets/{dataset_id}_data.bytes'
|
def get_evaluation_setting_extra_data_key(evaluation_setting_id):
    """S3 key for an evaluation setting's extra data blob."""
    return f'evaluation_settings/{evaluation_setting_id}_extra_data.bytes'
|
def get_evaluation_setting_processed_dataset_key(evaluation_setting_id):
    """S3 key for an evaluation setting's preprocessed dataset blob."""
    return f'evaluation_settings/{evaluation_setting_id}_processed_dataset.bytes'
|
def get_raw_input_data_key(raw_input_id):
    """S3 key for a raw input's data blob."""
    return f'raw_inputs/{raw_input_id}_data.bytes'
|
def get_evaluation_extra_data_key(evaluation_id):
    """S3 key for an evaluation's extra data blob."""
    return f'evaluations/{evaluation_id}_data.bytes'
|
def get_evaluation_logits_data_key(evaluation_id):
    """S3 key for an evaluation's stored logits."""
    return f'evaluations/{evaluation_id}_logits_data.bytes'
|
def get_evaluation_chunk_extra_data_key(evaluation_chunk_id):
    """S3 key for an evaluation chunk's extra data blob."""
    return f'evaluation_chunks/{evaluation_chunk_id}_data.bytes'
|
def get_evaluation_chunk_logits_data_key(evaluation_chunk_id):
    """S3 key for an evaluation chunk's stored logits."""
    return f'evaluation_chunks/{evaluation_chunk_id}_logits_data.bytes'
|
def get_evaluation_chunk_indices_data_key(evaluation_chunk_id):
    """S3 key for an evaluation chunk's stored example indices."""
    return f'evaluation_chunks/{evaluation_chunk_id}_indices_data.bytes'
|
class Model(sqlalchemy_base):
    """ORM row for a registered model; owns its checkpoints and an optional final checkpoint."""
    __tablename__ = 'models'
    uuid = sqla.Column(sqla.String, primary_key=True)
    name = sqla.Column(sqla.String, unique=True)
    description = sqla.Column(sqla.String)
    username = sqla.Column(sqla.String)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    extra_info = sqla.Column(sqla.JSON)
    # All checkpoints of this model; deleting the model deletes them too.
    checkpoints = sqla.orm.relationship('Checkpoint', back_populates='model', cascade='all, delete, delete-orphan', foreign_keys='Checkpoint.model_uuid')
    # Optional pointer to the checkpoint designated as final for evaluation.
    final_checkpoint_uuid = sqla.Column(sqla.String, sqla.ForeignKey('checkpoints.uuid'), nullable=True)
    final_checkpoint = sqla.orm.relationship('Checkpoint', foreign_keys=[final_checkpoint_uuid], uselist=False)
    completed = sqla.Column(sqla.Boolean)
    hidden = sqla.Column(sqla.Boolean)
    # Mapping of relative logdir path -> {'size', 'mtime'} recorded by store_logdir.
    logdir_filepaths = sqla.Column(sqla.JSON)

    def __repr__(self):
        return f'<Model(uuid="{self.uuid}", name="{self.name}")>'

    def __hash__(self):
        # Hash combines uuid and name; mirrors the other ORM classes in this file.
        return hash((hash(self.uuid) + hash(self.name)))

    def __eq__(self, other):
        # NOTE(review): equality is hash-equality only, so unrelated objects with
        # a colliding hash would compare equal — confirm this is intended.
        return (self.__hash__() == hash(other))
|
class Checkpoint(sqlalchemy_base):
    """ORM row for one saved model checkpoint; belongs to a Model, owns Evaluations."""
    __tablename__ = 'checkpoints'
    uuid = sqla.Column(sqla.String, primary_key=True)
    name = sqla.Column(sqla.String, unique=True)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    model_uuid = sqla.Column(sqla.String, sqla.ForeignKey('models.uuid'), nullable=False)
    model = sqla.orm.relationship('Model', back_populates='checkpoints', foreign_keys=[model_uuid])
    # Evaluations run against this checkpoint; deleted along with it.
    evaluations = sqla.orm.relationship('Evaluation', back_populates='checkpoint', cascade='all, delete, delete-orphan', foreign_keys='Evaluation.checkpoint_uuid')
    training_step = sqla.Column(sqla.BigInteger)
    epoch = sqla.Column(sqla.Float)
    username = sqla.Column(sqla.String)
    extra_info = sqla.Column(sqla.JSON)
    hidden = sqla.Column(sqla.Boolean)

    def __repr__(self):
        return f'<Checkpoint(uuid="{self.uuid}", model_uuid="{self.model_uuid}")>'

    def __hash__(self):
        # Same (uuid, name) hashing convention as the other ORM classes here.
        return hash((hash(self.uuid) + hash(self.name)))

    def __eq__(self, other):
        # NOTE(review): hash-only equality; see Model.__eq__.
        return (self.__hash__() == hash(other))
|
class Dataset(sqlalchemy_base):
    """ORM row for a dataset; parent of EvaluationSettings defined on it."""
    __tablename__ = 'datasets'
    uuid = sqla.Column(sqla.String, primary_key=True)
    name = sqla.Column(sqla.String, unique=True, nullable=False)
    description = sqla.Column(sqla.String)
    username = sqla.Column(sqla.String)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    # Number of examples (presumably; not enforced here — confirm against writers).
    size = sqla.Column(sqla.Integer)
    extra_info = sqla.Column(sqla.JSON)
    evaluation_settings = sqla.orm.relationship('EvaluationSetting', back_populates='dataset', cascade='all, delete, delete-orphan', foreign_keys='EvaluationSetting.dataset_uuid')
    hidden = sqla.Column(sqla.Boolean)

    def __repr__(self):
        return f'<Dataset(uuid="{self.uuid}", name="{self.name}")>'

    def __hash__(self):
        return hash((hash(self.uuid) + hash(self.name)))

    def __eq__(self, other):
        # NOTE(review): hash-only equality; see Model.__eq__.
        return (self.__hash__() == hash(other))
|
class EvaluationSetting(sqlalchemy_base):
    """ORM row for an evaluation setting (a dataset plus evaluation protocol)."""
    __tablename__ = 'evaluation_settings'
    uuid = sqla.Column(sqla.String, primary_key=True)
    name = sqla.Column(sqla.String, unique=True, nullable=False)
    description = sqla.Column(sqla.String)
    username = sqla.Column(sqla.String)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    dataset_uuid = sqla.Column(sqla.String, sqla.ForeignKey('datasets.uuid'), nullable=False)
    dataset = sqla.orm.relationship('Dataset', back_populates='evaluation_settings', foreign_keys=[dataset_uuid])
    # Child rows removed together with the setting.
    evaluations = sqla.orm.relationship('Evaluation', back_populates='setting', cascade='all, delete, delete-orphan', foreign_keys='Evaluation.setting_uuid')
    raw_inputs = sqla.orm.relationship('RawInput', back_populates='setting', cascade='all, delete, delete-orphan', foreign_keys='RawInput.setting_uuid')
    extra_info = sqla.Column(sqla.JSON)
    hidden = sqla.Column(sqla.Boolean)

    def __repr__(self):
        return f'<EvaluationSetting(uuid="{self.uuid}", name="{self.name}")>'

    def __hash__(self):
        return hash((hash(self.uuid) + hash(self.name)))

    def __eq__(self, other):
        # NOTE(review): hash-only equality; see Model.__eq__.
        return (self.__hash__() == hash(other))
|
class RawInput(sqlalchemy_base):
    """ORM row for a raw input blob attached to an evaluation setting."""
    __tablename__ = 'raw_inputs'
    uuid = sqla.Column(sqla.String, primary_key=True)
    name = sqla.Column(sqla.String, unique=True)
    description = sqla.Column(sqla.String)
    username = sqla.Column(sqla.String)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    setting_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluation_settings.uuid'), nullable=False)
    setting = sqla.orm.relationship('EvaluationSetting', back_populates='raw_inputs', foreign_keys=[setting_uuid])
    # Shape/format metadata describing the stored bytes (schema not enforced here).
    data_shape = sqla.Column(sqla.JSON)
    data_format = sqla.Column(sqla.String)
    evaluations = sqla.orm.relationship('Evaluation', back_populates='raw_input', cascade='all, delete, delete-orphan', foreign_keys='Evaluation.raw_input_uuid')
    extra_info = sqla.Column(sqla.JSON)
    hidden = sqla.Column(sqla.Boolean)

    def __repr__(self):
        return f'<RawInput(uuid="{self.uuid}", name="{self.name}")>'

    def __hash__(self):
        return hash((hash(self.uuid) + hash(self.name)))

    def __eq__(self, other):
        # NOTE(review): hash-only equality; see Model.__eq__.
        return (self.__hash__() == hash(other))
|
class Evaluation(sqlalchemy_base):
    """ORM row for one evaluation run of a checkpoint under a setting (or raw input)."""
    __tablename__ = 'evaluations'
    uuid = sqla.Column(sqla.String, primary_key=True)
    name = sqla.Column(sqla.String, unique=True)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    checkpoint_uuid = sqla.Column(sqla.String, sqla.ForeignKey('checkpoints.uuid'), nullable=False)
    checkpoint = sqla.orm.relationship('Checkpoint', back_populates='evaluations', foreign_keys=[checkpoint_uuid])
    # Either a setting or a raw input may be attached (both nullable).
    setting_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluation_settings.uuid'), nullable=True)
    setting = sqla.orm.relationship('EvaluationSetting', back_populates='evaluations', foreign_keys=[setting_uuid])
    raw_input_uuid = sqla.Column(sqla.String, sqla.ForeignKey('raw_inputs.uuid'), nullable=True)
    raw_input = sqla.orm.relationship('RawInput', back_populates='evaluations', foreign_keys=[raw_input_uuid])
    chunks = sqla.orm.relationship('EvaluationChunk', back_populates='evaluation', cascade='all, delete, delete-orphan', foreign_keys='EvaluationChunk.evaluation_uuid')
    username = sqla.Column(sqla.String)
    extra_info = sqla.Column(sqla.JSON)
    completed = sqla.Column(sqla.Boolean)
    hidden = sqla.Column(sqla.Boolean)

    def __repr__(self):
        return f'<Evaluation(uuid="{self.uuid}", checkpoint_uuid="{self.checkpoint_uuid}")>'

    def __hash__(self):
        return hash((hash(self.uuid) + hash(self.name)))

    def __eq__(self, other):
        # NOTE(review): hash-only equality; see Model.__eq__.
        return (self.__hash__() == hash(other))
|
class EvaluationChunk(sqlalchemy_base):
    """ORM row for one chunk of an evaluation's stored results.

    Bug fixes vs. the previous version:
    - __hash__ hashed `self.name`, but this table has no `name` column, so it
      raised AttributeError; it now hashes the primary key (uuid) only.
    - __eq__ called `self.hash()` (no such method); it now calls __hash__.
    """
    __tablename__ = 'evaluation_chunks'
    uuid = sqla.Column(sqla.String, primary_key=True)
    creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
    evaluation_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluations.uuid'), nullable=False)
    evaluation = sqla.orm.relationship('Evaluation', back_populates='chunks', foreign_keys=[evaluation_uuid])
    username = sqla.Column(sqla.String)
    extra_info = sqla.Column(sqla.JSON)
    hidden = sqla.Column(sqla.Boolean)

    def __repr__(self):
        return f'<EvaluationChunk(uuid="{self.uuid}", evaluation_uuid="{self.evaluation_uuid}")>'

    def __hash__(self):
        # uuid is the primary key and uniquely identifies the chunk.
        return hash(self.uuid)

    def __eq__(self, other):
        # Hash-only equality, matching the convention of the sibling ORM classes.
        return (self.__hash__() == hash(other))
|
class ModelRepository():
def __init__(self, mode=s3_utils.DB_CONNECTION_MODE, sql_verbose=False, download_database=True):
    """Open the metadata DB ('sqlite' local dump or 'rds' remote) plus the S3 blob store.

    In sqlite mode, S3 writes are stubbed out (put becomes a no-op) so local
    experiments cannot overwrite shared blobs.
    """
    self.sql_verbose = sql_verbose
    if (mode == 'sqlite'):
        if download_database:
            # Fetch the dump into the cache directory if it is missing.
            download_db()
        self.db_connection_string = s3_utils.DB_CONNECTION_STRING_SQLITE
        self.engine = sqla.create_engine(self.db_connection_string, echo=self.sql_verbose)
    elif (mode == 'rds'):
        self.db_connection_string = s3_utils.DB_CONNECTION_STRING_RDS
        # pool_pre_ping guards against stale connections to the remote DB.
        self.engine = sqla.create_engine(self.db_connection_string, echo=self.sql_verbose, pool_pre_ping=True)
    else:
        assert False
    if (not database_exists(self.engine.url)):
        create_database(self.engine.url)
    # expire_on_commit=False lets returned ORM objects be used after the session closes.
    self.sessionmaker = sqla.orm.sessionmaker(bind=self.engine, expire_on_commit=False)
    self.cache_root_path = s3_utils.default_cache_root_path
    self.s3wrapper = s3_utils.S3Wrapper(bucket='robustness-eval', cache_root_path=self.cache_root_path, verbose=False)
    if (mode == 'sqlite'):
        # Read-only blob store in sqlite mode: silently drop uploads.
        self.s3wrapper.put = (lambda *args, **kwargs: None)
    self.uuid_length = 10
    self.pickle_protocol = 4
def dispose(self):
    # Release all pooled DB connections held by the engine.
    self.engine.dispose()

@contextlib.contextmanager
def session_scope(self):
    """Transactional session scope: commit on success, rollback and re-raise on error."""
    session = self.sessionmaker()
    try:
        (yield session)
        session.commit()
    except:
        # Bare except is deliberate: roll back on *any* exception
        # (including KeyboardInterrupt) before re-raising.
        session.rollback()
        raise
    finally:
        session.close()

def gen_short_uuid(self):
    # Truncated id used for most entities (length = self.uuid_length).
    new_id = gen_short_uuid(num_chars=self.uuid_length)
    return new_id

def gen_checkpoint_uuid(self):
    # Checkpoints get the full-length id (num_chars=None -> no truncation).
    return gen_short_uuid(num_chars=None)

def run_query_with_optional_session(self, query, session=None):
    """Run query(sess) inside a fresh session_scope unless a session is provided."""
    if (session is None):
        with self.session_scope() as sess:
            return query(sess)
    else:
        return query(session)
def run_get(self, get_fn, session=None, assert_exists=True):
    """Run a query expected to yield at most one row; return it, or None if absent.

    With assert_exists=True (default), a missing row is an assertion failure.
    """
    def query(sess):
        result = get_fn(sess)
        # Uniqueness is an invariant of the get_* filters (uuid/name are unique).
        assert (len(result) <= 1)
        if assert_exists:
            assert (len(result) == 1)
        if (len(result) == 0):
            return None
        else:
            return result[0]
    return self.run_query_with_optional_session(query, session)
def get_model(self, *, uuid=None, name=None, session=None, assert_exists=True, load_final_checkpoint=False, load_all_checkpoints=False, load_evaluations=False):
    """Fetch a single Model by uuid or name (hidden rows included); None if absent and not asserting."""
    if (uuid is not None):
        assert (type(uuid) is str)
    if (name is not None):
        assert (type(name) is str)
    def get_fn(sess):
        return self.get_models(uuids=([uuid] if (uuid is not None) else None), names=([name] if (name is not None) else None), session=sess, load_final_checkpoint=load_final_checkpoint, load_all_checkpoints=load_all_checkpoints, load_evaluations=load_evaluations, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)

def get_checkpoint(self, uuid=None, *, session=None, assert_exists=True, load_parents=False, load_evaluations=False):
    """Fetch a single Checkpoint by uuid (hidden rows included)."""
    if (uuid is not None):
        assert (type(uuid) is str)
    def get_fn(sess):
        return self.get_checkpoints(uuids=[uuid], session=sess, load_parents=load_parents, load_evaluations=load_evaluations, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)

def get_dataset(self, *, uuid=None, name=None, session=None, assert_exists=True, load_evaluation_settings=False):
    """Fetch a single Dataset by uuid or name (hidden rows included)."""
    if (uuid is not None):
        assert (type(uuid) is str)
    if (name is not None):
        assert (type(name) is str)
    def get_fn(sess):
        return self.get_datasets(uuids=([uuid] if (uuid is not None) else None), names=([name] if (name is not None) else None), session=sess, load_evaluation_settings=load_evaluation_settings, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)

def get_evaluation_setting(self, *, uuid=None, name=None, session=None, assert_exists=True, load_parents=False, load_evaluations=False, load_raw_inputs=False):
    """Fetch a single EvaluationSetting by uuid or name (hidden rows included)."""
    if (uuid is not None):
        assert (type(uuid) is str)
    if (name is not None):
        assert (type(name) is str)
    def get_fn(sess):
        return self.get_evaluation_settings(uuids=([uuid] if (uuid is not None) else None), names=([name] if (name is not None) else None), session=sess, load_parents=load_parents, load_evaluations=load_evaluations, load_raw_inputs=load_raw_inputs, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)

def get_raw_input(self, uuid=None, *, session=None, assert_exists=True, load_parents=False, load_evaluations=False):
    """Fetch a single RawInput by uuid (hidden rows included)."""
    if (uuid is not None):
        assert (type(uuid) is str)
    def get_fn(sess):
        return self.get_raw_inputs(uuids=[uuid], session=sess, load_parents=load_parents, load_evaluations=load_evaluations, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)

def get_evaluation(self, uuid=None, *, session=None, assert_exists=True, load_parents=False, load_chunks=True):
    """Fetch a single Evaluation by uuid (hidden rows included); chunks loaded by default."""
    if (uuid is not None):
        assert (type(uuid) is str)
    def get_fn(sess):
        return self.get_evaluations(uuids=[uuid], session=sess, load_parents=load_parents, load_chunks=load_chunks, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)

def get_evaluation_chunk(self, *, uuid=None, session=None, assert_exists=True, load_parents=False):
    """Fetch a single EvaluationChunk by uuid (hidden rows included)."""
    if (uuid is not None):
        assert (type(uuid) is str)
    def get_fn(sess):
        return self.get_evaluation_chunks(uuids=[uuid], session=sess, load_parents=load_parents, show_hidden=True)
    return self.run_get(get_fn, session=session, assert_exists=assert_exists)
# Existence checks: True iff a row with the given uuid exists (hidden included).
# NOTE(review): parameter `uuid` shadows the stdlib uuid module within these methods.
def model_uuid_exists(self, uuid, session=None):
    return (self.get_model(uuid=uuid, assert_exists=False, session=session) is not None)

def checkpoint_uuid_exists(self, uuid, session=None):
    return (self.get_checkpoint(uuid=uuid, assert_exists=False, session=session) is not None)

def dataset_uuid_exists(self, uuid, session=None):
    return (self.get_dataset(uuid=uuid, assert_exists=False, session=session) is not None)

def evaluation_setting_uuid_exists(self, uuid, session=None):
    return (self.get_evaluation_setting(uuid=uuid, assert_exists=False, session=session) is not None)

def raw_input_uuid_exists(self, uuid, session=None):
    return (self.get_raw_input(uuid=uuid, assert_exists=False, session=session) is not None)

def evaluation_uuid_exists(self, uuid, session=None):
    return (self.get_evaluation(uuid=uuid, assert_exists=False, session=session) is not None)

def evaluation_chunk_uuid_exists(self, uuid, session=None):
    return (self.get_evaluation_chunk(uuid=uuid, assert_exists=False, session=session) is not None)
def get_checkpoints(self, uuids=None, *, session=None, load_parents=True, load_evaluations=False, show_hidden=False):
    """List Checkpoints, optionally filtered by uuid, with eager-loading options."""
    cur_options = []
    if load_parents:
        cur_options.append(sqla.orm.subqueryload(Checkpoint.model))
    if load_evaluations:
        cur_options.append(sqla.orm.subqueryload(Checkpoint.evaluations))
    filter_list = []
    if (not show_hidden):
        filter_list.append((Checkpoint.hidden == False))
    if (uuids is not None):
        filter_list.append(Checkpoint.uuid.in_(uuids))
    def query(sess):
        return sess.query(Checkpoint).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)

def get_datasets(self, *, uuids=None, names=None, session=None, load_evaluation_settings=True, show_hidden=False):
    """List Datasets, optionally filtered by uuid and/or name."""
    cur_options = []
    if load_evaluation_settings:
        cur_options.append(sqla.orm.subqueryload(Dataset.evaluation_settings))
    filter_list = []
    if (not show_hidden):
        filter_list.append((Dataset.hidden == False))
    if (uuids is not None):
        filter_list.append(Dataset.uuid.in_(uuids))
    if (names is not None):
        filter_list.append(Dataset.name.in_(names))
    def query(sess):
        return sess.query(Dataset).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)
def get_evaluation_settings(self, *, uuids=None, names=None, session=None, load_parents=True, load_evaluations=False, load_raw_inputs=False, show_hidden=False):
    """List EvaluationSettings with optional uuid/name filters and eager loads."""
    cur_options = []
    if load_parents:
        cur_options.append(sqla.orm.subqueryload(EvaluationSetting.dataset))
    if load_evaluations:
        # Chain eager loads down to each evaluation's checkpoint and its model.
        cur_options.append(sqla.orm.subqueryload(EvaluationSetting.evaluations).subqueryload(Evaluation.checkpoint).subqueryload(Checkpoint.model))
    if load_raw_inputs:
        cur_options.append(sqla.orm.subqueryload(EvaluationSetting.raw_inputs))
    filter_list = []
    if (not show_hidden):
        filter_list.append((EvaluationSetting.hidden == False))
    if (uuids is not None):
        filter_list.append(EvaluationSetting.uuid.in_(uuids))
    if (names is not None):
        filter_list.append(EvaluationSetting.name.in_(names))
    def query(sess):
        return sess.query(EvaluationSetting).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)

def get_raw_inputs(self, uuids=None, *, session=None, load_parents=True, load_evaluations=False, show_hidden=False):
    """List RawInputs with optional uuid filter and eager loads."""
    cur_options = []
    if load_parents:
        cur_options.append(sqla.orm.subqueryload(RawInput.setting).subqueryload(EvaluationSetting.dataset))
    if load_evaluations:
        cur_options.append(sqla.orm.subqueryload(RawInput.evaluations).subqueryload(Evaluation.checkpoint).subqueryload(Checkpoint.model))
    filter_list = []
    if (not show_hidden):
        filter_list.append((RawInput.hidden == False))
    if (uuids is not None):
        filter_list.append(RawInput.uuid.in_(uuids))
    def query(sess):
        return sess.query(RawInput).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)
def get_evaluations(self, uuids=None, *, session=None, load_parents=True, load_chunks=True, show_hidden=False):
    """List Evaluations with optional uuid filter; parents and chunks eager-loaded by default."""
    cur_options = []
    if load_parents:
        cur_options.append(sqla.orm.subqueryload(Evaluation.checkpoint).subqueryload(Checkpoint.model))
        cur_options.append(sqla.orm.subqueryload(Evaluation.raw_input))
        cur_options.append(sqla.orm.subqueryload(Evaluation.setting).subqueryload(EvaluationSetting.dataset))
    if load_chunks:
        cur_options.append(sqla.orm.subqueryload(Evaluation.chunks))
    filter_list = []
    if (not show_hidden):
        filter_list.append((Evaluation.hidden == False))
    if (uuids is not None):
        filter_list.append(Evaluation.uuid.in_(uuids))
    def query(sess):
        return sess.query(Evaluation).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)

def get_evaluation_chunks(self, *, uuids=None, session=None, load_parents=False, show_hidden=False):
    """List EvaluationChunks with optional uuid filter."""
    cur_options = []
    if load_parents:
        cur_options.append(sqla.orm.subqueryload(EvaluationChunk.evaluation))
    filter_list = []
    if (not show_hidden):
        filter_list.append((EvaluationChunk.hidden == False))
    if (uuids is not None):
        filter_list.append(EvaluationChunk.uuid.in_(uuids))
    def query(sess):
        return sess.query(EvaluationChunk).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)
def get_models(self, *, uuids=None, names=None, session=None, load_parents=True, load_final_checkpoint=True, load_all_checkpoints=False, load_evaluations=False, show_hidden=False):
    """List Models with optional uuid/name filters and checkpoint/evaluation eager loads."""
    cur_options = []
    checkpoint_nodes = []
    if load_final_checkpoint:
        cur_options.append(sqla.orm.subqueryload(Model.final_checkpoint))
        checkpoint_nodes.append(cur_options[(- 1)])
    if load_all_checkpoints:
        cur_options.append(sqla.orm.subqueryload(Model.checkpoints))
        checkpoint_nodes.append(cur_options[(- 1)])
    if load_evaluations:
        for opt in checkpoint_nodes:
            # NOTE(review): the return value of subqueryload is discarded here;
            # verify this actually extends the loader path in-place on the
            # SQLAlchemy version in use, rather than being a no-op.
            opt.subqueryload(Checkpoint.evaluations)
    filter_list = []
    if (not show_hidden):
        filter_list.append((Model.hidden == False))
    if (uuids is not None):
        filter_list.append(Model.uuid.in_(uuids))
    if (names is not None):
        filter_list.append(Model.name.in_(names))
    def query(sess):
        return sess.query(Model).options(cur_options).filter(*filter_list).all()
    return self.run_query_with_optional_session(query, session)
def create_model(self, extra_info=None, name=None, description=None, verbose=False, completed=False):
    """Insert a new Model row (fresh short uuid, current user) and return it re-fetched."""
    with self.session_scope() as session:
        new_id = self.gen_short_uuid()
        username = getpass.getuser()
        new_model = Model(uuid=new_id, name=name, description=description, username=username, extra_info=extra_info, hidden=False, completed=completed, logdir_filepaths={}, final_checkpoint_uuid=None)
        session.add(new_model)
    # Re-fetch outside the insert session so the caller gets a committed row.
    return self.get_model(uuid=new_id, assert_exists=True)

def rename_model(self, model_uuid, new_name):
    """Rename a model; returns the previous name."""
    with self.session_scope() as session:
        model = self.get_model(uuid=model_uuid, session=session, assert_exists=True)
        old_name = model.name
        model.name = new_name
        return old_name

def hide_model(self, model_uuid):
    """Soft-delete a model by setting its hidden flag."""
    with self.session_scope() as session:
        model = self.get_model(uuid=model_uuid, session=session, assert_exists=True)
        model.hidden = True
def get_latest_model_checkpoint_data(self, model_uuid, verbose=False, allow_non_final_checkpoint=True):
    """Return (checkpoint_bytes_or_None, checkpoint_row) for a model's newest checkpoint.

    Returns (None, None) when the model has no checkpoints. With
    allow_non_final_checkpoint=False, the model's designated final checkpoint
    is required instead of the highest-training_step one.
    """
    with self.session_scope() as session:
        model = self.get_model(uuid=model_uuid, session=session, assert_exists=True)
        if (len(model.checkpoints) == 0):
            return (None, None)
        if allow_non_final_checkpoint:
            # NOTE(review): sort key assumes training_step is never None — confirm.
            cur_checkpoints = sorted(model.checkpoints, key=(lambda x: x.training_step))
            checkpoint_to_load = cur_checkpoints[(- 1)]
        else:
            assert (model.final_checkpoint is not None)
            checkpoint_to_load = model.final_checkpoint
        checkpoint_uuid = checkpoint_to_load.uuid
        key = get_checkpoint_data_key(checkpoint_uuid)
        # Weights blob may legitimately be absent (metadata-only checkpoints).
        if self.s3wrapper.exists(key):
            data = self.s3wrapper.get(key, verbose=verbose)
        else:
            data = None
        return (data, checkpoint_to_load)
def mark_model_as_completed(self, model_uuid):
    """Flag a model's training as completed."""
    with self.session_scope() as session:
        model = self.get_model(uuid=model_uuid, session=session, assert_exists=True)
        model.completed = True

def set_final_model_checkpoint(self, model_uuid, checkpoint_uuid):
    """Designate one of the model's own checkpoints as its final checkpoint."""
    with self.session_scope() as session:
        model = self.get_model(uuid=model_uuid, session=session, assert_exists=True)
        checkpoint = self.get_checkpoint(checkpoint_uuid, session=session, assert_exists=True)
        # Guard against pointing at a checkpoint belonging to a different model.
        assert (checkpoint.model_uuid == model_uuid)
        model.final_checkpoint_uuid = checkpoint_uuid
def store_logdir(self, model_uuid, logdir, verbose=False):
    """Upload every file under `logdir` to S3 and record the file inventory on the model.

    Note: all file contents are read into memory before the batch upload.
    """
    with self.session_scope() as session:
        model = self.get_model(uuid=model_uuid, session=session, assert_exists=True)
        logdir_path = pathlib.Path(logdir).resolve()
        assert logdir_path.is_dir()
        tmp_filepaths = [x for x in logdir_path.glob('**/*') if x.is_file()]
        all_data = {}
        base_key = (get_logdir_key(model_uuid) + '/')
        cur_logdir_files = {}
        for cur_filepath in tmp_filepaths:
            cur_filepath_resolved = cur_filepath.resolve()
            with open(cur_filepath_resolved, 'rb') as f:
                cur_data = f.read()
            # Keys are paths relative to the logdir root; duplicates are a bug.
            cur_relative_path = str(cur_filepath.relative_to(logdir_path))
            assert (cur_relative_path not in cur_logdir_files)
            cur_logdir_files[cur_relative_path] = {'size': cur_filepath_resolved.stat().st_size, 'mtime': cur_filepath_resolved.stat().st_mtime}
            cur_key = (base_key + cur_relative_path)
            all_data[cur_key] = cur_data
        self.s3wrapper.put_multiple(all_data, verbose=verbose)
        model.logdir_filepaths = cur_logdir_files
        # JSON column mutation: tell SQLAlchemy the value changed so it persists.
        sqla.orm.attributes.flag_modified(model, 'logdir_filepaths')
def create_checkpoint(self, *, model_uuid, training_step=None, epoch=None, name=None, data_bytes=None, extra_info=None, verbose=False):
    """Insert a Checkpoint row for a model, optionally uploading its weight bytes to S3."""
    with self.session_scope() as session:
        assert self.model_uuid_exists(model_uuid, session=session)
        new_id = self.gen_checkpoint_uuid()
        username = getpass.getuser()
        new_checkpoint = Checkpoint(uuid=new_id, model_uuid=model_uuid, username=username, extra_info=extra_info, name=name, training_step=training_step, epoch=epoch, hidden=False)
        if (data_bytes is not None):
            key = get_checkpoint_data_key(new_id)
            self.s3wrapper.put(data_bytes, key, verbose=verbose)
        session.add(new_checkpoint)
    return self.get_checkpoint(uuid=new_id, assert_exists=True)

def get_checkpoint_data(self, checkpoint_uuid, verbose=False):
    """Return a checkpoint's weight bytes from S3, or None if no blob was stored."""
    with self.session_scope() as session:
        assert self.checkpoint_uuid_exists(checkpoint_uuid, session=session)
        key = get_checkpoint_data_key(checkpoint_uuid)
        if self.s3wrapper.exists(key):
            return self.s3wrapper.get(key, verbose=verbose)
        else:
            return None
def create_evaluation(self, *, checkpoint_uuid, setting_uuid, name=None, logits_data_bytes=None, extra_data_bytes=None, raw_input_uuid=None, extra_info=None, completed=False, verbose=False):
    """Insert an Evaluation row linking a checkpoint and setting, uploading optional blobs."""
    with self.session_scope() as session:
        # Validate all foreign keys before inserting.
        assert self.checkpoint_uuid_exists(checkpoint_uuid, session=session)
        assert self.evaluation_setting_uuid_exists(setting_uuid, session=session)
        if (raw_input_uuid is not None):
            assert self.raw_input_uuid_exists(raw_input_uuid, session=session)
        new_id = self.gen_short_uuid()
        username = getpass.getuser()
        new_evaluation = Evaluation(uuid=new_id, checkpoint_uuid=checkpoint_uuid, setting_uuid=setting_uuid, raw_input_uuid=raw_input_uuid, username=username, extra_info=extra_info, name=name, completed=completed, hidden=False)
        if (extra_data_bytes is not None):
            key = get_evaluation_extra_data_key(new_id)
            self.s3wrapper.put(extra_data_bytes, key, verbose=verbose)
        if (logits_data_bytes is not None):
            key = get_evaluation_logits_data_key(new_id)
            self.s3wrapper.put(logits_data_bytes, key, verbose=verbose)
        session.add(new_evaluation)
    return self.get_evaluation(uuid=new_id, assert_exists=True)
def hide_evaluation(self, evaluation_uuid):
    """Soft-delete an evaluation by setting its hidden flag."""
    with self.session_scope() as session:
        target = self.get_evaluation(evaluation_uuid, session=session, assert_exists=True)
        target.hidden = True
def rename_evaluation(self, evaluation_uuid, new_name):
    """Rename an evaluation; returns the previous name."""
    with self.session_scope() as session:
        target = self.get_evaluation(evaluation_uuid, session=session, assert_exists=True)
        previous_name = target.name
        target.name = new_name
        return previous_name
def mark_evaluation_as_completed(self, evaluation_uuid):
    """Set the completed flag on an existing evaluation."""
    with self.session_scope() as session:
        target = self.get_evaluation(uuid=evaluation_uuid, session=session, assert_exists=True)
        target.completed = True
def get_evaluation_extra_data(self, evaluation_uuid, verbose=False):
    """Return the evaluation's extra-data blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.evaluation_uuid_exists(evaluation_uuid, session=session)
        data_key = get_evaluation_extra_data_key(evaluation_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def get_evaluation_logits_data(self, evaluation_uuid, verbose=False):
    """Return the evaluation's logits blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.evaluation_uuid_exists(evaluation_uuid, session=session)
        data_key = get_evaluation_logits_data_key(evaluation_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def has_evaluation_logits_data(self, evaluation_uuid):
    """Return True iff a logits blob is stored in S3 for this evaluation."""
    with self.session_scope() as session:
        assert self.evaluation_uuid_exists(evaluation_uuid, session=session)
        return self.s3wrapper.exists(get_evaluation_logits_data_key(evaluation_uuid))
def put_evaluation_extra_data(self, evaluation_uuid, extra_data_bytes, verbose=False):
    """Upload (or overwrite) the extra-data blob for an existing evaluation."""
    with self.session_scope() as session:
        assert self.evaluation_uuid_exists(evaluation_uuid, session=session)
        self.s3wrapper.put(extra_data_bytes, get_evaluation_extra_data_key(evaluation_uuid), verbose=verbose)
def put_evaluation_logits_data(self, evaluation_uuid, logits_data_bytes, verbose=False):
    """Upload (or overwrite) the logits blob for an existing evaluation."""
    with self.session_scope() as session:
        assert self.evaluation_uuid_exists(evaluation_uuid, session=session)
        self.s3wrapper.put(logits_data_bytes, get_evaluation_logits_data_key(evaluation_uuid), verbose=verbose)
def create_dataset(self, *, name, size, description=None, data_bytes=None, data_filename=None, extra_info=None, verbose=False):
    """Create a Dataset row and upload its data to S3; return the new dataset.

    Exactly one of data_bytes / data_filename must be provided: bytes are
    uploaded directly, a filename is streamed via a file upload.
    """
    assert (name is not None)
    assert (size is not None)
    assert (type(size) is int)
    # Exactly one data source: not both, not neither.
    assert ((data_bytes is None) or (data_filename is None))
    assert ((data_bytes is not None) or (data_filename is not None))
    with self.session_scope() as session:
        new_id = self.gen_short_uuid()
        username = getpass.getuser()
        new_dataset = Dataset(uuid=new_id, name=name, description=description, username=username, size=size, extra_info=extra_info, hidden=False)
        key = get_dataset_data_key(new_id)
        if (data_bytes is not None):
            self.s3wrapper.put(data_bytes, key, verbose=verbose)
        else:
            assert (data_filename is not None)
            self.s3wrapper.upload_file(data_filename, key, verbose=verbose)
        session.add(new_dataset)
    # Re-fetch after commit so callers get a persisted object.
    return self.get_dataset(uuid=new_id, assert_exists=True)
def get_dataset_data(self, dataset_uuid, verbose=False):
    """Return the dataset's data blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.dataset_uuid_exists(dataset_uuid, session=session)
        data_key = get_dataset_data_key(dataset_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def download_dataset_data(self, dataset_uuid, target_filename, verbose=False):
    """Download the dataset blob to target_filename; no-op when no blob exists."""
    with self.session_scope() as session:
        assert self.dataset_uuid_exists(dataset_uuid, session=session)
        data_key = get_dataset_data_key(dataset_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.download_file(data_key, target_filename, verbose=verbose)
def rename_dataset(self, dataset_uuid, new_name):
    """Rename a dataset; returns the previous name."""
    with self.session_scope() as session:
        target = self.get_dataset(uuid=dataset_uuid, session=session, assert_exists=True)
        previous_name = target.name
        target.name = new_name
        return previous_name
def hide_dataset(self, dataset_uuid):
    """Soft-delete a dataset by setting its hidden flag."""
    with self.session_scope() as session:
        target = self.get_dataset(uuid=dataset_uuid, session=session, assert_exists=True)
        target.hidden = True
def create_evaluation_setting(self, *, name, dataset_uuid=None, description=None, extra_info=None, processed_dataset_bytes=None, processed_dataset_filename=None, extra_data_bytes=None, verbose=False):
    """Create an EvaluationSetting row, optionally linked to a dataset.

    At most one of processed_dataset_bytes / processed_dataset_filename may be
    given; either one is stored under the setting's processed-dataset S3 key.
    Returns the newly persisted setting.
    """
    assert (name is not None)
    assert ((processed_dataset_filename is None) or (processed_dataset_bytes is None))
    with self.session_scope() as session:
        if (dataset_uuid is not None):
            assert self.dataset_uuid_exists(dataset_uuid, session=session)
        new_id = self.gen_short_uuid()
        username = getpass.getuser()
        new_setting = EvaluationSetting(uuid=new_id, name=name, description=description, username=username, dataset_uuid=dataset_uuid, extra_info=extra_info, hidden=False)
        if (extra_data_bytes is not None):
            key = get_evaluation_setting_extra_data_key(new_id)
            self.s3wrapper.put(extra_data_bytes, key, verbose=verbose)
        key = get_evaluation_setting_processed_dataset_key(new_id)
        if (processed_dataset_bytes is not None):
            self.s3wrapper.put(processed_dataset_bytes, key, verbose=verbose)
        elif (processed_dataset_filename is not None):
            self.s3wrapper.upload_file(processed_dataset_filename, key, verbose=verbose)
        session.add(new_setting)
    # Re-fetch after commit so callers get a persisted object.
    return self.get_evaluation_setting(uuid=new_id, assert_exists=True)
def hide_evaluation_setting(self, evaluation_setting_uuid):
    """Soft-delete an evaluation setting by setting its hidden flag."""
    with self.session_scope() as session:
        target = self.get_evaluation_setting(uuid=evaluation_setting_uuid, session=session, assert_exists=True)
        target.hidden = True
def rename_evaluation_setting(self, evaluation_setting_uuid, new_name):
    """Rename an evaluation setting; returns the previous name."""
    with self.session_scope() as session:
        target = self.get_evaluation_setting(uuid=evaluation_setting_uuid, session=session, assert_exists=True)
        previous_name = target.name
        target.name = new_name
        return previous_name
def get_evaluation_setting_extra_data(self, evaluation_setting_uuid, verbose=False):
    """Return the setting's extra-data blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session)
        data_key = get_evaluation_setting_extra_data_key(evaluation_setting_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def download_evaluation_setting_processed_dataset_data(self, evaluation_setting_uuid, target_filename, verbose=False):
    """Download the setting's processed-dataset blob; no-op when absent."""
    with self.session_scope() as session:
        assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session)
        data_key = get_evaluation_setting_processed_dataset_key(evaluation_setting_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.download_file(data_key, target_filename, verbose=verbose)
def get_evaluation_setting_processed_dataset_data(self, evaluation_setting_uuid, verbose=False):
    """Return the setting's processed-dataset blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session)
        data_key = get_evaluation_setting_processed_dataset_key(evaluation_setting_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def create_raw_input(self, *, name, evaluation_setting_uuid, data_shape, data_format, description=None, extra_info=None, data_bytes=None, data_filename=None, verbose=False):
    """Create a RawInput row tied to an evaluation setting and upload its data.

    Exactly one of data_bytes / data_filename must be provided.
    data_shape is a list of ints; data_format is one of
    'float32', 'float64', 'uint8'. Returns the newly persisted raw input.
    """
    assert (name is not None)
    assert (evaluation_setting_uuid is not None)
    # Exactly one data source: not both, not neither.
    assert ((data_bytes is None) or (data_filename is None))
    assert ((data_bytes is not None) or (data_filename is not None))
    assert (data_format in ['float32', 'float64', 'uint8'])
    assert (type(data_shape) is list)
    for x in data_shape:
        assert (type(x) is int)
    with self.session_scope() as session:
        assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session)
        new_id = self.gen_short_uuid()
        username = getpass.getuser()
        new_raw_input = RawInput(uuid=new_id, name=name, description=description, username=username, data_shape=data_shape, data_format=data_format, setting_uuid=evaluation_setting_uuid, extra_info=extra_info, hidden=False)
        key = get_raw_input_data_key(new_id)
        if (data_bytes is not None):
            self.s3wrapper.put(data_bytes, key, verbose=verbose)
        else:
            assert (data_filename is not None)
            self.s3wrapper.upload_file(data_filename, key, verbose=verbose)
        session.add(new_raw_input)
    # Re-fetch after commit so callers get a persisted object.
    return self.get_raw_input(uuid=new_id, assert_exists=True)
def hide_raw_input(self, raw_input_uuid):
    """Soft-delete a raw input by setting its hidden flag."""
    with self.session_scope() as session:
        target = self.get_raw_input(raw_input_uuid, session=session, assert_exists=True)
        target.hidden = True
def rename_raw_input(self, raw_input_uuid, new_name):
    """Rename a raw input; returns the previous name."""
    with self.session_scope() as session:
        target = self.get_raw_input(raw_input_uuid, session=session, assert_exists=True)
        previous_name = target.name
        target.name = new_name
        return previous_name
def download_raw_input_data(self, raw_input_uuid, target_filename, verbose=False):
    """Download the raw-input blob to target_filename; no-op when absent."""
    with self.session_scope() as session:
        assert self.raw_input_uuid_exists(raw_input_uuid, session=session)
        data_key = get_raw_input_data_key(raw_input_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.download_file(data_key, target_filename, verbose=verbose)
def get_raw_input_data(self, raw_input_uuid, verbose=False):
    """Return the raw-input blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.raw_input_uuid_exists(raw_input_uuid, session=session)
        data_key = get_raw_input_data_key(raw_input_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def create_evaluation_chunk(self, *, evaluation_uuid, logits_data_bytes=None, indices=None, indices_bytes=None, extra_data_bytes=None, extra_info=None, verbose=False):
    """Create an EvaluationChunk (a partial evaluation result) and return it.

    Indices may be given either as a Python object (pickled here) or as
    pre-pickled bytes, but not both. Logits/extra/indices blobs are uploaded
    to S3 before the row is added.
    """
    if (indices is not None):
        assert (indices_bytes is None)
        # Serialize the indices object; readers unpickle via
        # get_evaluation_chunk_indices_data(..., unpickle=True).
        indices_bytes = pickle.dumps(indices)
    else:
        assert (indices_bytes is not None)
    with self.session_scope() as session:
        assert self.evaluation_uuid_exists(evaluation_uuid, session=session)
        new_id = self.gen_short_uuid()
        username = getpass.getuser()
        new_chunk = EvaluationChunk(uuid=new_id, evaluation_uuid=evaluation_uuid, username=username, extra_info=extra_info, hidden=False)
        if (extra_data_bytes is not None):
            key = get_evaluation_chunk_extra_data_key(new_id)
            self.s3wrapper.put(extra_data_bytes, key, verbose=verbose)
        if (logits_data_bytes is not None):
            key = get_evaluation_chunk_logits_data_key(new_id)
            self.s3wrapper.put(logits_data_bytes, key, verbose=verbose)
        if (indices_bytes is not None):
            key = get_evaluation_chunk_indices_data_key(new_id)
            self.s3wrapper.put(indices_bytes, key, verbose=verbose)
        session.add(new_chunk)
    # Re-fetch after commit so callers get a persisted object.
    return self.get_evaluation_chunk(uuid=new_id, assert_exists=True)
def hide_evaluation_chunk(self, evaluation_chunk_uuid):
    """Soft-delete an evaluation chunk by setting its hidden flag."""
    with self.session_scope() as session:
        target = self.get_evaluation_chunk(uuid=evaluation_chunk_uuid, session=session, assert_exists=True)
        target.hidden = True
def get_evaluation_chunk_extra_data(self, evaluation_chunk_uuid, verbose=False):
    """Return the chunk's extra-data blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.evaluation_chunk_uuid_exists(evaluation_chunk_uuid, session=session)
        data_key = get_evaluation_chunk_extra_data_key(evaluation_chunk_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def get_evaluation_chunk_logits_data(self, evaluation_chunk_uuid, verbose=False):
    """Return the chunk's logits blob from S3, or None if absent."""
    with self.session_scope() as session:
        assert self.evaluation_chunk_uuid_exists(evaluation_chunk_uuid, session=session)
        data_key = get_evaluation_chunk_logits_data_key(evaluation_chunk_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        return self.s3wrapper.get(data_key, verbose=verbose)
def get_evaluation_chunk_indices_data(self, evaluation_chunk_uuid, verbose=False, unpickle=False):
    """Return the chunk's (pickled) indices blob, optionally unpickled.

    Returns None when no indices blob exists in S3.
    """
    with self.session_scope() as session:
        assert self.evaluation_chunk_uuid_exists(evaluation_chunk_uuid, session=session)
        data_key = get_evaluation_chunk_indices_data_key(evaluation_chunk_uuid)
        if not self.s3wrapper.exists(data_key):
            return None
        raw = self.s3wrapper.get(data_key, verbose=verbose)
        return pickle.loads(raw) if unpickle else raw
|
def tar_directory(dir_name, target_filename):
    """Create a tar archive at target_filename containing dir_name (system `tar`)."""
    cmd = ['tar', '-cf', str(target_filename), str(dir_name)]
    subprocess.run(cmd, check=True)
|
def untar_directory(tar_filename, target_dir, strip=None, one_top_level=False):
    """Extract a tar archive into target_dir, then delete the archive.

    strip: if truthy, passed as GNU tar's --strip=N to drop N leading
    path components.
    one_top_level: adds GNU tar's --one-top-level flag.
    """
    cmd = ['tar', '-xf', str(tar_filename)]
    if strip:
        cmd += [f'--strip={strip}']
    cmd += ['-C', str(target_dir)]
    if one_top_level:
        cmd += ['--one-top-level']
    subprocess.run(cmd, check=True)
    # Delete in-process instead of shelling out to `rm`; this also accepts
    # pathlib.Path arguments, which subprocess's `rm` call handled only by luck.
    pathlib.Path(tar_filename).unlink()
|
def get_s3_client():
    """Build a boto3 S3 client for the vasa MinIO endpoint.

    'rds' mode uses signed credentials (preferring the named AWS profile when
    available locally); 'sqlite' mode uses anonymous unsigned access.
    Returns None when DB_CONNECTION_MODE is neither value.
    """
    if (DB_CONNECTION_MODE == 'rds'):
        if (default_profile in boto3.Session()._session.available_profiles):
            session = boto3.Session(profile_name=default_profile)
        else:
            session = boto3.Session()
        # NOTE(security): access credentials are hard-coded here; consider
        # moving them to environment variables or an AWS credentials profile.
        client = session.client('s3', endpoint_url='https://vasa.millennium.berkeley.edu:9000', aws_access_key_id='robustness-eval', aws_secret_access_key='rtB_HizvjHVl59_HgKjOBYZJZTbXjNRHbIsBEj5D4g4', config=Config(connect_timeout=250, read_timeout=250), verify=(pathlib.Path(__file__).parent / 'vasa_chain.cer').resolve(), region_name='us-east-1')
        return client
    elif (DB_CONNECTION_MODE == 'sqlite'):
        # Anonymous (unsigned) access for read-only sqlite mode.
        client = boto3.client('s3', endpoint_url='https://vasa.millennium.berkeley.edu:9000', config=Config(signature_version=botocore.UNSIGNED), verify=(pathlib.Path(__file__).parent / 'vasa_chain.cer').resolve(), region_name='us-east-1')
        return client
|
def key_exists(bucket, key):
    """Return True iff `key` exists in `bucket`, via a HEAD request.

    A 404 response yields False; any other client error is re-raised.
    """
    client = get_s3_client()
    try:
        client.head_object(Bucket=bucket, Key=key)
        return True
    except botocore.exceptions.ClientError as exc:
        # Only a 404 means "no such key"; anything else is a real failure.
        if (exc.response['Error']['Code'] != '404'):
            raise
        return False
    # Removed a dead trailing `except: raise` clause — unhandled exceptions
    # propagate on their own.
|
def get_s3_object_bytes_parallel(keys, *, bucket, cache_on_local_disk=True, cache_root_path=None, verbose=False, special_verbose=True, max_num_threads=90, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), download_callback=None, skip_modification_time_check=False):
    """Fetch many S3 objects concurrently; returns {key: bytes}.

    With cache_on_local_disk, each key is cached at cache_root_path/key and
    re-downloaded when the remote LastModified timestamp is at least as new
    as the local copy (unless skip_modification_time_check).
    download_callback(1) is invoked once per key as it becomes available.
    """
    if cache_on_local_disk:
        assert (cache_root_path is not None)
        cache_root_path = pathlib.Path(cache_root_path).resolve()
        missing_keys = []
        existing_keys = []
        for key in keys:
            local_filepath = (cache_root_path / key)
            if (not local_filepath.is_file()):
                missing_keys.append(key)
                local_filepath.parent.mkdir(parents=True, exist_ok=True)
            else:
                existing_keys.append(key)
        keys_to_download = missing_keys.copy()
        if skip_modification_time_check:
            if verbose:
                print(f'Skipping the file modification time check for {len(existing_keys)} keys that have local copies.')
            for key in existing_keys:
                if download_callback:
                    download_callback(1)
        else:
            if verbose:
                print(f'Getting metadata for {len(existing_keys)} keys that have local copies ... ', end='')
            metadata_start = timer()
            metadata = get_s3_object_metadata_parallel(existing_keys, bucket=bucket, verbose=False, max_num_threads=max_num_threads, num_tries=num_tries, initial_delay=initial_delay, delay_factor=delay_factor, download_callback=None)
            metadata_end = timer()
            if verbose:
                print(f'took {(metadata_end - metadata_start):.3f} seconds')
            for key in existing_keys:
                local_filepath = (cache_root_path / key)
                # BUG FIX: was `assert local_filepath.is_file` (missing call),
                # which is always truthy and never actually checked anything.
                assert local_filepath.is_file()
                local_time = datetime.datetime.fromtimestamp(local_filepath.stat().st_mtime, datetime.timezone.utc)
                remote_time = metadata[key]['LastModified']
                if (local_time <= remote_time):
                    if verbose:
                        print(f'Local copy of key "{key}" is outdated')
                    keys_to_download.append(key)
                elif download_callback:
                    download_callback(1)
        tl = threading.local()
        def cur_download_file(key):
            # Download one key into the cache; returns True on success.
            local_filepath = (cache_root_path / key)
            if (verbose or special_verbose):
                print('{} not available locally or outdated, downloading from S3 ... '.format(key))
            download_s3_file_with_backoff(key, str(local_filepath), bucket=bucket, num_tries=num_tries, initial_delay=initial_delay, delay_factor=delay_factor, thread_local=tl)
            return local_filepath.is_file()
        if (len(keys_to_download) > 0):
            download_start = timer()
            with concurrent.futures.ThreadPoolExecutor(max_workers=max_num_threads) as executor:
                future_to_key = {executor.submit(cur_download_file, key): key for key in keys_to_download}
                for future in concurrent.futures.as_completed(future_to_key):
                    key = future_to_key[future]
                    try:
                        success = future.result()
                        assert success
                        if download_callback:
                            download_callback(1)
                    except Exception as exc:
                        print('Key {} generated an exception: {}'.format(key, exc))
                        raise exc
            download_end = timer()
            if verbose:
                print('Downloading took {:.3f} seconds'.format((download_end - download_start)))
        # All keys now have fresh local copies; read everything from disk.
        result = {}
        for key in keys:
            local_filepath = (cache_root_path / key)
            if verbose:
                print('Reading from local file {} ... '.format(local_filepath), end='')
            with open(local_filepath, 'rb') as f:
                result[key] = f.read()
            if verbose:
                print('done')
    else:
        # No disk cache: stream every object's bytes straight into memory.
        tl = threading.local()
        def cur_get_object_bytes(key):
            if verbose:
                print('Loading {} from S3 ... '.format(key))
            return get_s3_object_bytes_with_backoff(key, bucket=bucket, num_tries=num_tries, initial_delay=initial_delay, delay_factor=delay_factor, thread_local=tl)[0]
        download_start = timer()
        result = {}
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_num_threads) as executor:
            future_to_key = {executor.submit(cur_get_object_bytes, key): key for key in keys}
            for future in concurrent.futures.as_completed(future_to_key):
                key = future_to_key[future]
                try:
                    result[key] = future.result()
                    if download_callback:
                        download_callback(1)
                except Exception as exc:
                    print('Key {} generated an exception: {}'.format(key, exc))
                    raise exc
        download_end = timer()
        if verbose:
            print('Getting object bytes took {} seconds'.format((download_end - download_start)))
    return result
|
def get_s3_object_bytes_with_backoff(key, *, bucket, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), num_replicas=1, thread_local=None):
    """Download an object's bytes with exponential backoff.

    If num_replicas > 1, a random replica suffix is appended to the key on
    each attempt. A per-thread client is cached on `thread_local` when
    provided. Returns (bytes, actual_key_used); raises after num_tries
    consecutive failures.
    """
    if (thread_local is None):
        client = get_s3_client()
    else:
        if (not hasattr(thread_local, 'get_object_client')):
            thread_local.get_object_client = get_s3_client()
        client = thread_local.get_object_client
    delay = initial_delay
    num_tries_left = num_tries
    if (num_replicas > 1):
        replicas_counter_len = len(str(num_replicas))
        format_string = '_replica{{:0{}d}}-{{}}'.format(replicas_counter_len)
    while (num_tries_left >= 1):
        try:
            if (num_replicas > 1):
                cur_replica = random.randint(1, num_replicas)
                cur_key = (key + format_string.format(cur_replica, num_replicas))
            else:
                cur_key = key
            read_bytes = client.get_object(Key=cur_key, Bucket=bucket)['Body'].read()
            return (read_bytes, cur_key)
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate, and the original error is chained for debugging.
        except Exception as exc:
            if (num_tries_left == 1):
                raise Exception(((('get backoff failed ' + key) + ' ') + str(delay))) from exc
            else:
                time.sleep(delay)
                delay *= delay_factor
                num_tries_left -= 1
|
def get_s3_object_metadata_with_backoff(key, *, bucket, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), thread_local=None):
    """HEAD an object with exponential backoff; returns the metadata dict.

    A per-thread client is cached on `thread_local` when provided.
    Raises after num_tries consecutive failures.
    """
    if (thread_local is None):
        client = get_s3_client()
    else:
        if (not hasattr(thread_local, 'get_object_client')):
            thread_local.get_object_client = get_s3_client()
        client = thread_local.get_object_client
    delay = initial_delay
    num_tries_left = num_tries
    while (num_tries_left >= 1):
        try:
            metadata = client.head_object(Key=key, Bucket=bucket)
            return metadata
        # Was a bare `except:`; narrowed and chained (see get_s3_object_bytes_with_backoff).
        except Exception as exc:
            if (num_tries_left == 1):
                raise Exception(((('get backoff failed ' + key) + ' ') + str(delay))) from exc
            else:
                time.sleep(delay)
                delay *= delay_factor
                num_tries_left -= 1
|
def get_s3_object_metadata_parallel(keys, bucket, verbose=False, max_num_threads=20, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), download_callback=None):
    """Fetch S3 HEAD metadata for many keys concurrently; returns {key: metadata}."""
    thread_state = threading.local()
    def fetch_one(cur_key):
        if verbose:
            print('Loading metadata for {} from S3 ... '.format(cur_key))
        return get_s3_object_metadata_with_backoff(cur_key, bucket=bucket, num_tries=num_tries, initial_delay=initial_delay, delay_factor=delay_factor, thread_local=thread_state)
    start_time = timer()
    metadata_by_key = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_num_threads) as pool:
        pending = {pool.submit(fetch_one, k): k for k in keys}
        for completed in concurrent.futures.as_completed(pending):
            cur_key = pending[completed]
            try:
                metadata_by_key[cur_key] = completed.result()
                if download_callback:
                    download_callback(1)
            except Exception as exc:
                print('Key {} generated an exception: {}'.format(cur_key, exc))
                raise exc
    end_time = timer()
    if verbose:
        print('Getting object metadata took {} seconds'.format(end_time - start_time))
    return metadata_by_key
|
def put_s3_object_bytes_with_backoff(file_bytes, key, bucket, num_tries=10, initial_delay=1.0, delay_factor=2.0):
    """Upload a bytes payload under `key` with exponential backoff.

    Raises after num_tries consecutive failures.
    """
    client = get_s3_client()
    delay = initial_delay
    num_tries_left = num_tries
    while (num_tries_left >= 1):
        try:
            # Fresh BytesIO each attempt: upload_fileobj consumes the stream.
            bio = io.BytesIO(file_bytes)
            client.upload_fileobj(bio, Key=key, Bucket=bucket, ExtraArgs={'ACL': 'bucket-owner-full-control'})
            return
        # Was a bare `except:`; narrowed and chained for debugging.
        except Exception as exc:
            if (num_tries_left == 1):
                print(('put backoff failed' + key))
                raise Exception(((((('put backoff failed ' + key) + ' ') + str(len(file_bytes))) + ' ') + str(delay))) from exc
            else:
                time.sleep(delay)
                delay *= delay_factor
                num_tries_left -= 1
|
def list_all_keys(client, bucket, prefix, max_keys=None):
    """List object keys under `prefix`, following S3 pagination markers.

    When max_keys is set, the result may contain MORE than max_keys entries:
    the cutoff is only checked between pages. Empty keys are filtered out.

    NOTE(review): `Delimiter=prefix` looks suspicious — S3 delimiters are
    normally '/' (or omitted). Confirm this is intentional before changing.
    """
    objects = client.list_objects(Bucket=bucket, Prefix=prefix, Delimiter=prefix)
    if (objects.get('Contents') == None):
        return []
    keys = list(map((lambda x: x['Key']), objects.get('Contents', [])))
    truncated = objects['IsTruncated']
    next_marker = objects.get('NextMarker')
    # Keep requesting pages until S3 reports the listing is complete.
    while truncated:
        objects = client.list_objects(Bucket=bucket, Prefix=prefix, Delimiter=prefix, Marker=next_marker)
        truncated = objects['IsTruncated']
        next_marker = objects.get('NextMarker')
        keys += list(map((lambda x: x['Key']), objects['Contents']))
        if ((max_keys is not None) and (len(keys) >= max_keys)):
            break
    return list(filter((lambda x: (len(x) > 0)), keys))
|
def download_s3_file_with_caching(key, local_filename, *, bucket, cache_on_local_disk=True, cache_root_path=None, verbose=False, special_verbose=True, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), num_replicas=1, skip_modification_time_check=False):
    """Download one S3 object to local_filename, optionally via a disk cache.

    With cache_on_local_disk, the object lives at cache_root_path/key; the
    cached copy is reused only when its mtime is strictly newer than the
    remote LastModified (unless skip_modification_time_check), then copied
    to local_filename. Without caching, the file is downloaded directly.
    """
    if cache_on_local_disk:
        assert (cache_root_path is not None)
        cache_root_path = pathlib.Path(cache_root_path).resolve()
        currently_cached = False
        cache_filepath = (cache_root_path / key)
        if (not cache_filepath.is_file()):
            cache_filepath.parent.mkdir(parents=True, exist_ok=True)
        elif skip_modification_time_check:
            if verbose:
                print(f'Skipping the file modification time check the local copy in the cache.')
            currently_cached = True
        else:
            if verbose:
                print(f'Getting metadata to check the modification time compared to the local copy ... ', end='')
            metadata_start = timer()
            metadata = get_s3_object_metadata_with_backoff(key, bucket=bucket, num_tries=num_tries, initial_delay=initial_delay, delay_factor=delay_factor)
            metadata_end = timer()
            if verbose:
                print(f'took {(metadata_end - metadata_start):.3f} seconds')
            # Compare timezone-aware timestamps; remote >= local means stale.
            local_time = datetime.datetime.fromtimestamp(cache_filepath.stat().st_mtime, datetime.timezone.utc)
            remote_time = metadata['LastModified']
            if (local_time <= remote_time):
                if verbose:
                    print(f'Local copy of key "{key}" is outdated')
            else:
                currently_cached = True
        if (not currently_cached):
            if (verbose or special_verbose):
                print('{} not available locally or outdated, downloading from S3 ... '.format(key))
            download_start = timer()
            download_s3_file_with_backoff(key, str(cache_filepath), bucket=bucket, initial_delay=initial_delay, delay_factor=delay_factor, num_replicas=num_replicas)
            download_end = timer()
            if verbose:
                print('Downloading took {:.3f} seconds'.format((download_end - download_start)))
        assert cache_filepath.is_file()
        if verbose:
            print(f'Copying to the target from the cache file {cache_filepath} ...')
        shutil.copy(cache_filepath, local_filename)
    else:
        # No cache: download straight to the requested destination.
        if verbose:
            print('Loading {} from S3 ... '.format(key))
        download_start = timer()
        download_s3_file_with_backoff(key, local_filename, bucket=bucket, initial_delay=initial_delay, delay_factor=delay_factor, num_replicas=num_replicas)
        download_end = timer()
        if verbose:
            print('Downloading took {:.3f} seconds'.format((download_end - download_start)))
|
def download_s3_file_with_backoff(key, local_filename, *, bucket, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), num_replicas=1, thread_local=None):
    """Download an object to local_filename with exponential backoff.

    If num_replicas > 1, a random replica suffix is appended on each attempt.
    A per-thread client is cached on `thread_local` when provided.
    Returns the key that was actually downloaded; raises after num_tries failures.
    """
    if (thread_local is None):
        client = get_s3_client()
    else:
        if (not hasattr(thread_local, 's3_client')):
            thread_local.s3_client = get_s3_client()
        client = thread_local.s3_client
    delay = initial_delay
    num_tries_left = num_tries
    if (num_replicas > 1):
        replicas_counter_len = len(str(num_replicas))
        format_string = '_replica{{:0{}d}}-{{}}'.format(replicas_counter_len)
    while (num_tries_left >= 1):
        try:
            if (num_replicas > 1):
                cur_replica = random.randint(1, num_replicas)
                cur_key = (key + format_string.format(cur_replica, num_replicas))
            else:
                cur_key = key
            client.download_file(bucket, cur_key, local_filename)
            return cur_key
        # Was a bare `except:`; narrowed and chained for debugging.
        except Exception as exc:
            if (num_tries_left == 1):
                raise Exception((((('download backoff failed ' + ' ') + str(key)) + ' ') + str(delay))) from exc
            else:
                time.sleep(delay)
                delay *= delay_factor
                num_tries_left -= 1
|
def upload_file_to_s3_with_backoff(local_filename, key, *, bucket, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), thread_local=None):
    """Upload a local file under `key` with exponential backoff.

    A per-thread client is cached on `thread_local` when provided.
    Raises after num_tries consecutive failures.
    """
    assert pathlib.Path(local_filename).is_file()
    if (thread_local is None):
        client = get_s3_client()
    else:
        if (not hasattr(thread_local, 's3_client')):
            thread_local.s3_client = get_s3_client()
        client = thread_local.s3_client
    delay = initial_delay
    num_tries_left = num_tries
    while (num_tries_left >= 1):
        try:
            client.upload_file(local_filename, bucket, key, ExtraArgs={'ACL': 'bucket-owner-full-control'})
            return
        # Was a bare `except:`; narrowed and chained for debugging.
        except Exception as exc:
            if (num_tries_left == 1):
                raise Exception((((('upload backoff failed ' + ' ') + str(key)) + ' ') + str(delay))) from exc
            else:
                time.sleep(delay)
                delay *= delay_factor
                num_tries_left -= 1
|
def default_option_if_needed(*, user_option, default):
    """Return user_option unless it is None, in which case return default."""
    return default if user_option is None else user_option
|
class S3Wrapper():
    """Convenience wrapper around the S3 helpers with shared retry/backoff
    defaults and optional local-disk caching of downloaded objects."""

    def __init__(self, bucket, cache_on_local_disk=True, cache_root_path=default_cache_root_path, verbose=False, max_num_threads=90, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), skip_modification_time_check=False):
        self.bucket = bucket
        self.cache_on_local_disk = cache_on_local_disk
        self.client = get_s3_client()
        if self.cache_on_local_disk:
            assert (cache_root_path is not None)
            self.cache_root_path = pathlib.Path(cache_root_path).resolve()
            self.cache_root_path.mkdir(parents=True, exist_ok=True)
            assert self.cache_root_path.is_dir()
        else:
            self.cache_root_path = None
        self.verbose = verbose
        self.max_num_threads = max_num_threads
        self.num_tries = num_tries
        self.initial_delay = initial_delay
        self.delay_factor = delay_factor
        self.skip_modification_time_check = skip_modification_time_check

    def list_keys(self, prefix, max_keys=None):
        """List keys under prefix (may return slightly more than max_keys)."""
        return list_all_keys(self.client, self.bucket, prefix, max_keys)

    def put(self, bytes_to_store, key, verbose=None):
        """Store a bytes payload under key with backoff."""
        cur_verbose = default_option_if_needed(user_option=verbose, default=self.verbose)
        put_s3_object_bytes_with_backoff(bytes_to_store, key, bucket=self.bucket, num_tries=self.num_tries, initial_delay=self.initial_delay, delay_factor=self.delay_factor)
        if cur_verbose:
            print('Stored {} bytes under key {}'.format(len(bytes_to_store), key))

    def put_multiple(self, data, verbose=None, callback=None):
        """Store each {key: bytes} entry sequentially.

        NOTE(review): `callback` is accepted but never invoked; kept for
        interface compatibility — confirm whether per-item callbacks are needed.
        """
        for (key, bytes_to_store) in data.items():
            self.put(bytes_to_store, key, verbose)

    def upload_file(self, filename, key, verbose=None):
        """Upload a local file under key with backoff."""
        # Removed an unused `cur_verbose` computation (dead code).
        upload_file_to_s3_with_backoff(filename, key, bucket=self.bucket, num_tries=self.num_tries, initial_delay=self.initial_delay, delay_factor=self.delay_factor, thread_local=None)

    def download_file(self, key, filename, verbose=None, skip_modification_time_check=None):
        """Download key to filename, using the local cache when enabled."""
        cur_verbose = default_option_if_needed(user_option=verbose, default=self.verbose)
        cur_skip_time_check = default_option_if_needed(user_option=skip_modification_time_check, default=self.skip_modification_time_check)
        download_s3_file_with_caching(key, filename, bucket=self.bucket, cache_on_local_disk=self.cache_on_local_disk, cache_root_path=self.cache_root_path, num_tries=self.num_tries, initial_delay=self.initial_delay, delay_factor=self.delay_factor, skip_modification_time_check=cur_skip_time_check, verbose=cur_verbose)

    def get(self, key, verbose=None, skip_modification_time_check=None):
        """Return the bytes stored under a single key."""
        return self.get_multiple([key], verbose=verbose, skip_modification_time_check=skip_modification_time_check)[key]

    def get_multiple(self, keys, verbose=None, callback=None, skip_modification_time_check=None):
        """Return {key: bytes} for many keys, fetched in parallel."""
        # Removed a redundant verbose resolution that was immediately
        # overwritten by the default_option_if_needed call below.
        cur_verbose = default_option_if_needed(user_option=verbose, default=self.verbose)
        cur_skip_time_check = default_option_if_needed(user_option=skip_modification_time_check, default=self.skip_modification_time_check)
        return get_s3_object_bytes_parallel(keys, bucket=self.bucket, cache_on_local_disk=self.cache_on_local_disk, cache_root_path=self.cache_root_path, verbose=cur_verbose, max_num_threads=self.max_num_threads, num_tries=self.num_tries, initial_delay=self.initial_delay, delay_factor=self.delay_factor, download_callback=callback, skip_modification_time_check=cur_skip_time_check)

    def exists(self, key):
        """Return True iff key exists in the bucket."""
        return key_exists(self.bucket, key)
|
def evaluation_completed(py_model, py_eval_setting):
    """Return True iff the model's final checkpoint already has a completed
    evaluation for the given eval setting (or its parent setting)."""
    assert (py_model.name in MODEL_NAMES), (f'Model {py_model.name} is not recognized as an existing model in the' + ' server. Did you run the db script?')
    # Fixed message: this assert concerns the eval setting, not the model.
    assert (py_eval_setting.name in EVAL_SETTING_NAMES), (f'Eval setting {py_eval_setting.name} is not recognized as an existing eval setting in the' + ' server. Did you run the db script?')
    checkpoint = m_repo.get_model(name=py_model.name, load_final_checkpoint=True, load_evaluations=True).final_checkpoint
    setting_uuids = [m_repo.get_evaluation_setting(name=py_eval_setting.name).uuid]
    if (py_eval_setting.parent_eval_setting is not None):
        assert (py_eval_setting.parent_eval_setting in EVAL_SETTING_NAMES), (f'Eval setting {py_eval_setting.parent_eval_setting} is not recognized as an existing eval setting in the' + ' server. Did you run the db script?')
        # A completed run under the parent setting also counts.
        setting_uuids += [m_repo.get_evaluation_setting(name=py_eval_setting.parent_eval_setting).uuid]
    for e in m_repo.get_evaluations([x.uuid for x in checkpoint.evaluations]):
        if ((e.setting_uuid in setting_uuids) and e.completed):
            return True
    return False
|
def store_evaluation(py_model, py_eval_setting, metrics, logits):
    """Create a completed evaluation record with metrics and serialized logits."""
    assert (py_model.name in MODEL_NAMES), (f'Model {py_model.name} is not recognized as an existing model in the' + ' server. Did you run the db script?')
    # Fixed message: this assert concerns the eval setting, not the model.
    assert (py_eval_setting.name in EVAL_SETTING_NAMES), (f'Eval setting {py_eval_setting.name} is not recognized as an existing eval setting in the' + ' server. Did you run the db script?')
    bio = io.BytesIO()
    torch.save(logits.cpu(), bio)
    # Renamed from `model_uuid`: this is the model's final *checkpoint* uuid.
    checkpoint_uuid = m_repo.get_model(name=py_model.name).final_checkpoint_uuid
    setting_uuid = m_repo.get_evaluation_setting(name=py_eval_setting.name).uuid
    # Don't bind the result to `eval` (shadows the builtin); it was unused.
    m_repo.create_evaluation(checkpoint_uuid=checkpoint_uuid, setting_uuid=setting_uuid, extra_info=metrics, logits_data_bytes=bio.getvalue(), completed=True)
|
def download_dataset(dataset):
    """Ensure the named dataset is extracted under the local cache; return its dir.

    Downloads the dataset tarball from the repository and untars it with
    dataset-specific strip/one-top-level settings, only when the target
    directory does not already exist.
    """
    dataset = m_repo.get_dataset(name=dataset)
    filedir = join(default_cache_root_path, f'datasets/{dataset.name}')
    if (not exists(filedir)):
        filename = (filedir + '.tar')
        m_repo.download_dataset_data(dataset_uuid=dataset.uuid, target_filename=filename)
        # Per-dataset tar layouts: how many leading path components to strip
        # and whether the archive needs its own top-level directory.
        if ('format-val' in dataset.name):
            (strip, one_top_level) = (2, False)
        elif ('imagenet-c' in dataset.name):
            (strip, one_top_level) = (6, True)
        elif (dataset.name in ['imagenetv2-matched-frequency', 'imagenetv2-topimages', 'imagenetv2-threshold0.7', 'val']):
            (strip, one_top_level) = (3, False)
        else:
            (strip, one_top_level) = (1, True)
        untar_directory(filename, join(default_cache_root_path, 'datasets'), strip=strip, one_top_level=one_top_level)
    return filedir
|
def load_model_checkpoint_bytes(model_name):
    """Return a BytesIO over the model's final checkpoint data."""
    repo_model = m_repo.get_model(name=model_name)
    checkpoint_bytes = m_repo.get_checkpoint_data(repo_model.final_checkpoint_uuid)
    return io.BytesIO(checkpoint_bytes)
|
def load_model_state_dict(model, name):
    """Load the named model's final checkpoint weights into `model` (on CPU).

    Unwraps checkpoints saved as {'state_dict': ...} automatically.
    """
    bio = load_model_checkpoint_bytes(name)
    # Was map_location=f'cpu' — a pointless f-string; plain literal suffices.
    state_dict = torch.load(bio, map_location='cpu')
    if ('state_dict' in state_dict):
        state_dict = state_dict['state_dict']
    model.load_state_dict(state_dict)
|
def add_model_shell(model_name):
    """Create an empty model with a final checkpoint attached (no data)."""
    shell = m_repo.create_model(name=model_name, completed=True)
    shell_checkpoint = m_repo.create_checkpoint(model_uuid=shell.uuid)
    m_repo.set_final_model_checkpoint(shell.uuid, shell_checkpoint.uuid)
|
def hide_rename_model(model_name):
    """Soft-delete a model: hide every evaluation of its final checkpoint,
    rename it with a random `_hidden_*` suffix, then hide the model."""
    model = m_repo.get_model(name=model_name, load_final_checkpoint=True, load_evaluations=True)
    eval_uuids = [ev.uuid for ev in model.final_checkpoint.evaluations]
    for evaluation in m_repo.get_evaluations(eval_uuids):
        m_repo.hide_evaluation(evaluation.uuid)
    # Random suffix avoids name collisions with a later re-created model.
    hidden_name = model_name + f'_hidden_{random.randint(0, 10000)}'
    m_repo.rename_model(model.uuid, hidden_name)
    m_repo.hide_model(model.uuid)
|
def create_eval_setting(eval_setting_name):
    """Create a new, empty evaluation setting in the repo."""
    m_repo.create_evaluation_setting(name=eval_setting_name)
|
def hide_rename_eval_setting(eval_setting_name):
    """Soft-delete an evaluation setting: hide all of its evaluations,
    rename it with a random `_hidden_*` suffix, then hide the setting."""
    setting = m_repo.get_evaluation_setting(name=eval_setting_name, load_evaluations=True)
    eval_uuids = [ev.uuid for ev in setting.evaluations]
    for evaluation in m_repo.get_evaluations(eval_uuids):
        m_repo.hide_evaluation(evaluation.uuid)
    # Random suffix avoids name collisions with a later re-created setting.
    hidden_name = eval_setting_name + f'_hidden_{random.randint(0, 10000)}'
    m_repo.rename_evaluation_setting(setting.uuid, hidden_name)
    m_repo.hide_evaluation_setting(setting.uuid)
|
def rename_model(model_name, new_model_name):
    """Rename an existing model, looked up by its current name."""
    model = m_repo.get_model(name=model_name)
    m_repo.rename_model(model.uuid, new_model_name)
|
def rename_eval_setting(eval_setting_name, new_eval_setting_name):
    """Rename an existing evaluation setting, looked up by its current name."""
    setting = m_repo.get_evaluation_setting(name=eval_setting_name)
    m_repo.rename_evaluation_setting(setting.uuid, new_eval_setting_name)
|
def hide_evaluation(model_name, eval_setting_name):
    """Hide every evaluation matching (model, setting).

    Returns True if at least one evaluation was hidden, else False.
    """
    hid_any = False
    # Scans all evaluations: multiple matches are all hidden, not just the first.
    for evaluation in m_repo.get_evaluations():
        same_model = evaluation.checkpoint.model.name == model_name
        same_setting = evaluation.setting.name == eval_setting_name
        if same_model and same_setting:
            m_repo.hide_evaluation(evaluation.uuid)
            hid_any = True
    return hid_any
|
def get_eval_extra_info(model_name, eval_setting_name):
    """Return `extra_info` of the first *completed* evaluation matching
    (model, setting), or None when no such evaluation exists."""
    for evaluation in m_repo.get_evaluations():
        if (evaluation.checkpoint.model.name == model_name
                and evaluation.setting.name == eval_setting_name
                and evaluation.completed):
            return evaluation.extra_info
    return None
|
def close_db_connection():
    """Dispose of the repository's database connection."""
    m_repo.dispose()
|
def classifier_loader():
    """Build a torchvision ResNet-50 and load the 'resnet50_adv-train-free'
    checkpoint from the repo."""
    net = torch_models.resnet50()
    load_model_state_dict(net, 'resnet50_adv-train-free')
    return net
|
def gen_classifier_loader(name, d):
    """Return a zero-argument loader that builds the `models_lpf`
    architecture described by `d` ('arch', 'filter_size') and loads the
    checkpoint stored under `name`."""
    def classifier_loader():
        arch_ctor = getattr(models_lpf, d['arch'])
        net = arch_ctor(filter_size=d['filter_size'])
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def classifier_loader():
    """Build a torchvision ResNet-50 and load the 'resnet50_augmix'
    checkpoint from the repo."""
    net = torch_models.resnet50()
    load_model_state_dict(net, 'resnet50_augmix')
    return net
|
class StdConv2d(nn.Conv2d):
    """Conv2d whose kernel is standardized before every forward pass:
    each output filter is shifted/scaled to zero mean and unit variance
    over its (in_channels, kH, kW) elements."""

    def forward(self, x):
        var, mean = torch.var_mean(
            self.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
        # Small epsilon guards against division by zero for flat filters.
        std_weight = (self.weight - mean) / torch.sqrt(var + 1e-10)
        return F.conv2d(x, std_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
|
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """Weight-standardized 3x3 convolution with padding 1 (spatial size
    preserved at stride 1)."""
    return StdConv2d(
        cin, cout, kernel_size=3, stride=stride,
        padding=1, bias=bias, groups=groups)
|
def conv1x1(cin, cout, stride=1, bias=False):
    """Weight-standardized 1x1 (pointwise) convolution, no padding."""
    return StdConv2d(
        cin, cout, kernel_size=1, stride=stride,
        padding=0, bias=bias)
|
def tf2th(conv_weights):
    """Convert a TF conv kernel from HWIO to PyTorch's OIHW layout.

    Non-4D arrays (biases, norm gammas/betas) pass through unchanged;
    the result is always returned as a torch tensor.
    """
    # Only 4-D arrays are conv kernels needing the axis permutation.
    if conv_weights.ndim == 4:
        return torch.from_numpy(conv_weights.transpose([3, 2, 0, 1]))
    return torch.from_numpy(conv_weights)
|
class PreActBottleneck(nn.Module):
    """Pre-activation (v2) bottleneck block.

    Follows the implementation of "Identity Mappings in Deep Residual Networks":
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua

    Except it puts the stride on 3x3 conv when available.
    """
    def __init__(self, cin, cout=None, cmid=None, stride=1):
        # cout defaults to cin; cmid (bottleneck width) defaults to cout // 4.
        super().__init__()
        cout = (cout or cin)
        cmid = (cmid or (cout // 4))
        self.gn1 = nn.GroupNorm(32, cin)
        self.conv1 = conv1x1(cin, cmid)
        self.gn2 = nn.GroupNorm(32, cmid)
        self.conv2 = conv3x3(cmid, cmid, stride)  # stride is carried by the 3x3 conv
        self.gn3 = nn.GroupNorm(32, cmid)
        self.conv3 = conv1x1(cmid, cout)
        self.relu = nn.ReLU(inplace=True)
        if ((stride != 1) or (cin != cout)):
            # Projection shortcut when spatial size or channel count changes.
            self.downsample = conv1x1(cin, cout, stride)
    def forward(self, x):
        # Pre-activation: GroupNorm + ReLU are applied before each conv.
        out = self.relu(self.gn1(x))
        residual = x
        if hasattr(self, 'downsample'):
            # The projection consumes the pre-activated input, not raw x.
            residual = self.downsample(out)
        out = self.conv1(out)
        out = self.conv2(self.relu(self.gn2(out)))
        out = self.conv3(self.relu(self.gn3(out)))
        return (out + residual)
    def load_from(self, weights, prefix=''):
        # Copy arrays from a TF checkpoint dict (keys are TF variable paths,
        # values numpy arrays; tf2th handles the HWIO->OIHW layout change).
        convname = 'standardized_conv2d'
        with torch.no_grad():
            self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
            self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
            self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
            self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
            self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
            self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
            self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
            self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
            self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
            if hasattr(self, 'downsample'):
                w = weights[f'{prefix}a/proj/{convname}/kernel']
                self.downsample.weight.copy_(tf2th(w))
|
class ResNetV2(nn.Module):
    """Implementation of Pre-activation (v2) ResNet mode."""
    def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
        # block_units: number of PreActBottleneck units per stage (4 stages);
        # width_factor scales every channel count; head_size is the number
        # of output classes of the final 1x1-conv classifier.
        super().__init__()
        wf = width_factor
        # Stem: 7x7/2 weight-standardized conv, then 3x3/2 max pool.
        self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, (64 * wf), kernel_size=7, stride=2, padding=3, bias=False)), ('pad', nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
        # Four stages of bottleneck units; stages 2-4 downsample via stride=2
        # on their first ('unit01') block.
        self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(64 * wf), cout=(256 * wf), cmid=(64 * wf)))] + [(f'unit{i:02d}', PreActBottleneck(cin=(256 * wf), cout=(256 * wf), cmid=(64 * wf))) for i in range(2, (block_units[0] + 1))])))), ('block2', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(256 * wf), cout=(512 * wf), cmid=(128 * wf), stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=(512 * wf), cout=(512 * wf), cmid=(128 * wf))) for i in range(2, (block_units[1] + 1))])))), ('block3', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(512 * wf), cout=(1024 * wf), cmid=(256 * wf), stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=(1024 * wf), cout=(1024 * wf), cmid=(256 * wf))) for i in range(2, (block_units[2] + 1))])))), ('block4', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(1024 * wf), cout=(2048 * wf), cmid=(512 * wf), stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=(2048 * wf), cout=(2048 * wf), cmid=(512 * wf))) for i in range(2, (block_units[3] + 1))]))))]))
        self.zero_head = zero_head
        # Head: GroupNorm + ReLU + global average pool + 1x1-conv classifier.
        self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, (2048 * wf))), ('relu', nn.ReLU(inplace=True)), ('avg', nn.AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d((2048 * wf), head_size, kernel_size=1, bias=True))]))
    def forward(self, x):
        x = self.head(self.body(self.root(x)))
        # Global pooling must have collapsed the spatial dims to 1x1.
        assert (x.shape[(- 2):] == (1, 1))
        # Drop the trailing 1x1 spatial dims -> (batch, head_size) logits.
        return x[(..., 0, 0)]
    def load_from(self, weights, prefix='resnet/'):
        # Copy a TF checkpoint (dict of numpy arrays keyed by variable path)
        # into this model; tf2th converts kernel layouts.
        with torch.no_grad():
            self.root.conv.weight.copy_(tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
            self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
            self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
            if self.zero_head:
                # Fresh zero-initialized classifier head (e.g. for fine-tuning).
                nn.init.zeros_(self.head.conv.weight)
                nn.init.zeros_(self.head.conv.bias)
            else:
                self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel']))
                self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
            # Delegate per-unit weight copying to each bottleneck block.
            for (bname, block) in self.body.named_children():
                for (uname, unit) in block.named_children():
                    unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
|
def gen_classifier_loader(name, d):
    """Return a zero-argument loader for a ResNetV2 (KNOWN_MODELS) architecture.

    `d['arch']` selects the constructor; `d['head_size']` overrides the
    default 1000-way classification head when present. The checkpoint is
    loaded from the repo under `name`.
    """
    def classifier_loader():
        # dict.get replaces the original `1000 if 'head_size' not in d else
        # d['head_size']` conditional — same semantics, single lookup.
        model = KNOWN_MODELS[d['arch']](head_size=d.get('head_size', 1000))
        load_model_state_dict(model, name)
        return model
    return classifier_loader
|
def load_weights(weight_file):
    """Load an MMdnn-style .npy weight dictionary.

    Returns None when `weight_file` is None. Tries the modern numpy
    loading path first (allow_pickle=True) and falls back to the legacy
    python2-era form (encoding='bytes').
    """
    if weight_file is None:  # fix: was `== None`
        return None
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        weights_dict = np.load(weight_file, encoding='bytes').item()
    return weights_dict
|
class KitModel(nn.Module):
def __init__(self, weight_file):
super(KitModel, self).__init__()
global __weights_dict
__weights_dict = load_weights(weight_file)
self.conv_conv1 = self.__conv(2, name='conv_conv1', in_channels=3, out_channels=96, kernel_size=(7, 7), stride=(2, 2), groups=1, bias=True)
self.bn_conv1 = self.__batch_normalization(2, 'bn_conv1', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_conv2red = self.__conv(2, name='conv_conv2red', in_channels=96, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_conv2red = self.__batch_normalization(2, 'bn_conv2red', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_conv2 = self.__conv(2, name='conv_conv2', in_channels=128, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_conv2 = self.__batch_normalization(2, 'bn_conv2', num_features=288, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3a_1x1 = self.__conv(2, name='conv_3a_1x1', in_channels=288, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_3a_3x3_reduce = self.__conv(2, name='conv_3a_3x3_reduce', in_channels=288, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_3a_double_3x3_reduce = self.__conv(2, name='conv_3a_double_3x3_reduce', in_channels=288, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_3a_1x1 = self.__batch_normalization(2, 'bn_3a_1x1', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3a_3x3_reduce = self.__batch_normalization(2, 'bn_3a_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3a_double_3x3_reduce = self.__batch_normalization(2, 'bn_3a_double_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3a_proj = self.__conv(2, name='conv_3a_proj', in_channels=288, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_3a_proj = self.__batch_normalization(2, 'bn_3a_proj', num_features=48, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3a_3x3 = self.__conv(2, name='conv_3a_3x3', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_3a_double_3x3_0 = self.__conv(2, name='conv_3a_double_3x3_0', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_3a_3x3 = self.__batch_normalization(2, 'bn_3a_3x3', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3a_double_3x3_0 = self.__batch_normalization(2, 'bn_3a_double_3x3_0', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3a_double_3x3_1 = self.__conv(2, name='conv_3a_double_3x3_1', in_channels=144, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_3a_double_3x3_1 = self.__batch_normalization(2, 'bn_3a_double_3x3_1', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3b_1x1 = self.__conv(2, name='conv_3b_1x1', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_3b_3x3_reduce = self.__conv(2, name='conv_3b_3x3_reduce', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_3b_double_3x3_reduce = self.__conv(2, name='conv_3b_double_3x3_reduce', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_3b_1x1 = self.__batch_normalization(2, 'bn_3b_1x1', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3b_3x3_reduce = self.__batch_normalization(2, 'bn_3b_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3b_double_3x3_reduce = self.__batch_normalization(2, 'bn_3b_double_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3b_proj = self.__conv(2, name='conv_3b_proj', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_3b_proj = self.__batch_normalization(2, 'bn_3b_proj', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3b_3x3 = self.__conv(2, name='conv_3b_3x3', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_3b_double_3x3_0 = self.__conv(2, name='conv_3b_double_3x3_0', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_3b_3x3 = self.__batch_normalization(2, 'bn_3b_3x3', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3b_double_3x3_0 = self.__batch_normalization(2, 'bn_3b_double_3x3_0', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3b_double_3x3_1 = self.__conv(2, name='conv_3b_double_3x3_1', in_channels=144, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_3b_double_3x3_1 = self.__batch_normalization(2, 'bn_3b_double_3x3_1', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3c_3x3_reduce = self.__conv(2, name='conv_3c_3x3_reduce', in_channels=480, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_3c_double_3x3_reduce = self.__conv(2, name='conv_3c_double_3x3_reduce', in_channels=480, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_3c_3x3_reduce = self.__batch_normalization(2, 'bn_3c_3x3_reduce', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3c_double_3x3_reduce = self.__batch_normalization(2, 'bn_3c_double_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3c_3x3 = self.__conv(2, name='conv_3c_3x3', in_channels=192, out_channels=240, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
self.conv_3c_double_3x3_0 = self.__conv(2, name='conv_3c_double_3x3_0', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_3c_3x3 = self.__batch_normalization(2, 'bn_3c_3x3', num_features=240, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_3c_double_3x3_0 = self.__batch_normalization(2, 'bn_3c_double_3x3_0', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_3c_double_3x3_1 = self.__conv(2, name='conv_3c_double_3x3_1', in_channels=144, out_channels=144, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
self.bn_3c_double_3x3_1 = self.__batch_normalization(2, 'bn_3c_double_3x3_1', num_features=144, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4a_1x1 = self.__conv(2, name='conv_4a_1x1', in_channels=864, out_channels=224, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4a_3x3_reduce = self.__conv(2, name='conv_4a_3x3_reduce', in_channels=864, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4a_double_3x3_reduce = self.__conv(2, name='conv_4a_double_3x3_reduce', in_channels=864, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4a_1x1 = self.__batch_normalization(2, 'bn_4a_1x1', num_features=224, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4a_3x3_reduce = self.__batch_normalization(2, 'bn_4a_3x3_reduce', num_features=64, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4a_double_3x3_reduce = self.__batch_normalization(2, 'bn_4a_double_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4a_proj = self.__conv(2, name='conv_4a_proj', in_channels=864, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4a_proj = self.__batch_normalization(2, 'bn_4a_proj', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4a_3x3 = self.__conv(2, name='conv_4a_3x3', in_channels=64, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_4a_double_3x3_0 = self.__conv(2, name='conv_4a_double_3x3_0', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4a_3x3 = self.__batch_normalization(2, 'bn_4a_3x3', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4a_double_3x3_0 = self.__batch_normalization(2, 'bn_4a_double_3x3_0', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4a_double_3x3_1 = self.__conv(2, name='conv_4a_double_3x3_1', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4a_double_3x3_1 = self.__batch_normalization(2, 'bn_4a_double_3x3_1', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4b_1x1 = self.__conv(2, name='conv_4b_1x1', in_channels=576, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4b_3x3_reduce = self.__conv(2, name='conv_4b_3x3_reduce', in_channels=576, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4b_double_3x3_reduce = self.__conv(2, name='conv_4b_double_3x3_reduce', in_channels=576, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4b_1x1 = self.__batch_normalization(2, 'bn_4b_1x1', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4b_3x3_reduce = self.__batch_normalization(2, 'bn_4b_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4b_double_3x3_reduce = self.__batch_normalization(2, 'bn_4b_double_3x3_reduce', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4b_proj = self.__conv(2, name='conv_4b_proj', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4b_proj = self.__batch_normalization(2, 'bn_4b_proj', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4b_3x3 = self.__conv(2, name='conv_4b_3x3', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_4b_double_3x3_0 = self.__conv(2, name='conv_4b_double_3x3_0', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4b_3x3 = self.__batch_normalization(2, 'bn_4b_3x3', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4b_double_3x3_0 = self.__batch_normalization(2, 'bn_4b_double_3x3_0', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4b_double_3x3_1 = self.__conv(2, name='conv_4b_double_3x3_1', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4b_double_3x3_1 = self.__batch_normalization(2, 'bn_4b_double_3x3_1', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4c_1x1 = self.__conv(2, name='conv_4c_1x1', in_channels=576, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4c_3x3_reduce = self.__conv(2, name='conv_4c_3x3_reduce', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4c_double_3x3_reduce = self.__conv(2, name='conv_4c_double_3x3_reduce', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4c_1x1 = self.__batch_normalization(2, 'bn_4c_1x1', num_features=160, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4c_3x3_reduce = self.__batch_normalization(2, 'bn_4c_3x3_reduce', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4c_double_3x3_reduce = self.__batch_normalization(2, 'bn_4c_double_3x3_reduce', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4c_proj = self.__conv(2, name='conv_4c_proj', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4c_proj = self.__batch_normalization(2, 'bn_4c_proj', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4c_3x3 = self.__conv(2, name='conv_4c_3x3', in_channels=128, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_4c_double_3x3_0 = self.__conv(2, name='conv_4c_double_3x3_0', in_channels=128, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4c_3x3 = self.__batch_normalization(2, 'bn_4c_3x3', num_features=160, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4c_double_3x3_0 = self.__batch_normalization(2, 'bn_4c_double_3x3_0', num_features=160, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4c_double_3x3_1 = self.__conv(2, name='conv_4c_double_3x3_1', in_channels=160, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4c_double_3x3_1 = self.__batch_normalization(2, 'bn_4c_double_3x3_1', num_features=160, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4d_1x1 = self.__conv(2, name='conv_4d_1x1', in_channels=608, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4d_3x3_reduce = self.__conv(2, name='conv_4d_3x3_reduce', in_channels=608, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4d_double_3x3_reduce = self.__conv(2, name='conv_4d_double_3x3_reduce', in_channels=608, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4d_1x1 = self.__batch_normalization(2, 'bn_4d_1x1', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4d_3x3_reduce = self.__batch_normalization(2, 'bn_4d_3x3_reduce', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4d_double_3x3_reduce = self.__batch_normalization(2, 'bn_4d_double_3x3_reduce', num_features=160, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4d_proj = self.__conv(2, name='conv_4d_proj', in_channels=608, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4d_proj = self.__batch_normalization(2, 'bn_4d_proj', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4d_3x3 = self.__conv(2, name='conv_4d_3x3', in_channels=128, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_4d_double_3x3_0 = self.__conv(2, name='conv_4d_double_3x3_0', in_channels=160, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4d_3x3 = self.__batch_normalization(2, 'bn_4d_3x3', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4d_double_3x3_0 = self.__batch_normalization(2, 'bn_4d_double_3x3_0', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4d_double_3x3_1 = self.__conv(2, name='conv_4d_double_3x3_1', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4d_double_3x3_1 = self.__batch_normalization(2, 'bn_4d_double_3x3_1', num_features=96, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4e_3x3_reduce = self.__conv(2, name='conv_4e_3x3_reduce', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_4e_double_3x3_reduce = self.__conv(2, name='conv_4e_double_3x3_reduce', in_channels=512, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_4e_3x3_reduce = self.__batch_normalization(2, 'bn_4e_3x3_reduce', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4e_double_3x3_reduce = self.__batch_normalization(2, 'bn_4e_double_3x3_reduce', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4e_3x3 = self.__conv(2, name='conv_4e_3x3', in_channels=128, out_channels=192, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
self.conv_4e_double_3x3_0 = self.__conv(2, name='conv_4e_double_3x3_0', in_channels=192, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_4e_3x3 = self.__batch_normalization(2, 'bn_4e_3x3', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_4e_double_3x3_0 = self.__batch_normalization(2, 'bn_4e_double_3x3_0', num_features=256, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_4e_double_3x3_1 = self.__conv(2, name='conv_4e_double_3x3_1', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
self.bn_4e_double_3x3_1 = self.__batch_normalization(2, 'bn_4e_double_3x3_1', num_features=256, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5a_1x1 = self.__conv(2, name='conv_5a_1x1', in_channels=960, out_channels=352, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_5a_3x3_reduce = self.__conv(2, name='conv_5a_3x3_reduce', in_channels=960, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_5a_double_3x3_reduce = self.__conv(2, name='conv_5a_double_3x3_reduce', in_channels=960, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_5a_1x1 = self.__batch_normalization(2, 'bn_5a_1x1', num_features=352, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_5a_3x3_reduce = self.__batch_normalization(2, 'bn_5a_3x3_reduce', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_5a_double_3x3_reduce = self.__batch_normalization(2, 'bn_5a_double_3x3_reduce', num_features=160, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5a_proj = self.__conv(2, name='conv_5a_proj', in_channels=960, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_5a_proj = self.__batch_normalization(2, 'bn_5a_proj', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5a_3x3 = self.__conv(2, name='conv_5a_3x3', in_channels=192, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_5a_double_3x3_0 = self.__conv(2, name='conv_5a_double_3x3_0', in_channels=160, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_5a_3x3 = self.__batch_normalization(2, 'bn_5a_3x3', num_features=320, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_5a_double_3x3_0 = self.__batch_normalization(2, 'bn_5a_double_3x3_0', num_features=224, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5a_double_3x3_1 = self.__conv(2, name='conv_5a_double_3x3_1', in_channels=224, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_5a_double_3x3_1 = self.__batch_normalization(2, 'bn_5a_double_3x3_1', num_features=224, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5b_1x1 = self.__conv(2, name='conv_5b_1x1', in_channels=1024, out_channels=352, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_5b_3x3_reduce = self.__conv(2, name='conv_5b_3x3_reduce', in_channels=1024, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.conv_5b_double_3x3_reduce = self.__conv(2, name='conv_5b_double_3x3_reduce', in_channels=1024, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_5b_1x1 = self.__batch_normalization(2, 'bn_5b_1x1', num_features=352, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_5b_3x3_reduce = self.__batch_normalization(2, 'bn_5b_3x3_reduce', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_5b_double_3x3_reduce = self.__batch_normalization(2, 'bn_5b_double_3x3_reduce', num_features=192, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5b_proj = self.__conv(2, name='conv_5b_proj', in_channels=1024, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.bn_5b_proj = self.__batch_normalization(2, 'bn_5b_proj', num_features=128, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5b_3x3 = self.__conv(2, name='conv_5b_3x3', in_channels=192, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.conv_5b_double_3x3_0 = self.__conv(2, name='conv_5b_double_3x3_0', in_channels=192, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_5b_3x3 = self.__batch_normalization(2, 'bn_5b_3x3', num_features=320, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.bn_5b_double_3x3_0 = self.__batch_normalization(2, 'bn_5b_double_3x3_0', num_features=224, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.conv_5b_double_3x3_1 = self.__conv(2, name='conv_5b_double_3x3_1', in_channels=224, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.bn_5b_double_3x3_1 = self.__batch_normalization(2, 'bn_5b_double_3x3_1', num_features=224, eps=9.999999747378752e-05, momentum=0.8999999761581421)
self.fc1 = self.__dense(name='fc1', in_features=1024, out_features=21841, bias=True)
def forward(self, x):
conv_conv1_pad = F.pad(x, (3, 3, 3, 3))
conv_conv1 = self.conv_conv1(conv_conv1_pad)
bn_conv1 = self.bn_conv1(conv_conv1)
relu_conv1 = F.relu(bn_conv1)
pool1 = F.max_pool2d(relu_conv1, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
conv_conv2red = self.conv_conv2red(pool1)
bn_conv2red = self.bn_conv2red(conv_conv2red)
relu_conv2red = F.relu(bn_conv2red)
conv_conv2_pad = F.pad(relu_conv2red, (1, 1, 1, 1))
conv_conv2 = self.conv_conv2(conv_conv2_pad)
bn_conv2 = self.bn_conv2(conv_conv2)
relu_conv2 = F.relu(bn_conv2)
pool2 = F.max_pool2d(relu_conv2, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
conv_3a_1x1 = self.conv_3a_1x1(pool2)
conv_3a_3x3_reduce = self.conv_3a_3x3_reduce(pool2)
conv_3a_double_3x3_reduce = self.conv_3a_double_3x3_reduce(pool2)
avg_pool_3a_pool = F.avg_pool2d(pool2, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_3a_1x1 = self.bn_3a_1x1(conv_3a_1x1)
bn_3a_3x3_reduce = self.bn_3a_3x3_reduce(conv_3a_3x3_reduce)
bn_3a_double_3x3_reduce = self.bn_3a_double_3x3_reduce(conv_3a_double_3x3_reduce)
conv_3a_proj = self.conv_3a_proj(avg_pool_3a_pool)
relu_3a_1x1 = F.relu(bn_3a_1x1)
relu_3a_3x3_reduce = F.relu(bn_3a_3x3_reduce)
relu_3a_double_3x3_reduce = F.relu(bn_3a_double_3x3_reduce)
bn_3a_proj = self.bn_3a_proj(conv_3a_proj)
conv_3a_3x3_pad = F.pad(relu_3a_3x3_reduce, (1, 1, 1, 1))
conv_3a_3x3 = self.conv_3a_3x3(conv_3a_3x3_pad)
conv_3a_double_3x3_0_pad = F.pad(relu_3a_double_3x3_reduce, (1, 1, 1, 1))
conv_3a_double_3x3_0 = self.conv_3a_double_3x3_0(conv_3a_double_3x3_0_pad)
relu_3a_proj = F.relu(bn_3a_proj)
bn_3a_3x3 = self.bn_3a_3x3(conv_3a_3x3)
bn_3a_double_3x3_0 = self.bn_3a_double_3x3_0(conv_3a_double_3x3_0)
relu_3a_3x3 = F.relu(bn_3a_3x3)
relu_3a_double_3x3_0 = F.relu(bn_3a_double_3x3_0)
conv_3a_double_3x3_1_pad = F.pad(relu_3a_double_3x3_0, (1, 1, 1, 1))
conv_3a_double_3x3_1 = self.conv_3a_double_3x3_1(conv_3a_double_3x3_1_pad)
bn_3a_double_3x3_1 = self.bn_3a_double_3x3_1(conv_3a_double_3x3_1)
relu_3a_double_3x3_1 = F.relu(bn_3a_double_3x3_1)
ch_concat_3a_chconcat = torch.cat((relu_3a_1x1, relu_3a_3x3, relu_3a_double_3x3_1, relu_3a_proj), 1)
conv_3b_1x1 = self.conv_3b_1x1(ch_concat_3a_chconcat)
conv_3b_3x3_reduce = self.conv_3b_3x3_reduce(ch_concat_3a_chconcat)
conv_3b_double_3x3_reduce = self.conv_3b_double_3x3_reduce(ch_concat_3a_chconcat)
avg_pool_3b_pool = F.avg_pool2d(ch_concat_3a_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_3b_1x1 = self.bn_3b_1x1(conv_3b_1x1)
bn_3b_3x3_reduce = self.bn_3b_3x3_reduce(conv_3b_3x3_reduce)
bn_3b_double_3x3_reduce = self.bn_3b_double_3x3_reduce(conv_3b_double_3x3_reduce)
conv_3b_proj = self.conv_3b_proj(avg_pool_3b_pool)
relu_3b_1x1 = F.relu(bn_3b_1x1)
relu_3b_3x3_reduce = F.relu(bn_3b_3x3_reduce)
relu_3b_double_3x3_reduce = F.relu(bn_3b_double_3x3_reduce)
bn_3b_proj = self.bn_3b_proj(conv_3b_proj)
conv_3b_3x3_pad = F.pad(relu_3b_3x3_reduce, (1, 1, 1, 1))
conv_3b_3x3 = self.conv_3b_3x3(conv_3b_3x3_pad)
conv_3b_double_3x3_0_pad = F.pad(relu_3b_double_3x3_reduce, (1, 1, 1, 1))
conv_3b_double_3x3_0 = self.conv_3b_double_3x3_0(conv_3b_double_3x3_0_pad)
relu_3b_proj = F.relu(bn_3b_proj)
bn_3b_3x3 = self.bn_3b_3x3(conv_3b_3x3)
bn_3b_double_3x3_0 = self.bn_3b_double_3x3_0(conv_3b_double_3x3_0)
relu_3b_3x3 = F.relu(bn_3b_3x3)
relu_3b_double_3x3_0 = F.relu(bn_3b_double_3x3_0)
conv_3b_double_3x3_1_pad = F.pad(relu_3b_double_3x3_0, (1, 1, 1, 1))
conv_3b_double_3x3_1 = self.conv_3b_double_3x3_1(conv_3b_double_3x3_1_pad)
bn_3b_double_3x3_1 = self.bn_3b_double_3x3_1(conv_3b_double_3x3_1)
relu_3b_double_3x3_1 = F.relu(bn_3b_double_3x3_1)
ch_concat_3b_chconcat = torch.cat((relu_3b_1x1, relu_3b_3x3, relu_3b_double_3x3_1, relu_3b_proj), 1)
conv_3c_3x3_reduce = self.conv_3c_3x3_reduce(ch_concat_3b_chconcat)
conv_3c_double_3x3_reduce = self.conv_3c_double_3x3_reduce(ch_concat_3b_chconcat)
max_pool_3c_pool_pad = F.pad(ch_concat_3b_chconcat, (1, 1, 1, 1), value=float('-inf'))
max_pool_3c_pool = F.max_pool2d(max_pool_3c_pool_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
bn_3c_3x3_reduce = self.bn_3c_3x3_reduce(conv_3c_3x3_reduce)
bn_3c_double_3x3_reduce = self.bn_3c_double_3x3_reduce(conv_3c_double_3x3_reduce)
relu_3c_3x3_reduce = F.relu(bn_3c_3x3_reduce)
relu_3c_double_3x3_reduce = F.relu(bn_3c_double_3x3_reduce)
conv_3c_3x3_pad = F.pad(relu_3c_3x3_reduce, (1, 1, 1, 1))
conv_3c_3x3 = self.conv_3c_3x3(conv_3c_3x3_pad)
conv_3c_double_3x3_0_pad = F.pad(relu_3c_double_3x3_reduce, (1, 1, 1, 1))
conv_3c_double_3x3_0 = self.conv_3c_double_3x3_0(conv_3c_double_3x3_0_pad)
bn_3c_3x3 = self.bn_3c_3x3(conv_3c_3x3)
bn_3c_double_3x3_0 = self.bn_3c_double_3x3_0(conv_3c_double_3x3_0)
relu_3c_3x3 = F.relu(bn_3c_3x3)
relu_3c_double_3x3_0 = F.relu(bn_3c_double_3x3_0)
conv_3c_double_3x3_1_pad = F.pad(relu_3c_double_3x3_0, (1, 1, 1, 1))
conv_3c_double_3x3_1 = self.conv_3c_double_3x3_1(conv_3c_double_3x3_1_pad)
bn_3c_double_3x3_1 = self.bn_3c_double_3x3_1(conv_3c_double_3x3_1)
relu_3c_double_3x3_1 = F.relu(bn_3c_double_3x3_1)
ch_concat_3c_chconcat = torch.cat((relu_3c_3x3, relu_3c_double_3x3_1, max_pool_3c_pool), 1)
conv_4a_1x1 = self.conv_4a_1x1(ch_concat_3c_chconcat)
conv_4a_3x3_reduce = self.conv_4a_3x3_reduce(ch_concat_3c_chconcat)
conv_4a_double_3x3_reduce = self.conv_4a_double_3x3_reduce(ch_concat_3c_chconcat)
avg_pool_4a_pool = F.avg_pool2d(ch_concat_3c_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_4a_1x1 = self.bn_4a_1x1(conv_4a_1x1)
bn_4a_3x3_reduce = self.bn_4a_3x3_reduce(conv_4a_3x3_reduce)
bn_4a_double_3x3_reduce = self.bn_4a_double_3x3_reduce(conv_4a_double_3x3_reduce)
conv_4a_proj = self.conv_4a_proj(avg_pool_4a_pool)
relu_4a_1x1 = F.relu(bn_4a_1x1)
relu_4a_3x3_reduce = F.relu(bn_4a_3x3_reduce)
relu_4a_double_3x3_reduce = F.relu(bn_4a_double_3x3_reduce)
bn_4a_proj = self.bn_4a_proj(conv_4a_proj)
conv_4a_3x3_pad = F.pad(relu_4a_3x3_reduce, (1, 1, 1, 1))
conv_4a_3x3 = self.conv_4a_3x3(conv_4a_3x3_pad)
conv_4a_double_3x3_0_pad = F.pad(relu_4a_double_3x3_reduce, (1, 1, 1, 1))
conv_4a_double_3x3_0 = self.conv_4a_double_3x3_0(conv_4a_double_3x3_0_pad)
relu_4a_proj = F.relu(bn_4a_proj)
bn_4a_3x3 = self.bn_4a_3x3(conv_4a_3x3)
bn_4a_double_3x3_0 = self.bn_4a_double_3x3_0(conv_4a_double_3x3_0)
relu_4a_3x3 = F.relu(bn_4a_3x3)
relu_4a_double_3x3_0 = F.relu(bn_4a_double_3x3_0)
conv_4a_double_3x3_1_pad = F.pad(relu_4a_double_3x3_0, (1, 1, 1, 1))
conv_4a_double_3x3_1 = self.conv_4a_double_3x3_1(conv_4a_double_3x3_1_pad)
bn_4a_double_3x3_1 = self.bn_4a_double_3x3_1(conv_4a_double_3x3_1)
relu_4a_double_3x3_1 = F.relu(bn_4a_double_3x3_1)
ch_concat_4a_chconcat = torch.cat((relu_4a_1x1, relu_4a_3x3, relu_4a_double_3x3_1, relu_4a_proj), 1)
conv_4b_1x1 = self.conv_4b_1x1(ch_concat_4a_chconcat)
conv_4b_3x3_reduce = self.conv_4b_3x3_reduce(ch_concat_4a_chconcat)
conv_4b_double_3x3_reduce = self.conv_4b_double_3x3_reduce(ch_concat_4a_chconcat)
avg_pool_4b_pool = F.avg_pool2d(ch_concat_4a_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_4b_1x1 = self.bn_4b_1x1(conv_4b_1x1)
bn_4b_3x3_reduce = self.bn_4b_3x3_reduce(conv_4b_3x3_reduce)
bn_4b_double_3x3_reduce = self.bn_4b_double_3x3_reduce(conv_4b_double_3x3_reduce)
conv_4b_proj = self.conv_4b_proj(avg_pool_4b_pool)
relu_4b_1x1 = F.relu(bn_4b_1x1)
relu_4b_3x3_reduce = F.relu(bn_4b_3x3_reduce)
relu_4b_double_3x3_reduce = F.relu(bn_4b_double_3x3_reduce)
bn_4b_proj = self.bn_4b_proj(conv_4b_proj)
conv_4b_3x3_pad = F.pad(relu_4b_3x3_reduce, (1, 1, 1, 1))
conv_4b_3x3 = self.conv_4b_3x3(conv_4b_3x3_pad)
conv_4b_double_3x3_0_pad = F.pad(relu_4b_double_3x3_reduce, (1, 1, 1, 1))
conv_4b_double_3x3_0 = self.conv_4b_double_3x3_0(conv_4b_double_3x3_0_pad)
relu_4b_proj = F.relu(bn_4b_proj)
bn_4b_3x3 = self.bn_4b_3x3(conv_4b_3x3)
bn_4b_double_3x3_0 = self.bn_4b_double_3x3_0(conv_4b_double_3x3_0)
relu_4b_3x3 = F.relu(bn_4b_3x3)
relu_4b_double_3x3_0 = F.relu(bn_4b_double_3x3_0)
conv_4b_double_3x3_1_pad = F.pad(relu_4b_double_3x3_0, (1, 1, 1, 1))
conv_4b_double_3x3_1 = self.conv_4b_double_3x3_1(conv_4b_double_3x3_1_pad)
bn_4b_double_3x3_1 = self.bn_4b_double_3x3_1(conv_4b_double_3x3_1)
relu_4b_double_3x3_1 = F.relu(bn_4b_double_3x3_1)
ch_concat_4b_chconcat = torch.cat((relu_4b_1x1, relu_4b_3x3, relu_4b_double_3x3_1, relu_4b_proj), 1)
conv_4c_1x1 = self.conv_4c_1x1(ch_concat_4b_chconcat)
conv_4c_3x3_reduce = self.conv_4c_3x3_reduce(ch_concat_4b_chconcat)
conv_4c_double_3x3_reduce = self.conv_4c_double_3x3_reduce(ch_concat_4b_chconcat)
avg_pool_4c_pool = F.avg_pool2d(ch_concat_4b_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_4c_1x1 = self.bn_4c_1x1(conv_4c_1x1)
bn_4c_3x3_reduce = self.bn_4c_3x3_reduce(conv_4c_3x3_reduce)
bn_4c_double_3x3_reduce = self.bn_4c_double_3x3_reduce(conv_4c_double_3x3_reduce)
conv_4c_proj = self.conv_4c_proj(avg_pool_4c_pool)
relu_4c_1x1 = F.relu(bn_4c_1x1)
relu_4c_3x3_reduce = F.relu(bn_4c_3x3_reduce)
relu_4c_double_3x3_reduce = F.relu(bn_4c_double_3x3_reduce)
bn_4c_proj = self.bn_4c_proj(conv_4c_proj)
conv_4c_3x3_pad = F.pad(relu_4c_3x3_reduce, (1, 1, 1, 1))
conv_4c_3x3 = self.conv_4c_3x3(conv_4c_3x3_pad)
conv_4c_double_3x3_0_pad = F.pad(relu_4c_double_3x3_reduce, (1, 1, 1, 1))
conv_4c_double_3x3_0 = self.conv_4c_double_3x3_0(conv_4c_double_3x3_0_pad)
relu_4c_proj = F.relu(bn_4c_proj)
bn_4c_3x3 = self.bn_4c_3x3(conv_4c_3x3)
bn_4c_double_3x3_0 = self.bn_4c_double_3x3_0(conv_4c_double_3x3_0)
relu_4c_3x3 = F.relu(bn_4c_3x3)
relu_4c_double_3x3_0 = F.relu(bn_4c_double_3x3_0)
conv_4c_double_3x3_1_pad = F.pad(relu_4c_double_3x3_0, (1, 1, 1, 1))
conv_4c_double_3x3_1 = self.conv_4c_double_3x3_1(conv_4c_double_3x3_1_pad)
bn_4c_double_3x3_1 = self.bn_4c_double_3x3_1(conv_4c_double_3x3_1)
relu_4c_double_3x3_1 = F.relu(bn_4c_double_3x3_1)
ch_concat_4c_chconcat = torch.cat((relu_4c_1x1, relu_4c_3x3, relu_4c_double_3x3_1, relu_4c_proj), 1)
conv_4d_1x1 = self.conv_4d_1x1(ch_concat_4c_chconcat)
conv_4d_3x3_reduce = self.conv_4d_3x3_reduce(ch_concat_4c_chconcat)
conv_4d_double_3x3_reduce = self.conv_4d_double_3x3_reduce(ch_concat_4c_chconcat)
avg_pool_4d_pool = F.avg_pool2d(ch_concat_4c_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_4d_1x1 = self.bn_4d_1x1(conv_4d_1x1)
bn_4d_3x3_reduce = self.bn_4d_3x3_reduce(conv_4d_3x3_reduce)
bn_4d_double_3x3_reduce = self.bn_4d_double_3x3_reduce(conv_4d_double_3x3_reduce)
conv_4d_proj = self.conv_4d_proj(avg_pool_4d_pool)
relu_4d_1x1 = F.relu(bn_4d_1x1)
relu_4d_3x3_reduce = F.relu(bn_4d_3x3_reduce)
relu_4d_double_3x3_reduce = F.relu(bn_4d_double_3x3_reduce)
bn_4d_proj = self.bn_4d_proj(conv_4d_proj)
conv_4d_3x3_pad = F.pad(relu_4d_3x3_reduce, (1, 1, 1, 1))
conv_4d_3x3 = self.conv_4d_3x3(conv_4d_3x3_pad)
conv_4d_double_3x3_0_pad = F.pad(relu_4d_double_3x3_reduce, (1, 1, 1, 1))
conv_4d_double_3x3_0 = self.conv_4d_double_3x3_0(conv_4d_double_3x3_0_pad)
relu_4d_proj = F.relu(bn_4d_proj)
bn_4d_3x3 = self.bn_4d_3x3(conv_4d_3x3)
bn_4d_double_3x3_0 = self.bn_4d_double_3x3_0(conv_4d_double_3x3_0)
relu_4d_3x3 = F.relu(bn_4d_3x3)
relu_4d_double_3x3_0 = F.relu(bn_4d_double_3x3_0)
conv_4d_double_3x3_1_pad = F.pad(relu_4d_double_3x3_0, (1, 1, 1, 1))
conv_4d_double_3x3_1 = self.conv_4d_double_3x3_1(conv_4d_double_3x3_1_pad)
bn_4d_double_3x3_1 = self.bn_4d_double_3x3_1(conv_4d_double_3x3_1)
relu_4d_double_3x3_1 = F.relu(bn_4d_double_3x3_1)
ch_concat_4d_chconcat = torch.cat((relu_4d_1x1, relu_4d_3x3, relu_4d_double_3x3_1, relu_4d_proj), 1)
conv_4e_3x3_reduce = self.conv_4e_3x3_reduce(ch_concat_4d_chconcat)
conv_4e_double_3x3_reduce = self.conv_4e_double_3x3_reduce(ch_concat_4d_chconcat)
max_pool_4e_pool_pad = F.pad(ch_concat_4d_chconcat, (1, 1, 1, 1), value=float('-inf'))
max_pool_4e_pool = F.max_pool2d(max_pool_4e_pool_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
bn_4e_3x3_reduce = self.bn_4e_3x3_reduce(conv_4e_3x3_reduce)
bn_4e_double_3x3_reduce = self.bn_4e_double_3x3_reduce(conv_4e_double_3x3_reduce)
relu_4e_3x3_reduce = F.relu(bn_4e_3x3_reduce)
relu_4e_double_3x3_reduce = F.relu(bn_4e_double_3x3_reduce)
conv_4e_3x3_pad = F.pad(relu_4e_3x3_reduce, (1, 1, 1, 1))
conv_4e_3x3 = self.conv_4e_3x3(conv_4e_3x3_pad)
conv_4e_double_3x3_0_pad = F.pad(relu_4e_double_3x3_reduce, (1, 1, 1, 1))
conv_4e_double_3x3_0 = self.conv_4e_double_3x3_0(conv_4e_double_3x3_0_pad)
bn_4e_3x3 = self.bn_4e_3x3(conv_4e_3x3)
bn_4e_double_3x3_0 = self.bn_4e_double_3x3_0(conv_4e_double_3x3_0)
relu_4e_3x3 = F.relu(bn_4e_3x3)
relu_4e_double_3x3_0 = F.relu(bn_4e_double_3x3_0)
conv_4e_double_3x3_1_pad = F.pad(relu_4e_double_3x3_0, (1, 1, 1, 1))
conv_4e_double_3x3_1 = self.conv_4e_double_3x3_1(conv_4e_double_3x3_1_pad)
bn_4e_double_3x3_1 = self.bn_4e_double_3x3_1(conv_4e_double_3x3_1)
relu_4e_double_3x3_1 = F.relu(bn_4e_double_3x3_1)
ch_concat_4e_chconcat = torch.cat((relu_4e_3x3, relu_4e_double_3x3_1, max_pool_4e_pool), 1)
conv_5a_1x1 = self.conv_5a_1x1(ch_concat_4e_chconcat)
conv_5a_3x3_reduce = self.conv_5a_3x3_reduce(ch_concat_4e_chconcat)
conv_5a_double_3x3_reduce = self.conv_5a_double_3x3_reduce(ch_concat_4e_chconcat)
avg_pool_5a_pool = F.avg_pool2d(ch_concat_4e_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
bn_5a_1x1 = self.bn_5a_1x1(conv_5a_1x1)
bn_5a_3x3_reduce = self.bn_5a_3x3_reduce(conv_5a_3x3_reduce)
bn_5a_double_3x3_reduce = self.bn_5a_double_3x3_reduce(conv_5a_double_3x3_reduce)
conv_5a_proj = self.conv_5a_proj(avg_pool_5a_pool)
relu_5a_1x1 = F.relu(bn_5a_1x1)
relu_5a_3x3_reduce = F.relu(bn_5a_3x3_reduce)
relu_5a_double_3x3_reduce = F.relu(bn_5a_double_3x3_reduce)
bn_5a_proj = self.bn_5a_proj(conv_5a_proj)
conv_5a_3x3_pad = F.pad(relu_5a_3x3_reduce, (1, 1, 1, 1))
conv_5a_3x3 = self.conv_5a_3x3(conv_5a_3x3_pad)
conv_5a_double_3x3_0_pad = F.pad(relu_5a_double_3x3_reduce, (1, 1, 1, 1))
conv_5a_double_3x3_0 = self.conv_5a_double_3x3_0(conv_5a_double_3x3_0_pad)
relu_5a_proj = F.relu(bn_5a_proj)
bn_5a_3x3 = self.bn_5a_3x3(conv_5a_3x3)
bn_5a_double_3x3_0 = self.bn_5a_double_3x3_0(conv_5a_double_3x3_0)
relu_5a_3x3 = F.relu(bn_5a_3x3)
relu_5a_double_3x3_0 = F.relu(bn_5a_double_3x3_0)
conv_5a_double_3x3_1_pad = F.pad(relu_5a_double_3x3_0, (1, 1, 1, 1))
conv_5a_double_3x3_1 = self.conv_5a_double_3x3_1(conv_5a_double_3x3_1_pad)
bn_5a_double_3x3_1 = self.bn_5a_double_3x3_1(conv_5a_double_3x3_1)
relu_5a_double_3x3_1 = F.relu(bn_5a_double_3x3_1)
ch_concat_5a_chconcat = torch.cat((relu_5a_1x1, relu_5a_3x3, relu_5a_double_3x3_1, relu_5a_proj), 1)
conv_5b_1x1 = self.conv_5b_1x1(ch_concat_5a_chconcat)
conv_5b_3x3_reduce = self.conv_5b_3x3_reduce(ch_concat_5a_chconcat)
conv_5b_double_3x3_reduce = self.conv_5b_double_3x3_reduce(ch_concat_5a_chconcat)
max_pool_5b_pool_pad = F.pad(ch_concat_5a_chconcat, (1, 1, 1, 1), value=float('-inf'))
max_pool_5b_pool = F.max_pool2d(max_pool_5b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
bn_5b_1x1 = self.bn_5b_1x1(conv_5b_1x1)
bn_5b_3x3_reduce = self.bn_5b_3x3_reduce(conv_5b_3x3_reduce)
bn_5b_double_3x3_reduce = self.bn_5b_double_3x3_reduce(conv_5b_double_3x3_reduce)
conv_5b_proj = self.conv_5b_proj(max_pool_5b_pool)
relu_5b_1x1 = F.relu(bn_5b_1x1)
relu_5b_3x3_reduce = F.relu(bn_5b_3x3_reduce)
relu_5b_double_3x3_reduce = F.relu(bn_5b_double_3x3_reduce)
bn_5b_proj = self.bn_5b_proj(conv_5b_proj)
conv_5b_3x3_pad = F.pad(relu_5b_3x3_reduce, (1, 1, 1, 1))
conv_5b_3x3 = self.conv_5b_3x3(conv_5b_3x3_pad)
conv_5b_double_3x3_0_pad = F.pad(relu_5b_double_3x3_reduce, (1, 1, 1, 1))
conv_5b_double_3x3_0 = self.conv_5b_double_3x3_0(conv_5b_double_3x3_0_pad)
relu_5b_proj = F.relu(bn_5b_proj)
bn_5b_3x3 = self.bn_5b_3x3(conv_5b_3x3)
bn_5b_double_3x3_0 = self.bn_5b_double_3x3_0(conv_5b_double_3x3_0)
relu_5b_3x3 = F.relu(bn_5b_3x3)
relu_5b_double_3x3_0 = F.relu(bn_5b_double_3x3_0)
conv_5b_double_3x3_1_pad = F.pad(relu_5b_double_3x3_0, (1, 1, 1, 1))
conv_5b_double_3x3_1 = self.conv_5b_double_3x3_1(conv_5b_double_3x3_1_pad)
bn_5b_double_3x3_1 = self.bn_5b_double_3x3_1(conv_5b_double_3x3_1)
relu_5b_double_3x3_1 = F.relu(bn_5b_double_3x3_1)
ch_concat_5b_chconcat = torch.cat((relu_5b_1x1, relu_5b_3x3, relu_5b_double_3x3_1, relu_5b_proj), 1)
global_pool = F.avg_pool2d(ch_concat_5b_chconcat, kernel_size=(7, 7), stride=(1, 1), padding=(0,), ceil_mode=False, count_include_pad=False)
flatten = global_pool.view(global_pool.size(0), (- 1))
fc1 = self.fc1(flatten)
softmax = F.softmax(fc1)
return softmax
@staticmethod
def __conv(dim, name, **kwargs):
if (dim == 1):
layer = nn.Conv1d(**kwargs)
elif (dim == 2):
layer = nn.Conv2d(**kwargs)
elif (dim == 3):
layer = nn.Conv3d(**kwargs)
else:
raise NotImplementedError()
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if ('bias' in __weights_dict[name]):
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
return layer
@staticmethod
def __batch_normalization(dim, name, **kwargs):
if ((dim == 0) or (dim == 1)):
layer = nn.BatchNorm1d(**kwargs)
elif (dim == 2):
layer = nn.BatchNorm2d(**kwargs)
elif (dim == 3):
layer = nn.BatchNorm3d(**kwargs)
else:
raise NotImplementedError()
if ('scale' in __weights_dict[name]):
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
else:
layer.weight.data.fill_(1)
if ('bias' in __weights_dict[name]):
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
else:
layer.bias.data.fill_(0)
layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
return layer
@staticmethod
def __dense(name, **kwargs):
layer = nn.Linear(**kwargs)
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if ('bias' in __weights_dict[name]):
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
return layer
|
def classifier_loader():
    """Instantiate the BN-Inception ImageNet-21k model from its stored checkpoint bytes."""
    checkpoint_bytes = load_model_checkpoint_bytes('bninception-imagenet21k')
    return KitModel(checkpoint_bytes)
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader that builds a `pretrainedmodels` arch and restores stored weights."""
    def classifier_loader():
        net = pretrainedmodels.__dict__[d['arch']](num_classes=1000, pretrained=None)
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def gen_classifier_loader(path):
    """Return a zero-arg loader for the CLIP ImageNet model stored at `path`."""
    def classifier_loader():
        # build_clip_imagenet_model returns a tuple; the model is its first element.
        return build_clip_imagenet_model(path)[0]
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader that builds the torchvision arch d['arch'] and loads checkpoint `name`."""
    def classifier_loader():
        net = torch_models.__dict__[d['arch']]()
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Make a deferred constructor for the torchvision architecture named in d['arch']."""
    def classifier_loader():
        arch_ctor = torch_models.__dict__[d['arch']]
        net = arch_ctor()
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader: torchvision arch d['arch'] with weights from checkpoint `name`."""
    def classifier_loader():
        classifier = torch_models.__dict__[d['arch']]()
        load_model_state_dict(classifier, name)
        return classifier
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader building d['arch'] without pretrained weights, then loading `name`."""
    def classifier_loader():
        # pretrained=False: the checkpoint named `name` supplies the weights instead.
        net = torch_models.__dict__[d['arch']](pretrained=False)
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader building an EfficientNet by arch name and restoring weights."""
    def classifier_loader():
        net = EfficientNet.from_name(d['arch'])
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def noisystudent_loader():
    """Build EfficientNet-L2 NoisyStudent via timm and load the stored checkpoint."""
    net = timm.create_model('tf_efficientnet_l2_ns', pretrained=False)
    load_model_state_dict(net, 'efficientnet-l2-noisystudent')
    return net
|
def store_logits_jft(images, model):
    """Dump a batch of images to '<prefix>_<counter>.npy' (NHWC) and return dummy logits.

    Used to export batches for an external (JFT) scorer; `model` is unused here.
    Relies on module-level globals `store_filename_prefix` and `store_counter`.
    """
    global store_counter
    # Fix: the original used `is not ''` (identity comparison with a literal — a
    # SyntaxWarning on modern CPython); equality is what's meant.
    assert store_filename_prefix != ''
    images = images.cpu().permute([0, 2, 3, 1]).numpy()
    with open(f'{store_filename_prefix}_{store_counter}.npy', 'wb') as f:
        np.save(f, images)
    store_counter += 1
    # Placeholder logits so the evaluation loop can proceed; real scores come later.
    return torch.empty(images.shape[0], 1000).cuda()
|
def load_logits_jft(images, model):
    """Load the next externally-computed logits batch from '<prefix>_<counter>.npy'.

    Counterpart of store_logits_jft; `images` and `model` are unused beyond keeping
    the classify-function signature. Relies on module-level globals
    `load_filename_prefix` and `load_counter`.
    """
    global load_counter
    # Fix: `is not ''` was an identity comparison with a literal; use equality.
    assert load_filename_prefix != ''
    with open(f'{load_filename_prefix}_{load_counter}.npy', 'rb') as f:
        logits = torch.from_numpy(np.load(f)).cuda()
    load_counter += 1
    return logits
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader fetching a WSL architecture from torch.hub and loading weights."""
    def classifier_loader():
        hub_arch = d['arch'] + '_wsl'
        net = torch.hub.load('facebookresearch/WSL-Images', hub_arch)
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Return a zero-arg loader for the torchvision arch in d['arch'] with checkpoint `name`."""
    def classifier_loader():
        backbone = torch_models.__dict__[d['arch']]()
        load_model_state_dict(backbone, name)
        return backbone
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Factory: defer construction of torchvision arch d['arch'] plus weight loading."""
    def classifier_loader():
        constructor = torch_models.__dict__[d['arch']]
        net = constructor()
        load_model_state_dict(net, name)
        return net
    return classifier_loader
|
class ConvertToPyTorchModel(nn.Module):
    """Wraps a base classifier as a plain nn.Module: applies input normalization,
    dispatches to either the model or a custom classify function, and optionally
    restricts the output logits to a class sublist."""
    def __init__(self, base_model, classify_fn_args, classify=None, normalization=None, class_sublist=None, adversarial_attack=None):
        super().__init__()
        if (normalization is not None):
            self.input_space = normalization.input_space
            # NOTE(review): mean/std are registered as trainable nn.Parameters;
            # register_buffer looks intended for fixed normalization constants —
            # confirm nothing ever optimizes this wrapper's parameters.
            self.mean = nn.Parameter(torch.tensor(normalization.mean).float().view(3, 1, 1))
            self.std = nn.Parameter(torch.tensor(normalization.std).float().view(3, 1, 1))
        self.base_model = base_model
        # Names of optional kwargs the classify function accepts (set of strings).
        self.classify_fn_args = classify_fn_args
        self.classify = classify
        self.class_sublist = class_sublist
        self.adversarial_attack = adversarial_attack
        self.normalization = normalization
    def forward(self, x):
        """Normalize x, run the classifier (or classify fn), optionally subselect classes."""
        if (self.normalization is not None):
            if (self.input_space == 'BGR'):
                # Model expects BGR: reverse the channel dimension of the RGB input.
                x = x.flip(1)
            x = ((x - self.mean) / self.std)
        if (self.classify is None):
            x = self.base_model(x)
        else:
            # Build kwargs dynamically: only pass optional args the classify fn declares.
            kwargs = {'images': x, 'model': self.base_model}
            if ('class_sublist' in self.classify_fn_args):
                kwargs['class_sublist'] = self.class_sublist
            if ('adversarial_attack' in self.classify_fn_args):
                kwargs['adversarial_attack'] = self.adversarial_attack
            x = self.classify(**kwargs)
        if ((self.class_sublist is not None) and ('class_sublist' not in self.classify_fn_args)):
            # Select the sublist columns of the logits (double transpose = column select).
            x = x.t()[self.class_sublist].t()
        return x
|
class Model():
    """Bundles a named classifier with its transform, loader, optional custom
    classify function, normalization, and batch-size policy."""

    # Adversarial evaluation needs extra memory (input gradients); shrink batches by this.
    ADVERSARIAL_BATCH_SIZE_REDUCTION_FACTOR = 8

    def __init__(self, name, transform, classifier_loader, eval_batch_size, arch=None, normalization=None, classify=None, adversarial_batch_size=None):
        super().__init__()
        self.name = name
        self.arch = 'NA' if arch is None else arch
        self.transform = transform
        self.classifier_loader = classifier_loader
        self.eval_batch_size = eval_batch_size
        self.adversarial_batch_size = adversarial_batch_size
        self.normalization = normalization
        self.classify = classify
        # Record which of the recognized kwargs the classify function declares.
        self.classify_fn_args = set()
        if self.classify is not None:
            sig = list(signature(self.classify).parameters.keys())
            assert ('images' in sig) and ('model' in sig), ('Unrecognized metrics function ' + 'definition. Make sure function takes arguments "images" and "model"')
            self.classify_fn_args = {arg for arg in ('images', 'model', 'class_sublist', 'adversarial_attack') if arg in sig}

    def generate_classifier(self, py_eval_setting):
        """Instantiate the classifier and wrap it for the given evaluation setting."""
        self.classifier = self.classifier_loader()
        model = ConvertToPyTorchModel(self.classifier, self.classify_fn_args, self.classify, self.normalization, py_eval_setting.class_sublist, py_eval_setting.adversarial_attack)
        if not list(model.parameters()):
            # Downstream tooling expects at least one parameter (e.g. to infer device).
            model._dummy = nn.Parameter(torch.tensor(0.0))
        return model

    def get_batch_size(self, py_eval_setting):
        """Batch size for this setting: full for clean eval, reduced for adversarial."""
        if not py_eval_setting.adversarial_attack:
            return self.eval_batch_size
        if self.adversarial_batch_size is not None:
            return self.adversarial_batch_size
        return max(self.eval_batch_size // self.ADVERSARIAL_BATCH_SIZE_REDUCTION_FACTOR, 1)
|
class StandardNormalization():
    """Per-channel normalization spec: mean, std, and the channel order of the input.

    Attributes:
        mean: per-channel means.
        std: per-channel standard deviations.
        input_space: 'RGB' or 'BGR' — the channel order the wrapped model expects.
    """

    def __init__(self, mean, std, input_space='RGB'):
        # Fix: dropped the pointless f-prefix on a message with no placeholders (F541);
        # the message text itself is unchanged.
        assert input_space in ['RGB', 'BGR'], 'Can only handle RGB or BGR channel input formats'
        self.mean = mean
        self.std = std
        self.input_space = input_space
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.