code stringlengths 17 6.64M |
|---|
class sage_build_ext(build_ext):
    def finalize_options(self):
        """Finalize the standard ``build_ext`` options, then sanity-check the compiler flags."""
        build_ext.finalize_options(self)
        # check_flags() is defined later on this class (not visible in this chunk).
        self.check_flags()
    def run(self):
        """Run the ``build_cython`` command first so generated C/C++ sources exist, then build the extensions."""
        self.run_command('build_cython')
        build_ext.run(self)
def check_flags(self):
'\n Sanity check the compiler flags used to build th... |
class sage_build_ext_minimal(build_ext):
"\n In contrast to :func:`~sage_setup.sage_build_ext.sage_build_ext`, this build extension is designed\n to be used in combination with Cython's cythonize function.\n Thus, we only take care of some options and letting Cython do the main work.\n "
def init... |
class install_kernel_spec_mixin():
def install_kernel_spec(self):
'\n Install the Jupyter kernel spec.\n\n .. NOTE::\n\n The files are generated, not copied. Therefore, we cannot\n use ``data_files`` for this.\n '
from sage.repl.ipython_kernel.install im... |
class sage_install(install, install_kernel_spec_mixin):
    """``install`` command that additionally installs the Sage Jupyter kernel spec."""

    def run(self):
        # The base classes are invoked explicitly (not via super()) so the
        # regular install always happens before the kernel spec is written.
        install.run(self)
        self.install_kernel_spec()
|
class sage_develop(develop, install_kernel_spec_mixin):
    """``develop`` (editable install) command that also installs the Jupyter kernel spec."""

    def run(self):
        # Perform the standard editable install / uninstall first.
        develop.run(self)
        # Only register the kernel when installing, never when uninstalling.
        if self.uninstall:
            return
        self.install_kernel_spec()
|
class sage_clean(install):
all_distributions = None
    def run(self):
        """Remove stale installed files and log how long the cleanup took."""
        t = time.time()
        # clean_stale_files() is defined later on this class (not visible here).
        self.clean_stale_files()
        log.info(('Finished cleaning, time: %.2f seconds.' % (time.time() - t)))
def clean_stale_files(self):
'\n Remove stale installed files.\n\n This remov... |
class sage_install_and_clean(sage_install, sage_clean):
    """Install the Sage library, then clean up stale files from earlier installs."""

    def run(self):
        # Both base ``run`` methods are called explicitly (not via super())
        # so the install step always precedes the cleanup step.
        sage_install.run(self)
        sage_clean.run(self)
|
def compiler_directives(profile: bool):
'\n Return a list of Cython directives used for compilation.\n '
return dict(auto_pickle=False, autotestdict=False, binding=False, c_api_binop_methods=True, cdivision=True, cpow=True, embedsignature=True, fast_getattr=True, language_level='3', legacy_implicit_noex... |
def compile_time_env_variables():
    """
    Return a dictionary of environment variables used for compilation.

    The mapping describes the Python interpreter performing the build
    (platform, version as a hex number, and major version) and is intended
    to be passed to Cython as its ``compile_time_env``.

    Note: the original docstring claimed a *list* was returned; the function
    has always returned a ``dict``.
    """
    return dict(
        PY_PLATFORM=sys.platform,
        PY_VERSION_HEX=sys.hexversion,
        PY_MAJOR_VERSION=sys.version_info[0],
    )
|
def excepthook(*exc):
'\n When an error occurs, display an error message similar to the error\n messages from ``sage-spkg``.\n\n In particular, ``build/make/install`` will recognize "sage" as a failed\n package, see :trac:`16774`.\n '
stars = ('*' * 72)
print(stars, file=sys.stderr)
imp... |
def create_extension(template, kwds):
from Cython.Build.Dependencies import default_create_extension
from sage.env import sage_include_directories
include_dirs = (kwds.get('include_dirs', []) + sage_include_directories(use_sources=True))
kwds['include_dirs'] = include_dirs
return default_create_ex... |
def find_python_sources(src_dir, modules=['sage'], distributions=None, exclude_distributions=None):
'\n Find all Python packages and Python/Cython modules in the sources.\n\n INPUT:\n\n - ``src_dir`` -- root directory for the sources\n\n - ``modules`` -- (default: ``[\'sage\']``) sequence of strings:\... |
def filter_cython_sources(src_dir, distributions, exclude_distributions=None):
'\n Find all Cython modules in the given source directory that belong to the\n given distributions.\n\n INPUT:\n\n - ``src_dir`` -- root directory for the sources\n\n - ``distributions`` -- a sequence or set of strings: ... |
def _cythonized_dir(src_dir=None, editable_install=None):
"\n Return the path where Cython-generated files are placed by the build system.\n\n INPUT:\n\n - ``src_dir`` -- string or path (default: the value of ``SAGE_SRC``). The\n root directory for the sources.\n\n - ``editable_install`` -- bool... |
def find_extra_files(src_dir, modules, cythonized_dir, special_filenames=[], *, distributions=None):
'\n Find all extra files which should be installed.\n\n These are (for each ``module`` in ``modules``):\n\n 1. From ``src_dir/module``: all .pyx, .pxd and .pxi files and files\n listed in ``special_... |
def installed_files_by_module(site_packages, modules=('sage',)):
"\n Find all currently installed files\n\n INPUT:\n\n - ``site_packages`` -- string. The root Python path where the Sage\n library is being installed. If the path doesn't exist, returns\n an empty dictionary.\n\n - ``modules`` ... |
def get_extensions(type=None):
"\n Returns the filename extensions for different types of Python module files.\n\n By default returns all extensions, but can be filtered by type. The\n possible types are 'source' (for pure Python sources), 'bytecode' (for\n compiled bytecode files (i.e. pyc files), o... |
def _get_extensions(type):
'\n Python 3.3+ implementation of ``get_extensions()`` using the\n `importlib.extensions` module.\n '
if type:
return {'source': importlib.machinery.SOURCE_SUFFIXES, 'bytecode': importlib.machinery.BYTECODE_SUFFIXES, 'extension': importlib.machinery.EXTENSION_SUFFIX... |
def run_command(cmd):
    """
    Echo a shell command and execute it.

    INPUT:

    - ``cmd`` -- a string; the command to run

    OUTPUT: prints ``cmd`` to stdout (flushed immediately so it appears
    before the command's own output) and returns the exit status from
    ``os.system(cmd)``.
    """
    # Equivalent to print(cmd) followed by a flush, written explicitly.
    sys.stdout.write(cmd + '\n')
    sys.stdout.flush()
    return os.system(cmd)
|
def apply_func_progress(p):
"\n Given a triple p consisting of a function, value and a string,\n output the string and apply the function to the value.\n\n The string could for example be some progress indicator.\n\n This exists solely because we can't pickle an anonymous function\n in execute_list... |
def execute_list_of_commands_in_parallel(command_list, nthreads):
'\n Execute the given list of commands, possibly in parallel, using\n ``nthreads`` threads. Terminates ``setup.py`` with an exit code\n of 1 if an error occurs in any subcommand.\n\n INPUT:\n\n - ``command_list`` -- a list of comman... |
def process_command_results(result_values):
    """
    Inspect the exit statuses of a batch of commands and abort on failure.

    Prints a message for each nonzero status.  Unless the (module-level)
    ``keep_going`` flag is set, the first failure exits immediately with
    status 1; with ``keep_going`` set, every failure is reported and the
    process exits with status 1 only after the whole batch was examined.
    """
    last_failure = None
    for status in result_values:
        if not status:
            continue
        print('Error running command, failed with status %s.' % status)
        # ``keep_going`` is a module-level flag defined elsewhere in this file.
        if not keep_going:
            sys.exit(1)
        last_failure = status
    if last_failure:
        sys.exit(1)
|
def execute_list_of_commands(command_list):
'\n INPUT:\n\n - ``command_list`` -- a list of strings or pairs\n\n OUTPUT:\n\n For each entry in command_list, we attempt to run the command.\n If it is a string, we call ``os.system()``. If it is a pair [f, v],\n we call f(v).\n\n If the environme... |
def _environ_prepend(var, value, separator=':'):
    """Prepend ``value`` (plus ``separator``) to ``os.environ[var]``.

    If ``var`` is unset, it is created with just ``value``.  A falsy
    ``value`` leaves the environment untouched.
    """
    if not value:
        return
    if var in os.environ:
        os.environ[var] = f'{value}{separator}{os.environ[var]}'
    else:
        os.environ[var] = value
|
def setenv():
from sage.env import UNAME, SAGE_LOCAL, SAGE_VENV, SAGE_ARCHFLAGS, SAGE_PKG_CONFIG_PATH
if (('ARCHFLAGS' not in os.environ) and (SAGE_ARCHFLAGS != 'unset')):
os.environ['ARCHFLAGS'] = SAGE_ARCHFLAGS
_environ_prepend('PKG_CONFIG_PATH', SAGE_PKG_CONFIG_PATH)
if (SAGE_LOCAL and (Pat... |
def stable_uniq(L):
'\n Given an iterable L, remove duplicate items from L by keeping only\n the last occurrence of any item.\n\n The items must be hashable.\n\n EXAMPLES::\n\n sage: from sage_setup.util import stable_uniq\n sage: stable_uniq( (1, 2, 3, 4, 5, 6, 3, 7, 5, 1, 5, 9) )\n ... |
def have_module(name):
'\n Check whether a Python module named ``name`` can be imported.\n\n This is done by trying to import that module and returning ``True``\n if that import succeeded. So, as a side effect, the module is\n actually imported if possible.\n\n EXAMPLES::\n\n sage: from sage... |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='ivqa', choices=['ivqa', 'msrvtt', 'msrvttmc', 'msvd', 'webvid', 'activitynet', 'howto100m', 'howtovqa', 'how2qa', 'nextqa', 'star', 'tgifqa/transition', 'tgifqa/action', 'tgifqa/frameqa', 'tgifqa2/transition... |
class MultipleChoiceLoss(nn.Module):
def __init__(self, num_option=5, margin=1, size_average=True):
super(MultipleChoiceLoss, self).__init__()
self.margin = margin
self.num_option = num_option
self.size_average = size_average
def forward(self, score, target):
N = scor... |
def accuracy_metric(sample_list_file, result_file):
sample_list = load_file(sample_list_file)
group = {'CW': [], 'CH': [], 'TN': [], 'TC': [], 'DC': [], 'DL': [], 'DO': []}
for (id, row) in sample_list.iterrows():
qns_id = ((str(row['video_id']) + '_') + str(row['qid']))
qtype = str(row['t... |
def main(result_file, mode='val'):
    """Evaluate a NExT-QA result file against the sample list of the given split."""
    dataset_dir = '../data/datasets/nextqa/'
    sample_list_file = osp.join(dataset_dir, mode + '.csv')
    print('Evaluating {}'.format(result_file))
    accuracy_metric(sample_list_file, result_file)
|
class Contrastive_Loss(torch.nn.Module):
    """Contrastive objective implemented as plain cross-entropy over similarity logits."""

    def __init__(self):
        super().__init__()
        self.ce_loss = torch.nn.CrossEntropyLoss()

    def forward(self, x, target):
        """Return the cross-entropy between logits ``x`` and index ``target``."""
        return self.ce_loss(x, target)
|
class LogSoftmax(torch.nn.Module):
    """Masked mean negative log-softmax loss.

    ``forward(x, a)`` computes ``-log_softmax(x)`` along ``dim``, weights it
    elementwise by the mask/weights ``a``, normalizes each row by the mask
    sum over dim 1 (clamped to at least 1 to avoid division by zero), and
    returns the mean over the batch.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x, a):
        nll = -F.log_softmax(x, self.dim, _stacklevel=5)
        denom = a.sum(1, keepdim=True).clamp(min=1)
        weighted = (nll * a) / denom
        return weighted.sum(dim=1).mean()
|
class NCELoss(torch.nn.Module):
def __init__(self, batch_size=4096):
super(NCELoss, self).__init__()
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, x):
batch_size = len(x)
target = torch.arange(batch_size).cuda()
x = torch.cat((x, x.t()), dim=1)
... |
def main(args):
if (not os.path.isdir(args.save_dir)):
os.mkdir(os.path.join(args.save_dir))
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
logFormatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
rootLogger = logging.getLogger()
... |
class Bert(nn.Module):
' Finetuned *BERT module '
def __init__(self, bert_tokenizer):
super(Bert, self).__init__()
config = BertConfig.from_pretrained('bert-base-uncased', output_hidden_states=True)
self.bert = BertModel.from_pretrained('bert-base-uncased', config=config)
self... |
class Sentence_Maxpool(nn.Module):
' Utilitary for the answer module '
def __init__(self, word_dimension, output_dim, relu=True):
super(Sentence_Maxpool, self).__init__()
self.fc = nn.Linear(word_dimension, output_dim)
self.out_dim = output_dim
self.relu = relu
def forwar... |
class FFN(nn.Module):
def __init__(self, word_dim, hidden_dim, out_dim, dropout=0.3):
super().__init__()
activation = 'gelu'
self.dropout = nn.Dropout(p=dropout)
self.lin1 = nn.Linear(in_features=word_dim, out_features=hidden_dim)
self.lin2 = nn.Linear(in_features=hidden_d... |
class AModel(nn.Module):
'\n Answer embedding module\n '
def __init__(self, bert_tokenizer, word_dim=768, out_dim=512):
super(AModel, self).__init__()
self.bert = Bert(bert_tokenizer)
self.linear_text = nn.Linear(word_dim, out_dim)
def forward(self, answer):
if (len... |
def draw_rectangle(img, bbox, bbox_color=(255, 255, 255), thickness=3, is_opaque=False, alpha=0.5):
'Draws the rectangle around the object\n\n Parameters\n ----------\n img : ndarray\n the actual image\n bbox : list\n a list containing x_min, y_min, x_max and y_max of the rectangle posit... |
def add_label(img, label, bbox, draw_bg=True, text_bg_color=(255, 255, 255), text_color=(0, 0, 0), top=True):
'adds label, inside or outside the rectangle\n\n Parameters\n ----------\n img : ndarray\n the image on which the label is to be written, preferably the image with the rectangular bounding... |
def add_T_label(img, label, bbox, draw_bg=True, text_bg_color=(255, 255, 255), text_color=(0, 0, 0)):
'adds a T label to the rectangle, originating from the top of the rectangle\n\n Parameters\n ----------\n img : ndarray\n the image on which the T label is to be written/drawn, preferably the imag... |
def draw_flag_with_label(img, label, bbox, write_label=True, line_color=(255, 255, 255), text_bg_color=(255, 255, 255), text_color=(0, 0, 0)):
"draws a pole from the middle of the object that is to be labeled and adds the label to the flag\n\n Parameters\n ----------\n img : ndarray\n the image on... |
def draw_multiple_rectangles(img, bboxes, bbox_color=(255, 255, 255), thickness=2, is_opaque=False, alpha=0.5):
'draws multiple rectangles\n\n img : ndarray\n the actual image\n bboxes : list\n a list of lists, each inner list containing x_min, y_min, x_max and y_max of the rectangle positions... |
def add_multiple_labels(img, labels, bboxes, draw_bg=True, text_bg_color=(255, 255, 255), text_color=(0, 0, 0), top=True):
'add labels, inside or outside the rectangles\n\n Parameters\n ----------\n img : ndarray\n the image on which the labels are to be written, preferably the image with the rect... |
def add_multiple_T_labels(img, labels, bboxes, draw_bg=True, text_bg_color=(255, 255, 255), text_color=(0, 0, 0)):
'adds T labels to the rectangles, each originating from the top of the rectangle\n\n Parameters\n ----------\n img : ndarray\n the image on which the T labels are to be written/drawn,... |
def draw_multiple_flags_with_labels(img, labels, bboxes, write_label=True, line_color=(255, 255, 255), text_bg_color=(255, 255, 255), text_color=(0, 0, 0)):
"draws poles from the middle of the objects that are to be labeled and adds the labels to the flags\n\n Parameters\n ----------\n img : ndarray\n ... |
def d(**kwargs):
    """Helper for creating a config dict from keyword arguments."""
    cfg = ml_collections.ConfigDict(initial_dictionary=kwargs)
    return cfg
|
def get_config():
config = ml_collections.ConfigDict()
config.seed = 1234
config.pred = 'noise_pred'
config.train = d(n_steps=500000, batch_size=128, mode='uncond', log_interval=10, eval_interval=5000, save_interval=50000)
config.optimizer = d(name='adamw', lr=0.0002, weight_decay=0.03, betas=(0.9... |
def interpolate_fn(x: torch.Tensor, xp: torch.Tensor, yp: torch.Tensor) -> torch.Tensor:
'Performs piecewise linear interpolation for x, using xp and yp keypoints (knots).\n Performs separate interpolation for each channel.\n Args:\n x: [N, C] points to be calibrated (interpolated). Batch with C chan... |
class NoiseScheduleVP():
def __init__(self, schedule='discrete', beta_0=0.0001, beta_1=0.02, total_N=1000, betas=None, alphas_cumprod=None):
"Create a wrapper class for the forward SDE (VP type).\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, si... |
def model_wrapper(model, noise_schedule=None, is_cond_classifier=False, classifier_fn=None, classifier_scale=1.0, time_input_type='1', total_N=1000, model_kwargs={}, is_deis=False):
'Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For ... |
class DPM_Solver():
def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.0):
'Construct a DPM-Solver. \n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input\n (t in [epsilon, T]):\n ... |
class NoiseScheduleVP():
def __init__(self, schedule='linear'):
"Create a wrapper class for the forward SDE (VP type).\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma... |
def model_wrapper(model, noise_schedule=None, is_cond_classifier=False, classifier_fn=None, classifier_scale=1.0, time_input_type='1', total_N=1000, model_kwargs={}):
'Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on... |
class DPM_Solver():
def __init__(self, model_fn, noise_schedule):
'Construct a DPM-Solver.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input\n (t in [epsilon, T]):\n ``\n def model_fn(x, t_continu... |
def evaluate(config):
if config.get('benchmark', False):
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
mp.set_start_method('spawn')
accelerator = accelerate.Accelerator()
device = accelerator.device
accelerate.utils.set_seed(config.seed, device_sp... |
def main(argv):
    """absl entry point: fold the CLI flags into the config object and run evaluation."""
    config = FLAGS.config
    # Paths supplied as separate flags are merged into the config so that
    # evaluate() only needs a single argument.
    config.nnet_path = FLAGS.nnet_path
    config.output_path = FLAGS.output_path
    evaluate(config)
|
class AbstractEncoder(nn.Module):
    """Interface for condition/text encoders; subclasses must implement ``encode``."""

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        # Concrete subclasses provide the actual encoding logic.
        raise NotImplementedError
|
class FrozenCLIPEmbedder(AbstractEncoder):
'Uses the CLIP transformer encoder for text (from Hugging Face)'
def __init__(self, version='openai/clip-vit-large-patch14', device='cuda', max_length=77):
super().__init__()
self.tokenizer = CLIPTokenizer.from_pretrained(version)
self.transf... |
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0)
if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))):
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be... |
def trunc_normal_(tensor, mean=0.0, std=1.0, a=(- 2.0), b=2.0):
'Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn ... |
def drop_path(x, drop_prob: float=0.0, training: bool=False):
"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n\n This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n the original name is misleading as 'Drop Connect' is a dif... |
class DropPath(nn.Module):
'Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n '
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, s... |
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = (out_features or in_features)
hidden_features = (hidden_features or in_features)
self.fc1 = nn.Linear(in_features, hidden_... |
def get_sde(name, **kwargs):
    """
    Instantiate an SDE by name.

    - ``'vpsde'`` -> ``VPSDE(**kwargs)``
    - ``'vpsde_cosine'`` -> ``VPSDECosine(**kwargs)``

    Raises ``NotImplementedError(name)`` for any other name, so the error
    message identifies the unknown SDE (consistent with ``get_nnet``).
    """
    if name == 'vpsde':
        return VPSDE(**kwargs)
    elif name == 'vpsde_cosine':
        return VPSDECosine(**kwargs)
    else:
        # Include the offending name in the error for easier debugging.
        raise NotImplementedError(name)
|
def stp(s, ts: torch.Tensor):
    """Scalar-times-tensor product: broadcast per-sample scalars ``s`` over ``ts``.

    ``s`` is a 1-D numpy array or tensor of length ``ts.shape[0]``; it is
    reshaped to (-1, 1, ..., 1) so that it multiplies each sample of ``ts``.
    """
    if isinstance(s, np.ndarray):
        s = torch.from_numpy(s).type_as(ts)
    shape = (-1,) + (1,) * (ts.dim() - 1)
    return s.view(shape) * ts
|
def mos(a, start_dim=1):
    """Mean of squares: square ``a``, flatten from ``start_dim``, and average the last dim."""
    squared = a * a
    return squared.flatten(start_dim=start_dim).mean(dim=-1)
|
def duplicate(tensor, *size):
    """Return a view of ``tensor`` replicated along new leading dims of shape ``size``."""
    # tensor[None] adds a leading axis; expand broadcasts it without copying.
    return tensor[None].expand(*size, *tensor.shape)
|
class SDE(object):
'\n dx = f(x, t)dt + g(t) dw with 0 <= t <= 1\n f(x, t) is the drift\n g(t) is the diffusion\n '
def drift(self, x, t):
raise NotImplementedError
def diffusion(self, t):
raise NotImplementedError
def cum_beta(self, t):
raise NotImpl... |
class VPSDE(SDE):
def __init__(self, beta_min=0.1, beta_max=20):
self.beta_0 = beta_min
self.beta_1 = beta_max
def drift(self, x, t):
return ((- 0.5) * stp(self.squared_diffusion(t), x))
def diffusion(self, t):
return (self.squared_diffusion(t) ** 0.5)
def squared_d... |
class VPSDECosine(SDE):
'\n dx = f(x, t)dt + g(t) dw with 0 <= t <= 1\n f(x, t) is the drift\n g(t) is the diffusion\n '
def __init__(self, s=0.008):
self.s = s
self.F = (lambda t: (torch.cos(((((t + s) / (1 + s)) * math.pi) / 2)) ** 2))
self.F0 = (math.cos((((... |
class ScoreModel(object):
'\n The forward process is q(x_[0,T])\n '
def __init__(self, nnet: nn.Module, pred: str, sde: SDE, T=1):
assert (T == 1)
self.nnet = nnet
self.pred = pred
self.sde = sde
self.T = T
print(f'ScoreModel with pred={pred}, sde={sd... |
class ReverseSDE(object):
'\n dx = [f(x, t) - g(t)^2 s(x, t)] dt + g(t) dw\n '
def __init__(self, score_model):
self.sde = score_model.sde
self.score_model = score_model
def drift(self, x, t, **kwargs):
drift = self.sde.drift(x, t)
diffusion = self.sde.diffusion... |
class ODE(object):
'\n dx = [f(x, t) - g(t)^2 s(x, t)] dt\n '
def __init__(self, score_model):
self.sde = score_model.sde
self.score_model = score_model
def drift(self, x, t, **kwargs):
drift = self.sde.drift(x, t)
diffusion = self.sde.diffusion(t)
score... |
def dct2str(dct):
    """Format each value of ``dct`` to 6 significant digits and return ``str()`` of the result."""
    formatted = {key: f'{val:.6g}' for key, val in dct.items()}
    return str(formatted)
|
@torch.no_grad()
def euler_maruyama(rsde, x_init, sample_steps, eps=0.001, T=1, trace=None, verbose=False, **kwargs):
'\n The Euler Maruyama sampler for reverse SDE / ODE\n See `Score-Based Generative Modeling through Stochastic Differential Equations`\n '
assert (isinstance(rsde, ReverseSDE) or isin... |
def LSimple(score_model: ScoreModel, x0, pred='noise_pred', **kwargs):
(t, noise, xt) = score_model.sde.sample(x0)
if (pred == 'noise_pred'):
noise_pred = score_model.noise_pred(xt, t, **kwargs)
return mos((noise - noise_pred))
elif (pred == 'x0_pred'):
x0_pred = score_model.x0_pre... |
class ImagePathDataset(torch.utils.data.Dataset):
def __init__(self, files, transforms=None):
self.files = files
self.transforms = transforms
def __len__(self):
return len(self.files)
def __getitem__(self, i):
path = self.files[i]
img = Image.open(path).convert('... |
def get_activations(files, model, batch_size=50, dims=2048, device='cpu', num_workers=8):
'Calculates the activations of the pool_3 layer for all images.\n\n Params:\n -- files : List of image files paths\n -- model : Instance of inception model\n -- batch_size : Batch size of images for ... |
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-06):
"Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n\n Stable v... |
def calculate_activation_statistics(files, model, batch_size=50, dims=2048, device='cpu', num_workers=8):
'Calculation of the statistics used by the FID.\n Params:\n -- files : List of image files paths\n -- model : Instance of inception model\n -- batch_size : The images numpy array is s... |
def compute_statistics_of_path(path, model, batch_size, dims, device, num_workers=8):
if path.endswith('.npz'):
with np.load(path) as f:
(m, s) = (f['mu'][:], f['sigma'][:])
else:
path = pathlib.Path(path)
files = sorted([file for ext in IMAGE_EXTENSIONS for file in path.gl... |
def save_statistics_of_path(path, out_path, device=None, batch_size=50, dims=2048, num_workers=8):
if (device is None):
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
else:
device = torch.device(device)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = I... |
def calculate_fid_given_paths(paths, device=None, batch_size=50, dims=2048, num_workers=8):
'Calculates the FID of two paths'
if (device is None):
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
else:
device = torch.device(device)
for p in paths:
if (not... |
class InceptionV3(nn.Module):
'Pretrained InceptionV3 network returning feature maps'
DEFAULT_BLOCK_INDEX = 3
BLOCK_INDEX_BY_DIM = {64: 0, 192: 1, 768: 2, 2048: 3}
def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX,), resize_input=True, normalize_input=True, requires_grad=False, use_fid_inception=... |
def _inception_v3(*args, **kwargs):
'Wraps `torchvision.models.inception_v3`\n\n Skips default weight inititialization if supported by torchvision version.\n See https://github.com/mseitzer/pytorch-fid/issues/28.\n '
try:
version = tuple(map(int, torchvision.__version__.split('.')[:2]))
e... |
def fid_inception_v3():
"Build pretrained Inception model for FID computation\n\n The Inception model for FID computation uses a different set of weights\n and has a slightly different structure than torchvision's Inception.\n\n This method first constructs torchvision's Inception and then patches the\n ... |
class FIDInceptionA(torchvision.models.inception.InceptionA):
'InceptionA block patched for FID computation'
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5... |
class FIDInceptionC(torchvision.models.inception.InceptionC):
'InceptionC block patched for FID computation'
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7... |
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
'First InceptionE block patched for FID computation'
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_... |
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
'Second InceptionE block patched for FID computation'
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3... |
def train(config):
if config.get('benchmark', False):
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
mp.set_start_method('spawn')
accelerator = accelerate.Accelerator()
device = accelerator.device
accelerate.utils.set_seed(config.seed, device_speci... |
def get_config_name():
    """Return the stem of the ``--config=...`` command-line flag, or None if absent."""
    for arg in sys.argv[1:]:
        if arg.startswith('--config='):
            # Take everything after the last '=' and strip dir + extension.
            return Path(arg.split('=')[-1]).stem
    return None
|
def get_hparams():
argv = sys.argv
lst = []
for i in range(1, len(argv)):
assert ('=' in argv[i])
if (argv[i].startswith('--config.') and (not argv[i].startswith('--config.dataset.path'))):
(hparam, val) = argv[i].split('=')
hparam = hparam.split('.')[(- 1)]
... |
def main(argv):
config = FLAGS.config
config.config_name = get_config_name()
config.hparams = get_hparams()
config.workdir = (FLAGS.workdir or os.path.join('workdir', config.config_name, config.hparams))
config.ckpt_root = os.path.join(config.workdir, 'ckpts')
config.sample_dir = os.path.join(... |
def set_logger(log_level='info', fname=None):
import logging as _logging
handler = logging.get_absl_handler()
formatter = _logging.Formatter('%(asctime)s - %(filename)s - %(message)s')
handler.setFormatter(formatter)
logging.set_verbosity(log_level)
if (fname is not None):
handler = _l... |
def dct2str(dct):
    """Render a metrics dict with every value formatted to 6 significant figures."""
    return str({name: format(value, '.6g') for name, value in dct.items()})
|
def get_nnet(name, **kwargs):
    """Instantiate the neural-network backbone selected by ``name``.

    Supported names: ``'uvit'`` and ``'uvit_t2i'``; anything else raises
    ``NotImplementedError(name)``.
    """
    if name == 'uvit':
        from libs.uvit import UViT
    elif name == 'uvit_t2i':
        from libs.uvit_t2i import UViT
    else:
        raise NotImplementedError(name)
    return UViT(**kwargs)
|
def set_seed(seed: int):
    """Seed the torch and numpy RNGs for reproducibility; no-op when ``seed`` is None."""
    if seed is None:
        return
    torch.manual_seed(seed)
    np.random.seed(seed)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.