code stringlengths 101 5.91M |
|---|
class GINEConv(nn.Module):
    """Thin wrapper around PyG's GINEConv driven by a 2-layer MLP with learnable eps."""

    def __init__(self, nin, nout, bias=True):
        super().__init__()
        # Two-layer MLP feeding the GINE aggregation (third/fourth args are the
        # MLP's depth and a flag whose meaning is defined by the project's MLP).
        self.nn = MLP(nin, nout, 2, False, bias=bias)
        self.layer = gnn.GINEConv(self.nn, train_eps=True)

    def reset_parameters(self):
        # Delegates to the wrapped PyG layer (which also resets the inner MLP).
        self.layer.reset_parameters()

    def forward(self, x, edge_index, edge_attr):
        # NOTE(review): presumably edge_attr's width must be compatible with nin
        # per GINEConv's contract — confirm against the MLP configuration.
        return self.layer(x, edge_index, edge_attr)
def extract_feature(inception_model, images):
    """Run *images* through the Inception trunk and return features as a NumPy array.

    The model is expected to yield a (batch, 2048) tensor when called with
    output_logits=False; the result is detached and moved to the CPU.
    """
    feats = inception_model(images, output_logits=False)
    feats = feats.detach().cpu().numpy()
    # Sanity check: pooled InceptionV3 features are 2048-dimensional.
    assert feats.ndim == 2 and feats.shape[1] == 2048
    return feats
def generate_codes_list(hwdb1x_codes_list: list, hwdb2x_train_codes_list: list, hwdb2x_test_codes_list: list):
    """Merge the three code lists into one, keeping first-seen order without duplicates.

    Fixes two issues in the original implementation: it appended into
    ``hwdb1x_codes_list`` itself (mutating the caller's list as a side effect),
    and it used O(n) list membership tests, making the merge quadratic.

    Returns:
        A new list containing every code exactly once, in first-seen order.
    """
    codes_list = []
    seen = set()  # O(1) membership instead of scanning the result list
    for source in (hwdb1x_codes_list, hwdb2x_train_codes_list, hwdb2x_test_codes_list):
        for code in source:
            if code not in seen:
                seen.add(code)
                codes_list.append(code)
    return codes_list
def show_sample(sample):
    """Pretty-print one evaluation sample (question, generated code, scores) to stdout."""
    print('==' * 20)
    print('idx:', sample['idx'])
    # Optional metadata fields, printed only when present.
    for key in ['type', 'level']:
        if key in sample:
            print('{}: {}'.format(key, sample[key]))
    print('question:', sample['question'])
    if 'code' in sample:
        # One generated attempt per entry, separated by a short rule.
        for code in sample['code']:
            print('-' * 20)
            print('code:', code)
        print('execution', sample['report'])
    # Scoring fields, printed only when present.
    for key in ['pred', 'gt', 'score', 'unit', 'gt_cot']:
        if key in sample:
            print('{}: {}'.format(key, sample[key]))
    print()
def add_parser_arguments(parser):
    """Register learning-rate-scheduler CLI options on *parser*.

    Each help string notes which scheduler(s) require the option.
    """
    add = parser.add_argument
    add('--last-epoch', type=int, default=-1, metavar='', help='lr scheduler - the index of last epoch required by [all]')
    add('--step-size', type=int, default=-1, metavar='', help='lr scheduler - period (epoch) of learning rate decay required by [steplr]')
    add('--milestones', type=cmd.str2intlist, default=[], metavar='', help='lr scheduler - increased list of epoch indices required by [multisteplr]')
    add('--gamma', type=float, default=-1, metavar='', help='lr scheduler - multiplicative factor of learning rate decay required by [steplr, multisteplr, exponentiallr]')
    add('--T-max', type=int, default=-1, metavar='', help='lr scheduler - maximum number of epochs required by [cosineannealinglr]')
    add('--eta-min', type=float, default=-1, metavar='', help='lr scheduler - minimum learning rate required by [cosineannealinglr]')
    add('--power', type=float, default=-1, metavar='', help='lr scheduler - power factor of learning rate decay required by [polynomiallr]')
class DataParallelCriterion(DataParallel):
    """DataParallel variant for loss modules.

    Here ``inputs`` is assumed to already be a list of per-device model
    outputs (one chunk per GPU), so only the targets/kwargs are scattered.
    The per-device losses are combined via ``Reduce`` and averaged.
    """
    def forward(self, inputs, *targets, **kwargs):
        # No devices configured: fall back to a plain single-process call.
        if (not self.device_ids):
            return self.module(inputs, *targets, **kwargs)
        # Scatter only the targets; `inputs` is presumably pre-scattered by a
        # companion DataParallelModel — TODO confirm at the call sites.
        (targets, kwargs) = self.scatter(targets, kwargs, self.device_ids)
        if (len(self.device_ids) == 1):
            return self.module(inputs, *targets[0], **kwargs[0])
        # Replicate the criterion onto as many devices as there are input chunks.
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
        # Sum across devices (autograd-aware via Reduce) and average over replicas.
        return (Reduce.apply(*outputs) / len(outputs))
class ResNet(nn.Module):
    """CIFAR-style ResNet (3x3 stem, four stages, 4x4 average pool) whose
    per-block channel widths come from ``cfg`` — two entries per stage — for
    pruned/slimmed variants. Shortcuts use a parameter-free downsample.
    """
    def __init__(self, block, num_blocks, cfg, num_classes=10):
        super(ResNet, self).__init__()
        # Two cfg entries are consumed per stage.
        n = 2
        self.in_planes = 64
        self.cfg = cfg
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], cfg=cfg[0:n], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], cfg=cfg[n:(2 * n)], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], cfg=cfg[(2 * n):(3 * n)], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], cfg=cfg[(3 * n):(4 * n)], stride=2)
        self.linear = nn.Linear((512 * block.expansion), num_classes)
    def _make_layer(self, block, planes, num_blocks, cfg, stride=1):
        # Parameter-free shortcut when the spatial size or width changes.
        downsample = None
        if ((stride != 1) or (self.in_planes != (planes * block.expansion))):
            downsample = partial(downsample_basic_block, planes=(planes * block.expansion))
        # NOTE(review): `strides` is computed but never used — only the first
        # block gets `stride`; likely leftover from a refactor.
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        # First block may stride/downsample; the remaining blocks keep stride 1.
        layers.append(block(self.in_planes, planes, cfg[0], stride, downsample))
        self.in_planes = (planes * block.expansion)
        for i in range(1, num_blocks):
            layers.append(block(self.in_planes, planes, cfg[i]))
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 4x4 pooling presumably assumes 32x32 inputs (CIFAR) — TODO confirm.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
def log_box_proposal_results(results):
    """Log box-proposal metrics for every dataset, values aligned by key name.

    Args:
        results: mapping of dataset name -> {'box_proposal': {metric: value}}.
    """
    for dataset, res in results.items():
        metrics = res['box_proposal']
        # Pad keys to the longest name so values line up; default=0 keeps an
        # empty metrics dict from crashing (the original max([]) raised ValueError).
        pad = max((len(k) for k in metrics), default=0)
        logger.info(dataset)
        for k, v in metrics.items():
            logger.info('{}: {:.3f}'.format(k.ljust(pad), v))
def torch_gc():
    """Release cached accelerator memory: CUDA when available, else Apple MPS."""
    if torch.cuda.is_available():
        # Free cached CUDA blocks and collect cross-process IPC handles.
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    elif torch.backends.mps.is_available():
        # torch.mps.empty_cache only exists on sufficiently new torch builds;
        # report (rather than crash) when it is missing.
        try:
            from torch.mps import empty_cache
            empty_cache()
        except Exception as e:
            print(e)
            print(' macOS pytorch 2.0.0 , torch ')
class SynthPairTnf_pck(object):
    """Build a synthetically warped image pair for PCK-style evaluation.

    The source image is only rescaled/cropped; the target image is warped by
    ``theta`` with the chosen geometric model. Both are symmetrically padded
    first so the warp does not sample outside the image.
    """
    def __init__(self, use_cuda=True, geometric_model='affine', crop_factor=(9 / 16), output_size=(240, 240), padding_factor=0.5):
        assert isinstance(use_cuda, bool)
        assert isinstance(crop_factor, float)
        assert isinstance(output_size, tuple)
        assert isinstance(padding_factor, float)
        self.use_cuda = use_cuda
        self.crop_factor = crop_factor
        self.padding_factor = padding_factor
        (self.out_h, self.out_w) = output_size
        # Identity (affine) transform used to rescale/crop the source image.
        self.rescalingTnf = GeometricTnf('affine', self.out_h, self.out_w, use_cuda=self.use_cuda)
        # The actual geometric model (affine/tps/...) used to warp the target.
        self.geometricTnf = GeometricTnf(geometric_model, self.out_h, self.out_w, use_cuda=self.use_cuda)
    def __call__(self, batch):
        # batch is presumably {'src_image', 'trg_image', 'theta'} tensors — TODO confirm.
        (src_image_batch, trg_image_batch, theta_batch) = (batch['src_image'], batch['trg_image'], batch['theta'])
        src_image_batch = self.symmetricImagePad(src_image_batch, self.padding_factor)
        trg_image_batch = self.symmetricImagePad(trg_image_batch, self.padding_factor)
        # Legacy autograd API: wrap without gradients.
        src_image_batch = Variable(src_image_batch, requires_grad=False)
        trg_image_batch = Variable(trg_image_batch, requires_grad=False)
        theta_batch = Variable(theta_batch, requires_grad=False)
        # Source: identity transform (crop back to output size); target: warp by theta.
        cropped_image_batch = self.rescalingTnf(src_image_batch, None, self.padding_factor, self.crop_factor)
        warped_image_batch = self.geometricTnf(trg_image_batch, theta_batch, self.padding_factor, self.crop_factor)
        return {'source_image': cropped_image_batch, 'target_image': warped_image_batch}
    def symmetricImagePad(self, image_batch, padding_factor):
        """Mirror-pad a (b, c, h, w) batch by padding_factor on each side."""
        (b, c, h, w) = image_batch.size()
        (pad_h, pad_w) = (int((h * padding_factor)), int((w * padding_factor)))
        # Reversed index ranges implement reflection at each border.
        idx_pad_left = torch.LongTensor(range((pad_w - 1), (- 1), (- 1)))
        idx_pad_right = torch.LongTensor(range((w - 1), ((w - pad_w) - 1), (- 1)))
        idx_pad_top = torch.LongTensor(range((pad_h - 1), (- 1), (- 1)))
        idx_pad_bottom = torch.LongTensor(range((h - 1), ((h - pad_h) - 1), (- 1)))
        if self.use_cuda:
            idx_pad_left = idx_pad_left.cuda()
            idx_pad_right = idx_pad_right.cuda()
            idx_pad_top = idx_pad_top.cuda()
            idx_pad_bottom = idx_pad_bottom.cuda()
        # Pad width (dim 3) first, then height (dim 2).
        image_batch = torch.cat((image_batch.index_select(3, idx_pad_left), image_batch, image_batch.index_select(3, idx_pad_right)), 3)
        image_batch = torch.cat((image_batch.index_select(2, idx_pad_top), image_batch, image_batch.index_select(2, idx_pad_bottom)), 2)
        return image_batch
class Caltech256(data.Dataset):
    """Caltech-256 image dataset, decoded fully into memory at construction.

    Fix: the original ``url`` line was an unterminated string literal (a
    syntax error — the URL was evidently stripped); restored to the canonical
    Caltech-256 archive URL. NOTE(review): the ``train`` argument is accepted
    but never used, and ``range(0, 257)`` skips category 257 ('%03d' % 0 also
    matches nothing) — confirm intent before changing, as it affects labels.
    """
    base_folder = '256_ObjectCategories'
    # Canonical archive location for the 256_ObjectCategories tarball.
    url = 'http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar'
    filename = '256_ObjectCategories.tar'
    tgz_md5 = '67b4f42ca05d46448c6bb8ecd2220f6d'

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted. You can use download=True to download it')
        self.data = []
        self.labels = []
        # Eagerly decode every image (high memory use, but matches original behavior).
        for cat in range(0, 257):
            print(cat)
            cat_dirs = glob.glob(os.path.join(self.root, self.base_folder, ('%03d*' % cat)))
            for fdir in cat_dirs:
                for fimg in glob.glob(os.path.join(fdir, '*.jpg')):
                    img = Image.open(fimg).convert('RGB')
                    self.data.append(img)
                    self.labels.append(cat)

    def __getitem__(self, index):
        """Return (image, target) with the optional transforms applied."""
        img, target = self.data[index], self.labels[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        return len(self.data)

    def _check_integrity(self):
        # Verify the downloaded tarball's MD5 checksum.
        fpath = os.path.join(self.root, self.filename)
        return check_integrity(fpath, self.tgz_md5)

    def download(self):
        """Download and extract the archive into self.root."""
        import tarfile
        root = self.root
        download_url(self.url, root, self.filename, self.tgz_md5)
        cwd = os.getcwd()
        tar = tarfile.open(os.path.join(root, self.filename), 'r')
        os.chdir(root)
        tar.extractall()
        tar.close()
        os.chdir(cwd)

    def __repr__(self):
        fmt_str = ('Dataset ' + self.__class__.__name__) + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return fmt_str
def base_kernels(dimensions=1, base_kernel_names='SE'):
    """Yield base kernels, expanding each non-thunk kernel across every data dimension."""
    for proto in base_kernels_without_dimension(base_kernel_names):
        if proto.is_thunk:
            # Thunk kernels are dimension-free; emit them unchanged.
            yield proto
        else:
            # One copy per dimension, tagged with its dimension index.
            for dim in range(dimensions):
                expanded = proto.copy()
                expanded.dimension = dim
                yield expanded
class ResNet_LandScape(nn.Module):
    """Torchvision-style ResNet.

    Fix: the original ``forward`` skipped the stem entirely (conv1/bn1/relu/
    maxpool were constructed but never called), feeding the raw 3-channel
    input into ``layer1`` which expects 64 channels; the stem is restored.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet_LandScape, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # Each element says whether to replace the 2x2 stride with dilation.
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN in each residual branch so blocks start as identity.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of `blocks` residual blocks, downsampling on the first."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if (stride != 1) or (self.inplanes != (planes * block.expansion)):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem (restored): conv1/bn1/relu/maxpool were defined but unused in
        # the original forward, which would fail on a 3-channel input.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def create_target_connection(target: ConfigTarget):
    """Open and return a connected session to *target*: SSH for Linux, SMB otherwise."""
    if target.os == 'linux':
        factory = get_ssh_connection
    else:
        factory = get_smb_connection
    conn = factory(target)
    conn.connect()
    return conn
@_BOX_FEATURE_EXTRACTORS.register('FPN2MLPFeatureExtractor')
class FPN2MLPFeatureExtractor(nn.Module):
    """Box-head feature extractor: ROI-pool FPN features, then two FC layers (fc6/fc7).

    Fix: the registration was a bare statement — the decorator ``@`` had been
    lost, so ``register(...)`` returned its decorator without ever receiving
    the class and nothing was registered. NOTE(review): confirm the registry's
    ``register`` follows the usual name->decorator convention.
    """

    def __init__(self, cfg, in_channels):
        super(FPN2MLPFeatureExtractor, self).__init__()
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(output_size=(resolution, resolution), scales=scales, sampling_ratio=sampling_ratio)
        # Flattened pooled feature size feeding fc6.
        input_size = in_channels * (resolution ** 2)
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        self.pooler = pooler
        self.fc6 = make_fc(input_size, representation_size, use_gn)
        self.fc7 = make_fc(representation_size, representation_size, use_gn)
        self.out_channels = representation_size

    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        return x
def GetModelParser():
    """Build the shared argparse parent parser (add_help=False) that holds all
    model/training hyperparameter options.

    Fixes: the --clip_weights registration was wrapped in a stray one-element
    tuple ``( ... ,)`` (a dead expression); two user-facing help-string typos
    ('modelsdo', 'trasform') corrected; --max_img_size gained ``type=int`` so
    CLI values parse as integers like its default.
    """
    parser = argparse.ArgumentParser(add_help=False)
    # Core training hyperparameters.
    parser.add_argument('-L', '--lr', '--learning_rate', help='Learning rate to be used in algorithm.', type=float, default=0.001)
    parser.add_argument('--model_directory', help='models directory', default='~/.costar/models')
    parser.add_argument('--reqs_directory', help='directory for reading in required submodels', type=str, default=None)
    parser.add_argument('-i', '--iter', help='Number of iterations to run', default=100, type=int)
    parser.add_argument('-b', '--batch_size', help='Batch size to use in the model', default=64, type=int)
    parser.add_argument('-e', '--epochs', help='Number of epochs', type=int, default=500)
    parser.add_argument('--initial_epoch', help='Where to start counting epochs', type=int, default=0)
    parser.add_argument('--data_file', '--file', help='File name for data archive.', default='data.npz')
    parser.add_argument('--model_descriptor', help='model description for use with save/load file', default='model')
    parser.add_argument('-m', '--model', help='Name of NN model to learn.', default=None, choices=GetModels())
    parser.add_argument('--optimizer', '--opt', help='optimizer to use with learning', default='adam')
    parser.add_argument('--clip_weights', help='clip the weights to [-value to +value] (0 is no clipping)', type=float, default=0.01)
    parser.add_argument('-z', '--zdim', '--noise_dim', help='size of action parameterization', type=int, default=1)
    parser.add_argument('-D', '--debug_model', '--dm', '--debug', help='Run a short script to debug the current model.', action='store_true')
    parser.add_argument('--clipnorm', help='Clip norm of gradients to this value to prevent exploding gradients.', default=100)
    parser.add_argument('--load_model', '--lm', help='Load model from file for tests.', action='store_true')
    parser.add_argument('--show_iter', '--si', help='Show output images from model training every N iterations.', default=0, type=int)
    parser.add_argument('--pretrain_iter', '--pi', help='Number of iterations of pretraining to run, in particular for training GAN discriminators.', default=0, type=int)
    parser.add_argument('--load_pretrained_weights', '--lpw', help='Load pretrained weights when training more complex models. Will usually fail gracefully if weights cannot be found. (GAN OPTION)', action='store_true')
    parser.add_argument('--cpu', help='Run in CPU-only mode, even if GPUs are available.', action='store_true')
    parser.add_argument('--seed', help='Seed used for running experiments.', type=int)
    parser.add_argument('--profile', help='Run cProfile on agent', action='store_true')
    parser.add_argument('--features', help='Specify feature function', default='multi', choices=GetAvailableFeatures())
    parser.add_argument('--steps_per_epoch', help='Steps per epoch (used with the generator-based version of the fit tool', default=300, type=int)
    parser.add_argument('--upsampling', help='set upsampling definition', choices=UpsamplingOptions(), default='conv_transpose')
    parser.add_argument('--hypothesis_dropout', help='dropout in hypothesis decoder', default=True, type=bool)
    parser.add_argument('--dropout_rate', '--dr', help='Dropout rate to use', type=float, default=0.1)
    parser.add_argument('--enc_loss', help='Add encoder loss', action='store_true')
    parser.add_argument('--use_noise', help='use random noise to sample distributions', action='store_true', default=False)
    parser.add_argument('--skip_connections', '--sc', help='use skip connections to generate better outputs', type=int, default=1)
    parser.add_argument('--use_ssm', '--ssm', help='use spatial softmax to compute global information', type=int, default=1)
    parser.add_argument('--decoder_dropout_rate', '--ddr', help='specify a separate dropout for the model decoder', default=None)
    parser.add_argument('--success_only', help='only train on positive examples', action='store_true')
    parser.add_argument('--loss', help='Loss for state variables: MSE, MAE, or log(cosh).', choices=['mse', 'mae', 'logcosh'], default='mae')
    parser.add_argument('--gan_method', help='Whether to train with GAN or no GAN', dest='gan_method', choices=['gan', 'mae', 'desc'], default='gan')
    parser.add_argument('--no_save_model', help='Should we save to the model file', default=True, dest='save_model', action='store_false')
    parser.add_argument('--retrain', help='Retrain sub-models', action='store_true')
    parser.add_argument('--submodel', help='Specific part of the planing model to train', choices=GetSubmodelOptions(), default='all')
    parser.add_argument('--use_batchnorm', help='Use batchnorm (defaults to false; many models do not use this parameter.', type=int, default=1)
    parser.add_argument('--option_num', help='Choose an option to learn for the multi-policy hierarchical model', type=int, default=None)
    parser.add_argument('--gpu_fraction', help='portion of the gpu to allocate for this job', type=float, default=1.0)
    parser.add_argument('--preload', help='preload all files into RAM', default=False, action='store_true')
    parser.add_argument('--wasserstein', help='Use weisserstein gan loss. Sets clip_weights to 0.01', default=False, dest='use_wasserstein', action='store_true')
    parser.add_argument('--validate', help='Validation mode.', action='store_true')
    parser.add_argument('--no_disc', help='Disable discriminator usage with images', action='store_true')
    parser.add_argument('--unique_id', help='Unique id to differentiate status file', default='')
    parser.add_argument('--dense_transform', help='Use dense layer for transform', default=False, action='store_true')
    parser.add_argument('--max_img_size', help='Set max size for frames to be resized into', default=224, type=int)
    return parser
class NuSVR(SvmModel, RegressorMixin):
    """Nu-Support-Vector regression wrapper around SvmModel."""
    _impl = 'nu_svr'

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0, nu=0.5, C=1.0, tol=0.001, probability=False, shrinking=False, cache_size=None, verbose=False, max_iter=-1, n_jobs=-1, max_mem_size=-1, gpu_id=0):
        # The nu formulation has no epsilon/class_weight/random_state knobs,
        # so those are pinned when delegating to the base class.
        super(NuSVR, self).__init__(kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, nu=nu, C=C, epsilon=0.0, tol=tol, probability=probability, class_weight=None, shrinking=shrinking, cache_size=cache_size, verbose=verbose, max_iter=max_iter, n_jobs=n_jobs, max_mem_size=max_mem_size, random_state=None, gpu_id=gpu_id)
class BasicTextNormalizer():
    """Lowercase text, strip bracketed/parenthesized asides and symbols, collapse whitespace."""

    def __init__(self, remove_diacritics: bool=False, split_letters: bool=False):
        # Pick the symbol cleaner up front so __call__ stays branch-free.
        if remove_diacritics:
            self.clean = remove_symbols_and_diacritics
        else:
            self.clean = remove_symbols
        self.split_letters = split_letters

    def __call__(self, s: str):
        s = s.lower()
        # Drop <...>/[...] tag spans, then (...) asides.
        s = re.sub('[<\\[][^>\\]]*[>\\]]', '', s)
        s = re.sub('\\(([^)]+?)\\)', '', s)
        s = self.clean(s).lower()
        if self.split_letters:
            # Space-separate extended grapheme clusters (\X needs the regex module).
            s = ' '.join(regex.findall('\\X', s, regex.U))
        # Collapse runs of whitespace to single spaces.
        s = re.sub('\\s+', ' ', s)
        return s
class CategoricalParams(DistributionParams[Categorical]):
    """Learnable logits parameterizing a Categorical distribution."""

    def __init__(self, n_categories, batch_shape: Size=torch.Size()):
        super().__init__(batch_shape=torch.Size(batch_shape))
        # One logit vector of length n_categories per batch element.
        self.logits = nn.Parameter(torch.randn(*batch_shape, n_categories))

    def get_distribution(self) -> Categorical:
        return Categorical(logits=self.logits)

    @staticmethod
    def from_distribution(dist: Categorical):
        """Alternate constructor: wrap an existing Categorical's logits as parameters.

        Fix: the original definition lacked @staticmethod, so calling it on an
        instance would have passed the instance as ``dist``.
        """
        new = CategoricalParams.__new__(CategoricalParams)
        # Initialize the base class without running our __init__ (keeps dist's logits).
        super(CategoricalParams, new).__init__(batch_shape=dist.batch_shape)
        new.logits = nn.Parameter(dist.logits)
        return new

    def extra_repr(self):
        # "<n_categories>[, batch_shape=...]"
        s = f'{self.logits.shape[-1]}'
        batch_shape = self.logits.shape[:-1]
        if batch_shape:
            s += f', batch_shape={batch_shape}'
        return s
class CallerMutation(ExternalCallHandler):
    # Handler for external calls that mutate the object they were called on.
    def handle(self) -> None:
        """Record the caller as mutated and propagate.

        The exact semantics of mutate_caller/should_propagate live on
        ExternalCallHandler — presumably marks the caller's symbol dirty and
        propagates staleness to dependents; confirm against the base class.
        """
        self.mutate_caller(should_propagate=True)
class FlaxDiffusionPipeline(metaclass=DummyObject):
    """Placeholder that raises a clear error when the 'flax' backend is missing."""
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    # Fix: from_config/from_pretrained are class-level constructors on the real
    # pipeline; without @classmethod, ClassName.from_pretrained(x) would bind
    # the first positional argument as `cls` instead of raising the intended
    # missing-backend error.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
    """Sample from the Gumbel-Softmax relaxation of a categorical distribution.

    With hard=True, uses the straight-through trick: the forward value is the
    one-hot argmax while gradients flow through the soft sample.
    """
    soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
    if not hard:
        return soft
    shape = logits.size()
    _, argmax = soft.data.max(-1)
    one_hot = torch.zeros(*shape)
    if soft.is_cuda:
        one_hot = one_hot.cuda()
    # Scatter 1.0 at the argmax position along the last dimension.
    one_hot = one_hot.zero_().scatter_(-1, argmax.view(shape[:-1] + (1,)), 1.0)
    # Straight-through: (one_hot - soft) is treated as a constant.
    return Variable(one_hot - soft.data) + soft
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: map a Python module to its source URL under repo_url."""
    # Only Python objects with a known module can be linked.
    if domain != 'py' or not info['module']:
        return None
    module_path = info['module'].replace('.', '/')
    return '{}/{}.py'.format(repo_url, module_path)
def save(subdir, b, p, dp, faces, gt_mesh, image, duration=5, fps=50):
    """Write the input image and ground-truth mesh into *subdir*, then save the animation.

    NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this requires a
    pinned old SciPy (or a switch to imageio.imwrite); confirm the environment.
    """
    from scipy.misc import imsave
    from util3d.mesh.obj_io import write_obj
    imsave(os.path.join(subdir, 'image.png'), image)
    # gt_mesh is presumably a dict-like with 'vertices'/'faces' arrays — TODO confirm.
    (v, f) = (np.array(gt_mesh[k]) for k in ('vertices', 'faces'))
    write_obj(os.path.join(subdir, 'gt_mesh.obj'), v, f)
    save_anim(subdir, b, p, dp, faces, duration, fps)
class Model(nn.Module):
    """Embedding -> single-layer LSTM -> linear classifier over the final hidden state."""

    def __init__(self, vsize, ncls):
        super().__init__()
        # Submodules created in the same order as the original (RNG-compatible).
        self.emb = nn.Embedding(vsize, 100)
        self.rnn = nn.LSTM(100, 100, 1)
        self.proj = nn.Linear(100, ncls)

    def forward(self, input_):
        embedded = self.emb(input_)
        _, (hidden, _cell) = self.rnn(embedded)
        # Classify from the top layer's final hidden state.
        return self.proj(hidden[-1])
def _name_cleaner(agent_name):
    """Map an internal agent identifier to its display name; unknown names pass through."""
    display_names = {'correct_ts': 'Correct TS', 'kl_ucb': 'KL UCB', 'misspecified_ts': 'Misspecified TS', 'ucb1': 'UCB1', 'ucb-best': 'UCB-best', 'nonstationary_ts': 'Nonstationary TS', 'stationary_ts': 'Stationary TS', 'greedy': 'greedy', 'ts': 'TS', 'action_0': 'Action 0', 'action_1': 'Action 1', 'action_2': 'Action 2', 'bootstrap': 'bootstrap TS', 'laplace': 'Laplace TS', 'thoughtful': 'Thoughtful TS', 'gibbs': 'Gibbs TS'}
    return display_names.get(agent_name, agent_name)
# NOTE(review): the line below is the remains of a stripped decorator (its
# argument list survived, the decorator call itself did not — as written it is
# a syntax error). Restore the decorator from the original source (it reads
# like a cython-style type header: a='double', spline='Spline',
# returns='double') — confirm against the upstream project.
(a='double', spline='Spline', returns='double')
def H(a=(- 1)):
    """Return the Hubble parameter at scale factor a; 0 when Hubble expansion is disabled.

    With a == -1 (the default) the current global scale factor universals.a is used.
    """
    if (not enable_Hubble):
        return 0
    if (a == (- 1)):
        a = universals.a
    # Spline of the tabulated expansion history; populated by init_time.
    spline = temporal_splines.a_H
    if (spline is None):
        abort('The function H(a) has not been tabulated. Have you called init_time?')
    # NOTE(review): `a(a)` calls a float — this expression looks garbled by
    # extraction; verify the original return expression before relying on it.
    return (a(a) * spline.eval_deriv(a))
def build_fake_yaml():
    """Write a hard-coded pruning configuration (YAML) to ./fake.yaml.

    Test fixture: two magnitude pruners over layer1.0 conv weights plus a
    top-1 accuracy metric, in the Neural-Compressor-style schema.
    """
    fake_yaml = "\n    model:\n      name: imagenet_prune\n      framework: pytorch\n\n    pruning:\n      approach:\n        weight_compression:\n          initial_sparsity: 0.0\n          target_sparsity: 0.97\n          start_epoch: 0\n          end_epoch: 3\n          pruners:\n            - !Pruner\n                start_epoch: 1\n                end_epoch: 3\n                prune_type: basic_magnitude\n                names: ['layer1.0.conv1.weight']\n\n            - !Pruner\n                target_sparsity: 0.6\n                prune_type: basic_magnitude\n                update_frequency: 2\n                names: ['layer1.0.conv2.weight']\n    evaluation:\n      accuracy:\n        metric:\n          topk: 1\n    "
    with open('fake.yaml', 'w', encoding='utf-8') as f:
        f.write(fake_yaml)
def _is_valid_explainer(proposed_explainer, expected_explainer_type):
    """Duck-type validation of an explainer object.

    Checks that its ``explainer_type`` matches and that every advertised
    explanation has a corresponding ``explain_<name>`` method. Returns False
    (never raises) on any mismatch or unexpected error.
    """
    try:
        # Read both attributes up front, matching the original's access order
        # (so a missing attribute is reported via the generic warning below).
        explainer_type = proposed_explainer.explainer_type
        available_explanations = proposed_explainer.available_explanations
        if explainer_type != expected_explainer_type:
            _log.warning('Proposed explainer is not a {}.'.format(expected_explainer_type))
            return False
        for available_explanation in available_explanations:
            if not hasattr(proposed_explainer, 'explain_' + available_explanation):
                _log.warning('Proposed explainer has available explanation {} but has no respective method.'.format(available_explanation))
                return False
        return True
    except Exception as e:
        _log.warning('Validate function threw exception {}'.format(e))
        return False
class MBartTokenizer(metaclass=DummyObject):
    # Dummy placeholder: instantiating raises a clear error telling the user
    # to install the 'sentencepiece' backend.
    _backends = ['sentencepiece']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
def weights_init_xavier(m):
    """Apply Xavier-normal init to Conv2d weights; leave other modules untouched.

    Intended for use with Module.apply(weights_init_xavier).
    """
    # Substring match so subclasses whose names contain 'Conv2d' are covered too.
    if 'Conv2d' in m.__class__.__name__:
        # NOTE: init.xavier_normal is the legacy (deprecated) alias of xavier_normal_.
        init.xavier_normal(m.weight.data)
def preload_training_data(cur_fraction, start_pos, end_pos):
    """Materialize the [start_pos, end_pos) slice of the training columns and
    cache each one as a .npy file under the per-fraction preload directory."""
    out_dir = os.path.join(dataset_path, 'PreLoad Training Dataset', 'fraction_' + str(cur_fraction))
    # (dataframe column, cached array name) pairs, saved in the original order.
    for column, target in (('input_spec_path', 'input_spec'), ('output_spec_path', 'output_spec'), ('dvector_path', 'dvec')):
        arr = load_col_data(df_train, list(range(num_samples)), start_pos, end_pos, column)
        np.save(os.path.join(out_dir, target), arr)
class Step9_GenerateCleanDataset():
    """Filter a dataset via a predicate over its info file, then copy the
    surviving audio files and their transcripts to the save path.

    The whole step is guarded by a DoneMarker so it runs at most once.
    Fixes: the bare ``except: pass`` around ``set_index`` (which swallowed
    every error) is narrowed to KeyError, and the 'bevore' output typo is
    corrected.
    """

    def __init__(self, savePath: str, infoFile: str, audioPersistenz: AudioPersistenz, transcriptsPersistenz: TranscriptsPersistenz, audioSamplingRateTransformer: AudioSamplingRateTransformer, transcriptsSelectionTransformer: TranscriptsSelectionTransformer, filter):
        self.audioSamplingRateTransformer = audioSamplingRateTransformer
        self.audioPersistenz = audioPersistenz
        self.transcriptsPersistenz = transcriptsPersistenz
        self.transcriptsSelectionTransformer = transcriptsSelectionTransformer
        self.savePath = savePath
        self.infoFile = infoFile
        self.filter = filter

    def run(self):
        """Execute self.script unless the DoneMarker says the step already completed."""
        doneMarker = DoneMarker(self.savePath)
        result = doneMarker.run(self.script, deleteFolder=False)
        return result

    def script(self):
        """Read the info file, apply the filter, and copy the surviving data."""
        df = pd.read_csv(self.infoFile, sep='|', index_col=0)
        try:
            df = df.set_index('id')
        except KeyError:
            # 'id' column absent (presumably already the index). The original
            # used a bare except here, which also hid genuine errors.
            pass
        print('Audios before: ', df.shape[0])
        filteredAudios = self.filter(df)
        print('Audios after: ', filteredAudios.shape[0])
        audiosAllowed = filteredAudios.index.tolist()
        self.copyAudioFiles(audiosAllowed)
        self.copyAndFilterTranscripts(audiosAllowed)

    def copyAudioFiles(self, audiosAllowed):
        """Re-save only the audio files whose name survived the filter."""
        countFiles = len(self.audioPersistenz.getIds())
        for audio in tqdm(self.audioPersistenz.loadAll(), total=countFiles):
            if audio.name in audiosAllowed:
                self.audioPersistenz.save(audio)

    def copyAndFilterTranscripts(self, usedAudioFileNames):
        """Restrict each transcript collection to the surviving audio names and save it."""
        for transcripts in tqdm(self.transcriptsPersistenz.loadAll()):
            filteredTranscript = self.transcriptsSelectionTransformer.transform(transcripts, usedAudioFileNames)
            self.transcriptsPersistenz.save(filteredTranscript)
def val_data():
    """Load the TFDS 'beans' train split and return it as a list of scaled (image, label) pairs."""
    datasets, info = tfds.load(name='beans', with_info=True, as_supervised=True, split=['train'])
    # split=['train'] yields a one-element list; index -1 picks that sole dataset.
    return [scale(image, label) for (image, label) in datasets[-1]]
def parse_with_config(parser, cmds=None):
    """Parse CLI args, then backfill unset options from the JSON file at args.config.

    Options explicitly passed on the command line (detected from '--key[=...]'
    tokens in sys.argv) take precedence over config-file values.

    Args:
        parser: a configured argparse.ArgumentParser that defines --config.
        cmds: optional list of argument strings; None means read sys.argv.

    Returns:
        The parsed argparse.Namespace with config-file values merged in.
    """
    # parse_args(None) reads sys.argv itself, so one call covers both cases.
    args = parser.parse_args(cmds)
    if args.config is not None:
        # Fix: the original json.load(open(...)) leaked the file handle.
        with open(args.config) as f:
            config_args = json.load(f)
        override_keys = {arg[2:].split('=')[0] for arg in sys.argv[1:] if arg.startswith('--')}
        for k, v in config_args.items():
            if k not in override_keys:
                setattr(args, k, v)
    return args
def get_f1(file, task, iters):
    """Scan a whitespace/comma-separated log file and return the column-9 score
    of the first line whose iteration (column 1) and task (column 3) match.

    Fixes: the file is now closed via a context manager (the original leaked
    it when int()/indexing raised), and a missing match raises a clear
    ValueError instead of the original's UnboundLocalError.

    Raises:
        ValueError: if no line matches (task, iters).
    """
    with open(file) as f:
        for raw in f:
            fields = raw.strip().replace(',', '').split()
            if int(fields[1]) == iters and fields[3] == task:
                return float(fields[9])
    raise ValueError('no line for task {!r} at iteration {}'.format(task, iters))
def _box_cxcywh_to_xyxy(boxes: Tensor) -> Tensor:
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2) along the last dimension."""
    cx, cy, w, h = boxes.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
    return torch.stack(corners, dim=-1)
def main():
    """CLI entry: compute ASR-BLEU between generated audio and text references,
    optionally writing the score and the predicted transcripts to disk."""
    parser = ArgumentParser(description="This script computes the ASR-BLEU metric between model's generated audio and the text reference sequences.")
    parser.add_argument('--lang', type=str, help='The target language used to initialize ASR model, see asr_model_cfgs.json for available languages')
    parser.add_argument('--asr_version', default='oct22', type=str, help='For future support we add and extra layer of asr versions. The current most recent version is oct22 meaning October 2022')
    parser.add_argument('--audio_dirpath', type=str, help='Path to the directory containing the audio predictions from the translation model')
    parser.add_argument('--reference_path', type=str, help='Path to the file containing reference translations in the form of normalized text (to be compared to ASR predictions')
    parser.add_argument('--reference_format', choices=['txt', 'tsv'], help='Format of reference file. Txt means plain text format where each line represents single reference sequence')
    parser.add_argument('--reference_tsv_column', type=str, default=None, help='If format is tsv, then specify the column name which contains reference sequence')
    parser.add_argument('--audio_format', choices=['n_pred.wav'], default='n_pred.wav', help='Audio format n_pred.wav corresponds to names like 94_pred.wav or 94_spk7_pred.wav where spk7 is the speaker id')
    parser.add_argument('--results_dirpath', type=str, default=None, help='If specified, the resulting BLEU score will be written to this file path as txt file')
    parser.add_argument('--transcripts_path', type=str, default=None, help='If specified, the predicted transcripts will be written to this path as a txt file.')
    args = parser.parse_args()
    prediction_transcripts, bleu_score = run_asr_bleu(args)
    if args.results_dirpath is not None:
        # Create the results directory on demand and write the 2-decimal score.
        results_dir = Path(args.results_dirpath)
        if not results_dir.exists():
            results_dir.mkdir(parents=True)
        result_filename = f'{args.reference_format}_{args.lang}_bleu.txt'
        with open(results_dir / result_filename, 'w') as f:
            f.write(bleu_score.format(width=2))
    if args.transcripts_path is not None:
        # One predicted transcript per line.
        with open(args.transcripts_path, 'w') as f:
            f.writelines(transcript + '\n' for transcript in prediction_transcripts)
def main(cmdargs):
    """Compute the distortion matrix of the auto/cross-correlation of delta fields.

    Parses command-line options, reads one (or two, for cross-correlations)
    sets of delta files, fills the module-level ``cf`` configuration,
    computes the distortion matrix in parallel over healpix pixels, and
    writes the result to a FITS file at ``--out``.

    Args:
        cmdargs: list of command-line argument strings (passed to
            ``parser.parse_args``).
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Compute the distortion matrix of the auto and cross-correlation of delta fields')
    parser.add_argument('--out', type=str, default=None, required=True, help='Output file name')
    parser.add_argument('--in-dir', type=str, default=None, required=True, help='Directory to delta files')
    parser.add_argument('--in-dir2', type=str, default=None, required=False, help='Directory to 2nd delta files')
    parser.add_argument('--rp-min', type=float, default=0.0, required=False, help='Min r-parallel [h^-1 Mpc]')
    parser.add_argument('--rp-max', type=float, default=200.0, required=False, help='Max r-parallel [h^-1 Mpc]')
    parser.add_argument('--rt-max', type=float, default=200.0, required=False, help='Max r-transverse [h^-1 Mpc]')
    parser.add_argument('--np', type=int, default=50, required=False, help='Number of r-parallel bins')
    parser.add_argument('--nt', type=int, default=50, required=False, help='Number of r-transverse bins')
    parser.add_argument('--coef-binning-model', type=int, default=1, required=False, help='Coefficient multiplying np and nt to get finner binning for the model')
    parser.add_argument('--z-cut-min', type=float, default=0.0, required=False, help='Use only pairs of forest x object with the mean of the last absorber redshift and the object redshift larger than z-cut-min')
    parser.add_argument('--z-cut-max', type=float, default=10.0, required=False, help='Use only pairs of forest x object with the mean of the last absorber redshift and the object redshift smaller than z-cut-max')
    parser.add_argument('--z-min-sources', type=float, default=0.0, required=False, help='Limit the minimum redshift of the quasars used as sources for spectra')
    parser.add_argument('--z-max-sources', type=float, default=10.0, required=False, help='Limit the maximum redshift of the quasars used as sources for spectra')
    parser.add_argument('--lambda-abs', type=str, default='LYA', required=False, help='Name of the absorption in picca.constants defining the redshift of the delta')
    parser.add_argument('--lambda-abs2', type=str, default=None, required=False, help='Name of the absorption in picca.constants defining the redshift of the 2nd delta')
    parser.add_argument('--z-ref', type=float, default=2.25, required=False, help='Reference redshift')
    parser.add_argument('--z-evol', type=float, default=2.9, required=False, help='Exponent of the redshift evolution of the delta field')
    parser.add_argument('--z-evol2', type=float, default=2.9, required=False, help='Exponent of the redshift evolution of the 2nd delta field')
    parser.add_argument('--fid-Om', type=float, default=0.315, required=False, help='Omega_matter(z=0) of fiducial LambdaCDM cosmology')
    parser.add_argument('--fid-Or', type=float, default=0.0, required=False, help='Omega_radiation(z=0) of fiducial LambdaCDM cosmology')
    parser.add_argument('--fid-Ok', type=float, default=0.0, required=False, help='Omega_k(z=0) of fiducial LambdaCDM cosmology')
    parser.add_argument('--fid-wl', type=float, default=(- 1.0), required=False, help='Equation of state of dark energy of fiducial LambdaCDM cosmology')
    parser.add_argument('--no-project', action='store_true', required=False, help='Do not project out continuum fitting modes')
    parser.add_argument('--remove-same-half-plate-close-pairs', action='store_true', required=False, help='Reject pairs in the first bin in r-parallel from same half plate')
    parser.add_argument('--rej', type=float, default=1.0, required=False, help='Fraction of rejected forest-forest pairs: -1=no rejection, 1=all rejection')
    parser.add_argument('--nside', type=int, default=16, required=False, help='Healpix nside')
    parser.add_argument('--nproc', type=int, default=None, required=False, help='Number of processors')
    parser.add_argument('--nspec', type=int, default=None, required=False, help='Maximum number of spectra to read')
    parser.add_argument('--unfold-cf', action='store_true', required=False, help='rp can be positive or negative depending on the relative position between absorber1 and absorber2')
    parser.add_argument('--rebin-factor', type=int, default=None, required=False, help='Rebin factor for deltas. If not None, deltas will be rebinned by that factor')
    args = parser.parse_args(cmdargs)
    # Default to half the available cores when --nproc is not given.
    if (args.nproc is None):
        args.nproc = (cpu_count() // 2)
    userprint('nproc', args.nproc)
    # Propagate parsed options into the module-level `cf` configuration
    # (read by the `calc_dmat` workers).
    cf.r_par_max = args.rp_max
    cf.r_par_min = args.rp_min
    cf.r_trans_max = args.rt_max
    cf.z_cut_max = args.z_cut_max
    cf.z_cut_min = args.z_cut_min
    cf.num_bins_r_par = args.np
    cf.num_bins_r_trans = args.nt
    cf.num_model_bins_r_par = (args.np * args.coef_binning_model)
    cf.num_model_bins_r_trans = (args.nt * args.coef_binning_model)
    cf.nside = args.nside
    cf.z_ref = args.z_ref
    cf.alpha = args.z_evol
    cf.reject = args.rej
    cf.lambda_abs = constants.ABSORBER_IGM[args.lambda_abs]
    cf.remove_same_half_plate_close_pairs = args.remove_same_half_plate_close_pairs
    # Blinding strategy is read from the input deltas and stored in the header.
    blinding = io.read_blinding(args.in_dir)
    cosmo = constants.Cosmo(Om=args.fid_Om, Or=args.fid_Or, Ok=args.fid_Ok, wl=args.fid_wl, blinding=blinding)
    t0 = time.time()
    # Read the (first) set of deltas; only the minimum redshift is needed later.
    (data, num_data, z_min, z_max) = io.read_deltas(args.in_dir, cf.nside, cf.lambda_abs, cf.alpha, cf.z_ref, cosmo, max_num_spec=args.nspec, no_project=args.no_project, nproc=args.nproc, rebin_factor=args.rebin_factor, z_min_qso=args.z_min_sources, z_max_qso=args.z_max_sources)
    del z_max
    cf.data = data
    cf.num_data = num_data
    cf.ang_max = utils.compute_ang_max(cosmo, cf.r_trans_max, z_min)
    userprint('')
    userprint('done, npix = {}'.format(len(data)))
    # Optional second set of deltas (cross-correlation case).
    if (args.in_dir2 or args.lambda_abs2):
        if (args.lambda_abs2 or args.unfold_cf):
            cf.x_correlation = True
        cf.alpha2 = args.z_evol2
        if (args.in_dir2 is None):
            args.in_dir2 = args.in_dir
        if args.lambda_abs2:
            cf.lambda_abs2 = constants.ABSORBER_IGM[args.lambda_abs2]
        else:
            cf.lambda_abs2 = cf.lambda_abs
        (data2, num_data2, z_min2, z_max2) = io.read_deltas(args.in_dir2, cf.nside, cf.lambda_abs2, cf.alpha2, cf.z_ref, cosmo, max_num_spec=args.nspec, no_project=args.no_project, nproc=args.nproc, rebin_factor=args.rebin_factor, z_min_qso=args.z_min_sources, z_max_qso=args.z_max_sources)
        del z_max2
        cf.data2 = data2
        cf.num_data2 = num_data2
        # Recompute the maximum angle using both minimum redshifts.
        cf.ang_max = utils.compute_ang_max(cosmo, cf.r_trans_max, z_min, z_min2)
        userprint('')
        userprint('done, npix = {}'.format(len(data2)))
    t1 = time.time()
    userprint(f'picca_dmat.py - Time reading data: {((t1 - t0) / 60):.3f} minutes')
    # Shared progress counter/lock for the worker processes.
    cf.counter = Value('i', 0)
    cf.lock = Lock()
    # Round-robin the healpix pixels over the available workers.
    cpu_data = {}
    for (index, healpix) in enumerate(sorted(data)):
        num_processor = (index % args.nproc)
        if (num_processor not in cpu_data):
            cpu_data[num_processor] = []
        cpu_data[num_processor].append(healpix)
    if (args.nproc > 1):
        context = multiprocessing.get_context('fork')
        pool = context.Pool(processes=args.nproc)
        dmat_data = pool.map(calc_dmat, sorted(cpu_data.values()))
        pool.close()
    elif (args.nproc == 1):
        dmat_data = map(calc_dmat, sorted(cpu_data.values()))
    t2 = time.time()
    userprint(f'picca_dmat.py - Time computing distortion matrix: {((t2 - t1) / 60):.3f} minutes')
    # Accumulate the per-worker partial sums.
    dmat_data = list(dmat_data)
    weights_dmat = np.array([item[0] for item in dmat_data]).sum(axis=0)
    dmat = np.array([item[1] for item in dmat_data]).sum(axis=0)
    r_par = np.array([item[2] for item in dmat_data]).sum(axis=0)
    r_trans = np.array([item[3] for item in dmat_data]).sum(axis=0)
    z = np.array([item[4] for item in dmat_data]).sum(axis=0)
    weights = np.array([item[5] for item in dmat_data]).sum(axis=0)
    num_pairs = np.array([item[6] for item in dmat_data]).sum(axis=0)
    num_pairs_used = np.array([item[7] for item in dmat_data]).sum(axis=0)
    # Normalize the weighted sums where weights are non-zero.
    w = (weights > 0.0)
    r_par[w] /= weights[w]
    r_trans[w] /= weights[w]
    z[w] /= weights[w]
    w = (weights_dmat > 0)
    # Broadcast the per-row weight over the matrix columns.
    dmat[w] /= weights_dmat[(w, None)]
    # Write results and provenance header to the output FITS file.
    results = fitsio.FITS(args.out, 'rw', clobber=True)
    header = [{'name': 'RPMIN', 'value': cf.r_par_min, 'comment': 'Minimum r-parallel [h^-1 Mpc]'}, {'name': 'RPMAX', 'value': cf.r_par_max, 'comment': 'Maximum r-parallel [h^-1 Mpc]'}, {'name': 'RTMAX', 'value': cf.r_trans_max, 'comment': 'Maximum r-transverse [h^-1 Mpc]'}, {'name': 'NP', 'value': cf.num_bins_r_par, 'comment': 'Number of bins in r-parallel'}, {'name': 'NT', 'value': cf.num_bins_r_trans, 'comment': 'Number of bins in r-transverse'}, {'name': 'COEFMOD', 'value': args.coef_binning_model, 'comment': 'Coefficient for model binning'}, {'name': 'ZCUTMIN', 'value': cf.z_cut_min, 'comment': 'Minimum redshift of pairs'}, {'name': 'ZCUTMAX', 'value': cf.z_cut_max, 'comment': 'Maximum redshift of pairs'}, {'name': 'REJ', 'value': cf.reject, 'comment': 'Rejection factor'}, {'name': 'NPALL', 'value': num_pairs, 'comment': 'Number of pairs'}, {'name': 'NPUSED', 'value': num_pairs_used, 'comment': 'Number of used pairs'}, {'name': 'OMEGAM', 'value': args.fid_Om, 'comment': 'Omega_matter(z=0) of fiducial LambdaCDM cosmology'}, {'name': 'OMEGAR', 'value': args.fid_Or, 'comment': 'Omega_radiation(z=0) of fiducial LambdaCDM cosmology'}, {'name': 'OMEGAK', 'value': args.fid_Ok, 'comment': 'Omega_k(z=0) of fiducial LambdaCDM cosmology'}, {'name': 'WL', 'value': args.fid_wl, 'comment': 'Equation of state of dark energy of fiducial LambdaCDM cosmology'}, {'name': 'BLINDING', 'value': blinding, 'comment': 'String specifying the blinding strategy'}]
    dmat_name = 'DM'
    # Blinded analyses get a distinguishing column name.
    if (blinding != 'none'):
        dmat_name += '_BLIND'
    results.write([weights_dmat, dmat], names=['WDM', dmat_name], comment=['Sum of weight', 'Distortion matrix'], units=['', ''], header=header, extname='DMAT')
    results.write([r_par, r_trans, z], names=['RP', 'RT', 'Z'], comment=['R-parallel', 'R-transverse', 'Redshift'], units=['h^-1 Mpc', 'h^-1 Mpc', ''], extname='ATTRI')
    results.close()
    t3 = time.time()
    userprint(f'picca_dmat.py - Time total : {((t3 - t0) / 60):.3f} minutes')
def main(dataset=None):
    """Compute label clusters for the given dataset and print a summary.

    Falls back to the default KITTI training split when no dataset is
    supplied.
    """
    if not dataset:
        dataset = DatasetBuilder.build_kitti_dataset(DatasetBuilder.KITTI_TRAIN)
    cluster_utils = dataset.kitti_utils.label_cluster_utils
    print('Generating clusters in {}/{}'.format(cluster_utils.data_dir, dataset.data_split))
    clusters, std_devs = dataset.get_cluster_info()
    print('Clusters generated')
    print('classes: {}'.format(dataset.classes))
    print('num_clusters: {}'.format(dataset.num_clusters))
    print('all_clusters:\n {}'.format(clusters))
    print('all_std_devs:\n {}'.format(std_devs))
@_checkable
class JuEstimatorLike(EstimatorLikeFit1, Protocol):
    """Structural (Protocol) type for julearn-style estimators.

    BUG FIX: the original had a bare ``_checkable`` expression (a no-op)
    on the line above the class; it was clearly meant to be applied as a
    decorator (e.g. ``typing.runtime_checkable``) so that ``isinstance``
    checks against this Protocol work.
    """

    def get_needed_types(self) -> ColumnTypes:
        """Return the column types this estimator requires as input."""
        return ColumnTypes('placeholder')

    def get_apply_to(self) -> ColumnTypes:
        """Return the column types this estimator is applied to."""
        return ColumnTypes('placeholder')
def rename_and_save_block(current_block, save_path):
    """Rename the block's keys, flatten '/'-separated names to '.', and save.

    The renamed state dict is written to ``save_path`` with ``torch.save``.
    """
    renamed = rename_keys(current_block)
    # Convert path-style keys ("a/b/c") to attribute-style keys ("a.b.c").
    flattened = {key.replace('/', '.'): value for key, value in renamed.items()}
    torch.save(flattened, save_path)
def register_agent(id=None, **kwargs):
    """Return a decorator that records an agent (plus metadata) in the registry.

    When ``id`` is omitted, a name is derived dynamically from the caller.
    """
    agent_id = get_dynamic_name() if id is None else id
    print('Registering agent %s' % agent_id)

    def wrap(agent):
        # Store the agent class alongside any extra registration metadata.
        _agent_registry[agent_id] = dict(agent=agent, **kwargs)
        return agent

    return wrap
def test_synthesized_onnx_model(tmp_path):
    """Smoke test: synthesize a tiny ONNX model and verify it on TensorRT."""
    out_dir = tmp_path / 'test_trt_onnx'
    out_dir.mkdir()
    ONNXModel = Model.init('onnx')
    factory = BackendFactory.init('tensorrt', target='cuda', optmax=True)
    # Fixed seed keeps the generated single-node graph reproducible.
    gen = model_gen(opset=auto_opset(ONNXModel, factory), seed=23132, max_nodes=1)
    model = ONNXModel.from_gir(gen.make_concrete())
    assert model.with_torch
    model.refine_weights()
    testcase = TestCase(model, model.make_oracle())
    testcase.dump(root_folder=out_dir)
    # verify_testcase returns None on success, a bug report otherwise.
    assert factory.verify_testcase(testcase) is None
def train():
    """Train a small TCN classifier on flattened 28x28 images for 10 epochs."""
    depth = 6
    filters = 25
    model = tcn.build_model(
        sequence_length=28 * 28,
        channels=1,
        num_classes=10,
        filters=[filters] * depth,
        kernel_size=8,
    )
    model.compile(optimizer='Adam', metrics=[metrics.SparseCategoricalAccuracy()], loss=losses.SparseCategoricalCrossentropy())
    print(model.summary())
    train_dataset, test_dataset = load_dataset()
    # Timestamped log directory so successive runs don't overwrite each other.
    log_dir = Path('logs') / datetime.now().strftime('%Y-%m-%dT%H-%M_%S')
    model.fit(
        train_dataset.batch(32),
        validation_data=test_dataset.batch(32),
        callbacks=[TensorBoard(str(log_dir))],
        epochs=10,
    )
def load_csv(data_dir):
    """Load a CSV/TSV summarization dataset and return (sources, targets).

    The separator is inferred from the file extension ('.tsv' -> tab,
    otherwise comma). On a failed first read, the file is retried with a
    tab separator; a UnicodeDecodeError on that retry falls back to a
    comma-separated ISO-8859-1 read.

    Args:
        data_dir: path to the CSV/TSV file.

    Returns:
        Tuple (src, tgt) of lists: source documents and target summaries.

    Raises:
        IndexError: if neither an 'abstractive' nor a 'bot_summary'
            column is present.
    """
    import pandas as pd
    sep = '\t' if data_dir.endswith('.tsv') else ','
    try:
        df = pd.read_csv(data_dir, sep=sep, header=0, encoding='utf-8')
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception (behavior otherwise kept).
    except Exception:
        try:
            df = pd.read_csv(data_dir, sep='\t', header=0, encoding='utf-8')
        except UnicodeDecodeError:
            df = pd.read_csv(data_dir, sep=',', header=0, encoding='ISO-8859-1')
    print(df.head())
    if 'abstractive' in df.columns:
        src = list(df['contents'].values)
        tgt = list(df['abstractive'].values)
    elif 'bot_summary' in df.columns:
        # These columns may contain NaN; force them to strings first.
        df['content'] = df['content'].astype(str)
        df['bot_summary'] = df['bot_summary'].astype(str)
        src = list(df['content'].values)
        tgt = list(df['bot_summary'].values)
    else:
        raise IndexError
    return (src, tgt)
class CelebAHQDatasetParams(util.Params):
    """Parameter bundle for the CelebA-HQ dataset pipeline."""

    def get_allowed_params_with_defaults(self):
        """Return every supported parameter together with its default value."""
        defaults = dict(
            values_range=(-1.0, 1.0),
            img_side=128,
            data_dir=None,
            train_shuffle=True,
            gcs_bucket=None,
            tfrecord_dir=constants.NVIDIA_CELEBA_HQ_DATASET_PATH,
            random_flip=False,
            crop_at_center=False,
            restrict_to_num_imgs=None,
            max_tfrecord_res_available=10,
        )
        return defaults

    def validate(self):
        """Check that img_side is a supported power-of-two resolution."""
        assert self.img_side in [4, 8, 16, 32, 64, 128, 256, 512, 1024]
class Meters():
    """Container tracking one running AverageMeter per metric name."""

    def __init__(self):
        # Maps metric name -> AverageMeter (created lazily on first update).
        self.meters = {}

    def get_names(self):
        """Return a view of all registered meter names."""
        return self.meters.keys()

    def reset(self):
        """Zero out every registered meter."""
        for meter in self.meters.values():
            meter.reset()

    def update(self, name, val):
        """Record ``val`` under ``name``, creating the meter on first use."""
        if name not in self.meters:
            self.meters[name] = AverageMeter()
        self.meters[name].update(val)

    def avg(self, name):
        """Return the current running average for ``name``."""
        return self.meters[name].avg
class TFFunnelForQuestionAnswering():
    """Placeholder class used when TensorFlow is unavailable.

    Every entry point defers to ``requires_tf``, which reports the
    missing backend instead of exposing the real model.
    """

    def __init__(self, *args, **kwargs):
        # Signal the missing TensorFlow dependency on instantiation.
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        """Mirror of the real API; also gated on TensorFlow availability."""
        requires_tf(self)
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
type_b = type(value_b)
type_a = type(value_a)
if (type_a is type_b):
return value_a
if isinstance(value_b, six.string_types):
value_a = str(value_a)
elif (isinstance(value_a, tuple) and isinstance(value_b, list)):
value_a = list(value_a)
elif (isinstance(value_a, list) and isinstance(value_b, tuple)):
value_a = tuple(value_a)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, value_b, value_a, full_key))
return value_a |
def scale(image, label):
    """Normalize an image to [0, 1], resize to 224x224, and one-hot the label.

    Returns a (resized_image, one_hot_label) tuple with 3 label classes.
    """
    target_w, target_h, num_classes = 224, 224, 3
    # Cast before dividing so integer images normalize correctly.
    normalized = tf.cast(image, tf.float32) / 255.0
    return (tf.image.resize(normalized, [target_w, target_h]), tf.one_hot(label, num_classes))
class FrontendCheckerResult(NamedTuple):
    """Result of a frontend checker pass over notebook cells.

    Collects, per cell id, which cells are waiting on stale inputs, which
    are ready to (re)run, forced-reactive cells, typecheck failures,
    unsafe execution-order information, and the waiter / ready-maker link
    graphs the frontend renders.
    """

    # Cells blocked on inputs that are not up to date.
    waiting_cells: Set[IdType]
    # Cells whose inputs changed and are ready to re-run.
    ready_cells: Set[IdType]
    # Subset of ready cells that became ready most recently.
    new_ready_cells: Set[IdType]
    forced_reactive_cells: Set[IdType]
    forced_cascading_reactive_cells: Set[IdType]
    typecheck_error_cells: Set[IdType]
    # cell id -> later-positioned cells that last updated a symbol it uses.
    unsafe_order_cells: Dict[(IdType, Set[Cell])]
    unsafe_order_symbol_usage: Dict[(IdType, List[Dict[(str, Any)]])]
    # waiting cell id -> ids of cells whose execution could unblock it.
    waiter_links: Dict[(IdType, Set[IdType])]
    # ready-making cell id -> ids of waiting cells it can unblock.
    ready_maker_links: Dict[(IdType, Set[IdType])]
    phantom_cell_info: Dict[(IdType, Dict[(IdType, Set[int])])]
    # Whether "new ready" cells may be reported to the frontend.
    allow_new_ready: bool

    # BUG FIX: `empty` takes `cls` and calls `cls(...)` but had no
    # @classmethod decorator, so `FrontendCheckerResult.empty()` raised
    # TypeError (missing positional argument).
    @classmethod
    def empty(cls, allow_new_ready: bool=True):
        """Return a result with all collections empty."""
        return cls(waiting_cells=set(), ready_cells=set(), new_ready_cells=set(), forced_reactive_cells=set(), forced_cascading_reactive_cells=set(), typecheck_error_cells=set(), unsafe_order_cells=defaultdict(set), unsafe_order_symbol_usage=defaultdict(list), waiter_links=defaultdict(set), ready_maker_links=defaultdict(set), phantom_cell_info={}, allow_new_ready=allow_new_ready)

    def to_json(self) -> Dict[(str, Any)]:
        """Serialize the subset of fields the frontend consumes.

        Typecheck-error cells are folded into the waiting set; new-ready
        cells are suppressed when ``allow_new_ready`` is False.
        """
        return {'waiting_cells': list((self.waiting_cells | self.typecheck_error_cells)), 'ready_cells': list(self.ready_cells), 'new_ready_cells': (list(self.new_ready_cells) if self.allow_new_ready else []), 'forced_reactive_cells': list(self.forced_reactive_cells), 'forced_cascading_reactive_cells': list(self.forced_cascading_reactive_cells), 'unsafe_order_cells': {cell_id: [unsafe.cell_id for unsafe in unsafe_order_cells] for (cell_id, unsafe_order_cells) in self.unsafe_order_cells.items()}, 'unsafe_order_symbol_usage': self.unsafe_order_symbol_usage, 'waiter_links': {cell_id: list(linked_cell_ids) for (cell_id, linked_cell_ids) in self.waiter_links.items()}, 'ready_maker_links': {cell_id: list(linked_cell_ids) for (cell_id, linked_cell_ids) in self.ready_maker_links.items()}}

    def _compute_waiter_and_ready_maker_links(self) -> None:
        """Transitively close waiter links, then invert them into ready-maker links."""
        # Iterate to a fixed point: a waiting cell inherits the links of any
        # waiting cell it depends on.
        waiter_link_changes = True
        while waiter_link_changes:
            waiter_link_changes = False
            for waiting_cell_id in self.waiting_cells:
                new_waiter_links = set(self.waiter_links[waiting_cell_id])
                original_length = len(new_waiter_links)
                for ready_making_cell_id in self.waiter_links[waiting_cell_id]:
                    if (ready_making_cell_id not in self.waiting_cells):
                        continue
                    new_waiter_links |= self.waiter_links[ready_making_cell_id]
                new_waiter_links.discard(waiting_cell_id)
                waiter_link_changes = (waiter_link_changes or (original_length != len(new_waiter_links)))
                self.waiter_links[waiting_cell_id] = new_waiter_links
        # Drop still-waiting cells from the links and build the reverse map.
        for waiting_cell_id in self.waiting_cells:
            self.waiter_links[waiting_cell_id] -= self.waiting_cells
            for ready_making_cell_id in self.waiter_links[waiting_cell_id]:
                self.ready_maker_links[ready_making_cell_id].add(waiting_cell_id)

    def _compute_ready_making_cells(self, waiting_symbols_by_cell_id: Dict[(IdType, Set[Symbol])], killing_cell_ids_for_symbol: Dict[(Symbol, Set[IdType])], last_executed_cell_id: Optional[IdType]) -> None:
        """For each waiting cell, record which cells could unblock it."""
        flow_ = flow()
        eligible_ready_making_for_dag = (self.ready_cells | self.waiting_cells)
        for waiting_cell_id in self.waiting_cells:
            ready_making_cell_ids: Set[IdType] = set()
            if (flow_.mut_settings.exec_schedule in (ExecutionSchedule.DAG_BASED, ExecutionSchedule.HYBRID_DAG_LIVENESS_BASED)):
                # DAG schedules: ready-makers are the waiting cell's DAG parents.
                for _ in flow_.mut_settings.iter_slicing_contexts():
                    ready_making_cell_ids |= (cells().from_id(waiting_cell_id).directional_parents.keys() & eligible_ready_making_for_dag)
            else:
                # Liveness schedules: ready-makers are cells that (re)define
                # one of the symbols this cell is waiting on.
                waiting_syms = waiting_symbols_by_cell_id.get(waiting_cell_id, set())
                ready_making_cell_ids = ready_making_cell_ids.union(*(killing_cell_ids_for_symbol[waiting_sym] for waiting_sym in waiting_syms))
            if (flow_.mut_settings.flow_order == FlowDirection.IN_ORDER):
                # In-order flow: only earlier-positioned cells may unblock this one.
                ready_making_cell_ids = {cid for cid in ready_making_cell_ids if (cells().from_id(cid).position < cells().from_id(waiting_cell_id).position)}
            if (last_executed_cell_id is not None):
                ready_making_cell_ids.discard(last_executed_cell_id)
            self.waiter_links[waiting_cell_id] = ready_making_cell_ids

    def _compute_reactive_cells_for_reactive_symbols(self, checker_results_by_cid: Dict[(IdType, CheckerResult)], last_executed_cell_pos: int) -> None:
        """Mark ready cells as forced-(cascading-)reactive based on reactive symbol usage."""
        flow_ = flow()
        for cell_id in self.ready_cells:
            if (cell_id not in checker_results_by_cid):
                continue
            cell = cells().from_id(cell_id)
            if ((flow_.mut_settings.flow_order == FlowDirection.IN_ORDER) and (cell.position < last_executed_cell_pos)):
                continue
            max_used_ctr = cell.get_max_used_live_symbol_cell_counter(checker_results_by_cid[cell_id].live, filter_to_reactive=True)
            if (max_used_ctr > max(cell.cell_ctr, flow_.min_forced_reactive_cell_counter())):
                self.forced_reactive_cells.add(cell_id)
            max_used_ctr = cell.get_max_used_live_symbol_cell_counter(checker_results_by_cid[cell_id].live, filter_to_cascading_reactive=True)
            if (max_used_ctr > max(cell.cell_ctr, flow_.min_forced_reactive_cell_counter())):
                self.forced_cascading_reactive_cells.add(cell_id)

    def _compute_dag_based_waiters(self, cells_to_check: List[Cell]) -> None:
        """Propagate waiting status along DAG edges until a fixed point is reached."""
        flow_ = flow()
        if (flow_.mut_settings.exec_schedule not in (ExecutionSchedule.DAG_BASED, ExecutionSchedule.HYBRID_DAG_LIVENESS_BASED)):
            return
        prev_waiting_cells: Set[IdType] = set()
        while True:
            for cell in cells_to_check:
                if (cell.cell_id in self.waiting_cells):
                    continue
                for _ in flow_.mut_settings.iter_slicing_contexts():
                    if (cell.directional_parents.keys() & (self.ready_cells | self.waiting_cells)):
                        self.waiting_cells.add(cell.cell_id)
                        # NOTE(review): this `continue` advances the inner
                        # slicing-context loop, not the outer cell loop —
                        # confirm that is intended.
                        continue
            if (prev_waiting_cells == self.waiting_cells):
                break
            prev_waiting_cells = set(self.waiting_cells)
        # Waiting cells cannot simultaneously be ready.
        self.ready_cells.difference_update(self.waiting_cells)
        self.new_ready_cells.difference_update(self.waiting_cells)
        for cell_id in self.waiting_cells:
            cells().from_id(cell_id).set_ready(False)

    def _compute_readiness(self, cell: Cell, checker_result: CheckerResult) -> Tuple[(bool, bool)]:
        """Decide whether ``cell`` is ready / newly ready under the active schedule.

        Returns:
            (is_ready, is_new_ready) — a waiting cell is never ready.
        """
        flow_ = flow()
        cell_id = cell.cell_id
        if (cell_id in self.waiting_cells):
            return (False, False)
        is_ready = False
        is_new_ready = False
        exec_schedule = flow_.mut_settings.exec_schedule
        flow_order = flow_.mut_settings.flow_order
        if (flow_.mut_settings.exec_schedule in (ExecutionSchedule.DAG_BASED, ExecutionSchedule.HYBRID_DAG_LIVENESS_BASED)):
            # DAG-based: ready when a parent ran after this cell and actually
            # produced one of the symbols this cell depends on.
            for _ in flow_.mut_settings.iter_slicing_contexts():
                if is_new_ready:
                    break
                for _ in iter_dangling_contexts():
                    if is_new_ready:
                        break
                    for (pid, syms) in cell.directional_parents.items():
                        par = cells().from_id(pid)
                        if ((flow_.fake_edge_sym in syms) and (cell.cell_ctr < 0 < par.cell_ctr)):
                            is_ready = True
                            break
                        if ((max(cell.cell_ctr, flow_.min_timestamp) < par.cell_ctr) and (par.cell_ctr in {sym.timestamp.cell_num for sym in syms})):
                            is_ready = True
                            if ((par.cell_ctr >= flow_.min_new_ready_cell_counter()) and (cell.cell_ctr > 0)):
                                is_new_ready = True
                                break
        if ((not is_new_ready) and ((exec_schedule == ExecutionSchedule.LIVENESS_BASED) or ((exec_schedule == ExecutionSchedule.HYBRID_DAG_LIVENESS_BASED) and (flow_order == FlowDirection.IN_ORDER)))):
            # Liveness-based: ready when a live symbol was updated more
            # recently than this cell last ran.
            max_used_live_sym_ctr = cell.get_max_used_live_symbol_cell_counter(checker_result.live, dead_symbols=checker_result.dead)
            if (max_used_live_sym_ctr > max(cell.cell_ctr, flow_.min_timestamp)):
                is_ready = True
                if ((cell.cell_ctr > 0) and (max_used_live_sym_ctr >= flow_.min_new_ready_cell_counter())):
                    is_new_ready = True
        return (is_ready, is_new_ready)

    def _check_one_cell(self, cell: Cell, update_liveness_time_versions: bool, last_executed_cell_pos: int, waiting_symbols_by_cell_id: Dict[(IdType, Set[Symbol])], killing_cell_ids_for_symbol: Dict[(Symbol, Set[IdType])], phantom_cell_info: Dict[(IdType, Dict[(IdType, Set[int])])]) -> Optional[CheckerResult]:
        """Run symbol resolution on one cell and update the result's collections.

        Mutates the ``waiting_symbols_by_cell_id``, ``killing_cell_ids_for_symbol``
        and ``phantom_cell_info`` accumulators in place. Returns the cell's
        CheckerResult, or None if checking raised.
        """
        flow_ = flow()
        try:
            checker_result = cell.check_and_resolve_symbols(update_liveness_time_versions=update_liveness_time_versions)
        except Exception:
            # Checking failures are non-fatal; surface them only in dev mode.
            if flow_.is_dev_mode:
                logger.exception('exception occurred during checking')
            return None
        cell_id = cell.cell_id
        if (flow_.mut_settings.flow_order == FlowDirection.IN_ORDER):
            # Record symbols last updated by a cell positioned *after* this one.
            for live_sym in checker_result.live:
                if ((not live_sym.is_deep) or (not live_sym.timestamp.is_initialized)):
                    continue
                updated_cell = cells().at_timestamp(live_sym.timestamp)
                if (updated_cell.position > cell.position):
                    self.unsafe_order_cells[cell_id].add(updated_cell)
        if (flow_.mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED):
            waiting_symbols = {sym.sym for sym in checker_result.live if sym.is_waiting_at_position(cell.position)}
            unresolved_live_refs = checker_result.unresolved_live_refs
        else:
            waiting_symbols = set()
            unresolved_live_refs = set()
        if (len(waiting_symbols) > 0):
            waiting_symbols_by_cell_id[cell_id] = waiting_symbols
        if ((len(waiting_symbols) > 0) or (len(unresolved_live_refs) > 0)):
            self.waiting_cells.add(cell_id)
        if (not checker_result.typechecks):
            self.typecheck_error_cells.add(cell_id)
        for dead_sym in checker_result.dead:
            killing_cell_ids_for_symbol[dead_sym].add(cell_id)
        if flow_.settings.mark_phantom_cell_usages_unsafe:
            phantom_cell_info_for_cell = cell.compute_phantom_cell_info(checker_result.used_cells)
            if (len(phantom_cell_info_for_cell) > 0):
                phantom_cell_info[cell_id] = phantom_cell_info_for_cell
        (is_ready, is_new_ready) = self._compute_readiness(cell, checker_result)
        if is_ready:
            self.ready_cells.add(cell_id)
        was_ready = cell.set_ready(is_ready)
        if (flow_.mut_settings.flow_order == FlowDirection.IN_ORDER):
            # Cells at or before the last executed position never count as newly ready.
            if ((last_executed_cell_pos is not None) and (cell.position <= last_executed_cell_pos)):
                return checker_result
        if (is_new_ready or ((cell.cell_ctr > 0) and (not was_ready) and is_ready and (flow_.cell_counter() >= flow_.min_new_ready_cell_counter()))):
            self.new_ready_cells.add(cell_id)
        return checker_result

    def _get_last_executed_pos_and_handle_reactive_tags(self, last_executed_cell_id: Optional[IdType]) -> Optional[int]:
        """Return the position of the last executed cell, registering its reactive tags."""
        if (last_executed_cell_id is None):
            return None
        last_executed_cell = cells().from_id(last_executed_cell_id)
        if (last_executed_cell is None):
            return None
        for tag in last_executed_cell.tags:
            for reactive_cell_id in cells().get_reactive_ids_for_tag(tag):
                self.forced_reactive_cells.add(reactive_cell_id)
        return last_executed_cell.position

    def _compute_unsafe_order_usages(self, cells_to_check: List[Cell]) -> None:
        """Record symbol usages whose defining cell sits *below* the using cell."""
        cell_by_ctr: Dict[(int, Cell)] = {cell.cell_ctr: cell for cell in cells_to_check}
        for sym in flow().all_data_symbols():
            if sym.is_anonymous:
                continue
            for (used_ts, ts_when_used) in sym.timestamp_by_used_time.items():
                cell = cell_by_ctr.get(used_ts.cell_num, None)
                if (cell is None):
                    continue
                if (cells().at_timestamp(ts_when_used).position <= cell.position):
                    continue
                used_node = sym.used_node_by_used_time.get(used_ts, None)
                # Only report usages with a complete source range available.
                if ((used_node is None) or (not all((hasattr(used_node, pos_attr) for pos_attr in ('lineno', 'end_lineno', 'col_offset', 'end_col_offset'))))):
                    continue
                self.unsafe_order_symbol_usage[cell.cell_id].append({'name': sym.readable_name, 'range': _make_range_from_node(used_node), 'last_updated_cell': ts_when_used.cell_num})

    def compute_frontend_checker_result(self, cells_to_check: Optional[Iterable[Cell]]=None, update_liveness_time_versions: bool=False, last_executed_cell_id: Optional[IdType]=None) -> 'FrontendCheckerResult':
        """Run the full checker pass over ``cells_to_check`` and return self.

        Cells default to the current cell for each id, processed in
        positional order.
        """
        flow_ = flow()
        if (last_executed_cell_id is None):
            last_executed_cell_id = flow_.last_executed_cell_id
        waiting_symbols_by_cell_id: Dict[(IdType, Set[Symbol])] = {}
        killing_cell_ids_for_symbol: Dict[(Symbol, Set[IdType])] = defaultdict(set)
        phantom_cell_info: Dict[(IdType, Dict[(IdType, Set[int])])] = {}
        checker_results_by_cid: Dict[(IdType, CheckerResult)] = {}
        last_executed_cell_pos = self._get_last_executed_pos_and_handle_reactive_tags(last_executed_cell_id)
        if (cells_to_check is None):
            cells_to_check = cells().current_cells_for_each_id()
        cells_to_check = sorted(cells_to_check, key=(lambda c: c.position))
        for cell in cells_to_check:
            checker_result = self._check_one_cell(cell, update_liveness_time_versions, last_executed_cell_pos, waiting_symbols_by_cell_id, killing_cell_ids_for_symbol, phantom_cell_info)
            if (checker_result is not None):
                checker_results_by_cid[cell.cell_id] = checker_result
        self._compute_dag_based_waiters(cells_to_check)
        self._compute_reactive_cells_for_reactive_symbols(checker_results_by_cid, last_executed_cell_pos)
        self._compute_ready_making_cells(waiting_symbols_by_cell_id, killing_cell_ids_for_symbol, last_executed_cell_id)
        self._compute_waiter_and_ready_maker_links()
        if flow_.mut_settings.lint_out_of_order_usages:
            self._compute_unsafe_order_usages(cells_to_check)
        return self
class AffineTransform3D(ImagePreprocessing3D):
    """3D affine-transform preprocessing step backed by the JVM layer.

    Converts the numpy affine matrix and translation vector to JTensors
    and forwards them to the underlying implementation.
    """

    def __init__(self, affine_mat, translation=None, clamp_mode='clamp', pad_val=0.0, bigdl_type='float'):
        """Create the transform.

        Args:
            affine_mat: numpy affine transformation matrix.
            translation: optional numpy translation vector; defaults to a
                zero vector of length 3. (BUG FIX: was a mutable default
                argument ``np.zeros(3)`` shared across all instances.)
            clamp_mode: boundary handling mode.
            pad_val: padding value used when clamp_mode pads.
            bigdl_type: numeric type tag for the JVM side.
        """
        if translation is None:
            translation = np.zeros(3)
        affine_mat_tensor = JTensor.from_ndarray(affine_mat)
        translation_tensor = JTensor.from_ndarray(translation)
        super(AffineTransform3D, self).__init__(bigdl_type, affine_mat_tensor, translation_tensor, clamp_mode, pad_val)
def main_worker(gpu, args):
    """Per-GPU worker: build the data loaders and run training via Trainer."""
    args.gpu = gpu
    args.rank = gpu
    print(f'Process Launching at GPU {gpu}')
    if args.distributed:
        # Pin this process to its GPU before joining the NCCL process group.
        torch.cuda.set_device(args.gpu)
        dist.init_process_group(backend='nccl')
    print(f'Building train loader at GPU {gpu}')
    train_loader = get_loader(args, split=args.train, mode='train', batch_size=args.batch_size, distributed=args.distributed, gpu=args.gpu, workers=args.num_workers, topk=args.train_topk)
    # Fall back to the training batch size when no validation-specific one is set.
    eval_batch_size = args.valid_batch_size if args.valid_batch_size is not None else args.batch_size
    print(f'Building val loader at GPU {gpu}')
    val_loader = get_loader(args, split=args.valid, mode='val', batch_size=eval_batch_size, distributed=args.distributed, gpu=args.gpu, workers=4, topk=args.valid_topk)
    print(f'Building test loader at GPU {gpu}')
    test_loader = get_loader(args, split=args.test, mode='val', batch_size=eval_batch_size, distributed=args.distributed, gpu=args.gpu, workers=4, topk=args.valid_topk)
    trainer = Trainer(args, train_loader, val_loader, test_loader, train=True)
    trainer.train()
def string_sublength(args):
    """Return the substring of ``s`` starting at 1-based index ``i`` of length ``len``.

    Parameters are pulled from ``args`` via functionParams with keys
    ('s', 'i', 'len'); missing/empty values default to '' / 1 / 1.
    """
    params = functionParams(args, ('s', 'i', 'len'))
    s = params.get('s', '')
    # Convert from the caller's 1-based index to Python's 0-based index.
    start = int(params.get('i', 1) or 1) - 1
    # Renamed from `len` to avoid shadowing the builtin.
    length = int(params.get('len', 1) or 1)
    return s[start:(start + length)]
def main():
    """Pretrain an audio-text retrieval (ASE) model and evaluate the best checkpoints.

    Parses CLI overrides into the YAML config, runs (optionally
    distributed) training with per-dataset best-model checkpointing, then
    evaluates the three saved checkpoints on the AudioCaps and Clotho
    test sets.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default='settings/pretrain.yaml', type=str, help='Setting files')
    parser.add_argument('-n', '--exp_name', default='exp_name', type=str, help='name of this experiment.')
    parser.add_argument('-l', '--lr', default=5e-05, type=float, help='Learning rate.')
    parser.add_argument('-t', '--model_type', default='cnn', type=str, help='Model type.')
    parser.add_argument('-m', '--model', default='Cnn14', type=str, help='Model name.')
    parser.add_argument('-a', '--max_length', default=30, type=int, help='Max length.')
    parser.add_argument('-s', '--batch_size', default=128, type=int, help='Batch size.')
    parser.add_argument('-b', '--blacklist', default='blacklist_exclude_ub8k_esc50_vggsound.json', type=str, help='Blacklist file.')
    args = parser.parse_args()
    exp_name = args.exp_name
    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
    # Override config entries with command-line choices.
    config['audio_encoder_args']['type'] = args.model_type
    config['audio_encoder_args']['model'] = args.model
    config['audio_args']['max_length'] = args.max_length
    config['optim_args']['lr'] = args.lr
    config['blacklist'] += args.blacklist
    config['data_args']['batch_size'] = args.batch_size
    init_distributed_mode(config['dist_args'])
    device = torch.device(config['device'])
    # Per-rank seed so different workers draw different samples.
    seed = (config['seed'] + get_rank())
    setup_seed(seed)
    exp_name = (exp_name + f'_lr_{args.lr}_seed_{seed}')
    wandb.init(project='AT-retrieval', name=exp_name, config=config)
    dataloader = pretrain_dataloader(config, bucket=True, bucket_boundaries=(5, 30, 6), is_distributed=is_dist_avail_and_initialized(), num_tasks=get_world_size(), global_rank=get_rank())
    model = ASE(config)
    model = model.to(device)
    wandb.watch(model)
    optimizer = get_optimizer(model.parameters(), lr=config['optim_args']['lr'], betas=config['optim_args']['betas'], eps=config['optim_args']['eps'], momentum=config['optim_args']['momentum'], optimizer_name=config['optim_args']['optimizer_name'])
    scheduler = cosine_lr(optimizer, base_lr=config['optim_args']['lr'], warmup_length=(config['optim_args']['warmup_epochs'] * len(dataloader)), steps=(len(dataloader) * config['training']['epochs']))
    start_epoch = 1
    max_epoch = config['training']['epochs']
    if config['resume']:
        # BUG FIX: `config` is a plain dict from yaml.safe_load, so it has no
        # attribute access; the original `config.checkpoint` raised
        # AttributeError on any resume.
        cp = torch.load(config['checkpoint'], map_location='cpu')
        state_dict = cp['model']
        optimizer.load_state_dict(cp['optimizer'])
        start_epoch = (cp['epoch'] + 1)
        model.load_state_dict(state_dict)
    (model_output_dir, log_output_dir) = set_logger(exp_name)
    main_logger = logger.bind(indent=1)
    printer = PrettyPrinter()
    main_logger.info(f'''Training setting:
{printer.pformat(config)}''')
    main_logger.info(f'Total numer of parameters: {sum([i.numel() for i in model.parameters()])}')
    main_logger.info(f'Size of training set: {len(dataloader.dataset)}, size of batches: {len(dataloader)}')
    # Keep a handle on the raw module for checkpointing under DDP.
    model_without_ddp = model
    if is_dist_avail_and_initialized():
        model = torch.nn.parallel.DistributedDataParallel(model)
        model_without_ddp = model.module
    ac_datamodule = AudioCaptionDataModule(config, 'AudioCaps')
    clotho_datamodule = AudioCaptionDataModule(config, 'Clotho')
    ac_val_loader = ac_datamodule.val_dataloader()
    clotho_val_loader = clotho_datamodule.val_dataloader()
    loss_stats = []
    ac_recall_stats = []
    clotho_recall_stats = []
    for epoch in range(start_epoch, (max_epoch + 1)):
        main_logger.info(f'Training for epoch [{epoch}]')
        train_statics = train(model, dataloader, optimizer, scheduler, device, epoch)
        loss = train_statics['loss']
        elapsed_time = train_statics['time']
        loss_stats.append(loss)
        main_logger.info(f"Training statistics: loss for epoch [{epoch}]: {loss:.3f}, time: {elapsed_time:.1f}, lr: {optimizer.param_groups[0]['lr']:.6f}.")
        # Checkpoint on the lowest training loss (main process only).
        if ((loss <= min(loss_stats)) and is_main_process()):
            sav_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(sav_obj, (str(model_output_dir) + '/best_model.pt'))
        if is_dist_avail_and_initialized():
            dist.barrier()
            torch.cuda.empty_cache()
        # Validate on AudioCaps; checkpoint on best combined recall@1.
        ac_metrics = validate(model, ac_val_loader, device)
        log_results(ac_metrics, 'AudioCaps', main_logger, test=False)
        ac_recall_stats.append((ac_metrics['t2a'][0] + ac_metrics['a2t'][0]))
        if ((ac_recall_stats[(- 1)] >= max(ac_recall_stats)) and is_main_process()):
            sav_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(sav_obj, (str(model_output_dir) + '/ac_best_model.pt'))
        # Validate on Clotho; checkpoint on best combined recall@1.
        clotho_metrics = validate(model, clotho_val_loader, device)
        log_results(clotho_metrics, 'Clotho', main_logger, test=False)
        clotho_recall_stats.append((clotho_metrics['t2a'][0] + clotho_metrics['a2t'][0]))
        if ((clotho_recall_stats[(- 1)] >= max(clotho_recall_stats)) and is_main_process()):
            sav_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(sav_obj, (str(model_output_dir) + '/clotho_best_model.pt'))
    main_logger.info('Evaluation start...')
    ac_test_loader = ac_datamodule.test_dataloader()
    clotho_test_loader = clotho_datamodule.test_dataloader()
    # Evaluate each of the three saved checkpoints on both test sets.
    model.load_state_dict(torch.load((str(model_output_dir) + '/best_model.pt'))['model'])
    main_logger.info(f"Evaluation model with smallest loss... epoch:{torch.load((str(model_output_dir) + '/best_model.pt'))['epoch']}")
    ac_metrics = validate(model, ac_test_loader, device)
    log_results(ac_metrics, 'AudioCaps', main_logger, test=True)
    clotho_metrics = validate(model, clotho_test_loader, device)
    log_results(clotho_metrics, 'Clotho', main_logger, test=True)
    model.load_state_dict(torch.load((str(model_output_dir) + '/ac_best_model.pt'))['model'])
    main_logger.info(f"Evaluation best AudioCaps model... epoch:{torch.load((str(model_output_dir) + '/ac_best_model.pt'))['epoch']}")
    ac_metrics = validate(model, ac_test_loader, device)
    log_results(ac_metrics, 'AudioCaps', main_logger, test=True)
    clotho_metrics = validate(model, clotho_test_loader, device)
    log_results(clotho_metrics, 'Clotho', main_logger, test=True)
    model.load_state_dict(torch.load((str(model_output_dir) + '/clotho_best_model.pt'))['model'])
    main_logger.info(f"Evaluation best Clotho model... epoch:{torch.load((str(model_output_dir) + '/clotho_best_model.pt'))['epoch']}")
    ac_metrics = validate(model, ac_test_loader, device)
    log_results(ac_metrics, 'AudioCaps', main_logger, test=True)
    clotho_metrics = validate(model, clotho_test_loader, device)
    log_results(clotho_metrics, 'Clotho', main_logger, test=True)
    main_logger.info('Done.')
    wandb.finish()
def _post_command(self, cmd: str) -> None:
    """Deprecated alias: forward *cmd* unchanged to :func:`post.command`.

    Emits a deprecation warning before delegating.
    NOTE(review): annotated ``-> None`` yet returns ``post.command(cmd)`` —
    presumably that call returns None; confirm.
    """
    _deprecation("'post(cmd)' is deprecated. Use 'post.command(cmd)'.")
    return post.command(cmd)
def test_W_from_zZ():
    """SigmoidBoxTensor.W must equal the hand-computed inverse-sigmoid form."""
    dims = (3, 1, 5)
    lower = torch.tensor(np.random.rand(*dims))
    upper = lower + torch.tensor(np.random.rand(*dims))
    computed = SigmoidBoxTensor.W(lower, upper)
    # Clamp into the open interval (0, 1) so inv_sigmoid stays finite.
    tiny = torch.finfo(lower.dtype).tiny
    first = inv_sigmoid(lower.clamp(tiny, 1.0 - tiny))
    second = inv_sigmoid(((upper - lower) / (1.0 - lower)).clamp(tiny, 1.0 - tiny))
    expected = torch.stack((first, second), -2)
    assert torch.allclose(computed, expected)
def setup_imports():
    """Locate the pythia package root and import every trainer, model, and
    dataset builder module so their registry decorators execute.

    Root resolution: the registered 'pythia_root' if present; otherwise this
    file's parent directory, overridden by the PYTHIA_PATH env var.
    """
    root_folder = registry.get('pythia_root', no_warning=True)
    if (root_folder is None):
        # Fall back to <this file>/../pythia relative to the source tree.
        root_folder = os.path.dirname(os.path.abspath(__file__))
        root_folder = os.path.join(root_folder, '..')
        environment_pythia_path = os.environ.get('PYTHIA_PATH')
        if (environment_pythia_path is not None):
            # Explicit environment override takes precedence.
            root_folder = environment_pythia_path
        root_folder = os.path.join(root_folder, 'pythia')
        registry.register('pythia_path', root_folder)
    trainer_folder = os.path.join(root_folder, 'trainers')
    trainer_pattern = os.path.join(trainer_folder, '**', '*.py')
    datasets_folder = os.path.join(root_folder, 'datasets')
    datasets_pattern = os.path.join(datasets_folder, '**', '*.py')
    model_folder = os.path.join(root_folder, 'models')
    model_pattern = os.path.join(model_folder, '**', '*.py')
    # Imported purely for its registration side effects.
    importlib.import_module('pythia.common.meter')
    files = ((glob.glob(datasets_pattern, recursive=True) + glob.glob(model_pattern, recursive=True)) + glob.glob(trainer_pattern, recursive=True))
    for f in files:
        if (f.find('models') != (- 1)):
            # Model modules import flat: pythia.models.<file>.
            splits = f.split(os.sep)
            file_name = splits[(- 1)]
            module_name = file_name[:file_name.find('.py')]
            importlib.import_module(('pythia.models.' + module_name))
        elif (f.find('trainer') != (- 1)):
            # Trainer modules import flat: pythia.trainers.<file>.
            splits = f.split(os.sep)
            file_name = splits[(- 1)]
            module_name = file_name[:file_name.find('.py')]
            importlib.import_module(('pythia.trainers.' + module_name))
        elif f.endswith('builder.py'):
            # Builders live at pythia.datasets.<task>.<dataset>.<builder>.
            splits = f.split(os.sep)
            task_name = splits[(- 3)]
            dataset_name = splits[(- 2)]
            if ((task_name == 'datasets') or (dataset_name == 'datasets')):
                # Files directly under datasets/ lack a task/dataset pair.
                continue
            file_name = splits[(- 1)]
            module_name = file_name[:file_name.find('.py')]
            importlib.import_module(((((('pythia.datasets.' + task_name) + '.') + dataset_name) + '.') + module_name))
class PrintModelAnalysisHook(TrainingHook):
    """Training hook that dumps a tfprof analysis of trainable variables to
    ``model_analysis.txt`` under the model directory and logs its contents.

    Only the chief worker performs the dump to avoid duplicate reports.
    """

    def __init__(self, params, model_dir, run_config):
        super(PrintModelAnalysisHook, self).__init__(params, model_dir, run_config)
        # Destination for the parameter-count report.
        self._filename = os.path.join(self.model_dir, 'model_analysis.txt')

    @staticmethod
    def default_params():
        # Fix: was declared without `self` and without a decorator, so
        # calling it on an instance raised TypeError; @staticmethod makes it
        # callable on both the class and instances.
        return {}

    def begin(self):
        """Run the analysis at session creation time (chief only)."""
        if self.is_chief:
            opts = tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
            opts['dump_to_file'] = os.path.abspath(self._filename)
            tf.contrib.tfprof.model_analyzer.print_model_analysis(tf.get_default_graph(), tfprof_options=opts)
            # Echo the dumped report into the TF log.
            with gfile.GFile(self._filename) as file:
                tf.logging.info(file.read())
def traverse(node, index):
    """Breadth-first walk of the tree rooted at *node*.

    Emits, for every visited node, its token followed by a running index
    (starting at *index*), flattened into one list in visit order.
    """
    pending = Queue()
    pending.push(node)
    flat = []
    while not pending.isEmpty():
        current = pending.pop()
        flat.append(get_token(current, mode=token_mode))
        flat.append(index)
        index += 1
        # Enqueue children so siblings are visited before descendants.
        for _name, kid in current.children():
            pending.push(kid)
    return flat
def dsrla_mobilenetv2_k6():
    """Build a dsRLA-MobileNetV2 configured with 6 RLA channels."""
    print('Constructing dsrla_mobilenetv2_k6......')
    return dsRLA_MobileNetV2(rla_channel=6)
.skip()
def test_redwood_indoor_office1():
    """Smoke-test download/extraction of the RedwoodIndoorOffice1 dataset."""
    prefix = 'RedwoodIndoorOffice1'
    _, download_dir, extract_dir = get_test_data_dirs(prefix)
    dataset = o3d.data.RedwoodIndoorOffice1()
    assert Path(download_dir).is_dir()
    assert Path(extract_dir).is_dir()
    # Reading the point cloud exercises the extracted file.
    pcd = o3d.io.read_point_cloud(dataset.point_cloud_path)

    def _load_rgbd(color_path, depth_path):
        # Fuse one color/depth pair into an RGBD image.
        color = o3d.io.read_image(color_path)
        depth = o3d.io.read_image(depth_path)
        return o3d.geometry.RGBDImage.create_from_color_and_depth(color, depth)

    im_rgbds = [_load_rgbd(c, d) for c, d in zip(dataset.color_paths, dataset.depth_paths)]
    assert len(im_rgbds) == 2690
    im_noisy_rgbds = [_load_rgbd(c, d) for c, d in zip(dataset.color_paths, dataset.noisy_depth_paths)]
    assert len(im_noisy_rgbds) == 2690
class UCF101DataModule(pl.LightningDataModule):
    """Lightning data module for the UCF101 dataset (Zhou split), with
    SimCLR-style training augmentations and CLIP-style per-class prompts."""

    def __init__(self, data_root, train_batch_size, test_batch_size, num_workers, scale_lower_bound, jitter_prob, greyscale_prob, solarize_prob, **kwargs):
        super().__init__()
        self.data_root = data_root
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers
        # Augmentation hyper-parameters.
        self.scale_lower_bound = scale_lower_bound
        self.jitter_prob = jitter_prob
        self.greyscale_prob = greyscale_prob
        self.solarize_prob = solarize_prob
        # UCF101 has 101 action classes.
        self.nr_of_classes = 101
        # Prefix for the text prompts built in _create_prompts.
        self.prompt_prefix = 'This is a photo of a'

    def setup(self, stage: Optional[str]=None) -> None:
        """Build transforms, class-name maps, prompts, and the split datasets."""
        root_dir = (pathlib.Path(self.data_root) / 'ucf101')
        # NOTE(review): Normalize with std=(0., 0., 0.) divides by zero, and a
        # mean that is non-zero only on the green channel looks like a
        # transcription error — confirm the intended statistics.
        train_transform = torchvision.transforms.Compose([torchvision.transforms.RandomResizedCrop(224, scale=(self.scale_lower_bound, 1.0), interpolation=InterpolationMode.BICUBIC), torchvision.transforms.RandomApply([torchvision.transforms.ColorJitter(0.4, 0.4, 0.4, 0.4)], p=self.jitter_prob), torchvision.transforms.RandomGrayscale(p=self.greyscale_prob), torchvision.transforms.RandomApply([Solarize()], p=self.solarize_prob), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=(0., 0.4578275, 0.), std=(0., 0., 0.))])
        test_transform = torchvision.transforms.Compose([torchvision.transforms.Resize(size=224), torchvision.transforms.CenterCrop(size=224), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=(0., 0.4578275, 0.), std=(0., 0., 0.))])
        self.id_to_class = self._load_id_to_class(root_dir)
        self._create_index_to_classes()
        self._create_prompts()
        if (stage == 'fit'):
            self.train_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_UCF101.json'), data_root=os.path.join(root_dir, 'images'), split='train', transforms=train_transform)
            # Validation reuses the test split with eval transforms.
            self.val_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_UCF101.json'), data_root=os.path.join(root_dir, 'images'), split='test', transforms=test_transform)
        if (stage == 'test'):
            self.test_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_UCF101.json'), data_root=os.path.join(root_dir, 'images'), split='test', transforms=test_transform)

    def train_dataloader(self):
        return torch.utils.data.DataLoader(self.train_set, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers, persistent_workers=(True if (self.num_workers > 0) else False))

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_set, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers, persistent_workers=(True if (self.num_workers > 0) else False))

    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_set, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers, persistent_workers=(True if (self.num_workers > 0) else False))

    def _create_index_to_classes(self):
        # Human-readable class names ('Apply_Eye_Makeup' -> 'Apply Eye Makeup'),
        # ordered by class index.
        index_to_classes = {key: value.replace('_', ' ') for (key, value) in self.id_to_class.items()}
        self.index_to_classes = dict(sorted(index_to_classes.items()))

    def _load_id_to_class(self, root_dir):
        # Instantiate the dataset once just to read its class_to_idx mapping.
        dummy_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_UCF101.json'), data_root=os.path.join(root_dir, 'images'), split='train', transforms=None)
        class_to_idx = dummy_set.class_to_idx
        return {idx: class_label for (class_label, idx) in class_to_idx.items()}

    def _create_prompts(self):
        # One lower-cased prompt per class, in class-index order.
        prompts = [((self.prompt_prefix + ' ') + text_label.lower()) for text_label in self.index_to_classes.values()]
        self.prompts = prompts
def _get_qiskit_versions():
    """Return a mapping of qiskit package names to installed versions.

    Parses ``pip freeze`` output. Git/editable installs of qiskit packages
    are reported as ``dev-<sha>``; qiskit packages not installed map to None.
    """
    cmd = [sys.executable, '-m', 'pip', 'freeze']
    reqs = subprocess.check_output(cmd)
    reqs_dict = {}
    for req in reqs.split():
        req_parts = req.decode().split('==')
        if (len(req_parts) == 1) and req_parts[0].startswith('git'):
            if 'qiskit' in req_parts[0]:
                # Line looks like git+https://...@<sha>#egg=<package>.
                package = req_parts[0].split('#egg=')[1]
                # Fix: the separator was '' (empty string), which makes
                # str.split raise ValueError; the commit sha follows '@'.
                sha = req_parts[0].split('@')[-1].split('#')[0]
                reqs_dict[package] = 'dev-' + sha
            continue
        elif len(req_parts) == 1:
            # Not a pinned 'name==version' line; skip.
            continue
        reqs_dict[req_parts[0]] = req_parts[1]
    out_dict = {}
    # Underscored egg names map first, normalized to the dashed names.
    for package in ['qiskit_terra', 'qiskit_ignis', 'qiskit_aer', 'qiskit_ibmq_provider', 'qiskit_aqua']:
        if package in reqs_dict:
            out_dict[package.replace('_', '-')] = reqs_dict[package]
    for package in ['qiskit', 'qiskit-terra', 'qiskit-ignis', 'qiskit-aer', 'qiskit-ibmq-provider', 'qiskit-aqua']:
        if package in out_dict:
            continue
        if package in reqs_dict:
            out_dict[package] = reqs_dict[package]
        else:
            out_dict[package] = None
    return out_dict
_tf
def resnet152_v2_imagenet(tile_px, **kwargs):
    """ImageNet layer extractor backed by the ResNet152-v2 backbone."""
    extractor = TensorflowImagenetLayerExtractor('resnet152_v2', tile_px, **kwargs)
    return extractor
class _GatherShardDimWithReshuffleCheck(torch.autograd.Function):
    """Autograd wrapper: gather along ``shard_dim`` forward, split backward."""

    # Fix: torch.autograd.Function requires forward/backward to be
    # staticmethods; the ctx-first signatures already follow that convention.
    @staticmethod
    def forward(ctx, input_, shard_dim, group=None, ranks=None):
        # Stash the distributed context for the backward pass.
        ctx.group = group
        ctx.ranks = ranks
        ctx.shard_dim = shard_dim
        return _gather_shard_dim_with_reshuffle_check(input_, shard_dim, group, ranks)

    @staticmethod
    def backward(ctx, grad_output):
        # Gradient of gather is split; None for the non-tensor arguments.
        return (_split_shard_dim_with_reshuffle_check(grad_output, ctx.shard_dim, ctx.group, ctx.ranks), None, None, None)
def train(args):
    """Train FlowNet3D for scene-flow estimation.

    Trains from scratch on FlyingThings3D or fine-tunes a pretrained model
    on KITTI, saving the weights with the lowest mean epoch loss to
    ``<save_dir>best_train.pth``.

    Raises:
        ValueError: if ``args.dataset`` is neither 'Flythings3D' nor 'Kitti'.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.dataset == 'Flythings3D':
        train_dataset = Flythings3D(npoints=args.npoints, root=args.root, train=True)
    elif args.dataset == 'Kitti':
        train_dataset = KittiSceneFlowDataset(args.root, args.npoints, True)
    else:
        # Fix: `raise 'Invalid dataset'` is illegal in Python 3 (raising a
        # str produces "TypeError: exceptions must derive from BaseException").
        raise ValueError('Invalid dataset')
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True, pin_memory=True, drop_last=True)
    net = FlowNet3D().cuda()
    if args.use_wandb:
        wandb.watch(net)
    if args.dataset == 'Flythings3D':
        net.apply(init_weights)
    elif args.dataset == 'Kitti':
        # KITTI fine-tuning starts from a pretrained checkpoint.
        net.load_state_dict(torch.load(args.pretrain_model))
    else:
        raise ValueError('Invalid dataset')  # unreachable (validated above); kept for symmetry
    optimizer = optim.Adam(net.parameters(), lr=args.init_lr)
    lr_scheduler = ClippedStepLR(optimizer, args.step_size_lr, args.min_lr, args.gamma_lr)

    def update_bn_momentum(epoch):
        # Step-decay BatchNorm momentum, floored at min_bn_momentum.
        for m in net.modules():
            if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
                m.momentum = max(args.init_bn_momentum * (args.gamma_bn_momentum ** (epoch // args.step_size_bn_momentum)), args.min_bn_momentum)

    best_train_loss = float('inf')
    for epoch in range(args.epochs):
        update_bn_momentum(epoch)
        net.train()
        count = 0
        total_loss = 0
        pbar = tqdm(enumerate(train_loader))
        for i, data in pbar:
            points1, points2, features1, features2, flow, mask1 = data
            points1 = points1.cuda(non_blocking=True)
            points2 = points2.cuda(non_blocking=True)
            features1 = features1.cuda(non_blocking=True)
            features2 = features2.cuda(non_blocking=True)
            flow = flow.cuda(non_blocking=True)
            mask1 = mask1.cuda(non_blocking=True).float()
            optimizer.zero_grad()
            pred_flow = net(points1, points2, features1, features2)
            loss = flow_criterion(pred_flow, flow, mask1)
            loss.backward()
            optimizer.step()
            count += 1
            total_loss += loss.item()
            if (i % 10) == 0:
                pbar.set_description('Train Epoch:{}[{}/{}({:.0f}%)]\tLoss: {:.6f}'.format(epoch + 1, i, len(train_loader), (100.0 * i) / len(train_loader), loss.item()))
        lr_scheduler.step()
        total_loss = total_loss / count
        if args.use_wandb:
            wandb.log({'loss': total_loss})
        print('Epoch ', epoch + 1, 'finished ', 'loss = ', total_loss)
        # Checkpoint on every improvement of the mean epoch loss.
        if total_loss < best_train_loss:
            torch.save(net.state_dict(), args.save_dir + 'best_train.pth')
            best_train_loss = total_loss
        print('Best train loss: {:.4f}'.format(best_train_loss))
def run():
    """Train/evaluate the memory network on each of the 20 bAbI tasks and
    print the per-task test accuracy at the end."""
    test_acc_results = []
    for task_id in range(1, 20 + 1):
        print('-*_*_*_*_*_*_*_*_ Task', task_id)
        # Pick the 10k- or 1k-example variant of the bAbI data.
        data_dir = './data/tasks_1-20_v1-2/en-10k' if use_10k else './data/tasks_1-20_v1-2/en'
        (train_data, test_data, vocab) = load_data(data_dir, 0, task_id)
        data = train_data + test_data
        print('sample', train_data[0])
        # Word index 0 is reserved for padding.
        w2i = dict((w, i) for (i, w) in enumerate(vocab, 1))
        w2i[PAD] = 0
        vocab_size = len(vocab) + 1
        story_len = min(max_story_len, max(len(s) for (s, q, a) in data))
        s_sent_len = max(len(ss) for (s, q, a) in data for ss in s)
        q_sent_len = max(len(q) for (s, q, a) in data)
        print('train num', len(train_data))
        print('test num', len(test_data))
        print('vocab_size', vocab_size)
        print('embd_size', embd_size)
        print('story_len', story_len)
        print('s_sent_len', s_sent_len)
        print('q_sent_len', q_sent_len)
        model = MemNN(vocab_size, embd_size, vocab_size, story_len)
        if torch.cuda.is_available():
            model.cuda()
        optimizer = torch.optim.Adam(model.parameters())
        loss_fn = nn.NLLLoss()
        ds = '10k' if use_10k else '1k'
        model_filename = generate_model_filename(task_id, ds, n_epochs)
        if os.path.isfile(model_filename) and args.resume:
            print("=> loading checkpoint '{}'".format(model_filename))
            checkpoint = torch.load(model_filename)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            # Fix: the message previously interpolated args.resume (a flag),
            # not the checkpoint path that was actually loaded.
            print("=> loaded checkpoint '{}' (epoch {})".format(model_filename, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(model_filename))
        if args.test != 1:
            train(model, train_data, test_data, optimizer, loss_fn, w2i, task_id, batch_size, n_epochs)
            save_checkpoint({'epoch': args.n_epochs, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, True, filename=model_filename)
            print('Final Acc')
        # Both branches evaluated on the test set; the duplicate calls are
        # merged here without changing order of operations.
        acc = test(model, test_data, w2i, batch_size, task_id)
        test_acc_results.append(acc)
    for i, acc in enumerate(test_acc_results):
        print('Task {}: Acc {:.2f}%'.format(i + 1, acc))
def fdmobilenet_wd2(**kwargs):
    """FD-MobileNet at half width (width_scale=0.5)."""
    model = get_mobilenet(version='fd', width_scale=0.5, model_name='fdmobilenet_wd2', **kwargs)
    return model
def set_random_seed(seed):
    """Seed NumPy and PyTorch (CPU plus every visible GPU) and force
    deterministic cuDNN behaviour for reproducible runs."""
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Deterministic kernels only; autotuned (benchmark) algorithm selection
    # is disabled because it is non-deterministic.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def _convert_output_type_range(img, dst_type):
if (dst_type not in (np.uint8, np.float32)):
raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}')
if (dst_type == np.uint8):
img = img.round()
else:
img /= 255.0
return img.astype(dst_type) |
class SendStat(Callback):
    """Per-epoch callback that runs a shell command with the latest values
    of the tracked stats substituted into the command template."""

    def __init__(self, command, stats):
        self.command = command
        # Accept either a single stat name or a list of names.
        self.stats = stats if isinstance(stats, list) else [stats]

    def _trigger_epoch(self):
        holder = self.trainer.stat_holder
        values = {name: holder.get_stat_now(name) for name in self.stats}
        cmd = self.command.format(**values)
        ret = os.system(cmd)
        if ret != 0:
            logger.error('Command {} failed with ret={}!'.format(cmd, ret))
def parse_args():
    """Parse CLI arguments for gathering benchmarked models."""
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument('root', type=str, help='root path of benchmarked models to be gathered')
    parser.add_argument('out', type=str, help='output path of gathered models to be stored')
    parser.add_argument('--best', action='store_true', help='whether to gather the best model.')
    return parser.parse_args()
def _parse_main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('data_dir')
parser.add_argument('--no-solve1', action='store_true', dest='no_solve1')
parser.add_argument('--sexp', action='store_true', dest='sexp')
return parser.parse_args() |
def set_save_name_log_nvdm(args):
    """Build the experiment save path from the hyper-parameters, create a
    TensorBoard writer there, and mirror INFO-level logging to file + console.

    Returns:
        (args, writer): args with ``save_name`` set, and the SummaryWriter.
    """
    args.save_name = os.path.join(args.root_path, args.exp_path, 'Data{}_Dist{}_Model{}_Emb{}_Hid{}_lat{}_lr{}_drop{}_kappa{}_auxw{}_normf{}'.format(args.data_name, str(args.dist), args.model, args.emsize, args.nhid, args.lat_dim, args.lr, args.dropout, args.kappa, args.aux_weight, str(args.norm_func)))
    writer = SummaryWriter(log_dir=args.save_name)
    # The file log sits next to the TensorBoard directory.
    log_name = (args.save_name + '.log')
    logging.basicConfig(filename=log_name, level=logging.INFO)
    # Echo records to the console via a handler on the root logger.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    return (args, writer)
class ResBlock(PlainNetBasicBlockClass):
    """Residual wrapper around an inner block list: ``y = F(x) + shortcut(x)``.

    A 1x1 conv + BN projection shortcut is created whenever the stride or
    channel count changes across the inner blocks; otherwise the identity is
    added. The string form is ``ResBlock([name|][in_ch,][stride,]<inner>)``.
    """

    def __init__(self, block_list, in_channels=None, stride=None, no_create=False, **kwargs):
        super(ResBlock, self).__init__(**kwargs)
        self.block_list = block_list
        self.stride = stride
        self.no_create = no_create
        if (not no_create):
            # Register inner blocks so their parameters are tracked.
            self.module_list = nn.ModuleList(block_list)
        if (in_channels is None):
            # Infer channel counts from the first/last inner blocks.
            self.in_channels = block_list[0].in_channels
        else:
            self.in_channels = in_channels
        self.out_channels = block_list[(- 1)].out_channels
        if (self.stride is None):
            # Derive the overall stride by probing a dummy resolution.
            tmp_input_res = 1024
            tmp_output_res = self.get_output_resolution(tmp_input_res)
            self.stride = (tmp_input_res // tmp_output_res)
        self.proj = None
        if ((self.stride > 1) or (self.in_channels != self.out_channels)):
            # Projection shortcut needed when shape changes across the block.
            self.proj = nn.Sequential(nn.Conv2d(self.in_channels, self.out_channels, 1, self.stride), nn.BatchNorm2d(self.out_channels))

    def forward(self, x):
        """Apply the inner blocks and add the (possibly projected) shortcut."""
        if (len(self.block_list) == 0):
            return x
        output = x
        for inner_block in self.block_list:
            output = inner_block(output)
        if (self.proj is not None):
            output = (output + self.proj(x))
        else:
            output = (output + x)
        return output

    def __str__(self):
        # Serialized form without the block name (see __repr__ for named form).
        s = 'ResBlock({},{},'.format(self.in_channels, self.stride)
        for inner_block in self.block_list:
            s += str(inner_block)
        s += ')'
        return s

    def __repr__(self):
        s = 'ResBlock({}|{},{},'.format(self.block_name, self.in_channels, self.stride)
        for inner_block in self.block_list:
            s += str(inner_block)
        s += ')'
        return s

    def get_output_resolution(self, input_resolution):
        """Chain the inner blocks' resolution transforms."""
        the_res = input_resolution
        for the_block in self.block_list:
            the_res = the_block.get_output_resolution(the_res)
        return the_res

    def get_FLOPs(self, input_resolution):
        """Sum inner-block FLOPs plus the projection shortcut's, if any."""
        the_res = input_resolution
        the_flops = 0
        for the_block in self.block_list:
            the_flops += the_block.get_FLOPs(the_res)
            the_res = the_block.get_output_resolution(the_res)
        if (self.proj is not None):
            # 1x1 conv FLOPs at the strided resolution, plus BN cost.
            the_flops += (((self.in_channels * self.out_channels) * ((the_res / self.stride) ** 2)) + (((the_res / self.stride) ** 2) * self.out_channels))
        return the_flops

    def get_model_size(self):
        """Parameter count of the inner blocks plus the projection, if any."""
        the_size = 0
        for the_block in self.block_list:
            the_size += the_block.get_model_size()
        if (self.proj is not None):
            the_size += ((self.in_channels * self.out_channels) + self.out_channels)
        return the_size

    def set_in_channels(self, c):
        """Re-wire the block for a new input channel count.

        NOTE(review): ``self.out_channels`` is not recomputed from the inner
        blocks here — confirm whether callers rely on it staying fixed.
        """
        self.in_channels = c
        if (len(self.block_list) == 0):
            self.out_channels = c
            return
        self.block_list[0].set_in_channels(c)
        last_channels = self.block_list[0].out_channels
        # Keep a BN immediately following the first conv consistent.
        if ((len(self.block_list) >= 2) and (isinstance(self.block_list[0], ConvKX) or isinstance(self.block_list[0], ConvDW)) and isinstance(self.block_list[1], BN)):
            self.block_list[1].set_in_channels(last_channels)
        # Rebuild the projection shortcut for the new channel count.
        self.proj = None
        if (not self.no_create):
            if ((self.stride > 1) or (self.in_channels != self.out_channels)):
                self.proj = nn.Sequential(nn.Conv2d(self.in_channels, self.out_channels, 1, self.stride), nn.BatchNorm2d(self.out_channels))
                self.proj.train()
                self.proj.requires_grad_(True)

    def create_from_str(cls, s, no_create=False, **kwargs):
        """Parse ``ResBlock(...)`` from *s* and return (block, remainder).

        NOTE(review): takes ``cls`` but carries no @classmethod decorator —
        confirm how callers invoke this (it would mis-bind if called on the
        class directly).
        """
        assert ResBlock.is_instance_from_str(s)
        idx = _get_right_parentheses_index_(s)
        assert (idx is not None)
        the_stride = None
        param_str = s[len('ResBlock('):idx]
        # Optional leading "<name>|" segment.
        tmp_idx = param_str.find('|')
        if (tmp_idx < 0):
            tmp_block_name = 'uuid{}'.format(uuid.uuid4().hex)
        else:
            tmp_block_name = param_str[0:tmp_idx]
            param_str = param_str[(tmp_idx + 1):]
        # Optional numeric in_channels, then optional numeric stride.
        first_comma_index = param_str.find(',')
        if ((first_comma_index < 0) or (not param_str[0:first_comma_index].isdigit())):
            in_channels = None
            (the_block_list, remaining_s) = _create_netblock_list_from_str_(param_str, no_create=no_create)
        else:
            in_channels = int(param_str[0:first_comma_index])
            param_str = param_str[(first_comma_index + 1):]
            second_comma_index = param_str.find(',')
            if ((second_comma_index < 0) or (not param_str[0:second_comma_index].isdigit())):
                (the_block_list, remaining_s) = _create_netblock_list_from_str_(param_str, no_create=no_create)
            else:
                the_stride = int(param_str[0:second_comma_index])
                param_str = param_str[(second_comma_index + 1):]
                (the_block_list, remaining_s) = _create_netblock_list_from_str_(param_str, no_create=no_create)
                pass
            pass
        assert (len(remaining_s) == 0)
        if ((the_block_list is None) or (len(the_block_list) == 0)):
            # Empty inner list: no block produced; hand back the tail.
            return (None, s[(idx + 1):])
        return (ResBlock(block_list=the_block_list, in_channels=in_channels, stride=the_stride, no_create=no_create, block_name=tmp_block_name), s[(idx + 1):])
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = ((labels >= 0) & (labels != ignore_index))
inds = torch.nonzero((valid_mask & (labels < label_channels)), as_tuple=False)
if (inds.numel() > 0):
bin_labels[(inds, labels[inds])] = 1
valid_mask = valid_mask.view((- 1), 1).expand(labels.size(0), label_channels).float()
if (label_weights is None):
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view((- 1), 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return (bin_labels, bin_label_weights) |
class KnetD(nn.Module):
    """Residual 3-conv encoder block with in-plane (H, W) max-pool downsampling.

    Args:
        inplanes / planes: input / output channel counts.
        dropout: 3-D dropout probability applied inside the residual branch.
        norm: normalization type forwarded to ``normalization``.
        first: when True, skip the initial max-pool (used for the stem block).
    """

    def __init__(self, inplanes, planes, dropout=0.0, norm='in', first=False):
        super(KnetD, self).__init__()
        self.first = first
        self.maxpool = nn.MaxPool2d(2, 2)
        self.dropout = dropout
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv3d(inplanes, planes, 3, 1, 1, bias=False)
        self.bn1 = normalization(planes, norm)
        self.conv2 = nn.Conv3d(planes, planes, 3, 1, 1, bias=False)
        self.bn2 = normalization(planes, norm)
        self.conv3 = nn.Conv3d(planes, planes, 3, 1, 1, bias=False)
        self.bn3 = normalization(planes, norm)

    def forward(self, x):
        # Move depth next to channels so the 2-D max-pool acts on (H, W).
        x = x.permute(0, 1, 4, 2, 3)
        if not self.first:
            x = self.maxpool(x.squeeze(0))
            x = x.unsqueeze(0)
        # Restore the original (N, C, H, W, D) layout.
        x = x.permute(0, 1, 3, 4, 2)
        x = self.bn1(self.conv1(x))
        y = self.relu(self.bn2(self.conv2(x)))
        if self.dropout > 0:
            y = F.dropout3d(y, self.dropout)
        # Fix: previously `self.conv3(x)`, which overwrote `y` and left the
        # conv2/bn2/dropout path as dead code; the residual branch must be
        # chained through `y`.
        y = self.bn3(self.conv3(y))
        return self.relu(x + y)
def binary_search_y1(x_minus, x_plus, y_minus, y_plus):
    """Elementwise bisection for the y1 that zeroes the upper-bound gradient.

    Runs a fixed 10 bisection iterations between y_minus and y_plus, moving
    the upper bound down where the estimated gradient is positive and the
    lower bound up elsewhere. Returns the midpoint of the final interval.
    """
    eps = 0.0001
    y_lower = y_minus.data.clone()
    y_upper = y_plus.data.clone()
    y1 = (y_lower + y_upper) / 2
    for _ in range(10):
        y1 = (y_lower + y_upper) / 2
        g = estimate_gradient_upper(y1, eps, x_minus, x_plus, y_minus, y_plus)
        pos = g > 0
        y_upper[pos] = y1[pos]
        # Fix: `1 - idx` on a bool tensor raises a RuntimeError in modern
        # PyTorch ("the `-` operator is not supported"); use logical negation.
        neg = ~pos
        y_lower[neg] = y1[neg]
    return (y_upper + y_lower) / 2
def remove_comments(original: str) -> str:
    """Strip comment-only lines, blank lines, and docstrings from source.

    Falls back to the comment-stripped (but still docstring-bearing) code
    whenever parsing or unparsing fails, so a string is always returned.
    """
    lines = original.splitlines()
    # Drop lines that are entirely a comment or entirely whitespace.
    kept = [x for x in lines if not x.rstrip().startswith('#') and x.strip() != '']
    code = '\n'.join(kept)
    try:
        root = ast.parse(code)
        PassRemoveDocstring().remove_docstring(root)
        ast.fix_missing_locations(root)
        code_cleaned = astunparse.unparse(root)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any parse/unparse failure still falls back.
        return code
    return code_cleaned
class ContrastLoss(nn.Module):
    """NCE-style contrastive loss: column 0 of the input holds the positive
    score, the remaining m columns hold negative scores; n_data sets the
    noise distribution's uniform probability 1/n_data."""

    def __init__(self, n_data):
        super(ContrastLoss, self).__init__()
        self.n_data = n_data

    def forward(self, x):
        batch = x.shape[0]
        num_neg = x.size(1) - 1
        noise_prob = 1 / float(self.n_data)
        # Positive term: log P(D=1 | positive sample).
        pos = x.select(1, 0)
        pos[pos == 0] = eps
        log_pos = torch.div(pos, pos.add((num_neg * noise_prob) + eps)).log_()
        # Negative term: log P(D=0 | noise samples).
        neg = x.narrow(1, 1, num_neg)
        log_neg = torch.div(neg.clone().fill_(num_neg * noise_prob), neg.add((num_neg * noise_prob) + eps)).log_()
        return (-(log_pos.sum(0) + log_neg.view(-1, 1).sum(0))) / batch
def register_dataset(datasets_root: Optional[os.PathLike]=None):
    """Register the CHIMPNSEE video-list dataset with the dataset catalogs."""
    def empty_load_callback():
        # Video-list datasets carry no per-frame annotations to load.
        pass
    list_fpath = maybe_prepend_base_path(datasets_root, 'chimpnsee/cdna.eva.mpg.de/video_list.txt')
    base_path = maybe_prepend_base_path(datasets_root, 'chimpnsee/cdna.eva.mpg.de')
    DatasetCatalog.register(CHIMPNSEE_DATASET_NAME, empty_load_callback)
    MetadataCatalog.get(CHIMPNSEE_DATASET_NAME).set(dataset_type=DatasetType.VIDEO_LIST, video_list_fpath=list_fpath, video_base_path=base_path)
def test_purge(remove: MagicMock) -> None:
    """cache.purge must load every record file and remove each one."""
    glob_result = ['1.cache_record.json', '2.cache_record.json']
    glob_mock = MagicMock(return_value=glob_result)
    mock_cache_record = {'expires': '3000-01-01', 'filename': 'df_cache.parquet'}
    mock_load_json = MagicMock(return_value=mock_cache_record)
    with patch('glob.glob', glob_mock):
        with patch('pybaseball.cache.file_utils.load_json', mock_load_json):
            cache.purge()
    # Fix: `assert glob_mock.called_once()` only auto-created a child mock
    # (always truthy), so the assertion could never fail; use the real Mock
    # assertion method instead.
    glob_mock.assert_called_once()
    assert mock_load_json.call_count == len(glob_result)
    assert remove.call_count == len(glob_result)
class MetricLogger(object):
    """Aggregates named SmoothedValue meters and pretty-prints progress with
    iteration timing, ETA, and (on CUDA) peak memory while iterating."""

    def __init__(self, delimiter='\t'):
        # Meters are created lazily on first update of each metric name.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar value per named metric keyword."""
        for (k, v) in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow `logger.loss`-style access to meters, falling back to
        # instance attributes before raising.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Reduce every meter across distributed processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a pre-configured meter under *name*."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing a progress line every
        *print_freq* iterations plus a total-time summary at the end.

        NOTE(review): the summary divides by len(iterable) — an empty
        iterable raises ZeroDivisionError; confirm callers never pass one.
        """
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # data_time: time spent fetching the item; iter_time: full step.
            data_time.update((time.time() - end))
            (yield obj)
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
def decode_png(input: torch.Tensor, mode: ImageReadMode=ImageReadMode.UNCHANGED) -> torch.Tensor:
    """Decode PNG bytes held in a 1-D uint8 tensor into an image tensor,
    converted according to the requested read *mode*."""
    return torch.ops.image.decode_png(input, mode.value)
def main(args):
    """Recompute corpus metadata and rewrite the splits with span labels.

    Reads train/valid/test.json and meta.json from ``args.save_loc``,
    refreshes size/length statistics and per-labeling-function F1 scores in
    meta.json, then converts token-level labels (true and weak) into span
    lists and writes every split back.
    """
    set_logging(args.log_dir)
    logger.setLevel(logging.INFO)
    logger.info(f'Parameters: {args}')
    logger.info('Reading data...')
    with open(os.path.join(args.save_loc, 'train.json'), 'r', encoding='utf-8') as f:
        train_data = json.load(f)
    with open(os.path.join(args.save_loc, 'valid.json'), 'r', encoding='utf-8') as f:
        valid_data = json.load(f)
    with open(os.path.join(args.save_loc, 'test.json'), 'r', encoding='utf-8') as f:
        test_data = json.load(f)
    logger.info('Reading metadata...')
    with open(os.path.join(args.save_loc, 'meta.json'), 'r', encoding='utf-8') as f:
        meta = json.load(f)
    logger.info('Getting new metadata')
    # Sentence-length statistics over all three splits.
    max_length = 0
    avg_length = 0
    for data in [train_data, valid_data, test_data]:
        for k, v in data.items():
            l_sent = len(v['data']['text'])
            avg_length += l_sent
            if l_sent > max_length:
                max_length = l_sent
    avg_length /= (len(train_data) + len(valid_data) + len(test_data))
    meta['train_size'] = len(train_data)
    meta['valid_size'] = len(valid_data)
    # Fix: the key used to be 'test_size ' (trailing space), inconsistent
    # with the other *_size keys and invisible to downstream readers.
    meta['test_size'] = len(test_data)
    meta['max_length'] = max_length
    meta['avg_length'] = avg_length
    meta['num_lf'] = len(meta['lf'])
    # One B-/I- tag pair per entity type, plus the O tag.
    meta['num_labels'] = (2 * len(meta['entity_types'])) + 1
    # Gather true labels and per-LF weak labels across every split.
    t_lbs = list()
    w_lbs = [[] for _ in range(meta['num_lf'])]
    for split in (train_data, valid_data, test_data):
        for k, v in split.items():
            t_lbs.append(v['label'])
            for i, w_lb in enumerate(np.asarray(v['weak_labels']).T):
                w_lbs[i].append(w_lb.tolist())
    rec_src = list()
    src_f1 = dict()
    logger.info(f'Source performance (F1 score)')
    for i, src_name in enumerate(meta['lf']):
        f1 = metrics.f1_score(t_lbs, w_lbs[i], mode='strict', scheme=IOB2)
        logger.info(f'{src_name}: {f1}')
        # Fix: src_f1 was never populated, so meta['lf_f1'] was always {}.
        src_f1[src_name] = f1
        if f1 > 0.05:
            rec_src.append(src_name)
    meta['lf_f1'] = src_f1
    logger.info(f'''The following sources are recommended for model evaluation:
{rec_src}''')
    meta['lf_rec'] = rec_src
    meta['num_lf_rec'] = len(rec_src)
    logger.info('Saving updated meta...')
    with open(os.path.join(args.save_loc, 'meta.json'), 'w', encoding='utf-8') as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)
    logger.info('Updating dataset label formats')
    # Convert token labels to span lists and persist each split in turn
    # (same per-split processing the original repeated three times).
    for data, fname in ((train_data, 'train.json'), (valid_data, 'valid.json'), (test_data, 'test.json')):
        for k in data:
            lbs = data[k]['label']
            wlbs = data[k]['weak_labels']
            data[k]['label'] = span_dict_to_list(label_to_span(lbs))
            data[k]['weak_labels'] = [span_dict_to_list(label_to_span(wlb)) for wlb in np.array(wlbs).T]
        with open(os.path.join(args.save_loc, fname), 'w', encoding='utf-8') as f:
            f.write(prettify_json(json.dumps(data, ensure_ascii=False, indent=2)))
    logger.info('Program successfully finished')
def torch_available():
    """Return True when torch (including dlpack support) can be imported."""
    try:
        import torch
        import torch.utils.dlpack
    except ImportError:
        return False
    else:
        return True
class CosineAnnealingScheduler(Callback):
    """Keras callback for cosine-annealing learning rate, optionally with
    warm restarts, period expansion, and a flat tail at ``eta_min``.

    Args:
        T_max: half-period of the cosine schedule, in epochs.
        eta_max / eta_min: upper / lower learning-rate bounds.
        verbose: print the LR each epoch when > 0.
        epoch_start: epoch at which annealing begins (LR untouched before).
        restart_epochs: restart the cosine every this many epochs, if set.
        gamma: multiplicative decay applied to ``eta_max`` at cycle bottoms.
        expansion: factor by which ``T_max`` grows at cycle bottoms.
        flat_end: hold the LR at ``eta_min`` once a full period has elapsed.
    """

    def __init__(self, T_max, eta_max, eta_min=0, verbose=0, epoch_start=80, restart_epochs=None, gamma=1, expansion=1, flat_end=False):
        super(CosineAnnealingScheduler, self).__init__()
        self.epoch_start = epoch_start
        self.expansion = expansion
        self.T_max = T_max
        self.eta_max = eta_max
        self.eta_min = eta_min
        self.verbose = verbose
        self.restart_epochs = restart_epochs
        self.gamma = gamma
        self.flat_end = flat_end

    def on_epoch_begin(self, epoch, logs=None):
        """Set the optimizer LR according to the cosine schedule."""
        if not hasattr(self.model.optimizer, 'learning_rate'):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        if epoch > (self.epoch_start - 1):
            if self.restart_epochs is None:
                learning_rate = self.eta_min + ((((self.eta_max * self.gamma) - self.eta_min) * (1 + math.cos((math.pi * (epoch - self.epoch_start)) / self.T_max))) / 2)
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
            else:
                # Phase within the current restart cycle.
                learning_rate = self.eta_min + ((((self.eta_max * self.gamma) - self.eta_min) * (1 + math.cos((math.pi * ((epoch % (self.restart_epochs + self.epoch_start)) - self.epoch_start)) / self.T_max))) / 2)
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
            if learning_rate <= self.eta_min:
                # Bottom of a cycle: decay the peak and stretch the period.
                self.eta_max *= self.gamma
                self.T_max *= self.expansion
            # Fix: `T_max` was referenced as a bare name here, which raised
            # NameError whenever flat_end was enabled; the period lives on self.
            if self.flat_end and (epoch >= ((self.epoch_start - 1) + self.T_max)):
                learning_rate = self.eta_min
        else:
            # Before annealing starts, just report the optimizer's current LR.
            learning_rate = self.model.optimizer.learning_rate
        if self.verbose > 0:
            print('\nEpoch %05d: CosineAnnealingScheduler setting learning rate to %s.' % (epoch + 1, learning_rate))

    def on_epoch_end(self, epoch, logs=None):
        """Expose the current LR in the epoch logs."""
        logs = logs or {}
        logs['learning_rate'] = K.get_value(self.model.optimizer.learning_rate)
(argument('id', help='id of instance to start/restart', type=int), usage='vast.py start instance <id> [--raw]', help='Start a stopped instance')
def start__instance(args):
    """Ask the API to move a stopped instance back into the 'running' state."""
    url = apiurl(args, '/instances/{id}/'.format(id=args.id))
    response = requests.put(url, json={'state': 'running'})
    # Raises for 4xx/5xx; anything else falls through to the status check below.
    response.raise_for_status()
    if (response.status_code != 200):
        print(response.text)
        print('failed with error {r.status_code}'.format(r=response))
        return
    payload = response.json()
    if payload['success']:
        print('starting instance {args.id}.'.format(args=args))
    else:
        print(payload['msg'])
def SEResNet18(input_shape=None, input_tensor=None, weights=None, classes=1000, stride_size=2, init_filters=64, include_top=False, repetitions=(2, 2, 2, 2), **kwargs):
    """Build an SE-ResNet-18 by delegating to the generic ResNet constructor."""
    model_params = MODELS_PARAMS['seresnet18']
    return ResNet(
        model_params,
        input_shape=input_shape,
        input_tensor=input_tensor,
        include_top=include_top,
        classes=classes,
        stride_size=stride_size,
        init_filters=init_filters,
        weights=weights,
        repetitions=repetitions,
        **kwargs,
    )
def parse_args(argv=None):
    """Parse command-line options for this script.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads ``sys.argv[1:]`` — preserving the original
            behavior — while allowing programmatic/testing use.

    Returns:
        argparse.Namespace with seed, data_file, out_file, data_classes,
        and scratch attributes.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--seed', type=int, help='seed', default=1)
    parser.add_argument('--data-file', type=str, default='_output/data.pkl')
    parser.add_argument('--out-file', type=str, default='_output/out.csv')
    parser.add_argument('--data-classes', type=int, default=0)
    parser.add_argument('--scratch', type=str, default='_output/scratch')
    args = parser.parse_args(argv)
    return args
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs):
    """Build a paired TCR alpha/beta (TRA/TRB) chain classifier.

    Each chain contributes four normalized logit streams — a masked CDR3
    convolution, CDR3 length, V gene, and J gene — and all eight streams
    are summed and re-normalized into the final logits.

    Returns:
        A Keras Model mapping the six feature inputs plus a per-sample
        weight input to `num_outputs` logits.
    """
    kmer_size = 4
    # --- inputs -----------------------------------------------------------
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])  # per-sample weights fed to NormalizeInitialization
    # --- alpha (TRA) chain -------------------------------------------------
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Conv1D(8, kmer_size)(features_tra_cdr3)
    logits_tra_cdr3 = Conv1D(num_outputs, kmer_size)(logits_tra_cdr3)
    # Two stacked convs shorten the sequence by 2*(kmer_size-1) positions.
    logits_tra_cdr3_mask = MaskCopy(trim_front=((2 * kmer_size) - 2))([logits_tra_cdr3, features_tra_mask])
    logits_tra_cdr3_pool = GlobalPoolWithMask()(logits_tra_cdr3_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3_pool, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # --- beta (TRB) chain ----------------------------------------------------
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Conv1D(8, kmer_size)(features_trb_cdr3)
    logits_trb_cdr3 = Conv1D(num_outputs, kmer_size)(logits_trb_cdr3)
    # BUG FIX: this previously passed features_tra_mask (the alpha-chain mask),
    # so the beta-chain pooling was masked with alpha-chain lengths; the beta
    # convolution output must be paired with the beta-chain mask.
    logits_trb_cdr3_mask = MaskCopy(trim_front=((2 * kmer_size) - 2))([logits_trb_cdr3, features_trb_mask])
    logits_trb_cdr3_pool = GlobalPoolWithMask()(logits_trb_cdr3_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3_pool, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    # --- combine all eight normalized streams ------------------------------
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
    return model
def check_box_8c_format(input_data):
    """Validate that `input_data` holds box_8c corners (3 coordinates x 8 corners).

    Accepts a single box of shape (3, 8) or a batch of shape (N, 3, 8), as
    either a numpy array or a TensorFlow tensor. Arrays with other ranks
    pass through unchecked (original behavior preserved).

    Raises:
        TypeError: if the shape does not match, or the input is neither an
            np.ndarray nor a tf.Tensor.
    """
    if isinstance(input_data, np.ndarray):
        if (input_data.ndim == 3):
            if (input_data.shape[1:] != (3, 8)):
                raise TypeError('Given input does not have valid number of attributes. Should be N x 3 x 8 for box_8c.')
        elif (input_data.ndim == 2):
            if (input_data.shape != (3, 8)):
                raise TypeError('Given input does not have valid number of attributes. Should be 3 x 8 for box_8c.')
    elif isinstance(input_data, tf.Tensor):
        # BUG FIX: a duplicated isinstance(input_data, tf.Tensor) check used to
        # wrap this branch, which attached the final `else` to the redundant
        # inner `if` and made the invalid-type TypeError unreachable.
        if (input_data.shape[1:] != (3, 8)):
            raise TypeError('Given input does not have valid number of attributes. Should be N x 3 x 8 for box_8c.')
    else:
        raise TypeError('Given input is not of valid types.(i.e. np.ndarray or tf.Tensor)')
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    """Deprecated alias for LayoutLMv2ImageProcessor; warns on construction."""

    def __init__(self, *args, **kwargs) -> None:
        deprecation_message = 'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use LayoutLMv2ImageProcessor instead.'
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
def open_tsv(fname, folder):
    """Read a caption/url TSV file and attach a target-folder column.

    Only the second TSV column is loaded (usecols=range(1, 2)), so the
    returned DataFrame holds a 'url' column plus the added 'folder' column.
    """
    print(f'Opening {fname} Data File...')
    frame = pd.read_csv(
        fname,
        sep='\t',
        names=['caption', 'url'],
        usecols=range(1, 2),
    )
    frame['folder'] = folder
    print('Processing', len(frame), ' Images:')
    return frame
class BasicBlock(nn.Module):
    """Pre-activation GNN block: norm -> ReLU -> (shared) dropout -> graph conv.

    NOTE(review): `forward` calls `self.gcn`, but no `gcn` attribute is
    assigned in `__init__` here — presumably a subclass or external code
    attaches the convolution layer; confirm before instantiating this class
    directly.
    """
    def __init__(self, norm, in_channels):
        super(BasicBlock, self).__init__()
        # Normalization layer built by the project's norm_layer factory.
        self.norm = norm_layer(norm, in_channels)
        # SharedDropout lets all blocks reuse one dropout mask per forward pass.
        self.dropout = SharedDropout()
    def forward(self, x, edge_index, dropout_mask=None, edge_emb=None):
        # Pre-activation: normalize and apply ReLU before the graph convolution.
        out = self.norm(x)
        out = F.relu(out)
        if isinstance(self.dropout, SharedDropout):
            # Install the externally provided mask so dropout is shared across blocks.
            if (dropout_mask is not None):
                self.dropout.set_mask(dropout_mask)
        out = self.dropout(out)
        # Pass edge embeddings through to the convolution only when provided.
        if (edge_emb is not None):
            out = self.gcn(out, edge_index, edge_emb)
        else:
            out = self.gcn(out, edge_index)
        return out
_module()
class GaussianFocalLoss(nn.Module):
    """Focal loss variant for Gaussian-distributed heatmap targets.

    Wraps the project's `gaussian_focal_loss` function and scales its
    result by `loss_weight`.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Compute the weighted Gaussian focal loss for `pred` vs `target`."""
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        # A truthy override replaces the reduction configured at construction.
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = gaussian_focal_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw_loss
def test_reference_split_handles_repeated_fields():
    """A single reference line containing two citations must be split into
    two structured records, each keeping the shared linemarker and raw_ref
    but its own author/title/journal/reportnumber fields."""
    ref_line = u'[20] A. Buchel, Finite temperature resolution of the Klebanov-Tseytlin singularity, Nucl. Phys. B 600, 219 (2001) [hep-th/0011146]. A. Buchel, C. P. Herzog, I. R. Klebanov, L. A. Pando Zayas and A. A. Tseytlin, Nonextremal gravity duals for fractional D-3 branes on the conifold, JHEP 0104 (2001) 033 [hep-th/0102105].'
    res = get_references(ref_line)
    # get_references returns (references, ...); only the parsed list matters here.
    references = res[0]
    assert (references == [{'author': [u'A. Buchel'], 'journal_page': [u'219'], 'journal_reference': [u'Nucl. Phys. B 600 (2001) 219'], 'journal_title': [u'Nucl. Phys. B'], 'journal_volume': [u'600'], 'journal_year': [u'2001'], 'linemarker': [u'20'], 'raw_ref': [u'[20] A. Buchel, Finite temperature resolution of the Klebanov-Tseytlin singularity, Nucl. Phys. B 600, 219 (2001) [hep-th/0011146]. A. Buchel, C. P. Herzog, I. R. Klebanov, L. A. Pando Zayas and A. A. Tseytlin, Nonextremal gravity duals for fractional D-3 branes on the conifold, JHEP 0104 (2001) 033 [hep-th/0102105].'], 'reportnumber': [u'hep-th/0011146'], 'title': [u'Finite temperature resolution of the Klebanov-Tseytlin singularity'], 'year': [u'2001']}, {'author': [u'A. Buchel, C. P. Herzog, I. R. Klebanov, L. A. Pando Zayas and A. A. Tseytlin'], 'journal_page': [u'033'], 'journal_reference': [u'J. High Energy Phys. 0104 (2001) 033'], 'journal_title': [u'J. High Energy Phys.'], 'journal_volume': [u'0104'], 'journal_year': [u'2001'], 'linemarker': [u'20'], 'raw_ref': [u'[20] A. Buchel, Finite temperature resolution of the Klebanov-Tseytlin singularity, Nucl. Phys. B 600, 219 (2001) [hep-th/0011146]. A. Buchel, C. P. Herzog, I. R. Klebanov, L. A. Pando Zayas and A. A. Tseytlin, Nonextremal gravity duals for fractional D-3 branes on the conifold, JHEP 0104 (2001) 033 [hep-th/0102105].'], 'reportnumber': [u'hep-th/0102105'], 'title': [u'Nonextremal gravity duals for fractional D-3 branes on the conifold'], 'year': [u'2001']}])
class TestEmbeddings(unittest.TestCase):
    """Unit tests for the Embeddings lookup layer."""

    def setUp(self):
        self.emb_size = 10
        self.vocab_size = 11
        self.pad_idx = 1
        torch.manual_seed(42)

    def _make_emb(self, **extra):
        # Shared constructor so every test builds the layer the same way.
        return Embeddings(embedding_dim=self.emb_size, vocab_size=self.vocab_size, padding_idx=self.pad_idx, **extra)

    def test_size(self):
        emb = self._make_emb()
        self.assertEqual(emb.lut.weight.shape, torch.Size([self.vocab_size, self.emb_size]))

    def test_pad_zeros(self):
        # The padding row must be initialized to all zeros.
        emb = self._make_emb()
        torch.testing.assert_close(emb.lut.weight[self.pad_idx], torch.zeros([self.emb_size]))

    def test_freeze(self):
        frozen = self._make_emb(freeze=True)
        for (_, param) in frozen.named_parameters():
            self.assertFalse(param.requires_grad)

    def test_forward(self):
        weights = self._get_random_embedding_weights()
        emb = self._make_emb()
        emb.lut.weight.data = weights
        indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
        looked_up = emb.forward(x=indices)
        # Forward must be a plain row lookup, with zeros at the pad index.
        torch.testing.assert_close(looked_up, torch.index_select(input=weights, index=indices, dim=0))
        torch.testing.assert_close(looked_up[2], torch.zeros([self.emb_size]))

    def test_scale(self):
        weights = self._get_random_embedding_weights()
        emb = self._make_emb(scale=True)
        emb.lut.weight.data = weights
        indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
        scaled = emb.forward(x=indices)
        # With scale=True, embeddings are multiplied by sqrt(embedding_dim).
        expected = torch.index_select(input=weights, index=indices, dim=0) * (self.emb_size ** 0.5)
        torch.testing.assert_close(scaled, expected, rtol=0.0001, atol=0.0001)

    def _get_random_embedding_weights(self):
        # Random table with a zeroed padding row.
        weights = torch.rand([self.vocab_size, self.emb_size])
        weights[self.pad_idx] = torch.zeros([self.emb_size])
        return weights
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.