class CacheStats():
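# Thread-safe per-path counters: num_queries counts how often each path was looked up,
# num_computes counts how often the value actually had to be (re)computed.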
def __init__(self):
self.num_queries: Dict[str, int] = defaultdict(int)
self.num_computes: Dict[str, int] = defaultdict(int)
self.lock = threading.Lock()
def reset(self):
with self.lock:
self.num_queries.clear()
self.num_computes.clear()
def increment_query(self, path: str):
with self.lock:
self.num_queries[path] += 1
def increment_compute(self, path: str):
with self.lock:
self.num_computes[path] += 1
def print_status(self):
with self.lock:
for path in self.num_queries:
hlog(f'{path}: {self.num_queries[path]} queries, {self.num_computes[path]} computes') |
def should_build_for_install_command(req, check_binary_allowed):
return _should_build(req, need_wheel=False, check_binary_allowed=check_binary_allowed) |
class GCSObject(ObjectStoreObject):
def full_path(self):
return os.path.join(f'gs://{self.bucket}', self.key) |
@HEADS.register_module()
class FusedSemanticHead(nn.Module):
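# Multi-level fused semantic segmentation head: lateral 1x1 convs project each input
# level, all levels are resized to the fusion_level resolution and summed, then a small
# conv stack produces per-pixel class logits (mask_pred) plus an embedding feature map.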
def __init__(self, num_ins, fusion_level, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, ignore_label=255, loss_weight=0.2, conv_cfg=None, norm_cfg=None):
super(FusedSemanticHead, self).__init__()
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.ignore_label = ignore_label
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(ConvModule(self.in_channels, self.in_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (self.in_channels if (i == 0) else conv_out_channels)
self.convs.append(ConvModule(in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(conv_out_channels, conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
def init_weights(self):
kaiming_init(self.conv_logits)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[(- 2):])
for (i, feat) in enumerate(feats):
if (i != self.fusion_level):
feat = F.interpolate(feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return (mask_pred, x)
@force_fp32(apply_to=('mask_pred',))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
loss_semantic_seg *= self.loss_weight
return loss_semantic_seg |
def may_build_model_ema(cfg, model):
if (not cfg.MODEL_EMA.ENABLED):
return
model = _remove_ddp(model)
assert (not hasattr(model, 'ema_state')), 'Name `ema_state` is reserved for model ema.'
model.ema_state = EMAState()
logger.info('Using Model EMA.') |
def bin_op_out_template(backend: Type[Backend], a: Union[Tensor[T], int, float, numpy.number], b: Union[Tensor[T], int, float, numpy.number], *, name: str, copy_sparse_dim: bool=True, allow_broadcast_all_sources: Optional[bool]=None, dim_order: Optional[Sequence[Dim]]=None, allow_scalar: bool=True) -> Tuple[Tensor[T], T, T]:
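# Prepares a binary elementwise op between two tensors (or scalars): converts scalars to
# Tensors, collects the union of dims from both operands, enforces the broadcast policy
# via allow_broadcast_all_sources, and returns the output Tensor template together with
# raw tensors made compatible with the common dim order.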
src_dtype = None
src_device = None
if isinstance(a, Tensor):
src_dtype = a.dtype
src_device = a.device
elif isinstance(b, Tensor):
src_dtype = b.dtype
src_device = b.device
a = rf.convert_to_tensor(a, dtype=src_dtype, device=src_device, keep_scalar_on_cpu=allow_scalar, _backend=backend)
src_dtype = (src_dtype or a.dtype)
b = rf.convert_to_tensor(b, dtype=src_dtype, device=src_device, keep_scalar_on_cpu=allow_scalar, _backend=backend)
assert (a._raw_backend == b._raw_backend), 'Cannot combine tensors from two different frontends, e.g. TF and PT'
all_dims = []
for dim in (a.dims + b.dims):
if (dim in all_dims):
continue
if ((a.dims.count(dim) <= 1) and (b.dims.count(dim) <= 1)):
all_dims.append(dim)
continue
if (a.dims.count(dim) >= b.dims.count(dim)):
all_dims.extend([dim_ for dim_ in a.dims if (dim_ == dim)])
else:
all_dims.extend([dim_ for dim_ in b.dims if (dim_ == dim)])
if all(((set(x.dims) != set(all_dims)) for x in (a, b))):
if (allow_broadcast_all_sources is False):
raise ValueError(f'compare: sources {a!r} {b!r} not allowed with allow_broadcast_all_sources=False')
elif (allow_broadcast_all_sources is None):
raise ValueError(f'compare: sources {a!r} {b!r} require explicit allow_broadcast_all_sources=True')
elif (allow_broadcast_all_sources is True):
pass
else:
raise TypeError(f'invalid type for allow_broadcast_all_sources: {type(allow_broadcast_all_sources)}')
if dim_order:
all_dims.sort(key=(lambda d: (dim_order.index(d) if (d in dim_order) else len(dim_order))))
out = Tensor(name, dims=all_dims, dtype=src_dtype)
out.feature_dim = res_feature_dim(a, b)
if copy_sparse_dim:
out.sparse_dim = res_sparse_dim(a, b)
if ((not allow_scalar) or a.dims):
a_raw = a.copy_compatible_to_dims_raw(all_dims)
else:
a_raw = a.raw_tensor
if ((not allow_scalar) or b.dims):
b_raw = b.copy_compatible_to_dims_raw(all_dims)
else:
b_raw = b.raw_tensor
return (out, a_raw, b_raw) |
class AnomalibDataset(Dataset, ABC):
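# Base dataset for anomaly detection tasks. Subclasses populate self._samples (a
# DataFrame with image/mask paths and label_index) in _setup(); __getitem__ returns the
# transformed image, plus mask and boxes for segmentation and detection tasks.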
def __init__(self, task: TaskType, transform: A.Compose) -> None:
super().__init__()
self.task = task
self.transform = transform
self._samples: DataFrame
def __len__(self) -> int:
return len(self.samples)
def subsample(self, indices: Sequence[int], inplace: bool=False) -> AnomalibDataset:
assert (len(set(indices)) == len(indices)), 'No duplicates allowed in indices.'
dataset = (self if inplace else copy.deepcopy(self))
dataset.samples = self.samples.iloc[indices].reset_index(drop=True)
return dataset
@property
def is_setup(self) -> bool:
return hasattr(self, '_samples')
@property
def samples(self) -> DataFrame:
if (not self.is_setup):
raise RuntimeError('Dataset is not setup yet. Call setup() first.')
return self._samples
@samples.setter
def samples(self, samples: DataFrame) -> None:
assert isinstance(samples, DataFrame), f'samples must be a pandas.DataFrame, found {type(samples)}'
expected_columns = _EXPECTED_COLUMNS_PERTASK[self.task]
assert all(((col in samples.columns) for col in expected_columns)), f'samples must have (at least) columns {expected_columns}, found {samples.columns}'
assert samples['image_path'].apply((lambda p: Path(p).exists())).all(), 'missing file path(s) in samples'
self._samples = samples.sort_values(by='image_path', ignore_index=True)
def has_normal(self) -> bool:
return (0 in list(self.samples.label_index))
def has_anomalous(self) -> bool:
return (1 in list(self.samples.label_index))
def __getitem__(self, index: int) -> dict[str, str | Tensor]:
image_path = self._samples.iloc[index].image_path
mask_path = self._samples.iloc[index].mask_path
label_index = self._samples.iloc[index].label_index
image = read_image(image_path)
item = dict(image_path=image_path, label=label_index)
if (self.task == TaskType.CLASSIFICATION):
transformed = self.transform(image=image)
item['image'] = transformed['image']
elif (self.task in (TaskType.DETECTION, TaskType.SEGMENTATION)):
if (label_index == 0):
mask = np.zeros(shape=image.shape[:2])
else:
mask = (cv2.imread(mask_path, flags=0) / 255.0)
transformed = self.transform(image=image, mask=mask)
item['image'] = transformed['image']
item['mask_path'] = mask_path
item['mask'] = transformed['mask']
if (self.task == TaskType.DETECTION):
(boxes, _) = masks_to_boxes(item['mask'])
item['boxes'] = boxes[0]
else:
raise ValueError(f'Unknown task type: {self.task}')
return item
def __add__(self, other_dataset: AnomalibDataset) -> AnomalibDataset:
assert isinstance(other_dataset, self.__class__), 'Cannot concatenate datasets that are not of the same type.'
assert self.is_setup, 'Cannot concatenate uninitialized datasets. Call setup first.'
assert other_dataset.is_setup, 'Cannot concatenate uninitialized datasets. Call setup first.'
dataset = copy.deepcopy(self)
dataset.samples = pd.concat([self.samples, other_dataset.samples], ignore_index=True)
return dataset
def setup(self) -> None:
if (not self.is_setup):
self._setup()
assert self.is_setup, 'setup() should set self._samples'
def _setup(self) -> DataFrame:
raise NotImplementedError
def augment_train_set(self, dirpath: str, augmentation_transforms: A.Compose, number_transforms: int):
augmented_images_info = []
for index in range(len(self._samples)):
for num in range(number_transforms):
image_path = self._samples.iloc[index].image_path
mask_path = self._samples.iloc[index].mask_path
label = self._samples.iloc[index].label
label_index = self._samples.iloc[index].label_index
if (label_index != 0):
raise ValueError('Augmentations currently only supported on healthy train samples')
image = read_image(image_path)
transformed = augmentation_transforms(image=image)
augmented_image_path = (dirpath + f'/{Path(image_path).stem}_augmented_{num}.png')
cv2.imwrite(augmented_image_path, transformed['image'])
augmented_images_info.append(dict(image_path=str(augmented_image_path), mask_path=mask_path, label=label, label_index=label_index))
self._samples = pd.concat([self._samples, pd.DataFrame(augmented_images_info)], ignore_index=True) |
class HardSigmoidChannel(PiecewiseLinearChannel):
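# Hard sigmoid expressed as three linear regions: output 0 for z < -2.5, a ramp of
# slope 1/5 passing through 0.5 at z = 0, and output 1 for z > 2.5.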
def __init__(self):
L = 2.5
neg = dict(zmin=(- np.inf), zmax=(- L), slope=0, x0=0)
mid = dict(zmin=(- L), zmax=(+ L), slope=(1 / (2 * L)), x0=0.5)
pos = dict(zmin=L, zmax=np.inf, slope=0, x0=1)
super().__init__(name='h-sigm', regions=[pos, mid, neg]) |
def is_args_coref(arg_i, arg_j, topic):
global non_coref_args_count, checked_args_count
cluster_i = topic.entity_mention_id_to_gold[arg_i]
cluster_j = topic.entity_mention_id_to_gold[arg_j]
checked_args_count += 1
if (cluster_i == cluster_j):
return True
else:
non_coref_args_count += 1
return False |
def adjust_learning_rate(optimizer, learning_rate, i_iter, max_iter, power):
lr = lr_poly(learning_rate, i_iter, max_iter, power)
optimizer.param_groups[0]['lr'] = lr
return lr |
@register_task('cross_lingual_lm')
class CrossLingualLMTask(FairseqTask):
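# Fairseq task for cross-lingual masked LM (XLM-style) training: one monolingual
# MaskedLMDataset per language, combined via MultiCorpusSampledDataset, with each
# language mapped to its own segment id through langs2id.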
@staticmethod
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample')
parser.add_argument('--monolingual-langs', default='en', type=str, help='comma separated list of languages for which we want to train XLM on')
parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset')
parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily')
parser.add_argument('--shuffle', action='store_true', help='shuffle each monolingual dataset while training')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
self.distributed_world_size = args.distributed_world_size
self.langs2id = self._lang_to_id(args.monolingual_langs)
def _lang_to_id(self, languages: str):
lang2id = {}
langs = [l.strip() for l in languages.split(',')]
for (id, lang) in enumerate(langs):
lang2id[lang] = id
return lang2id
@classmethod
def load_dictionary(cls, filename):
return MaskedLMDictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
d = MaskedLMDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = MaskedLMDictionary.load(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def _load_single_lang_dataset(self, split, epoch):
loaded_datasets = []
paths = self.args.data.split(':')
assert (len(paths) > 0)
data_path = paths[(epoch % len(paths))]
for k in itertools.count():
split_k = (split + (str(k) if (k > 0) else ''))
path = os.path.join(data_path, split_k)
ds = data_utils.load_indexed_dataset(path, self.dictionary, self.args.dataset_impl)
if (ds is None):
if (k > 0):
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
loaded_datasets.append(TokenBlockDataset(ds, ds.sizes, (self.args.tokens_per_sample - 1), pad=self.dictionary.pad(), eos=self.dictionary.eos()))
print('| {} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)])))
if (len(loaded_datasets) == 1):
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
return (dataset, sizes)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
dataset_map = OrderedDict()
for lang in self.langs2id.keys():
language_split = '{}.{}'.format(split, lang)
(block_dataset, sizes) = self._load_single_lang_dataset(split=language_split, epoch=epoch)
dataset_map[lang] = MaskedLMDataset(dataset=block_dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.eos(), sep_token_idx=self.dictionary.eos(), shuffle=getattr(self.args, 'shuffle', False), has_pairs=False, segment_id=self.langs2id[lang], seed=self.seed)
self.datasets[split] = MultiCorpusSampledDataset(dataset_map)
print('| {} {} {} examples'.format(self.args.data.split(':')[epoch], split, len(self.datasets[split]))) |
def show_performance(distortion_name):
errs = []
for severity in range(1, 6):
distorted_dataset = dset.ImageFolder(root=((('imagenet2012_corrupted/' + distortion_name) + '/') + str(severity)), transform=trn.Compose([trn.CenterCrop(224), trn.ToTensor(), trn.Normalize(mean, std)]))
distorted_dataset_loader = torch.utils.data.DataLoader(distorted_dataset, batch_size=args.test_bs, shuffle=False, num_workers=args.prefetch, pin_memory=True)
correct = 0
for (batch_idx, (data, target)) in enumerate(distorted_dataset_loader):
with torch.no_grad():
data = data.cuda()
output = net(data)
pred = output.data.max(1)[1]
correct += pred.eq(target.cuda()).sum()
err = (1 - ((1.0 * correct) / len(distorted_dataset)))
errs.append(err.cpu().numpy())
print('\n=Average', tuple(errs))
return np.mean(errs) |
class FasterBlock(nn.Module):
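# Basic residual block: two 3x3 conv + BatchNorm layers with ReLU, plus a strided
# conv + BatchNorm shortcut whenever the spatial resolution changes (stride > 1).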
def __init__(self, in_channels, out_channels, stride=1):
super(FasterBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
self._conv1 = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, stride=self.stride, padding=1)
self._norm1 = nn.BatchNorm2d(self.out_channels)
self._activation = nn.ReLU(inplace=True)
self._conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=1)
self._norm2 = nn.BatchNorm2d(self.out_channels)
if (self.stride > 1):
self._downsample = nn.Sequential(nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, stride=self.stride, padding=1), nn.BatchNorm2d(self.out_channels))
else:
self._downsample = None
def forward(self, x):
identity = x
out = self._conv1(x)
out = self._norm1(out)
out = self._activation(out)
out = self._conv2(out)
out = self._norm2(out)
if (self._downsample is not None):
identity = self._downsample(x)
out += identity
out = self._activation(out)
return out |
def get_placeholder(name, dtype, shape):
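# Returns a cached tf.placeholder for `name`, creating it on first use and asserting
# that repeated requests agree on dtype and shape.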
if (name in _PLACEHOLDER_CACHE):
(out, dtype1, shape1) = _PLACEHOLDER_CACHE[name]
assert ((dtype1 == dtype) and (shape1 == shape))
return out
else:
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out |
@test_utils.test(arch=ti.cuda)
def test_gpu_sparse_solver():
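# Builds a random symmetric positive-definite matrix, fills a Taichi sparse matrix from
# its COO triplets, solves A x = b on CUDA (via analyze_pattern/factorize and via
# compute), and checks both solutions against numpy.linalg.solve.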
from scipy.sparse import coo_matrix
@ti.kernel
def init_b(b: ti.types.ndarray(), nrows: ti.i32):
for i in range(nrows):
b[i] = (1.0 + (i / nrows))
n = 10
A = np.random.rand(n, n)
A_psd = (np.dot(A, A.transpose()) + np.eye(n)).astype(np.float32)
A_raw_coo = coo_matrix(A_psd)
(nrows, ncols) = A_raw_coo.shape
nnz = A_raw_coo.nnz
A_csr = A_raw_coo.tocsr()
b = ti.ndarray(shape=nrows, dtype=ti.f32)
init_b(b, nrows)
A_coo = A_csr.tocoo()
A_builder = ti.linalg.SparseMatrixBuilder(num_rows=nrows, num_cols=ncols, dtype=ti.f32, max_num_triplets=nnz)
@ti.kernel
def fill(A_builder: ti.types.sparse_matrix_builder(), row_coo: ti.types.ndarray(), col_coo: ti.types.ndarray(), val_coo: ti.types.ndarray()):
for i in range(nnz):
A_builder[(row_coo[i], col_coo[i])] += val_coo[i]
fill(A_builder, A_coo.row, A_coo.col, A_coo.data)
A_ti = A_builder.build()
x_ti = ti.ndarray(shape=ncols, dtype=ti.float32)
b_np = b.to_numpy()
x_np = np.linalg.solve(A_psd, b_np)
solver = ti.linalg.SparseSolver(dtype=ti.f32)
solver.analyze_pattern(A_ti)
solver.factorize(A_ti)
x_ti = solver.solve(b)
ti.sync()
assert np.allclose(x_ti.to_numpy(), x_np, rtol=0.005)
solver = ti.linalg.SparseSolver(dtype=ti.f32)
solver.compute(A_ti)
x_cti = solver.solve(b)
ti.sync()
assert np.allclose(x_cti.to_numpy(), x_np, rtol=0.005) |
class TransformerEncoder(nn.Module):
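# wav2vec 2.0-style Transformer encoder: convolutional relative positional embeddings
# (weight-normalized Conv1d + SamePad + GELU) are added to the input, followed by a
# stack of self-attention layers with LayerDrop; extract_features can stop at and
# return a specific target layer.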
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.pos_conv = nn.Conv1d(self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=(args.conv_pos // 2), groups=args.conv_pos_groups)
dropout = 0
std = math.sqrt(((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name='weight', dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
self.layers = nn.ModuleList([TransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first) for _ in range(args.encoder_layers)])
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None):
(x, layer_results) = self.extract_features(x, padding_mask, layer)
if (self.layer_norm_first and (layer is None)):
x = self.layer_norm(x)
return (x, layer_results)
def extract_features(self, x, padding_mask=None, tgt_layer=None):
if (padding_mask is not None):
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = (x + x_conv)
if (not self.layer_norm_first):
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = x.transpose(0, 1)
layer_results = []
r = None
for (i, layer) in enumerate(self.layers):
dropout_probability = np.random.random()
if ((not self.training) or (dropout_probability > self.layerdrop)):
(x, z) = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
if (tgt_layer is not None):
layer_results.append((x, z))
if (i == tgt_layer):
r = x
break
if (r is not None):
x = r
x = x.transpose(0, 1)
return (x, layer_results)
def max_positions(self):
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
return state_dict |
def get_relations_by_type(data_dir, relation_index_path):
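# Splits dev-set relations into to-many vs. to-one relations based on the average number
# of answer entities per query (threshold theta_1_to_M = 1.5), then reports how many dev
# triples fall into each group and returns the relation id sets and example ratios.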
with open(os.path.join(data_dir, 'raw.kb')) as f:
triples = list(f.readlines())
with open(os.path.join(data_dir, 'train.triples')) as f:
triples += list(f.readlines())
triples = list(set(triples))
query_answers = dict()
theta_1_to_M = 1.5
for triple_str in triples:
(e1, e2, r) = triple_str.strip().split('\t')
if (not (r in query_answers)):
query_answers[r] = dict()
if (not (e1 in query_answers[r])):
query_answers[r][e1] = set()
query_answers[r][e1].add(e2)
to_M_rels = set()
to_1_rels = set()
dev_rels = set()
with open(os.path.join(data_dir, 'dev.triples')) as f:
for line in f:
(e1, e2, r) = line.strip().split('\t')
dev_rels.add(r)
(relation2id, _) = load_index(relation_index_path)
num_rels = len(dev_rels)
print('{} relations in dev dataset in total'.format(num_rels))
for r in dev_rels:
ratio = np.mean([len(x) for x in query_answers[r].values()])
if (ratio > theta_1_to_M):
to_M_rels.add(relation2id[r])
else:
to_1_rels.add(relation2id[r])
num_to_M = (len(to_M_rels) + 0.0)
num_to_1 = (len(to_1_rels) + 0.0)
print('to-M relations: {}/{} ({})'.format(num_to_M, num_rels, (num_to_M / num_rels)))
print('to-1 relations: {}/{} ({})'.format(num_to_1, num_rels, (num_to_1 / num_rels)))
to_M_examples = []
to_1_examples = []
num_exps = 0
with open(os.path.join(data_dir, 'dev.triples')) as f:
for line in f:
num_exps += 1
(e1, e2, r) = line.strip().split('\t')
if (relation2id[r] in to_M_rels):
to_M_examples.append(line)
elif (relation2id[r] in to_1_rels):
to_1_examples.append(line)
num_to_M_exps = (len(to_M_examples) + 0.0)
num_to_1_exps = (len(to_1_examples) + 0.0)
to_M_ratio = (num_to_M_exps / num_exps)
to_1_ratio = (num_to_1_exps / num_exps)
print('to-M examples: {}/{} ({})'.format(num_to_M_exps, num_exps, to_M_ratio))
print('to-1 examples: {}/{} ({})'.format(num_to_1_exps, num_exps, to_1_ratio))
return (to_M_rels, to_1_rels, (to_M_ratio, to_1_ratio)) |
class RepVGGConvModule(nn.Module):
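# RepVGG-style re-parameterizable conv block: at training time it runs parallel 3x3,
# 1x1 and (optional) identity-BN branches; get_equivalent_kernel_bias() fuses the BN
# statistics and pads the 1x1 kernel so the branches collapse into a single 3x3 conv
# (rbr_reparam) for deployment.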
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, activation='ReLU', padding_mode='zeros', deploy=False):
super(RepVGGConvModule, self).__init__()
assert ((activation is None) or isinstance(activation, str))
self.activation = activation
self.deploy = deploy
self.groups = groups
self.in_channels = in_channels
assert (kernel_size == 3)
assert (padding == 1)
padding_11 = (padding - (kernel_size // 2))
if self.activation:
self.act = act_layers(self.activation)
if deploy:
self.rbr_reparam = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode)
else:
self.rbr_identity = (nn.BatchNorm2d(num_features=in_channels) if ((out_channels == in_channels) and (stride == 1)) else None)
self.rbr_dense = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False), nn.BatchNorm2d(num_features=out_channels))
self.rbr_1x1 = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups, bias=False), nn.BatchNorm2d(num_features=out_channels))
def forward(self, inputs):
if hasattr(self, 'rbr_reparam'):
return self.act(self.rbr_reparam(inputs))
if (self.rbr_identity is None):
id_out = 0
else:
id_out = self.rbr_identity(inputs)
return self.act(((self.rbr_dense(inputs) + self.rbr_1x1(inputs)) + id_out))
def get_equivalent_kernel_bias(self):
(kernel3x3, bias3x3) = self._fuse_bn_tensor(self.rbr_dense)
(kernel1x1, bias1x1) = self._fuse_bn_tensor(self.rbr_1x1)
(kernelid, biasid) = self._fuse_bn_tensor(self.rbr_identity)
return (((kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1)) + kernelid), ((bias3x3 + bias1x1) + biasid))
def _pad_1x1_to_3x3_tensor(self, kernel1x1):
if (kernel1x1 is None):
return 0
else:
return nn.functional.pad(kernel1x1, [1, 1, 1, 1])
def _fuse_bn_tensor(self, branch):
if (branch is None):
return (0, 0)
if isinstance(branch, nn.Sequential):
kernel = branch[0].weight
running_mean = branch[1].running_mean
running_var = branch[1].running_var
gamma = branch[1].weight
beta = branch[1].bias
eps = branch[1].eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if (not hasattr(self, 'id_tensor')):
input_dim = (self.in_channels // self.groups)
kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
for i in range(self.in_channels):
kernel_value[(i, (i % input_dim), 1, 1)] = 1
self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape((- 1), 1, 1, 1)
return ((kernel * t), (beta - ((running_mean * gamma) / std)))
def repvgg_convert(self):
(kernel, bias) = self.get_equivalent_kernel_bias()
return (kernel.detach().cpu().numpy(), bias.detach().cpu().numpy()) |
def head_tail(sequence: S[T]) -> tuple[T | type(NO_HEAD), S[T]]:
if (len(sequence) == 0):
return (NO_HEAD, ())
else:
return (sequence[0], sequence[1:]) |
def evaluate_data(data: T | Callable[[], T]) -> T:
return (data() if callable(data) else data) |
def test_meanshift_all_orphans():
ms = MeanShift(bandwidth=0.1, seeds=[[(- 9), (- 9)], [(- 10), (- 10)]])
msg = 'No point was within bandwidth=0.1'
with pytest.raises(ValueError, match=msg):
ms.fit(X) |
def create_vocabulary_lookup_table(filename, default_value=None):
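# Reads a vocabulary file (optionally "token<TAB>count" per line), appends the special
# tokens, and builds three lookup hash tables: token->id, id->token and token->count,
# returning them together with the final vocabulary size.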
if (not gfile.Exists(filename)):
raise ValueError('File does not exist: {}'.format(filename))
with gfile.GFile(filename) as file:
vocab = list((line.strip('\n') for line in file))
vocab_size = len(vocab)
has_counts = (len(vocab[0].split('\t')) == 2)
if has_counts:
(vocab, counts) = zip(*[_.split('\t') for _ in vocab])
counts = [float(_) for _ in counts]
vocab = list(vocab)
else:
counts = [(- 1.0) for _ in vocab]
special_vocab = get_special_vocab(vocab_size)
vocab += list(special_vocab._fields)
vocab_size += len(special_vocab)
counts += [(- 1.0) for _ in list(special_vocab._fields)]
if (default_value is None):
default_value = special_vocab.UNK
tf.logging.info('Creating vocabulary lookup table of size %d', vocab_size)
vocab_tensor = tf.constant(vocab)
count_tensor = tf.constant(counts, dtype=tf.float32)
vocab_idx_tensor = tf.range(vocab_size, dtype=tf.int64)
id_to_vocab_init = tf.contrib.lookup.KeyValueTensorInitializer(vocab_idx_tensor, vocab_tensor, tf.int64, tf.string)
id_to_vocab_table = tf.contrib.lookup.HashTable(id_to_vocab_init, 'UNK')
vocab_to_id_init = tf.contrib.lookup.KeyValueTensorInitializer(vocab_tensor, vocab_idx_tensor, tf.string, tf.int64)
vocab_to_id_table = tf.contrib.lookup.HashTable(vocab_to_id_init, default_value)
word_to_count_init = tf.contrib.lookup.KeyValueTensorInitializer(vocab_tensor, count_tensor, tf.string, tf.float32)
word_to_count_table = tf.contrib.lookup.HashTable(word_to_count_init, (- 1))
return (vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size) |
class TestTensorBoardPytorchGraph(BaseTestCase):
def test_pytorch_graph(self):
dummy_input = (torch.zeros(1, 3),)
class myLinear(torch.nn.Module):
def __init__(self):
super(myLinear, self).__init__()
self.l = torch.nn.Linear(3, 5)
def forward(self, x):
return self.l(x)
with self.createSummaryWriter() as w:
w.add_graph(myLinear(), dummy_input)
(actual_proto, _) = graph(myLinear(), dummy_input)
expected_str = read_expected_content(self)
expected_proto = GraphDef()
text_format.Parse(expected_str, expected_proto)
self.assertEqual(len(expected_proto.node), len(actual_proto.node))
for i in range(len(expected_proto.node)):
expected_node = expected_proto.node[i]
actual_node = actual_proto.node[i]
self.assertEqual(expected_node.name, actual_node.name)
self.assertEqual(expected_node.op, actual_node.op)
self.assertEqual(expected_node.input, actual_node.input)
self.assertEqual(expected_node.device, actual_node.device)
self.assertEqual(sorted(expected_node.attr.keys()), sorted(actual_node.attr.keys()))
def test_mlp_graph(self):
dummy_input = (torch.zeros(2, 1, 28, 28),)
class myMLP(torch.nn.Module):
def __init__(self):
super(myMLP, self).__init__()
self.input_len = ((1 * 28) * 28)
self.fc1 = torch.nn.Linear(self.input_len, 1200)
self.fc2 = torch.nn.Linear(1200, 1200)
self.fc3 = torch.nn.Linear(1200, 10)
def forward(self, x, update_batch_stats=True):
h = torch.nn.functional.relu(self.fc1(x.view((- 1), self.input_len)))
h = self.fc2(h)
h = torch.nn.functional.relu(h)
h = self.fc3(h)
return h
with self.createSummaryWriter() as w:
w.add_graph(myMLP(), dummy_input)
def test_wrong_input_size(self):
with self.assertRaises(RuntimeError) as e_info:
dummy_input = torch.rand(1, 9)
model = torch.nn.Linear(3, 5)
with self.createSummaryWriter() as w:
w.add_graph(model, dummy_input)
def test_torchvision_smoke(self):
model_input_shapes = {'alexnet': (2, 3, 224, 224), 'resnet34': (2, 3, 224, 224), 'resnet152': (2, 3, 224, 224), 'densenet121': (2, 3, 224, 224), 'vgg16': (2, 3, 224, 224), 'vgg19': (2, 3, 224, 224), 'vgg16_bn': (2, 3, 224, 224), 'vgg19_bn': (2, 3, 224, 224), 'mobilenet_v2': (2, 3, 224, 224)}
for (model_name, input_shape) in model_input_shapes.items():
with self.createSummaryWriter() as w:
model = getattr(torchvision.models, model_name)()
w.add_graph(model, torch.zeros(input_shape)) |
class Representation_abstract(CombinatorialFreeModule):
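# Abstract base class for semigroup representations built on a CombinatorialFreeModule;
# stores the acting semigroup and its algebra, and normalizes side='twosided' to 'left'
# before delegating invariant_module / twisted_invariant_module to the parent class.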
def __init__(self, semigroup, base_ring, *args, **opts):
self._semigroup = semigroup
self._semigroup_algebra = semigroup.algebra(base_ring)
CombinatorialFreeModule.__init__(self, base_ring, *args, **opts)
def semigroup(self):
return self._semigroup
def semigroup_algebra(self):
return self._semigroup_algebra
@abstract_method
def side(self):
"""Return the side ('left', 'right', or 'twosided') on which the semigroup acts."""
def invariant_module(self, S=None, **kwargs):
if (S is None):
S = self.semigroup()
side = kwargs.pop('side', self.side())
if (side == 'twosided'):
side = 'left'
return super().invariant_module(S, side=side, **kwargs)
def twisted_invariant_module(self, chi, G=None, **kwargs):
from sage.categories.groups import Groups
if (G is None):
G = self.semigroup()
elif (chi in Groups()):
(G, chi) = (chi, G)
side = kwargs.pop('side', self.side())
if (side == 'twosided'):
side = 'left'
return super().twisted_invariant_module(G, chi, side=side, **kwargs) |
def save_checkpoint(state, is_best, filedir, filepre, filename='_checkpoint.pth.tar'):
torch.save(state, os.path.join(filedir, (filepre + filename)))
if is_best:
shutil.copyfile(os.path.join(filedir, (filepre + filename)), os.path.join(filedir, 'model_best.pth.tar')) |
class SageDocTestRunner(doctest.DocTestRunner):
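# Doctest runner used by the Sage doctesting framework: spoofs stdin/stdout, tracks
# per-example timings and MD5 digests of executed source, handles optional tags and
# walltime budgets, and can drop into a debugger or interactive shell on failures.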
def __init__(self, *args, **kwds):
O = kwds.pop('outtmpfile', None)
self.msgfile = kwds.pop('msgfile', None)
self.options = kwds.pop('sage_options')
doctest.DocTestRunner.__init__(self, *args, **kwds)
self._fakeout = SageSpoofInOut(O)
if (self.msgfile is None):
self.msgfile = self._fakeout.real_stdout
self.history = []
self.references = []
self.setters = defaultdict(dict)
self.running_global_digest = hashlib.md5()
self.total_walltime_skips = 0
self.total_performed_tests = 0
self.total_walltime = 0
def _run(self, test, compileflags, out):
set_globals(test.globs)
failures = tries = walltime_skips = 0
quiet = False
original_optionflags = self.optionflags
(SUCCESS, FAILURE, BOOM) = range(3)
check = self._checker.check_output
for (examplenum, example) in enumerate(test.examples):
if failures:
if self.options.exitfirst:
break
quiet |= (self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE)
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= (~ optionflag)
if ((self.options.target_walltime != (- 1)) and (self.total_walltime >= self.options.target_walltime)):
walltime_skips += 1
self.optionflags |= doctest.SKIP
if (self.optionflags & doctest.SKIP):
continue
tries += 1
with OriginalSource(example):
print((('sage: ' + example.source[:(- 1)]) + (' ## line %s ##' % ((test.lineno + example.lineno) + 1))))
self._fakeout.getvalue()
if (not quiet):
self.report_start(out, test, example)
sys.stdout.flush()
sys.stderr.flush()
self.msgfile.flush()
filename = ('<doctest %s[%d]>' % (test.name, examplenum))
exception = None
def compiler(example):
code = compile(example.source, filename, 'single', compileflags, 1)
execcode = compile(example.source, filename, 'exec', compileflags, 1)
linenumbers1 = {lineno for (index, lineno) in findlinestarts(code)}
linenumbers2 = {lineno for (index, lineno) in findlinestarts(execcode)}
if (linenumbers1 != linenumbers2):
raise SyntaxError('doctest is not a single statement')
return code
if (not self.options.gc):
pass
elif (self.options.gc > 0):
if gc.isenabled():
gc.collect()
elif (self.options.gc < 0):
gc.disable()
try:
self.compile_and_execute(example, compiler, test.globs)
except SystemExit:
raise
except BaseException:
exception = sys.exc_info()
try:
sys.exc_clear()
except AttributeError:
pass
finally:
if (self.debugger is not None):
self.debugger.set_continue()
check_starttime = walltime()
got = self._fakeout.getvalue()
outcome = FAILURE
probed_tags = getattr(example, 'probed_tags', False)
if (exception is None):
if check(example.want, got, self.optionflags):
if (probed_tags and (probed_tags is not True)):
example.warnings.append(f"The tag '{unparse_optional_tags(probed_tags)}' may no longer be needed; these features are not present, but we ran the doctest anyway as requested by --probe, and it succeeded.")
outcome = SUCCESS
else:
exc_msg = traceback.format_exception_only(*exception[:2])[(- 1)]
if (example.exc_msg is not None):
exc_cls = exception[0]
exc_name = exc_cls.__name__
if exc_cls.__module__:
exc_fullname = ((exc_cls.__module__ + '.') + exc_cls.__qualname__)
else:
exc_fullname = exc_cls.__qualname__
if (example.exc_msg.startswith(exc_name) and exc_msg.startswith(exc_fullname)):
exc_msg = exc_msg.replace(exc_fullname, exc_name, 1)
if (not quiet):
got += doctest._exception_traceback(exception)
if (example.exc_msg is None):
outcome = BOOM
elif check(example.exc_msg, exc_msg, self.optionflags):
if (probed_tags and (probed_tags is not True)):
example.warnings.append(f"The tag '{unparse_optional_tags(example.probed_tags)}' may no longer be needed; these features are not present, but we ran the doctest anyway as requested by --probe, and it succeeded (raised the expected exception).")
outcome = SUCCESS
elif (self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL):
m1 = re.match('(?:[^:]*\\.)?([^:]*:)', example.exc_msg)
m2 = re.match('(?:[^:]*\\.)?([^:]*:)', exc_msg)
if (m1 and m2 and check(m1.group(1), m2.group(1), self.optionflags)):
if (probed_tags and (probed_tags is not True)):
example.warnings.append(f"The tag '{unparse_optional_tags(example.probed_tags)}' may no longer be needed; these features are not present, but we ran the doctest anyway as requested by --probe, and it succeeded (raised an exception as expected).")
outcome = SUCCESS
check_duration = walltime(check_starttime)
self.total_walltime += (example.walltime + check_duration)
if example.warnings:
for warning in example.warnings:
out(self._failure_header(test, example, f'Warning: {warning}'))
if (outcome is SUCCESS):
if ((self.options.warn_long > 0) and ((example.walltime + check_duration) > self.options.warn_long)):
self.report_overtime(out, test, example, got, check_duration=check_duration)
elif example.warnings:
pass
elif (not quiet):
self.report_success(out, test, example, got, check_duration=check_duration)
elif probed_tags:
pass
elif (outcome is FAILURE):
if (not quiet):
self.report_failure(out, test, example, got, test.globs)
failures += 1
elif (outcome is BOOM):
if (not quiet):
self.report_unexpected_exception(out, test, example, exception)
failures += 1
else:
assert False, ('unknown outcome', outcome)
self.optionflags = original_optionflags
self._DocTestRunner__record_outcome(test, failures, tries)
self.total_walltime_skips += walltime_skips
self.total_performed_tests += tries
return TestResults(failures, tries)
def run(self, test, compileflags=0, out=None, clear_globs=True):
self.setters = defaultdict(dict)
randstate.set_random_seed(self.options.random_seed)
warnings.showwarning = showwarning_with_traceback
self.running_doctest_digest = hashlib.md5()
self.test = test
if self.options.debug:
self.debugger = doctest._OutputRedirectingPdb(sys.stdout)
self.debugger.reset()
else:
self.debugger = None
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self._DocTestRunner__patched_linecache_getlines
if (out is None):
def out(s):
self.msgfile.write(s)
self.msgfile.flush()
self._fakeout.start_spoofing()
self.no_failure_yet = True
try:
return self._run(test, compileflags, out)
finally:
self._fakeout.stop_spoofing()
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
def summarize(self, verbose=None):
if (verbose is None):
verbose = self._verbose
m = self.msgfile
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
(name, (f, t)) = x
assert (f <= t)
totalt += t
totalf += f
if (not t):
notests.append(name)
elif (not f):
passed.append((name, t))
else:
failed.append(x)
if verbose:
if notests:
print(count_noun(len(notests), 'item'), 'had no tests:', file=m)
notests.sort()
for thing in notests:
print((' %s' % thing), file=m)
if passed:
print(count_noun(len(passed), 'item'), 'passed all tests:', file=m)
passed.sort()
for (thing, count) in passed:
print((' %s in %s' % (count_noun(count, 'test', pad_number=3, pad_noun=True), thing)), file=m)
if failed:
print(self.DIVIDER, file=m)
print(count_noun(len(failed), 'item'), 'had failures:', file=m)
failed.sort()
for (thing, (f, t)) in failed:
print((' %3d of %3d in %s' % (f, t, thing)), file=m)
if verbose:
print((((count_noun(totalt, 'test') + ' in ') + count_noun(len(self._name2ft), 'item')) + '.'), file=m)
print(('%s passed and %s failed.' % ((totalt - totalf), totalf)), file=m)
if totalf:
print('***Test Failed***', file=m)
else:
print('Test passed.', file=m)
m.flush()
return doctest.TestResults(totalf, totalt)
def update_digests(self, example):
s = str_to_bytes(pre_hash(get_source(example)), 'utf-8')
self.running_global_digest.update(s)
self.running_doctest_digest.update(s)
if (example.predecessors is not None):
digest = hashlib.md5(s)
gen = (e.running_state for e in example.predecessors)
digest.update(str_to_bytes(reduce_hex(gen), 'ascii'))
example.running_state = digest.hexdigest()
def compile_and_execute(self, example, compiler, globs):
if isinstance(globs, RecordingDict):
globs.start()
example.sequence_number = len(self.history)
if (not hasattr(example, 'warnings')):
example.warnings = []
self.history.append(example)
timer = Timer().start()
try:
compiled = compiler(example)
timer.start()
exec(compiled, globs)
finally:
timer.stop().annotate(example)
if isinstance(globs, RecordingDict):
example.predecessors = []
for name in globs.got:
setters_dict = self.setters.get(name)
if setters_dict:
was_set = False
for (setter_optional_tags, setter) in setters_dict.items():
if setter_optional_tags.issubset(example.optional_tags):
was_set = True
example.predecessors.append(setter)
if (not was_set):
if example.probed_tags:
example.probed_tags = True
else:
f_setter_optional_tags = '; '.join(((("'" + unparse_optional_tags(setter_optional_tags)) + "'") for setter_optional_tags in setters_dict))
example.warnings.append(f"Variable '{name}' referenced here was set only in doctest marked {f_setter_optional_tags}")
for name in globs.set:
self.setters[name][example.optional_tags] = example
else:
example.predecessors = None
self.update_digests(example)
example.total_state = self.running_global_digest.hexdigest()
example.doctest_state = self.running_doctest_digest.hexdigest()
def _failure_header(self, test, example, message='Failed example:'):
out = [self.DIVIDER]
with OriginalSource(example):
if test.filename:
if ((test.lineno is not None) and (example.lineno is not None)):
lineno = ((test.lineno + example.lineno) + 1)
else:
lineno = '?'
out.append(('File "%s", line %s, in %s' % (test.filename, lineno, test.name)))
else:
out.append(('Line %s, in %s' % ((example.lineno + 1), test.name)))
out.append(message)
source = example.source
out.append(doctest._indent(source))
return '\n'.join(out)
def report_start(self, out, test, example):
with OriginalSource(example):
if self._verbose:
start_txt = (('Trying (line %s):' % ((test.lineno + example.lineno) + 1)) + doctest._indent(example.source))
if example.want:
start_txt += ('Expecting:\n' + doctest._indent(example.want))
else:
start_txt += 'Expecting nothing\n'
out(start_txt)
def report_success(self, out, test, example, got, *, check_duration=0):
if self._verbose:
out(('ok [%.2f s]\n' % (example.walltime + check_duration)))
def report_failure(self, out, test, example, got, globs):
if ((not self.options.initial) or self.no_failure_yet):
self.no_failure_yet = False
returnval = doctest.DocTestRunner.report_failure(self, out, test, example, got)
if self.options.debug:
self._fakeout.stop_spoofing()
restore_tcpgrp = None
try:
if os.isatty(0):
restore_tcpgrp = os.tcgetpgrp(0)
signal.signal(signal.SIGTTIN, signal.SIG_IGN)
signal.signal(signal.SIGTTOU, signal.SIG_IGN)
os.tcsetpgrp(0, os.getpgrp())
print(('*' * 70))
print('Previously executed commands:')
for ex in test.examples:
if (ex is example):
break
if hasattr(ex, 'sage_source'):
src = (' sage: ' + ex.sage_source)
else:
src = (' sage: ' + ex.source)
if (src[(- 1)] == '\n'):
src = src[:(- 1)]
src = src.replace('\n', '\n ....: ')
print(src)
if ex.want:
print(doctest._indent(ex.want[:(- 1)]))
from sage.repl.configuration import sage_ipython_config
from IPython.terminal.embed import InteractiveShellEmbed
cfg = sage_ipython_config.default()
shell = InteractiveShellEmbed(config=cfg, banner1='', user_ns=dict(globs))
shell(header='', stack_depth=2)
except KeyboardInterrupt:
if (not self.options.serial):
os.kill(os.getppid(), signal.SIGINT)
raise
finally:
if (restore_tcpgrp is not None):
os.tcsetpgrp(0, restore_tcpgrp)
signal.signal(signal.SIGTTIN, signal.SIG_DFL)
signal.signal(signal.SIGTTOU, signal.SIG_DFL)
print('Returning to doctests...')
self._fakeout.start_spoofing()
return returnval
def report_overtime(self, out, test, example, got, *, check_duration=0):
out((self._failure_header(test, example, 'Warning, slow doctest:') + ('Test ran for %.2f s, check ran for %.2f s\n' % (example.walltime, check_duration))))
def report_unexpected_exception(self, out, test, example, exc_info):
if ((not self.options.initial) or self.no_failure_yet):
self.no_failure_yet = False
returnval = doctest.DocTestRunner.report_unexpected_exception(self, out, test, example, exc_info)
if self.options.debug:
self._fakeout.stop_spoofing()
restore_tcpgrp = None
try:
if os.isatty(0):
restore_tcpgrp = os.tcgetpgrp(0)
signal.signal(signal.SIGTTIN, signal.SIG_IGN)
signal.signal(signal.SIGTTOU, signal.SIG_IGN)
os.tcsetpgrp(0, os.getpgrp())
(exc_type, exc_val, exc_tb) = exc_info
if (exc_tb is None):
raise RuntimeError('could not start the debugger for an unexpected exception, probably due to an unhandled error in a C extension module')
self.debugger.reset()
self.debugger.interaction(None, exc_tb)
except KeyboardInterrupt:
if (not self.options.serial):
os.kill(os.getppid(), signal.SIGINT)
raise
finally:
if (restore_tcpgrp is not None):
os.tcsetpgrp(0, restore_tcpgrp)
signal.signal(signal.SIGTTIN, signal.SIG_DFL)
signal.signal(signal.SIGTTOU, signal.SIG_DFL)
self._fakeout.start_spoofing()
return returnval
def update_results(self, D):
for key in ['cputime', 'walltime']:
if (key not in D):
D[key] = []
if hasattr(self, key):
D[key].append(self.__dict__[key])
D['tests'] = self.total_performed_tests
D['walltime_skips'] = self.total_walltime_skips
if hasattr(self, 'failures'):
D['failures'] = self.failures
return self.failures
else:
return False |
class DcxImageFile(PcxImageFile):
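# DCX is a container of one or more PCX frames: _open reads the table of frame offsets
# after the magic number, and seek() repositions the file pointer and re-runs the PCX
# loader for the requested frame.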
format = 'DCX'
format_description = 'Intel DCX'
_close_exclusive_fp_after_loading = False
def _open(self):
s = self.fp.read(4)
if (i32(s) != MAGIC):
raise SyntaxError('not a DCX file')
self._offset = []
for i in range(1024):
offset = i32(self.fp.read(4))
if (not offset):
break
self._offset.append(offset)
self.__fp = self.fp
self.frame = None
self.seek(0)
@property
def n_frames(self):
return len(self._offset)
@property
def is_animated(self):
return (len(self._offset) > 1)
def seek(self, frame):
if (not self._seek_check(frame)):
return
self.frame = frame
self.fp = self.__fp
self.fp.seek(self._offset[frame])
PcxImageFile._open(self)
def tell(self):
return self.frame
def _close__fp(self):
try:
if (self.__fp != self.fp):
self.__fp.close()
except AttributeError:
pass
finally:
self.__fp = None |
class MoE(torch.nn.Module):
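# Mixture-of-Experts layer: a learned router scores each token and selects its top
# experts, and ParallelMLP computes the expert outputs weighted by those scores.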
def __init__(self, args: Arguments):
super(MoE, self).__init__()
self.router = router.LearnedRouter(args)
self.experts = ParallelMLP(args)
def forward(self, x):
x = common.cast_if_autocast_enabled(x)
(scores, expert_weights, top_experts) = self.router(x)
return self.experts(x, scores, expert_weights, top_experts) |
def add_graff_ms_train_args(parser):
parser.add_argument('--debug', default=False, action='store_true')
parser.add_argument('--debug-overfit', default=False, action='store_true')
parser.add_argument('--gpu', default=False, action='store_true')
parser.add_argument('--seed', default=42, action='store', type=int)
parser.add_argument('--num-workers', default=0, action='store', type=int)
parser.add_argument('--batch-size', default=128, action='store', type=int)
parser.add_argument('--max-epochs', default=100, action='store', type=int)
parser.add_argument('--min-epochs', default=0, action='store', type=int)
date = datetime.now().strftime('%Y_%m_%d')
parser.add_argument('--save-dir', default=f'results/{date}_gnn/')
parser.add_argument('--dataset-name', default='gnps2015_debug')
parser.add_argument('--dataset-labels', default='labels.tsv')
parser.add_argument('--split-name', default='split_22.tsv')
parser.add_argument('--learning-rate', default=4e-07, action='store', type=float)
parser.add_argument('--lr-decay-rate', default=1.0, action='store', type=float)
parser.add_argument('--weight-decay', default=0, action='store', type=float)
parser.add_argument('--num-bins', default=15000, action='store', type=int)
parser.add_argument('--layers', default=3, action='store', type=int)
parser.add_argument('--set-layers', default=2, action='store', type=int)
parser.add_argument('--pe-embed-k', default=0, action='store', type=int)
parser.add_argument('--pool-op', default='avg', action='store')
parser.add_argument('--mpnn-type', default='GGNN', action='store', choices=['GGNN', 'PNA', 'GINE'])
parser.add_argument('--loss-fn', default='cosine', action='store', choices=['mse', 'hurdle', 'cosine'])
parser.add_argument('--dropout', default=0.1, action='store', type=float)
parser.add_argument('--hidden-size', default=256, action='store', type=int)
parser.add_argument('--form-dir-name', default='magma_subform_50_with_raw', action='store')
parser.add_argument('--embed-adduct', default=False, action='store_true')
parser.add_argument('--num-fixed-forms', default=10000, action='store', type=int)
return parser |
def _average_with_log_weights(x, logweights):
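# Weighted average where the weights are given in log space; subtracting the maximum
# log-weight before exponentiating keeps the weights finite (e.g. log-weights around
# -1000 would otherwise all underflow to zero).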
x = np.asarray(x)
logweights = np.asarray(logweights)
maxlogw = logweights.max()
weights = np.exp((logweights - maxlogw))
return np.average(x, weights=weights) |
class LJspeechDataset(Dataset):
def __init__(self, data_root, train=True, test_size=0.05):
self.data_root = data_root
self.lengths = []
self.train = train
self.test_size = test_size
self.paths = [self.collect_files(0), self.collect_files(1)]
def __len__(self):
return len(self.paths[0])
def __getitem__(self, idx):
wav = np.load(self.paths[0][idx])
mel = np.load(self.paths[1][idx])
return (wav, mel)
def interest_indices(self, paths):
test_num_samples = int((self.test_size * len(paths)))
(train_indices, test_indices) = (range(0, (len(paths) - test_num_samples)), range((len(paths) - test_num_samples), len(paths)))
return (train_indices if self.train else test_indices)
def collect_files(self, col):
meta = os.path.join(self.data_root, 'train.txt')
with open(meta, 'rb') as f:
lines = f.readlines()
l = lines[0].decode('utf-8').split('|')
assert (len(l) == 4)
self.lengths = list(map((lambda l: int(l.decode('utf-8').split('|')[2])), lines))
paths = list(map((lambda l: l.decode('utf-8').split('|')[col]), lines))
paths = list(map((lambda f: os.path.join(self.data_root, f)), paths))
indices = self.interest_indices(paths)
paths = list(np.array(paths)[indices])
self.lengths = list(np.array(self.lengths)[indices])
self.lengths = list(map(int, self.lengths))
return paths |
class DistributedTimeoutWrapper(nn.Module):
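# Wraps a module with a watchdog: every forward() call sets a heartbeat event, and a
# background thread sends `signal` (SIGINT by default) to the process if no heartbeat
# arrives within `timeout` seconds, so hung distributed workers get terminated.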
def __init__(self, module: nn.Module, timeout: int, signal=signal.SIGINT):
super().__init__()
self.module = module
self.timeout = timeout
self.signal = signal
if (timeout > 0):
self._heartbeat = threading.Event()
self._heartbeat_thread = threading.Thread(target=self._check_heartbeat, args=(os.getpid(),), daemon=True)
self._heartbeat_thread.start()
self._terminated = False
else:
self._heartbeat = None
self._heartbeat_thread = None
def __del__(self):
self.stop_timeout()
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def stop_timeout(self):
if (self._heartbeat_thread is not None):
self._terminated = True
self._heartbeat_thread.join()
def state_dict(self, *args, **kwargs):
return self.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return self.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
if (self._heartbeat is not None):
self._heartbeat.set()
return self.module(*args, **kwargs)
def _check_heartbeat(self, parent_pid):
self._heartbeat.wait()
while True:
self._heartbeat.clear()
success = self._heartbeat.wait(timeout=self.timeout)
if self._terminated:
break
elif (not success):
logger.error('Killing job for not making progress in {} seconds. Set --heartbeat-timeout=-1 to disable this timeout.'.format(int(self.timeout)))
os.kill(parent_pid, self.signal)
return |
def init(output_file, flags=None, output_mode='key_value'):
flags = (DEFAULT_FLAGS if (flags is None) else flags)
output_mode = cudaOutputMode.for_key(output_mode)
with tempfile.NamedTemporaryFile(delete=True) as f:
f.write(b'\n'.join(map((lambda f: f.encode('ascii')), flags)))
f.flush()
check_error(cudart().cudaProfilerInitialize(ctypes.c_char_p(f.name.encode('ascii')), ctypes.c_char_p(output_file.encode('ascii')), output_mode)) |
class Trainer(object):
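# Main fairseq training helper: owns the model, criterion, optimizer and LR scheduler,
# handles fp16/bf16/AMP casting, FullyShardedDataParallel (FSDP) compatibility checks,
# shared-parameter tying, EMA, and assembling the checkpoint state_dict.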
def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):
if isinstance(cfg, Namespace):
logger.warning('argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf')
cfg = convert_namespace_to_omegaconf(cfg)
self.cfg = cfg
self.task = task
shared_params = _catalog_shared_params(model)
self.tpu = cfg.common.tpu
self.cuda = (torch.cuda.is_available() and (not cfg.common.cpu) and (not self.tpu))
if self.cuda:
self.device = torch.device('cuda')
elif self.tpu:
self.device = utils.get_tpu_device()
else:
self.device = torch.device('cpu')
if self.is_fsdp:
import fairscale
if self.cfg.common.bf16:
raise ValueError('FullyShardedDataParallel is not compatible with --bf16 or --memory-efficient-bf16')
if (self.cfg.distributed_training.zero_sharding != 'none'):
raise ValueError("FullyShardedDataParallel is not compatible with --zero-sharding option (it's already built in)")
if ((max(self.cfg.optimization.update_freq) > 1) and (fairscale.__version__ < '0.4.0')):
raise RuntimeError('Please update to fairscale 0.4.0 or newer when combining --update-freq with FullyShardedDataParallel')
elif (hasattr(self.cfg.distributed_training, 'cpu_offload') and self.cfg.distributed_training.cpu_offload):
raise ValueError('--cpu-offload requires --ddp-backend=fully_sharded')
self._criterion = criterion
self._model = model
if (not self.is_fsdp):
if cfg.common.fp16:
assert (not cfg.common.amp), 'Cannot use fp16 and AMP together'
self._criterion = self._criterion.half()
self._model = self._model.half()
elif cfg.common.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
elif cfg.common.amp:
self._amp_retries = 0
if ((not cfg.distributed_training.pipeline_model_parallel) and (not self.use_distributed_wrapper)):
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel
self.last_device = None
if (self.cuda and self.pipeline_model_parallel):
self.last_device = torch.device(cfg.distributed_training.pipeline_devices[(- 1)])
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info('detected shared parameter: {} <- {}'.format(shared_param[0], path))
_set_module_by_path(self._model, path, ref)
self._dummy_batch = None
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
self._ema = None
if (self.cuda and (self.data_parallel_world_size > 1)):
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if (self.quantizer is not None):
self.quantizer.set_trainer(self)
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if (self.data_parallel_world_size > 1):
self.cuda_env_arr = distributed_utils.all_gather_list(self.cuda_env, group=distributed_utils.get_global_group())
else:
self.cuda_env_arr = [self.cuda_env]
if (self.data_parallel_rank == 0):
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time('wall', priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
if (self.cfg.distributed_training.distributed_world_size == 1):
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_process_group(self):
return distributed_utils.get_data_parallel_group()
@property
def data_parallel_rank(self):
if (self.cfg.distributed_training.distributed_world_size == 1):
return 0
return distributed_utils.get_data_parallel_rank()
@property
def is_data_parallel_master(self):
return (self.data_parallel_rank == 0)
@property
def use_distributed_wrapper(self) -> bool:
return (((self.data_parallel_world_size > 1) and (not self.cfg.optimization.use_bmuf)) or (self.is_fsdp and self.cfg.distributed_training.cpu_offload))
@property
def should_save_checkpoint_on_current_rank(self) -> bool:
if ((self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or (getattr(self.cfg.model, 'base_layers', 0) > 0)):
return True
else:
return self.is_data_parallel_master
@property
def always_call_state_dict_during_save_checkpoint(self) -> bool:
if (self.is_fsdp and (not self.cfg.distributed_training.use_sharded_state)):
return True
else:
return False
@property
def checkpoint_suffix(self) -> str:
if (self.is_fsdp and self.cfg.distributed_training.use_sharded_state):
return (self.cfg.checkpoint.checkpoint_suffix + '-shard{0}'.format(self.data_parallel_rank))
else:
return (self.cfg.checkpoint.checkpoint_suffix or '')
@property
def criterion(self):
if (self._wrapped_criterion is None):
if (utils.has_parameters(self._criterion) and self.use_distributed_wrapper):
self._wrapped_criterion = models.DistributedFairseqModel(self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if (self._wrapped_model is None):
if self.use_distributed_wrapper:
self._wrapped_model = models.DistributedFairseqModel(self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def ema(self):
if (self._ema is None):
self._build_ema()
return self._ema
def _build_ema(self):
if self.cfg.ema.store_ema:
self._ema = build_ema(self._model, self.cfg.ema, self.device)
logger.info('Exponential Moving Average Shadow Model is initialized.')
@property
def optimizer(self):
if (self._optimizer is None):
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if (self._lr_scheduler is None):
self._build_optimizer()
return self._lr_scheduler
def _build_optimizer(self):
params = list(filter((lambda p: p.requires_grad), chain(self.model.parameters(), self.criterion.parameters())))
if (self.is_fsdp and self.cfg.common.fp16):
allow_unsupported = (not self.cfg.common.memory_efficient_fp16)
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.cfg, params, allow_unsupported=allow_unsupported)
elif (self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp):
if (self.cuda and (torch.cuda.get_device_capability(0)[0] < 7)):
logger.info('NOTE: your device does NOT support faster training with --fp16 or --amp, please switch to FP32 which is likely to be faster')
if (self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16):
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.cfg, params)
elif self.cfg.common.amp:
self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
else:
if (self.cuda and (torch.cuda.get_device_capability(0)[0] >= 7)):
logger.info('NOTE: your device may support faster training with --fp16 or --amp')
self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
if self.is_fsdp:
assert (not self.cfg.optimization.use_bmuf), '--ddp-backend=fully_sharded is not compatible with BMUF'
assert self._optimizer.supports_flat_params, '--ddp-backend=fully_sharded is only compatible with pointwise optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). However, the sharding will result in slightly different results when using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)'
if self.cfg.optimization.use_bmuf:
self._optimizer = optim.FairseqBMUF(self.cfg.bmuf, self._optimizer)
if (self.cfg.distributed_training.zero_sharding == 'os'):
if ((self.cfg.common.fp16 and (not self.cfg.common.memory_efficient_fp16) and (not self.cfg.common.memory_efficient_bf16)) and (not self.cfg.common.fp16_no_flatten_grads)):
raise ValueError('ZeRO is incompatible with fp16 and flattened grads. Please use --fp16-no-flatten-grads')
else:
optim.shard_(self._optimizer, self.data_parallel_process_group)
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.cfg.lr_scheduler, self.optimizer)
self._lr_scheduler.step_update(0)
@property
def is_fsdp(self):
return (self.cfg.distributed_training.ddp_backend == 'fully_sharded')
def consolidate_optimizer(self):
if self.cfg.checkpoint.no_save_optimizer_state:
return
self._gathered_optim_state = None
if hasattr(self.optimizer.optimizer, 'consolidate_state_dict'):
self.optimizer.optimizer.consolidate_state_dict()
elif (self.is_fsdp and (not self.model.use_sharded_state)):
st = self.model.gather_full_optim_state_dict(self.optimizer)
self._gathered_optim_state = st
def state_dict(self):
state_dict = {'args': None, 'cfg': (OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg), 'model': self.model.state_dict(), 'criterion': (self.criterion.state_dict() if utils.has_parameters(self.criterion) else None), 'optimizer_history': ((self._optim_history or []) + [{'criterion_name': self.get_criterion().__class__.__name__, 'optimizer_name': self.optimizer.__class__.__name__, 'lr_scheduler_state': self.lr_scheduler.state_dict(), 'num_updates': self.get_num_updates()}]), 'task_state': (self.task.state_dict() if (self.task is not None) else {}), 'extra_state': {'metrics': metrics.state_dict(), 'previous_training_time': self.cumulative_training_time()}}
if self.cfg.ema.store_ema:
state_dict['extra_state']['ema'] = self.ema.get_model().state_dict()
if self.cfg.ema.ema_fp32:
state_dict['extra_state']['ema_fp32_params'] = self.ema.fp32_params
if (not self.cfg.checkpoint.no_save_optimizer_state):
if (self._gathered_optim_state is not None):
state_dict['last_optimizer_state'] = self._gathered_optim_state
self._gathered_optim_state = None
else:
state_dict['last_optimizer_state'] = self.optimizer.state_dict()
if self.is_fsdp:
state_dict['fsdp_metadata'] = self.model.local_metadata_dict()
return state_dict
def save_checkpoint(self, filename, extra_state):
logger.info(f'Saving checkpoint to {filename}')
state_dict = utils.move_to_cpu(self.state_dict())
state_dict['extra_state'].update(extra_state)
if self.should_save_checkpoint_on_current_rank:
checkpoint_utils.torch_persistent_save(state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously)
logger.info(f'Finished saving checkpoint to {filename}')
def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False):
(extra_state, self._optim_history, last_optim_state) = (None, [], None)
logger.info(f'Preparing to load checkpoint {filename}')
is_distributed = (self.data_parallel_world_size > 1)
bexists = PathManager.isfile(filename)
if bexists:
load_on_all_ranks = (self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks or self.tpu or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or (getattr(self.cfg.model, 'base_layers', 0) > 0))
if (load_on_all_ranks or (self.data_parallel_rank == 0)):
state = checkpoint_utils.load_checkpoint_to_cpu(filename, load_on_all_ranks=load_on_all_ranks)
last_optim_state = state.get('last_optimizer_state', None)
if ((not load_on_all_ranks) and (self.cfg.distributed_training.zero_sharding == 'os') and ('last_optimizer_state' in state) and is_distributed):
state['last_optimizer_state'] = 'SHARDED'
else:
last_optim_state = None
state = None
if (is_distributed and (not load_on_all_ranks)):
state = distributed_utils.broadcast_object(state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device)
if (self.data_parallel_rank > 0):
last_optim_state = state.get('last_optimizer_state', None)
try:
self.model.load_state_dict(state['model'], strict=True, model_cfg=self.cfg.model)
del state['model']
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(state['criterion'], strict=True)
del state['criterion']
except Exception:
raise Exception('Cannot load model parameters from checkpoint {}; please ensure that the architectures match.'.format(filename))
extra_state = state['extra_state']
self._optim_history = state['optimizer_history']
if ((last_optim_state is not None) and (not reset_optimizer)):
self._build_optimizer()
last_optim = self._optim_history[(- 1)]
assert (last_optim['criterion_name'] == self.get_criterion().__class__.__name__), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}"
assert (last_optim['optimizer_name'] == self.optimizer.__class__.__name__), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}"
if (not reset_lr_scheduler):
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
if (self.is_fsdp and (not self.model.use_sharded_state)):
last_optim_state = self.model.get_shard_from_optim_state_dict(last_optim_state)
elif ((not load_on_all_ranks) and is_distributed):
last_optim_state = self.optimizer.broadcast_global_state_dict(last_optim_state)
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim['num_updates'])
if (extra_state is not None):
itr_state = extra_state['train_iterator']
epoch = itr_state['epoch']
if ('previous_training_time' in extra_state):
self._previous_training_time = extra_state['previous_training_time']
self._start_time = time.time()
self.lr_step(epoch)
if ((itr_state.get('version', 1) >= 2) and (itr_state['iterations_in_epoch'] == 0)):
reset_meters = True
if (('metrics' in extra_state) and (not reset_meters)):
metrics.load_state_dict(extra_state['metrics'])
for meter in metrics.get_meters('default'):
if isinstance(meter, meters.TimeMeter):
meter.reset()
if self.cfg.ema.store_ema:
if ('ema' not in extra_state):
logger.warning('EMA not found in checkpoint. But store_ema is True. EMA is re-initialized from checkpoint.')
self.ema.restore(state['model'], build_fp32_params=self.cfg.ema.ema_fp32)
else:
logger.info('Loading EMA from checkpoint')
self.ema.restore(extra_state['ema'], build_fp32_params=False)
if self.cfg.ema.ema_fp32:
if ('ema_fp32_params' in extra_state):
logger.info('Loading EMA fp32 params from checkpoint')
self.ema.build_fp32_params(extra_state['ema_fp32_params'])
else:
logger.info('Building EMA fp32 params from EMA model in checkpoint')
self.ema.build_fp32_params()
logger.info('Loaded checkpoint {} (epoch {} {} updates)'.format(filename, epoch, self.get_num_updates()))
else:
logger.info('No existing checkpoint found {}'.format(filename))
return extra_state
def get_train_iterator(self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False):
if load_dataset:
logger.info('loading train data for epoch {}'.format(epoch))
self.task.load_dataset(self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu)
batch_iterator = self.task.get_batch_iterator(dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=(self.data_parallel_world_size if shard_batch_itr else 1), shard_id=(self.data_parallel_rank if shard_batch_itr else 0), num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(self, subset, disable_iterator_cache=False):
batch_iterator = self.task.get_batch_iterator(dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions()), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
logger.info('begin training epoch {}'.format(epoch))
self.lr_step_begin_epoch(epoch)
if (self.quantizer is not None):
self.quantizer.begin_epoch(epoch)
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous('begin_epoch')
xm.mark_step()
def begin_valid_epoch(self, epoch):
self.task.begin_valid_epoch(epoch, self.get_model())
def reset_dummy_batch(self, batch):
self._dummy_batch = batch
@metrics.aggregate('train')
def train_step(self, samples, raise_oom=False):
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time('train_wall', priority=800, round=0)
extra_kwargs = {}
if (self.cfg.ema.store_ema and getattr(self.task, 'uses_ema', False)):
extra_kwargs['ema_model'] = self.ema.get_model()
(logging_outputs, sample_size, ooms) = ([], 0, 0)
for (i, sample) in enumerate(samples):
(sample, is_dummy_batch) = self._prepare_sample(sample)
def maybe_no_sync():
if ((self.data_parallel_world_size > 1) and hasattr(self.model, 'no_sync') and (i < (len(samples) - 1)) and (not self.is_fsdp)):
return self.model.no_sync()
else:
return contextlib.ExitStack()
try:
with maybe_no_sync():
(loss, sample_size_i, logging_output) = self.task.train_step(sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
if (self.cuda and (self.get_num_updates() == 0)):
torch.cuda.empty_cache()
except RuntimeError as e:
if ('out of memory' in str(e)):
self._log_oom(e)
if raise_oom:
raise e
logger.warning('attempting to recover from OOM in forward/backward pass')
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if (self.cfg.distributed_training.distributed_world_size == 1):
return None
else:
raise e
if (self.tpu and (i < (len(samples) - 1))):
self._xla_markstep_and_send_to_cpu()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
if self._sync_stats():
train_time = self._local_cumulative_training_time()
(logging_outputs, (sample_size, ooms, total_train_time)) = self._aggregate_logging_outputs(logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch)
self._cumulative_training_time = (total_train_time / self.data_parallel_world_size)
overflow = False
try:
with torch.autograd.profiler.record_function('reduce-grads'):
self.optimizer.all_reduce_grads(self.model)
if utils.has_parameters(self.criterion):
self.optimizer.all_reduce_grads(self.criterion)
with torch.autograd.profiler.record_function('multiply-grads'):
numer = (self.data_parallel_world_size if ((not self.cfg.optimization.use_bmuf) or self._sync_stats()) else 1)
self.optimizer.multiply_grads((numer / (sample_size or 1.0)))
with torch.autograd.profiler.record_function('clip-grads'):
grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)
if (not self.tpu):
if ((not self.cfg.optimization.use_bmuf) and (self.cfg.distributed_training.ddp_backend != 'slow_mo')):
self._check_grad_norms(grad_norm)
if (not torch.isfinite(grad_norm).all()):
if self.cfg.common.amp:
overflow = True
else:
raise FloatingPointError('gradients are Nan/Inf')
with torch.autograd.profiler.record_function('optimizer'):
self.task.optimizer_step(self.optimizer, model=self.model, update_num=self.get_num_updates())
if (self.cfg.common.amp and overflow):
if (self._amp_retries == self.cfg.common.amp_batch_retries):
logger.info('AMP: skipping this batch.')
self._amp_retries = 0
else:
self._amp_retries += 1
return self.train_step(samples, raise_oom)
except FloatingPointError:
self.zero_grad()
with NanDetector(self.get_model()):
for (_, sample) in enumerate(samples):
(sample, _) = self._prepare_sample(sample)
self.task.train_step(sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs)
raise
except OverflowError as e:
overflow = True
logger.info(f'NOTE: gradient overflow detected, ignoring gradient, {str(e)}')
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if ('out of memory' in str(e)):
self._log_oom(e)
logger.error('OOM during optimization, irrecoverable')
raise e
if hasattr(self.model, 'perform_additional_optimizer_actions'):
if hasattr(self.optimizer, 'fp32_params'):
self.model.perform_additional_optimizer_actions(self.optimizer.optimizer, self.optimizer.fp32_params)
else:
self.model.perform_additional_optimizer_actions(self.optimizer.optimizer)
logging_output = None
if ((not overflow) or (self.cfg.distributed_training.ddp_backend == 'slow_mo')):
self.set_num_updates((self.get_num_updates() + 1))
if self.cfg.ema.store_ema:
self.ema.step(self.get_model(), self.get_num_updates())
metrics.log_scalar('ema_decay', self.ema.get_decay(), priority=10000, round=5, weight=0)
if self.tpu:
import torch_xla.core.xla_model as xm
self._xla_markstep_and_send_to_cpu()
logging_output = {}
if ((self.get_num_updates() % self.cfg.common.log_interval) == 0):
mem_info = xm.get_memory_info(self.device)
gb_free = ((mem_info['kb_free'] / 1024) / 1024)
gb_total = ((mem_info['kb_total'] / 1024) / 1024)
metrics.log_scalar('gb_free', gb_free, priority=1500, round=1, weight=0)
metrics.log_scalar('gb_total', gb_total, priority=1600, round=1, weight=0)
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size, grad_norm)
self._check_xla_compilation()
else:
if (self.cuda and (self.cuda_env is not None)):
gb_used = (((torch.cuda.max_memory_allocated() / 1024) / 1024) / 1024)
torch.cuda.reset_peak_memory_stats()
gb_free = (self.cuda_env.total_memory_in_GB - gb_used)
metrics.log_scalar('gb_free', gb_free, priority=1500, round=1, weight=0)
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size, grad_norm)
if (self.cuda and (self.cfg.common.empty_cache_freq > 0) and ((((self.get_num_updates() + self.cfg.common.empty_cache_freq) - 1) % self.cfg.common.empty_cache_freq) == 0)):
torch.cuda.empty_cache()
if (self.cfg.common.fp16 or self.cfg.common.amp):
metrics.log_scalar('loss_scale', (self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale()), priority=700, round=4, weight=0)
metrics.log_stop_time('train_wall')
return logging_output
@metrics.aggregate('valid')
def valid_step(self, sample, raise_oom=False):
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous('valid_step')
extra_kwargs = {}
if (self.cfg.ema.store_ema and getattr(self.task, 'uses_ema', False)):
extra_kwargs['ema_model'] = self.ema.get_model()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
(sample, is_dummy_batch) = self._prepare_sample(sample)
try:
(_loss, sample_size, logging_output) = self.task.valid_step(sample, self.model, self.criterion, **extra_kwargs)
except RuntimeError as e:
if ('out of memory' in str(e)):
self._log_oom(e)
if (not raise_oom):
logger.warning('ran out of memory in validation step, retrying batch')
for p in self.model.parameters():
if (p.grad is not None):
p.grad = None
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if (self.data_parallel_world_size > 1):
(logging_outputs, (sample_size,)) = self._aggregate_logging_outputs(logging_outputs, sample_size, ignore=is_dummy_batch)
if self.tpu:
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
self.lr_scheduler.step_begin_epoch(epoch)
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
self.lr_scheduler.step(epoch, val_loss)
return self.lr_step_update()
def lr_step_update(self):
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
if isinstance(new_lr, dict):
for (k, v) in new_lr.items():
metrics.log_scalar(f'lr_{k}', v, weight=0, priority=300)
new_lr = new_lr.get('default', next(iter(new_lr.values())))
else:
metrics.log_scalar('lr', new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
return self.optimizer.get_lr()
def get_model(self):
return self._model
def get_criterion(self):
return self._criterion
def get_meter(self, name):
from fairseq import meters
if ('get_meter' not in self._warn_once):
self._warn_once.add('get_meter')
utils.deprecation_warning('Trainer.get_meter is deprecated. Please use fairseq.metrics instead.')
train_meters = metrics.get_meters('train')
if (train_meters is None):
train_meters = {}
if ((name == 'train_loss') and ('loss' in train_meters)):
return train_meters['loss']
elif (name == 'train_nll_loss'):
m = train_meters.get('nll_loss', None)
return (m or meters.AverageMeter())
elif (name == 'wall'):
m = metrics.get_meter('default', 'wall')
return (m or meters.TimeMeter())
elif (name == 'wps'):
m = metrics.get_meter('train', 'wps')
return (m or meters.TimeMeter())
elif (name in {'valid_loss', 'valid_nll_loss'}):
k = name[len('valid_'):]
m = metrics.get_meter('valid', k)
return (m or meters.AverageMeter())
elif (name == 'oom'):
return meters.AverageMeter()
elif (name in train_meters):
return train_meters[name]
return None
def get_num_updates(self):
return self._num_updates
def set_num_updates(self, num_updates):
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar('num_updates', self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
def agg_norm_fn(total_norm):
total_norm = (total_norm.cuda().float() ** 2)
total_norm = distributed_utils.all_reduce(total_norm, group=self.data_parallel_process_group)
return (total_norm ** 0.5)
should_agg_norm = (self.is_fsdp and ((self.data_parallel_process_group is not None) or torch.distributed.is_initialized()))
return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=(agg_norm_fn if should_agg_norm else None))
def cumulative_training_time(self):
if (self._cumulative_training_time is None):
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
return ((time.time() - self._start_time) + self._previous_training_time)
def _fp_convert_sample(self, sample):
def apply_half(t):
if (t.dtype is torch.float32):
return t.to(dtype=torch.half)
return t
def apply_bfloat16(t):
if (t.dtype is torch.float32):
return t.to(dtype=torch.bfloat16)
return t
if self.cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _prepare_sample(self, sample, is_dummy=False):
if (sample == 'DUMMY'):
raise Exception("Trying to use an uninitialized 'dummy' batch. This usually indicates that the total number of batches is smaller than the number of participating GPUs. Try reducing the batch size or using fewer GPUs.")
if ((sample is None) or (len(sample) == 0)):
assert ((self._dummy_batch is not None) and (len(self._dummy_batch) > 0)), 'Invalid dummy batch: {}'.format(self._dummy_batch)
(sample, _) = self._prepare_sample(self._dummy_batch, is_dummy=True)
return (sample, True)
if self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
if self.cuda:
if self.pipeline_model_parallel:
if ('target' in sample):
sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device)
else:
sample = utils.move_to_cuda(sample)
elif (self.tpu and is_dummy):
sample = utils.move_to_cuda(sample, device=self.device)
if (not self.cfg.common.on_cpu_convert_precision):
sample = self._fp_convert_sample(sample)
if (self._dummy_batch == 'DUMMY'):
self._dummy_batch = sample
return (sample, False)
def _set_seed(self):
seed = (self.cfg.common.seed + self.get_num_updates())
utils.set_torch_seed(seed)
def _sync_stats(self):
if (self.data_parallel_world_size == 1):
return False
elif self.cfg.optimization.use_bmuf:
return ((((self.get_num_updates() + 1) % self.cfg.bmuf.global_sync_iter) == 0) and ((self.get_num_updates() + 1) > self.cfg.bmuf.warmup_iterations))
else:
return True
def _log_oom(self, exc):
msg = 'OOM: Ran out of memory with exception: {}'.format(exc)
logger.warning(msg)
if (torch.cuda.is_available() and hasattr(torch.cuda, 'memory_summary')):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, ignore=False):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(logging_outputs, *extra_stats_to_sum, ignore=ignore)
else:
return self._all_gather_list_sync(logging_outputs, *extra_stats_to_sum, ignore=ignore)
def _all_gather_list_sync(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, ignore=False):
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(zip(*distributed_utils.all_gather_list(([logging_outputs] + list(extra_stats_to_sum)), max_size=getattr(self.cfg.common, 'all_gather_list_size', 16384), group=self.data_parallel_process_group)))
(logging_outputs, extra_stats_to_sum) = (results[0], results[1:])
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return (logging_outputs, extra_stats_to_sum)
def _fast_stat_sync_sum(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, ignore=False):
data = {}
for (i, stat) in enumerate(extra_stats_to_sum):
data[('extra_stats_' + str(i))] = stat
if (len(logging_outputs) > 0):
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if (not ignore):
v = sum((log[k] for log in logging_outputs if (k in log)))
else:
v = logging_outputs[0][k]
v = (torch.zeros_like(v) if torch.is_tensor(v) else 0)
data[('logging_outputs_' + k)] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(data, device=self.device, group=self.data_parallel_process_group)
extra_stats_to_sum = [data[('extra_stats_' + str(i))] for i in range(len(extra_stats_to_sum))]
if (log_keys is not None):
logging_outputs = [{k: data[('logging_outputs_' + k)] for k in log_keys}]
else:
logging_outputs = []
return (logging_outputs, extra_stats_to_sum)
def _check_grad_norms(self, grad_norm):
if (self._grad_norm_buf is not None):
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(self._grad_norm_buf, group=self.data_parallel_process_group)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs((tensor - tensor[0])))
return ((torch.isfinite(tensor).all() and ((max_abs_diff / (tensor[0] + 1e-06)) < 1e-06).all()) or (self.cfg.common.amp and (not torch.isfinite(tensor).all())))
if (not is_consistent(self._grad_norm_buf)):
pretty_detail = '\n'.join(('rank {:3d} = {:.8f}'.format(r, n) for (r, n) in enumerate(self._grad_norm_buf.tolist())))
error_detail = 'grad_norm across the workers:\n{}\n'.format(pretty_detail)
raise FloatingPointError((((('Fatal error: gradients are inconsistent between workers. Try --ddp-backend=legacy_ddp. Or are you mixing up different generation of GPUs in training?' + '\n') + ('-' * 80)) + '\n{}\n'.format(error_detail)) + ('-' * 80)))
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if ((grad_norm is not None) and ((not torch.is_tensor(grad_norm)) or torch.isfinite(grad_norm))):
metrics.log_speed('ups', 1.0, priority=100, round=2)
metrics.log_scalar('gnorm', grad_norm, priority=400, round=3)
if (self.cfg.optimization.clip_norm > 0):
metrics.log_scalar('clip', torch.where((grad_norm > self.cfg.optimization.clip_norm), grad_norm.new_tensor(100), grad_norm.new_tensor(0)), priority=500, round=1)
with metrics.aggregate() as agg:
if (logging_outputs is not None):
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
if ('loss' not in agg):
if ('loss' not in self._warn_once):
self._warn_once.add('loss')
logger.warning("Criterion.reduce_metrics did not log a 'loss' value, which may break some functionality")
metrics.log_scalar('loss', (- 1))
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output['sample_size'] = sample_size
for key_to_delete in ['ppl', 'wps', 'wpb', 'bsz']:
if (key_to_delete in logging_output):
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data('CompileTime')
if (compile_stats is None):
return
num_xla_compiles = compile_stats[0]
if (num_xla_compiles > self._num_xla_compiles):
logger.warning('XLA compilation detected on device #{}; too many of these can lead to slow training, but we expect a few in the beginning'.format(self.cfg.distributed_training.distributed_rank))
self._num_xla_compiles = num_xla_compiles
def _xla_markstep_and_send_to_cpu(self, data=None):
import torch_xla.core.xla_model as xm
xm.mark_step()
if (data is not None):
from fairseq.utils import xla_device_to_cpu
return xla_device_to_cpu(data) |
def test(args):
time_taken = []
img_save_id = 0
(losses, psnrs, ssims) = myutils.init_meters(args.loss)
model.eval()
psnr_list = []
with torch.no_grad():
for (i, (images, name)) in enumerate(test_loader):
if (name[0] not in folderList):
continue
images = torch.stack(images, dim=1).squeeze(0)
(H, W) = images[0].shape[(- 2):]
resizes = ((8 * (H // 8)), (8 * (W // 8)))
from torchvision.transforms import Resize
transform = Resize(resizes)
rev_transforms = Resize((H, W))
images = transform(images).unsqueeze(0).cuda()
images = torch.unbind(images, dim=1)
start_time = time.time()
out = model(images)
print('Time Taken', (time.time() - start_time))
out = torch.cat(out)
out = rev_transforms(out)
output_image = make_image(out.squeeze(0))
import imageio
os.makedirs(('Middleburry/%s/' % name[0]))
imageio.imwrite(('Middleburry/%s/frame10i11.png' % name[0]), output_image)
return |
def insert_node_between_two_nodes(graph: Graph, node_to_insert: BaseNode, first_node: BaseNode, last_node: BaseNode):
graph.add_node(node_to_insert)
e_attr = graph.get_edge_data(first_node, last_node)
assert (len(list(e_attr.values())) == 1)
e_attr = list(e_attr.values())[0]
graph.add_edge(first_node, node_to_insert, **e_attr)
graph.add_edge(node_to_insert, last_node, **e_attr)
graph.remove_edge(first_node, last_node) |
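A minimal usage sketch for insert_node_between_two_nodes (not from the original project): it assumes the Graph type behaves like networkx.MultiDiGraph, whose get_edge_data returns a key-to-attributes dict, and uses plain strings in place of BaseNode instances.

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge('first', 'last', weight=1.0)

insert_node_between_two_nodes(g, 'middle', 'first', 'last')

# the original edge is split in two and its attributes are copied onto both halves
assert set(g.edges()) == {('first', 'middle'), ('middle', 'last')}
assert g.get_edge_data('first', 'middle')[0] == {'weight': 1.0}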
class TfEnv(GarageEnv):
def __init__(self, env=None, env_name=''):
super().__init__(env, env_name)
self.action_space = akro.from_gym(self.env.action_space)
self.observation_space = akro.from_gym(self.env.observation_space)
@property
def max_episode_steps(self):
return self.env.spec.max_episode_steps |
def read_tsv(path, corpus_root, language, accent=None, hours=(- 1)):
with open(path, 'r') as fp:
rows = csv.reader(fp, delimiter='\t')
data_list = []
total_len = 0
iterator = tqdm(enumerate(rows))
for (i, row) in iterator:
if (i == 0):
continue
if ((language == 'es') and (row[7] != 'mexicano')):
continue
if ((language == 'en') and (row[7] != accent)):
continue
audio = MP3(join(corpus_root, row[1]))
secs = audio.info.length
sent_normed = normalize(row[2], language)
if (sent_normed == ''):
continue
data_list.append({'path': row[1], 'sentence': sent_normed, 'accent': (row[7] if (row[7] != '') else 'unk'), 'len': secs})
total_len += secs
if ((hours > 0) and ((total_len / 3600.0) > hours)):
iterator.close()
break
print(f'Read {len(data_list)} files')
print('Total {:.2f} hours'.format((total_len / 3600.0)))
return data_list |
def _load_local(hubconf_dir, model, *args, **kwargs):
sys.path.insert(0, hubconf_dir)
hubconf_path = os.path.join(hubconf_dir, MODULE_HUBCONF)
hub_module = import_module(MODULE_HUBCONF, hubconf_path)
entry = _load_entry_from_hubconf(hub_module, model)
model = entry(*args, **kwargs)
sys.path.remove(hubconf_dir)
return model |
class MPNetTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='[UNK]', pad_token='<pad>', mask_token='<mask>', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
if (not os.path.isfile(vocab_file)):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
if (token in self.basic_tokenizer.never_split):
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if (token_ids_1 is None):
return (([1] + ([0] * len(token_ids_0))) + [1])
return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
else:
vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write((token + '\n'))
index += 1
return (vocab_file,) |
class TUCh(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
Val = _swig_property(_snap.TUCh_Val_get, _snap.TUCh_Val_set)
def __init__(self, *args):
_snap.TUCh_swiginit(self, _snap.new_TUCh(*args))
def Save(self, SOut):
return _snap.TUCh_Save(self, SOut)
def __eq__(self, UCh):
return _snap.TUCh___eq__(self, UCh)
def __lt__(self, UCh):
return _snap.TUCh___lt__(self, UCh)
def __call__(self):
return _snap.TUCh___call__(self)
def GetMemUsed(self):
return _snap.TUCh_GetMemUsed(self)
def GetPrimHashCd(self):
return _snap.TUCh_GetPrimHashCd(self)
def GetSecHashCd(self):
return _snap.TUCh_GetSecHashCd(self)
__swig_destroy__ = _snap.delete_TUCh |
class IndexScore():
def __init__(self, index, score):
self.index = index
self.score = score
def __lt__(self, other):
return (self.score < other.score)
def __repr__(self):
return ('(%d, %.3f)' % (self.index, self.score))
def __str__(self):
return ('(index: %d, score:%.3f)' % (self.index, self.score)) |
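Since IndexScore orders by score via __lt__, instances can be heapified or sorted directly; a short illustrative example (assumes the class above is in scope):

import heapq

scores = [IndexScore(0, 0.42), IndexScore(1, 0.91), IndexScore(2, 0.17)]
heapq.heapify(scores)                 # min-heap keyed on .score
print(heapq.heappop(scores))          # (index: 2, score:0.170)
print(sorted(scores, reverse=True))   # remaining items, highest score first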
class ToImageCA(ToImage):
def __init__(self, game, name, cfg: Config):
super().__init__(game, name, cfg)
def step(self, action, **kwargs):
action = action.reshape((self.dim, self.w, self.h))
(obs, reward, done, truncated, info) = self.env.step(action, **kwargs)
obs = self.transform(obs)
return (obs, reward, done, truncated, info) |
class MarkdownTableLinearize(TableLinearize):
def process_table(self, table_content: Dict):
assert (('header' in table_content) and ('rows' in table_content)), self.PROMPT_MESSAGE
_table_str = (self.process_header(table_content['header']) + ' ')
for (i, row_example) in enumerate(table_content['rows']):
_table_str += (self.process_row(row_example, row_index=(i + 1)) + '\n')
return _table_str.strip()
def process_header(self, headers: List):
return (((('| ' + ' | '.join(headers)) + ' |\n') + ''.join(['|---' for _ in range(len(headers))])) + '|\n')
def process_row(self, row: List, row_index: int):
row_str = ''
row_cell_values = []
for cell_value in row:
if isinstance(cell_value, int):
row_cell_values.append(str(cell_value))
else:
row_cell_values.append(cell_value)
row_str += ' | '.join(row_cell_values)
return (('| ' + row_str) + ' |\n') |
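Hypothetical rendering example for MarkdownTableLinearize; it assumes the TableLinearize base class from the surrounding project is importable and uses made-up table data.

linearizer = MarkdownTableLinearize()
print(linearizer.process_header(['city', 'pop']), end='')
# | city | pop |
# |---|---|
print(linearizer.process_row(['Oslo', '700k'], row_index=1), end='')
# | Oslo | 700k |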
def format_timestamp(seconds: float, always_include_hours: bool=False, decimal_marker: str='.'):
if (seconds is not None):
milliseconds = round((seconds * 1000.0))
hours = (milliseconds // 3600000)
milliseconds -= (hours * 3600000)
minutes = (milliseconds // 60000)
milliseconds -= (minutes * 60000)
seconds = (milliseconds // 1000)
milliseconds -= (seconds * 1000)
hours_marker = (f'{hours:02d}:' if (always_include_hours or (hours > 0)) else '')
return f'{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}'
else:
return seconds |
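format_timestamp rounds to milliseconds and only prefixes the hour field when it is non-zero (or explicitly requested); for example:

print(format_timestamp(5.25))                             # 00:05.250
print(format_timestamp(3661.5))                           # 01:01:01.500
print(format_timestamp(5.25, always_include_hours=True))  # 00:00:05.250
print(format_timestamp(7.5, decimal_marker=','))          # 00:07,500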
def _preprocess_reader_samples_chunk(samples: List, out_file_prefix: str, gold_passages_file: str, tensorizer: Tensorizer, is_train_set: bool) -> str:
(chunk_id, samples) = samples
logger.info('Start batch %d', len(samples))
iterator = preprocess_retriever_data(samples, gold_passages_file, tensorizer, is_train_set=is_train_set)
results = []
iterator = tqdm(iterator)
for (i, r) in enumerate(iterator):
r.on_serialize()
results.append(r)
out_file = (((out_file_prefix + '.') + str(chunk_id)) + '.pkl')
with open(out_file, mode='wb') as f:
logger.info('Serialize %d results to %s', len(results), out_file)
pickle.dump(results, f)
return out_file |
class ImageNet12(object):
def __init__(self, trainFolder, testFolder, num_workers=8, pin_memory=True, size_images=224, scaled_size=256, type_of_data_augmentation='rand_scale', data_config=None):
self.data_config = data_config
self.trainFolder = trainFolder
self.testFolder = testFolder
self.num_workers = num_workers
self.pin_memory = pin_memory
self.patch_dataset = self.data_config.patch_dataset
if (not isinstance(size_images, int)):
raise ValueError('size_images must be an int. It will be scaled to a square image')
self.size_images = size_images
self.scaled_size = scaled_size
type_of_data_augmentation = type_of_data_augmentation.lower()
if (type_of_data_augmentation not in ('rand_scale', 'random_sized')):
raise ValueError('type_of_data_augmentation must be either rand-scale or random-sized')
self.type_of_data_augmentation = type_of_data_augmentation
def _getTransformList(self, aug_type):
assert (aug_type in ['rand_scale', 'random_sized', 'week_train', 'validation'])
list_of_transforms = []
if (aug_type == 'validation'):
list_of_transforms.append(transforms.Resize(self.scaled_size))
list_of_transforms.append(transforms.CenterCrop(self.size_images))
elif (aug_type == 'week_train'):
list_of_transforms.append(transforms.Resize(256))
list_of_transforms.append(transforms.RandomCrop(self.size_images))
list_of_transforms.append(transforms.RandomHorizontalFlip())
else:
if (aug_type == 'rand_scale'):
list_of_transforms.append(transforms_extension.RandomScale(256, 480))
list_of_transforms.append(transforms.RandomCrop(self.size_images))
list_of_transforms.append(transforms.RandomHorizontalFlip())
elif (aug_type == 'random_sized'):
list_of_transforms.append(transforms.RandomResizedCrop(self.size_images, scale=(self.data_config.random_sized.min_scale, 1.0)))
list_of_transforms.append(transforms.RandomHorizontalFlip())
if self.data_config.color:
list_of_transforms.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4))
return transforms.Compose(list_of_transforms)
def _getTrainSet(self):
train_transform = self._getTransformList(self.type_of_data_augmentation)
if (self.data_config.train_data_type == 'img'):
train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform)
elif (self.data_config.train_data_type == 'lmdb'):
train_set = lmdb_dataset.ImageFolder(self.trainFolder, os.path.join(self.trainFolder, '..', 'train_datalist'), train_transform, patch_dataset=self.patch_dataset)
self.train_num_examples = train_set.__len__()
return train_set
def _getWeekTrainSet(self):
train_transform = self._getTransformList('week_train')
if (self.data_config.train_data_type == 'img'):
train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform)
elif (self.data_config.train_data_type == 'lmdb'):
train_set = lmdb_dataset.ImageFolder(self.trainFolder, os.path.join(self.trainFolder, '..', 'train_datalist'), train_transform, patch_dataset=self.patch_dataset)
self.train_num_examples = train_set.__len__()
return train_set
def _getTestSet(self):
test_transform = self._getTransformList('validation')
if (self.data_config.val_data_type == 'img'):
test_set = torchvision.datasets.ImageFolder(self.testFolder, test_transform)
elif (self.data_config.val_data_type == 'lmdb'):
test_set = lmdb_dataset.ImageFolder(self.testFolder, os.path.join(self.testFolder, '..', 'val_datalist'), test_transform)
self.test_num_examples = test_set.__len__()
return test_set
def getTrainLoader(self, batch_size, shuffle=True):
train_set = self._getTrainSet()
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=None, collate_fn=fast_collate)
return train_loader
def getWeekTrainLoader(self, batch_size, shuffle=True):
train_set = self._getWeekTrainSet()
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, collate_fn=fast_collate)
return train_loader
def getTestLoader(self, batch_size, shuffle=False):
test_set = self._getTestSet()
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=None, collate_fn=fast_collate)
return test_loader
def getTrainTestLoader(self, batch_size, train_shuffle=True, val_shuffle=False):
train_loader = self.getTrainLoader(batch_size, train_shuffle)
test_loader = self.getTestLoader(batch_size, val_shuffle)
return (train_loader, test_loader)
def getSetTrainTestLoader(self, batch_size, train_shuffle=True, val_shuffle=False):
train_loader = self.getTrainLoader(batch_size, train_shuffle)
week_train_loader = self.getWeekTrainLoader(batch_size, train_shuffle)
test_loader = self.getTestLoader(batch_size, val_shuffle)
return ((train_loader, week_train_loader), test_loader) |
def main(args):
cfg = setup(args)
PathManager.set_strict_kwargs_checking(False)
if args.eval_only:
model = Trainer.build_model(cfg)
DensePoseCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks([hooks.EvalHook(0, (lambda : trainer.test_with_TTA(cfg, trainer.model)))])
return trainer.train() |
def yaml_dump(data, Dumper=None, allow_unicode: bool=True, **kwargs):
if (Dumper is None):
Dumper = OrderedDumper
return yaml.dump(data, Dumper=Dumper, allow_unicode=allow_unicode, **kwargs) |
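A small usage sketch for yaml_dump; OrderedDumper is defined elsewhere in the project, so yaml.SafeDumper is passed explicitly here.

import yaml

doc = {'name': 'café', 'sizes': [1, 2, 3]}
print(yaml_dump(doc, Dumper=yaml.SafeDumper, default_flow_style=False))
# name: café
# sizes:
# - 1
# - 2
# - 3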
def _macosx_vers(_cache=[]):
if (not _cache):
version = platform.mac_ver()[0]
if (version == ''):
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if ('ProductVersion' in plist_content):
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0] |
class GeneratedPaths():
output_dir: Path
lcm_type_dir: Path
function_dir: Path
python_types_dir: Path
cpp_types_dir: Path
generated_files: T.List[Path] |
def validate_ar_dni(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(dni.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column != ''):
return df[column].apply(dni.is_valid)
else:
return df.applymap(dni.is_valid)
return dni.is_valid(df) |
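validate_ar_dni dispatches on the input type: element-wise over Series/DataFrames, directly for scalars. The sketch below assumes `dni` is python-stdnum's stdnum.ar.dni module (the import is not shown in the snippet) and uses made-up values.

import pandas as pd
from stdnum.ar import dni  # assumed to match the module referenced above

s = pd.Series(['20123456', 'not-a-dni'])
print(validate_ar_dni(s).tolist())   # element-wise booleans, e.g. [True, False]
print(validate_ar_dni('20123456'))   # scalar input returns a single bool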
def latex_env(env, text, titleline, counter, format):
(label, titleline) = get_label(titleline)
titleline = titleline.strip()
template = '\n\\begin{${env}}\n% if label:\n\\label{${label}}\n% endif\n% if titleline:\n\\noindent\\emph{${titleline}}.\n%endif\n${text}\n\\end{${env}}\n'
return Template(template).render(**vars()) |
class NumericalVarField(NumericalDataFrameField):
def __init__(self, *args, **kwargs):
super().__init__(*args, field_type='var', **kwargs) |
def sftw(A: dace.float64[20]):
B = dace.define_local([20], dace.float64)
C = dace.define_local([20], dace.float64)
D = dace.define_local([20], dace.float64)
E = dace.define_local([20], dace.float64)
dup = dace.define_local([20], dace.float64)
for i in dace.map[0:20]:
with dace.tasklet:
(a << A[i])
(b >> B[i])
b = a
for i in dace.map[0:20]:
with dace.tasklet:
(a << B[i])
(b >> dup[i])
b = a
for i in dace.map[0:20]:
with dace.tasklet:
(a << dup[i])
(b >> D[i])
b = (a + 2)
for i in dace.map[0:20]:
with dace.tasklet:
(a << A[i])
(b >> C[i])
b = (a + 1)
for i in dace.map[0:20]:
with dace.tasklet:
(a << C[i])
(b >> dup[i])
b = (a + 1)
for i in dace.map[0:20]:
with dace.tasklet:
(a << dup[i])
(b >> E[i])
b = (a + 3)
for i in dace.map[0:20]:
with dace.tasklet:
(d << D[i])
(e << E[i])
(a >> A[i])
a = (d + e) |
@pytest.mark.parametrize(['current_shell_id', 'delta_shell', 'no_of_shells'], [(132, (- 1), 199), (132, 0, 132), (132, 20, 154)])
def test_move_packet_across_shell_boundary_increment(packet, current_shell_id, delta_shell, no_of_shells):
packet.current_shell_id = current_shell_id
r_packet_transport.move_packet_across_shell_boundary(packet, delta_shell, no_of_shells)
assert (packet.current_shell_id == (current_shell_id + delta_shell)) |
def optimizer_kwargs(parsed_args):
return {'optim': parsed_args.optim, 'lr': parsed_args.lr, 'weight_decay': parsed_args.weight_decay, 'momentum': parsed_args.momentum, 'sgd_dampening': parsed_args.sgd_dampening, 'sgd_nesterov': parsed_args.sgd_nesterov, 'rmsprop_alpha': parsed_args.rmsprop_alpha, 'adam_beta1': parsed_args.adam_beta1, 'adam_beta2': parsed_args.adam_beta2, 'staged_lr': parsed_args.staged_lr, 'new_layers': parsed_args.new_layers, 'base_lr_mult': parsed_args.base_lr_mult} |
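optimizer_kwargs simply repackages parsed CLI arguments into a flat dict; a hypothetical argparse.Namespace with the expected attribute names illustrates the mapping.

from argparse import Namespace

args = Namespace(optim='adam', lr=3e-4, weight_decay=5e-4, momentum=0.9,
                 sgd_dampening=0.0, sgd_nesterov=False, rmsprop_alpha=0.99,
                 adam_beta1=0.9, adam_beta2=0.999, staged_lr=False,
                 new_layers=['classifier'], base_lr_mult=0.1)
kwargs = optimizer_kwargs(args)
print(kwargs['optim'], kwargs['lr'])  # adam 0.0003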
def python_app_auth(python_app_type):
if (python_app_type == 'wsgi'):
return '\nimport werkzeug\n\n@schemathesis.auth()\nclass Auth:\n\n def get(self, case, context):\n client = werkzeug.Client(context.app)\n response = client.post("/auth/token/", json={"username": "test", "password": "pass"})\n return response.json["access_token"]\n\n def set(self, case, data, context):\n case.headers = case.headers or {}\n case.headers["Authorization"] = f"Bearer {data}"\n'
if (python_app_type == 'asgi'):
return '\nfrom starlette_testclient import TestClient\n\n@schemathesis.auth()\nclass Auth:\n\n def get(self, case, context):\n client = TestClient(context.app)\n response = client.post("/auth/token/", json={"username": "test", "password": "pass"})\n return response.json()["access_token"]\n\n def set(self, case, data, context):\n case.headers = case.headers or {}\n case.headers["Authorization"] = f"Bearer {data}"\n' |
class TestSctypeDict(object):
def test_longdouble(self):
assert_((np.sctypeDict['f8'] is not np.longdouble))
assert_((np.sctypeDict['c16'] is not np.clongdouble)) |
@HEADS.register_module()
class DMHead(BaseDecodeHead):
def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs):
super(DMHead, self).__init__(**kwargs)
assert isinstance(filter_sizes, (list, tuple))
self.filter_sizes = filter_sizes
self.fusion = fusion
dcm_modules = []
for filter_size in self.filter_sizes:
dcm_modules.append(DCM(filter_size, self.fusion, self.in_channels, self.channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
self.dcm_modules = nn.ModuleList(dcm_modules)
self.bottleneck = ConvModule((self.in_channels + (len(filter_sizes) * self.channels)), self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
def forward(self, inputs):
x = self._transform_inputs(inputs)
dcm_outs = [x]
for dcm_module in self.dcm_modules:
dcm_outs.append(dcm_module(x))
dcm_outs = torch.cat(dcm_outs, dim=1)
output = self.bottleneck(dcm_outs)
output = self.cls_seg(output)
return output |
def sja_to_aa(sja: Union[(torch.Tensor, numpy.ndarray)], R_t: Union[(torch.Tensor, numpy.ndarray)]=TRANSFORMATION_AA_TO_SJA, R_t_inv: Union[(torch.Tensor, numpy.ndarray)]=TRANSFORMATION_SJA_TO_AA) -> Union[(torch.Tensor, numpy.ndarray)]:
def _sja_to_aa(sja, R_t, R_t_inv):
R_sja = euler_angles_to_matrix(sja, convention='XYZ')
R_aa = ((R_t_inv @ R_sja) @ R_t)
aa = quaternion_to_axis_angle(matrix_to_quaternion(R_aa))
return aa
if (sja.shape[(- 2):] != (21, 3)):
raise ValueError(f'Invalid input axis angles shape f{sja.shape}.')
if (R_t.shape[(- 3):] != (21, 3, 3)):
raise ValueError(f'Invalid input R_t shape f{R_t.shape}.')
if (R_t_inv.shape[(- 3):] != (21, 3, 3)):
raise ValueError(f'Invalid input R_t_inv shape {R_t_inv.shape}.')
t = Compose([_sja_to_aa])
return t(sja, R_t=R_t, R_t_inv=R_t_inv) |
@command(name='save')
@option('-n', '--name', required=False, help='Name of the Federated learning plan', default='default', type=str)
def save_(name):
from os import makedirs
from shutil import copyfile
echo(f'Saving plan to {name}')
makedirs(f'plan/plans/{name}', exist_ok=True)
copyfile('plan/plan.yaml', f'plan/plans/{name}/plan.yaml')
switch_plan(name) |
def gen_line_dict_file(out_path, imgid2imgname, imgid2anno):
lines = []
for (key, value) in imgid2imgname.items():
if (key in imgid2anno):
anno = imgid2anno[key]
line_dict = {}
line_dict['file_name'] = value['file_name']
line_dict['height'] = value['height']
line_dict['width'] = value['width']
line_dict['annotations'] = anno
lines.append(json.dumps(line_dict))
list_to_file(out_path, lines) |
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
self.weight = nn.Parameter(torch.empty(nx, nf))
self.bias = nn.Parameter(torch.zeros(nf))
nn.init.normal_(self.weight, std=0.02)
def forward(self, x):
size_out = (x.size()[:(- 1)] + (self.nf,))
x = torch.addmm(self.bias, x.view((- 1), x.size((- 1))), self.weight)
x = x.view(size_out)
return x |
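Conv1D is a linear projection over the last dimension (GPT-2 style), mapping nx input features to nf outputs; a quick shape check with hypothetical sizes:

import torch

layer = Conv1D(nf=8, nx=4)      # project 4 features -> 8
x = torch.randn(2, 5, 4)        # (batch, seq, nx)
print(layer(x).shape)           # torch.Size([2, 5, 8])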
class MockOpen(object):
def __init__(self, test_dir):
self.files = {}
self.old_open = open
self.test_dir = test_dir
def __call__(self, filename, mode, *args, **kwargs):
if filename.startswith(self.test_dir):
if ((filename not in self.files) or (mode in ('w', 'w+'))):
self.files[filename] = StringIO.StringIO()
fakefile = self.files[filename]
if (mode in ('r', 'r+')):
fakefile.seek(0)
else:
fakefile.seek(0, os.SEEK_END)
return contextlib.contextmanager(yields)(fakefile)
else:
return self.old_open(filename, *args, **kwargs) |
def _impl(array, n, replacement, axis, fields, parameters, with_name, highlevel, behavior, attrs):
axis = regularize_axis(axis)
if (with_name is None):
pass
elif (parameters is None):
parameters = {'__record__': with_name}
else:
parameters = {**parameters, '__record__': with_name}
with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
layout = ctx.unwrap(array, allow_record=False, primitive_policy='error')
out = ak._do.combinations(layout, n, replacement=replacement, axis=axis, fields=fields, parameters=parameters)
return ctx.wrap(out, highlevel=highlevel) |
def register_Ns3LteAnrSapUser_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteAnrSapUser const &', 'arg0')])
cls.add_method('AddUeMeasReportConfigForAnr', 'uint8_t', [param('ns3::LteRrcSap::ReportConfigEutra', 'reportConfig')], is_pure_virtual=True, is_virtual=True)
return |
def import_tf_params(tf_mdl_dir, sess):
print('\nLoading tensorflow model\n')
if callable(tf_mdl_dir):
tf_mdl_dir(sess)
else:
facenet.load_model(tf_mdl_dir)
print('\nGetting model weights\n')
tf_layers = tf.trainable_variables()
tf_params = sess.run(tf_layers)
tf_shapes = [p.shape for p in tf_params]
tf_layers = [l.name for l in tf_layers]
if (not callable(tf_mdl_dir)):
path = os.path.join(tf_mdl_dir, 'layer_description.json')
else:
path = 'data/layer_description.json'
with open(path, 'w') as f:
json.dump({l: s for (l, s) in zip(tf_layers, tf_shapes)}, f)
return (tf_layers, tf_params, tf_shapes) |
@pytest.mark.parametrize('length,max_seq_length,eos_token_id,expected', [(3, None, None, '[(0, 0) (1, -1) (2, -2)]')])
def test_str(tokenized_line: TokenizedLine, expected: str):
assert (str(tokenized_line) == repr(tokenized_line) == expected) |
def dataset(mode, input_Dir, motifReqs):
beta = 25
number_of_clusters = CLUSTER_NUMBER
oldAssignName = ('%s/old/assign.out' % input_Dir)
input_name = ('%s/data.out' % input_Dir)
if (mode == 1):
return runHyperParameterTests(input_name, input_Dir, number_of_clusters, beta, oldAssignName, motifReqs)
return 0 |
@app.callback(Output('pattern-time-series', 'figure'), [Input('summary-scatter', 'clickData'), Input('time-interval', 'value')], prevent_initial_call=True)
def update_y_timeseries(data, interval):
print(data)
interval_map = {0: '1s', 1: '1min', 2: '1h', 3: '1d'}
pattern = data['points'][0]['customdata']
freq = interval_map[interval]
result_df = log_pattern_demo.result_table
dff = result_df[(result_df['parsed_logline'] == pattern)][['timestamp', 'parsed_logline']]
ts_df = dff[['timestamp', 'parsed_logline']].groupby(pd.Grouper(key='timestamp', freq=freq, offset=0, label='right')).size().reset_index(name='count')
title = 'Trend of Occurrence at Freq({})'.format(freq)
return create_time_series(ts_df, 'Linear', title) |
def chat_to_worker_id(cursor, code_to_wid):
d = {}
cursor.execute('SELECT chat_id, agent_ids FROM chat')
for (chat_id, agent_uids) in cursor.fetchall():
agent_wid = {}
agent_uids = eval(agent_uids)
for (agent_id, agent_uid) in agent_uids.iteritems():
if (not isinstance(agent_uid, basestring)):
agent_wid[agent_id] = None
else:
cursor.execute('SELECT mturk_code FROM mturk_task WHERE name=?', (agent_uid,))
res = cursor.fetchall()
if (len(res) > 0):
mturk_code = res[0][0]
if (mturk_code not in code_to_wid):
continue
else:
agent_wid[agent_id] = code_to_wid[mturk_code]
d[chat_id] = agent_wid
return d |
class SawyerPickOutOfHoleEnv(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.11
hand_low = ((- 0.5), 0.4, (- 0.05))
hand_high = (0.5, 1, 0.5)
obj_low = (0, 0.84, (- 0.03))
obj_high = (0, 0.84, (- 0.03))
goal_low = ((- 0.1), 0.6, 0.15)
goal_high = (0.1, 0.7, 0.3)
super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
self.init_config = {'obj_init_pos': np.array([0, 0.84, (- 0.03)]), 'obj_init_angle': 0.3, 'hand_init_pos': np.array([0.0, 0.6, 0.2])}
self.goal = np.array([0.0, 0.6, 0.2])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.liftThresh = liftThresh
self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v1_path_for('sawyer_xyz/sawyer_pick_out_of_hole.xml')
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
(reward, reachDist, pickRew, placingDist) = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {'reachDist': reachDist, 'goalDist': placingDist, 'epRew': reward, 'pickRew': pickRew, 'success': float((placingDist <= 0.08))}
return (ob, reward, False, info)
def _get_pos_objects(self):
return self.data.get_geom_xpos('objGeom')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
if self.random_init:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[(- 3):]
while (np.linalg.norm((goal_pos[:2] - self._target_pos[:2])) < 0.15):
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[(- 3):]
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
self._set_obj_xyz(self.obj_init_pos)
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = (self.objHeight + self.liftThresh)
self.maxPlacingDist = (np.linalg.norm((np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos))) + self.heightTarget)
return self._get_obs()
def _reset_hand(self):
super()._reset_hand(10)
(rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
self.pickCompleted = False
def compute_reward(self, actions, obs):
objPos = obs[3:6]
(rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
fingerCOM = ((rightFinger + leftFinger) / 2)
heightTarget = self.heightTarget
goal = self._target_pos
reachDist = np.linalg.norm((objPos - fingerCOM))
placingDist = np.linalg.norm((objPos - goal))
assert np.all((goal == self._get_site_pos('goal')))
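# The reward is the sum of three shaped terms: a reaching term (bring the fingers to the object), a picking term (lift the object above heightTarget), and a placing term (move the lifted object towards the goal).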
def reachReward():
reachRew = (- reachDist)
reachDistxy = np.linalg.norm((objPos[:(- 1)] - fingerCOM[:(- 1)]))
zRew = np.linalg.norm((fingerCOM[(- 1)] - self.init_fingerCOM[(- 1)]))
if (reachDistxy < 0.05):
reachRew = (- reachDist)
else:
reachRew = ((- reachDistxy) - (2 * zRew))
if (reachDist < 0.05):
reachRew = ((- reachDist) + (max(actions[(- 1)], 0) / 50))
return (reachRew, reachDist)
def pickCompletionCriteria():
tolerance = 0.01
return (objPos[2] >= (heightTarget - tolerance))
self.pickCompleted = pickCompletionCriteria()
def objDropped():
return ((objPos[2] < (self.objHeight + 0.005)) and (placingDist > 0.02) and (reachDist > 0.02))
def orig_pickReward():
hScale = 100
if (self.pickCompleted and (not objDropped())):
return (hScale * ((heightTarget - self.objHeight) + 0.02))
elif ((reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005))):
return (hScale * ((min(heightTarget, objPos[2]) - self.objHeight) + 0.02))
else:
return 0
def placeReward():
c1 = 1000
c2 = 0.01
c3 = 0.001
cond = (self.pickCompleted and (reachDist < 0.1) and (not objDropped()))
if cond:
placeRew = ((1000 * (self.maxPlacingDist - placingDist)) + (c1 * (np.exp(((- (placingDist ** 2)) / c2)) + np.exp(((- (placingDist ** 2)) / c3)))))
placeRew = max(placeRew, 0)
return [placeRew, placingDist]
else:
return [0, placingDist]
(reachRew, reachDist) = reachReward()
pickRew = orig_pickReward()
(placeRew, placingDist) = placeReward()
assert ((placeRew >= 0) and (pickRew >= 0))
reward = ((reachRew + pickRew) + placeRew)
return [reward, reachDist, pickRew, placingDist] |
@compare_numpy_output(check_dtype=True)
def test_ufunc_heaviside_ff(A: dace.float32[10], B: dace.float32[10]):
return np.heaviside(A, B) |
def intermediate_name(filename, epoch, dev_scoring, score):
(root, ext) = os.path.splitext(filename)
return ((root + '.E{epoch:04d}-{score_type}{acc:05.2f}'.format(**{'epoch': epoch, 'score_type': dev_scoring.value, 'acc': (score * 100)})) + ext) |
class RunBenchmarkExperiment(TaskConfiguration):
ID = 'ex3'
@staticmethod
def mode() -> str:
return 'run {}'.format(RunBenchmarkExperiment.ID)
def tasks(self, config) -> List:
compile_version = CompileVersionTask(config.compiles_path, config.run_timestamp, config.force_compile, config.use_tmp_wrkdir)
load_detector = LoadDetectorTask(config.detectors_path, config.detector, config.requested_release, config.java_options)
detect = DetectAllFindingsTask(config.findings_path, config.force_detect, config.timeout, config.run_timestamp)
return (([load_detector] + CheckoutTaskConfiguration().tasks(config)) + [compile_version, detect]) |
def test_constructor_goals_parameter():
goals = {MagicMock(ff.FitnessFunction), MagicMock(ff.FitnessFunction)}
comparator = dc.DominanceComparator(goals=goals)
assert (comparator._objectives == goals) |
def crop_and_resize(image, height, width):
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
aspect_ratio = (width / height)
image = distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(((3.0 / 4) * aspect_ratio), ((4.0 / 3.0) * aspect_ratio)), area_range=(0.08, 1.0), max_attempts=100, scope=None)
return tf.image.resize([image], [height, width], method=tf.image.ResizeMethod.BICUBIC)[0] |
class pAdicModuleIsomorphism(Map):
def _repr_type(self):
return 'Isomorphism'
def is_injective(self):
return True
def is_surjective(self):
return True
def _richcmp_(self, other, op):
if isinstance(other, pAdicModuleIsomorphism):
return rich_to_bool(op, 0)
else:
return rich_to_bool(op, 1) |
def create_spinner(repetitions: int) -> Generator[(str, None, None)]:
assert (repetitions > 0), 'The number of repetitions should be greater than zero'
while True:
for ch in '':
for _ in range(repetitions):
(yield ch) |
class GaussianMLPPolicy(StochasticPolicy, LasagnePowered):
def __init__(self, env_spec, hidden_sizes=(32, 32), learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), min_std=1e-06, std_hidden_nonlinearity=NL.tanh, hidden_nonlinearity=NL.tanh, output_nonlinearity=None, mean_network=None, std_network=None, dist_cls=DiagonalGaussian):
Serializable.quick_init(self, locals())
assert isinstance(env_spec.action_space, Box)
obs_dim = env_spec.observation_space.flat_dim
action_dim = env_spec.action_space.flat_dim
if (mean_network is None):
mean_network = MLP(input_shape=(obs_dim,), output_dim=action_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity)
self._mean_network = mean_network
l_mean = mean_network.output_layer
obs_var = mean_network.input_layer.input_var
if (std_network is not None):
l_log_std = std_network.output_layer
elif adaptive_std:
std_network = MLP(input_shape=(obs_dim,), input_layer=mean_network.input_layer, output_dim=action_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_hidden_nonlinearity, output_nonlinearity=None)
l_log_std = std_network.output_layer
else:
l_log_std = ParamLayer(mean_network.input_layer, num_units=action_dim, param=lasagne.init.Constant(np.log(init_std)), name='output_log_std', trainable=learn_std)
self.min_std = min_std
(mean_var, log_std_var) = L.get_output([l_mean, l_log_std])
if (self.min_std is not None):
log_std_var = TT.maximum(log_std_var, np.log(min_std))
(self._mean_var, self._log_std_var) = (mean_var, log_std_var)
self._l_mean = l_mean
self._l_log_std = l_log_std
self._dist = dist_cls(action_dim)
LasagnePowered.__init__(self, [l_mean, l_log_std])
super(GaussianMLPPolicy, self).__init__(env_spec)
self._f_dist = ext.compile_function(inputs=[obs_var], outputs=[mean_var, log_std_var])
def dist_info_sym(self, obs_var, state_info_vars=None):
(mean_var, log_std_var) = L.get_output([self._l_mean, self._l_log_std], obs_var)
if (self.min_std is not None):
log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
return dict(mean=mean_var, log_std=log_std_var)
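# Actions are sampled with the reparameterization trick: a = mean + eps * exp(log_std), eps ~ N(0, I).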
def get_action(self, observation):
flat_obs = self.observation_space.flatten(observation)
(mean, log_std) = [x[0] for x in self._f_dist([flat_obs])]
rnd = np.random.normal(size=mean.shape)
action = ((rnd * np.exp(log_std)) + mean)
return (action, dict(mean=mean, log_std=log_std))
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
(means, log_stds) = self._f_dist(flat_obs)
rnd = np.random.normal(size=means.shape)
actions = ((rnd * np.exp(log_stds)) + means)
return (actions, dict(mean=means, log_std=log_stds))
def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
(new_mean_var, new_log_std_var) = (new_dist_info_vars['mean'], new_dist_info_vars['log_std'])
(old_mean_var, old_log_std_var) = (old_dist_info_vars['mean'], old_dist_info_vars['log_std'])
epsilon_var = ((action_var - old_mean_var) / (TT.exp(old_log_std_var) + 1e-08))
new_action_var = (new_mean_var + (epsilon_var * TT.exp(new_log_std_var)))
return new_action_var
def log_diagnostics(self, paths):
log_stds = np.vstack([path['agent_infos']['log_std'] for path in paths])
logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))
@property
def distribution(self):
return self._dist |
def convert_conv_layer(conv, prefix, out):
convert_conv2d(conv.conv2d_1x3, (prefix + '.conv1'), out)
convert_layernorm(conv.BN_1x3, (prefix + '.ln1'), out)
convert_conv2d(conv.conv2d_3x1, (prefix + '.conv2'), out)
convert_layernorm(conv.BN_3x1, (prefix + '.ln2'), out)
return (conv.conv2d_1x3.strides, conv.conv2d_3x1.strides) |
def train_model(model, criterion, optimizer, scheduler, num_epochs=25, device='cpu'):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
print(('-' * 10))
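# Each epoch runs a training pass followed by a validation pass; gradients are only enabled during training.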
for phase in ['train', 'val']:
if (phase == 'train'):
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
for (inputs, labels) in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad(set_to_none=True)
with torch.set_grad_enabled((phase == 'train')):
outputs = model(inputs)
(_, preds) = torch.max(outputs, 1)
loss = criterion(outputs, labels)
if (phase == 'train'):
loss.backward()
optimizer.step()
running_loss += (loss.item() * inputs.size(0))
running_corrects += torch.sum((preds == labels.data))
if (phase == 'train'):
scheduler.step()
epoch_loss = (running_loss / dataset_sizes[phase])
epoch_acc = (running_corrects.double() / dataset_sizes[phase])
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
if ((phase == 'val') and (epoch_acc > best_acc)):
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = (time.time() - since)
print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
print('Best val Acc: {:4f}'.format(best_acc))
model.load_state_dict(best_model_wts)
return model |
def prove_BSD(E, verbosity=0, two_desc='mwrank', proof=None, secs_hi=5, return_BSD=False):
if (proof is None):
from sage.structure.proof.proof import get_flag
proof = get_flag(proof, 'elliptic_curve')
else:
proof = bool(proof)
if (not proof):
return []
from copy import copy
BSD = BSD_data()
BSD.curve = E.optimal_curve()
if BSD.curve.has_cm():
non_max_j_invs = [(- 12288000), 54000, 287496, 16581375]
if (BSD.curve.j_invariant() in non_max_j_invs):
if (verbosity > 0):
print('CM by non maximal order: switching curves')
for E in BSD.curve.isogeny_class():
if (E.j_invariant() not in non_max_j_invs):
BSD.curve = E
break
BSD.update()
galrep = BSD.curve.galois_representation()
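# Two-descent (via mwrank, pari or the native implementation) gives bounds on the rank and on the 2-part of Sha.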
if (two_desc == 'mwrank'):
M = mwrank_two_descent_work(BSD.curve, BSD.two_tor_rk)
elif (two_desc == 'pari'):
M = pari_two_descent_work(BSD.curve)
elif (two_desc == 'sage'):
M = native_two_isogeny_descent_work(BSD.curve, BSD.two_tor_rk)
else:
raise NotImplementedError()
(rank_lower_bd, rank_upper_bd, sha2_lower_bd, sha2_upper_bd, gens) = M
assert (sha2_lower_bd <= sha2_upper_bd)
if (gens is not None):
gens = BSD.curve.saturation(gens)[0]
if (rank_lower_bd > rank_upper_bd):
raise RuntimeError(('Apparent contradiction: %d <= rank <= %d.' % (rank_lower_bd, rank_upper_bd)))
BSD.two_selmer_rank = ((rank_upper_bd + sha2_lower_bd) + BSD.two_tor_rk)
if (sha2_upper_bd == sha2_lower_bd):
BSD.rank = rank_lower_bd
BSD.bounds[2] = (sha2_lower_bd, sha2_upper_bd)
else:
BSD.rank = BSD.curve.rank(use_database=True)
sha2_upper_bd -= (BSD.rank - rank_lower_bd)
BSD.bounds[2] = (sha2_lower_bd, sha2_upper_bd)
if (verbosity > 0):
print('Unable to compute the rank exactly -- used database.')
if (rank_lower_bd > 1):
from sage.sets.primes import Primes
BSD.primes = Primes()
if return_BSD:
BSD.rank = rank_lower_bd
return BSD
return BSD.primes
if ((BSD.sha_an.ord(2) == 0) != (BSD.bounds[2][1] == 0)):
raise RuntimeError(('Apparent contradiction: %d <= rank(sha[2]) <= %d, but ord_2(sha_an) = %d' % (sha2_lower_bd, sha2_upper_bd, BSD.sha_an.ord(2))))
if ((BSD.bounds[2][0] == BSD.sha_an.ord(2)) and (BSD.sha_an.ord(2) == BSD.bounds[2][1])):
if (verbosity > 0):
print('p = 2: True by 2-descent')
BSD.primes = []
BSD.bounds.pop(2)
BSD.proof[2] = ['2-descent']
else:
BSD.primes = [2]
BSD.proof[2] = [(('2-descent',) + BSD.bounds[2])]
if ((len(gens) > rank_lower_bd) or (rank_lower_bd > rank_upper_bd)):
raise RuntimeError('Something went wrong with 2-descent.')
if (BSD.rank != len(gens)):
gens = BSD.curve.gens(proof=True)
if (BSD.rank != len(gens)):
raise RuntimeError('Could not get generators')
BSD.gens = [BSD.curve.point(x, check=True) for x in gens]
if (BSD.rank != BSD.curve.analytic_rank()):
raise RuntimeError(('It seems that the rank conjecture does not hold for this curve (%s)! This may be a counterexample to BSD, but is more likely a bug.' % BSD.curve))
kolyvagin_primes = []
heegner_index = None
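# Compute a Heegner index, or at least an upper bound on it, from the first few Heegner discriminants; its odd prime divisors feed into Kolyvagin's bound below.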
if (BSD.rank == 0):
for D in BSD.curve.heegner_discriminants_list(10):
max_height = max(13, BSD.curve.quadratic_twist(D).CPS_height_bound())
heegner_primes = (- 1)
while (heegner_primes == (- 1)):
if (max_height > 21):
break
(heegner_primes, _, exact) = BSD.curve.heegner_index_bound(D, max_height=max_height)
max_height += 1
if isinstance(heegner_primes, list):
break
if (not isinstance(heegner_primes, list)):
raise RuntimeError('Tried 10 Heegner discriminants, and heegner_index_bound failed each time.')
if (exact is not False):
heegner_index = exact
BSD.heegner_indexes[D] = exact
else:
BSD.heegner_index_upper_bound[D] = max((heegner_primes + [1]))
if (2 in heegner_primes):
heegner_primes.remove(2)
else:
for D in BSD.curve.heegner_discriminants_list(10):
I = BSD.curve.heegner_index(D)
J = I.is_int()
if (J[0] and (J[1] > 0)):
I = J[1]
else:
J = (2 * I).is_int()
if (J[0] and (J[1] > 0)):
I = J[1]
else:
continue
heegner_index = I
BSD.heegner_indexes[D] = I
break
heegner_primes = [p for p in prime_divisors(heegner_index) if (p != 2)]
assert ((BSD.sha_an in ZZ) and (BSD.sha_an > 0))
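# Assemble the initial set of primes p at which BSD is not yet known: the CM case uses Rubin's results, the non-CM case uses Kolyvagin together with the non-surjective primes of the mod-p Galois representation.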
if BSD.curve.has_cm():
if (BSD.curve.analytic_rank() == 0):
if (verbosity > 0):
print(' p >= 5: true by Rubin')
BSD.primes.append(3)
else:
K = QuadraticField(BSD.curve.cm_discriminant(), 'a')
D_K = K.disc()
D_E = BSD.curve.discriminant()
if (len(K.factor(3)) == 1):
BSD.primes.append(3)
for p in prime_divisors(D_K):
if (p >= 5):
BSD.primes.append(p)
for p in prime_divisors(D_E):
if ((p >= 5) and (D_K % p) and (len(K.factor(p)) == 1)):
BSD.primes.append(p)
for p in heegner_primes:
if ((p >= 5) and (D_E % p) and (D_K % p) and (len(K.factor(p)) == 1)):
kolyvagin_primes.append(p)
for p in prime_divisors(BSD.sha_an):
if ((p >= 5) and (D_K % p) and (len(K.factor(p)) == 1)):
if BSD.curve.is_good(p):
if ((verbosity > 2) and (p in heegner_primes) and (heegner_index is None)):
print(('ALERT: Prime p (%d) >= 5 dividing sha_an, good for E, inert in K, in heegner_primes, should not divide the actual Heegner index' % p))
if (p not in heegner_primes):
raise RuntimeError(('p = %d divides sha_an, is of good reduction for E, inert in K, and does not divide the Heegner index. This may be a counterexample to BSD, but is more likely a bug. %s' % (p, BSD.curve)))
if (verbosity > 0):
print(('True for p not in {%s} by Kolyvagin (via Stein & Lum -- unpublished) and Rubin.' % str(list(set(BSD.primes).union(set(kolyvagin_primes))))[1:(- 1)]))
BSD.proof['finite'] = copy(BSD.primes)
else:
BSD.primes += [p for p in galrep.non_surjective() if (p != 2)]
for p in heegner_primes:
if (p not in BSD.primes):
BSD.primes.append(p)
for p in prime_divisors(BSD.sha_an):
if ((p not in BSD.primes) and (p != 2)):
BSD.primes.append(p)
if (verbosity > 0):
s = str(BSD.primes)[1:(- 1)]
if (2 not in BSD.primes):
if (not s):
s = '2'
else:
s = ('2, ' + s)
print((('True for p not in {' + s) + '} by Kolyvagin.'))
BSD.proof['finite'] = copy(BSD.primes)
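# Where the mod-p representation is surjective and the reduction is not additive, try the Stein-Wuthrich p-primary bound on Sha.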
primes_to_remove = []
for p in BSD.primes:
if (p == 2):
continue
if (galrep.is_surjective(p) and (not BSD.curve.has_additive_reduction(p))):
if BSD.curve.has_nonsplit_multiplicative_reduction(p):
if (BSD.rank > 0):
continue
if (p == 3):
if ((not (BSD.curve.is_ordinary(p) and BSD.curve.is_good(p))) and (not BSD.curve.has_split_multiplicative_reduction(p))):
continue
if (BSD.rank > 0):
continue
if (verbosity > 1):
print((' p = %d: Trying p_primary_bound' % p))
p_bound = BSD.Sha.p_primary_bound(p)
if (p in BSD.proof):
BSD.proof[p].append(('Stein-Wuthrich', p_bound))
else:
BSD.proof[p] = [('Stein-Wuthrich', p_bound)]
if ((BSD.sha_an.ord(p) == 0) and (p_bound == 0)):
if (verbosity > 0):
print(('True for p=%d by Stein-Wuthrich.' % p))
primes_to_remove.append(p)
else:
if (p in BSD.bounds):
BSD.bounds[p] = (BSD.bounds[p][0], min(BSD.bounds[p][1], p_bound))
else:
BSD.bounds[p] = (0, p_bound)
print(((('Analytic %d-rank is ' % p) + str(BSD.sha_an.ord(p))) + (', actual %d-rank is at most %d.' % (p, p_bound))))
print(' by Stein-Wuthrich.\n')
for p in primes_to_remove:
BSD.primes.remove(p)
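# Kolyvagin's bound applies at the remaining primes with surjective mod-p representation; Cha's and Lawson-Wuthrich's criteria extend it to further primes.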
kolyvagin_primes = []
for p in BSD.primes:
if (p == 2):
continue
if galrep.is_surjective(p):
kolyvagin_primes.append(p)
for p in kolyvagin_primes:
BSD.primes.remove(p)
D_K = QuadraticField(D, 'a').disc()
for p in BSD.primes:
if (p == 2):
continue
if (((D_K % p) != 0) and ((BSD.N % (p ** 2)) != 0) and galrep.is_irreducible(p)):
if (verbosity > 0):
print(("Kolyvagin's bound for p = %d applies by Cha." % p))
if (p in BSD.proof):
BSD.proof[p].append('Cha')
else:
BSD.proof[p] = ['Cha']
kolyvagin_primes.append(p)
for p in BSD.primes:
if ((p in kolyvagin_primes) or (p == 2) or ((D_K % p) == 0)):
continue
crit_lw = False
if ((p > 11) or (p == 7)):
crit_lw = True
elif (p == 11):
if ((BSD.N != 121) or (BSD.curve.label() != '121c2')):
crit_lw = True
elif galrep.is_irreducible(p):
crit_lw = True
else:
phis = BSD.curve.isogenies_prime_degree(p)
if (len(phis) != 1):
crit_lw = True
else:
C = phis[0].codomain()
if (p == 3):
if (((BSD.curve.torsion_order() % p) != 0) and ((C.torsion_order() % p) != 0)):
crit_lw = True
else:
Et = BSD.curve.quadratic_twist(5)
if (((Et.torsion_order() % p) != 0) and ((C.torsion_order() % p) != 0)):
crit_lw = True
if crit_lw:
if (verbosity > 0):
print(("Kolyvagin's bound for p = %d applies by Lawson-Wuthrich" % p))
kolyvagin_primes.append(p)
if (p in BSD.proof):
BSD.proof[p].append('Lawson-Wuthrich')
else:
BSD.proof[p] = ['Lawson-Wuthrich']
for p in kolyvagin_primes:
if (p in BSD.primes):
BSD.primes.remove(p)
primes_to_remove = []
for p in kolyvagin_primes:
if (p == 2):
continue
if (p not in heegner_primes):
ord_p_bound = 0
elif (heegner_index is not None):
ord_p_bound = (2 * heegner_index.ord(p))
m_max = max([BSD.curve.tamagawa_number(q).ord(p) for q in BSD.N.prime_divisors()])
if (m_max > 0):
if (verbosity > 0):
print(("Jetchev's results apply (at p = %d) with m_max =" % p), m_max)
if (p in BSD.proof):
BSD.proof[p].append(('Jetchev', m_max))
else:
BSD.proof[p] = [('Jetchev', m_max)]
ord_p_bound -= (2 * m_max)
else:
for D in BSD.heegner_index_upper_bound:
M = BSD.heegner_index_upper_bound[D]
ord_p_bound = 0
while ((p ** (ord_p_bound + 1)) <= (M ** 2)):
ord_p_bound += 1
ord_p_bound *= 2
break
if (p in BSD.proof):
BSD.proof[p].append(('Kolyvagin', ord_p_bound))
else:
BSD.proof[p] = [('Kolyvagin', ord_p_bound)]
if ((BSD.sha_an.ord(p) == 0) and (ord_p_bound == 0)):
if (verbosity > 0):
print(('True for p = %d by Kolyvagin bound' % p))
primes_to_remove.append(p)
elif (BSD.sha_an.ord(p) > ord_p_bound):
raise RuntimeError(('p = %d: ord_p_bound == %d, but sha_an.ord(p) == %d. This appears to be a counterexample to BSD, but is more likely a bug.' % (p, ord_p_bound, BSD.sha_an.ord(p))))
elif (p in BSD.bounds):
low = BSD.bounds[p][0]
BSD.bounds[p] = (low, min(BSD.bounds[p][1], ord_p_bound))
else:
BSD.bounds[p] = (0, ord_p_bound)
for p in primes_to_remove:
kolyvagin_primes.remove(p)
BSD.primes = list(set(BSD.primes).union(set(kolyvagin_primes)))
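# For rank 0 curves without CM, Kato's Euler system bounds ord_p(#Sha) by the valuation of L(E,1)/Omega at good primes with surjective mod-p representation.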
if ((BSD.rank == 0) and (not BSD.curve.has_cm())):
L_over_Omega = BSD.curve.lseries().L_ratio()
kato_primes = BSD.Sha.bound_kato()
primes_to_remove = []
for p in BSD.primes:
if (p == 2):
continue
if (p not in kato_primes):
if (verbosity > 0):
print(('Kato further implies that #Sha[%d] is trivial.' % p))
primes_to_remove.append(p)
if (p in BSD.proof):
BSD.proof[p].append(('Kato', 0))
else:
BSD.proof[p] = [('Kato', 0)]
if ((p not in [2, 3]) and ((BSD.N % p) != 0)):
if galrep.is_surjective(p):
bd = L_over_Omega.valuation(p)
if (verbosity > 1):
print(('Kato implies that ord_p(#Sha[%d]) <= %d ' % (p, bd)))
if (p in BSD.proof):
BSD.proof[p].append(('Kato', bd))
else:
BSD.proof[p] = [('Kato', bd)]
if (p in BSD.bounds):
low = BSD.bounds[p][0]
BSD.bounds[p] = (low, min(BSD.bounds[p][1], bd))
else:
BSD.bounds[p] = (0, bd)
for p in primes_to_remove:
BSD.primes.remove(p)
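# For prime conductor, Mazur's results handle the primes where the mod-p representation is reducible.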
primes_to_remove = []
if BSD.N.is_prime():
for p in BSD.primes:
if (p == 2):
continue
if galrep.is_reducible(p):
primes_to_remove.append(p)
if (verbosity > 0):
print(('True for p=%s by Mazur' % p))
for p in primes_to_remove:
BSD.primes.remove(p)
if (p in BSD.proof):
BSD.proof[p].append('Mazur')
else:
BSD.proof[p] = ['Mazur']
BSD.primes.sort()
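# If only a bound on the Heegner index is known, try harder (larger height bounds, more discriminants) to sharpen the bound on ord_p(#Sha) at the remaining Kolyvagin primes.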
if (heegner_index is None):
if (max_height < 18):
max_height = 18
for D in BSD.heegner_index_upper_bound:
M = BSD.heegner_index_upper_bound[D]
for p in kolyvagin_primes:
if ((p not in BSD.primes) or (p == 3)):
continue
if (verbosity > 0):
print((' p = %d: Trying harder for Heegner index' % p))
obt = 0
while (((p ** ((BSD.sha_an.ord(p) / 2) + 1)) <= M) and (max_height < 22)):
if (verbosity > 2):
print(' trying max_height =', max_height)
old_bound = M
(M, _, exact) = BSD.curve.heegner_index_bound(D, max_height=max_height, secs_dc=secs_hi)
if (M == (- 1)):
max_height += 1
continue
if (exact is not False):
heegner_index = exact
BSD.heegner_indexes[D] = exact
M = exact
if (verbosity > 2):
print(' heegner index =', M)
else:
M = max((M + [1]))
if (verbosity > 2):
print(' bound =', M)
if (old_bound == M):
obt += 1
if (obt == 2):
break
max_height += 1
BSD.heegner_index_upper_bound[D] = min(M, BSD.heegner_index_upper_bound[D])
(low, upp) = BSD.bounds[p]
expn = 0
while ((p ** (expn + 1)) <= M):
expn += 1
if ((2 * expn) < upp):
upp = (2 * expn)
BSD.bounds[p] = (low, upp)
if (verbosity > 0):
print(' got better bound on ord_p =', upp)
if (low == upp):
if (upp != BSD.sha_an.ord(p)):
raise RuntimeError
else:
if (verbosity > 0):
print(' proven!')
BSD.primes.remove(p)
break
for p in kolyvagin_primes:
if ((p not in BSD.primes) or (p == 3)):
continue
for D in BSD.curve.heegner_discriminants_list(4):
if (D in BSD.heegner_index_upper_bound):
continue
print(' discriminant', D)
if (verbosity > 0):
print(('p = %d: Trying discriminant = %d for Heegner index' % (p, D)))
max_height = max(10, BSD.curve.quadratic_twist(D).CPS_height_bound())
obt = 0
while True:
if (verbosity > 2):
print(' trying max_height =', max_height)
old_bound = M
if (((p ** ((BSD.sha_an.ord(p) / 2) + 1)) > M) or (max_height >= 22)):
break
(M, _, exact) = BSD.curve.heegner_index_bound(D, max_height=max_height, secs_dc=secs_hi)
if (M == (- 1)):
max_height += 1
continue
if (exact is not False):
heegner_index = exact
BSD.heegner_indexes[D] = exact
M = exact
if (verbosity > 2):
print(' heegner index =', M)
else:
M = max((M + [1]))
if (verbosity > 2):
print(' bound =', M)
if (old_bound == M):
obt += 1
if (obt == 2):
break
max_height += 1
BSD.heegner_index_upper_bound[D] = M
(low, upp) = BSD.bounds[p]
expn = 0
while ((p ** (expn + 1)) <= M):
expn += 1
if ((2 * expn) < upp):
upp = (2 * expn)
BSD.bounds[p] = (low, upp)
if (verbosity > 0):
print(' got better bound =', upp)
if (low == upp):
if (upp != BSD.sha_an.ord(p)):
raise RuntimeError
else:
if (verbosity > 0):
print(' proven!')
BSD.primes.remove(p)
break
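# Report the primes that remain unproven, together with the reduction type and the known bounds on ord_p(#Sha).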
if (verbosity > 1):
if BSD.primes:
print('Remaining primes:')
for p in BSD.primes:
s = (('p = ' + str(p)) + ': ')
if galrep.is_irreducible(p):
s += 'ir'
s += 'reducible, '
if (not galrep.is_surjective(p)):
s += 'not '
s += 'surjective, '
a_p = BSD.curve.an(p)
if BSD.curve.is_good(p):
if ((a_p % p) != 0):
s += 'good ordinary'
else:
s += 'good, non-ordinary'
else:
assert BSD.curve.is_minimal()
if (a_p == 0):
s += 'additive'
elif (a_p == 1):
s += 'split multiplicative'
elif (a_p == (- 1)):
s += 'non-split multiplicative'
if ((BSD.curve.tamagawa_product() % p) == 0):
s += ', divides a Tamagawa number'
if (p in BSD.bounds):
s += ('\n (%d <= ord_p <= %d)' % BSD.bounds[p])
else:
s += '\n (no bounds found)'
s += ('\n ord_p(#Sha_an) = %d' % BSD.sha_an.ord(p))
if (heegner_index is None):
may_divide = True
for D in BSD.heegner_index_upper_bound:
if ((p > BSD.heegner_index_upper_bound[D]) or (p not in kolyvagin_primes)):
may_divide = False
if may_divide:
s += '\n may divide the Heegner index, for which only a bound was computed'
print(s)
if BSD.curve.has_cm():
if (BSD.rank == 1):
BSD.proof['reason_finite'] = 'Rubin&Kolyvagin'
else:
BSD.proof['reason_finite'] = 'Rubin'
else:
BSD.proof['reason_finite'] = 'Kolyvagin'
BSD.curve = BSD.curve.label()
BSD.Sha = None
return (BSD if return_BSD else BSD.primes) |
def test_write_data_csv_backend(tmpdir):
statistics_dir = (tmpdir / 'statistics')
Path(statistics_dir).mkdir(parents=True, exist_ok=True)
config.configuration.statistics_output.report_dir = statistics_dir
data_1 = {'module': OutputVariable('module', 'foo'), 'value': OutputVariable('value', 'bar')}
data_2 = {'module': OutputVariable('module', 'bar'), 'value': OutputVariable('value', 'baz')}
backend = CSVStatisticsBackend()
backend.write_data(data_1)
backend.write_data(data_2) |
def send_message(messages):
access_token = os.environ.get('SCRIBE_GRAPHQL_ACCESS_TOKEN')
if (not access_token):
raise ValueError("Can't find access token from environment variable")
url = ''  # Scribe GraphQL endpoint (URL not included in the original snippet)
r = requests.post(url, data={'access_token': access_token, 'logs': json.dumps([{'category': 'perfpipe_pytorch_binary_size', 'message': json.dumps(message), 'line_escape': False} for message in messages])})
print(r.text)
r.raise_for_status() |
def check_ggui_availability():
if _ti_core.GGUI_AVAILABLE:
return
try:
import taichi
wheel_tag = try_get_wheel_tag(taichi)
if ((platform.system() == 'Linux') and wheel_tag and ('manylinux2014' in wheel_tag)):
raise GGUINotAvailableException('GGUI is not available since you have installed a restricted version of taichi. Please see yellow warning messages printed during startup for details.')
except GGUINotAvailableException:
raise
except Exception:
pass
raise GGUINotAvailableException('GGUI is not available.') |
class TStrPool64(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_snap.TStrPool64_swiginit(self, _snap.new_TStrPool64(*args))
__swig_destroy__ = _snap.delete_TStrPool64
def Save(self, SOut):
return _snap.TStrPool64_Save(self, SOut)
def New(MxBfL=0, GrowBy=16):
return _snap.TStrPool64_New(MxBfL, GrowBy)
New = staticmethod(New)
def Load(SIn, LoadCompact=True):
return _snap.TStrPool64_Load(SIn, LoadCompact)
Load = staticmethod(Load)
def GetMemUsed(self):
return _snap.TStrPool64_GetMemUsed(self)
def Empty(self):
return _snap.TStrPool64_Empty(self)
def Len(self):
return _snap.TStrPool64_Len(self)
def Reserved(self):
return _snap.TStrPool64_Reserved(self)
def Clr(self, DoDel=False):
return _snap.TStrPool64_Clr(self, DoDel)
def Cmp(self, Offset, Str):
return _snap.TStrPool64_Cmp(self, Offset, Str)
def AddStr(self, Str):
return _snap.TStrPool64_AddStr(self, Str)
def GetStr(self, StrId):
return _snap.TStrPool64_GetStr(self, StrId) |
def test_loadarff_dataframe():
contents = ''.join(EXPECTED_NO_QUOTES)
with StringIO(contents) as fp:
actual_df = loadarff(fp)
expected_df = pd.DataFrame.from_dict(OrderedDict([('attr_nominal', pd.Series(pd.Categorical.from_codes([1, 2, 0, (- 1), 2, 1], ['beer', 'water', 'wine']))), ('attr_nominal_spaces', pd.Series(pd.Categorical.from_codes([2, 0, (- 1), 1, 0, 1], ['hard liquor', 'mate', 'red wine'])))]))
tm.assert_frame_equal(expected_df, actual_df, check_exact=True) |
class DecoderLayer(nn.Module):
def __init__(self, config):
super(DecoderLayer, self).__init__()
self.slf_attn = DecoderAttention(config)
self.enc_attn = DecoderAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None):
(slf_output, _) = self.slf_attn(dec_input, dec_input, dec_input, slf_attn_mask)
(dec_output, dec_att_scores) = self.enc_attn(slf_output, enc_output, enc_output, dec_enc_attn_mask)
intermediate_output = self.intermediate(dec_output)
dec_output = self.output(intermediate_output, dec_output)
return (dec_output, dec_att_scores) |
def sage_include_directories(use_sources=False):
if use_sources:
dirs = [SAGE_SRC]
else:
import sage
dirs = [os.path.dirname(directory) for directory in sage.__path__]
try:
import numpy
dirs.append(numpy.get_include())
except ModuleNotFoundError:
pass
dirs.append(sysconfig.get_config_var('INCLUDEPY'))
return dirs |
class VocabFromText(VocabDict):
DEFAULT_TOKENS = [VocabDict.PAD_TOKEN, VocabDict.UNK_TOKEN, VocabDict.START_TOKEN, VocabDict.END_TOKEN]
def __init__(self, sentences, min_count=1, regex=SENTENCE_SPLIT_REGEX, keep=None, remove=None, only_unk_extra=False):
if (keep is None):
keep = []
if (remove is None):
remove = []
token_counter = Counter()
for sentence in sentences:
tokens = tokenize(sentence, regex=regex, keep=keep, remove=remove)
token_counter.update(tokens)
token_list = []
for token in token_counter:
if (token_counter[token] >= min_count):
token_list.append(token)
extras = self.DEFAULT_TOKENS
if only_unk_extra:
extras = [self.UNK_TOKEN]
self.word_list = (extras + token_list)
self._build() |
class AutoModelForMultipleChoice(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def test_fpn_extra_convs_outputs():
outs = fpn_neck_config('fpn_extra_convs_outputs')
ort_validate(*outs) |
def assert_omp_single_thread():
omp_num_threads = os.environ.get('OMP_NUM_THREADS', None)
if (omp_num_threads != '1'):
logging.getLogger('replay').warning('Environment variable "OMP_NUM_THREADS" is set to "%s". Set it to 1 if the working process freezes.', omp_num_threads) |
def eval_words(args, target, data_test):
if (args.data == 'yelp'):
bounds = {'Upper': load_result('results/res_model_yelp_1_discrete_2_1.json'), 'Ours': load_result('results/res_model_yelp_1_baf_2_1.json')}
else:
bounds = {'Upper': load_result('res_model_sst_1_discrete_2_1_100s.json'), 'Ours': load_result('res_model_sst_1_baf_2_1_100s.json')}
label_dict = {}
for example in data_test:
for i in range(len(example['sent_a'])):
if ('word_labels' in example):
label_dict[example['sent_a'][i].lower()] = example['word_labels'][i]
words_sum_top = {}
words_sum_bottom = {}
words_top = {}
words_bottom = {}
for method in methods:
words_sum_top[method] = np.zeros(100)
words_sum_bottom[method] = np.zeros(100)
words_top[method] = []
words_bottom[method] = []
def add(method, w):
w = sorted(w, key=(lambda x: x[0]), reverse=True)
if (method in ['Upper', 'Ours']):
w = w[::(- 1)]
for i in range(len(w)):
if (w[i][1] in label_dict):
words_sum_top[method][i] += abs((label_dict[w[i][1]] - 2))
words_top[method].append(w[0][1])
w = w[::(- 1)]
for i in range(len(w)):
if (w[i][1] in label_dict):
words_sum_bottom[method][i] += abs((label_dict[w[i][1]] - 2))
words_bottom[method].append(w[0][1])
for (t, example) in enumerate(bounds['Ours']['examples']):
tokens = copy.deepcopy(example['tokens'])
sent = ''
for j in range(1, (len(tokens) - 1)):
cur = tokens[j]
if (cur[0] == '#'):
cur = cur[2:]
sent += (cur + ' ')
std = target.step([{'sent_a': sent.split(), 'label': int(example['label'])}], infer_grad=True)[(- 1)]
valid = ([0] * std['embedding_output'][0].shape[0])
for p in example['bounds']:
valid[p['position']] = True
for method in ['Upper', 'Ours']:
if (method in bounds):
w = []
for p in bounds[method]['examples'][t]['bounds']:
w.append((p['eps_normalized'], tokens[p['position']]))
add(method, w)
grad = torch.norm(std['gradients'][0], p=2, dim=(- 1))
w = []
for i in range(1, (len(tokens) - 1)):
if valid[i]:
w.append((float(grad[i]), tokens[i]))
add('Grad', w)
if (args.data == 'yelp'):
important_words = ['terrible', 'great', 'best', 'good', 'slow', 'perfect', 'typical', 'decadent']
for word_list in [words_top, words_bottom]:
if (word_list == words_top):
type = 'Most'
else:
type = 'Least'
k = 10
for (t, method) in enumerate(methods):
print(('%s & 0.00 & ' % method), end='')
used = {}
for (i, w) in enumerate(word_list[method][:k]):
used[w] = True
if (w == '&'):
_w = '\\&'
else:
_w = w
if (w in important_words):
print(('\\textbf{\\texttt{%s}}' % _w), end='')
else:
print(('\\texttt{%s}' % _w), end='')
if ((i + 1) < len(word_list[method][:k])):
print(' /', end=' ')
print('\\\\')
print()
else:
cnt = len(bounds[method]['examples'])
for method in methods:
ours = (method == 'Ours')
print(method)
print('{:.2f}'.format(((np.sum(words_sum_top[method][0]) * 1.0) / cnt)))
print('{:.2f}'.format(((np.sum(words_sum_bottom[method][0]) * 1.0) / cnt))) |
@pytest.mark.parametrize('packing_boundary,ext_type,prompt_prefix,prompt_postfix,articles,gold_tokenized_sequences,gold_unfinished_sequence', [(BoundaryType.JSONL, FileExtension.TXT, None, None, ['hi bye', 'hi bye', 'hi hi'], [[], [], [get_tokenized_seq([Token(1, COMP), Token(2, COMP), Token(0, SEP), Token(1, COMP), Token(2, COMP), Token(0, SEP)])]], get_tokenized_seq([Token(1, COMP), Token(1, COMP), Token(0, SEP)])), (BoundaryType.JSONL, FileExtension.JSONL, None, None, ['{"prompt": "hi", "completion": "bye"}', '{"prompt": "hi", "completion": "bye"}', '{ "prompt": "hi", "completion": "hi"}'], [[], [], [get_tokenized_seq([Token(1, PROMPT), Token(2, COMP), Token(0, SEP), Token(1, PROMPT), Token(2, COMP), Token(0, SEP)])]], get_tokenized_seq([Token(1, PROMPT), Token(1, COMP), Token(0, SEP)]))])
def test_multiple__call__(article_tokenizer: ArticleTokenizer, articles: List[Optional[str]], gold_tokenized_sequences: List[List[TokenizedSequence]], gold_unfinished_sequence: List[TokenizedSequence]):
for (article, gold_tokenized_sequence) in zip(articles, gold_tokenized_sequences):
tokenized_sequence = article_tokenizer(article)
assert (tokenized_sequence == gold_tokenized_sequence)
assert (article_tokenizer.packer.unfinished_sequence == gold_unfinished_sequence) |
def _type_is_enforceable(layout: ak.contents.Content, type_: ak.types.Type) -> _TypeEnforceableResult:
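# Recursively walk the layout and the target type in lockstep, returning whether the layout can be converted to the type and whether the conversion would require repacking the underlying buffers.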
if layout.is_unknown:
return _TypeEnforceableResult(is_enforceable=True, requires_packing=False)
elif isinstance(type_, ak.types.UnknownType):
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif layout.is_option:
if isinstance(type_, ak.types.OptionType):
if isinstance(type_.content, ak.types.UnknownType):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=False)
else:
return _type_is_enforceable(layout.content, type_.content)
else:
(content_is_enforceable, content_needs_packed) = _type_is_enforceable(layout.content, type_)
return _TypeEnforceableResult(is_enforceable=content_is_enforceable, requires_packing=True)
elif layout.is_indexed:
return _type_is_enforceable(layout.content, type_)
elif isinstance(type_, ak.types.OptionType):
if isinstance(type_.content, ak.types.UnknownType):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=False)
else:
return _type_is_enforceable(layout, type_.content)
elif layout.is_union:
if isinstance(type_, ak.types.UnionType):
n_type_contents = len(type_.contents)
n_layout_contents = len(layout.contents)
if (n_type_contents > n_layout_contents):
ix_contents = range(n_type_contents)
for ix_perm_contents in permutations(ix_contents, n_layout_contents):
retained_types = [type_.contents[j] for j in ix_perm_contents]
if all((_layout_has_type(c, t) for (c, t) in zip(layout.contents, retained_types))):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=False)
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif (n_layout_contents > n_type_contents):
ix_contents = range(n_layout_contents)
for ix_perm_contents in permutations(ix_contents, n_type_contents):
retained_contents = [layout.contents[j] for j in ix_perm_contents]
if all((_layout_has_type(c, t) for (c, t) in zip(retained_contents, type_.contents))):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=True)
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
else:
ix_contents = range(n_type_contents)
for ix_perm_contents in permutations(ix_contents):
permuted_types = [type_.contents[j] for j in ix_perm_contents]
content_matches_type = [_layout_has_type(c, t) for (c, t) in zip(layout.contents, permuted_types)]
n_matching = sum(content_matches_type, 0)
if (n_matching == len(type_.contents)):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=False)
elif (n_matching == (len(type_.contents) - 1)):
for (tag, content_type, is_match) in zip(range(len(layout.contents)), permuted_types, content_matches_type):
if (not is_match):
return _type_is_enforceable(layout.contents[tag], content_type)
raise AssertionError()
else:
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
else:
contents_enforceable = [_type_is_enforceable(content, type_) for (tag, content) in enumerate(layout.contents)]
if all((c.is_enforceable for c in contents_enforceable)):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=any((c.requires_packing for c in contents_enforceable)))
else:
for (tag, content) in enumerate(layout.contents):
if _layout_has_type(content, type_):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=False)
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif isinstance(type_, ak.types.UnionType):
for (_i, content_type) in enumerate(type_.contents):
if _layout_has_type(layout, content_type):
return _type_is_enforceable(layout, content_type)
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif layout.is_regular:
if isinstance(type_, ak.types.RegularType):
if (layout.size == type_.size):
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
return _type_is_enforceable(layout.content, type_.content)
elif isinstance(type_, ak.types.ListType):
return _type_is_enforceable(layout.content, type_.content)
else:
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif layout.is_list:
if isinstance(type_, ak.types.RegularType):
return _type_is_enforceable(layout.content, type_.content)
elif isinstance(type_, ak.types.ListType):
return _type_is_enforceable(layout.content, type_.content)
else:
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif layout.is_numpy:
for _ in range((layout.purelist_depth - 1)):
if (not isinstance(type_, ak.types.RegularType)):
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
type_ = type_.content
if isinstance(type_, ak.types.NumpyType):
return _TypeEnforceableResult(is_enforceable=True, requires_packing=(primitive_to_dtype(type_.primitive) != layout.dtype))
else:
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
elif layout.is_record:
if isinstance(type_, ak.types.RecordType):
if (type_.is_tuple and layout.is_tuple):
type_contents = iter(type_.contents)
contents_enforceable = [_type_is_enforceable(c, t) for (c, t) in zip(layout.contents, type_contents)]
for next_type in type_contents:
if (not isinstance(next_type, ak.types.OptionType)):
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
return _TypeEnforceableResult(is_enforceable=all((c.is_enforceable for c in contents_enforceable)), requires_packing=any((c.requires_packing for c in contents_enforceable)))
elif (not (type_.is_tuple or layout.is_tuple)):
layout_fields = frozenset(layout._fields)
existing_fields = []
new_fields = []
for field in type_.fields:
if (field in layout_fields):
existing_fields.append(field)
else:
new_fields.append(field)
contents_enforceable = [_type_is_enforceable(layout.content(f), type_.content(f)) for f in existing_fields]
for field in new_fields:
field_type = type_.content(field)
if (not isinstance(field_type, ak.types.OptionType)):
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
return _TypeEnforceableResult(is_enforceable=all((c.is_enforceable for c in contents_enforceable)), requires_packing=any((c.requires_packing for c in contents_enforceable)))
else:
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
else:
return _TypeEnforceableResult(is_enforceable=False, requires_packing=False)
else:
raise TypeError(layout) |
def to_ordered_dict(obj: Base, skip_missing: bool=True, deepcopy: bool=True) -> OrderedDict:
return obj.to_ordered_dict(skip_missing=skip_missing, deepcopy=deepcopy) |
def test_abs_complex():
time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
in_dim = Dim(7, name='in')
extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='complex64')})
class _Net(rf.Module):
def __call__(self, x: Tensor) -> Tensor:
return rf.abs(x)
def _forward_step(*, model: _Net, extern_data: TensorDict):
out = model(extern_data['data'])
out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))
run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step) |