code stringlengths 101 5.91M |
|---|
def test():
    """Round-trip an awkward record array (with a missing value) to numpy masked arrays."""
    converted = ak.operations.to_numpy(
        ak.highlevel.Array({'A': [1, 2, 3], 'B': [4, None, 5]})
    )
    assert converted['A'].data.tolist() == [1, 2, 3]
    assert converted['A'].mask.tolist() == [False, False, False]
    assert converted['B'].data[0] == 4
    assert converted['B'].data[2] == 5
    # Re-check that reading field B did not disturb A's mask.
    assert converted['A'].mask.tolist() == [False, False, False]
    assert converted['B'].mask.tolist() == [False, True, False]
class ConditionalDecoder(nn.Module):
    """Two-cell attention decoder ("conditional GRU/LSTM" layout).

    The first RNN cell consumes the previous target embedding, attention over
    the source context is computed from its hidden state, and a second cell
    consumes the attended context vector to produce the output state.

    Fixes relative to the original:
      * ``nn.NLLLoss(size_average=False)`` (long deprecated) is now
        ``reduction='sum'`` — numerically identical.
      * Initial/accumulator tensors are allocated on the device of their
        reference tensors instead of a hard-coded ``.cuda()`` call, so the
        module also works on CPU.
    """

    def __init__(self, input_size, hidden_size, ctx_size_dict, ctx_name, n_vocab,
                 rnn_type, tied_emb=False, dec_init='zero', dec_init_activ='tanh',
                 dec_init_size=None, att_type='mlp', att_activ='tanh',
                 att_bottleneck='ctx', att_temp=1.0, transform_ctx=True,
                 mlp_bias=False, dropout_out=0, emb_maxnorm=None, emb_gradscale=False):
        super().__init__()
        self.rnn_type = rnn_type.upper()
        assert self.rnn_type in ('GRU', 'LSTM'), "rnn_type '{}' not known".format(rnn_type)
        assert dec_init in ('zero', 'mean_ctx', 'feats'), "dec_init '{}' not known".format(dec_init)
        RNN = getattr(nn, '{}Cell'.format(self.rnn_type))
        # LSTM states are (h, c); GRU has only h.
        self.n_states = 1 if self.rnn_type == 'GRU' else 2
        if self.rnn_type == 'GRU':
            # GRU state needs no packing/unpacking.
            self._rnn_unpack_states = lambda x: x
            self._rnn_pack_states = lambda x: x
        elif self.rnn_type == 'LSTM':
            self._rnn_unpack_states = self._lstm_unpack_states
            self._rnn_pack_states = self._lstm_pack_states
        # Select the initial-state builder: _rnn_init_{zero,mean_ctx,feats}.
        self._init_func = getattr(self, '_rnn_init_{}'.format(dec_init))
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.ctx_size_dict = ctx_size_dict
        self.ctx_name = ctx_name
        self.n_vocab = n_vocab
        self.tied_emb = tied_emb
        self.dec_init = dec_init
        self.dec_init_size = dec_init_size
        self.dec_init_activ = dec_init_activ
        self.att_type = att_type
        self.att_bottleneck = att_bottleneck
        self.att_activ = att_activ
        self.att_temp = att_temp
        self.transform_ctx = transform_ctx
        self.mlp_bias = mlp_bias
        self.dropout_out = dropout_out
        self.emb_maxnorm = emb_maxnorm
        self.emb_gradscale = emb_gradscale
        # Target embedding table; index 0 is the padding token.
        self.emb = nn.Embedding(self.n_vocab, self.input_size, padding_idx=0,
                                max_norm=self.emb_maxnorm, scale_grad_by_freq=self.emb_gradscale)
        self.att = Attention(self.ctx_size_dict[self.ctx_name], self.hidden_size,
                             transform_ctx=self.transform_ctx, mlp_bias=self.mlp_bias,
                             att_type=self.att_type, att_activ=self.att_activ,
                             att_bottleneck=self.att_bottleneck, temp=self.att_temp)
        if self.dec_init in ('mean_ctx', 'feats'):
            if self.dec_init == 'mean_ctx':
                # The init FF consumes the context vectors directly.
                self.dec_init_size = self.ctx_size_dict[self.ctx_name]
            self.ff_dec_init = FF(self.dec_init_size, self.hidden_size * self.n_states,
                                  activ=self.dec_init_activ)
        self.dec0 = RNN(self.input_size, self.hidden_size)
        self.dec1 = RNN(self.hidden_size, self.hidden_size)
        if self.dropout_out > 0:
            self.do_out = nn.Dropout(p=self.dropout_out)
        self.hid2out = FF(self.hidden_size, self.input_size, bias_zero=True, activ='tanh')
        self.out2prob = FF(self.input_size, self.n_vocab)
        if self.tied_emb:
            # Share the output projection with the embedding matrix.
            self.out2prob.weight = self.emb.weight
        # Summed NLL over non-padding targets (was deprecated size_average=False).
        self.nll_loss = nn.NLLLoss(reduction='sum', ignore_index=0)

    def _lstm_pack_states(self, h):
        """Concatenate (h, c) into a single tensor along the last dim."""
        return torch.cat(h, dim=-1)

    def _lstm_unpack_states(self, h):
        """Split a packed state back into (h, c)."""
        return torch.split(h, self.hidden_size, dim=-1)

    def _rnn_init_zero(self, ctx_dict):
        """All-zero initial state, created on the context's device."""
        ctx, _ = ctx_dict[self.ctx_name]
        # Was: Variable(h_0).cuda() — Variable is deprecated and .cuda() broke CPU runs.
        return torch.zeros(ctx.shape[1], self.hidden_size * self.n_states, device=ctx.device)

    def _rnn_init_mean_ctx(self, ctx_dict):
        """Initial state from the (mask-aware) mean of the context."""
        ctx, ctx_mask = ctx_dict[self.ctx_name]
        if ctx_mask is None:
            return self.ff_dec_init(ctx.mean(0))
        # Masked mean: sum over time / number of valid timesteps per sample.
        return self.ff_dec_init(ctx.sum(0) / ctx_mask.sum(0).unsqueeze(1))

    def _rnn_init_feats(self, ctx_dict):
        """Initial state computed from the auxiliary 'feats' context."""
        ctx, _ = ctx_dict['feats']
        return self.ff_dec_init(ctx)

    def f_init(self, ctx_dict, **kwargs):
        """Reset attention bookkeeping and return the initial hidden state."""
        self.alphas = []
        return self._init_func(ctx_dict)

    def f_next(self, ctx_dict, y, h, **kwargs):
        """One decoding step: return (log-probs, packed next state)."""
        h1_c1 = self.dec0(y, self._rnn_unpack_states(h))
        h1 = get_rnn_hidden_state(h1_c1)
        # Attention over the source context using the first cell's state.
        self.txt_alpha_t, txt_z_t = self.att(h1.unsqueeze(0), *ctx_dict[self.ctx_name])
        h2_c2 = self.dec1(txt_z_t, h1_c1)
        h2 = get_rnn_hidden_state(h2_c2)
        logit = self.hid2out(h2)
        if self.dropout_out > 0:
            logit = self.do_out(logit)
        log_p = F.log_softmax(self.out2prob(logit), dim=-1)
        return log_p, self._rnn_pack_states(h2_c2)

    def forward(self, ctx_dict, y):
        """Teacher-forced decoding of target y (tsteps, batch) -> summed NLL loss.

        During evaluation the per-step log-probs are also collected for scoring.
        """
        loss = 0.0
        logps = None if self.training else torch.zeros(
            y.shape[0] - 1, y.shape[1], self.n_vocab, device=y.device)
        y_emb = self.emb(y)
        h = self.f_init(ctx_dict)
        for t in range(y_emb.shape[0] - 1):
            log_p, h = self.f_next(ctx_dict, y_emb[t], h)
            if not self.training:
                logps[t] = log_p.data
            # Predict token t+1 from input token t.
            loss += self.nll_loss(log_p, y[t + 1])
        return {'loss': loss, 'logps': logps}
def _conv_type_shape(im):
    """Return the (numpy shape, typestr) pair for converting image *im* to an array."""
    typ, extra = _MODE_CONV[im.mode]
    # Numpy shape is (rows, cols) == (height, width), the reverse of im.size.
    shape = (im.size[1], im.size[0])
    if extra is not None:
        # Multi-band modes add a trailing channel dimension.
        shape += (extra,)
    return shape, typ
def suppress_output():
    """Globally replace builtins.print so output only appears with force=True.

    After calling this, ``print(...)`` is silent unless the caller passes
    ``force=True`` (the keyword is stripped before forwarding).
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Emit only when the caller explicitly asks with a truthy force=.
        if kwargs.pop('force', False):
            builtin_print(*args, **kwargs)

    __builtin__.print = print
class SwishX(nn.Module):
    """Swish activation (x * sigmoid(x)) clipped from above at a learnable maximum."""

    def __init__(self, maxvalue=2.72):
        super(SwishX, self).__init__()
        # Learnable upper bound on the activation output.
        self.maximal = nn.Parameter(torch.FloatTensor([maxvalue]))

    def forward(self, x):
        swish = torch.sigmoid(x) * x
        # min(swish, maximal), written as a shifted clamp so gradients flow
        # through both the input and the learnable bound.
        return swish.sub(self.maximal).clamp(max=0.0).add(self.maximal)
def collate_fn(batch):
    """Flatten each sequence, zero-pad to equal length, and stack the labels.

    Returns (padded_data, labels) with padded_data shaped (batch, max_len).
    """
    sequences, labels = zip(*batch)
    flat = [s.reshape(-1) for s in sequences]
    padded = rnn_utils.pad_sequence(flat, batch_first=True, padding_value=0)
    return padded, torch.tensor(list(labels))
def pairwise_circleloss(embedding: torch.Tensor, targets: torch.Tensor, margin: float, gamma: float) -> torch.Tensor:
    """Circle loss over all pairs in the batch (Sun et al., CVPR 2020).

    Args:
        embedding: (N, D) features; L2-normalized internally.
        targets: (N,) integer class labels.
        margin: relaxation margin m (delta_p = 1 - m, delta_n = m).
        gamma: scale factor for the logits.

    Returns:
        Scalar mean loss over the batch.
    """
    embedding = F.normalize(embedding, dim=1)
    # Cosine similarity matrix of all pairs.
    dist_mat = torch.matmul(embedding, embedding.t())
    N = dist_mat.size(0)
    is_pos = targets.view(N, 1).expand(N, N).eq(targets.view(N, 1).expand(N, N).t()).float()
    is_neg = targets.view(N, 1).expand(N, N).ne(targets.view(N, 1).expand(N, N).t()).float()
    # Exclude self-similarity from the positive pairs.
    is_pos = is_pos - torch.eye(N, N, device=is_pos.device)

    s_p = dist_mat * is_pos
    s_n = dist_mat * is_neg

    # Self-paced weights (detached so they act as constants in the gradient).
    alpha_p = torch.clamp_min(-s_p.detach() + 1 + margin, min=0.0)
    alpha_n = torch.clamp_min(s_n.detach() + margin, min=0.0)
    delta_p = 1 - margin
    delta_n = margin

    # Mask out non-positive / non-negative entries with a large negative value
    # so they contribute ~0 to logsumexp. (The original multiplied the mask by
    # -0.0, which left masked entries at their unmasked values — no masking.)
    logit_p = -gamma * alpha_p * (s_p - delta_p) + (-99999999.0) * (1 - is_pos)
    logit_n = gamma * alpha_n * (s_n - delta_n) + (-99999999.0) * (1 - is_neg)

    loss = F.softplus(torch.logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()
    return loss
def randn(g, shapes, dtype, *options):
    """ONNX symbolic for randn: emit RandomNormal (static shape) or RandomNormalLike."""
    dtype = sym_help._get_const(dtype, 'i', 'dtype')
    if dtype is None:
        # Default scalar type 6 == float32.
        dtype = 6
    if not sym_help._is_packed_list(shapes):
        # Static shape: constant-fold it into a RandomNormal op.
        shape = sym_help._get_const(shapes, 'is', 'randn')
        return g.op('RandomNormal', shape_i=shape)
    # Dynamic shape: build a zero tensor of that shape and sample "like" it.
    shape_const = g.op(
        'ConstantOfShape', shapes,
        value_t=torch.tensor([0], dtype=sym_help.scalar_type_to_pytorch_type[6]))
    return g.op('RandomNormalLike', shape_const, dtype_i=sym_help.scalar_type_to_onnx[dtype])
def plot_prior_grad_RS(teacher, student):
    """Plot RS prior overlaps against the corresponding gradients of A.

    One panel per overlap (m_x, q_x, tau_x), each compared with the (scaled)
    partial derivative of A with respect to its conjugate hat-variable.
    """
    df = check_prior_grad_RS(teacher, student)
    fig, axs = plt.subplots(1, 3, figsize=(12, 4))
    # (overlap column, gradient column, gradient scale, overlap label, gradient label)
    panels = [
        ('mx', 'grad_mx_hat_A', 1, '$m_x$', '$\\partial_{\\widehat{m}_x^-} A$'),
        ('qx', 'grad_qx_hat_A', -2, '$q_x$', '$-2\\partial_{\\widehat{q}_x^-} A$'),
        ('tx', 'grad_tx_hat_A', -2, '$\\tau_x$', '$-2\\partial_{\\widehat{\\tau}_x^-} A$'),
    ]
    for ax, (col, grad_col, scale, label, grad_label) in zip(axs, panels):
        ax.plot(df['mx_hat'], df[col], '-', label=label)
        ax.plot(df['mx_hat'], scale * df[grad_col], '--', label=grad_label)
        ax.set(xlabel='$\\widehat{m}_x^-$')
        ax.legend()
    fig.suptitle(f'teacher={teacher}\nstudent={student}')
    fig.tight_layout(rect=[0, 0.03, 1, 0.9])
class LoggerFactory(object):
    """Factory for module loggers that write rotating log files under a directory."""

    @staticmethod
    def create(path, module_name):
        """Return a DEBUG-level logger writing to <path>/<module_name>.log.

        The directory is created if missing. Note: calling this twice with the
        same module_name attaches a second handler (duplicate log lines).

        Fixes: added the missing @staticmethod (the original had no `self`),
        used os.path.join instead of string concatenation, and replaced the
        try/except EEXIST dance with makedirs(exist_ok=True).
        """
        logger = logging.getLogger(module_name)
        logger.setLevel(logging.DEBUG)
        # Tolerates the directory already existing (also concurrent creation).
        os.makedirs(path, exist_ok=True)
        log_file = os.path.join(path, module_name + '.log')
        fh = RotatingFileHandler(log_file, maxBytes=1000000, backupCount=5)
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        return logger
class IsotropicMorphology2D():
    """Benchmark for isotropic (disk) erosion of a binary 2-D image."""

    # Benchmark parameter grid: image shape x structuring-element radius.
    param_names = ['shape', 'radius']
    params = [((512, 512),), (1, 3, 5, 15, 25, 40)]

    def setup(self, shape, radius):
        # Fixed seed for reproducibility; thresholding standard-normal noise
        # at 3.5 sigma makes almost every pixel True.
        generator = np.random.default_rng(123)
        self.image = generator.standard_normal(shape) < 3.5

    def time_erosion(self, shape, radius, *args):
        morphology.isotropic_erosion(self.image, radius)
def test_basic_pytest_graphql(testdir, graphql_path, graphql_url):
    """End-to-end check that schemathesis generates pytest cases for every
    GraphQL operation and all of them pass.

    NOTE(review): the original template contained two bare lines '()' and
    '(max_examples=10, ...)' — clearly decorator bodies whose '@schema.parametrize'
    and '@settings' prefixes were lost in transcription; restored below.
    (Consistent with the expected 4 passed tests and 40 Hypothesis calls = 4 x 10.)
    """
    testdir.make_test(f'''
schema = schemathesis.graphql.from_url('{graphql_url}')

@schema.parametrize()
@settings(max_examples=10, deadline=None, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test_(request, case):
    request.config.HYPOTHESIS_CASES += 1
    assert case.path == "{graphql_path}"
    assert case.operation.definition.field_name in case.body
    response = case.call()
    assert response.status_code == 200
    case.validate_response(response)
    case.call_and_validate()
''')
    result = testdir.runpytest('-v', '-s')
    result.assert_outcomes(passed=4)
    result.stdout.re_match_lines(['test_basic_pytest_graphql.py::test_\\[Query.getBooks] PASSED', 'test_basic_pytest_graphql.py::test_\\[Query.getAuthors] PASSED', 'test_basic_pytest_graphql.py::test_\\[Mutation.addBook] PASSED', 'test_basic_pytest_graphql.py::test_\\[Mutation.addAuthor] PASSED', 'Hypothesis calls: 40'])
class randint_gen(rv_discrete):
    """A uniform discrete random variable on the half-open integer range [low, high).

    Mirrors ``numpy.random.randint``: ``low`` is inclusive, ``high`` exclusive.
    """
    def _shape_info(self):
        # Two unbounded shape parameters; integrality is enforced in _argcheck.
        return [_ShapeInfo('low', True, ((- np.inf), np.inf), (False, False)), _ShapeInfo('high', True, ((- np.inf), np.inf), (False, False))]
    def _argcheck(self, low, high):
        # Parameters are valid when low < high and both are integral.
        return (((high > low) & _isintegral(low)) & _isintegral(high))
    def _get_support(self, low, high):
        # Support is the closed integer range [low, high - 1].
        return (low, (high - 1))
    def _pmf(self, k, low, high):
        # Constant mass 1/(high - low) inside the support, 0 outside.
        p = (np.ones_like(k) / (high - low))
        return np.where(((k >= low) & (k < high)), p, 0.0)
    def _cdf(self, x, low, high):
        k = floor(x)
        # P(X <= k) = (k - low + 1) / (high - low) for k inside the support.
        return (((k - low) + 1.0) / (high - low))
    def _ppf(self, q, low, high):
        vals = (ceil(((q * (high - low)) + low)) - 1)
        # Step back one where the CDF at vals - 1 already reaches q
        # (guards against roundoff in the ceil above).
        vals1 = (vals - 1).clip(low, high)
        temp = self._cdf(vals1, low, high)
        return np.where((temp >= q), vals1, vals)
    def _stats(self, low, high):
        (m2, m1) = (np.asarray(high), np.asarray(low))
        # Mean/variance of the discrete uniform on [m1, m2 - 1].
        mu = (((m2 + m1) - 1.0) / 2)
        d = (m2 - m1)
        var = (((d * d) - 1) / 12.0)
        g1 = 0.0
        # Excess kurtosis of the discrete uniform distribution.
        g2 = ((((- 6.0) / 5.0) * ((d * d) + 1.0)) / ((d * d) - 1.0))
        return (mu, var, g1, g2)
    def _rvs(self, low, high, size=None, random_state=None):
        """Draw samples, handling both scalar and array-valued bounds."""
        if ((np.asarray(low).size == 1) and (np.asarray(high).size == 1)):
            # Scalar bounds: defer directly to the RNG helper.
            return rng_integers(random_state, low, high, size=size)
        if (size is not None):
            # Broadcast array bounds to the requested output shape.
            low = np.broadcast_to(low, size)
            high = np.broadcast_to(high, size)
        # Per-element draw for heterogeneous (low, high) pairs.
        randint = np.vectorize(partial(rng_integers, random_state), otypes=[np.dtype(int)])
        return randint(low, high)
    def _entropy(self, low, high):
        # Entropy of a uniform distribution over (high - low) outcomes.
        return log((high - low))
class ConcatDataset(Dataset):
    """Concatenation of several datasets with a fixed total length.

    Each dataset i contributes round(weights[i] * total_samples) items:
    datasets larger than their quota are thinned (via SparseDataset), smaller
    ones are repeated wholesale, with the remainder filled by evenly spaced
    indices so the final index map has exactly total_samples entries.
    """
    def __init__(self, datasets, total_samples, weights=None):
        # Default: equal share for every dataset.
        if (weights is None):
            weights = [(1.0 / float(len(datasets))) for _ in range(len(datasets))]
        assert (abs((sum(weights) - 1.0)) < 1e-06), 'Sum of weights is {}. Should be 1'.format(sum(weights))
        # id_mapping[k] = (dataset index, sample index within that dataset).
        self.id_mapping = []
        self.samples_per_dataset = []
        for (i, (wt, ds)) in enumerate(zip(weights, datasets)):
            assert (0.0 < wt <= 1.0)
            num_samples_ds = int(round((wt * total_samples)))
            # Dataset exceeds its quota: thin it down first.
            # NOTE(review): the SparseDataset wrapper is local to this loop,
            # but __getitem__ later indexes the *original* datasets[i] with
            # indices built against the wrapper — looks like the wrapped ds
            # should be retained; confirm intended sampling behavior.
            if (num_samples_ds < len(ds)):
                ds = SparseDataset(ds, num_samples_ds)
            # Whole repetitions of the (possibly thinned) dataset ...
            repetitions = int(math.floor((num_samples_ds / float(len(ds)))))
            idxes = sum([list(range(len(ds))) for _ in range(repetitions)], [])
            # ... plus evenly spaced extra indices to hit the quota exactly.
            rem_idxes = torch.linspace(0, (len(ds) - 1), (num_samples_ds - len(idxes))).round().long().tolist()
            idxes += rem_idxes
            self.id_mapping.extend([(i, j) for j in idxes])
            self.samples_per_dataset.append(num_samples_ds)
        self.datasets = datasets
        self.weights = weights
        # Per-dataset rounded quotas must add up to the requested total.
        assert (len(self.id_mapping) == total_samples)
    def __len__(self):
        return len(self.id_mapping)
    def __getitem__(self, index):
        # Route the flat index through the precomputed (dataset, sample) map.
        (ds_idx, sample_idx) = self.id_mapping[index]
        return self.datasets[ds_idx][sample_idx]
def sig_for_ops(opname):
    """Return type-stub signature line(s) for a Tensor dunder operator.

    The stub shape depends on the operator family ``opname`` belongs to:
    binary, comparison, unary, or conversion to a Python scalar type.

    Raises:
        ValueError: if ``opname`` is not in any known operator set.
    """
    assert opname.endswith('__') and opname.startswith('__'), 'Unexpected op {}'.format(opname)
    name = opname[2:-2]
    if name in binary_ops:
        return ['def {}(self, other: Any) -> Tensor: ...'.format(opname)]
    if name in comparison_ops:
        # The emitted '# type: ignore' is part of the stub text itself.
        return ['def {}(self, other: Any) -> Tensor: ... # type: ignore'.format(opname)]
    if name in unary_ops:
        return ['def {}(self) -> Tensor: ...'.format(opname)]
    if name in to_py_type_ops:
        if name in {'bool', 'float', 'complex'}:
            tname = name
        elif name == 'nonzero':
            tname = 'bool'
        else:
            tname = 'int'
        if tname in {'float', 'int', 'bool', 'complex'}:
            # Qualify with builtins. so the stub type is unambiguous.
            tname = 'builtins.' + tname
        return ['def {}(self) -> {}: ...'.format(opname, tname)]
    # Was: raise Exception(...) — ValueError is more precise and remains a
    # subclass of Exception for any existing callers.
    raise ValueError('unknown op {!r}'.format(opname))
def commit_loss(x1, x2):
    """Sum-of-squares error between x1 and x2, normalized by per-sample feature count."""
    feature_count = np.prod(x1.shape[1:])
    total = nn.MSELoss(reduction='sum')(x1, x2)
    return total / feature_count
class TotalVariationDistance(Layer):
    """Keras layer: sum of absolute differences between two inputs along axis 1.

    If the inputs are probability vectors, the total variation distance is
    half of this sum; this layer returns the unhalved L1 distance.
    """
    def call(self, x):
        first, second = x[0], x[1]
        difference = Subtract()([first, second])
        return K.sum(K.abs(difference), axis=1)
class Data2VecAudioForCTC(metaclass=DummyObject):
    """Import-time placeholder for the real Data2VecAudioForCTC model.

    Instantiation goes through ``requires_backends``, which presumably raises
    an informative error when the 'torch' backend is unavailable — confirm
    against requires_backends' implementation.
    """
    # Backend(s) the real implementation depends on.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class BaseTrainer():
    """Base class for trainers: logging, tensorboard, checkpointing, device setup.

    Creates a timestamped run directory under ``args.log_path`` (and under
    ``args.save_path`` when present), wires up file + console logging, and
    records the run arguments.
    """
    def __init__(self, args: argparse.Namespace):
        self.args = args
        self._debug = self.args.debug
        # Run name: current timestamp, spaces replaced for filesystem safety.
        name = str(datetime.datetime.now()).replace(' ', '_')
        self._log_path = os.path.join(self.args.log_path, self.args.label, name)
        util.create_directories_dir(self._log_path)
        # Checkpoints are only written when a save path is configured.
        if hasattr(args, 'save_path'):
            self._save_path = os.path.join(self.args.save_path, self.args.label, name)
            util.create_directories_dir(self._save_path)
        # Per dataset label: mapping of metric key -> csv file path.
        self._log_paths = dict()
        log_formatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s')
        self._logger = logging.getLogger()
        util.reset_logger(self._logger)
        file_handler = logging.FileHandler(os.path.join(self._log_path, 'all.log'))
        file_handler.setFormatter(log_formatter)
        self._logger.addHandler(file_handler)
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(log_formatter)
        self._logger.addHandler(console_handler)
        if self._debug:
            self._logger.setLevel(logging.DEBUG)
        else:
            self._logger.setLevel(logging.INFO)
        # tensorboardx is optional; summaries are skipped when it is missing.
        self._summary_writer = (tensorboardx.SummaryWriter(self._log_path) if (tensorboardx is not None) else None)
        self._best_results = dict()
        self._log_arguments()
        self._device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.cpu)) else 'cpu'))
        self._gpu_count = torch.cuda.device_count()
        if (args.seed is not None):
            util.set_seed(args.seed)
    def _add_dataset_logging(self, *labels, data: Dict[(str, List[str])]):
        """Register one csv log file per (metric key, dataset label) pair."""
        for label in labels:
            dic = dict()
            for (key, columns) in data.items():
                path = os.path.join(self._log_path, ('%s_%s.csv' % (key, label)))
                util.create_csv(path, *columns)
                dic[key] = path
            self._log_paths[label] = dic
            # Best accuracy seen so far for this label (see _save_best).
            self._best_results[label] = 0
    def _log_arguments(self):
        """Persist run arguments to disk and (optionally) tensorboard."""
        util.save_dict(self._log_path, self.args, 'args')
        if (self._summary_writer is not None):
            util.summarize_dict(self._summary_writer, self.args, 'args')
    def _log_tensorboard(self, dataset_label: str, data_label: str, data: object, iteration: int):
        """Write a scalar to tensorboard (no-op without a summary writer)."""
        if (self._summary_writer is not None):
            self._summary_writer.add_scalar(('data/%s/%s' % (dataset_label, data_label)), data, iteration)
    def _log_csv(self, dataset_label: str, data_label: str, *data: Tuple[object]):
        """Append one row to the csv registered for (dataset_label, data_label)."""
        logs = self._log_paths[dataset_label]
        util.append_csv(logs[data_label], *data)
    def _save_best(self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, optimizer: optimizer, accuracy: float, iteration: int, label: str, extra=None):
        """Checkpoint the model iff accuracy beats the best seen for label.

        NOTE(review): the ``optimizer: optimizer`` annotation refers to a name
        imported elsewhere in the file — presumably an optimizer module or
        class; confirm.
        """
        if (accuracy > self._best_results[label]):
            self._logger.info(('[%s] Best model in iteration %s: %s%% accuracy' % (label, iteration, accuracy)))
            self._save_model(self._save_path, model, tokenizer, iteration, optimizer=(optimizer if self.args.save_optimizer else None), save_as_best=True, name=('model_%s' % label), extra=extra)
            self._best_results[label] = accuracy
    def _save_model(self, save_path: str, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, iteration: int, optimizer: optimizer=None, save_as_best: bool=False, extra: dict=None, include_iteration: int=True, name: str='model'):
        """Save model + tokenizer + extra state under save_path.

        Directory layout: '<name>_best' when save_as_best, otherwise
        '<name>_<iteration>' (or just '<name>' when include_iteration is falsy).
        """
        extra_state = dict(iteration=iteration)
        if optimizer:
            extra_state['optimizer'] = optimizer.state_dict()
        if extra:
            extra_state.update(extra)
        if save_as_best:
            dir_path = os.path.join(save_path, ('%s_best' % name))
        else:
            dir_name = (('%s_%s' % (name, iteration)) if include_iteration else name)
            dir_path = os.path.join(save_path, dir_name)
        util.create_directories_dir(dir_path)
        # Unwrap DataParallel so the underlying model is the one saved.
        if isinstance(model, DataParallel):
            model.module.save_pretrained(dir_path)
        else:
            model.save_pretrained(dir_path)
        tokenizer.save_pretrained(dir_path)
        state_path = os.path.join(dir_path, 'extra.state')
        torch.save(extra_state, state_path)
    def _get_lr(self, optimizer):
        """Return the current learning rate of every param group."""
        lrs = []
        for group in optimizer.param_groups:
            lr_scheduled = group['lr']
            lrs.append(lr_scheduled)
        return lrs
    def _close_summary_writer(self):
        """Flush and close the tensorboard writer, if any."""
        if (self._summary_writer is not None):
            self._summary_writer.close()
def validateaxis(axis) -> None:
    """Validate an 'axis' argument for 2-D sparse operations.

    Accepts None or an integer-typed value in [-2, 1]; raises TypeError or
    ValueError otherwise.
    """
    if axis is None:
        return
    kind = type(axis)
    if kind is tuple:
        raise TypeError("Tuples are not accepted for the 'axis' parameter. Please pass in one of the following: {-2, -1, 0, 1, None}.")
    # Reject non-integer types (floats, strings, bools, ...).
    if not np.issubdtype(np.dtype(kind), np.integer):
        raise TypeError(f'axis must be an integer, not {kind.__name__}')
    if not -2 <= axis <= 1:
        raise ValueError('axis out of range')
class SLayerNorm(nn.LayerNorm):
    """LayerNorm whose effective width can be shrunk at runtime (supernet slicing).

    ``set_sample_config(k)`` selects the first ``k`` entries of weight/bias so
    the same module can serve sub-networks of different hidden sizes.

    Fixes vs. the original: ``weights``/``biases``/``normalized_shapes`` are
    now properties — the original defined them as plain methods, yet
    ``forward`` and ``clone_model`` accessed them without calling, passing
    bound methods into ``F.layer_norm`` (a TypeError at runtime). Likewise
    ``build_from`` takes ``cls`` and is now a real ``@classmethod``. These
    decorators were most plausibly lost in transcription.
    """

    def __init__(self, normalized_shape: int, eps: float = 1e-05, elementwise_affine: bool = True) -> None:
        super(SLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)
        self.staticize()

    def staticize(self):
        """Reset the sampled configuration to the full width."""
        self.sample_normalized_shape = self.normalized_shape[0]
        self.samples = {'weight': self.weight, 'bias': self.bias}

    def set_sample_config(self, sample_normalized_shape: int):
        """Select the sub-network width and re-slice the parameters."""
        self.sample_normalized_shape = sample_normalized_shape
        self._sample_parameters()

    def _sample_parameters(self):
        # Slices are views, not copies, so gradients flow to the shared params.
        if self.elementwise_affine:
            self.samples['weight'] = self.weight[:self.sample_normalized_shape]
            self.samples['bias'] = self.bias[:self.sample_normalized_shape]
        else:
            self.samples['weight'] = None
            self.samples['bias'] = None
        return self.samples

    def calc_sampled_param_num(self):
        """Parameter count of the currently sampled configuration."""
        return self.samples['weight'].numel() + self.samples['bias'].numel()

    def get_complexity(self, sequence_length):
        """Complexity proxy: elements normalized per sequence."""
        return sequence_length * self.sample_normalized_shape

    @property
    def weights(self):
        """Sampled weight slice (None without elementwise affine)."""
        return self.samples['weight'] if self.elementwise_affine else None

    @property
    def biases(self):
        """Sampled bias slice (None without elementwise affine)."""
        return self.samples['bias'] if self.elementwise_affine else None

    @property
    def normalized_shapes(self):
        """Sampled normalized shape, always as a tuple (as F.layer_norm expects)."""
        if isinstance(self.sample_normalized_shape, numbers.Integral):
            sample_normalized_shape = (self.sample_normalized_shape,)
        else:
            sample_normalized_shape = self.sample_normalized_shape
        return tuple(sample_normalized_shape)

    def forward(self, input: Tensor) -> Tensor:
        # Re-slice each call so parameter updates are reflected immediately.
        self._sample_parameters()
        return F.layer_norm(input, self.normalized_shapes, self.weights, self.biases, self.eps)

    def extra_repr(self) -> str:
        return f'{self.normalized_shape}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'

    def clone_model(self, normalized_shape: int):
        """Return an eval-mode nn.LayerNorm with the sampled parameters copied in."""
        self.set_sample_config(normalized_shape)
        m = nn.LayerNorm(normalized_shape, self.eps, self.elementwise_affine)
        if m.elementwise_affine:
            m = m.to(self.weight.device)
            m = m.to(self.weight.dtype)
            m.weight.data.copy_(self.weights)
            m.bias.data.copy_(self.biases)
        return m.eval()

    @classmethod
    def build_from(cls, m: nn.LayerNorm):
        """Create an SLayerNorm mirroring an existing nn.LayerNorm."""
        normalized_shape = m.normalized_shape
        eps = m.eps
        elementwise_affine = m.elementwise_affine
        _m = cls(normalized_shape, eps, elementwise_affine)
        if _m.elementwise_affine:
            _m = _m.to(m.weight.device)
            _m = _m.to(m.weight.dtype)
            _m.weight.data.copy_(m.weight)
            _m.bias.data.copy_(m.bias)
        return _m
_paths
def parse_args(args=None, namespace=None):
    """Parse command-line options for the audio-extraction script."""
    parser = argparse.ArgumentParser(description='Extract audio from videos.')
    add = parser.add_argument  # local alias to cut repetition
    add('-i', '--in_dir', type=pathlib.Path, required=True, help='input directory')
    add('-o', '--out_dir', type=pathlib.Path, required=True, help='output directory')
    add('-r', '--rate', default=16000, type=int, help='sampling rate')
    add('-s', '--skip_existing', action='store_true', help='whether to skip existing outputs')
    add('-e', '--ignore_exceptions', action='store_true', help='whether to ignore all exceptions')
    add('-j', '--jobs', default=1, type=int, help='number of jobs')
    add('-q', '--quiet', action='store_true', help='show warnings only')
    return parser.parse_args(args=args, namespace=namespace)
class IndexedFreeAbelianMonoid(IndexedMonoid):
    """Free abelian monoid on an arbitrary index set.

    Elements are represented as dicts mapping generator index -> exponent.
    """
    def _repr_(self):
        return 'Free abelian monoid indexed by {}'.format(self._indices)
    def _element_constructor_(self, x=None):
        """Build an element from a dict, or from (index, exponent) pairs."""
        if isinstance(x, (list, tuple)):
            # Merge duplicate indices by summing their exponents.
            d = dict()
            for (k, v) in x:
                if (k in d):
                    d[k] += v
                else:
                    d[k] = v
            x = d
        if isinstance(x, dict):
            # Drop zero exponents so the representation stays canonical.
            x = {k: v for (k, v) in x.items() if (v != 0)}
        return IndexedMonoid._element_constructor_(self, x)
    Element = IndexedFreeAbelianMonoidElement
    # NOTE(review): the bare '_method' below looks like a stripped decorator
    # (probably '@cached_method' in the original source) — confirm upstream.
    _method
    def one(self):
        """Identity element: the empty exponent dict."""
        return self.element_class(self, {})
    def gen(self, x):
        """Return the generator indexed by x; raise IndexError if x is not an index."""
        if (x not in self._indices):
            raise IndexError('{} is not in the index set'.format(x))
        try:
            return self.element_class(self, {self._indices(x): 1})
        except (TypeError, NotImplementedError):
            # Index set may not be callable as a coercion; fall back to the raw key.
            return self.element_class(self, {x: 1})
class Embedding(Layer):
    """Trainable word-embedding lookup table with optional zero-index masking."""

    def __init__(self, input_dim, output_dim, init='uniform', name=None):
        super(Embedding, self).__init__()
        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Weight matrix: one row per vocabulary index.
        self.W = self.init((self.input_dim, self.output_dim), scale=0.1)
        self.params = [self.W]
        if name is not None:
            self.set_name(name)

    def get_output_mask(self, X):
        # 1 where X holds a real token, 0 at padding index 0.
        return (T.ones_like(X) * (1 - T.eq(X, 0))).astype('int8')

    def init_pretrained(self, file_path, vocab):
        """Copy pretrained vectors into W for every vocab word found in file_path.

        Returns the set of words that were initialized.
        """
        weights = self.W.get_value(borrow=True)
        loaded = set()
        for word, vector in get_embed_iter(file_path):
            if word not in vocab:
                continue
            weights[vocab[word]] = vector
            loaded.add(word)
        return loaded

    def __call__(self, X, mask_zero=False):
        embedded = self.W[X]
        return (embedded, self.get_output_mask(X)) if mask_zero else embedded
class miniImageNet(ImageFolder):
    """miniImageNet split backed by an ImageFolder directory layout.

    Images are decoded once and cached to a pickle per split; later runs load
    the cache. Train images are cached untransformed (the transform runs per
    __getitem__), while val/test images are cached already transformed.
    """
    def __init__(self, root: str, mode: str, backbone_name='resnet12', image_sz=84) -> None:
        assert (mode in ['train', 'val', 'test'])
        self.mode = mode
        # NOTE(review): load() is defined elsewhere — presumably returns
        # (model, train transform, eval transform) for the backbone; confirm.
        (_, train_process, val_process) = load(backbone_name, jit=False)
        IMAGE_PATH = os.path.join(root, mode)
        if ((mode == 'val') or (mode == 'test')):
            transform = val_process
        elif (mode == 'train'):
            transform = train_process
        super().__init__(IMAGE_PATH, transform)
        samples = []
        if (mode == 'train'):
            # NOTE(review): hard-coded absolute cache path for one machine,
            # with a relative 'cache/...' fallback — consider parameterizing.
            path = ('/space0/songk/cache/miniImageNet/mini_train.pkl' if os.path.exists(f'/space0/songk/cache/miniImageNet/mini_train.pkl') else 'cache/miniImageNet/mini_train.pkl')
            if os.path.exists(path):
                print(f'loading {path}')
                samples = pickle.load(open(path, 'rb'))
            else:
                # Decode every image once; cache raw (untransformed) samples.
                for i in tqdm(range(len(self.samples))):
                    sample = self.loader(self.samples[i][0])
                    samples.append(sample)
                pickle.dump(samples, open('cache/miniImageNet/mini_train.pkl', 'wb'))
        elif ((mode == 'val') or (mode == 'test')):
            path = (f'/space0/songk/cache/miniImageNet/mini_{mode}.pkl' if os.path.exists(f'/space0/songk/cache/miniImageNet/mini_{mode}.pkl') else f'cache/miniImageNet/mini_{mode}.pkl')
            if os.path.exists(path):
                print(f'loading {path}')
                samples = pickle.load(open(path, 'rb'))
            else:
                # Eval images are cached with the transform already applied.
                for i in tqdm(range(len(self.samples))):
                    sample = self.loader(self.samples[i][0])
                    samples.append(self.transform(sample))
                pickle.dump(samples, open(f'cache/miniImageNet/mini_{mode}.pkl', 'wb'))
        # Replace (path, class) samples with the decoded/cached images.
        self.samples = samples
        self.label = self.targets
    def __getitem__(self, index: int):
        (sample, target) = (self.samples[index], self.label[index])
        if (self.mode == 'train'):
            # Train cache stores raw images; apply the transforms here.
            if (self.transform is not None):
                sample = self.transform(sample)
            if (self.target_transform is not None):
                target = self.target_transform(target)
        return (sample, target)
_module()
class SRFolderRefDataset(BaseSRDataset):
    """Folder-based dataset for reference-based super-resolution.

    Pairs each image in ``ref_folder`` with the same-named image in
    ``gt_folder`` and/or ``lq_folder`` (filenames adapted through
    ``filename_tmpl_gt`` / ``filename_tmpl_lq``).

    Fixes: missing space in the assertion message ("andlq_folder"), and the
    per-ref membership tests now use sets instead of O(n) list scans.
    """

    def __init__(self, pipeline, scale, ref_folder, gt_folder=None, lq_folder=None, test_mode=False, filename_tmpl_gt='{}', filename_tmpl_lq='{}'):
        super().__init__(pipeline, scale, test_mode)
        assert (gt_folder or lq_folder), 'At least one of gt_folder and lq_folder cannot be None.'
        self.scale = scale
        self.ref_folder = str(ref_folder)
        self.gt_folder = str(gt_folder) if gt_folder else None
        self.lq_folder = str(lq_folder) if lq_folder else None
        self.filename_tmpl_gt = filename_tmpl_gt
        self.filename_tmpl_lq = filename_tmpl_lq
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Build the per-sample path dicts (ref plus optional gt/lq paths)."""
        data_infos = []
        ref_paths = self.scan_folder(self.ref_folder)
        if self.gt_folder is not None:
            gt_paths = self.scan_folder(self.gt_folder)
            assert len(ref_paths) == len(gt_paths), f'ref and gt datasets have different number of images: {len(ref_paths)}, {len(gt_paths)}.'
            gt_path_set = set(gt_paths)  # O(1) membership in the loop below
        if self.lq_folder is not None:
            lq_paths = self.scan_folder(self.lq_folder)
            assert len(ref_paths) == len(lq_paths), f'ref and lq datasets have different number of images: {len(ref_paths)}, {len(lq_paths)}.'
            lq_path_set = set(lq_paths)
        for ref_path in ref_paths:
            basename, ext = osp.splitext(osp.basename(ref_path))
            data_dict = dict(ref_path=ref_path)
            if self.gt_folder is not None:
                gt_path = osp.join(self.gt_folder, f'{self.filename_tmpl_gt.format(basename)}{ext}')
                assert gt_path in gt_path_set, f'{gt_path} is not in gt_paths.'
                data_dict['gt_path'] = gt_path
            if self.lq_folder is not None:
                lq_path = osp.join(self.lq_folder, f'{self.filename_tmpl_lq.format(basename)}{ext}')
                assert lq_path in lq_path_set, f'{lq_path} is not in lq_paths.'
                data_dict['lq_path'] = lq_path
            data_infos.append(data_dict)
        return data_infos
class ModularSymbolsSubspace(sage.modular.modsym.space.ModularSymbolsSpace, hecke.HeckeSubmodule):
    """Subspace of an ambient space of modular symbols.

    Inherits the structural behaviour from ModularSymbolsSpace and the
    submodule mechanics from HeckeSubmodule; most derived objects
    (boundary map, cuspidal/Eisenstein pieces, star involution) are
    computed lazily and cached on private attributes.
    """

    def __init__(self, ambient_hecke_module, submodule, dual_free_module=None, check=False):
        """Initialize from the ambient module and a free-module submodule."""
        self.__ambient_hecke_module = ambient_hecke_module
        A = ambient_hecke_module
        sage.modular.modsym.space.ModularSymbolsSpace.__init__(self, A.group(), A.weight(), A.character(), A.sign(), A.base_ring())
        hecke.HeckeSubmodule.__init__(self, A, submodule, dual_free_module=dual_free_module, check=check)

    def _repr_(self):
        return 'Modular Symbols subspace of dimension %s of %s' % (self.rank(), self.ambient_module())

    def boundary_map(self):
        """The boundary map restricted to this subspace (cached)."""
        try:
            return self.__boundary_map
        except AttributeError:
            b = self.ambient_hecke_module().boundary_map()
            self.__boundary_map = b.restrict_domain(self)
            return self.__boundary_map

    def cuspidal_submodule(self):
        """The cuspidal part of this subspace (cached)."""
        try:
            return self.__cuspidal_submodule
        except AttributeError:
            try:
                # If already known cuspidal, the cuspidal part is the whole space.
                if self.__is_cuspidal:
                    return self
            except AttributeError:
                pass
            S = self.ambient_hecke_module().cuspidal_submodule()
            self.__cuspidal_submodule = S.intersection(self)
            return self.__cuspidal_submodule

    def dual_star_involution_matrix(self):
        """Star involution on the dual, restricted to this subspace (cached)."""
        try:
            return self.__dual_star_involution
        except AttributeError:
            pass
        S = self.ambient_hecke_module().dual_star_involution_matrix()
        A = S.restrict(self.dual_free_module())
        self.__dual_star_involution = A
        return self.__dual_star_involution

    def eisenstein_subspace(self):
        """The Eisenstein part of this subspace (cached)."""
        try:
            return self.__eisenstein_subspace
        except AttributeError:
            S = self.ambient_hecke_module().eisenstein_subspace()
            self.__eisenstein_subspace = S.intersection(self)
            return self.__eisenstein_subspace

    def factorization(self):
        """Factor this space into simple pieces with multiplicities (cached).

        New cuspidal spaces are decomposed directly (splitting into plus/minus
        parts when the sign is 0); otherwise simple factors of the ambient
        module are matched by multiplicity.
        """
        try:
            return self._factorization
        except AttributeError:
            pass
        try:
            if self._is_simple:
                return [(self, 1)]
        except AttributeError:
            pass
        if (self.is_new() and self.is_cuspidal()):
            D = []
            N = self.decomposition()
            if (self.sign() == 0):
                for A in N:
                    if A.is_cuspidal():
                        # Sign 0 splits each cuspidal factor into +/- pieces.
                        V = A.plus_submodule()
                        V._is_simple = True
                        D.append((V, 1))
                        V = A.minus_submodule()
                        V._is_simple = True
                        D.append((V, 1))
                    else:
                        A._is_simple = True
                        D.append((A, 1))
            else:
                for A in N:
                    A._is_simple = True
                    D.append((A, 1))
        else:
            D = []
            for S in self.ambient_hecke_module().simple_factors():
                n = self.multiplicity(S, check_simple=False)
                if (n > 0):
                    D.append((S, n))
            # Sanity check: factor dimensions must account for the whole space.
            r = self.dimension()
            s = sum([(A.rank() * mult) for (A, mult) in D])
            if (r != s):
                raise NotImplementedError(('modular symbols factorization not fully implemented yet -- self has dimension %s, but sum of dimensions of factors is %s' % (r, s)))
        self._factorization = sage.structure.factorization.Factorization(D, cr=True)
        return self._factorization

    def is_cuspidal(self) -> bool:
        """True iff this subspace lies in the cuspidal subspace (cached)."""
        try:
            return self.__is_cuspidal
        except AttributeError:
            C = self.ambient_hecke_module().cuspidal_submodule()
            self.__is_cuspidal = self.is_submodule(C)
            return self.__is_cuspidal

    def _set_is_cuspidal(self, t):
        self.__is_cuspidal = t

    def is_eisenstein(self):
        """True iff this subspace lies in the Eisenstein subspace (cached).

        Bug fix: the cache attribute was read as ``__is_eisenstien`` (typo)
        but written as ``__is_eisenstein``, so the lookup always raised
        AttributeError and the submodule test re-ran on every call.
        """
        try:
            return self.__is_eisenstein
        except AttributeError:
            C = self.ambient_hecke_module().eisenstein_subspace()
            self.__is_eisenstein = self.is_submodule(C)
            return self.__is_eisenstein

    def _compute_sign_subspace(self, sign, compute_dual=True):
        """Kernel of (star involution - sign), optionally with its dual."""
        S = (self.star_involution().matrix() - sign)
        V = S.kernel()
        if compute_dual:
            Sdual = (self.dual_star_involution_matrix() - sign)
            Vdual = Sdual.kernel()
        else:
            Vdual = None
        res = self.submodule_from_nonembedded_module(V, Vdual)
        res._set_sign(sign)
        return res

    def star_involution(self):
        """The star involution restricted to this subspace (cached)."""
        try:
            return self.__star_involution
        except AttributeError:
            pass
        S = self.ambient_hecke_module().star_involution()
        self.__star_involution = S.restrict(self)
        return self.__star_involution
class BaseMetric(ABC):
    """Abstract base class for recommendation evaluation metrics."""

    def __init__(self, recommendations, config, params, evaluation_objects, additional_data=None):
        # recommendations: user id -> ranked list of (item id, score) pairs.
        self._recommendations: t.Dict[int, t.List[t.Tuple[int, float]]] = recommendations
        self._config = config
        self._params = params
        self._evaluation_objects = evaluation_objects
        self._additional_data = additional_data

    def name(self):
        """Human-readable metric name; subclasses should override."""
        pass

    def eval(self):
        """System-wide value: average of the per-user metric values."""
        return np.average(list(self.eval_user_metric().values()))

    @staticmethod
    def needs_full_recommendations():
        # Fix: the original defined this with neither `self` nor @staticmethod,
        # so calling it on an instance raised TypeError. @staticmethod keeps
        # class-level calls working and makes instance-level calls valid too.
        return False

    def get(self):
        """Return the list of metric objects this wrapper represents."""
        return [self]
def base_prob2pianoroll(probs):
    """Convert a 12-way bass-pitch-class probability vector into a one-hot
    128-length pianoroll row (MIDI notes 24-35)."""
    bass_pitches = [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
    winner = np.argmax(probs, axis=0)
    roll = [0] * 128
    roll[bass_pitches[winner]] = 1
    return np.asarray(roll)
def save_csv_notes(filename, data):
    """Write an (N, 5) integer note array to CSV with a fixed header row."""
    assert data.shape[1] == 5
    header = 'beat,position,pitch,duration,program'
    np.savetxt(filename, data, fmt='%d', delimiter=',', header=header, comments='')
# NOTE(review): these bare '_quant_pattern(...)' calls look like stripped
# decorator lines (e.g. stacked '@register_quant_pattern(...)' applications
# on the class defined immediately after them in the original source) —
# confirm upstream. They cover nn.Module types, functional ops, torch-level
# ops, operator functions, and Tensor-method names.
_quant_pattern(torch.nn.AdaptiveAvgPool1d)
_quant_pattern(torch.nn.AdaptiveAvgPool2d)
_quant_pattern(torch.nn.AdaptiveAvgPool3d)
_quant_pattern(torch.nn.AvgPool1d)
_quant_pattern(torch.nn.AvgPool2d)
_quant_pattern(torch.nn.AvgPool3d)
_quant_pattern(torch.nn.Dropout)
_quant_pattern(torch.nn.Hardsigmoid)
_quant_pattern(torch.nn.Hardtanh)
_quant_pattern(torch.nn.LeakyReLU)
_quant_pattern(torch.nn.MaxPool1d)
_quant_pattern(torch.nn.MaxPool2d)
_quant_pattern(torch.nn.MaxPool3d)
_quant_pattern(torch.nn.ReLU)
_quant_pattern(torch.nn.ReLU6)
_quant_pattern(torch.nn.Sigmoid)
_quant_pattern(torch.nn.Tanh)
# Functional forms.
_quant_pattern(torch.adaptive_avg_pool1d)
_quant_pattern(torch.nn.functional.adaptive_avg_pool2d)
_quant_pattern(torch.nn.functional.adaptive_avg_pool3d)
_quant_pattern(torch.nn.functional.dropout)
_quant_pattern(torch.nn.functional.hardsigmoid)
_quant_pattern(torch.nn.functional.hardtanh)
_quant_pattern(torch.nn.functional.hardtanh_)
_quant_pattern(torch.nn.functional.interpolate)
_quant_pattern(torch.nn.functional.leaky_relu)
_quant_pattern(torch.nn.functional.max_pool1d)
_quant_pattern(torch.nn.functional.max_pool2d)
_quant_pattern(torch.nn.functional.max_pool3d)
_quant_pattern(torch.nn.functional.relu)
_quant_pattern(torch.nn.functional.relu6)
# torch-level ops.
_quant_pattern(torch.avg_pool1d)
_quant_pattern(torch._C._nn.avg_pool2d)
_quant_pattern(torch._C._nn.avg_pool3d)
_quant_pattern(torch.chunk)
_quant_pattern(torch.clamp)
_quant_pattern(torch.flatten)
_quant_pattern(torch.transpose)
_quant_pattern(torch.max)
_quant_pattern(torch.mean)
_quant_pattern(torch.min)
_quant_pattern(torch.repeat_interleave)
_quant_pattern(torch.sigmoid)
_quant_pattern(torch.sort)
_quant_pattern(torch.squeeze)
_quant_pattern(torch.stack)
_quant_pattern(torch.tanh)
_quant_pattern(torch.unsqueeze)
# operator functions.
_quant_pattern(operator.getitem)
_quant_pattern(operator.floordiv)
# Tensor method names (registered by string).
_quant_pattern('chunk')
_quant_pattern('clamp')
_quant_pattern('contiguous')
_quant_pattern('detach')
_quant_pattern('detach_')
_quant_pattern('hardsigmoid')
_quant_pattern('hardsigmoid_')
_quant_pattern('leaky_relu')
_quant_pattern('leaky_relu_')
_quant_pattern('mean')
_quant_pattern('numel')
_quant_pattern('permute')
_quant_pattern('relu')
_quant_pattern('relu_')
_quant_pattern('repeat')
_quant_pattern('repeat_interleave')
_quant_pattern('reshape')
_quant_pattern('resize_')
_quant_pattern('shape')
_quant_pattern('sigmoid')
_quant_pattern('sigmoid_')
_quant_pattern('size')
_quant_pattern('squeeze')
_quant_pattern('squeeze_')
_quant_pattern('tanh')
_quant_pattern('tanh_')
_quant_pattern('transpose')
_quant_pattern('unsqueeze')
_quant_pattern('unsqueeze_')
_quant_pattern('view')
class CopyNode(QuantizeHandler):
    """Handler for quantization-transparent ops: copy the node into the
    quantized graph unchanged, letting input qparams flow through."""

    def convert(self, quantizer, node, load_arg, debug=False):
        # quantized=None: keep each argument in whatever quantization state
        # it already has, rather than forcing quantize/dequantize.
        arg_loader = load_arg(quantized=None)
        return quantizer.quantized_graph.node_copy(node, arg_loader)
class SBMCDCEig(SBMCLUSTEREval, BaseEigModelScheme):
    """SBM-CLUSTER evaluation scheme using the eigen-feature DC transformer."""

    def get_default_config(self):
        """Extend the parent defaults with the SBM-CLUSTER dataset settings."""
        config_dict = super().get_default_config()
        # class_sizes: node counts of the six SBM clusters; used to derive
        # the inverse-frequency class weights in get_loss().
        config_dict.update(dataset_name='sbm_cluster',
                           class_sizes=[19695, 19222, 19559, 19417, 19801, 20139])
        return config_dict

    def get_dataset_config(self, splits=('training', 'validation')):
        """Return (dataset_config, dataset_class) for the eigen dataset.

        FIX: the default for `splits` was a mutable list shared across calls;
        an immutable tuple avoids the shared-mutable-default pitfall.  Note
        `splits` is currently not forwarded to the parent call.
        """
        (dataset_config, _) = super().get_dataset_config()
        return (dataset_config, EigenDataset)

    def get_model_config(self):
        """Return (model_config, model_class) with the DC eigen transformer."""
        (model_config, _) = super().get_model_config()
        return (model_config, DCEigTransformer)

    def get_loss(self):
        """Build a class-weighted sparse cross-entropy loss.

        Each class is weighted by (total - size) / sum(total - size), i.e.
        rarer classes get proportionally larger weights.
        """
        class_sizes = np.array(self.config.class_sizes, dtype='float32')
        class_weights = (class_sizes.sum() - class_sizes)
        class_weights = (class_weights / class_weights.sum())
        class_weights = tf.constant(class_weights, dtype=tf.float32)

        def loss(y_true, y_pred):
            # Per-node weight looked up from the true class label.
            weights = tf.gather(class_weights, tf.cast(y_true, tf.int32))
            w_xent = (weights * losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True, axis=(- 1)))
            return w_xent
        return loss

    def get_metrics(self):
        """Sparse categorical accuracy, reported as 'acc'."""
        acc = metrics.SparseCategoricalAccuracy(name='acc')
        return [acc]
def create_jsonl_candidates_for_pyserini(train_dir):
    """For each sub-directory of `train_dir`, write a `candidates.jsonl` file
    (one JSON object per paragraph) in the format Pyserini expects.

    Each record has:
      - 'id': '<sub_dir>_<paragraph file stem>'
      - 'contents': the paragraph text with the first line dropped
        (presumably a title/ID header — confirm) and whitespace collapsed.

    FIX: use next(os.walk(...)) to read only the first directory listing
    instead of materializing the entire recursive walk into a list.
    """
    _, sub_dirs, _ = next(os.walk(train_dir))
    for sub_dir in sub_dirs:
        paragraphs_dir = os.path.join(train_dir, sub_dir, 'paragraphs')
        out_path = os.path.join(train_dir, sub_dir, 'candidates.jsonl')
        with jsonlines.open(out_path, mode='w') as writer:
            _, _, paragraph_files = next(os.walk(paragraphs_dir))
            for paragraph in paragraph_files:
                with open(os.path.join(paragraphs_dir, paragraph), 'r') as paragraph_file:
                    # Drop the first line; splitlines() already strips newlines.
                    para_text = paragraph_file.read().splitlines()[1:]
                writer.write({
                    'id': '{}_{}'.format(sub_dir, paragraph.split('.')[0]),
                    'contents': ' '.join(text.strip().replace('\n', '') for text in para_text),
                })
.parametrize('beam_size,expected', [(1, False), (2, False), (5, True)])
def test_beam_search(beam_size, expected):
    # Romania route-finding: with a narrow beam the goal node can be pruned
    # away; per the parametrization only beam_size=5 is expected to succeed.
    start_node = 'Arad'
    goal_fn = (lambda x: (x == 'Fagaras'))
    (goal, _, path) = generalized_a_star_search(start_node=start_node, expand_fn=expand_fn, goal_fn=goal_fn, beam_size=beam_size, return_path=True)
    # Search failure is signalled by goal being None.
    assert ((goal is None) if (not expected) else (goal is not None))
def call_ChatGPT(message, model_name='gpt-3.5-turbo', max_len=1024, temp=0.7, verbose=False):
    """Call the OpenAI chat completion API, retrying with exponential backoff.

    Args:
        message: the chat `messages` payload (list of role/content dicts).
        model_name: OpenAI model identifier.
        max_len: max_tokens for the completion.
        temp: sampling temperature.
        verbose: unused (kept for interface compatibility).

    Returns:
        The raw API response object.

    FIXES: the previous bare `except:` also caught KeyboardInterrupt/SystemExit
    and retried forever; the `sys.exc_info()[0] == InvalidRequestError` class
    comparison also missed subclasses.  Both are now explicit except clauses.
    """
    response = None
    received = False
    num_rate_errors = 0
    while (not received):
        try:
            response = openai.ChatCompletion.create(model=model_name, messages=message, max_tokens=max_len, temperature=temp)
            received = True
        except openai.error.InvalidRequestError:
            # A malformed prompt cannot succeed on retry: fail loudly.
            logging.critical(f'''InvalidRequestError
Prompt passed in:

{message}

''')
            assert False
        except Exception:
            # Transient error (rate limit, timeout, ...): back off and retry.
            num_rate_errors += 1
            error = sys.exc_info()[0]
            logging.error(('API error: %s (%d). Waiting %dsec' % (error, num_rate_errors, np.power(2, num_rate_errors))))
            time.sleep(np.power(2, num_rate_errors))
    return response
def skipCUDAMemoryLeakCheckIf(condition):
    """Return a decorator that disables the per-test CUDA memory-leak check
    on the wrapped function when `condition` is truthy.

    Functions that already opted out (flag set to a falsy value) are left
    untouched; the default state of the flag is enabled (True).
    """
    def decorator(fn):
        currently_enabled = getattr(fn, '_do_cuda_memory_leak_check', True)
        if currently_enabled:
            fn._do_cuda_memory_leak_check = not condition
        return fn
    return decorator
def test_ByteMaskedArray_NumpyArray():
    """Round-trip a CUDA-backed ByteMaskedArray through a pass-through kernel
    for both valid_when polarities and check the host result matches."""
    cases = [
        (np.array([1, 0, 1, 0, 1], np.int8), True),
        (np.array([0, 1, 0, 1, 0], np.int8), False),
    ]
    for mask, valid_when in cases:
        layout = ak.contents.ByteMaskedArray(
            ak.index.Index(mask),
            ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
            valid_when=valid_when,
        )
        array = ak.Array(layout, backend='cuda')
        results = nb_cuda.to_device(np.empty(5, dtype=np.float64))
        pass_through[(1, 5)](array, results)
        nb_cuda.synchronize()
        host_results = results.copy_to_host()
        # Masked-out entries come back as NaN; map them to None for comparison.
        assert ak.nan_to_none(ak.Array(host_results)).tolist() == array.to_list()
class PatchImageDiscriminator(nn.Module):
    """PatchGAN-style image discriminator: strided conv stack (optionally with
    input noise at every stage) that maps an image to a 1-channel patch map."""

    def __init__(self, n_channels, ndf=64, use_noise=False, noise_sigma=None):
        super(PatchImageDiscriminator, self).__init__()
        self.use_noise = use_noise
        self.main = nn.Sequential(
            # n_channels -> ndf (no batch-norm on the first block)
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(n_channels, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # ndf -> ndf*2
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(ndf, (ndf * 2), 4, 2, 1, bias=False),
            nn.BatchNorm2d((ndf * 2)),
            nn.LeakyReLU(0.2, inplace=True),
            # ndf*2 -> ndf*4
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d((ndf * 2), (ndf * 4), 4, 2, 1, bias=False),
            nn.BatchNorm2d((ndf * 4)),
            nn.LeakyReLU(0.2, inplace=True),
            # ndf*4 -> ndf*8
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d((ndf * 4), (ndf * 8), 4, 2, 1, bias=False),
            nn.BatchNorm2d((ndf * 8)),
            nn.LeakyReLU(0.2, inplace=True),
            # ndf*8 -> ndf*16
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d((ndf * 8), (ndf * 16), 4, 2, 1, bias=False),
            nn.BatchNorm2d((ndf * 16)),
            nn.LeakyReLU(0.2, inplace=True),
            # ndf*16 -> 1-channel patch score map
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d((ndf * 16), 1, 4, 2, 1, bias=False),
        )

    def forward(self, input):
        patch_scores = self.main(input).squeeze()
        # Second element kept None for interface parity with other discriminators.
        return (patch_scores, None)
def validation_transforms(sample, image_shape):
    """Validation-time transforms: optionally resize the 'rgb' entry to
    `image_shape`, then convert the sample's contents to tensors."""
    if len(image_shape) > 0:
        sample['rgb'] = resize_image(sample['rgb'], image_shape)
    return to_tensor_sample(sample)
class DatasetOnlineLoad(torchdata.Dataset):
    """Dataset that loads voxel grids lazily from disk on each __getitem__."""

    def __init__(self, files, n_max_samples=(- 1)):
        # files: list of dicts carrying the filenames/metadata consumed below.
        self.files = files
        self.n_samples = len(self.files)
        # Optionally cap the dataset length (-1 means "use everything").
        if (n_max_samples != (- 1)):
            self.n_samples = min(self.n_samples, n_max_samples)

    def __len__(self):
        return self.n_samples

    def __getitem__(self, index):
        infile = self.files[index]
        filename_center = infile['filename_vox_center']
        filename_heatmap = infile['filename_vox_heatmap']
        match = infile['match']
        p_scan = torch.tensor(infile['p_scan'])
        scale = np.array(infile['scale'], dtype=np.float32)
        basename_save = infile['customname']
        vox_center = Vox.load_vox(filename_center)
        vox_center.make_torch()
        dims = vox_center.dims
        # Reject samples whose SDF at the grid midpoint is deeply negative
        # (inside geometry) and deterministically fall back to another index.
        # NOTE(review): the +31 stride looks like an arbitrary de-correlation
        # offset — confirm; it can recurse repeatedly on bad data.
        if (vox_center.sdf[(0, (dims[2] // 2), (dims[1] // 2), (dims[0] // 2))] < (- 0.15)):
            return self.__getitem__(((index + 31) % self.n_samples))
        vox_heatmap = Vox.load_vox(filename_heatmap)
        vox_heatmap.make_torch()
        sdf_scan = vox_center.sdf
        df_cad = vox_heatmap.sdf
        # Missing probability grid -> all-zero heatmap with matching shape.
        if (vox_heatmap.pdf is None):
            heatmap = torch.zeros(df_cad.shape)
        else:
            heatmap = vox_heatmap.pdf
        return {'sdf_scan': sdf_scan, 'df_cad': df_cad, 'heatmap': heatmap, 'match': match, 'scale': scale, 'p_scan': p_scan, 'basename_save': basename_save, 'voxres_scan': vox_center.res, 'voxres_cad': vox_heatmap.res, 'grid2world_scan': vox_center.grid2world, 'grid2world_cad': vox_heatmap.grid2world, 'filename_vox_center': filename_center}
def _minkan(state: State):
    # Open kan: the current player claims the last discarded tile to complete
    # a four-of-a-kind meld, then draws a replacement tile.
    # NOTE(review): comments below describe the apparent intent of each step;
    # verify against the mahjong rules implementation.
    c_p = state.current_player
    l_p = state._last_player
    state = _accept_riichi(state)
    # Relative seat offset of the discarder, encoded into the meld.
    src = ((l_p - c_p) % 4)
    meld = Meld.init(Action.MINKAN, state._target, src)
    state = _append_meld(state, meld, c_p)
    hand = state._hand.at[c_p].set(Hand.minkan(state._hand[c_p], state._target))
    state = state.replace(_hand=hand)
    # An open meld ends the player's concealed-hand (menzen) status.
    is_menzen = state._is_menzen.at[c_p].set(FALSE)
    # Replacement draw taken from the top of the remaining deck index.
    rinshan_tile = state._deck[state._next_deck_ix]
    _next_deck_ix = (state._next_deck_ix - 1)
    hand = state._hand.at[c_p].set(Hand.add(state._hand[c_p], rinshan_tile))
    # Legal next actions: discard any held tile type, except the freshly drawn
    # tile which is represented by TSUMOGIRI instead.
    legal_action_mask = jnp.zeros(NUM_ACTION, dtype=jnp.bool_)
    legal_action_mask = legal_action_mask.at[0:34].set((hand[c_p] > 0))
    legal_action_mask = legal_action_mask.at[rinshan_tile].set(FALSE)
    legal_action_mask = legal_action_mask.at[Action.TSUMOGIRI].set(TRUE)
    # Flag the claimed discard in the discarder's river (OR with 0x80 —
    # presumably the "called tile" marker bit; confirm).
    river = state._river.at[(l_p, (state._n_river[l_p] - 1))].set((state._river[(l_p, (state._n_river[l_p] - 1))] | jnp.uint8(128)))
    # Bump the kan counter and reveal the next dora indicator.
    return state.replace(_target=jnp.int8((- 1)), _is_menzen=is_menzen, _next_deck_ix=_next_deck_ix, _last_draw=rinshan_tile, _hand=hand, legal_action_mask=legal_action_mask, _river=river, _n_kan=(state._n_kan + 1), _doras=state._doras.at[(state._n_kan + 1)].set(state._deck[(9 - (2 * (state._n_kan + 1)))]))
def test():
    """Smoke test: push one random CIFAR-sized batch through MobileNet and
    print the resulting output shape."""
    model = MobileNet()
    dummy_input = torch.randn(1, 3, 32, 32)
    output = model(dummy_input)
    print(output.size())
def anisotropic_scaling(img, p):
    """With probability `p`, stretch the image along one randomly chosen axis
    by a log-normal factor >= 1, then center-crop back to the original size.

    Returns the (possibly transformed) image tensor.
    """
    # Skip the augmentation with probability (1 - p).
    if random.random() < (1 - p):
        return img
    # Log-normal stretch factor; values below 1 are reflected so s >= 1.
    s = np.random.lognormal(0, (0.2 * np.log(2)))
    if s < 1:
        s = 2 - s
    h, w = img.size()[-2], img.size()[-1]
    # Pick the stretched axis with a fair coin flip.
    stretch_height = random.random() > 0.5
    target_size = (int(h * s), w) if stretch_height else (h, int(w * s))
    img = transforms.functional.resize(img, target_size)
    return transforms.functional.center_crop(img, (h, w))
class RaiseStatNode(StatNode):
    """A `raise` statement node.

    exc_type / exc_value / exc_tb / cause are each an ExprNode or None.
    """
    child_attrs = ['exc_type', 'exc_value', 'exc_tb', 'cause']
    # A raise never falls through to the next statement.
    is_terminator = True

    def analyse_expressions(self, env):
        # Analyse each supplied operand and coerce it to a Python object.
        if self.exc_type:
            exc_type = self.exc_type.analyse_types(env)
            self.exc_type = exc_type.coerce_to_pyobject(env)
        if self.exc_value:
            exc_value = self.exc_value.analyse_types(env)
            self.exc_value = exc_value.coerce_to_pyobject(env)
        if self.exc_tb:
            exc_tb = self.exc_tb.analyse_types(env)
            self.exc_tb = exc_tb.coerce_to_pyobject(env)
        if self.cause:
            cause = self.cause.analyse_types(env)
            self.cause = cause.coerce_to_pyobject(env)
        self.builtin_exc_name = None
        # Detect `raise BuiltinError` / `raise BuiltinError()` (no value, no
        # traceback) so code generation can take a fast path below.
        if (self.exc_type and (not self.exc_value) and (not self.exc_tb)):
            exc = self.exc_type
            from . import ExprNodes
            if (isinstance(exc, ExprNodes.SimpleCallNode) and (not (exc.args or ((exc.arg_tuple is not None) and exc.arg_tuple.args)))):
                # Zero-argument call: look through to the called name.
                exc = exc.function
            if (exc.is_name and exc.entry.is_builtin):
                self.builtin_exc_name = exc.name
                if (self.builtin_exc_name == 'MemoryError'):
                    # MemoryError gets a dedicated C-level implementation.
                    self.exc_type = None
        return self
    # Raising requires the GIL.
    nogil_check = Node.gil_error
    gil_message = 'Raising exception'

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        # Fast path: raise MemoryError via PyErr_NoMemory().
        if (self.builtin_exc_name == 'MemoryError'):
            code.putln(('PyErr_NoMemory(); %s' % code.error_goto(self.pos)))
            return
        # Evaluate each operand; '0' (NULL) stands in for missing ones.
        if self.exc_type:
            self.exc_type.generate_evaluation_code(code)
            type_code = self.exc_type.py_result()
            if self.exc_type.is_name:
                code.globalstate.use_entry_utility_code(self.exc_type.entry)
        else:
            type_code = '0'
        if self.exc_value:
            self.exc_value.generate_evaluation_code(code)
            value_code = self.exc_value.py_result()
        else:
            value_code = '0'
        if self.exc_tb:
            self.exc_tb.generate_evaluation_code(code)
            tb_code = self.exc_tb.py_result()
        else:
            tb_code = '0'
        if self.cause:
            self.cause.generate_evaluation_code(code)
            cause_code = self.cause.py_result()
        else:
            cause_code = '0'
        code.globalstate.use_utility_code(raise_utility_code)
        code.putln(('__Pyx_Raise(%s, %s, %s, %s);' % (type_code, value_code, tb_code, cause_code)))
        # Dispose of the evaluated operands before jumping to the error label.
        for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
            if obj:
                obj.generate_disposal_code(code)
                obj.free_temps(code)
        code.putln(code.error_goto(self.pos))

    def generate_function_definitions(self, env, code):
        if (self.exc_type is not None):
            self.exc_type.generate_function_definitions(env, code)
        if (self.exc_value is not None):
            self.exc_value.generate_function_definitions(env, code)
        if (self.exc_tb is not None):
            self.exc_tb.generate_function_definitions(env, code)
        if (self.cause is not None):
            self.cause.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.exc_type:
            self.exc_type.annotate(code)
        if self.exc_value:
            self.exc_value.annotate(code)
        if self.exc_tb:
            self.exc_tb.annotate(code)
        if self.cause:
            self.cause.annotate(code)
class ResnetDownsampleBlock3D(nn.Module):
    """Down-sampling block for a 3D (video) UNet: a stack of 2D ResNet blocks,
    each followed by a temporal convolution, plus an optional resnet-based
    downsampler.  forward() also returns the intermediate activations for the
    UNet skip connections."""

    def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_downsample=True, skip_time_act=False):
        super().__init__()
        resnets = []
        temp_convs = []
        for i in range(num_layers):
            # Only the first resnet maps in_channels -> out_channels.
            in_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act))
            temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1))
        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        if add_downsample:
            # Spatial downsampling via a ResnetBlock2D configured with down=True.
            self.downsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True)])
        else:
            self.downsamplers = None
        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None, num_frames=1):
        # output_states accumulates every intermediate hidden state (resnet
        # outputs plus, when present, the downsampled output).
        output_states = ()
        for (resnet, temp_conv) in zip(self.resnets, self.temp_convs):
            if (self.training and self.gradient_checkpointing):
                # Trade recompute for activation memory during training.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = temp_conv(hidden_states, num_frames=num_frames)
            output_states = (output_states + (hidden_states,))
        if (self.downsamplers is not None):
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, temb)
            output_states = (output_states + (hidden_states,))
        return (hidden_states, output_states)
class StdoutTee(Tee):
    """Tee specialization bound to the process-wide stdout stream."""

    def set_stream(self, stream):
        # Route all subsequent stdout writes through `stream`.
        sys.stdout = stream

    def get_stream(self):
        # Current stdout target.
        return sys.stdout
def main():
    """Train and evaluate an EGNN on the PDBbind LBA or LEP task.

    Loads preprocessed tensors from data/pdb/, optionally initializes from a
    pretrained checkpoint (with optional linear probing), trains with early
    stopping on the validation metric, and saves the best checkpoint.

    FIXES:
      - For LBA, `best_metric` was initialized to 0.0 while improvement is
        `metric < best_metric`; since MSE >= 0 the best model was never
        recorded and `best_model` was unbound after training.  It now starts
        at +inf.
      - `best_model`/`best_epoch` are initialized before the loop so a fully
        failed first epoch cannot leave them undefined.
      - The bare `except:` around the training loop is narrowed to
        KeyboardInterrupt/Exception.
      - The final LBA log had Pearson and Spearman swapped.
    """
    args = parse_args()
    set_seed(args.seed)
    if (args.data == 'lba'):
        log = Logger(f'{args.save_path}pdbbind_{args.split}/', f"pdbind_{strftime('%Y-%m-%d_%H-%M-%S', localtime())}.log")
    else:
        log = Logger(f'{args.save_path}lep/', f"lep_{strftime('%Y-%m-%d_%H-%M-%S', localtime())}.log")
    args.epochs = 1000
    # Scale learning rate and batch size linearly with the GPU count.
    args.lr = (0.0001 * len(args.gpu.split(',')))
    args.bs = (4 * len(args.gpu.split(',')))
    if (args.data == 'lba'):
        (x_train, _, pos_train, y_train) = torch.load(f'data/pdb/pdb_train_{args.split}.pt')
        (x_val, _, pos_val, y_val) = torch.load(f'data/pdb/pdb_val_{args.split}.pt')
        if (not args.unknown):
            (x_test, _, pos_test, y_test) = torch.load(f'data/pdb/pdb_test_{args.split}.pt')
        else:
            (x_test, _, pos_test, y_test) = torch.load(f'data/pdb/docking_test_{args.split}.pt')
    else:
        (x_train, pos_train, y_train) = torch.load(f'data/pdb/lep_train.pt')
        (x_val, pos_val, y_val) = torch.load(f'data/pdb/lep_val.pt')
        (x_test, pos_test, y_test) = torch.load(f'data/pdb/lep_test.pt')
    train_loader = DataLoader(TensorDataset(x_train, pos_train, y_train), batch_size=args.bs, shuffle=True)
    val_loader = DataLoader(TensorDataset(x_val, pos_val, y_val), batch_size=(args.bs * 2))
    test_loader = DataLoader(TensorDataset(x_test, pos_test, y_test), batch_size=(args.bs * 2))
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    model = EGNN_Network(num_tokens=args.tokens, dim=args.dim, depth=args.depth, num_nearest_neighbors=args.num_nearest, dropout=args.dropout, global_linear_attn_every=1, norm_coors=True, coor_weights_clamp_value=2.0, aggregate=False).cuda()
    if args.pretrain:
        checkpoint = torch.load((args.save_path + args.pretrain))
        model.load_state_dict(checkpoint['model'])
        if args.linear_probe:
            # Freeze the backbone; only the new prediction head is trained.
            for param in model.parameters():
                param.requires_grad = False
    else:
        args.pretrain = 'no_pre'
    model.aggregate = True
    model.out = predictor(args.dim).cuda()
    if (len(args.gpu) > 1):
        model = torch.nn.DataParallel(model)
    if (args.data == 'lba'):
        criterion = torch.nn.MSELoss()
        # Regression: lower MSE is better, so start from +inf (was 0.0 — bug).
        best_metric = float('inf')
    else:
        # Classification: higher AUROC is better.
        best_metric = 0.0
        criterion = torch.nn.BCELoss()
    optimizer = opt.Adam(model.parameters(), lr=args.lr)
    if (args.data == 'lba'):
        lr_scheduler = opt.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.6, patience=10, min_lr=5e-06)
    else:
        lr_scheduler = opt.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.6, patience=10, min_lr=5e-06)
    scaler = torch.cuda.amp.GradScaler(enabled=True)
    log.logger.info(f'''{('=' * 40)} PDBbind {('=' * 40)}
Embed_dim: {args.dim}; Train: {len(x_train)}; Val: {len(x_val)}; Test: {len(x_test)}; Pre-train Model: {args.pretrain}
Data Split: {args.split}; Target: {args.data}; Batch_size: {args.bs}; Linear-probe: {args.linear_probe}
{('=' * 40)} Start Training {('=' * 40)}''')
    t0 = time()
    early_stop = 0
    # Defensive defaults so the post-training code below never sees them unbound.
    best_model = model
    best_epoch = 0
    try:
        for epoch in range(0, args.epochs):
            model.train()
            loss = 0.0
            t1 = time()
            for (x, pos, y) in train_loader:
                (x, pos, y) = (x.long().cuda(), pos.float().cuda(), y.cuda())
                mask = (x != 0)
                out = model(x, pos, mask=mask)[1][(..., 0)]
                if (args.data == 'lep'):
                    out = torch.sigmoid(out)
                loss_batch = criterion(out, y.float())
                # NOTE(review): this normalization (len(x_train) * bs) looks
                # unusual — it only rescales the logged value; confirm intent.
                loss += (loss_batch.item() / (len(x_train) * args.bs))
                scaler.scale(loss_batch).backward()
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()
            if (args.data == 'lba'):
                (spearman, pearson, metric) = run_eval(args, model, val_loader, y_val)
                log.logger.info('Epoch: {} | Time: {:.1f}s | Loss: {:.2f} | RMSE: {:.3f} | Pearson: {:.3f} | Spearman: {:.3f} | Lr: {:.3f}'.format((epoch + 1), (time() - t1), (loss * 10000.0), (metric ** 0.5), pearson, spearman, (optimizer.param_groups[0]['lr'] * 100000.0)))
            else:
                (auroc, auprc, _) = run_eval(args, model, val_loader, y_val)
                metric = auroc
                log.logger.info('Epoch: {} | Time: {:.1f}s | Loss: {:.2f} | AUROC: {:.3f} | AUPRC: {:.3f} | Lr: {:.3f}'.format((epoch + 1), (time() - t1), (loss * 10000.0), auroc, auprc, (optimizer.param_groups[0]['lr'] * 100000.0)))
            lr_scheduler.step(metric)
            # Track the best validation metric (min for LBA, max for LEP).
            if (((args.data == 'lba') and (metric < best_metric)) or ((args.data == 'lep') and (metric > best_metric))):
                best_metric = metric
                best_model = copy.deepcopy(model)
                best_epoch = (epoch + 1)
                early_stop = 0
            else:
                early_stop += 1
            if (early_stop >= 50):
                log.logger.info('Early Stopping!!! No Improvement on Loss for 50 Epochs.')
                break
    except (KeyboardInterrupt, Exception):
        log.logger.info('Training is interrupted.')
    log.logger.info('{} End Training (Time: {:.2f}h) {}'.format(('=' * 20), ((time() - t0) / 3600), ('=' * 20)))
    checkpoint = {'epochs': args.epochs}
    # Final evaluation of the best checkpoint on the held-out test set.
    if (args.data == 'lba'):
        (spearman, pearson, metric) = run_eval(args, best_model, test_loader, y_test)
    else:
        (auroc, auprc, _) = run_eval(args, best_model, test_loader, y_test)
    if (len(args.gpu) > 1):
        checkpoint['model'] = best_model.module.state_dict()
    else:
        checkpoint['model'] = best_model.state_dict()
    if args.linear_probe:
        args.linear_probe = 'Linear'
    if (args.data == 'lba'):
        torch.save(checkpoint, (args.save_path + f'PDB_{args.split}_{args.pretrain}_{args.linear_probe}.pt'))
        # FIX: Pearson/Spearman values were previously swapped in this message.
        log.logger.info(f'''Save the best model as PDB_{args.split}_{args.pretrain}_{args.linear_probe}.pt.
Best Epoch: {best_epoch} | RMSE: {(metric ** 0.5)} | Test Pearson: {pearson} | Test Spearman: {spearman}''')
    else:
        torch.save(checkpoint, (args.save_path + f'LEP_{args.split}_{args.pretrain}_{args.linear_probe}.pt'))
        log.logger.info(f'''Save the best model as LEP_{args.split}_{args.pretrain}_{args.linear_probe}.pt.
Best Epoch: {best_epoch} | Test AUROC: {auroc} | Test AUPRC: {auprc}''')
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSat audio-frame-classification model and copy the
    s3prl downstream linear head into its classifier layer."""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    # Transfer the downstream linear layer parameters into the HF head.
    linear_weight = downstream_dict['model.linear.weight']
    linear_bias = downstream_dict['model.linear.bias']
    model.classifier.weight.data = linear_weight
    model.classifier.bias.data = linear_bias
    return model
class TestSignal(TestCore):
    """Round-trip set/get/clear for each typed signal class."""

    SIGNALS = [(IntegerSignal, 99), (FloatSignal, 55.3), (DoubleSignal, 22.2), (StringSignal, 'hello')]

    def test_set_get_clear_signals(self):
        for signal_class, expected in TestSignal.SIGNALS:
            with self.subTest(signal=str(signal_class)):
                signal = signal_class('my_signal')
                signal.set(expected)
                observed = signal.get()
                if isinstance(expected, float):
                    # Floating-point signals may lose precision in transit.
                    self.assertAlmostEqual(observed, expected, places=3)
                else:
                    self.assertEqual(observed, expected)
                # clear() reports how many pending values were discarded.
                self.assertEqual(signal.clear(), 1)

    def test_get_signal_fails_when_empty(self):
        for signal_class, _ in TestSignal.SIGNALS:
            with self.subTest(signal=str(signal_class)):
                signal = signal_class('my_signal')
                with self.assertRaises(PyRepError):
                    signal.get()
class ExtRandomRotation(object):
    """Rotate an (image, label) pair by the same random angle.

    Args:
        degrees: a non-negative number (sampled range is (-degrees, degrees))
            or a (min, max) sequence of length 2.
        resample, expand, center: forwarded to F.rotate for both img and lbl.

    Raises:
        ValueError: if degrees is a negative number or a sequence whose
            length is not 2.
    """

    def __init__(self, degrees, resample=False, expand=False, center=None):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number, it must be positive.')
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, it must be of len 2.')
            self.degrees = degrees
        self.resample = resample
        self.expand = expand
        self.center = center

    @staticmethod
    def get_params(degrees):
        """Sample an angle uniformly from [degrees[0], degrees[1]].

        BUG FIX: this was previously a plain instance method, so the existing
        call `self.get_params(self.degrees)` passed two positional arguments
        to a one-argument function and raised TypeError.
        """
        return random.uniform(degrees[0], degrees[1])

    def __call__(self, img, lbl):
        angle = self.get_params(self.degrees)
        # Apply the identical rotation to image and label so they stay aligned.
        return (F.rotate(img, angle, self.resample, self.expand, self.center),
                F.rotate(lbl, angle, self.resample, self.expand, self.center))

    def __repr__(self):
        format_string = (self.__class__.__name__ + '(degrees={0}'.format(self.degrees))
        format_string += ', resample={0}'.format(self.resample)
        format_string += ', expand={0}'.format(self.expand)
        if (self.center is not None):
            format_string += ', center={0}'.format(self.center)
        format_string += ')'
        return format_string
def build_err_msg(arrays, err_msg, header='Items are not equal:', verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
    """Assemble a multi-line comparison failure message.

    Short single-line err_msg strings are appended to the header line; longer
    ones go on their own line.  When verbose, a truncated repr of each array
    is appended, labelled with the corresponding entry of `names`.
    """
    lines = ['\n' + header]
    if err_msg:
        fits_inline = '\n' not in err_msg and len(err_msg) < (79 - len(header))
        if fits_inline:
            lines = [lines[0] + ' ' + err_msg]
        else:
            lines.append(err_msg)
    if verbose:
        for i, a in enumerate(arrays):
            # ndarrays get precision-controlled reprs; everything else repr().
            r_func = partial(array_repr, precision=precision) if isinstance(a, ndarray) else repr
            try:
                r = r_func(a)
            except Exception as exc:
                r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
            if r.count('\n') > 3:
                # Keep only the first three lines of very long reprs.
                r = '\n'.join(r.splitlines()[:3])
                r += '...'
            lines.append(' %s: %s' % (names[i], r))
    return '\n'.join(lines)
def maybe_download_and_extract(data_url):
    """Download `data_url` into FLAGS.model_dir (if not already present) and
    extract the gzipped tarball there.

    FIXES: tf.logging.info was called print-style with extra positional
    arguments, which are interpreted as %-format args — with no format
    specifiers in the message they were silently dropped; the tarfile was
    also never closed.
    """
    dest_directory = FLAGS.model_dir
    if (not os.path.exists(dest_directory)):
        os.makedirs(dest_directory)
    filename = data_url.split('/')[(- 1)]
    filepath = os.path.join(dest_directory, filename)
    if (not os.path.exists(filepath)):
        def _progress(count, block_size, total_size):
            # One-line progress meter overwritten in place via '\r'.
            sys.stdout.write(('\r>> Downloading %s %.1f%%' % (filename, ((float((count * block_size)) / float(total_size)) * 100.0))))
            sys.stdout.flush()
        (filepath, _) = urllib.request.urlretrieve(data_url, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        tf.logging.info('Successfully downloaded %s %d bytes.', filename, statinfo.st_size)
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dest_directory)
class VisionTextDualEncoderModel(metaclass=DummyObject):
    # Placeholder emitted when torch is unavailable: any instantiation goes
    # through requires_backends, which raises a helpful "install torch" error
    # instead of a bare ImportError at use time.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class GIN(torch.nn.Module):
    """Graph Isomorphism Network: a stack of GINConv layers with sum pooling
    followed by a two-layer classification head."""

    def __init__(self, args):
        super(GIN, self).__init__()
        self.args = args
        self.layers = torch.nn.ModuleList([])
        for layer_idx in range(args['num_layers'] + 1):
            # First layer consumes raw node features; the rest hidden_dim.
            in_dim = args['num_features'] if layer_idx == 0 else args['hidden_dim']
            mlp = Sequential(
                Linear(in_dim, args['hidden_dim']),
                ReLU(),
                Linear(args['hidden_dim'], args['hidden_dim']),
            )
            self.layers.append(GINConv(mlp))
        self.fc1 = Linear(args['hidden_dim'], args['hidden_dim'])
        self.fc2 = Linear(args['hidden_dim'], args['num_classes'])

    def forward(self, x, edge_index, batch):
        # Message-passing stack.
        for conv in self.layers:
            x = F.relu(conv(x, edge_index))
        # Graph-level readout, then the MLP classifier.
        x = global_add_pool(x, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.args['dropout'], training=self.training)
        return F.log_softmax(self.fc2(x), dim=(- 1))
def main(_):
    """Entry point: derive the run output directory from the flags, then
    launch the model runner `m`."""
    config = flags.FLAGS
    # out_dir = <out_base_dir>/<model_name>/<zero-padded run id>
    run_suffix = str(config.run_id).zfill(2)
    config.out_dir = os.path.join(config.out_base_dir, config.model_name, run_suffix)
    m(config)
# NOTE(review): the bare `()` below appears to be the remnant of a stripped
# fixture decorator (e.g. `@pytest.fixture()`) — confirm against the original.
()
def assigner():
    # NOTE(review): patching the *class* attribute mutates Assigner globally
    # for the rest of the session (presumably so __init__ can run without the
    # real implementation — confirm); the instance-level mock assigned after
    # construction then shadows it for this fixture's object.
    Assigner.define_task_assignments = mock.Mock()
    assigner = Assigner(None, None, None)
    assigner.define_task_assignments = mock.Mock()
    return assigner
def register_statistics(name: str, stats_cls: Type[Stats]):
    """Register `stats_cls` in the global registry under `name` and under its
    per-batch-loss alias '<name>_loss_per_batch'."""
    for key in (name, name + '_loss_per_batch'):
        AVAILBALE_STATS[key] = stats_cls
def create_error_vector_with_count_above_vector(target_vector=None, argsort_vector=None, rank_weighting_vector=None, count_above_vector=None):
    """Build a per-element error vector from rank-weighted targets.

    Working in descending-rank order: positive targets contribute
    -(count_above * rank_weight), while zero targets accumulate the
    rank-weighted mass of all targets ranked above them.  The result is
    returned in the original element order.  All four inputs must be 1-D
    arrays of identical shape.
    """
    assert target_vector is not None
    assert argsort_vector is not None
    assert rank_weighting_vector is not None
    assert count_above_vector is not None
    assert target_vector.shape == argsort_vector.shape == rank_weighting_vector.shape == count_above_vector.shape
    assert len(target_vector.shape) == 1
    # Reverse to descending-rank order.
    desc_order = np.flip(argsort_vector, axis=0)
    desc_weights = np.flip(rank_weighting_vector, axis=0)
    desc_targets = target_vector[desc_order]
    desc_count_above = count_above_vector[desc_order]
    # Contribution of target==1 entries (negative, weighted by count above).
    good_errors = (((desc_count_above * desc_weights) * desc_targets) * (- 1))
    # Running rank-weighted target mass, charged to target==0 entries.
    cumulative_target_mass = np.cumsum((desc_targets * desc_weights))
    bad_errors = (cumulative_target_mass * (1 - desc_targets))
    combined = (bad_errors + good_errors)
    # Undo the descending permutation to restore original element order.
    return combined[np.argsort(desc_order)]
class GCNPropagate(tnn.MessagePassing):
    """Propagation-only GCN layer (no learnable weights): normalizes the
    adjacency and aggregates neighbor features, optionally caching the
    normalized edge structure across forward passes."""

    def __init__(self, improved: bool=False, cached: bool=False, add_self_loops: bool=True, normalization: str='sym', **kwargs):
        kwargs.setdefault('aggr', 'add')
        super().__init__(**kwargs)
        self.improved = improved
        self.cached = cached
        self.add_self_loops = add_self_loops
        self.normalization = normalization
        # Separate caches for the two edge representations.
        self._cached_edge_index = None
        self._cached_adj_t = None

    def reset_parameters(self):
        # No weights; only the normalization caches need clearing.
        self._cached_edge_index = None
        self._cached_adj_t = None

    def forward(self, x: Tensor, edge_index: Adj, edge_weight: OptTensor=None) -> Tensor:
        # Normalize once (or reuse the cache) for whichever edge format we got.
        if (self.normalization is not None):
            if isinstance(edge_index, Tensor):
                cache = self._cached_edge_index
                if (cache is None):
                    (edge_index, edge_weight) = mat_norm(edge_index, edge_weight, x.size(self.node_dim), improved=self.improved, add_self_loops=self.add_self_loops, dtype=x.dtype, normalization=self.normalization)
                    if self.cached:
                        self._cached_edge_index = (edge_index, edge_weight)
                else:
                    (edge_index, edge_weight) = (cache[0], cache[1])
            elif isinstance(edge_index, SparseTensor):
                cache = self._cached_adj_t
                if (cache is None):
                    edge_index = mat_norm(edge_index, edge_weight, x.size(self.node_dim), improved=self.improved, add_self_loops=self.add_self_loops, dtype=x.dtype, normalization=self.normalization)
                    if self.cached:
                        self._cached_adj_t = edge_index
                else:
                    edge_index = cache
        out = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=None)
        return out

    def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
        # Scale each neighbor's features by its (optional) edge weight.
        return (x_j if (edge_weight is None) else (edge_weight.view((- 1), 1) * x_j))

    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        # Fused sparse-matrix path.
        return matmul(adj_t, x, reduce=self.aggr)
# NOTE(review): `_module()` appears to be the remnant of a stripped registry
# decorator (e.g. `@DATASETS.register_module()`) — confirm against the original.
_module()
class PascalContextDataset(CustomDataset):
    """PASCAL-Context segmentation dataset (60 classes including background)."""
    # Class names; index 0 is 'background' (reduce_zero_label=False below).
    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
    # Per-class RGB visualization colors, parallel to CLASSES.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        # Images are .jpg, annotation maps .png; a split file is mandatory.
        super(PascalContextDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, reduce_zero_label=False, **kwargs)
        assert (osp.exists(self.img_dir) and (self.split is not None))
class BatchNormalization(ModelLayer):
    """Caffe2 model layer wrapping SpatialBN.

    Accepts 4D inputs (3 feature dims) directly; 2D inputs (1 feature dim)
    are expanded to 4D with trailing singleton dims around the SpatialBN op
    and squeezed back afterwards.
    """

    def __init__(self, model, input_record, name='batch_normalization', scale_optim=None, bias_optim=None, momentum=0.9, order='NCHW', scale_init_value=1.0, **kwargs):
        super(BatchNormalization, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Scalar), 'Incorrect input type'
        self.input_shape = input_record.field_type().shape
        # Channel count depends on the memory layout of 4D inputs.
        if (len(self.input_shape) == 3):
            if (order == 'NCHW'):
                input_dims = self.input_shape[0]
            elif (order == 'NHWC'):
                input_dims = self.input_shape[2]
            else:
                raise ValueError('Please specify a correct order')
        else:
            assert (len(self.input_shape) == 1), 'This layer supports only 4D or 2D tensors'
            input_dims = self.input_shape[0]
        self.output_schema = schema.Scalar((np.float32, self.input_shape), self.get_next_blob_reference('output'))
        self.momentum = momentum
        self.order = order
        # Learnable scale/bias plus non-optimized running statistics.
        self.scale = self.create_param(param_name='scale', shape=[input_dims], initializer=('ConstantFill', {'value': scale_init_value}), optimizer=scale_optim)
        self.bias = self.create_param(param_name='bias', shape=[input_dims], initializer=('ConstantFill', {'value': 0.0}), optimizer=bias_optim)
        self.rm = self.create_param(param_name='running_mean', shape=[input_dims], initializer=('ConstantFill', {'value': 0.0}), optimizer=model.NoOptim)
        self.riv = self.create_param(param_name='running_inv_var', shape=[input_dims], initializer=('ConstantFill', {'value': 1.0}), optimizer=model.NoOptim)

    def _add_ops(self, net, is_test, out_blob=None):
        original_input_blob = self.input_record.field_blobs()
        input_blob = net.NextScopedBlob('expand_input')
        # 2D input: lift to 4D so SpatialBN can consume it.
        if (len(self.input_shape) == 1):
            input_blob = net.ExpandDims(original_input_blob, dims=[2, 3])
        else:
            input_blob = original_input_blob[0]
        if (out_blob is None):
            bn_output = self.output_schema.field_blobs()
        else:
            bn_output = out_blob
        if is_test:
            output_blobs = bn_output
        else:
            # Training mode additionally outputs updated running stats and
            # the saved mean/inv-stddev used for the backward pass.
            output_blobs = (bn_output + [self.rm, self.riv, net.NextScopedBlob('bn_saved_mean'), net.NextScopedBlob('bn_saved_iv')])
        net.SpatialBN([input_blob, self.scale, self.bias, self.rm, self.riv], output_blobs, momentum=self.momentum, is_test=is_test, order=self.order)
        # Undo the ExpandDims for 2D inputs.
        if (len(self.input_shape) == 1):
            net.Squeeze(bn_output, bn_output, dims=[2, 3])

    def add_train_ops(self, net):
        self._add_ops(net, is_test=False)

    def add_eval_ops(self, net):
        self._add_ops(net, is_test=True)

    def add_ops(self, net):
        # Default (prediction) net uses eval-mode batch norm.
        self.add_eval_ops(net)
def init_segmentor(config, checkpoint=None, device='cuda:0', classes=None, palette=None, revise_checkpoint=None):
    """Initialize a segmentor from a config and an optional checkpoint.

    Args:
        config (str | mmcv.Config): config file path or loaded Config object.
        checkpoint (str, optional): checkpoint path; if None the model keeps
            its random initialization.
        device (str): device to place the model on.
        classes (list, optional): override for class names from the
            checkpoint metadata.
        palette (list, optional): override for the color palette from the
            checkpoint metadata.
        revise_checkpoint (list[tuple], optional): (regex, replacement) pairs
            applied to checkpoint keys; defaults to stripping a leading
            'module.' prefix.  FIX: this was previously a mutable default
            argument shared across calls.

    Returns:
        The constructed segmentor in eval mode.

    Raises:
        TypeError: if config is neither a path nor an mmcv.Config.
    """
    if revise_checkpoint is None:
        revise_checkpoint = [('^module\\.', '')]
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif (not isinstance(config, mmcv.Config)):
        raise TypeError('config must be a filename or Config object, but got {}'.format(type(config)))
    # Drop training-only settings so the model builds for inference.
    config.model.pretrained = None
    config.model.train_cfg = None
    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
    if (checkpoint is not None):
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu', revise_keys=revise_checkpoint)
        model.CLASSES = (checkpoint['meta']['CLASSES'] if (classes is None) else classes)
        model.PALETTE = (checkpoint['meta']['PALETTE'] if (palette is None) else palette)
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class QAttentionStackAgent(Agent):
    """Stack of per-depth Q-attention agents acting coarse-to-fine.

    Each layer predicts an attention coordinate (translation); its outputs
    are fed into the next layer's observation, and the deepest layer(s) also
    predict discrete rotation/gripper indices.
    """

    def __init__(self, qattention_agents: List[QAttentionAgent], rotation_resolution: float, camera_names: List[str], rotation_prediction_depth: int=0):
        super(QAttentionStackAgent, self).__init__()
        self._qattention_agents = qattention_agents
        # Degrees per discrete rotation bin (used to decode Euler indices).
        self._rotation_resolution = rotation_resolution
        self._camera_names = camera_names
        self._rotation_prediction_depth = rotation_prediction_depth

    def build(self, training: bool, device=None) -> None:
        """Build every sub-agent; falls back to CPU when no device is given."""
        self._device = device
        if (self._device is None):
            self._device = torch.device('cpu')
        for qa in self._qattention_agents:
            qa.build(training, device)

    def update(self, step: int, replay_sample: dict) -> dict:
        """Update each sub-agent in turn, threading each layer's update dict
        into the replay sample for the next layer; returns combined priority."""
        priorities = 0
        for qa in self._qattention_agents:
            update_dict = qa.update(step, replay_sample)
            priorities += update_dict['priority']
            replay_sample.update(update_dict)
        return {'priority': (priorities ** REPLAY_ALPHA)}

    def act(self, step: int, observation: dict, deterministic=False) -> ActResult:
        """Run the agents coarse-to-fine and assemble the continuous action.

        Each layer's predicted attention coordinate is projected into every
        camera image, giving the next layer pixel coordinates. The final
        continuous action is [xyz, quaternion, gripper].
        """
        observation_elements = {}
        (translation_results, rot_grip_results) = ([], [])
        infos = {}
        for (depth, qagent) in enumerate(self._qattention_agents):
            act_results = qagent.act(step, observation, deterministic)
            attention_coordinate = act_results.observation_elements['attention_coordinate'].cpu().numpy()
            observation_elements[('attention_coordinate_layer_%d' % depth)] = attention_coordinate[0]
            (translation_idxs, rot_grip_idxs) = act_results.action
            translation_results.append(translation_idxs)
            if (rot_grip_idxs is not None):
                rot_grip_results.append(rot_grip_idxs)
            # Feed this layer's outputs into the next layer's observation.
            observation['attention_coordinate'] = act_results.observation_elements['attention_coordinate']
            observation['prev_layer_voxel_grid'] = act_results.observation_elements['prev_layer_voxel_grid']
            for n in self._camera_names:
                # Project the 3-D attention point into each camera's image plane.
                (px, py) = utils.point_to_pixel_index(attention_coordinate[0], observation[('%s_camera_extrinsics' % n)][(0, 0)].cpu().numpy(), observation[('%s_camera_intrinsics' % n)][(0, 0)].cpu().numpy())
                pc_t = torch.tensor([[[py, px]]], dtype=torch.float32, device=self._device)
                observation[('%s_pixel_coord' % n)] = pc_t
                observation_elements[('%s_pixel_coord' % n)] = [py, px]
            infos.update(act_results.info)
        # assumes at least one layer produced rotation/gripper indices -- TODO confirm
        rgai = torch.cat(rot_grip_results, 1)[0].cpu().numpy()
        observation_elements['trans_action_indicies'] = torch.cat(translation_results, 1)[0].cpu().numpy()
        observation_elements['rot_grip_action_indicies'] = rgai
        # xyz from the deepest attention coordinate, quaternion decoded from
        # the 3 Euler bin indices, then the gripper bit.
        continuous_action = np.concatenate([act_results.observation_elements['attention_coordinate'].cpu().numpy()[0], utils.discrete_euler_to_quaternion(rgai[(- 4):(- 1)], self._rotation_resolution), rgai[(- 1):]])
        return ActResult(continuous_action, observation_elements=observation_elements, info=infos)

    def update_summaries(self) -> List[Summary]:
        """Collect training summaries from all sub-agents."""
        summaries = []
        for qa in self._qattention_agents:
            summaries.extend(qa.update_summaries())
        return summaries

    def act_summaries(self) -> List[Summary]:
        """Collect acting summaries from all sub-agents."""
        s = []
        for qa in self._qattention_agents:
            s.extend(qa.act_summaries())
        return s

    def load_weights(self, savedir: str):
        """Load every sub-agent's weights from `savedir`."""
        for qa in self._qattention_agents:
            qa.load_weights(savedir)

    def save_weights(self, savedir: str):
        """Save every sub-agent's weights into `savedir`."""
        for qa in self._qattention_agents:
            qa.save_weights(savedir)
def add_attached_file(filename):
    """Register *filename* for attach/reload tracking.

    Installs the Sage input hook (which polls attached files) and records the
    file's absolute path together with its current modification time.
    """
    sage.repl.inputhook.install()
    abs_path = os.path.abspath(filename)
    attached[abs_path] = os.path.getmtime(abs_path)
def get_image_list(train_list_path):
    """Read an image-list file and return its lines with newlines removed.

    Args:
        train_list_path: path to a text file with one image path per line.

    Returns:
        List of image path strings (newline characters stripped).
    """
    with open(train_list_path) as handle:
        return [line.replace('\n', '') for line in handle]
class PieriFactors_type_A_affine(PieriFactors_affine_type):
    """Pieri factors for an affine type A Weyl group.

    Elements are indexed by proper subsets of the index set; length and
    support can be restricted between given bounds.
    """

    def __classcall__(cls, W, min_length=0, max_length=infinity, min_support=frozenset([]), max_support=None):
        """Normalize arguments (supports to frozensets, lengths clipped to the
        support sizes and to n) so equal parents compare identical."""
        assert (W.cartan_type().is_affine() and (W.cartan_type().letter == 'A'))
        min_support = frozenset(min_support)
        if (max_support is None):
            max_support = frozenset(W.index_set())
        else:
            max_support = frozenset(max_support)
        # Length is bounded below by the required support and above by both
        # the allowed support and n (only proper subsets of the index set).
        min_length = max(min_length, len(min_support))
        max_length = min(len(max_support), max_length, (len(W.index_set()) - 1))
        return super().__classcall__(cls, W, min_length, max_length, min_support, max_support)

    def __init__(self, W, min_length, max_length, min_support, max_support):
        """Store the Weyl group and the length/support constraints.

        Raises:
            ValueError: if min_support is not contained in max_support.
        """
        Parent.__init__(self, category=FiniteEnumeratedSets())
        self.W = W
        self._min_support = frozenset(min_support)
        self._max_support = frozenset(max_support)
        if (not self._min_support.issubset(self._max_support)):
            raise ValueError('the min support must be a subset of the max support')
        # Indices that may (but need not) appear in an element's support.
        self._extra_support = self._max_support.difference(self._min_support)
        self._min_length = min_length
        self._max_length = max_length

    def subset(self, length):
        """Return the Pieri factors of this family with exactly `length`."""
        return self.__class__(self.W, min_support=self._min_support, max_support=self._max_support, min_length=length, max_length=length)

    def maximal_elements_combinatorial(self):
        """The maximal elements are exactly those of maximal length."""
        return self.subset(self._max_length)

    def _test_maximal_elements(self, **options):
        """Run the generic maximal-elements test only for the full family."""
        tester = self._tester(**options)
        index_set = self.W.index_set()
        if ((self._min_length > 0) or (self._max_length < (len(self.W.index_set()) - 1)) or (self._max_support != frozenset(index_set))):
            tester.info('\n  Strict subset of the Pieri factors; skipping test')
            return
        return super()._test_maximal_elements(**options)

    def __contains__(self, w):
        """Membership test for a Weyl group element `w`.

        `w` is a Pieri factor iff its reduced word has no repeated letter,
        its support satisfies the length/support bounds, and within the word
        each letter i occurring with its cyclic successor i+1 (mod n+1) has
        i appearing before i+1 -- presumably the cyclically-decreasing
        condition; TODO confirm against the reference.
        """
        if (w not in self.W):
            raise ValueError('{} is not an element of the Weyl group'.format(w))
        n = (len(self.W.index_set()) - 1)
        red = w.reduced_word()
        support = set(red)
        if (len(support) < len(red)):
            # A repeated letter means the word is not products of distinct s_i.
            return False
        if (not ((self._min_length <= len(support)) and (len(support) <= self._max_length) and self._min_support.issubset(support) and support.issubset(self._max_support))):
            return False
        [rank, unrank] = sage.combinat.ranker.from_list(red)
        for i in red:
            j = ((i + 1) % (n + 1))
            if (j in support):
                if (rank(i) < rank(j)):
                    return False
        return True

    def __getitem__(self, support):
        """Return the Pieri factor whose support is `support`.

        Raises:
            ValueError: if `support` is not a proper subset of the index set.
        """
        index_set = sorted(self.W.index_set())
        support = sorted(support)
        if ((not set(support).issubset(set(index_set))) or (support == index_set)):
            raise ValueError('the support must be a proper subset of the index set')
        if (not support):
            return self.W.one()
        s = self.W.simple_reflections()
        # Find where the support first diverges from the index set, then
        # multiply the reflections in (cyclically) decreasing order.
        i = 0
        while ((i < len(support)) and (support[i] == index_set[i])):
            i += 1
        return prod((s[j] for j in (list(reversed(support[0:i])) + list(reversed(support[i:])))), self.W.one())

    def cardinality(self):
        """Number of elements; closed form 2^|extra| - 1 for the full range,
        otherwise summed via the generating series."""
        if ((self._min_length == len(self._min_support)) and (self._max_length == (len(self._max_support) - 1))):
            return Integer(((2 ** len(self._extra_support)) - 1))
        else:
            return self.generating_series(weight=ConstantFunction(1))

    def generating_series(self, weight=None):
        """Sum of weight(l) times the number of supports of size l."""
        if (weight is None):
            weight = self.default_weight()
        l_min = len(self._min_support)
        l_max = len(self._max_support)
        return sum(((binomial((l_max - l_min), (l - l_min)) * weight(l)) for l in range(self._min_length, (self._max_length + 1))))

    def __iter__(self):
        """Iterate over elements by support: min_support plus every subset of
        the extra support of the appropriate size."""
        from sage.combinat.subset import Subsets
        for l in range(self._min_length, (self._max_length + 1)):
            for extra in Subsets(self._extra_support, (l - len(self._min_support))):
                (yield self[self._min_support.union(extra)])

    def stanley_symm_poly_weight(self, w):
        """Weight in the Stanley symmetric polynomial; zero in this type."""
        return 0
def test_list_real():
    """ArrayBuilder should snapshot nested real lists (including an empty
    one) and support slicing of the snapshot."""
    builder = ak.highlevel.ArrayBuilder()
    for chunk in ([1.1, 2.2, 3.3], [], [4.4, 5.5]):
        builder.begin_list()
        for value in chunk:
            builder.real(value)
        builder.end_list()
    expected = [[1.1, 2.2, 3.3], [], [4.4, 5.5]]
    assert (to_list(builder.snapshot()) == expected)
    assert (to_list(builder) == expected)
    assert (to_list(builder.snapshot()[1:(- 1)]) == [[]])
    assert (to_list(builder.snapshot()[1:]) == [[], [4.4, 5.5]])
@pytest.mark.parametrize('pd_dtype', ['Int8', 'Int16', 'UInt8', 'UInt16', 'Float32', 'Float64'])
@pytest.mark.parametrize('dtype, expected_dtype', [([np.float32, np.float64], np.float32), (np.float64, np.float64), ('numeric', np.float64)])
def test_check_array_pandas_na_support(pd_dtype, dtype, expected_dtype):
    """check_array converts pandas nullable (NA-aware) dtypes to float with
    NaN, and rejects NaN when force_all_finite=True.

    BUG FIX: the two parametrize decorators had been stripped to bare
    '.parametrize(...)' expressions (a syntax error); restored as
    '@pytest.mark.parametrize'.
    """
    pd = pytest.importorskip('pandas')
    if (pd_dtype in {'Float32', 'Float64'}):
        # Nullable float extension dtypes only exist in pandas >= 1.2.
        pd = pytest.importorskip('pandas', minversion='1.2')
    X_np = np.array([[1, 2, 3, np.nan, np.nan], [np.nan, np.nan, 8, 4, 6], [1, 2, 3, 4, 5]]).T
    X = pd.DataFrame(X_np, dtype=pd_dtype, columns=['a', 'b', 'c'])
    # Mix in one plain-float column to exercise heterogeneous dtypes.
    X['c'] = X['c'].astype('float')
    X_checked = check_array(X, force_all_finite='allow-nan', dtype=dtype)
    assert_allclose(X_checked, X_np)
    assert (X_checked.dtype == expected_dtype)
    X_checked = check_array(X, force_all_finite=False, dtype=dtype)
    assert_allclose(X_checked, X_np)
    assert (X_checked.dtype == expected_dtype)
    msg = 'Input contains NaN'
    with pytest.raises(ValueError, match=msg):
        check_array(X, force_all_finite=True)
def all_gather_list(data, group=None, max_size=16384):
    """Gather arbitrary picklable `data` from all workers into a list.

    Each rank pickles its payload into a fixed-size slot of a shared CUDA
    buffer ([4-byte big-endian length][pickled bytes]), the buffers are
    combined with all_reduce (slots are zero everywhere except the owning
    rank, so summation concatenates), and every rank unpickles all slots.

    All workers must call this together with the same max_size.

    Raises:
        ValueError: if the pickled payload does not fit in a slot.
    """
    SIZE_STORAGE_BYTES = 4
    enc = pickle.dumps(data)
    enc_size = len(enc)
    if ((enc_size + SIZE_STORAGE_BYTES) > max_size):
        raise ValueError('encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))
    rank = get_rank()
    world_size = get_world_size()
    buffer_size = (max_size * world_size)
    # Lazily (re)allocate the shared buffers, cached on the function object.
    if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    assert (enc_size < (256 ** SIZE_STORAGE_BYTES)), 'Encoded object size should be less than {} bytes'.format((256 ** SIZE_STORAGE_BYTES))
    # Slot layout: 4-byte big-endian payload length, then the payload.
    size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')
    cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))
    cpu_buffer[SIZE_STORAGE_BYTES:(enc_size + SIZE_STORAGE_BYTES)] = torch.ByteTensor(list(enc))
    start = (rank * max_size)
    size = (enc_size + SIZE_STORAGE_BYTES)
    buffer[start:(start + size)].copy_(cpu_buffer[:size])
    # Summing zero-initialized buffers concatenates every rank's slot.
    all_reduce(buffer, group=group)
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[(i * max_size):((i + 1) * max_size)]
            size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')
            if (size > 0):
                result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES:(size + SIZE_STORAGE_BYTES)].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception('Unable to unpickle data from other workers. all_gather_list requires all workers to enter the function together, so this error usually indicates that the workers have fallen out of sync somehow. Workers can fall out of sync if one of them runs out of memory, or if there are other conditions in your training script that can cause one worker to finish an epoch while other workers are still iterating over their portions of the data.')
def _init():
    """Lazily build and cache the filtered_lrelu CUDA extension.

    Compiles the plugin on first call (cached in the module-level `_plugin`);
    subsequent calls are no-ops.

    Returns:
        True once the plugin is available.
    """
    global _plugin
    if (_plugin is None):
        _plugin = custom_ops.get_plugin(module_name='filtered_lrelu_plugin', sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'], headers=['filtered_lrelu.h', 'filtered_lrelu.cu'], source_dir=os.path.dirname(__file__), extra_cuda_cflags=['--use_fast_math'])
    return True
def register_Ns3AnimPacketInfo_methods(root_module, cls):
    """Register constructors, methods and attributes of ns3::AnimPacketInfo
    on its PyBindGen class wrapper (auto-generated binding code)."""
    # Copy constructor, default constructor, and full-field constructor.
    cls.add_constructor([param('ns3::AnimPacketInfo const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice const >', 'tx_nd'), param('ns3::Time const &', 'fbTx'), param('ns3::Time const &', 'lbTx'), param('ns3::Vector', 'txLoc'), param('uint32_t', 'txNodeId', default_value='0')])
    cls.add_method('GetRxInfo', 'ns3::AnimRxInfo', [param('ns3::Ptr< ns3::NetDevice const >', 'nd')])
    cls.add_method('ProcessRxBegin', 'void', [param('ns3::Ptr< ns3::NetDevice const >', 'nd'), param('ns3::Time const &', 'fbRx')])
    cls.add_method('ProcessRxDrop', 'void', [param('ns3::Ptr< ns3::NetDevice const >', 'nd')])
    cls.add_method('ProcessRxEnd', 'bool', [param('ns3::Ptr< ns3::NetDevice const >', 'nd'), param('ns3::Time const &', 'fbRx'), param('ns3::Vector', 'rxLoc')])
    cls.add_method('RemoveRxInfo', 'void', [param('ns3::Ptr< ns3::NetDevice const >', 'nd')])
    # Public data members exposed as instance attributes.
    cls.add_instance_attribute('firstlastbitDelta', 'double', is_const=False)
    cls.add_instance_attribute('m_fbTx', 'double', is_const=False)
    cls.add_instance_attribute('m_lbTx', 'double', is_const=False)
    cls.add_instance_attribute('m_rx', 'std::map< unsigned int, ns3::AnimRxInfo >', is_const=False)
    cls.add_instance_attribute('m_txLoc', 'ns3::Vector', is_const=False)
    cls.add_instance_attribute('m_txNodeId', 'uint32_t', is_const=False)
    cls.add_instance_attribute('m_txnd', 'ns3::Ptr< ns3::NetDevice const >', is_const=False)
    return
class Tanh_GoogLeNet(nn.Module):
    """GoogLeNet variant whose stem uses a Tanh activation; 100-way output."""

    def __init__(self):
        super(Tanh_GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), nn.Tanh())
        # Inception blocks; channel counts follow the standard GoogLeNet plan.
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 100)

    def forward(self, x):
        """Run the stem, inception stages, pooling and the final classifier."""
        stages = (self.pre_layers, self.a3, self.b3, self.maxpool, self.a4, self.b4, self.c4, self.d4, self.e4, self.maxpool, self.a5, self.b5, self.avgpool)
        out = x
        for stage in stages:
            out = stage(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def template_simulation_with_mtls(spec, scene, sim_props, delete_on_clean=False, caching=False, save_maya_scene=False):
    """Simulate a garment spec once per material type and render variations.

    For every material name present in both the scene's material library and
    `sim_props`, the garment is loaded, simulated, its mesh saved, and ten
    randomized material/lighting renders are produced. Optionally the Maya
    scene is saved before the garment is cleaned up.
    """
    shd_names = list(scene.Mtls.material_types)
    num_body = 1
    sim_names = list(sim_props.keys())
    # Only simulate materials that have both a shader and sim properties.
    names = list(set(shd_names).intersection(sim_names))
    for name in names:
        print('===>> Current MTL type: {}'.format(name))
        garment = mymaya.MayaGarment(spec)
        try:
            garment.load(shader_group=scene.update_cloth_SG(name), obstacles=[scene.body], config=sim_props[name]['config'])
        except mymaya.PatternLoadingError as e:
            # Record the failure and skip simulation for this material.
            sim_props[name]['stats']['fails']['pattern_loading'].append(garment.name)
        else:
            garment.sim_caching(caching)
            qw.run_sim(garment, sim_props[name])
            garment.save_mesh(tag='sim_{}'.format(name.split(':')[(- 1)]))
            # Ten randomized appearance variations per simulated garment.
            for i in range(10):
                if (name == 'default'):
                    scene.Mtls.random_default_shader(texture_path=scene.textures_path)
                else:
                    scene.Mtls.random_load_materials(name, patterns_path=scene.textures_path)
                scene.random_light_color()
                scene.render_multiple_rot(garment.path, (garment.name + '_{}_{}'.format(name.split(':')[(- 1)], int(i))))
            if save_maya_scene:
                cmds.file(rename=os.path.join(garment.path, (garment.name + '_scene')))
                cmds.file(save=True, type='mayaBinary', force=True, defaultExtensions=True)
        garment.clean(delete_on_clean)
def load_transform_data_fn(path):
    """Load cell traces from every file in `path`, pad them to equal length.

    A file whose name contains '-' is labelled 1, otherwise 0 -- presumably
    monitored vs unmonitored traces; TODO confirm naming convention.

    Returns:
        (padded_cells, labels, positions): zero-padded trace matrix, the
        per-file label array, and an index array over the padded length.
    """
    labels = []
    traces = []
    longest = -1
    for entry in tqdm(os.listdir(path)):
        full_path = os.path.join(path, entry)
        if not os.path.isfile(full_path):
            continue
        cells = load_cell(full_path)
        labels.append(1 if ('-' in str(entry)) else 0)
        traces.append(cells)
        longest = max(longest, len(cells))
    padded_cells = np.array([np.pad(trace, (0, longest - len(trace)), 'constant') for trace in traces])
    return (padded_cells, np.array(labels), np.arange(padded_cells.shape[1]))
class BiaffineScorer(nn.Module):
    """Biaffine scorer: appends a bias dimension (constant 1) to each input
    and applies a bilinear map, giving the full biaffine form with zero-
    initialized parameters."""

    def __init__(self, input1_size, input2_size, output_size):
        super().__init__()
        # +1 on each side accounts for the appended bias feature.
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size)
        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_()

    def forward(self, input1, input2):
        """Score the pair of inputs along their last dimension."""
        ones1 = input1.new_ones(*input1.size()[:-1], 1)
        ones2 = input2.new_ones(*input2.size()[:-1], 1)
        augmented1 = torch.cat([input1, ones1], input1.dim() - 1)
        augmented2 = torch.cat([input2, ones2], input2.dim() - 1)
        return self.W_bilin(augmented1, augmented2)
def quantile(arr, q, weights=None):
    """Compute quantile(s) of `arr`, optionally weighted.

    Args:
        arr: sample values.
        q: quantile(s) in [0, 1] (clipped); scalar or sequence.
        weights: optional per-sample weights (numpy array, same length as
            `arr`); when given, the weighted inverted-CDF quantile is used.

    Returns:
        The quantile value(s); zeros (matching q's shape) for empty input.
    """
    q = np.clip(q, 0, 1)
    if len(arr) == 0:
        # Degenerate input: mirror q's scalar/sequence shape.
        return np.zeros(len(q)) if hasattr(q, '__len__') else 0
    if weights is None:
        return np.quantile(arr, q, method='inverted_cdf')
    assert len(weights) == len(arr)
    order = np.argsort(arr)
    cumulative = np.cumsum(weights[order])
    # Inverted CDF: smallest value whose normalized cumulative weight >= q.
    positions = np.searchsorted(cumulative / cumulative[-1], q)
    return np.asarray(arr)[order[positions]]
class DataMixin():
    """Mixin providing time-series CSV loading with a datetime index."""

    def load_data(self, file_path, nrows=None):
        """Load a CSV whose first column is the time index.

        Integer index values are interpreted as epoch milliseconds; anything
        else is parsed by pandas' default datetime handling.

        Args:
            file_path: path to the CSV file.
            nrows: optional row limit; when None, a progress message is logged.

        Returns:
            DataFrame indexed by datetime.
        """
        if nrows is None:
            self.logger.info('Loading the time series...')
        frame = pd.read_csv(file_path, nrows=nrows)
        first_column = frame.columns[0]
        index_dtype = frame.dtypes[first_column]
        frame = frame.set_index(first_column)
        unit = 'ms' if index_dtype in [np.int32, np.int64] else None
        frame.index = pd.to_datetime(frame.index.values, unit=unit)
        return frame
def freq_calc(create_loss, nbins=None):
    """Build a FrequentistCalculator for a toy fit.

    Args:
        create_loss: factory returning (loss, params) where the third entry
            of params is the mean parameter of interest.
        nbins: forwarded to the loss factory (None presumably means an
            unbinned loss -- TODO confirm).

    Returns:
        (mean, calculator): the mean parameter and a calculator preloaded
        with toys from the bundled YAML file, minimized with Minuit.
    """
    (loss, (_, __, mean, ___)) = create_loss(npeak=80, nbins=nbins)
    calculator = FrequentistCalculator.from_yaml(f'{notebooks_dir}/toys/ci_freq_zfit_toys.yml', loss, Minuit())
    return (mean, calculator)
class FFN(nn.Module):
    """Position-wise feed-forward block: hidden -> ff_size -> hidden, with
    ReLU and dropout handled inside the underlying MLP."""

    def __init__(self, hidden_size, ff_size, dropout):
        super(FFN, self).__init__()
        self.mlp = MLP(in_size=hidden_size, mid_size=ff_size, out_size=hidden_size, dropout_r=dropout, use_relu=True)

    def forward(self, x):
        # The block is a thin wrapper: delegate entirely to the MLP.
        return self.mlp(x)
def _get_dataset(sets, feature_dir, is_train=False):
    """Wrap the raw feature loader in the train/eval transform pipeline."""
    source = LoadData(sets, feature_dir)
    transform = TransformData(is_train)
    return TransformDataset(source, transform)
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z0():
    """slice_idx_generator at zoom 0 should produce the reference index set.

    BUG FIX: the two marker decorators had been stripped to bare '.unit' /
    '.convert' expressions (a syntax error); restored as pytest marks.
    NOTE(review): confirm the original mark names upstream.
    """
    shape = (4305, 9791)
    zoom = 0
    tile_size = 256
    given = convert.slice_idx_generator(shape, zoom, tile_size)
    expected = helpers.get_slice_idx_generator_solution(zoom)
    # Compare as sets of hashable tuples: order of generation is irrelevant.
    comparable_given = set(map(helpers.covert_idx_to_hashable_tuple, given))
    comparable_expected = set(map(helpers.covert_idx_to_hashable_tuple, expected))
    assert (comparable_given == comparable_expected)
def action_prob_detection(bbox):
    """Pick a discrete action from a detection's bounding-box position.

    The box center is compared against five anchor points in a 300x300 frame
    (center, left, right, top, bottom edges); the nearest anchor's index is
    the action: 0=forward, 1=left, 2=right, 3=up, 4=down.

    Args:
        bbox: (x1, y1, x2, y2) box coordinates.

    Returns:
        torch scalar tensor holding the argmin action index.
    """
    cx = (bbox[0] + bbox[2]) / 2
    cy = (bbox[1] + bbox[3]) / 2
    center = np.array([cx, cy])
    anchors = ([150, 150], [0, 150], [300, 150], [150, 0], [150, 300])
    distances = [np.linalg.norm(center - np.array(a)) for a in anchors]
    return torch.argmin(torch.tensor(distances))
class VariantDict(AttrDict):
    """AttrDict whose dump() omits a configured set of hidden keys."""

    def __init__(self, d, hidden_keys):
        super(VariantDict, self).__init__(d)
        self._hidden_keys = hidden_keys

    def dump(self):
        """Return a plain dict of all entries except the hidden keys."""
        visible = {}
        for key, value in self.items():
            if key not in self._hidden_keys:
                visible[key] = value
        return visible
def warning_message(message: str) -> None:
    """Echo *message* to the terminal as a warning.

    NOTE(review): the yellow-styled prefix is an empty string -- a warning
    glyph may have been lost in transit; confirm the intended symbol.
    """
    prefix = click.style('', fg='yellow')
    click.secho(prefix + f' {message}')
def read_pfm(path):
    """Read a PFM image file.

    Args:
        path: path to the .pfm file.

    Returns:
        (data, scale): the image as a numpy float array -- (H, W, 3) for
        color ('PF') or (H, W) for grayscale ('Pf'), flipped to top-down row
        order -- and the absolute scale factor from the header.

    Raises:
        Exception: if the magic number or the dimension line is invalid.
    """
    with open(path, 'rb') as stream:
        magic = stream.readline().rstrip().decode('ascii')
        if magic == 'PF':
            color = True
        elif magic == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file: ' + path)
        dims = re.match('^(\\d+)\\s(\\d+)\\s$', stream.readline().decode('ascii'))
        if not dims:
            raise Exception('Malformed PFM header.')
        width, height = (int(v) for v in dims.groups())
        # A negative scale marks little-endian data; its magnitude is the scale.
        scale = float(stream.readline().decode('ascii').rstrip())
        if scale < 0:
            endian = '<'
            scale = -scale
        else:
            endian = '>'
        pixels = np.fromfile(stream, endian + 'f')
        shape = (height, width, 3) if color else (height, width)
        # PFM stores rows bottom-up; flip to conventional top-down order.
        return (np.flipud(np.reshape(pixels, shape)), scale)
# NOTE(review): the bare '()' below looks like a decorator stripped to its
# call -- likely '@app.command()' in the original Typer CLI; confirm upstream.
()
def azure_get_valid_skus(regions: List[str]=typer.Option(compute.AzureCloudProvider.region_list(), '--regions', '-r'), prefix: str=typer.Option('', '--prefix', help='Filter by prefix'), top_k: int=typer.Option((- 1), '--top-k', help='Print top k entries')):
    """List Azure VM SKUs available (unrestricted) per region, most-widely
    available first, optionally filtered by name prefix and truncated to
    the top-k entries."""
    auth = compute.AzureAuthentication()
    client = auth.get_compute_client()
    def get_skus(region):
        # A SKU is usable only when it has no restrictions in the region.
        valid_skus = []
        for sku in client.resource_skus.list(filter="location eq '{}'".format(region)):
            if ((len(sku.restrictions) == 0) and ((not prefix) or sku.name.startswith(prefix))):
                valid_skus.append(sku.name)
        return set(valid_skus)
    result = do_parallel(get_skus, regions, spinner=True, desc='Query SKUs')
    # Invert region->skus into sku->regions for availability counting.
    sku_regions = defaultdict(set)
    for (region, skus) in result:
        for sku in skus:
            sku_regions[sku].add(region)
    sorted_top_keys = sorted(sku_regions.keys(), key=(lambda x: len(sku_regions[x])), reverse=True)
    if (top_k > 0):
        sorted_top_keys = sorted_top_keys[:top_k]
    for sku in sorted_top_keys:
        typer.secho(f'{sku} in {len(sku_regions[sku])} regions: {list(sorted(sku_regions[sku]))}')
def ter(hyps: List[Union[(str, List[str])]], refs: List[Union[(str, List[str])]]) -> float:
    """Compute the token error rate: total edit distance between each
    hypothesis/reference pair divided by the total reference length.

    Args:
        hyps: hypothesis strings or token lists.
        refs: reference strings or token lists (paired with `hyps`).

    Returns:
        Edit-distance errors per reference token; 0.0 for empty input.
    """
    error_tokens = 0
    total_tokens = 0
    for (h, r) in zip(hyps, refs):
        error_tokens += ed.eval(h, r)
        total_tokens += len(r)
    # BUG FIX: guard against ZeroDivisionError when refs are empty (or all
    # references have zero length).
    if total_tokens == 0:
        return 0.0
    return float(error_tokens) / float(total_tokens)
def find_meta(_meta, string):
    """Extract a double-quoted metadata value assigned to `string`.

    Searches `_meta` (multiline) for a line of the form `string = "value"`
    and returns the value.

    Raises:
        RuntimeError: if no such assignment is found.
    """
    pattern = ('^' + string) + '\\s*=\\s*"(.*)"'
    match = re.search(pattern, _meta, re.M)
    if not match:
        raise RuntimeError(f'Unable to find {string} string.')
    return match.group(1)
def typetracer_from_form(form: ((Form | str) | Mapping), *, highlevel: bool=True, behavior: (Mapping | None)=None, attrs: (Mapping[(str, Any)] | None)=None) -> (Array | Content):
    """Build a length-less typetracer array from a Form (or its string/dict
    representation).

    Args:
        form: a Form instance, a primitive dtype name or JSON string, or a
            dict form description.
        highlevel: wrap the result as a high-level Array when True.
        behavior, attrs: forwarded to the layout wrapper.

    Raises:
        TypeError: if `form` is none of the accepted representations.
    """
    if isinstance(form, str):
        # A bare primitive name becomes a NumpyForm; anything else is JSON.
        form = awkward.forms.NumpyForm(form) if is_primitive(form) else awkward.forms.from_json(form)
    elif isinstance(form, Mapping):
        form = awkward.forms.from_dict(form)
    elif not isinstance(form, awkward.forms.Form):
        raise TypeError("'form' argument must be a Form or its Python dict/JSON string representation")
    zero_length = form.length_zero_array(highlevel=False)
    layout = zero_length.to_typetracer(forget_length=True)
    return wrap_layout(layout, behavior=behavior, highlevel=highlevel, attrs=attrs)
def main(argv=None):
    """Build the VGG16-based FCN graph and run the training loop.

    Relies on module-level configuration (model_path, Train_Image_Dir,
    logs_dir, Batch_Size, MAX_ITERATION, ...). Writes train/validation loss
    logs and saves checkpoints every 500 iterations.
    """
    tf.reset_default_graph()
    # NOTE(review): tensor name 'keep_probabilty' is misspelled but kept --
    # external tooling may look the placeholder up by this name.
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    ROIMap = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='ROIMap')
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='GTLabel')
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, ROIMap, NUM_CLASSES, keep_prob)
    Loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(GTLabel, squeeze_dims=[3]), logits=Net.Prob, name='Loss'))
    trainable_var = tf.trainable_variables()
    train_op = train(Loss, trainable_var)
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, ROIMap_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, ROIMap_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # Resume from the latest checkpoint when one exists.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if (ckpt and ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    # Start fresh loss logs (context managers replace manual open/close).
    with open(TrainLossTxtFile, 'w') as f:
        f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    if UseValidationSet:
        with open(ValidLossTxtFile, 'w') as f:
            f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    for itr in range(MAX_ITERATION):
        (Images, ROIMaps, GTLabels) = TrainReader.ReadAndAugmentNextBatch()
        feed_dict = {image: Images, GTLabel: GTLabels, ROIMap: ROIMaps, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)
        if (((itr % 500) == 0) and (itr > 0)):
            print('Saving Model to file in' + logs_dir)
            saver.save(sess, (logs_dir + 'model.ckpt'), itr)
        if ((itr % 10) == 0):
            # Report training loss on the current batch without dropout.
            feed_dict = {image: Images, GTLabel: GTLabels, ROIMap: ROIMaps, keep_prob: 1}
            TLoss = sess.run(Loss, feed_dict=feed_dict)
            print('Step ' + str(itr) + ' Train Loss=' + str(TLoss))
            with open(TrainLossTxtFile, 'a') as f:
                f.write('\n' + str(itr) + '\t' + str(TLoss))
        if (UseValidationSet and ((itr % 2000) == 0)):
            SumLoss = np.float64(0.0)
            # BUG FIX: np.int was removed from NumPy; plain int() is correct.
            NBatches = int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print('Calculating Validation on ' + str(ValidReader.NumFiles) + ' Images')
            for i in range(NBatches):
                (Images, ROIMaps, GTLabels) = ValidReader.ReadNextBatchClean()
                feed_dict = {image: Images, ROIMap: ROIMaps, GTLabel: GTLabels, keep_prob: 1.0}
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
            # BUG FIX: the original also incremented NBatches inside the loop,
            # doubling the divisor and halving the reported validation loss.
            SumLoss /= NBatches
            print('Validation Loss: ' + str(SumLoss))
            with open(ValidLossTxtFile, 'a') as f:
                f.write('\n' + str(itr) + '\t' + str(SumLoss))
def setup_buckets(region, n_files=1, file_size_mb=1, write=False):
    """Create a uniquely-named test bucket in `region` and optionally seed it.

    Args:
        region: 'provider:zone' string (e.g. 'aws:us-east-1').
        n_files: number of objects to upload when `write` is True.
        file_size_mb: size of each uploaded object in MiB.
        write: when True, upload `n_files` random-content objects under a
            fresh UUID prefix.

    Returns:
        (iface, bucket_name, prefix): the object-store interface, the created
        bucket's name, and the object key prefix.
    """
    (provider, zone) = region.split(':')
    # Azure container naming differs from AWS/GCP bucket naming.
    if (provider == 'azure'):
        bucket_name = f"integration{zone}/{str(uuid.uuid4()).replace('-', '')}"
    else:
        bucket_name = f'skyplane-integration-{zone}-{str(uuid.uuid4())[:8]}'
    logger.debug(f'creating buckets {bucket_name}')
    iface = ObjectStoreInterface.create(region, bucket_name)
    iface.create_bucket(zone)
    prefix = f'{uuid.uuid4()}'
    if write:
        # One random temp file is uploaded n_files times under the prefix.
        with tempfile.NamedTemporaryFile() as tmp:
            fpath = tmp.name
            with open(fpath, 'wb+') as f:
                f.write(os.urandom(int((file_size_mb * MB))))
            for i in range(n_files):
                iface.upload_object(fpath, f'{prefix}/{i}', mime_type='text/plain')
    return (iface, bucket_name, prefix)
def test_get_ontology_path_cost():
    """Exercise get_ontology_paths / get_ontology_path_cost on an augmented
    basic ontology and print each path and its cost (smoke test -- no
    assertions)."""
    o = basic_ontology
    # Add two nullary symbols: a number constant '5' and a reference 'O'.
    s0 = Function('5', [], o.types['number'])
    s1 = Function('O', [], o.types['reference'])
    oo = augment_ontology(o, {s0.name: s0, s1.name: s1})
    # NOTE(review): s2-s5, truth, number, perp and line are unused below --
    # possibly kept for interactive exploration; consider removing.
    s2 = o.functions['equal']
    s3 = o.functions['radiusOf']
    s4 = o.functions['isRadiusOf']
    s5 = o.functions['circle']
    truth = o.types['truth']
    number = o.types['number']
    perp = oo.functions['isPerpendicularTo']
    line = o.types['line']
    ref = o.types['reference']
    paths = get_ontology_paths(oo, ref, s1)
    for path in paths.values():
        print(path)
        print(get_ontology_path_cost(path))
def _bad_tensor_splits(draw):
    """Hypothesis strategy producing a 'bad' tensor-splits example.

    Draws four segment lengths and a random interleaving of (batch, segment)
    pairs, then lays segments out contiguously: batch 2's segments get zero
    length, batches 0-1 get half length, batch 3 gets full length --
    presumably to construct inconsistent splits for a failure-path test;
    TODO confirm against the op under test.

    Returns:
        (data, ranges, lengths, key): float32 data, per-(batch, segment)
        (offset, length) ranges, segment lengths, and an int64 permutation key.
    """
    lengths = draw(st.lists(st.integers(4, 6), min_size=4, max_size=4))
    batch_size = 4
    element_pairs = [(batch, r) for batch in range(batch_size) for r in range(len(lengths))]
    perm = draw(st.permutations(element_pairs))
    ranges = [([(0, 0)] * len(lengths)) for _ in range(batch_size)]
    offset = 0
    for pair in perm:
        if (pair[0] == 2):
            # Batch 2: deliberately empty segments.
            length = 0
        elif (pair[0] <= 1):
            # Batches 0 and 1: half-length segments.
            length = (lengths[pair[1]] // 2)
        else:
            # Batch 3: full-length segments.
            length = lengths[pair[1]]
        ranges[pair[0]][pair[1]] = (offset, length)
        offset += length
    data = draw(st.lists(st.floats(min_value=(- 1.0), max_value=1.0), min_size=offset, max_size=offset))
    key = draw(st.permutations(range(offset)))
    return (np.array(data).astype(np.float32), np.array(ranges), np.array(lengths), np.array(key).astype(np.int64))
class ParserElement(object):
DEFAULT_WHITE_CHARS = ' \n\t\r'
verbose_stacktrace = False
def setDefaultWhitespaceChars(chars):
ParserElement.DEFAULT_WHITE_CHARS = chars
def inlineLiteralsUsing(cls):
ParserElement._literalStringClass = cls
def __init__(self, savelist=False):
self.parseAction = list()
self.failAction = None
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True
self.errmsg = ''
self.modalResults = True
self.debugActions = (None, None, None)
self.re = None
self.callPreparse = True
self.callDuringTry = False
def copy(self):
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
self.name = name
self.errmsg = ('Expected ' + self.name)
if hasattr(self, 'exception'):
self.exception.msg = self.errmsg
return self
def setResultsName(self, name, listAllMatches=False):
newself = self.copy()
if name.endswith('*'):
name = name[:(- 1)]
listAllMatches = True
newself.resultsName = name
newself.modalResults = (not listAllMatches)
return newself
def setBreak(self, breakFlag=True):
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
elif hasattr(self._parse, '_originalParseMethod'):
self._parse = self._parse._originalParseMethod
return self
def setParseAction(self, *fns, **kwargs):
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get('callDuringTry', False)
return self
def addParseAction(self, *fns, **kwargs):
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = (self.callDuringTry or kwargs.get('callDuringTry', False))
return self
def addCondition(self, *fns, **kwargs):
msg = kwargs.get('message', 'failed user-defined condition')
exc_type = (ParseFatalException if kwargs.get('fatal', False) else ParseException)
for fn in fns:
def pa(s, l, t):
if (not bool(_trim_arity(fn)(s, l, t))):
raise exc_type(s, l, msg)
self.parseAction.append(pa)
self.callDuringTry = (self.callDuringTry or kwargs.get('callDuringTry', False))
return self
def setFailAction(self, fn):
self.failAction = fn
return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
(loc, dummy) = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while ((loc < instrlen) and (instring[loc] in wt)):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return (loc, [])
def postParse(self, instring, loc, tokenlist):
return tokenlist
def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
debugging = self.debug
if (debugging or self.failAction):
if self.debugActions[0]:
self.debugActions[0](instring, loc, self)
if (callPreParse and self.callPreparse):
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
try:
try:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
except ParseBaseException as err:
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if (callPreParse and self.callPreparse):
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if (self.mayIndexError or (loc >= len(instring))):
try:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
if (self.parseAction and (doActions or self.callDuringTry)):
if debugging:
try:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if (tokens is not None):
retTokens = ParseResults(tokens, self.resultsName, asList=(self.saveAsList and isinstance(tokens, (ParseResults, list))), modal=self.modalResults)
except ParseBaseException as err:
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if (tokens is not None):
retTokens = ParseResults(tokens, self.resultsName, asList=(self.saveAsList and isinstance(tokens, (ParseResults, list))), modal=self.modalResults)
if debugging:
if self.debugActions[1]:
self.debugActions[1](instring, tokensStart, loc, self, retTokens)
return (loc, retTokens)
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
    """Packrat memo cache with no size bound.

    The backing dict lives in the constructor's closure; the accessors are
    bound per-instance with types.MethodType, mirroring the sibling
    _FifoCache implementations.
    """
    def __init__(self):
        store = {}
        self.not_in_cache = missing = object()

        def get(self, key):
            return store.get(key, missing)

        def set(self, key, value):
            store[key] = value

        def clear(self):
            store.clear()

        def cache_len(self):
            return len(store)

        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set, self)
        self.clear = types.MethodType(clear, self)
        self.__len__ = types.MethodType(cache_len, self)
if (_OrderedDict is not None):
    class _FifoCache(object):
        """Bounded FIFO memo cache backed by OrderedDict (oldest evicted first)."""
        def __init__(self, size):
            self.not_in_cache = not_in_cache = object()
            cache = _OrderedDict()
            def get(self, key):
                return cache.get(key, not_in_cache)
            def set(self, key, value):
                cache[key] = value
                # Trim oldest insertions once over capacity.
                while (len(cache) > size):
                    try:
                        cache.popitem(False)
                    except KeyError:
                        pass
            def clear(self):
                cache.clear()
            def cache_len(self):
                return len(cache)
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
else:
    class _FifoCache(object):
        """Bounded FIFO memo cache for Pythons without OrderedDict."""
        def __init__(self, size):
            self.not_in_cache = not_in_cache = object()
            cache = {}
            # BUG FIX: the fifo must be unbounded. The previous
            # collections.deque([], size) had maxlen=size, so a full deque
            # silently dropped its oldest key on append, the eviction loop
            # below could never fire (len never exceeds maxlen), and the
            # dict grew without bound — a memory leak.
            key_fifo = collections.deque()
            def get(self, key):
                return cache.get(key, not_in_cache)
            def set(self, key, value):
                cache[key] = value
                key_fifo.append(key)
                # Evict oldest entries so the dict stays at <= size items.
                while (len(key_fifo) > size):
                    cache.pop(key_fifo.popleft(), None)
            def clear(self):
                cache.clear()
                key_fifo.clear()
            def cache_len(self):
                return len(cache)
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
# Class-wide packrat memoization state: the cache itself (replaced by
# enablePackrat()), a re-entrant lock guarding it across threads, and
# [hit, miss] counters.
packrat_cache = {}
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
    """Memoizing front-end for _parseNoCache (packrat parsing).

    Both successful results and raised ParseBaseExceptions are cached; a
    cached exception is re-raised on a hit, and cached token results are
    copied so callers cannot mutate the cached value.
    """
    (HIT, MISS) = (0, 1)
    # The key includes every argument that can change the outcome.
    lookup = (self, instring, loc, callPreParse, doActions)
    with ParserElement.packrat_cache_lock:
        cache = ParserElement.packrat_cache
        value = cache.get(lookup)
        if (value is cache.not_in_cache):
            ParserElement.packrat_cache_stats[MISS] += 1
            try:
                value = self._parseNoCache(instring, loc, doActions, callPreParse)
            except ParseBaseException as pe:
                # Cache a fresh copy of the exception, not the raised one.
                cache.set(lookup, pe.__class__(*pe.args))
                raise
            else:
                # Copy the tokens so later mutation doesn't poison the cache.
                cache.set(lookup, (value[0], value[1].copy()))
                return value
        else:
            ParserElement.packrat_cache_stats[HIT] += 1
            if isinstance(value, Exception):
                raise value
            return (value[0], value[1].copy())
# Default to the non-memoizing parser; enablePackrat() rebinds this to
# _parseCache.
_parse = _parseNoCache
def resetCache():
    """Empty the packrat cache and zero its hit/miss statistics."""
    ParserElement.packrat_cache.clear()
    stats = ParserElement.packrat_cache_stats
    stats[:] = [0] * len(stats)
# Class-wide flag; set (irreversibly) by enablePackrat().
_packratEnabled = False
def enablePackrat(cache_size_limit=128):
    """Turn on packrat (memoizing) parsing for all ParserElements.

    ``cache_size_limit=None`` installs an unbounded cache; otherwise a
    FIFO cache of that size is used. A no-op when already enabled.
    """
    if ParserElement._packratEnabled:
        return
    ParserElement._packratEnabled = True
    if cache_size_limit is None:
        ParserElement.packrat_cache = ParserElement._UnboundedCache()
    else:
        ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
    ParserElement._parse = ParserElement._parseCache
def parseString(self, instring, parseAll=False):
    """Parse ``instring`` from the beginning and return the matched tokens.

    With ``parseAll=True`` the grammar must consume the entire string
    (modulo trailing whitespace), otherwise a ParseException is raised.
    """
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
    for ignorable in self.ignoreExprs:
        ignorable.streamline()
    if not self.keepTabs:
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse(instring, 0)
        if parseAll:
            # Require that nothing but whitespace remains after the match.
            loc = self.preParse(instring, loc)
            (Empty() + StringEnd())._parse(instring, loc)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # Re-raise from here to trim internal frames from the traceback.
        raise exc
    else:
        return tokens
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
    """Scan ``instring`` for matches, yielding (tokens, startLoc, endLoc)
    for each one.

    ``maxMatches`` caps the number of matches produced; with
    ``overlap=True`` scanning resumes just past each match *start* so
    overlapping matches are also reported.
    """
    if (not self.streamlined):
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if (not self.keepTabs):
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while ((loc <= instrlen) and (matches < maxMatches)):
            try:
                preloc = preparseFn(instring, loc)
                (nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
            except ParseException:
                loc = (preloc + 1)
            else:
                if (nextLoc > loc):
                    matches += 1
                    (yield (tokens, preloc, nextLoc))
                    if overlap:
                        # BUG FIX: previously 'loc = nextLoc' (capital L),
                        # which jumped to the match END and made
                        # overlap=True behave exactly like non-overlap.
                        # Advance only past leading whitespace at the
                        # match start so overlapping matches are found.
                        nextloc = preparseFn(instring, loc)
                        if (nextloc > loc):
                            loc = nextloc
                        else:
                            loc += 1
                    else:
                        loc = nextLoc
                else:
                    loc = (preloc + 1)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            raise exc
def transformString(self, instring):
    """Return ``instring`` with every match of this element replaced by the
    output of its parse actions; unmatched text is passed through."""
    fragments = []
    lastE = 0
    # Keep tabs intact so reported slice locations line up with the input.
    self.keepTabs = True
    try:
        for tokens, start, end in self.scanString(instring):
            fragments.append(instring[lastE:start])
            if tokens:
                if isinstance(tokens, ParseResults):
                    fragments += tokens.asList()
                elif isinstance(tokens, list):
                    fragments += tokens
                else:
                    fragments.append(tokens)
            lastE = end
        fragments.append(instring[lastE:])
        fragments = [frag for frag in fragments if frag]
        return ''.join(map(_ustr, _flatten(fragments)))
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
    """Return a ParseResults holding the tokens of every (non-overlapping)
    match of this expression in ``instring``."""
    try:
        return ParseResults([tokens for tokens, _, _ in self.scanString(instring, maxMatches)])
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
    """Generator that splits ``instring`` on matches of this expression.

    With ``includeSeparators=True`` the first token of each matched
    separator is yielded between the pieces.
    """
    # Removed dead local 'splits = 0' (assigned but never read).
    last = 0
    for (t, s, e) in self.scanString(instring, maxMatches=maxsplit):
        (yield instring[last:s])
        if includeSeparators:
            (yield t[0])
        last = e
    (yield instring[last:])
def __add__(self, other):
    """``expr + other`` builds an And (sequence) of the two expressions;
    string operands are converted to literals first."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return And([self, other])
def __radd__(self, other):
    """Support ``'literal' + expr`` by converting the left operand and
    delegating to its __add__."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other + self
def __sub__(self, other):
    """``expr - other``: sequence with an error-stop — failure after this
    point raises a fatal (non-backtracking) error."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return self + And._ErrorStop() + other
def __rsub__(self, other):
    """Support ``'literal' - expr`` by converting the left operand and
    delegating to its __sub__."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other - self
def __mul__(self, other):
    """Repetition: ``expr*n`` matches exactly n occurrences; ``expr*(m,n)``
    matches m to n; ``expr*(m,None)`` matches m or more; ``expr*(None,n)``
    matches 0 to n.

    Raises TypeError for unsupported operand types and ValueError for
    negative or all-zero counts.
    """
    if isinstance(other, int):
        (minElements, optElements) = (other, 0)
    elif isinstance(other, tuple):
        # Normalize to a 2-tuple, treating a missing/None lower bound as 0.
        other = (other + (None, None))[:2]
        if (other[0] is None):
            other = (0, other[1])
        if (isinstance(other[0], int) and (other[1] is None)):
            if (other[0] == 0):
                return ZeroOrMore(self)
            if (other[0] == 1):
                return OneOrMore(self)
            else:
                return ((self * other[0]) + ZeroOrMore(self))
        elif (isinstance(other[0], int) and isinstance(other[1], int)):
            (minElements, optElements) = other
            optElements -= minElements
        else:
            # BUG FIX: the format args were previously passed as extra
            # TypeError arguments, so the '%s' placeholders were never
            # interpolated into the message.
            raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects"
                            % (type(other[0]), type(other[1])))
    else:
        raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
    if (minElements < 0):
        raise ValueError('cannot multiply ParserElement by negative value')
    if (optElements < 0):
        raise ValueError('second tuple value must be greater or equal to first tuple value')
    if (minElements == optElements == 0):
        raise ValueError('cannot multiply ParserElement by 0 or (0,0)')
    if optElements:
        def makeOptionalList(n):
            # Build Optional(self + Optional(self + ...)) nested n deep.
            if (n > 1):
                return Optional((self + makeOptionalList((n - 1))))
            else:
                return Optional(self)
        if minElements:
            if (minElements == 1):
                ret = (self + makeOptionalList(optElements))
            else:
                ret = (And(([self] * minElements)) + makeOptionalList(optElements))
        else:
            ret = makeOptionalList(optElements)
    elif (minElements == 1):
        ret = self
    else:
        ret = And(([self] * minElements))
    return ret
def __rmul__(self, other):
    """``n * expr`` behaves exactly like ``expr * n``."""
    return self.__mul__(other)
def __or__(self, other):
    """``expr | other`` builds a MatchFirst (first alternative wins)."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return MatchFirst([self, other])
def __ror__(self, other):
    """Support ``'literal' | expr`` by converting the left operand and
    delegating to its __or__."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other | self
def __xor__(self, other):
    """``expr ^ other`` builds an Or (longest alternative wins)."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return Or([self, other])
def __rxor__(self, other):
    """Support ``'literal' ^ expr`` by converting the left operand and
    delegating to its __xor__."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other ^ self
def __and__(self, other):
    """``expr & other`` builds an Each (all must match, any order)."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return Each([self, other])
def __rand__(self, other):
    """Support ``'literal' & expr`` by converting the left operand and
    delegating to its __and__."""
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other & self
def __invert__(self):
    """``~expr`` builds a NotAny: a negative lookahead on this element."""
    return NotAny(self)
def __call__(self, name=None):
    """``expr('name')`` is shorthand for ``expr.setResultsName('name')``;
    with no name it returns a copy of the element."""
    return self.copy() if name is None else self.setResultsName(name)
def suppress(self):
    """Return a Suppress wrapper that drops this element's tokens from the
    output."""
    return Suppress(self)
def leaveWhitespace(self):
    """Disable automatic whitespace skipping before this element; returns
    self for chaining."""
    self.skipWhitespace = False
    return self
def setWhitespaceChars(self, chars):
    """Override the set of characters treated as skippable whitespace for
    this element; returns self for chaining."""
    self.whiteChars = chars
    self.skipWhitespace = True
    # This element no longer inherits the class-default whitespace set.
    self.copyDefaultWhiteChars = False
    return self
def parseWithTabs(self):
    """Keep literal tab characters (skip expandtabs) so match locations
    refer to the raw input; returns self for chaining."""
    self.keepTabs = True
    return self
def ignore(self, other):
    """Register an expression (e.g. a comment grammar) to be skipped
    wherever this element matches; strings are wrapped in Suppress."""
    if isinstance(other, basestring):
        other = Suppress(other)
    if not isinstance(other, Suppress):
        self.ignoreExprs.append(Suppress(other.copy()))
    elif other not in self.ignoreExprs:
        # Already-suppressed expressions are added at most once.
        self.ignoreExprs.append(other)
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Install custom debug callbacks for match-start, match-success and
    match-failure; any falsy argument falls back to the module default."""
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
def setDebug(self, flag=True):
    """Enable (the default) or disable debug output for this element;
    returns self for chaining."""
    if not flag:
        self.debug = False
    else:
        self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
    return self
def __str__(self):
    """Use the element's assigned name as its string form."""
    return self.name
def __repr__(self):
    """repr() delegates to the (possibly unicode-aware) string form."""
    return _ustr(self)
def streamline(self):
    """Mark this element as streamlined and invalidate its cached string
    representation; subclasses extend this with real optimizations."""
    self.streamlined = True
    self.strRepr = None
    return self
def checkRecursion(self, parseElementList):
    """Base implementation: leaf elements cannot be left-recursive, so
    there is nothing to check. Compound subclasses override this."""
    pass
def validate(self, validateTrace=[]):
    """Check this grammar for infinite (left) recursion.

    NOTE(review): mutable default argument; harmless here since the list
    is never read or mutated, but worth replacing with None eventually.
    """
    self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
    """Parse the contents of a file, given either a path or an object with
    a ``.read()`` method."""
    try:
        file_contents = file_or_filename.read()
    except AttributeError:
        # Not file-like: treat the argument as a filesystem path.
        with open(file_or_filename, 'r') as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        raise exc
def __eq__(self, other):
    """Equality: identity or equal instance dicts for ParserElements; a
    string operand compares by whether this expression matches it."""
    if isinstance(other, ParserElement):
        return ((self is other) or (vars(self) == vars(other)))
    elif isinstance(other, basestring):
        return self.matches(other)
    else:
        # NOTE(review): compares the super() proxy object itself to other
        # — effectively never equal for unrelated types; confirm intended.
        return (super(ParserElement, self) == other)
def __ne__(self, other):
    """Inequality is defined as the negation of __eq__."""
    return (not (self == other))
def __hash__(self):
    """Hash by identity (elements are mutable, so value hashing is unsafe)."""
    return hash(id(self))
def __req__(self, other):
    """Reflected equality helper; delegates to __eq__."""
    return (self == other)
def __rne__(self, other):
    """Reflected inequality helper; negation of __eq__."""
    return (not (self == other))
def matches(self, testString, parseAll=True):
    """Return True if this expression parses ``testString`` without error
    (by default requiring the whole string to match)."""
    try:
        self.parseString(_ustr(testString), parseAll=parseAll)
    except ParseBaseException:
        return False
    return True
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
    """Run this expression against a list (or newline-separated string) of
    test strings, optionally printing each input with its parse results.

    Returns ``(success, allResults)`` where ``allResults`` is a list of
    (test, result-or-exception) pairs and ``success`` is True when every
    test behaved as expected (parsed, or failed when ``failureTests``).
    """
    if isinstance(tests, basestring):
        tests = list(map(str.strip, tests.rstrip().splitlines()))
    if isinstance(comment, basestring):
        comment = Literal(comment)
    allResults = []
    comments = []
    success = True
    for t in tests:
        # Comment lines (and blank lines following comments) are buffered
        # and echoed just before the next real test.
        if (((comment is not None) and comment.matches(t, False)) or (comments and (not t))):
            comments.append(t)
            continue
        if (not t):
            continue
        out = ['\n'.join(comments), t]
        comments = []
        try:
            # Allow literal '\n' escapes inside a test string.
            t = t.replace('\\n', '\n')
            result = self.parseString(t, parseAll=parseAll)
            out.append(result.dump(full=fullDump))
            # A successful parse counts as failure when failureTests is set.
            success = (success and (not failureTests))
        except ParseBaseException as pe:
            fatal = ('(FATAL)' if isinstance(pe, ParseFatalException) else '')
            if ('\n' in t):
                # Multi-line input: show the offending line with a caret.
                out.append(line(pe.loc, t))
                out.append((((' ' * (col(pe.loc, t) - 1)) + '^') + fatal))
            else:
                out.append((((' ' * pe.loc) + '^') + fatal))
            out.append(('FAIL: ' + str(pe)))
            success = (success and failureTests)
            result = pe
        except Exception as exc:
            out.append(('FAIL-EXCEPTION: ' + str(exc)))
            success = (success and failureTests)
            result = exc
        if printResults:
            if fullDump:
                out.append('')
            print('\n'.join(out))
        allResults.append((t, result))
    return (success, allResults)
def binary_logloss(p, y):
    """Return the mean binary log-loss of predictions ``p`` against 0/1
    labels ``y``.

    Predicted probabilities are clipped to [1e-15, 1 - 1e-15] so that the
    logarithms stay finite.
    """
    # FIX/modernization: the original used sp.log / sp.maximum /
    # sp.minimum / sp.subtract — the deprecated NumPy aliases that were
    # removed from SciPy — raising AttributeError on current releases.
    # NumPy is used directly; the result (-sum/len == -mean) is identical.
    import numpy as np
    epsilon = 1e-15
    p = np.clip(np.asarray(p, dtype=float), epsilon, (1 - epsilon))
    y = np.asarray(y, dtype=float)
    return -np.mean((y * np.log(p)) + ((1 - y) * np.log(1 - p)))
class PerEpochLoader():
    """Data-loader wrapper that re-applies ``func`` to the underlying
    loader at the start of every epoch.

    Iteration yields batches from a TransformedLoader built over
    ``loader``; when an epoch is exhausted, the transformed loader is
    recomputed so the next epoch sees a freshly transformed dataset.
    """
    def __init__(self, loader, func, do_tqdm=True):
        self.orig_loader = loader
        self.func = func
        self.do_tqdm = do_tqdm
        self.data_loader = self.compute_loader()
        self.loader = iter(self.data_loader)
    def compute_loader(self):
        # Rebuild the transformed loader, preserving the original loader's
        # worker count and batch size.
        return TransformedLoader(self.orig_loader, self.func, None, self.orig_loader.num_workers, self.orig_loader.batch_size, do_tqdm=self.do_tqdm)
    def __len__(self):
        return len(self.orig_loader)
    def __getattr__(self, attr):
        # Delegate unknown attributes to the current transformed loader.
        return getattr(self.data_loader, attr)
    def __iter__(self):
        return self
    def __next__(self):
        try:
            return next(self.loader)
        except StopIteration as e:
            # Epoch finished: precompute next epoch's loader, then signal
            # end-of-epoch to the caller.
            self.data_loader = self.compute_loader()
            self.loader = iter(self.data_loader)
            raise StopIteration
        # BUG FIX: removed the unreachable trailing statement
        # 'return self.func(im, targ)', which referenced undefined names
        # (im, targ) and could never execute.
('drwiki-te')  # NOTE(review): no-op expression; almost certainly a mangled
# "@Predictor.register('drwiki-te')" decorator lost in extraction — confirm
# against the original project before restoring the registration.
class TextualEntailmentPredictor(Predictor):
    """Predictor that pairs a FEVER-style claim with evidence sentences
    resolved from a document database."""
    def _batch_json_to_instances(self, json: List[JsonDict]) -> List[Instance]:
        # Each input blob may expand to multiple instances; flatten them.
        instances = []
        for blob in json:
            instances.extend(self._json_to_instances(blob))
        return instances
    def set_docdb(self, db):
        # Attach the document database used to resolve evidence lines.
        self.db = db
    def get_doc_line(self, doc, line):
        """Return the text of line ``line`` of document ``doc``; a negative
        index selects a random non-empty line instead."""
        lines = self.db.get_doc_lines(doc)
        if (line > (- 1)):
            # Each stored line is 'index<TAB>text'; take the text field.
            return lines.split('\n')[line].split('\t')[1]
        else:
            non_empty_lines = [line.split('\t')[1] for line in lines.split('\n') if ((len(line.split('\t')) > 1) and len(line.split('\t')[1].strip()))]
            return non_empty_lines[SimpleRandom.get_instance().next_rand(0, (len(non_empty_lines) - 1))]
    def _json_to_instances(self, json):
        hypothesis_text = json['claim']
        instances = []
        premise_texts = []
        flattened_evidence = [evidence for evidence_group in json['evidence'] for evidence in evidence_group]
        for (_, _, page, sentence) in flattened_evidence:
            # BUG FIX: previously 'premise_texts = self.get_doc_line(...)'
            # rebound the accumulator to a single string, so the join below
            # interleaved spaces between its characters. Accumulate every
            # evidence line and build one instance from all of them.
            premise_texts.append(self.get_doc_line(page, sentence))
        instances.append(self._dataset_reader.text_to_instance(' '.join(premise_texts), hypothesis_text))
        return instances
class RoIAlignFunction(Function):
    """RoI Align autograd Function backed by the compiled ``roi_align``
    extension.

    NOTE(review): uses the legacy stateful autograd style (state stored on
    ``self``, no @staticmethod forward/backward), which modern PyTorch has
    deprecated — each instance may only be used for a single forward pass.
    """
    def __init__(self, aligned_height, aligned_width, spatial_scale):
        # Output grid size per RoI, and the scale mapping image coords to
        # feature-map coords.
        self.aligned_width = int(aligned_width)
        self.aligned_height = int(aligned_height)
        self.spatial_scale = float(spatial_scale)
        # Filled by forward() and consumed by backward().
        self.rois = None
        self.feature_size = None
    def forward(self, features, rois):
        self.rois = rois
        self.feature_size = features.size()
        (batch_size, num_channels, data_height, data_width) = features.size()
        num_rois = rois.size(0)
        # One zero-initialized (C, aligned_h, aligned_w) patch per RoI.
        output = features.new(num_rois, num_channels, self.aligned_height, self.aligned_width).zero_()
        if features.is_cuda:
            roi_align.roi_align_forward_cuda(self.aligned_height, self.aligned_width, self.spatial_scale, features, rois, output)
        else:
            roi_align.roi_align_forward(self.aligned_height, self.aligned_width, self.spatial_scale, features, rois, output)
        return output
    def backward(self, grad_output):
        # Backward requires a prior forward and is CUDA-only (no CPU
        # backward kernel is wired up here).
        assert ((self.feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = self.feature_size
        grad_input = self.rois.new(batch_size, num_channels, data_height, data_width).zero_()
        roi_align.roi_align_backward_cuda(self.aligned_height, self.aligned_width, self.spatial_scale, grad_output, self.rois, grad_input)
        # No gradient is computed w.r.t. the RoI coordinates.
        return (grad_input, None)
class StandardRibbonShapedTableaux_shape(StandardRibbonShapedTableaux):
    """Standard ribbon shaped tableaux of a fixed ribbon shape."""
    def __classcall_private__(cls, shape):
        # Normalize the shape to a tuple so equal shapes yield the same
        # (cached) parent object.
        return super(StandardRibbonShapedTableaux, cls).__classcall__(cls, tuple(shape))
    def __init__(self, shape):
        # shape: the composition describing the ribbon these tableaux have.
        self.shape = shape
        StandardRibbonShapedTableaux.__init__(self, FiniteEnumeratedSets())
    def _repr_(self):
        return ('Standard ribbon shaped tableaux of shape %s' % list(self.shape))
    def first(self):
        # First element: built from the first permutation whose descent
        # composition equals the shape.
        return self.from_permutation(descents_composition_first(self.shape))
    def last(self):
        return self.from_permutation(descents_composition_last(self.shape))
    def __iter__(self):
        # Enumerate via all permutations with this descent composition.
        for p in descents_composition_list(self.shape):
            (yield self.from_permutation(p))
def dummy_diff(*args):
    """Differentiate ``args[0]`` via its ``.diff`` method, coercing every
    second element of the remaining arguments (the derivative orders, at
    odd positions) to ``Integer`` first."""
    func = args[0]
    rest = list(args[1:])
    rest[1::2] = [Integer(order) for order in rest[1::2]]
    return func.diff(*rest)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.