class ModelBuffer():
def __init__(self, buffer_size):
self.data = None
self.buffer_size = int(buffer_size)
def put(self, batch_data):
batch_data.to_torch(device='cpu')
if (self.data is None):
self.data = batch_data
else:
self.data.cat_(batch_data)
if (len(self) > self.buffer_size):
self.data = self.data[(len(self) - self.buffer_size):]
def __len__(self):
if (self.data is None):
return 0
return self.data.shape[0]
def sample(self, batch_size):
indexes = np.random.randint(0, len(self), size=batch_size)
return self.data[indexes] |
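# Hedged usage sketch: ModelBuffer keeps at most `buffer_size` of the most
# recent rows (FIFO trim in `put`) and samples uniformly with replacement.
# The batch wrapper API above (`to_torch`, in-place `cat_`) is assumed; this
# self-contained analogue swaps it for plain torch tensors.
import numpy as np
import torch

class _TensorRingBuffer:
    def __init__(self, buffer_size):
        self.data = None
        self.buffer_size = int(buffer_size)
    def put(self, batch):
        self.data = batch if self.data is None else torch.cat((self.data, batch))
        if self.data.shape[0] > self.buffer_size:
            self.data = self.data[self.data.shape[0] - self.buffer_size:]
    def sample(self, batch_size):
        indexes = np.random.randint(0, self.data.shape[0], size=batch_size)
        return self.data[indexes]

buf = _TensorRingBuffer(buffer_size=4)
buf.put(torch.arange(6).float())   # rows 0 and 1 are trimmed away
assert buf.data.tolist() == [2.0, 3.0, 4.0, 5.0]
print(buf.sample(2))   # 2 rows drawn uniformly with replacement |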
class OutputLogger(Logger):
def __init__(self):
super(OutputLogger, self).__init__()
self.stats['tensor_val'] = None
def forward(self, x):
if (self.stats['tensor_val'] is None):
self.stats['tensor_val'] = x
else:
self.stats['tensor_val'] = torch.cat((self.stats['tensor_val'], x))
return x |
def test_outer_iterations_max_constrained():
def fg(x):
n = len(x)
c = np.arange(n)
f = (x.dot(x) + c.dot(x))
g = ((2 * x) + c)
return (f, g)
def constraint_f(x):
f = (np.sum(x) - 1)
return f
def constraint_jac_prod(x, y):
g = np.ones_like(x)
jp = (y * g)
return jp
constraints = {'type': 'eq', 'fun': constraint_f, 'jacprod': constraint_jac_prod}
options = {'eps_pg': 0.0001, 'constraint_tol': 0.0001, 'max_iter_outer': 1, 'm': 10, 'ls': 0, 'verbose': 0}
n = 4
x0 = np.zeros(n)
res = minimize(fg, x0, constraints=constraints, options=options, np=np)
assert (res.status == 2) |
def load_datasets(name: str) -> Tuple[(CVDataset, CVDataset, CVDataset)]:
if (name == 'omniglot'):
return (paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28)), paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28)), paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28)))
if (name == 'miniimagenet'):
return (paddlefsl.datasets.MiniImageNet(mode='train'), paddlefsl.datasets.MiniImageNet(mode='valid'), paddlefsl.datasets.MiniImageNet(mode='test'))
if (name == 'cifarfs'):
return (paddlefsl.datasets.CifarFS(mode='train', image_size=(28, 28)), paddlefsl.datasets.CifarFS(mode='valid', image_size=(28, 28)), paddlefsl.datasets.CifarFS(mode='test', image_size=(28, 28)))
if (name == 'fc100'):
return (paddlefsl.datasets.FC100(mode='train'), paddlefsl.datasets.FC100(mode='valid'), paddlefsl.datasets.FC100(mode='test'))
if (name == 'cub'):
return (paddlefsl.datasets.CubFS(mode='train'), paddlefsl.datasets.CubFS(mode='valid'), paddlefsl.datasets.CubFS(mode='test'))
raise ValueError(f'the dataset name: <{name}> is not supported') |
def _logmap0(y, c):
sqrt_c = (c ** 0.5)
y_norm = torch.clamp_min(y.norm(dim=(- 1), p=2, keepdim=True), 1e-05)
return (((y / y_norm) / sqrt_c) * artanh((sqrt_c * y_norm))) |
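# _logmap0 above is the logarithm map at the origin of a Poincare ball of
# curvature -c; its inverse is expmap0(v) = tanh(sqrt(c)*|v|) * v / (sqrt(c)*|v|).
# Round-trip sanity check (a sketch; assumes `artanh` is a numerically clamped
# atanh, as defined here):
import torch

def artanh(x):
    return torch.atanh(torch.clamp(x, min=((- 1) + 1e-07), max=(1 - 1e-07)))

def _expmap0(v, c):
    sqrt_c = (c ** 0.5)
    v_norm = torch.clamp_min(v.norm(dim=(- 1), p=2, keepdim=True), 1e-05)
    return ((torch.tanh((sqrt_c * v_norm)) * v) / (sqrt_c * v_norm))

v = torch.tensor([[0.1, (- 0.2), 0.05]])
assert torch.allclose(_logmap0(_expmap0(v, 1.0), 1.0), v, atol=1e-05) |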
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
def __init__(self, *args, **kwargs):
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = (get_terminal_size()[0] - 2)
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if (len(opts) > 1):
opts.insert(1, optsep)
if option.takes_value():
metavar = (option.metavar or option.dest.lower())
opts.append((mvarfmt % metavar.lower()))
return ''.join(opts)
def format_heading(self, heading):
if (heading == 'Options'):
return ''
return (heading + ':\n')
def format_usage(self, usage):
msg = ('\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), ' '))
return msg
def format_description(self, description):
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
description = description.lstrip('\n')
description = description.rstrip()
description = self.indent_lines(textwrap.dedent(description), ' ')
description = ('%s:\n%s\n' % (label, description))
return description
else:
return ''
def format_epilog(self, epilog):
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [(indent + line) for line in text.split('\n')]
return '\n'.join(new_lines) |
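# Hypothetical wiring of the formatter into an optparse parser (optparse sets
# `formatter.parser` itself; `get_terminal_size` is assumed to behave like
# shutil.get_terminal_size, returning a (columns, lines) pair):
import optparse

parser = optparse.OptionParser(usage='%prog [options] <path>', description='Demo.', formatter=PrettyHelpFormatter())
parser.add_option('-v', '--verbose', action='store_true', help='give more output')
print(parser.format_help()) |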
def test_sym_sym():
tmp = np.zeros((M.get(), N.get()), dtype=np.int64)
A = sym_sym(tmp)
assert (A[0] == (M.get() + N.get())) |
def main(args):
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
n_gpu = torch.cuda.device_count()
logger.info('device: {}, n_gpu: {}, 16-bits training: {}'.format(device, n_gpu, args.fp16))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (n_gpu > 0):
torch.cuda.manual_seed_all(args.seed)
if (args.gradient_accumulation_steps < 1):
raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
if ((not args.do_train) and (not args.do_eval)):
raise ValueError('At least one of `do_train` or `do_eval` must be True.')
if args.do_train:
assert ((args.train_file is not None) and (args.dev_file is not None))
if args.eval_test:
assert (args.test_file is not None)
else:
assert (args.dev_file is not None)
if (not os.path.exists(args.output_dir)):
os.makedirs(args.output_dir)
if args.do_train:
logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, 'train.log'), 'w'))
else:
logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, 'eval.log'), 'w'))
logger.info(args)
tokenizer = BertTokenizer.from_pretrained(args.model, do_lower_case=args.do_lower_case)
if (args.do_train or (not args.eval_test)):
with open(args.dev_file) as f:
dataset_json = json.load(f)
eval_dataset = dataset_json['data']
eval_examples = read_squad_examples(input_file=args.dev_file, is_training=False, version_2_with_negative=args.version_2_with_negative)
eval_features = convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=False)
logger.info('***** Dev *****')
logger.info(' Num orig examples = %d', len(eval_examples))
logger.info(' Num split examples = %d', len(eval_features))
logger.info(' Batch size = %d', args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)
if args.do_train:
train_examples = read_squad_examples(input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative)
train_features = convert_examples_to_features(examples=train_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=True)
if ((args.train_mode == 'sorted') or (args.train_mode == 'random_sorted')):
train_features = sorted(train_features, key=(lambda f: np.sum(f.input_mask)))
else:
random.shuffle(train_features)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size)
train_batches = [batch for batch in train_dataloader]
num_train_optimization_steps = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
logger.info('***** Train *****')
logger.info(' Num orig examples = %d', len(train_examples))
logger.info(' Num split examples = %d', len(train_features))
logger.info(' Batch size = %d', args.train_batch_size)
logger.info(' Num steps = %d', num_train_optimization_steps)
eval_step = max(1, (len(train_batches) // args.eval_per_epoch))
best_result = None
lrs = ([args.learning_rate] if args.learning_rate else [1e-06, 2e-06, 3e-06, 5e-06, 1e-05, 2e-05, 3e-05, 5e-05])
for lr in lrs:
model = BertForQuestionAnswering.from_pretrained(args.model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE)
if args.fp16:
model.half()
model.to(device)
if (n_gpu > 1):
model = torch.nn.DataParallel(model)
param_optimizer = list(model.named_parameters())
param_optimizer = [n for n in param_optimizer if ('pooler' not in n[0])]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
                    raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.')
optimizer = FusedAdam(optimizer_grouped_parameters, lr=lr, bias_correction=False, max_grad_norm=1.0)
if (args.loss_scale == 0):
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters, lr=lr, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
tr_loss = 0
nb_tr_examples = 0
nb_tr_steps = 0
global_step = 0
start_time = time.time()
for epoch in range(int(args.num_train_epochs)):
model.train()
logger.info('Start epoch #{} (lr = {})...'.format(epoch, lr))
if ((args.train_mode == 'random') or (args.train_mode == 'random_sorted')):
random.shuffle(train_batches)
for (step, batch) in enumerate(train_batches):
if (n_gpu == 1):
batch = tuple((t.to(device) for t in batch))
(input_ids, input_mask, segment_ids, start_positions, end_positions) = batch
loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
if (n_gpu > 1):
loss = loss.mean()
if (args.gradient_accumulation_steps > 1):
loss = (loss / args.gradient_accumulation_steps)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (((step + 1) % args.gradient_accumulation_steps) == 0):
if args.fp16:
lr_this_step = (lr * warmup_linear((global_step / num_train_optimization_steps), args.warmup_proportion))
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if (((step + 1) % eval_step) == 0):
logger.info('Epoch: {}, Step: {} / {}, used_time = {:.2f}s, loss = {:.6f}'.format(epoch, (step + 1), len(train_batches), (time.time() - start_time), (tr_loss / nb_tr_steps)))
save_model = False
if args.do_eval:
(result, _, _) = evaluate(args, model, device, eval_dataset, eval_dataloader, eval_examples, eval_features)
model.train()
result['global_step'] = global_step
result['epoch'] = epoch
result['learning_rate'] = lr
result['batch_size'] = args.train_batch_size
if ((best_result is None) or (result[args.eval_metric] > best_result[args.eval_metric])):
best_result = result
save_model = True
logger.info(('!!! Best dev %s (lr=%s, epoch=%d): %.2f' % (args.eval_metric, str(lr), epoch, result[args.eval_metric])))
else:
save_model = True
if save_model:
model_to_save = (model.module if hasattr(model, 'module') else model)
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
if best_result:
with open(os.path.join(args.output_dir, 'eval_results.txt'), 'w') as writer:
for key in sorted(best_result.keys()):
writer.write(('%s = %s\n' % (key, str(best_result[key]))))
if args.do_eval:
if args.eval_test:
with open(args.test_file) as f:
dataset_json = json.load(f)
eval_dataset = dataset_json['data']
eval_examples = read_squad_examples(input_file=args.test_file, is_training=False, version_2_with_negative=args.version_2_with_negative)
eval_features = convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=False)
logger.info('***** Test *****')
logger.info(' Num orig examples = %d', len(eval_examples))
logger.info(' Num split examples = %d', len(eval_features))
logger.info(' Batch size = %d', args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)
model = BertForQuestionAnswering.from_pretrained(args.output_dir)
if args.fp16:
model.half()
model.to(device)
na_prob_thresh = 1.0
if args.version_2_with_negative:
eval_result_file = os.path.join(args.output_dir, 'eval_results.txt')
if os.path.isfile(eval_result_file):
with open(eval_result_file) as f:
for line in f.readlines():
if line.startswith('best_f1_thresh'):
na_prob_thresh = float(line.strip().split()[(- 1)])
logger.info(('na_prob_thresh = %.6f' % na_prob_thresh))
(result, preds, _) = evaluate(args, model, device, eval_dataset, eval_dataloader, eval_examples, eval_features, na_prob_thresh=na_prob_thresh, pred_only=args.eval_test)
with open(os.path.join(args.output_dir, 'predictions.json'), 'w') as writer:
writer.write((json.dumps(preds, indent=4) + '\n')) |
def test_issue_334():
a = ak.highlevel.Array([1, 2, 3, 4])
b = ak.highlevel.Array([(- 1)])
c = ak.highlevel.Array([True, False, True, True])
assert (ak.operations.where(c, a, b).to_list() == [1, (- 1), 3, 4])
assert (ak.operations.where(*ak.operations.broadcast_arrays(c, a, b)).to_list() == [1, (- 1), 3, 4])
assert (ak.operations.where(c, a, (- 1)).to_list() == [1, (- 1), 3, 4])
assert (ak.operations.where(*ak.operations.broadcast_arrays(c, a, (- 1))).to_list() == [1, (- 1), 3, 4]) |
class FunctionField_polymod(FunctionField):
Element = FunctionFieldElement_polymod
def __init__(self, polynomial, names, category=None):
from sage.rings.polynomial.polynomial_element import Polynomial
if ((polynomial.parent().ngens() > 1) or (not isinstance(polynomial, Polynomial))):
            raise TypeError('polynomial must be a univariate polynomial')
if (names is None):
names = (polynomial.variable_name(),)
elif (names != polynomial.variable_name()):
polynomial = polynomial.change_variable_name(names)
if (polynomial.degree() <= 0):
raise ValueError('polynomial must have positive degree')
base_field = polynomial.base_ring()
if (not isinstance(base_field, FunctionField)):
raise TypeError('polynomial must be over a FunctionField')
self._base_field = base_field
self._polynomial = polynomial
FunctionField.__init__(self, base_field, names=names, category=FunctionFields().or_subcategory(category))
from .place_polymod import FunctionFieldPlace_polymod
self._place_class = FunctionFieldPlace_polymod
self._hash = hash(polynomial)
self._ring = self._polynomial.parent()
self._populate_coercion_lists_(coerce_list=[base_field, self._ring])
self._gen = self(self._ring.gen())
def __hash__(self):
return self._hash
def _element_constructor_(self, x):
if isinstance(x, FunctionFieldElement):
return self.element_class(self, self._ring(x.element()))
return self.element_class(self, self._ring(x))
def gen(self, n=0):
if (n != 0):
raise IndexError('there is only one generator')
return self._gen
def ngens(self):
return 1
def _to_base_field(self, f):
K = self.base_field()
if f.element().is_constant():
return K(f.element())
raise ValueError(('%r is not an element of the base field' % (f,)))
def _to_constant_base_field(self, f):
return self.base_field()._to_constant_base_field(self._to_base_field(f))
def monic_integral_model(self, names=None):
if names:
if (not isinstance(names, tuple)):
names = (names,)
if (len(names) > 2):
raise ValueError('names must contain at most 2 entries')
if (self.base_field() is not self.rational_function_field()):
(L, from_L, to_L) = self.simple_model()
(ret, ret_to_L, L_to_ret) = L.monic_integral_model(names)
from_ret = ret.hom([from_L(ret_to_L(ret.gen())), from_L(ret_to_L(ret.base_field().gen()))])
to_ret = self.hom([L_to_ret(to_L(k.gen())) for k in self._intermediate_fields(self.rational_function_field())])
return (ret, from_ret, to_ret)
elif (self.polynomial().is_monic() and all((c.denominator().is_one() for c in self.polynomial()))):
if ((names is None) or (names == ())):
names = (self.variable_name(),)
return self.change_variable_name(names)
else:
if (not names):
names = ((self.variable_name() + '_'),)
if (len(names) == 1):
names = (names[0], self.rational_function_field().variable_name())
(g, d) = self._make_monic_integral(self.polynomial())
(K, from_K, to_K) = self.base_field().change_variable_name(names[1])
g = g.map_coefficients(to_K)
ret = K.extension(g, names=names[0])
from_ret = ret.hom([(self.gen() * d), self.base_field().gen()])
to_ret = self.hom([(ret.gen() / d), ret.base_field().gen()])
return (ret, from_ret, to_ret)
def _make_monic_integral(self, f):
R = f.base_ring()
if (not isinstance(R, RationalFunctionField)):
raise NotImplementedError
n = f.degree()
c = f.leading_coefficient()
if (c != 1):
f = (f / c)
d = lcm([b.denominator() for b in f.list() if b])
if (d != 1):
x = f.parent().gen()
g = ((d ** n) * f((x / d)))
else:
g = f
return (g, d)
def constant_field(self):
raise NotImplementedError
def constant_base_field(self):
return self.base_field().constant_base_field()
    @cached_method(key=(lambda self, base: (self.base_field() if (base is None) else base)))
def degree(self, base=None):
if (base is None):
base = self.base_field()
if (base is self):
from sage.rings.integer_ring import ZZ
return ZZ(1)
return (self._polynomial.degree() * self.base_field().degree(base))
def _repr_(self):
return ('Function field in %s defined by %s' % (self.variable_name(), self._polynomial))
def base_field(self):
return self._base_field
def random_element(self, *args, **kwds):
return self(self._ring.random_element(*args, degree=self.degree(), **kwds))
def polynomial(self):
return self._polynomial
def is_separable(self, base=None):
if (base is None):
base = self.base_field()
for k in self._intermediate_fields(base)[:(- 1)]:
f = k.polynomial()
g = f.derivative()
if (f.gcd(g).degree() != 0):
return False
return True
def polynomial_ring(self):
return self._ring
    @cached_method(key=(lambda self, base, basis, map: ((self.base_field() if (base is None) else base), basis, map)))
def free_module(self, base=None, basis=None, map=True):
if (basis is not None):
raise NotImplementedError
from .maps import MapVectorSpaceToFunctionField, MapFunctionFieldToVectorSpace
if (base is None):
base = self.base_field()
degree = self.degree(base)
V = (base ** degree)
if (not map):
return V
from_V = MapVectorSpaceToFunctionField(V, self)
to_V = MapFunctionFieldToVectorSpace(self, V)
return (V, from_V, to_V)
def maximal_order(self):
from .order_polymod import FunctionFieldMaximalOrder_polymod
return FunctionFieldMaximalOrder_polymod(self)
def maximal_order_infinite(self):
from .order_polymod import FunctionFieldMaximalOrderInfinite_polymod
return FunctionFieldMaximalOrderInfinite_polymod(self)
def different(self):
O = self.maximal_order()
Oinf = self.maximal_order_infinite()
return (O.different().divisor() + Oinf.different().divisor())
def equation_order(self):
d = self._make_monic_integral(self.polynomial())[1]
return self.order((d * self.gen()), check=False)
def hom(self, im_gens, base_morphism=None):
if (not isinstance(im_gens, (list, tuple))):
im_gens = [im_gens]
if (len(im_gens) == 0):
raise ValueError('no images specified')
if (len(im_gens) > 1):
base_morphism = self.base_field().hom(im_gens[1:], base_morphism)
codomain = im_gens[0].parent()
if (base_morphism is not None):
from sage.categories.pushout import pushout
codomain = pushout(codomain, base_morphism.codomain())
from .maps import FunctionFieldMorphism_polymod
return FunctionFieldMorphism_polymod(self.Hom(codomain), im_gens[0], base_morphism)
    @cached_method
def genus(self):
if (isinstance(self._base_field, RationalFunctionField) and self._base_field.constant_field().is_prime_field()):
from sage.interfaces.singular import singular
tmpAuxRing = PolynomialRing(self._base_field.constant_field(), ((str(self._base_field.gen()) + ',') + str(self._ring.gen())))
(intMinPoly, d) = self._make_monic_integral(self._polynomial)
curveIdeal = tmpAuxRing.ideal(intMinPoly)
singular.lib('normal.lib')
return int(curveIdeal._singular_().genus())
else:
raise NotImplementedError('computation of genus over non-prime constant fields not implemented yet')
def _simple_model(self, name='v'):
M = self
L = M.base_field()
K = L.base_field()
assert isinstance(K, RationalFunctionField)
assert (K is not L)
assert (L is not M)
if (not K.constant_field().is_perfect()):
raise NotImplementedError('simple_model() only implemented over perfect constant fields')
x = K.gen()
b = L.gen()
a = M.gen()
factor = self.constant_base_field().zero()
exponent = 0
while True:
v = M((a + ((b * factor) * (x ** exponent))))
minpoly = v.matrix(K).minpoly()
if (minpoly.degree() == (M.degree() * L.degree())):
break
factor += 1
if (factor == 0):
factor = self.constant_base_field().one()
exponent += 1
N = K.extension(minpoly, names=(name,))
N_to_M = N.hom(v)
(V, V_to_M, M_to_V) = M.free_module(K)
(V, V_to_N, N_to_V) = N.free_module(K)
from sage.matrix.matrix_space import MatrixSpace
MS = MatrixSpace(V.base_field(), V.dimension())
B = [M_to_V((v ** i)) for i in range(V.dimension())]
B = MS(B)
M_b = V_to_N(B.solve_left(M_to_V(b)))
M_a = V_to_N(B.solve_left(M_to_V(a)))
M_to_N = M.hom([M_a, M_b])
return (N, N_to_M, M_to_N)
    @cached_method
def simple_model(self, name=None):
if (name is None):
name = self.variable_name()
if isinstance(self.base_field(), RationalFunctionField):
if (name == self.variable_name()):
id = Hom(self, self).identity()
return (self, id, id)
else:
ret = self.base_field().extension(self.polynomial(), names=(name,))
f = ret.hom(self.gen())
t = self.hom(ret.gen())
return (ret, f, t)
else:
base = self.base_field()
(base_, from_base_, to_base_) = base.simple_model()
self_ = base_.extension(self.polynomial().map_coefficients(to_base_), names=(name,))
gens_in_base_ = [to_base_(k.gen()) for k in base._intermediate_fields(base.rational_function_field())]
to_self_ = self.hom(([self_.gen()] + gens_in_base_))
from_self_ = self_.hom([self.gen(), from_base_(base_.gen())])
(ret, ret_to_self_, self__to_ret) = self_._simple_model(name)
ret_to_self = ret.hom(from_self_(ret_to_self_(ret.gen())))
gens_in_ret = [self__to_ret(to_self_(k.gen())) for k in self._intermediate_fields(self.rational_function_field())]
self_to_ret = self.hom(gens_in_ret)
return (ret, ret_to_self, self_to_ret)
    @cached_method
def primitive_element(self):
(N, f, t) = self.simple_model()
return f(N.gen())
    @cached_method
def separable_model(self, names=None):
if (names is None):
pass
elif (not isinstance(names, tuple)):
raise TypeError('names must be a tuple consisting of two strings')
elif (len(names) != 2):
raise ValueError('must provide exactly two variable names')
if (self.base_ring() is not self.rational_function_field()):
(L, from_L, to_L) = self.simple_model()
(K, from_K, to_K) = L.separable_model(names=names)
f = K.hom([from_L(from_K(K.gen())), from_L(from_K(K.base_field().gen()))])
t = self.hom([to_K(to_L(k.gen())) for k in self._intermediate_fields(self.rational_function_field())])
return (K, f, t)
if self.polynomial().gcd(self.polynomial().derivative()).is_one():
if (names is None):
names = (self.variable_name(), self.base_field().variable_name())
return self.change_variable_name(names)
if (not self.constant_base_field().is_perfect()):
raise NotImplementedError('constructing a separable model is only implemented for function fields over a perfect constant base field')
if (names is None):
names = ((self.variable_name() + '_'), (self.rational_function_field().variable_name() + '_'))
(L, from_L, to_L) = self.monic_integral_model()
if L.polynomial().gcd(L.polynomial().derivative()).is_one():
(ret, ret_to_L, L_to_ret) = L.change_variable_name(names)
f = ret.hom([from_L(ret_to_L(ret.gen())), from_L(ret_to_L(ret.base_field().gen()))])
t = self.hom([L_to_ret(to_L(self.gen())), L_to_ret(to_L(self.base_field().gen()))])
return (ret, f, t)
else:
from .constructor import FunctionField
K = FunctionField(self.constant_base_field(), names=(names[1],))
if (names[0] == names[1]):
raise ValueError('names of generators must be distinct')
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
R = PolynomialRing(self.constant_base_field(), names=names)
S = R.remove_var(names[1])
f = R(L.polynomial().change_variable_name(names[1]).map_coefficients((lambda c: c.numerator().change_variable_name(names[0])), S))
f = f.polynomial(R.gen(0)).change_ring(K)
f /= f.leading_coefficient()
assert f.gcd(f.derivative()).is_one()
ret = K.extension(f, names=(names[0],))
ret_to_L = ret.hom([L(L.base_field().gen()), L.gen()])
L_to_ret = L.hom([ret(K.gen()), ret.gen()])
f = ret.hom([from_L(ret_to_L(ret.gen())), from_L(ret_to_L(ret.base_field().gen()))])
t = self.hom([L_to_ret(to_L(self.gen())), L_to_ret(to_L(self.base_field().gen()))])
return (ret, f, t)
def change_variable_name(self, name):
if (not isinstance(name, tuple)):
name = (name,)
if (len(name) == 0):
raise ValueError('name must contain at least one string')
elif (len(name) == 1):
base = self.base_field()
from_base = to_base = Hom(base, base).identity()
else:
(base, from_base, to_base) = self.base_field().change_variable_name(name[1:])
ret = base.extension(self.polynomial().map_coefficients(to_base), names=(name[0],))
f = ret.hom([k.gen() for k in self._intermediate_fields(self.rational_function_field())])
t = self.hom([k.gen() for k in ret._intermediate_fields(ret.rational_function_field())])
return (ret, f, t) |
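# Hedged Sage usage sketch: FunctionField_polymod backs extensions of function
# fields built via K.extension (doctest-style):
#
#   sage: K.<x> = FunctionField(QQ)
#   sage: R.<y> = K[]
#   sage: L.<y> = K.extension(y^2 - x)
#   sage: L.degree(), L.polynomial()
#   (2, y^2 - x) |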
class PolynomialCameraCal(object):
__slots__ = ['data']
if T.TYPE_CHECKING:
data = []
def __init__(self, focal_length, principal_point, critical_undistorted_radius, distortion_coeffs):
self.data = []
if isinstance(focal_length, numpy.ndarray):
if (focal_length.shape in [(2, 1), (1, 2)]):
focal_length = focal_length.flatten()
elif (focal_length.shape != (2,)):
raise IndexError('Expected focal_length to be a vector of length 2; instead had shape {}'.format(focal_length.shape))
elif (len(focal_length) != 2):
raise IndexError('Expected focal_length to be a sequence of length 2, was instead length {}.'.format(len(focal_length)))
if isinstance(principal_point, numpy.ndarray):
if (principal_point.shape in [(2, 1), (1, 2)]):
principal_point = principal_point.flatten()
elif (principal_point.shape != (2,)):
raise IndexError('Expected principal_point to be a vector of length 2; instead had shape {}'.format(principal_point.shape))
elif (len(principal_point) != 2):
raise IndexError('Expected principal_point to be a sequence of length 2, was instead length {}.'.format(len(principal_point)))
if isinstance(distortion_coeffs, numpy.ndarray):
if (distortion_coeffs.shape in [(3, 1), (1, 3)]):
distortion_coeffs = distortion_coeffs.flatten()
elif (distortion_coeffs.shape != (3,)):
raise IndexError('Expected distortion_coeffs to be a vector of length 3; instead had shape {}'.format(distortion_coeffs.shape))
elif (len(distortion_coeffs) != 3):
raise IndexError('Expected distortion_coeffs to be a sequence of length 3, was instead length {}.'.format(len(distortion_coeffs)))
self.data.extend(focal_length)
self.data.extend(principal_point)
self.data.append(critical_undistorted_radius)
self.data.extend(distortion_coeffs)
def __repr__(self):
return '<{} {}>'.format(self.__class__.__name__, self.data)
def focal_length(self):
return ops.CameraOps.focal_length(self)
def principal_point(self):
return ops.CameraOps.principal_point(self)
def pixel_from_camera_point(self, point, epsilon):
return ops.CameraOps.pixel_from_camera_point(self, point, epsilon)
def pixel_from_camera_point_with_jacobians(self, point, epsilon):
return ops.CameraOps.pixel_from_camera_point_with_jacobians(self, point, epsilon)
    @staticmethod
    def storage_dim():
return 8
def to_storage(self):
return list(self.data)
    @classmethod
    def from_storage(cls, vec):
instance = cls.__new__(cls)
if isinstance(vec, list):
instance.data = vec
else:
instance.data = list(vec)
if (len(vec) != cls.storage_dim()):
raise ValueError('{} has storage dim {}, got {}.'.format(cls.__name__, cls.storage_dim(), len(vec)))
return instance
    @staticmethod
    def tangent_dim():
return 8
    @classmethod
    def from_tangent(cls, vec, epsilon=1e-08):
if (len(vec) != cls.tangent_dim()):
raise ValueError('Vector dimension ({}) not equal to tangent space dimension ({}).'.format(len(vec), cls.tangent_dim()))
return ops.LieGroupOps.from_tangent(vec, epsilon)
def to_tangent(self, epsilon=1e-08):
return ops.LieGroupOps.to_tangent(self, epsilon)
def retract(self, vec, epsilon=1e-08):
if (len(vec) != self.tangent_dim()):
raise ValueError('Vector dimension ({}) not equal to tangent space dimension ({}).'.format(len(vec), self.tangent_dim()))
return ops.LieGroupOps.retract(self, vec, epsilon)
def local_coordinates(self, b, epsilon=1e-08):
return ops.LieGroupOps.local_coordinates(self, b, epsilon)
def interpolate(self, b, alpha, epsilon=1e-08):
return ops.LieGroupOps.interpolate(self, b, alpha, epsilon)
def __eq__(self, other):
if isinstance(other, PolynomialCameraCal):
return (self.data == other.data)
else:
return False |
@register_node_type()
class DiffEpsilon(optplan.Function):
type = schema_utils.polymorphic_model_type('function.diff_epsilon')
epsilon = optplan.ReferenceType(optplan.Function)
epsilon_ref = types.PolyModelType(EpsilonSpec) |
def SymmetricGroupRepresentation(partition, implementation='specht', ring=None, cache_matrices=True):
partition = Partition(partition)
Rep = SymmetricGroupRepresentations(sum(partition), implementation=implementation, ring=ring, cache_matrices=cache_matrices)
return Rep(partition) |
class BiGRU(nn.Module):
def __init__(self, inputdim, outputdim, bidirectional=True, **kwargs):
nn.Module.__init__(self)
self.rnn = nn.GRU(inputdim, outputdim, bidirectional=bidirectional, batch_first=True, **kwargs)
def forward(self, x, hid=None):
(x, hid) = self.rnn(x)
return (x, (hid,)) |
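# Quick shape check for BiGRU (a sketch; `nn` is torch.nn): with
# bidirectional=True the output feature size is (2 * outputdim).
import torch

rnn = BiGRU(inputdim=16, outputdim=8)
x = torch.randn(2, 5, 16)   # (batch, time, features), since batch_first=True
(y, (hid,)) = rnn(x)
assert (y.shape == (2, 5, 16))   # 8 features per direction
assert (hid.shape == (2, 2, 8))   # (num_directions, batch, outputdim) |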
class OperatorsSet(OperatorsSetBase):
def __init__(self, name: str, qc_options: QuantizationConfigOptions=None):
super().__init__(name)
self.qc_options = qc_options
is_fusing_set = (qc_options is None)
self.is_default = ((_current_tp_model.get().default_qco == self.qc_options) or is_fusing_set)
def get_info(self) -> Dict[(str, Any)]:
return {'name': self.name, 'is_default_qc': self.is_default} |
def test_asarray_with_order_ignored():
xp = pytest.importorskip('numpy.array_api')
xp_ = _AdjustableNameAPITestWrapper(xp, 'wrapped.array_api')
X = numpy.asarray([[1.2, 3.4, 5.1], [3.4, 5.5, 1.2]], order='C')
X = xp_.asarray(X)
X_new = _asarray_with_order(X, order='F', xp=xp_)
X_new_np = numpy.asarray(X_new)
assert X_new_np.flags['C_CONTIGUOUS']
assert (not X_new_np.flags['F_CONTIGUOUS']) |
def get_image_net_datasets(train_transform, test_transform, train_classes=range(1000), open_set_classes=range(1000), num_open_set_classes=1000, balance_open_set_eval=False, split_train_val=True, seed=0, osr_split='random'):
np.random.seed(seed)
print('No validation split option for ImageNet dataset...')
print('ImageNet datasets use hardcoded OSR splits...')
print('Loading ImageNet Train...')
train_dataset_whole = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform)
print('Loading ImageNet Val...')
test_dataset_known = ImageNetBase(root=os.path.join(imagenet_root, 'val'), transform=test_transform)
print('Loading ImageNet21K Val...')
test_dataset_unknown = ImageNetBase(root=os.path.join(imagenet21k_root, 'val'), transform=test_transform)
open_set_classes = get_imagenet_osr_class_splits(imagenet21k_dataset=test_dataset_unknown, osr_split=osr_split)
test_dataset_unknown = subsample_classes(test_dataset_unknown, include_classes=open_set_classes)
if balance_open_set_eval:
(test_dataset_known, test_dataset_unknown) = get_equal_len_datasets(test_dataset_known, test_dataset_unknown)
all_datasets = {'train': train_dataset_whole, 'val': test_dataset_known, 'test_known': test_dataset_known, 'test_unknown': test_dataset_unknown}
return all_datasets |
def encode(batch, tokenizer, nlp):
if (nlp is not None):
tokenized_texts = tokenize_with_spacy(batch['text'], nlp)
else:
tokenized_texts = batch
tokenized_texts['offset_mapping'] = [list(zip(range(len(tokens)), range(1, (1 + len(tokens))))) for tokens in tokenized_texts['tokens']]
encoded_batch = tokenizer(tokenized_texts['tokens'], add_special_tokens=True, is_split_into_words=True, return_length=True, return_attention_mask=False)
return {'tokens': tokenized_texts['tokens'], 'input_ids': encoded_batch['input_ids'], 'length': encoded_batch['length'], 'subtoken_map': [enc.word_ids for enc in encoded_batch.encodings], 'new_token_map': [list(range(len(tokens))) for tokens in tokenized_texts['tokens']], 'offset_mapping': tokenized_texts['offset_mapping']} |
def main(args):
cfg = setup(args)
logger.info(f'Used CDPN module name: {cfg.MODEL.CDPN.NAME}')
(model, optimizer) = eval(cfg.MODEL.CDPN.NAME).build_model_optimizer(cfg)
logger.info('Model:\n{}'.format(model))
if args.eval_only:
MyCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
return do_test(cfg, model)
distributed = (comm.get_world_size() > 1)
if (distributed and (not args.use_hvd)):
model = DistributedDataParallel(model, device_ids=[comm.get_local_rank()], broadcast_buffers=False, find_unused_parameters=True)
do_train(cfg, args, model, optimizer, resume=args.resume)
return do_test(cfg, model) |
def test_kernel_ridge_precomputed():
for kernel in ['linear', 'rbf', 'poly', 'cosine']:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel='precomputed').fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2) |
def mupad_console():
from sage.repl.rich_output.display_manager import get_display_manager
if (not get_display_manager().is_in_terminal()):
raise RuntimeError('Can use the console only in the terminal. Try %%mupad magics instead.')
os.system('mupkern') |
def forward_step(*, model: Model, extern_data: TensorDict, **_kwargs) -> Tuple[(Tensor, Tensor, Dim, Dim)]:
data = extern_data[extern_data_inputs_name]
batch_dims = data.remaining_dims((data_spatial_dim, data.feature_dim))
(enc_args, enc_spatial_dim) = model.encode(data, in_spatial_dim=data_spatial_dim)
beam_size = 12
length_normalization_exponent = 1.0
max_seq_len = enc_spatial_dim.get_size_tensor()
print('** max seq len:', max_seq_len.raw_tensor)
beam_dim = Dim(1, name='initial-beam')
batch_dims_ = ([beam_dim] + batch_dims)
decoder_state = model.decoder_default_initial_state(batch_dims=batch_dims_, enc_spatial_dim=enc_spatial_dim)
target = rf.constant(model.bos_idx, dims=batch_dims_, sparse_dim=model.target_dim)
ended = rf.constant(False, dims=batch_dims_)
out_seq_len = rf.constant(0, dims=batch_dims_)
seq_log_prob = rf.constant(0.0, dims=batch_dims_)
i = 0
seq_targets = []
seq_backrefs = []
while True:
input_embed = model.target_embed(target)
(step_out, decoder_state) = model.loop_step(**enc_args, enc_spatial_dim=enc_spatial_dim, input_embed=input_embed, state=decoder_state)
logits = model.decode_logits(input_embed=input_embed, **step_out)
label_log_prob = rf.log_softmax(logits, axis=model.target_dim)
label_log_prob = rf.where(ended, rf.sparse_to_dense(model.eos_idx, axis=model.target_dim, label_value=0.0, other_value=(- 1e+30)), label_log_prob)
seq_log_prob = (seq_log_prob + label_log_prob)
(seq_log_prob, (backrefs, target), beam_dim) = rf.top_k(seq_log_prob, k_dim=Dim(beam_size, name=f'dec-step{i}-beam'), axis=[beam_dim, model.target_dim])
seq_targets.append(target)
seq_backrefs.append(backrefs)
decoder_state = tree.map_structure((lambda s: rf.gather(s, indices=backrefs)), decoder_state)
ended = rf.gather(ended, indices=backrefs)
out_seq_len = rf.gather(out_seq_len, indices=backrefs)
i += 1
ended = rf.logical_or(ended, (target == model.eos_idx))
ended = rf.logical_or(ended, rf.copy_to_device((i >= max_seq_len)))
if bool(rf.reduce_all(ended, axis=ended.dims).raw_tensor):
break
out_seq_len = (out_seq_len + rf.where(ended, 0, 1))
if ((i > 1) and (length_normalization_exponent != 0)):
seq_log_prob *= rf.where(ended, ((i / (i - 1)) ** length_normalization_exponent), 1.0)
if ((i > 0) and (length_normalization_exponent != 0)):
seq_log_prob *= ((1 / i) ** length_normalization_exponent)
seq_targets_ = []
indices = rf.range_over_dim(beam_dim)
for (backrefs, target) in zip(seq_backrefs[::(- 1)], seq_targets[::(- 1)]):
seq_targets_.insert(0, rf.gather(target, indices=indices))
indices = rf.gather(backrefs, indices=indices)
seq_targets__ = TensorArray(seq_targets_[0])
for target in seq_targets_:
seq_targets__ = seq_targets__.push_back(target)
out_spatial_dim = Dim(out_seq_len, name='out-spatial')
seq_targets = seq_targets__.stack(axis=out_spatial_dim)
return (seq_targets, seq_log_prob, out_spatial_dim, beam_dim) |
def zero_pad_collator(batch) -> Union[(Dict[(str, torch.Tensor)], Tuple[torch.Tensor])]:
datum = batch[0]
if isinstance(datum, str):
return batch
if isinstance(datum, tuple):
return tuple((collate_tensors([b[i] for b in batch]) for i in range(len(datum))))
keys = datum.keys()
return {k: collate_tensors([b[k] for b in batch]) for k in keys} |
def generate_all_entities(facts_arr):
entities = []
for triple in facts_arr:
        (subject, obj) = (triple[0], triple[2])
        if (subject not in entities):
            entities.append(subject)
        if (obj not in entities):
            entities.append(obj)
return entities |
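# Example: entities are collected in first-seen order from (s, p, o) triples.
facts = [('alice', 'knows', 'bob'), ('bob', 'knows', 'carol')]
assert (generate_all_entities(facts) == ['alice', 'bob', 'carol']) |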
class Grammar(object):
def __init__(self, rules):
self.rules = rules
self.rule_index = defaultdict(list)
self.rule_to_id = OrderedDict()
node_types = set()
lhs_nodes = set()
rhs_nodes = set()
for rule in self.rules:
self.rule_index[rule.parent].append(rule)
for node in rule.nodes:
node_types.add(typename(node.type))
lhs_nodes.add(rule.parent)
for child in rule.children:
rhs_nodes.add(child.as_type_node)
root_node = (lhs_nodes - rhs_nodes)
assert (len(root_node) == 1)
self.root_node = next(iter(root_node))
self.terminal_nodes = (rhs_nodes - lhs_nodes)
self.terminal_types = set([n.type for n in self.terminal_nodes])
self.node_type_to_id = OrderedDict()
for (i, type) in enumerate(node_types, start=0):
self.node_type_to_id[type] = i
for (gid, rule) in enumerate(rules, start=0):
self.rule_to_id[rule] = gid
        self.id_to_rule = OrderedDict(((v, k) for (k, v) in self.rule_to_id.items()))
logging.info('num. rules: %d', len(self.rules))
logging.info('num. types: %d', len(self.node_type_to_id))
logging.info('root: %s', self.root_node)
logging.info('terminals: %s', ', '.join((repr(n) for n in self.terminal_nodes)))
def __iter__(self):
return self.rules.__iter__()
def __len__(self):
return len(self.rules)
def __getitem__(self, lhs):
key_node = ASTNode(lhs.type, None)
if (key_node in self.rule_index):
return self.rule_index[key_node]
else:
            raise KeyError(('key=%s' % key_node))
def get_node_type_id(self, node):
from astnode import ASTNode
if isinstance(node, ASTNode):
type_repr = typename(node.type)
return self.node_type_to_id[type_repr]
else:
type_repr = typename(node)
return self.node_type_to_id[type_repr]
def is_terminal(self, node):
return (node.type in self.terminal_types)
def is_value_node(self, node):
raise NotImplementedError |
def test_wrap_index_cupy():
cp = pytest.importorskip('cupy')
data = cp.arange(10, dtype=cp.int64)
index = ak.index.Index64(data)
other_data = cp.asarray(index)
assert cp.shares_memory(data, other_data) |
class PDELU_MobileNet(nn.Module):
cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]
def __init__(self, num_classes=100):
super(PDELU_MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.linear = nn.Linear(1024, num_classes)
self.pdelu = PDELU()
def _make_layers(self, in_planes):
layers = []
for x in self.cfg:
out_planes = (x if isinstance(x, int) else x[0])
stride = (1 if isinstance(x, int) else x[1])
layers.append(Block(in_planes, out_planes, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.pdelu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.avg_pool2d(out, 2)
out = out.view(out.size(0), (- 1))
out = self.linear(out)
return out |
class Stack(Progress):
    phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
def update(self):
nphases = len(self.phases)
i = min((nphases - 1), int((self.progress * nphases)))
self.write(self.phases[i]) |
class Metrics(object):
def __init__(self):
self.metrics = OrderedDict()
self.cache_dict = OrderedDict()
def register(self, name=None, value=None, formatter=None, display_name=None, write_db=True, write_mail=True):
        assert (name is not None), 'No name specified'
if (not value):
value = 0
self.__setattr__(name, value)
if (not display_name):
display_name = name
self.metrics[name] = {'name': name, 'write_db': write_db, 'formatter': formatter, 'write_mail': write_mail, 'display_name': display_name}
def cache(self, name=None, value=None, func=None):
        assert (name is not None), 'No name specified'
self.__setattr__(name, value)
self.cache_dict[name] = {'name': name, 'func': func}
def __call__(self, name):
return self.metrics[name]
def names(self):
return [v['name'] for v in self.metrics.values()]
def display_names(self):
return [v['display_name'] for v in self.metrics.values()]
def formatters(self):
return dict([(v['display_name'], v['formatter']) for (k, v) in self.metrics.items() if (not (v['formatter'] is None))])
def val_dict(self, display_name=False, object='metrics'):
if display_name:
key_string = 'display_name'
else:
key_string = 'name'
print('object dict: ', object)
val_dict = dict([(self.__getattribute__(object)[key][key_string], self.__getattribute__(key)) for key in self.__getattribute__(object).keys()])
return val_dict
def val_db(self, display_name=True):
if display_name:
key_string = 'display_name'
else:
key_string = 'name'
val_dict = dict([(self.metrics[key][key_string], self.__getattribute__(key)) for key in self.metrics.keys() if self.metrics[key]['write_db']])
return val_dict
def val_mail(self, display_name=True):
if display_name:
key_string = 'display_name'
else:
key_string = 'name'
val_dict = dict([(self.metrics[key][key_string], self.__getattribute__(key)) for key in self.metrics.keys() if self.metrics[key]['write_mail']])
return val_dict
def to_dataframe(self, display_name=False, type=None):
if (type == 'mail'):
self.df = pd.DataFrame(self.val_mail(display_name=display_name), index=[self.seqName])
else:
self.df = pd.DataFrame(self.val_dict(display_name=display_name), index=[self.seqName])
def update_values(self, value_dict=None):
if value_dict:
for (key, value) in value_dict.items():
if hasattr(self, key):
self.__setattr__(key, value)
def print_type(self, object='metrics'):
print('OBJECT ', object)
val_dict = self.val_dict(object=object)
for (key, item) in val_dict.items():
print(('%s: %s; Shape: %s' % (key, type(item), np.shape(item))))
def print_results(self):
result_dict = self.val_dict()
for (key, item) in result_dict.items():
print(key)
print(('%s: %s' % (key, self.metrics[key]['formatter'](item))))
def save_dict(self, path):
with open(path, 'wb') as handle:
pickle.dump(self.__dict__, handle, protocol=pickle.HIGHEST_PROTOCOL)
def compute_metrics_per_sequence(self):
raise NotImplementedError |
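# Hedged usage sketch for the Metrics registry (assumes the module imports
# OrderedDict, numpy as np, and pandas as pd, as the method bodies require):
m = Metrics()
m.register(name='loss', value=0.0, formatter='{:.4f}'.format, display_name='Loss')
m.update_values({'loss': 0.123456})
assert (m.val_dict(display_name=True) == {'Loss': 0.123456})
m.print_results()   # prints "loss" then "loss: 0.1235" |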
def get_opt(model, model_bert, model_type):
if (model_type == 'FT_s2s_1'):
opt = torch.optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), lr=args.lr, weight_decay=0)
opt_bert = torch.optim.Adam(filter((lambda p: p.requires_grad), model_bert.parameters()), lr=args.lr_bert, weight_decay=0)
else:
raise NotImplementedError
return (opt, opt_bert) |
class GroupOps(object):
    @staticmethod
    def identity():
_res = ([0.0] * 8)
_res[0] = 0
_res[1] = 0
_res[2] = 0
_res[3] = 0
_res[4] = 0
_res[5] = 0
_res[6] = 0
_res[7] = 0
return sym.PolynomialCameraCal.from_storage(_res)
    @staticmethod
    def inverse(a):
_a = a.data
_res = ([0.0] * 8)
_res[0] = (- _a[0])
_res[1] = (- _a[1])
_res[2] = (- _a[2])
_res[3] = (- _a[3])
_res[4] = (- _a[4])
_res[5] = (- _a[5])
_res[6] = (- _a[6])
_res[7] = (- _a[7])
return sym.PolynomialCameraCal.from_storage(_res)
    @staticmethod
    def compose(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 8)
_res[0] = (_a[0] + _b[0])
_res[1] = (_a[1] + _b[1])
_res[2] = (_a[2] + _b[2])
_res[3] = (_a[3] + _b[3])
_res[4] = (_a[4] + _b[4])
_res[5] = (_a[5] + _b[5])
_res[6] = (_a[6] + _b[6])
_res[7] = (_a[7] + _b[7])
return sym.PolynomialCameraCal.from_storage(_res)
    @staticmethod
    def between(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 8)
_res[0] = ((- _a[0]) + _b[0])
_res[1] = ((- _a[1]) + _b[1])
_res[2] = ((- _a[2]) + _b[2])
_res[3] = ((- _a[3]) + _b[3])
_res[4] = ((- _a[4]) + _b[4])
_res[5] = ((- _a[5]) + _b[5])
_res[6] = ((- _a[6]) + _b[6])
_res[7] = ((- _a[7]) + _b[7])
return sym.PolynomialCameraCal.from_storage(_res)
    @staticmethod
    def inverse_with_jacobian(a):
_a = a.data
_res = ([0.0] * 8)
_res[0] = (- _a[0])
_res[1] = (- _a[1])
_res[2] = (- _a[2])
_res[3] = (- _a[3])
_res[4] = (- _a[4])
_res[5] = (- _a[5])
_res[6] = (- _a[6])
_res[7] = (- _a[7])
_res_D_a = numpy.zeros((8, 8))
_res_D_a[(0, 0)] = (- 1)
_res_D_a[(1, 0)] = 0
_res_D_a[(2, 0)] = 0
_res_D_a[(3, 0)] = 0
_res_D_a[(4, 0)] = 0
_res_D_a[(5, 0)] = 0
_res_D_a[(6, 0)] = 0
_res_D_a[(7, 0)] = 0
_res_D_a[(0, 1)] = 0
_res_D_a[(1, 1)] = (- 1)
_res_D_a[(2, 1)] = 0
_res_D_a[(3, 1)] = 0
_res_D_a[(4, 1)] = 0
_res_D_a[(5, 1)] = 0
_res_D_a[(6, 1)] = 0
_res_D_a[(7, 1)] = 0
_res_D_a[(0, 2)] = 0
_res_D_a[(1, 2)] = 0
_res_D_a[(2, 2)] = (- 1)
_res_D_a[(3, 2)] = 0
_res_D_a[(4, 2)] = 0
_res_D_a[(5, 2)] = 0
_res_D_a[(6, 2)] = 0
_res_D_a[(7, 2)] = 0
_res_D_a[(0, 3)] = 0
_res_D_a[(1, 3)] = 0
_res_D_a[(2, 3)] = 0
_res_D_a[(3, 3)] = (- 1)
_res_D_a[(4, 3)] = 0
_res_D_a[(5, 3)] = 0
_res_D_a[(6, 3)] = 0
_res_D_a[(7, 3)] = 0
_res_D_a[(0, 4)] = 0
_res_D_a[(1, 4)] = 0
_res_D_a[(2, 4)] = 0
_res_D_a[(3, 4)] = 0
_res_D_a[(4, 4)] = (- 1)
_res_D_a[(5, 4)] = 0
_res_D_a[(6, 4)] = 0
_res_D_a[(7, 4)] = 0
_res_D_a[(0, 5)] = 0
_res_D_a[(1, 5)] = 0
_res_D_a[(2, 5)] = 0
_res_D_a[(3, 5)] = 0
_res_D_a[(4, 5)] = 0
_res_D_a[(5, 5)] = (- 1)
_res_D_a[(6, 5)] = 0
_res_D_a[(7, 5)] = 0
_res_D_a[(0, 6)] = 0
_res_D_a[(1, 6)] = 0
_res_D_a[(2, 6)] = 0
_res_D_a[(3, 6)] = 0
_res_D_a[(4, 6)] = 0
_res_D_a[(5, 6)] = 0
_res_D_a[(6, 6)] = (- 1)
_res_D_a[(7, 6)] = 0
_res_D_a[(0, 7)] = 0
_res_D_a[(1, 7)] = 0
_res_D_a[(2, 7)] = 0
_res_D_a[(3, 7)] = 0
_res_D_a[(4, 7)] = 0
_res_D_a[(5, 7)] = 0
_res_D_a[(6, 7)] = 0
_res_D_a[(7, 7)] = (- 1)
return (sym.PolynomialCameraCal.from_storage(_res), _res_D_a)
    @staticmethod
    def compose_with_jacobians(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 8)
_res[0] = (_a[0] + _b[0])
_res[1] = (_a[1] + _b[1])
_res[2] = (_a[2] + _b[2])
_res[3] = (_a[3] + _b[3])
_res[4] = (_a[4] + _b[4])
_res[5] = (_a[5] + _b[5])
_res[6] = (_a[6] + _b[6])
_res[7] = (_a[7] + _b[7])
_res_D_a = numpy.zeros((8, 8))
_res_D_a[(0, 0)] = 1
_res_D_a[(1, 0)] = 0
_res_D_a[(2, 0)] = 0
_res_D_a[(3, 0)] = 0
_res_D_a[(4, 0)] = 0
_res_D_a[(5, 0)] = 0
_res_D_a[(6, 0)] = 0
_res_D_a[(7, 0)] = 0
_res_D_a[(0, 1)] = 0
_res_D_a[(1, 1)] = 1
_res_D_a[(2, 1)] = 0
_res_D_a[(3, 1)] = 0
_res_D_a[(4, 1)] = 0
_res_D_a[(5, 1)] = 0
_res_D_a[(6, 1)] = 0
_res_D_a[(7, 1)] = 0
_res_D_a[(0, 2)] = 0
_res_D_a[(1, 2)] = 0
_res_D_a[(2, 2)] = 1
_res_D_a[(3, 2)] = 0
_res_D_a[(4, 2)] = 0
_res_D_a[(5, 2)] = 0
_res_D_a[(6, 2)] = 0
_res_D_a[(7, 2)] = 0
_res_D_a[(0, 3)] = 0
_res_D_a[(1, 3)] = 0
_res_D_a[(2, 3)] = 0
_res_D_a[(3, 3)] = 1
_res_D_a[(4, 3)] = 0
_res_D_a[(5, 3)] = 0
_res_D_a[(6, 3)] = 0
_res_D_a[(7, 3)] = 0
_res_D_a[(0, 4)] = 0
_res_D_a[(1, 4)] = 0
_res_D_a[(2, 4)] = 0
_res_D_a[(3, 4)] = 0
_res_D_a[(4, 4)] = 1
_res_D_a[(5, 4)] = 0
_res_D_a[(6, 4)] = 0
_res_D_a[(7, 4)] = 0
_res_D_a[(0, 5)] = 0
_res_D_a[(1, 5)] = 0
_res_D_a[(2, 5)] = 0
_res_D_a[(3, 5)] = 0
_res_D_a[(4, 5)] = 0
_res_D_a[(5, 5)] = 1
_res_D_a[(6, 5)] = 0
_res_D_a[(7, 5)] = 0
_res_D_a[(0, 6)] = 0
_res_D_a[(1, 6)] = 0
_res_D_a[(2, 6)] = 0
_res_D_a[(3, 6)] = 0
_res_D_a[(4, 6)] = 0
_res_D_a[(5, 6)] = 0
_res_D_a[(6, 6)] = 1
_res_D_a[(7, 6)] = 0
_res_D_a[(0, 7)] = 0
_res_D_a[(1, 7)] = 0
_res_D_a[(2, 7)] = 0
_res_D_a[(3, 7)] = 0
_res_D_a[(4, 7)] = 0
_res_D_a[(5, 7)] = 0
_res_D_a[(6, 7)] = 0
_res_D_a[(7, 7)] = 1
_res_D_b = numpy.zeros((8, 8))
_res_D_b[(0, 0)] = 1
_res_D_b[(1, 0)] = 0
_res_D_b[(2, 0)] = 0
_res_D_b[(3, 0)] = 0
_res_D_b[(4, 0)] = 0
_res_D_b[(5, 0)] = 0
_res_D_b[(6, 0)] = 0
_res_D_b[(7, 0)] = 0
_res_D_b[(0, 1)] = 0
_res_D_b[(1, 1)] = 1
_res_D_b[(2, 1)] = 0
_res_D_b[(3, 1)] = 0
_res_D_b[(4, 1)] = 0
_res_D_b[(5, 1)] = 0
_res_D_b[(6, 1)] = 0
_res_D_b[(7, 1)] = 0
_res_D_b[(0, 2)] = 0
_res_D_b[(1, 2)] = 0
_res_D_b[(2, 2)] = 1
_res_D_b[(3, 2)] = 0
_res_D_b[(4, 2)] = 0
_res_D_b[(5, 2)] = 0
_res_D_b[(6, 2)] = 0
_res_D_b[(7, 2)] = 0
_res_D_b[(0, 3)] = 0
_res_D_b[(1, 3)] = 0
_res_D_b[(2, 3)] = 0
_res_D_b[(3, 3)] = 1
_res_D_b[(4, 3)] = 0
_res_D_b[(5, 3)] = 0
_res_D_b[(6, 3)] = 0
_res_D_b[(7, 3)] = 0
_res_D_b[(0, 4)] = 0
_res_D_b[(1, 4)] = 0
_res_D_b[(2, 4)] = 0
_res_D_b[(3, 4)] = 0
_res_D_b[(4, 4)] = 1
_res_D_b[(5, 4)] = 0
_res_D_b[(6, 4)] = 0
_res_D_b[(7, 4)] = 0
_res_D_b[(0, 5)] = 0
_res_D_b[(1, 5)] = 0
_res_D_b[(2, 5)] = 0
_res_D_b[(3, 5)] = 0
_res_D_b[(4, 5)] = 0
_res_D_b[(5, 5)] = 1
_res_D_b[(6, 5)] = 0
_res_D_b[(7, 5)] = 0
_res_D_b[(0, 6)] = 0
_res_D_b[(1, 6)] = 0
_res_D_b[(2, 6)] = 0
_res_D_b[(3, 6)] = 0
_res_D_b[(4, 6)] = 0
_res_D_b[(5, 6)] = 0
_res_D_b[(6, 6)] = 1
_res_D_b[(7, 6)] = 0
_res_D_b[(0, 7)] = 0
_res_D_b[(1, 7)] = 0
_res_D_b[(2, 7)] = 0
_res_D_b[(3, 7)] = 0
_res_D_b[(4, 7)] = 0
_res_D_b[(5, 7)] = 0
_res_D_b[(6, 7)] = 0
_res_D_b[(7, 7)] = 1
return (sym.PolynomialCameraCal.from_storage(_res), _res_D_a, _res_D_b)
    @staticmethod
    def between_with_jacobians(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 8)
_res[0] = ((- _a[0]) + _b[0])
_res[1] = ((- _a[1]) + _b[1])
_res[2] = ((- _a[2]) + _b[2])
_res[3] = ((- _a[3]) + _b[3])
_res[4] = ((- _a[4]) + _b[4])
_res[5] = ((- _a[5]) + _b[5])
_res[6] = ((- _a[6]) + _b[6])
_res[7] = ((- _a[7]) + _b[7])
_res_D_a = numpy.zeros((8, 8))
_res_D_a[(0, 0)] = (- 1)
_res_D_a[(1, 0)] = 0
_res_D_a[(2, 0)] = 0
_res_D_a[(3, 0)] = 0
_res_D_a[(4, 0)] = 0
_res_D_a[(5, 0)] = 0
_res_D_a[(6, 0)] = 0
_res_D_a[(7, 0)] = 0
_res_D_a[(0, 1)] = 0
_res_D_a[(1, 1)] = (- 1)
_res_D_a[(2, 1)] = 0
_res_D_a[(3, 1)] = 0
_res_D_a[(4, 1)] = 0
_res_D_a[(5, 1)] = 0
_res_D_a[(6, 1)] = 0
_res_D_a[(7, 1)] = 0
_res_D_a[(0, 2)] = 0
_res_D_a[(1, 2)] = 0
_res_D_a[(2, 2)] = (- 1)
_res_D_a[(3, 2)] = 0
_res_D_a[(4, 2)] = 0
_res_D_a[(5, 2)] = 0
_res_D_a[(6, 2)] = 0
_res_D_a[(7, 2)] = 0
_res_D_a[(0, 3)] = 0
_res_D_a[(1, 3)] = 0
_res_D_a[(2, 3)] = 0
_res_D_a[(3, 3)] = (- 1)
_res_D_a[(4, 3)] = 0
_res_D_a[(5, 3)] = 0
_res_D_a[(6, 3)] = 0
_res_D_a[(7, 3)] = 0
_res_D_a[(0, 4)] = 0
_res_D_a[(1, 4)] = 0
_res_D_a[(2, 4)] = 0
_res_D_a[(3, 4)] = 0
_res_D_a[(4, 4)] = (- 1)
_res_D_a[(5, 4)] = 0
_res_D_a[(6, 4)] = 0
_res_D_a[(7, 4)] = 0
_res_D_a[(0, 5)] = 0
_res_D_a[(1, 5)] = 0
_res_D_a[(2, 5)] = 0
_res_D_a[(3, 5)] = 0
_res_D_a[(4, 5)] = 0
_res_D_a[(5, 5)] = (- 1)
_res_D_a[(6, 5)] = 0
_res_D_a[(7, 5)] = 0
_res_D_a[(0, 6)] = 0
_res_D_a[(1, 6)] = 0
_res_D_a[(2, 6)] = 0
_res_D_a[(3, 6)] = 0
_res_D_a[(4, 6)] = 0
_res_D_a[(5, 6)] = 0
_res_D_a[(6, 6)] = (- 1)
_res_D_a[(7, 6)] = 0
_res_D_a[(0, 7)] = 0
_res_D_a[(1, 7)] = 0
_res_D_a[(2, 7)] = 0
_res_D_a[(3, 7)] = 0
_res_D_a[(4, 7)] = 0
_res_D_a[(5, 7)] = 0
_res_D_a[(6, 7)] = 0
_res_D_a[(7, 7)] = (- 1)
_res_D_b = numpy.zeros((8, 8))
_res_D_b[(0, 0)] = 1
_res_D_b[(1, 0)] = 0
_res_D_b[(2, 0)] = 0
_res_D_b[(3, 0)] = 0
_res_D_b[(4, 0)] = 0
_res_D_b[(5, 0)] = 0
_res_D_b[(6, 0)] = 0
_res_D_b[(7, 0)] = 0
_res_D_b[(0, 1)] = 0
_res_D_b[(1, 1)] = 1
_res_D_b[(2, 1)] = 0
_res_D_b[(3, 1)] = 0
_res_D_b[(4, 1)] = 0
_res_D_b[(5, 1)] = 0
_res_D_b[(6, 1)] = 0
_res_D_b[(7, 1)] = 0
_res_D_b[(0, 2)] = 0
_res_D_b[(1, 2)] = 0
_res_D_b[(2, 2)] = 1
_res_D_b[(3, 2)] = 0
_res_D_b[(4, 2)] = 0
_res_D_b[(5, 2)] = 0
_res_D_b[(6, 2)] = 0
_res_D_b[(7, 2)] = 0
_res_D_b[(0, 3)] = 0
_res_D_b[(1, 3)] = 0
_res_D_b[(2, 3)] = 0
_res_D_b[(3, 3)] = 1
_res_D_b[(4, 3)] = 0
_res_D_b[(5, 3)] = 0
_res_D_b[(6, 3)] = 0
_res_D_b[(7, 3)] = 0
_res_D_b[(0, 4)] = 0
_res_D_b[(1, 4)] = 0
_res_D_b[(2, 4)] = 0
_res_D_b[(3, 4)] = 0
_res_D_b[(4, 4)] = 1
_res_D_b[(5, 4)] = 0
_res_D_b[(6, 4)] = 0
_res_D_b[(7, 4)] = 0
_res_D_b[(0, 5)] = 0
_res_D_b[(1, 5)] = 0
_res_D_b[(2, 5)] = 0
_res_D_b[(3, 5)] = 0
_res_D_b[(4, 5)] = 0
_res_D_b[(5, 5)] = 1
_res_D_b[(6, 5)] = 0
_res_D_b[(7, 5)] = 0
_res_D_b[(0, 6)] = 0
_res_D_b[(1, 6)] = 0
_res_D_b[(2, 6)] = 0
_res_D_b[(3, 6)] = 0
_res_D_b[(4, 6)] = 0
_res_D_b[(5, 6)] = 0
_res_D_b[(6, 6)] = 1
_res_D_b[(7, 6)] = 0
_res_D_b[(0, 7)] = 0
_res_D_b[(1, 7)] = 0
_res_D_b[(2, 7)] = 0
_res_D_b[(3, 7)] = 0
_res_D_b[(4, 7)] = 0
_res_D_b[(5, 7)] = 0
_res_D_b[(6, 7)] = 0
_res_D_b[(7, 7)] = 1
return (sym.PolynomialCameraCal.from_storage(_res), _res_D_a, _res_D_b) |
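# Sanity check (a sketch; assumes `sym` exposes the PolynomialCameraCal above):
# these group ops are plain vector addition on the 8-dim storage, so composing
# an element with its inverse recovers the identity.
a = sym.PolynomialCameraCal.from_storage([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
e = GroupOps.compose(a, GroupOps.inverse(a))
assert (e.to_storage() == GroupOps.identity().to_storage()) |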
def scipy_optimise(merge_test_loader, args):
small_k = args.num_labeled_classes
big_k = args.max_classes
test_k_means_partial = partial(test_kmeans_for_scipy, merge_test_loader=merge_test_loader, args=args, verbose=True)
res = minimize_scalar(test_k_means_partial, bounds=(small_k, big_k), method='bounded', options={'disp': True})
print(f'Optimal K is {res.x}') |
class Tree(object):
def __init__(self, data, children, meta=None):
self.data = data
self.children = children
self._meta = meta
def meta(self):
if (self._meta is None):
self._meta = Meta()
return self._meta
def __repr__(self):
return ('Tree(%r, %r)' % (self.data, self.children))
def _pretty_label(self):
return self.data
def _pretty(self, level, indent_str):
if ((len(self.children) == 1) and (not isinstance(self.children[0], Tree))):
return [(indent_str * level), self._pretty_label(), '\t', ('%s' % (self.children[0],)), '\n']
l = [(indent_str * level), self._pretty_label(), '\n']
for n in self.children:
if isinstance(n, Tree):
l += n._pretty((level + 1), indent_str)
else:
l += [(indent_str * (level + 1)), ('%s' % (n,)), '\n']
return l
def pretty(self, indent_str=' '):
return ''.join(self._pretty(0, indent_str))
def __eq__(self, other):
try:
return ((self.data == other.data) and (self.children == other.children))
except AttributeError:
return False
def __ne__(self, other):
return (not (self == other))
def __hash__(self):
return hash((self.data, tuple(self.children)))
def iter_subtrees(self):
queue = [self]
subtrees = OrderedDict()
for subtree in queue:
subtrees[id(subtree)] = subtree
queue += [c for c in reversed(subtree.children) if (isinstance(c, Tree) and (id(c) not in subtrees))]
del queue
return reversed(list(subtrees.values()))
def find_pred(self, pred):
return filter(pred, self.iter_subtrees())
def find_data(self, data):
return self.find_pred((lambda t: (t.data == data))) |
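# Example: pretty() renders one indent level per depth, inlining a single
# non-Tree child after a tab.
t = Tree('root', [Tree('leaf', ['a']), 'b'])
print(t.pretty(), end='')   # root, then 'leaf<TAB>a' and 'b' one level in |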
def handle_after_execution(context: ExecutionContext, event: events.AfterExecution) -> None:
context.operations_processed += 1
context.results.append(event.result)
display_execution_result(context, event)
display_percentage(context, event) |
@array_function_dispatch(_multiply_dispatcher)
def multiply(a, i):
a_arr = numpy.asarray(a)
i_arr = numpy.asarray(i)
if (not issubclass(i_arr.dtype.type, integer)):
raise ValueError('Can only multiply by integers')
    out_size = (_get_num_chars(a_arr) * max(int(i_arr.max()), 0))
return _vec_string(a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) |
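# The public wrapper around this dispatch target is numpy.char.multiply:
import numpy as np

assert (np.char.multiply(np.array(['ab', 'c']), 3).tolist() == ['ababab', 'ccc']) |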
def test(model, dataloader, nshot):
utils.fix_randseed(0)
average_meter = AverageMeter(dataloader.dataset)
for (idx, batch) in enumerate(dataloader):
batch = utils.to_cuda(batch)
pred_mask = model.module.predict_mask_nshot(batch, nshot=nshot)
assert (pred_mask.size() == batch['query_mask'].size())
(area_inter, area_union) = Evaluator.classify_prediction(pred_mask.clone(), batch)
average_meter.update(area_inter, area_union, batch['class_id'], loss=None)
average_meter.write_process(idx, len(dataloader), epoch=(- 1), write_batch_idx=1)
average_meter.write_result('Test', 0)
(miou, fb_iou) = average_meter.compute_iou()
return (miou, fb_iou) |
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue |
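# Minimal sketch: _HashedSeq caches its hash once at construction, so repeated
# dict probes (e.g. functools.lru_cache-style keys) avoid rehashing the tuple.
key = _HashedSeq(('func', 1, 2))
cache = {key: 'result'}
assert hash(key) == key.hashvalue
assert cache[_HashedSeq(('func', 1, 2))] == 'result' |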
def test_EPOCH16breakdown(lib):
epoch16 = (ctypes.c_double * 2)(.0, .0)
yyyy = ctypes.c_long(0)
mm = ctypes.c_long(0)
dd = ctypes.c_long(0)
hh = ctypes.c_long(0)
mn = ctypes.c_long(0)
sec = ctypes.c_long(0)
msec = ctypes.c_long(0)
usec = ctypes.c_long(0)
nsec = ctypes.c_long(0)
psec = ctypes.c_long(0)
lib.EPOCH16breakdown(epoch16, yyyy, mm, dd, hh, mn, sec, msec, usec, nsec, psec)
print('Expect 2010-1-2 3:4:5.')
print('Actual {:.0f}-{:.0f}-{:.0f} {:.0f}:{:.0f}:{:.0f}.{:03.0f}{:03.0f}{:03.0f}{:03.0f}'.format(yyyy.value, mm.value, dd.value, hh.value, mn.value, sec.value, msec.value, usec.value, nsec.value, psec.value)) |
class Cached(type):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.register_cache()
return obj |
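# Hedged sketch of the metaclass above: instantiation routes through
# Cached.__call__, so every new object registers itself before being returned.
# Widget and its register_cache body are hypothetical.
class Widget(metaclass=Cached):
    def register_cache(self):
        print('registered', type(self).__name__)
w = Widget()  # prints 'registered Widget' |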
def create_logger():
loggers = []
names = ['train', 'val', 'test']
for i in range(cfg.share.num_splits):
loggers.append(Logger(name=names[i], task_type=infer_task()))
return loggers |
def _exp_sinch(a, x):
# Computes exp(a) * sinh(x) / x; for small |x| a truncated Taylor series of
# sinh(x)/x avoids the catastrophic cancellation of the direct formula.
if (abs(x) < 0.0135):
x2 = (x * x)
return (np.exp(a) * (1 + ((x2 / 6.0) * (1 + ((x2 / 20.0) * (1 + (x2 / 42.0)))))))
else:
return ((np.exp((a + x)) - np.exp((a - x))) / (2 * x)) |
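# Quick numerical check that the two branches above agree (assumes numpy as np):
import numpy as np
for x in (1e-3, 0.5):
    assert np.isclose(_exp_sinch(1.0, x), np.exp(1.0) * np.sinh(x) / x, rtol=1e-10) |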
def get_test_list(run_only: Optional[List[str]]) -> TestList:
test_list: TestList = []
test_list.extend(get_test_list_by_type(run_only, TestType.CPP))
py_run_only = get_python_run_only(run_only)
test_list.extend(get_test_list_by_type(py_run_only, TestType.PY))
if (not test_list):
raise_no_test_found_exception(get_oss_binary_folder(TestType.CPP), get_oss_binary_folder(TestType.PY))
return test_list |
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out |
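# Shape sketch for the DenseNet-style Transition block above (assumes torch,
# torch.nn as nn and torch.nn.functional as F as in its source module):
# the 1x1 conv remaps channels, then 2x2 average pooling halves H and W.
import torch
block = Transition(in_planes=64, out_planes=32)
assert block(torch.randn(1, 64, 8, 8)).shape == (1, 32, 4, 4) |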
class SegmentationPsa(nn.Module):
def __init__(self, config, num_classes, in_channel=4096, middle_channel=512, scale=8):
super(SegmentationPsa, self).__init__()
self.config = config
self.seg1 = Conv2dbnPR(in_channel, middle_channel, 3, 1, padding=12, dilation=12, bias=True)
self.rpad = nn.ReflectionPad2d(12)
self.seg2 = nn.Conv2d(middle_channel, 21, kernel_size=3, stride=1, padding=0, dilation=12, bias=True)
self.upsample = nn.Upsample(scale_factor=scale, mode='bilinear')
def forward(self, inputs):
x = inputs
seg_head = self.seg1(x)
x = self.rpad(seg_head)
x = self.seg2(x)
seg_head = self.upsample(seg_head)
x = self.upsample(x)
return (x, seg_head) |
@test_utils.test()
def test_snode_clear_gradient():
x = ti.field(float, shape=(), needs_grad=True, needs_dual=True)
y = ti.field(float, shape=(), needs_grad=True, needs_dual=True)
x[None] = 1.0
def compute():
y[None] += (x[None] ** 2)
with ti.ad.Tape(loss=y):
compute()
with ti.ad.FwdMode(loss=y, param=x):
compute()
assert (x.grad[None] == 2.0)
with ti.ad.Tape(loss=y):
compute()
assert (y.dual[None] == 2.0) |
def resolve_dir(env_variable, default='data'):
default_dir = os.path.join(resolve_cache_dir(), default)
dir_path = os.getenv(env_variable, default_dir)
if (not PathManager.exists(dir_path)):
PathManager.mkdirs(dir_path)
return dir_path |
@unittest.skipIf(TEST_WITH_TSAN, 'Fails with TSAN with the following error: starting new threads after multi-threaded fork is not supported. Dying (set die_after_fork=0 to override)')
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
super(TestIndividualWorkerQueue, self).setUp()
self.dataset = TestWorkerQueueDataset(list(range(128)))
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, worker_init_fn=self.dataset.worker_init_fn)
current_worker_idx = 0
for (i, (worker_ids, sample)) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), ([current_worker_idx] * batch_size))
self.assertEqual(sample.tolist(), list(range((i * batch_size), ((i + 1) * batch_size))))
current_worker_idx += 1
if (current_worker_idx == num_workers):
current_worker_idx = 0
def test_ind_worker_queue(self):
for batch_size in (8, 16, 32, 64):
for num_workers in range(1, 6):
self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers) |
def format_sftp_path(path):
if path.as_posix().startswith('sftp'):
uid = os.getuid()
path = Path(f'/run/user/{uid}/gvfs/sftp:host={path.as_posix()[6:]}')
return path |
class TestRecurrence():
def check_poly(self, func, param_ranges=[], x_range=[], nn=10, nparam=10, nx=10, rtol=1e-08):
np.random.seed(1234)
dataset = []
for n in np.arange(nn):
params = [(a + ((b - a) * np.random.rand(nparam))) for (a, b) in param_ranges]
params = np.asarray(params).T
if (not param_ranges):
params = [0]
for p in params:
if param_ranges:
p = ((n,) + tuple(p))
else:
p = (n,)
x = (x_range[0] + ((x_range[1] - x_range[0]) * np.random.rand(nx)))
x[0] = x_range[0]
x[1] = x_range[1]
kw = dict(sig=(((len(p) + 1) * 'd') + '->d'))
z = np.c_[(np.tile(p, (nx, 1)), x, func(*(p + (x,)), **kw))]
dataset.append(z)
dataset = np.concatenate(dataset, axis=0)
def polyfunc(*p):
p = ((p[0].astype(int),) + p[1:])
kw = dict(sig=(('l' + ((len(p) - 1) * 'd')) + '->d'))
return func(*p, **kw)
with np.errstate(all='raise'):
ds = FuncData(polyfunc, dataset, list(range((len(param_ranges) + 2))), (- 1), rtol=rtol)
ds.check()
def test_jacobi(self):
self.check_poly(_ufuncs.eval_jacobi, param_ranges=[((- 0.99), 10), ((- 0.99), 10)], x_range=[(- 1), 1])
def test_sh_jacobi(self):
self.check_poly(_ufuncs.eval_sh_jacobi, param_ranges=[(1, 10), (0, 1)], x_range=[0, 1])
def test_gegenbauer(self):
self.check_poly(_ufuncs.eval_gegenbauer, param_ranges=[((- 0.499), 10)], x_range=[(- 1), 1])
def test_chebyt(self):
self.check_poly(_ufuncs.eval_chebyt, param_ranges=[], x_range=[(- 1), 1])
def test_chebyu(self):
self.check_poly(_ufuncs.eval_chebyu, param_ranges=[], x_range=[(- 1), 1])
def test_chebys(self):
self.check_poly(_ufuncs.eval_chebys, param_ranges=[], x_range=[(- 2), 2])
def test_chebyc(self):
self.check_poly(_ufuncs.eval_chebyc, param_ranges=[], x_range=[(- 2), 2])
def test_sh_chebyt(self):
self.check_poly(_ufuncs.eval_sh_chebyt, param_ranges=[], x_range=[0, 1])
def test_sh_chebyu(self):
self.check_poly(_ufuncs.eval_sh_chebyu, param_ranges=[], x_range=[0, 1])
def test_legendre(self):
self.check_poly(_ufuncs.eval_legendre, param_ranges=[], x_range=[(- 1), 1])
def test_sh_legendre(self):
self.check_poly(_ufuncs.eval_sh_legendre, param_ranges=[], x_range=[0, 1])
def test_genlaguerre(self):
self.check_poly(_ufuncs.eval_genlaguerre, param_ranges=[((- 0.99), 10)], x_range=[0, 100])
def test_laguerre(self):
self.check_poly(_ufuncs.eval_laguerre, param_ranges=[], x_range=[0, 100])
def test_hermite(self):
v = _ufuncs.eval_hermite(70, 1.0)
a = (- 1.e+60)
assert_allclose(v, a) |
class EnsembleModelEntropy(ModelTemplate):
def __init__(self, all_models, mode='entropy', num_classes=4, use_softmax=False):
super(EnsembleModelEntropy, self).__init__()
self.all_models = all_models
self.max_ent = torch.log(torch.Tensor([num_classes])).item()
self.mode = mode
self.use_softmax = use_softmax
def entropy(self, preds):
logp = torch.log((preds + 1e-05))
entropy = torch.sum(((- preds) * logp), dim=(- 1))
return entropy
def forward(self, imgs):
all_closed_set_preds = []
for m in self.all_models:
closed_set_preds = m(imgs, return_feature=False)
if self.use_softmax:
closed_set_preds = torch.nn.Softmax(dim=(- 1))(closed_set_preds)
all_closed_set_preds.append(closed_set_preds)
closed_set_preds = torch.stack(all_closed_set_preds).mean(dim=0)
if (self.mode == 'entropy'):
open_set_preds = self.entropy(closed_set_preds)
elif (self.mode == 'max_softmax'):
open_set_preds = (- closed_set_preds.max(dim=(- 1))[0])
else:
raise NotImplementedError
return (closed_set_preds, open_set_preds) |
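# Sanity sketch for the entropy helper above (assumes torch): a uniform
# distribution over 4 classes scores close to log(4), the max_ent normaliser.
# entropy() uses no instance state, so __new__ skips the full constructor here.
import torch
m = EnsembleModelEntropy.__new__(EnsembleModelEntropy)
ent = m.entropy(torch.full((1, 4), 0.25))
assert torch.isclose(ent, torch.log(torch.tensor(4.0)), atol=1e-3).all() |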
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target', default='', type=str, required=True, help='JSON file path to the target (reference) text')
parser.add_argument('--translation', default='', type=str, required=True, help='JSON file path to the translation text')
parser.add_argument('--english_term', default='', type=str, required=False, help='JSON file path to English terms for the entity evaluation')
args = parser.parse_args()
assert os.path.exists(args.target)
assert os.path.exists(args.translation)
jsn_target = json.load(open(args.target, 'r'))
jsn_translation = json.load(open(args.translation, 'r'))
if os.path.exists(args.english_term):
english_term = json.load(open(args.english_term, 'r'))
english_term = set(english_term)
total_trans = 0
total_gold = 0
correct_trans = 0
correct_gold = 0
else:
english_term = None
assert (jsn_target['lang'] == jsn_translation['lang'])
lang = jsn_target['lang']
assert (jsn_target['type'] == 'target')
assert (jsn_translation['type'] == 'translation')
xml_acc = 0
xml_match = 0
tagTypeList = ['ph', 'xref', 'uicontrol', 'b', 'codeph', 'parmname', 'i', 'title', 'menucascade', 'varname', 'userinput', 'filepath', 'term', 'systemoutput', 'cite', 'li', 'ul', 'p', 'note', 'indexterm', 'u', 'fn']
tagBegList = [(('<' + t) + '>') for t in tagTypeList]
tagEndList = [(('</' + t) + '>') for t in tagTypeList]
tagList = (tagBegList + tagEndList)
DUMMY = '####DUMMY###SEPARATOR###DUMMY###'
suffix = str(random.randint(0, 10 ** 8))  # upper bound lost in the source; any wide range works for a unique temp-dir name
assert (not os.path.exists(suffix))
os.mkdir(suffix)
f_trans_without_tags = open(os.path.join(suffix, 'trans.txt'), 'w')
f_trans_with_tags = open(os.path.join(suffix, 'trans_struct.txt'), 'w')
f_gold_without_tags = open(os.path.join(suffix, 'gold.txt'), 'w')
f_gold_with_tags = open(os.path.join(suffix, 'gold_struct.txt'), 'w')
for target_id in jsn_target['text']:
assert (target_id in jsn_translation['text'])
target = jsn_target['text'][target_id].strip()
translation = jsn_translation['text'][target_id].strip()
xml_elm_target = convertToXML('<ROOT>{}</ROOT>'.format(target))
xml_elm_translation = convertToXML('<ROOT>{}</ROOT>'.format(translation))
assert (xml_elm_target is not None)
match = False
if (xml_elm_translation is not None):
xml_acc += 1
if matchXML(xml_elm_translation, xml_elm_target):
xml_match += 1
match = True
for tag in tagList:
target = target.replace(tag, DUMMY)
translation = translation.replace(tag, DUMMY)
target = de_escape(target)
translation = de_escape(translation)
target = target.split(DUMMY)
translation = translation.split(DUMMY)
if (english_term is not None):
(total_trans, correct_trans, total_gold, correct_gold) = num_tech_eval(''.join(translation), ''.join(target), total_trans, total_gold, correct_trans, correct_gold, english_term)
f_trans_without_tags.write((''.join(translation) + '\n'))
f_gold_without_tags.write((''.join(target) + '\n'))
if match:
assert (len(target) == len(translation))
for i in range(len(target)):
f_trans_with_tags.write((translation[i] + '\n'))
f_gold_with_tags.write((target[i] + '\n'))
else:
for i in range(len(target)):
f_trans_with_tags.write('\n')
f_gold_with_tags.write((target[i] + '\n'))
print('XML structure accuracy: {} %'.format(((100 * xml_acc) / len(jsn_target['text']))))
print('XML matching accuracy: {} %'.format(((100 * xml_match) / len(jsn_target['text']))))
if (english_term is not None):
print('NE&NUM precision: {} %'.format(((100 * correct_trans) / total_trans)))
print('NE&NUM recall: {} %'.format(((100 * correct_gold) / total_gold)))
f_trans_without_tags.close()
f_trans_with_tags.close()
f_gold_without_tags.close()
f_gold_with_tags.close()
os.system('./scripts/calc_bleu.sh {} {} 2> {}'.format(lang, suffix, os.path.join(suffix, 'TMP')))
f_trans = open(os.path.join(suffix, 'bleu.txt'), 'r')
for line in f_trans:
bleu = float(line.split()[2][0:(- 1)])
break
f_trans.close()
f_trans = open(os.path.join(suffix, 'bleu_struct.txt'), 'r')
for line in f_trans:
bleu_struct = float(line.split()[2][0:(- 1)])
break
f_trans.close()
print('BLEU:', bleu)
print('XML BLEU:', bleu_struct)
os.system('rm -r ./{}*'.format(suffix)) |
def shard_params_and_opt_state(params, params_spec, mesh, optimizer, init_opt_state=None):
def init_fn(params_):
if (init_opt_state is None):
opt_state_ = optimizer.init(params_)
else:
opt_state_ = init_opt_state
return (opt_state_, params_)
def get_opt_spec(x):
if isinstance(x, (dict, FrozenDict)):
return params_spec
return None
params_shapes = jax.tree_util.tree_map((lambda x: ShapeDtypeStruct(x.shape, x.dtype)), params)
state_shapes = jax.eval_shape(init_fn, params_shapes)
(opt_state_spec, _) = jax.tree_util.tree_map(get_opt_spec, state_shapes, is_leaf=(lambda x: isinstance(x, (dict, FrozenDict, optax.EmptyState))))
p_get_initial_state = pjit(init_fn, in_shardings=(params_spec,), out_shardings=(opt_state_spec, params_spec))
with mesh:
(opt_state, params) = p_get_initial_state(params)
return (params, opt_state, opt_state_spec) |
def draw_bootstrap(*arrays, bootstrap_ratio=0.632, min_samples=1):
num_data = arrays[0].shape[0]
assert all(((arr.shape[0] == num_data) for arr in arrays))
if (bootstrap_ratio is None):
num_samples = min_samples
else:
assert (bootstrap_ratio < 1)
num_samples = int((math.log((1 - bootstrap_ratio)) / math.log((1 - (1 / num_data)))))
num_samples = max(min_samples, num_samples)
idxs = random.choices(range(num_data), k=num_samples)
res = [arr[idxs] for arr in arrays]
return res |
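# Sketch of the sample-count formula above (assumes math): drawing k items
# with replacement from n covers an expected fraction 1 - (1 - 1/n)**k of
# them, so k = log(1 - ratio) / log(1 - 1/n) targets the requested coverage.
import math
n, ratio = 1000, 0.632
k = int(math.log(1 - ratio) / math.log(1 - 1 / n))
assert abs((1 - (1 - 1 / n) ** k) - ratio) < 0.01 |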
@register_module  # decorator name truncated in the source; a registry decorator of this form is assumed
class SpMiddleFHD(nn.Module):
def __init__(self, num_input_features=128, norm_cfg=None, name='SpMiddleFHD', **kwargs):
super(SpMiddleFHD, self).__init__()
self.name = name
self.dcn = None
self.zero_init_residual = False
if (norm_cfg is None):
norm_cfg = dict(type='BN1d', eps=0.001, momentum=0.01)
self.middle_conv = spconv.SparseSequential(SubMConv3d(num_input_features, 16, 3, bias=False, indice_key='subm0'), build_norm_layer(norm_cfg, 16)[1], nn.ReLU(), SubMConv3d(16, 16, 3, bias=False, indice_key='subm0'), build_norm_layer(norm_cfg, 16)[1], nn.ReLU(), SparseConv3d(16, 32, 3, 2, padding=1, bias=False), build_norm_layer(norm_cfg, 32)[1], nn.ReLU(), SubMConv3d(32, 32, 3, indice_key='subm1', bias=False), build_norm_layer(norm_cfg, 32)[1], nn.ReLU(), SubMConv3d(32, 32, 3, indice_key='subm1', bias=False), build_norm_layer(norm_cfg, 32)[1], nn.ReLU(), SparseConv3d(32, 64, 3, 2, padding=1, bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SubMConv3d(64, 64, 3, indice_key='subm2', bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SubMConv3d(64, 64, 3, indice_key='subm2', bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SubMConv3d(64, 64, 3, indice_key='subm2', bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SparseConv3d(64, 64, 3, 2, padding=[0, 1, 1], bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SubMConv3d(64, 64, 3, indice_key='subm3', bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SubMConv3d(64, 64, 3, indice_key='subm3', bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU(), SubMConv3d(64, 64, 3, indice_key='subm3', bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU())
self.extra_conv = spconv.SparseSequential(SparseConv3d(64, 64, (3, 1, 1), (2, 1, 1), bias=False), build_norm_layer(norm_cfg, 64)[1], nn.ReLU())
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif (pretrained is None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if (self.dcn is not None):
for m in self.modules():
if (isinstance(m, Bottleneck) and hasattr(m, 'conv2_offset')):
constant_init(m.conv2_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, voxel_features, coors, batch_size, input_shape):
sparse_shape = (np.array(input_shape[::(- 1)]) + [1, 0, 0])
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape, batch_size)
conv_4 = self.middle_conv(ret)
ret = self.extra_conv(conv_4)
ret = ret.dense()
(N, C, D, H, W) = ret.shape
ret = ret.view(N, (C * D), H, W)
return (ret, conv_4) |
@pytest.mark.parametrize('task_name', [tn for tn in (all_tasks - julia_tasks)])
def test_describe_x(task_name):
task = get_task(task_name)
labels = task.get_labels_data()
assert isinstance(labels, list)
assert (len(labels) == task.get_observation(num_observation=1).shape[(- 1)]) |
class QuadraticEVPSolver(Solver):
def __init__(self, conf, mtx_m=None, mtx_d=None, mtx_k=None, n_eigs=None, eigenvectors=None, status=None, context=None, **kwargs):
Solver.__init__(self, conf=conf, mtx_m=mtx_m, mtx_d=mtx_d, mtx_k=mtx_k, n_eigs=n_eigs, eigenvectors=eigenvectors, status=status, context=context)
solver_conf = structify(self.conf.solver)
self.solver = Solver.any_from_conf(solver_conf)
def __call__(self, mtx_m, mtx_d, mtx_k, n_eigs=None, eigenvectors=None, status=None, conf=None):
raise ValueError('called an abstract QuadraticEVPSolver instance!') |
def extend_with_decoupled_weight_decay(base_optimizer: Type[tf.keras.optimizers.Optimizer]) -> Type[tf.keras.optimizers.Optimizer]:
class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension, base_optimizer):
def __init__(self, weight_decay: Union[(FloatTensorLike, Callable)], *args, **kwargs):
super().__init__(weight_decay, *args, **kwargs)
return OptimizerWithDecoupledWeightDecay |
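# Usage sketch (tensorflow_addons-style; assumes tf and the
# DecoupledWeightDecayExtension mixin from the source module are available):
AdamW = extend_with_decoupled_weight_decay(tf.keras.optimizers.Adam)
opt = AdamW(weight_decay=1e-4, learning_rate=1e-3) |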
class LamaFeatureSelector():
def __init__(self, outcome: str, outcome_type: str, treatment: str, timeout: int, n_threads: int, n_folds: int, verbose: bool, generate_report: bool, report_dir: str, use_algos: List[str]):
self.outcome = outcome
self.outcome_type = outcome_type
self.treatment = treatment
self.use_algos = use_algos
self.timeout = timeout
self.n_threads = n_threads
self.n_folds = n_folds
self.verbose = verbose
self.generate_report = generate_report
self.report_dir = report_dir
def perform_selection(self, df: pd.DataFrame) -> pd.DataFrame:
roles = {'target': self.outcome, 'drop': [self.treatment]}
if (self.outcome_type == 'numeric'):
task_name = 'reg'
loss = 'mse'
metric = 'mse'
elif (self.outcome_type == 'binary'):
task_name = 'binary'
loss = 'logloss'
metric = 'logloss'
else:
task_name = 'multiclass'
loss = 'crossentropy'
metric = 'crossentropy'
task = Task(name=task_name, loss=loss, metric=metric)
automl = TabularAutoML(task=task, timeout=self.timeout, cpu_limit=self.n_threads, general_params={'use_algos': [self.use_algos]}, reader_params={'n_jobs': self.n_threads, 'cv': self.n_folds})
if self.generate_report:
report = ReportDeco(output_path=self.report_dir)
automl = report(automl)
_ = automl.fit_predict(df, roles=roles)
return automl.model.get_feature_scores() |
class PrivMLP(MLP):
def __init__(self, num_classes, epsilon: Annotated[(float, ArgInfo(help='DP epsilon parameter', option='-e'))], delta: Annotated[(Union[(Literal['auto'], float)], ArgInfo(help='DP delta parameter (if "auto", sets a proper value based on data size)', option='-d'))]='auto', max_grad_norm: Annotated[(float, ArgInfo(help='maximum norm of the per-sample gradients'))]=1.0, batch_size: Annotated[(int, ArgInfo(help='batch size'))]=256, **kwargs: Annotated[(dict, ArgInfo(help='extra options passed to base class', bases=[MLP], exclude=['batch_norm']))]):
super().__init__(num_classes, batch_norm=False, batch_size=batch_size, **kwargs)
self.epsilon = epsilon
self.delta = delta
self.max_grad_norm = max_grad_norm
self.num_train_nodes = None
def calibrate(self):
self.noisy_sgd = NoisySGD(noise_scale=0.0, dataset_size=self.num_train_nodes, batch_size=self.batch_size, epochs=self.epochs, max_grad_norm=self.max_grad_norm)
with console.status('calibrating noise to privacy budget'):
if (self.delta == 'auto'):
delta = (0.0 if np.isinf(self.epsilon) else (1.0 / (10 ** len(str(self.num_train_nodes)))))
console.info('delta = %.0e', delta)
self.noise_scale = self.noisy_sgd.calibrate(eps=self.epsilon, delta=delta)
console.info(f'noise scale: {self.noise_scale:.4f}')
self._classifier = self.noisy_sgd.prepare_module(self._classifier)
def fit(self, data: Data, prefix: str='') -> Metrics:
num_train_nodes = data.train_mask.sum().item()
if (num_train_nodes != self.num_train_nodes):
self.num_train_nodes = num_train_nodes
self.calibrate()
return super().fit(data, prefix=prefix)
def data_loader(self, data: Data, stage: Stage) -> NodeDataLoader:
dataloader = super().data_loader(data, stage)
if (stage == 'train'):
dataloader.poisson_sampling = True
return dataloader
def configure_optimizer(self) -> Optimizer:
optimizer = super().configure_optimizer()
optimizer = self.noisy_sgd.prepare_optimizer(optimizer)
return optimizer |
@register_toolkit()
class AugustSmartLock(FunctionToolkit):
name_for_human = 'August Smart Lock'
description_for_human = 'Toolkit for controlling and managing August Smart Lock.'
name_for_model = 'AugustSmartLock'
description_for_model = "Used for controlling and managing the August Smart Lock, specifically installed on the front door of the user's residence. It provides tools to lock and unlock the door, check the lock status, add and delete guests, grant and revoke access for guests, generate and revoke access codes, and view access history."
tool_classes = [AugustSmartLockCheckLockStatus, AugustSmartLockLockDoor, AugustSmartLockUnlockDoor, AugustSmartLockSearchGuests, AugustSmartLockAddGuest, AugustSmartLockDeleteGuest, AugustSmartLockGrantGuestAccess, AugustSmartLockRevokeGuestAccess, AugustSmartLockGenerateTemporaryAccessCode, AugustSmartLockRevokeTemporaryAccessCode, AugustSmartLockViewAccessHistory] |
@dataclass(frozen=True)
class Processor():
metric: 'Metric'
metric_service: MetricService
eval_cache_path: str
adapter_spec: AdapterSpec
def process(self, request_state_set: RequestStateSet) -> List[Stat]:
instance_stats: List[Stat] = []
generation_states = request_state_set.generation_states
if (len(generation_states) != 0):
instance_stats.extend(self.metric.evaluate_generation(self.adapter_spec, singleton(generation_states), self.metric_service, self.eval_cache_path))
references_states = request_state_set.references_states
if (len(references_states) != 0):
instance_stats.extend(self.metric.evaluate_references(self.adapter_spec, references_states, self.metric_service, self.eval_cache_path))
for (i, stat) in enumerate(instance_stats):
instance_stats[i] = add_context(stat, MetricContext.from_instance(request_state_set.instance))
return instance_stats |
def train(args):
np.random.seed(args.seed)
(train_l, train_ul, test) = load_dataset(args.data_dir, valid=args.validation, dataset_seed=args.dataset_seed)
print('N_train_labeled:{}, N_train_unlabeled:{}'.format(train_l.N, train_ul.N))
enc = CNN(n_outputs=args.n_categories, dropout_rate=args.dropout_rate, top_bn=args.top_bn)
if (args.gpu > (- 1)):
chainer.cuda.get_device(args.gpu).use()
enc.to_gpu()
optimizer = optimizers.Adam(alpha=args.lr, beta1=args.mom1)
optimizer.setup(enc)
optimizer.use_cleargrads()
alpha_plan = ([args.lr] * args.num_epochs)
beta1_plan = ([args.mom1] * args.num_epochs)
for i in range(args.epoch_decay_start, args.num_epochs):
alpha_plan[i] = ((float((args.num_epochs - i)) / (args.num_epochs - args.epoch_decay_start)) * args.lr)
beta1_plan[i] = args.mom2
accs_test = np.zeros(args.num_epochs)
cl_losses = np.zeros(args.num_epochs)
ul_losses = np.zeros(args.num_epochs)
mkdir_p(args.log_dir)
for epoch in range(args.num_epochs):
optimizer.alpha = alpha_plan[epoch]
optimizer.beta1 = beta1_plan[epoch]
sum_loss_l = 0
sum_loss_ul = 0
start = time.time()
for it in range(args.num_iter_per_epoch):
(x, t) = train_l.get(args.batchsize, gpu=args.gpu, aug_trans=args.aug_trans, aug_flip=args.aug_flip)
loss_l = loss_labeled(enc, Variable(x), Variable(t))
(x_u, _) = train_ul.get(args.batchsize_ul, gpu=args.gpu, aug_trans=args.aug_trans, aug_flip=args.aug_flip)
loss_ul = loss_unlabeled(enc, Variable(x_u), args)
loss_total = (loss_l + loss_ul)
enc.cleargrads()
loss_total.backward()
optimizer.update()
sum_loss_l += loss_l.data
sum_loss_ul += loss_ul.data
end = time.time()
cl_losses[epoch] = (sum_loss_l / args.num_iter_per_epoch)
ul_losses[epoch] = (sum_loss_ul / args.num_iter_per_epoch)
if (((epoch + 1) % args.eval_freq) == 0):
acc_test_sum = 0
(test_x, test_t) = test.get()
N_test = test_x.shape[0]
for i in range(0, N_test, args.batchsize_eval):
x = test_x[i:(i + args.batchsize_eval)]
t = test_t[i:(i + args.batchsize_eval)]
if (args.gpu > (- 1)):
(x, t) = (cuda.to_gpu(x, device=args.gpu), cuda.to_gpu(t, device=args.gpu))
(_, acc) = loss_test(enc, Variable(x, volatile=True), Variable(t, volatile=True))
acc_test_sum += (acc * x.shape[0])
accs_test[epoch] = (acc_test_sum / N_test)
print('Epoch:{}, classification loss:{}, unlabeled loss:{}, time:{}'.format(epoch, cl_losses[epoch], ul_losses[epoch], (end - start)))
print('test acc:{}'.format(accs_test[epoch]))
sys.stdout.flush()
if (((epoch + 1) % args.snapshot_freq) == 0):
np.savetxt(os.path.join(args.log_dir, 'log.txt'), np.concatenate([np.array([['acc', 'cl_loss', 'ul_loss']]), np.transpose([accs_test, cl_losses, ul_losses])], 0), fmt='%s')
serializers.save_npz(os.path.join(args.log_dir, 'trained_model_ep{}'.format(epoch)), enc)
np.savetxt(os.path.join(args.log_dir, 'log.txt'), np.concatenate([np.array([['acc', 'cl_loss', 'ul_loss']]), np.transpose([accs_test, cl_losses, ul_losses])], 0), fmt='%s')
serializers.save_npz(os.path.join(args.log_dir, 'trained_model_final'), enc) |
class Schelling(Model):
def __init__(self, height=20, width=20, density=0.8, minority_pc=0.2, homophily=3, education_boost=0, education_pc=0.2, seed=None):
self.height = height
self.width = width
self.density = density
self.minority_pc = minority_pc
self.homophily = homophily
self.education_boost = education_boost
self.education_pc = education_pc
self.schedule = RandomActivation(self)
self.grid = SingleGrid(height, width, torus=True)
self.happy = 0
self.datacollector = DataCollector({'happy': 'happy'}, {'x': (lambda a: a.pos[0]), 'y': (lambda a: a.pos[1])})
for cell in self.grid.coord_iter():
x_coord = cell[1]
y_coord = cell[2]
if (self.random.random() < self.density):
if (self.random.random() < self.minority_pc):
agent_type = 1
else:
agent_type = 0
agent_homophily = homophily
if (self.random.random() < self.education_pc):
agent_homophily += self.education_boost
agent = SchellingAgent((x_coord, y_coord), self, agent_type, agent_homophily)
self.grid.position_agent(agent, (x_coord, y_coord))
self.schedule.add(agent)
self.running = True
self.datacollector.collect(self)
def step(self):
self.happy = 0
self.schedule.step()
self.datacollector.collect(self)
if (self.happy == self.schedule.get_agent_count()):
self.running = False |
class DCGAN_G_nobn(nn.Module):
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
super(DCGAN_G_nobn, self).__init__()
self.ngpu = ngpu
assert ((isize % 16) == 0), 'isize has to be a multiple of 16'
(cngf, tisize) = ((ngf // 2), 4)
while (tisize != isize):
cngf = (cngf * 2)
tisize = (tisize * 2)
main = nn.Sequential()
main.add_module('initial.{0}-{1}.convt'.format(nz, cngf), nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
main.add_module('initial.{0}.relu'.format(cngf), nn.ReLU(True))
(csize, cndf) = (4, cngf)
while (csize < (isize // 2)):
main.add_module('pyramid.{0}-{1}.convt'.format(cngf, (cngf // 2)), nn.ConvTranspose2d(cngf, (cngf // 2), 4, 2, 1, bias=False))
main.add_module('pyramid.{0}.relu'.format((cngf // 2)), nn.ReLU(True))
cngf = (cngf // 2)
csize = (csize * 2)
for t in range(n_extra_layers):
main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf), nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf), nn.ReLU(True))
main.add_module('final.{0}-{1}.convt'.format(cngf, nc), nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
main.add_module('final.{0}.tanh'.format(nc), nn.Tanh())
self.main = main
def forward(self, input):
if (isinstance(input.data, torch.cuda.FloatTensor) and (self.ngpu > 1)):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output |
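# Shape sketch for the generator above (assumes torch; isize must be a
# multiple of 16): a (nz, 1, 1) latent code is upsampled to (nc, isize, isize).
import torch
G = DCGAN_G_nobn(isize=32, nz=100, nc=3, ngf=64, ngpu=1)
assert G(torch.randn(2, 100, 1, 1)).shape == (2, 3, 32, 32) |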
class HuggingFaceWav2Vec2(nn.Module):
def __init__(self, source, save_path, output_norm=False, freeze=False, freeze_feature_extractor=False, apply_spec_augment=False, output_all_hiddens=False):
super().__init__()
self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(source, cache_dir=save_path)
if ('hubert' in source):
config = HF_config.get('hubert')
model = HF_models.get('hubert')
elif ('wavlm' in source):
config = HF_config.get('wavlm')
model = HF_models.get('wavlm')
else:
config = HF_config.get('wav2vec2')
model = HF_models.get('wav2vec2')
self._from_pretrained(source, config=config, model=model, save_path=save_path)
self.model.config.apply_spec_augment = apply_spec_augment
self.normalize_wav = self.feature_extractor.do_normalize
self.freeze = freeze
self.freeze_feature_extractor = freeze_feature_extractor
self.output_norm = output_norm
if self.freeze:
logger.warning('speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen.')
self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
else:
self.model.train()
if self.freeze_feature_extractor:
logger.warning('speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 feature extractor is frozen.')
self.model.feature_extractor.eval()
for param in self.model.feature_extractor.parameters():
param.requires_grad = False
self.output_all_hiddens = output_all_hiddens
def _from_pretrained(self, source, config, model, save_path):
(is_sb, ckpt_file, is_local) = self._check_model_source(source, save_path)
if is_sb:
config = config.from_pretrained(source, cache_dir=save_path)
self.model = model(config)
self.model.gradient_checkpointing_disable()
ckpt_full_path = fetch(filename=ckpt_file, source=source, savedir=save_path)
self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)
else:
self.model = model.from_pretrained(source, cache_dir=save_path, local_files_only=is_local)
def _load_sb_pretrained_w2v2_parameters(self, path):
modified_state_dict = {}
orig_state_dict = torch.load(path, map_location='cpu')
for (key, params) in orig_state_dict.items():
if ('wav2vec2.' in key):
save_key = key.replace('model.wav2vec2.', '')
modified_state_dict[save_key] = params
incompatible_keys = self.model.load_state_dict(modified_state_dict, strict=False)
for missing_key in incompatible_keys.missing_keys:
logger.warning(((f'During parameter transfer to {self.model} loading from ' + f'{path}, the transferred parameters did not have ') + f'parameters for the key: {missing_key}'))
for unexpected_key in incompatible_keys.unexpected_keys:
logger.warning((f'The param with the key: {unexpected_key} is discarded as it ' + 'is useless for wav2vec 2.0 finetuning.'))
def _check_model_source(self, path, save_path):
checkpoint_filename = ''
source = pathlib.Path(path)
is_local = True
if (not source.exists()):
is_local = False
sink = pathlib.Path((((save_path + '/models--') + path.replace('/', '--')) + '/snapshots'))
if sink.exists():
sink = (sink / os.listdir(str(sink))[0])
if any(((File.endswith('.bin') or File.endswith('.ckpt')) for File in os.listdir(str(sink)))):
is_local = True
local_path = str(sink)
else:
local_path = path
else:
local_path = path
if is_local:
if any((File.endswith('.bin') for File in os.listdir(local_path))):
is_sb = False
return (is_sb, checkpoint_filename, is_local)
for File in os.listdir(local_path):
if File.endswith('.ckpt'):
checkpoint_filename = os.path.join(path, File)
is_sb = True
return (is_sb, checkpoint_filename, is_local)
else:
files = model_info(path).siblings
for File in files:
if File.rfilename.endswith('.ckpt'):
checkpoint_filename = File.rfilename
is_sb = True
return (is_sb, checkpoint_filename, is_local)
for File in files:
if File.rfilename.endswith('.bin'):
checkpoint_filename = File.rfilename
is_sb = False
return (is_sb, checkpoint_filename, is_local)
err_msg = f'{path} does not contain a .bin or .ckpt checkpoint !'
raise FileNotFoundError(err_msg)
def forward(self, wav, wav_lens=None):
if self.freeze:
with torch.no_grad():
return self.extract_features(wav, wav_lens)
return self.extract_features(wav, wav_lens)
def extract_features(self, wav, wav_lens=None):
padding_mask = self.make_masks(wav, wav_len=wav_lens)
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape[1:])
out = self.model(wav, attention_mask=padding_mask, output_hidden_states=self.output_all_hiddens)
if self.output_all_hiddens:
out = torch.stack(list(out.hidden_states), dim=0)
norm_shape = out.shape[(- 3):]
else:
out = out.last_hidden_state
norm_shape = out.shape
if self.output_norm:
out = F.layer_norm(out, norm_shape[1:])
return out
def make_masks(self, src, wav_len=None, pad_idx=0):
src_key_padding_mask = None
if (wav_len is not None):
abs_len = torch.round((wav_len * src.shape[1]))
src_key_padding_mask = length_to_mask(abs_len).bool()
return src_key_padding_mask |
class AugScoreBase():
def __call__(self, augmenter, X_train, Y_train, X_test, Y_test):
raise NotImplementedError() |
def load_warning(method):
if (_warnings_enabled['YAMLLoadWarning'] is False):
return
import warnings
message = ('calling yaml.%s() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.' % method)
warnings.warn(message, YAMLLoadWarning, stacklevel=3) |
@dace.program
def dace_sum(X_in: dace.float32[N], X_out: dace.float32[1]):
dace.reduce((lambda a, b: (a + b)), X_in, X_out, identity=0) |
class FiniteWordPath_square_grid_callable(WordDatatype_callable, FiniteWordPath_square_grid, FiniteWord_class):
pass |
class TestLINGAM(unittest.TestCase):
def setUp(self) -> None:
np.random.seed(0)
sample_size = 1000
columns = ['x0', 'x1', 'x2', 'x3', 'x4', 'x5']
x3 = np.random.uniform(size=sample_size)
x0 = ((3.0 * x3) + np.random.uniform(size=sample_size))
x2 = ((6.0 * x3) + np.random.uniform(size=sample_size))
x1 = (((3.0 * x0) + (2.0 * x2)) + np.random.uniform(size=sample_size))
x5 = ((4.0 * x0) + np.random.uniform(size=sample_size))
x4 = (((8.0 * x0) - (1.0 * x2)) + np.random.uniform(size=sample_size))
self.df = pd.DataFrame(np.array([x0, x1, x2, x3, x4, x5]).T, columns=columns)
self.graph = pd.DataFrame([[0, 1, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [1, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], columns=columns, index=columns)
def test(self):
try:
model = LiNGAM(LiNGAM.config_class())
graph = model.train(self.df)
except ImportError as e:
print(str(e))
return
diff = np.sum(np.abs((graph.values - self.graph.values)))
self.assertLessEqual(diff, 2) |
def compute_metrics_from_files(path_to_reference, path_to_candidate, exclude_qids, perform_checks=True):
qids_to_relevant_documentids = load_reference(path_to_reference)
qids_to_ranked_candidate_documents = load_candidate(path_to_candidate)
if perform_checks:
(allowed, message) = quality_checks_qids(qids_to_relevant_documentids, qids_to_ranked_candidate_documents)
if (message != ''):
print(message)
return compute_metrics(qids_to_relevant_documentids, qids_to_ranked_candidate_documents, exclude_qids) |
class AdMethod():
def __init__(self, arch: Arch, scorer: Scorer, dataset: Dataset):
self.arch = arch
self.scorer = scorer
self.dataset = dataset
def get_trained_arch(self):
raise NotImplementedError
def get_normal_class(self) -> int:
raise NotImplementedError
def get_scorer(self):
return self.scorer
def get_transform(self):
raise NotImplementedError |
class Scanner(object):
def __init__(self, lexicon, stream, name='', initial_pos=None):
self.trace = 0
self.buffer = u''
self.buf_start_pos = 0
self.next_pos = 0
self.cur_pos = 0
self.cur_line = 1
self.start_pos = 0
self.start_line = 0
self.start_col = 0
self.text = None
self.state_name = None
self.lexicon = lexicon
self.stream = stream
self.name = name
self.queue = []
self.initial_state = None
self.begin('')
self.next_pos = 0
self.cur_pos = 0
self.cur_line_start = 0
self.cur_char = BOL
self.input_state = 1
if (initial_pos is not None):
(self.cur_line, self.cur_line_start) = (initial_pos[1], (- initial_pos[2]))
def read(self):
queue = self.queue
while (not queue):
(self.text, action) = self.scan_a_token()
if (action is None):
self.produce(None)
self.eof()
else:
value = action.perform(self, self.text)
if (value is not None):
self.produce(value)
result = queue[0]
del queue[0]
return result
def scan_a_token(self):
self.start_pos = self.cur_pos
self.start_line = self.cur_line
self.start_col = (self.cur_pos - self.cur_line_start)
action = self.run_machine_inlined()
if (action is not None):
if self.trace:
print(('Scanner: read: Performing %s %d:%d' % (action, self.start_pos, self.cur_pos)))
text = self.buffer[(self.start_pos - self.buf_start_pos):(self.cur_pos - self.buf_start_pos)]
return (text, action)
else:
if (self.cur_pos == self.start_pos):
if (self.cur_char is EOL):
self.next_char()
if ((self.cur_char is None) or (self.cur_char is EOF)):
return (u'', None)
raise Errors.UnrecognizedInput(self, self.state_name)
def run_machine_inlined(self):
state = self.initial_state
cur_pos = self.cur_pos
cur_line = self.cur_line
cur_line_start = self.cur_line_start
cur_char = self.cur_char
input_state = self.input_state
next_pos = self.next_pos
buffer = self.buffer
buf_start_pos = self.buf_start_pos
buf_len = len(buffer)
(b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos) = (None, 0, 0, 0, u'', 0, 0)
trace = self.trace
while 1:
if trace:
print(('State %d, %d/%d:%s -->' % (state['number'], input_state, cur_pos, repr(cur_char))))
action = state['action']
if (action is not None):
(b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos) = (action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos)
c = cur_char
new_state = state.get(c, NOT_FOUND)
if (new_state is NOT_FOUND):
new_state = (c and state.get('else'))
if new_state:
if trace:
print(('State %d' % new_state['number']))
state = new_state
if (input_state == 1):
cur_pos = next_pos
buf_index = (next_pos - buf_start_pos)
if (buf_index < buf_len):
c = buffer[buf_index]
next_pos += 1
else:
discard = (self.start_pos - buf_start_pos)
data = self.stream.read(4096)
buffer = (self.buffer[discard:] + data)
self.buffer = buffer
buf_start_pos += discard
self.buf_start_pos = buf_start_pos
buf_len = len(buffer)
buf_index -= discard
if data:
c = buffer[buf_index]
next_pos += 1
else:
c = u''
if (c == u'\n'):
cur_char = EOL
input_state = 2
elif (not c):
cur_char = EOL
input_state = 4
else:
cur_char = c
elif (input_state == 2):
cur_char = u'\n'
input_state = 3
elif (input_state == 3):
cur_line += 1
cur_line_start = cur_pos = next_pos
cur_char = BOL
input_state = 1
elif (input_state == 4):
cur_char = EOF
input_state = 5
else:
cur_char = u''
else:
if trace:
print('blocked')
if (b_action is not None):
(action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos) = (b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos)
else:
action = None
break
self.cur_pos = cur_pos
self.cur_line = cur_line
self.cur_line_start = cur_line_start
self.cur_char = cur_char
self.input_state = input_state
self.next_pos = next_pos
if trace:
if (action is not None):
print(('Doing %s' % action))
return action
def next_char(self):
input_state = self.input_state
if self.trace:
print(('Scanner: next: %s [%d] %d' % ((' ' * 20), input_state, self.cur_pos)))
if (input_state == 1):
self.cur_pos = self.next_pos
c = self.read_char()
if (c == u'\n'):
self.cur_char = EOL
self.input_state = 2
elif (not c):
self.cur_char = EOL
self.input_state = 4
else:
self.cur_char = c
elif (input_state == 2):
self.cur_char = u'\n'
self.input_state = 3
elif (input_state == 3):
self.cur_line += 1
self.cur_line_start = self.cur_pos = self.next_pos
self.cur_char = BOL
self.input_state = 1
elif (input_state == 4):
self.cur_char = EOF
self.input_state = 5
else:
self.cur_char = u''
if self.trace:
print(('--> [%d] %d %r' % (input_state, self.cur_pos, self.cur_char)))
def position(self):
return (self.name, self.start_line, self.start_col)
def get_position(self):
return self.position()
def begin(self, state_name):
self.initial_state = self.lexicon.get_initial_state(state_name)
self.state_name = state_name
def produce(self, value, text=None):
if (text is None):
text = self.text
self.queue.append((value, text))
def eof(self):
pass |
@test_utils.in_tempdir  # decorator prefix truncated in the source; a test-utils tempdir fixture is assumed
def test_dory_shadow_extract(location):
copy_dory_catlas()
args = 'dory_k21 dory_k21_r1 shadow_out --contigs-db dory_k21/bcalm.unitigs.db'.split()
print('** running extract_nodes_by_shadow_ratio')
assert (extract_nodes_by_shadow_ratio.main(args) == 0) |
def add_reader_preprocessing_params(parser: argparse.ArgumentParser):
parser.add_argument('--gold_passages_src', type=str, help='File with the original dataset passages (json format). Required for train set')
parser.add_argument('--gold_passages_src_dev', type=str, help='File with the original dataset passages (json format). Required for dev set')
parser.add_argument('--num_workers', type=int, default=16, help='number of parallel processes to binarize reader data') |
def request(method, url, **kwargs):
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs) |
def calc_au(model, test_dataloader, delta=0.01, verbose=False):
# Counts "active units": latent dimensions whose posterior-mean variance
# across the dataset exceeds delta (a standard VAE posterior-collapse diagnostic).
data_loop = (tqdm(test_dataloader) if verbose else test_dataloader)
def get_mu(batch):
(encoder_inputs, encoder_masks, labels) = batch
encoder_inputs = encoder_inputs.to(model.device)
encoder_masks = encoder_masks.to(model.device)
labels = labels.to(model.device)
with torch.no_grad():
(_, _, mu, _) = model(encoder_inputs, encoder_masks, labels)
return mu
all_mu = [get_mu(batch) for batch in data_loop]
mus = torch.vstack(all_mu)
mu_mean = mus.mean(dim=0)
vars = (mus - mu_mean).pow(2)
au_var = vars.mean(dim=0)
return ((au_var >= delta).sum().item(), au_var) |
class CaselessPreservingLiteral(CaselessLiteral):
def __init__(self, matchString):
super().__init__(matchString.upper())
self.name = ("'%s'" % matchString)
self.errmsg = ('Expected ' + self.name)
self.myException.msg = self.errmsg
def parseImpl(self, instring, loc, doActions=True):
test = instring[loc:(loc + self.matchLen)]
if (test.upper() == self.match):
return ((loc + self.matchLen), test)
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc |
def get_pai_explain_cmd(datasource, project, oss_model_path, model_name, data_table, result_table, model_type, model_params, job_file, params_file, label_name):
if (model_type == EstimatorType.PAIML):
cmd = get_explain_random_forests_cmd(datasource, model_name, data_table, result_table, label_name)
else:
conf = cluster_conf.get_cluster_config(model_params)
cmd = get_pai_tf_cmd(conf, job_file, params_file, ENTRY_FILE, model_name, oss_model_path, data_table, '', result_table, project)
return cmd |
@mock.patch('openfl.federated.Plan.parse')
def test_aggregator_start(mock_parse):
current_path = Path(__file__).resolve()
plan_path = current_path.parent.joinpath('plan')
plan_config = plan_path.joinpath('plan.yaml')
cols_config = plan_path.joinpath('cols.yaml')
mock_parse.return_value = mock.Mock()
ret = start_(['-p', plan_config, '-c', cols_config], standalone_mode=False)
assert (ret is None) |
class Functional(ModelLayer):
def __init__(self, model, input_record, output_names_or_num, function, name='functional', output_dtypes=None, tags=None, **kwargs):
input_record = schema.as_record(input_record)
super(Functional, self).__init__(model, name, input_record, tags=tags, **kwargs)
self._function = function
self._kwargs = kwargs
return_struct = (isinstance(output_names_or_num, list) or (isinstance(output_names_or_num, six.integer_types) and (output_names_or_num != 1)))
with scope.NameScope(self.name, reset=True):
if isinstance(output_names_or_num, int):
struct_output_schema = schema.NewRecord(model.net, schema.RawTuple(output_names_or_num))
elif isinstance(output_names_or_num, schema.Field):
self.output_schema = output_names_or_num.clone(keep_blobs=True)
return
else:
if (not isinstance(output_names_or_num, list)):
output_names_or_num = [output_names_or_num]
out_tuple = [(out, np.void) for out in output_names_or_num]
struct_output_schema = schema.NewRecord(model.net, schema.Struct(*out_tuple))
num_outputs = len(struct_output_schema.field_blobs())
if return_struct:
self.output_schema = struct_output_schema
else:
self.output_schema = struct_output_schema[0]
if (output_dtypes is not None):
if (not isinstance(output_dtypes, list)):
output_dtypes = ([output_dtypes] * num_outputs)
assert (len(output_dtypes) == num_outputs)
for (dtype, scalar) in zip(output_dtypes, self.output_schema.all_scalars()):
scalar.set_type(dtype)
return
had_issues = False
try:
type_net = core.Net('_temp_type_and_shape_inference_net')
schema.InitEmptyRecord(type_net, input_record, enforce_types=True)
function(type_net, self.input_record, self.output_schema, **kwargs)
(shapes, types) = workspace.InferShapesAndTypes([type_net], {})
for i in range(num_outputs):
scalar_schema = (self.output_schema[i] if return_struct else self.output_schema)
blob = scalar_schema()
if ((blob not in types) or (blob not in shapes)):
had_issues = True
continue
if (shapes[blob] == []):
shape = tuple()
elif (shapes[blob][0] == 0):
shape = tuple(shapes[blob][1:])
else:
logger.warning('unexpected shape: {}'.format(shapes[blob]))
had_issues = True
continue
dtype = None
if (types[blob] == caffe2_pb2.TensorProto.DOUBLE):
dtype = (np.float64, shape)
elif (types[blob] == caffe2_pb2.TensorProto.FLOAT):
dtype = (np.float32, shape)
elif (types[blob] == caffe2_pb2.TensorProto.INT32):
dtype = (np.int32, shape)
elif (types[blob] == caffe2_pb2.TensorProto.INT64):
dtype = (np.int64, shape)
elif (types[blob] == caffe2_pb2.TensorProto.FLOAT16):
dtype = (np.float16, shape)
if (dtype is not None):
scalar_schema.set_type(dtype)
except TypeError as ex:
had_issues = True
logger.warning(str(ex))
if had_issues:
logger.warning('Type inference had problems for layer: {}'.format(self.name))
def add_ops(self, net):
self._function(net, self.input_record, self.output_schema, **self._kwargs) |
class DiscreteDecisionTransformerImpl(TransformerAlgoImplBase):
_modules: DiscreteDecisionTransformerModules
_clip_grad_norm: float
_warmup_tokens: int
_final_tokens: int
_initial_learning_rate: float
_tokens: int
def __init__(self, observation_shape: Shape, action_size: int, modules: DiscreteDecisionTransformerModules, clip_grad_norm: float, warmup_tokens: int, final_tokens: int, initial_learning_rate: float, device: str):
super().__init__(observation_shape=observation_shape, action_size=action_size, modules=modules, device=device)
self._clip_grad_norm = clip_grad_norm
self._warmup_tokens = warmup_tokens
self._final_tokens = final_tokens
self._initial_learning_rate = initial_learning_rate
self._tokens = 0
@eval_api
def predict(self, inpt: TorchTransformerInput) -> torch.Tensor:
(_, logits) = self._modules.transformer(inpt.observations, inpt.actions, inpt.returns_to_go, inpt.timesteps)
return logits[0][(- 1)]
def inner_update(self, batch: TorchTrajectoryMiniBatch, grad_step: int) -> Dict[(str, float)]:
self._modules.optim.zero_grad()
loss = self.compute_loss(batch)
loss.backward()
torch.nn.utils.clip_grad_norm_(self._modules.transformer.parameters(), self._clip_grad_norm)
self._modules.optim.step()
self._tokens += int(batch.masks.sum().cpu().detach().numpy())
if (self._tokens < self._warmup_tokens):
lr_mult = (self._tokens / max(1, self._warmup_tokens))
else:
progress = ((self._tokens - self._warmup_tokens) / max(1, (self._final_tokens - self._warmup_tokens)))
lr_mult = max(0.1, (0.5 * (1.0 + math.cos((math.pi * progress)))))
new_learning_rate = (lr_mult * self._initial_learning_rate)
for param_group in self._modules.optim.param_groups:
param_group['lr'] = new_learning_rate
return {'loss': float(loss.cpu().detach().numpy()), 'learning_rate': new_learning_rate}
def compute_loss(self, batch: TorchTrajectoryMiniBatch) -> torch.Tensor:
(_, logits) = self._modules.transformer(batch.observations, batch.actions, batch.returns_to_go, batch.timesteps)
loss = F.cross_entropy(logits.view((- 1), self._action_size), batch.actions.view((- 1)).long(), reduction='none')
return loss.mean() |
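# Standalone sketch of the token-based schedule in inner_update above: linear
# warmup to the base rate, then cosine decay floored at 10% of it.
import math
def lr_mult(tokens, warmup, final):
    if tokens < warmup:
        return tokens / max(1, warmup)
    progress = (tokens - warmup) / max(1, final - warmup)
    return max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
assert lr_mult(0, 100, 1000) == 0.0
assert lr_mult(100, 100, 1000) == 1.0
assert lr_mult(1000, 100, 1000) == 0.1 |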
def regroup_reds_dataset(train_path, val_path):
val_folders = glob.glob(os.path.join(val_path, '*'))
for folder in val_folders:
new_folder_idx = (int(folder.split('/')[(- 1)]) + 240)
os.system(f'cp -r {folder} {os.path.join(train_path, str(new_folder_idx))}') |
def register_Ns3Dot11sPeerManagementProtocol_methods(root_module, cls):
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
cls.add_method('ConfigurationMismatch', 'void', [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('FindPeerLink', 'ns3::Ptr< ns3::dot11s::PeerLink >', [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
cls.add_method('GetAddress', 'ns3::Mac48Address', [])
cls.add_method('GetBeaconCollisionAvoidance', 'bool', [], is_const=True)
cls.add_method('GetBeaconTimingElement', 'ns3::Ptr< ns3::dot11s::IeBeaconTiming >', [param('uint32_t', 'interface')])
cls.add_method('GetMeshId', 'ns3::Ptr< ns3::dot11s::IeMeshId >', [], is_const=True)
cls.add_method('GetNumberOfLinks', 'uint8_t', [])
cls.add_method('GetPeerLinks', 'std::vector< ns3::Ptr< ns3::dot11s::PeerLink > >', [], is_const=True)
cls.add_method('GetPeers', 'std::vector< ns3::Mac48Address >', [param('uint32_t', 'interface')], is_const=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('Install', 'bool', [param('ns3::Ptr< ns3::MeshPointDevice >', 'arg0')])
cls.add_method('IsActiveLink', 'bool', [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
cls.add_method('NotifyBeaconSent', 'void', [param('uint32_t', 'interface'), param('ns3::Time', 'beaconInterval')])
cls.add_method('ReceiveBeacon', 'void', [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress'), param('ns3::Time', 'beaconInterval'), param('ns3::Ptr< ns3::dot11s::IeBeaconTiming >', 'beaconTiming')])
cls.add_method('ReceivePeerLinkFrame', 'void', [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress'), param('ns3::Mac48Address', 'peerMeshPointAddress'), param('uint16_t', 'aid'), param('ns3::dot11s::IePeerManagement', 'peerManagementElement'), param('ns3::dot11s::IeConfiguration', 'meshConfig')])
cls.add_method('Report', 'void', [param('std::ostream &', 'os')], is_const=True)
cls.add_method('ResetStats', 'void', [])
cls.add_method('SetBeaconCollisionAvoidance', 'void', [param('bool', 'enable')])
cls.add_method('SetMeshId', 'void', [param('std::string', 's')])
cls.add_method('SetPeerLinkStatusCallback', 'void', [param('ns3::Callback< void, ns3::Mac48Address, ns3::Mac48Address, unsigned int, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
cls.add_method('TransmissionFailure', 'void', [param('uint32_t', 'interface'), param('ns3::Mac48Address const', 'peerAddress')])
cls.add_method('TransmissionSuccess', 'void', [param('uint32_t', 'interface'), param('ns3::Mac48Address const', 'peerAddress')])
cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
return |
def main():
project_root = Path(__file__).parent.parent
nerf_mvl_root = (((project_root / 'data') / 'nerf_mvl') / 'nerf_mvl_7k_pano')
nerf_mvl_parent_dir = nerf_mvl_root.parent
train_split = {'water_safety_barrier': 2, 'tire': 2, 'pier': 2, 'plant': 2, 'warning_sign': 2, 'bollard': 2, 'pedestrian': 3, 'car': 3, 'traffic_cone': 3}
for (class_name, split_interval) in train_split.items():
range_view_dir = (nerf_mvl_root / class_name)
filenames = os.listdir(range_view_dir)
filenames.remove('lidar2world.txt')
range_view_paths = [Path(os.path.join(range_view_dir, filename)) for filename in filenames]
num_samples = len(range_view_paths)
frame_ids = np.arange(num_samples)
train_frame_ids = [i for i in range(0, num_samples, split_interval)]
val_frame_ids = [i for i in range(0, num_samples, (split_interval * 20))]
test_frame_ids = val_frame_ids
nerf_mvl_dataset = NeRFMVLLoader(nerf_mvl_root, class_name)
lidar2world = nerf_mvl_dataset.load_lidars(frame_ids)
lidar_range_image = np.load(range_view_paths[0])['data']
(lidar_h, lidar_w, _) = lidar_range_image.shape
all_indices = frame_ids
train_indices = train_frame_ids
val_indices = val_frame_ids
test_indices = test_frame_ids
split_to_all_indices = {'train': train_indices, 'val': val_indices, 'test': test_indices}
for (split, indices) in split_to_all_indices.items():
print(f'Split {split} has {len(indices)} frames.')
lidar_paths_split = [range_view_paths[i] for i in indices]
lidar2world_split = [lidar2world[i] for i in indices]
json_dict = {'w_lidar': lidar_w, 'h_lidar': lidar_h, 'aabb_scale': 2, 'frames': [{'lidar_file_path': str(lidar_path.relative_to(nerf_mvl_parent_dir)), 'lidar2world': lidar2world.tolist()} for (lidar_path, lidar2world) in zip(lidar_paths_split, lidar2world_split)]}
json_path = (nerf_mvl_parent_dir / f'transforms_{class_name}_{split}.json')
with open(json_path, 'w') as f:
json.dump(json_dict, f, indent=2)
print(f'Saved {json_path}.') |
def import_dir_files(cdir, pattern='*'):
path = os.path.join(cdir, pattern)
return sorted(glob.glob(path)) |
def test_forward_partitioned_attention(pretrain_file):
model = build_model(pretrain_file, '--pattn_num_heads', '8', '--pattn_num_layers', '8')
run_forward_checks(model)
model = build_model(pretrain_file, '--pattn_num_heads', '0', '--pattn_num_layers', '0')
run_forward_checks(model) |
class UnknownExecutor(ActionExecutor):
def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo):
raise ExecutionException('Execution of {0} is not supported', script[0].action) |
def _predict(predictors: Dict[(str, str)]):
def predict_inner(args: argparse.Namespace) -> None:
predictor = _get_predictor(args, predictors)
output_file = None
if (args.silent and (not args.output_file)):
print('--silent specified without --output-file.')
print('Exiting early because no output will be created.')
sys.exit(0)
with ExitStack() as stack:
input_file = stack.enter_context(args.input_file)
if args.output_file:
output_file = stack.enter_context(args.output_file)
_run(predictor, input_file, output_file, args.batch_size, (not args.silent), args.cuda_device)
return predict_inner |
def send_morphology_request(request):
return send_request(request, MorphologyResponse, MORPHOLOGY_JAVA) |
def batch_predict_with_a_model(data, model, session=None):
data_logits = []
data_labels = []
data_weights = []
step = 1
while ((step * FLAGS.batch_size) <= len(data.fileindices)):
(batch_docnames, batch_docs, batch_label, batch_weight) = data.get_batch(((step - 1) * FLAGS.batch_size), (step * FLAGS.batch_size))
batch_logits = session.run(model.logits, feed_dict={model.document_placeholder: batch_docs})
data_logits.append(batch_logits)
data_labels.append(batch_label)
data_weights.append(batch_weight)
step += 1
if (len(data.fileindices) > ((step - 1) * FLAGS.batch_size)):
(batch_docnames, batch_docs, batch_label, batch_weight) = data.get_batch(((step - 1) * FLAGS.batch_size), len(data.fileindices))
batch_logits = session.run(model.logits, feed_dict={model.document_placeholder: batch_docs})
data_logits.append(batch_logits)
data_labels.append(batch_label)
data_weights.append(batch_weight)
data_logits = tf.concat(0, data_logits)
data_labels = tf.concat(0, data_labels)
data_weights = tf.concat(0, data_weights)
return (data_logits, data_labels, data_weights)
class DiscreteFunctionFieldValuation_base(DiscreteValuation):
def extensions(self, L):
K = self.domain()
from sage.categories.function_fields import FunctionFields
if (L is K):
return [self]
if (L in FunctionFields()):
if K.is_subring(L):
if (L.base() is K):
G = L.polynomial()
if (not G.is_monic()):
G = (G / G.leading_coefficient())
if any(((self(c) < 0) for c in G.coefficients())):
from sage.rings.valuation.gauss_valuation import GaussValuation
g = GaussValuation(G.parent(), self)
(y_to_u, u_to_y, H) = g.monic_integral_model(G)
M = K.extension(H, names=L.variable_names())
H_extensions = self.extensions(M)
from sage.rings.morphism import RingHomomorphism_im_gens
if (isinstance(y_to_u, RingHomomorphism_im_gens) and isinstance(u_to_y, RingHomomorphism_im_gens)):
return [L.valuation((w, L.hom([M(y_to_u(y_to_u.domain().gen()))]), M.hom([L(u_to_y(u_to_y.domain().gen()))]))) for w in H_extensions]
raise NotImplementedError
return [L.valuation(w) for w in self.mac_lane_approximants(L.polynomial(), require_incomparability=True)]
elif ((L.base() is not L) and K.is_subring(L)):
from operator import add
from functools import reduce
A = [base_valuation.extensions(L) for base_valuation in self.extensions(L.base())]
return reduce(add, A, [])
elif ((L.constant_base_field() is not K.constant_base_field()) and K.constant_base_field().is_subring(L)):
raise NotImplementedError(('Cannot compute the extensions of %r from %r to %r since the base ring changes.' % (self, self.domain(), L)))
raise NotImplementedError(('extension of %r from %r to %r not implemented' % (self, K, L))) |
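# Usage sketch for DiscreteFunctionFieldValuation_base.extensions above, using
# the standard Sage function-field API (the concrete extension y^2 - x and the
# variable names are illustrative, not taken from this file):
from sage.all import FunctionField, PolynomialRing, QQ
K = FunctionField(QQ, 'x'); x = K.gen()
v = K.valuation(x)                      # the x-adic valuation on QQ(x)
R = PolynomialRing(K, 'y'); y = R.gen()
L = K.extension(y**2 - x, names='y')    # QQ(x) extended by a square root of x
print(v.extensions(L))                  # extensions of v to the bigger field |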
def get_custom_op_library_path():
if sys.platform.startswith('win32'):
library_filename = 'custom_ops.dll'
elif sys.platform.startswith('darwin'):
library_filename = 'libcustom_ops.dylib'
else:
library_filename = 'libcustom_ops.so'
path = os.path.abspath('build/{}'.format(library_filename))
assert os.path.exists(path), path
return path |
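# Usage sketch for get_custom_op_library_path above, assuming the PyTorch
# custom-op workflow; torch.ops.load_library is the public PyTorch API for
# loading such a shared library:
import torch
torch.ops.load_library(get_custom_op_library_path())
# After loading, the registered operators become callable via torch.ops.<ns>.<op>. |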
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--max_enc_len', default=400, help='Encoder input max sequence length', type=int)
parser.add_argument('--max_dec_len', default=100, help='Decoder input max sequence length', type=int)
    parser.add_argument('--max_dec_steps', default=120, help='Maximum number of words of the predicted abstract', type=int)
parser.add_argument('--min_dec_steps', default=30, help='Minimum number of words of the predicted abstract', type=int)
parser.add_argument('--batch_size', default=16, help='batch size', type=int)
parser.add_argument('--beam_size', default=4, help='beam size for beam search decoding (must be equal to batch size in decode mode)', type=int)
parser.add_argument('--vocab_size', default=50000, help='Vocabulary size', type=int)
parser.add_argument('--embed_size', default=128, help='Words embeddings dimension', type=int)
parser.add_argument('--enc_units', default=256, help='Encoder GRU cell units number', type=int)
parser.add_argument('--dec_units', default=256, help='Decoder GRU cell units number', type=int)
parser.add_argument('--attn_units', default=512, help='[context vector, decoder state, decoder input] feedforward result dimension - this result is used to compute the attention weights', type=int)
parser.add_argument('--learning_rate', default=0.15, help='Learning rate', type=float)
    parser.add_argument('--adagrad_init_acc', default=0.1, help='Adagrad optimizer initial accumulator value. Please refer to the Adagrad optimizer API documentation on the TensorFlow site for more details.', type=float)
parser.add_argument('--max_grad_norm', default=0.8, help='Gradient norm above which gradients must be clipped', type=float)
parser.add_argument('--checkpoints_save_steps', default=10000, help='Save checkpoints every N steps', type=int)
parser.add_argument('--max_steps', default=10000, help='Max number of iterations', type=int)
parser.add_argument('--num_to_test', default=5, help='Number of examples to test', type=int)
parser.add_argument('--max_num_to_eval', default=5, help='Max number of examples to evaluate', type=int)
    parser.add_argument('--mode', help='train, test or eval', default='', type=str)
parser.add_argument('--model_path', help='Path to a specific model', default='', type=str)
parser.add_argument('--checkpoint_dir', help='Checkpoint directory', default='', type=str)
parser.add_argument('--test_save_dir', help='Directory in which we store the decoding results', default='', type=str)
parser.add_argument('--data_dir', help='Data Folder', default='', type=str)
parser.add_argument('--vocab_path', help='Vocab path', default='', type=str)
parser.add_argument('--log_file', help='File in which to redirect console outputs', default='', type=str)
args = parser.parse_args()
params = vars(args)
print(params)
    assert params['mode'], 'mode is required: train, test or eval'
    assert (params['mode'] in ['train', 'test', 'eval']), 'The mode must be train, test or eval'
assert os.path.exists(params['data_dir']), "data_dir doesn't exist"
assert os.path.isfile(params['vocab_path']), "vocab_path doesn't exist"
if (params['mode'] == 'train'):
train(params)
elif (params['mode'] == 'test'):
test_and_save(params)
elif (params['mode'] == 'eval'):
evaluate(params) |
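# Invocation sketch for main() above; every path and the script name below are
# hypothetical placeholders, and the asserts require data_dir/vocab_path to
# actually exist before dispatching.
import sys
sys.argv = ['run_summarization.py', '--mode', 'train',
            '--data_dir', 'data/', '--vocab_path', 'data/vocab',
            '--checkpoint_dir', 'ckpt/']
main()  # dispatches to train(params) after validating the arguments |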
def list_files(files, path):
for item in os.listdir(path):
item = os.path.join(path, item)
if os.path.isdir(item):
list_files(files, item)
elif os.path.isfile(item):
files.append(item) |
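# Usage sketch for list_files above: it appends file paths recursively into the
# caller's list. For comparison, os.walk from the standard library produces
# essentially the same listing on a plain tree without symlinks:
import os
files = []
list_files(files, '.')
walked = [os.path.join(root, name)
          for root, _dirs, names in os.walk('.') for name in names]
# `files` and `walked` should agree up to ordering on a symlink-free tree. |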
def choose_color_by_layertype(layertype):
color = '#6495ED'
if ((layertype == 'Convolution') or (layertype == 'Deconvolution')):
color = '#FF5050'
elif (layertype == 'Pooling'):
color = '#FF9900'
elif (layertype == 'InnerProduct'):
color = '#CC33FF'
return color |
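# Usage sketch for choose_color_by_layertype above: it maps Caffe-style layer
# type names to hex colors for graph rendering, falling back to a default.
assert choose_color_by_layertype('Pooling') == '#FF9900'
assert choose_color_by_layertype('ReLU') == '#6495ED'  # default color |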
def _recursive_in_check(node, state, gpu_scalars):
scalset = set()
scalout = True
sdfg = state.parent
for e in state.in_edges(node):
last_edge = state.memlet_path(e)[0]
if isinstance(last_edge.src, nodes.AccessNode):
desc = sdfg.arrays[last_edge.src.data]
if isinstance(desc, data.Scalar):
if ((desc.storage in gpu_storage) or (last_edge.src.data in gpu_scalars)):
scalout = False
scalset.add(last_edge.src.data)
(sset, ssout) = _recursive_in_check(last_edge.src, state, gpu_scalars)
scalset = scalset.union(sset)
scalout = (scalout and ssout)
continue
if (desc.shape == (1,)):
scalout = False
(sset, ssout) = _recursive_in_check(last_edge.src, state, gpu_scalars)
scalset = scalset.union(sset)
scalout = (scalout and ssout)
continue
if ((desc.storage not in gpu_storage) and (last_edge.data.num_elements() == 1)):
(sset, ssout) = _recursive_in_check(last_edge.src, state, gpu_scalars)
scalset = scalset.union(sset)
scalout = (scalout and ssout)
continue
scalout = False
return (scalset, scalout) |
def broadcast_xla_master_model_param(model):
logger.info('Broadcasting XLA model parameters and buffers from master process ...')
parameters_and_buffers = []
for p in chain(model.parameters(), model.buffers()):
if (not is_main()):
zero = torch.tensor(0, dtype=p.data.dtype, device=p.data.device)
p.data.mul_(zero)
parameters_and_buffers.append(p.data)
xm.wait_device_ops()
xm.all_reduce(xm.REDUCE_SUM, parameters_and_buffers)
xm.mark_step()
xm.rendezvous('mmf.trainers.core.device.broadcast_xla_master_model_param')
logger.info('Done!') |
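# Context sketch for broadcast_xla_master_model_param above (assumes torch_xla
# and the MMF-style is_main() helper used in the function): non-master processes
# zero their parameter copies, so the SUM all-reduce leaves every replica with
# the master's values. A hypothetical multiprocess entry point:
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp

def _mp_fn(index):
    device = xm.xla_device()
    model = build_model().to(device)  # build_model is a placeholder factory
    broadcast_xla_master_model_param(model)

# xmp.spawn(_mp_fn, nprocs=8)  # nprocs value is illustrative |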
class Metadata(Base):
_attributes = OrderedDict([('schema_version', str), ('title', str), ('creators', str), ('copyright', str), ('collection', str), ('source_filename', str), ('source_format', str)])
_optional_attributes = ['title', 'creators', 'copyright', 'collection', 'source_filename', 'source_format']
_list_attributes = ['creators']
def __init__(self, schema_version: str=DEFAULT_SCHEMA_VERSION, title: str=None, creators: List[str]=None, copyright: str=None, collection: str=None, source_filename: str=None, source_format: str=None):
self.schema_version = schema_version
self.title = title
self.creators = (creators if (creators is not None) else [])
self.copyright = copyright
self.collection = collection
self.source_filename = source_filename
self.source_format = source_format |
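# Usage sketch for Metadata above (field values are illustrative;
# DEFAULT_SCHEMA_VERSION is defined elsewhere in the same module):
meta = Metadata(title='Symphony No. 5', creators=['Beethoven'],
                source_filename='symphony5.xml', source_format='MusicXML')
print(meta.schema_version)  # falls back to DEFAULT_SCHEMA_VERSION
print(meta.creators)        # ['Beethoven'] |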
class feature_node(Structure):
_names = ['index', 'value']
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def __str__(self):
return ('%d:%g' % (self.index, self.value)) |
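# Usage sketch for feature_node above (a liblinear-style sparse feature entry;
# assumes the genFields helper defined elsewhere zips _names with _types into
# the ctypes _fields_ list):
fn = feature_node(index=3, value=0.25)
print(fn)  # -> 3:0.25 |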