def sample_optional_tags(optional, sample_probs):
sampled = []
if (len(optional) > 0):
n_sample = np.random.choice([0, 1], 1, p=sample_probs[:2])[0]
n_sample = min(n_sample, len(optional))
sampled = random.sample(optional, n_sample)
return sampled |
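A minimal usage sketch for the sampler above (the tag list and probabilities here are illustrative; the numpy/random imports are assumed from the surrounding module):

import random
import numpy as np

optional = ['lang:python', 'topic:ml', 'license:mit']
# sample_probs[:2] = [0.3, 0.7]: a 30% chance of returning no tag, 70% of one tag.
print(sample_optional_tags(optional, sample_probs=[0.3, 0.7]))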
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('inshape', [(5, 8, 16), (10, 16, 32)])
@pytest.mark.parametrize('w0_init, w_init, b_init', [(None, None, None), (I.ConstantInitializer(), I.ConstantInitializer(), I.ConstantInitializer()), (True, True, True)])
@pytest.mark.parametrize('num_layers, dropout, bidirectional, with_bias', [(1, 0.0, False, False), (2, 0.5, True, True)])
@pytest.mark.parametrize('hidden_size', [5])
@pytest.mark.parametrize('training', [False, True])
@pytest.mark.parametrize('fix_parameters', [False, True])
@pytest.mark.parametrize('rng', [None, True])
def test_pf_lstm_execution(g_rng, inshape, w0_init, w_init, b_init, num_layers, dropout, bidirectional, with_bias, hidden_size, training, fix_parameters, rng, ctx, func_name):
with nn.context_scope(ctx):
if (func_name == 'LSTM'):
pytest.skip('Not implemented in CPU.')
num_directions = (2 if bidirectional else 1)
w0_shape = (num_directions, 4, hidden_size, (inshape[2] + hidden_size))
w_shape = (max(1, (num_layers - 1)), num_directions, 4, hidden_size, ((num_directions * hidden_size) + hidden_size))
b_shape = (num_layers, num_directions, 4, hidden_size)
w0_init = process_param_init(w0_init, w0_shape, g_rng)
w_init = process_param_init(w_init, w_shape, g_rng)
b_init = process_param_init(b_init, b_shape, g_rng)
rng = process_rng(rng)
kw = {}
insert_if_not_none(kw, 'w0_init', w0_init)
insert_if_not_none(kw, 'w_init', w_init)
insert_if_not_none(kw, 'b_init', b_init)
insert_if_not_default(kw, 'num_layers', num_layers, 1)
insert_if_not_default(kw, 'dropout', dropout, 0.0)
insert_if_not_default(kw, 'bidirectional', bidirectional, False)
insert_if_not_default(kw, 'training', training, True)
insert_if_not_none(kw, 'rng', rng)
insert_if_not_default(kw, 'with_bias', with_bias, True)
insert_if_not_default(kw, 'fix_parameters', fix_parameters, False)
x = nn.Variable.from_numpy_array(g_rng.randn(*inshape))
h = nn.Variable.from_numpy_array(g_rng.randn(*(num_layers, num_directions, inshape[1], hidden_size)))
c = nn.Variable.from_numpy_array(g_rng.randn(*(num_layers, num_directions, inshape[1], hidden_size)))
(y, hn, cn) = PF.lstm(x, h, c, **kw)
y.forward()
if training:
y.backward()
assert (y.parent.info.type_name == 'LSTM')
args = y.parent.info.args
assert (y.parent.inputs[0] == x)
assert (y.parent.inputs[1] == h)
assert (y.parent.inputs[2] == c)
w0 = nn.get_parameters()['lstm/weight_l0']
assert (w0.shape == w0_shape)
assert w0.need_grad
assert (y.parent.inputs[3].need_grad == (not fix_parameters))
if isinstance(w0_init, np.ndarray):
assert_allclose(w0_init, w0.d)
if (num_layers > 1):
w = nn.get_parameters()['lstm/weight']
assert (w.shape == w_shape)
assert w.need_grad
assert (y.parent.inputs[4].need_grad == (not fix_parameters))
if isinstance(w_init, np.ndarray):
assert_allclose(w_init, w.d)
if with_bias:
b = nn.get_parameters()['lstm/bias']
assert (b.shape == b_shape)
assert b.need_grad
if (num_layers > 1):
assert (y.parent.inputs[5].need_grad == (not fix_parameters))
else:
assert (y.parent.inputs[4].need_grad == (not fix_parameters))
if isinstance(b_init, np.ndarray):
assert_allclose(b_init, b.d) |
def simple_log(spark):
date = datetime(2019, 1, 1)
return spark.createDataFrame(data=[[0, 0, date, 1.0], [1, 0, date, 1.0], [2, 1, date, 2.0], [1, 1, date, 2.0], [2, 2, date, 2.0], [0, 2, date, 2.0], [3, 0, date, 2.0]], schema=INTERACTIONS_SCHEMA) |
@pytest.fixture(scope='module')
def larger_control_flow_graph() -> CFG:
graph = CFG(MagicMock())
entry = ProgramGraphNode(index=(- sys.maxsize))
n_1 = ProgramGraphNode(index=1)
n_2 = ProgramGraphNode(index=2)
n_3 = ProgramGraphNode(index=3)
n_5 = ProgramGraphNode(index=5)
n_100 = ProgramGraphNode(index=100)
n_110 = ProgramGraphNode(index=110)
n_120 = ProgramGraphNode(index=120)
n_130 = ProgramGraphNode(index=130)
n_140 = ProgramGraphNode(index=140)
n_150 = ProgramGraphNode(index=150)
n_160 = ProgramGraphNode(index=160)
n_170 = ProgramGraphNode(index=170)
n_180 = ProgramGraphNode(index=180)
n_190 = ProgramGraphNode(index=190)
n_200 = ProgramGraphNode(index=200)
n_210 = ProgramGraphNode(index=210)
n_300 = ProgramGraphNode(index=300)
n_exit = ProgramGraphNode(index=sys.maxsize)
graph.add_node(entry)
graph.add_node(n_1)
graph.add_node(n_2)
graph.add_node(n_3)
graph.add_node(n_5)
graph.add_node(n_100)
graph.add_node(n_110)
graph.add_node(n_120)
graph.add_node(n_130)
graph.add_node(n_140)
graph.add_node(n_150)
graph.add_node(n_160)
graph.add_node(n_170)
graph.add_node(n_180)
graph.add_node(n_190)
graph.add_node(n_200)
graph.add_node(n_210)
graph.add_node(n_300)
graph.add_node(n_exit)
graph.add_edge(entry, n_1)
graph.add_edge(n_1, n_2)
graph.add_edge(n_2, n_3)
graph.add_edge(n_3, n_5)
graph.add_edge(n_5, n_100)
graph.add_edge(n_100, n_110)
graph.add_edge(n_110, n_120, label='true')
graph.add_edge(n_120, n_130)
graph.add_edge(n_130, n_140)
graph.add_edge(n_140, n_150, label='true')
graph.add_edge(n_150, n_160)
graph.add_edge(n_160, n_170, label='false')
graph.add_edge(n_170, n_180)
graph.add_edge(n_180, n_190)
graph.add_edge(n_160, n_190, label='true')
graph.add_edge(n_190, n_140)
graph.add_edge(n_140, n_200, label='false')
graph.add_edge(n_200, n_210)
graph.add_edge(n_210, n_110)
graph.add_edge(n_110, n_300, label='false')
graph.add_edge(n_300, n_exit)
return graph |
def read_as_dict(filename, split=','):
rows = []
with open(filename, 'r') as csvfile:
        for row in csv.DictReader(csvfile, delimiter=split):
rows.append(row)
return rows |
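A quick check of the reader with a made-up file; note that csv.DictReader yields one dict per data row, with all values as strings:

with open('example.csv', 'w') as f:
    f.write('name,score\nalice,3\nbob,5\n')
rows = read_as_dict('example.csv')
assert rows == [{'name': 'alice', 'score': '3'}, {'name': 'bob', 'score': '5'}]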
def import_model(opt):
model_name = ('SYE' + opt.model_task.upper())
if (opt.model_task == 'sr'):
model_name += 'X{}'.format(opt.config['model']['scale'])
kwargs = {'channels': opt.config['model']['channels']}
if (opt.config['model']['type'] == 're-parameterized'):
model_name += 'NetS'
elif (opt.config['model']['type'] == 'original'):
model_name += 'Net'
kwargs['rep_scale'] = opt.config['model']['rep_scale']
else:
raise ValueError('unknown model type, please choose from [original, re-parameterized]')
model = getattr(import_module('model'), model_name)(**kwargs)
model = model.to(opt.device)
if opt.config['model']['pretrained']:
model.load_state_dict(torch.load(opt.config['model']['pretrained']))
if ((opt.config['model']['type'] == 'original') and (opt.config['model']['need_slim'] is True)):
model = model.slim().to(opt.device)
return model |
def taylor_series_at_1(N):
coeffs = []
with mpmath.workdps(100):
coeffs.append((- mpmath.euler))
for n in range(2, (N + 1)):
coeffs.append(((((- 1) ** n) * mpmath.zeta(n)) / n))
return coeffs |
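These are the Maclaurin coefficients of log Γ(1 + x), i.e. the Taylor series of log Γ at 1: log Γ(1 + x) = -γx + Σ_{n≥2} (-1)^n ζ(n) x^n / n. A sketch of a numerical check with mpmath (the choice of N and x is arbitrary):

import mpmath

coeffs = taylor_series_at_1(8)
x = mpmath.mpf('0.1')
series = sum(c * x ** (n + 1) for n, c in enumerate(coeffs))
# The truncated series should match log(gamma(1 + x)) up to the O(x^9) tail.
assert mpmath.almosteq(series, mpmath.loggamma(1 + x), rel_eps=1e-7)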
def build_val_dataset_for_pt(is_train, args):
transform = build_transform(is_train, args)
print('Transform = ')
if isinstance(transform, tuple):
for trans in transform:
print(' - - - - - - - - - - ')
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print('')
if (args.val_data_set == 'CIFAR'):
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif (args.val_data_set == 'IMNET'):
root = os.path.join(args.val_data_path, ('train' if is_train else 'val'))
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif (args.val_data_set == 'image_folder'):
root = (args.data_path if is_train else args.eval_data_path)
dataset = ImageFolder(root, transform=transform)
nb_classes = args.nb_classes
assert (len(dataset.class_to_idx) == nb_classes)
else:
raise NotImplementedError()
return (dataset, nb_classes) |
def test_expected_calibration_error():
pp = [0.1, 0.5, 0.8, 0.2]
ac = [0.1, 0.3, 0.5, 0.8, 0.9]
co = [0.15, 0.3, 0.55, 0.75, 0.92]
with pytest.raises(ValueError):
expected_calibration_error(prediction_probabilities=pp, accuracy=ac, confidence=co)
with pytest.raises(ValueError):
expected_calibration_error(prediction_probabilities=pp, accuracy=np.array(ac), confidence=np.array(co))
with pytest.raises(ValueError):
expected_calibration_error(prediction_probabilities=np.array(pp), accuracy=ac, confidence=np.array(co))
with pytest.raises(ValueError):
expected_calibration_error(prediction_probabilities=np.array(pp), accuracy=np.array(ac), confidence=co)
with pytest.raises(ValueError):
expected_calibration_error(prediction_probabilities=np.array(pp), accuracy=np.array([0.2, 0.5]), confidence=np.array([0.3, 0.5, 0.8]))
ece = expected_calibration_error(prediction_probabilities=np.array(pp), accuracy=np.array(ac), confidence=np.array(co))
assert (ece > 0) |
class Cylinder():
def __init__(self, center, axis, radius, texture):
(x, y, z) = center
self._center = (float(x), float(y), float(z))
(x, y, z) = axis
self._axis = (float(x), float(y), float(z))
self._radius = float(radius)
self._texture = texture
def str(self):
return ('\n cylinder center %s axis %s rad %s %s\n ' % (tostr(self._center), tostr(self._axis), self._radius, self._texture)) |
def _initialize_control_variable(ocp: optimal_control.OptimalControlProblem, u: Optional[List[fenics.Function]]) -> List[fenics.Function]:
if (u is None):
u = []
for j in range(len(ocp.db.function_db.controls)):
temp = fenics.Function(ocp.db.function_db.control_spaces[j])
temp.vector().vec().aypx(0.0, ocp.db.function_db.controls[j].vector().vec())
temp.vector().apply('')
u.append(temp)
ids_u = [fun.id() for fun in u]
ids_controls = [fun.id() for fun in ocp.db.function_db.controls]
if (ids_u == ids_controls):
u = []
for j in range(len(ocp.db.function_db.controls)):
temp = fenics.Function(ocp.db.function_db.control_spaces[j])
temp.vector().vec().aypx(0.0, ocp.db.function_db.controls[j].vector().vec())
temp.vector().apply('')
u.append(temp)
return u |
class NewTypeMixin(DataDocumenterMixinBase):
def should_suppress_directive_header(self) -> bool:
return (inspect.isNewType(self.object) or super().should_suppress_directive_header())
def update_content(self, more_content: StringList) -> None:
if inspect.isNewType(self.object):
if (self.config.autodoc_typehints_format == 'short'):
supertype = restify(self.object.__supertype__, 'smart')
else:
supertype = restify(self.object.__supertype__)
more_content.append((_('alias of %s') % supertype), '')
more_content.append('', '')
super().update_content(more_content) |
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
hooks = item.config.pluginmanager.hook
settings = _get_item_settings(item)
is_timeout = ((settings.timeout is not None) and (settings.timeout > 0))
if (is_timeout and (settings.func_only is True)):
hooks.pytest_timeout_set_timer(item=item, settings=settings)
(yield)
if (is_timeout and (settings.func_only is True)):
hooks.pytest_timeout_cancel_timer(item=item) |
def parse_assignment(alist):
assert (len(alist) == 3)
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if (op == '='):
return pddl.Assign(head, exp)
elif (op == 'increase'):
return pddl.Increase(head, exp)
else:
assert False, 'Assignment operator not supported.' |
def z_score_filter(z_threshold: float, bins: np.ndarray, counts: np.ndarray):
bins = np.copy(bins)
counts = np.copy(counts)
bins = bins[:(- 1)]
mu = (np.sum((bins * counts)) / np.sum(counts))
sigma = np.sqrt((np.sum((np.power((bins - mu), 2.0) * counts)) / np.sum(counts)))
z_score = (np.abs((bins - mu)) / sigma)
index2zero = (z_score > z_threshold)
counts[index2zero] = 0
return counts |
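A worked example, assuming numpy: bins whose left edges lie more than z_threshold count-weighted standard deviations from the count-weighted mean get their counts zeroed. bins is expected to have one more entry than counts, as returned by np.histogram:

import numpy as np

data = np.concatenate([np.random.normal(0.0, 1.0, 1000), [25.0]])
counts, bins = np.histogram(data, bins=50)
filtered = z_score_filter(3.0, bins, counts)
# The isolated bin holding the 25.0 outlier is zeroed.
assert filtered.sum() < counts.sum()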
def frequencies(source, size_mb=None, sets=None):
if (size_mb and (not bounter_is_installed)):
size_mb = None
source_is_generator = (isinstance(source, GeneratorType) or callable(source))
def get_indices(ids):
if isinstance(ids, numbers.Integral):
return ids
if isinstance(ids, tuple):
ids = list(ids)
if (len(ids) == 1):
ids = ids[0]
return ids
if (sets is not None):
multiple_sets = (not isinstance(sets, (tuple, numbers.Integral)))
if multiple_sets:
sets = (get_indices(s) for s in sets)
else:
sets = get_indices(sets)
if source_is_generator:
if (sets is not None):
if multiple_sets:
if (not callable(source)):
                    raise ValueError("Can't run multiple sets if source is a generator")
return (_frequencies_from_records(source, ids=get_indices(s), size_mb=size_mb) for s in sets)
return _frequencies_from_records(source, ids=get_indices(sets), size_mb=size_mb)
return _frequencies_from_records(source, size_mb=size_mb)
if pandas_is_installed:
if isinstance(source, (DataFrame, Series)):
source = source.to_numpy()
if (source.ndim > 2):
raise ValueError('source should be max 2D')
if isinstance(source, numpy.ndarray):
source = numpy.transpose(source)
else:
source = numpy.asarray(source).T
if ((sets is not None) and (source.ndim == 2)):
if multiple_sets:
return (_frequencies_from_array(source[get_indices(s)]) for s in sets)
sets = get_indices(sets)
source = source[sets]
return _frequencies_from_array(source)
return _frequencies_from_array(source) |
def get_free_gpus() -> Optional[List[int]]:
try:
free = []
proc = subprocess.Popen('nvidia-smi --query-compute-apps=gpu_uuid --format=csv,noheader,nounits'.split(' '), stdout=subprocess.PIPE)
uuids = [s.strip() for s in proc.communicate()[0].decode().split('\n') if s]
proc = subprocess.Popen('nvidia-smi --query-gpu=index,uuid --format=csv,noheader,nounits'.split(' '), stdout=subprocess.PIPE)
id_uid_pair = [s.strip().split(', ') for s in proc.communicate()[0].decode().split('\n') if s]
for i in id_uid_pair:
(id, uid) = i
if (uid not in uuids):
free.append(int(id))
return free
    except Exception:
return None |
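The helper shells out to nvidia-smi twice: once for the UUIDs of GPUs with running compute apps, once for the index/UUID table, and returns the indices with no active apps (or None when nvidia-smi is unavailable). A hypothetical caller:

free = get_free_gpus()
if free:
    device = 'cuda:{}'.format(free[0])  # first idle GPU
else:
    device = 'cpu'  # every GPU busy, or nvidia-smi not present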
class DistOptimizerHook(OptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=(- 1)):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
allreduce_grads(runner.model.parameters(), self.coalesce, self.bucket_size_mb)
if (self.grad_clip is not None):
self.clip_grads(runner.model.parameters())
runner.optimizer.step() |
class TOMDataset(DatasetBase):
def __getitem__(self, index):
cloth_name = self.cloth_names[index]
cloth_im = Image.open(os.path.join(self.data_path, 'warp-cloth', cloth_name))
cloth_tensor = self.transform(cloth_im)
cloth_mask_im = Image.open(os.path.join(self.data_path, 'warp-cloth-mask', cloth_name))
cloth_mask_tensor = binarized_tensor(np.array(cloth_mask_im))
data = self._get_item_base(index)
data['cloth_name'] = cloth_name
data['cloth'] = cloth_tensor
data['cloth_mask'] = cloth_mask_tensor
if self.train:
data = random_horizontal_flip(data)
return data |
def read_parsing_evaluation(evaluation_file_path):
try:
with open(evaluation_file_path, 'r') as f:
lines = f.readlines()
las = float(lines[0].split('=')[1].strip('% \n'))
uas = float(lines[1].split('=')[1].strip('% \n'))
acc = float(lines[2].split('=')[1].strip('% \n'))
except Exception:
las = 0.0
uas = 0.0
acc = 0.0
return (las, uas, acc) |
class PolicyNetwork():
def __init__(self, args):
self.inputs = tf.placeholder(tf.float32, [args.batch_size, args.state_dim], name='inputs')
self.targets = tf.placeholder(tf.float32, [args.batch_size, args.action_dim], name='targets')
self.learning_rate = tf.Variable(0.0, trainable=False, name='learning_rate')
self._create_mlp(args)
if (args.gru_input_dim > 0):
self._create_gru(args)
self._create_optimizer(args)
def _create_mlp(self, args):
W = tf.get_variable('mlp_policy/hidden_0/W', [args.state_dim, args.mlp_size[0]], initializer=initializers.xavier_initializer())
b = tf.get_variable('mlp_policy/hidden_0/b', [args.mlp_size[0]])
output = args.mlp_activation(tf.nn.xw_plus_b(self.inputs, W, b))
        for i in range(1, len(args.mlp_size)):
W = tf.get_variable((('mlp_policy/hidden_' + str(i)) + '/W'), [args.mlp_size[(i - 1)], args.mlp_size[i]], initializer=initializers.xavier_initializer())
b = tf.get_variable((('mlp_policy/hidden_' + str(i)) + '/b'), [args.mlp_size[i]])
output = args.mlp_activation(tf.nn.xw_plus_b(output, W, b))
if (args.gru_input_dim > 0):
W = tf.get_variable('mlp_policy/output/W', [args.mlp_size[(- 1)], args.gru_input_dim], initializer=initializers.xavier_initializer())
b = tf.get_variable('mlp_policy/output/b', [args.gru_input_dim])
self.gru_input = args.mlp_activation(tf.nn.xw_plus_b(output, W, b))
else:
W = tf.get_variable('mlp_policy/mean_network/output_flat/W', [args.mlp_size[(- 1)], args.action_dim], initializer=initializers.xavier_initializer())
b = tf.get_variable('mlp_policy/mean_network/output_flat/b', [args.action_dim])
self.a_mean = tf.nn.xw_plus_b(output, W, b)
self.a_logstd = tf.Variable(np.zeros(args.action_dim), name='mlp_policy/output_log_std/param', dtype=tf.float32)
def _create_gru(self, args):
self.hprev = tf.get_variable('gru_policy/mean_network/gru/h0', [args.batch_size, args.gru_size], initializer=tf.zeros_initializer, trainable=False)
W_xr = tf.get_variable('gru_policy/mean_network/gru/W_xr', [args.gru_input_dim, args.gru_size], initializer=initializers.xavier_initializer())
W_hr = tf.get_variable('gru_policy/mean_network/gru/W_hr', [args.gru_size, args.gru_size], initializer=OrthogonalInitializer())
b_r = tf.get_variable('gru_policy/mean_network/gru/b_r', [args.gru_size], initializer=tf.zeros_initializer)
W_xu = tf.get_variable('gru_policy/mean_network/gru/W_xu', [args.gru_input_dim, args.gru_size], initializer=initializers.xavier_initializer())
W_hu = tf.get_variable('gru_policy/mean_network/gru/W_hu', [args.gru_size, args.gru_size], initializer=OrthogonalInitializer())
b_u = tf.get_variable('gru_policy/mean_network/gru/b_u', [args.gru_size], initializer=tf.zeros_initializer)
W_xc = tf.get_variable('gru_policy/mean_network/gru/W_xc', [args.gru_input_dim, args.gru_size], initializer=initializers.xavier_initializer())
W_hc = tf.get_variable('gru_policy/mean_network/gru/W_hc', [args.gru_size, args.gru_size], initializer=OrthogonalInitializer())
b_c = tf.get_variable('gru_policy/mean_network/gru/b_c', [args.gru_size], initializer=tf.zeros_initializer)
self.W_x_ruc = tf.concat(1, [W_xr, W_xu, W_xc])
self.W_h_ruc = tf.concat(1, [W_hr, W_hu, W_hc])
self.b_ruc = tf.concat(0, [b_r, b_u, b_c])
xb_ruc = (tf.matmul(self.gru_input, self.W_x_ruc) + tf.reshape(self.b_ruc, (1, (- 1))))
h_ruc = tf.matmul(self.hprev, self.W_h_ruc)
(self.xb_r, self.xb_u, self.xb_c) = tf.split(split_dim=1, num_split=3, value=xb_ruc)
(self.h_r, self.h_u, self.h_c) = tf.split(split_dim=1, num_split=3, value=h_ruc)
self.r = tf.nn.sigmoid((self.xb_r + self.h_r))
self.u = tf.nn.sigmoid((self.xb_u + self.h_u))
self.c = tf.nn.tanh((self.xb_c + (self.r * self.h_c)))
self.h = (((1 - self.u) * self.hprev) + (self.u * self.c))
W = tf.get_variable('gru_policy/mean_network/output_flat/W', [args.gru_size, args.action_dim], initializer=initializers.xavier_initializer())
b = tf.get_variable('gru_policy/mean_network/output_flat/b', [args.action_dim])
self.a_mean = tf.nn.xw_plus_b(self.h, W, b)
self.a_logstd = tf.Variable(np.zeros(args.action_dim), name='gru_policy/output_log_std/param', dtype=tf.float32)
def _create_optimizer(self, args):
std_a = tf.exp(self.a_logstd)
pl_1 = ((0.5 * tf.to_float(args.action_dim)) * np.log((2.0 * np.pi)))
pl_2 = (tf.to_float(args.action_dim) * tf.reduce_sum(tf.log(std_a)))
pl_3 = (0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(((self.targets - self.a_mean) / std_a)), 1)))
policy_loss = ((pl_1 + pl_2) + pl_3)
self.cost = policy_loss
self.summary_policy = tf.scalar_summary('Policy loss', tf.reduce_mean(policy_loss))
tvars = tf.trainable_variables()
(grads, _) = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train = optimizer.apply_gradients(zip(grads, tvars)) |
class Partition8(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:8'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.21', 'l_1': 'encoder.22', 'l_2': 'encoder.23', 'l_3': 'encoder.final_layer_norm', 'l_4': 'encoder.dropout'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
t_0 = self.l_1(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
t_0 = self.l_2(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
t_0 = self.l_3(t_0)
t_0 = self.l_4(t_0)
return (t_0,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs) |
def fit_predict_balanced_model(X_train, y_train, X_test, y_test):
model = make_model(X_train.shape[1])
training_generator = BalancedBatchGenerator(X_train, y_train, batch_size=1000, random_state=42)
model.fit(training_generator, epochs=5, verbose=1)
y_pred = model.predict(X_test, batch_size=1000)
return roc_auc_score(y_test, y_pred) |
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
meta = metanet_pb2.MetaNetDef()
meta.nets.add(key='foo', value=net) |
def get_beamline():
distance0 = 300.0
distance1 = 630.0
distance = (distance0 + distance1)
f_hfm = 3.0
f_vfm = 1.9
distance_hfm_vfm = (f_hfm - f_vfm)
distance_foc = (1.0 / ((1.0 / f_vfm) + (1.0 / (distance + distance_hfm_vfm))))
theta_om = 0.0035
theta_kb = 0.0035
om_mirror_length = 0.8
om_clear_ap = (om_mirror_length * theta_om)
kb_mirror_length = 0.9
kb_clear_ap = (kb_mirror_length * theta_kb)
drift0 = optical_elements.Drift(distance0)
drift1 = optical_elements.Drift(distance1)
drift_in_kb = optical_elements.Drift(distance_hfm_vfm)
drift_to_foc = optical_elements.Drift(distance_foc)
ap0 = optical_elements.Aperture('r', 'a', 0.00012, 0.00012)
ap1 = optical_elements.Aperture('r', 'a', om_clear_ap, (2 * om_clear_ap))
ap_kb = optical_elements.Aperture('r', 'a', kb_clear_ap, kb_clear_ap)
hfm = optical_elements.Mirror_elliptical(orient='x', p=distance, q=(distance_hfm_vfm + distance_foc), thetaE=theta_kb, theta0=theta_kb, length=kb_mirror_length)
vfm = optical_elements.Mirror_elliptical(orient='y', p=(distance + distance_hfm_vfm), q=distance_foc, thetaE=theta_kb, theta0=theta_kb, length=kb_mirror_length)
wf_dist_om = optical_elements.Mirror_plane(orient='x', theta=theta_om, length=om_mirror_length, range_xy=(2 * om_clear_ap), filename=os.path.join(mirror_data_dir, 'mirror2.dat'), scale=2.0, bPlot=False)
wf_dist_hfm = optical_elements.Mirror_plane(orient='x', theta=theta_kb, length=kb_mirror_length, range_xy=kb_clear_ap, filename=os.path.join(mirror_data_dir, 'mirror1.dat'), scale=2.0, bPlot=False)
wf_dist_vfm = optical_elements.Mirror_plane(orient='y', theta=theta_kb, length=kb_mirror_length, range_xy=kb_clear_ap, filename=os.path.join(mirror_data_dir, 'mirror2.dat'), scale=2.0, bPlot=False)
bl0 = Beamline()
bl0.append(ap0, Use_PP(semi_analytical_treatment=0, zoom=14.4, sampling=(1 / 1.6)))
bl0.append(drift0, Use_PP(semi_analytical_treatment=0))
bl0.append(ap1, Use_PP(zoom=0.8))
bl0.append(wf_dist_om, Use_PP())
bl0.append(drift1, Use_PP(semi_analytical_treatment=1))
bl0.append(ap_kb, Use_PP(zoom=6.4, sampling=(1 / 16.0)))
bl0.append(hfm, Use_PP())
bl0.append(wf_dist_hfm, Use_PP())
bl0.append(drift_in_kb, Use_PP(semi_analytical_treatment=1))
bl0.append(vfm, Use_PP())
bl0.append(wf_dist_vfm, Use_PP())
bl0.append(drift_to_foc, Use_PP(semi_analytical_treatment=1))
return bl0 |
def configure(conf):
cc = (conf.env['COMPILER_CC'] or None)
cxx = (conf.env['COMPILER_CXX'] or None)
if (not (cc or cxx)):
raise Utils.WafError('neither COMPILER_CC nor COMPILER_CXX are defined; maybe the compiler_cc or compiler_cxx tool has not been configured yet?')
try:
compiler = compiler_mapping[cc]
except KeyError:
try:
compiler = compiler_mapping[cxx]
except KeyError:
Logs.warn(('No compiler flags support for compiler %r or %r' % (cc, cxx)))
return
(opt_level, warn_level, dbg_level) = profiles[Options.options.build_profile]
optimizations = compiler.get_optimization_flags(opt_level)
(debug, debug_defs) = compiler.get_debug_flags(dbg_level)
warnings = compiler.get_warnings_flags(warn_level)
if Options.options.disable_werror:
try:
warnings.remove('-Werror')
except ValueError:
pass
if (cc and (not conf.env['CCFLAGS'])):
conf.env.append_value('CCFLAGS', optimizations)
conf.env.append_value('CCFLAGS', debug)
conf.env.append_value('CCFLAGS', warnings)
conf.env.append_value('CCDEFINES', debug_defs)
if (cxx and (not conf.env['CXXFLAGS'])):
conf.env.append_value('CXXFLAGS', optimizations)
conf.env.append_value('CXXFLAGS', debug)
conf.env.append_value('CXXFLAGS', warnings)
conf.env.append_value('CXXDEFINES', debug_defs) |
@pytest.mark.parametrize('n_neighbors, idx_0, idx_1, expected, n_expected', [(1, [[0], [1], [2], [3]], [[4], [5], [6], [7]], {}, 0), (1, [[0], [1], [2], [3]], [[4], [1], [6], [7]], {1: {1}}, 1), (1, [[0], [1], [2], [3]], [[4], [1], [6], [7]], {1: {1}}, 1), (1, [[0], [1], [6], [3]], [[4], [1], [6], [7]], {1: {1}, 2: {6}}, 2), (1, [[0, 1], [2, 3]], [[1, 0], [3, 2]], {}, 0), (2, [[0, 1], [2, 3]], [[1, 0], [3, 2]], {0: {0, 1}, 1: {2, 3}}, 2)])
def test_find_links(n_neighbors, idx_0, idx_1, expected, n_expected):
indexes = LinkabilityIndexes(idx_0=np.array(idx_0), idx_1=np.array(idx_1))
links = indexes.find_links(n_neighbors=n_neighbors)
n_links = indexes.count_links(n_neighbors=n_neighbors)
assert (links == expected)
assert (n_links == n_expected) |
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i', 'is', 'i', 'i', 'i', 'i', 'i')
def _convolution(g, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32):
weight_size = weight.type().sizes()
args = [input, weight]
if ((not sym_help._is_none(bias)) and (bias.type().dim() == 1)):
args.append(bias)
kwargs = {'kernel_shape_i': weight_size[2:], 'strides_i': stride, 'pads_i': (padding + padding), 'dilations_i': dilation, 'group_i': groups}
if any(((o != 0) for o in output_padding)):
assert transposed
assert (len(stride) == len(output_padding))
kwargs['output_padding_i'] = output_padding
n = g.op(('ConvTranspose' if transposed else 'Conv'), *args, **kwargs)
if ((not sym_help._is_none(bias)) and (bias.type().dim() != 1)):
return g.op('Add', n, bias)
else:
return n |
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
if (input.numel() > 0):
return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners=align_corners)
def _check_size_scale_factor(dim):
if ((size is None) and (scale_factor is None)):
raise ValueError('either size or scale_factor should be defined')
if ((size is not None) and (scale_factor is not None)):
raise ValueError('only one of size or scale_factor should be defined')
if ((scale_factor is not None) and isinstance(scale_factor, tuple) and (len(scale_factor) != dim)):
raise ValueError('scale_factor shape must match input shape. Input is {}D, scale_factor size is {}'.format(dim, len(scale_factor)))
def _output_size(dim):
_check_size_scale_factor(dim)
if (size is not None):
return size
scale_factors = _ntuple(dim)(scale_factor)
return [int(math.floor((input.size((i + 2)) * scale_factors[i]))) for i in range(dim)]
output_shape = tuple(_output_size(2))
output_shape = (input.shape[:(- 2)] + output_shape)
return _NewEmptyTensorOp.apply(input, output_shape) |
def load_saved_models(dir):
file_paths = os.listdir(dir)
records = {}
for file_path in file_paths:
if ('Java_Graph2Search' in file_path):
with open(os.path.join(dir, file_path, 'config.json'), 'r') as f:
config = json.load(f)
set_random_seed(config['random_seed'])
if (config['out_dir'] is not None):
config['pretrained'] = config['out_dir']
config['out_dir'] = None
model_handle = ModelHandlerExtend(config)
format_str = model_handle.test()
records[file_path] = format_str
for record in records:
print(record)
print(records[record])
print('') |
def batch_logdet_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
dy = grad_inputs[0]
x0 = inputs[0]
raise NotImplementedError('batch_logdet_backward is not implemented.') |
def train(writer, logger):
model_cfg = get_config(args.model_cfg)
train_dataset = DatasetEgobody(cfg=model_cfg, train=True, device=device, data_root=args.dataset_root, dataset_file=os.path.join(args.dataset_root, 'annotation_egocentric_smpl_npz/egocapture_train_smpl.npz'), add_scale=args.add_bbox_scale, do_augment=args.do_augment, split='train', scene_type=args.scene_type, scene_cano=args.scene_cano)
train_dataloader = torch.utils.data.DataLoader(train_dataset, args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers, collate_fn=collate_fn)
train_dataloader_iter = iter(train_dataloader)
val_dataset = DatasetEgobody(cfg=model_cfg, train=False, device=device, data_root=args.dataset_root, dataset_file=os.path.join(args.dataset_root, 'annotation_egocentric_smpl_npz/egocapture_val_smpl.npz'), spacing=1, add_scale=args.add_bbox_scale, split='val', scene_type=args.scene_type, scene_cano=args.scene_cano)
val_dataloader = torch.utils.data.DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers)
mocap_dataset = MoCapDataset(dataset_file='data/datasets/cmu_mocap.npz')
mocap_dataloader = torch.utils.data.DataLoader(mocap_dataset, args.batch_size, shuffle=True, num_workers=args.num_workers)
mocap_dataloader_iter = iter(mocap_dataloader)
model = ProHMRScene(cfg=model_cfg, device=device, with_focal_length=args.with_focal_length, with_bbox_info=args.with_bbox_info, with_cam_center=args.with_cam_center, with_full_2d_loss=args.with_full_2d_loss, with_global_3d_loss=args.with_global_3d_loss, scene_feat_dim=512, scene_cano=args.scene_cano)
model.train()
if args.load_pretrained:
weights = torch.load(args.checkpoint, map_location=(lambda storage, loc: storage))
if args.load_only_backbone:
weights_backbone = {}
weights_backbone['state_dict'] = {k: v for (k, v) in weights['state_dict'].items() if (k.split('.')[0] == 'backbone')}
model.load_state_dict(weights_backbone['state_dict'], strict=False)
else:
model.load_state_dict(weights['state_dict'], strict=False)
print('[INFO] pretrained model loaded from {}.'.format(args.checkpoint))
print('[INFO] load_only_backbone: {}'.format(args.load_only_backbone))
model.init_optimizers()
total_steps = 0
best_loss_keypoints_3d_mode = 10000
for epoch in range(args.num_epoch):
for step in tqdm(range((train_dataset.dataset_len // args.batch_size))):
total_steps += 1
try:
batch = next(train_dataloader_iter)
except StopIteration:
train_dataloader_iter = iter(train_dataloader)
batch = next(train_dataloader_iter)
try:
mocap_batch = next(mocap_dataloader_iter)
except StopIteration:
mocap_dataloader_iter = iter(mocap_dataloader)
mocap_batch = next(mocap_dataloader_iter)
for param_name in batch.keys():
if (param_name not in ['imgname', 'smpl_params', 'has_smpl_params', 'smpl_params_is_axis_angle']):
batch[param_name] = batch[param_name].to(device)
for param_name in batch['smpl_params'].keys():
batch['smpl_params'][param_name] = batch['smpl_params'][param_name].to(device)
for param_name in mocap_batch.keys():
mocap_batch[param_name] = mocap_batch[param_name].to(device)
output = model.training_step(batch, mocap_batch)
if ((total_steps % args.log_step) == 0):
for key in output['losses'].keys():
writer.add_scalar('train/{}'.format(key), output['losses'][key].item(), total_steps)
print_str = '[Step {:d}/ Epoch {:d}] [train] {}: {:.10f}'.format(step, epoch, key, output['losses'][key].item())
logger.info(print_str)
print(print_str)
if ((total_steps % args.val_step) == 0):
val_loss_dict = {}
with torch.no_grad():
for (test_step, test_batch) in tqdm(enumerate(val_dataloader)):
for param_name in test_batch.keys():
if (param_name not in ['imgname', 'smpl_params', 'has_smpl_params', 'smpl_params_is_axis_angle']):
test_batch[param_name] = test_batch[param_name].to(device)
for param_name in test_batch['smpl_params'].keys():
test_batch['smpl_params'][param_name] = test_batch['smpl_params'][param_name].to(device)
val_output = model.validation_step(test_batch)
for key in val_output['losses'].keys():
if (test_step == 0):
val_loss_dict[key] = val_output['losses'][key].detach().clone()
else:
val_loss_dict[key] += val_output['losses'][key].detach().clone()
for key in val_loss_dict.keys():
                        val_loss_dict[key] = (val_loss_dict[key] / (test_step + 1))
writer.add_scalar('val/{}'.format(key), val_loss_dict[key].item(), total_steps)
print_str = '[Step {:d}/ Epoch {:d}] [test] {}: {:.10f}'.format(step, epoch, key, val_loss_dict[key].item())
logger.info(print_str)
print(print_str)
if (val_loss_dict['loss_keypoints_3d_mode'] < best_loss_keypoints_3d_mode):
best_loss_keypoints_3d_mode = val_loss_dict['loss_keypoints_3d_mode']
save_path = os.path.join(writer.file_writer.get_logdir(), 'best_model.pt')
state = {'state_dict': model.state_dict()}
torch.save(state, save_path)
logger.info('[*] best model saved\n')
print('[*] best model saved\n')
if ((total_steps % args.save_step) == 0):
save_path = os.path.join(writer.file_writer.get_logdir(), 'last_model.pt')
state = {'state_dict': model.state_dict()}
torch.save(state, save_path)
logger.info('[*] last model saved\n')
print('[*] last model saved\n') |
def find_element_by_ref(ref, elements):
for dom_element in elements:
if (dom_element.ref == ref):
return dom_element
raise ValueError('Invalid ref: {}'.format(ref)) |
class FiniteJoinSemilattice(FinitePoset):
Element = JoinSemilatticeElement
_desc = 'Finite join-semilattice'
def join_matrix(self):
return self._hasse_diagram.join_matrix()
def join(self, x, y=None):
jn = self._hasse_diagram.join_matrix()
if (y is not None):
(i, j) = map(self._element_to_vertex, (x, y))
return self._vertex_to_element(jn[(i, j)])
j = 0
for i in (self._element_to_vertex(_) for _ in x):
j = jn[(i, j)]
return self._vertex_to_element(j)
def coatoms(self):
if (self.cardinality() == 0):
return []
return self.lower_covers(self.top()) |
class QueryOnTriplaneGradFeature(PythonFunction):
def __init__(self, ctx, min_, max_, boundary_check=False, G=None):
super(QueryOnTriplaneGradFeature, self).__init__(ctx)
self._min = min_
self._max = max_
self._boundary_check = boundary_check
self._G = G
def name(self):
return self.__class__.__name__
def min_outputs(self):
return 1
def setup_impl(self, inputs, outputs):
grad_output = inputs[0]
D = grad_output.shape[(- 1)]
outputs[0].reset_shape((self._G + (D,)), True)
def forward_impl(self, inputs, outputs):
grad_feature = outputs[0]
grad_output = inputs[0]
query = inputs[1]
batch_sizes = query.shape[:(- 1)]
B = np.prod(batch_sizes)
D = grad_output.shape[(- 1)]
grad_feature_ptr = grad_feature.data.data_ptr(np.float32, self.ctx)
grad_output_ptr = grad_output.data.data_ptr(np.float32, self.ctx)
query_ptr = query.data.data_ptr(np.float32, self.ctx)
lanczos_triplane_feature_cuda.grad_feature(((B * D) * 3), grad_feature_ptr, grad_output_ptr, query_ptr, self._G, D, self._min, self._max, self._boundary_check, False)
def backward_impl(self, inputs, outputs, propagate_down, accum):
grad_feature = outputs[0]
grad_output = inputs[0]
query = inputs[1]
batch_sizes = query.shape[:(- 1)]
B = np.prod(batch_sizes)
D = grad_output.shape[(- 1)]
grad_grad_feature_ptr = grad_feature.grad.data_ptr(np.float32, self.ctx)
grad_output_ptr = grad_output.data.data_ptr(np.float32, self.ctx)
query_ptr = query.data.data_ptr(np.float32, self.ctx)
grad_grad_output_ptr = grad_output.grad.data_ptr(np.float32, self.ctx)
grad_query_ptr = query.grad.data_ptr(np.float32, self.ctx)
if propagate_down[0]:
lanczos_triplane_feature_cuda.grad_feature_grad_grad_output(((B * D) * 3), grad_grad_output_ptr, grad_grad_feature_ptr, query_ptr, self._G, D, self._min, self._max, self._boundary_check, accum[0])
if propagate_down[1]:
lanczos_triplane_feature_cuda.grad_feature_grad_query(((B * D) * 3), grad_query_ptr, grad_grad_feature_ptr, grad_output_ptr, query_ptr, self._G, D, self._min, self._max, self._boundary_check, accum[1])
def grad_depends_output_data(self, i, o):
return False
def grad_depends_input_data(self, i, j):
if ((i == 0) and (j == 1)):
return True
if (i == 1):
return True
return False |
class DataSplitter(pl.LightningDataModule):
data_loader_cls = AnnDataLoader
def __init__(self, adata_manager: AnnDataManager, train_size: float=0.9, validation_size: Optional[float]=None, shuffle_set_split: bool=True, load_sparse_tensor: bool=False, pin_memory: bool=False, **kwargs):
super().__init__()
self.adata_manager = adata_manager
self.train_size = float(train_size)
self.validation_size = validation_size
self.shuffle_set_split = shuffle_set_split
self.load_sparse_tensor = load_sparse_tensor
self.data_loader_kwargs = kwargs
self.pin_memory = pin_memory
(self.n_train, self.n_val) = validate_data_split(self.adata_manager.adata.n_obs, self.train_size, self.validation_size)
def setup(self, stage: Optional[str]=None):
n_train = self.n_train
n_val = self.n_val
indices = np.arange(self.adata_manager.adata.n_obs)
if self.shuffle_set_split:
random_state = np.random.RandomState(seed=settings.seed)
indices = random_state.permutation(indices)
self.val_idx = indices[:n_val]
self.train_idx = indices[n_val:(n_val + n_train)]
self.test_idx = indices[(n_val + n_train):]
def train_dataloader(self):
return self.data_loader_cls(self.adata_manager, indices=self.train_idx, shuffle=True, drop_last=False, load_sparse_tensor=self.load_sparse_tensor, pin_memory=self.pin_memory, **self.data_loader_kwargs)
def val_dataloader(self):
if (len(self.val_idx) > 0):
return self.data_loader_cls(self.adata_manager, indices=self.val_idx, shuffle=False, drop_last=False, load_sparse_tensor=self.load_sparse_tensor, pin_memory=self.pin_memory, **self.data_loader_kwargs)
else:
pass
def test_dataloader(self):
if (len(self.test_idx) > 0):
return self.data_loader_cls(self.adata_manager, indices=self.test_idx, shuffle=False, drop_last=False, load_sparse_tensor=self.load_sparse_tensor, pin_memory=self.pin_memory, **self.data_loader_kwargs)
else:
pass
def on_after_batch_transfer(self, batch, dataloader_idx):
if self.load_sparse_tensor:
for (key, val) in batch.items():
layout = (val.layout if isinstance(val, torch.Tensor) else None)
if ((layout is torch.sparse_csr) or (layout is torch.sparse_csc)):
batch[key] = val.to_dense()
return batch |
def get_focused_table(table, ref_table, win_ratio):
focused_table = copy.deepcopy(table)
win_size = int((win_ratio * len(ref_table.data)))
focused_table.data = focused_table.data.tail(win_size).reset_index(drop=True)
focused_table.parse_columns()
return focused_table |
def test_lad_head_loss():
class mock_skm():
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
            components = np.zeros_like(loss, dtype=np.int64)
return components.reshape((- 1))
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
lad_head.skm = mock_skm()
s = 256
img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3)}]
train_cfg = mmcv.Config(dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1, min_pos_iou=0, ignore_iof_thr=(- 1)), allowed_border=(- 1), pos_weight=(- 1), debug=False))
self = LADHead(num_classes=4, in_channels=1, train_cfg=train_cfg, loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
teacher_model = LADHead(num_classes=4, in_channels=1, train_cfg=train_cfg, loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [4, 8, 16, 32, 64]]
self.init_weights()
teacher_model.init_weights()
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
outs_teacher = teacher_model(feat)
label_assignment_results = teacher_model.get_label_assignment(*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
outs = teacher_model(feat)
empty_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore, label_assignment_results)
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert (empty_cls_loss.item() > 0), 'cls loss should be non-zero'
assert (empty_box_loss.item() == 0), 'there should be no box loss when there are no true boxes'
assert (empty_iou_loss.item() == 0), 'there should be no box loss when there are no true boxes'
gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
gt_labels = [torch.LongTensor([2])]
label_assignment_results = teacher_model.get_label_assignment(*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
one_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore, label_assignment_results)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert (onegt_cls_loss.item() > 0), 'cls loss should be non-zero'
assert (onegt_box_loss.item() > 0), 'box loss should be non-zero'
    assert (onegt_iou_loss.item() > 0), 'iou loss should be non-zero'
(n, c, h, w) = (10, 4, 20, 20)
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert (len(results) == n)
assert (results[0].size() == (((h * w) * 5), c))
assert self.with_score_voting
self = LADHead(num_classes=4, in_channels=1, train_cfg=train_cfg, anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8]), loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(dict(nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
rescale = False
self.get_bboxes(cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale) |
class SecStr8(Alphabet):
def __init__(self):
chars = b'HBEGITS '
encoding = np.arange(len(chars))
super(SecStr8, self).__init__(chars, encoding, missing=255) |
def _fix_real_abs_gt_1(x):
x = asarray(x)
if any((isreal(x) & (abs(x) > 1))):
x = _tocomplex(x)
return x |
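This mirrors the promotion used around functions such as arccos and arctanh, whose real branches are only defined on [-1, 1]: if any element is real with |x| > 1, the whole array is promoted to complex (via the module's _tocomplex helper) so the principal complex branch applies instead of returning NaN. For example:

import numpy as np

np.arccos(2.0)       # nan, with a RuntimeWarning
np.arccos(2.0 + 0j)  # a finite, purely imaginary result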
def getGPUbatchSize(num_gpus, batch_size):
nf = int(noremDiv(batch_size, num_gpus))
nl = (batch_size - (nf * (num_gpus - 1)))
return np.cumsum((([0] + ([nf] * (num_gpus - 1))) + [nl])) |
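A worked example, assuming noremDiv is integer floor division: for batch_size=10 on num_gpus=3, nf = 3 items go to each of the first two GPUs and nl = 4 to the last, and the cumulative sum gives slice boundaries:

import numpy as np

# getGPUbatchSize(3, 10) would then return array([0, 3, 6, 10])
boundaries = np.cumsum([0, 3, 3, 4])
batch = list(range(10))
shards = [batch[boundaries[i]:boundaries[i + 1]] for i in range(3)]
# shards == [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]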
def main_worker(gpu, argss):
global args
args = argss
torch.cuda.set_device(gpu)
rank = ((args.nr * args.gpus) + gpu)
args.rank = rank
exp_name = '/imagenet_pretrain'
args.save_path = (args.save_path + exp_name)
args.snapshot_root = (args.save_path + '/snapshot/')
args.log_root = (args.save_path + '/logs/train-{}'.format(time.strftime('%Y%m%d-%H%M%S')))
if ((args.phase == 'train') and (args.rank == 0)):
create_exp_dir(args.log_root, scripts_to_save=None)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.log_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
if ((not os.path.exists(args.snapshot_root)) and (args.rank == 0)):
os.mkdir(args.snapshot_root)
dist.init_process_group(backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
train_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train'), transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=args.world_size, rank=rank)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.batchsize, num_workers=0, pin_memory=True, sampler=train_sampler)
valid_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'val'), transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset, num_replicas=args.world_size, rank=rank, shuffle=False)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=args.batchsize, num_workers=0, pin_memory=True, sampler=valid_sampler)
kwargs = {'num_workers': 2, 'pin_memory': True}
logging.info('data already')
model_depth = torch.nn.SyncBatchNorm.convert_sync_batchnorm(DepthNet())
model_rgb = torch.nn.SyncBatchNorm.convert_sync_batchnorm(RgbNet())
model_fusion = torch.nn.SyncBatchNorm.convert_sync_batchnorm(NasFusionNet_pre())
model_depth.init_weights()
vgg19_bn = torchvision.models.vgg19_bn(pretrained=True)
model_rgb.copy_params_from_vgg19_bn(vgg19_bn)
model_fusion.init_weights()
if (args.rank == 0):
print('model_rgb param size = %fMB', count_parameters_in_MB(model_rgb))
print('model_depth param size = %fMB', count_parameters_in_MB(model_depth))
print('nas-model param size = %fMB', count_parameters_in_MB(model_fusion))
model_depth = model_depth.cuda()
model_rgb = model_rgb.cuda()
model_fusion = model_fusion.cuda()
if args.distributed:
model_depth = DDP(model_depth, device_ids=[gpu])
model_rgb = DDP(model_rgb, device_ids=[gpu])
model_fusion = DDP(model_fusion, device_ids=[gpu])
optimizer_depth = optim.SGD(model_depth.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer_rgb = optim.SGD(model_rgb.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer_fusion = optim.SGD(model_fusion.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
CE = nn.CrossEntropyLoss().cuda()
logger = SummaryWriter(args.log_root)
best_prec1 = (- 1)
for epoch in range(0, args.epoch):
adjust_learning_rate(optimizer_depth, epoch, args)
adjust_learning_rate(optimizer_rgb, epoch, args)
adjust_learning_rate(optimizer_fusion, epoch, args)
if (args.rank == 0):
print('lr:', optimizer_rgb.param_groups[0]['lr'])
train_sampler.set_epoch(epoch)
train(train_loader, [model_rgb, model_depth, model_fusion], CE, [optimizer_rgb, optimizer_depth, optimizer_fusion], epoch, logger, logging)
prec1 = validate(valid_loader, [model_rgb, model_depth, model_fusion], CE, epoch, logger, logging)
is_best = (prec1 > best_prec1)
best_prec1 = max(prec1, best_prec1)
if (args.rank == 0):
logging.info(('Best accuracy: %f' % best_prec1))
logger.add_scalar('best/accuracy', best_prec1, global_step=epoch)
savename_depth = ('%s/depth_pre_epoch%d.pth' % (args.snapshot_root, epoch))
torch.save(model_depth.state_dict(), savename_depth)
print(('save: (snapshot: %d)' % epoch))
savename_rgb = ('%s/rgb_pre_epoch%d.pth' % (args.snapshot_root, epoch))
torch.save(model_rgb.state_dict(), savename_rgb)
print(('save: (snapshot: %d)' % epoch))
savename_fusion = ('%s/fusion_pre_epoch%d.pth' % (args.snapshot_root, epoch))
torch.save(model_fusion.state_dict(), savename_fusion)
print(('save: (snapshot: %d)' % epoch))
if is_best:
savename_depth = ('%s/depth_pre.pth' % args.snapshot_root)
torch.save(model_depth.state_dict(), savename_depth)
print(('save: (snapshot: %d)' % epoch))
savename_rgb = ('%s/rgb_pre.pth' % args.snapshot_root)
torch.save(model_rgb.state_dict(), savename_rgb)
print(('save: (snapshot: %d)' % epoch))
savename_fusion = ('%s/fusion_pre.pth' % args.snapshot_root)
torch.save(model_fusion.state_dict(), savename_fusion)
print(('save: (snapshot: %d)' % epoch)) |
def plot_heatmap(model_dir, name, features, labels, num_classes):
(features_sort, _) = utils.sort_dataset(features, labels, classes=num_classes, stack=False)
features_sort_ = np.vstack(features_sort)
    sim_mat = np.abs((features_sort_ @ features_sort_.T))
(fig, ax) = plt.subplots(figsize=(7, 5), sharey=True, sharex=True)
im = ax.imshow(sim_mat, cmap='Blues')
fig.colorbar(im, pad=0.02, drawedges=0, ticks=[0, 0.5, 1])
ax.set_xticks(np.linspace(0, len(labels), (num_classes + 1)))
ax.set_yticks(np.linspace(0, len(labels), (num_classes + 1)))
[tick.label.set_fontsize(10) for tick in ax.xaxis.get_major_ticks()]
[tick.label.set_fontsize(10) for tick in ax.yaxis.get_major_ticks()]
fig.tight_layout()
save_dir = os.path.join(model_dir, 'figures', 'heatmaps')
os.makedirs(save_dir, exist_ok=True)
file_name = os.path.join(save_dir, f'{name}.png')
fig.savefig(file_name)
print('Plot saved to: {}'.format(file_name))
plt.close() |
class PreBottleneckX(nn.Module):
expansion = 4
bias = False
def __init__(self, inplanes, planes, baseWidth, cardinality, stride=1, ptype='preact'):
super(PreBottleneckX, self).__init__()
D = math.floor(((planes * baseWidth) / 64.0))
if (ptype != 'no_preact'):
self.preact = nn.Sequential(nn.BatchNorm2d(inplanes), nn.ReLU(inplace=True))
(conv1, bn1, conv2, bn2) = ([], [], [], [])
for i in range(cardinality):
conv1.append(nn.Conv2d(inplanes, D, kernel_size=1, bias=self.bias))
bn1.append(nn.BatchNorm2d(D))
conv2.append(nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, bias=self.bias))
bn2.append(nn.BatchNorm2d(D))
self.conv1 = nn.ModuleList(conv1)
self.bn1 = nn.ModuleList(bn1)
self.conv2 = nn.ModuleList(conv2)
self.bn2 = nn.ModuleList(bn2)
self.conv3 = nn.Conv2d((D * cardinality), (planes * self.expansion), kernel_size=1, bias=self.bias)
self.relu = nn.ReLU(inplace=True)
if ((stride != 1) or (inplanes != (planes * self.expansion))):
self.downsample = nn.Conv2d(inplanes, (planes * self.expansion), kernel_size=1, stride=stride, bias=self.bias)
else:
self.downsample = nn.Sequential()
self.cardinality = cardinality
self.ptype = ptype
def forward(self, x):
if (self.ptype == 'both_preact'):
x = self.preact(x)
residual = x
if ((self.ptype != 'no_preact') and (self.ptype != 'both_preact')):
x = self.preact(x)
out = []
for i in range(self.cardinality):
y = self.conv1[i](x)
y = self.bn1[i](y)
y = self.relu(y)
y = self.conv2[i](y)
y = self.bn2[i](y)
y = self.relu(y)
out.append(y)
out = torch.cat(out, dim=1)
out = self.conv3(out)
residual = self.downsample(residual)
out += residual
return out |
def compute_tensor_method(*, target: Target) -> Callable[([NativeFunction], Optional[str])]:
    @with_native_function
def go(f: NativeFunction) -> Optional[str]:
if (Variant.method not in f.variants):
return None
assert (not f.func.is_out_fn())
assert (len(f.func.arguments) > 0)
assert (sum(((a.name == 'self') for a in f.func.arguments)) == 1)
name = cpp.name(f.func)
cpp_returns_type = cpp.returns_type(f.func.returns)
cpp_args = cpp.arguments(f.func, method=True)
cpp_args_exclude_this = [a for a in cpp_args if (not isinstance(a.argument, ThisArgument))]
cpp_args_exclude_this_str = ', '.join((str(a) for a in cpp_args_exclude_this))
if (target is Target.DECLARATION):
return f'{cpp_returns_type} {name}({cpp_args_exclude_this_str}) const;'
assert (target is Target.DEFINITION)
dispatcher_exprs = dispatcher.cpparguments_exprs(cpp_args)
cpp_args_exclude_this_str_no_default = ', '.join((a.str_no_default() for a in cpp_args_exclude_this))
dispatcher_returns_type = dispatcher.returns_type(f.func.returns)
dispatcher_types_str = ', '.join(map((lambda a: a.type), dispatcher_exprs))
dispatcher_exprs_str = ', '.join(map((lambda a: a.expr), dispatcher_exprs))
return f'''
// aten::{f.func}
{cpp_returns_type} Tensor::{name}({cpp_args_exclude_this_str_no_default}) const {{
static auto op = c10::Dispatcher::singleton()
.findSchemaOrThrow("aten::{f.func.name.name}", "{f.func.name.overload_name}")
.typed<{dispatcher_returns_type} ({dispatcher_types_str})>();
return op.call({dispatcher_exprs_str});
}}
'''
return go |
def get_root_logger(log_file=None, log_level=logging.INFO):
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger |
def plot_num_components_undirected(G_times, fname):
max_time = len(G_times)
t = list(range(0, max_time))
num_connected_components = []
for G in G_times:
G = G.to_undirected()
num_connected_components.append(nx.number_connected_components(G))
plt.rcParams.update({'figure.autolayout': True})
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(figsize=(4, 2))
ax = fig.add_subplot(1, 1, 1)
ax.plot(t, num_connected_components, marker='P', color='#ffa600', ls='solid', linewidth=0.5, markersize=1)
ax.set_xlabel('time', fontsize=8)
outliers = find_rarity_windowed_outlier(num_connected_components)
outliers.sort()
for xc in outliers:
plt.axvline(x=xc, color='k', linestyle=':', linewidth=0.5)
ax.set_ylabel('number of connected components', fontsize=8)
plt.title('number of connected components over time', fontsize='x-small')
plt.savefig((fname + 'components.pdf'), pad_inches=0)
return outliers |
def make_command(params, unique_id):
params['savedir'] = ('./log/%s/baselines-%s' % (datetime.date.today().strftime('%y-%m-%d'), unique_id))
params = itertools.chain(*[(('--%s' % k), str(v)) for (k, v) in params.items()])
return list(params) |
def first_sunday_on_or_after(dt):
days_to_go = (6 - dt.weekday())
if days_to_go:
dt += timedelta(days_to_go)
return dt |
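datetime.weekday() runs Monday=0 through Sunday=6, so days_to_go is 0 exactly when dt already falls on a Sunday. A quick check:

from datetime import datetime

# 2024-01-01 is a Monday; the next Sunday is 2024-01-07.
assert first_sunday_on_or_after(datetime(2024, 1, 1)).day == 7
# A Sunday maps to itself.
assert first_sunday_on_or_after(datetime(2024, 1, 7)).day == 7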
def warning(msg, warning_type=UserWarning, stacklevel=1, print_stack=True):
if (not is_logging_effective('warn')):
return
if print_stack:
msg += f'''
{get_traceback(stacklevel)}'''
warnings.warn((((Fore.YELLOW + Style.BRIGHT) + msg) + Style.RESET_ALL), warning_type) |
class Extension(_Extension):
def __init__(self, name, sources, *args, **kw):
self.py_limited_api = kw.pop('py_limited_api', False)
_Extension.__init__(self, name, sources, *args, **kw)
def _convert_pyx_sources_to_lang(self):
if _have_cython():
return
lang = (self.language or '')
target_ext = ('.cpp' if (lang.lower() == 'c++') else '.c')
sub = functools.partial(re.sub, '.pyx$', target_ext)
self.sources = list(map(sub, self.sources)) |
def recall(pred, target, num_classes):
tp = true_positive(pred, target, num_classes).to(torch.float)
fn = false_negative(pred, target, num_classes).to(torch.float)
out = (tp / (tp + fn))
out[torch.isnan(out)] = 0
return out |
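Per class c this computes recall_c = TP_c / (TP_c + FN_c), mapping the NaN from empty classes to 0. A hand-worked example of the arithmetic (the per-class counts below are tallied by eye, not via the snippet's true_positive/false_negative helpers):

import torch

pred = torch.tensor([0, 0, 1, 1])
target = torch.tensor([0, 1, 1, 1])
# Class 0: TP=1, FN=0 -> recall 1.0; class 1: TP=2, FN=1 -> recall 2/3.
tp = torch.tensor([1.0, 2.0])
fn = torch.tensor([0.0, 1.0])
print(tp / (tp + fn))  # tensor([1.0000, 0.6667])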
def max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) -> Tensor:
return complex_fcaller(F.max_pool3d, input, kernel_size, stride, padding, dilation, ceil_mode, return_indices) |
class Printable(object):
def __init__(self):
super().__init__()
def print(self, indentation: int=0) -> str:
raise NotImplementedError('print not implemented.')
def __str__(self) -> str:
return self.print(0) |
class JBluesNormFactor(ProcessingPlasmaProperty):
outputs = ('j_blues_norm_factor',)
    latex = '\\frac{c\\,time_\\textrm{explosion}}{4\\pi\\,time_\\textrm{simulation}\\,volume}'
def calculate(time_explosion, time_simulation, volume):
return ((const.c.cgs * time_explosion) / (((4 * np.pi) * time_simulation) * volume)) |
def test_attn_agg_constructor_1():
agg = AttentionalAggregator(output_dim=4, bias=True, act=(lambda x: (x + 1)))
assert (agg.output_dim == 4)
assert agg.has_bias
assert (agg.act(2) == 3) |
class BR(nn.Module):
def __init__(self, nOut):
super().__init__()
self.bn = nn.BatchNorm3d(nOut, momentum=0.95, eps=0.001)
self.act = nn.ReLU(inplace=True)
def forward(self, input):
output = self.bn(input)
output = self.act(output)
return output |
def check_list(rlms_list, rimgs_list, rmsks_list):
(lms_list, imgs_list, msks_list) = ([], [], [])
for i in range(len(rlms_list)):
flag = 'false'
lm_path = rlms_list[i]
im_path = rimgs_list[i]
msk_path = rmsks_list[i]
if (os.path.isfile(lm_path) and os.path.isfile(im_path) and os.path.isfile(msk_path)):
flag = 'true'
lms_list.append(rlms_list[i])
imgs_list.append(rimgs_list[i])
msks_list.append(rmsks_list[i])
print(i, rlms_list[i], flag)
return (lms_list, imgs_list, msks_list) |
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
def test_ceil_double_backward(seed, ctx, func_name):
from nbla_test_utils import cap_ignore_region, backward_function_tester
rng = np.random.RandomState(seed)
inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
backward_function_tester(rng, F.ceil, inputs, atol_accum=0.01, backward=[True], ctx=ctx) |
class History(Callback):
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
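# Append the epoch index and each logged metric; self.history maps metric name -> list of per-epoch values.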
def on_epoch_end(self, epoch, logs=None):
logs = (logs or {})
self.epoch.append(epoch)
for (k, v) in logs.items():
self.history.setdefault(k, []).append(v) |
def add_token(token_list, word, token):
if ((token is None) and isinstance(word.id, int)):
raise AssertionError("Only expected word w/o token for 'extra' words")
query_token = token_list.add()
query_token.word = word.text
query_token.value = word.text
if (word.lemma is not None):
query_token.lemma = word.lemma
if (word.xpos is not None):
query_token.pos = word.xpos
if (word.upos is not None):
query_token.coarseTag = word.upos
if (word.feats and (word.feats != '_')):
for feature in word.feats.split('|'):
(key, value) = feature.split('=', maxsplit=1)
query_token.conllUFeatures.key.append(key)
query_token.conllUFeatures.value.append(value)
if (token is not None):
if (token.ner is not None):
query_token.ner = token.ner
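# Multi-word token (MWT): token.id spans several word ids, so record the surface
# text once and attach the trailing whitespace only after the last word of the span.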
if ((token is not None) and (len(token.id) > 1)):
query_token.mwtText = token.text
query_token.isMWT = True
query_token.isFirstMWT = (token.id[0] == word.id)
if (token.id[(- 1)] == word.id):
space_after = misc_to_space_after(token.misc)
if (space_after == ' '):
space_after = misc_to_space_after(word.misc)
query_token.after = space_after
query_token.index = word.id
else:
query_token.after = misc_to_space_after(word.misc)
query_token.index = word.id[0]
query_token.emptyIndex = word.id[1]
if (word.misc and (word.misc != '_')):
query_token.conllUMisc = word.misc
if ((token is not None) and token.misc and (token.misc != '_')):
query_token.mwtMisc = token.misc |
def batch_mat_mul(model, blob_in, blob_out, enable_tensor_core=False, **kwargs):
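# Caffe2 BatchMatMul; the TENSORCORE engine selects tensor-core kernels on supported GPUs.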
if enable_tensor_core:
kwargs['engine'] = 'TENSORCORE'
return model.net.BatchMatMul(blob_in, blob_out, **kwargs) |
def test_yolov3_head_onnx_export():
yolo_model = yolo_config()
s = 128
img_metas = [{'img_shape_for_onnx': torch.Tensor([s, s]), 'img_shape': (s, s, 3), 'scale_factor': np.ones(4), 'pad_shape': (s, s, 3)}]
yolo_head_data = 'yolov3_head_get_bboxes.pkl'
pred_maps = mmcv.load(osp.join(data_path, yolo_head_data))
yolo_model.onnx_export = partial(yolo_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(yolo_model.onnx_export, pred_maps) |
class FileLoaderIterDataPipe(IterDataPipe[Tuple[(str, IOBase)]]):
def __init__(self, datapipe: Iterable[str], mode: str='b', length: int=(- 1)):
super().__init__()
self.datapipe: Iterable = datapipe
self.mode: str = mode
if (self.mode not in ('b', 't', 'rb', 'rt', 'r')):
raise ValueError('Invalid mode {}'.format(mode))
self.length: int = length
def __iter__(self):
(yield from get_file_binaries_from_pathnames(self.datapipe, self.mode))
def __len__(self):
if (self.length == (- 1)):
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
return self.length |
def FGCNN(linear_feature_columns, dnn_feature_columns, conv_kernel_width=(7, 7, 7, 7), conv_filters=(14, 16, 18, 20), new_maps=(3, 3, 3, 3), pooling_width=(2, 2, 2, 2), dnn_hidden_units=(256, 128, 64), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, dnn_dropout=0, seed=1024, task='binary'):
if (not (len(conv_kernel_width) == len(conv_filters) == len(new_maps) == len(pooling_width))):
raise ValueError('conv_kernel_width, conv_filters, new_maps and pooling_width must have the same length')
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear)
(deep_emb_list, _) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, seed)
(fg_deep_emb_list, _) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, seed, prefix='fg')
fg_input = concat_func(fg_deep_emb_list, axis=1)
origin_input = concat_func(deep_emb_list, axis=1)
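# FGCNN: a separate 'fg' embedding copy is run through conv/pool layers to generate
# new feature maps, which are then concatenated with the original embeddings.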
if (len(conv_filters) > 0):
new_features = FGCNNLayer(conv_filters, conv_kernel_width, new_maps, pooling_width)(fg_input)
combined_input = concat_func([origin_input, new_features], axis=1)
else:
combined_input = origin_input
inner_product = Flatten()(InnerProductLayer()(Lambda(unstack, mask=([None] * int(combined_input.shape[1])))(combined_input)))
linear_signal = Flatten()(combined_input)
dnn_input = Concatenate()([linear_signal, inner_product])
dnn_input = Flatten()(dnn_input)
final_logit = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout)(dnn_input)
final_logit = Dense(1, use_bias=False)(final_logit)
final_logit = add_func([final_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model |
def _get_args_from_config(from_config_func, *args, **kwargs):
signature = inspect.signature(from_config_func)
if (list(signature.parameters.keys())[0] != 'cfg'):
raise TypeError(f"{from_config_func.__self__}.from_config must take 'cfg' as the first argument!")
support_var_arg = any(((param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]) for param in signature.parameters.values()))
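# If from_config accepts *args/**kwargs, forward everything; otherwise strip
# unsupported kwargs and merge them back into the returned dict afterwards.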
if support_var_arg:
ret = from_config_func(*args, **kwargs)
else:
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if (name not in supported_arg_names):
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
ret.update(extra_kwargs)
return ret |
def test_scar():
n_latent = 5
adata = synthetic_iid()
adata.X = scipy.sparse.csr_matrix(adata.X)
SCAR.setup_anndata(adata)
_ = SCAR.get_ambient_profile(adata, adata, prob=0.0, iterations=1, sample=100)
model = SCAR(adata, ambient_profile=None, n_latent=n_latent)
model.train(1, check_val_every_n_epoch=1, train_size=0.5)
model.get_elbo()
model.get_latent_representation()
model.get_marginal_ll(n_mc_samples=5)
model.get_reconstruction_error()
model.get_denoised_counts(adata, n_samples=1)
_ = model.history
print(model) |
def test():
x0s = [[2, 0.5], [8, 0.5], [2, 3.5], [8, 3.5]]
wall_half_width = 0.05
A = np.array([[(- 1), 0], [1, 0], [0, (- 1)], [0, 1]])
walls = []
walls.append(np.array([0, 0, 0, 4], dtype=np.float64))
walls.append(np.array([10, 10, 0, 4], dtype=np.float64))
walls.append(np.array([0, 10, 0, 0], dtype=np.float64))
walls.append(np.array([0, 10, 4, 4], dtype=np.float64))
walls.append(np.array([0, 4.0, 2, 2], dtype=np.float64))
walls.append(np.array([5.0, 10, 2, 2], dtype=np.float64))
obs = []
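# Inflate each axis-aligned wall segment by wall_half_width and encode it as box
# constraints A x <= b (the -1/+1 mask flips signs for the lower bounds).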
for wall in walls:
if (wall[0] == wall[1]):
wall[0] -= wall_half_width
wall[1] += wall_half_width
elif (wall[2] == wall[3]):
wall[2] -= wall_half_width
wall[3] += wall_half_width
else:
raise ValueError('wrong shape for axis-aligned wall')
wall *= np.array([(- 1), 1, (- 1), 1])
obs.append((A, wall))
b1 = np.array([(- 1.5), 2.5, (- 3), 4], dtype=np.float64)
b2 = np.array([(- 7.5), 8.5, (- 3), 4], dtype=np.float64)
b3 = np.array([(- 1.5), 2.5, 0, 1], dtype=np.float64)
b4 = np.array([(- 7.5), 8.5, 0, 1], dtype=np.float64)
goals = [(A, b1), (A, b2), (A, b3), (A, b4)]
tmax = 6.0
vmax = 3.0
specs = []
for i in range(4):
avoids = [Node('negmu', info={'A': A, 'b': b}) for (A, b) in obs]
avoid_obs = Node('and', deps=avoids)
always_avoid_obs = Node('A', deps=[avoid_obs], info={'int': [0, tmax]})
reach_goal = Node('mu', info={'A': goals[i][0], 'b': goals[i][1]})
finally_reach_goal = Node('F', deps=[reach_goal], info={'int': [0, tmax]})
specs.append(Node('and', deps=[always_avoid_obs, finally_reach_goal]))
PWL = plan(x0s, specs, bloat=0.2, num_segs=6, tmax=tmax, vmax=vmax, MIPGap=0.3)
plots = [[goals, 'g'], [obs, 'k']]
return (x0s, plots, PWL) |
def parse_args():
parser = argparse.ArgumentParser(description='Train a vanilla XGBoost GBDT model.')
parser.add_argument('--train', '--train_data', type=str, help='train data file name.', required=True)
parser.add_argument('--test', '--test_data', type=str, help='test data file name.', required=True)
parser.add_argument('--nfeat', type=int, default=None, help='number of features.', required=True)
parser.add_argument('--num_trees', type=int, help='Number of trees.', required=True)
parser.add_argument('--max_depth', type=int, help='Maximum number of depth for each tree.', required=True)
parser.add_argument('--model_name', type=str, help='Save the trained model.', required=True)
parser.add_argument('--monotone', type=str, default=None, help='monotonic constraints string. 1/0/-1.', required=False)
parser.add_argument('--exfeat', type=str, default=None, help='exclude the list of features', required=False)
parser.add_argument('--scale_pos_weight', type=float, default=1, help='scale_pos_weight parameter.', required=False)
parser.add_argument('--xgb_model', type=str, default=None, help='file name of stored xgb model to continue training.', required=False)
return parser.parse_args() |
def test_kwargs_with_default():
@dace.program
def kwarg(A: dace.float64[20], kw: dace.float64[20]=np.ones([20])):
A[:] = (kw + 1)
A = np.random.rand(20)
kwarg(A)
assert np.allclose(A, 2.0)
kw = np.random.rand(20)
kwarg(A, kw)
assert np.allclose(A, (kw + 1)) |
def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, x0, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0, stop_criteria, enforce_feasibility, xtol, state, initial_barrier_parameter, initial_tolerance, initial_penalty, initial_trust_radius, factorization_method):
BOUNDARY_PARAMETER = 0.995
BARRIER_DECAY_RATIO = 0.2
TRUST_ENLARGEMENT = 5
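# Interior-point outer loop: repeatedly solve an equality-constrained SQP barrier
# subproblem, then shrink the barrier parameter and tolerance by BARRIER_DECAY_RATIO
# until the stopping criteria fire.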
if (enforce_feasibility is None):
enforce_feasibility = np.zeros(n_ineq, bool)
barrier_parameter = initial_barrier_parameter
tolerance = initial_tolerance
trust_radius = initial_trust_radius
s0 = np.maximum(((- 1.5) * constr_ineq0), np.ones(n_ineq))
subprob = BarrierSubproblem(x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, barrier_parameter, tolerance, enforce_feasibility, stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0)
z = np.hstack((x0, s0))
(fun0_subprob, constr0_subprob) = (subprob.fun0, subprob.constr0)
(grad0_subprob, jac0_subprob) = (subprob.grad0, subprob.jac0)
trust_lb = np.hstack((np.full(subprob.n_vars, (- np.inf)), np.full(subprob.n_ineq, (- BOUNDARY_PARAMETER))))
trust_ub = np.full((subprob.n_vars + subprob.n_ineq), np.inf)
while True:
(z, state) = equality_constrained_sqp(subprob.function_and_constraints, subprob.gradient_and_jacobian, subprob.lagrangian_hessian, z, fun0_subprob, grad0_subprob, constr0_subprob, jac0_subprob, subprob.stop_criteria, state, initial_penalty, trust_radius, factorization_method, trust_lb, trust_ub, subprob.scaling)
if subprob.terminate:
break
trust_radius = max(initial_trust_radius, (TRUST_ENLARGEMENT * state.tr_radius))
barrier_parameter *= BARRIER_DECAY_RATIO
tolerance *= BARRIER_DECAY_RATIO
subprob.update(barrier_parameter, tolerance)
(fun0_subprob, constr0_subprob) = subprob.function_and_constraints(z)
(grad0_subprob, jac0_subprob) = subprob.gradient_and_jacobian(z)
x = subprob.get_variables(z)
return (x, state) |
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None, mask=None):
def docstring_decorator(fn):
model_class = fn.__qualname__.split('.')[0]
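# Choose a task-specific code sample from the class name; a 'TF' prefix selects
# the TensorFlow sample, everything else gets the PyTorch one.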
is_tf_class = (model_class[:2] == 'TF')
doc_kwargs = dict(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
if ('SequenceClassification' in model_class):
code_sample = (TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE)
elif ('QuestionAnswering' in model_class):
code_sample = (TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE)
elif ('TokenClassification' in model_class):
code_sample = (TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE)
elif ('MultipleChoice' in model_class):
code_sample = (TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE)
elif (('MaskedLM' in model_class) or (model_class in ['FlaubertWithLMHeadModel', 'XLMWithLMHeadModel'])):
doc_kwargs['mask'] = ('[MASK]' if (mask is None) else mask)
code_sample = (TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE)
elif ('LMHead' in model_class):
code_sample = (TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE)
elif (('Model' in model_class) or ('Encoder' in model_class)):
code_sample = (TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE)
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
output_doc = (_prepare_output_docstrings(output_type, config_class) if (output_type is not None) else '')
built_doc = code_sample.format(**doc_kwargs)
fn.__doc__ = ((((fn.__doc__ or '') + ''.join(docstr)) + output_doc) + built_doc)
return fn
return docstring_decorator |
class QuarterOfYear(TimeFeature):
def __call__(self, idx: pd.DatetimeIndex) -> np.ndarray:
return self.process((idx.quarter - 1))
def _max_val(self):
return 3.0 |
def inputConversion():
try:
user_input = input('Enter a number: ')
user_input = int(user_input)
except ValueError:
logging.error('Invalid input')
return None
return user_input |
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = ((CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ())
all_generative_model_classes = ((CTRLLMHeadModel,) if is_torch_available() else ())
test_pruning = True
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model) |
def is_FreeMonoid(x):
if isinstance(x, FreeMonoid):
return True
from sage.monoids.indexed_free_monoid import IndexedFreeMonoid
return isinstance(x, IndexedFreeMonoid) |
def stp(s, ts: torch.Tensor):
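# Per-sample scalar-tensor product: reshape s to (B, 1, ..., 1) so it broadcasts
# over the remaining dimensions of ts.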
if isinstance(s, np.ndarray):
s = torch.from_numpy(s).type_as(ts)
extra_dims = ((1,) * (ts.dim() - 1))
return (s.view((- 1), *extra_dims) * ts) |
@with_native_function
def compute_registration_declarations(f: NativeFunction) -> str:
name = dispatcher.name(f.func)
returns_type = dispatcher.returns_type(f.func.returns)
args = dispatcher.arguments(f.func)
args_str = ', '.join(map(str, args))
dispatch = (f.dispatch is not None)
math = (dispatch and ('Math' in f.dispatch))
return f'''{returns_type} {name}({args_str}); // {{"schema": "aten::{f.func}", "dispatch": "{dispatch}", "math": "{math}"}}
''' |
def classification_eval(model, data_loader, limit=None):
logging.info('Starting classification evaluation')
correct = 0
total = 0
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
model.to(device)
model.eval()
with torch.no_grad():
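# Accumulate top-1 accuracy over the loader, optionally stopping once `limit` images have been scored.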
for data in tqdm(data_loader, desc='Classification evaluation'):
(images, labels) = data
outputs = model(images.to(device))
(_, predicted) = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.to(device)).sum().item()
if (limit and (total >= int(limit))):
break
logging.info(f'Num of images: {total}, Accuracy: {round(((100 * correct) / total), 2)} %')
return ((correct / total), total) |
class SlavePipe(_SlavePipeBase):
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret |
def subscript_to_ast_slice(node, without_array=False):
if isinstance(node, ast.Name):
(result_arr, result_slice) = (node.id, None)
return (result_slice if without_array else (result_arr, result_slice))
if (not isinstance(node, ast.Subscript)):
raise TypeError('AST node is not a subscript')
result_slice = None
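# Python < 3.9 wraps subscripts in ast.Index / ast.ExtSlice; from 3.9 the slice
# expression (possibly an ast.Tuple) is stored directly, so both layouts are handled.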
if ((sys.version_info < (3, 9)) and isinstance(node.slice, ast.Index)):
slc = node.slice.value
if (not isinstance(slc, ast.Tuple)):
result_slice = [slc]
elif ((sys.version_info < (3, 9)) and isinstance(node.slice, ast.ExtSlice)):
slc = tuple(node.slice.dims)
else:
slc = node.slice
if (result_slice is None):
if isinstance(slc, ast.Tuple):
slices = slc.elts
elif isinstance(slc, tuple):
slices = slc
else:
slices = [slc]
result_slice = []
for s in slices:
if isinstance(s, ast.Slice):
result_slice.append((s.lower, s.upper, s.step))
elif ((sys.version_info < (3, 9)) and isinstance(s, ast.Index)):
result_slice.append(s.value)
else:
result_slice.append(s)
if without_array:
return result_slice
else:
return (rname(node.value), result_slice) |
def bool_flag(s):
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
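# Intended as an argparse type, e.g. parser.add_argument('--cuda', type=bool_flag):
# accepts on/true/1 and off/false/0, case-insensitively.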
if (s.lower() in FALSY_STRINGS):
return False
elif (s.lower() in TRUTHY_STRINGS):
return True
else:
raise argparse.ArgumentTypeError('invalid value for a boolean flag') |
def register_bdd_context(name, dirname, split, class_names=BDD_SEM):
DatasetCatalog.register(name, (lambda : load_bdd_instances(name, dirname, split, class_names)))
MetadataCatalog.get(name).set(stuff_classes=class_names, dirname=dirname, split=split, ignore_label=[255], thing_dataset_id_to_contiguous_id={}, class_offset=0, keep_sem_bgd=False) |
def deps_from_tsv(infile, limit=None):
res = []
for (i, d) in enumerate(csv.DictReader(open(infile), delimiter='\t')):
if ((limit is not None) and (i >= limit)):
break
res.append({x: (int(y) if y.isdigit() else y) for (x, y) in d.items()})
return res |
class PathAlgebra(CombinatorialFreeModule):
Element = PathAlgebraElement
def __init__(self, k, P, order='negdegrevlex'):
from sage.categories.graded_algebras_with_basis import GradedAlgebrasWithBasis
self._quiver = P.quiver()
self._semigroup = P
self._ordstr = order
super().__init__(k, self._semigroup, prefix='', category=GradedAlgebrasWithBasis(k), bracket=False)
self._assign_names(self._semigroup.variable_names())
def order_string(self):
return self._ordstr
@cached_method
def gens(self):
return tuple((self.gen(i) for i in range(self.ngens())))
@cached_method
def arrows(self):
return tuple((self._from_dict({index: self.base_ring().one()}, remove_zeros=False) for index in self._semigroup.arrows()))
@cached_method
def idempotents(self):
return tuple((self._from_dict({index: self.base_ring().one()}, remove_zeros=False) for index in self._semigroup.idempotents()))
@cached_method
def gen(self, i):
return self._from_dict({self._semigroup.gen(i): self.base_ring().one()}, remove_zeros=False)
def ngens(self):
return self._semigroup.ngens()
def _element_constructor_(self, x):
from sage.quivers.paths import QuiverPath
if (isinstance(x, PathAlgebraElement) and isinstance(x.parent(), PathAlgebra)):
result = {}
coeffs = x.monomial_coefficients()
for key in coeffs:
result[self._semigroup(key)] = coeffs[key]
return self.element_class(self, result)
if isinstance(x, QuiverPath):
return self.element_class(self, {x: self.base_ring().one()})
if (x in self.base_ring()):
return (self.one() * x)
if isinstance(x, (tuple, list, str)):
return self.element_class(self, {self._semigroup(x): self.base_ring().one()})
if isinstance(x, dict):
return self.element_class(self, x)
return super()._element_constructor_(x)
def _coerce_map_from_(self, other):
if (isinstance(other, PathAlgebra) and self._base.has_coerce_map_from(other._base)):
OQ = other._quiver
SQ = self._quiver
SQE = self._semigroup._sorted_edges
if (all(((v in SQ) for v in OQ.vertex_iterator())) and all(((e in SQE) for e in other._semigroup._sorted_edges))):
return True
if self._semigroup.has_coerce_map_from(other):
return True
return self._base.has_coerce_map_from(other)
def _repr_(self):
return 'Path algebra of {0} over {1}'.format(self._quiver, self._base)
def _repr_monomial(self, data):
arrows = self.variable_names()
return '*'.join((arrows[n] for n in data))
def _latex_monomial(self, data):
arrows = self.variable_names()
return '\\cdot '.join((arrows[n] for n in data))
@cached_method
def one(self):
one = self.base_ring().one()
D = {index: one for index in self._semigroup.idempotents()}
return self._from_dict(D)
def quiver(self):
return self._quiver
def semigroup(self):
return self._semigroup
def degree_on_basis(self, x):
return x.degree()
def sum(self, iter_of_elements):
return sum(iter_of_elements, self.zero())
def homogeneous_component(self, n):
basis = []
for v in self._semigroup._quiver:
basis.extend(self._semigroup.iter_paths_by_length_and_startpoint(n, v))
M = CombinatorialFreeModule(self._base, basis, prefix='', bracket=False)
M._name = 'Free module spanned by {0}'.format(basis)
return M
__getitem__ = homogeneous_component
def homogeneous_components(self):
result = []
i = 0
while True:
c = self.homogeneous_component(i)
if (not c.dimension()):
break
result.append(c)
i += 1
return result |
@pytest.mark.gpu
def test_relu():
_config()
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
(a << A[i])
(o >> out[i])
o = (a if (a > dace.float16(0)) else dace.float16(0))
return out
A = np.random.rand(20).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
out = sdfg(A=A, N=20)
assert np.allclose(out, np.maximum(A, 0)) |
def yaml_load(filename):
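# safe_load avoids arbitrary object construction; plain yaml.load without an
# explicit Loader is deprecated since PyYAML 5.1.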
with open(filename, 'r') as f:
yaml_data = yaml.safe_load(f)
return yaml_data |
def add_eval_lm_args(parser):
group = parser.add_argument_group('LM Evaluation')
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig()) |
def test_record_int32():
t = RecordType([NumpyType('int32')], None)
assert (str(parser.parse(str(t))) == str(t)) |
def require_version_core(requirement):
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(requirement, hint) |
def groupby_first_item(lst):
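# Group rows by their first item; a single trailing element is unwrapped so
# (k, v) rows yield {k: [v, ...]} rather than {k: [(v,), ...]}.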
groups = defaultdict(list)
for (first, *rest) in lst:
rest = (rest[0] if (len(rest) == 1) else rest)
groups[first].append(rest)
return groups |
def test_example_config_file():
parser = ConfigParser()
parser.read('orvara/tests/config.ini')
assert (len(parser.items('data_paths')) == 8)
assert (len(parser.items('mcmc_settings')) == 6)
assert (parser.getint('mcmc_settings', 'nthreads') == 1)
assert parser.getboolean('mcmc_settings', 'use_epoch_astrometry') |
def get_all_epoch():
d = get_ckpt_dir()
names = (os.listdir(d) if os.path.exists(d) else [])
if (len(names) == 0):
return [0]
epochs = [int(name.split('.')[0]) for name in names]
return epochs |
def _get_samples(cp, size, signed=True):
for i in range(_sample_count(cp, size)):
(yield _get_sample(cp, size, i, signed)) |
class BertPlain(nn.Module):
def __init__(self, num_tokens, num_labels, dropout):
super().__init__()
self.bert = BertModel.from_pretrained('bert-base-cased')
self.bert.resize_token_embeddings(num_tokens)
self.dropout = nn.Dropout(dropout)
self.classifier = nn.Linear(self.bert.config.hidden_size, num_labels)
def forward(self, input_ids, attention_mask, mention_pos_idx, labels=None):
outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
tok_embed = outputs[0]
(bsz, mtok, dim) = tok_embed.shape
tok_embed_flat = tok_embed.reshape((- 1), dim)
men_idx = ((torch.arange(bsz).to(tok_embed.device) * mtok) + mention_pos_idx)
men_embed = tok_embed_flat[men_idx]
pooled_output = self.dropout(men_embed)
logits = self.classifier(pooled_output)
return logits |
def conv_bn(inp, oup, stride):
return nn.Sequential(nn.Conv2d(inp, oup, 3, stride, 1, bias=False), SynchronizedBatchNorm2d(oup), nn.ReLU6(inplace=True)) |
def main(args):
int_indices = None
if (args.intfeat is not None):
infile = json.load(open(args.intfeat, 'r'))
int_indices = infile['indices']
if (args.model_type == 'cln'):
cln_model = torch.load(args.model_path)
print('cln model loaded from', args.model_path)
elif (args.model_type == 'xgboost'):
cln_model = CLNModel(None, 0, 1, 1, args.model_path, args.model_path, (- 1), True, True, int_indices, args.nfeat, args.nlabels, [], negate=False)
cln_model = cln_model.cuda()
else:
raise SystemExit('unknown model_type: {}'.format(args.model_type))
st = (args.start_cid if (args.start_cid != (- 1)) else 0)
ed = (args.end_cid if (args.end_cid != (- 1)) else cln_model.last_cid)
attack_model = ILPModel(cln_model, st, ed, int_indices, args.nfeat, args.default_lo)
if args.int_var:
attack_model.grb.setParam('IntFeasTol', 1e-09)
if args.no_timeout:
attack_model.grb.setParam('TimeLimit', GRB.INFINITY)
if (args.monotonicity != None):
monotone_index_list = eval(args.monotonicity)
monotone_direction = eval(args.monotonicity_dir)
for (i, fi) in enumerate(monotone_index_list):
direction = monotone_direction[i]
attack_model.global_attack(st, ed, 'monotonicity', [fi], direction)
elif (args.stability != None):
stable_index_list = eval(args.stability)
stable_threshold = args.stability_th
for (i, fi) in enumerate(stable_index_list):
attack_model.global_attack(st, ed, 'stability', [fi], stable_threshold)
elif (args.eps != None):
featmax = np.loadtxt(args.featmax, delimiter=',', usecols=list(range(args.nfeat))).astype(np.float32)
constant = args.C
eps = args.eps
maxdiff = (eps * constant)
attack_model.global_attack(st, ed, 'eps', list(range(args.nfeat)), eps, maxdiff, featmax)
elif (args.lowcost != None):
lowcost_dict = eval(args.lowcost)
confidence_threshold = args.lowcost_th
cutoff = 0.5
for (lowcost_index, bounds) in lowcost_dict.items():
(lower, upper) = bounds
attack_model.global_attack(st, ed, 'lowcost', [lowcost_index], lower, upper, cutoff, confidence_threshold)
elif (args.redundancy != None):
lowcost_array = eval(args.redundancy)
confidence_threshold = args.lowcost_th
cutoff = 0.5
for lowcost_dict in lowcost_array:
fi_list = []
lower_list = []
upper_list = []
for (lowcost_index, bounds) in lowcost_dict.items():
(lower, upper) = bounds
fi_list.append(lowcost_index)
lower_list.append(lower)
upper_list.append(upper)
attack_model.global_attack(st, ed, 'redundancy', fi_list, lower_list, upper_list, cutoff, confidence_threshold)
return |
def locate_model(name):
if os.path.exists(name):
return name
elif (('/' not in name) and ('.' not in name)):
import nltk.data
try:
nltk_loc = nltk.data.find(f'models/{name}')
return nltk_loc.path
except LookupError as e:
arg = e.args[0].replace('nltk.download', 'benepar.download')
raise LookupError(arg)
raise LookupError("Can't find {}".format(name)) |
class Sets(Category_singleton):
def super_categories(self):
return [SetsWithPartialMaps()]
def _call_(self, X, enumerated_set=False):
if (enumerated_set and (type(X) in (tuple, list, range))):
from sage.categories.enumerated_sets import EnumeratedSets
return EnumeratedSets()(X)
from sage.sets.set import Set
return Set(X)
def example(self, choice=None):
if (choice is None):
from sage.categories.examples.sets_cat import PrimeNumbers
return PrimeNumbers()
elif (choice == 'inherits'):
from sage.categories.examples.sets_cat import PrimeNumbers_Inherits
return PrimeNumbers_Inherits()
elif (choice == 'facade'):
from sage.categories.examples.sets_cat import PrimeNumbers_Facade
return PrimeNumbers_Facade()
elif (choice == 'wrapper'):
from sage.categories.examples.sets_cat import PrimeNumbers_Wrapper
return PrimeNumbers_Wrapper()
else:
raise ValueError('unknown choice')
class SubcategoryMethods():
@cached_method
def CartesianProducts(self):
return CartesianProductsCategory.category_of(self)
@cached_method
def Subquotients(self):
return SubquotientsCategory.category_of(self)
@cached_method
def Quotients(self):
return QuotientsCategory.category_of(self)
@cached_method
def Subobjects(self):
return SubobjectsCategory.category_of(self)
@cached_method
def IsomorphicObjects(self):
return IsomorphicObjectsCategory.category_of(self)
@cached_method
def Topological(self):
from sage.categories.topological_spaces import TopologicalSpacesCategory
return TopologicalSpacesCategory.category_of(self)
@cached_method
def Metric(self):
from sage.categories.metric_spaces import MetricSpacesCategory
return MetricSpacesCategory.category_of(self)
@cached_method
def Algebras(self, base_ring):
from sage.categories.rings import Rings
assert ((base_ring in Rings()) or (isinstance(base_ring, Category) and base_ring.is_subcategory(Rings())))
return AlgebrasCategory.category_of(self, base_ring)
@cached_method
def Finite(self):
return self._with_axiom('Finite')
@cached_method
def Infinite(self):
return self._with_axiom('Infinite')
@cached_method
def Enumerated(self):
return self._with_axiom('Enumerated')
def Facade(self):
return self._with_axiom('Facade')
class ParentMethods():
@lazy_attribute
def _element_constructor_(self):
if hasattr(self, 'element_class'):
return self._element_constructor_from_element_class
else:
return NotImplemented
def _element_constructor_from_element_class(self, *args, **keywords):
return self.element_class(self, *args, **keywords)
def is_parent_of(self, element):
from sage.structure.element import parent
return (parent(element) == self)
@abstract_method
def __contains__(self, x):
@cached_method
def an_element(self):
return self._an_element_()
def _test_an_element(self, **options):
tester = self._tester(**options)
try:
an_element = self.an_element()
except EmptySetError:
return
tester.assertIn(an_element, self, 'self.an_element() is not in self')
if self.is_parent_of(an_element):
tester.assertEqual(self(an_element), an_element, 'element construction is not idempotent')
else:
try:
rebuilt_element = self(an_element)
except NotImplementedError:
tester.info("\n The set doesn't seem to implement __call__; skipping test of construction idempotency")
else:
tester.assertEqual(rebuilt_element, an_element, 'element construction is not idempotent')
def _test_elements(self, tester=None, **options):
is_sub_testsuite = (tester is not None)
tester = self._tester(tester=tester, **options)
try:
an_element = self.an_element()
except EmptySetError:
return
tester.info('\n Running the test suite of self.an_element()')
TestSuite(an_element).run(verbose=tester._verbose, prefix=(tester._prefix + ' '), raise_on_failure=is_sub_testsuite)
tester.info((tester._prefix + ' '), newline=False)
def _test_elements_eq_reflexive(self, **options):
tester = self._tester(**options)
S = (list(tester.some_elements()) + [None, 0])
for x in S:
tester.assertEqual(x, x)
def _test_elements_eq_symmetric(self, **options):
tester = self._tester(**options)
S = (list(tester.some_elements()) + [None, 0])
from sage.misc.misc import some_tuples
for (x, y) in some_tuples(S, 2, tester._max_runs):
tester.assertEqual((x == y), (y == x), (LazyFormat('non symmetric equality: %s but %s') % (print_compare(x, y), print_compare(y, x))))
def _test_elements_eq_transitive(self, **options):
tester = self._tester(**options)
S = list(tester.some_elements())
n = max(tester._max_runs, 8)
if (((len(S) + 2) ** 3) <= n):
S = (list(S) + [None, 0])
else:
from random import sample
from sage.rings.integer import Integer
S = (sample(S, (Integer(n).nth_root(3, truncate_mode=1)[0] - 2)) + [None, 0])
for x in S:
for y in S:
if (not (x == y)):
continue
for z in S:
if (not (y == z)):
continue
tester.assertEqual(x, z, (LazyFormat('non transitive equality:\n%s and %s but %s') % (print_compare(x, y), print_compare(y, z), print_compare(x, z))))
def _test_elements_neq(self, **options):
tester = self._tester(**options)
S = (list(tester.some_elements()) + [None, 0])
from sage.misc.misc import some_tuples
for (x, y) in some_tuples(S, 2, tester._max_runs):
tester.assertNotEqual((x == y), (x != y), (LazyFormat('__eq__ and __ne__ inconsistency:\n %s == %s returns %s but %s != %s returns %s') % (x, y, (x == y), x, y, (x != y))))
def some_elements(self):
try:
return [self.an_element()]
except EmptySetError:
return []
def _test_some_elements(self, **options):
tester = self._tester(**options)
elements = self.some_elements()
for x in elements:
tester.assertIn(x, self, (LazyFormat('the object %s in self.some_elements() is not in self') % (x,)))
def _test_cardinality(self, **options):
try:
cardinality = self.cardinality()
except (AttributeError, NotImplementedError):
return
from sage.structure.element import parent
from sage.rings.infinity import Infinity
from sage.rings.integer_ring import ZZ
tester = self._tester(**options)
tester.assertTrue(((cardinality is Infinity) or (parent(cardinality) is ZZ)), 'the output of the method cardinality must either be a Sage integer or infinity. Not {}.'.format(type(cardinality)))
def construction(self):
return None
def _test_construction(self, **options):
tester = self._tester(**options)
FO = self.construction()
if (FO is None):
return
tester.assertEqual(FO[0](FO[1]), self, "the object's construction does not recreate this object")
CartesianProduct = CartesianProduct
def cartesian_product(*parents, **kwargs):
category = kwargs.pop('category', None)
extra_category = kwargs.pop('extra_category', None)
category = (category or cartesian_product.category_from_parents(parents))
if extra_category:
if isinstance(category, (list, tuple)):
category = (tuple(category) + (extra_category,))
else:
category = (category & extra_category)
return parents[0].CartesianProduct(parents, category=category, **kwargs)
def algebra(self, base_ring, category=None, **kwds):
if (category is None):
category = self.category()
from sage.categories.semigroups import Semigroups
from sage.categories.commutative_additive_semigroups import CommutativeAdditiveSemigroups
if (category.is_subcategory(Semigroups()) and category.is_subcategory(CommutativeAdditiveSemigroups())):
raise TypeError(' `S = {}` is both an additive and a multiplicative semigroup.\nConstructing its algebra is ambiguous.\nPlease use, e.g., S.algebra(QQ, category=Semigroups())'.format(self))
from sage.categories.groups import Groups
from sage.categories.additive_groups import AdditiveGroups
from sage.algebras.group_algebra import GroupAlgebra_class
algebra_category = category.Algebras(base_ring)
if (category.is_subcategory(Groups()) or category.is_subcategory(AdditiveGroups())):
from sage.categories.modules_with_basis import ModulesWithBasis
if (self not in ModulesWithBasis):
if ('prefix' not in kwds):
kwds['prefix'] = ''
if ('bracket' not in kwds):
kwds['bracket'] = False
result = GroupAlgebra_class(base_ring, self, category=algebra_category, **kwds)
result.__doc__ = Sets.ParentMethods.algebra.__doc__
return result
def _sympy_(self):
from sage.interfaces.sympy_wrapper import SageSet
from sage.interfaces.sympy import sympy_init
sympy_init()
return SageSet(self)
class ElementMethods():
_dummy_attribute = None
def cartesian_product(*elements):
from sage.structure.element import parent, Element
assert all((isinstance(element, Element) for element in elements))
parents = [parent(element) for element in elements]
return cartesian_product(parents)._cartesian_product_of_elements(elements)
class MorphismMethods():
@abstract_method(optional=True)
def __invert__(self):
def is_injective(self):
if (self.domain().cardinality() <= 1):
return True
if (self.domain().cardinality() > self.codomain().cardinality()):
return False
raise NotImplementedError
def image(self, domain_subset=None):
D = self.domain()
if (D is None):
raise ValueError('this map became defunct by garbage collection')
if ((domain_subset is None) or (domain_subset == D)):
try:
if self.is_surjective():
return D
except NotImplementedError:
pass
domain_subset = D
from sage.sets.set import Set_base
from sage.sets.image_set import ImageSubobject, ImageSet
if isinstance(domain_subset, Set_base):
cls = ImageSet
else:
cls = ImageSubobject
return cls(self, domain_subset)
Enumerated = LazyImport('sage.categories.enumerated_sets', 'EnumeratedSets', at_startup=True)
Finite = LazyImport('sage.categories.finite_sets', 'FiniteSets', at_startup=True)
Topological = LazyImport('sage.categories.topological_spaces', 'TopologicalSpaces', 'Topological', at_startup=True)
Metric = LazyImport('sage.categories.metric_spaces', 'MetricSpaces', 'Metric', at_startup=True)
from sage.categories.facade_sets import FacadeSets as Facade
class Infinite(CategoryWithAxiom):
class ParentMethods():
def is_finite(self):
return False
def is_empty(self):
return False
def cardinality(self):
from sage.rings.infinity import infinity
return infinity
class Subquotients(SubquotientsCategory):
class ParentMethods():
def _repr_(self):
return ('A subquotient of %s' % self.ambient())
@abstract_method
def ambient(self):
@abstract_method
def lift(self, x):
@abstract_method
def retract(self, x):
class ElementMethods():
def lift(self):
return self.parent().lift(self)
class Quotients(QuotientsCategory):
class ParentMethods():
def _repr_(self):
return 'A quotient of {}'.format(self.ambient())
def _an_element_(self):
return self.retract(self.ambient().an_element())
class Subobjects(SubobjectsCategory):
class ParentMethods():
def _repr_(self):
return 'A subobject of {}'.format(self.ambient())
class IsomorphicObjects(IsomorphicObjectsCategory):
class ParentMethods():
def _repr_(self):
return ('The image by some isomorphism of %s' % self.ambient())
class CartesianProducts(CartesianProductsCategory):
def extra_super_categories(self):
return [Sets()]
def example(self):
from .finite_enumerated_sets import FiniteEnumeratedSets
from .infinite_enumerated_sets import InfiniteEnumeratedSets
from .cartesian_product import cartesian_product
S1 = Sets().example()
S2 = InfiniteEnumeratedSets().example()
S3 = FiniteEnumeratedSets().example()
return cartesian_product([S1, S2, S3])
class ParentMethods():
def __iter__(self):
factors = list(self.cartesian_factors())
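# If any factor past the first is infinite, enumerate diagonally via cantor_product;
# otherwise spin the factors like an odometer, with the last factor fastest.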
if any(((f not in Sets().Finite()) for f in factors[1:])):
from sage.misc.mrange import cantor_product
for t in cantor_product(*factors):
(yield self._cartesian_product_of_elements(t))
return
wheels = [iter(f) for f in factors]
try:
digits = [next(it) for it in wheels]
except StopIteration:
return
while True:
(yield self._cartesian_product_of_elements(digits))
for i in range((len(digits) - 1), (- 1), (- 1)):
try:
digits[i] = next(wheels[i])
break
except StopIteration:
wheels[i] = iter(factors[i])
try:
digits[i] = next(wheels[i])
except StopIteration:
return
else:
break
@cached_method
def an_element(self):
return self._cartesian_product_of_elements((s.an_element() for s in self._sets))
def is_empty(self):
return any((c.is_empty() for c in self.cartesian_factors()))
def is_finite(self):
f = self.cartesian_factors()
try:
test = any((c.is_empty() for c in f))
except (AttributeError, NotImplementedError):
pass
else:
if test:
return test
return all((c.is_finite() for c in f))
def cardinality(self):
f = self.cartesian_factors()
try:
is_empty = any((c.is_empty() for c in f))
except (AttributeError, NotImplementedError):
pass
else:
if is_empty:
from sage.rings.integer_ring import ZZ
return ZZ.zero()
elif any(((c in Sets().Infinite()) for c in f)):
from sage.rings.infinity import Infinity
return Infinity
from sage.misc.misc_c import prod
return prod((c.cardinality() for c in f))
def random_element(self, *args):
return self._cartesian_product_of_elements((c.random_element(*args) for c in self.cartesian_factors()))
@abstract_method
def _sets_keys(self):
@abstract_method
def cartesian_factors(self):
@abstract_method
def cartesian_projection(self, i):
def construction(self):
return (cartesian_product, self.cartesian_factors())
@abstract_method
def _cartesian_product_of_elements(self, elements):
def _sympy_(self):
from sympy import ProductSet
from sage.interfaces.sympy import sympy_init
sympy_init()
return ProductSet(*self.cartesian_factors())
class ElementMethods():
def cartesian_projection(self, i):
return self.parent().cartesian_projection(i)(self)
def cartesian_factors(self):
return tuple((self.cartesian_projection(i) for i in self.parent()._sets_keys()))
class Algebras(AlgebrasCategory):
def extra_super_categories(self):
from sage.categories.modules_with_basis import ModulesWithBasis
return [ModulesWithBasis(self.base_ring())]
class ParentMethods():
def construction(self):
from sage.categories.algebra_functor import GroupAlgebraFunctor, AlgebraFunctor
try:
group = self.group()
except AttributeError:
return (AlgebraFunctor(self.base_ring()), self.basis().keys())
return (GroupAlgebraFunctor(group), self.base_ring())
def _repr_(self):
if hasattr(self, '_name'):
return (self._name + ' over {}'.format(self.base_ring()))
else:
return 'Algebra of {} over {}'.format(self.basis().keys(), self.base_ring())
class WithRealizations(WithRealizationsCategory):
def extra_super_categories(self):
return [Sets().Facade()]
def example(self, base_ring=None, set=None):
from sage.rings.rational_field import QQ
from sage.sets.set import Set
if (base_ring is None):
base_ring = QQ
if (set is None):
set = Set([1, 2, 3])
from sage.categories.examples.with_realizations import SubsetAlgebra
return SubsetAlgebra(base_ring, set)
class ParentMethods():
def _test_with_realizations(self, **options):
tester = self._tester(**options)
for R in self.realizations():
tester.assertIn(R, self.Realizations())
@lazy_attribute
def _realizations(self):
return []
def _register_realization(self, realization):
assert (realization.realization_of() is self)
self._realizations.append(realization)
def inject_shorthands(self, shorthands=None, verbose=True):
from sage.misc.misc import inject_variable
if (shorthands == 'all'):
shorthands = getattr(self, '_shorthands_all', None)
if (shorthands is None):
shorthands = getattr(self, '_shorthands', None)
if (shorthands is None):
raise NotImplementedError('no shorthands defined for {}'.format(self))
for shorthand in shorthands:
realization = getattr(self, shorthand)()
if verbose:
print('Defining {} as shorthand for {}'.format(shorthand, realization))
inject_variable(shorthand, realization, warn=False)
@abstract_method(optional=True)
def a_realization(self):
def realizations(self):
return self._realizations
def facade_for(self):
return self.realizations()
class Realizations(Category_realization_of_parent):
def super_categories(self):
return [Sets().Realizations()]
def _an_element_(self):
return self.a_realization().an_element()
def __contains__(self, x):
return any(((x in realization) for realization in self.realizations()))
class Realizations(RealizationsCategory):
class ParentMethods():
def __init_extra__(self):
self.realization_of()._register_realization(self)
@cached_method
def realization_of(self):
for category in self.categories():
if isinstance(category, Category_realization_of_parent):
return category.base()
def _realization_name(self):
return self.__class__.__base__.__name__.split('.')[(- 1)]
def _repr_(self):
return '{} in the realization {}'.format(self.realization_of(), self._realization_name()) |
def create_optimizer(opt, model):
optimizer = find_optimizer_using_name(opt.optimizer)
instance = optimizer(model)
return instance |