def pod_labels(app: AppDef, role_idx: int, role: Role, replica_id: int, coscheduler_name: Optional[str], app_id: str) -> Dict[(str, str)]:
labels = object_labels(app, app_id)
    pod_labels = {
        LABEL_VERSION: torchx.__version__,
        LABEL_APP_NAME: app.name,
        LABEL_ROLE_INDEX: str(role_idx),
        LABEL_ROLE_NAME: role.name,
        LABEL_REPLICA_ID: str(replica_id),
    }
if (coscheduler_name is not None):
pod_group = ((app_id + '-pg') + str(role_idx))
pod_labels.update({'pod-group.scheduling.sigs.k8s.io': pod_group})
labels.update(pod_labels)
return labels |
class GmetricHandler(Handler):
def __init__(self, config=None):
Handler.__init__(self, config)
if (gmetric is None):
logging.error('Failed to load gmetric module')
return
self.socket = None
self.host = self.config['host']
self.port = int(self.config['port'])
self.protocol = self.config['protocol']
if (not self.protocol):
self.protocol = 'udp'
self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
def get_default_config_help(self):
config = super(GmetricHandler, self).get_default_config_help()
config.update({'host': 'Hostname', 'port': 'Port', 'protocol': 'udp or tcp'})
return config
def get_default_config(self):
config = super(GmetricHandler, self).get_default_config()
config.update({'host': 'localhost', 'port': 8651, 'protocol': 'udp'})
return config
def __del__(self):
self._close()
def process(self, metric):
self._send(metric)
def _send(self, metric):
metric_name = self.get_name_from_path(metric.path)
tmax = '60'
dmax = '0'
slope = 'both'
metric_type = 'double'
units = ''
group = ''
self.gmetric.send(metric_name, metric.value, metric_type, units, slope, tmax, dmax, group)
def _close(self):
self.gmetric = None |
def test_dsl_async_cmd_error_throws_with_save_true():
cmd = get_cmd('tests/testfiles/cmds/exitwitherr.sh', 'tests\\testfiles\\cmds\\exitwitherr.bat')
context = Context({'cmds': {'run': cmd, 'save': True}})
step = AsyncCmdStep('blah', context)
with pytest.raises(MultiError):
step.run_step()
out = context['cmdOut']
assert (len(out) == 1)
result = out[0]
assert (result.returncode == 1)
assert (not result.stdout)
assert (result.stderr == 'arb err here') |
@add_start_docstrings('The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.', POOLFORMER_START_DOCSTRING)
class PoolFormerModel(PoolFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.encoder = PoolFormerEncoder(config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, BaseModelOutputWithNoAttention)]:
output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
if (pixel_values is None):
raise ValueError('You have to specify pixel_values')
encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
if (not return_dict):
return ((sequence_output, None) + encoder_outputs[1:])
return BaseModelOutputWithNoAttention(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states) |
def create_random_square_matrix(n, is_hermitian=False, min_eival=1.0, max_eival=1.0, minabs_eival=0.0, seed=(- 1)):
dtype = torch.float64
eivals = torch.linspace(min_eival, max_eival, n, dtype=dtype)
idx = (eivals.abs() < minabs_eival)
eivals[idx] = (torch.sign(eivals[idx]) * minabs_eival)
eivals = torch.diag_embed(eivals)
if (seed > 0):
torch.manual_seed(seed)
if is_hermitian:
eivecs = create_random_ortho_matrix(n, seed=seed)
mat = torch.matmul(torch.matmul(eivecs.transpose((- 2), (- 1)), eivals), eivecs)
mat = ((mat + mat.transpose((- 2), (- 1))) * 0.5)
return mat
else:
a = torch.randn((n, n), dtype=dtype)
a = (a / a.norm(dim=(- 2), keepdim=True))
return torch.matmul(torch.matmul(a.inverse(), eivals), a) |
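# Usage sketch for create_random_square_matrix above (assumes a recent torch is
# in scope, as in the surrounding module). For a non-Hermitian matrix the
# prescribed eigenvalues should be recovered up to ordering and numerical error,
# since A = P^{-1} D P is similar to the diagonal matrix D.
mat = create_random_square_matrix(4, is_hermitian=False, min_eival=0.5, max_eival=2.0, seed=123)
eigvals = torch.linalg.eigvals(mat)
print(sorted(eigvals.real.tolist()))  # approximately [0.5, 1.0, 1.5, 2.0] |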
def test_compat_runner_args():
cfg = ConfigDict(dict(total_epochs=12))
with pytest.warns(None) as record:
cfg = compat_runner_args(cfg)
assert (len(record) == 1)
assert ('runner' in record.list[0].message.args[0])
assert ('runner' in cfg)
assert (cfg.runner.type == 'EpochBasedRunner')
assert (cfg.runner.max_epochs == cfg.total_epochs) |
def check_multilayer_graph_consistency(G_intralayer, G_interlayer, layer_vec, model, m_t, T, N=None, Nt=None):
    if (G_intralayer.is_directed() != G_interlayer.is_directed()):
        warnings.warn('Intralayer graph is {}, but Interlayer graph is {}.'.format(('directed' if G_intralayer.is_directed() else 'undirected'), ('directed' if G_interlayer.is_directed() else 'undirected')), RuntimeWarning)
    rules = [
        ((T > 1),
         'Graph must have multiple layers'),
        ((G_interlayer.vcount() == G_intralayer.vcount()),
         'Inter-layer and Intra-layer graphs must be of the same size'),
        ((len(layer_vec) == G_intralayer.vcount()),
         'Layer membership vector must have length matching graph size'),
        (all(((m > 0) for m in m_t)),
         'All layers of graph must contain edges'),
        (all(((layer_vec[e.source] == layer_vec[e.target]) for e in G_intralayer.es)),
         'Intralayer graph should not contain edges across layers'),
        (((model != 'temporal') or (G_interlayer.ecount() == (N * (T - 1)))),
         'Interlayer temporal graph must contain (nodes per layer) * (number of layers - 1) edges'),
        (((model != 'temporal') or (((G_interlayer.vcount() % T) == 0) and ((G_intralayer.vcount() % T) == 0))),
         'Vertex count of a temporal graph should be a multiple of the number of layers'),
        (((model != 'temporal') or all(((nt == N) for nt in Nt))),
         'Temporal networks must have the same number of nodes in every layer'),
        (((model != 'multilevel') or all(((nt > 0) for nt in Nt))),
         'All layers of a multilevel graph must be consecutive and nonempty'),
        (((model != 'multiplex') or all(((nt == N) for nt in Nt))),
         'Multiplex networks must have the same number of nodes in every layer'),
        (((model != 'multiplex') or (G_interlayer.ecount() == ((N * T) * (T - 1)))),
         'Multiplex interlayer networks must contain edges between all pairs of layers'),
    ]
    (checks, messages) = zip(*rules)
    if (not all(checks)):
        raise ValueError(('Input graph is malformed\n' + '\n'.join((m for (c, m) in zip(checks, messages) if (not c))))) |
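# Hypothetical usage sketch (assumes python-igraph): a temporal network with
# T=2 layers of N=2 nodes each; m_t lists the intralayer edge count per layer.
import igraph as ig
G_intra = ig.Graph(n=4, edges=[(0, 1), (2, 3)])  # edges stay within a layer
G_inter = ig.Graph(n=4, edges=[(0, 2), (1, 3)])  # each node linked to its copy in the next layer
check_multilayer_graph_consistency(G_intra, G_inter, layer_vec=[0, 0, 1, 1], model='temporal', m_t=[1, 1], T=2, N=2, Nt=[2, 2])  # passes silently |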
class Layer_param():
def __init__(self, name='', type='', top=(), bottom=()):
self.param = pb.LayerParameter()
self.name = self.param.name = name
self.type = self.param.type = type
self.top = self.param.top
self.top.extend(top)
self.bottom = self.param.bottom
self.bottom.extend(bottom)
def fc_param(self, num_output, weight_filler='xavier', bias_filler='constant', has_bias=True):
if (self.type != 'InnerProduct'):
raise TypeError('the layer type must be InnerProduct if you want set fc param')
fc_param = pb.InnerProductParameter()
fc_param.num_output = num_output
fc_param.weight_filler.type = weight_filler
fc_param.bias_term = has_bias
if has_bias:
fc_param.bias_filler.type = bias_filler
self.param.inner_product_param.CopyFrom(fc_param)
def conv_param(self, num_output, kernel_size, stride=1, pad=(0,), weight_filler_type='xavier', bias_filler_type='constant', bias_term=True, dilation=None, groups=None):
if (self.type not in ['Convolution', 'Deconvolution']):
raise TypeError('the layer type must be Convolution or Deconvolution if you want set conv param')
conv_param = pb.ConvolutionParameter()
conv_param.num_output = num_output
conv_param.kernel_size.extend(pair_reduce(kernel_size))
conv_param.stride.extend(pair_reduce(stride))
conv_param.pad.extend(pair_reduce(pad))
conv_param.bias_term = bias_term
conv_param.weight_filler.type = weight_filler_type
if bias_term:
conv_param.bias_filler.type = bias_filler_type
if dilation:
conv_param.dilation.extend(pair_reduce(dilation))
if groups:
conv_param.group = groups
self.param.convolution_param.CopyFrom(conv_param)
def pool_param(self, type='MAX', kernel_size=2, stride=2, pad=None, ceil_mode=False):
pool_param = pb.PoolingParameter()
pool_param.pool = pool_param.PoolMethod.Value(type)
pool_param.kernel_size = pair_process(kernel_size)
pool_param.stride = pair_process(stride)
pool_param.ceil_mode = ceil_mode
if pad:
if isinstance(pad, tuple):
pool_param.pad_h = pad[0]
pool_param.pad_w = pad[1]
else:
pool_param.pad = pad
self.param.pooling_param.CopyFrom(pool_param)
def batch_norm_param(self, use_global_stats=0, moving_average_fraction=None, eps=None):
bn_param = pb.BatchNormParameter()
bn_param.use_global_stats = use_global_stats
if moving_average_fraction:
bn_param.moving_average_fraction = moving_average_fraction
if eps:
bn_param.eps = eps
self.param.batch_norm_param.CopyFrom(bn_param)
def upsample_param(self, size=None, scale_factor=None):
upsample_param = pb.UpsampleParameter()
if scale_factor:
if isinstance(scale_factor, int):
upsample_param.scale = scale_factor
else:
upsample_param.scale_h = scale_factor[0]
upsample_param.scale_w = scale_factor[1]
if size:
if isinstance(size, int):
upsample_param.upsample_h = size
else:
upsample_param.upsample_h = size[0]
upsample_param.upsample_w = size[1]
self.param.upsample_param.CopyFrom(upsample_param)
def interp_param(self, size=None, scale_factor=None):
interp_param = pb.InterpParameter()
if scale_factor:
if isinstance(scale_factor, int):
interp_param.zoom_factor = scale_factor
if size:
print('size:', size)
interp_param.height = size[0]
interp_param.width = size[1]
self.param.interp_param.CopyFrom(interp_param)
def add_data(self, *args):
del self.param.blobs[:]
for data in args:
new_blob = self.param.blobs.add()
for dim in data.shape:
new_blob.shape.dim.append(dim)
new_blob.data.extend(data.flatten().astype(float))
def set_params_by_dict(self, dic):
pass
def copy_from(self, layer_param):
pass |
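# Usage sketch for Layer_param (assumes the caffe_pb2 module this file imports
# as ``pb``): describe a 10-way fully connected layer and print its prototxt.
fc = Layer_param(name='fc1', type='InnerProduct', top=('fc1',), bottom=('data',))
fc.fc_param(num_output=10, weight_filler='xavier')
print(fc.param)  # serialized LayerParameter text |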
def perturb_texts(args, texts, mask_model, mask_tokenizer, base_tokenizer, ceil_pct=False):
outputs = []
for i in tqdm(range(0, len(texts), args.chunk_size), desc='Applying perturbations'):
outputs.extend(perturb_texts_(args, texts[i:(i + args.chunk_size)], mask_model, mask_tokenizer, base_tokenizer, ceil_pct=ceil_pct))
return outputs |
def test_model_nodes(model):
node = Input(model, 'test')
assert (model.nodes['test'] is node)
with pytest.raises(KeyError):
model.nodes['invalid']
all_nodes = [node for node in model.nodes]
assert (all_nodes == [node])
del model.nodes['test']
all_nodes = [node for node in model.nodes]
assert (all_nodes == []) |
def train(args, sess, epoch, learning_rate_placeholder, phase_train_placeholder, global_step, loss, train_op, summary_op, summary_writer, learning_rate_schedule_file):
batch_number = 0
lr = args.learning_rate
while (batch_number < args.epoch_size):
start_time = time.time()
print('Running forward pass on sampled images: ', end='')
feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder: True}
start_time = time.time()
(total_err, reg_err, _, step) = sess.run([loss['total_loss'], loss['total_reg'], train_op, global_step], feed_dict=feed_dict)
duration = (time.time() - start_time)
        if (args.loss_type == 'lmcl'):
print(('Epoch: [%d][%d/%d]\tTime %.3f\tTotal lmcl Loss %2.3f\tReg Loss %2.3f, lr %2.5f' % (epoch, (batch_number + 1), args.epoch_size, duration, total_err, reg_err, lr)))
else:
print(('Epoch: [%d][%d/%d]\tTime %.3f\tTotal softmax Loss %2.3f\tReg Loss %2.3f, lr %2.5f' % (epoch, (batch_number + 1), args.epoch_size, duration, total_err, reg_err, lr)))
batch_number += 1
return step |
def test_dsl_async_cmd_dict_input_sequence_with_cwd_interpolate():
if is_windows:
cmd = cmd_path.joinpath('pwd.bat').as_posix()
else:
cmd = 'testfiles/cmds/pwd.sh'
context = Context({'k1': 'tests', 'cmds': {'run': cmd, 'save': True, 'cwd': '{k1}'}})
step = AsyncCmdStep('blah', context)
step.run_step()
out = context['cmdOut']
assert (len(out) == 1)
result = out[0]
assert (result.returncode == 0)
assert Path(result.stdout).samefile('./tests')
assert (not result.stderr) |
def get_pca_latent(args, latents, text, degrees, exp_name):
save_dir = 'text_pca/'
if (not os.path.exists(save_dir)):
os.makedirs(save_dir, exist_ok=True)
text_latents = []
new_latents = [torch.zeros_like(l) for l in latents]
for i in range(latents[0].shape[0]):
new_tensor = torch.zeros(0, 512).to('cuda')
for j in range(len(new_latents)):
new_tensor = torch.cat((new_tensor, latents[j][i].reshape(1, (- 1))), dim=0)
solver = RPCA_gpu(new_tensor)
(new_tensor_lowrank, _) = solver.fit()
results = torch.pca_lowrank(new_tensor_lowrank, q=4, center=False)
tmp = torch.matmul(results[0], torch.diag(results[1]))
tmp = torch.matmul(tmp, torch.transpose(results[2], 0, 1))
for j in range(len(new_latents)):
new_latents[j][i] = tmp[j]
g_ema = Generator(args.stylegan_size, 512, 8)
g_ema.load_state_dict(torch.load(args.ckpt)['g_ema'], strict=False)
g_ema.eval()
g_ema = g_ema.cuda()
for (i, degree) in enumerate(degrees):
text_latents.append(torch.unsqueeze(new_latents[i], 0))
(img_gen, _) = g_ema([torch.unsqueeze(new_latents[i], 0)], input_is_latent=True, randomize_noise=False, input_is_stylespace=args.work_in_stylespace)
torchvision.utils.save_image(img_gen, f'{save_dir}/{exp_name}_{degree}.png', normalize=True, range=((- 1), 1))
return text_latents |
class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date', 'expiration_date', 'rotation_duration', 'approval'])):
def to_dict(self):
return {'name': self.name, 'kid': self.kid, 'service': self.service, 'jwk': self.jwk, 'metadata': self.metadata, 'created_date': self.created_date, 'expiration_date': self.expiration_date, 'rotation_duration': self.rotation_duration, 'approval': (self.approval.to_dict() if (self.approval is not None) else None)} |
def screening_cost_analyzer(cost_miss_case, cost_false_pos, prevalence, sensitivity, specificity, population=10000, decimal=3):
warnings.warn('NOTE: When calculating costs, be sure to consult experts in health policy or related fields. Costs should encompass more than only monetary costs, like relative costs (regret, disappointment, stigma, disutility, etc.)', UserWarning)
    if ((sensitivity > 1) or (specificity > 1) or (prevalence > 1)):
        raise ValueError('sensitivity/specificity/prevalence cannot be greater than 1')
disease = (population * prevalence)
disease_free = (population - disease)
nt_cost = (disease * cost_miss_case)
pc_nt_cost = (nt_cost / population)
t_cost = (disease_free * cost_false_pos)
pc_t_cost = (t_cost / population)
cost_b = (disease - (disease * sensitivity))
cost_c = (disease_free - (disease_free * specificity))
ct_cost = ((cost_miss_case * cost_b) + (cost_false_pos * cost_c))
pc_ct_cost = (ct_cost / population)
print('')
print('Treat everyone as Test-Negative')
print('Total relative cost:\t\t', round(nt_cost, decimal))
print('Per Capita relative cost:\t', round(pc_nt_cost, decimal))
print('')
print('Treat everyone as Test-Positive')
print('Total relative cost:\t\t', round(t_cost, decimal))
print('Per Capita relative cost:\t', round(pc_t_cost, decimal))
print('')
print('Treating by Screening Test')
print('Total relative cost:\t\t', round(ct_cost, decimal))
print('Per Capita relative cost:\t', round(pc_ct_cost, decimal))
print('')
if (pc_ct_cost > pc_nt_cost):
print('Screening program is more costly than treating everyone as a test-negative')
if (pc_nt_cost > pc_ct_cost > pc_t_cost):
print('Screening program is cost efficient')
if ((pc_t_cost < pc_ct_cost) and (pc_t_cost < pc_nt_cost)):
print('Treating everyone as test-positive is least costly')
print('\n') |
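# Illustrative call with made-up numbers: 2% prevalence, a test with 90%
# sensitivity and 95% specificity, and a missed case costing 100x a false positive.
screening_cost_analyzer(cost_miss_case=100, cost_false_pos=1, prevalence=0.02, sensitivity=0.90, specificity=0.95) |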
def _blas_info():
config = np.__config__
if hasattr(config, 'blas_ilp64_opt_info'):
blas_info = config.blas_ilp64_opt_info
elif hasattr(config, 'blas_opt_info'):
blas_info = config.blas_opt_info
else:
blas_info = {}
    def _in_libraries(name):
        return any(((name in lib) for lib in blas_info.get('libraries', [])))
    if (getattr(config, 'mkl_info', False) or _in_libraries('mkl')):
        blas = 'INTEL MKL'
    elif (getattr(config, 'openblas_info', False) or _in_libraries('openblas')):
        blas = 'OPENBLAS'
blas = 'OPENBLAS'
elif ('-Wl,Accelerate' in blas_info.get('extra_link_args', [])):
blas = 'Accelerate'
else:
blas = 'Generic'
return blas |
class TestTurnBattleMagicFunc(EvenniaTest):
def setUp(self):
super(TestTurnBattleMagicFunc, self).setUp()
self.testroom = create_object(DefaultRoom, key='Test Room')
self.attacker = create_object(tb_magic.TBMagicCharacter, key='Attacker', location=self.testroom)
self.defender = create_object(tb_magic.TBMagicCharacter, key='Defender', location=self.testroom)
self.joiner = create_object(tb_magic.TBMagicCharacter, key='Joiner', location=self.testroom)
def tearDown(self):
super(TestTurnBattleMagicFunc, self).tearDown()
self.attacker.delete()
self.defender.delete()
self.joiner.delete()
self.testroom.delete()
self.turnhandler.stop()
def test_tbbasicfunc(self):
initiative = tb_magic.roll_init(self.attacker)
self.assertTrue(((initiative >= 0) and (initiative <= 1000)))
attack_roll = tb_magic.get_attack(self.attacker, self.defender)
self.assertTrue(((attack_roll >= 0) and (attack_roll <= 100)))
defense_roll = tb_magic.get_defense(self.attacker, self.defender)
self.assertTrue((defense_roll == 50))
damage_roll = tb_magic.get_damage(self.attacker, self.defender)
self.assertTrue(((damage_roll >= 15) and (damage_roll <= 25)))
self.defender.db.hp = 10
tb_magic.apply_damage(self.defender, 3)
self.assertTrue((self.defender.db.hp == 7))
self.defender.db.hp = 40
tb_magic.resolve_attack(self.attacker, self.defender, attack_value=20, defense_value=10)
self.assertTrue((self.defender.db.hp < 40))
self.attacker.db.Combat_attribute = True
tb_magic.combat_cleanup(self.attacker)
self.assertFalse(self.attacker.db.combat_attribute)
self.assertFalse(tb_magic.is_in_combat(self.attacker))
self.attacker.location.scripts.add(tb_magic.TBMagicTurnHandler)
self.turnhandler = self.attacker.db.combat_TurnHandler
self.assertTrue(self.attacker.db.combat_TurnHandler)
self.turnhandler.interval = 10000
self.turnhandler.db.fighters = [self.attacker, self.defender]
self.turnhandler.db.turn = 0
self.assertTrue(tb_magic.is_turn(self.attacker))
self.attacker.db.Combat_ActionsLeft = 1
tb_magic.spend_action(self.attacker, 1, action_name='Test')
self.assertTrue((self.attacker.db.Combat_ActionsLeft == 0))
self.assertTrue((self.attacker.db.Combat_LastAction == 'Test'))
self.attacker.db.Combat_ActionsLeft = 983
self.turnhandler.initialize_for_combat(self.attacker)
self.assertTrue((self.attacker.db.Combat_ActionsLeft == 0))
self.assertTrue((self.attacker.db.Combat_LastAction == 'null'))
self.defender.db.Combat_ActionsLeft = 0
self.turnhandler.start_turn(self.defender)
self.assertTrue((self.defender.db.Combat_ActionsLeft == 1))
self.turnhandler.db.fighters = [self.attacker, self.defender]
self.turnhandler.db.turn = 0
self.turnhandler.next_turn()
self.assertTrue((self.turnhandler.db.turn == 1))
self.turnhandler.db.fighters = [self.attacker, self.defender]
self.turnhandler.db.turn = 0
self.attacker.db.Combat_ActionsLeft = 0
self.turnhandler.turn_end_check(self.attacker)
self.assertTrue((self.turnhandler.db.turn == 1))
self.turnhandler.db.fighters = [self.attacker, self.defender]
self.turnhandler.db.turn = 0
self.turnhandler.join_fight(self.joiner)
self.assertTrue((self.turnhandler.db.turn == 1))
self.assertTrue((self.turnhandler.db.fighters == [self.joiner, self.attacker, self.defender])) |
class Parser():
auto_post_parse = True
def __init__(self, file_name, strict=False, encoding='utf-8'):
self.file_name = Path(file_name).resolve()
self.strict = strict
self.encoding = encoding
self.dir = Path(file_name).parent
self.dispatcher = self._build_dispatch_map()
self.lines = self.create_line_generator()
self.line = None
self.values = None
def create_line_generator(self):
if (self.file_name.suffix == '.gz'):
gz = gzip.open(str(self.file_name), mode='rt', encoding=self.encoding)
for line in gz.readlines():
(yield line)
gz.close()
else:
file = open(str(self.file_name), mode='r', encoding=self.encoding)
for line in file:
(yield line)
file.close()
def next_line(self):
self.line = next(self.lines)
self.values = self.line.split()
def consume_line(self):
self.line = None
self.values = None
def parse(self):
try:
while True:
if (not self.line):
self.next_line()
if ((self.line[0] == '#') or (len(self.values) < 2)):
self.consume_line()
continue
self.dispatcher.get(self.values[0], self.parse_fallback)()
except StopIteration:
pass
if self.auto_post_parse:
self.post_parse()
def post_parse(self):
pass
    @auto_consume
def parse_fallback(self):
if self.strict:
raise PywavefrontException(("Unimplemented OBJ format statement '%s' on line '%s'" % (self.values[0], self.line.rstrip())))
else:
logger.warning(("Unimplemented OBJ format statement '%s' on line '%s'" % (self.values[0], self.line.rstrip())))
def _build_dispatch_map(self):
return {'_'.join(a.split('_')[1:]): getattr(self, a) for a in dir(self) if a.startswith('parse_')} |
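# Minimal sketch of the dispatch convention above: any method named
# ``parse_<keyword>`` is auto-registered by _build_dispatch_map for lines
# starting with <keyword>. Hypothetical subclass for "v x y z" vertex lines;
# each handler must consume (or advance past) its line.
class VertexOnlyParser(Parser):
    def __init__(self, file_name):
        self.vertices = []
        super().__init__(file_name)
    def parse_v(self):
        self.vertices.append(tuple(map(float, self.values[1:4])))
        self.consume_line()
# p = VertexOnlyParser('model.obj'); p.parse(); p.vertices -> [(x, y, z), ...] |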
class TestCallbacks(KazooTestCase):
def test_async_result_callbacks_are_always_called(self):
callback_mock = Mock()
async_result = self.client.handler.async_result()
async_result.rawlink(callback_mock)
self.client.stop()
async_result.set_exception(Exception('Anything that throws an exception'))
assert (callback_mock.call_count > 0) |
class bytes():
    @overload
    def __init__(self) -> None:
        ...
    @overload
    def __init__(self, x: object) -> None:
        ...
    def __add__(self, x: bytes) -> bytes:
        ...
    def __mul__(self, x: int) -> bytes:
        ...
    def __rmul__(self, x: int) -> bytes:
        ...
    def __eq__(self, x: object) -> bool:
        ...
    def __ne__(self, x: object) -> bool:
        ...
    @overload
    def __getitem__(self, i: int) -> int:
        ...
    @overload
    def __getitem__(self, i: slice) -> bytes:
        ...
def join(self, x: Iterable[object]) -> bytes:
...
def decode(self, x: str=..., y: str=...) -> str:
... |
@pytest.mark.parametrize(
    'repo, commit_parser, translator, commit_messages, prerelease, expected_new_version',
    xdist_sort_hack([
        (lazy_fixture(repo_fixture_name), lazy_fixture(parser_fixture_name), translator, commit_messages, prerelease, expected_new_version)
        for ((repo_fixture_name, parser_fixture_name, translator), values) in {
            ('repo_with_git_flow_angular_commits', 'default_angular_parser', VersionTranslator(prerelease_token='alpha')): [
                *((commits, True, '1.2.0-alpha.2') for commits in ([], ['uninteresting'])),
                *((commits, False, '1.2.0') for commits in ([], ['uninteresting'])),
                (ANGULAR_COMMITS_PATCH, False, '1.2.0'),
                (ANGULAR_COMMITS_PATCH, True, '1.2.0-alpha.3'),
                (ANGULAR_COMMITS_MINOR, False, '1.2.0'),
                (ANGULAR_COMMITS_MINOR, True, '1.2.0-alpha.3'),
                (ANGULAR_COMMITS_MAJOR, False, '2.0.0'),
                (ANGULAR_COMMITS_MAJOR, True, '2.0.0-alpha.1'),
            ],
            ('repo_with_git_flow_and_release_channels_angular_commits', 'default_angular_parser', VersionTranslator(prerelease_token='alpha')): [
                *((commits, True, '1.1.0-alpha.3') for commits in ([], ['uninteresting'])),
                *((commits, False, '1.1.0') for commits in ([], ['uninteresting'])),
                (ANGULAR_COMMITS_PATCH, False, '1.1.0'),
                (ANGULAR_COMMITS_PATCH, True, '1.1.0-alpha.4'),
                (ANGULAR_COMMITS_MINOR, False, '1.1.0'),
                (ANGULAR_COMMITS_MINOR, True, '1.1.0-alpha.4'),
                (ANGULAR_COMMITS_MAJOR, False, '2.0.0'),
                (ANGULAR_COMMITS_MAJOR, True, '2.0.0-alpha.1'),
            ],
        }.items()
        for (commit_messages, prerelease, expected_new_version) in values
    ]))
@pytest.mark.parametrize('major_on_zero', [True, False])
def test_algorithm_no_zero_dot_versions_angular(repo, file_in_repo, commit_parser, translator, commit_messages, prerelease, expected_new_version, major_on_zero):
for commit_message in commit_messages:
add_text_to_file(repo, file_in_repo)
repo.git.commit(m=commit_message)
new_version = next_version(repo, translator, commit_parser, prerelease, major_on_zero)
assert (new_version == Version.parse(expected_new_version, prerelease_token=translator.prerelease_token)) |
def compute_labels_xs(font_scale: float, text_sizes: List[OpenCVTextSizes]) -> List[int]:
label_widths = np.array([t[0][0] for t in text_sizes])
relative_shifts = np.insert(label_widths[:(- 1)], 0, 0)
relative_shifts_with_gaps = (relative_shifts + (font_scale * LABEL_TEXT_RELATIVE_GAP_X))
label_shifts = np.cumsum(relative_shifts_with_gaps)
return [int(np.rint(shift)) for shift in label_shifts] |
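# Usage sketch for compute_labels_xs: cv2.getTextSize returns
# ((width, height), baseline) and only the widths are used here.
# LABEL_TEXT_RELATIVE_GAP_X is a module constant assumed from the surrounding code.
sizes = [((40, 12), 3), ((55, 12), 3), ((30, 12), 3)]
print(compute_labels_xs(font_scale=1.0, text_sizes=sizes))  # x offset per label, left to right |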
def _update_incomplete_dict(self_val: Value, pairs: Sequence[KVPair], ctx: CallContext, varname: Optional[VarnameWithOrigin]) -> ImplReturn:
self_pairs = kv_pairs_from_mapping(self_val, ctx.visitor)
if isinstance(self_pairs, CanAssignError):
ctx.show_error('self is not a mapping', arg='self', detail=str(self_pairs))
return ImplReturn(KnownValue(None))
pairs = [*self_pairs, *pairs]
if (varname is not None):
no_return_unless = Constraint(varname, ConstraintType.is_value_object, True, DictIncompleteValue((self_val.typ if isinstance(self_val, TypedValue) else dict), pairs))
return ImplReturn(KnownValue(None), no_return_unless=no_return_unless)
return ImplReturn(KnownValue(None)) |
def _get_files(*, verbose: bool, ignored: Optional[List[pathlib.Path]] = None) -> Iterator[pathlib.Path]:
filenames = subprocess.run(['git', 'ls-files', '--cached', '--others', '--exclude-standard', '-z'], stdout=subprocess.PIPE, text=True, check=True)
all_ignored = (ignored or [])
all_ignored.append(pathlib.Path('tests', 'unit', 'scripts', 'importer_sample', 'chrome'))
for filename in filenames.stdout.split('\x00'):
path = pathlib.Path(filename)
is_ignored = any((((path == p) or (p in path.parents)) for p in all_ignored))
if ((not filename) or (path.suffix in BINARY_EXTS) or is_ignored):
continue
try:
with tokenize.open(path):
pass
except SyntaxError as e:
utils.print_col('{} - maybe {} should be added to BINARY_EXTS?'.format(str(e).capitalize(), path.suffix), 'yellow')
continue
if verbose:
print(path)
(yield path) |
def restoreVariableFromDisk(name):
logging.info('Recovering variable...')
t0 = time()
val = None
with open(((folder_pickles + name) + '.pickle'), 'rb') as handle:
val = pickle.load(handle)
t1 = time()
logging.info('Variable recovered. Time: {}m'.format(((t1 - t0) / 60)))
return val |
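# A matching save helper (sketch): the same module-level ``folder_pickles``
# prefix used by restoreVariableFromDisk is assumed here.
def saveVariableToDisk(val, name):
    logging.info('Saving variable...')
    with open(((folder_pickles + name) + '.pickle'), 'wb') as handle:
        pickle.dump(val, handle, protocol=pickle.HIGHEST_PROTOCOL) |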
class MeanInterbuildingDistance():
def __init__(self, gdf, spatial_weights, unique_id, order=3, verbose=True):
self.gdf = gdf
self.sw = spatial_weights
self.id = gdf[unique_id]
data = gdf.set_index(unique_id).geometry
results_list = []
adj_list = spatial_weights.to_adjlist(drop_islands=True)
adj_list['weight'] = data.loc[adj_list.focal].reset_index(drop=True).distance(data.loc[adj_list.neighbor].reset_index(drop=True)).values
graph = nx.from_pandas_edgelist(adj_list, source='focal', target='neighbor', edge_attr='weight')
(print('Computing mean interbuilding distances...') if verbose else None)
for uid in tqdm(data.index, total=data.shape[0], disable=(not verbose)):
try:
sub = nx.ego_graph(graph, uid, radius=order)
results_list.append(np.nanmean([x[(- 1)] for x in list(sub.edges.data('weight'))]))
except Exception:
results_list.append(np.nan)
self.series = pd.Series(results_list, index=gdf.index) |
class Cell(nn.Module):
def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
self.reduction = reduction
self.primitives = self.PRIMITIVES[('primitives_reduct' if reduction else 'primitives_normal')]
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
self._bns = nn.ModuleList()
edge_index = 0
for i in range(self._steps):
for j in range((2 + i)):
stride = (2 if (reduction and (j < 2)) else 1)
op = MixedOp(C, stride, self.primitives[edge_index])
self._ops.append(op)
edge_index += 1
def forward(self, s0, s1, weights, drop_prob=0.0):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
offset = 0
for i in range(self._steps):
if ((drop_prob > 0.0) and self.training):
s = sum((drop_path(self._ops[(offset + j)](h, weights[(offset + j)]), drop_prob) for (j, h) in enumerate(states)))
else:
s = sum((self._ops[(offset + j)](h, weights[(offset + j)]) for (j, h) in enumerate(states)))
offset += len(states)
states.append(s)
return torch.cat(states[(- self._multiplier):], dim=1) |
class TestLowRankCrossNet(unittest.TestCase):
def test_cross_net_numercial_forward(self) -> None:
torch.manual_seed(0)
batch_size = 3
num_layers = 20
in_features = 2
input = torch.randn(batch_size, in_features)
dcn = LowRankCrossNet(in_features=in_features, num_layers=num_layers, low_rank=10)
output = dcn(input)
expected_output = torch.Tensor([[(- 11.5), (- 3.4863)], [(- 0.2742), (- 0.333)], [249.6694, 117.3466]])
self.assertTrue(torch.allclose(output, expected_output, rtol=0.0001, atol=0.0001))
def test_fx_script_cross_net(self) -> None:
input = torch.randn(2, 3)
dcn = LowRankCrossNet(in_features=3, num_layers=2, low_rank=2)
dcn(input)
gm = GraphModule(dcn, Tracer().trace(dcn))
torch.jit.script(gm) |
def simxReadVisionSensor(clientID, sensorHandle, operationMode):
detectionState = ct.c_ubyte()
auxValues = ct.POINTER(ct.c_float)()
auxValuesCount = ct.POINTER(ct.c_int)()
ret = c_ReadVisionSensor(clientID, sensorHandle, ct.byref(detectionState), ct.byref(auxValues), ct.byref(auxValuesCount), operationMode)
auxValues2 = []
if (ret == 0):
s = 0
for i in range(auxValuesCount[0]):
auxValues2.append(auxValues[s:(s + auxValuesCount[(i + 1)])])
s += auxValuesCount[(i + 1)]
c_ReleaseBuffer(auxValues)
c_ReleaseBuffer(auxValuesCount)
return (ret, bool((detectionState.value != 0)), auxValues2) |
def bravyi_kitaev_tree(operator, n_qubits=None):
from openfermion.utils import count_qubits
if (n_qubits is None):
n_qubits = count_qubits(operator)
if (n_qubits < count_qubits(operator)):
raise ValueError('Invalid number of qubits specified.')
fenwick_tree = FenwickTree(n_qubits)
transformed_terms = (_transform_operator_term(term=term, coefficient=operator.terms[term], fenwick_tree=fenwick_tree) for term in operator.terms)
return inline_sum(summands=transformed_terms, seed=QubitOperator()) |
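# Usage sketch (assumes openfermion is installed): map a simple fermionic
# hopping term onto qubits via the Fenwick-tree encoding.
from openfermion.ops import FermionOperator
hop = FermionOperator('1^ 0') + FermionOperator('0^ 1')
print(bravyi_kitaev_tree(hop, n_qubits=2))  # a sum of Pauli strings |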
class Solution(object):
def numIslands2(self, m, n, positions):
ans = []
islands = Union()
for p in map(tuple, positions):
islands.add(p)
for dp in ((0, 1), (0, (- 1)), (1, 0), ((- 1), 0)):
q = ((p[0] + dp[0]), (p[1] + dp[1]))
if (q in islands.id):
islands.unite(p, q)
ans += [islands.count]
return ans |
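# Example (relies on the union-find ``Union`` helper this solution assumes):
# adding land at (0,0), then adjacent (0,1) keeps one island, then isolated
# (2,2) makes two:
# Solution().numIslands2(3, 3, [[0, 0], [0, 1], [2, 2]])  -> [1, 1, 2] |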
def main(config):
assert (config.num_neighbors == (- 1)), 'KNN features is deprecated due to PrepWrap'
model = ResidualGatedGCNModel(config, dtypeFloat, dtypeLong)
if (('sparse' in config) and (config.sparse is not None)):
model = wrap_sparse(model, config.sparse)
model = PrepWrapResidualGatedGCNModel(model)
net = nn.DataParallel(model)
if torch.cuda.is_available():
net.cuda()
print(net)
nb_param = 0
for param in net.parameters():
nb_param += np.prod(list(param.data.size()))
print('Number of parameters:', nb_param)
log_dir = f'./logs/{config.expt_name}/'
os.makedirs(log_dir, exist_ok=True)
json.dump(config, open(f'{log_dir}/config.json', 'w'), indent=4)
writer = SummaryWriter(log_dir)
num_nodes = config.num_nodes
num_neighbors = config.num_neighbors
max_epochs = config.max_epochs
val_every = config.val_every
test_every = config.test_every
batch_size = config.batch_size
batches_per_epoch = config.batches_per_epoch
accumulation_steps = config.accumulation_steps
learning_rate = config.learning_rate
decay_rate = config.decay_rate
val_loss_old = 1000000.0
best_pred_tour_len = 1000000.0
best_val_loss = 1000000.0
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
print(optimizer)
dataset = DataReader(config.num_nodes, config.num_neighbors, config.batch_size, config.train_filepath, config.train_filepath_solution, do_prep=False)
if ('resume_from_dir' in config):
if torch.cuda.is_available():
checkpoint = torch.load(os.path.join(config.resume_from_dir, 'last_train_checkpoint.tar'))
else:
checkpoint = torch.load(os.path.join(config.resume_from_dir, 'last_train_checkpoint.tar'), map_location='cpu')
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
train_loss = checkpoint['train_loss']
val_loss = checkpoint['val_loss']
for param_group in optimizer.param_groups:
learning_rate = param_group['lr']
print(f'Loaded checkpoint from epoch {epoch}')
else:
epoch = (- 1)
epoch_bar = master_bar(range((epoch + 1), max_epochs))
for epoch in epoch_bar:
writer.add_scalar('learning_rate', learning_rate, epoch)
(train_time, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len) = train_one_epoch(net, optimizer, config, epoch_bar, dataset=dataset)
epoch_bar.write(('t: ' + metrics_to_str(epoch, train_time, learning_rate, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len)))
writer.add_scalar('loss/train_loss', train_loss, epoch)
writer.add_scalar('pred_tour_len/train_pred_tour_len', train_pred_tour_len, epoch)
writer.add_scalar('optimality_gap/train_opt_gap', ((train_pred_tour_len / train_gt_tour_len) - 1), epoch)
if (((epoch % val_every) == 0) or (epoch == (max_epochs - 1))):
(val_time, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len) = test(net, config, epoch_bar, mode='val')
epoch_bar.write(('v: ' + metrics_to_str(epoch, val_time, learning_rate, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len)))
writer.add_scalar('loss/val_loss', val_loss, epoch)
writer.add_scalar('pred_tour_len/val_pred_tour_len', val_pred_tour_len, epoch)
writer.add_scalar('optimality_gap/val_opt_gap', ((val_pred_tour_len / val_gt_tour_len) - 1), epoch)
if (val_pred_tour_len < best_pred_tour_len):
best_pred_tour_len = val_pred_tour_len
torch.save({'epoch': epoch, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'train_loss': train_loss, 'val_loss': val_loss}, (log_dir + 'best_val_tourlen_checkpoint.tar'))
if (val_loss < best_val_loss):
best_val_loss = val_loss
torch.save({'epoch': epoch, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'train_loss': train_loss, 'val_loss': val_loss}, (log_dir + 'best_val_loss_checkpoint.tar'))
if (val_loss > (0.99 * val_loss_old)):
learning_rate /= decay_rate
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
val_loss_old = val_loss
if (((epoch % test_every) == 0) or (epoch == (max_epochs - 1))):
(test_time, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len) = test(net, config, epoch_bar, mode='test')
epoch_bar.write(('T: ' + metrics_to_str(epoch, test_time, learning_rate, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len)))
writer.add_scalar('loss/test_loss', test_loss, epoch)
writer.add_scalar('pred_tour_len/test_pred_tour_len', test_pred_tour_len, epoch)
writer.add_scalar('optimality_gap/test_opt_gap', ((test_pred_tour_len / test_gt_tour_len) - 1), epoch)
torch.save({'epoch': epoch, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'train_loss': train_loss, 'val_loss': val_loss}, (log_dir + 'last_train_checkpoint.tar'))
if ((epoch != 0) and (((epoch % 250) == 0) or (epoch == (max_epochs - 1)))):
torch.save({'epoch': epoch, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'train_loss': train_loss, 'val_loss': val_loss}, (log_dir + f'checkpoint_epoch{epoch}.tar'))
return net |
def test_session_env_lazy_with_nested_env(monkeypatch, gdalenv):
monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'id')
monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'key')
monkeypatch.setenv('AWS_SESSION_TOKEN', 'token')
expected = {'AWS_ACCESS_KEY_ID': 'id', 'AWS_SECRET_ACCESS_KEY': 'key', 'AWS_SESSION_TOKEN': 'token'}
with rasterio.Env() as env_outer:
assert (getenv() == rasterio.env.local._env.options)
for (k, v) in expected.items():
assert (getenv()[k] == v)
with rasterio.Env() as env_inner:
for (k, v) in expected.items():
assert (getenv()[k] == v)
monkeypatch.undo() |
@pytest.mark.parametrize('ndim,dims,valid', [(1, ('dim0',), True), (1, ({'dim0', 'dim1'},), True), (2, ({'dim0', 'dim1'}, 'dim2'), True), ({1, 2}, ({'dim0', None}, 'dim1'), True), (2, ('dim0',), False), ({1, 2}, ({'dim0', 'dim1'},), False), ({1, 2}, ({'dim0', 'dim1'}, 'dim2'), False), (2, ({'dim0', None}, 'dim1'), False)])
def test_ArrayLikeSpec__ndim_matches_dims(ndim, dims, valid):
if valid:
ArrayLikeSpec('foo', 'foo doc', kind='i', ndim=ndim, dims=dims)
else:
message = re.escape(f"Specified ndim '{ndim}' does not match dims {dims}")
with pytest.raises(ValueError, match=message):
ArrayLikeSpec('foo', 'foo doc', kind='i', ndim=ndim, dims=dims) |
class VOCAugDataset(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 21
self.palette = palette.get_voc_palette(self.num_classes)
super(VOCAugDataset, self).__init__(**kwargs)
def _set_files(self):
self.root = os.path.join(self.root, 'VOCdevkit/VOC2012')
file_list = os.path.join(self.root, 'ImageSets/Segmentation', (self.split + '.txt'))
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, 'r'))]
(self.files, self.labels) = list(zip(*file_list))
def _load_data(self, index):
image_path = os.path.join(self.root, self.files[index][1:])
label_path = os.path.join(self.root, self.labels[index][1:])
image = np.asarray(Image.open(image_path), dtype=np.float32)
label = np.asarray(Image.open(label_path), dtype=np.int32)
image_id = self.files[index].split('/')[(- 1)].split('.')[0]
return (image, label, image_id) |
def SDEActWrapper(layer):
(init_fn, apply_fn) = layer
def apply_fun(params, inputs, rng, **kwargs):
(preds, postw, postkl, priorx, priorw, priorkl) = inputs
preds = apply_fn(params, preds, **kwargs)
return (preds, postw, postkl, priorx, priorw, priorkl)
return (init_fn, apply_fun) |
def stc_curve(tl):
    # STC reference contour shape across the 16 third-octave bands (ASTM E413).
    ref_curve = np.array([0, 3, 6, 9, 12, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20])
    top_curve = ref_curve.copy()  # copy so in-place shifts do not mutate the reference
    while True:
        diff = (tl - top_curve)
        residuals = np.clip(diff, np.min(diff), 0)
        res_sum = np.sum(residuals)
        # Stop raising the contour once either E413 rule is violated: total
        # deficiency over 32 dB, or any single-band deficiency over 8 dB.
        if ((res_sum < (- 32)) or np.any((residuals < (- 8)))):
            top_curve -= 1
            break
        top_curve += 1
    return top_curve |
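# Usage sketch: 16 third-octave-band transmission-loss values (125 Hz - 4 kHz).
# The STC rating is read from the fitted contour at the 500 Hz band.
tl = np.array([20, 23, 26, 29, 32, 35, 36, 37, 38, 39, 40, 40, 40, 40, 40, 40])
print(stc_curve(tl))  # contour shifted to the highest position the rules allow |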
def test_sentence_argument_errors(capsys):
def foo(step, foo, bar):
pass
steps = {re.compile('What (.*?) can (.*)'): foo}
config = [{'sentence': 'What FOO can BAR', 'should_match': 'foo', 'with_arguments': [{'foo': 'foooooooo'}, {'bar': 'baaaaaaar'}]}]
expected_returncode = (1, 0)
actual_returncode = matches.test_step_matches(config, steps)
(out, _) = capsys.readouterr()
assert ('Expected argument "foo" with value "foooooooo" does not match value "FOO"' in out)
assert ('Expected argument "bar" with value "baaaaaaar" does not match value "BAR"' in out)
assert (actual_returncode == expected_returncode) |
class AppStateMixin():
def __init__(self) -> None:
self._modules: Dict[(str, torch.nn.Module)] = {}
self._optimizers: Dict[(str, torch.optim.Optimizer)] = {}
self._lr_schedulers: Dict[(str, TLRScheduler)] = {}
self._progress: Dict[(str, Progress)] = {}
self._misc_statefuls: Dict[(str, Any)] = {}
def app_state(self) -> Dict[(str, Any)]:
app_state = {**self.tracked_modules(), **self.tracked_optimizers(), **self.tracked_lr_schedulers(), **self.tracked_progress(), **self.tracked_misc_statefuls()}
return app_state
def tracked_modules(self) -> Dict[(str, torch.nn.Module)]:
return self._modules
def tracked_optimizers(self) -> Dict[(str, torch.optim.Optimizer)]:
return self._optimizers
def tracked_lr_schedulers(self) -> Dict[(str, TLRScheduler)]:
return self._lr_schedulers
def tracked_progress(self) -> Dict[(str, Progress)]:
return self._progress
def tracked_misc_statefuls(self) -> Dict[(str, Any)]:
return self._misc_statefuls
def __getattr__(self, name: str) -> object:
if ('_modules' in self.__dict__):
_modules = self.__dict__['_modules']
if (name in _modules):
return _modules[name]
if ('_optimizers' in self.__dict__):
_optimizers = self.__dict__['_optimizers']
if (name in _optimizers):
return _optimizers[name]
if ('_lr_schedulers' in self.__dict__):
_lr_schedulers = self.__dict__['_lr_schedulers']
if (name in _lr_schedulers):
return _lr_schedulers[name]
if ('_progress' in self.__dict__):
_progress = self.__dict__['_progress']
if (name in _progress):
return _progress[name]
if ('_misc_statefuls' in self.__dict__):
_misc_statefuls = self.__dict__['_misc_statefuls']
if (name in _misc_statefuls):
return _misc_statefuls[name]
return self.__getattribute__(name)
def _update_attr(self, name: str, value: object, tracked_objects: Dict[(str, Any)]) -> None:
if (tracked_objects is None):
raise AttributeError('Please call super().__init__() before setting attributes.')
_remove_from_dicts(name, self.__dict__, self._modules, self._optimizers, self._lr_schedulers, self._progress, self._misc_statefuls)
tracked_objects[name] = value
def __setattr__(self, name: str, value: object) -> None:
if isinstance(value, torch.nn.Module):
self._update_attr(name, value, self.__dict__.get('_modules'))
elif isinstance(value, torch.optim.Optimizer):
self._update_attr(name, value, self.__dict__.get('_optimizers'))
elif isinstance(value, TLRScheduler):
self._update_attr(name, value, self.__dict__.get('_lr_schedulers'))
elif isinstance(value, Progress):
self._update_attr(name, value, self.__dict__.get('_progress'))
elif isinstance(value, Stateful):
self._update_attr(name, value, self.__dict__.get('_misc_statefuls'))
else:
if (value is None):
_remove_from_dicts(name, self.__dict__, self._modules, self._optimizers, self._lr_schedulers, self._misc_statefuls)
super().__setattr__(name, value)
def __delattr__(self, name: str) -> None:
if (name in self._modules):
del self._modules[name]
elif (name in self._optimizers):
del self._optimizers[name]
elif (name in self._lr_schedulers):
del self._lr_schedulers[name]
elif (name in self._progress):
del self._progress[name]
elif (name in self._misc_statefuls):
del self._misc_statefuls[name]
else:
super().__delattr__(name)
def _construct_tracked_optimizers_and_schedulers(self) -> Dict[(str, Union[(torch.optim.Optimizer, FSDPOptimizerWrapper, TLRScheduler)])]:
tracked_optimizers_and_schedulers: Dict[(str, Union[(torch.optim.Optimizer, FSDPOptimizerWrapper, TLRScheduler)])] = {}
tracked_optimizers_and_schedulers.update(self._construct_tracked_optimizers())
for (lr_scheduler_attrib_name, lr_scheduler) in self.tracked_lr_schedulers().items():
if (lr_scheduler_attrib_name in tracked_optimizers_and_schedulers):
_logger.warning(f'Key collision "{lr_scheduler_attrib_name}" detected between LR Scheduler and optimizer attribute names. Please ensure there are no identical attribute names, as they will override each other.')
tracked_optimizers_and_schedulers[lr_scheduler_attrib_name] = lr_scheduler
return tracked_optimizers_and_schedulers
def _construct_tracked_optimizers(self) -> Dict[(str, Union[(torch.optim.Optimizer, FSDPOptimizerWrapper)])]:
fsdp_tracked_optimizers: Dict[(str, FSDPOptimizerWrapper)] = {}
for module in self.tracked_modules().values():
if _is_fsdp_module(module):
optimizer_list = _find_optimizers_for_module(module, self.tracked_optimizers())
for (optim_name, optimizer) in optimizer_list:
fsdp_tracked_optimizers[optim_name] = FSDPOptimizerWrapper(module, optimizer)
tracked_optimizers: Dict[(str, Union[(torch.optim.Optimizer, FSDPOptimizerWrapper)])] = {key: value for (key, value) in self.tracked_optimizers().items() if (key not in fsdp_tracked_optimizers)}
tracked_optimizers.update(fsdp_tracked_optimizers)
return tracked_optimizers |
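# Sketch of the auto-tracking behavior above (assumes torch): __setattr__
# routes modules and optimizers into the tracked dicts, and __getattr__
# resolves them again on attribute access.
class _DemoUnit(AppStateMixin):
    def __init__(self) -> None:
        super().__init__()
        self.model = torch.nn.Linear(4, 2)  # lands in tracked_modules()
        self.optim = torch.optim.SGD(self.model.parameters(), lr=0.1)  # lands in tracked_optimizers()

unit = _DemoUnit()
assert (('model' in unit.tracked_modules()) and ('optim' in unit.tracked_optimizers())) |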
def do_check(squirrel, codes=None, tmin=None, tmax=None, time=None, ignore=[]):
codes_set = set()
for kind in ['waveform', 'channel', 'response']:
if (codes is not None):
codes_pat = codes_patterns_for_kind(to_kind_id(kind), codes)
else:
codes_pat = None
codes_filter = CodesPatternFiltering(codes=codes_pat)
codes_set.update(codes_filter.filter(squirrel.get_codes(kind=kind)))
entries = []
for codes_ in list(sorted(codes_set)):
problems = []
coverage = {}
for kind in ['waveform', 'channel', 'response']:
coverage[kind] = squirrel.get_coverage(kind, codes=[codes_], tmin=(tmin if (tmin is not None) else time), tmax=(tmax if (tmax is not None) else time))
available = [kind for kind in ['waveform', 'channel', 'response'] if (coverage[kind] and any(((cov.total is not None) for cov in coverage[kind])))]
for kind in ['waveform']:
for cov in coverage[kind]:
if any(((count > 1) for (_, count) in cov.changes)):
problems.append(SquirrelCheckProblem(type='p1', symptom=('%s: %s' % (kind, 'duplicates'))))
for kind in ['channel', 'response']:
for cov in coverage[kind]:
if any(((count > 1) for (_, count) in cov.changes)):
problems.append(SquirrelCheckProblem(type='p2', symptom=('%s: %s' % (kind, 'overlapping epochs'))))
if ('waveform' not in available):
problems.append(SquirrelCheckProblem(type='p3', symptom='no waveforms'))
for cw in coverage['waveform']:
for kind in ['channel', 'response']:
ccs = get_matching(coverage[kind], cw)
if (not ccs):
problems.append(SquirrelCheckProblem(type='p4', symptom=('no %s information' % kind)))
elif (len(ccs) > 1):
problems.append(SquirrelCheckProblem(type='p5', symptom=('multiple %s matches (waveform: %g Hz, %s: %s)' % (kind, (1.0 / cw.deltat), kind, ', '.join(((('%g Hz' % (1.0 / cc.deltat)) if cc.deltat else '? Hz') for cc in ccs))))))
if ccs:
cc = ccs[0]
if (cc.deltat and (cc.deltat != cw.deltat)):
problems.append(SquirrelCheckProblem(type='p6', symptom=('sampling rate mismatch (waveform: %g Hz, %s: %g Hz)' % ((1.0 / cw.deltat), kind, (1.0 / cc.deltat)))))
uncovered_spans = list(cw.iter_uncovered_by_combined(cc))
if uncovered_spans:
problems.append(SquirrelCheckProblem(type='p7', symptom=('incompletely covered by %s:' % kind), details=[('%s - %s' % (util.time_to_str(span[0]), util.time_to_str(span[1]))) for span in uncovered_spans]))
entries.append(SquirrelCheckEntry(codes=codes_, available=available, problems=[p for p in problems if (p.type not in ignore)]))
return SquirrelCheck(entries=entries) |
def test_is_transaction_effect_satisfied(chain_state, token_network_address, netting_channel_state):
canonical_identifier = netting_channel_state.canonical_identifier
assert (token_network_address == canonical_identifier.token_network_address)
transaction = ContractSendChannelBatchUnlock(canonical_identifier=canonical_identifier, sender=netting_channel_state.partner_state.address, triggered_by_block_hash=make_block_hash())
state_change = ContractReceiveChannelBatchUnlock(transaction_hash=UNIT_SECRETHASH, canonical_identifier=canonical_identifier, receiver=HOP1, sender=HOP2, locksroot=LOCKSROOT_OF_NO_LOCKS, unlocked_amount=0, returned_tokens=0, block_number=1, block_hash=make_block_hash())
assert (not is_transaction_effect_satisfied(chain_state, transaction, state_change))
state_change = ContractReceiveChannelBatchUnlock(transaction_hash=UNIT_SECRETHASH, canonical_identifier=canonical_identifier, receiver=netting_channel_state.our_state.address, sender=netting_channel_state.partner_state.address, locksroot=LOCKSROOT_OF_NO_LOCKS, unlocked_amount=0, returned_tokens=0, block_number=1, block_hash=make_block_hash())
assert (not is_transaction_effect_satisfied(chain_state, transaction, state_change))
state_change = ContractReceiveChannelBatchUnlock(transaction_hash=UNIT_SECRETHASH, canonical_identifier=canonical_identifier, receiver=netting_channel_state.partner_state.address, sender=netting_channel_state.our_state.address, locksroot=LOCKSROOT_OF_NO_LOCKS, unlocked_amount=0, returned_tokens=0, block_number=1, block_hash=make_block_hash())
assert (not is_transaction_effect_satisfied(chain_state, transaction, state_change))
channel_settled = ContractReceiveChannelSettled(transaction_hash=bytes(32), canonical_identifier=canonical_identifier, our_onchain_locksroot=LOCKSROOT_OF_NO_LOCKS, our_transferred_amount=0, partner_onchain_locksroot=LOCKSROOT_OF_NO_LOCKS, partner_transferred_amount=0, block_number=1, block_hash=make_block_hash())
iteration = state_transition(chain_state=chain_state, state_change=channel_settled)
assert is_transaction_effect_satisfied(iteration.new_state, transaction, state_change) |
class V2VNet(nn.Module):
def __init__(self, input_channels, output_channels):
super(V2VNet, self).__init__()
self.front_layers = nn.Sequential(Basic3DBlock(input_channels, 16, 7), Res3DBlock(16, 32))
self.encoder_decoder = EncoderDecorder()
self.output_layer = nn.Conv3d(32, output_channels, kernel_size=1, stride=1, padding=0)
self._initialize_weights()
def forward(self, x):
x = self.front_layers(x)
x = self.encoder_decoder(x)
x = self.output_layer(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.normal_(m.weight, 0, 0.001)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose3d):
nn.init.normal_(m.weight, 0, 0.001)
nn.init.constant_(m.bias, 0) |
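# Shape sketch (assumes Basic3DBlock/Res3DBlock/EncoderDecorder preserve the
# voxel resolution, as in the original V2V-PoseNet blocks): a volumetric input
# maps to one heatmap channel per keypoint at the same resolution.
net = V2VNet(input_channels=1, output_channels=15)
out = net(torch.randn(2, 1, 64, 64, 64))  # -> torch.Size([2, 15, 64, 64, 64]) |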
class TPLVarHandler(BaseHandler):
async def get(self, tplid):
user = self.current_user
tpl = (await self.db.tpl.get(tplid, fields=('id', 'note', 'userid', 'sitename', 'siteurl', 'variables', 'init_env')))
if (not self.permission(tpl)):
self.evil((+ 5))
(await self.finish('<span class="alert alert-danger"></span>'))
return
if (not tpl['init_env']):
tpl['init_env'] = '{}'
(await self.render('task_new_var.html', tpl=tpl, variables=json.loads(tpl['variables']), init_env=json.loads(tpl['init_env']))) |
def get_path_from_template(path_template: str, path_type: PathType=PathType.AUTO) -> str:
if (path_type == PathType.AUTO):
if (platform.system() == 'Windows'):
path_type = PathType.WINDOWS
elif (platform.system() == 'Linux'):
path_type = PathType.LINUX
else:
raise RuntimeError('Unknown platform')
path_template = path_template.replace('<USERNAME>', get_user_name())
if (path_type == PathType.WINDOWS):
return str(pathlib.PureWindowsPath(path_template))
elif (path_type == PathType.LINUX):
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError('Unknown platform') |
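# Usage sketch (get_user_name is assumed from the surrounding module): expand
# the <USERNAME> placeholder and normalize separators for the requested platform.
print(get_path_from_template('C:/Users/<USERNAME>/datasets', PathType.WINDOWS))  # e.g. C:\Users\<name>\datasets |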
class _TPattern(TestCase):
def setUp(self):
s1 = {'tracknumber': '5/6', 'artist': 'Artist', 'title': 'Title5', '~filename': '/path/to/a.mp3', 'xmltest': '<&>'}
s2 = {'tracknumber': '6', 'artist': 'Artist', 'title': 'Title6', '~filename': '/path/to/b.ogg', 'discnumber': '2', 'unislash': 'foo/bar'}
s3 = {'title': 'test/subdir', 'genre': '/\n/', '~filename': '/one/more/a.flac', 'version': 'Instrumental'}
s4 = {'performer': 'a\nb', 'artist': 'foo\nbar'}
s5 = {'tracknumber': '7/1234', 'artist': 'Artist', 'title': 'Title7', '~filename': '/path/to/e.mp3'}
s6 = {'artist': 'Foo', 'albumartist': 'foo.bar', 'album': 'Best Of', '~filename': '/path/to/f.mp3', 'title': 'The.Final.Word'}
s7 = {'artist': 'un eleve francais', '~filename': '/path/to/g.mp3', 'albumartist': 'Lee "Scratch" Perry', 'album': "The 'only' way!", 'comment': 'Trouble|Strife'}
s8 = {'tracknumber': '7/8', 'artist': 'Artist1\n\nArtist3', 'artistsort': 'SortA1\nSortA2', 'album': 'Album5', 'albumsort': 'SortAlbum5', '~filename': '/path/to/g.mp3', 'xmltest': '<&>'}
if (os.name == 'nt'):
s1['~filename'] = 'C:\\path\\to\\a.mp3'
s2['~filename'] = 'C:\\path\\to\\b.ogg'
s3['~filename'] = 'C:\\one\\more\\a.flac'
s4['~filename'] = 'C:\\path\\to\\a.mp3'
s5['~filename'] = 'C:\\path\\to\\a.mp3'
s6['~filename'] = 'C:\\path\\to\\f.mp3'
s7['~filename'] = 'C:\\path\\to\\g.mp3'
s8['~filename'] = 'C:\\path\\to\\h.mp3'
self.a = AudioFile(s1)
self.b = AudioFile(s2)
self.c = AudioFile(s3)
self.d = AudioFile(s4)
self.e = AudioFile(s5)
self.f = AudioFile(s6)
self.g = AudioFile(s7)
self.h = AudioFile(s8) |
def extract_reactions(reaction_dataset) -> typing.Tuple[(typing.List[Reaction], typing.Set[str], dict)]:
reactions = []
logger.debug('Extracting reactions')
run_through_stats = dict(num_skipped_due_to_multiple_products=0, num_multiple_same_reactants=0, num_multiple_same_products=0, num_overlap_between_reactants_and_products=0, num_skipped_as_already_seen=0, num_total_molecules=0, num_kept=0, num_total=len(reaction_dataset))
all_molecules = set()
reactant_products_tuples_seen = set()
for (reactants, products, bond_changes) in tqdm.tqdm(reaction_dataset, desc='extracting reactions...'):
action_set = uspto_ds.actionset_from_uspto_line(bond_changes)
(reactants, reagents, products) = rdkit_reaction_ops.split_reagents_out_from_reactants_and_products(reactants, products, action_set)
reactants_split = reactants.split('.')
reactants_split = _canonicalise_and_remove_am(reactants_split)
reactants_split_set = frozenset(reactants_split)
if (len(reactants_split) != len(reactants_split_set)):
run_through_stats['num_multiple_same_reactants'] += 1
products_split = products.split('.')
products_split = _canonicalise_and_remove_am(products_split)
products_split_set = frozenset(products_split)
reaction = Reaction(reactants_split_set, products_split_set)
if (len(products_split) != len(products_split_set)):
run_through_stats['num_multiple_same_products'] += 1
if len((products_split_set & reactants_split_set)):
run_through_stats['num_overlap_between_reactants_and_products'] += 1
continue
if (len(products_split_set) > 1):
run_through_stats['num_skipped_due_to_multiple_products'] += 1
continue
if (reaction in reactant_products_tuples_seen):
run_through_stats['num_skipped_as_already_seen'] += 1
continue
reactant_products_tuples_seen.add(reaction)
reactions.append(reaction)
all_molecules.update(reactants_split_set)
all_molecules.update(products_split_set)
run_through_stats['num_kept'] = len(reactions)
run_through_stats['num_total_molecules'] = len(all_molecules)
logger.info(f'''Extracting reactions done:
{tabulate.tabulate([[name, value] for (name, value) in run_through_stats.items()])}''')
logger.debug('Creating tree dict')
num_reactions_before = len(reactions)
reactions = sorted(list(set(reactions)))
num_reactions_after = len(reactions)
logger.info(f'Removing duplicated reactions {(num_reactions_before - num_reactions_after)}. leaving: {num_reactions_after}')
logger.info(f'Number of reactions {len(reactions)}, number of molecules {len(all_molecules)}')
return (reactions, all_molecules, run_through_stats) |
def run_cmdline(*args, **kwds):
saved_stdin = sys.stdin
saved_stdout = sys.stdout
saved_stderr = sys.stderr
stdin_buffer = BytesIO()
stdout_buffer = BytesIO()
stderr_buffer = BytesIO()
new_stdin = sys.stdin = io.TextIOWrapper(stdin_buffer, 'utf-8')
new_stdout = sys.stdout = io.TextIOWrapper(stdout_buffer, 'utf-8')
new_stderr = sys.stderr = io.TextIOWrapper(stderr_buffer, 'utf-8')
new_stdin.write(kwds.get('stdin', ''))
new_stdin.seek(0, 0)
try:
ret = cmdline.main((['pygmentize'] + list(args)))
finally:
sys.stdin = saved_stdin
sys.stdout = saved_stdout
sys.stderr = saved_stderr
new_stdout.flush()
new_stderr.flush()
(out, err) = (stdout_buffer.getvalue(), stderr_buffer.getvalue())
return (ret, _decode_output(out), _decode_output(err)) |
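# Usage sketch: run pygmentize in-process on source fed via stdin (pygmentize
# falls back to reading stdin when no input file is given).
(ret, out, err) = run_cmdline('-l', 'python', '-f', 'text', stdin='print("hi")\n')
assert (ret == 0) |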
def login_with_guest(sa: ServerApp, encrypted_login_request: bytes):
if (sa.guest_encrypt is None):
raise error.NotAuthorizedForActionError
try:
login_request_bytes = sa.guest_encrypt.decrypt(encrypted_login_request)
except cryptography.fernet.InvalidToken:
raise error.NotAuthorizedForActionError
try:
login_request = json.loads(login_request_bytes.decode('utf-8'))
name = login_request['name']
date = datetime.datetime.fromisoformat(login_request['date'])
except (UnicodeDecodeError, json.JSONDecodeError, KeyError, ValueError) as e:
raise error.InvalidActionError(str(e))
if ((_get_now() - date) > datetime.timedelta(days=1)):
raise error.NotAuthorizedForActionError
user: User = User.create(name=f'Guest: {name}')
with sa.session() as session:
session['user-id'] = user.id
return _create_client_side_session(sa, user) |
def mk_TestStructuralTranslator(_StructuralTranslator):
def make_indent(src, nindent):
indent = ' '
for (idx, s) in enumerate(src):
src[idx] = ((nindent * indent) + s)
def get_string(obj):
if isinstance(obj, type):
return obj.__name__
return str(obj)
class TestStructuralTranslator(_StructuralTranslator):
def rtlir_tr_vector_dtype(s, dtype):
return str(dtype)
def rtlir_tr_struct_dtype(s, dtype):
return dtype.get_name()
def rtlir_tr_unpacked_array_type(s, Type):
return ('' if (Type is None) else repr(Type))
def rtlir_tr_port_decls(s, port_decls):
decls = ''
for decl in port_decls:
if decl:
make_indent(decl, 1)
decls += ('\n' + '\n'.join(decl))
return f'''port_decls:{decls}
'''
def rtlir_tr_port_decl(s, id_, Type, array_type, dtype):
if (id_ not in ['clk', 'reset']):
array_type = (repr(Type) if (not array_type) else array_type)
return [f'port_decl: {id_} {array_type}']
else:
return ''
def rtlir_tr_wire_decls(s, wire_decls):
decls = ''
for decl in wire_decls:
make_indent(decl, 1)
decls += ('\n' + '\n'.join(decl))
return f'''wire_decls:{decls}
'''
def rtlir_tr_wire_decl(s, id_, Type, array_type, dtype):
array_type = (repr(Type) if (not array_type) else array_type)
return [f'wire_decl: {id_} {array_type}']
def rtlir_tr_const_decls(s, const_decls):
decls = ''
for decl in const_decls:
if decl:
make_indent(decl, 1)
decls += ('\n' + '\n'.join(decl))
return f'''const_decls:{decls}
'''
def rtlir_tr_const_decl(s, id_, Type, array_type, dtype, value):
array_type = (repr(Type) if (not array_type) else array_type)
return [f'const_decl: {id_} {array_type}']
def rtlir_tr_interface_port_decls(s, port_decls):
decls = [['interface_ports:']]
for decl in port_decls:
make_indent(decl, 1)
decls.append([decl[0]])
return decls
def rtlir_tr_interface_port_decl(s, m, id_, rtype, array_type):
rtype = (repr(rtype) if (not array_type) else array_type)
return [f'interface_port: {id_} {rtype}']
def rtlir_tr_interface_decls(s, ifc_decls):
decls = ''
for decl in ifc_decls:
if decl:
make_indent(decl, 1)
decls += ('\n' + '\n'.join(decl))
return f'''interface_decls:{decls}
'''
def rtlir_tr_interface_decl(s, ifc_id, ifc_rtype, array_type, port_decls):
ifc_rtype = (str(ifc_rtype) if (not array_type) else array_type)
ret = [f'interface_decl: {ifc_id} {ifc_rtype}']
for decl in port_decls:
make_indent(decl, 1)
ret.append(decl[0])
return ret
def rtlir_tr_subcomp_port_decls(s, port_decls):
decls = [['component_ports:']]
for decl in port_decls:
if decl:
make_indent(decl, 1)
decls.append([decl[0]])
return decls
def rtlir_tr_subcomp_port_decl(s, m, c_id, c_rtype, c_array_type, port_id, port_rtype, port_dtype, array_type):
if (port_id not in ['clk', 'reset']):
port_rtype = (repr(port_rtype) if (not array_type) else array_type)
return [f'component_port: {port_id} {port_rtype}']
else:
return ''
def rtlir_tr_subcomp_ifc_port_decls(s, ifc_port_decls):
decls = [['component_ifc_ports:']]
for decl in ifc_port_decls:
if decl:
make_indent(decl, 1)
decls.append([decl[0]])
return decls
def rtlir_tr_subcomp_ifc_port_decl(s, m, c_id, c_rtype, c_array_type, ifc_id, ifc_rtype, ifc_array_type, port_id, port_rtype, port_array_type):
port_rtype = (repr(port_rtype) if (not port_array_type) else port_array_type)
return [f'component_ifc_port: {port_id} {port_rtype}']
def rtlir_tr_subcomp_ifc_decls(s, ifc_decls):
decls = [['component_ifcs:']]
for ifc_decl in ifc_decls:
for decl in ifc_decl:
if decl:
make_indent(decl, 1)
decls.append([decl[0]])
return decls
def rtlir_tr_subcomp_ifc_decl(s, m, c_id, c_rtype, c_array_type, ifc_id, ifc_rtype, ifc_array_type, ports):
ifc_rtype = (repr(ifc_rtype) if (not ifc_array_type) else ifc_array_type)
decls = [[f'component_ifc: {ifc_id} {ifc_rtype}']]
for decl in ports:
if decl:
make_indent(decl, 1)
decls.append([decl[0]])
return decls
def rtlir_tr_subcomp_decls(s, subcomps):
decls = ''
for decl in subcomps:
make_indent(decl, 1)
decls += ('\n' + '\n'.join(decl))
return f'''component_decls:{decls}
'''
def rtlir_tr_subcomp_decl(s, m, c_id, c_rtype, c_array_type, port_conns, ifc_conns):
c_rtype = (str(c_rtype) if (not c_array_type) else c_array_type)
ret = [f'component_decl: {c_id} {c_rtype}']
for port in port_conns:
make_indent(port, 1)
ret.append(port[0])
for ifc in ifc_conns:
make_indent(ifc, 1)
ret.append(ifc[0])
return ret
def rtlir_tr_connections(s, connections):
conns = ''
for conn in connections:
if conn:
make_indent(conn, 1)
conns += ('\n' + '\n'.join(conn))
return f'''connections:{conns}
'''
def rtlir_tr_connection(s, wr, rd):
if (('clk' not in wr) and ('reset' not in wr)):
return [f'connection: {wr} -> {rd}']
def rtlir_tr_bit_selection(s, base_signal, index, status):
return f'BitSel {base_signal} {index}'
def rtlir_tr_part_selection(s, base_signal, start, stop, status):
return f'PartSel {base_signal} {start} {stop}'
def rtlir_tr_port_array_index(s, base_signal, index, status):
return f'PortArrayIdx {base_signal} {index}'
def rtlir_tr_wire_array_index(s, base_signal, index, status):
return f'WireArrayIdx {base_signal} {index}'
def rtlir_tr_const_array_index(s, base_signal, index, status):
return f'ConstArrayIdx {base_signal} {index}'
def rtlir_tr_packed_index(s, base_signal, index, status):
return f'PackedIndex {base_signal} {index}'
def rtlir_tr_interface_array_index(s, base_signal, index, status):
return f'IfcArrayIdx {base_signal} {index}'
def rtlir_tr_component_array_index(s, base_signal, index, status):
return f'CompArrayIdx {base_signal} {index}'
def rtlir_tr_struct_attr(s, base_signal, attr, status):
return f'StructAttr {base_signal} {attr}'
def rtlir_tr_interface_attr(s, base_signal, attr, status):
return f'IfcAttr {base_signal} {attr}'
def rtlir_tr_subcomp_attr(s, base_signal, attr, status):
return f'SubCompAttr {base_signal} {attr}'
def rtlir_tr_current_comp_attr(s, base_signal, attr, status):
return f'CurCompAttr {attr}'
def rtlir_tr_current_comp(s, comp_id, comp_rtype, status):
return ''
def rtlir_tr_var_id(s, var_id):
return var_id
def rtlir_tr_literal_number(s, nbits, value):
return f'Bits{nbits}({int(value)})'
def rtlir_tr_component_unique_name(s, c_rtype):
comp_name = c_rtype.get_name()
comp_params = c_rtype.get_params()
assert comp_name
for (arg_name, arg_value) in comp_params:
assert (arg_name != '')
comp_name += ((('__' + arg_name) + '_') + get_string(arg_value))
return comp_name
return TestStructuralTranslator |
class Scenario(ScenarioGenerator):
def __init__(self):
ScenarioGenerator.__init__(self)
self.naming = 'numerical'
def road(self, **kwargs):
road = xodr.create_road([xodr.Spiral(1e-10, kwargs['road_curvature'], 100), xodr.Arc(kwargs['road_curvature'], 50), xodr.Spiral(kwargs['road_curvature'], 1e-10, 100), xodr.Line(100)], id=0, left_lanes=2, right_lanes=2)
odr = xodr.OpenDrive('myroad')
odr.add_road(road)
odr.adjust_roads_and_lanes()
return odr
def scenario(self, **kwargs):
road = xosc.RoadNetwork(self.road_file)
egoname = 'Ego'
entities = xosc.Entities()
entities.add_scenario_object(egoname, xosc.CatalogReference('VehicleCatalog', 'car_white'))
catalog = xosc.Catalog()
catalog.add_catalog('VehicleCatalog', '../xosc/Catalogs/Vehicles')
init = xosc.Init()
init.add_init_action(egoname, xosc.TeleportAction(xosc.LanePosition(50, 0, (- 2), 0)))
init.add_init_action(egoname, xosc.AbsoluteSpeedAction(kwargs['speed'], xosc.TransitionDynamics(xosc.DynamicsShapes.step, xosc.DynamicsDimension.time, 1)))
event = xosc.Event('my event', xosc.Priority.overwrite)
event.add_action('lane change', xosc.AbsoluteLaneChangeAction((- 1), xosc.TransitionDynamics(xosc.DynamicsShapes.sinusoidal, xosc.DynamicsDimension.time, 4)))
event.add_trigger(xosc.ValueTrigger('start_trigger ', 0, xosc.ConditionEdge.none, xosc.SimulationTimeCondition(4, xosc.Rule.greaterThan)))
man = xosc.Maneuver('maneuver')
man.add_event(event)
sb = xosc.StoryBoard(init, stoptrigger=xosc.ValueTrigger('start_trigger ', 0, xosc.ConditionEdge.none, xosc.SimulationTimeCondition(13, xosc.Rule.greaterThan), 'stop'))
sb.add_maneuver(man, egoname)
sce = xosc.Scenario('my scenario', 'Mandolin', xosc.ParameterDeclarations(), entities, sb, road, catalog)
return sce |
class LightningBaseModel(pl.LightningModule):
def __init__(self, args):
super().__init__()
self.args = args
self.train_acc = Accuracy()
self.val_acc = Accuracy(compute_on_step=False)
self.val_iou = IoU(self.args['dataset_params'], compute_on_step=False)
if self.args['submit_to_server']:
self.submit_dir = ((os.path.dirname(self.args['checkpoint']) + '/submit_') + datetime.now().strftime('%Y_%m_%d'))
with open(self.args['dataset_params']['label_mapping'], 'r') as stream:
self.mapfile = yaml.safe_load(stream)
self.ignore_label = self.args['dataset_params']['ignore_label']
def configure_optimizers(self):
if (self.args['train_params']['optimizer'] == 'Adam'):
optimizer = torch.optim.Adam(self.parameters(), lr=self.args['train_params']['learning_rate'])
elif (self.args['train_params']['optimizer'] == 'SGD'):
optimizer = torch.optim.SGD(self.parameters(), lr=self.args['train_params']['learning_rate'], momentum=self.args['train_params']['momentum'], weight_decay=self.args['train_params']['weight_decay'], nesterov=self.args['train_params']['nesterov'])
else:
raise NotImplementedError
if (self.args['train_params']['lr_scheduler'] == 'StepLR'):
lr_scheduler = StepLR(optimizer, step_size=self.args['train_params']['decay_step'], gamma=self.args['train_params']['decay_rate'])
elif (self.args['train_params']['lr_scheduler'] == 'ReduceLROnPlateau'):
lr_scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=self.args['train_params']['decay_rate'], patience=self.args['train_params']['decay_step'], verbose=True)
elif (self.args['train_params']['lr_scheduler'] == 'CosineAnnealingLR'):
lr_scheduler = CosineAnnealingLR(optimizer, T_max=(self.args['train_params']['max_num_epochs'] - 4), eta_min=1e-05)
elif (self.args['train_params']['lr_scheduler'] == 'CosineAnnealingWarmRestarts'):
from functools import partial
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=partial(cosine_schedule_with_warmup, num_epochs=self.args['train_params']['max_num_epochs'], batch_size=self.args['dataset_params']['train_data_loader']['batch_size'], dataset_size=self.args['dataset_params']['training_size'], num_gpu=len(self.args.gpu)))
else:
raise NotImplementedError
scheduler = {'scheduler': lr_scheduler, 'interval': ('step' if (self.args['train_params']['lr_scheduler'] == 'CosineAnnealingWarmRestarts') else 'epoch'), 'frequency': 1}
return {'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': self.args.monitor}
def forward(self, data):
pass
def training_step(self, data_dict, batch_idx):
data_dict = self.forward(data_dict)
self.train_acc(data_dict['logits'].argmax(1)[(data_dict['labels'] != self.ignore_label)], data_dict['labels'][(data_dict['labels'] != self.ignore_label)])
self.log('train/acc', self.train_acc, on_epoch=True)
self.log('train/loss_main_ce', data_dict['loss_main_ce'])
self.log('train/loss_main_lovasz', data_dict['loss_main_lovasz'])
return data_dict['loss']
def validation_step(self, data_dict, batch_idx):
indices = data_dict['indices']
raw_labels = data_dict['raw_labels'].squeeze(1).cpu()
origin_len = data_dict['origin_len']
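# vote_logits gathers logits back onto the original points (index_add_ over the
# sampling indices), so multiply-sampled points vote on each point's class.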
vote_logits = torch.zeros((len(raw_labels), self.num_classes))
data_dict = self.forward(data_dict)
if self.args['test']:
vote_logits.index_add_(0, indices.cpu(), data_dict['logits'].cpu())
if (self.args['dataset_params']['pc_dataset_type'] == 'SemanticKITTI_multiscan'):
vote_logits = vote_logits[:origin_len]
raw_labels = raw_labels[:origin_len]
else:
vote_logits = data_dict['logits'].cpu()
raw_labels = data_dict['labels'].squeeze(0).cpu()
prediction = vote_logits.argmax(1)
if (self.ignore_label != 0):
prediction = prediction[(raw_labels != self.ignore_label)]
raw_labels = raw_labels[(raw_labels != self.ignore_label)]
prediction += 1
raw_labels += 1
self.val_acc(prediction, raw_labels)
self.log('val/acc', self.val_acc, on_epoch=True)
self.val_iou(prediction.cpu().detach().numpy(), raw_labels.cpu().detach().numpy())
return data_dict['loss']
def test_step(self, data_dict, batch_idx):
indices = data_dict['indices']
origin_len = data_dict['origin_len']
raw_labels = data_dict['raw_labels'].squeeze(1).cpu()
path = data_dict['path'][0]
vote_logits = torch.zeros((len(raw_labels), self.num_classes))
data_dict = self.forward(data_dict)
vote_logits.index_add_(0, indices.cpu(), data_dict['logits'].cpu())
if (self.args['dataset_params']['pc_dataset_type'] == 'SemanticKITTI_multiscan'):
vote_logits = vote_logits[:origin_len]
raw_labels = raw_labels[:origin_len]
prediction = vote_logits.argmax(1)
if (self.ignore_label != 0):
prediction = prediction[(raw_labels != self.ignore_label)]
raw_labels = raw_labels[(raw_labels != self.ignore_label)]
prediction += 1
raw_labels += 1
if (not self.args['submit_to_server']):
self.val_acc(prediction, raw_labels)
self.log('val/acc', self.val_acc, on_epoch=True)
self.val_iou(prediction.cpu().detach().numpy(), raw_labels.cpu().detach().numpy())
elif (self.args['dataset_params']['pc_dataset_type'] != 'nuScenes'):
components = path.split('/')
sequence = components[(- 3)]
points_name = components[(- 1)]
label_name = points_name.replace('bin', 'label')
full_save_dir = os.path.join(self.submit_dir, 'sequences', sequence, 'predictions')
os.makedirs(full_save_dir, exist_ok=True)
full_label_name = os.path.join(full_save_dir, label_name)
if os.path.exists(full_label_name):
print(('%s already exists...' % label_name))
pass
valid_labels = np.vectorize(self.mapfile['learning_map_inv'].__getitem__)
original_label = valid_labels(vote_logits.argmax(1).cpu().numpy().astype(int))
final_preds = original_label.astype(np.uint32)
final_preds.tofile(full_label_name)
else:
meta_dict = {'meta': {'use_camera': False, 'use_lidar': True, 'use_map': False, 'use_radar': False, 'use_external': False}}
os.makedirs(os.path.join(self.submit_dir, 'test'), exist_ok=True)
with open(os.path.join(self.submit_dir, 'test', 'submission.json'), 'w', encoding='utf-8') as f:
json.dump(meta_dict, f)
original_label = prediction.cpu().numpy().astype(np.uint8)
assert all(((original_label > 0) & (original_label < 17))), 'Error: Array for predictions must be between 1 and 16 (inclusive).'
full_save_dir = os.path.join(self.submit_dir, 'lidarseg/test')
full_label_name = os.path.join(full_save_dir, (path + '_lidarseg.bin'))
os.makedirs(full_save_dir, exist_ok=True)
if os.path.exists(full_label_name):
print(('%s already exists...' % full_label_name))
else:
original_label.tofile(full_label_name)
return data_dict['loss']
def validation_epoch_end(self, outputs):
(iou, best_miou) = self.val_iou.compute()
mIoU = np.nanmean(iou)
str_print = ''
self.log('val/mIoU', mIoU, on_epoch=True)
self.log('val/best_miou', best_miou, on_epoch=True)
str_print += 'Validation per class iou: '
for (class_name, class_iou) in zip(self.val_iou.unique_label_str, iou):
str_print += ('\n%s : %.2f%%' % (class_name, (class_iou * 100)))
str_print += ('\nCurrent val miou is %.3f while the best val miou is %.3f' % ((mIoU * 100), (best_miou * 100)))
self.print(str_print)
def test_epoch_end(self, outputs):
if (not self.args['submit_to_server']):
(iou, best_miou) = self.val_iou.compute()
mIoU = np.nanmean(iou)
str_print = ''
self.log('val/mIoU', mIoU, on_epoch=True)
self.log('val/best_miou', best_miou, on_epoch=True)
str_print += 'Validation per class iou: '
for (class_name, class_iou) in zip(self.val_iou.unique_label_str, iou):
str_print += ('\n%s : %.2f%%' % (class_name, (class_iou * 100)))
str_print += ('\nCurrent val miou is %.3f while the best val miou is %.3f' % ((mIoU * 100), (best_miou * 100)))
self.print(str_print)
def on_after_backward(self) -> None:
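# Guard against corrupt batches: if any gradient holds NaN/Inf values, zero all
# gradients so this step does not update the weights.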
valid_gradients = True
for (name, param) in self.named_parameters():
if (param.grad is not None):
valid_gradients = (not (torch.isnan(param.grad).any() or torch.isinf(param.grad).any()))
if (not valid_gradients):
break
if (not valid_gradients):
print(f'detected inf or nan values in gradients. not updating model parameters')
self.zero_grad() |
def test_create_legacy_questions(db, settings):
Catalog.objects.all().delete()
Section.objects.all().delete()
Page.objects.all().delete()
QuestionSet.objects.all().delete()
Question.objects.all().delete()
xml_file = ((((Path(settings.BASE_DIR) / 'xml') / 'elements') / 'legacy') / 'questions.xml')
root = read_xml_file(xml_file)
version = root.attrib.get('version')
elements = flat_xml_to_elements(root)
elements = convert_elements(elements, version)
elements = order_elements(elements)
elements = elements.values()
import_elements(elements)
assert (len(root) == len(elements) == 147)
assert (Catalog.objects.count() == 1)
assert (Section.objects.count() == 6)
assert (Page.objects.count() == 48)
assert (QuestionSet.objects.count() == 3)
assert (Question.objects.count() == 89)
assert all(((element['created'] is True) for element in elements))
assert all(((element['updated'] is False) for element in elements))
catalog = Catalog.objects.prefetch_elements().first()
descendant_uris = {element.uri for element in catalog.descendants}
element_uris = {element['uri'] for element in elements if (element['uri'] != catalog.uri)}
assert (descendant_uris == element_uris) |
class Spiral(XodrBase):
def __init__(self, curvstart, curvend, length=None, angle=None, cdot=None):
super().__init__()
self.curvstart = curvstart
self.curvend = curvend
if ((length is None) and (angle is None) and (cdot is None)):
raise NotEnoughInputArguments('Spiral is underdefined')
if (sum([(x is not None) for x in [length, angle, cdot]]) > 1):
raise ToManyOptionalArguments('Spiral is overdefined, please use only one of the optional inputs')
if angle:
self.length = ((2 * abs(angle)) / np.maximum(abs(curvend), abs(curvstart)))
elif cdot:
self.length = ((self.curvend - self.curvstart) / cdot)
else:
self.length = length
def __eq__(self, other):
if (isinstance(other, Spiral) and super().__eq__(other)):
if (self.get_attributes() == other.get_attributes()):
return True
return False
def get_end_data(self, x, y, h):
cloth = pcloth.Clothoid.StandardParams(x, y, h, self.curvstart, ((self.curvend - self.curvstart) / self.length), self.length)
return (cloth.XEnd, cloth.YEnd, cloth.ThetaEnd, cloth.length)
def get_start_data(self, end_x, end_y, end_h):
cloth = pcloth.Clothoid.StandardParams(end_x, end_y, end_h, (- self.curvend), ((- (self.curvstart - self.curvend)) / self.length), self.length)
return (cloth.XEnd, cloth.YEnd, cloth.ThetaEnd, cloth.length)
def get_attributes(self):
return {'curvStart': str(self.curvstart), 'curvEnd': str(self.curvend)}
def get_element(self):
element = ET.Element('spiral', attrib=self.get_attributes())
self._add_additional_data_to_element(element)
return element |
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create(db, client, files, username, password, project_id):
client.login(username=username, password=password)
project = Project.objects.get(id=project_id)
snapshot_count = project.snapshots.count()
values_count = project.values.count()
current_values_count = project.values.filter(snapshot=None).count()
url = reverse(urlnames['list'], args=[project_id])
data = {'title': 'A new snapshot', 'description': 'Some description'}
response = client.post(url, data)
if (project_id in add_snapshot_permission_map.get(username, [])):
assert (response.status_code == 201)
assert isinstance(response.json(), dict)
assert (response.json().get('id') in project.snapshots.values_list('id', flat=True))
assert (project.snapshots.count() == (snapshot_count + 1))
assert (project.values.count() == (values_count + current_values_count))
for file_value in project.values.filter(value_type=VALUE_TYPE_FILE):
assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
else:
if (project_id in view_snapshot_permission_map.get(username, [])):
assert (response.status_code == 403)
else:
assert (response.status_code == 404)
assert (project.snapshots.count() == snapshot_count)
assert (project.values.count() == values_count) |
def exec_cmd_in_pod(cli, command, pod_name, namespace, container=None):
exec_command = command
try:
if container:
ret = stream(cli.connect_get_namespaced_pod_exec, pod_name, namespace, container=container, command=exec_command, stderr=True, stdin=False, stdout=True, tty=False)
else:
ret = stream(cli.connect_get_namespaced_pod_exec, pod_name, namespace, command=exec_command, stderr=True, stdin=False, stdout=True, tty=False)
except BaseException:
return False
return ret |
class ImageContainerBilinear(ImageContainer):
def __init__(self, image_data, geo_def, radius_of_influence, epsilon=0, fill_value=0, reduce_data=False, nprocs=1, segments=None, neighbours=32):
super(ImageContainerBilinear, self).__init__(image_data, geo_def, fill_value=fill_value, nprocs=nprocs)
self.radius_of_influence = radius_of_influence
self.epsilon = epsilon
self.reduce_data = reduce_data
self.segments = segments
self.neighbours = neighbours
def resample(self, target_geo_def):
from pyresample import bilinear
image_data = self.image_data
try:
mask = image_data.mask.copy()
image_data = image_data.data.copy()
image_data[mask] = np.nan
except AttributeError:
pass
resampled_image = bilinear.resample_bilinear(image_data, self.geo_def, target_geo_def, radius=self.radius_of_influence, neighbours=self.neighbours, epsilon=self.epsilon, fill_value=self.fill_value, nprocs=self.nprocs, reduce_data=self.reduce_data, segments=self.segments)
return ImageContainerBilinear(resampled_image, target_geo_def, self.radius_of_influence, epsilon=self.epsilon, fill_value=self.fill_value, reduce_data=self.reduce_data, nprocs=self.nprocs, segments=self.segments) |
class BaseNetworkError(Exception):
@classmethod
def human_readable_name(cls) -> str:
return cls.__name__
@classmethod
def code(cls):
raise NotImplementedError()
@property
def detail(self):
return None
def as_json(self) -> dict:
return {'error': {'code': self.code(), 'detail': self.detail}}
@classmethod
def from_detail(cls, detail) -> Self:
return cls()
@classmethod
def from_json(cls, data: dict) -> (Self | None):
if ('error' not in data):
return None
code = data['error']['code']
detail = data['error']['detail']
for ret_cls in BaseNetworkError.__subclasses__():
if (code == ret_cls.code()):
return ret_cls.from_detail(detail)
raise RuntimeError('Unknown error')
def __eq__(self, other):
return (isinstance(other, type(self)) and (self.detail == other.detail))
def __str__(self):
return self.human_readable_name() |
def query_execute_wrapper(db_conn, query_string=None, query_list=None, max_tries=3, no_return=True):
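# Retry the statement up to max_tries times, sleeping a randomized, growing
# backoff whenever SQLite reports the database as locked/busy (OperationalError).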
last_exc = None
for i in range(0, max_tries):
try:
with db_conn:
if (query_list is None):
curs = db_conn.execute(query_string)
else:
curs = db_conn.execute(query_string, query_list)
if no_return:
return None
else:
return curs
except sqlite3.OperationalError as exc:
last_exc = exc
dsz.Sleep(((1 + i) * random.randint(250, 1250)))
continue
raise last_exc |
class LEBertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
for (layer, heads) in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint='bert-base-uncased', output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, word_embeddings=None, word_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
output_attentions = (output_attentions if (output_attentions is not None) else self.config.output_attentions)
output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
if ((input_ids is not None) and (inputs_embeds is not None)):
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif (input_ids is not None):
input_shape = input_ids.size()
elif (inputs_embeds is not None):
input_shape = inputs_embeds.size()[:(- 1)]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = (input_ids.device if (input_ids is not None) else inputs_embeds.device)
if (attention_mask is None):
attention_mask = torch.ones(input_shape, device=device)
if (token_type_ids is None):
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
if (self.config.is_decoder and (encoder_hidden_states is not None)):
(encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if (encoder_attention_mask is None):
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, word_embeddings=word_embeddings, word_mask=word_mask, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if (not return_dict):
return ((sequence_output, pooled_output) + encoder_outputs[1:])
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) |
class AnimOsdPrefs(Gtk.VBox):
def __init__(self, plugin):
super().__init__(spacing=6)
self.Conf = plugin.Conf
self.plugin = plugin
def __coltofloat(x):
return (x / 65535.0)
def __floattocol(x):
return int((x * 65535))
def show_preview():
preview_song = (app.player.song if app.player.song else DUMMY_SONG)
self.plugin.plugin_on_song_started(preview_song)
def on_button_pressed(x=None, y=None):
show_preview()
def set_text(button):
color = button.get_color()
color = map(__coltofloat, (color.red, color.green, color.blue, 0.0))
self.Conf.text = tuple(color)
show_preview()
def set_fill(button):
color = button.get_color()
color = map(__coltofloat, (color.red, color.green, color.blue, button.get_alpha()))
self.Conf.fill = tuple(color)
show_preview()
def set_font(button):
font = button.get_font_name()
self.Conf.font = font
show_preview()
def change_delay(button):
value = int((button.get_value() * 1000))
self.Conf.delay = value
def change_monitor(button):
value = int(button.get_value())
self.Conf.monitor = value
show_preview()
def change_position(button, x, y):
self.Conf.pos_x = (x / 2.0)
self.Conf.pos_y = (y / 2.0)
show_preview()
def change_align(button):
value = button.get_active()
self.Conf.align = value
show_preview()
def change_shadow(button):
if button.get_active():
self.Conf.shadow = (0.0, 0.0, 0.0, self.Conf.fill[3])
else:
self.Conf.shadow = ((- 1.0), 0.0, 0.0, 0.0)
show_preview()
def change_outline(button):
if button.get_active():
alpha = min(1.0, (self.Conf.fill[3] * 1.25))
self.Conf.outline = (0.1, 0.1, 0.1, alpha)
else:
self.Conf.outline = ((- 1.0), 0.0, 0.0)
show_preview()
def change_rounded(button):
if button.get_active():
self.Conf.corners = 1
else:
self.Conf.corners = 0
show_preview()
def change_coversize(button):
value = int(button.get_value())
self.Conf.coversize = value
show_preview()
def edit_pattern(button):
w = PatternEdit(button, self.Conf.string)
w.set_default_size(520, 260)
w.text = self.Conf.string
connect_obj(w.apply, 'clicked', set_string, w)
w.show()
def set_string(window):
value = window.text
self.Conf.string = value
show_preview()
def build_display_widget():
vb2 = Gtk.VBox(spacing=3)
hb = Gtk.HBox(spacing=6)
monitor_cnt = Gdk.Screen.get_default().get_n_monitors()
if (monitor_cnt > 1):
adj = Gtk.Adjustment(value=self.Conf.monitor, lower=0, upper=(monitor_cnt - 1), step_increment=1)
monitor = Gtk.SpinButton(adjustment=adj)
monitor.set_numeric(True)
monitor.connect('value-changed', change_monitor)
l2 = ConfigLabel('_Monitor:', monitor)
hb.pack_start(l2, False, True, 0)
hb.pack_start(monitor, False, True, 0)
vb2.pack_start(hb, True, True, 0)
else:
self.Conf.monitor = 0
hb = Gtk.HBox(spacing=6)
grid = Gtk.Grid(column_homogeneous=True, row_homogeneous=True, row_spacing=4, column_spacing=4)
arrows = [['', '', ''], ['', '', ''], ['', '', ' ']]
group = None
for x in range(3):
for y in range(3):
rb = Gtk.RadioButton(group=group, label=arrows[y][x])
if ((int((self.Conf.pos_x * 2.0)) == x) and (int((self.Conf.pos_y * 2.0)) == y)):
rb.set_active(True)
grid.attach(rb, x, y, 1, 1)
group = rb
for x in range(3):
for y in range(3):
rb = grid.get_child_at(x, y)
rb.connect('toggled', change_position, x, y)
lbl = ConfigLabel(_('_Position:'), grid)
hb.pack_start(lbl, False, True, 0)
hb.pack_start(grid, False, True, 0)
vb2.pack_start(hb, False, True, 6)
hb = Gtk.HBox(spacing=6)
coversize = Gtk.SpinButton(adjustment=Gtk.Adjustment.new(self.Conf.coversize, 1, 600, 1, 10, 0), climb_rate=1, digits=0)
coversize.set_numeric(True)
coversize.connect('value-changed', change_coversize)
l1 = ConfigLabel(_('_Cover size:'), coversize)
hb.pack_start(l1, False, True, 0)
hb.pack_start(coversize, False, True, 0)
vb2.pack_start(hb, False, True, 0)
return vb2
frame = qltk.Frame(label=_('Display'), child=build_display_widget())
frame.set_border_width(6)
self.pack_start(frame, False, True, 0)
def build_text_widget():
t = Gtk.Table(n_rows=2, n_columns=2)
t.props.expand = False
t.set_col_spacings(6)
t.set_row_spacings(3)
font = Gtk.FontButton(show_style=True)
font.set_font_name(self.Conf.font)
font.connect('font-set', set_font)
lbl = ConfigLabel(_('_Font:'), font)
t.attach(lbl, 0, 1, 0, 1, xoptions=Gtk.AttachOptions.FILL)
t.attach(font, 1, 2, 0, 1)
align = Gtk.ComboBoxText()
align.append_text(_('Left'))
align.append_text(_('Center'))
align.append_text(_('Right'))
align.set_active(self.Conf.align)
align.connect('changed', change_align)
lbl = ConfigLabel(_('_Align text:'), align)
t.attach(lbl, 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL)
t.attach(align, 1, 2, 1, 2)
return t
frame = qltk.Frame(label=_('Text'), child=build_text_widget())
frame.set_border_width(6)
self.pack_start(frame, False, True, 0)
def build_colors_widget():
t = Gtk.Table(n_rows=2, n_columns=2)
t.props.expand = False
t.set_col_spacings(6)
t.set_row_spacings(3)
b = Gtk.ColorButton(rgba=Gdk.RGBA(*self.Conf.text))
l = ConfigLabel(_('_Text:'), b)
t.attach(l, 0, 1, 0, 1, xoptions=Gtk.AttachOptions.FILL)
t.attach(b, 1, 2, 0, 1)
b.connect('color-set', set_text)
b = Gtk.ColorButton(color=Gdk.Color(*map(__floattocol, self.Conf.fill[0:3])))
b.set_use_alpha(True)
b.set_alpha(__floattocol(self.Conf.fill[3]))
b.connect('color-set', set_fill)
l = ConfigLabel(_('_Fill:'), b)
t.attach(l, 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL)
t.attach(b, 1, 2, 1, 2)
return t
f = qltk.Frame(label=_('Colors'), child=build_colors_widget())
f.set_border_width(6)
self.pack_start(f, False, False, 0)
def build_effects_widget():
vb2 = Gtk.VBox(spacing=3)
hb = Gtk.HBox(spacing=6)
toggles = [(_('_Shadows'), self.Conf.shadow[0], change_shadow), (_('_Outline'), self.Conf.outline[0], change_outline), (_('Rou_nded Corners'), (self.Conf.corners - 1), change_rounded)]
for (label, current, callback) in toggles:
checkb = Gtk.CheckButton(label=label, use_underline=True)
checkb.set_active((current != (- 1)))
checkb.connect('toggled', callback)
hb.pack_start(checkb, True, True, 0)
vb2.pack_start(hb, True, True, 0)
hb = Gtk.HBox(spacing=6)
timeout = Gtk.SpinButton(adjustment=Gtk.Adjustment.new((self.Conf.delay / 1000.0), 0, 60, 0.1, 1.0, 0), climb_rate=0.1, digits=1)
timeout.set_numeric(True)
timeout.connect('value-changed', change_delay)
l1 = ConfigLabel(_('_Delay:'), timeout)
hb.pack_start(l1, False, True, 0)
hb.pack_start(timeout, False, True, 0)
vb2.pack_start(hb, False, True, 0)
return vb2
frame = qltk.Frame(label=_('Effects'), child=build_effects_widget())
frame.set_border_width(6)
self.pack_start(frame, False, True, 0)
def build_buttons_widget():
hb = Gtk.HBox(spacing=6)
edit_button = qltk.Button(_('Ed_it Display Pattern...'), Icons.EDIT)
edit_button.connect('clicked', edit_pattern)
hb.pack_start(edit_button, False, True, 0)
preview_button = Gtk.Button(label=_('Preview'), use_underline=True)
preview_button.connect('button-press-event', on_button_pressed)
hb.pack_start(preview_button, False, True, 0)
return hb
self.pack_start(build_buttons_widget(), False, True, 0) |
class AEADCipher(BaseCipher):
PACKET_LIMIT = ((16 * 1024) - 1)
def setup_iv(self, iv=None):
self.iv = (os.urandom(self.IV_LENGTH) if (iv is None) else iv)
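# Derive the session subkey via HKDF-SHA1: extract with the IV as salt, then
# expand with the b'ss-subkey' info string (the shadowsocks AEAD convention).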
randkey = hmac.new(self.iv, self.key, hashlib.sha1).digest()
blocks_needed = (((self.KEY_LENGTH + len(randkey)) - 1) // len(randkey))
okm = bytearray()
output_block = b''
for counter in range(blocks_needed):
output_block = hmac.new(randkey, ((output_block + b'ss-subkey') + bytes([(counter + 1)])), hashlib.sha1).digest()
okm.extend(output_block)
self.key = bytes(okm[:self.KEY_LENGTH])
self._nonce = 0
self._buffer = bytearray()
self._declen = None
self.setup()
return self
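# The nonce is a little-endian counter, incremented after every
# encrypt/decrypt operation.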
def nonce(self):
ret = self._nonce.to_bytes(self.NONCE_LENGTH, 'little')
self._nonce = ((self._nonce + 1) & ((1 << (self.NONCE_LENGTH * 8)) - 1))
return ret
def decrypt(self, s):
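# AEAD chunk framing: [2-byte big-endian length][length tag][payload][payload tag];
# input is buffered until a full chunk is available.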
self._buffer.extend(s)
ret = bytearray()
try:
while 1:
if (self._declen is None):
if (len(self._buffer) < (2 + self.TAG_LENGTH)):
break
self._declen = int.from_bytes(self.decrypt_and_verify(self._buffer[:2], self._buffer[2:(2 + self.TAG_LENGTH)]), 'big')
assert (self._declen <= self.PACKET_LIMIT)
del self._buffer[:(2 + self.TAG_LENGTH)]
else:
if (len(self._buffer) < (self._declen + self.TAG_LENGTH)):
break
ret.extend(self.decrypt_and_verify(self._buffer[:self._declen], self._buffer[self._declen:(self._declen + self.TAG_LENGTH)]))
del self._buffer[:(self._declen + self.TAG_LENGTH)]
self._declen = None
except Exception:
return bytes([0])
return bytes(ret)
def encrypt(self, s):
ret = bytearray()
for i in range(0, len(s), self.PACKET_LIMIT):
buf = s[i:(i + self.PACKET_LIMIT)]
(len_chunk, len_tag) = self.encrypt_and_digest(len(buf).to_bytes(2, 'big'))
(body_chunk, body_tag) = self.encrypt_and_digest(buf)
ret.extend((((len_chunk + len_tag) + body_chunk) + body_tag))
return bytes(ret) |
def main(argv):
parser = optparse.OptionParser(add_help_option=False)
parser.disable_interspersed_args()
parser.add_option('-?', '--help', dest='help', action='store_true', default=None, help='print help')
parser.add_option('-t', dest='t', action='store', default=None)
(opts, argv_rest) = parser.parse_args(argv)
if getattr(opts, 'help', False):
parser.print_help()
return 0
tspec = opts.t
if (tspec is None):
times = None
else:
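# Parse a touch-style time spec, [[CC]YY]MMDDhhmm[.SS], into an (atime, mtime)
# pair for os.utime.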
(head, sep, tail) = tspec.partition('.')
if (8 > len(head)):
raise Exception(((('time spec must follow format [[CC]YY]MMDDhhmm[.SS]: ' + tspec) + '; ') + head))
tfmt = ''
if (12 == len(head)):
tfmt += '%Y'
elif (10 == len(head)):
tfmt += '%y'
tfmt += '%m%d%H%M'
if (2 == len(tail)):
tfmt += '.%S'
mtime = time.mktime(time.strptime(tspec, tfmt))
times = (mtime, mtime)
for file in argv_rest:
try:
os.utime(file, times)
except OSError:
open(file, 'w').close()
if (times is not None):
os.utime(file, times) |
def setUpModule():
global h2o, h2o_scanner, o2, o2_scanner
h2o = gto.M(verbose=3, output='/dev/null', atom='O -2. -15. -14.\n H -2. -14. -15.\n H -2. -16. -15.', basis='def2-svp')
h2o_scanner = scf.RHF(h2o)
h2o_scanner.build()
h2o_scanner.conv_tol_grad = 1e-06
h2o_scanner.max_cycle = 700
o2 = gto.M(verbose=3, output='/dev/null', atom='O 0\t0\t0.\n O 0\t0\t1.', basis='def2-svp')
o2_scanner = scf.RHF(o2)
o2_scanner.build()
o2_scanner.conv_tol_grad = 1e-06
o2_scanner.max_cycle = 700 |
class MLP(nn.Module):
def __init__(self, *, d_in: int, d_layers: ty.List[int], dropout: float, d_out: int, categories: ty.Optional[ty.List[int]], d_embedding: int) -> None:
super().__init__()
if (categories is not None):
d_in += (len(categories) * d_embedding)
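# All categorical features share a single nn.Embedding table; category_offsets
# shifts each feature's raw indices into its own contiguous slice of that table.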
category_offsets = torch.tensor(([0] + categories[:(- 1)])).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'self.category_embeddings.weight.shape={self.category_embeddings.weight.shape!r}')
self.layers = nn.ModuleList([nn.Linear((d_layers[(i - 1)] if i else d_in), x) for (i, x) in enumerate(d_layers)])
self.dropout = dropout
self.head = nn.Linear((d_layers[(- 1)] if d_layers else d_in), d_out)
def forward(self, x_num, x_cat):
x = []
if (x_num is not None):
x.append(x_num)
if (x_cat is not None):
x.append(self.category_embeddings((x_cat + self.category_offsets[None])).view(x_cat.size(0), (- 1)))
x = torch.cat(x, dim=(- 1))
for layer in self.layers:
x = layer(x)
x = F.relu(x)
if self.dropout:
x = F.dropout(x, self.dropout, self.training)
x = self.head(x)
x = x.squeeze((- 1))
return x |
def prepare_test(plugin_name, code, tagname='', html='', template=HTML_TEMPLATE_WITH_TAG):
def dec(f):
def _inner(self, *args, **kws):
self.writefile(f'{plugin_name}.py', code)
page_html = template.format(plugin_name=plugin_name, tagname=tagname, html=html)
self.pyscript_run(page_html)
return f(self, *args, **kws)
return _inner
return dec |
def create_network(n_dense=6, dense_units=16, activation='selu', dropout=AlphaDropout, dropout_rate=0.1, kernel_initializer='lecun_normal', optimizer='adam', num_classes=1, max_words=max_words):
model = Sequential()
model.add(Dense(dense_units, input_shape=(max_words,), kernel_initializer=kernel_initializer))
model.add(Activation(activation))
model.add(dropout(dropout_rate))
for i in range((n_dense - 1)):
model.add(Dense(dense_units, kernel_initializer=kernel_initializer))
model.add(Activation(activation))
model.add(dropout(dropout_rate))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model |
class FC3_TestCase(CommandTest):
command = 'xconfig'
def runTest(self):
if ('--card' in self.optionList):
self.assert_parse('xconfig --card=cardA --hsync=H --vsync=V --monitor=monitorA --noprobe', 'xconfig --card=cardA --hsync=H --monitor=monitorA --noprobe --vsync=V\n')
if ('--depth' in self.optionList):
self.assert_parse('xconfig --depth=16 --resolution=1280x1024 --videoram=32000', 'xconfig --depth=16 --resolution=1280x1024 --videoram=32000\n')
self.assert_parse('xconfig --defaultdesktop=xfce --startxonboot', 'xconfig --defaultdesktop=xfce --startxonboot\n')
if ('--server' in self.optionList):
self.assert_parse('xconfig --server=Xvfb', 'xconfig --server=Xvfb\n')
self.assert_parse('xconfig', '')
self.assert_parse_error('xconfig --startxonboot=yes')
self.assert_parse_error('xconfig --extra --arguments --here')
self.assert_parse_error('xconfig extra arguments here') |
def decompress_and_load(key: str, serialized: bytes, flags: int) -> Any:
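# Memcached-style deserialization: the flags bitfield records how the value was
# stored (zlib-compressed, integer/long, or JSON); unrecognized flags fall back
# to returning the raw bytes.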
if (flags & Flags.ZLIB):
serialized = zlib.decompress(serialized)
flags ^= Flags.ZLIB
if (flags == 0):
return serialized
if (flags in (Flags.INTEGER, Flags.LONG)):
return int(serialized)
if (flags == Flags.JSON):
try:
return json.loads(serialized)
except ValueError:
logging.info('json error', exc_info=True)
return None
logging.info('unrecognized flags')
return serialized |
class SysCapture(SysCaptureBinary):
EMPTY_BUFFER = ''
def snap(self) -> str:
res = self.tmpfile.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def writeorg(self, data: str) -> None:
self._assert_state('writeorg', ('started', 'suspended'))
self._old.write(data)
self._old.flush() |
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if (output_folder is None):
output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if (evaluator_type in ['sem_seg', 'coco_panoptic_seg']):
evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, output_dir=output_folder))
if (evaluator_type in ['coco', 'coco_panoptic_seg']):
evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
if (evaluator_type == 'coco_panoptic_seg'):
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if (evaluator_type == 'cityscapes_instance'):
assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently does not work with multiple machines.'
return CityscapesInstanceEvaluator(dataset_name)
if (evaluator_type == 'cityscapes_sem_seg'):
assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently does not work with multiple machines.'
return CityscapesSemSegEvaluator(dataset_name)
elif (evaluator_type == 'pascal_voc'):
return PascalVOCDetectionEvaluator(dataset_name)
elif (evaluator_type == 'lvis'):
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if (len(evaluator_list) == 0):
raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
elif (len(evaluator_list) == 1):
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger('detectron2.trainer')
logger.info('Running inference with test-time augmentation ...')
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [cls.build_evaluator(cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, 'inference_TTA')) for name in cfg.DATASETS.TEST]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({(k + '_TTA'): v for (k, v) in res.items()})
return res
@classmethod
def build_optimizer(cls, cfg, model):
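# Swin-style training recipe: exempt absolute position embeddings and relative
# position bias tables from weight decay via per-parameter overrides.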
params = get_default_optimizer_params(model, base_lr=cfg.SOLVER.BASE_LR, weight_decay=cfg.SOLVER.WEIGHT_DECAY, weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, overrides={'absolute_pos_embed': {'lr': cfg.SOLVER.BASE_LR, 'weight_decay': 0.0}, 'relative_position_bias_table': {'lr': cfg.SOLVER.BASE_LR, 'weight_decay': 0.0}})
def maybe_add_full_model_gradient_clipping(optim):
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (cfg.SOLVER.CLIP_GRADIENTS.ENABLED and (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model') and (clip_norm_val > 0.0))
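# Subclass the optimizer so a single global gradient-norm clip over all
# parameter groups runs inside step() ("full_model" clipping).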
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x['params'] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return (FullModelGradientClippingOptimizer if enable else optim)
optimizer_type = cfg.SOLVER.OPTIMIZER
if (optimizer_type == 'SGD'):
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
elif (optimizer_type == 'AdamW'):
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(params, cfg.SOLVER.BASE_LR, betas=(0.9, 0.999), weight_decay=cfg.SOLVER.WEIGHT_DECAY)
else:
raise NotImplementedError(f'no optimizer type {optimizer_type}')
return optimizer |
def construct_noise_model(network: Union[(Network_DQNN, Network_QAOA)]) -> None:
provider = training.get_provider()
backend = provider.get_backend('ibmq_16_melbourne')
network.coupling_map = backend.configuration().coupling_map
noise_model = noise.NoiseModel(['cx', 'rz', 'sx', 'x'])
for (gate, value) in network.gate_error_probabilities.items():
error = noise.depolarizing_error(*value)
noise_model.add_all_qubit_quantum_error(error, gate)
network.noise_model = noise_model |
def Ck(input, k, slope=0.2, stride=2, reuse=False, norm='instance', is_training=True, name=None, sn=False):
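# Ck discriminator block: 4x4x4 strided 3-D convolution (optionally spectrally
# normalized) -> normalization -> leaky ReLU.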
with tf.variable_scope(name, reuse=reuse):
weights = _weights('weights', shape=[4, 4, 4, input.get_shape()[4], k])
if sn:
conv = tf.nn.conv3d(input, spectral_norm(weights), strides=[1, stride, stride, stride, 1], padding='SAME')
else:
conv = tf.nn.conv3d(input, weights, strides=[1, stride, stride, stride, 1], padding='SAME')
normalized = _norm(conv, is_training, norm)
output = _leaky_relu(normalized, slope)
return output |
def arg(name, type=None, help=None, nargs=None, mapper=None, choices=None, prefix=True):
def wrap(fn):
assert (fn.__name__ == '__init__')
if (not hasattr(fn, '_autoargs_info')):
fn._autoargs_info = dict()
fn._autoargs_info[name] = dict(type=type, help=help, nargs=nargs, choices=choices, mapper=mapper)
return fn
return wrap |
class Dimensions(VersionBase):
def __init__(self, width, length, height):
self.width = convert_float(width)
self.length = convert_float(length)
self.height = convert_float(height)
@staticmethod
def parse(element):
width = convert_float(element.attrib['width'])
height = convert_float(element.attrib['height'])
length = convert_float(element.attrib['length'])
return Dimensions(width, length, height)
def __eq__(self, other):
if isinstance(other, Dimensions):
if (self.get_attributes() == other.get_attributes()):
return True
return False
def get_attributes(self):
return {'width': str(self.width), 'length': str(self.length), 'height': str(self.height)}
def get_element(self):
element = ET.Element('Dimensions', attrib=self.get_attributes())
return element |
class Link(object):
def __init__(self, model, linkid):
if (not model.fileLoaded):
raise PYSWMMException('SWMM Model Not Open')
if (linkid not in model.getObjectIDList(ObjectType.LINK.value)):
raise PYSWMMException('ID Not valid')
self._model = model
self._linkid = linkid
def linkid(self):
return self._linkid
def is_conduit(self):
return (self._model.getLinkType(self._linkid) is shared_enum.LinkType.CONDUIT)
def is_pump(self):
return (self._model.getLinkType(self._linkid) is shared_enum.LinkType.PUMP)
def is_orifice(self):
return (self._model.getLinkType(self._linkid) is shared_enum.LinkType.ORIFICE)
def is_weir(self):
return (self._model.getLinkType(self._linkid) is shared_enum.LinkType.WEIR)
def is_outlet(self):
return (self._model.getLinkType(self._linkid) is shared_enum.LinkType.OUTLET)
def connections(self):
return self._model.getLinkConnections(self._linkid)
def inlet_node(self):
return self._model.getLinkConnections(self._linkid)[0]
def outlet_node(self):
return self._model.getLinkConnections(self._linkid)[1]
@property
def inlet_offset(self):
return self._model.getLinkParam(self._linkid, LinkParams.offset1.value)
@inlet_offset.setter
def inlet_offset(self, param):
self._model.setLinkParam(self._linkid, LinkParams.offset1.value, param)
@property
def outlet_offset(self):
return self._model.getLinkParam(self._linkid, LinkParams.offset2.value)
@outlet_offset.setter
def outlet_offset(self, param):
self._model.setLinkParam(self._linkid, LinkParams.offset2.value, param)
@property
def initial_flow(self):
return self._model.getLinkParam(self._linkid, LinkParams.q0.value)
@initial_flow.setter
def initial_flow(self, param):
self._model.setLinkParam(self._linkid, LinkParams.q0.value, param)
@property
def flow_limit(self):
return self._model.getLinkParam(self._linkid, LinkParams.qLimit.value)
@flow_limit.setter
def flow_limit(self, param):
self._model.setLinkParam(self._linkid, LinkParams.qLimit.value, param)
@property
def inlet_head_loss(self):
return self._model.getLinkParam(self._linkid, LinkParams.cLossInlet.value)
@inlet_head_loss.setter
def inlet_head_loss(self, param):
self._model.setLinkParam(self._linkid, LinkParams.cLossInlet.value, param)
@property
def outlet_head_loss(self):
return self._model.getLinkParam(self._linkid, LinkParams.cLossOutlet.value)
@outlet_head_loss.setter
def outlet_head_loss(self, param):
self._model.setLinkParam(self._linkid, LinkParams.cLossOutlet.value, param)
@property
def average_head_loss(self):
return self._model.getLinkParam(self._linkid, LinkParams.cLossAvg.value)
@average_head_loss.setter
def average_head_loss(self, param):
self._model.setLinkParam(self._linkid, LinkParams.cLossAvg.value, param)
@property
def seepage_rate(self):
return self._model.getLinkParam(self._linkid, LinkParams.seepRate.value)
@seepage_rate.setter
def seepage_rate(self, param):
self._model.setLinkParam(self._linkid, LinkParams.seepRate.value, param)
def flow(self):
return self._model.getLinkResult(self._linkid, LinkResults.newFlow.value)
def depth(self):
return self._model.getLinkResult(self._linkid, LinkResults.newDepth.value)
def volume(self):
return self._model.getLinkResult(self._linkid, LinkResults.newVolume.value)
def froude(self):
return self._model.getLinkResult(self._linkid, LinkResults.froude.value)
def ups_xsection_area(self):
return self._model.getLinkResult(self._linkid, LinkResults.surfArea1.value)
def ds_xsection_area(self):
return self._model.getLinkResult(self._linkid, LinkResults.surfArea2.value)
def current_setting(self):
return self._model.getLinkResult(self._linkid, LinkResults.setting.value)
@property
def target_setting(self):
return self._model.getLinkResult(self._linkid, LinkResults.targetSetting.value)
@target_setting.setter
def target_setting(self, setting):
return self._model.setLinkSetting(self._linkid, setting)
@property
def pollut_quality(self):
out_dict = {}
pollut_ids = self._model.getObjectIDList(ObjectType.POLLUT.value)
quality_array = self._model.getLinkPollut(self._linkid, LinkPollut.linkQual.value)
for ind in range(len(pollut_ids)):
out_dict[pollut_ids[ind]] = quality_array[ind]
return out_dict
@pollut_quality.setter
def pollut_quality(self, args):
(pollutant_ID, pollutant_value) = args
self._model.setLinkPollut(self._linkid, pollutant_ID, pollutant_value)
def total_loading(self):
out_dict = {}
pollut_ids = self._model.getObjectIDList(ObjectType.POLLUT.value)
totalLoad_array = self._model.getLinkPollut(self._linkid, LinkPollut.totalLoad.value)
for ind in range(len(pollut_ids)):
out_dict[pollut_ids[ind]] = totalLoad_array[ind]
return out_dict
def reactor_quality(self):
out_dict = {}
pollut_ids = self._model.getObjectIDList(ObjectType.POLLUT.value)
quality_array = self._model.getLinkPollut(self._linkid, LinkPollut.reactorQual.value)
for ind in range(len(pollut_ids)):
out_dict[pollut_ids[ind]] = quality_array[ind]
return out_dict |
class UnmarshallingProcessor(UnmarshallingIntegration[(RequestType, ResponseType)]):
def handle_request(self, request: RequestType, valid_handler: ValidRequestHandlerCallable[ResponseType], errors_handler: ErrorsHandlerCallable[ResponseType]) -> ResponseType:
request_unmarshal_result = self.unmarshal_request(request)
if request_unmarshal_result.errors:
return errors_handler(request_unmarshal_result.errors)
return valid_handler(request_unmarshal_result)
def handle_response(self, request: RequestType, response: ResponseType, errors_handler: ErrorsHandlerCallable[ResponseType]) -> ResponseType:
response_unmarshal_result = self.unmarshal_response(request, response)
if response_unmarshal_result.errors:
return errors_handler(response_unmarshal_result.errors)
return response |
@pytest.mark.parametrize('ident', ('.', '...', ':::', 'a:::c', 'a+-b', '\\nhe\\\\l\\lo\\n\\t\\rbye', 'a/b', '', 'aacc', 'a[bcd]', '1234', '1234abcd', '1234and', 'notandor', 'not_and_or', 'not[and]or', '1234+5678', '123.232', 'True', 'False', 'None', 'if', 'else', 'while'))
def test_valid_idents(ident: str) -> None:
assert evaluate(ident, {ident: True}.__getitem__) |
def bech32_decode(bech, ignore_long_length=False):
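# Validate printable-ASCII, single-case input, locate the last '1' separator,
# decode the data part against the bech32 charset, and verify the checksum;
# returns (hrp, data) with the 6 checksum symbols stripped.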
if (any((((ord(x) < 33) or (ord(x) > 126)) for x in bech)) or ((bech.lower() != bech) and (bech.upper() != bech))):
return (None, None)
bech = bech.lower()
pos = bech.rfind('1')
if ((pos < 1) or ((pos + 7) > len(bech)) or ((not ignore_long_length) and (len(bech) > 90))):
return (None, None)
if (not all(((x in CHARSET) for x in bech[(pos + 1):]))):
return (None, None)
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[(pos + 1):]]
if (not bech32_verify_checksum(hrp, data)):
return (None, None)
return (hrp, data[:(- 6)]) |
def tensorboard(logdir: str, image: str=torchx.IMAGE, timeout: float=(60 * 60), port: int=6006, start_on_file: str='', exit_on_file: str='') -> specs.AppDef:
return specs.AppDef(name='tensorboard', roles=[specs.Role(name='tensorboard', image=image, entrypoint='python', args=['-m', 'torchx.apps.utils.process_monitor', '--timeout', str(timeout), '--start_on_file', start_on_file, '--exit_on_file', exit_on_file, '--', 'tensorboard', '--bind_all', '--port', str(port), '--logdir', logdir], port_map={'http': port})]) |
class TestWMS(unittest.TestCase):
def setUp(self):
pass
def test_WMS_OSM(self):
try:
m = Maps(Maps.CRS.GOOGLE_MERCATOR)
m.add_wms.OpenStreetMap.add_layer.default()
plt.close(m.f)
except requests.exceptions.ConnectionError:
warnings.warn('Encountered a connection error for OSM')
except requests.exceptions.ConnectTimeout:
warnings.warn('Encountered a connection timeout for OSM')
def test_WMS_S1GBM(self):
try:
m = Maps(Maps.CRS.GOOGLE_MERCATOR)
m.add_wms.S1GBM.add_layer.vv()
plt.close(m.f)
except requests.exceptions.ConnectionError:
warnings.warn('Encountered a connection error for S1GBM')
except requests.exceptions.ConnectTimeout:
warnings.warn('Encountered a connection timeout for S1GBM')
def test_WMS_ESA_WorldCover(self):
try:
m = Maps(Maps.CRS.GOOGLE_MERCATOR)
m.add_feature.preset.coastline()
ESA_layer = m.add_wms.ESA_WorldCover.add_layer.WORLDCOVER_2020_MAP
ESA_layer.set_extent_to_bbox()
ESA_layer.info
ESA_layer()
plt.close(m.f)
except requests.exceptions.ConnectionError:
warnings.warn('Encountered a connection error for ESA_WorldCover')
except requests.exceptions.ConnectTimeout:
warnings.warn('Encountered a connection timeout for ESA_WorldCover')
def test_ArcGIS_REST_API(self):
try:
m = Maps(Maps.CRS.GOOGLE_MERCATOR)
m.add_feature.preset.ocean(ec='k', zorder=100)
hillshade = m.add_wms.ESRI_ArcGIS.Elevation.Elevation_World_Hillshade
hillshade.add_layer.xyz_layer()
plt.close(m.f)
except requests.exceptions.ConnectionError:
warnings.warn('Encountered a connection error for ArcGIS_REST_API')
except requests.exceptions.ConnectTimeout:
warnings.warn('Encountered a connection timeout for ArcGIS_REST_API')
def test_WMS_legend_capabilities_NASA_GIBS(self):
try:
m = Maps(4326)
m.add_feature.preset.coastline()
NASA_layer = m.add_wms.NASA_GIBS.EPSG_4326.add_layer.AIRS_L2_Cloud_Top_Height_Night
NASA_layer.set_extent_to_bbox()
NASA_layer.info
NASA_layer(transparent=True)
NASA_layer.add_legend()
legax = m.f.axes[(- 1)]
leg_cpos = (((legax.bbox.x0 + legax.bbox.x1) / 2), ((legax.bbox.y0 + legax.bbox.y1) / 2))
button_press_event(m.f.canvas, *leg_cpos, 1, False)
scroll_event(m.f.canvas, *leg_cpos, 20, False)
motion_notify_event(m.f.canvas, ((m.ax.bbox.x0 + m.ax.bbox.x1) / 2), ((m.ax.bbox.y0 + m.ax.bbox.y1) / 2), None)
button_press_event(m.f.canvas, 0, 0, 1, False)
plt.close(m.f)
except requests.exceptions.ConnectionError:
warnings.warn('Encountered a connection error for NASA_GIBS')
except requests.exceptions.ConnectTimeout:
warnings.warn('Encountered a connection timeout for NASA_GIBS')
except xml.etree.ElementTree.ParseError:
warnings.warn('Encountered a ParseError for NASA_GIBS legend') |
class Optimizer(object):
def __init__(self, method, learning_rate, learning_rate2, max_grad_norm, lr_decay=1, start_decay_steps=None, decay_steps=None, beta1=0.9, beta2=0.999, adagrad_accum=0.0, decay_method=None, warmup_steps=4000, warmup_steps2=4000, model_size=None):
self.last_ppl = None
self.learning_rate = learning_rate
self.original_lr = learning_rate
self.original_lr2 = learning_rate2
self.max_grad_norm = max_grad_norm
self.method = method
self.lr_decay = lr_decay
self.start_decay_steps = start_decay_steps
self.decay_steps = decay_steps
self.start_decay = False
self._task_step = 0
self._task2_step = 0
self.betas = [beta1, beta2]
self.adagrad_accum = adagrad_accum
self.decay_method = decay_method
self.warmup_steps = warmup_steps
self.warmup_steps2 = warmup_steps2
self.model_size = model_size
def set_parameters(self, params):
self.params = []
self.sparse_params = []
for (k, p) in params:
if p.requires_grad:
if ((self.method != 'sparseadam') or ('embed' not in k)):
self.params.append(p)
else:
self.sparse_params.append(p)
if (self.method == 'sgd'):
self.optimizer = optim.SGD(self.params, lr=self.learning_rate)
elif (self.method == 'adagrad'):
self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)
for group in self.optimizer.param_groups:
for p in group['params']:
self.optimizer.state[p]['sum'] = self.optimizer.state[p]['sum'].fill_(self.adagrad_accum)
elif (self.method == 'adadelta'):
self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)
elif (self.method == 'adam'):
self.optimizer = optim.Adam(self.params, lr=self.learning_rate, betas=self.betas, eps=1e-09)
elif (self.method == 'sparseadam'):
self.optimizer = MultipleOptimizer([optim.Adam(self.params, lr=self.learning_rate, betas=self.betas, eps=1e-08), optim.SparseAdam(self.sparse_params, lr=self.learning_rate, betas=self.betas, eps=1e-08)])
else:
raise RuntimeError(('Invalid optim method: ' + self.method))
def _set_rate(self, learning_rate):
self.learning_rate = learning_rate
if (self.method != 'sparseadam'):
self.optimizer.param_groups[0]['lr'] = self.learning_rate
else:
for op in self.optimizer.optimizers:
op.param_groups[0]['lr'] = self.learning_rate
def step(self, task_type):
if (task_type == 'task'):
self._task_step += 1
_step = self._task_step
warmup_steps = self.warmup_steps
original_lr = self.original_lr
else:
self._task2_step += 1
_step = self._task2_step
warmup_steps = self.warmup_steps2
original_lr = self.original_lr2
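# "Noam" schedule (Attention Is All You Need):
# lr = base_lr * model_size^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)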
if (self.decay_method == 'noam'):
self._set_rate((original_lr * ((self.model_size ** (- 0.5)) * min((_step ** (- 0.5)), (_step * (warmup_steps ** (- 1.5)))))))
else:
if ((self.start_decay_steps is not None) and (_step >= self.start_decay_steps)):
self.start_decay = True
if self.start_decay:
if (((_step - self.start_decay_steps) % self.decay_steps) == 0):
self.learning_rate = (self.learning_rate * self.lr_decay)
if (self.method != 'sparseadam'):
self.optimizer.param_groups[0]['lr'] = self.learning_rate
if self.max_grad_norm:
clip_grad_norm_(self.params, self.max_grad_norm)
self.optimizer.step() |
class Logger(object):
DEFAULT = None
CURRENT = None
def __init__(self, dir, output_formats):
self.name2val = defaultdict(float)
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
if (val is None):
self.name2val[key] = None
return
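# Incremental running mean: new = old * n / (n + 1) + val / (n + 1).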
(oldval, cnt) = (self.name2val[key], self.name2cnt[key])
self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
self.name2cnt[key] = (cnt + 1)
def dumpkvs(self):
if (self.level == DISABLED):
return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
if (self.level <= level):
self._do_log(args)
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args)) |
def test_ki_protection_works() -> None:
async def sleeper(name: str, record: set[str]) -> None:
try:
while True:
(await _core.checkpoint())
except _core.Cancelled:
record.add((name + ' ok'))
async def raiser(name: str, record: set[str]) -> None:
try:
print('killing, protection =', _core.currently_ki_protected())
ki_self()
except KeyboardInterrupt:
print('raised!')
(await _core.checkpoint())
record.add((name + ' raise ok'))
raise
else:
print("didn't raise!")
try:
(await _core.wait_task_rescheduled((lambda _: _core.Abort.SUCCEEDED)))
except _core.Cancelled:
record.add((name + ' cancel ok'))
print('check 1')
record_set: set[str] = set()
async def check_unprotected_kill() -> None:
async with _core.open_nursery() as nursery:
nursery.start_soon(sleeper, 's1', record_set)
nursery.start_soon(sleeper, 's2', record_set)
nursery.start_soon(raiser, 'r1', record_set)
with pytest.raises(KeyboardInterrupt):
_core.run(check_unprotected_kill)
assert (record_set == {'s1 ok', 's2 ok', 'r1 raise ok'})
print('check 2')
record_set = set()
async def check_protected_kill() -> None:
async with _core.open_nursery() as nursery:
nursery.start_soon(sleeper, 's1', record_set)
nursery.start_soon(sleeper, 's2', record_set)
nursery.start_soon(_core.enable_ki_protection(raiser), 'r1', record_set)
with pytest.raises(KeyboardInterrupt):
_core.run(check_protected_kill)
assert (record_set == {'s1 ok', 's2 ok', 'r1 cancel ok'})
print('check 3')
async def check_kill_during_shutdown() -> None:
token = _core.current_trio_token()
def kill_during_shutdown() -> None:
assert _core.currently_ki_protected()
try:
token.run_sync_soon(kill_during_shutdown)
except _core.RunFinishedError:
print('kill! kill!')
ki_self()
token.run_sync_soon(kill_during_shutdown)
with pytest.raises(KeyboardInterrupt):
_core.run(check_kill_during_shutdown)
print('check 4')
class InstrumentOfDeath(Instrument):
def before_run(self) -> None:
ki_self()
async def main_1() -> None:
(await _core.checkpoint())
with pytest.raises(KeyboardInterrupt):
_core.run(main_1, instruments=[InstrumentOfDeath()])
print('check 5')
@_core.enable_ki_protection
async def main_2() -> None:
assert _core.currently_ki_protected()
ki_self()
with pytest.raises(KeyboardInterrupt):
(await _core.checkpoint_if_cancelled())
_core.run(main_2)
print('check 6')
@_core.enable_ki_protection
async def main_3() -> None:
assert _core.currently_ki_protected()
ki_self()
(await _core.cancel_shielded_checkpoint())
(await _core.cancel_shielded_checkpoint())
(await _core.cancel_shielded_checkpoint())
with pytest.raises(KeyboardInterrupt):
(await _core.checkpoint())
_core.run(main_3)
print('check 7')
@_core.enable_ki_protection
async def main_4() -> None:
assert _core.currently_ki_protected()
ki_self()
task = _core.current_task()
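# Abort.FAILED signals that the abort did not take effect, so the task must be rescheduled manually (here with the value 1).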
def abort(_: RaiseCancelT) -> Abort:
_core.reschedule(task, outcome.Value(1))
return _core.Abort.FAILED
assert ((await _core.wait_task_rescheduled(abort)) == 1)
with pytest.raises(KeyboardInterrupt):
(await _core.checkpoint())
_core.run(main_4)
print('check 8')
@_core.enable_ki_protection
async def main_5() -> None:
assert _core.currently_ki_protected()
ki_self()
task = _core.current_task()
def abort(raise_cancel: RaiseCancelT) -> Abort:
result = outcome.capture(raise_cancel)
_core.reschedule(task, result)
return _core.Abort.FAILED
with pytest.raises(KeyboardInterrupt):
assert (await _core.wait_task_rescheduled(abort))
(await _core.checkpoint())
_core.run(main_5)
print('check 9')
@_core.enable_ki_protection
async def main_6() -> None:
ki_self()
with pytest.raises(KeyboardInterrupt):
_core.run(main_6)
print('check 10')
record_list = []
async def main_7() -> None:
assert (not _core.currently_ki_protected())
ki_self()
record_list.append('ok')
with pytest.raises(KeyboardInterrupt):
(await sleep(10))
_core.run(main_7, restrict_keyboard_interrupt_to_checkpoints=True)
assert (record_list == ['ok'])
record_list = []
with pytest.raises(KeyboardInterrupt):
_core.run(main_7)
assert (record_list == [])
print('check 11')
@_core.enable_ki_protection
async def main_8() -> None:
assert _core.currently_ki_protected()
with _core.CancelScope() as cancel_scope:
cancel_scope.cancel()
with pytest.raises(_core.Cancelled):
(await _core.checkpoint())
ki_self()
with pytest.raises(KeyboardInterrupt):
(await _core.checkpoint())
with pytest.raises(_core.Cancelled):
(await _core.checkpoint())
_core.run(main_8) |
class PlatiPyClient():
def __init__(self, host, port, api_key, algorithm_name, verify=None):
protocol = 'https'
if (verify is None):
logger.warning('Running without SSL. Not Suitable for Production.')
protocol = 'http'
elif (not os.path.exists(verify)):
raise FileNotFoundError('Verify Certificate file not found')
self.verify = verify
self.base_url = f'{protocol}://{host}:{port}'
logger.info('Initializing client with URL: %s', self.base_url)
self.api_key = api_key
self.algorithm_name = algorithm_name
res = requests.get(API_ALGORITHM.format(self.base_url), headers={'API_KEY': self.api_key}, verify=self.verify)
logger.debug(res.status_code)
def get_dicom_location(self, name):
res = requests.get(API_DICOM_LOCATION.format(self.base_url), headers={'API_KEY': self.api_key}, verify=self.verify)
logger.debug(res.status_code)
for location in res.json():
if (location['name'] == name):
logger.debug(pformat(location))
return location
return None
def add_dicom_location(self, name, host, port, ae_title=None):
location = self.get_dicom_location(name)
if location:
logger.info("Location with name '%s' already exists", name)
return location
params = {'name': name, 'host': host, 'port': port}
if ae_title:
params['ae_title'] = ae_title
res = requests.post(API_DICOM_LOCATION.format(self.base_url), headers={'API_KEY': self.api_key}, data=params, verify=self.verify)
logger.debug(res.status_code)
if ((res.status_code >= 200) and (res.status_code < 300)):
logger.info('Added Location')
location = res.json()
logger.debug(pformat(location))
return location
return None
def get_dataset(self, dataset):
params = {'dataset': dataset}
if isinstance(dataset, dict):
params['dataset'] = dataset['id']
res = requests.get('{0}/{1}'.format(API_DATASET.format(self.base_url), params['dataset']), headers={'API_KEY': self.api_key}, verify=self.verify)
logger.debug(res.status_code)
if (res.status_code == 200):
dataset = res.json()
logger.debug(pformat(dataset))
return dataset
return None
def get_dataset_ready(self, dataset):
params = {'dataset': dataset}
if isinstance(dataset, dict):
params['dataset'] = dataset['id']
res = requests.get('{0}/{1}'.format(API_DATASET_READY.format(self.base_url), params['dataset']), headers={'API_KEY': self.api_key}, verify=self.verify)
logger.debug(res.status_code)
if (res.status_code == 200):
result = res.json()
logger.debug(pformat(result))
return result['ready']
return None
def add_dataset(self, from_dicom_location=None, to_dicom_location=None, timeout=None):
params = {}
if from_dicom_location:
params['from_dicom_location'] = from_dicom_location
if isinstance(from_dicom_location, dict):
params['from_dicom_location'] = from_dicom_location['id']
if to_dicom_location:
params['to_dicom_location'] = to_dicom_location
if isinstance(to_dicom_location, dict):
params['to_dicom_location'] = to_dicom_location['id']
if timeout:
params['timeout'] = timeout
res = requests.post(API_DATASET.format(self.base_url), headers={'API_KEY': self.api_key}, data=params, verify=self.verify)
logger.debug(res.status_code)
if ((res.status_code >= 200) and (res.status_code < 300)):
logger.info('Added Dataset')
dataset = res.json()
logger.debug(pformat(dataset))
return dataset
return None
def add_data_object(self, dataset, series_uid=None, parent=None, meta_data=None, dicom_retrieve=None, file_path=None):
data_object = None
params = {'dataset': dataset}
if isinstance(dataset, dict):
params['dataset'] = dataset['id']
if parent:
params['parent'] = parent
if isinstance(parent, dict):
params['parent'] = parent['id']
if meta_data:
params['meta_data'] = json.dumps(meta_data)
if (series_uid or dicom_retrieve):
if ((not series_uid) or (not dicom_retrieve)):
logger.error('For Dicom, both series_uid and dicom_retrieve must be set')
return None
params['type'] = 'DICOM'
params['seriesUID'] = series_uid
params['dicom_retrieve'] = dicom_retrieve
res = requests.post(API_DATA_OBJECT.format(self.base_url), headers={'API_KEY': self.api_key}, data=params, verify=self.verify)
logger.debug(res.status_code)
if ((res.status_code >= 200) and (res.status_code < 300)):
data_object = res.json()
else:
if (not file_path):
logger.error('For a file, provide the file_path')
return None
params['type'] = 'FILE'
params['file_name'] = os.path.basename(file_path)
with open(file_path, 'rb') as file_handle:
res = requests.post(API_DATA_OBJECT.format(self.base_url), headers={'API_KEY': self.api_key}, data=params, files={'file_data': file_handle}, verify=self.verify)
logger.debug(res.status_code)
if ((res.status_code >= 200) and (res.status_code < 300)):
data_object = res.json()
logger.debug(pformat(data_object))
return data_object
def get_default_settings(self):
algorithm = None
res = requests.get(API_ALGORITHM.format(self.base_url), headers={'API_KEY': self.api_key}, verify=self.verify)
logger.debug(res.status_code)
if (res.status_code == 200):
for algorithm in res.json():
if (self.algorithm_name in algorithm['name']):
logger.debug(pformat(algorithm))
if ('default_settings' in algorithm):
return algorithm['default_settings']
logger.error('No default_settings provided by algorithm')
return None
def run_algorithm(self, dataset, config=None):
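# Generator: triggers the algorithm run, then polls the returned URL once per second, yielding each status until it reaches SUCCESS or FAILURE.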
params = {'dataset': dataset, 'algorithm': self.algorithm_name}
if isinstance(dataset, dict):
params['dataset'] = dataset['id']
if config:
default_settings = self.get_default_settings()
if (not (set(default_settings.keys()) == set(config.keys()))):
logger.error('Config keys must be exactly those from the default_settings')
return
params['config'] = json.dumps(config)
res = requests.post(API_TRIGGER.format(self.base_url), headers={'API_KEY': self.api_key}, data=params, verify=self.verify)
logger.debug(res.status_code)
if (res.status_code == 200):
poll_url = '{0}{1}'.format(self.base_url, res.json()['poll'])
while True:
res = requests.get(poll_url, headers={'API_KEY': self.api_key}, verify=self.verify)
status = res.json()
if ((not ('state' in status)) or (status['state'] == 'SUCCESS') or (status['state'] == 'FAILURE')):
break
(yield status)
time.sleep(1)
else:
logger.error(res.json())
logger.info('Algorithm Processing Complete')
def download_output_objects(self, dataset, output_path='.'):
if (not os.path.exists(output_path)):
logger.info('Creating directory')
os.makedirs(output_path)
dataset = self.get_dataset(dataset)
if dataset:
for data_obj in dataset['output_data_objects']:
url = API_DOWNLOAD_OBJECT.format(self.base_url)
res = requests.get('{0}/{1}'.format(url, data_obj['id']), headers={'API_KEY': self.api_key}, verify=self.verify)
logger.debug(res.status_code)
filename = res.headers['Content-Disposition'].split('filename=')[1]
output_file = os.path.join(output_path, filename)
logger.info('Downloading to: %s', output_file)
with open(output_file, 'wb') as out_file:
out_file.write(res.content) |
def parse_hp_block_header(block: Union[(bytes, bytearray)], is_big_endian: bool, length_before_block: Optional[int]=None, raise_on_late_block: bool=False) -> Tuple[(int, int)]:
begin = block.find(b'#A')
if (begin < 0):
raise ValueError(('Could not find the standard block header (#A) indicating the start of the block. The block begins with %r' % block[:25]))
length_before_block = (length_before_block or DEFAULT_LENGTH_BEFORE_BLOCK)
if (begin > length_before_block):
msg = ('The beginning of the block was found at %d, which is an unexpectedly large offset. The actual block may be missing its beginning marker, and the marker that was found may simply be part of the data:\n%s' % (begin, repr(block)))
if raise_on_late_block:
raise RuntimeError(msg)
else:
warnings.warn(msg, UserWarning)
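# Block layout: '#A' marker, then a 2-byte length field; the payload starts 4 bytes past the marker.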
offset = (begin + 4)
data_length = int.from_bytes(block[(begin + 2):offset], byteorder=('big' if is_big_endian else 'little'))
return (offset, data_length) |
@pytest.mark.parametrize('nelec, nx', ((2, 10), (6, 8), (8, 12)))
def test_potential_bloq(nelec, nx):
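# The 1D grid has an odd number of points (2 * nx + 1); bitsize adds one extra bit beyond what the largest grid index needs.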
ngrid_x = ((2 * nx) + 1)
bitsize = ((ngrid_x - 1).bit_length() + 1)
poly_bitsize = 15
pe = PotentialEnergy(nelec, ngrid_x)
qlt_testing.assert_valid_bloq_decomposition(pe)
poly_coeffs = get_inverse_square_root_poly_coeffs()
qrom_data = build_qrom_data_for_poly_fit(((2 * bitsize) + 2), poly_bitsize, poly_coeffs)
qrom_data = tuple((tuple((int(k) for k in d)) for d in qrom_data))
pp = PairPotential(bitsize=bitsize, qrom_data=qrom_data, poly_bitsize=pe.poly_bitsize)
qlt_testing.assert_valid_bloq_decomposition(pp)
fac = ((nelec * (nelec - 1)) // 2)
assert ((fac * pp.t_complexity().t) == pe.t_complexity().t) |
class AwaitLayer(MergeLayer):
def __init__(self, incoming, layer_to_await, **kwargs):
super(AwaitLayer, self).__init__([incoming, layer_to_await], **kwargs)
def get_output_for(self, inputs, **kwargs):
return inputs[0]
def get_output_shape_for(self, input_shapes, **kwargs):
return input_shapes[0] |
class lmdbDataset(Dataset):
def __init__(self, root=None, transform=None, target_transform=None):
self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False)
if (not self.env):
print(('cannot create lmdb from %s' % root))
sys.exit(1)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'.encode()))
print('nSamples:{}'.format(nSamples))
self.nSamples = nSamples
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert (index <= len(self)), 'index range error'
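# lmdb keys ('image-%09d') are 1-indexed, so shift the 0-based dataset index.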
index += 1
with self.env.begin(write=False) as txn:
img_key = ('image-%09d' % index)
imgbuf = txn.get(img_key.encode())
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('L')
except IOError:
print(('Corrupted image for %d' % index))
if (index > (self.nSamples - 1)):
index = 0
return self[(index + 1)]
if (self.transform is not None):
img = self.transform(img)
label_key = ('label-%09d' % index)
label = str(txn.get(label_key.encode()), 'utf-8')
if (self.target_transform is not None):
label = self.target_transform(label)
return (img, label) |
class DummyConnection():
description_format = 'DummyConnection<>'
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
def connect(self):
pass
def can_read(self):
return False
def send_command(self, command):
pass
def read_response(self):
return ['OK', 'OK']
def pack_commands(self, *args):
pass
def send_packed_command(self, *args):
pass
def disconnect(self):
pass |
def compute_dense_reward(self, action: np.ndarray):
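# Dense reward: attract the end-effector to the handle (with a log-shaped near-field term), penalize action magnitude and part velocities, reward handle progress, and add staged bonuses.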
reward = 0
ee_coords = np.array(self.robot.get_ee_coords())
handle_pcd = self.cabinet.handle.get_world_pcd()
dist_ees_to_handle = sdist.cdist(ee_coords.reshape((- 1), 3), handle_pcd)
dist_ees_to_handle = dist_ees_to_handle.min(0)
dist_ee_to_handle = dist_ees_to_handle.mean()
log_dist_ee_to_handle = np.log((dist_ee_to_handle + 1e-05))
reward += ((- dist_ee_to_handle) - np.clip(log_dist_ee_to_handle, (- 10), 0))
action_norm = np.linalg.norm(action)
reward -= (action_norm * 1e-06)
qpos_change = (self.cabinet.handle.qpos - self.cabinet.handle.target_qpos)
reward += (qpos_change * 0.1)
handle_vel_norm = np.linalg.norm(self.cabinet.handle.velocity)
reward -= (handle_vel_norm * 0.01)
cabinet_vel_norm = np.linalg.norm(self.cabinet.velocity)
reward -= (cabinet_vel_norm * 0.01)
stage_reward = (- 10)
if (dist_ee_to_handle < 0.1):
stage_reward += 2
if (self.cabinet.handle.qpos >= self.cabinet.handle.target_qpos):
stage_reward += 8
reward += stage_reward
return reward |
class GateSetBasis():
def __init__(self, name: str, gates: Dict[(str, Union[(Callable, Gate)])], spam: Dict[(str, Tuple[(str, ...)])]):
self.name = name
self.gate_labels = list(gates.keys())
self.gates = gates
self.gate_matrices = {name: np.real(self._gate_matrix(gate)) for (name, gate) in gates.items()}
self.spam_labels = tuple(sorted(spam.keys()))
self.spam_spec = spam
def _gate_matrix(self, gate):
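# Represent the gate as a Pauli transfer matrix (PTM); callables are first applied to a one-qubit circuit.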
if isinstance(gate, Gate):
return PTM(gate).data
if callable(gate):
c = QuantumCircuit(1)
gate(c, c.qubits[0])
return PTM(c).data
return None
def add_gate(self, gate: Union[(Callable, Gate)], name: Optional[str]=None):
if (name is None):
if isinstance(gate, Gate):
name = gate.name
else:
raise RuntimeError('Gate name is missing')
self.gate_labels.append(name)
self.gates[name] = gate
self.gate_matrices[name] = self._gate_matrix(gate)
def add_gate_to_circuit(self, circ: QuantumCircuit, qubit: QuantumRegister, op: str):
if (op not in self.gates):
raise RuntimeError('{} is not a gate in this basis'.format(op))
gate = self.gates[op]
if callable(gate):
gate(circ, qubit)
if isinstance(gate, Gate):
circ.append(gate, [qubit], [])
def add_spam_to_circuit(self, circ: QuantumCircuit, qubit: QuantumRegister, op: str):
if (op not in self.spam_spec):
raise RuntimeError('{} is not a SPAM circuit'.format(op))
op_gates = self.spam_spec[op]
for gate_name in op_gates:
self.add_gate_to_circuit(circ, qubit, gate_name)
def measurement_circuit(self, op: str, qubit: QuantumRegister, clbit: ClassicalRegister) -> QuantumCircuit:
circ = QuantumCircuit([qubit, clbit])
self.add_spam_to_circuit(circ, qubit, op)
circ.measure(qubit, clbit)
return circ
def measurement_matrix(self, label: str) -> np.array:
return self.gate_matrices[label]
def preparation_circuit(self, op: str, qubit: QuantumRegister) -> QuantumCircuit:
circ = QuantumCircuit([qubit])
self.add_spam_to_circuit(circ, qubit, op)
return circ
def preparation_matrix(self, label: str) -> np.array:
return self.gate_matrices[label]
def spam_matrix(self, label: str) -> np.array:
spec = self.spam_spec[label]
f_matrices = [self.gate_matrices[gate_label] for gate_label in spec]
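# Compose the SPAM sequence by matrix-multiplying the PTMs of its constituent gates.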
result = functools.reduce((lambda a, b: (a @ b)), f_matrices)
return result
def get_tomography_basis(self) -> TomographyBasis:
return TomographyBasis(self.name, measurement=(self.spam_labels, self.measurement_circuit, self.measurement_matrix), preparation=(self.spam_labels, self.preparation_circuit, self.preparation_matrix)) |
@bdd.when(bdd.parsers.parse('I setup a fake editor replacing "{text}" by "{replacement}"'))
def set_up_editor_replacement(quteproc, server, tmpdir, text, replacement):
text = text.replace('(port)', str(server.port))
script = (tmpdir / 'script.py')
script.write(textwrap.dedent('\n import sys\n\n with open(sys.argv[1], encoding=\'utf-8\') as f:\n data = f.read()\n\n data = data.replace("{text}", "{replacement}")\n\n with open(sys.argv[1], \'w\', encoding=\'utf-8\') as f:\n f.write(data)\n '.format(text=text, replacement=replacement)))
editor = json.dumps([sys.executable, str(script), '{}'])
quteproc.set_setting('editor.command', editor) |
class MainWindow(TemplateBaseClass):
def __init__(self):
TemplateBaseClass.__init__(self)
self.setWindowTitle('pyqtgraph example: Qt Designer')
self.ui = WindowTemplate()
self.ui.setupUi(self)
self.ui.plotBtn.clicked.connect(self.plot)
self.show()
def plot(self):
self.ui.plot.plot(np.random.normal(size=100), clear=True) |
class TFDistributionGaussianDiag(TFDistribution):
class StdType(Enum):
Default = 0
Constant = 1
Variable = 2
@staticmethod
def identity(dim, name='identity'):
mean = np.zeros(dim)
logstd = np.zeros(dim)
dist = TFDistributionGaussianDiag(input=None, dim=dim, std_type=TFDistributionGaussianDiag.StdType.Default, mean_kernel_init=None, mean_bias_init=None, logstd_kernel_init=None, logstd_bias_init=None, name=name, direct_mean=mean, direct_logstd=logstd)
return dist
@staticmethod
def from_params(mean, logstd, name='dist_gauss_diag'):
dim = int(mean.shape[(- 1)])
assert (dim == logstd.shape[(- 1)])
dist = TFDistributionGaussianDiag(input=None, dim=dim, std_type=TFDistributionGaussianDiag.StdType.Default, mean_kernel_init=None, mean_bias_init=None, logstd_kernel_init=None, logstd_bias_init=None, name=name, direct_mean=mean, direct_logstd=logstd)
return dist
def __init__(self, input, dim, std_type, mean_kernel_init=tf.contrib.layers.xavier_initializer(), mean_bias_init=tf.zeros_initializer(), logstd_kernel_init=tf.contrib.layers.xavier_initializer(), logstd_bias_init=tf.zeros_initializer(), name='dist_gauss_diag', direct_mean=None, direct_logstd=None, reuse=False):
super().__init__(input)
isinstance(logstd_bias_init, np.ndarray)
self._dim = dim
self._std_type = std_type
self._mean_kernel_init = mean_kernel_init
self._mean_bias_init = mean_bias_init
self._logstd_kernel_init = logstd_kernel_init
self._logstd_bias_init = logstd_bias_init
self._mean = direct_mean
self._logstd = direct_logstd
with tf.variable_scope(name, reuse=reuse):
self._build_params(reuse)
mean = self.get_mean()
logstd = self.get_logstd()
std = self.get_std()
mean_shape = mean.get_shape().as_list()
logstd_shape = logstd.get_shape().as_list()
std_shape = std.get_shape().as_list()
assert (mean_shape[(- 1)] == self._dim)
assert (logstd_shape[(- 1)] == self._dim)
assert (std_shape[(- 1)] == self._dim)
return
def get_dim(self):
return self._dim
def get_mean(self):
return self._mean
def set_mean(self, mean_tf):
self._mean = mean_tf
return
def get_logstd(self):
return self._logstd
def set_logstd(self, logstd_tf):
self._logstd = logstd_tf
return
def get_std(self):
return self._std
def flat_params(self):
mean_tf = self._mean
logstd_tf = self._logstd
mean_shape = mean_tf.get_shape().as_list()
logstd_shape = logstd_tf.get_shape().as_list()
if ((len(mean_shape) == 2) and (len(logstd_shape) == 1)):
mean_rows = tf.shape(mean_tf)[0]
logstd_tf = tf.reshape(logstd_tf, [1, logstd_shape[(- 1)]])
logstd_tf = tf.tile(logstd_tf, [mean_rows, 1])
else:
assert (len(mean_shape) == len(logstd_shape))
params = tf.concat([mean_tf, logstd_tf], axis=(- 1))
return params
def logp(self, x):
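# log N(x; m, s) = -0.5 * sum(((x - m) / s)^2) - 0.5 * dim * log(2*pi) - sum(log s)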
diff_tf = (x - self._mean)
logp_tf = ((- 0.5) * tf.reduce_sum(tf.square((diff_tf / self._std)), axis=(- 1)))
logp_tf += ((((- 0.5) * self._dim) * np.log((2.0 * np.pi))) - tf.reduce_sum(self._logstd, axis=(- 1)))
return logp_tf
def kl(self, other, eps=0):
assert isinstance(other, TFDistributionGaussianDiag)
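# KL(p || q) for diagonal Gaussians: sum(log s_q - log s_p + (s_p^2 + (m_p - m_q)^2) / (2 * s_q^2)) - dim / 2.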
other_var = tf.square(other.get_std())
if (eps > 0):
other_var = tf.maximum(other_var, (eps * eps))
kl_tf = tf.reduce_sum(((other.get_logstd() - self._logstd) + ((tf.square(self._std) + tf.square((self._mean - other.get_mean()))) / (2.0 * other_var))), axis=(- 1))
kl_tf += ((- 0.5) * self._dim)
return kl_tf
def kl_reg(self):
kl_tf = tf.reduce_sum(((- self._logstd) + (0.5 * (tf.square(self._std) + tf.square(self._mean)))), axis=(- 1))
kl_tf += ((- 0.5) * self._dim)
return kl_tf
def entropy(self):
ent_tf = self._calc_entropy(self._logstd)
return ent_tf
def sample(self):
shape_tf = tf.shape(self._mean)
noise = tf.random_normal(shape_tf)
samples_tf = self.sample_noise(noise)
return samples_tf
def sample_noise(self, noise):
samples_tf = (self._std * noise)
samples_tf += self._mean
return samples_tf
def sample_clip(self, noise_clip):
assert (noise_clip >= 0.0)
shape_tf = tf.shape(self._mean)
noise = tf.random_normal(shape_tf)
noise = tf.clip_by_value(noise, (- noise_clip), noise_clip)
samples_tf = self.sample_noise(noise)
return samples_tf
def sample_cond(self, cond):
cond_tf = cond
cond_shape = cond_tf.get_shape().as_list()
shape_tf = tf.shape(self._mean)
sample_tf = (self._std * tf.random_normal(shape_tf))
sample_shape = sample_tf.get_shape().as_list()
assert (len(sample_shape) == (len(cond_shape) + 1))
cond_tf = tf.expand_dims(cond_tf, axis=(- 1))
sample_tf = (cond_tf * sample_tf)
sample_tf += self._mean
return sample_tf
def get_mode(self):
return self.get_mean()
def param_reg_loss(self):
params_tf = self._mean
err = tf.reduce_sum(tf.square(params_tf), axis=(- 1))
loss = (0.5 * tf.reduce_mean(err))
return loss
def _build_params(self, reuse):
if (self._mean is None):
self._mean = self._build_params_mean(self._input, 'mean', reuse)
elif isinstance(self._mean, np.ndarray):
self._mean = tf.convert_to_tensor(self._mean, dtype=tf.float32)
if (self._logstd is None):
self._logstd = self._build_params_logstd(self._input, tf.shape(self._mean), 'logstd', reuse)
elif isinstance(self._logstd, np.ndarray):
self._logstd = tf.convert_to_tensor(self._logstd, dtype=tf.float32)
self._std = tf.exp(self._logstd)
return
def _build_params_mean(self, input, name, reuse):
mean = tf.layers.dense(inputs=input, units=self._dim, activation=None, kernel_initializer=self._mean_kernel_init, bias_initializer=self._mean_bias_init, name=name, reuse=reuse)
return mean
def _build_params_logstd(self, input, mean_shape, name, reuse):
if ((self._std_type == self.StdType.Default) or (self._std_type == self.StdType.Constant)):
with tf.variable_scope(name, reuse=reuse):
trainable = (self._std_type == self.StdType.Constant)
logstd = tf.get_variable(dtype=tf.float32, name='bias', initializer=self._logstd_bias_init, trainable=trainable)
logstd = tf.broadcast_to(logstd, mean_shape)
elif (self._std_type == self.StdType.Variable):
logstd = tf.layers.dense(inputs=input, units=self._dim, activation=None, kernel_initializer=self._logstd_kernel_init, bias_initializer=self._logstd_bias_init, name=name, reuse=reuse)
else:
assert False, 'Unsupported standard deviation type'
return logstd
def _calc_entropy(self, logstd):
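# Entropy of a diagonal Gaussian: sum(log s) + 0.5 * dim * log(2*pi*e).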
ent_tf = tf.reduce_sum(logstd, axis=(- 1))
ent_tf += ((0.5 * self._dim) * np.log(((2.0 * np.pi) * np.e)))
return ent_tf |
@add_metaclass(ABCMeta)
class SecurityScannerAPIInterface(object):
def state(self):
pass
def index(self, manifest, layers):
pass
def index_report(self, manifest_hash):
pass
def vulnerability_report(self, manifest_hash):
pass
def retrieve_notification_page(self, notification_id, next_param=None):
pass |
def convert_loras_to_safeloras_with_embeds(modelmap: Dict[(str, Tuple[(str, Set[str], int)])]={}, embeds: Dict[(str, torch.Tensor)]={}, outpath='./lora.safetensors'):
weights = {}
metadata = {}
for (name, (path, target_replace_module, r)) in modelmap.items():
metadata[name] = json.dumps(list(target_replace_module))
lora = torch.load(path)
for (i, weight) in enumerate(lora):
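# The loaded list alternates (up, down) weight pairs per injected module; even indices hold the 'up' weights.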
is_up = ((i % 2) == 0)
i = (i // 2)
if is_up:
metadata[f'{name}:{i}:rank'] = str(r)
weights[f'{name}:{i}:up'] = weight
else:
weights[f'{name}:{i}:down'] = weight
for (token, tensor) in embeds.items():
metadata[token] = EMBED_FLAG
weights[token] = tensor
print(f'Saving weights to {outpath}')
safe_save(weights, outpath, metadata) |
class TestReturn(TestNameCheckVisitorBase):
@assert_passes()
def test_type_inference(self):
from asynq import async_proxy, AsyncTask, asynq, ConstFuture, FutureBase
def returns_3():
return 3
@asynq(pure=True)
def pure_async_fn():
return 4
@asynq()
def async_fn():
return 3
class WithAProperty(object):
@property
def this_is_one(self):
return str(5)
@async_proxy(pure=True)
def pure_async_proxy(oid):
return ConstFuture(oid)
@async_proxy()
def impure_async_proxy():
return ConstFuture(3)
def capybara(oid):
assert_is_value(returns_3(), KnownValue(3))
assert_is_value(pure_async_fn(), AsyncTaskIncompleteValue(AsyncTask, KnownValue(4)))
assert_is_value(async_fn(), KnownValue(3))
assert_is_value(async_fn.asynq(), AsyncTaskIncompleteValue(AsyncTask, KnownValue(3)))
assert_is_value(WithAProperty().this_is_one, TypedValue(str))
assert_is_value(pure_async_proxy(oid), AnyValue(AnySource.unannotated))
assert_is_value(impure_async_proxy(), AnyValue(AnySource.unannotated))
assert_is_value(impure_async_proxy.asynq(), AsyncTaskIncompleteValue(FutureBase, AnyValue(AnySource.unannotated)))
@assert_fails(ErrorCode.missing_return)
def test_asynq_missing_return(self):
from asynq import asynq
@asynq()
def f() -> int:
(yield f.asynq())
@assert_fails(ErrorCode.missing_return)
def test_asynq_missing_branch(self):
from asynq import asynq
@asynq()
def capybara(cond: bool) -> int:
if cond:
return 3
(yield capybara.asynq(False)) |
def test_compose_1():
transform = tta.Compose([tta.HorizontalFlip(), tta.VerticalFlip(), tta.Rotate90(angles=[0, 90, 180, 270]), tta.Scale(scales=[1, 2, 4], interpolation='nearest')])
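# Full cartesian product of the transforms: 2 flips * 2 flips * 4 rotations * 3 scales = 48 augmenters.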
assert (len(transform) == (((2 * 2) * 4) * 3))
dummy_label = torch.ones(2).reshape(2, 1).float()
dummy_image = torch.arange((((2 * 3) * 4) * 5)).reshape(2, 3, 4, 5).float()
dummy_model = (lambda x: {'label': dummy_label, 'mask': x})
for augmenter in transform:
augmented_image = augmenter.augment_image(dummy_image)
model_output = dummy_model(augmented_image)
deaugmented_mask = augmenter.deaugment_mask(model_output['mask'])
deaugmented_label = augmenter.deaugment_label(model_output['label'])
assert torch.allclose(deaugmented_mask, dummy_image)
assert torch.allclose(deaugmented_label, dummy_label) |
def main():
Format()
(ex, ey, ez) = MV.setup('e*x|y|z')
A = MV('A', 'mv')
print('\\bm{A} =', A)
A.Fmt(2, '\\bm{A}')
A.Fmt(3, '\\bm{A}')
X = (x, y, z) = symbols('x y z')
(ex, ey, ez, grad) = MV.setup('e_x e_y e_z', metric='[1,1,1]', coords=X)
f = MV('f', 'scalar', fct=True)
A = MV('A', 'vector', fct=True)
B = MV('B', 'grade2', fct=True)
print('\\bm{A} =', A)
print('\\bm{B} =', B)
print('grad*f =', (grad * f))
print('grad|\\bm{A} =', (grad | A))
print('grad*\\bm{A} =', (grad * A))
print('-I*(grad^\\bm{A}) =', ((- MV.I) * (grad ^ A)))
print('grad*\\bm{B} =', (grad * B))
print('grad^\\bm{B} =', (grad ^ B))
print('grad|\\bm{B} =', (grad | B))
(a, b, c, d) = MV.setup('a b c d')
print('g_{ij} =', MV.metric)
print('\\bm{a|(b*c)} =', (a | (b * c)))
print('\\bm{a|(b^c)} =', (a | (b ^ c)))
print('\\bm{a|(b^c^d)} =', (a | ((b ^ c) ^ d)))
print('\\bm{a|(b^c)+c|(a^b)+b|(c^a)} =', (((a | (b ^ c)) + (c | (a ^ b))) + (b | (c ^ a))))
print('\\bm{a*(b^c)-b*(a^c)+c*(a^b)} =', (((a * (b ^ c)) - (b * (a ^ c))) + (c * (a ^ b))))
print('\\bm{a*(b^c^d)-b*(a^c^d)+c*(a^b^d)-d*(a^b^c)} =', ((((a * ((b ^ c) ^ d)) - (b * ((a ^ c) ^ d))) + (c * ((a ^ b) ^ d))) - (d * ((a ^ b) ^ c))))
print('\\bm{(a^b)|(c^d)} =', ((a ^ b) | (c ^ d)))
print('\\bm{((a^b)|c)|d} =', (((a ^ b) | c) | d))
print('\\bm{(a^b)\\times (c^d)} =', Ga.com((a ^ b), (c ^ d)))
metric = (('1 # #,' + '# 1 #,') + '# # 1')
(e1, e2, e3) = MV.setup('e1 e2 e3', metric)
E = ((e1 ^ e2) ^ e3)
Esq = (E * E).scalar()
print('E =', E)
print('%E^{2} =', Esq)
Esq_inv = (1 / Esq)
E1 = ((e2 ^ e3) * E)
E2 = (((- 1) * (e1 ^ e3)) * E)
E3 = ((e1 ^ e2) * E)
print('E1 = (e2^e3)*E =', E1)
print('E2 =-(e1^e3)*E =', E2)
print('E3 = (e1^e2)*E =', E3)
print('E1|e2 =', (E1 | e2).expand())
print('E1|e3 =', (E1 | e3).expand())
print('E2|e1 =', (E2 | e1).expand())
print('E2|e3 =', (E2 | e3).expand())
print('E3|e1 =', (E3 | e1).expand())
print('E3|e2 =', (E3 | e2).expand())
w = (E1 | e1).expand().scalar()
Esq = expand(Esq)
print('%(E1\\cdot e1)/E^{2} =', simplify((w / Esq)))
w = (E2 | e2).expand().scalar()
print('%(E2\\cdot e2)/E^{2} =', simplify((w / Esq)))
w = (E3 | e3).expand().scalar()
print('%(E3\\cdot e3)/E^{2} =', simplify((w / Esq)))
X = (r, th, phi) = symbols('r theta phi')
curv = [[((r * cos(phi)) * sin(th)), ((r * sin(phi)) * sin(th)), (r * cos(th))], [1, r, (r * sin(th))]]
(er, eth, ephi, grad) = MV.setup('e_r e_theta e_phi', metric='[1,1,1]', coords=X, curv=curv)
f = MV('f', 'scalar', fct=True)
A = MV('A', 'vector', fct=True)
B = MV('B', 'grade2', fct=True)
print('A =', A)
print('B =', B)
print('grad*f =', (grad * f))
print('grad|A =', (grad | A))
print('-I*(grad^A) =', ((- MV.I) * (grad ^ A)))
print('grad^B =', (grad ^ B))
vars = symbols('t x y z')
(g0, g1, g2, g3, grad) = MV.setup('gamma*t|x|y|z', metric='[1,-1,-1,-1]', coords=vars)
I = MV.I
B = MV('B', 'vector', fct=True)
E = MV('E', 'vector', fct=True)
B.set_coef(1, 0, 0)
E.set_coef(1, 0, 0)
B *= g0
E *= g0
J = MV('J', 'vector', fct=True)
F = (E + (I * B))
print('B = \\bm{B\\gamma_{t}} =', B)
print('E = \\bm{E\\gamma_{t}} =', E)
print('F = E+IB =', F)
print('J =', J)
gradF = (grad * F)
gradF.Fmt(3, 'grad*F')
print('grad*F = J')
(gradF.grade(1) - J).Fmt(3, '%\\grade{\\nabla F}_{1} -J = 0')
gradF.grade(3).Fmt(3, '%\\grade{\\nabla F}_{3} = 0')
(alpha, beta, gamma) = symbols('alpha beta gamma')
(x, t, xp, tp) = symbols("x t x' t'")
(g0, g1) = MV.setup('gamma*t|x', metric='[1,-1]')
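# Rotor for a Lorentz boost in the t-x plane: R = cosh(alpha/2) + sinh(alpha/2) * (gamma_t ^ gamma_x); events transform as X = R X' R.rev().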
R = (cosh((alpha / 2)) + (sinh((alpha / 2)) * (g0 ^ g1)))
X = ((t * g0) + (x * g1))
Xp = ((tp * g0) + (xp * g1))
print('R =', R)
print("#%t\\bm{\\gamma_{t}}+x\\bm{\\gamma_{x}} = t'\\bm{\\gamma'_{t}}+x'\\bm{\\gamma'_{x}} = R\\lp t'\\bm{\\gamma_{t}}+x'\\bm{\\gamma_{x}}\\rp R^{\\dagger}")
Xpp = ((R * Xp) * R.rev())
Xpp = Xpp.collect()
Xpp = Xpp.subs({((2 * sinh((alpha / 2))) * cosh((alpha / 2))): sinh(alpha), ((sinh((alpha / 2)) ** 2) + (cosh((alpha / 2)) ** 2)): cosh(alpha)})
print('%t\\bm{\\gamma_{t}}+x\\bm{\\gamma_{x}} =', Xpp)
Xpp = Xpp.subs({sinh(alpha): (gamma * beta), cosh(alpha): gamma})
print('%\\f{\\sinh}{\\alpha} = \\gamma\\beta')
print('%\\f{\\cosh}{\\alpha} = \\gamma')
print('%t\\bm{\\gamma_{t}}+x\\bm{\\gamma_{x}} =', Xpp.collect())
vars = symbols('t x y z')
(g0, g1, g2, g3, grad) = MV.setup('gamma*t|x|y|z', metric='[1,-1,-1,-1]', coords=vars)
I = MV.I
(m, e) = symbols('m e')
psi = MV('psi', 'spinor', fct=True)
A = MV('A', 'vector', fct=True)
sig_z = (g3 * g0)
print('\\bm{A} =', A)
print('\\bm{\\psi} =', psi)
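# Real (Hestenes) form of the Dirac equation: grad(psi) I sigma_z - e A psi - m psi gamma_t = 0.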
dirac_eq = (((((grad * psi) * I) * sig_z) - ((e * A) * psi)) - ((m * psi) * g0))
dirac_eq.simplify()
dirac_eq.Fmt(3, '\\nabla \\bm{\\psi} I \\sigma_{z}-e\\bm{A}\\bm{\\psi}-m\\bm{\\psi}\\gamma_{t} = 0')
xpdf(pdfprog=None)
return |
class VGGNet(nn.Module):
def __init__(self):
super(VGGNet, self).__init__()
def _initialize_weights(self, mode='fan_in'):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode=mode, nonlinearity='relu')
if (m.bias is not None):
m.bias.data.zero_()
elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)):
assert (m.track_running_stats == self.batchnorm_track)
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_() |