def get_split_enum(split):
if (split == TRAIN_SPLIT):
split_enum = learning_spec.Split.TRAIN
elif (split == VALID_SPLIT):
split_enum = learning_spec.Split.VALID
elif (split == TEST_SPLIT):
split_enum = learning_spec.Split.TEST
else:
raise UnexpectedSplitError(split)
    return split_enum
class IncrValueModular(Component):
def construct(s):
s.in_ = InPort(Bits8)
s.out = OutPort(Bits8)
s.buf1 = Wire(Bits8)
s.buf2 = Wire(Bits8)
connect(s.in_, s.buf1)
s.out //= s.buf2
        @update
        def upB():
            s.buf2 @= s.buf1 + b8(1)
def line_trace(s):
        return '{:2} (+1) {:2}'.format(int(s.in_), int(s.out))
def open_mmpa_writer(destination, format, title, fragment_options, fragment_index, index_options, properties, environment_cache):
from . import index_writers
    return index_writers.open_mmpa_writer(destination=destination, format=format, title=title, fragment_options=fragment_options, fragment_index=fragment_index, index_options=index_options, properties=properties, environment_cache=environment_cache)
class TestExceptions():
def test_listen_error(self, qlocalserver):
qlocalserver.listen(None)
exc = ipc.ListenError(qlocalserver)
assert (exc.code == QAbstractSocket.SocketError.HostNotFoundError)
assert (exc.message == 'QLocalServer::listen: Name error')
msg = 'Error while listening to IPC server: QLocalServer::listen: Name error (HostNotFoundError)'
assert (str(exc) == msg)
with pytest.raises(ipc.Error):
raise exc
def test_socket_error(self, qlocalserver):
socket = FakeSocket(error=QLocalSocket.LocalSocketError.ConnectionRefusedError)
exc = ipc.SocketError('testing', socket)
assert (exc.code == QLocalSocket.LocalSocketError.ConnectionRefusedError)
assert (exc.message == 'Error string')
assert (str(exc) == 'Error while testing: Error string (ConnectionRefusedError)')
with pytest.raises(ipc.Error):
            raise exc
class Effect6426(BaseEffect):
type = ('active', 'projected')
    @staticmethod
    def handler(fit, module, context, projectionRange, **kwargs):
if ('projected' not in context):
return
if fit.ship.getModifiedItemAttr('disallowOffensiveModifiers'):
return
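        # Scale the module's speedFactor by the optimal/falloff range factor at the projection distance.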
appliedBoost = (module.getModifiedItemAttr('speedFactor') * calculateRangeFactor(srcOptimalRange=module.getModifiedItemAttr('maxRange'), srcFalloffRange=module.getModifiedItemAttr('falloffEffectiveness'), distance=projectionRange))
        fit.ship.boostItemAttr('maxVelocity', appliedBoost, stackingPenalties=True, **kwargs)
def process(output_dir, wav_files, train_dir, test_dir, num_workers):
executor = ProcessPoolExecutor(max_workers=num_workers)
results = []
names = []
random.shuffle(wav_files)
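    # Partition the shuffled file list into train/test subsets using the global train_rate.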
train_num = int((len(wav_files) * train_rate))
for wav_file in wav_files[0:train_num]:
fid = os.path.basename(wav_file).replace('.wav', '.npy')
names.append(fid)
results.append(executor.submit(partial(data_prepare, os.path.join(train_dir, 'audio', fid), os.path.join(train_dir, 'mel', fid), wav_file)))
with open(os.path.join(output_dir, 'train', 'names.pkl'), 'wb') as f:
pickle.dump(names, f)
names = []
for wav_file in wav_files[train_num:len(wav_files)]:
fid = os.path.basename(wav_file).replace('.wav', '.npy')
names.append(fid)
results.append(executor.submit(partial(data_prepare, os.path.join(test_dir, 'audio', fid), os.path.join(test_dir, 'mel', fid), wav_file)))
with open(os.path.join(output_dir, 'test', 'names.pkl'), 'wb') as f:
pickle.dump(names, f)
    return [result.result() for result in tqdm(results)]
def test_envget_pass_with_substitutions():
os.environ['ARB_DELETE_ME1'] = 'arb value from $ENV ARB_DELETE_ME1'
context = Context({'key1': 'value1', 'key2': 'value2', 'env_val1': 'ARB_DELETE_ME1', 'env_val2': 'ARB_DELETE_ME2', 'default_val': 'blah', 'key_val': 'key3', 'envGet': [{'env': '{env_val1}', 'key': '{key_val}', 'default': 'blah'}, {'env': '{env_val2}', 'key': 'key4', 'default': '{default_val}'}]})
pypyr.steps.envget.run_step(context)
del os.environ['ARB_DELETE_ME1']
assert (context['key1'] == 'value1')
assert (context['key2'] == 'value2')
assert (context['key3'] == 'arb value from $ENV ARB_DELETE_ME1')
    assert (context['key4'] == 'blah')
class UNet(nn.Module):
def __init__(self, num_classes, input_channels=3, **kwargs):
super().__init__()
nb_filter = [32, 64, 128, 256, 512]
self.pool = nn.MaxPool2d(2, 2)
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])
self.conv3_1 = VGGBlock((nb_filter[3] + nb_filter[4]), nb_filter[3], nb_filter[3])
self.conv2_2 = VGGBlock((nb_filter[2] + nb_filter[3]), nb_filter[2], nb_filter[2])
self.conv1_3 = VGGBlock((nb_filter[1] + nb_filter[2]), nb_filter[1], nb_filter[1])
self.conv0_4 = VGGBlock((nb_filter[0] + nb_filter[1]), nb_filter[0], nb_filter[0])
self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
def forward(self, input):
x0_0 = self.conv0_0(input)
x1_0 = self.conv1_0(self.pool(x0_0))
x2_0 = self.conv2_0(self.pool(x1_0))
x3_0 = self.conv3_0(self.pool(x2_0))
x4_0 = self.conv4_0(self.pool(x3_0))
x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))
x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))
x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))
output = self.final(x0_4)
        return output
def test_verify_is_single_image():
single_image = torch.zeros(1, 1, 1)
image_.verify_is_single_image(single_image)
for dtype in (torch.uint8, torch.int):
with pytest.raises(TypeError):
image = single_image.clone().to(dtype)
image_.verify_is_single_image(image)
    for dim in (2, 4):
        with pytest.raises(TypeError):
            # Build a float tensor with the wrong number of dimensions (2-D and 4-D).
            image = torch.zeros(*([1] * dim))
            image_.verify_is_single_image(image)
def bind_socket(endpoint: EndpointConfiguration) -> socket.socket:
sock = socket.socket(endpoint.family, socket.SOCK_STREAM)
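    # Mark the listening socket close-on-exec so it is not inherited by child processes.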
flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, (flags | fcntl.FD_CLOEXEC))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if (endpoint.family in (socket.AF_INET, socket.AF_INET6)):
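        # SO_REUSEPORT lets multiple processes bind the same address; the kernel balances incoming connections.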
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind(endpoint.address)
sock.listen(128)
    return sock
@_vectorize_node.register(Op)
def vectorize_node_fallback(op: Op, node: Apply, *batched_inputs) -> Apply:
for inp in node.inputs:
if (not isinstance(inp.type, (TensorType, ScalarType))):
raise NotImplementedError(f'Cannot vectorize node {node} with input {inp} of type {inp.type}')
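    # Prefer the op's explicit gufunc signature; otherwise derive one from the node's inputs and outputs.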
if hasattr(op, 'gufunc_signature'):
signature = op.gufunc_signature
else:
signature = safe_signature(node.inputs, node.outputs)
    return cast(Apply, Blockwise(op, signature=signature).make_node(*batched_inputs))
def get_variant_spec_base(universe, domain, task, policy, algorithm, env_params):
print('get algorithms', algorithm)
algorithm_params = deep_update(env_params, ALGORITHM_PARAMS_PER_DOMAIN.get(domain, {}))
algorithm_params = deep_update(algorithm_params, ALGORITHM_PARAMS_ADDITIONAL.get(algorithm, {}))
    variant_spec = {
        'environment_params': {
            'training': {'domain': domain, 'task': task, 'universe': universe, 'kwargs': {}},
            'evaluation': (lambda spec: spec['environment_params']['training']),
        },
        'policy_params': deep_update(POLICY_PARAMS_BASE[policy], POLICY_PARAMS_FOR_DOMAIN[policy].get(domain, {})),
        'Q_params': {'type': 'double_feedforward_Q_function', 'kwargs': {'hidden_layer_sizes': (M, M)}},
        'algorithm_params': algorithm_params,
        'replay_pool_params': {
            'type': 'SimpleReplayPool',
            'kwargs': {'max_size': (lambda spec: {'SimpleReplayPool': int(1000000.0), 'TrajectoryReplayPool': int(10000.0)}.get(spec['replay_pool_params']['type'], int(1000000.0)))},
        },
        'sampler_params': {
            'type': 'SimpleSampler',
            'kwargs': {'max_path_length': MAX_PATH_LENGTH_PER_DOMAIN.get(domain, DEFAULT_MAX_PATH_LENGTH), 'min_pool_size': MAX_PATH_LENGTH_PER_DOMAIN.get(domain, DEFAULT_MAX_PATH_LENGTH), 'batch_size': 256},
        },
        'run_params': {
            'seed': 88,
            'checkpoint_at_end': True,
            'checkpoint_frequency': (NUM_EPOCHS_PER_DOMAIN.get(domain, DEFAULT_NUM_EPOCHS) // NUM_CHECKPOINTS),
            'checkpoint_replay_pool': False,
            'info': '',
        },
    }
    return variant_spec
def get_distance(model, sentence_emb, sentences_dict, query, similarity_treshold):
query_embedding = model.encode(query.lower(), show_progress_bar=False)
highlights = []
for sentence in sentences_dict:
sentence_embedding = sentence_emb[sentence]
score = (1 - distance.cosine(sentence_embedding, query_embedding))
if (score > similarity_treshold):
highlights.append([sentence, score, sentences_dict[sentence]['text']])
highlights = sorted(highlights, key=(lambda x: x[1]), reverse=True)
    return highlights
class SKCImputerABC(SKCTransformerABC):
_skcriteria_abstract_class = True
def _impute(self, matrix):
raise NotImplementedError()
    @doc_inherit(SKCTransformerABC._transform_data)
def _transform_data(self, matrix, **kwargs):
imputed_matrix = self._impute(matrix=matrix)
kwargs.update(matrix=imputed_matrix, dtypes=None)
        return kwargs
def _limit_font_scale(target_font_scale: float, image_height: int) -> float:
min_font_scale = max((FONT_SCALE_MIN_RELATIVE * image_height), FONT_SCALE_MIN_ABSOLUTE)
max_font_scale = (FONT_SCALE_MAX_RELATIVE * image_height)
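    # Clamp the requested scale into [min_font_scale, max_font_scale].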
    return min(max(min_font_scale, target_font_scale), max_font_scale)
class AbsoluteAxis(Control):
X = 'x'
Y = 'y'
Z = 'z'
RX = 'rx'
RY = 'ry'
RZ = 'rz'
HAT = 'hat'
HAT_X = 'hat_x'
HAT_Y = 'hat_y'
def __init__(self, name, minimum, maximum, raw_name=None, inverted=False):
super().__init__(name, raw_name, inverted)
self.min = minimum
        self.max = maximum
def test_do_class_cleanups_on_success(pytester: Pytester) -> None:
    testpath = pytester.makepyfile(
        """
        import unittest

        class MyTestCase(unittest.TestCase):
            values = []

            @classmethod
            def setUpClass(cls):
                def cleanup():
                    cls.values.append(1)

                cls.addClassCleanup(cleanup)

            def test_one(self):
                pass

            def test_two(self):
                pass

        def test_cleanup_called_exactly_once():
            assert MyTestCase.values == [1]
        """
    )
reprec = pytester.inline_run(testpath)
(passed, skipped, failed) = reprec.countoutcomes()
assert (failed == 0)
    assert (passed == 3)
class GmmRecognizer(Recognizer):
def __init__(self, transition_model, acoustic_model, decoder, symbols=None, allow_partial=True, acoustic_scale=0.1):
if (not isinstance(acoustic_model, _gmm_am.AmDiagGmm)):
raise TypeError('acoustic_model argument should be a diagonal GMM')
self.transition_model = transition_model
self.acoustic_model = acoustic_model
super(GmmRecognizer, self).__init__(decoder, symbols, allow_partial, acoustic_scale)
    @staticmethod
    def read_model(model_rxfilename):
with _util_io.xopen(model_rxfilename) as ki:
transition_model = _hmm.TransitionModel().read(ki.stream(), ki.binary)
acoustic_model = _gmm_am.AmDiagGmm().read(ki.stream(), ki.binary)
return (transition_model, acoustic_model)
def _make_decodable(self, features):
if (features.num_rows == 0):
raise ValueError('Empty feature matrix.')
return _gmm_am.DecodableAmDiagGmmScaled(self.acoustic_model, self.transition_model, features, self.acoustic_scale)
def _determinize_lattice(self, lattice):
opts = self.decoder.get_options()
if opts.determinize_lattice:
return _lat_funcs.determinize_lattice_phone_pruned(lattice, self.transition_model, opts.lattice_beam, opts.det_opts, True)
else:
            return lattice
class GdbExpandVariable(sublime_plugin.TextCommand):
def run(self, edit):
gdb_variables_view.expand_collapse_variable(self.view)
def is_enabled(self):
if (not is_running()):
return False
(row, col) = self.view.rowcol(self.view.sel()[0].a)
if (gdb_variables_view.is_open() and (self.view.id() == gdb_variables_view.get_view().id())):
return True
        return False
class F22_TestCase(CommandTest):
command = 'sshkey'
key = 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJGDmFSzIWSvnFYhExf+FbzSiZxsoohJdrKlmPKQhdts8nSg5PH7jyG5X+w6RgWhSetlD3WouKoo3zFOR5nCYq4= '
def runTest(self):
self.assert_parse(('sshkey --username=root "%s"' % self.key), ('sshkey --username=root "%s"\n' % self.key))
self.assertFalse((self.assert_parse(("sshkey --username=root '%s'" % self.key)) is None))
self.assertTrue((self.assert_parse(("sshkey --username=A '%s'" % self.key)) != self.assert_parse(("sshkey --username=B '%s'" % self.key))))
self.assertFalse((self.assert_parse(("sshkey --username=A '%s'" % self.key)) == self.assert_parse(("sshkey --username=B '%s'" % self.key))))
self.assert_parse_error('sshkey')
self.assert_parse_error('sshkey --foo')
self.assert_parse_error('sshkey --username=root --bogus-option')
self.assert_parse_error('sshkey --username')
self.assert_parse_error('sshkey --username=root')
self.assert_parse_error('sshkey --username=root key1 key2')
sshkey = self.handler().commands[self.command]
sshkey.sshUserList.append('someguy')
        self.assertEqual(sshkey.__str__(), 'someguy')
def _dict_get_impl(ctx: CallContext) -> ImplReturn:
default = ctx.vars['default']
def inner(key: Value) -> Value:
self_value = ctx.vars['self']
if isinstance(self_value, AnnotatedValue):
self_value = self_value.value
if (not _check_dict_key_hashability(key, ctx, 'k')):
return AnyValue(AnySource.error)
if isinstance(self_value, KnownValue):
if isinstance(key, KnownValue):
try:
return_value = self_value.val[key.val]
except Exception:
return default
else:
return (KnownValue(return_value) | default)
self_value = replace_known_sequence_value(self_value)
if isinstance(self_value, TypedDictValue):
if (not TypedValue(str).is_assignable(key, ctx.visitor)):
ctx.show_error(f'TypedDict key must be str, not {key}', ErrorCode.invalid_typeddict_key, arg='k')
return AnyValue(AnySource.error)
elif isinstance(key, KnownValue):
try:
(required, value) = self_value.items[key.val]
except Exception:
if (self_value.extra_keys is None):
ctx.show_error(f'Unknown TypedDict key {key.val!r}', ErrorCode.invalid_typeddict_key, arg='k')
return AnyValue(AnySource.error)
else:
if required:
return value
else:
return (value | default)
if (self_value.extra_keys is not None):
return (self_value.extra_keys | default)
ctx.show_error(f'TypedDict key must be a literal, not {key}', ErrorCode.invalid_typeddict_key, arg='k')
return AnyValue(AnySource.error)
elif isinstance(self_value, DictIncompleteValue):
val = self_value.get_value(key, ctx.visitor)
if (val is UNINITIALIZED_VALUE):
return default
return (val | default)
elif isinstance(self_value, TypedValue):
key_type = self_value.get_generic_arg_for_type(dict, ctx.visitor, 0)
can_assign = key_type.can_assign(key, ctx.visitor)
if isinstance(can_assign, CanAssignError):
ctx.show_error(f'Dictionary does not accept keys of type {key}', error_code=ErrorCode.incompatible_argument, detail=str(can_assign), arg='key')
value_type = self_value.get_generic_arg_for_type(dict, ctx.visitor, 1)
return (value_type | default)
else:
return AnyValue(AnySource.inference)
    return flatten_unions(inner, ctx.vars['key'])
def test_replica_get_size(config):
try:
cfg = config()
replica_url = cfg.replica_url
tmp_dir = '/tmp/'
with open((tmp_dir + TEMP_FILENAME), 'wb') as f:
            # The file is opened in binary mode, so write bytes rather than str.
            f.write(b'x' * (FILE_SIZE * pow(2, 20)))
_ = rs.replica.LogicalDirectory(replica_url)
myfile = rs.replica.LogicalFile((replica_url + TEMP_FILENAME))
myfile.upload((tmp_dir + TEMP_FILENAME), ('irods:///path/is/ignored/?resource=' + IRODS_RESOURCE), rs.replica.OVERWRITE)
myfile = rs.replica.LogicalFile((replica_url + TEMP_FILENAME))
assert True
except rs.SagaException as ex:
        assert False, ('unexpected exception %s\n%s' % (ex.traceback, ex))
class MajoranaOperator():
def __init__(self, term=None, coefficient=1.0):
self.terms = {}
if (term is not None):
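            # Sort the term into canonical order; each adjacent swap flips the sign via the parity.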
(term, parity) = _sort_majorana_term(term)
self.terms[term] = (coefficient * ((- 1) ** parity))
    @staticmethod
    def from_dict(terms):
op = MajoranaOperator()
op.terms = terms
return op
def commutes_with(self, other):
if (not isinstance(other, type(self))):
raise TypeError('Can only test commutation with another MajoranaOperator.')
if ((len(self.terms) == 1) and (len(other.terms) == 1)):
return _majorana_terms_commute(list(self.terms.keys())[0], list(other.terms.keys())[0])
return ((self * other) == (other * self))
def with_basis_rotated_by(self, transformation_matrix):
if (not _is_real_orthogonal(transformation_matrix)):
raise ValueError('Transformation matrix is not real orthogonal.')
rotated_op = MajoranaOperator()
for (term, coeff) in self.terms.items():
rotated_term = _rotate_basis(term, transformation_matrix)
rotated_term *= coeff
rotated_op += rotated_term
return rotated_op
def __iadd__(self, other):
if (not isinstance(other, type(self))):
return NotImplemented
for (term, coefficient) in other.terms.items():
if (term in self.terms):
self.terms[term] += coefficient
else:
self.terms[term] = coefficient
return self
def __add__(self, other):
if (not isinstance(other, type(self))):
return NotImplemented
terms = {}
terms.update(self.terms)
for (term, coefficient) in other.terms.items():
if (term in terms):
terms[term] += coefficient
else:
terms[term] = coefficient
return MajoranaOperator.from_dict(terms)
def __isub__(self, other):
if (not isinstance(other, type(self))):
return NotImplemented
for (term, coefficient) in other.terms.items():
if (term in self.terms):
self.terms[term] -= coefficient
else:
self.terms[term] = coefficient
return self
def __sub__(self, other):
if (not isinstance(other, type(self))):
return NotImplemented
terms = {}
terms.update(self.terms)
for (term, coefficient) in other.terms.items():
if (term in terms):
terms[term] -= coefficient
else:
terms[term] = (- coefficient)
return MajoranaOperator.from_dict(terms)
def __mul__(self, other):
if (not isinstance(other, (type(self), int, float, complex))):
return NotImplemented
if isinstance(other, (int, float, complex)):
terms = {term: (coefficient * other) for (term, coefficient) in self.terms.items()}
return MajoranaOperator.from_dict(terms)
terms = {}
for (left_term, left_coefficient) in self.terms.items():
for (right_term, right_coefficient) in other.terms.items():
(new_term, parity) = _merge_majorana_terms(left_term, right_term)
coefficient = ((left_coefficient * right_coefficient) * ((- 1) ** parity))
if (new_term in terms):
terms[new_term] += coefficient
else:
terms[new_term] = coefficient
return MajoranaOperator.from_dict(terms)
def __imul__(self, other):
if (not isinstance(other, (type(self), int, float, complex))):
return NotImplemented
if isinstance(other, (int, float, complex)):
for term in self.terms:
self.terms[term] *= other
return self
return (self * other)
def __rmul__(self, other):
if (not isinstance(other, (int, float, complex))):
return NotImplemented
return (self * other)
def __truediv__(self, other):
if (not isinstance(other, (int, float, complex))):
return NotImplemented
terms = {term: (coefficient / other) for (term, coefficient) in self.terms.items()}
return MajoranaOperator.from_dict(terms)
def __itruediv__(self, other):
if (not isinstance(other, (int, float, complex))):
return NotImplemented
for term in self.terms:
self.terms[term] /= other
return self
def __pow__(self, other):
if (not isinstance(other, int)):
return NotImplemented
if (other < 0):
raise TypeError('Cannot raise MajoranaOperator to negative power.')
result = MajoranaOperator(())
for _ in range(other):
result *= self
return result
def __neg__(self):
return ((- 1) * self)
def __eq__(self, other):
if (not isinstance(other, type(self))):
return NotImplemented
for term in (self.terms.keys() | other.terms.keys()):
if ((term in self.terms) and (term in other.terms)):
if (not numpy.isclose(self.terms[term], other.terms[term])):
return False
elif (term in self.terms):
if (not numpy.isclose(self.terms[term], 0.0)):
return False
elif (not numpy.isclose(other.terms[term], 0.0)):
return False
return True
def __ne__(self, other):
return (not (self == other))
def __str__(self):
if (not self.terms):
return '0'
lines = []
for (term, coeff) in sorted(self.terms.items()):
if numpy.isclose(coeff, 0.0):
continue
lines.append('{} {} +'.format(coeff, term))
if (not lines):
return '0'
return '\n'.join(lines)[:(- 2)]
def __repr__(self):
        return 'MajoranaOperator.from_dict(terms={!r})'.format(self.terms)
class AttentionQAWithYesNo(MultipleContextModel):
    def __init__(self, encoder: QuestionsAndParagraphsEncoder, word_embed: Optional[WordEmbedder],
                 char_embed: Optional[CharWordEmbedder], embed_mapper: Optional[Union[SequenceMapper, ElmoWrapper]],
                 question_mapper: Optional[SequenceMapper], context_mapper: Optional[SequenceMapper],
                 memory_builder: SequenceBiMapper, attention: AttentionMapper, match_encoder: SequenceMapper,
                 yes_no_question_encoder: SequenceEncoder, yes_no_context_encoder: SequenceEncoder,
                 predictor: SequencePredictionLayer, max_batch_size: Optional[int] = None,
                 elmo_model: Optional[LanguageModel] = None):
super().__init__(encoder=encoder, word_embed=word_embed, char_embed=char_embed, max_batch_size=max_batch_size, elmo_model=elmo_model)
self.embed_mapper = embed_mapper
self.question_mapper = question_mapper
self.context_mapper = context_mapper
self.memory_builder = memory_builder
self.attention = attention
self.match_encoder = match_encoder
self.yes_no_question_encoder = yes_no_question_encoder
self.yes_no_context_encoder = yes_no_context_encoder
self.predictor = predictor
def _get_predictions_for(self, is_train, question_embed, question_mask, context_embed, context_mask, answer, question_lm=None, context_lm=None, sentence_segments=None, sentence_mask=None):
(question_rep, context_rep) = (question_embed, context_embed)
(context_rep,) = tf.unstack(context_rep, axis=1, num=1)
(context_mask,) = tf.unstack(context_mask, axis=1, num=1)
(q_lm_in, c_lm_in) = ([], [])
if self.use_elmo:
(context_lm,) = tf.unstack(context_lm, axis=1, num=1)
q_lm_in = [question_lm]
c_lm_in = [context_lm]
if (self.embed_mapper is not None):
with tf.variable_scope('map_embed'):
context_rep = self.embed_mapper.apply(is_train, context_rep, context_mask, *c_lm_in)
with tf.variable_scope('map_embed', reuse=True):
question_rep = self.embed_mapper.apply(is_train, question_rep, question_mask, *q_lm_in)
with tf.variable_scope('yes_no_question_prediction'):
yes_no_q_enc = self.yes_no_question_encoder.apply(is_train, question_rep, question_mask)
yes_no_choice_logits = fully_connected(yes_no_q_enc, 2, use_bias=True, activation=None, kernel_initializer=get_keras_initialization('glorot_uniform'), name='yes_no_choice')
if (self.question_mapper is not None):
with tf.variable_scope('map_question'):
question_rep = self.question_mapper.apply(is_train, question_rep, question_mask)
if (self.context_mapper is not None):
with tf.variable_scope('map_context'):
context_rep = self.context_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope('buid_memories'):
(keys, memories) = self.memory_builder.apply(is_train, question_rep, question_mask)
with tf.variable_scope('apply_attention'):
context_rep = self.attention.apply(is_train, context_rep, keys, memories, context_mask, question_mask)
if (self.match_encoder is not None):
with tf.variable_scope('process_attention'):
context_rep = self.match_encoder.apply(is_train, context_rep, context_mask)
with tf.variable_scope('yes_no_answer_prediction'):
yes_no_c_enc = self.yes_no_context_encoder.apply(is_train, context_rep, context_mask)
yes_no_answer_logits = fully_connected(yes_no_c_enc, 2, use_bias=True, activation=None, kernel_initializer=get_keras_initialization('glorot_uniform'), name='yes_no_answer')
with tf.variable_scope('predict'):
            return self.predictor.apply(is_train, context_rep, answer, context_mask, yes_no_choice_logits=yes_no_choice_logits, yes_no_answer_logits=yes_no_answer_logits)
class HardExampleMinerTest(tf.test.TestCase):
def testHardMiningWithSingleLossType(self):
location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=1, iou_threshold=0.0, loss_type='loc', cls_loss_weight=1, loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list)
exp_loc_loss = (100 + 3)
exp_cls_loss = (0 + 0)
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningWithBothLossType(self):
location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=1, iou_threshold=0.0, loss_type='both', cls_loss_weight=1, loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list)
exp_loc_loss = (80 + 0)
exp_cls_loss = (50 + 9)
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningNMS(self):
location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.9, 0.9, 0.99, 0.99], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=2, iou_threshold=0.5, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list)
exp_loc_loss = (((0 + 90) + 0) + 1)
exp_cls_loss = (((110 + 10) + 9) + 6)
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testEnforceNegativesPerPositiveRatio(self):
location_losses = tf.constant([[100, 90, 80, 0, 1, 2, 3, 10, 20, 100, 20, 3]], tf.float32)
cls_losses = tf.constant([[0, 0, 100, 0, 90, 70, 0, 60, 0, 17, 13, 0]], tf.float32)
box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.5, 0.1], [0.0, 0.0, 0.6, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.8, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 1.1, 0.1], [0.0, 0.0, 0.2, 0.1]], tf.float32)
match_results = tf.constant([2, (- 1), 0, (- 1), (- 1), 1, (- 1), (- 1), (- 1), (- 1), (- 1), 3])
match_list = [matcher.Match(match_results)]
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10]
exp_loc_loss_list = [(80 + 2), ((80 + 1) + 2), (((80 + 1) + 2) + 10), ((((80 + 1) + 2) + 10) + 100), (((((80 + 1) + 2) + 10) + 100) + 20)]
exp_cls_loss_list = [(100 + 70), ((100 + 90) + 70), (((100 + 90) + 70) + 60), ((((100 + 90) + 70) + 60) + 17), (((((100 + 90) + 70) + 60) + 17) + 13)]
for (max_negatives_per_positive, exp_loc_loss, exp_cls_loss) in zip(max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list):
loss_op = losses.HardExampleMiner(num_hard_examples=None, iou_threshold=0.9999, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1, max_negatives_per_positive=max_negatives_per_positive)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list, match_list)
loss_op.summarize()
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self):
location_losses = tf.constant([[100, 90, 80, 0, 1, 2, 3, 10, 20, 100, 20, 3]], tf.float32)
cls_losses = tf.constant([[0, 0, 100, 0, 90, 70, 0, 60, 0, 17, 13, 0]], tf.float32)
box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.5, 0.1], [0.0, 0.0, 0.6, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.8, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 1.1, 0.1], [0.0, 0.0, 0.2, 0.1]], tf.float32)
match_results = tf.constant(([(- 1)] * 12))
match_list = [matcher.Match(match_results)]
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
min_negatives_per_image_list = [0, 1, 2, 4, 5, 6]
exp_loc_loss_list = [0, 80, (80 + 1), (((80 + 1) + 2) + 10), ((((80 + 1) + 2) + 10) + 100), (((((80 + 1) + 2) + 10) + 100) + 20)]
exp_cls_loss_list = [0, 100, (100 + 90), (((100 + 90) + 70) + 60), ((((100 + 90) + 70) + 60) + 17), (((((100 + 90) + 70) + 60) + 17) + 13)]
for (min_negatives_per_image, exp_loc_loss, exp_cls_loss) in zip(min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list):
loss_op = losses.HardExampleMiner(num_hard_examples=None, iou_threshold=0.9999, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1, max_negatives_per_positive=3, min_negatives_per_image=min_negatives_per_image)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list, match_list)
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
                self.assertAllClose(cls_loss_output, exp_cls_loss)
class AssignTypeNode(NodeNG):
def assign_type(self):
return self
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt: (Statement | None)):
if (self is mystmt):
return (_stmts, True)
if (self.statement() is mystmt):
return ([node], True)
        return (_stmts, False)
def test_alive_gc_multi_derived(capture):
class Derived(m.Parent, m.Child):
def __init__(self):
m.Parent.__init__(self)
m.Child.__init__(self)
n_inst = ConstructorStats.detail_reg_inst()
p = Derived()
p.addChildKeepAlive(m.Child())
assert (ConstructorStats.detail_reg_inst() == (n_inst + 3))
lst = [p]
lst.append(lst)
with capture:
del p, lst
assert (ConstructorStats.detail_reg_inst() == n_inst)
    assert (capture == '\n Releasing parent.\n Releasing child.\n Releasing child.\n ')
@click.command()
@click.option('--input_dir', '-i', required=True, type=click.Path(), help='Input DICOM directory. This should be at the same level as the parent field (default=PatientName).')
@click.option('--output_dir', '-o', default='./', show_default=True, required=False, type=click.Path(), help='Output directory. A folder structure will be created at this location.')
@click.option('--sort_by', '-b', default='PatientName', help='DICOM tag to sort at the highest level.', show_default=True)
@click.option('--image_format', default='{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}', help="Format for output images. There are three special options that can be used: parent_sorting_data (same as sort_by option), study_uid_index (a counter for distinct DICOM studies), image_desc (info from DICOM header, more nicely formatted). Additionally, any DICOM header tag can be used (e.g. Modality, SeriesNumber, AcquisitionData). Any DICOM header tag that doesn't exist will return a 0.", show_default=True)
@click.option('--structure_format', default='{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}', help='Format for output structures. Any of the options for images can be used, as well as: structure_name', show_default=True)
@click.option('--dose_format', default='{parent_sorting_data}_{study_uid_index}_{DoseSummationType}', show_default=True, help='Format for output radiotherapy dose distributions.')
@click.option('--overwrite', is_flag=True, default=False, help='Overwrite files if they exist.', show_default=True)
@click.option('--file_suffix', default='.nii.gz', help='Output file suffix. Defines the file type.', show_default=True)
@click.option('--short_description', '-s', is_flag=True, default=False, show_default=True, help='Use less verbose descriptions for DICOM images.')
@click.option('--verbose', '-v', is_flag=True, default=False, show_default=True, help='Print more information while running.')
def click_command(input_dir, output_dir, sort_by, image_format, structure_format, dose_format, overwrite, file_suffix, short_description, verbose):
logger.info('')
logger.info(' Running DICOM crawler ')
logger.info('')
process_dicom_directory(input_dir, parent_sorting_field=sort_by, output_image_name_format=image_format, output_structure_name_format=structure_format, output_dose_name_format=dose_format, return_extra=(not short_description), output_directory=output_dir, output_file_suffix=file_suffix, overwrite_existing_files=overwrite, write_to_disk=True, verbose=verbose)
logger.info('')
logger.info(' DICOM crawler complete')
    logger.info('')
def avg_nr_of_trades_per1y(trades_returns: QFSeries, start_date: datetime, end_date: datetime):
period_length = (end_date - start_date)
period_length_in_years = (to_days(period_length) / DAYS_PER_YEAR_AVG)
avg_number_of_trades_1y = (len(trades_returns) / period_length_in_years)
    return avg_number_of_trades_1y
def weld_unwelded_result(d):
constant_smiles = d['constant']
to_smiles = d['to_smiles']
start_num_heavies = d.pop('start_num_heavies')
(new_smiles, welded_mol) = weld_fragments(constant_smiles, to_smiles)
final_num_heavies = welded_mol.GetNumHeavyAtoms()
d['final'] = new_smiles
d['heavies_diff'] = (final_num_heavies - start_num_heavies)
    return d
class TestDebugError(unittest.TestCase):
def setUp(self):
self._old_debug = pyppeteer.DEBUG
self.logger = logging.getLogger('pyppeteer.test')
def tearDown(self):
pyppeteer.DEBUG = self._old_debug
def test_debug_default(self):
with self.assertLogs('pyppeteer.test', logging.DEBUG):
debugError(self.logger, 'test')
with self.assertRaises(AssertionError):
with self.assertLogs('pyppeteer', logging.INFO):
debugError(self.logger, 'test')
def test_debug_enabled(self):
pyppeteer.DEBUG = True
with self.assertLogs('pyppeteer.test', logging.ERROR):
debugError(self.logger, 'test')
def test_debug_enable_disable(self):
pyppeteer.DEBUG = True
with self.assertLogs('pyppeteer.test', logging.ERROR):
debugError(self.logger, 'test')
pyppeteer.DEBUG = False
with self.assertLogs('pyppeteer.test', logging.DEBUG):
debugError(self.logger, 'test')
with self.assertRaises(AssertionError):
with self.assertLogs('pyppeteer.test', logging.INFO):
debugError(self.logger, 'test')
def test_debug_logger(self):
with self.assertRaises(AssertionError):
with self.assertLogs('pyppeteer', logging.DEBUG):
                debugError(logging.getLogger('test'), 'test message')
def test_has_unsupported_features(preset_manager):
preset = preset_manager.default_preset_for_game(RandovaniaGame.METROID_DREAD).get_preset()
assert isinstance(preset.configuration, DreadConfiguration)
configuration = preset.configuration
gd = default_database.game_description_for(preset.game)
suitless = gd.resource_database.get_by_type_and_index(ResourceType.TRICK, 'Suitless')
configuration = dataclasses.replace(configuration, trick_level=configuration.trick_level.set_level_for_trick(suitless, LayoutTrickLevel.HYPERMODE), artifacts=DreadArtifactConfig(prefer_emmi=False, prefer_major_bosses=False, required_artifacts=1))
    assert (configuration.unsupported_features() == ['Metroid DNA on non-boss/EMMI', 'Enabled Heat/Cold Runs'])
def write_to_cache(db_name, data):
if (not os.path.exists(os.path.dirname(CACHE_FILE))):
try:
os.makedirs(os.path.dirname(CACHE_FILE))
with open(CACHE_FILE, 'w') as _:
_.write(json.dumps({}))
LOG.debug('Cache file created')
except OSError as exc:
LOG.debug('Unable to create the cache file because: %s', exc.errno)
if (exc.errno != errno.EEXIST):
raise
with open(CACHE_FILE, 'r') as f:
try:
cache = json.loads(f.read())
except json.JSONDecodeError:
LOG.debug('JSONDecodeError in the local cache, dumping the full cache file.')
cache = {}
with open(CACHE_FILE, 'w') as f:
cache[db_name] = {'cached_at': time.time(), 'db': data}
f.write(json.dumps(cache))
        LOG.debug('Safety updated the cache file for %s database.', db_name)
class SubVector(_VectorBase, _matrix_ext.SubVector):
def __init__(self, obj, start=0, length=None):
if (not isinstance(obj, _kaldi_vector.VectorBase)):
obj = numpy.array(obj, dtype=numpy.float32, copy=False, order='C')
if (obj.ndim != 1):
raise ValueError('obj should be a 1-D vector like object.')
obj_len = len(obj)
if (not (0 <= start <= obj_len)):
raise IndexError('start={0} should be in the range [0,{1}] when len(obj)={1}.'.format(start, obj_len))
max_len = (obj_len - start)
if (length is None):
length = max_len
if (not (0 <= length <= max_len)):
raise IndexError('length={} should be in the range [0,{}] when start={} and len(obj)={}.'.format(length, max_len, start, obj_len))
        super(SubVector, self).__init__(obj, start, length)
class LeaveOrgViewTest(TestCase):
    @classmethod
    def setUpTestData(cls):
add_default_data()
def login(self, name, password=None):
self.client.login(username=name, password=(password if password else name))
self.pu = PytitionUser.objects.get(user__username=name)
return self.pu
def logout(self):
self.client.logout()
def tearDown(self):
pass
def test_NotLoggedIn(self):
self.logout()
org = Organization.objects.get(name='RAP')
response = self.client.get(reverse('leave_org', args=[org.slugname]), follow=True)
self.assertRedirects(response, ((reverse('login') + '?next=') + reverse('leave_org', args=[org.slugname])))
self.assertTemplateUsed(response, 'registration/login.html')
self.assertTemplateUsed(response, 'layouts/base.html')
def test_leave_org_ok(self):
max = self.login('max')
org = Organization.objects.get(name='Les Amis de la Terre')
response = self.client.get(reverse('leave_org', args=[org.slugname]))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('account_settings'))
self.assertNotIn(org, max.organization_set.all())
def test_leave_refuse_alone(self):
julia = self.login('julia')
org = Organization.objects.get(name='RAP')
response = self.client.get(reverse('leave_org', args=[org.slugname]))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, (reverse('account_settings') + '#a_org_form'))
self.assertIn(org, julia.organization_set.all())
def test_leave_refuse_lastAdmin(self):
julia = self.login('julia')
org = Organization.objects.get(name='Les Amis de la Terre')
response = self.client.get(reverse('leave_org', args=[org.slugname]))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, (reverse('account_settings') + '#a_org_form'))
        self.assertIn(org, julia.organization_set.all())
@register('mini-imagenet')
class MiniImageNet(Dataset):
def __init__(self, root_path, split='train', **kwargs):
split_tag = split
if (split == 'train'):
split_tag = 'train_phase_train'
split_file = 'miniImageNet_category_split_{}.pickle'.format(split_tag)
with open(os.path.join(root_path, split_file), 'rb') as f:
pack = pickle.load(f, encoding='latin1')
data = pack['data']
label = pack['labels']
image_size = 80
data = [Image.fromarray(x) for x in data]
min_label = min(label)
label = [(x - min_label) for x in label]
self.data = data
self.label = label
self.n_classes = (max(self.label) + 1)
norm_params = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
normalize = transforms.Normalize(**norm_params)
self.default_transform = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor(), normalize])
augment = kwargs.get('augment')
if (augment == 'resize'):
self.transform = transforms.Compose([transforms.RandomResizedCrop(image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
elif (augment == 'crop'):
self.transform = transforms.Compose([transforms.Resize(image_size), transforms.RandomCrop(image_size, padding=8), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
elif (augment is None):
self.transform = self.default_transform
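        # Inverse of the normalize transform: map normalized tensors back to raw pixel values.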
def convert_raw(x):
mean = torch.tensor(norm_params['mean']).view(3, 1, 1).type_as(x)
std = torch.tensor(norm_params['std']).view(3, 1, 1).type_as(x)
return ((x * std) + mean)
self.convert_raw = convert_raw
def __len__(self):
return len(self.data)
def __getitem__(self, i):
        return (self.transform(self.data[i]), self.label[i])
def test_register_equilibrium_solver(mocker):
from solcore import registries
mock_gr = mocker.patch('solcore.registries.generic_register')
name = 'custom_equilibrium'
overwrite = False
reason_to_exclude = None
    @registries.register_equilibrium_solver(name, overwrite=overwrite, reason_to_exclude=reason_to_exclude)
def solver(*args, **kwargs):
pass
    mock_gr.assert_called_once_with(name=name, registrator_name='Equilibrium solver', registry=registries.EQUILIBRIUM_SOLVER_REGISTRY, signature=registries.EQUILIBRIUM_SOLVER_SIGNATURE, overwrite=overwrite, reason_to_exclude=reason_to_exclude)
@runtime_checkable
class BackendType(Protocol[_App]):
    Options: Callable[..., Any]
    def configure(self, app: _App, component: RootComponentConstructor, options: (Any | None) = None) -> None:
        ...
    def create_development_app(self) -> _App:
        ...
    async def serve_development_app(self, app: _App, host: str, port: int, started: (asyncio.Event | None) = None) -> None:
        ...
@dataclass
class DataTrainingArguments:
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
train_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'})
validation_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
max_seq_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated. Default to the max input length of the model.'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
def __post_init__(self):
if (self.train_file is not None):
extension = self.train_file.split('.')[(- 1)]
assert (extension in ['csv', 'json', 'txt']), '`train_file` should be a csv, a json or a txt file.'
if (self.validation_file is not None):
extension = self.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json', 'txt']), '`validation_file` should be a csv, a json or a txt file.'
def main():
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_devices
os.environ['NVIDIA_VISIBLE_DEVICES'] = args.cuda_devices
accelerator = Accelerator()
args.device = accelerator.device
logger = get_logger(args, accelerator)
(raw_datasets, label_list, num_labels) = get_dataset(args)
(tokenizer, model) = get_model(args, num_labels)
(train_dataset, eval_dataset, test_dataset, data_collator, eval_data_collator) = preprocess(args, model, tokenizer, raw_datasets, num_labels, label_list, logger, accelerator)
if (args.external_ratio > 1):
raw_external_dataset = get_external_dataset(args)
(external_dataset, text_column_name) = preprocess_external(args, raw_external_dataset, tokenizer, logger)
external_data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=args.mlm_probability)
external_dataloader = DataLoader(external_dataset, shuffle=True, collate_fn=external_data_collator, batch_size=args.per_device_train_batch_size)
else:
external_dataloader = None
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=eval_data_collator, batch_size=args.per_device_eval_batch_size)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
if (external_dataloader is not None):
(model, optimizer, external_dataloader, train_dataloader) = accelerator.prepare(model, optimizer, external_dataloader, train_dataloader)
else:
(model, optimizer, train_dataloader) = accelerator.prepare(model, optimizer, train_dataloader)
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if (args.max_train_steps is None):
args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
else:
args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
if (args.task_name is not None):
metric = load_metric('./src/metrics.py', args.task_name)
else:
metric = load_metric('accuracy')
test_dataloader = DataLoader(test_dataset, collate_fn=eval_data_collator, batch_size=args.per_device_eval_batch_size)
if (args.model_name_or_path and args.from_ckpt):
checkpoint_dir = args.model_name_or_path
else:
checkpoint_dir = None
trainer = Trainer(args=args, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, external_dataloader=external_dataloader, logger=logger, accelerator=accelerator, from_checkpoint=checkpoint_dir, test_dataloader=test_dataloader, metric=metric, label_list=label_list, tokenizer=tokenizer)
    trainer.train()
class BCELoss(nn.Module):
def __init__(self, num_classes, epsilon=0.1, use_gpu=True, label_smooth=True):
super(BCELoss, self).__init__()
self.num_classes = num_classes
self.epsilon = (epsilon if label_smooth else 0)
self.use_gpu = use_gpu
self.sigmoid = nn.Sigmoid()
def forward(self, inputs, targets):
if self.use_gpu:
targets = targets.cuda()
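        # Label smoothing: soften the one-hot targets toward a uniform distribution over classes.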
targets = (((1 - self.epsilon) * targets) + (self.epsilon / self.num_classes))
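        # Numerically stable binary cross-entropy with logits, written out explicitly.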
max_val = (- inputs).clamp(min=0)
loss = (((inputs - (inputs * targets)) + max_val) + ((- max_val).exp() + ((- inputs) - max_val).exp()).log())
        return loss.mean(0).sum()
def test_matrix_variable_selection_inclusion(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
project_name = 'My.App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
project_path = (temp_dir / 'my-app')
data_path = (temp_dir / 'data')
data_path.mkdir()
project = Project(project_path)
helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch('run', '+version=9000', 'test:python', '-c', "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])")
assert (result.exit_code == 0), result.output
assert (result.output == helpers.dedent('\n test.9000 \n Creating environment: test.9000\n Checking dependencies\n '))
output_file = (project_path / 'test.txt')
assert output_file.is_file()
env_data_path = ((data_path / 'env') / 'virtual')
assert env_data_path.is_dir()
project_data_path = (env_data_path / project_path.name)
assert project_data_path.is_dir()
storage_dirs = list(project_data_path.iterdir())
assert (len(storage_dirs) == 1)
storage_path = storage_dirs[0]
assert (len(storage_path.name) == 8)
env_dirs = list(storage_path.iterdir())
assert (len(env_dirs) == 1)
env_path = env_dirs[0]
assert (env_path.name == 'test.9000')
python_path = str(output_file.read_text()).strip()
    assert (str(env_path) in python_path)
def plot_trajectories(trajectory_dict, title, add_legend=True):
carla_map = CarlaMap('Town01_nemesis', 0.1653, 50)
image = mpimg.imread('carla/planner/Town01_nemesis.png')
(fig, ax) = plt.subplots(1)
pad = 30
fig.set_size_inches(10, 10)
plt.rcParams.update({'font.size': 12})
ax.imshow(image, alpha=0.4)
all_x_pixels = []
all_y_pixels = []
for (label, positions) in trajectory_dict.items():
x_position = positions['x']
y_position = positions['y']
pixelX = []
pixelY = []
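        # Convert each world-space (x, y) position to map pixel coordinates.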
for i in range(len(x_position)):
pixel = carla_map.convert_to_pixel([x_position[i], y_position[i], 0])
pixelX.append(pixel[0])
pixelY.append(pixel[1])
all_x_pixels.append(pixel[0])
all_y_pixels.append(pixel[1])
if (len(x_position) == 1):
plt.scatter(pixelX[0], pixelY[0], label=label, s=500)
elif (label.lower() == 'baseline'):
plt.plot(pixelX, pixelY, linestyle='dashed', label=label, color='k', markersize=12, linewidth=4)
else:
plt.plot(pixelX, pixelY, linestyle='dashed', label=label, color='blue', markersize=12, linewidth=4)
xmin = np.maximum(0, (min(all_x_pixels) - pad))
xmax = np.minimum(image.shape[1], (max(all_x_pixels) + pad))
ymin = np.maximum(0, (min(all_y_pixels) - pad))
ymax = np.minimum(image.shape[0], (max(all_y_pixels) + pad))
plt.axis([xmin, xmax, ymax, ymin])
plt.title(title)
if add_legend:
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
    return plt
class AlpinePackage(Package):
def is_installed(self):
return (self.run_test('apk -e info %s', self.name).rc == 0)
def version(self):
out = self.check_output('apk -e -v info %s', self.name).split('-')
return out[(- 2)]
def release(self):
out = self.check_output('apk -e -v info %s', self.name).split('-')
        return out[(- 1)]
class LogCmd():
def __init__(self, cmd, env=None) -> None:
self.cmd = cmd
self.env = env
def __repr__(self) -> str:
cmd_repr = ' '.join((quote(str(c)) for c in self.cmd))
if (self.env is not None):
cmd_repr = f'{cmd_repr} env of {self.env!r}'
        return cmd_repr
def test_set_pypi_token(config: Config, with_simple_keyring: None, dummy_keyring: DummyBackend) -> None:
manager = PasswordManager(config)
assert manager.keyring.is_available()
manager.set_pypi_token('foo', 'baz')
assert (config.get('pypi-token.foo') is None)
    assert (dummy_keyring.get_password('poetry-repository-foo', '__token__') == 'baz')
class DirectionalLightShadow(LightShadow):
def __init__(self) -> None:
super().__init__(OrthographicCamera(1000, 1000, depth_range=((- 500), 500)))
def _update_matrix(self, light):
camera = self.camera
camera.update_projection_matrix()
        super()._update_matrix(light)
def handle_network_errors(fn: typing.Callable[typing.Concatenate[MultiplayerSessionApi, Param], RetType]) -> typing.Callable[Param, RetType]:
    @functools.wraps(fn)
async def wrapper(self: MultiplayerSessionApi, *args, **kwargs):
parent = self.widget_root
try:
return (await fn(self, *args, **kwargs))
except error.InvalidActionError as e:
(await async_dialog.warning(parent, 'Invalid action', f'{e}'))
except error.ServerError:
(await async_dialog.warning(parent, 'Server error', 'An error occurred on the server while processing your request.'))
except error.NotLoggedInError:
(await async_dialog.warning(parent, 'Unauthenticated', 'You must be logged in.'))
except error.NotAuthorizedForActionError:
(await async_dialog.warning(parent, 'Unauthorized', "You're not authorized to perform that action."))
except error.UserNotAuthorizedToUseServerError:
(await async_dialog.warning(parent, 'Unauthorized', "You're not authorized to use this build.\nPlease check #dev-builds for more details."))
except error.UnsupportedClientError as e:
s = e.detail.replace('\n', '<br />')
(await async_dialog.warning(parent, 'Unsupported client', s))
except UnableToConnect as e:
s = e.reason.replace('\n', '<br />')
(await async_dialog.warning(parent, 'Connection Error', f'<b>Unable to connect to the server:</b><br /><br />{s}'))
except error.RequestTimeoutError as e:
(await async_dialog.warning(parent, 'Connection Error', f'<b>Timeout while communicating with the server:</b><br /><br />{e}<br />Further attempts will wait for longer.'))
except error.WorldDoesNotExistError:
(await async_dialog.warning(parent, 'World does not exist', 'The world you tried to change does not exist. If this error keeps happening, please reopen the Window and/or Randovania.'))
return None
    return wrapper
class GraphConv(nn.Module):
def __init__(self, edge_feature_dim, node_feature_in_dim, node_feature_out_dim, hidden_dims=[32, 64], aggr='mean', batch_norm=True, mlp_activation=torch.nn.Sigmoid(), final_activation=torch.nn.LeakyReLU()):
super(GraphConv, self).__init__()
self.mlp = MLP(in_dim=edge_feature_dim, out_dim=(node_feature_in_dim * node_feature_out_dim), hidden_layer_dims=hidden_dims, activation=mlp_activation, batch_norm=False)
self.nnConv = NNConv(node_feature_in_dim, node_feature_out_dim, self.mlp, aggr=aggr)
self.activation = final_activation
        if batch_norm:
            self.batch_norm = nn.BatchNorm1d(node_feature_out_dim)
        else:
            # Ensure the attribute always exists so forward() does not raise AttributeError.
            self.batch_norm = None
def forward(self, x, edge_index, edge_features):
x = self.nnConv(x, edge_index, edge_features)
if (self.activation is not None):
x = self.activation(x)
        if self.batch_norm is not None:
            x = self.batch_norm(x)
        return (x, edge_index, edge_features)
def render_notebook(nbspec: NotebookSpecV2) -> None:
(nb, nb_path) = _init_notebook(path_stem=nbspec.path_stem, directory=nbspec.directory)
cells = {'title_cell': _MarkdownCell('\n'.join(_get_title_lines(nbspec.title, nbspec.module)), cell_id='title_cell'), 'top_imports': _PyCell(_IMPORTS, cell_id='top_imports')}
for bds in nbspec.bloq_specs:
cells |= {c.cell_id: c for c in get_cells(bds)}
cqids_to_render: List[str] = list(cells.keys())
for i in range(len(nb.cells)):
nb_node = nb.cells[i]
if (_K_CQ_AUTOGEN in nb_node.metadata):
cqid: str = nb_node.metadata[_K_CQ_AUTOGEN]
new_cell = cells.get(cqid, None)
if (new_cell is None):
print(f'[{nbspec.path_stem}] Superfluous {cqid} cell.')
continue
print(f'[{nbspec.path_stem}] Replacing {cqid} cell.')
new_nbnode = _cell_to_nbnode(new_cell)
new_nbnode.id = nb_node.id
nb.cells[i] = new_nbnode
cqids_to_render.remove(cqid)
for cqid in cqids_to_render:
print(f'[{nbspec.path_stem}] Adding {cqid}')
new_cell = cells[cqid]
new_nbnode = _cell_to_nbnode(new_cell)
nb.cells.append(new_nbnode)
with nb_path.open('w') as f:
        nbformat.write(nb, f)
def fort_file(filename, txts, header=None):
    try:
        if header:
            with open(filename, 'a') as f:
                # Write the header, then each record, as one JSON document per line.
                f.write(json.dumps(header) + '\n')
                for txt in txts:
                    f.write(json.dumps(txt) + '\n')
        else:
            with open(filename, 'a') as f:
                for txt in txts:
                    f.write(txt)
    except Exception as e:
        logging.getLogger().error('failed to write file: {}'.format(e))
class CmdExamine(ObjManipCommand):
key = 'investigate'
aliases = []
locks = 'cmd:perm(examine) or perm(Builder)'
help_category = 'Building'
arg_regex = '(/\\w+?(\\s|$))|\\s|$'
account_mode = False
def list_attribute(self, crop, attr, category, value):
if crop:
if (not isinstance(value, str)):
value = utils.to_str(value)
value = utils.crop(value)
if category:
string = ('\n %s[%s] = %s' % (attr, category, value))
else:
string = ('\n %s = %s' % (attr, value))
string = raw(string)
return string
def format_attributes(self, obj, attrname=None, crop=True):
if attrname:
db_attr = [(attrname, obj.attributes.get(attrname), None)]
try:
ndb_attr = [(attrname, object.__getattribute__(obj.ndb, attrname))]
except Exception:
ndb_attr = None
else:
db_attr = [(attr.key, attr.value, attr.category) for attr in obj.db_attributes.all()]
try:
ndb_attr = obj.nattributes.all(return_tuples=True)
except Exception:
ndb_attr = None
string = ''
if (db_attr and db_attr[0]):
string += '\n|wPersistent attributes|n:'
for (attr, value, category) in db_attr:
string += self.list_attribute(crop, attr, category, value)
if (ndb_attr and ndb_attr[0]):
string += '\n|wNon-Persistent attributes|n:'
for (attr, value) in ndb_attr:
string += self.list_attribute(crop, attr, None, value)
return string
def format_output(self, obj, avail_cmdset):
string = ('\n|wName/key|n: |c%s|n (%s)' % (obj.name, obj.dbref))
if (hasattr(obj, 'aliases') and obj.aliases.all()):
string += ('\n|wAliases|n: %s' % ', '.join(utils.make_iter(str(obj.aliases))))
if (hasattr(obj, 'sessions') and obj.sessions.all()):
string += ('\n|wSession id(s)|n: %s' % ', '.join((('#%i' % sess.sessid) for sess in obj.sessions.all())))
if (hasattr(obj, 'email') and obj.email):
string += ('\n|wEmail|n: |c%s|n' % obj.email)
if (hasattr(obj, 'has_account') and obj.has_account):
string += ('\n|wAccount|n: |c%s|n' % obj.account.name)
perms = obj.account.permissions.all()
if obj.account.is_superuser:
perms = ['<Superuser>']
elif (not perms):
perms = ['<None>']
string += ('\n|wAccount Perms|n: %s' % ', '.join(perms))
if obj.account.attributes.has('_quell'):
string += ' |r(quelled)|n'
string += ('\n|wTypeclass|n: %s (%s)' % (obj.typename, obj.typeclass_path))
if hasattr(obj, 'location'):
string += ('\n|wLocation|n: %s' % obj.location)
if obj.location:
string += (' (#%s)' % obj.location.id)
if hasattr(obj, 'home'):
string += ('\n|wHome|n: %s' % obj.home)
if obj.home:
string += (' (#%s)' % obj.home.id)
if (hasattr(obj, 'destination') and obj.destination):
string += ('\n|wDestination|n: %s' % obj.destination)
if obj.destination:
string += (' (#%s)' % obj.destination.id)
perms = obj.permissions.all()
if perms:
perms_string = ', '.join(perms)
else:
perms_string = '<None>'
if obj.is_superuser:
perms_string += ' [Superuser]'
string += ('\n|wPermissions|n: %s' % perms_string)
locks = str(obj.locks)
if locks:
            locks_string = utils.fill('; '.join(locks.split(';')), indent=6)
else:
locks_string = ' Default'
string += ('\n|wLocks|n:%s' % locks_string)
if (not ((len(obj.cmdset.all()) == 1) and (obj.cmdset.current.key == '_EMPTY_CMDSET'))):
stored_cmdsets = sorted(obj.cmdset.all(), key=(lambda x: x.priority), reverse=True)
string += ('\n|wStored Cmdset(s)|n:\n %s' % '\n '.join((('%s [%s] (%s, prio %s)' % (cmdset.path, cmdset.key, cmdset.mergetype, cmdset.priority)) for cmdset in stored_cmdsets if (cmdset.key != '_EMPTY_CMDSET'))))
all_cmdsets = [(cmdset.key, cmdset) for cmdset in avail_cmdset.merged_from]
if (hasattr(obj, 'account') and obj.account):
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.cmdset.all()])
if obj.sessions.count():
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.sessions.all()[0].cmdset.all()])
else:
try:
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.get_session(obj.sessions.get()).cmdset.all()])
except (TypeError, AttributeError):
pass
all_cmdsets = [cmdset for cmdset in dict(all_cmdsets).values()]
all_cmdsets.sort(key=(lambda x: x.priority), reverse=True)
string += ('\n|wMerged Cmdset(s)|n:\n %s' % '\n '.join((('%s [%s] (%s, prio %s)' % (cmdset.path, cmdset.key, cmdset.mergetype, cmdset.priority)) for cmdset in all_cmdsets)))
avail_cmdset = sorted([cmd.key for cmd in avail_cmdset if cmd.access(obj, 'cmd')])
cmdsetstr = utils.fill(', '.join(avail_cmdset), indent=2)
string += ('\n|wCommands available to %s (result of Merged CmdSets)|n:\n %s' % (obj.key, cmdsetstr))
if (hasattr(obj, 'scripts') and hasattr(obj.scripts, 'all') and obj.scripts.all()):
string += ('\n|wScripts|n:\n %s' % obj.scripts)
string += self.format_attributes(obj)
tags_string = utils.fill(', '.join((('%s[%s]' % (tag, category)) for (tag, category) in obj.tags.all(return_key_and_category=True))), indent=5)
if tags_string:
string += ('\n|wTags[category]|n: %s' % tags_string.strip())
exits = []
pobjs = []
things = []
if hasattr(obj, 'contents'):
for content in obj.contents:
if content.destination:
exits.append(content)
elif content.account:
pobjs.append(content)
else:
things.append(content)
if exits:
string += ('\n|wExits|n: %s' % ', '.join([('%s(%s)' % (exit.name, exit.dbref)) for exit in exits]))
if pobjs:
string += ('\n|wCharacters|n: %s' % ', '.join([('|c%s|n(%s)' % (pobj.name, pobj.dbref)) for pobj in pobjs]))
if things:
string += ('\n|wContents|n: %s' % ', '.join([('%s(%s)' % (cont.name, cont.dbref)) for cont in obj.contents if ((cont not in exits) and (cont not in pobjs))]))
separator = ('-' * _DEFAULT_WIDTH)
return ('%s\n%s\n%s' % (separator, string.strip(), separator))
def func(self):
caller = self.caller
def get_cmdset_callback(cmdset):
string = self.format_output(obj, cmdset)
self.msg(string.strip())
if (not self.args):
if hasattr(caller, 'location'):
obj = caller.location
if (not obj.access(caller, 'examine')):
self.msg(caller.at_look(obj))
return
get_and_merge_cmdsets(obj, self.session, self.account, obj, 'object', self.raw_string).addCallback(get_cmdset_callback)
else:
self.msg('You need to supply a target to examine.')
return
for objdef in self.lhs_objattr:
obj = None
obj_name = objdef['name']
obj_attrs = objdef['attrs']
self.account_mode = (utils.inherits_from(caller, 'evennia.accounts.accounts.DefaultAccount') or ('account' in self.switches) or obj_name.startswith('*'))
if self.account_mode:
try:
obj = caller.search_account(obj_name.lstrip('*'))
except AttributeError:
obj = caller.search(obj_name.lstrip('*'), search_object=('object' in self.switches))
else:
obj = caller.search(obj_name)
if (not obj):
continue
if (not obj.access(caller, 'examine')):
self.msg(caller.at_look(obj))
continue
if obj_attrs:
for attrname in obj_attrs:
caller.msg(self.format_attributes(obj, attrname, crop=False))
else:
if obj.sessions.count():
mergemode = 'session'
elif self.account_mode:
mergemode = 'account'
else:
mergemode = 'object'
get_and_merge_cmdsets(obj, self.session, self.account, obj, mergemode, self.raw_string).addCallback(get_cmdset_callback) |
def train_step(model, dataset, optimizer, scheduler, scaler, amp=False):
model.train()
with autocast(enabled=amp):
logits = model(graph=dataset.graph, x=dataset.node_features)
loss = dataset.loss_fn(input=logits[dataset.train_idx], target=dataset.labels[dataset.train_idx])
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
scheduler.step() |
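For reference, a self-contained version of the same AMP update order (scale, backward, step, update) on a toy model; GradScaler is constructed with enabled=False so the sketch also runs on CPU:

import torch
from torch import nn
from torch.cuda.amp import GradScaler, autocast

model = nn.Linear(16, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
scaler = GradScaler(enabled=False)  # enabled=True requires CUDA

(x, y) = (torch.randn(8, 16), torch.randint(0, 4, (8,)))
with autocast(enabled=False):
    loss = nn.functional.cross_entropy(model(x), y)
scaler.scale(loss).backward()
scaler.step(optimizer)  # unscales gradients and skips the step on inf/nan
scaler.update()
optimizer.zero_grad()
scheduler.step()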
def parse_dates(data, tree, sup, regions, territory):
week_data = data.setdefault('week_data', {})
supelem = sup.find('.//weekData')
for elem in supelem.findall('minDays'):
if _should_skip_elem(elem):
continue
territories = elem.attrib['territories'].split()
if ((territory in territories) or any(((r in territories) for r in regions))):
week_data['min_days'] = int(elem.attrib['count'])
for elem in supelem.findall('firstDay'):
if _should_skip_elem(elem):
continue
territories = elem.attrib['territories'].split()
if ((territory in territories) or any(((r in territories) for r in regions))):
week_data['first_day'] = weekdays[elem.attrib['day']]
for elem in supelem.findall('weekendStart'):
if _should_skip_elem(elem):
continue
territories = elem.attrib['territories'].split()
if ((territory in territories) or any(((r in territories) for r in regions))):
week_data['weekend_start'] = weekdays[elem.attrib['day']]
for elem in supelem.findall('weekendEnd'):
if _should_skip_elem(elem):
continue
territories = elem.attrib['territories'].split()
if ((territory in territories) or any(((r in territories) for r in regions))):
week_data['weekend_end'] = weekdays[elem.attrib['day']]
zone_formats = data.setdefault('zone_formats', {})
for elem in tree.findall('.//timeZoneNames/gmtFormat'):
if (not _should_skip_elem(elem)):
zone_formats['gmt'] = str(elem.text).replace('{0}', '%s')
break
for elem in tree.findall('.//timeZoneNames/regionFormat'):
if (not _should_skip_elem(elem)):
zone_formats['region'] = str(elem.text).replace('{0}', '%s')
break
for elem in tree.findall('.//timeZoneNames/fallbackFormat'):
if (not _should_skip_elem(elem)):
zone_formats['fallback'] = str(elem.text).replace('{0}', '%(0)s').replace('{1}', '%(1)s')
break
for elem in tree.findall('.//timeZoneNames/fallbackRegionFormat'):
if (not _should_skip_elem(elem)):
zone_formats['fallback_region'] = str(elem.text).replace('{0}', '%(0)s').replace('{1}', '%(1)s')
break
time_zones = data.setdefault('time_zones', {})
for elem in tree.findall('.//timeZoneNames/zone'):
info = {}
city = elem.findtext('exemplarCity')
if city:
info['city'] = str(city)
for child in elem.findall('long/*'):
info.setdefault('long', {})[child.tag] = str(child.text)
for child in elem.findall('short/*'):
info.setdefault('short', {})[child.tag] = str(child.text)
time_zones[elem.attrib['type']] = info
meta_zones = data.setdefault('meta_zones', {})
for elem in tree.findall('.//timeZoneNames/metazone'):
info = {}
city = elem.findtext('exemplarCity')
if city:
info['city'] = str(city)
for child in elem.findall('long/*'):
info.setdefault('long', {})[child.tag] = str(child.text)
for child in elem.findall('short/*'):
info.setdefault('short', {})[child.tag] = str(child.text)
meta_zones[elem.attrib['type']] = info |
@torch.no_grad()
def scan_evaluate(predictions):
num_heads = len(predictions)
output = []
for head in predictions:
probs = head['probabilities']
neighbors = head['neighbors']
anchors = torch.arange(neighbors.size(0)).view((- 1), 1).expand_as(neighbors)
entropy_loss = entropy(torch.mean(probs, dim=0), input_as_probabilities=True).item()
similarity = torch.matmul(probs, probs.t())
neighbors = neighbors.contiguous().view((- 1))
anchors = anchors.contiguous().view((- 1))
similarity = similarity[(anchors, neighbors)]
ones = torch.ones_like(similarity)
consistency_loss = F.binary_cross_entropy(similarity, ones).item()
total_loss = ((- entropy_loss) + consistency_loss)
output.append({'entropy': entropy_loss, 'consistency': consistency_loss, 'total_loss': total_loss})
total_losses = [output_['total_loss'] for output_ in output]
lowest_loss_head = np.argmin(total_losses)
lowest_loss = np.min(total_losses)
return {'scan': output, 'lowest_loss_head': lowest_loss_head, 'lowest_loss': lowest_loss} |
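On toy tensors, the consistency term reduces to dot products between each anchor's cluster probabilities and its neighbors'; the entropy helper is repo-specific, so an assumed equivalent is inlined here:

import torch
import torch.nn.functional as F

probs = F.softmax(torch.randn(6, 3), dim=1)  # (N samples, C clusters)
neighbors = torch.randint(0, 6, (6, 4))  # (N, k) neighbor indices

# assumed equivalent of the repo's entropy(..., input_as_probabilities=True)
mean_p = torch.mean(probs, dim=0)
entropy_loss = (-(mean_p * torch.log(mean_p.clamp(min=1e-08))).sum())

similarity = torch.matmul(probs, probs.t())
anchors = torch.arange(6).view(-1, 1).expand_as(neighbors)
sim = similarity[(anchors.reshape(-1), neighbors.reshape(-1))]
consistency_loss = F.binary_cross_entropy(sim, torch.ones_like(sim))
print(entropy_loss.item(), consistency_loss.item())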
class FillFormatter(Formatter):
def __init__(self, num_headers=1):
super().__init__()
self.__num_headers = num_headers
self.__prev_cell = None
def clear(self, cell):
self.__prev_cell = cell
def apply(self, cell, *args, **kwargs):
if (self.__prev_cell is None):
return
prev_cell = self.__prev_cell
self.__prev_cell = None
prev_range = prev_cell.to_range(com_package='win32com')
new_range = cell.to_range(com_package='win32com')
if (prev_range.Rows.Count > new_range.Rows.Count):
num_rows = (prev_range.Rows.Count - new_range.Rows.Count)
rows = prev_range.GetOffset(RowOffset=new_range.Rows.Count)
rows = rows.GetResize(RowSize=num_rows)
rows.ClearFormats()
if (prev_range.Columns.Count > new_range.Columns.Count):
num_cols = (prev_range.Columns.Count - new_range.Columns.Count)
cols = prev_range.GetOffset(ColumnOffset=new_range.Columns.Count)
cols = cols.GetResize(ColumnSize=num_cols)
cols.ClearFormats()
if ((new_range.Columns.Count > prev_range.Columns.Count) or (new_range.Rows.Count > prev_range.Rows.Count)):
prev_rows = prev_range
new_rows = new_range
if (self.__num_headers > 0):
prev_header = prev_range.GetResize(RowSize=self.__num_headers)
new_header = new_range.GetResize(RowSize=self.__num_headers)
prev_header.Copy()
new_header.PasteSpecial(Paste=constants.xlPasteFormats, Operation=constants.xlNone)
if (prev_range.Rows.Count > self.__num_headers):
prev_rows = prev_rows.GetOffset(RowOffset=self.__num_headers)
prev_rows = prev_rows.GetResize(RowSize=(prev_range.Rows.Count - self.__num_headers))
new_rows = new_rows.GetOffset(RowOffset=self.__num_headers)
new_rows = new_rows.GetResize(RowSize=(new_range.Rows.Count - self.__num_headers))
prev_rows.Copy()
new_rows.PasteSpecial(Paste=constants.xlPasteFormats, Operation=constants.xlNone)
new_rows.Application.CutCopyMode = False |
class FC6_XConfig(FC3_XConfig):
removedKeywords = (FC3_XConfig.removedKeywords + ['card', 'hsync', 'monitor', 'noProbe', 'server', 'vsync'])
removedAttrs = (FC3_XConfig.removedAttrs + ['card', 'hsync', 'monitor', 'noProbe', 'server', 'vsync'])
def __init__(self, writePriority=0, *args, **kwargs):
FC3_XConfig.__init__(self, writePriority, *args, **kwargs)
self.deleteRemovedAttrs()
self.driver = kwargs.get('driver', '')
def __str__(self):
retval = KickstartCommand.__str__(self)
if (hasattr(self, 'driver') and self.driver):
retval += (' --driver=%s' % self.driver)
if self.defaultdesktop:
retval += (' --defaultdesktop=%s' % self.defaultdesktop)
if (self.depth != 0):
retval += (' --depth=%d' % self.depth)
if (hasattr(self, 'resolution') and self.resolution):
retval += (' --resolution=%s' % self.resolution)
if self.startX:
retval += ' --startxonboot'
if (hasattr(self, 'videoRam') and self.videoRam):
retval += (' --videoram=%s' % self.videoRam)
if retval:
retval = ('# X Window System configuration information\nxconfig %s\n' % retval)
return retval
def _getParser(self):
op = FC3_XConfig._getParser(self)
op.add_argument('--card', deprecated=FC6)
op.add_argument('--driver', version=FC6, help='REMOVED')
op.add_argument('--hsync', deprecated=FC6)
op.add_argument('--monitor', deprecated=FC6)
op.add_argument('--noprobe', deprecated=FC6)
op.add_argument('--vsync', deprecated=FC6)
op.remove_argument('--server', version=FC6, help='')
return op |
def record_time(time_tracker):
if time_tracker:
average = defaultdict(float)
for check in time_tracker['Iteration 1']:
iterations = 0
for values in time_tracker.values():
if (check in values):
average[check] += values[check]
iterations += 1
average[check] /= iterations
time_tracker['Average'] = average
with open('./time_tracker.json', 'w+') as file:
json.dump(time_tracker, file, indent=4, separators=(',', ': ')) |
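The averaging logic only considers checks present in 'Iteration 1' and divides each sum by the number of iterations that actually contain the check; a small illustration with made-up timings:

from collections import defaultdict

time_tracker = {'Iteration 1': {'etcd': 1.0, 'dns': 2.0}, 'Iteration 2': {'etcd': 3.0}}
average = defaultdict(float)
for check in time_tracker['Iteration 1']:
    runs = [values[check] for values in time_tracker.values() if (check in values)]
    average[check] = (sum(runs) / len(runs))
print(dict(average))  # {'etcd': 2.0, 'dns': 2.0}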
@POSENETS.register_module()  # decorator reconstructed; registry name assumed
class CLAMP(BasePose):
def __init__(self, backbone, text_encoder, context_decoder, class_names, context_length, score_concat_index=3, identity_head=None, upconv_head=None, token_embed_dim=512, text_dim=1024, clip_pretrained=None, matching_only=False, visual_dim=256, CL_ratio=1.0, prompt_encoder=None, keypoint_head=None, train_cfg=None, test_cfg=None, loss_pose=None, pretrained=None):
super().__init__()
self.fp16_enabled = False
self.backbone = builder.build_backbone(backbone)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if (keypoint_head is not None):
keypoint_head['train_cfg'] = train_cfg
keypoint_head['test_cfg'] = test_cfg
if (('loss_keypoint' not in keypoint_head) and (loss_pose is not None)):
warnings.warn('`loss_pose` for TopDown is deprecated, use `loss_keypoint` for heads instead. See for more information.', DeprecationWarning)
keypoint_head['loss_keypoint'] = loss_pose
self.keypoint_head = builder.build_head(keypoint_head)
if (text_encoder is not None):
self.text_encoder = builder.build_backbone(text_encoder)
if (context_decoder is not None):
self.context_decoder = builder.build_backbone(context_decoder)
self.with_prompt_encoder = False
if (prompt_encoder is not None):
self.prompt_encoder = builder.build_backbone(prompt_encoder)
self.with_prompt_encoder = True
self.init_weights(pretrained=None, clip_pretrained=clip_pretrained)
self.context_length = context_length
self.score_concat_index = score_concat_index
self.with_identity_head = False
self.with_upconv_head = False
self._init_identity_head(identity_head)
self._init_upconv_head(upconv_head)
self.class_names = class_names
self.matching_only = matching_only
self.texts = torch.cat([tokenize(c, context_length=self.context_length) for c in class_names])
self.num_classes = len(self.class_names)
self.logit_scale = nn.Parameter((torch.ones([]) * np.log((1 / 0.07))))
self.text_projection = nn.Parameter(torch.empty(text_encoder['embed_dim'], visual_dim))
nn.init.normal_(self.text_projection, std=(text_encoder['embed_dim'] ** (- 0.5)))
self.CL_visual = nn.CrossEntropyLoss(reduce=False)
self.CL_text = nn.CrossEntropyLoss(reduce=False)
self.CL_ratio = CL_ratio
context_length = (self.text_encoder.context_length - self.context_length)
self.contexts = nn.Parameter(torch.randn(1, context_length, token_embed_dim))
nn.init.trunc_normal_(self.contexts)
self.gamma = nn.Parameter((torch.ones(text_dim) * 0.001))
    @property
    def with_keypoint(self):
return hasattr(self, 'keypoint_head')
def init_weights(self, pretrained=None, clip_pretrained=None):
self.backbone.init_weights(pretrained=clip_pretrained)
if self.with_keypoint:
self.keypoint_head.init_weights()
self.text_encoder.init_weights()
def load_checkpoint(self, filename, map_location='cpu', strict=False, revise_keys=[('^module.', '')]):
logger = logging.getLogger()
return load_checkpoint_cococlip(self, filename, map_location, strict, logger, revise_keys=revise_keys)
def _init_identity_head(self, identity_head):
if (identity_head is not None):
self.with_identity_head = True
self.identity_head = builder.build_head(identity_head)
def _init_upconv_head(self, upconv_head):
if (upconv_head is not None):
self.with_upconv_head = True
self.upconv_head = builder.build_head(upconv_head)
    @auto_fp16(apply_to=('img',))
def forward(self, img, target=None, target_weight=None, img_metas=None, return_loss=True, return_heatmap=False, **kwargs):
if return_loss:
return self.forward_train(img, target, target_weight, img_metas, **kwargs)
return self.forward_test(img, img_metas, return_heatmap=return_heatmap, **kwargs)
def spatial_adapt(self, x):
x_orig = list(x[0:4])
(cls_token, visual_embeddings) = x[4]
(B, C, H, W) = visual_embeddings.shape
text_embeddings = self.text_encoder(self.texts.to(cls_token.device), self.contexts).expand(B, (- 1), (- 1))
if self.with_prompt_encoder:
text_embeddings = self.prompt_encoder(text_embeddings)
visual_tokens = torch.cat([cls_token.reshape(B, C, 1), visual_embeddings.reshape(B, C, (H * W))], dim=2).permute(0, 2, 1)
refine_emb = self.context_decoder(text_embeddings, visual_tokens)
prompt_embeddings = (text_embeddings + (self.gamma * refine_emb))
visual_embeddings_norm = F.normalize(visual_embeddings, dim=1, p=2)
prompt_embeddings_norm = F.normalize(prompt_embeddings, dim=2, p=2)
score_map = torch.einsum('bchw,bkc->bkhw', visual_embeddings_norm, prompt_embeddings_norm)
x_orig[self.score_concat_index] = torch.cat([x_orig[self.score_concat_index], score_map], dim=1)
return (prompt_embeddings, x_orig[self.score_concat_index], score_map)
def feature_adapt(self, visual_embeddings, text_embeddings, target, target_weight):
(B, C, H, W) = visual_embeddings.shape
(B, K, D) = text_embeddings.shape
if (D != C):
            text_embeddings = (text_embeddings @ self.text_projection)
target_mask = torch.where((target == 1), 1, 0)
visual_embeddings = torch.sum(torch.einsum('bkhw,bchw->bkhwc', target_mask, visual_embeddings), dim=(2, 3))
visual_embeddings = F.normalize(visual_embeddings, p=2, dim=(- 1))
text_embeddings = F.normalize(text_embeddings, p=2, dim=(- 1))
logit_scale = self.logit_scale.exp()
logits_per_image = (logit_scale * torch.einsum('bhc,bwc->bhw', visual_embeddings, text_embeddings))
logits_per_text = logits_per_image.transpose(1, 2).contiguous()
losses = dict()
labels = torch.arange(K, device=logits_per_image.device).expand(B, (- 1))
loss_visual = (self.CL_visual(logits_per_image, labels) * target_weight.squeeze())
loss_text = (self.CL_text(logits_per_text, labels) * target_weight.squeeze())
contrastive_loss = ((loss_visual.mean() + loss_text.mean()) / 2)
losses['feature_loss'] = (contrastive_loss * self.CL_ratio)
return losses
def forward_train(self, img, target, target_weight, img_metas, **kwargs):
(target, target_down) = target
(target_weight, target_down_weight) = target_weight
x = self.backbone(img)
(text_embeddings, output, score_map) = self.spatial_adapt(x)
if self.with_keypoint:
output = self.keypoint_head(output)
losses = dict()
contrastive_loss = self.feature_adapt(x[4][1], text_embeddings, target_down, target_down_weight)
losses.update(contrastive_loss)
if self.with_keypoint:
keypoint_losses = self.keypoint_head.get_loss(output, target, target_weight)
losses.update(keypoint_losses)
if (not self.matching_only):
keypoint_accuracy = self.keypoint_head.get_accuracy(output, target, target_weight)
losses.update(keypoint_accuracy)
if self.with_upconv_head:
score_map = self.upconv_head(score_map)
if self.with_identity_head:
spatial_losses = self.identity_head.get_loss(score_map, target, target_weight)
losses.update(spatial_losses)
if self.matching_only:
keypoint_accuracy = self.identity_head.get_accuracy(score_map, target, target_weight)
losses.update(keypoint_accuracy)
return losses
def forward_test(self, img, img_metas, return_heatmap=False, **kwargs):
assert (img.size(0) == len(img_metas))
(batch_size, _, img_height, img_width) = img.shape
if (batch_size > 1):
assert ('bbox_id' in img_metas[0])
result = {}
features = self.backbone(img)
(text_embeddings, features, score_map) = self.spatial_adapt(features)
if self.with_keypoint:
if (not self.matching_only):
output_heatmap = self.keypoint_head.inference_model(features, flip_pairs=None)
else:
assert self.with_upconv_head
output_heatmap = self.upconv_head.inference_model(score_map, flip_pairs=None)
if self.test_cfg.get('flip_test', True):
img_flipped = img.flip(3)
features_flipped = self.backbone(img_flipped)
(text_embeddings, features_flipped, score_map_flipped) = self.spatial_adapt(features_flipped)
if self.with_keypoint:
if (not self.matching_only):
output_flipped_heatmap = self.keypoint_head.inference_model(features_flipped, img_metas[0]['flip_pairs'])
else:
assert self.with_upconv_head
output_flipped_heatmap = self.upconv_head.inference_model(score_map_flipped, img_metas[0]['flip_pairs'])
output_heatmap = ((output_heatmap + output_flipped_heatmap) * 0.5)
if self.with_keypoint:
if (not self.matching_only):
keypoint_result = self.keypoint_head.decode(img_metas, output_heatmap, img_size=[img_width, img_height])
else:
keypoint_result = self.upconv_head.decode(img_metas, output_heatmap, img_size=[img_width, img_height])
result.update(keypoint_result)
if (not return_heatmap):
output_heatmap = None
result['output_heatmap'] = output_heatmap
return result
    @deprecated_api_warning({'pose_limb_color': 'pose_link_color'}, cls_name='TopDown')
def show_result(self, img, result, skeleton=None, kpt_score_thr=0.3, bbox_color='green', pose_kpt_color=None, pose_link_color=None, text_color='white', radius=4, thickness=1, font_scale=0.5, bbox_thickness=1, win_name='', show=False, show_keypoint_weight=False, wait_time=0, out_file=None):
img = mmcv.imread(img)
img = img.copy()
bbox_result = []
bbox_labels = []
pose_result = []
for res in result:
if ('bbox' in res):
bbox_result.append(res['bbox'])
bbox_labels.append(res.get('label', None))
pose_result.append(res['keypoints'])
if bbox_result:
bboxes = np.vstack(bbox_result)
imshow_bboxes(img, bboxes, labels=bbox_labels, colors=bbox_color, text_color=text_color, thickness=bbox_thickness, font_scale=font_scale, show=False)
if pose_result:
imshow_keypoints(img, pose_result, skeleton, kpt_score_thr, pose_kpt_color, pose_link_color, radius, thickness)
if show:
imshow(img, win_name, wait_time)
if (out_file is not None):
imwrite(img, out_file)
return img |
class W_Plumber(values.W_Object):
_attrs_ = ['callbacks', 'weak_callbacks']
    def __init__(self, callbacks=None, weak_callbacks=None):
        self.callbacks = (callbacks if (callbacks is not None) else {})
        self.weak_callbacks = (weak_callbacks if (weak_callbacks is not None) else {})
def get_callbacks(self):
return self.callbacks
def get_weak_callbacks(self):
return self.weak_callbacks
def set_callback(self, h, proc):
self.callbacks[h] = proc
def set_weak_callback(self, h, proc):
self.weak_callbacks[h] = proc
def remove_handle(self, handle):
if (handle in self.callbacks):
del self.callbacks[handle]
if (handle in self.weak_callbacks):
del self.weak_callbacks[handle] |
def finalize_construction(breakpoints):
breakpoints.sort()
breakpoints_out = []
f_last = None
for (f, c) in breakpoints:
if ((f_last is not None) and (f == f_last)):
breakpoints_out[(- 1)][1] += c
else:
breakpoints_out.append([f, c])
f_last = f
breakpoints_out = [(f, c) for (f, c) in breakpoints_out if (c != 0)]
return breakpoints_out |
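Assuming the function is importable, a short illustration: entries are (frequency, count) pairs, duplicates of the same frequency are merged after sorting, and anything that cancels to a zero count is dropped:

bps = [(0.5, 2), (0.1, 1), (0.5, -2), (0.9, 3)]
print(finalize_construction(bps))  # [(0.1, 1), (0.9, 3)] -- the 0.5 entries cancel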
def test_function_utils():
    def dummy_func2d(x):
return (x + 1)
(T, D) = (10, 24)
np.random.seed(1234)
X = np.random.rand(2, T, D)
lengths = [60, 100]
    Y = apply_each2d_padded(dummy_func2d, X, lengths)
for (i, l) in enumerate(lengths):
assert np.allclose((X[i][:l] + 1), Y[i][:l])
assert np.all((Y[i][l:] == 0))
for (i, l) in enumerate(lengths):
X[i][l:] = 0
    Y = apply_each2d_trim(dummy_func2d, X)
for (i, l) in enumerate(lengths):
assert np.allclose((X[i][:l] + 1), Y[i][:l])
assert np.all((Y[i][l:] == 0)) |
def test_svg_circuit():
g = cq_testing.GateHelper(MultiAnd(cvs=(1, 1, 1)))
svg = svg_circuit(g.circuit, g.r)
svg_str = svg.data
assert (svg_str.find('ctrl') < svg_str.find('junk') < svg_str.find('target'))
with pytest.raises(ValueError):
svg_circuit(cirq.Circuit())
with pytest.raises(ValueError):
svg_circuit(cirq.Circuit(cirq.Moment())) |
def test_stdsim_line_buffering(base_app):
import os
import tempfile
file = tempfile.NamedTemporaryFile(mode='wt')
file.line_buffering = True
stdsim = cu.StdSim(file, echo=True)
saved_size = os.path.getsize(file.name)
bytes_to_write = b'hello\n'
stdsim.buffer.write(bytes_to_write)
assert (os.path.getsize(file.name) == (saved_size + len(bytes_to_write)))
saved_size = os.path.getsize(file.name)
bytes_to_write = b'hello\r'
stdsim.buffer.write(bytes_to_write)
assert (os.path.getsize(file.name) == (saved_size + len(bytes_to_write))) |
def main():
pp.connect(use_gui=True)
pp.add_data_path()
p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraPitch=(- 20), cameraYaw=80, cameraTargetPosition=[0, 0, 0.2])
p.loadURDF('plane.urdf')
ri = safepicking.pybullet.PandaRobotInterface()
cube = pp.create_box(0.03, 0.05, 0.1, mass=0.1, color=(0, 1, 0, 1))
ee_to_world = ri.get_pose('tipLink')
pp.draw_pose(ee_to_world)
obj_to_ee = ([0, 0, 0.05], [0, 0, 0, 1])
obj_to_world = pp.multiply(ee_to_world, obj_to_ee)
p.resetBasePositionAndOrientation(cube, *obj_to_world)
ri.gripper.contact_constraint = p.createConstraint(parentBodyUniqueId=ri.robot, parentLinkIndex=ri.ee, childBodyUniqueId=cube, childLinkIndex=(- 1), jointType=p.JOINT_FIXED, jointAxis=(0, 0, 0), parentFramePosition=obj_to_ee[0], parentFrameOrientation=obj_to_ee[1], childFramePosition=(0, 0, 0), childFrameOrientation=(0, 0, 0, 1))
ri.gripper.activated = True
attachments = [pp.Attachment(ri.robot, ri.ee, obj_to_ee, cube)]
c_init = safepicking.geometry.Coordinate(*pp.get_pose(cube))
c1 = c_init.copy()
c1.translate([0.3, (- 0.3), (- 0.3)], wrt='world')
c1.rotate([np.deg2rad(45), 0, 0], wrt='local')
pp.draw_pose(c1.pose)
c2 = c_init.copy()
c2.translate([0.3, 0.3, (- 0.3)], wrt='world')
c2.rotate([np.deg2rad((- 45)), 0, 0], wrt='local')
pp.draw_pose(c2.pose)
robot_model = ri.get_skrobot(attachments=attachments)
while True:
joint_positions = robot_model.inverse_kinematics(c1.skrobot_coords, move_target=robot_model.attachment_link0)
for _ in ri.movej(joint_positions[:(- 1)]):
p.stepSimulation()
time.sleep((1 / 240))
joint_positions = robot_model.inverse_kinematics(c2.skrobot_coords, move_target=robot_model.attachment_link0)
for _ in ri.movej(joint_positions[:(- 1)]):
p.stepSimulation()
time.sleep((1 / 240))
pp.disconnect() |
class ExtendedNet(nn.Module):
def __init__(self):
super(ExtendedNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=(2, 2))
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=(2, 2), bias=False)
self.conv2_drop = nn.Dropout2d()
self.conv3 = nn.Conv2d(64, 64, kernel_size=5, padding=(2, 2))
self.fc1 = nn.Linear(((3 * 3) * 64), 1024, bias=False)
self.fc2 = nn.Linear(1024, 10)
def forward(self, *inputs):
x = functional.relu(functional.max_pool2d(self.conv1(*inputs), 2))
x = functional.relu(functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = functional.relu(functional.max_pool2d(self.conv3(x), 2))
x = x.view(x.size(0), (- 1))
x = functional.relu(self.fc1(x))
x = functional.dropout(x, training=self.training)
x = self.fc2(x)
return functional.log_softmax(x, dim=1) |
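A quick shape check, assuming the class and its torch imports are available and inputs are MNIST-sized (1x28x28), which the 3*3*64 fully-connected input implies after three 2x2 max-pools (28 -> 14 -> 7 -> 3):

import torch

net = ExtendedNet()
net.eval()
with torch.no_grad():
    log_probs = net(torch.randn(4, 1, 28, 28))
print(log_probs.shape)  # torch.Size([4, 10])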
def mask_requests_args(url, validating=False, params_checker=None, **kwargs):
requests_kwargs = {key: val for (key, val) in iteritems(kwargs) if (key in ALLOWED_REQUESTS_KWARGS)}
if (params_checker is not None):
(url, s_params) = params_checker(url)
if s_params:
if ('params' in requests_kwargs):
requests_kwargs['params'].update(s_params)
else:
requests_kwargs['params'] = s_params
requests_kwargs['timeout'] = (1.0 if validating else 30.0)
requests_kwargs.update(SHARED_REQUESTS_KWARGS)
request_pair = namedtuple('RequestPair', ('requests_kwargs', 'url'))
return request_pair(requests_kwargs, url) |
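The same allowlist-filter-plus-namedtuple pattern in isolation, with hypothetical stand-ins for ALLOWED_REQUESTS_KWARGS and SHARED_REQUESTS_KWARGS:

from collections import namedtuple

ALLOWED = {'params', 'headers', 'verify'}  # hypothetical allowlist
SHARED = {'verify': True}  # hypothetical shared defaults

def filter_requests_kwargs(url, validating=False, **kwargs):
    out = {key: val for (key, val) in kwargs.items() if (key in ALLOWED)}
    out['timeout'] = (1.0 if validating else 30.0)
    out.update(SHARED)
    return namedtuple('RequestPair', ('requests_kwargs', 'url'))(out, url)

pair = filter_requests_kwargs('https://example.com', headers={'X-A': '1'}, stream=True)
print(pair.requests_kwargs)  # 'stream' is dropped; timeout and verify are injected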
def all_gather(data):
world_size = get_world_size()
if (world_size == 1):
return [data]
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to('cuda')
local_size = torch.IntTensor([tensor.numel()]).to('cuda')
size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
if (local_size != max_size):
padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
tensor = torch.cat((tensor, padding), 0)
dist.all_gather(tensor_list, tensor)
data_list = []
for (size, tensor) in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list |
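The core trick above is serializing to a byte tensor, padding every rank to the common maximum, then slicing the padding back off before unpickling; the round trip can be checked locally without a process group:

import pickle
import torch

data = {'rank': 0, 'payload': [1, 2, 3]}
buf = pickle.dumps(data)
tensor = torch.frombuffer(bytearray(buf), dtype=torch.uint8)

# pretend another rank sent 5 more bytes, forcing padding
padded = torch.cat((tensor, torch.zeros(5, dtype=torch.uint8)))
restored = pickle.loads(padded.numpy().tobytes()[:tensor.numel()])
assert (restored == data)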
class HKCCM1(FinTS3Segment):
account = DataElementGroupField(type=KTI1, _d='Kontoverbindung international')
sum_amount = DataElementGroupField(type=Amount1, _d='Summenfeld')
    request_single_booking = DataElementField(type='jn', _d='Einzelbuchung gewünscht')
sepa_descriptor = DataElementField(type='an', max_length=256, _d='SEPA Descriptor')
sepa_pain_message = DataElementField(type='bin', _d='SEPA pain message') |
def create_floors(bm, faces, prop):
(slabs, walls, roof) = extrude_slabs_and_floors(bm, faces, prop)
bmesh.ops.recalc_face_normals(bm, faces=bm.faces)
add_faces_to_group(bm, slabs, MaterialGroup.SLABS)
add_faces_to_group(bm, walls, MaterialGroup.WALLS)
add_faces_to_group(bm, roof, MaterialGroup.ROOF) |
class BaseListSchema(Schema):
OPTIONS_CLASS = BaseOpts
    @pre_load
def wrap_data_envelope(self, data, **kwargs):
data = dict(data=data)
return data
    @post_dump
def unwrap_data_envelope(self, data, **kwargs):
return data['data']
    @post_load
def make_object(self, data, **kwargs):
decoding_class = self.opts.decoding_class
list_ = data['data']
return decoding_class(list_) |
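A runnable marshmallow sketch of the same envelope round trip, with a concrete field in place of whatever BaseOpts configures:

from marshmallow import Schema, fields, post_dump, pre_load

class NameListSchema(Schema):
    data = fields.List(fields.Str())

    @pre_load
    def wrap_data_envelope(self, data, **kwargs):
        return {'data': data}

    @post_dump
    def unwrap_data_envelope(self, data, **kwargs):
        return data['data']

schema = NameListSchema()
print(schema.load(['a', 'b']))  # {'data': ['a', 'b']}
print(schema.dump({'data': ['a']}))  # ['a']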
def create_dialogue(utterances, segment_ids, redundancy_ids):
dialogue = []
for (index, utterance) in enumerate(utterances):
if (index in segment_ids):
dialogue.append('[TS]')
if (index in redundancy_ids):
words = utterance.split()
assert (words[1] == ':')
words.insert(2, '[RD]')
utterance = ' '.join(words)
dialogue.append(utterance)
return ' <|endoftext|> '.join(dialogue) |
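A small demonstration, assuming the 'speaker : text' utterance format that the words[1] == ':' assertion implies; note the '[TS]' marker is joined as its own element:

utterances = ['alice : hello there', 'bob : hi , how are you', 'alice : fine thanks']
print(create_dialogue(utterances, segment_ids={1}, redundancy_ids={2}))
# alice : hello there <|endoftext|> [TS] <|endoftext|> bob : hi , how are you <|endoftext|> alice : [RD] fine thanks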
class Model(nn.Module):
def __init__(self, *, n_num_features: int, n_bin_features: int, cat_cardinalities: list[int], n_classes: Optional[int], num_embeddings: Optional[dict], backbone: dict) -> None:
assert (n_num_features or n_bin_features or cat_cardinalities)
if (num_embeddings is not None):
assert n_num_features
assert (backbone['type'] in ['MLP'])
super().__init__()
if (num_embeddings is None):
self.m_num = (nn.Identity() if n_num_features else None)
d_num = n_num_features
else:
self.m_num = lib.make_module(num_embeddings, n_features=n_num_features)
d_num = (n_num_features * num_embeddings['d_embedding'])
self.m_bin = (nn.Identity() if n_bin_features else None)
self.m_cat = (lib.OneHotEncoder(cat_cardinalities) if cat_cardinalities else None)
self.backbone = lib.make_module(backbone, d_in=((d_num + n_bin_features) + sum(cat_cardinalities)), d_out=lib.get_d_out(n_classes))
self.flat = True
def forward(self, *, x_num: Optional[Tensor], x_bin: Optional[Tensor], x_cat: Optional[Tensor]) -> Tensor:
x = []
for (module, x_) in [(self.m_num, x_num), (self.m_bin, x_bin), (self.m_cat, x_cat)]:
if (x_ is None):
assert (module is None)
else:
assert (module is not None)
x.append(module(x_))
del x_
if self.flat:
x = torch.cat([x_.flatten(1, (- 1)) for x_ in x], dim=1)
else:
assert all(((x_.ndim == 3) for x_ in x))
x = torch.cat(x, dim=1)
x = self.backbone(x)
return x |
def loss_fn(cls_outputs: List[torch.Tensor], box_outputs: List[torch.Tensor], cls_targets: List[torch.Tensor], box_targets: List[torch.Tensor], num_positives: torch.Tensor, num_classes: int, alpha: float, gamma: float, delta: float, box_loss_weight: float, label_smoothing: float=0.0, new_focal: bool=False) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
num_positives_sum = (num_positives.sum() + 1.0).float()
levels = len(cls_outputs)
cls_losses = []
box_losses = []
for l in range(levels):
cls_targets_at_level = cls_targets[l]
box_targets_at_level = box_targets[l]
cls_targets_at_level_oh = one_hot(cls_targets_at_level, num_classes)
(bs, height, width, _, _) = cls_targets_at_level_oh.shape
cls_targets_at_level_oh = cls_targets_at_level_oh.view(bs, height, width, (- 1))
cls_outputs_at_level = cls_outputs[l].permute(0, 2, 3, 1).float()
if new_focal:
cls_loss = new_focal_loss(cls_outputs_at_level, cls_targets_at_level_oh, alpha=alpha, gamma=gamma, normalizer=num_positives_sum, label_smoothing=label_smoothing)
else:
cls_loss = focal_loss_legacy(cls_outputs_at_level, cls_targets_at_level_oh, alpha=alpha, gamma=gamma, normalizer=num_positives_sum)
cls_loss = cls_loss.view(bs, height, width, (- 1), num_classes)
cls_loss = (cls_loss * (cls_targets_at_level != (- 2)).unsqueeze((- 1)))
cls_losses.append(cls_loss.sum())
box_losses.append(_box_loss(box_outputs[l].permute(0, 2, 3, 1).float(), box_targets_at_level, num_positives_sum, delta=delta))
cls_loss = torch.sum(torch.stack(cls_losses, dim=(- 1)), dim=(- 1))
box_loss = torch.sum(torch.stack(box_losses, dim=(- 1)), dim=(- 1))
total_loss = (cls_loss + (box_loss_weight * box_loss))
return (total_loss, cls_loss, box_loss) |
def write_stack_trace(ex: Exception) -> None:
file = NamedTemporaryFile('w', prefix=f'raiden-exception-{datetime.datetime.utcnow():%Y-%m-%dT%H-%M}', suffix='.txt', delete=False)
with file as traceback_file:
traceback.print_exc(file=traceback_file)
traceback.print_exc()
click.secho(f'''FATAL: An unexpected exception occurred. A traceback has been written to {traceback_file.name}
{ex}''', fg='red') |
@pytest.mark.parametrize('protocol', ['ucx', 'ucxx'])
@pytest.mark.parametrize('params', [{'enable_infiniband': False, 'enable_nvlink': False, 'enable_rdmacm': False}, {'enable_infiniband': True, 'enable_nvlink': True, 'enable_rdmacm': False}, {'enable_infiniband': True, 'enable_nvlink': False, 'enable_rdmacm': True}, {'enable_infiniband': True, 'enable_nvlink': True, 'enable_rdmacm': True}, {'enable_infiniband': None, 'enable_nvlink': None, 'enable_rdmacm': None}])
@pytest.mark.skipif((_get_dgx_version() == DGXVersion.DGX_A100), reason=('Automatic InfiniBand device detection Unsupported for %s' % _get_dgx_name()))
def test_ucx_infiniband_nvlink(protocol, params):
if (protocol == 'ucx'):
pytest.importorskip('ucp')
elif (protocol == 'ucxx'):
pytest.importorskip('ucxx')
skip_queue = mp.Queue()
p = mp.Process(target=_test_ucx_infiniband_nvlink, args=(skip_queue, protocol, params['enable_infiniband'], params['enable_nvlink'], params['enable_rdmacm']))
p.start()
p.join()
skip_msg = skip_queue.get()
if (skip_msg != 'ok'):
pytest.skip(skip_msg)
assert (not p.exitcode) |
class TestEnvFileCombinations(EnvironmentTestCase):
def test_run_with_both_env_files(self, runner, target, env1, env2):
env = self.run_environ(runner, *target, '--default-env-file', env1, '--env-file', env2)
assert (env.get('SECRET') == 'unknown')
assert (env.get('PASSWORD') == 'bitter')
assert (env.get('PATH') == 'second')
def test_run_with_both_env_files_then_overrides(self, runner, target, env1, env2):
env = self.run_environ(runner, *target, '--default-env-file', env1, '--env-file', env2, '--env', 'PASSWORD=mine', '--env', 'SECRET=s3cr3t')
assert (env.get('SECRET') == 's3cr3t')
assert (env.get('PASSWORD') == 'mine')
assert (env.get('PATH') == 'second') |
def test_log_player_take_damage():
events = telemetry.events_from_type('LogPlayerTakeDamage')
data = events[0]
assert isinstance(data, LogPlayerTakeDamage)
assert isinstance(data.attacker, Character)
assert isinstance(data.victim, Character)
assert (data.damage > 0)
assert (data.damage_type_category in DAMAGE_TYPE_MAP)
assert (data.damage_reason in DAMAGE_REASON)
assert (data.damage_causer_name in DAMAGE_CAUSER_MAP) |
def runDbmsSchedulerModule(args):
status = True
if (checkOptionsGivenByTheUser(args, ['test-module', 'exec', 'reverse-shell', 'make-download']) == False):
return EXIT_MISS_ARGUMENT
dbmsScheduler = DbmsScheduler(args)
status = dbmsScheduler.connection(stopIfError=True)
if (args['test-module'] == True):
args['print'].title('Test if the DBMSScheduler library can be used')
status = dbmsScheduler.testAll()
if (args['exec'] != None):
args['print'].title('Execute the `{0}` on the {1} server'.format(args['exec'], args['server']))
status = dbmsScheduler.execOSCommand(args['exec'], prepandWindCmdPath=args['cmd-exe'])
if (status == True):
args['print'].goodNews('The `{0}` command was executed on the {1} server'.format(args['exec'], args['server']))
else:
args['print'].badNews('The `{0}` command was not executed on the {1} server: {2}'.format(args['exec'], args['server'], str(status)))
dbmsScheduler.__getJobStatus__()
dbmsScheduler.__removeJob__(dbmsScheduler.jobName, force=True, defer=False)
if (args['reverse-shell'] != None):
args['print'].title('Try to give you a reverse shell from the {0} server'.format(args['server']))
dbmsScheduler.giveReverseShell(localip=args['reverse-shell'][0], localport=args['reverse-shell'][1])
if (args['make-download'] != None):
        args['print'].title('Try to make the target {0} download the local file {1} over HTTP with PowerShell and save it in {2}'.format(args['server'], args['make-download'][0], args['make-download'][1]))
        args['print'].printImportantNotice("You have to serve the file at path {0} over an HTTP server; 'python -m SimpleHTTPServer PORT' can be used, for example.".format(args['make-download'][0]))
dbmsScheduler.makeDownloadFile(urlToFile=args['make-download'][0], remoteFilePath=args['make-download'][1])
dbmsScheduler.close() |
class TestBiasCorrection(unittest.TestCase):
    @pytest.mark.cuda  # marker reconstructed; exact name assumed
def test_correct_bias_on_mnist(self):
def modified_parse(serialized_example):
dim = 28
features = tf.compat.v1.parse_single_example(serialized_example, features={'label': tf.compat.v1.FixedLenFeature([], tf.int64), 'image_raw': tf.compat.v1.FixedLenFeature([], tf.string)})
image = tf.compat.v1.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([(dim * dim)])
image = (tf.cast(image, tf.float32) / 255)
return image
tf.compat.v1.reset_default_graph()
batch_size = 2
num_samples = 10
dataset = tf.data.TFRecordDataset([os.path.join(mnist_tfrecords_path, 'validation.tfrecords')]).repeat(1)
dataset = dataset.map(modified_parse, num_parallel_calls=batch_size)
dataset = dataset.batch(batch_size=batch_size)
quant_params = QuantParams()
bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=['reshape_input'], output_op_names=['dense_1/BiasAdd'])
meta_path = os.path.join(mnist_model_path, 'mnist_save.meta')
checkpoint_path = os.path.join(mnist_model_path, 'mnist_save')
sess = load_model_from_meta(meta_path=meta_path, checkpoint_path=checkpoint_path)
BiasCorrection.correct_bias(sess, bias_correction_params, quant_params, dataset)
self.assertTrue(1)
sess.close()
    @pytest.mark.cuda  # marker reconstructed; exact name assumed
def test_correct_bias_on_mnist_with_analytical_bc(self):
def modified_parse(serialized_example):
dim = 28
features = tf.compat.v1.parse_single_example(serialized_example, features={'label': tf.compat.v1.FixedLenFeature([], tf.int64), 'image_raw': tf.compat.v1.FixedLenFeature([], tf.string)})
image = tf.compat.v1.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([(dim * dim)])
image = (tf.cast(image, tf.float32) / 255)
return image
tf.compat.v1.reset_default_graph()
batch_size = 2
num_samples = 10
dataset = tf.data.TFRecordDataset([os.path.join(mnist_tfrecords_path, 'validation.tfrecords')]).repeat(1)
dataset = dataset.map(modified_parse, num_parallel_calls=batch_size)
dataset = dataset.batch(batch_size=batch_size)
quant_params = QuantParams()
bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=['reshape_input'], output_op_names=['dense_1/BiasAdd'])
meta_path = os.path.join(mnist_model_path, 'mnist_save.meta')
checkpoint_path = os.path.join(mnist_model_path, 'mnist_save')
sess = load_model_from_meta(meta_path=meta_path, checkpoint_path=checkpoint_path)
BiasCorrection.correct_bias(sess, bias_correction_params, quant_params, dataset, perform_only_empirical_bias_corr=False)
self.assertTrue(1)
sess.close()
def test_dummy(self):
pass |
def accuracy(output, target, meta):
batch_size = target.size(0)
target = target.cpu().numpy()
(err, cnt) = (0, 0)
for i in range(batch_size):
if (meta[(i, 0)] < (1 + ref.eps)):
cnt += 1
for j in range(ref.J):
err += (((((output[i][(j * 3)] - target[i][j][0]) ** 2) + ((output[i][((j * 3) + 1)] - target[i][j][1]) ** 2)) + ((output[i][((j * 3) + 2)] - target[i][j][2]) ** 2)) ** 0.5)
if (cnt > 0):
return ((err / ref.J) / cnt)
else:
return 0 |
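The nested loop is a per-joint Euclidean distance between a flat (x, y, z) prediction and a (J, 3) target; a vectorized numpy equivalent under assumed shapes, ignoring the meta gating (the joint count stands in for ref.J):

import numpy as np

J = 16  # assumed joint count (ref.J in the snippet)
output = np.random.rand(2, J * 3)
target = np.random.rand(2, J, 3)

err = np.linalg.norm(output.reshape(2, J, 3) - target, axis=2)  # per-sample, per-joint distance
print(err.sum(axis=1) / J)  # mean joint error per sample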
def print_final_status_json(iterations, cerberus_status, exit_status_code):
status_json = {'iterations': iterations, 'cluster_health': cerberus_status, 'exit_status': exit_status_code}
    with open('final_cerberus_info.json', 'w') as file:
        json.dump(status_json, file, indent=4, separators=(',', ': '))
logging.info('Final status information written to final_cerberus_info.json') |
class Packages_OldVersion_CamelCase_TestCase(ParserTest):
def __init__(self, *args, **kwargs):
ParserTest.__init__(self, *args, **kwargs)
self.version = RHEL8
self.ks = '%packages --instLangs cs_CZ --excludeWeakdeps\nsomething\n\n%end\n'
def runTest(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.parser.readKickstartFromString(self.ks)
self.assertEqual(len(w), 0)
self.assertEqual(str(self.handler.packages), '\n%packages --instLangs=cs_CZ --excludeWeakdeps\nsomething\n\n%end\n') |
@pytest.mark.parametrize(('word', 'result'), [('', ['', '', '']), ('', ['', '', '']), ('', ['', '', '']), ('', ['', '', '']), ('', ['', '', '']), ('', ['', '', '']), ('', ['', '', '']), ('', ['', '', '']), ('', ['', '', ''])])
def test_plural_forms(word, result, morph):
parsed = morph.parse(word)
assert len(parsed)
for (plural, num) in zip(result, [1, 2, 5]):
assert (parsed[0].make_agree_with_number(num).word == plural) |
class AgdaLexer(RegexLexer):
name = 'Agda'
    url = ''
aliases = ['agda']
filenames = ['*.agda']
mimetypes = ['text/x-agda']
version_added = '2.0'
reserved = ('abstract', 'codata', 'coinductive', 'constructor', 'data', 'do', 'eta-equality', 'field', 'forall', 'hiding', 'in', 'inductive', 'infix', 'infixl', 'infixr', 'instance', 'interleaved', 'let', 'macro', 'mutual', 'no-eta-equality', 'open', 'overlap', 'pattern', 'postulate', 'primitive', 'private', 'quote', 'quoteTerm', 'record', 'renaming', 'rewrite', 'syntax', 'tactic', 'unquote', 'unquoteDecl', 'unquoteDef', 'using', 'variable', 'where', 'with')
tokens = {'root': [('^(\\s*)([^\\s(){}]+)(\\s*)(:)(\\s*)', bygroups(Whitespace, Name.Function, Whitespace, Operator.Word, Whitespace)), ('--(?![!#$%&*+./<=>?^|_~:\\\\]).*?$', Comment.Single), ('\\{-', Comment.Multiline, 'comment'), ('\\{!', Comment.Directive, 'hole'), (("\\b(%s)(?!\\')\\b" % '|'.join(reserved)), Keyword.Reserved), ('(import|module)(\\s+)', bygroups(Keyword.Reserved, Whitespace), 'module'), ('\\b(Set|Prop)[\\u2080-\\u2089]*\\b', Keyword.Type), ('(\\(|\\)|\\{|\\})', Operator), ('(\\.{1,3}|\\||\\u03BB|\\u2200|\\u2192|:|=|->)', Operator.Word), ('\\d+[eE][+-]?\\d+', Number.Float), ('\\d+\\.\\d+([eE][+-]?\\d+)?', Number.Float), ('0[xX][\\da-fA-F]+', Number.Hex), ('\\d+', Number.Integer), ("'", String.Char, 'character'), ('"', String, 'string'), ('[^\\s(){}]+', Text), ('\\s+?', Whitespace)], 'hole': [('[^!{}]+', Comment.Directive), ('\\{!', Comment.Directive, '#push'), ('!\\}', Comment.Directive, '#pop'), ('[!{}]', Comment.Directive)], 'module': [('\\{-', Comment.Multiline, 'comment'), ("[a-zA-Z][\\w.\\']*", Name, '#pop'), ('[\\W0-9_]+', Text)], 'comment': HaskellLexer.tokens['comment'], 'character': HaskellLexer.tokens['character'], 'string': HaskellLexer.tokens['string'], 'escape': HaskellLexer.tokens['escape']} |
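The lexer plugs into the standard pygments pipeline; for instance:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import AgdaLexer

code = 'module Demo where\n\ndata Bool : Set where\n  true  : Bool\n  false : Bool\n'
print(highlight(code, AgdaLexer(), TerminalFormatter()))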
class ComplexParameter(pTypes.GroupParameter):
def __init__(self, **opts):
opts['type'] = 'bool'
opts['value'] = True
pTypes.GroupParameter.__init__(self, **opts)
self.addChild({'name': 'A = 1/B', 'type': 'float', 'value': 7, 'suffix': 'Hz', 'siPrefix': True})
self.addChild({'name': 'B = 1/A', 'type': 'float', 'value': (1 / 7.0), 'suffix': 's', 'siPrefix': True})
self.a = self.param('A = 1/B')
self.b = self.param('B = 1/A')
self.a.sigValueChanged.connect(self.aChanged)
self.b.sigValueChanged.connect(self.bChanged)
def aChanged(self):
self.b.setValue((1.0 / self.a.value()), blockSignal=self.bChanged)
def bChanged(self):
self.a.setValue((1.0 / self.b.value()), blockSignal=self.aChanged) |
def get_pretraining_file(backbone):
if ('mitb5' in backbone):
return 'pretrained/mit_b5.pth'
if ('mitb4' in backbone):
return 'pretrained/mit_b4.pth'
if ('mitb3' in backbone):
return 'pretrained/mit_b3.pth'
if ('r101v1c' in backbone):
return 'open-mmlab://resnet101_v1c'
return {'r50v1c': 'open-mmlab://resnet50_v1c', 'x50-32': 'open-mmlab://resnext50_32x4d', 'x101-32': 'open-mmlab://resnext101_32x4d', 's50': 'open-mmlab://resnest50', 's101': 'open-mmlab://resnest101', 's200': 'open-mmlab://resnest200'}[backbone] |
class RecordsExtractor(object):
def _clean_up_cols(self, columns):
return re.sub(' +', '', columns).split(',')
def _generate_data_payloads(self, data_count, payload, cols=[], index=0):
payload = clean_up_offset_payload(payload)
payloads = {}
for i in range(index, data_count):
payloads.update({i: []})
for c in cols:
payloads[i].append({'column': c, 'payload': payload.format(col=c, index=i)})
return payloads
def _data_count(self, db='', tbl=''):
_temp = []
if (db and tbl):
count_payloads = []
            for (_, v) in PAYLOADS_RECS_COUNT.items():
                count_payloads.extend(v)
if self._dbms:
count_payloads = PAYLOADS_RECS_COUNT.get(self._dbms, count_payloads)
for i in count_payloads:
data = i.format(db=db, tbl=tbl)
_temp.append(data)
payloads = self._generat_payload(payloads_list=_temp)
return self._extact(payloads=payloads)
def __generate_records_tables(self, tbl, cols, count):
table_name = f'{count}_{tbl}_data'
tmp_table_name = f'{count}_{tbl}_tmp'
query = TBL_RECS.format(name=f'{tmp_table_name}', tbl_name=f'{tmp_table_name}')
ok = session.drop_table(session_filepath=self.session_filepath, table_name=table_name, columns=cols, query=query, auto_create=True, exec_query=True)
return ok
def data_dump(self, db='', tbl='', cols=''):
index = 0
_temp = []
is_resumed = False
fetched_data = {}
_temp_payloads = []
fetched_records = []
cols = self._clean_up_cols(cols)
count = '{0:03d}'.format(len(cols))
RecordsResponse = collections.namedtuple('RecordsResponse', ['fetched', 'count', 'database', 'table', 'columns', 'records'])
if (db and tbl and cols and isinstance(cols, list)):
dump_payloads = []
            for (_, v) in PAYLOADS_RECS_DUMP.items():
                dump_payloads.extend(v)
test_column = '0x72306f'
if self._dbms:
dump_payloads = PAYLOADS_RECS_DUMP.get(self._dbms, dump_payloads)
test_column = '1337'
for i in dump_payloads:
data = i.format(col=test_column, db=db, tbl=tbl)
_temp_payloads.append(data)
try:
tmp_table_name = f'{count}_{tbl.strip()}_tmp'
fetched_data = session.fetch_from_table(session_filepath=self.session_filepath, table_name=tmp_table_name, group_by_columns='`index`,`column_name`,`column_value`', cursor=False)
if fetched_data:
is_resumed = True
except Exception as error:
pass
logger.info(("fetching column(s) '%s' for table '%s' in database: '%s'" % (', '.join(cols), tbl, db)))
last_seen = 0
remainder = 0
retval = self._data_count(db=db, tbl=tbl)
if retval.is_injected:
data_count = int(retval.result)
if (data_count != 0):
logger.info(('used SQL query returns %d entries' % data_count))
if (data_count == 0):
logger.warning(("used SQL query returns %d entries of columns '%s' for table '%s' in database '%s'" % (data_count, ', '.join(cols), tbl, db)))
return RecordsResponse(fetched=False, count=data_count, database=db, table=tbl, columns=cols, records=[])
if is_resumed:
_temp = fetched_data
for entry in fetched_data:
last_seen = index = entry.get('index')
value = entry.get('column_value')
fetched_records.append(value)
remainder = (len(fetched_data) % len(cols))
if (remainder > 0):
index -= 1
last_seen = (last_seen - 1)
fetched_records = fetched_records[(- remainder):]
should_fetch = True
if is_resumed:
if (index == data_count):
should_fetch = False
if should_fetch:
payloads = self._generat_payload(payloads_list=_temp_payloads)
retval = self._extact(payloads=payloads)
if retval.is_injected:
payload = clean_up_payload(payload=retval.payload, replace_with='{col}')
payloads = self._generate_data_payloads(data_count=data_count, payload=payload, cols=cols, index=index)
if (is_resumed and (remainder > 0)):
remaing_records = payloads[last_seen][remainder:]
payloads.update({last_seen: remaing_records})
if (not is_resumed):
self.__generate_records_tables(tbl=tbl, cols=cols, count=count)
response_data = self._extract_data(payloads=payloads, table=tbl, columns=cols, fetched_records=fetched_records, count=count)
if response_data.is_fetched:
_temp.extend(response_data.result)
table_name = f'{count}_{tbl}_data'
self._pprint_records(field_names=', '.join(cols), database=db, table_name=table_name, table=tbl, columns=cols)
return RecordsResponse(fetched=True, count=data_count, database=db, table=tbl, columns=cols, records=_temp)
if (not retval.is_injected):
status_code = retval.status_code
error = retval.error
count = retval.payloads_count
if (status_code not in [200, 0]):
message = f'{error} - {count} times'
logger.warning(f'''HTTP error codes detected during run:
{message}''')
else:
message = f"tested with '{count}' queries, unable to find working SQL query."
logger.critical(message)
else:
table_name = f'{count}_{tbl}_data'
self._pprint_records(field_names=', '.join(cols), database=db, table_name=table_name, table=tbl, columns=cols)
return RecordsResponse(fetched=True, count=data_count, database=db, table=tbl, columns=cols, records=_temp)
if (not retval.is_injected):
status_code = retval.status_code
error = retval.error
count = retval.payloads_count
if (status_code not in [200, 0]):
message = f'{error} - {count} times'
logger.warning(f'''HTTP error codes detected during run:
{message}''')
else:
message = f"tested with '{count}' queries, unable to find working SQL query."
logger.critical(message)
return RecordsResponse(fetched=False, count=0, database=None, table=None, columns=None, records=None)
def _pprint_records(self, field_names, database='', table_name='', table='', columns=None):
group_by_columns = ''
if columns:
group_by_columns = ','.join([f'`{i.strip()}`' for i in columns])
cursor_or_list = session.fetch_from_table(session_filepath=self.session_filepath, table_name=table_name, group_by_columns=group_by_columns)
ok = session.dump_to_csv(cursor=cursor_or_list, filepath=self.session_filepath, database=database, table=table)
cursor_or_list = session.fetch_from_table(session_filepath=self.session_filepath, table_name=table_name, group_by_columns=group_by_columns)
obj = prettifier(cursor_or_list, field_names, header=True)
data = obj.data
entries = obj.entries
logger.success(f'Database: {database}')
logger.success(f'Table: {table}')
logger.success(f'[{entries} entries]')
logger.success(f'{data}')
def _save_records(self, table=None, column_names=None, records=None, count=None, clean_insert=False):
table_name = f'{count}_{table}_data'
if (table_name and column_names and records):
session.save(session_filepath=self.session_filepath, table_name=table_name, columns=column_names, records=records, clean_insert=clean_insert)
return 'done'
def _extract_data(self, payloads, table=None, columns=None, fetched_records=None, count=None):
(_temp, is_interrupted) = ([], False)
Response = collections.namedtuple('Response', ['is_fetched', 'result'])
for (index, values) in payloads.items():
__temp = ([] if (not fetched_records) else fetched_records)
position = 0
while (position < len(values)):
p = values[position]
name = p.get('column')
payload = p.get('payload')
payload_request = prepare_payload_request(self, payload)
url = payload_request.url
data = payload_request.data
regex = payload_request.regex
headers = payload_request.headers
try:
response = request.inject_payload(url=url, regex=regex, data=data, headers=headers, proxy=self._proxy)
except KeyboardInterrupt:
logger.warning('user aborted during enumeration. Xpath will display partial output')
is_interrupted = True
break
else:
if response.ok:
result = response.result
logger.info(("retrieved: '%s'" % (result if (result != '<blank_value>') else '')))
_temp.append({'index': (index + 1), 'column_name': name, 'column_value': result})
__temp.append(result)
table_name = f'{count}_{table}_tmp'
PREPARED_STATEMENT = f'INSERT INTO `{table_name}` (`index`, `column_name`, `column_value`) VALUES (?, ?, ?);'
retval = session.dump(session_filepath=self.session_filepath, query=PREPARED_STATEMENT, values=((index + 1), name, result))
position += 1
_ = self._save_records(table=table, column_names=columns, records=__temp, count=count)
if is_interrupted:
break
if (_temp and (len(_temp) > 0)):
resp = Response(is_fetched=True, result=_temp)
else:
resp = Response(is_fetched=False, result=_temp)
return resp |
def test_SumScaler_simple_both():
dm = skcriteria.mkdm(matrix=[[1, 2, 3], [4, 5, 6]], objectives=[min, max, min], weights=[1, 2, 3])
expected = skcriteria.mkdm(matrix=[[(1 / 5), (2 / 7), (3 / 9)], [(4 / 5), (5 / 7), (6 / 9)]], objectives=[min, max, min], weights=[(1 / 6), (2 / 6), (3 / 6)], dtypes=[float, float, float])
scaler = SumScaler(target='both')
result = scaler.transform(dm)
assert result.equals(expected) |
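The expected values are plain sum normalization, column-wise for the matrix and element-wise for the weights, as a two-line numpy check shows:

import numpy as np

matrix = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
weights = np.array([1.0, 2.0, 3.0])
print(matrix / matrix.sum(axis=0))  # columns divided by 5, 7, 9
print(weights / weights.sum())  # [1/6, 2/6, 3/6]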
class CuFile():
def __init__(self, file: (pathlib.Path | str), flags: str='r'):
assert ('a' not in flags)
self._closed = False
self._filepath = str(file)
self._flags = flags
with open(self._filepath, mode=flags):
pass
def close(self) -> None:
self._closed = True
def closed(self) -> bool:
return self._closed
def fileno(self) -> int:
raise RuntimeError("Legate-KvikIO doesn't expose any file descriptor")
def open_flags(self) -> int:
raise RuntimeError("Legate-KvikIO doesn't expose any file descriptor")
def __enter__(self) -> CuFile:
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
def read(self, buf: Any) -> None:
assert (not self._closed)
if (('r' not in self._flags) and ('+' not in self._flags)):
raise ValueError(f'Cannot read a file opened with flags={self._flags}')
output = get_legate_store(buf)
task = context.create_auto_task(TaskOpCode.READ)
task.add_scalar_arg(self._filepath, types.string)
task.add_output(output)
task.set_side_effect(True)
task.execute()
def write(self, buf: Any) -> None:
assert (not self._closed)
if (('w' not in self._flags) and ('+' not in self._flags)):
raise ValueError(f'Cannot write to a file opened with flags={self._flags}')
input = get_legate_store(buf)
task = context.create_auto_task(TaskOpCode.WRITE)
task.add_scalar_arg(self._filepath, types.string)
task.add_input(input)
task.set_side_effect(True)
task.execute() |
class Migration(migrations.Migration):
dependencies = [('adserver', '0081_rollout_ad_prioritization_pacing')]
operations = [migrations.AddField(model_name='historicalpublisher', name='allowed_domains', field=models.CharField(blank=True, default='', help_text="A space separated list of domains where the publisher's ads can appear", max_length=1024, verbose_name='Allowed domains')), migrations.AddField(model_name='publisher', name='allowed_domains', field=models.CharField(blank=True, default='', help_text="A space separated list of domains where the publisher's ads can appear", max_length=1024, verbose_name='Allowed domains'))] |
def test_validate_well_structured_bad_gate():
(q0, q1) = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.Moment([cirq.PhasedXPowGate(phase_exponent=0).on(q0)]), cirq.Moment([cirq.XPowGate(exponent=0.5).on(q0)]), cirq.Moment([cg.SYC(q0, q1)]), cirq.measure(q0, q1, key='z')])
with pytest.raises(BadlyStructuredCircuitError) as e:
validate_well_structured(circuit)
assert e.match('non-device') |
class FxThread(ThreadJob):
def __init__(self, config: SimpleConfig, network: Optional[Network]):
ThreadJob.__init__(self)
self.config = config
self.network = network
util.register_callback(self.set_proxy, ['proxy_set'])
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self._trigger = asyncio.Event()
self._trigger.set()
self.set_exchange(self.config_exchange())
make_dir(self.cache_dir)
def set_proxy(self, trigger_name, *args):
self._trigger.set()
def get_currencies(self, history: bool) -> Sequence[str]:
d = get_exchanges_by_ccy(history)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy: str, history: bool) -> Sequence[str]:
d = get_exchanges_by_ccy(history)
return d.get(ccy, [])
@staticmethod
def remove_thousands_separator(text):
return text.replace(',', '')
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = ('{:%s.%df}' % ((',' if commas else ''), max(0, prec)))
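# e.g. prec=2 with commas=True builds '{:,.2f}', so Decimal('1234.5') renders as '1,234.50'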
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
async def run(self):
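# Wake whenever self._trigger is set (currency, exchange, or proxy changed) or at most every 150s, then refresh historical and spot rates.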
while True:
try:
async with timeout_after(150):
(await self._trigger.wait())
self._trigger.clear()
if (self.is_enabled() and self.show_history()):
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
except TaskTimeout:
pass
if self.is_enabled():
(await self.exchange.update_safe(self.ccy))
def is_enabled(self):
return bool(self.config.get('use_exchange_rate', DEFAULT_ENABLED))
def set_enabled(self, b):
self.config.set_key('use_exchange_rate', bool(b))
self.trigger_update()
def get_history_config(self, *, allow_none=False):
val = self.config.get('history_rates', None)
if ((val is None) and allow_none):
return None
return bool(val)
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
return self.config.get('currency', DEFAULT_CURRENCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return (self.is_enabled() and self.get_history_config() and (self.ccy in self.exchange.history_ccys()))
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.trigger_update()
self.on_quotes()
def trigger_update(self):
if self.network:
self.network.asyncio_loop.call_soon_threadsafe(self._trigger.set)
def set_exchange(self, name):
class_ = (globals().get(name) or globals().get(DEFAULT_EXCHANGE))
self.logger.info(f'using exchange {name}')
if (self.config_exchange() != name):
self.config.set_key('use_exchange', name, True)
assert issubclass(class_, ExchangeBase), f'unexpected type {class_} for {name}'
self.exchange = class_(self.on_quotes, self.on_history)
self.trigger_update()
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
util.trigger_callback('on_quotes')
def on_history(self):
util.trigger_callback('on_history')
def exchange_rate(self) -> Decimal:
rate = self.exchange.quotes.get(self.ccy)
if (rate is None):
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return ('' if rate.is_nan() else ('%s' % self.value_str(btc_balance, rate)))
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return ('' if rate.is_nan() else ('%s %s' % (self.value_str(btc_balance, rate), self.ccy)))
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
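# COIN / 10 ** (8 - decimal_point) is the satoshi value of one base unit (1e8 for BTC, 1e5 for mBTC).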
return (_(' (No FX rate available)') if rate.is_nan() else (' 1 %s~%s %s' % (base_unit, self.value_str((COIN / (10 ** (8 - decimal_point))), rate), self.ccy)))
def fiat_value(self, satoshis, rate):
return (Decimal('NaN') if (satoshis is None) else ((Decimal(satoshis) / COIN) * Decimal(rate)))
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _('No data')
return ('%s' % self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if (d_t is None):
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
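# For very recent dates (within 2 days) not yet covered by the historical feed, fall back to the current spot rate.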
if ((rate in ('NaN', None)) and ((datetime.today().date() - d_t.date()).days <= 2)):
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
if (rate is None):
rate = 'NaN'
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
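# A minimal usage sketch (assumes an Electrum SimpleConfig/Network pair is available):
#   fx = FxThread(config, network)
#   fx.set_currency('EUR')
#   fx.format_amount_and_units(100_000_000)  # fiat string for 1 BTC; '' until quotes arrive |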
class ContextStringFormatter(string.Formatter):
def __init__(self, formatters: ChainMap) -> None:
super().__init__()
self.__formatters = formatters
def vformat(self, format_string: str, args: Sequence[Any], kwargs: Mapping[(str, Any)]) -> str:
used_args = set()
(result, _) = self._vformat(format_string, args, kwargs, used_args, 10)
self.check_unused_args(used_args, args, kwargs)
return result
def get_value(self, key: (int | str), args: Sequence[Any], kwargs: Mapping[(str, Any)]) -> Any:
if (key in self.__formatters):
return kwargs.get(str(key))
try:
return super().get_value(key, args, kwargs)
except KeyError:
message = f'Unknown context field `{key}`'
raise ValueError(message) from None
def format_field(self, value: Any, format_spec: str) -> Any:
(formatter, _, data) = format_spec.partition(':')
if (formatter in self.__formatters):
return self.__formatters[formatter](value, data)
return super().format_field(value, format_spec)
def parse(self, format_string: str) -> Iterable:
for (literal_text, field_name, format_spec, conversion) in super().parse(format_string):
if (field_name in self.__formatters):
(yield (literal_text, field_name, f'{field_name}:{format_spec}', conversion))
else:
(yield (literal_text, field_name, format_spec, conversion))
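# A minimal usage sketch (the 'upper' formatter name and its callable are hypothetical):
#   from collections import ChainMap
#   fmt = ContextStringFormatter(ChainMap({'upper': lambda value, data: str(value).upper()}))
#   fmt.vformat('{upper}', (), {'upper': 'hello'})  # -> 'HELLO' |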
def dict_from_prop(prop):
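# Walk a Blender property group, copying simple-typed attributes into a dict and flattening nested PropertyGroups (skipping groups of the same type as `prop` to avoid infinite recursion).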
valid_types = (int, str, bool, float, tuple, Vector, bpy.types.Material, bpy.types.Object)
result = {}
for p in dir(prop):
if (p.startswith('__') or (p in ['rna_type', 'bl_rna'])):
continue
if (not hasattr(prop, p)):
continue
pn = getattr(prop, p)
if isinstance(pn, valid_types):
result[p] = pn
elif (isinstance(pn, bpy.types.PropertyGroup) and (not isinstance(pn, type(prop)))):
result.update(dict_from_prop(pn))
return result |
@pytest.mark.parametrize(('version', 'expected_next'), [pytest.param(meta('1.0.0', config=c), '1.0.0', id='SemVer exact stays'), pytest.param(meta('1.0.0', config=c_non_normalize, dirty=True), '09.02.13.1.dev0', id='SemVer dirty is replaced by date', marks=pytest.mark.filterwarnings('ignore:.*legacy version.*:UserWarning'))])
def test_calver_by_date_semver(version: ScmVersion, expected_next: str) -> None:
computed = calver_by_date(version)
assert (computed == expected_next) |
def test_update_merge_request_approvals_set_approvers(project, resp_mr_approval_rules):
approvals = project.mergerequests.get(1, lazy=True).approvals
assert isinstance(approvals, gitlab.v4.objects.merge_request_approvals.ProjectMergeRequestApprovalManager)
assert (approvals._update_method is UpdateMethod.POST)
response = approvals.set_approvers(updated_approval_rule_approvals_required, approver_ids=updated_approval_rule_user_ids, approver_group_ids=group_ids, approval_rule_name=approval_rule_name)
assert (response.approvals_required == updated_approval_rule_approvals_required)
assert (len(response.eligible_approvers) == len(updated_approval_rule_user_ids))
assert (response.eligible_approvers[0]['id'] == updated_approval_rule_user_ids[0])
assert (response.name == approval_rule_name) |
def project_delta_file_metadata_on_table(delta_file_envelope: DeltaFileEnvelope) -> pa.Table:
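# Stamp every row of the envelope's table with file-level metadata columns: ordered file index, stream position, delta type, source flag, and file record count.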
table = delta_file_envelope.table
ordered_file_number = delta_file_envelope.file_index
ordered_file_number_iterator = repeat(int(ordered_file_number), len(table))
table = append_file_idx_column(table, ordered_file_number_iterator)
stream_position = delta_file_envelope.stream_position
stream_position_iterator = repeat(int(stream_position), len(table))
table = append_stream_position_column(table, stream_position_iterator)
delta_type = delta_file_envelope.delta_type
delta_type_iterator = repeat(delta_type_to_field(delta_type), len(table))
table = append_delta_type_col(table, delta_type_iterator)
is_source_iterator = repeat(bool(delta_file_envelope.is_src_delta), len(table))
table = append_is_source_col(table, is_source_iterator)
file_record_count_iterator = repeat(delta_file_envelope.file_record_count, len(table))
table = append_file_record_count_col(table, file_record_count_iterator)
return table |
def file_handler(loglevel, logfile, log_format, command):
if (logfile is not None):
filename = logfile
else:
filename = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'faceswap')
filename += ('_gui.log' if (command == 'gui') else '.log')
should_rotate = os.path.isfile(filename)
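# Roll any existing log over before use so each run writes to a fresh file, keeping one backup.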
log_file = RotatingFileHandler(filename, backupCount=1)
if should_rotate:
log_file.doRollover()
log_file.setFormatter(log_format)
log_file.setLevel(loglevel)
return log_file |
def _file_handler_exists(logger: Logger, log_dir: str, log_base_file_name: str) -> bool:
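# Report whether the logger already has a FileHandler pointing at this path, so callers can avoid attaching duplicate handlers.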
handler_exists = False
base_file_path = os.path.join(log_dir, log_base_file_name)
if (len(logger.handlers) > 0):
norm_base_file_path = os.path.normpath(base_file_path)
handler_exists = any((isinstance(handler, logging.FileHandler) and (os.path.normpath(handler.baseFilename) == norm_base_file_path)) for handler in logger.handlers)
return handler_exists |