code stringlengths 281 23.7M |
|---|
class TestIssubclassBrain:
    """Checks that the astroid brain infers ``issubclass`` calls correctly."""

    def test_type_type(self) -> None:
        """``type`` is a subclass of itself."""
        assert _get_result('issubclass(type, type)') == 'True'

    def test_object_type(self) -> None:
        """``object`` is not a subclass of ``type``."""
        assert _get_result('issubclass(object, type)') == 'False'

    def test_type_object(self) -> None:
        """``type`` is a subclass of ``object``."""
        assert _get_result('issubclass(type, object)') == 'True'

    def test_issubclass_same_class(self) -> None:
        assert _get_result('issubclass(int, int)') == 'True'

    def test_issubclass_not_the_same_class(self) -> None:
        assert _get_result('issubclass(str, int)') == 'False'

    def test_issubclass_object_true(self) -> None:
        assert _get_result('\n        class Bar(object):\n            pass\n        issubclass(Bar, object)\n        ') == 'True'

    def test_issubclass_same_user_defined_class(self) -> None:
        assert _get_result('\n        class Bar(object):\n            pass\n        issubclass(Bar, Bar)\n        ') == 'True'

    def test_issubclass_different_user_defined_classes(self) -> None:
        assert _get_result('\n        class Foo(object):\n            pass\n        class Bar(object):\n            pass\n        issubclass(Bar, Foo)\n        ') == 'False'

    def test_issubclass_type_false(self) -> None:
        assert _get_result('\n        class Bar(object):\n            pass\n        issubclass(Bar, type)\n        ') == 'False'

    def test_isinstance_tuple_argument(self) -> None:
        """A tuple second argument succeeds if any member matches."""
        assert _get_result('issubclass(int, (str, int))') == 'True'

    def test_isinstance_object_true2(self) -> None:
        assert _get_result('\n        class Bar(type):\n            pass\n        issubclass(Bar, object)\n        ') == 'True'

    def test_issubclass_short_circuit(self) -> None:
        """A match before an invalid tuple member still infers True."""
        assert _get_result('issubclass(int, (int, 1))') == 'True'

    def test_uninferable_bad_type(self) -> None:
        """A non-class second argument is uninferable."""
        with pytest.raises(InferenceError):
            _get_result_node('issubclass(int, 1)')

    def test_uninferable_keywords(self) -> None:
        """Keyword arguments make the call uninferable."""
        with pytest.raises(InferenceError):
            _get_result_node('issubclass(int, class_or_tuple=int)')

    def test_too_many_args(self) -> None:
        """More than two positional arguments is uninferable."""
        with pytest.raises(InferenceError):
            _get_result_node('issubclass(int, int, str)')
class Effect8021(BaseEffect):
    """Hydra implant set bonus: multiplies drone/missile bonus attributes on
    all implants requiring the Cybernetics skill by the set multiplier."""

    runTime = 'early'
    type = 'passive'

    def handler(fit, implant, context, projectionRange, **kwargs):
        bonus_attrs = (
            'hydraDroneTrackingBonus',
            'hydraDroneRangeBonus',
            'hydraMissileFlightTimeBonus',
            'hydraMissileExplosionVelocityBonus',
        )
        for bonus_attr in bonus_attrs:
            fit.appliedImplants.filteredItemMultiply(
                lambda imp: imp.item.requiresSkill('Cybernetics'),
                bonus_attr,
                implant.getModifiedItemAttr('implantSetHydra'),
                **kwargs)
def _eval_update(i, epochs, min_epochs, model, optimizer, batch_dim, eval_batch):
    """Sample molecules from *model* and return a dict of evaluation metrics.

    NOTE(review): ``data``, ``session`` and ``n_samples`` are read from
    enclosing/global scope rather than from the parameters, and most
    parameters (``i``, ``epochs``, ``min_epochs``, ``optimizer``,
    ``batch_dim``, ``eval_batch``) are unused here — presumably this
    signature is required by a trainer callback; confirm against the caller.
    """
    # sample=True -> stochastic decoding of sampled latent vectors
    mols = samples(data, model, session, model.sample_z(n_samples), sample=True)
    (m0, m1) = all_scores(mols, data, norm=True)
    # average each metric over its non-zero entries only
    m0 = {k: np.array(v)[np.nonzero(v)].mean() for (k, v) in m0.items()}
    m0.update(m1)
    return m0 |
def test_output():
    """Run the weir-setting model to completion, then validate the SWMM
    binary output: element counts and the flow series of link C3."""
    with Simulation(MODEL_WEIR_SETTING_PATH) as sim:
        for _ in sim:
            pass
    out = Output(MODEL_WEIR_SETTING_PATH.replace('inp', 'out'))
    out.open()
    assert len(out.subcatchments) == 3
    assert len(out.nodes) == 5
    assert len(out.links) == 4
    assert len(out.pollutants) == 0
    flow_rate = out.link_series('C3', 'flow_rate')
    timestamps = list(flow_rate.keys())
    assert timestamps[0] == datetime(2015, 11, 1, 14, 1)
    assert timestamps[-1] == datetime(2015, 11, 4)
    assert len(flow_rate) == 3480
    out.close()
class OnnxModel(object):
    """Thin wrapper around an ONNX Runtime ``InferenceSession``.

    Selects the CUDA execution provider when this onnxruntime build reports
    GPU support, otherwise falls back to CPU.
    """

    def __init__(self, model_path):
        sess_options = onnxruntime.SessionOptions()
        onnx_gpu = (onnxruntime.get_device() == 'GPU')
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if onnx_gpu else ['CPUExecutionProvider']
        self.sess = onnxruntime.InferenceSession(model_path, sess_options, providers=providers)
        # cache tensor names once; exposed via input_names()/output_names()
        self._input_names = [item.name for item in self.sess.get_inputs()]
        self._output_names = [item.name for item in self.sess.get_outputs()]

    def input_names(self):
        """Return the model's input tensor names, in session order."""
        return self._input_names

    def output_names(self):
        """Return the model's output tensor names, in session order."""
        return self._output_names

    def forward(self, inputs):
        """Run inference.

        *inputs* may be a single array or a list/tuple matching
        ``input_names()``.  When the model has exactly one output and a
        single (non-sequence) input was given, the lone output is returned
        unwrapped; otherwise the list of outputs is returned.
        """
        to_list_flag = False
        if not isinstance(inputs, (tuple, list)):
            inputs = [inputs]
            to_list_flag = True
        # BUG FIX: input_names/output_names are methods and must be called;
        # previously the bound methods themselves were passed to zip()/run().
        input_feed = {name: tensor for name, tensor in zip(self.input_names(), inputs)}
        outputs = self.sess.run(self.output_names(), input_feed)
        if len(self.output_names()) == 1 and to_list_flag:
            return outputs[0]
        return outputs
class WeightedIOULocalizationLossTest(tf.test.TestCase):
    """Unit test for the per-box-weighted IOU localization loss."""

    def testReturnsCorrectLoss(self):
        """Two perfectly matching boxes contribute nothing; the grossly
        mismatched third box contributes its full weight of 2.0."""
        predictions = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, 0.5, 0.25]]])
        targets = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [50, 50, 500.5, 100.25]]])
        box_weights = [[1.0, 0.5, 2.0]]
        loss_fn = losses.WeightedIOULocalizationLoss()
        loss = loss_fn(predictions, targets, weights=box_weights)
        with self.test_session() as sess:
            self.assertAllClose(sess.run(loss), 2.0)
class SocketTests(unittest.TestCase):
    """Tests for the RADIUS ``Client`` socket handling and send/retry logic.

    ``socket.socket`` is monkey-patched with ``MockSocket`` for the duration
    of each test so no real network traffic occurs; ``tearDown`` restores it.
    """

    def setUp(self):
        # self.server is only used as an identity token in expected output
        self.server = object()
        self.client = Client(self.server)
        self.orgsocket = socket.socket
        socket.socket = MockSocket

    def tearDown(self):
        socket.socket = self.orgsocket

    def testReopen(self):
        """Opening an already-open socket must reuse the existing one."""
        self.client._SocketOpen()
        sock = self.client._socket
        self.client._SocketOpen()
        self.assertTrue((sock is self.client._socket))

    def testBind(self):
        """bind() stores the address and sets SO_REUSEADDR."""
        self.client.bind((BIND_IP, BIND_PORT))
        self.assertEqual(self.client._socket.address, (BIND_IP, BIND_PORT))
        self.assertEqual(self.client._socket.options, [(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)])

    def testBindClosesSocket(self):
        """bind() must close any previously open socket."""
        s = MockSocket(socket.AF_INET, socket.SOCK_DGRAM)
        self.client._socket = s
        self.client._poll = MockPoll()
        self.client.bind((BIND_IP, BIND_PORT))
        self.assertEqual(s.closed, True)

    def testSendPacket(self):
        """SendPacket routes auth packets to authport and acct to acctport."""
        def MockSend(self, pkt, port):
            self._mock_pkt = pkt
            self._mock_port = port
        # temporarily swap out the low-level send; restored at the end
        _SendPacket = Client._SendPacket
        Client._SendPacket = MockSend
        self.client.SendPacket(AuthPacket())
        self.assertEqual(self.client._mock_port, self.client.authport)
        self.client.SendPacket(AcctPacket())
        self.assertEqual(self.client._mock_port, self.client.acctport)
        Client._SendPacket = _SendPacket

    def testNoRetries(self):
        """Zero retries means an immediate Timeout without sending."""
        self.client.retries = 0
        self.assertRaises(Timeout, self.client._SendPacket, None, None)

    def testSingleRetry(self):
        """One retry sends the request exactly once before timing out."""
        self.client.retries = 1
        self.client.timeout = 0
        packet = MockPacket(AccessRequest)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432)
        self.assertEqual(self.client._socket.output, [('request packet', (self.server, 432))])

    def testDoubleRetry(self):
        """Two retries send the request exactly twice before timing out."""
        self.client.retries = 2
        self.client.timeout = 0
        packet = MockPacket(AccessRequest)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432)
        self.assertEqual(self.client._socket.output, [('request packet', (self.server, 432)), ('request packet', (self.server, 432))])

    def testAuthDelay(self):
        """Access requests must never carry an Acct-Delay-Time attribute."""
        self.client.retries = 2
        self.client.timeout = 1
        packet = MockPacket(AccessRequest)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432)
        self.assertFalse(('Acct-Delay-Time' in packet))

    def testSingleAccountDelay(self):
        """After one failed attempt the accounting delay is 1 second."""
        self.client.retries = 2
        self.client.timeout = 1
        packet = MockPacket(AccountingRequest)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432)
        self.assertEqual(packet['Acct-Delay-Time'], [1])

    def testDoubleAccountDelay(self):
        """After two failed attempts the accounting delay is 2 seconds."""
        self.client.retries = 3
        self.client.timeout = 1
        packet = MockPacket(AccountingRequest)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432)
        self.assertEqual(packet['Acct-Delay-Time'], [2])

    def testIgnorePacketError(self):
        """A reply that fails to parse is ignored and the send times out."""
        self.client.retries = 1
        self.client.timeout = 1
        self.client._socket = MockSocket(1, 2, six.b('valid reply'))
        packet = MockPacket(AccountingRequest, verify=True, error=True)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432)

    def testValidReply(self):
        """A verified reply is returned from _SendPacket."""
        self.client.retries = 1
        self.client.timeout = 1
        self.client._socket = MockSocket(1, 2, six.b('valid reply'))
        self.client._poll = MockPoll()
        MockPoll.results = [(1, select.POLLIN)]
        packet = MockPacket(AccountingRequest, verify=True)
        reply = self.client._SendPacket(packet, 432)
        self.assertTrue((reply is packet.reply))

    def testInvalidReply(self):
        """A reply that fails verification is discarded -> Timeout."""
        self.client.retries = 1
        self.client.timeout = 1
        self.client._socket = MockSocket(1, 2, six.b('invalid reply'))
        MockPoll.results = [(1, select.POLLIN)]
        packet = MockPacket(AccountingRequest, verify=False)
        self.assertRaises(Timeout, self.client._SendPacket, packet, 432) |
def _test_cache(fn, protocol: SerializationProtocolBase=None, assert_equal_fn: Callable=None):
    """Verify that a cached wrapper around *fn* executes it exactly once.

    Calls the wrapper twice under an enabled cache rooted at a throw-away
    directory and asserts the second call was served from cache (equal
    result, single underlying invocation).  The directory is removed again
    in all cases.
    """
    if (not assert_equal_fn):
        assert_equal_fn = _assert_equal_default
    cache_dir = '/tmp/test_dir'
    # start from a clean cache directory
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
    try:
        cache = Cache()
        call_count = 0
        # NOTE(review): this bare tuple is a no-op; it looks like the
        # argument list of a stripped decorator (e.g. ``@cache.cache('test',
        # protocol)`` on ``_fn``) — confirm against upstream, since
        # *protocol* is otherwise unused in this function.
        ('test', protocol)
        def _fn(*args, **kwargs):
            # counts real executions so the cache hit can be asserted below
            nonlocal call_count
            call_count += 1
            return fn(*args, **kwargs)
        with cache.enable(cache_dir):
            ret = _fn()
        with cache.enable(cache_dir):
            _ret = _fn()
        assert_equal_fn(ret, _ret)
        assert (call_count == 1)
    finally:
        # always clean up the on-disk cache
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir) |
def broadcast_shape_iter(arrays: Iterable[Union[(TensorVariable, tuple[(TensorVariable, ...)])]], arrays_are_shapes: bool=False, allow_runtime_broadcast: bool=False) -> tuple[(ps.ScalarVariable, ...)]:
    """Compute the symbolic broadcast shape of several tensors or shapes.

    Parameters
    ----------
    arrays
        Tensor variables, or — when *arrays_are_shapes* is True — tuples of
        scalar shape entries.
    arrays_are_shapes
        Interpret *arrays* as shape tuples instead of tensors.
    allow_runtime_broadcast
        Permit dimensions only known at runtime to broadcast; adds runtime
        assertions instead of requiring statically-known lengths.

    Returns
    -------
    A tuple of scalar variables, one per dimension of the broadcast result.

    Raises
    ------
    ValueError
        If two constant, non-unit dimension lengths disagree.
    """
    one = pytensor.scalar.ScalarConstant(pytensor.scalar.int64, 1)
    if arrays_are_shapes:
        max_dims = max((len(a) for a in arrays))
        # left-pad every shape with 1s up to max_dims; normalize literal 1s
        # and constant-1 variables to the shared `one` sentinel
        array_shapes = [(((one,) * (max_dims - len(a))) + tuple(((one if ((sh == 1) or (isinstance(sh, Constant) and (sh.value == 1))) else (ps.as_scalar(sh) if (not isinstance(sh, Variable)) else sh)) for sh in a))) for a in arrays]
    else:
        max_dims = max((a.ndim for a in arrays))
        _arrays = tuple((ptb.as_tensor_variable(a) for a in arrays))
        # use the static type shape to detect broadcastable (==1) dimensions
        array_shapes = [(((one,) * (max_dims - a.ndim)) + tuple(((one if (t_sh == 1) else sh) for (sh, t_sh) in zip(a.shape, a.type.shape)))) for a in _arrays]
    result_dims = []
    # resolve the dimensions one position at a time
    for dim_shapes in zip(*array_shapes):
        non_bcast_shapes = [shape for shape in dim_shapes if (shape != one)]
        if (len(non_bcast_shapes) == 0):
            # every input is broadcastable here -> result dim is 1
            result_dims.append(one)
        elif (len(non_bcast_shapes) == 1):
            # exactly one non-broadcastable length -> it wins
            result_dims.extend(non_bcast_shapes)
        else:
            # several candidate lengths: split into constants and symbolic
            nonconst_nb_shapes: set[int] = set()
            const_nb_shapes: set[Variable] = set()
            for shape in non_bcast_shapes:
                if isinstance(shape, Constant):
                    const_nb_shapes.add(shape.value.item())
                else:
                    nonconst_nb_shapes.add(shape)
            if (len(const_nb_shapes) > 1):
                # two distinct constant lengths can never broadcast
                raise ValueError(f'Could not broadcast dimensions. Incompatible shapes were {array_shapes}.')
            if (len(const_nb_shapes) == 1):
                # a single constant length anchors the result dimension
                (first_length,) = const_nb_shapes
                other_lengths = nonconst_nb_shapes
                first_length = ps.as_scalar(first_length)
            else:
                (first_length, *other_lengths) = nonconst_nb_shapes
            if (len(other_lengths) == 0):
                result_dims.append(first_length)
                continue
            if (not allow_runtime_broadcast):
                # assert at runtime that all symbolic lengths agree
                condition = pt_all([pt_eq(first_length, other) for other in other_lengths])
                result_dims.append(_broadcast_assert(first_length, condition))
            else:
                # allow runtime-1 lengths to broadcast: take the max of the
                # non-1 lengths and assert the others equal it or are 1
                lengths = as_tensor_variable((first_length, *other_lengths))
                runtime_broadcastable = pt_eq(lengths, one)
                result_dim = pt_abs(pt_max(switch(runtime_broadcastable, (- one), lengths)))
                condition = pt_all(switch((~ runtime_broadcastable), pt_eq(lengths, result_dim), np.array(True)))
                result_dims.append(_runtime_broadcast_assert(result_dim, condition))
    return tuple(result_dims) |
_module()
class FastSCNN(nn.Module):
    """Fast-SCNN backbone.

    Composes three stages: learning-to-downsample, a global feature
    extractor, and a feature-fusion module that merges the higher- and
    lower-resolution branches.  ``forward`` returns the stage outputs
    selected by ``out_indices``.
    """
    def __init__(self, in_channels=3, downsample_dw_channels=(32, 48), global_in_channels=64, global_block_channels=(64, 96, 128), global_block_strides=(2, 2, 1), global_out_channels=128, higher_in_channels=64, lower_in_channels=128, fusion_out_channels=128, out_indices=(0, 1, 2), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), align_corners=False):
        super(FastSCNN, self).__init__()
        # channel counts must line up across the three stages
        if (global_in_channels != higher_in_channels):
            raise AssertionError('Global Input Channels must be the same with Higher Input Channels!')
        elif (global_out_channels != lower_in_channels):
            raise AssertionError('Global Output Channels must be the same with Lower Input Channels!')
        self.in_channels = in_channels
        self.downsample_dw_channels1 = downsample_dw_channels[0]
        self.downsample_dw_channels2 = downsample_dw_channels[1]
        self.global_in_channels = global_in_channels
        self.global_block_channels = global_block_channels
        self.global_block_strides = global_block_strides
        self.global_out_channels = global_out_channels
        self.higher_in_channels = higher_in_channels
        self.lower_in_channels = lower_in_channels
        self.fusion_out_channels = fusion_out_channels
        self.out_indices = out_indices
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners
        self.learning_to_downsample = LearningToDownsample(in_channels, downsample_dw_channels, global_in_channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.global_feature_extractor = GlobalFeatureExtractor(global_in_channels, global_block_channels, global_out_channels, strides=self.global_block_strides, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, align_corners=self.align_corners)
        self.feature_fusion = FeatureFusionModule(higher_in_channels, lower_in_channels, fusion_out_channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, align_corners=self.align_corners)
    def init_weights(self, pretrained=None):
        """Kaiming-init conv layers, constant-init norm layers.

        NOTE(review): *pretrained* is accepted but unused here — presumably
        kept for interface compatibility with other backbones; confirm.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, 1)
    def forward(self, x):
        """Run the three stages and return the outputs chosen by out_indices."""
        higher_res_features = self.learning_to_downsample(x)
        lower_res_features = self.global_feature_extractor(higher_res_features)
        fusion_output = self.feature_fusion(higher_res_features, lower_res_features)
        outs = [higher_res_features, lower_res_features, fusion_output]
        outs = [outs[i] for i in self.out_indices]
        return tuple(outs) |
class Layer(object):
    """Base layer: wraps ``_call`` in a TF name scope and optionally logs
    input/output histograms to TensorBoard.

    Accepted keyword arguments: ``name`` (defaults to
    ``<classname>_<uid>``) and ``logging`` (defaults to False).
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs:
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # auto-generate a unique name from the subclass name
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False

    def _call(self, inputs):
        # identity by default; subclasses implement the actual computation
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            # sparse tensors cannot be histogrammed
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs

    def _log_vars(self):
        """Emit a histogram summary for every registered variable."""
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class FairseqCriterion(_Loss):
    """Base class for fairseq training criterions (losses).

    NOTE(review): the ``cls``-taking and no-argument methods below were
    missing their ``@classmethod``/``@staticmethod`` decorators (they exist
    upstream in fairseq); without them e.g. ``cls.add_args(parser)`` would
    pass the parser as ``cls``.  Restored here.
    """

    def __init__(self, task):
        super().__init__()
        self.task = task
        # -100 matches PyTorch's default ignore_index when no dictionary
        if hasattr(task, 'target_dictionary'):
            tgt_dict = task.target_dictionary
            self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100

    @classmethod
    def add_args(cls, parser):
        """Add criterion-specific arguments to *parser* from the dataclass."""
        dc = getattr(cls, '__dataclass', None)
        if dc is not None:
            gen_parser_from_dataclass(parser, dc())

    @classmethod
    def build_criterion(cls, cfg: FairseqDataclass, task):
        """Instantiate the criterion, inferring constructor args from *cfg*.

        Raises NotImplementedError for signatures it cannot satisfy.
        """
        init_args = {}
        for p in inspect.signature(cls).parameters.values():
            if (p.kind == p.POSITIONAL_ONLY) or (p.kind == p.VAR_POSITIONAL) or (p.kind == p.VAR_KEYWORD):
                raise NotImplementedError('{} not supported'.format(p.kind))
            assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
            if p.name == 'task':
                init_args['task'] = task
            elif p.name == 'cfg':
                init_args['cfg'] = cfg
            elif hasattr(cfg, p.name):
                init_args[p.name] = getattr(cfg, p.name)
            elif p.default != p.empty:
                pass  # fall back to the parameter's declared default
            else:
                raise NotImplementedError('Unable to infer Criterion arguments, please implement {}.build_criterion'.format(cls.__name__))
        return cls(**init_args)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for *sample*; subclasses must implement."""
        raise NotImplementedError

    @staticmethod
    def aggregate_logging_outputs(logging_outputs: List[Dict[(str, Any)]]) -> Dict[(str, Any)]:
        """Deprecated aggregation hook; use :meth:`reduce_metrics` instead."""
        utils.deprecation_warning('The aggregate_logging_outputs API is deprecated. Please use the reduce_metrics API instead.')
        raise NotImplementedError

    @classmethod
    def reduce_metrics(cls, logging_outputs: List[Dict[(str, Any)]]) -> None:
        """Aggregate logging outputs across workers into scalar metrics."""
        utils.deprecation_warning('Criterions should implement the reduce_metrics API. Falling back to deprecated aggregate_logging_outputs API.')
        agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
        for k, v in agg_logging_outputs.items():
            if k in {'nsentences', 'ntokens', 'sample_size'}:
                continue  # bookkeeping counters, not metrics
            metrics.log_scalar(k, v)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Whether logging outputs from replicas can be summed directly
        (enables a faster distributed reduction when True)."""
        return False
(field_fixture=FieldFixture)
class ChoiceFixture(Fixture):
    """Fixture providing ChoiceField setups (plain, grouped, multi) together
    with the inputs/expected values each scenario should accept or reject.

    Each scenario method populates: all_choices, groups, choices,
    valid_inputs, invalid_input, input_to_value_map and
    expected_validation_constraint.
    """
    def new_field(self, field_class=None):
        # default to a single-select ChoiceField bound to the model object
        field_class = (field_class or ChoiceField)
        field = field_class(self.choices)
        field.bind('choice_value', self.model_object)
        return field
    def new_model_object(self):
        return EmptyStub()
    def plain_choices(self):
        """Flat list of choices, no groups; note the str-valued '2' choice."""
        self.all_choices = [Choice(1, IntegerField(label='One')), Choice('2', Field(label='Two'))]
        self.choices = self.all_choices
        self.groups = []
        self.valid_inputs = ['1', '2']
        self.invalid_input = 'not an valid option'
        self.input_to_value_map = {'1': 1, '2': '2'}
        self.expected_validation_constraint = AllowedValuesConstraint
    def grouped_choices(self):
        """Same choices as plain_choices but wrapped in one ChoiceGroup."""
        self.all_choices = [Choice(1, IntegerField(label='One')), Choice('2', Field(label='Two'))]
        self.groups = [ChoiceGroup('', self.all_choices)]
        self.choices = self.groups
        self.valid_inputs = ['1', '2']
        self.invalid_input = 'not an valid option'
        self.input_to_value_map = {'1': 1, '2': '2'}
        self.expected_validation_constraint = AllowedValuesConstraint
    def multi_choices(self):
        """Multi-select variant: inputs are sequences, values are lists."""
        self.all_choices = [Choice(1, IntegerField(label='One')), Choice(2, IntegerField(label='Two')), Choice(3, IntegerField(label='Three'))]
        self.groups = []
        self.choices = self.all_choices
        self.field = self.new_field(MultiChoiceField)
        self.valid_inputs = [('1',), ['1', '2']]
        self.invalid_input = ['not an valid option', '1']
        self.input_to_value_map = {('1',): [1], ('1', '2'): [1, 2]}
        self.expected_validation_constraint = MultiChoiceConstraint
    def multi_choices_required(self):
        """Multi-select with a RequiredConstraint: empty input is invalid."""
        self.all_choices = [Choice(1, IntegerField(label='One'))]
        self.groups = []
        self.choices = self.all_choices
        field = self.new_field(MultiChoiceField)
        self.field = field.with_validation_constraint(RequiredConstraint())
        self.valid_inputs = [('1',), ['1']]
        self.invalid_input = []
        self.input_to_value_map = {('1',): [1]}
        self.expected_validation_constraint = RequiredConstraint |
class Effect4088(BaseEffect):
    """Projected effect: multiplies the repair amount of remote armor
    repair modules on the targeted fit."""
    runTime = 'early'
    type = ('projected', 'passive')
    def handler(fit, module, context, projectionRange, **kwargs):
        # Applies the projecting module's multiplier to armorDamageAmount of
        # all (capital) remote armor repair modules, with stacking penalties.
        fit.modules.filteredItemMultiply((lambda mod: (mod.item.requiresSkill('Remote Armor Repair Systems') or mod.item.requiresSkill('Capital Remote Armor Repair Systems'))), 'armorDamageAmount', module.getModifiedItemAttr('armorDamageAmountMultiplierRemote'), stackingPenalties=True, penaltyGroup='postMul', **kwargs) |
class TestRBUtils(unittest.TestCase):
    """Tests for randomized-benchmarking (RB) utility functions."""

    def test_coherence_limit(self):
        """Coherence-limited error matches known reference values."""
        t1 = 100.0
        t2 = 100.0
        gate2Q = 0.5
        gate1Q = 0.1
        twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1], [t2, t2], gate2Q)
        oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)
        self.assertAlmostEqual(oneq_coherence_err, 0., 6, 'Error: 1Q Coherence Limit')
        self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5, 'Error: 2Q Coherence Limit')

    @staticmethod
    def create_fake_circuits(num_gates):
        """Build 2-qubit circuits with given per-circuit gate counts.

        *num_gates* is a list of ``[n_u1, n_u2, n_u3, n_cx]`` counts.
        BUG FIX: this helper was defined without ``self`` yet called as
        ``self.create_fake_circuits(...)``, which raised TypeError; it uses
        no instance state, so it is now a staticmethod.
        """
        circs = []
        for num_gate in num_gates:
            circ = qiskit.QuantumCircuit(2)
            for _ in range(num_gate[0]):
                circ.append(U1Gate(0), [0])
            for _ in range(num_gate[1]):
                circ.append(U2Gate(0, 0), [0])
            for _ in range(num_gate[2]):
                circ.append(U3Gate(0, 0, 0), [0])
            for _ in range(num_gate[3]):
                circ.cx(0, 1)
            circs.append(circ)
        return circs

    def test_gates_per_clifford(self):
        """Average gate counts per Clifford are totals / number of Cliffords."""
        num_gates = [[6, 7, 5, 8], [10, 12, 8, 14]]
        clifford_lengths = np.array([4, 8])
        circs = self.create_fake_circuits(num_gates)
        gpc = rb.rb_utils.gates_per_clifford(transpiled_circuits_list=[circs], clifford_lengths=clifford_lengths, basis=['u1', 'u2', 'u3', 'cx'], qubits=[0])
        # each length-m sequence contains m+1 Cliffords (incl. the inverse)
        ncliffs = np.sum((clifford_lengths + 1))
        self.assertAlmostEqual(gpc[0]['u1'], ((num_gates[0][0] + num_gates[1][0]) / ncliffs))
        self.assertAlmostEqual(gpc[0]['u2'], ((num_gates[0][1] + num_gates[1][1]) / ncliffs))
        self.assertAlmostEqual(gpc[0]['u3'], ((num_gates[0][2] + num_gates[1][2]) / ncliffs))
        self.assertAlmostEqual(gpc[0]['cx'], ((num_gates[0][3] + num_gates[1][3]) / ncliffs))

    def test_gates_per_clifford_with_invalid_basis(self):
        """A basis gate absent from the circuits reports a count of zero."""
        num_gates = [[1, 1, 1, 1]]
        clifford_lengths = np.array([1])
        circs = self.create_fake_circuits(num_gates)
        gpc = rb.rb_utils.gates_per_clifford(transpiled_circuits_list=[circs], clifford_lengths=clifford_lengths, basis=['u1', 'u2', 'u3', 'cx', 'fake_gate'], qubits=[0])
        self.assertAlmostEqual(gpc[0]['fake_gate'], 0)

    def test_calculate_1q_epg(self):
        """1Q error-per-gate split: u1 error-free, u3 twice u2."""
        gpc = {0: {'cx': 0, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epc = 0.00026
        epg_u1 = 0
        epg_u2 = (epc / (0.3 + (2 * 0.5)))
        epg_u3 = (2 * epg_u2)
        epgs = rb.calculate_1q_epg(gpc, epc, 0)
        with self.assertRaises(QiskitError):
            rb.calculate_1q_epg(gpc, epc, 1)
        self.assertAlmostEqual(epgs['u1'], epg_u1)
        self.assertAlmostEqual(epgs['u2'], epg_u2)
        self.assertAlmostEqual(epgs['u3'], epg_u3)

    def test_calculate_1q_epg_with_wrong_basis(self):
        """Non u1/u2/u3 basis must be rejected."""
        gpc = {0: {'cx': 0, 'rx': 0.3, 'ry': 0.3, 'rz': 0.3}}
        epc = 0.00026
        with self.assertRaises(QiskitError):
            rb.calculate_1q_epg(gpc, epc, 0)

    def test_calculate_1q_epg_with_cx(self):
        """A non-zero cx count invalidates the 1Q EPG calculation."""
        gpc = {0: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epc = 0.00026
        with self.assertRaises(QiskitError):
            rb.calculate_1q_epg(gpc, epc, 0)

    def test_calculate_2q_epg(self):
        """2Q EPG with and without correcting for 1Q gate errors."""
        gpc = {0: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}, 1: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epgs_q0 = {'u1': 0, 'u2': 0.0001, 'u3': 0.0002}
        epgs_q1 = {'u1': 0, 'u2': 0.0001, 'u3': 0.0002}
        epc = 0.01
        alpha_1q = (((1 - (2 * 0.0001)) ** 0.3) * ((1 - (2 * 0.0002)) ** 0.5))
        alpha_c_1q = ((1 / 5) * ((2 * alpha_1q) + (3 * (alpha_1q ** 2))))
        alpha_c_2q = ((1 - ((4 / 3) * epc)) / alpha_c_1q)
        epg_with_1q_epgs = (((3 / 4) * (1 - alpha_c_2q)) / 1.5)
        epg_without_1q_epgs = (epc / 1.5)
        with self.assertRaises(QiskitError):
            rb.calculate_2q_epg(gpc, epc, [0, 1, 2], [epgs_q0, epgs_q1])
        with self.assertRaises(QiskitError):
            rb.calculate_2q_epg(gpc, epc, [0, 2], [epgs_q0, epgs_q1])
        self.assertAlmostEqual(rb.calculate_2q_epg(gpc, epc, [0, 1]), epg_without_1q_epgs)
        self.assertAlmostEqual(rb.calculate_2q_epg(gpc, epc, [0, 1], [epgs_q0, epgs_q1]), epg_with_1q_epgs)

    def test_calculate_2q_epg_with_another_gate_name(self):
        """Two-qubit gates other than cx need an explicit two_qubit_name."""
        gpc = {0: {'cz': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}, 1: {'cz': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epc = 0.01
        with self.assertRaises(QiskitError):
            rb.calculate_2q_epg(gpc, epc, [0, 1])
        self.assertAlmostEqual(rb.calculate_2q_epg(gpc, epc, [0, 1], two_qubit_name='cz'), (epc / 1.5))

    def test_twoQ_clifford(self):
        """Deprecated twoQ_clifford_error matches the analytic formula."""
        error_1q = 0.001
        error_2q = 0.01
        gpc = {0: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}, 1: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        gate_qubit = [0, 0, 0, 1, 1, 1, (- 1)]
        gate_err = [0, error_1q, (2 * error_1q), 0, error_1q, (2 * error_1q), error_2q]
        alpha_2q = ((1 - ((4 / 3) * error_2q)) ** 1.5)
        alpha_1q = (((1 - (2 * error_1q)) ** 0.3) * ((1 - (4 * error_1q)) ** 0.5))
        alpha_c_2q = (((1 / 5) * ((2 * alpha_1q) + ((3 * alpha_1q) * alpha_1q))) * alpha_2q)
        with self.assertWarns(DeprecationWarning):
            epc = rb.twoQ_clifford_error(gpc, gate_qubit, gate_err)
        self.assertAlmostEqual(epc, ((3 / 4) * (1 - alpha_c_2q)))

    def test_calculate_1q_epc(self):
        """1Q EPC recombines per-gate errors weighted by gate counts."""
        gpc = {0: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epgs = {'u1': 0, 'u2': 0.001, 'u3': 0.002}
        epc_ref = (1 - (((1 - 0.001) ** 0.3) * ((1 - 0.002) ** 0.5)))
        epc = rb.calculate_1q_epc(gpc, epgs, 0)
        self.assertAlmostEqual(epc, epc_ref)

    def test_calculate_2q_epc(self):
        """2Q EPC combines 1Q and 2Q depolarizing parameters."""
        gpc = {0: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}, 1: {'cx': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epgs_q0 = {'u1': 0, 'u2': 0.0001, 'u3': 0.0002}
        epgs_q1 = {'u1': 0, 'u2': 0.0001, 'u3': 0.0002}
        epg_q01 = 0.001
        alpha_1q = (((1 - (2 * 0.0001)) ** 0.3) * ((1 - (2 * 0.0002)) ** 0.5))
        alpha_2q = ((1 - ((4 / 3) * 0.001)) ** 1.5)
        alpha_c = (((1 / 5) * ((2 * alpha_1q) + (3 * (alpha_1q ** 2)))) * alpha_2q)
        self.assertAlmostEqual(rb.calculate_2q_epc(gpc, epg_q01, [0, 1], [epgs_q0, epgs_q1]), ((3 / 4) * (1 - alpha_c)))

    def test_calculate_2q_epc_with_another_gate_name(self):
        """2Q EPC with a cz entangler requires two_qubit_name='cz'."""
        gpc = {0: {'cz': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}, 1: {'cz': 1.5, 'u1': 0.1, 'u2': 0.3, 'u3': 0.5}}
        epgs_q0 = {'u1': 0, 'u2': 0.0001, 'u3': 0.0002}
        epgs_q1 = {'u1': 0, 'u2': 0.0001, 'u3': 0.0002}
        epg_q01 = 0.001
        alpha_1q = (((1 - (2 * 0.0001)) ** 0.3) * ((1 - (2 * 0.0002)) ** 0.5))
        alpha_2q = ((1 - ((4 / 3) * 0.001)) ** 1.5)
        alpha_c = (((1 / 5) * ((2 * alpha_1q) + (3 * (alpha_1q ** 2)))) * alpha_2q)
        with self.assertRaises(QiskitError):
            rb.calculate_2q_epc(gpc, epg_q01, [0, 1], [epgs_q0, epgs_q1])
        self.assertAlmostEqual(rb.calculate_2q_epc(gpc, epg_q01, [0, 1], [epgs_q0, epgs_q1], two_qubit_name='cz'), ((3 / 4) * (1 - alpha_c)))
def upsample(data, weight):
    """Return an oversampled deep copy of *data*.

    The whole dataset is repeated ``floor(weight)`` times; a random subset
    of size ``n * (weight - floor(weight))`` is appended to cover the
    fractional remainder.  *weight* must be >= 1.
    """
    n_data = len(data)
    assert weight >= 1
    whole_repeats = int(math.floor(weight))
    # indices for the whole-number repetitions, in original order
    chosen = list(range(n_data)) * whole_repeats
    # random indices covering the fractional part of the weight
    leftover = list(range(n_data))
    shuffle(leftover)
    n_extra = int(n_data * (weight - whole_repeats))
    chosen += leftover[:n_extra]
    return [deepcopy(data[idx]) for idx in chosen]
class SpecialTagDirective:
    """Base class for custom YAML tag directives.

    Wraps a raw scalar *value*; subclasses are expected to define
    ``yaml_tag`` and override :meth:`get_value`.

    BUG FIX: ``to_yaml``/``from_yaml`` take ``cls`` but were missing
    ``@classmethod``, which the ruamel.yaml representer/constructor
    registration protocol requires.
    """

    def __init__(self, value):
        self.value = value

    def __bool__(self):
        return bool(self.value)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.value!r})'

    def __eq__(self, other):
        # repr-based comparison distinguishes subclasses with equal values
        return repr(self) == repr(other)

    @classmethod
    def to_yaml(cls, representer, node):
        """Represent *node* as a scalar tagged with ``cls.yaml_tag``."""
        return representer.represent_scalar(cls.yaml_tag, node.value)

    @classmethod
    def from_yaml(cls, constructor, node):
        """Construct an instance from the raw value of a YAML *node*."""
        return cls(node.value)

    def get_value(self, context=None):
        """Return the processed value; subclasses must override."""
        raise NotImplementedError('Implement this to provide the processed value of your custom tag during formatting operations.')
def _format_protfuncs():
    """Build a newline-joined help listing of all registered prototype
    functions, sorted by name, each with its justified docstring."""
    entries = []
    for func_name, func in sorted(protlib.PROT_FUNCS.items(), key=lambda item: item[0]):
        doc = utils.justify(func.__doc__.strip(), align='l', indent=10).strip()
        entries.append('- |c${name}|n - |W{docs}'.format(name=func_name, docs=doc))
    return '\n '.join(entries)
def preprocess_blizzard(args):
    """Preprocess the Blizzard2012 corpus under ``args.base_dir`` into
    ``args.output`` and write the resulting metadata file."""
    input_dir = os.path.join(args.base_dir, 'Blizzard2012')
    output_dir = os.path.join(args.base_dir, args.output)
    os.makedirs(output_dir, exist_ok=True)
    entries = blizzard.build_from_path(input_dir, output_dir, args.num_workers, tqdm=tqdm)
    write_metadata(entries, output_dir)
def test_solver_should_use_the_python_constraint_from_the_environment_if_available(solver: Solver, repo: Repository, package: ProjectPackage) -> None:
    """B's marker (python_version < 3.2) is satisfiable only because the
    active environment is Python 2.7, so B must be installed before A."""
    set_package_python_versions(solver.provider, '~2.7 || ^3.5')
    package.add_dependency(Factory.create_dependency('A', '^1.0'))
    a = get_package('A', '1.0.0')
    # B is only needed on old Pythons
    a.add_dependency(Factory.create_dependency('B', {'version': '^1.0.0', 'markers': 'python_version < "3.2"'}))
    b = get_package('B', '1.0.0')
    b.python_versions = '>=2.6, <3'
    repo.add_package(a)
    repo.add_package(b)
    # solve against a Python 2.7.18 environment
    with solver.use_environment(MockEnv((2, 7, 18))):
        transaction = solver.solve()
    check_solver_result(transaction, [{'job': 'install', 'package': b}, {'job': 'install', 'package': a}]) |
_uncanonicalize
_rewriter([neg])
def local_max_to_min(fgraph, node):
    """Rewrite ``neg(max(neg(x)))`` into ``min(x)``.

    Returns the replacement output list on a match, False otherwise.
    """
    if node.op == neg and node.inputs[0].owner:
        # renamed from `max` to avoid shadowing the builtin
        max_var = node.inputs[0]
        max_apply = max_var.owner
        if (max_apply and isinstance(max_apply.op, CAReduce)
                and max_apply.op.scalar_op == ps.scalar_maximum):
            inner = max_apply.inputs[0]
            if inner.owner and inner.owner.op == neg:
                replacement = Min(max_apply.op.axis)(inner.owner.inputs[0])
                return [copy_stack_trace(node.outputs[0], replacement)]
    return False
class ResponseHHDUC(DataElementGroup):
    """FinTS response data element group for HHD_UC (chipTAN) challenges."""
    # application transaction counter
    atc = DataElementField(type='an', max_length=5, _d='ATC')
    # EMV application cryptogram
    ac = DataElementField(type='bin', max_length=256, _d='Application Cryptogram AC')
    ef_id_data = DataElementField(type='bin', max_length=256, _d='EF_ID Data')
    # card verification results
    cvr = DataElementField(type='bin', max_length=256, _d='CVR')
    version_info_chiptan = DataElementField(type='bin', max_length=256, _d='Versionsinfo der chipTAN-Applikation') |
def update_project_environment(project, name, config):
    """Merge *config* into the named hatch environment, updating both the
    in-memory project config and pyproject.toml on disk."""
    project_file = project.root / 'pyproject.toml'
    raw_config = load_toml_file(str(project_file))
    env_config = (
        raw_config
        .setdefault('tool', {})
        .setdefault('hatch', {})
        .setdefault('envs', {})
        .setdefault(name, {})
    )
    env_config.update(config)
    # new environments start from a copy of the 'default' environment
    merged = project.config.envs.get(name, project.config.envs['default']).copy()
    merged.update(env_config)
    project.config.envs[name] = merged
    with open(str(project_file), 'w', encoding='utf-8') as f:
        f.write(tomli_w.dumps(raw_config))
class Solution(object):
    def levelOrderBottom(self, root):
        """Return the binary tree's level-order traversal from the bottom up
        (deepest level first, root level last).

        Each node is expected to expose ``val``, ``left`` and ``right``.
        Runs in O(n); the previous version used ``list.insert(0, ...)``,
        making it O(n^2) in the number of levels.
        """
        if root is None:
            return []
        levels = []
        current = [root]
        while current:
            levels.append([node.val for node in current])
            # collect the next level's non-empty children, left to right
            current = [child
                       for node in current
                       for child in (node.left, node.right)
                       if child is not None]
        return levels[::-1]
def test_frequency():
    """Setting ch_1.frequency emits ``C1:BSWV FRQ,1000`` (plus an error
    check), and reading it parses the FRQ field (0.3 Hz) from the
    instrument's BSWV reply."""
    with expected_protocol(TeledyneT3AFG, [('C1:BSWV FRQ,1000', None), ('SYST:ERR?', '-0, No errors'), ('C1:BSWV?', 'C1:BSWV WVTP,SINE,FRQ,0.3HZ,PERI,3.33333S,AMP,0.08V,AMPVRMS,0.02828Vrms,MAX_OUTPUT_AMP,4.6V,OFST,-2V,HLEV,-1.96V,LLEV,-2.04V,PHSE,0')]) as inst:
        inst.ch_1.frequency = 1000
        assert (inst.ch_1.frequency == 0.3) |
class UniCodeHandler(BaseHandler):
    """Decodes unicode-escape sequences in a request argument and returns
    the result as JSON.  GET and POST behave identically.

    NOTE(review): the response dict keys are all the empty string (each
    assignment overwrites the last) — they look like stripped non-ASCII
    keys; confirm against upstream before relying on the payload shape.
    """

    def _decode(self, content, html_unescape):
        """Resolve \\uXXXX escapes in *content*; optionally HTML-unescape."""
        tmp = bytes(content, 'unicode_escape').decode('utf-8').replace('\\u', '\\\\u').replace('\\\\\\u', '\\\\u')
        tmp = bytes(tmp, 'utf-8').decode('unicode_escape')
        # keep non-breaking spaces intact through the second decode pass
        tmp = tmp.encode('utf-8').replace(b'\xc2\xa0', b'\xa0').decode('unicode_escape')
        if strtobool(html_unescape):
            tmp = html.unescape(tmp)
        return tmp

    async def _handle(self):
        # shared implementation for GET and POST (previously duplicated)
        Rtv = {}
        try:
            content = self.get_argument('content', '')
            html_unescape = self.get_argument('html_unescape', 'false')
            Rtv[u''] = self._decode(content, html_unescape)
            Rtv[u''] = '200'
        except Exception as e:
            Rtv[u''] = str(e)
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.write(json.dumps(Rtv, ensure_ascii=False, indent=4))
        return

    async def get(self):
        await self._handle()

    async def post(self):
        await self._handle()
.parametrize('extra_headers', [[], [(b'upgrade', b'h2')]])
def test_handshake_response_broken_upgrade_header(extra_headers: Headers) -> None:
    """A 101 response whose 'Upgrade: websocket' header is missing (or has
    the wrong value, e.g. h2) must raise RemoteProtocolError."""
    with pytest.raises(RemoteProtocolError) as excinfo:
        _make_handshake(101, ([(b'connection', b'Upgrade')] + extra_headers))
    assert (str(excinfo.value) == "Missing header, 'Upgrade: websocket'") |
class ExchangeDataProvider(BaseDataProvider):
    """Quandl-backed data provider for the London, Euronext and Singapore
    stock exchanges.  Fetches daily 'Close' series per ticker."""

    def __init__(self, token: str, tickers: Union[(str, List[str])], stockmarket: StockMarket=StockMarket.LONDON, start: datetime.datetime=datetime.datetime(2016, 1, 1), end: datetime.datetime=datetime.datetime(2016, 1, 30)) -> None:
        """*tickers* may be a list, or a string separated by ';' or newlines.

        Raises MissingOptionalLibraryError without quandl installed and
        QiskitFinanceError for an unsupported stock market.
        """
        super().__init__()
        if not _HAS_QUANDL:
            raise MissingOptionalLibraryError(libname='Quandl', name='ExchangeDataProvider', pip_install='pip install quandl')
        # normalize tickers into a list of symbol strings
        self._tickers = []
        if isinstance(tickers, list):
            self._tickers = tickers
        else:
            self._tickers = tickers.replace('\n', ';').split(';')
        self._n = len(self._tickers)
        if stockmarket not in [StockMarket.LONDON, StockMarket.EURONEXT, StockMarket.SINGAPORE]:
            msg = 'ExchangeDataProvider does not support '
            msg += stockmarket.value
            msg += ' as a stock market.'
            raise QiskitFinanceError(msg)
        self._stockmarket = str(stockmarket.value)
        self._token = token
        # BUG FIX: the original re-assigned ``self._tickers = tickers`` here,
        # clobbering the parsed list above with the raw argument.
        self._start = start.strftime('%Y-%m-%d')
        self._end = end.strftime('%Y-%m-%d')

    def run(self) -> None:
        """Fetch the 'Close' series of every ticker into ``self._data``.

        Raises QiskitFinanceError on authentication failure, unparsable
        responses, or when any ticker is not found / forbidden.
        """
        quandl.ApiConfig.api_key = self._token
        quandl.ApiConfig.api_version = '2015-04-09'
        self._data = []
        stocks_notfound = []
        stocks_forbidden = []
        for ticker_name in self._tickers:
            stock_data = None
            name = (self._stockmarket + '/') + ticker_name
            try:
                stock_data = quandl.get(name, start_date=self._start, end_date=self._end)
            except quandl.AuthenticationError as ex:
                raise QiskitFinanceError('Quandl invalid token.') from ex
            except quandl.NotFoundError:
                stocks_notfound.append(name)
                continue
            except quandl.ForbiddenError:
                stocks_forbidden.append(name)
                continue
            except quandl.QuandlError as ex:
                raise QiskitFinanceError("Quandl Error for '{}'.".format(name)) from ex
            try:
                self._data.append(stock_data['Close'])
            except KeyError as ex:
                raise QiskitFinanceError("Cannot parse Quandl '{}'output.".format(name)) from ex
        if stocks_notfound or stocks_forbidden:
            err_msg = 'Stocks not found: {}. '.format(stocks_notfound) if stocks_notfound else ''
            if stocks_forbidden:
                err_msg += 'You do not have permission to view this data. Please subscribe to this database: {}'.format(stocks_forbidden)
            raise QiskitFinanceError(err_msg)
class EphemeralBuilderManager(BuildStateInterface):
    """Build state manager for ephemeral (single-use) build workers.

    Job state is tracked in an external orchestrator under prefixed keys;
    builder instances are started and stopped through pluggable executors.
    """

    # Builds in these phases can no longer be meaningfully cancelled.
    PHASES_NOT_ALLOWED_TO_CANCEL_FROM = (BUILD_PHASE.PUSHING, BUILD_PHASE.COMPLETE, BUILD_PHASE.ERROR, BUILD_PHASE.INTERNAL_ERROR, BUILD_PHASE.CANCELLED)
    # Terminal phases whose builds may be archived.
    ARCHIVABLE_BUILD_PHASES = (BUILD_PHASE.COMPLETE, BUILD_PHASE.ERROR, BUILD_PHASE.CANCELLED)
    COMPLETED_PHASES = (ARCHIVABLE_BUILD_PHASES + (BUILD_PHASE.INTERNAL_ERROR,))
    # Available executor implementations, keyed by the EXECUTOR config value.
    EXECUTORS = {'popen': PopenExecutor, 'ec2': EC2Executor, 'kubernetes': KubernetesExecutor, 'kubernetesPodman': KubernetesPodmanExecutor}

    def __init__(self, registry_hostname, manager_hostname, queue, build_logs, user_files, instance_keys):
        self._registry_hostname = registry_hostname
        self._manager_hostname = manager_hostname
        self._queue = queue
        self._build_logs = build_logs
        self._user_files = user_files
        self._instance_keys = instance_keys
        self._ordered_executors = []
        self._executor_name_to_executor = {}
        self._manager_config = {}
        self._orchestrator = None

    def initialize(self, manager_config):
        """Load the executors and the orchestrator from *manager_config* and
        register the job-expiry and job-cancel key-change callbacks."""
        self._manager_config = manager_config
        if manager_config.get('EXECUTORS'):
            for executor_config in manager_config['EXECUTORS']:
                self._load_executor(executor_config.get('EXECUTOR'), executor_config)
        else:
            self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG'))
        logger.debug('calling orchestrator_from_config')
        self._orchestrator = orchestrator_from_config(manager_config)
        logger.debug('setting on_key_change callbacks for job expiry, cancel')
        self._orchestrator.on_key_change(self._job_prefix, self._job_expired_callback)
        self._orchestrator.on_key_change(self._cancel_prefix, self._job_cancelled_callback)

    def _load_executor(self, executor_kind_name, executor_config):
        """Instantiate and register a single executor; unknown kinds are
        logged and skipped rather than raising."""
        executor_klass = EphemeralBuilderManager.EXECUTORS.get(executor_kind_name)
        if executor_klass is None:
            logger.error('Unknown executor %s; skipping install', executor_kind_name)
            return
        executor = executor_klass(executor_config, self._registry_hostname, self._manager_hostname)
        if executor.name in self._executor_name_to_executor:
            raise Exception('Executor with name %s already registered' % executor.name)
        self._ordered_executors.append(executor)
        self._executor_name_to_executor[executor.name] = executor

    def generate_build_token(self, token_type, build_id, job_id, expiration):
        """Mint a signed build token scoped to this manager."""
        return build_token(self._manager_hostname, token_type, build_id, job_id, expiration, self._instance_keys)

    def verify_build_token(self, token, token_type):
        """Verify a build token previously minted by this manager."""
        return verify_build_token(token, self._manager_hostname, token_type, self._instance_keys)

    def _config_prefix(self, key):
        """Prepend the configured orchestrator prefix to *key*."""
        if self._manager_config.get('ORCHESTRATOR') is None:
            return key
        prefix = self._manager_config.get('ORCHESTRATOR_PREFIX', '')
        return slash_join(prefix, key).lstrip('/') + '/'

    # BUGFIX(review): the following five accessors are read without
    # parentheses throughout this class (e.g. slash_join(self._job_prefix,
    # ...), timedelta(seconds=self.machine_max_expiration)), so they must be
    # properties; the decorators were evidently stripped from this copy.
    @property
    def _job_prefix(self):
        return self._config_prefix(JOB_PREFIX)

    @property
    def _cancel_prefix(self):
        return self._config_prefix(CANCEL_PREFIX)

    @property
    def _metric_prefix(self):
        return self._config_prefix(METRIC_PREFIX)

    @property
    def _lock_prefix(self):
        return self._config_prefix(LOCK_PREFIX)

    @property
    def machine_max_expiration(self):
        # Maximum lifetime of a build machine, in seconds.
        return self._manager_config.get('MACHINE_MAX_TIME', 7200)

    def _lock_key(self, build_id):
        return slash_join(self._lock_prefix, build_id)

    def _metric_key(self, build_id):
        return slash_join(self._metric_prefix, build_id)

    def _job_key(self, build_id):
        return slash_join(self._job_prefix, build_id)

    def _build_job_from_job_id(self, job_id):
        """Load the BuildJob stored under *job_id*.

        Raises:
            BuildJobDoesNotExistsError: the key is gone (likely expired).
            BuildJobError: the orchestrator could not be reached.
        """
        try:
            job_data = self._orchestrator.get_key(job_id)
        except KeyError:
            raise BuildJobDoesNotExistsError(job_id)
        except (OrchestratorConnectionError, OrchestratorError) as oe:
            raise BuildJobError(oe)
        job_metadata = json.loads(job_data)
        return BuildJob(AttrDict(job_metadata['job_queue_item']))

    def create_job(self, build_id, build_metadata):
        """Create the orchestrator job entry for *build_id* and return its key.

        Raises:
            BuildJobAlreadyExistsError: a job for this build already exists.
            BuildJobError: the orchestrator could not be reached.
        """
        max_expiration = datetime.utcnow() + timedelta(seconds=self.machine_max_expiration)
        build_metadata['max_expiration'] = calendar.timegm(max_expiration.timetuple())
        build_metadata['last_heartbeat'] = None
        build_metadata['created_at'] = time.time()
        job_key = self._job_key(build_id)
        try:
            # overwrite=False raises KeyError if the key already exists.
            self._orchestrator.set_key(job_key, json.dumps(build_metadata), overwrite=False, expiration=self._manager_config.get('JOB_REGISTRATION_TIMEOUT', JOB_REGISTRATION_TIMEOUT))
        except KeyError:
            raise BuildJobAlreadyExistsError(job_key)
        except (OrchestratorConnectionError, OrchestratorError) as je:
            raise BuildJobError(je)
        return job_key

    def job_scheduled(self, job_id, control_plane, execution_id, max_startup_time):
        """Record executor/execution info for a scheduled job and move the
        build to the SCHEDULED phase.  Returns True on success."""
        try:
            job_data = self._orchestrator.get_key(job_id)
            job_data_json = json.loads(job_data)
        except KeyError:
            logger.warning('Failed to mark job %s as scheduled. Job no longer exists in the orchestrator', job_id)
            return False
        except Exception as e:
            logger.warning('Exception loading job %s from orchestrator: %s', job_id, e)
            return False
        job_data_json['executor_name'] = control_plane
        job_data_json['execution_id'] = execution_id
        try:
            # The key now expires if the builder fails to start in time.
            self._orchestrator.set_key(job_id, json.dumps(job_data_json), overwrite=True, expiration=max_startup_time)
        except Exception as e:
            logger.warning('Exception updating job %s in orchestrator: %s', job_id, e)
            return False
        build_job = BuildJob(AttrDict(job_data_json['job_queue_item']))
        updated = self.update_job_phase(job_id, BUILD_PHASE.BUILD_SCHEDULED)
        if updated:
            self._queue.extend_processing(build_job.job_item, seconds_from_now=(max_startup_time + 60), minimum_extension=MINIMUM_JOB_EXTENSION)
            logger.debug('Job scheduled for job %s with execution with ID %s on control plane %s with max startup time of %s', job_id, execution_id, control_plane, max_startup_time)
        else:
            logger.warning('Job %s not scheduled. Unable update build phase to SCHEDULED', job_id)
        return updated

    def job_unschedulable(self, job_id):
        """Best-effort cleanup of the orchestrator state for a job that could
        not be scheduled; failures are logged, not raised."""
        try:
            build_job = self._build_job_from_job_id(job_id)
            self._cleanup_job_from_orchestrator(build_job)
        except Exception as e:
            logger.warning('Exception trying to mark job %s as unschedulable. Some state may not have been cleaned/updated: %s', job_id, e)

    def on_job_complete(self, build_job, job_result, executor_name, execution_id):
        """Finalize a job: update queue state, fire notifications/metrics,
        terminate the executor and clean up orchestrator keys."""
        job_id = self._job_key(build_job.build_uuid)
        logger.debug('Calling job complete callback for job %s with result %s', job_id, job_result)
        self._write_duration_metric(build_duration, build_job.build_uuid, job_status=job_result)
        if job_result == BuildJobResult.EXPIRED:
            # Expired jobs are requeued without restoring the retry count.
            self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
            if not build_job.has_retries_remaining():
                build_job.send_notification('build_failure')
            logger.warning('Job %s completed with result %s. Requeuing build without restoring retry.', job_id, job_result)
        elif job_result == BuildJobResult.INCOMPLETE:
            logger.warning('Job %s completed with result %s. Requeuing build with retry restored.', job_id, job_result)
            self._queue.incomplete(build_job.job_item, restore_retry=True, retry_after=30)
        elif job_result in (BuildJobResult.ERROR, BuildJobResult.COMPLETE, BuildJobResult.CANCELLED):
            if (job_result == BuildJobResult.ERROR) and (not build_job.has_retries_remaining()):
                build_jobs.labels('false').inc()
                build_job.send_notification('build_failure')
            if job_result == BuildJobResult.COMPLETE:
                build_job.send_notification('build_success')
                build_jobs.labels('true').inc()
            logger.warning('Job %s completed with result %s. Marking build done in queue.', job_id, job_result)
            self._queue.complete(build_job.job_item)
        if build_job.repo_build.trigger is not None:
            model.build.update_trigger_disable_status(build_job.repo_build.trigger, RESULT_PHASES[job_result])
        if executor_name and execution_id:
            self._terminate_executor(executor_name, execution_id)
        self._cleanup_job_from_orchestrator(build_job)
        logger.debug('Job completed for job %s with result %s', job_id, job_result)

    def start_job(self, job_id):
        """Assemble the build arguments and token for the worker starting
        *job_id*.  Returns (token, build_args), or (None, None) on failure."""
        try:
            job_data = self._orchestrator.get_key(job_id)
            job_data_json = json.loads(job_data)
            build_job = BuildJob(AttrDict(job_data_json['job_queue_item']))
        except KeyError:
            logger.warning('Failed to start job %s. Job does not exists in orchestrator', job_id)
            return (None, None)
        except Exception as e:
            logger.error('Exception loading job %s from orchestrator: %s', job_id, e)
            return (None, None)
        repo = build_job.repo_build.repository
        repository_name = repo.namespace_user.username + '/' + repo.name
        (context, dockerfile_path) = build_job.extract_dockerfile_args()
        base_image_information = {}
        if build_job.pull_credentials:
            base_image_information['username'] = build_job.pull_credentials.get('username', '')
            base_image_information['password'] = build_job.pull_credentials.get('password', '')
        build_args = {'build_package': build_job.get_build_package_url(self._user_files), 'context': context, 'dockerfile_path': dockerfile_path, 'repository': repository_name, 'registry': self._registry_hostname, 'pull_token': build_job.repo_build.access_token.get_code(), 'push_token': build_job.repo_build.access_token.get_code(), 'tag_names': build_job.build_config.get('docker_tags', ['latest']), 'base_image': base_image_information}
        private_key = None
        if (build_job.repo_build.trigger is not None) and (build_job.repo_build.trigger.secure_private_key is not None):
            private_key = build_job.repo_build.trigger.secure_private_key.decrypt()
        if private_key is not None:
            build_args['git'] = {'url': build_job.build_config['trigger_metadata'].get('git_url', ''), 'sha': build_job.commit_sha(), 'private_key': (private_key or '')}
        # BUGFIX(review): 'git' is only present when a private key exists;
        # indexing build_args['git'] directly raised KeyError here.
        if (not build_args['build_package']) and (not build_args.get('git')):
            logger.error('Failed to start job %s: insufficient build args - No package url or git', job_id)
            self.update_job_phase(job_id, BUILD_PHASE.INTERNAL_ERROR)
            return (None, None)
        token = self.generate_build_token(BUILD_JOB_TOKEN_TYPE, build_job.build_uuid, job_id, self.machine_max_expiration)
        self._write_duration_metric(build_ack_duration, build_job.build_uuid)
        logger.debug('Started build job %s with arguments %s', job_id, build_args)
        return (token, build_args)

    def update_job_phase(self, job_id, phase, phase_metadata=None):
        """Move the build for *job_id* to *phase*; on a terminal phase,
        dispatch the matching completion handler.  Returns True if updated."""
        try:
            job_data = self._orchestrator.get_key(job_id)
            job_data_json = json.loads(job_data)
            build_job = BuildJob(AttrDict(job_data_json['job_queue_item']))
        except KeyError:
            logger.warning('Job %s no longer exists in the orchestrator, likely expired', job_id)
            return False
        except Exception as e:
            logger.error('Exception loading job %s from orchestrator: %s', job_id, e)
            return False
        if build_job.repo_build.phase in EphemeralBuilderManager.ARCHIVABLE_BUILD_PHASES:
            logger.warning('Job %s is already in a final completed phase (%s), cannot update to %s', job_id, build_job.repo_build.phase, phase)
            return False
        if build_job.repo_build.phase == phase:
            # Idempotent: already there counts as success.
            logger.warning('Job %s is already in the desired state/phase (%s)', job_id, phase)
            return True
        phase_metadata = phase_metadata or {}
        updated = model.build.update_phase_then_close(build_job.build_uuid, phase)
        if updated:
            self.append_log_message(build_job.build_uuid, phase, self._build_logs.PHASE, phase_metadata)
        if updated and (phase in EphemeralBuilderManager.COMPLETED_PHASES):
            executor_name = job_data_json.get('executor_name')
            execution_id = job_data_json.get('execution_id')
            if phase == BUILD_PHASE.ERROR:
                self.on_job_complete(build_job, BuildJobResult.ERROR, executor_name, execution_id)
            elif phase == BUILD_PHASE.COMPLETE:
                self.on_job_complete(build_job, BuildJobResult.COMPLETE, executor_name, execution_id)
            elif phase == BUILD_PHASE.INTERNAL_ERROR:
                # Internal errors are retried, hence INCOMPLETE.
                self.on_job_complete(build_job, BuildJobResult.INCOMPLETE, executor_name, execution_id)
            elif phase == BUILD_PHASE.CANCELLED:
                self.on_job_complete(build_job, BuildJobResult.CANCELLED, executor_name, execution_id)
        return updated

    def job_heartbeat(self, job_id):
        """Record a worker heartbeat for *job_id*, extending queue processing
        and the orchestrator key TTL.  Returns False if the job is gone,
        its heartbeat has lapsed, or the orchestrator is unavailable."""
        try:
            job_data = self._orchestrator.get_key(job_id)
            job_data_json = json.loads(job_data)
            build_job = BuildJob(AttrDict(job_data_json['job_queue_item']))
        except KeyError:
            logger.warning('Job %s no longer exists in the orchestrator, likely expired', job_id)
            return False
        except Exception as e:
            logger.error('Exception loading job %s from orchestrator: %s', job_id, e)
            return False
        max_expiration = datetime.utcfromtimestamp(job_data_json['max_expiration'])
        max_expiration_remaining = max_expiration - datetime.utcnow()
        max_expiration_sec = max(1, int(max_expiration_remaining.total_seconds()))
        # Never let the key outlive the machine's hard expiration.
        ttl = min(HEARTBEAT_PERIOD_SECONDS * 2, max_expiration_sec)
        if job_data_json['last_heartbeat'] and (dateutil.parser.isoparse(job_data_json['last_heartbeat']) < (datetime.utcnow() - HEARTBEAT_DELTA)):
            # BUGFIX(review): the message has two %s placeholders but only
            # one argument was passed; job_id was missing.
            logger.warning('Heartbeat expired for job %s. Marking job as expired. Last heartbeat received at %s', job_id, job_data_json['last_heartbeat'])
            self.update_job_phase(job_id, BUILD_PHASE.INTERNAL_ERROR)
            return False
        job_data_json['last_heartbeat'] = str(datetime.utcnow())
        self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS, minimum_extension=MINIMUM_JOB_EXTENSION)
        try:
            self._orchestrator.set_key(job_id, json.dumps(job_data_json), overwrite=True, expiration=ttl)
        except OrchestratorConnectionError:
            logger.error('Could not update heartbeat for job %s. Orchestrator is not available', job_id)
            return False
        return True

    def cancel_build(self, build_id):
        """Cancel the build if its phase still allows it; returns whether the
        cancellation was recorded."""
        build = model.build.get_repository_build(build_id)
        if build.phase in EphemeralBuilderManager.PHASES_NOT_ALLOWED_TO_CANCEL_FROM:
            return False
        cancelled = model.build.update_phase_then_close(build_id, BUILD_PHASE.CANCELLED)
        if cancelled:
            try:
                job_data = self._orchestrator.get_key(self._job_key(build_id))
                job_data_json = json.loads(job_data)
                build_job = BuildJob(AttrDict(job_data_json['job_queue_item']))
                self.on_job_complete(build_job, BuildJobResult.CANCELLED, job_data_json.get('executor_name'), job_data_json.get('execution_id'))
            except KeyError:
                # BUGFIX(review): this referenced an undefined `job_id`.
                logger.warning('Could not cleanup cancelled job %s. Job does not exist in orchestrator', build_id)
        return cancelled

    def determine_cached_tag(self, build_id, base_image_id):
        """Return the cached tag for the build's base image, or None if the
        job cannot be loaded."""
        job_id = self._job_key(build_id)
        try:
            job_data = self._orchestrator.get_key(job_id)
            job_data_json = json.loads(job_data)
            build_job = BuildJob(AttrDict(job_data_json['job_queue_item']))
        except KeyError:
            logger.warning('Job %s does not exist in orchestrator', job_id)
            return None
        except Exception as e:
            logger.warning('Exception loading job from orchestrator: %s', e)
            return None
        return build_job.determine_cached_tag(base_image_id)

    def schedule(self, build_id):
        """Try to start a builder for *build_id* on the first viable executor.

        Returns (scheduled, retry_timeout); on failure, retry_timeout tells
        the caller how long to wait before requeueing.
        """
        logger.debug('Scheduling build %s', build_id)
        allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
        try:
            if self._running_workers() >= allowed_worker_count:
                logger.warning('Could not schedule build %s. Number of workers at capacity: %s.', build_id, self._running_workers())
                return (False, TOO_MANY_WORKERS_SLEEP_DURATION)
        except Exception as exe:
            logger.warning('Failed to get worker count from executors: %s', exe)
            return (False, EPHEMERAL_API_TIMEOUT)
        job_id = self._job_key(build_id)
        try:
            build_job = self._build_job_from_job_id(job_id)
        except BuildJobDoesNotExistsError as bjne:
            logger.warning('Failed to schedule job %s - Job no longer exists in the orchestrator, likely expired: %s', job_id, bjne)
            return (False, CREATED_JOB_TIMEOUT_SLEEP_DURATION)
        except BuildJobError as bje:
            logger.warning('Failed to schedule job %s - Could not get job from orchestrator: %s', job_id, bje)
            return (False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
        registration_timeout = self._manager_config.get('JOB_REGISTRATION_TIMEOUT', JOB_REGISTRATION_TIMEOUT)
        registration_token = self.generate_build_token(BUILD_JOB_REGISTRATION_TYPE, build_job.build_uuid, job_id, registration_timeout + SETUP_LEEWAY_SECONDS)
        started_with_executor = None
        execution_id = None
        # Walk the executors in configured order; fall back on any failure.
        for executor in self._ordered_executors:
            namespace = build_job.namespace
            if not executor.allowed_for_namespace(namespace):
                logger.warning('Job %s (namespace: %s) cannot use executor %s', job_id, namespace, executor.name)
                continue
            if executor.minimum_retry_threshold > build_job.retries_remaining:
                build_fallback.labels(executor.name).inc()
                logger.warning('Job %s cannot use executor %s as it is below retry threshold %s (retry #%s) - Falling back to next configured executor', job_id, executor.name, executor.minimum_retry_threshold, build_job.retries_remaining)
                continue
            logger.debug('Starting builder for job %s with selected executor: %s', job_id, executor.name)
            try:
                execution_id = executor.start_builder(registration_token, build_job.build_uuid)
            except Exception:
                logger.exception('Exception when starting builder for job: %s - Falling back to next configured executor', job_id)
                continue
            started_with_executor = executor
            break
        if started_with_executor is None:
            logger.error('Could not start ephemeral worker for build %s', build_job.build_uuid)
            self._orchestrator.delete_key(job_id)
            return (False, EPHEMERAL_API_TIMEOUT)
        metric_spec = json.dumps({'executor_name': started_with_executor.name, 'start_time': time.time()})
        setup_time = started_with_executor.setup_time or EPHEMERAL_SETUP_TIMEOUT
        if not self.job_scheduled(job_id, started_with_executor.name, execution_id, setup_time):
            return (False, EPHEMERAL_API_TIMEOUT)
        self._write_metric_spec(build_job.build_uuid, metric_spec)
        return (True, None)

    def _job_expired_callback(self, key_change):
        """Orchestrator callback: a job key TTL lapsed -> mark expired."""
        if key_change.event == KeyEvent.EXPIRE:
            job_metadata = json.loads(key_change.value)
            build_job = BuildJob(AttrDict(job_metadata['job_queue_item']))
            logger.info('Build job key expire event: %s', build_job.build_uuid)
            executor_name = job_metadata.get('executor_name')
            execution_id = job_metadata.get('execution_id')
            job_result = BuildJobResult.EXPIRED
            model.build.update_phase_then_close(build_job.build_uuid, RESULT_PHASES[job_result])
            self.on_job_complete(build_job, job_result, executor_name, execution_id)

    def _job_cancelled_callback(self, key_change):
        """Orchestrator callback: a cancel key was created/set -> cancel."""
        if key_change.event not in (KeyEvent.CREATE, KeyEvent.SET):
            return
        job_metadata = json.loads(key_change.value)
        build_job = BuildJob(AttrDict(job_metadata['job_queue_item']))
        executor_name = job_metadata.get('executor_name')
        execution_id = job_metadata.get('execution_id')
        self.on_job_complete(build_job, BuildJobResult.CANCELLED, executor_name, execution_id)

    def _cleanup_job_from_orchestrator(self, build_job):
        """Delete the job/metric keys, guarded by a cleanup lock so only one
        manager performs the deletion."""
        lock_key = self._lock_key(build_job.build_uuid)
        lock_acquired = self._orchestrator.lock(lock_key)
        if lock_acquired:
            try:
                self._orchestrator.delete_key(self._job_key(build_job.build_uuid))
                self._orchestrator.delete_key(self._metric_key(build_job.build_uuid))
            except KeyError:
                pass
            finally:
                self._orchestrator.delete_key(lock_key)

    def append_build_log(self, build_id, log_message):
        """Parse a docker-style JSON log line and append it to the build
        logs.  Returns False if the message is not valid JSON."""
        try:
            log_data = json.loads(log_message)
        except ValueError:
            return False
        fully_unwrapped = ''
        # Docker emits exactly one of these keys per log line.
        keys_to_extract = ['error', 'status', 'stream']
        for key in keys_to_extract:
            if key in log_data:
                fully_unwrapped = log_data[key]
                break
        current_log_string = str(fully_unwrapped)
        current_step = _extract_current_step(current_log_string)
        if current_step:
            self.append_log_message(build_id, current_log_string, log_type=self._build_logs.COMMAND)
        else:
            self.append_log_message(build_id, current_log_string)
        return True

    def append_log_message(self, build_id, log_message, log_type=None, log_data=None):
        """Best-effort append of a timestamped message to the build logs."""
        log_data = log_data or {}
        log_data['datetime'] = str(datetime.now())
        try:
            self._build_logs.append_log_message(build_id, log_message, log_type, log_data)
        except Exception as e:
            # BUGFIX(review): the arguments were swapped relative to the
            # '%s' placeholders (build id and exception traded places).
            logger.exception('Could not append log to buildlogs for build %s - %s', build_id, e)

    def _running_workers(self):
        """Total running builder count across all executors."""
        return sum(x.running_builders_count for x in self._ordered_executors)

    def _terminate_executor(self, executor_name, execution_id):
        """Stop the builder execution on the named executor, if registered."""
        executor = self._executor_name_to_executor.get(executor_name)
        if executor is None:
            logger.error('Could not find registered executor %s to terminate %s', executor_name, execution_id)
            return
        logger.debug('Terminating executor %s with execution id %s', executor_name, execution_id)
        executor.stop_builder(execution_id)

    def _write_metric_spec(self, build_id, payload):
        """Store the metric spec (executor + start time) for the build."""
        metric_key = self._metric_key(build_id)
        try:
            self._orchestrator.set_key(metric_key, payload, overwrite=False, expiration=(self.machine_max_expiration + 60))
        except KeyError:
            logger.warning('Metric already exists in orchestrator for build %s. Build was likely started before and requeued.', build_id)
        except (OrchestratorConnectionError, OrchestratorError) as oe:
            logger.error('Error when writing metric for build %s to orchestrator: %s', build_id, oe)

    def _write_duration_metric(self, metric, build_id, job_status=None):
        """Observe elapsed time since the stored start_time on *metric*."""
        try:
            metric_data = self._orchestrator.get_key(self._metric_key(build_id))
            parsed_metric_data = json.loads(metric_data)
            start_time = parsed_metric_data['start_time']
            executor = parsed_metric_data.get('executor_name', 'unknown')
            if job_status is not None:
                metric.labels(executor, str(job_status)).observe(time.time() - start_time)
            else:
                metric.labels(executor).observe(time.time() - start_time)
        except Exception:
            logger.warning('Could not write metric for build %s', build_id)

    def _work_checker(self):
        """Blocking loop: poll the build queue, create and schedule jobs."""
        logger.debug('Initializing work checker')
        while True:
            logger.debug('Writing queue metrics')
            self._queue.update_metrics()
            with database.CloseForLongOperation(app.config):
                time.sleep(WORK_CHECK_TIMEOUT)
            logger.debug('Checking for more work from the build queue')
            processing_time = EPHEMERAL_SETUP_TIMEOUT + SETUP_LEEWAY_SECONDS
            job_item = self._queue.get(processing_time=processing_time, ordering_required=True)
            if job_item is None:
                logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT)
                continue
            try:
                build_job = BuildJob(job_item)
            except BuildJobLoadException as bjle:
                logger.error('BuildJobLoadException. Job data: %s. No retry restore. - %s', job_item.body, bjle)
                self._queue.incomplete(job_item, restore_retry=False)
                continue
            build_id = build_job.build_uuid
            job_id = self._job_key(build_id)
            try:
                logger.debug('Creating build job for build %s', build_id)
                self.create_job(build_id, {'job_queue_item': build_job.job_item})
            except BuildJobAlreadyExistsError:
                logger.warning('Attempted to create job %s that already exists. Cleaning up existing job and returning it to the queue.', job_id)
                self.job_unschedulable(job_id)
                self._queue.incomplete(job_item, restore_retry=True)
                continue
            except BuildJobError as je:
                logger.error('Create job exception. Build %s - %s', build_id, je)
                self._queue.incomplete(job_item, restore_retry=True)
                continue
            try:
                logger.debug('Scheduling build job %s', job_id)
                (schedule_success, retry_timeout) = self.schedule(build_id)
            except Exception as se:
                logger.exception('Exception when scheduling job %s: %s', build_job.build_uuid, se)
                self._queue.incomplete(job_item, restore_retry=True, retry_after=WORK_CHECK_TIMEOUT)
                continue
            if schedule_success:
                logger.debug('Build job %s scheduled.', job_id)
            else:
                build_jobs.labels('false').inc()
                logger.warning('Unsuccessful schedule. Build ID: %s. Check build executor service.', build_job.repo_build.uuid)
                self.job_unschedulable(job_id)
                self._queue.incomplete(job_item, restore_retry=False, retry_after=retry_timeout)
            try:
                job_data = self._orchestrator.get_key(job_id)
                job_data_json = json.loads(job_data)
                created_at = job_data_json['created_at']
                build_queue_duration.labels(schedule_success).observe(time.time() - created_at)
            except KeyError:
                # BUGFIX(review): on the unsuccessful path the job key has
                # already been deleted, so the unconditional read here used
                # to raise and kill the checker loop.
                logger.debug('Job %s no longer in orchestrator; skipping queue duration metric', job_id)
def extend_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Attach one sub-parser per GitLab resource class to *parser*."""
    subparsers = parser.add_subparsers(title='resource', dest='gitlab_resource', help='The GitLab resource to manipulate.')
    subparsers.required = True
    # Collect every object class exposed through a RESTManager subclass.
    classes = {
        candidate._obj_cls
        for candidate in gitlab.v4.objects.__dict__.values()
        if isinstance(candidate, type)
        and issubclass(candidate, gitlab.base.RESTManager)
        and candidate._obj_cls is not None
    }
    for obj_cls in sorted(classes, key=operator.attrgetter('__name__')):
        resource_name = cli.cls_to_gitlab_resource(obj_cls)
        resource_parser = subparsers.add_parser(resource_name, formatter_class=cli.VerticalHelpFormatter)
        action_subparsers = resource_parser.add_subparsers(title='action', dest='resource_action', help='Action to execute on the GitLab resource.')
        _populate_sub_parser_by_class(obj_cls, action_subparsers)
        action_subparsers.required = True
    return parser
class RCC_APB2RSTR(IntEnum):
    """Bit masks of the STM32 RCC APB2 peripheral reset register."""

    TIM1RST = 0x00000001    # bit 0
    USART1RST = 0x00000010  # bit 4
    USART6RST = 0x00000020  # bit 5
    ADCRST = 0x00000100     # bit 8
    SDIORST = 0x00000800    # bit 11
    SPI1RST = 0x00001000    # bit 12
    SPI4RST = 0x00002000    # bit 13
    SYSCFGRST = 0x00004000  # bit 14
    TIM9RST = 0x00010000    # bit 16
    TIM10RST = 0x00020000   # bit 17
    TIM11RST = 0x00040000   # bit 18
    SPI5RST = 0x00100000    # bit 20
def main():
    """Parse the command-line options and run the efficiency evaluation."""
    parser = argparse.ArgumentParser()
    add_arg = parser.add_argument
    add_arg('--vidLen', type=int, default=32, help='Number of frames in a clip')
    add_arg('--batchSize', type=int, default=4, help='Training batch size')
    add_arg('--preprocessData', help='whether need to preprocess data ( make npy file from video clips )', action='store_true')
    add_arg('--mode', type=str, default='both', help='model type - both, only_frames, only_difference', choices=['both', 'only_frames', 'only_difference'])
    add_arg('--dataset', type=str, default='rwf2000', help='dataset - rwf2000, movies, hockey', choices=['rwf2000', 'movies', 'hockey'])
    add_arg('--lstmType', type=str, default='sepconv', help='lstm - sepconv, asepconv', choices=['sepconv', 'asepconv'])
    add_arg('--weightsPath', type=str, default='NOT_SET', help='path to the weights pretrained on rwf dataset')
    add_arg('--fusionType', type=str, default='concat', help='fusion type - A for add, M for multiply, C for concat', choices=['C', 'A', 'M'])
    add_arg('--flowGatedNet', help='measure the efficiency of FlowGatedNet by Ming et. al.', action='store_true')
    evaluateEfficiency(parser.parse_args())
class Registry(object):
    """A name -> object registry supporting decorator or direct registration.

    Example:
        REG = Registry("models")

        @REG.register()
        class MyModel: ...

        # or directly:
        REG.register(MyModel)
    """

    def __init__(self, name: str) -> None:
        self._name: str = name
        self._obj_map: Dict[(str, object)] = {}

    def _do_register(self, name: str, obj: object) -> None:
        # Duplicate names are a programming error; fail loudly.
        assert (name not in self._obj_map), "An object named '{}' was already registered in '{}' registry!".format(name, self._name)
        self._obj_map[name] = obj

    def register(self, obj: object = None) -> Optional[object]:
        """Register *obj* under its ``__name__``.

        With no argument, returns a decorator.  With an object, registers it
        directly and returns it -- BUGFIX(review): the direct form previously
        returned None, which silently replaced the class when this was used
        as a bare ``@registry.register`` decorator; returning the object is
        backward compatible for plain calls.
        """
        if obj is None:
            def deco(func_or_class: object) -> object:
                self._do_register(func_or_class.__name__, func_or_class)
                return func_or_class
            return deco
        self._do_register(obj.__name__, obj)
        return obj

    def get(self, name: str) -> object:
        """Return the object registered under *name*.

        Raises:
            KeyError: no object with that name is registered.
        """
        ret = self._obj_map.get(name)
        if ret is None:
            raise KeyError("No object named '{}' found in '{}' registry!".format(name, self._name))
        return ret
def test_unstructure_deeply_nested_generics_list(genconverter):
    """Unstructuring a generic container nested inside another class yields
    the same payload with and without an explicit unstructure-as type.

    NOTE(review): ``Inner`` and ``Outer`` are constructed positionally
    (``Inner(1)``, ``Outer[Inner]([...])``) yet no attrs/dataclass decorator
    is visible -- the decorators appear to have been stripped from this
    copy; confirm against the original test module.
    """
    class Inner():
        a: int
    class Outer(Generic[T]):
        inner: List[T]
    initial = Outer[Inner]([Inner(1)])
    raw = genconverter.unstructure(initial, Outer[Inner])
    assert (raw == {'inner': [{'a': 1}]})
    # Unstructuring without the explicit type must produce the same payload.
    raw = genconverter.unstructure(initial)
    assert (raw == {'inner': [{'a': 1}]})
class Effect11429(BaseEffect):
    """Passive hull effect boosting loaded torpedo charges.

    NOTE(review): semantics inferred only from the attribute names
    ('aoeVelocity', 'shipBonusMB'); confirm against the effect data.
    """
    type = 'passive'
    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost 'aoeVelocity' on charges requiring the Torpedoes skill,
        # scaled by the hull's shipBonusMB attribute per level of the
        # Minmatar Battleship skill.
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Torpedoes')), 'aoeVelocity', ship.getModifiedItemAttr('shipBonusMB'), skill='Minmatar Battleship', **kwargs)
def test(model, tensor_loader, criterion, device):
model.eval()
test_acc = 0
test_loss = 0
for data in tensor_loader:
(inputs, labels) = data
inputs = inputs.to(device)
labels.to(device)
labels = labels.type(torch.LongTensor)
outputs = model(inputs)
outputs = outputs.type(torch.FloatTensor)
outputs.to(device)
loss = criterion(outputs, labels)
predict_y = torch.argmax(outputs, dim=1).to(device)
accuracy = ((predict_y == labels.to(device)).sum().item() / labels.size(0))
test_acc += accuracy
test_loss += (loss.item() * inputs.size(0))
test_acc = (test_acc / len(tensor_loader))
test_loss = (test_loss / len(tensor_loader.dataset))
print('validation accuracy:{:.4f}, loss:{:.5f}'.format(float(test_acc), float(test_loss)))
return |
# BUGFIX(review): restored the stripped `@pytest.mark.` prefix on the
# parametrize decorator -- a bare `.parametrize(...)` is a syntax error.
@pytest.mark.parametrize('annotation, value', [('int', 42), ('bytes', b'')])
def test_enums_type_annotation_non_str_member(annotation, value) -> None:
    """An annotated non-str enum member infers to its assigned value."""
    node = builder.extract_node(f'''
    from enum import Enum
    class Veg(Enum):
        TOMATO: {annotation} = {value}
    Veg.TOMATO.value
    ''')
    inferred_member_value = node.inferred()[0]
    assert isinstance(inferred_member_value, nodes.Const)
    assert (inferred_member_value.value == value)
class FakeHDF4FileHandlerPolar(FakeHDF4FileHandler):
    """Fake polar-orbiter (SNPP/VIIRS) HDF4 handler serving canned arrays."""

    def get_test_content(self, filename, filename_info, filetype_info):
        """Return the synthetic file-content mapping used by the tests."""
        def _var(raw, attrs, dtype=None):
            # Wrap a numpy array in a chunked dask-backed DataArray.
            darr = da.from_array(raw, chunks=4096)
            if dtype is not None:
                darr = darr.astype(dtype)
            return xr.DataArray(darr, attrs=attrs)

        content = {'/attr/platform': 'SNPP', '/attr/sensor': 'VIIRS'}
        content['longitude'] = _var(DEFAULT_LON_DATA, {'_FillValue': np.nan, 'scale_factor': 1.0, 'add_offset': 0.0, 'standard_name': 'longitude'})
        content['longitude/shape'] = DEFAULT_FILE_SHAPE
        content['latitude'] = _var(DEFAULT_LAT_DATA, {'_FillValue': np.nan, 'scale_factor': 1.0, 'add_offset': 0.0, 'standard_name': 'latitude'})
        content['latitude/shape'] = DEFAULT_FILE_SHAPE
        float_attrs = {'_FillValue': -1, 'scale_factor': 1.0, 'add_offset': 0.0, 'units': '1'}
        content['variable1'] = _var(DEFAULT_FILE_DATA, dict(float_attrs), dtype=np.float32)
        content['variable1/shape'] = DEFAULT_FILE_SHAPE
        content['variable2'] = _var(DEFAULT_FILE_DATA, dict(float_attrs), dtype=np.float32)
        content['variable2/shape'] = DEFAULT_FILE_SHAPE
        # Mask the even values so the tests exercise fill handling.
        content['variable2'] = content['variable2'].where((content['variable2'] % 2) != 0)
        content['variable3'] = _var(DEFAULT_FILE_DATA, {'SCALED': 0, '_FillValue': -128, 'flag_meanings': 'clear water supercooled mixed ice unknown', 'flag_values': [0, 1, 2, 3, 4, 5], 'units': 'none'}, dtype=np.byte)
        content['variable3/shape'] = DEFAULT_FILE_SHAPE
        return content
def get_proj_incdirs(proj_dir: Path) -> list[str]:
    """Return the PROJ include directories.

    The PROJ_INCDIR environment variable wins; otherwise fall back to
    ``proj_dir/include``.  Exits with an error if neither is available.
    """
    env_incdir = os.environ.get('PROJ_INCDIR')
    if env_incdir is not None:
        return [env_incdir]
    candidate = proj_dir / 'include'
    if not candidate.exists():
        raise SystemExit('ERROR: PROJ_INCDIR dir not found. Please set PROJ_INCDIR.')
    return [str(candidate)]
class ST_UniversalMeasure(BaseSimpleType):
    """Simple type for a universal measure string such as ``"12.5pt"``.

    The XML value is a float followed by a two-letter unit suffix
    (mm, cm, in, pt, pc, or pi); it converts to EMU (914400 per inch).
    """

    # BUGFIX(review): restored the @classmethod decorator implied by the
    # `cls` first parameter; without it the method cannot be called as
    # ST_UniversalMeasure.convert_from_xml(value).
    @classmethod
    def convert_from_xml(cls, str_value: str) -> Emu:
        """Return *str_value* converted to an |Emu| instance.

        Raises:
            KeyError: the unit suffix is not a recognized measure.
            ValueError: the numeric part is not a valid float.
        """
        (float_part, units_part) = (str_value[:(- 2)], str_value[(- 2):])
        quantity = float(float_part)
        # EMU-per-unit table; 'pc' and 'pi' are both picas (12 points).
        multiplier = {'mm': 36000, 'cm': 360000, 'in': 914400, 'pt': 12700, 'pc': 152400, 'pi': 152400}[units_part]
        return Emu(int(round((quantity * multiplier))))
class QCSchema(QCSchemaInput):
    """Full QCSchema output record: the input fields plus computed results.

    NOTE(review): the ``cls(**data)`` construction and class-level field
    defaults imply this class is a dataclass like its base; if a
    ``@dataclass`` decorator was present upstream it has been stripped from
    this copy -- confirm before relying on a generated ``__init__``.
    """

    provenance: QCProvenance                       # producer metadata
    return_result: (float | Sequence[float])       # primary driver result
    success: bool                                  # computation succeeded
    properties: QCProperties                       # computed properties
    error: (QCError | None) = None                 # populated on failure
    wavefunction: (QCWavefunction | None) = None   # optional wavefunction

    # BUGFIX(review): restored the @classmethod decorators implied by the
    # `cls` first parameters of from_dict and _from_hdf5_group.
    @classmethod
    def from_dict(cls, data: dict[(str, Any)]) -> QCSchema:
        """Build a QCSchema from a plain dict, materializing nested models.

        Nested sub-dicts are popped out of *data* and converted to their
        model classes; the remaining flat entries pass through to ``cls``.
        """
        error: (QCError | None) = None
        if ('error' in data.keys()):
            error = QCError(**data.pop('error'))
        model = QCModel(**data.pop('model'))
        molecule = QCTopology(**data.pop('molecule'))
        provenance = QCProvenance(**data.pop('provenance'))
        properties = QCProperties(**data.pop('properties'))
        wavefunction: (QCWavefunction | None) = None
        if ('wavefunction' in data.keys()):
            wavefunction = QCWavefunction.from_dict(data.pop('wavefunction'))
        return cls(**data, error=error, model=model, molecule=molecule, provenance=provenance, properties=properties, wavefunction=wavefunction)

    def to_hdf5(self, group: h5py.Group) -> None:
        """Serialize this record into the given HDF5 *group*.

        Scalar fields become group attributes; nested models get sub-groups.
        """
        group.attrs['schema_name'] = self.schema_name
        group.attrs['schema_version'] = self.schema_version
        group.attrs['driver'] = self.driver
        group.attrs['return_result'] = self.return_result
        group.attrs['success'] = self.success
        molecule_group = group.require_group('molecule')
        self.molecule.to_hdf5(molecule_group)
        model_group = group.require_group('model')
        self.model.to_hdf5(model_group)
        provenance_group = group.require_group('provenance')
        self.provenance.to_hdf5(provenance_group)
        properties_group = group.require_group('properties')
        self.properties.to_hdf5(properties_group)
        # Optional sections only appear when populated.
        if (self.error is not None):
            error_group = group.require_group('error')
            self.error.to_hdf5(error_group)
        if (self.wavefunction is not None):
            wavefunction_group = group.require_group('wavefunction')
            self.wavefunction.to_hdf5(wavefunction_group)
        keywords_group = group.require_group('keywords')
        for (key, value) in self.keywords.items():
            keywords_group.attrs[key] = value

    @classmethod
    def _from_hdf5_group(cls, h5py_group: h5py.Group) -> QCSchemaInput:
        """Reconstruct a QCSchema from an HDF5 group written by to_hdf5."""
        data = dict(h5py_group.attrs.items())
        data['molecule'] = cast(QCTopology, QCTopology.from_hdf5(h5py_group['molecule']))
        data['model'] = cast(QCModel, QCModel.from_hdf5(h5py_group['model']))
        data['provenance'] = cast(QCProvenance, QCProvenance.from_hdf5(h5py_group['provenance']))
        data['properties'] = cast(QCProperties, QCProperties.from_hdf5(h5py_group['properties']))
        if ('error' in h5py_group.keys()):
            data['error'] = cast(QCError, QCError.from_hdf5(h5py_group['error']))
        if ('wavefunction' in h5py_group.keys()):
            data['wavefunction'] = cast(QCWavefunction, QCWavefunction.from_hdf5(h5py_group['wavefunction']))
        data['keywords'] = dict(h5py_group['keywords'].attrs.items())
        return cls(**data)
def post_process_sql(sql_str, df, table_title=None, process_program_with_fuzzy_match_on_db=True, verbose=False):
    """Normalize a generated (N)SQL string against dataframe `df`.

    Two passes: `basic_fix` wraps bare column-name occurrences in backticks
    and rewrites the FROM clause, then (optionally, best-effort)
    `fuzzy_match_process` snaps literal values and column names onto the
    closest matches actually present in `df`.

    NOTE(review): relies on module-level helpers not visible here
    (ALL_KEY_WORDS, fuzz, tokenize, extract_partial_template_from_sql, re).
    """
    def basic_fix(sql_str, all_headers, table_title=None):
        # Wrap un-quoted header occurrences in backticks; normalize FROM.
        def finditer(sub_str: str, mother_str: str):
            # All (start, end) spans of sub_str in mother_str.
            # NOTE(review): `find(..., start, -1)` excludes the final
            # character, so a match that ends at the last char is missed —
            # confirm whether that is intentional.
            result = []
            start_index = 0
            while True:
                start_index = mother_str.find(sub_str, start_index, (- 1))
                if (start_index == (- 1)):
                    break
                end_idx = (start_index + len(sub_str))
                result.append((start_index, end_idx))
                start_index = end_idx
            return result
        if table_title:
            sql_str = sql_str.replace(('FROM ' + table_title), 'FROM w')
            sql_str = sql_str.replace(('FROM ' + table_title.lower()), 'FROM w')
        'Case 1: Fix the `` missing. '
        # (the bare string above is a no-op statement kept from the original)
        while ('' in all_headers):
            all_headers.remove('')
        sql_str = sql_str.replace('\\n', '\n')
        sql_str = sql_str.replace('\n', '\\n')
        # Longest headers first so shorter names don't shadow longer ones.
        all_headers.sort(key=(lambda x: len(x)), reverse=True)
        # have_matched: 0 = free, 1 = header match, 2 = inside a quoted string.
        have_matched = [0 for i in range(len(sql_str))]
        idx_s_single_quotation = [_ for _ in range(1, len(sql_str)) if ((sql_str[_] in ["'"]) and (sql_str[(_ - 1)] not in ["'"]))]
        idx_s_double_quotation = [_ for _ in range(1, len(sql_str)) if ((sql_str[_] in ['"']) and (sql_str[(_ - 1)] not in ['"']))]
        for idx_s in [idx_s_single_quotation, idx_s_double_quotation]:
            if ((len(idx_s) % 2) == 0):
                for idx in range(int((len(idx_s) / 2))):
                    start_idx = idx_s[(idx * 2)]
                    end_idx = idx_s[((idx * 2) + 1)]
                    have_matched[start_idx:end_idx] = [2 for _ in range((end_idx - start_idx))]
        for header in all_headers:
            if ((header in sql_str) and (header not in ALL_KEY_WORDS)):
                all_matched_of_this_header = finditer(header, sql_str)
                for matched_of_this_header in all_matched_of_this_header:
                    (start_idx, end_idx) = matched_of_this_header
                    # NOTE(review): `all(...) == 0` compares the *bool* result
                    # of all() with 0; it is True whenever any char in the span
                    # is unmatched, which may differ from the likely intent
                    # "entirely unmatched" — confirm before changing.
                    if ((all(have_matched[start_idx:end_idx]) == 0) and (not (sql_str[(start_idx - 1)] == '`')) and (not (sql_str[end_idx] == '`'))):
                        have_matched[start_idx:end_idx] = [1 for _ in range((end_idx - start_idx))]
        # Convert maximal runs of 1s into backtick-delimited spans.
        start_have_matched = ([0] + have_matched)
        end_have_matched = (have_matched + [0])
        start_idx_s = [(idx - 1) for idx in range(1, len(start_have_matched)) if ((start_have_matched[(idx - 1)] == 0) and (start_have_matched[idx] == 1))]
        end_idx_s = [idx for idx in range((len(end_have_matched) - 1)) if ((end_have_matched[idx] == 1) and (end_have_matched[(idx + 1)] == 0))]
        assert (len(start_idx_s) == len(end_idx_s))
        spans = []
        current_idx = 0
        for (start_idx, end_idx) in zip(start_idx_s, end_idx_s):
            spans.append(sql_str[current_idx:start_idx])
            spans.append(sql_str[start_idx:(end_idx + 1)])
            current_idx = (end_idx + 1)
        spans.append(sql_str[current_idx:])
        sql_str = '`'.join(spans)
        return sql_str
    def fuzzy_match_process(sql_str, df, verbose=False):
        # Replace literal values / backticked columns with the closest match
        # present in `df`.
        def _get_matched_cells(value_str, df, fuzz_threshold=70):
            # Cells whose fuzz ratio with value_str >= threshold, best first;
            # an exact match short-circuits to a single-element list.
            matched_cells = []
            for (row_id, row) in df.iterrows():
                for cell in row:
                    cell = str(cell)
                    fuzz_score = fuzz.ratio(value_str, cell)
                    if (fuzz_score == 100):
                        matched_cells = [(cell, fuzz_score)]
                        return matched_cells
                    if (fuzz_score >= fuzz_threshold):
                        matched_cells.append((cell, fuzz_score))
            matched_cells = sorted(matched_cells, key=(lambda x: x[1]), reverse=True)
            return matched_cells
        def _check_valid_fuzzy_match(value_str, matched_cell):
            # Accept a fuzzy match only if the numbers on one side are a
            # subset of the numbers on the other (either direction).
            number_pattern = '[+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?'
            numbers_in_value = re.findall(number_pattern, value_str)
            numbers_in_matched_cell = re.findall(number_pattern, matched_cell)
            try:
                numbers_in_value = [float(num.replace(',', '')) for num in numbers_in_value]
            except:
                print(f"Can't convert number string {numbers_in_value} into float in _check_valid_fuzzy_match().")
            try:
                numbers_in_matched_cell = [float(num.replace(',', '')) for num in numbers_in_matched_cell]
            except:
                print(f"Can't convert number string {numbers_in_matched_cell} into float in _check_valid_fuzzy_match().")
            numbers_in_value = set(numbers_in_value)
            numbers_in_matched_cell = set(numbers_in_matched_cell)
            if (numbers_in_value.issubset(numbers_in_matched_cell) or numbers_in_matched_cell.issubset(numbers_in_value)):
                return True
            else:
                return False
        sql_str = sql_str.rstrip('```').rstrip('\n')
        # Protect embedded QA(...) sub-programs from tokenization by swapping
        # them for placeholders, restored at the end.
        qa_pattern = 'QA\\(.+?;.*?`.+?`.*?\\)'
        qas = re.findall(qa_pattern, sql_str)
        for (idx, qa) in enumerate(qas):
            sql_str = sql_str.replace(qa, f'placeholder{idx}')
        sql_tokens = tokenize(sql_str)
        sql_template_tokens = extract_partial_template_from_sql(sql_str)
        # BETWEEN yields one extra [VALUE] + 'and' in the token stream;
        # realign the template token list accordingly.
        fixed_sql_template_tokens = []
        sql_tok_bias = 0
        for (idx, sql_templ_tok) in enumerate(sql_template_tokens):
            sql_tok = sql_tokens[(idx + sql_tok_bias)]
            if ((sql_tok == 'between') and (sql_templ_tok == '[WHERE_OP]')):
                fixed_sql_template_tokens.extend(['[WHERE_OP]', '[VALUE]', 'and'])
                sql_tok_bias += 2
            else:
                fixed_sql_template_tokens.append(sql_templ_tok)
        sql_template_tokens = fixed_sql_template_tokens
        for (idx, tok) in enumerate(sql_tokens):
            if (tok in ALL_KEY_WORDS):
                sql_tokens[idx] = tok.upper()
        if verbose:
            print(sql_tokens)
            print(sql_template_tokens)
        assert (len(sql_tokens) == len(sql_template_tokens))
        # Fuzzy-replace each [VALUE] token with the closest valid cell in df.
        value_indices = [idx for idx in range(len(sql_template_tokens)) if (sql_template_tokens[idx] == '[VALUE]')]
        for value_idx in value_indices:
            # Skip values that belong to a protected QA(...) placeholder.
            if ((value_idx >= 2) and sql_tokens[(value_idx - 2)].startswith('placeholder')):
                continue
            value_str = sql_tokens[value_idx]
            is_string = False
            if ((value_str[0] == '"') and (value_str[(- 1)] == '"')):
                value_str = value_str[1:(- 1)]
                is_string = True
            # LIKE patterns are left untouched.
            if ((value_str[0] == '%') or (value_str[(- 1)] == '%')):
                continue
            value_str = value_str.lower()
            matched_cells = _get_matched_cells(value_str, df)
            if verbose:
                print(matched_cells)
            new_value_str = value_str
            if matched_cells:
                for (matched_cell, fuzz_score) in matched_cells:
                    if _check_valid_fuzzy_match(value_str, matched_cell):
                        new_value_str = matched_cell
                        if (verbose and (new_value_str != value_str)):
                            print('\tfuzzy match replacing!', value_str, '->', matched_cell, f'fuzz_score:{fuzz_score}')
                        break
            if is_string:
                new_value_str = f'"{new_value_str}"'
            sql_tokens[value_idx] = new_value_str
        new_sql_str = ' '.join(sql_tokens)
        # Snap backticked column names onto real df columns (ratio >= 80).
        sql_columns = re.findall('`\\s(.*?)\\s`', new_sql_str)
        for sql_col in sql_columns:
            matched_columns = []
            for col in df.columns:
                score = fuzz.ratio(sql_col.lower(), col)
                if (score == 100):
                    matched_columns = [(col, score)]
                    break
                if (score >= 80):
                    matched_columns.append((col, score))
            matched_columns = sorted(matched_columns, key=(lambda x: x[1]), reverse=True)
            if matched_columns:
                matched_col = matched_columns[0][0]
                new_sql_str = new_sql_str.replace(f'` {sql_col} `', f'`{matched_col}`')
            else:
                new_sql_str = new_sql_str.replace(f'` {sql_col} `', f'`{sql_col}`')
        # Restore the protected QA(...) sub-programs.
        for (idx, qa) in enumerate(qas):
            new_sql_str = new_sql_str.replace(f'placeholder{idx}', qa)
        new_sql_str = new_sql_str.replace('< >', '<>')
        return new_sql_str
    sql_str = basic_fix(sql_str, list(df.columns), table_title)
    if process_program_with_fuzzy_match_on_db:
        try:
            sql_str = fuzzy_match_process(sql_str, df, verbose)
        except:
            # Best-effort: on any failure fall back to the basic-fixed string.
            pass
    return sql_str
def deepsize(obj, max_depth=4):
    """Approximate the deep memory footprint of *obj* in bytes.

    Walks the object graph via gc.get_referents up to *max_depth* levels
    (a negative max_depth disables the limit), counting each referent once
    by id, and sums sys.getsizeof over everything reached plus obj itself.
    """
    def _walk(node, seen, depth):
        # Stop descending once the depth budget is exhausted.
        if 0 <= max_depth < depth:
            return
        for child in gc.get_referents(node):
            key = id(child)
            if key in seen:
                continue
            seen[key] = (child, sys.getsizeof(child, default=0))
            _walk(child, seen, depth + 1)

    seen = {}
    _walk(obj, seen, 0)
    return sys.getsizeof(obj) + sum(entry[1] for entry in seen.values())
def train():
    """Run the full train/test loop over all epochs.

    NOTE(review): this function relies entirely on module-level globals not
    visible in this chunk (args, start_epoch, optimizer, model, logger,
    TrainImgLoader, TestImgLoader, train_sample, test_sample,
    adjust_learning_rate, save_scalars, save_images, AverageMeterDict) —
    confirm their definitions at module scope.
    """
    for epoch_idx in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch_idx, args.lr, args.lrepochs)
        # --- training pass for this epoch ---
        for (batch_idx, sample) in enumerate(TrainImgLoader):
            global_step = ((len(TrainImgLoader) * epoch_idx) + batch_idx)
            start_time = time.time()
            # Extra metrics/images are only computed every summary_freq steps.
            do_summary = ((global_step % args.summary_freq) == 0)
            (loss, scalar_outputs, image_outputs) = train_sample(sample, compute_metrics=do_summary)
            if do_summary:
                save_scalars(logger, 'train', scalar_outputs, global_step)
                save_images(logger, 'train', image_outputs, global_step)
            del scalar_outputs, image_outputs
            print('Epoch {}/{}, Iter {}/{}, train loss = {:.3f}, time = {:.3f}'.format(epoch_idx, args.epochs, batch_idx, len(TrainImgLoader), loss, (time.time() - start_time)))
        # Checkpoint model + optimizer every save_freq epochs.
        if (((epoch_idx + 1) % args.save_freq) == 0):
            checkpoint_data = {'epoch': epoch_idx, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
            torch.save(checkpoint_data, '{}/checkpoint_{:0>6}.ckpt'.format(args.logdir, epoch_idx))
        gc.collect()
        # --- evaluation pass on the test set ---
        avg_test_scalars = AverageMeterDict()
        for (batch_idx, sample) in enumerate(TestImgLoader):
            global_step = ((len(TestImgLoader) * epoch_idx) + batch_idx)
            start_time = time.time()
            do_summary = ((global_step % args.summary_freq) == 0)
            (loss, scalar_outputs, image_outputs) = test_sample(sample, compute_metrics=do_summary)
            if do_summary:
                save_scalars(logger, 'test', scalar_outputs, global_step)
                save_images(logger, 'test', image_outputs, global_step)
            avg_test_scalars.update(scalar_outputs)
            del scalar_outputs, image_outputs
            print('Epoch {}/{}, Iter {}/{}, test loss = {:.3f}, time = {:3f}'.format(epoch_idx, args.epochs, batch_idx, len(TestImgLoader), loss, (time.time() - start_time)))
        # Log epoch-averaged test metrics against the training step counter.
        avg_test_scalars = avg_test_scalars.mean()
        save_scalars(logger, 'fulltest', avg_test_scalars, (len(TrainImgLoader) * (epoch_idx + 1)))
        print('avg_test_scalars', avg_test_scalars)
        gc.collect()
class MonomerWidget(gtk.DrawingArea):
    """GTK2 widget drawing a monomer as a rounded rectangle with a title bar
    and one labelled circle per binding site.

    NOTE(review): uses module-level layout constants (sites_y_pos,
    sites_y_spacing, corner_radius, site_radius) not visible in this chunk.
    """

    def __init__(self, monomer):
        gtk.DrawingArea.__init__(self)
        self.connect('expose_event', self.expose)
        # Widget height grows with the number of sites.
        self.set_size_request(100, (sites_y_pos + (sites_y_spacing * len(monomer.sites))))
        self.monomer = monomer

    def expose(self, widget, event):
        """GTK expose handler: repaint the whole widget."""
        self.draw()
        return False

    def draw(self):
        """Render the rounded box, title, separator line and all sites."""
        rect = self.get_allocation()
        context = self.context = self.window.cairo_create()
        context.set_line_width(1)
        # Rounded-rectangle path from four quarter arcs; the 0.5 offsets keep
        # 1px strokes crisp on pixel boundaries.
        (x1, x2) = ((corner_radius + 0.5), ((rect.width - corner_radius) - 0.5))
        (y1, y2) = ((corner_radius + 0.5), ((rect.height - corner_radius) - 0.5))
        angles = [((v * math.pi) / 2.0) for v in range(0, 4)]
        context.arc(x1, y1, corner_radius, angles[2], angles[3])
        context.arc(x2, y1, corner_radius, angles[3], angles[0])
        context.arc(x2, y2, corner_radius, angles[0], angles[1])
        context.arc(x1, y2, corner_radius, angles[1], angles[2])
        context.close_path()
        # White fill with black outline.
        context.set_source_rgb(1, 1, 1)
        context.fill_preserve()
        context.set_source_rgb(0, 0, 0)
        context.stroke()
        # Monomer name and the separator line under it.
        context.set_font_size(14)
        context.move_to(corner_radius, (context.font_extents()[0] + 3))
        context.show_text(self.monomer.name)
        context.move_to(0, (context.font_extents()[0] + 8.5))
        context.rel_line_to(100, 0)
        context.stroke()
        # Sites are stacked vertically; translate the context between them.
        context.translate(corner_radius, sites_y_pos)
        for site in self.monomer.sites:
            self.draw_site(site)
            context.translate(0, sites_y_spacing)

    def draw_site(self, site):
        """Draw one site: an outlined circle with its label below."""
        context = self.context
        context.set_source_rgb(0, 0, 0)
        context.arc(site_radius, 0, site_radius, 0, (2 * math.pi))
        context.stroke()
        context.move_to(0, (site_radius + context.font_extents()[0]))
        context.show_text(site)
        context.new_path()
def node_prototype_desc(caller):
    """OLC menu node for viewing/editing the prototype's short description."""
    current = _get_current_value(caller, 'prototype_desc')
    text = "\n The |cPrototype-Description|n briefly describes the prototype when it's viewed in listings.\n\n {current}\n ".format(current=current)
    helptext = '\n Giving a brief description helps you and others to locate the prototype for use later.\n '
    options = _wizard_options('prototype_desc', 'prototype_key', 'prototype_tags')
    # Any other input sets the (stripped) description and redisplays this node.
    options.append({'key': '_default', 'goto': (_set_property, dict(prop='prototype_desc', processor=(lambda raw: raw.strip()), next_node='node_prototype_desc'))})
    return ((text, helptext), options)
class LegacyGRUCell(tf.nn.rnn_cell.RNNCell):
    """TF1 GRU cell: sigmoid gates without bias, candidate with bias.

    NOTE(review): state_size/output_size lack the @property decorator that
    RNNCell normally requires — presumably stripped during extraction;
    confirm against the original file.
    """

    def __init__(self, num_units, reuse=None):
        super(LegacyGRUCell, self).__init__(_reuse=reuse)
        self._num_units = num_units

    def __call__(self, inputs, state, scope=None):
        """One GRU step; returns (output, new_state) with output == state."""
        with tf.variable_scope(scope, default_name='gru_cell', values=[inputs, state]):
            if (not isinstance(inputs, (list, tuple))):
                inputs = [inputs]
            all_inputs = (list(inputs) + [state])
            # Reset and update gates (no bias).
            r = tf.nn.sigmoid(linear(all_inputs, self._num_units, False, False, scope='reset_gate'))
            u = tf.nn.sigmoid(linear(all_inputs, self._num_units, False, False, scope='update_gate'))
            # Candidate uses the reset-gated previous state (with bias).
            all_inputs = (list(inputs) + [(r * state)])
            c = linear(all_inputs, self._num_units, True, False, scope='candidate')
            # Standard GRU interpolation between old state and candidate.
            new_state = (((1.0 - u) * state) + (u * tf.tanh(c)))
            return (new_state, new_state)

    def state_size(self):
        return self._num_units

    def output_size(self):
        return self._num_units
def list_check(head):
    """Walk a kernel-style circular doubly linked list from `head`, verifying
    that each node's prev/next pointers are mutually consistent, and report
    the first corruption (or the final node count) via gdb.write.

    `head` must be a gdb.Value of type `struct list_head` or a pointer to it.
    NOTE(review): `list_head` is a module-level helper not visible here.
    """
    nb = 0  # nodes visited so far (excluding the head sentinel)
    if (head.type == list_head.get_type().pointer()):
        head = head.dereference()
    elif (head.type != list_head.get_type()):
        raise gdb.GdbError('argument must be of type (struct list_head [*])')
    c = head
    try:
        gdb.write('Starting with: {}\n'.format(c))
    except gdb.MemoryError:
        gdb.write('head is not accessible\n')
        return
    while True:
        p = c['prev'].dereference()
        n = c['next'].dereference()
        # prev->next must point back at the current node.
        try:
            if (p['next'] != c.address):
                gdb.write('prev.next != current: {current_addr}={current} {p_addr}={p}\n'.format(current_addr=c.address, current=c, p_addr=p.address, p=p))
                return
        except gdb.MemoryError:
            gdb.write('prev is not accessible: {current_addr}={current}\n'.format(current_addr=c.address, current=c))
            return
        # next->prev must point back at the current node.
        try:
            if (n['prev'] != c.address):
                gdb.write('next.prev != current: {current_addr}={current} {n_addr}={n}\n'.format(current_addr=c.address, current=c, n_addr=n.address, n=n))
                return
        except gdb.MemoryError:
            gdb.write('next is not accessible: {current_addr}={current}\n'.format(current_addr=c.address, current=c))
            return
        c = n
        nb += 1
        # Back at the head sentinel: the list closed cleanly.
        if (c == head):
            gdb.write('list is consistent: {} node(s)\n'.format(nb))
            return
class Pctsp(object):
    """Prize-Collecting TSP instance: per-node prizes and penalties plus a
    node-to-node cost matrix, loaded from a fixed-layout text file.
    """

    def __init__(self):
        self.prize = []      # prize per node
        self.penal = []      # penalty per node
        self.cost = []       # cost matrix (n x n)
        self.prize_min = 0   # minimum total prize that must be collected

    def load(self, file_name, prize_min):
        """Parse an instance file.

        Layout (0-based lines): line 1 holds the space-separated prizes,
        line 4 the penalties, and the cost matrix starts at line 7.

        Raises AssertionError ('Infeasible') when the total available prize
        is below `prize_min`.
        """
        # BUG FIX: the original compared line numbers with `is`, which only
        # works by accident for CPython's cached small ints; use `==`.
        # Also: open via a context manager, and parse with np.array instead
        # of the deprecated text-mode np.fromstring.
        with open(file_name, 'r') as f:
            for i, line in enumerate(f):
                if i == 5:
                    break
                if i == 1:
                    self.prize = np.array(line.split(), dtype=int)
                elif i == 4:
                    self.penal = np.array(line.split(), dtype=int)
        self.cost = np.loadtxt(file_name, dtype=int, skiprows=7)
        self.prize_min = prize_min
        assert sum(self.prize) >= prize_min, 'Infeasible'
def calculate_distance(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance in metres between two points given
    as latitude/longitude pairs in degrees."""
    phi1, lam1, phi2, lam2 = (math.radians(v) for v in (lat1, lon1, lat2, lon2))
    # Haversine formula.
    sin_half_dphi = math.sin((phi2 - phi1) / 2)
    sin_half_dlam = math.sin((lam2 - lam1) / 2)
    h = (sin_half_dphi ** 2) + (math.cos(phi1) * math.cos(phi2)) * (sin_half_dlam ** 2)
    earth_radius_m = 6371393
    return (2 * math.asin(math.sqrt(h))) * earth_radius_m
def test_create_right_lane_split_first_lane():
    """One right lane splitting into two over s in [10, 20]: verify section
    boundaries, lane counts, roadmark types and width polynomials."""
    lanedef = xodr.LaneDef(10, 20, 1, 2, 1)
    lanes = xodr.create_lanes_merge_split([lanedef], 0, 30, xodr.std_roadmark_solid_solid(), 3, 3)
    # Three sections: before, during and after the split.
    assert (len(lanes.lanesections) == 3)
    assert (lanes.lanesections[0].s == 0)
    assert (lanes.lanesections[1].s == 10)
    assert (lanes.lanesections[2].s == 20)
    # No left lanes anywhere; right side goes 1 -> 2 -> 2 lanes.
    assert (len(lanes.lanesections[0].leftlanes) == 0)
    assert (len(lanes.lanesections[1].leftlanes) == 0)
    assert (len(lanes.lanesections[2].leftlanes) == 0)
    assert (len(lanes.lanesections[0].rightlanes) == 1)
    assert (len(lanes.lanesections[1].rightlanes) == 2)
    assert (len(lanes.lanesections[2].rightlanes) == 2)
    # Section 0: a single solid-marked lane at constant width 3.
    assert (lanes.lanesections[0].rightlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.solid)
    assert (lanes.lanesections[0].rightlanes[0].widths[0].a == 3)
    assert (lanes.lanesections[0].rightlanes[0].widths[0].c == 0)
    # Section 1: the new (inner) lane widens from 0 (non-zero c term).
    assert (lanes.lanesections[1].rightlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.broken)
    assert (lanes.lanesections[1].rightlanes[0].widths[0].a == 0)
    assert (lanes.lanesections[1].rightlanes[0].widths[0].c != 0)
    assert (lanes.lanesections[1].rightlanes[1].roadmark[0].marking_type == xodr.RoadMarkType.solid)
    assert (lanes.lanesections[1].rightlanes[1].widths[0].a == 3)
    assert (lanes.lanesections[1].rightlanes[1].widths[0].c == 0)
    # Section 2: both lanes settled at constant width 3.
    assert (lanes.lanesections[2].rightlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.broken)
    assert (lanes.lanesections[2].rightlanes[1].roadmark[0].marking_type == xodr.RoadMarkType.solid)
    assert (lanes.lanesections[2].rightlanes[0].widths[0].a == 3)
    assert (lanes.lanesections[2].rightlanes[0].widths[0].c == 0)
    assert (lanes.lanesections[2].rightlanes[1].widths[0].a == 3)
    assert (lanes.lanesections[2].rightlanes[1].widths[0].c == 0)
def classification_error(model: nn.Module, X_test, y_test, batch_size=1024, device=None):
    """Top-1 classification error rate of `model` on (X_test, y_test).

    Runs in eval mode without gradients, feeding the inputs through the
    model in chunks of `batch_size`. When `device` is not given it is
    inferred from the model's parameters.
    """
    device = (device or infer_model_device(model))
    with torch.no_grad(), training_mode(model, is_train=False):
        inputs = torch.as_tensor(X_test, device=device)
        logits = check_numpy(process_in_chunks(model, inputs, batch_size=batch_size))
        predictions = np.argmax(logits, axis=1)
        labels = check_numpy(y_test)
        return (labels != predictions).mean()
class Retriever():
    """Look up businesses in a JSON database by facet-value conditions.

    At construction the facet value<->natural-language mapping from the
    config is inverted into self.value2nl_dict.
    """

    def __init__(self, config):
        with open(config.DB_dir, 'r') as f:
            self.businessDB_dict = json.load(f)
        with open(config.value_nl_dict_dir, 'r') as f:
            self.value_nl_dict = json.load(f)
        # Invert {facet: {nl: value}} into {facet: {value: nl}}.
        self.value2nl_dict = {}
        for facet in self.value_nl_dict.keys():
            value2nl = {}
            for (key, value) in self.value_nl_dict[facet].items():
                value2nl[value] = key
            self.value2nl_dict[facet] = value2nl

    def filter_bussiness(self, condition_dict_list):
        """Return ids (ints) of businesses matching ANY condition dict; a
        condition dict matches when ALL its facet values equal the
        business's values.
        """
        # NOTE(review): condition_dict_nl_list is built but never used below —
        # looks like leftover translation/debug code; confirm before removal.
        condition_dict_nl_list = []
        for condition_dict in condition_dict_list:
            condition_dict_nl = {}
            for facet in condition_dict.keys():
                condition_dict_nl[facet] = self.value2nl_dict[facet][condition_dict[facet]]
            condition_dict_nl_list.append(condition_dict_nl)

        def check_satisfy(bussiness_dict):
            # NOTE(review): satisfy_num is taken from the FIRST condition dict
            # but compared against every dict's match count, implicitly
            # assuming all condition dicts have the same number of facets —
            # confirm that invariant holds for callers.
            satisfy_num = len(condition_dict_list[0])
            for condition_dict in condition_dict_list:
                equal_num = 0
                for facet in condition_dict.keys():
                    if (condition_dict[facet] == bussiness_dict[facet]):
                        equal_num += 1
                    else:
                        break
                if (equal_num == satisfy_num):
                    return True
            return False
        bussiness_list = []
        for bussiness_id in self.businessDB_dict.keys():
            if check_satisfy(self.businessDB_dict[bussiness_id]):
                bussiness_list.append(int(bussiness_id))
        return bussiness_list
class DescribeBlock(pytest.Module):
    """Pytest collector that exposes one `describe_*` function as a
    module-like node whose inner functions are collected as tests.

    NOTE(review): from_parent takes `cls` but no @classmethod decorator is
    visible in this chunk — presumably stripped; confirm upstream.
    """

    def from_parent(cls, parent, obj):
        """Create a DescribeBlock for function `obj` under `parent`, using
        the construction API appropriate for the installed pytest version."""
        name = getattr(obj, '_mangled_name', obj.__name__)
        nodeid = ((parent.nodeid + '::') + name)
        if PYTEST_GTE_7_0:
            self = super().from_parent(parent=parent, path=parent.path, nodeid=nodeid)
        elif PYTEST_GTE_5_4:
            self = super().from_parent(parent=parent, fspath=parent.fspath, nodeid=nodeid)
        else:
            self = cls(parent=parent, fspath=parent.fspath, nodeid=nodeid)
        self.name = name
        self.funcobj = obj
        return self

    def collect(self):
        # Register fixtures defined inside the describe block first.
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def _getobj(self):
        return self._importtestmodule()

    def _importtestmodule(self):
        # Execute the describe function's body as a synthetic module and
        # inherit its pytest markers.
        module = make_module_from_function(self.funcobj)
        self.own_markers = getattr(self.funcobj, 'pytestmark', [])
        return module

    def funcnamefilter(self, name):
        # Collect any non-underscore-prefixed name as a test candidate.
        return (not name.startswith('_'))

    def classnamefilter(self, name):
        # Classes inside a describe block are never collected.
        return False

    def __repr__(self):
        return f'<{self.__class__.__name__} {self.name!r}>'
def fancy_time_ax_format(inc):
    """Choose strftime-style tick-label formats for a time axis whose tick
    increment is `inc` seconds.

    Returns (l0_fmt, l0_fmt_brief, l0_center, l1_fmt, l1_trig,
    l2_fmt, l2_trig): the primary label format (plus optional brief variant
    and centering flag) and up to two coarser context-label levels with
    their trigger codes. Relies on module-level sday/smonth/syear for the
    day/month/year thresholds.
    """
    # Defaults: no brief form, no third label level.
    brief = ''
    level2_fmt = ''
    level2_trig = 0
    if inc < 1e-06:
        fmt, centered = '.%n', False
        level1_fmt, level1_trig = '%H:%M:%S', 6
        level2_fmt, level2_trig = '%b %d, %Y', 3
    elif inc < 0.001:
        fmt, centered = '.%u', False
        level1_fmt, level1_trig = '%H:%M:%S', 6
        level2_fmt, level2_trig = '%b %d, %Y', 3
    elif inc < 1:
        fmt, centered = '.%r', False
        level1_fmt, level1_trig = '%H:%M:%S', 6
        level2_fmt, level2_trig = '%b %d, %Y', 3
    elif inc < 60:
        fmt, centered = '%H:%M:%S', False
        level1_fmt, level1_trig = '%b %d, %Y', 3
    elif inc < 3600:
        fmt, centered = '%H:%M', False
        level1_fmt, level1_trig = '%b %d, %Y', 3
    elif inc < sday:
        fmt, centered = '%H:%M', False
        level1_fmt, level1_trig = '%b %d, %Y', 3
    elif inc < smonth:
        fmt, centered = '%a %d', True
        brief = '%d'
        level1_fmt, level1_trig = '%b, %Y', 2
    elif inc < syear:
        fmt, centered = '%b', True
        level1_fmt, level1_trig = '%Y', 1
    else:
        fmt, centered = '%Y', False
        level1_fmt, level1_trig = '', 0
    return (fmt, brief, centered, level1_fmt, level1_trig, level2_fmt, level2_trig)
# NOTE(review): the decorator line below appears truncated — it starts with
# `.parametrize` and has no receiver; presumably the original read
# `@pytest.mark.parametrize(...)`. Confirm against the upstream file.
.parametrize(('use_swaths', 'copy_dst_swath'), [(False, None), (True, None), (True, 'dask'), (True, 'swath_def')])
def test_base_resampler_does_nothing_when_src_and_dst_areas_are_equal(_geos_area, use_swaths, copy_dst_swath):
    """Resampling with equal source/destination geometry must be a no-op
    that returns the input data object itself."""
    src_geom = (_geos_area if (not use_swaths) else _xarray_swath_def_from_area(_geos_area))
    dst_geom = src_geom
    # Optionally build a destination that is equal but not the same object.
    if (copy_dst_swath == 'dask'):
        dst_geom = _xarray_swath_def_from_area(_geos_area)
    elif (copy_dst_swath == 'swath_def'):
        dst_geom = SwathDefinition(dst_geom.lons, dst_geom.lats)
    resampler = BaseResampler(src_geom, dst_geom)
    some_data = xr.DataArray(da.zeros(src_geom.shape, dtype=np.float64), dims=('y', 'x'))
    # Identity check: the very same object must come back unchanged.
    assert (resampler.resample(some_data) is some_data)
class Bottleneck(nn.Module):
    """Group-equivariant (P4M) ResNet bottleneck: 1x1 -> 3x3 -> 1x1
    convolutions with expansion 4 and an identity/projection shortcut.
    """
    expansion = 4  # output channels = expansion * planes

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = P4MConvP4M(in_planes, planes, kernel_size=1, bias=False, batch_norm=True)
        self.conv2 = P4MConvP4M(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, batch_norm=True)
        self.conv3 = P4MConvP4M(planes, (self.expansion * planes), kernel_size=1, bias=False, batch_norm=True)
        # Projection shortcut when spatial size or channel count changes;
        # otherwise identity.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != (self.expansion * planes))):
            self.shortcut = nn.Sequential(P4MConvP4M(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False, batch_norm=True))

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = self.conv3(out)
        # Residual connection, then the final activation.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
def load_txt_info(gt_file, img_info):
    """Parse one ground-truth txt file and attach its annotations to
    `img_info` under the 'anno_info' key.

    Each annotation line is comma-separated with the first four fields
    being x, y, w, h. Lines starting with '[' or 'x' are headers/ignored.
    Returns the updated `img_info`.
    """
    anno_info = []
    with open(gt_file, 'r', encoding='latin1') as f:
        for line in f:
            line = line.strip('\n')
            # Robustness fix: skip blank lines — the original indexed line[0]
            # unconditionally and raised IndexError on an empty line.
            if not line or line[0] in ('[', 'x'):
                continue
            fields = line.split(',')
            x, y, w, h = (int(coord) for coord in fields[0:4])
            bbox = [x, y, w, h]
            # Axis-aligned rectangle expressed as an 8-value polygon.
            segmentation = [x, y, (x + w), y, (x + w), (y + h), x, (y + h)]
            anno = dict(iscrowd=0, category_id=1, bbox=bbox, area=(w * h), segmentation=[segmentation])
            anno_info.append(anno)
    img_info.update(anno_info=anno_info)
    return img_info
def read_resource_database(game: RandovaniaGame, data: dict) -> ResourceDatabase:
    """Build a ResourceDatabase for `game` from its dict representation.

    Requirement templates and damage reductions are filled in after
    construction because they reference the database itself.
    """
    reader = ResourceReader()
    item = read_dict(data['items'], reader.read_item_resource_info)
    event = reader.read_resource_info_array(data['events'], ResourceType.EVENT)
    trick = read_dict(data['tricks'], reader.read_trick_resource_info)
    damage = reader.read_resource_info_array(data['damage'], ResourceType.DAMAGE)
    version = reader.read_resource_info_array(data['versions'], ResourceType.VERSION)
    misc = reader.read_resource_info_array(data['misc'], ResourceType.MISC)
    energy_tank = search.find_resource_info_with_id(item, data['energy_tank_item_index'], ResourceType.ITEM)
    db = ResourceDatabase(game_enum=game, item=item, event=event, trick=trick, damage=damage, version=version, misc=misc, requirement_template={}, damage_reductions={}, energy_tank_item=energy_tank)
    db.requirement_template.update(read_requirement_templates(data['requirement_template'], db))
    db.damage_reductions.update(read_resource_reductions_dict(data['damage_reductions'], db))
    return db
def coherence_limit(nQ=2, T1_list=None, T2_list=None, gatelen=0.1):
    """Coherence-limited average gate error for 1- or 2-qubit RB.

    T1_list/T2_list give per-qubit relaxation/dephasing times (same time
    units as gatelen); when T2_list is None it defaults to 2*T1.
    Raises ValueError for mismatched list lengths or nQ not in {1, 2}.
    """
    t1 = np.array(T1_list)
    t2 = (2 * t1) if T2_list is None else np.array(T2_list)
    if len(t1) != nQ or len(t2) != nQ:
        raise ValueError('T1 and/or T2 not the right length')
    if nQ == 1:
        # Single-qubit depolarizing-limit formula.
        return 0.5 * ((1.0 - ((2.0 / 3.0) * np.exp((- gatelen) / t2[0]))) - ((1.0 / 3.0) * np.exp((- gatelen) / t1[0])))
    if nQ == 2:
        t1_term = 0
        t2_term = 0
        for q in range(2):
            t1_term += (1.0 / 15.0) * np.exp((- gatelen) / t1[q])
            t2_term += (2.0 / 15.0) * (np.exp((- gatelen) / t2[q]) + np.exp((- gatelen) * ((1.0 / t2[q]) + (1.0 / t1[(1 - q)]))))
        # Cross terms involving both qubits simultaneously.
        t1_term += (1.0 / 15.0) * np.exp((- gatelen) * np.sum((1 / t1)))
        t2_term += (4.0 / 15.0) * np.exp((- gatelen) * np.sum((1 / t2)))
        return 0.75 * ((1.0 - t1_term) - t2_term)
    raise ValueError('Not a valid number of qubits')
def convert_path(pathname):
    """Convert a '/'-separated relative path to the local OS convention.

    Returns the input unchanged on '/'-separated systems or for the empty
    string; raises ValueError for absolute paths or a trailing slash.
    '.' components are dropped; an all-dot path collapses to os.curdir.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError(("path '%s' cannot be absolute" % pathname))
    if pathname.endswith('/'):
        raise ValueError(("path '%s' cannot end with '/'" % pathname))
    components = [part for part in pathname.split('/') if part != '.']
    if not components:
        return os.curdir
    return os.path.join(*components)
class CMakeBuild(build_ext):
    """setuptools build_ext command that delegates the native build to CMake."""

    def run(self):
        # Fail early with a clear message if cmake is not on PATH.
        try:
            subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError('CMake is not available.')
        super().run()

    def build_extension(self, ext):
        """Configure and build `ext` with CMake, installing into the
        directory where setuptools expects the extension artifact."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        if (not extdir.endswith(os.path.sep)):
            extdir += os.path.sep
        # The DEBUG env var overrides setuptools' own --debug flag.
        if ('DEBUG' in os.environ):
            cfg = ('Debug' if (os.environ['DEBUG'] == '1') else 'Release')
        else:
            cfg = ('Debug' if self.debug else 'Release')
        # Optional torch integration: point CMake at torch's CMake package.
        if ('USE_TORCH' in os.environ):
            import torch
            CMAKE_PREFIX_PATH = f'-DCMAKE_PREFIX_PATH={torch.utils.cmake_prefix_path}'
            USE_TORCH = '-DUSE_TORCH=ON'
            TORCH_FLAGS = [CMAKE_PREFIX_PATH, USE_TORCH]
        else:
            USE_TORCH = '-DUSE_TORCH=OFF'
            TORCH_FLAGS = [USE_TORCH]
        cmake_args = ([f'-DCMAKE_BUILD_TYPE={cfg}', f'-DCMAKE_INSTALL_PREFIX={extdir}', '-DCMAKE_VERBOSE_MAKEFILE=ON', '-DVELOX_CODEGEN_SUPPORT=OFF', '-DVELOX_BUILD_MINIMAL=ON'] + TORCH_FLAGS)
        build_args = ['--target', 'install']
        # Default to the Ninja generator unless the user pinned one.
        if ('CMAKE_GENERATOR' not in os.environ):
            cmake_args += ['-GNinja']
        # Honor setuptools' --parallel when no env-level override exists.
        if ('CMAKE_BUILD_PARALLEL_LEVEL' not in os.environ):
            if (hasattr(self, 'parallel') and self.parallel):
                build_args += ['-j{}'.format(self.parallel)]
        if (not os.path.exists(self.build_temp)):
            os.makedirs(self.build_temp)
        # Configure, then build + install.
        subprocess.check_call((['cmake', str(ROOT_DIR)] + cmake_args), cwd=self.build_temp)
        subprocess.check_call((['cmake', '--build', '.'] + build_args), cwd=self.build_temp)
class TerminusDeleteWordCommand(sublime_plugin.TextCommand):
    """Delete the word adjacent to the caret in a Terminus terminal view by
    sending the corresponding number of delete/backspace key codes to the
    underlying terminal. Only acts on a single empty selection.
    """

    def run(self, edit, forward=False):
        view = self.view
        terminal = Terminal.from_id(view.id())
        if not terminal:
            return
        # Require exactly one empty selection (a bare caret).
        if len(view.sel()) != 1 or not view.sel()[0].empty():
            return
        pt = view.sel()[0].end()
        line = view.line(pt)
        if forward:
            # Count characters up to the end of the word ahead of the caret.
            text = view.substr(sublime.Region(pt, line.end()))
            match = re.search('(?<=\\w)\\b', text)
            n = match.span()[0] if match else 1
            n = n if n > 0 else 1
            delete_code = get_key_code('delete')
        else:
            # Count characters back to the start of the word behind the caret.
            text = view.substr(sublime.Region(line.begin(), pt))
            matches = list(re.finditer('\\b(?=\\w)', text))
            if matches:
                # Last word-start boundary before the caret.
                n = view.rowcol(pt)[1] - matches[-1].span()[0]
                # BUG FIX: the original evaluated `n if n > 0 else 1` but
                # discarded the result, so n could remain <= 0 here.
                n = n if n > 0 else 1
            else:
                n = 1
            delete_code = get_key_code('backspace')
        self.view.run_command('terminus_show_cursor')
        terminal.send_string(delete_code * n)
class LiterateCryptolLexer(LiterateLexer):
    """Pygments lexer for Literate Cryptol source files, delegating the code
    portions to CryptolLexer via the LiterateLexer machinery.
    """
    name = 'Literate Cryptol'
    aliases = ['literate-cryptol', 'lcryptol', 'lcry']
    filenames = ['*.lcry']
    mimetypes = ['text/x-literate-cryptol']
    # BUG FIX: the original line read `url = '` — an unterminated string
    # literal (syntax error), apparently truncated. Restored to the Cryptol
    # project homepage; confirm against the upstream pygments source.
    url = 'https://www.cryptol.net'
    version_added = '2.0'

    def __init__(self, **options):
        crylexer = CryptolLexer(**options)
        LiterateLexer.__init__(self, crylexer, **options)
class ProjectSavingContext():
    """State bundle used while saving an asset: validates the asset and
    project types up front and exposes the registered savers.
    """

    def __init__(self, asset, gameObject, project, filename=''):
        if (not isinstance(asset, Asset)):
            raise ProjectParseException(f'{type(asset).__name__} does not subclass Asset')
        if (not isinstance(project, Project)):
            # BUG FIX: the message previously said "is not a GameObject",
            # which contradicted the isinstance(project, Project) check.
            raise ProjectParseException(f'{project!r} is not a Project')
        self.asset = asset
        self.gameObject = gameObject
        self.project = project
        self.filename = filename
        # Imported lazily to avoid a circular import at module load time.
        from . import Loader
        self.savers = Loader.savers
class Scaling():
    """ASV-style benchmarks for the pvlib scaling (wavelet variability
    model) functions. `setup` runs before each timed method."""

    def setup(self):
        # Fixed synthetic inputs shared by all timed methods.
        self.n = 1000
        lat = np.array((9.99, 10, 10.01))
        lon = np.array((4.99, 5, 5.01))
        self.coordinates = np.array([(lati, loni) for (lati, loni) in zip(lat, lon)])
        # 1-minute timestamps for the clear-sky index series.
        self.times = pd.date_range('2019-01-01', freq='1T', periods=self.n)
        self.positions = np.array([[0, 0], [100, 0], [100, 100], [0, 100]])
        self.clearsky_index = pd.Series(np.random.rand(self.n), index=self.times)
        self.cloud_speed = 5
        # Powers-of-two timescales for the wavelet decomposition.
        self.tmscales = np.array((1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096))

    def time_latlon_to_xy(self):
        scaling.latlon_to_xy(self.coordinates)

    def time__compute_wavelet(self):
        scaling._compute_wavelet(self.clearsky_index, dt=1)

    def time__compute_vr(self):
        scaling._compute_vr(self.positions, self.cloud_speed, self.tmscales)

    def time_wvm(self):
        scaling.wvm(self.clearsky_index, self.positions, self.cloud_speed, dt=1)
class ResnetCompleteNetworkTest(tf.test.TestCase):
    def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope='resnet_v2_small'):
        """Build a shallow, thin ResNet v2 (4 blocks, tiny depths) so the
        tests run quickly; forwards all options to resnet_v2.resnet_v2."""
        block = resnet_v2.resnet_v2_block
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
    def testClassificationEndPoints(self):
        """Classification head: logits op name, logits shape and the
        presence/shape of the 'predictions' end point."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('predictions' in end_points))
        self.assertListEqual(end_points['predictions'].get_shape().as_list(), [2, 1, 1, num_classes])
    def testClassificationShapes(self):
        """Per-block end point shapes for 224x224 inputs with global pooling."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4], 'resnet/block2': [2, 14, 14, 8], 'resnet/block3': [2, 7, 7, 16], 'resnet/block4': [2, 7, 7, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
    def testFullyConvolutionalEndpointShapes(self):
        """Per-block end point shapes for 321x321 inputs without global
        pooling (fully convolutional mode)."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
    """Skipping the root block leaves the input resolution for block1."""
    num_classes = 10
    images = create_test_input(2, 128, 128, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = self._resnet_small(
            images, num_classes, global_pool=False,
            include_root_block=False, scope='resnet')
    expected_shapes = {
        'resnet/block1': [2, 64, 64, 4],
        'resnet/block2': [2, 32, 32, 8],
        'resnet/block3': [2, 16, 16, 16],
        'resnet/block4': [2, 16, 16, 32],
    }
    for endpoint, shape in expected_shapes.items():
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
    """With output_stride=8, blocks 2-4 keep the stride-8 resolution."""
    num_classes = 10
    images = create_test_input(2, 321, 321, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = self._resnet_small(
            images, num_classes, global_pool=False,
            output_stride=8, scope='resnet')
    expected_shapes = {
        'resnet/block1': [2, 41, 41, 4],
        'resnet/block2': [2, 41, 41, 8],
        'resnet/block3': [2, 41, 41, 16],
        'resnet/block4': [2, 41, 41, 32],
    }
    for endpoint, shape in expected_shapes.items():
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
    """Atrous outputs, subsampled back to the nominal stride, match numerically."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with tf.Graph().as_default():
                with self.test_session() as sess:
                    tf.set_random_seed(0)
                    images = create_test_input(2, 81, 81, 3)
                    output, _ = self._resnet_small(
                        images, None, is_training=False,
                        global_pool=False, output_stride=output_stride)
                    # Subsample the dense atrous output down to the stride
                    # the nominal network would have produced.
                    factor = 1 if output_stride is None else nominal_stride // output_stride
                    output = resnet_utils.subsample(output, factor)
                    # Reuse the same variables for the reference network.
                    tf.get_variable_scope().reuse_variables()
                    expected, _ = self._resnet_small(
                        images, None, is_training=False, global_pool=False)
                    sess.run(tf.global_variables_initializer())
                    self.assertAllClose(output.eval(), expected.eval(),
                                        atol=0.0001, rtol=0.0001)
def testUnknownBatchSize(self):
    """The graph accepts an unknown batch dimension and resolves it at run time."""
    batch = 2
    height, width = 65, 65
    num_classes = 10
    inputs = create_test_input(None, height, width, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        logits, _ = self._resnet_small(
            inputs, num_classes, global_pool=True, scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(), [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {inputs: images.eval()})
        self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
    """Unknown spatial dims resolve at run time for the fully-convolutional net."""
    batch = 2
    height, width = 65, 65
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        output, _ = self._resnet_small(inputs, None, global_pool=False)
    self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(output, {inputs: images.eval()})
        self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    """Unknown spatial dims also work with atrous (output_stride=8) extraction."""
    batch = 2
    height, width = 65, 65
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        output, _ = self._resnet_small(
            inputs, None, global_pool=False, output_stride=8)
    self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(output, {inputs: images.eval()})
        self.assertEqual(output.shape, (batch, 9, 9, 32))
def test_background_check_order(pytester):
    # Generates a feature file, step definitions (conftest), and a test module,
    # then runs pytest in-process and expects exactly one passing test.
    # NOTE(review): inside the generated module the line
    # '("background.feature", "Background steps are executed first")' has no
    # decorator name in front of it -- an '@scenario' prefix appears to have
    # been stripped from this string; confirm against the upstream pytest-bdd
    # test suite before relying on this test.
    pytester.makefile('.feature', background=textwrap.dedent(FEATURE))
    pytester.makeconftest(textwrap.dedent(STEPS))
    pytester.makepyfile(textwrap.dedent(' from pytest_bdd import scenario\n\n ("background.feature", "Background steps are executed first")\n def test_background():\n pass\n\n '))
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)
def scrapping_empresas():
    """Parse the remote EMPRESAS_FILE markdown listing into company dicts.

    Heading levels set the current region (##), state (###) and city (####);
    lines starting with '!' are company entries of the form
    ``![logo](path)|Name|[site](url)``. Returns a list of dicts with keys
    nome, regiao, estado, cidade, site, logo.
    """
    raw = urlopen(EMPRESAS_FILE).read().decode(encoding='utf-8')
    region, state, city = '', '', ''
    companies = []
    for line in raw.split('\n'):
        if line.startswith('## '):
            region = line[2:].strip()
        elif line.startswith('### '):
            state = line[3:].strip()
        elif line.startswith('#### '):
            city = line[4:].strip()
        elif line.startswith('!') and region and state and city:
            parts = line.split('|')
            name = parts[1].strip()
            site = parts[2].split('(')[1].strip().strip(')')
            logo = EMPRESAS_LOGO_PATH + parts[0].split('(')[1].strip().strip(')')
            companies.append({'nome': name, 'regiao': region, 'estado': state,
                              'cidade': city, 'site': site, 'logo': logo})
    return companies
class Solution(object):
    """LeetCode 628: maximum product of three numbers, via a single pass."""

    def maximumProduct(self, nums):
        """Return the largest product of any three numbers in nums.

        The answer is either the product of the three largest values or the
        product of the two smallest (possibly both negative) values with the
        single largest one. O(n) time, O(1) space.
        """
        lo1 = lo2 = float('inf')
        hi1 = hi2 = hi3 = float('-inf')
        for value in nums:
            # Track the two smallest values seen so far.
            if value <= lo1:
                lo1, lo2 = value, lo1
            elif value <= lo2:
                lo2 = value
            # Track the three largest values seen so far.
            if value >= hi1:
                hi1, hi2, hi3 = value, hi1, hi2
            elif value >= hi2:
                hi2, hi3 = value, hi2
            elif value >= hi3:
                hi3 = value
        return max(lo1 * lo2 * hi1, hi1 * hi2 * hi3)
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: depthwise conv -> norm -> pointwise conv."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1,
                 bias=False, norm_layer=None):
        super(SeparableConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.dilation = dilation
        pad = get_padding(kernel_size, stride, dilation)
        # Depthwise step: one filter per input channel (groups == inplanes).
        self.conv_dw = nn.Conv2d(
            inplanes, inplanes, kernel_size, stride=stride, padding=pad,
            dilation=dilation, groups=inplanes, bias=bias)
        # NOTE(review): norm_layer defaults to None, in which case this call
        # fails -- callers are evidently expected to always pass a norm class.
        self.bn = norm_layer(num_features=inplanes)
        # Pointwise step: 1x1 conv that mixes channels to `planes`.
        self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias)

    def forward(self, x):
        out = self.conv_dw(x)
        out = self.bn(out)
        return self.conv_pw(out)
class IDAUp(nn.Module):
    """Iterative deep aggregation: upsample each level and merge it with the
    previous one in place."""

    def __init__(self, in_channels, out_channel, up_f, norm_func):
        super(IDAUp, self).__init__()
        for idx in range(1, len(in_channels)):
            scale = int(up_f[idx])
            # Project the level to the common channel count, then learn the merge.
            proj = DeformConv(in_channels[idx], out_channel, norm_func)
            node = DeformConv(out_channel, out_channel, norm_func)
            # Depthwise transposed conv; _fill_up_weights sets its kernel to a
            # fixed upsampling filter.
            deconv = nn.ConvTranspose2d(out_channel, out_channel,
                                        kernel_size=scale * 2, stride=scale,
                                        padding=scale // 2, output_padding=0,
                                        groups=out_channel, bias=False)
            _fill_up_weights(deconv)
            # Registration order (proj, up, node) kept identical to the
            # original so parameter iteration order is unchanged.
            setattr(self, 'proj_' + str(idx), proj)
            setattr(self, 'up_' + str(idx), deconv)
            setattr(self, 'node_' + str(idx), node)

    def forward(self, layers, startp, endp):
        # Mutates `layers` in place: layers[i] becomes the merge of the
        # upsampled level i with the already-processed level i - 1.
        for i in range(startp + 1, endp):
            offset = i - startp
            project = getattr(self, 'proj_' + str(offset))
            upsample = getattr(self, 'up_' + str(offset))
            merge = getattr(self, 'node_' + str(offset))
            layers[i] = upsample(project(layers[i]))
            layers[i] = merge(layers[i] + layers[i - 1])
class ShieldTimeColumn(GraphColumn):
    """Graph column reporting the fitted ship's shield recharge time, in seconds."""

    name = 'ShieldTime'

    def __init__(self, fittingView, params):
        # NOTE(review): the meaning of 1392 and (3, 0, 0) comes from the
        # GraphColumn base class (icon/position ids, presumably) -- confirm.
        super().__init__(fittingView, 1392, (3, 0, 0))

    def _getValue(self, fit):
        # shieldRechargeRate is divided by 1000 -- presumably milliseconds
        # converted to seconds; the unit label is 's'.
        rate = fit.ship.getModifiedItemAttr('shieldRechargeRate')
        return rate / 1000, 's'

    def _getFitTooltip(self):
        return 'Time to regenerate shield from 0% to 98.7%'
def bind_table(bindtable, row_site, col_site, kf=None):
    """Create bind rules for every (row, col) species pair with a rate entry.

    ``bindtable`` is a header row of column species followed by rows whose
    first element is the row species; remaining cells are None (no rule), a
    rate list passed through to ``bind``, or a single Kd value which -- given
    ``kf`` -- is expanded into the pair ``(kf, kd * kf)``.

    Raises ValueError when any bare Kd appears and ``kf`` was not given.
    """
    row_species = [row[0] for row in bindtable[1:]]
    col_species = bindtable[0]
    rates = [row[1:] for row in bindtable[1:]]
    # A bare number is a Kd and needs kf to derive the reverse rate.
    flat_rates = itertools.chain.from_iterable(rates)
    if kf is None and any(isinstance(x, numbers.Real) for x in flat_rates):
        raise ValueError('must specify kf when using single kd values')
    components = ComponentSet()
    for r, row_sp in enumerate(row_species):
        for c, col_sp in enumerate(col_species):
            entry = rates[r][c]
            if entry is None:
                continue
            if isinstance(entry, numbers.Real):
                entry = (kf, entry * kf)
            components |= bind(row_sp(), row_site, col_sp(), col_site, entry)
    return components
# NOTE(review): this bare '.parametrize(...)' line reads as an attribute
# access with no receiver -- an '@pytest.mark' prefix appears to have been
# stripped from the decorator; confirm against the upstream test file.
.parametrize('cfg_file', ['../configs/textrecog/sar/sar_r31_parallel_decoder_academic.py'])
def test_model_batch_inference_raises_exception_error_aug_test_recog(cfg_file):
    # Build the SAR recognizer from its config and check that batched
    # inference under aug-test raises, for both path and ndarray inputs.
    tmp_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    config_file = os.path.join(tmp_dir, cfg_file)
    model = build_model(config_file)
    with pytest.raises(Exception, match='aug test does not support inference with batch size'):
        # sample_img_path is assigned here and reused in the second block
        # below (valid as long as this assignment runs before the raise).
        sample_img_path = os.path.join(tmp_dir, '../demo/demo_text_det.jpg')
        model_inference(model, [sample_img_path, sample_img_path])
    with pytest.raises(Exception, match='aug test does not support inference with batch size'):
        img = imread(sample_img_path)
        model_inference(model, [img, img])
class UNext(nn.Module):
    """UNeXt-style segmentation network: a convolutional encoder/decoder with
    tokenized (shifted-MLP) blocks at the two lowest resolutions.

    Encoder: three conv stages (16 -> 32 -> 128 channels, each followed by
    BN + ReLU + 2x max-pool), then two patch-embedding + shiftedBlock stages
    (160 and 256 channels). Decoder mirrors this with bilinear upsampling and
    additive skip connections (t1..t4). `self.final` maps to num_classes.

    NOTE(review): several constructor arguments (input_channels, in_chans,
    deep_supervision, num_heads beyond index 0, mlp_ratios, depths beyond the
    dpr schedule, sr_ratios beyond index 0, patch_size) are accepted but not
    used below -- presumably kept for API compatibility with the reference
    implementation; confirm before relying on them.
    """

    def __init__(self, num_classes, input_channels=3, deep_supervision=False, img_size=224, patch_size=16, in_chans=3, embed_dims=[128, 160, 256], num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, depths=[1, 1, 1], sr_ratios=[8, 4, 2, 1], **kwargs):
        super().__init__()
        # Convolutional encoder stem. NOTE(review): the first conv hard-codes
        # 3 input channels regardless of input_channels/in_chans -- confirm.
        self.encoder1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
        self.encoder2 = nn.Conv2d(16, 32, 3, stride=1, padding=1)
        self.encoder3 = nn.Conv2d(32, 128, 3, stride=1, padding=1)
        self.ebn1 = nn.BatchNorm2d(16)
        self.ebn2 = nn.BatchNorm2d(32)
        self.ebn3 = nn.BatchNorm2d(128)
        # Layer norms applied to the token sequences after each MLP stage.
        self.norm3 = norm_layer(embed_dims[1])
        self.norm4 = norm_layer(embed_dims[2])
        self.dnorm3 = norm_layer(160)
        self.dnorm4 = norm_layer(128)
        # Stochastic-depth schedule across the tokenized blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        # Encoder tokenized stages (one shiftedBlock each).
        self.block1 = nn.ModuleList([shiftedBlock(dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, sr_ratio=sr_ratios[0])])
        self.block2 = nn.ModuleList([shiftedBlock(dim=embed_dims[2], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, sr_ratio=sr_ratios[0])])
        # Decoder tokenized stages.
        self.dblock1 = nn.ModuleList([shiftedBlock(dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, sr_ratio=sr_ratios[0])])
        self.dblock2 = nn.ModuleList([shiftedBlock(dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, sr_ratio=sr_ratios[0])])
        # Overlapping patch embeddings that downsample by 2 between stages.
        self.patch_embed3 = OverlapPatchEmbed(img_size=(img_size // 4), patch_size=3, stride=2, in_chans=embed_dims[0], embed_dim=embed_dims[1])
        self.patch_embed4 = OverlapPatchEmbed(img_size=(img_size // 8), patch_size=3, stride=2, in_chans=embed_dims[1], embed_dim=embed_dims[2])
        # Decoder convs, mirroring the encoder channel progression.
        self.decoder1 = nn.Conv2d(256, 160, 3, stride=1, padding=1)
        self.decoder2 = nn.Conv2d(160, 128, 3, stride=1, padding=1)
        self.decoder3 = nn.Conv2d(128, 32, 3, stride=1, padding=1)
        self.decoder4 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
        self.decoder5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
        self.dbn1 = nn.BatchNorm2d(160)
        self.dbn2 = nn.BatchNorm2d(128)
        self.dbn3 = nn.BatchNorm2d(32)
        self.dbn4 = nn.BatchNorm2d(16)
        self.final = nn.Conv2d(16, num_classes, kernel_size=1)
        # NOTE(review): self.soft is defined but not applied in forward().
        self.soft = nn.Softmax(dim=1)

    def forward(self, x):
        B = x.shape[0]
        # --- Conv encoder: each stage halves the spatial resolution. ---
        out = F.relu(F.max_pool2d(self.ebn1(self.encoder1(x)), 2, 2))
        t1 = out  # skip at 1/2 resolution, 16 ch
        out = F.relu(F.max_pool2d(self.ebn2(self.encoder2(out)), 2, 2))
        t2 = out  # skip at 1/4 resolution, 32 ch
        out = F.relu(F.max_pool2d(self.ebn3(self.encoder3(out)), 2, 2))
        t3 = out  # skip at 1/8 resolution, 128 ch
        # --- Tokenized stage 4: embed to tokens, run shifted block, restore map. ---
        (out, H, W) = self.patch_embed3(out)
        for (i, blk) in enumerate(self.block1):
            out = blk(out, H, W)
        out = self.norm3(out)
        out = out.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        t4 = out  # skip at 1/16 resolution, 160 ch
        # --- Tokenized stage 5 (bottleneck). ---
        (out, H, W) = self.patch_embed4(out)
        for (i, blk) in enumerate(self.block2):
            out = blk(out, H, W)
        out = self.norm4(out)
        out = out.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        # --- Decoder: upsample, add skip, run decoder tokenized block. ---
        out = F.relu(F.interpolate(self.dbn1(self.decoder1(out)), scale_factor=(2, 2), mode='bilinear'))
        out = torch.add(out, t4)
        (_, _, H, W) = out.shape
        out = out.flatten(2).transpose(1, 2)  # back to token sequence
        for (i, blk) in enumerate(self.dblock1):
            out = blk(out, H, W)
        out = self.dnorm3(out)
        out = out.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        out = F.relu(F.interpolate(self.dbn2(self.decoder2(out)), scale_factor=(2, 2), mode='bilinear'))
        out = torch.add(out, t3)
        (_, _, H, W) = out.shape
        out = out.flatten(2).transpose(1, 2)
        for (i, blk) in enumerate(self.dblock2):
            out = blk(out, H, W)
        out = self.dnorm4(out)
        out = out.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        # --- Remaining conv decoder stages with skips t2, t1. ---
        out = F.relu(F.interpolate(self.dbn3(self.decoder3(out)), scale_factor=(2, 2), mode='bilinear'))
        out = torch.add(out, t2)
        out = F.relu(F.interpolate(self.dbn4(self.decoder4(out)), scale_factor=(2, 2), mode='bilinear'))
        out = torch.add(out, t1)
        out = F.relu(F.interpolate(self.decoder5(out), scale_factor=(2, 2), mode='bilinear'))
        # Raw per-class logits (softmax is left to the caller/loss).
        return self.final(out)
def eval_loss():
    # NOTE(review): deliberately disabled -- the unconditional raise below
    # makes everything after it unreachable dead code, kept as a sketch.
    # The body relies on names from the enclosing module scope (model,
    # train_loader, device, optimizer, out_channels, batch_size, ds, plt)
    # that are not visible in this block.
    raise NotImplementedError('not finished yet')
    model.eval()
    from utils.viz_utils import show_pred_and_gt
    with torch.no_grad():
        accum_loss = 0.0
        for (sample_id, data) in enumerate(train_loader):
            data = data.to(device)
            gt = data.y.view((- 1), out_channels).to(device)
            # NOTE(review): zero_grad inside torch.no_grad() during eval looks
            # like a leftover from a training loop.
            optimizer.zero_grad()
            out = model(data)
            loss = F.mse_loss(out, gt)
            accum_loss += (batch_size * loss.item())
            print(f'loss for sample {sample_id}: {loss.item():.3f}')
            # Visualize cumulative-sum trajectories for each item in the batch.
            for i in range(gt.size(0)):
                pred_y = out[i].numpy().reshape(((- 1), 2)).cumsum(axis=0)
                y = gt[i].numpy().reshape(((- 1), 2)).cumsum(axis=0)
                show_pred_and_gt(pred_y, y)
                plt.show()
    print(f'eval overall loss: {(accum_loss / len(ds)):.3f}')
# NOTE(review): bare '.parametrize(...)' -- the '@pytest.mark' prefix appears
# to have been stripped from this decorator; 'indirect=True' routes each dict
# through the tsys_zmore fixture, which builds the named system and scales it.
.parametrize('tsys_zmore', [dict(sysname='typem1', K=2.0, atol=0.0015, result=(float('Inf'), (- 120.0007), float('NaN'), 0.5774)), dict(sysname='type0', K=0.8, atol=0.0015, result=(10.0014, float('inf'), 1.7322, float('nan'))), dict(sysname='type0', K=2.0, atol=0.01, result=(4.0, 67.6058, 1.7322, 0.7663)), dict(sysname='type1', K=1.0, atol=0.0001, result=(float('Inf'), 144.9032, float('NaN'), 0.3162)), dict(sysname='type2', K=1.0, atol=0.0001, result=(float('Inf'), 44.4594, float('NaN'), 0.7907)), dict(sysname='type3', K=1.0, atol=0.0015, result=(0.0626, 37.1748, 0.1119, 0.7951)), dict(sysname='example21', K=1.0, atol=0.01, result=(0.01, (- 14.564), 0, 0.0022)), dict(sysname='example21', K=1000.0, atol=0.01, result=(0.1793, 22.5215, 0.0243, 0.063)), dict(sysname='example21', K=5000.0, atol=0.0015, result=(4.5596, 21.2101, 0.4385, 0.1868))], indirect=True)
def test_zmore_margin(tsys_zmore):
    # Check margin() (gain margin, phase margin, and the two crossover
    # frequencies) for the K-scaled system against the fixture's expected
    # tuple; expected values include inf/nan entries, compared with a
    # per-case absolute tolerance.
    res = margin((tsys_zmore['sys'] * tsys_zmore['K']))
    assert_allclose(res, tsys_zmore['result'], atol=tsys_zmore['atol'])
# NOTE(review): these two bare expression lines look like stripped decorators
# -- most likely '@register_specialize' and '@node_rewriter([mul, true_div])'
# from PyTensor's graph-rewriting framework; confirm against the upstream
# math rewrites module.
_specialize
_rewriter([mul, true_div])
def local_mul_pow_to_pow_add(fgraph, node):
    # Rewrite products/quotients of powers that share a base:
    #   x**a * x**b -> x**(a + b)      (mul -> add of exponents)
    #   x**a / x**b -> x**(a - b)      (true_div -> sub of exponents)
    # Group the inputs that are Pow nodes by their base; everything else
    # passes through unchanged in `rest`.
    pow_nodes = defaultdict(list)
    rest = []
    for n in node.inputs:
        if (n.owner and hasattr(n.owner.op, 'scalar_op') and isinstance(n.owner.op.scalar_op, ps.Pow)):
            base_node = n.owner.inputs[0]
            pow_nodes[base_node].append(n)
        else:
            rest.append(n)
    # Only bases appearing in at least two powers can be merged; with no such
    # base the function falls through and returns None (rewrite not applied).
    can_rewrite = [k for (k, v) in pow_nodes.items() if (len(v) >= 2)]
    if (len(can_rewrite) >= 1):
        (orig_op, new_op) = (mul, add)
        if isinstance(node.op.scalar_op, ps.TrueDiv):
            (orig_op, new_op) = (true_div, sub)
        pow_factors = []
        for base in can_rewrite:
            exponents = [n.owner.inputs[1] for n in pow_nodes[base]]
            new_node = (base ** new_op(*exponents))
            # Preserve the original node's output dtype.
            if (new_node.dtype != node.outputs[0].dtype):
                new_node = cast(new_node, dtype=node.outputs[0].dtype)
            pow_factors.append(new_node)
        # Powers whose base occurred only once are kept as-is.
        sole_pows = [v[0] for (k, v) in pow_nodes.items() if (k not in can_rewrite)]
        if ((len(pow_factors) > 1) or (len(sole_pows) > 0) or (len(rest) > 0)):
            new_out = orig_op(*pow_factors, *sole_pows, *rest)
            if (new_out.dtype != node.outputs[0].dtype):
                new_out = cast(new_out, dtype=node.outputs[0].dtype)
        else:
            # Exactly one merged power and nothing else: use it directly.
            new_out = pow_factors[0]
        return [new_out]
class StreamingEpochBatchIterator(EpochBatchIterating):
    """Epoch iterator over a torch IterableDataset, sharded across workers."""

    def __init__(self, dataset, epoch=0, num_shards=1, shard_id=0):
        assert isinstance(dataset, torch.utils.data.IterableDataset)
        self.dataset = dataset
        self.epoch = epoch
        self._current_epoch_iterator = None
        self.num_shards = num_shards
        self.shard_id = shard_id

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Advance the epoch, notify the dataset, and return a fresh iterator."""
        self.epoch += 1
        self.dataset.set_epoch(self.epoch)
        shard = ShardedIterator(
            iterable=self.dataset,
            num_shards=self.num_shards,
            shard_id=self.shard_id,
        )
        self._current_epoch_iterator = CountingIterator(iterable=shard)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        return not self._current_epoch_iterator.has_next()

    def iterations_in_epoch(self) -> int:
        # 0 before the first epoch has been started.
        if self._current_epoch_iterator is None:
            return 0
        return self._current_epoch_iterator.count

    def state_dict(self):
        return {'epoch': self.epoch}

    def load_state_dict(self, state_dict):
        self.epoch = state_dict['epoch']
# NOTE(review): the bare '_operation' line looks like a stripped decorator
# (e.g. '@_operation'); confirm against the upstream module.
_operation
def mtimes_real_complex(a: torch.Tensor, b: torch.Tensor, conj_b=False):
    # Matrix-multiply a real tensor `a` with a complex tensor `b` stored in
    # split format (trailing axis of size 2: [..., 0] real, [..., 1] imag),
    # optionally conjugating `b`.
    # NOTE(review): `is_real` and `complex` here are helpers from the
    # enclosing module (the latter shadows the builtin) -- confirm.
    if is_real(b):
        # `b` must be in the split complex format; a real `b` lacks the
        # trailing real/imag axis the indexing below requires.
        raise ValueError('Incorrect dimensions.')
    if (not conj_b):
        return complex(torch.matmul(a, b[(..., 0)]), torch.matmul(a, b[(..., 1)]))
    if conj_b:
        # conj(b): negate the imaginary component of the product.
        return complex(torch.matmul(a, b[(..., 0)]), (- torch.matmul(a, b[(..., 1)])))
class Balancer(Amm):
    """Balancer-style weighted constant-product AMM."""

    def __init__(self, reserves: list[int], weights: list[float]):
        super().__init__(reserves, weights)

    def conservation_function(self):
        """Invariant C = prod(reserve_i ** weight_i)."""
        invariant = 1
        for qty, weight in zip(self.reserves, self.weights):
            invariant *= qty ** weight
        return invariant

    def spot_price(self, asset_in_ix: int, asset_out_ix: int):
        """Instantaneous price implied by the weighted reserve ratio."""
        numer = self.reserves[asset_in_ix] * self.weights[asset_out_ix]
        denom = self.reserves[asset_out_ix] * self.weights[asset_in_ix]
        return numer / denom

    def _compute_trade_qty_out(self, qty_in: int, asset_in_ix: int, asset_out_ix: int):
        """Post-trade (in, out) reserves implied by the invariant; no mutation."""
        r_in = self.reserves[asset_in_ix]
        r_out = self.reserves[asset_out_ix]
        new_r_in = r_in + qty_in
        exponent = self.weights[asset_in_ix] / self.weights[asset_out_ix]
        new_r_out = r_out * (r_in / new_r_in) ** exponent
        return new_r_in, new_r_out

    def trade(self, qty_in: int, asset_in_ix: int, asset_out_ix: int):
        """Execute the trade in place; return the quantity paid out."""
        r_out_before = self.reserves[asset_out_ix]
        new_r_in, new_r_out = self._compute_trade_qty_out(qty_in, asset_in_ix, asset_out_ix)
        self.reserves[asset_in_ix] = new_r_in
        self.reserves[asset_out_ix] = new_r_out
        return r_out_before - self.reserves[asset_out_ix]

    def slippage(self, qty_in: int, asset_in_ix: int, asset_out_ix: int):
        """Relative deviation of the effective trade price from the spot price."""
        _, new_r_out = self._compute_trade_qty_out(qty_in, asset_in_ix, asset_out_ix)
        qty_out = self.reserves[asset_out_ix] - new_r_out
        spot = self.spot_price(asset_in_ix, asset_out_ix)
        return (qty_in / qty_out) / spot - 1

    def value_pool(self, pct_change: float, asset_in_ix: int, asset_out_ix: int):
        """Pool value after a pct_change move in the out-asset's price."""
        value = self.reserves[asset_in_ix] / self.weights[asset_in_ix]
        return value * (1 + pct_change) ** self.weights[asset_out_ix]
class GCNLayer(nn.Module):
    """Single graph-convolution layer: output = adj @ (input @ W) (+ bias).

    Optionally applies 1-D batch normalization to the output; the forward
    `batch_norm` flag allows skipping it per call even when configured.
    """

    def __init__(self, in_features, out_features, bias=False, batch_norm=False):
        super(GCNLayer, self).__init__()
        weight = torch.Tensor(in_features, out_features)
        self.weight = nn.Parameter(nn.init.xavier_uniform_(weight))
        if bias:
            # BUG FIX: the original applied nn.init.xavier_uniform_ to the
            # 1-D bias tensor, which raises ValueError (xavier init requires
            # a tensor with at least 2 dimensions), so bias=True always
            # crashed. Zero-initialize the bias instead (standard practice).
            self.bias = nn.Parameter(torch.zeros(out_features))
        else:
            self.register_parameter('bias', None)
        self.bn = nn.BatchNorm1d(out_features) if batch_norm else None

    def forward(self, input, adj, batch_norm=True):
        """Propagate node features through the graph.

        Args:
            input: node feature tensor; last dim must equal in_features.
            adj: adjacency (or normalized) matrix multiplied from the left.
            batch_norm: when False, skip BN even if the layer has one.
        """
        support = torch.matmul(input, self.weight)
        output = torch.matmul(adj, support)
        if self.bias is not None:
            output = output + self.bias
        if self.bn is not None and batch_norm:
            output = self.compute_bn(output)
        return output

    def compute_bn(self, x):
        # BatchNorm1d expects (N, C); flatten leading dims for higher ranks.
        if len(x.shape) == 2:
            return self.bn(x)
        return self.bn(x.view(-1, x.size(-1))).view(x.size())
def callback_graph(weights, obj_func_eval):
    """Live-plot the optimizer's objective value after each iteration.

    Appends to the module-level ``objective_func_vals`` history and redraws
    the whole curve; ``clear_output`` (IPython) wipes the previous figure so
    the notebook cell shows a single updating plot. ``weights`` is unused but
    required by the optimizer callback signature.
    """
    clear_output(wait=True)
    objective_func_vals.append(obj_func_eval)
    plt.title('Objective function value against iteration')
    plt.xlabel('Iteration')
    plt.ylabel('Objective function value')
    iterations = range(len(objective_func_vals))
    plt.plot(iterations, objective_func_vals)
    plt.show()
class AverageAttention(nn.Module):
    """Average Attention module: replaces self-attention with a cumulative
    average over the previous positions, followed by a gating layer and an
    optional position-wise feed-forward network.

    Args:
        model_dim: hidden size of the inputs/outputs.
        dropout: dropout rate for the optional FFN.
        aan_useffn: whether to apply a PositionwiseFeedForward to the averages.
    """

    def __init__(self, model_dim, dropout=0.1, aan_useffn=False):
        # FIX: call nn.Module.__init__ before assigning any attributes.
        # The original assigned self.model_dim/self.aan_useffn first, which
        # only works because nn.Module.__setattr__ happens to tolerate plain
        # values on an uninitialized module -- a fragile ordering.
        super(AverageAttention, self).__init__()
        self.model_dim = model_dim
        self.aan_useffn = aan_useffn
        if aan_useffn:
            self.average_layer = PositionwiseFeedForward(model_dim, model_dim, dropout)
        self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)

    def cumulative_average_mask(self, batch_size, inputs_len, device):
        """Build a (batch, len, len) matrix whose row i averages positions 0..i."""
        triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype=torch.float, device=device))
        weights = torch.ones(1, inputs_len, dtype=torch.float, device=device) / torch.arange(
            1, inputs_len + 1, dtype=torch.float, device=device)
        mask = triangle * weights.transpose(0, 1)
        return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)

    def cumulative_average(self, inputs, mask_or_step, layer_cache=None, step=None):
        """Cumulative average, incrementally (decoding) or via a mask (training).

        With a layer_cache, mask_or_step is the current decoding step and the
        running average is updated in cache['prev_g']; otherwise mask_or_step
        is the matrix from cumulative_average_mask.
        """
        if layer_cache is not None:
            step = mask_or_step
            # Running mean: new_avg = (x_t + t * prev_avg) / (t + 1).
            average_attention = (inputs + step * layer_cache['prev_g']) / (step + 1)
            layer_cache['prev_g'] = average_attention
            return average_attention
        mask = mask_or_step
        return torch.matmul(mask.to(inputs.dtype), inputs)

    def forward(self, inputs, mask=None, layer_cache=None, step=None):
        """Return (gated outputs, cumulative averages), both (B, T, D)."""
        batch_size = inputs.size(0)
        inputs_len = inputs.size(1)
        average_outputs = self.cumulative_average(
            inputs,
            self.cumulative_average_mask(batch_size, inputs_len, inputs.device)
            if layer_cache is None else step,
            layer_cache=layer_cache)
        if self.aan_useffn:
            average_outputs = self.average_layer(average_outputs)
        gating_outputs = self.gating_layer(torch.cat((inputs, average_outputs), -1))
        input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
        gating_outputs = (torch.sigmoid(input_gate) * inputs
                          + torch.sigmoid(forget_gate) * average_outputs)
        return gating_outputs, average_outputs
def test_invalid_directjson(tmpdir):
    """A workflow_json that violates the workflow schema must fail validation."""
    # Loading a known-good workflow first ensures the loader itself works.
    yadage.workflow_loader.workflow('workflow.yml', 'tests/testspecs/local-helloworld')
    workdir = os.path.join(str(tmpdir), 'workdir')
    with pytest.raises(jsonschema.exceptions.ValidationError):
        YadageSteering.create(dataarg='local:' + workdir,
                              workflow_json={'invalid': 'data'})
class TestEstCommonCoord(object):
def setup_method(self):
self.res = 1.0
self.long_coord = np.arange(0, 360, self.res)
self.short_coord = np.arange(10, 350, (10.0 * self.res))
return
def teardown_method(self):
del self.long_coord, self.short_coord, self.res
return
def test_establish_common_coord_overlap(self):
out = coords.establish_common_coord([self.long_coord, self.short_coord])
out_res = np.unique((out[1:] - out[:(- 1)]))
assert (self.short_coord.min() == out.min()), 'unexpected minimum value'
assert (self.short_coord.max() == out.max()), 'unexpected maximum value'
assert (len(out_res) == 1), 'inconsistend coordinate resolution'
assert (out_res[0] == self.res), 'unexpected coordinate resolution'
return
def test_establish_common_coord_max_range(self):
out = coords.establish_common_coord([self.short_coord, self.long_coord], common=False)
out_res = np.unique((out[1:] - out[:(- 1)]))
assert (self.long_coord.min() == out.min()), 'unexpected minimum value'
assert (self.long_coord.max() == out.max()), 'unexpected maximum value'
assert (len(out_res) == 1), 'inconsistend coordinate resolution'
assert (out_res[0] == self.res), 'unexpected coordinate resolution'
return
def test_establish_common_coord_single_val(self):
out = coords.establish_common_coord([self.short_coord[0], self.long_coord], common=False)
out_res = np.unique((out[1:] - out[:(- 1)]))
assert (self.long_coord.min() == out.min()), 'unexpected minimum value'
assert (self.long_coord.max() == out.max()), 'unexpected maximum value'
assert (len(out_res) == 1), 'inconsistend coordinate resolution'
assert (out_res[0] == self.res), 'unexpected coordinate resolution'
return
def test_establish_common_coord_single_val_only(self):
out = coords.establish_common_coord([self.short_coord[0]])
assert (self.short_coord[0] == out[0]), 'unexpected value'
assert (len(out) == 1), 'unexpected coordinate length'
return |
class Jacobian():
    """Computes Jacobians of pybamm symbol trees, memoizing per-symbol results.

    Parameters
    ----------
    known_jacs : dict, optional
        Pre-seeded mapping of symbol -> jacobian.
    clear_domain : bool, optional
        Whether to wipe domain information from each newly computed jacobian.
    """

    def __init__(self, known_jacs=None, clear_domain=True):
        self._known_jacs = known_jacs or {}
        self._clear_domain = clear_domain

    def jac(self, symbol, variable):
        """Return d(symbol)/d(variable), consulting the memo cache first."""
        if symbol in self._known_jacs:
            return self._known_jacs[symbol]
        result = self._jac(symbol, variable)
        self._known_jacs[symbol] = result
        return result

    def _jac(self, symbol, variable):
        """Dispatch on the symbol's node type and differentiate it recursively."""
        if isinstance(symbol, pybamm.BinaryOperator):
            left, right = symbol.children
            result = symbol._binary_jac(self.jac(left, variable),
                                        self.jac(right, variable))
        elif isinstance(symbol, pybamm.UnaryOperator):
            result = symbol._unary_jac(self.jac(symbol.child, variable))
        elif isinstance(symbol, pybamm.Function):
            child_jacs = [self.jac(child, variable) for child in symbol.children]
            result = symbol._function_jac(child_jacs)
        elif isinstance(symbol, pybamm.Concatenation):
            child_jacs = [self.jac(child, variable) for child in symbol.children]
            # A single child needs no concatenation wrapper.
            if len(child_jacs) == 1:
                result = child_jacs[0]
            else:
                result = symbol._concatenation_jac(child_jacs)
        else:
            # Leaves (variables, parameters, ...) implement their own _jac.
            try:
                result = symbol._jac(variable)
            except NotImplementedError:
                raise NotImplementedError(
                    f"Cannot calculate Jacobian of symbol of type '{type(symbol)}'")
        if self._clear_domain:
            result.clear_domains()
        return result
class CassandraDatabaseCreation(BaseDatabaseCreation):
    # Django test-database creation backend for Cassandra: builds a test
    # keyspace, points every model at it, syncs the schema, and tears it
    # down afterwards.

    def create_test_db(self, verbosity=1, autoclobber=False, **kwargs):
        # Create the test keyspace and switch the connection/models onto it.
        # Returns the test keyspace name so Django can restore settings later.
        from django.conf import settings
        from django.core.management import call_command
        self.connection.connect()
        default_alias = get_default_cassandra_connection()[0]
        self.connection.connection.keyspace = self.connection.settings_dict['NAME']
        test_database_name = self._get_test_db_name()
        # Point every introspected model at the test keyspace.
        self.set_models_keyspace(test_database_name)
        if (verbosity >= 1):
            test_db_repr = ''
            if (verbosity >= 2):
                test_db_repr = (" ('%s')" % test_database_name)
            logger.info("Creating test database for alias '%s'%s...", self.connection.alias, test_db_repr)
        options = self.connection.settings_dict.get('OPTIONS', {})
        # Keep a copy of the user's connection options so the original
        # schema_metadata_enabled value can be restored at the end.
        connection_options_copy = options.get('connection', {}).copy()
        if (not connection_options_copy.get('schema_metadata_enabled', True)):
            # Schema metadata must be on while creating keyspace/tables.
            options['connection']['schema_metadata_enabled'] = True
            self.connection.reconnect()
            set_default_connection(default_alias)
        replication_opts = options.get('replication', {})
        replication_factor = replication_opts.pop('replication_factor', 1)
        create_keyspace_simple(test_database_name, replication_factor, connections=[self.connection.alias])
        # Rename both the live settings and this connection to the test DB.
        settings.DATABASES[self.connection.alias]['NAME'] = test_database_name
        self.connection.settings_dict['NAME'] = test_database_name
        self.connection.reconnect()
        set_default_connection(default_alias)
        # Create all model tables inside the new keyspace.
        call_command('sync_cassandra', verbosity=max((verbosity - 1), 0), database=self.connection.alias)
        if (not connection_options_copy.get('schema_metadata_enabled', True)):
            # Restore the user's original schema-metadata preference.
            logger.info('Disabling metadata on %s', self.connection.settings_dict['NAME'])
            options['connection']['schema_metadata_enabled'] = connection_options_copy['schema_metadata_enabled']
            self.connection.reconnect()
            set_default_connection(default_alias)
        return test_database_name

    def _destroy_test_db(self, test_database_name, verbosity=1, **kwargs):
        # Drop the test keyspace created by create_test_db.
        drop_keyspace(test_database_name, connections=[self.connection.alias])

    def set_models_keyspace(self, keyspace):
        # Rebind every known CQL model's __keyspace__ to the given keyspace.
        for models in self.connection.introspection.cql_models.values():
            for model in models:
                model.__keyspace__ = keyspace
def constant_fold_binary_op_extended(op: str, left: ConstantValue, right: ConstantValue) -> (ConstantValue | None):
    """Constant-fold ``left op right``, extending the base folder with bytes.

    Delegates to constant_fold_binary_op when neither operand is bytes;
    handles bytes concatenation and bytes/int repetition here. Returns None
    for combinations that cannot be folded.
    """
    left_is_bytes = isinstance(left, bytes)
    right_is_bytes = isinstance(right, bytes)
    if not left_is_bytes and not right_is_bytes:
        return constant_fold_binary_op(op, left, right)
    if op == '+' and left_is_bytes and right_is_bytes:
        return left + right
    if op == '*' and left_is_bytes and isinstance(right, int):
        return left * right
    if op == '*' and isinstance(left, int) and right_is_bytes:
        return left * right
    return None
def create_bases(model, kws=None, gpu=True):
    """Create two random direction bases derived from the model's weights.

    Each basis is filter-normalized against a deep copy of the current state
    dict, then batch-norm entries and any keyword-matched entries are zeroed
    out (ignore_bn / ignore_kw).
    """
    if kws is None:
        kws = []
    anchor = copy.deepcopy(model.state_dict())
    directions = [rand_basis(anchor, gpu) for _ in range(2)]
    directions = [normalize_filter(d, anchor) for d in directions]
    directions = [ignore_bn(d) for d in directions]
    directions = [ignore_kw(d, kws) for d in directions]
    return directions
class AuxiliaryHead(nn.Module):
    """Auxiliary classifier head: pooled conv features -> linear classifier.

    The 5x5/stride-3 average pool followed by a 2x2 conv assumes an input
    spatial size that reduces to 1x1 (e.g. an 8x8 feature map).
    """

    def __init__(self, C, num_classes):
        super(AuxiliaryHead, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), (- 1))
        return self.classifier(flat)
class TrainDataset(Dataset):
    """CompWebQ training set: serializes KG tuples + question into seq2seq pairs.

    Built examples are cached on disk as 'compwebq_train.cache' under
    cache_root when args.dataset.use_cache is set.
    """

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'compwebq_train.cache')
        if os.path.exists(cache_path) and args.dataset.use_cache:
            # Cache hit: load the fully prepared examples.
            self.data = torch.load(cache_path)
            return
        self.data = []
        for raw_data in tqdm(self.raw_datasets):
            item = deepcopy(raw_data)
            question, serialized_kg = kgqa_get_input(item['question'], item['kg_tuples'])
            item.update({
                'struct_in': serialized_kg,
                'text_in': question,
                'seq_out': ', '.join(item['answers']),
            })
            self.data.append(item)
        if args.dataset.use_cache:
            torch.save(self.data, cache_path)

    def __getitem__(self, index) -> T_co:
        return self.data[index]

    def __len__(self):
        return len(self.data)
def get_files(**kwargs):
    """Build the file set for a new Python project template (hatch layout).

    Expects kwargs: year, author, email, package_name, project_name,
    project_name_normalized. Returns a list of File objects covering dual
    Apache-2.0/MIT licensing (SPDX headers), package sources, tests,
    README.md and pyproject.toml.

    NOTE(review): the f-string templates below are reproduced verbatim --
    their exact bytes (including blank-looking lines and the truncated
    markdown link stubs like '- [Apache-2.0](') are part of the emitted
    template content; some links appear cut off in this copy, so confirm
    against the upstream template before changing them.
    """
    return [File(Path('LICENSES', 'Apache-2.0.txt'), Apache_2_0), File(Path('LICENSES', 'MIT.txt'), MIT.replace('<year>', f"{kwargs['year']}-present", 1).replace('<copyright holders>', f"{kwargs['author']} <{kwargs['email']}>", 1)), File(Path('src', kwargs['package_name'], '__init__.py'), f'''# SPDX-FileCopyrightText: {kwargs['year']}-present {kwargs['author']} <{kwargs['email']}>
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
'''), File(Path('src', kwargs['package_name'], '__about__.py'), f'''# SPDX-FileCopyrightText: {kwargs['year']}-present {kwargs['author']} <{kwargs['email']}>
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
__version__ = "0.0.1"
'''), File(Path('tests', '__init__.py'), f'''# SPDX-FileCopyrightText: {kwargs['year']}-present {kwargs['author']} <{kwargs['email']}>
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
'''), File(Path('README.md'), f'''# {kwargs['project_name']}
[
- [License](#license)
## Installation
```console
pip install {kwargs['project_name_normalized']}
```
## License
`{kwargs['project_name_normalized']}` is distributed under the terms of any of the following licenses:
- [Apache-2.0](
- [MIT](
'''), File(Path('pyproject.toml'), f'''[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "{kwargs['project_name_normalized']}"
dynamic = ["version"]
description = ''
readme = "README.md"
requires-python = ">=3.8"
license = "Apache-2.0 OR MIT"
license-files = {{ globs = ["LICENSES/*"] }}
keywords = []
authors = [
  {{ name = "{kwargs['author']}", email = "{kwargs['email']}" }},
]
classifiers = [
  "Development Status :: 4 - Beta",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3.8",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = []
[project.urls]
Documentation = "
Issues = "
Source = "
[tool.hatch.version]
path = "src/{kwargs['package_name']}/__about__.py"
[tool.hatch.envs.default]
dependencies = [
  "coverage[toml]>=6.5",
  "pytest",
]
[tool.hatch.envs.default.scripts]
test = "pytest {{args:tests}}"
test-cov = "coverage run -m pytest {{args:tests}}"
cov-report = [
  "- coverage combine",
  "coverage report",
]
cov = [
  "test-cov",
  "cov-report",
]
[[tool.hatch.envs.all.matrix]]
python = ["3.8", "3.9", "3.10", "3.11", "3.12"]
[tool.hatch.envs.types]
dependencies = [
  "mypy>=1.0.0",
]
[tool.hatch.envs.types.scripts]
check = "mypy --install-types --non-interactive {{args:src/{kwargs['package_name']} tests}}"
[tool.coverage.run]
source_pkgs = ["{kwargs['package_name']}", "tests"]
branch = true
parallel = true
omit = [
  "src/{kwargs['package_name']}/__about__.py",
]
[tool.coverage.paths]
{kwargs['package_name']} = ["src/{kwargs['package_name']}", "*/{kwargs['project_name_normalized']}/src/{kwargs['package_name']}"]
tests = ["tests", "*/{kwargs['project_name_normalized']}/tests"]
[tool.coverage.report]
exclude_lines = [
  "no cov",
  "if __name__ == .__main__.:",
  "if TYPE_CHECKING:",
]
''')]
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert an original CLAP checkpoint into a transformers ``ClapModel``.

    Loads the source checkpoint, renames its weights to the transformers
    naming scheme, and writes both the converted model and its config to
    *pytorch_dump_folder_path*.

    NOTE(review): *config_path* is accepted but never read here — presumably
    kept for CLI compatibility; confirm against the caller.
    """
    # Load the original model and put it in eval mode before exporting weights.
    clap_model, _clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()

    # Translate the original parameter names into the transformers scheme.
    converted_weights = rename_state_dict(clap_model.state_dict())

    # Fresh config; only the fusion flag is carried over from the arguments.
    hf_config = ClapConfig()
    hf_config.audio_config.enable_fusion = enable_fusion

    hf_model = ClapModel(hf_config)
    # strict=False: tolerate keys present in one state dict but not the other.
    hf_model.load_state_dict(converted_weights, strict=False)

    hf_model.save_pretrained(pytorch_dump_folder_path)
    hf_config.save_pretrained(pytorch_dump_folder_path)
def configure_environment(config_filename, environment):
    """Load pySMT settings from the INI-style file *config_filename* into *environment*.

    The file may contain:
      * ``[SmtLibSolver <name>]`` sections, each defining a generic SMT-LIB
        solver via a ``command`` line and a whitespace-separated ``logics``
        list; these are registered on ``environment.factory``.
      * a ``[global]`` section with ``use_infix_notation`` (true/false) and
        ``solver_preference_list`` (whitespace-separated solver names).

    Malformed entries are reported with ``warn`` and skipped.

    :raises PysmtIOError: if *config_filename* does not exist.
    """
    factory = environment.factory
    if not os.path.exists(config_filename):
        raise PysmtIOError("File '%s' does not exist." % config_filename)
    config = cp.RawConfigParser()
    config.read(config_filename)

    # Register every "[SmtLibSolver <name>]" section as a generic solver.
    new_solvers_sections = [s for s in config.sections()
                            if s.lower().startswith('smtlibsolver ')]
    for s in new_solvers_sections:
        name = s[len('smtlibsolver '):]
        cmd = config.get(s, 'command')
        # BUG FIX: message previously read "definitionof" (missing space).
        assert cmd is not None, \
            "Missing 'command' value in definition of '%s' solver" % name
        logics_string = config.get(s, 'logics')
        if logics_string is None:
            warn("Missing 'logics' value in definition of '%s' solver" % name,
                 stacklevel=2)
            continue
        logics = [get_logic_by_name(l) for l in logics_string.split()]
        factory.add_generic_solver(name, cmd.split(), logics)

    if 'global' in config.sections():
        infix = config.get('global', 'use_infix_notation')
        pref_list = config.get('global', 'solver_preference_list')
        if infix is not None:
            if infix.lower() == 'true':
                environment.enable_infix_notation = True
            elif infix.lower() == 'false':
                # BUG FIX: previously assigned True in this branch as well,
                # making it impossible to disable infix notation.
                environment.enable_infix_notation = False
            else:
                warn("Unknown value for 'use_infix_notation': %s" % infix,
                     stacklevel=2)
        if pref_list is not None:
            prefs = pref_list.split()
            # Warn about names that do not correspond to a known solver...
            for s in prefs:
                if s not in factory.all_solvers():
                    warn("Unknown solver '%s' in solver_preference_list" % s,
                         stacklevel=2)
            # ...and about known solvers that the preference list omits.
            for s in factory.all_solvers():
                if s not in prefs:
                    warn("Solver '%s' is not in the preference list, and will be disabled." % s,
                         stacklevel=2)
            factory.set_solver_preference_list(prefs)
def refers_to_fullname(node: Expression, fullnames: (str | tuple[(str, ...)])) -> bool:
    """Report whether *node* is a reference to one of the given fully
    qualified names, following through type aliases.

    A single name is treated as a one-element tuple of candidates.
    """
    targets = fullnames if isinstance(fullnames, tuple) else (fullnames,)
    if not isinstance(node, RefExpr):
        return False
    if node.fullname in targets:
        return True
    # A reference to a type alias counts if the alias target matches.
    aliased = node.node
    if isinstance(aliased, TypeAlias):
        return is_named_instance(aliased.target, targets)
    return False
def test_unknown(hatch, helpers, path_append, mocker):
    """Unknown distribution names must abort without installing or touching PATH."""
    # Patch the real installer so we can prove it was never invoked.
    install_spy = mocker.patch('hatch.python.core.PythonManager.install')

    result = hatch('python', 'install', 'foo', 'bar')

    assert result.exit_code == 1, result.output
    expected_output = helpers.dedent('\n Unknown distributions: foo, bar\n ')
    assert result.output == expected_output
    # No installation and no PATH mutation should have happened.
    install_spy.assert_not_called()
    path_append.assert_not_called()
def get_resnet_v1_d_base(input_x, freeze_norm, scope='resnet50_v1d',
                         bottleneck_nums=(3, 4, 6, 3),
                         base_channels=(64, 128, 256, 512),
                         freeze=(True, False, False, False, False),
                         is_training=True):
    """Build a ResNet-v1d backbone and return its final feature map.

    BUG FIX: the list defaults were replaced with tuples — mutable default
    arguments are shared across calls; the tuples are read-only here, so the
    change is backward compatible.

    :param input_x: input image tensor.
    :param freeze_norm: whether normalization layers are frozen.
    :param scope: variable scope name for the whole backbone.
    :param bottleneck_nums: number of bottleneck units per residual stage.
    :param base_channels: base channel count per residual stage
        (must have the same length as *bottleneck_nums*).
    :param freeze: per-stage freeze flags — one entry for the stem plus one
        per residual stage (``len(freeze) == len(bottleneck_nums) + 1``).
    :param is_training: global training flag; a stage only trains when it is
        both unfrozen and *is_training* is True.
    :returns: ``(net, feature_dict)`` — the last feature map and a dict
        mapping stage names ``'C1'..'C%d'`` to their outputs.
    """
    assert len(bottleneck_nums) == len(base_channels), \
        'bottleneck num should same as base_channels size'
    assert len(freeze) == len(bottleneck_nums) + 1, \
        'should satisfy:: len(freeze) == len(bottleneck_nums) + 1'
    feature_dict = {}
    with tf.variable_scope(scope):
        # Stem (C1): stacked 3x3 convolutions; frozen according to freeze[0].
        with slim.arg_scope(resnet_arg_scope(is_training=(not freeze[0]) and is_training,
                                             freeze_norm=freeze_norm)):
            net = stem_stack_3x3(net=input_x, input_channel=32, scope='C1')
            feature_dict['C1'] = net
        # Residual stages C2..C{n+1}; C2 keeps spatial size, later stages downsample.
        for i in range(2, len(bottleneck_nums) + 2):
            spatial_downsample = i != 2
            with slim.arg_scope(resnet_arg_scope(is_training=(not freeze[i - 1]) and is_training,
                                                 freeze_norm=freeze_norm)):
                net = make_block(net=net,
                                 base_channel=base_channels[i - 2],
                                 bottleneck_nums=bottleneck_nums[i - 2],
                                 scope='C%d' % i,
                                 avg_down=True,
                                 spatial_downsample=spatial_downsample)
                feature_dict['C%d' % i] = net
    return net, feature_dict
class Effect1049(BaseEffect):
    # Minmatar Cruiser skill bonus: boosts the range of fitted shield
    # emission modules (e.g. remote shield boosters).
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Only modules requiring Shield Emission Systems are affected.
        uses_shield_emission = lambda mod: mod.item.requiresSkill('Shield Emission Systems')
        range_bonus = src.getModifiedItemAttr('shipBonusMC2')
        fit.modules.filteredItemBoost(uses_shield_emission, 'maxRange', range_bonus,
                                      skill='Minmatar Cruiser', **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.