code stringlengths 281 23.7M |
|---|
class Session(object):
    """Thin wrapper around a ``requests`` session carrying bearer-token headers."""

    def __init__(self):
        # Copy the module-level defaults: the original aliased HEADERS, so
        # _set_auth_headers mutated the shared dict across all Session
        # instances (one session's token leaked into every other).
        self._headers = dict(HEADERS)
        self._session = requests.sessions.Session()

    def _set_auth_headers(self, access_token=''):
        # Install (or overwrite) the OAuth bearer token for later requests.
        self._headers['Authorization'] = 'Bearer {}'.format(access_token)

    def _get(self, url):
        """GET ``url`` with the current headers; returns the Response."""
        return self._session.get(url, headers=self._headers)

    def _post(self, url, data):
        """POST ``data`` as JSON to ``url``; returns the Response."""
        return self._session.post(url, json=data, headers=self._headers)

    def terminate(self):
        # Release pooled connections instead of leaking them (was a no-op).
        self._session.close()
def test_TVRegDiff():
    """Compare three derivative estimators on noisy sin(x) and save a figure.

    The signal is sin(x) plus Gaussian noise (5% of std(x)); the analytic
    derivative cos(x) serves as ground truth for TVRegDiff, SINDy finite
    differences, and PolyDiff. The 2x2 figure is saved to ../results/2.jpg.
    """
    n = 800
    x = np.linspace((- 10), 10, n)
    print(np.std(x))
    # Fixed seed keeps the noise (and hence the saved figure) reproducible.
    np.random.seed(1)
    noise = (np.random.normal(0, np.std(x), x.shape) * 0.05)
    y_clean = np.sin(x)
    # Analytic derivative of sin(x) -- the ground truth in every panel.
    y_grad = np.cos(x)
    y_noise = (y_clean + noise)
    dx = (x[1] - x[0])
    # Samples trimmed from both ends when plotting, hiding boundary artifacts
    # (PolyDiff below uses this same width and returns interior points only).
    width = 60
    fig = plt.figure(figsize=(7, 7))
    # Panel 1: the raw clean vs noisy data.
    fig.add_subplot(2, 2, 1)
    plt.plot(x[width:(- width)], y_noise[width:(- width)], label='Noisy data')
    plt.plot(x[width:(- width)], y_clean[width:(- width)], label='Clean data')
    plt.legend(fontsize=10, loc=1)
    plt.xlabel('t', fontsize=12)
    plt.ylabel('x', fontsize=12)
    plt.title('Clean and noisy data')
    # Panel 2: total-variation regularized derivative.
    u1a = TVRegDiff(y_noise, 1, 0.5, dx=dx, plotflag=False, diffkernel='sq')
    fig.add_subplot(2, 2, 2)
    plt.plot(x[width:(- width)], y_grad[width:(- width)], label='Ground truth')
    plt.plot(x[width:(- width)], u1a[width:(- width)], label='Prediction')
    plt.legend(fontsize=10, loc=1)
    plt.title('TVRegDiff')
    plt.xlabel('t', fontsize=12)
    plt.ylabel('dx/dt', fontsize=12)
    # Panel 3: first-order finite differences via pysindy.
    grad_f = ps.SINDyDerivative(kind='finite_difference', k=1)
    x_dot = grad_f(y_noise, x)
    # NOTE(review): x_2dot is computed but never used below -- confirm intent.
    x_2dot = grad_f(x_dot, x)
    fig.add_subplot(2, 2, 3)
    plt.plot(x[width:(- width)], y_grad[width:(- width)], label='Ground truth')
    plt.plot(x[width:(- width)], x_dot[width:(- width)], label='Prediction')
    plt.legend(fontsize=10, loc=1)
    plt.xlabel('t', fontsize=12)
    plt.ylabel('dx/dt', fontsize=12)
    plt.title('FiniteDiff')
    # Panel 4: local polynomial fit. PolyDiff already returns only the
    # interior points, so its prediction is plotted without extra trimming.
    (_, x_dot) = PolyDiff(y_noise, x, width=width, deg=3)
    fig.add_subplot(2, 2, 4)
    plt.plot(x[width:(- width)], y_grad[width:(- width)], label='Ground truth')
    plt.plot(x[width:(- width)], x_dot, label='Prediction')
    plt.legend(fontsize=10, loc=1)
    plt.title('PolyDiff')
    plt.xlabel('t', fontsize=12)
    plt.ylabel('dx/dt', fontsize=12)
    plt.tight_layout()
    plt.savefig('../results/2.jpg', dpi=300)
class TestDistributedTimeoutWrapper(unittest.TestCase):
    """Exercise DistributedTimeoutWrapper's watchdog behaviour."""

    def setUp(self):
        # Silence log output while the tests run.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_no_timeout(self):
        """A zero timeout disables the watchdog entirely."""
        wrapped = DistributedTimeoutWrapper(ModuleWithDelay(1), 0, signal.SIGINT)
        wrapped(torch.rand(5))
        wrapped.stop_timeout()

    def test_timeout_safe(self):
        """A generous timeout lets a short forward pass finish untouched."""
        wrapped = DistributedTimeoutWrapper(ModuleWithDelay(1), 10, signal.SIGINT)
        wrapped(torch.rand(5))
        wrapped.stop_timeout()

    def test_timeout_killed(self):
        """A forward pass that outlives the timeout gets interrupted."""
        with self.assertRaises(KeyboardInterrupt):
            wrapped = DistributedTimeoutWrapper(ModuleWithDelay(5), 1, signal.SIGINT)
            wrapped(torch.rand(5))
            wrapped.stop_timeout()
class CommaSeparatedUUIDField(Field):
    """Field holding a list of UUIDs, serialized as CSV before schema 2.5."""

    version = LooseVersion('2.4')

    def deserialize(self, value):
        """Turn a CSV string (or iterable) of UUID strings into a DirtyableList."""
        if not value:
            return DirtyableList([])
        # A raw CSV string first gets split into its components.
        if hasattr(value, 'split'):
            value = value.split(',')
        return DirtyableList([uuid.UUID(item) for item in value])

    def serialize(self, value):
        """Render the UUID list as CSV (pre-2.5) or a list of strings."""
        value = value or []
        if not hasattr(value, '__iter__'):
            raise ValueError('Value must be list or tuple, not %r.' % value)
        as_strings = [str(item) for item in value]
        if self.version < LooseVersion('2.5'):
            return ','.join(as_strings)
        return as_strings
class TestTransportMode():
    """r5py.TransportMode: mapping to R5 Java enums, parsing, and addition.

    NOTE(review): the parametrize decorators below had been garbled to bare
    `.parametrize(...)` lines; restored to `@pytest.mark.parametrize`.
    """

    @pytest.mark.parametrize(['enum_member', 'enum_name', 'java_type'], [(r5py.TransportMode.AIR, 'AIR', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.BUS, 'BUS', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.CABLE_CAR, 'CABLE_CAR', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.FERRY, 'FERRY', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.FUNICULAR, 'FUNICULAR', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.RAIL, 'RAIL', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.SUBWAY, 'SUBWAY', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.TRAM, 'TRAM', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.TRANSIT, 'TRANSIT', com.conveyal.r5.api.util.TransitModes), (r5py.TransportMode.BICYCLE, 'BICYCLE', com.conveyal.r5.profile.StreetMode), (r5py.TransportMode.CAR, 'CAR', com.conveyal.r5.profile.StreetMode), (r5py.TransportMode.WALK, 'WALK', com.conveyal.r5.profile.StreetMode), (r5py.TransportMode.BICYCLE, 'BICYCLE', com.conveyal.r5.api.util.LegMode), (r5py.TransportMode.CAR, 'CAR', com.conveyal.r5.api.util.LegMode), (r5py.TransportMode.WALK, 'WALK', com.conveyal.r5.api.util.LegMode), (r5py.TransportMode.BICYCLE_RENT, 'BICYCLE_RENT', com.conveyal.r5.api.util.LegMode), (r5py.TransportMode.CAR_PARK, 'CAR_PARK', com.conveyal.r5.api.util.LegMode)])
    def test_transportmode(self, enum_member, enum_name, java_type):
        """Each member's name must match the corresponding Java enum constant."""
        assert (enum_member.name == enum_name)
        assert (enum_member.name == java_type.valueOf(enum_name).name())

    @pytest.mark.parametrize(['str_transport_mode', 'expected_transport_mode'], [('AIR', r5py.TransportMode.AIR), ('BUS', r5py.TransportMode.BUS), ('CABLE_CAR', r5py.TransportMode.CABLE_CAR), ('FERRY', r5py.TransportMode.FERRY), ('FUNICULAR', r5py.TransportMode.FUNICULAR), ('GONDOLA', r5py.TransportMode.GONDOLA), ('RAIL', r5py.TransportMode.RAIL), ('SUBWAY', r5py.TransportMode.SUBWAY), ('TRAM', r5py.TransportMode.TRAM), ('TRANSIT', r5py.TransportMode.TRANSIT), ('BICYCLE', r5py.TransportMode.BICYCLE), ('CAR', r5py.TransportMode.CAR), ('WALK', r5py.TransportMode.WALK), ('BICYCLE_RENT', r5py.TransportMode.BICYCLE_RENT), ('CAR_PARK', r5py.TransportMode.CAR_PARK), ('air', r5py.TransportMode.AIR), ('bus', r5py.TransportMode.BUS), ('cable_car', r5py.TransportMode.CABLE_CAR), ('ferry', r5py.TransportMode.FERRY), ('funicular', r5py.TransportMode.FUNICULAR), ('gondola', r5py.TransportMode.GONDOLA), ('rail', r5py.TransportMode.RAIL), ('subway', r5py.TransportMode.SUBWAY), ('tram', r5py.TransportMode.TRAM), ('transit', r5py.TransportMode.TRANSIT), ('bicycle', r5py.TransportMode.BICYCLE), ('car', r5py.TransportMode.CAR), ('walk', r5py.TransportMode.WALK), ('bicycle_rent', r5py.TransportMode.BICYCLE_RENT), ('car_park', r5py.TransportMode.CAR_PARK), ('AiR', r5py.TransportMode.AIR), ('bUs', r5py.TransportMode.BUS), ('CaBlE_CaR', r5py.TransportMode.CABLE_CAR), ('fErRy', r5py.TransportMode.FERRY), ('FuNiCuLaR', r5py.TransportMode.FUNICULAR), ('gOnDoLa', r5py.TransportMode.GONDOLA), ('RaIl', r5py.TransportMode.RAIL), ('SuBwAy', r5py.TransportMode.SUBWAY), ('TrAm', r5py.TransportMode.TRAM), ('TrAnSiT', r5py.TransportMode.TRANSIT), ('bIcYcLe', r5py.TransportMode.BICYCLE), ('CaR', r5py.TransportMode.CAR), ('wAlK', r5py.TransportMode.WALK), ('bIcYcLe_ReNt', r5py.TransportMode.BICYCLE_RENT), ('CaR_PaRk', r5py.TransportMode.CAR_PARK)])
    def test_str_transportmode(self, str_transport_mode, expected_transport_mode):
        """Construction from a string is case-insensitive."""
        assert (r5py.TransportMode(str_transport_mode) == expected_transport_mode)

    @pytest.mark.parametrize(['invalid_transport_mode'], [('Helicopter',), ('adsffoobar',), (13,), (None,), ('1234',)])
    def test_invalid_transportmode(self, invalid_transport_mode):
        """Unknown values must raise ValueError."""
        with pytest.raises(ValueError, match='is not a valid TransportMode'):
            _ = r5py.TransportMode(invalid_transport_mode)

    @pytest.mark.parametrize(['mode', 'expected'], [('CAR', True), ('BICYCLE', True), (r5py.TransportMode.WALK, True), (r5py.TransportMode.BICYCLE_RENT, True), (r5py.TransportMode.BUS, False), ('TRANSIT', False)])
    def test_is_leg_mode(self, mode, expected):
        assert (r5py.TransportMode(mode).is_leg_mode == expected)

    @pytest.mark.parametrize(['mode', 'expected'], [('CAR', True), ('BICYCLE', True), (r5py.TransportMode.WALK, True), (r5py.TransportMode.BICYCLE_RENT, False), (r5py.TransportMode.BUS, False), ('TRANSIT', False)])
    def test_is_street_mode(self, mode, expected):
        assert (r5py.TransportMode(mode).is_street_mode == expected)

    @pytest.mark.parametrize(['mode', 'expected'], [('CAR', False), ('BICYCLE', False), (r5py.TransportMode.WALK, False), (r5py.TransportMode.BICYCLE_RENT, False), (r5py.TransportMode.BUS, True), ('TRANSIT', True)])
    def test_is_transit_mode(self, mode, expected):
        assert (r5py.TransportMode(mode).is_transit_mode == expected)

    def test_add(self):
        """Adding modes (and lists of modes) yields a flat list, preserving order."""
        transport_mode1 = r5py.TransportMode.TRANSIT
        transport_mode2 = r5py.TransportMode.WALK
        transport_modes = (transport_mode1 + transport_mode2)
        assert isinstance(transport_modes, list)
        assert (transport_modes == [transport_mode1, transport_mode2])
        transport_modes = (transport_mode2 + transport_mode1)
        assert isinstance(transport_modes, list)
        assert (transport_modes == [transport_mode2, transport_mode1])
        # sum() starts from 0, exercising the __radd__ path.
        transport_modes = sum([transport_mode1, transport_mode2])
        assert isinstance(transport_modes, list)
        assert (transport_modes == [transport_mode1, transport_mode2])
        transport_modes = ([transport_mode1] + transport_mode2)
        assert isinstance(transport_modes, list)
        assert (transport_modes == [transport_mode1, transport_mode2])
        transport_modes = (transport_mode1 + [transport_mode2])
        assert isinstance(transport_modes, list)
        assert (transport_modes == [transport_mode1, transport_mode2])

    @pytest.mark.parametrize(['invalid_other'], [(123.0,), (1,), ('asdfasdf',), ({},)])
    def test_add_invalid_type(self, invalid_other):
        """Adding anything but a mode or list of modes must raise TypeError."""
        transport_mode = r5py.TransportMode.TRANSIT
        with pytest.raises(TypeError, match='unsupported operand type'):
            _ = (transport_mode + invalid_other)
        with pytest.raises(TypeError, match='unsupported operand type'):
            _ = (invalid_other + transport_mode)
@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Label-smoothed cross-entropy criterion that also records per-sample
    log-probability and accuracy statistics while evaluating.

    NOTE(review): the registration decorator was garbled to `_criterion(...)`
    in the original; restored to fairseq's @register_criterion.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # Smoothing epsilon; 0 reduces to plain cross-entropy.
        self.eps = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0.0, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for `sample`.

        Returns (loss, sample_size, logging_output); sample_size is sentences
        or tokens depending on --sentence-avg.
        """
        net_output = model(**sample['net_input'])
        (loss, nll_loss, sample_status) = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size, 'sample_status': sample_status}
        return (loss, sample_size, logging_output)

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Label-smoothed NLL; in eval mode also gather per-sample statistics."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        origin_target = model.get_targets(sample, net_output)
        sample_status = None
        if not model.training:
            _lprobs = lprobs
            _target = origin_target.unsqueeze(-1)
            _pad_mask = _target.eq(self.padding_idx)
            target_lprob = _lprobs.gather(dim=-1, index=_target)
            # Padding positions are marked with sentinel 2.0, impossible for
            # either a log-probability or a 0/1 accuracy value.
            mtarget_lprob = target_lprob.masked_fill(_pad_mask, 2.0)
            # BUG FIX: torch.argmax takes `keepdim`, not numpy's `keepdims`.
            _argmax = torch.argmax(_lprobs, dim=-1, keepdim=True)
            target_acc = torch.eq(_target, _argmax).to(dtype=mtarget_lprob.dtype)
            mtarget_acc = target_acc.masked_fill(_pad_mask, 2.0)
            sample_lprob_list = mtarget_lprob.squeeze().tolist()
            sample_acc_list = mtarget_acc.squeeze().tolist()
            sample_id_list = sample['id'].squeeze().tolist()
            sample_status = list(zip(sample_id_list, sample_lprob_list, sample_acc_list))
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = origin_target.view(-1, 1)
        (loss, nll_loss) = label_smoothed_nll_loss(lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=reduce)
        return (loss, nll_loss, sample_status)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data-parallel training."""
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        sample_status = [log.get('sample_status', 0) for log in logging_outputs]
        # Losses are reported per token/sample in base 2 (divide by ln 2).
        return {'loss': (((sum(log.get('loss', 0) for log in logging_outputs) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'nll_loss': (((sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens) / math.log(2)) if (ntokens > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size, 'sample_status': sample_status}
class LKA_Attention3d(nn.Module):
    """Large-Kernel-Attention block for 3-D features.

    Pipeline: 1x1x1 projection -> GELU -> LKA3d spatial gating -> 1x1x1
    projection, wrapped in a residual connection.
    """

    def __init__(self, d_model):
        super().__init__()
        self.proj_1 = nn.Conv3d(d_model, d_model, 1)
        self.activation = nn.GELU()
        self.spatial_gating_unit = LKA3d(d_model)
        self.proj_2 = nn.Conv3d(d_model, d_model, 1)

    def forward(self, x):
        # Keep a copy of the input for the residual connection.
        residual = x.clone()
        out = self.proj_1(x)
        out = self.activation(out)
        out = self.spatial_gating_unit(out)
        out = self.proj_2(out)
        return out + residual
class CortexMBitband(QlPeripheral):
    """Cortex-M bit-band alias region.

    Every 32-bit word in the alias region maps onto a single bit of the
    underlying memory, so reads/writes of an alias word access one bit.
    """

    # Mask applied to alias offsets before converting them back to a byte
    # address: a 32 MiB alias region covers 1 MiB of bit-banded memory.
    # NOTE(review): the original expression was garbled (`offset & ` with the
    # mask missing); restored the conventional 25-bit alias mask.
    ALIAS_MASK = 0x1FFFFFF

    def __init__(self, ql, label, base, size):
        super().__init__(ql, label)
        self.bitband_base = base
        # Each byte of real memory expands to 32 alias bytes.
        self.bitband_size = (size * 32)

    def _bitband_addr(self, offset):
        """Map an alias-region offset back to the underlying byte address."""
        return (self.bitband_base | ((offset & self.ALIAS_MASK) >> 5))

    def region(self):
        """Memory range (relative) served by this peripheral."""
        return [(0, self.bitband_size)]

    def read(self, offset, size):
        """Return the single bit addressed by the alias word at `offset`."""
        addr = (self._bitband_addr(offset) & (- size))  # align down to `size`
        buf = self.ql.mem.read(addr, size)
        # Word index within the aligned chunk selects the bit position.
        bitpos = ((offset >> 2) & ((size * 8) - 1))
        bit = ((buf[(bitpos >> 3)] >> (bitpos & 7)) & 1)
        return bit

    def write(self, offset, size, value):
        """Set or clear the single bit addressed by the alias word at `offset`."""
        addr = (self._bitband_addr(offset) & (- size))
        buf = self.ql.mem.read(addr, size)
        bitpos = ((offset >> 2) & ((size * 8) - 1))
        bit = (1 << (bitpos & 7))
        if (value & 1):
            buf[(bitpos >> 3)] |= bit
        else:
            buf[(bitpos >> 3)] &= (~ bit)
        self.ql.mem.write(addr, bytes(buf))
# NOTE(review): both decorators below were garbled in the original
# (`.parametrize(...)` and a bare tuple); restored to the canonical
# @pytest.mark.parametrize / @mock.patch forms.
@pytest.mark.parametrize('package_name', ['pycowsay', 'pycowsay==0.0.0.2', 'pycowsay>=0.0.0.2'])
@mock.patch('os.execvpe', new=execvpe_mock)
def test_simple_run(pipx_temp_env, monkeypatch, capsys, package_name):
    """`pipx run <pkg> --help` must show the package's help, not pipx's own."""
    run_pipx_cli_exit(['run', package_name, '--help'])
    captured = capsys.readouterr()
    # This phrase appears in pipx's own help text; the package's help must not
    # contain it, proving the package (not pipx) handled --help.
    assert ('Download the latest version of a package' not in captured.out)
class TransformerEncoderLayer(nn.Module):
    """Transformer encoder layer: self-attention + FFN, each wrapped in a
    residual connection and LayerNorm (pre- or post-norm depending on
    args.encoder_normalize_before)."""

    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.quant_noise = getattr(args, 'quant_noise_pq', 0)
        self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8)
        self.self_attn = self.build_self_attention(self.embed_dim, args)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
        self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
        activation_dropout_p = getattr(args, 'activation_dropout', 0)
        if (activation_dropout_p == 0):
            # Backwards compatibility with models that used --relu-dropout.
            activation_dropout_p = getattr(args, 'relu_dropout', 0)
        self.activation_dropout_module = FairseqDropout(float(activation_dropout_p), module_name=self.__class__.__name__)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        """First FFN projection, optionally wrapped in quantization noise."""
        return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        """Second FFN projection, optionally wrapped in quantization noise."""
        return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)

    def build_self_attention(self, embed_dim, args):
        return MultiheadAttention(embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size)

    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy `layer_norms.{0,1}` checkpoint keys in place to the
        modern `self_attn_layer_norm` / `final_layer_norm` names."""
        layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}
        for (old, new) in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if (k in state_dict):
                    state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
                    del state_dict[k]

    def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor]=None):
        """Run one encoder layer.

        Args:
            x: input of shape (seq_len, batch, embed_dim) -- assumed; confirm
               against the caller.
            encoder_padding_mask: mask of padded positions.
            attn_mask: optional additive attention mask.
        """
        if (attn_mask is not None):
            # BUG FIX: masked positions were filled with -0.0, which is a
            # no-op under addition; use a large negative value so they are
            # effectively ignored by the attention softmax.
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        (x, _) = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask)
        x = self.dropout_module(x)
        x = (residual + x)
        if (not self.normalize_before):
            x = self.self_attn_layer_norm(x)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = (residual + x)
        if (not self.normalize_before):
            x = self.final_layer_norm(x)
        return x
class Assign(_base_nodes.AssignTypeNode, _base_nodes.Statement):
    """An assignment statement: one or more targets bound to a single value."""

    targets: list[NodeNG]  # left-hand sides (may be several: a = b = 1)
    value: NodeNG  # right-hand side expression
    type_annotation: (NodeNG | None)  # from a "# type: ..." comment, if any
    _astroid_fields = ('targets', 'value')
    _other_other_fields = ('type_annotation',)

    def postinit(self, targets: list[NodeNG], value: NodeNG, type_annotation: (NodeNG | None)) -> None:
        """Attach child nodes after construction."""
        self.targets = targets
        self.value = value
        self.type_annotation = type_annotation

    assigned_stmts = protocols.assign_assigned_stmts

    def get_children(self):
        (yield from self.targets)
        (yield self.value)

    # NOTE(review): the original had a bare `_property` expression here -- a
    # garbled decorator. Restored as @property (upstream astroid caches this;
    # a plain property is behaviour-compatible and needs no extra import).
    @property
    def _assign_nodes_in_scope(self) -> list[nodes.Assign]:
        return [self, *self.value._assign_nodes_in_scope]

    def _get_yield_nodes_skip_functions(self):
        (yield from self.value._get_yield_nodes_skip_functions())

    def _get_yield_nodes_skip_lambdas(self):
        (yield from self.value._get_yield_nodes_skip_lambdas())
def test_low_sun_angles():
    """With a very low sun and backtracking on, the tracker sits at max_angle."""
    result = tracking.singleaxis(apparent_zenith=80, apparent_azimuth=338,
                                 axis_tilt=30, axis_azimuth=180, max_angle=60,
                                 backtrack=True, gcr=0.35)
    expected = {'tracker_theta': np.array([60.0]),
                'aoi': np.array([80.420987]),
                'surface_azimuth': np.array([253.897886]),
                'surface_tilt': np.array([64.341094])}
    for key, actual in result.items():
        assert_allclose(expected[key], actual)
# NOTE(review): the two decorators below were garbled to bare
# `.parametrize(...)` lines; restored to @pytest.mark.parametrize.
@pytest.mark.parametrize('klass', (ShmemVecEnv, SubprocVecEnv))
@pytest.mark.parametrize('dtype', ('uint8', 'float32'))
def test_vec_env(klass, dtype):
    """A parallel vec-env must behave exactly like the serial DummyVecEnv."""
    num_envs = 3
    num_steps = 100
    shape = (3, 8)

    def make_fn(seed):
        # Bind `seed` now; the factory itself runs later in the workers.
        return (lambda : SimpleEnv(seed, shape, dtype))
    fns = [make_fn(i) for i in range(num_envs)]
    env1 = DummyVecEnv(fns)
    env2 = klass(fns)
    assert_envs_equal(env1, env2, num_steps=num_steps)
class Resize(object):
    """Resize every PIL image in a list to `size` using `interpolation`."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        # `size` is either a single int (shorter edge) or an (h, w) pair.
        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img_list):
        size, interp = self.size, self.interpolation
        return [F.resize(img, size, interp) for img in img_list]

    def __repr__(self):
        interp_name = _pil_interpolation_to_str[self.interpolation]
        return '{0}(size={1}, interpolation={2})'.format(self.__class__.__name__, self.size, interp_name)
# NOTE(review): the decorator below was garbled to a bare `.parametrize(...)`
# line; restored to @pytest.mark.parametrize.
@pytest.mark.parametrize('func', [(lambda df: df.query('x > 1 and x < 4', engine='python')), (lambda df: df.x.value_counts().nlargest(2))])
def test_dataframe_simple(func):
    """A streaming DataFrame must reproduce `func`'s result on plain pandas."""
    df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    expected = func(df)
    a = DataFrame(example=df)
    # Collect everything the streaming pipeline emits.
    L = func(a).stream.sink_to_list()
    a.emit(df)
    assert_eq(L[0], expected)
class MessageLogModelTest(RapidTest):
    """Validation rules tying a Message's contact to its connection."""

    def setUp(self):
        self.contact = self.create_contact()
        self.connection = self.lookup_connections([''])[0]
        self.connection.contact = self.contact
        self.data = {
            'contact': self.contact,
            'connection': self.connection,
            'direction': Message.INCOMING,
            'date': now(),
            'text': 'hello',
        }

    def test_no_contact_or_connection(self):
        """A message with neither contact nor connection is invalid."""
        del self.data['contact']
        del self.data['connection']
        with self.assertRaises(ValidationError):
            Message.objects.create(**self.data)

    def test_no_contact(self):
        """Without an explicit contact, it is inferred from the connection."""
        del self.data['contact']
        msg = Message.objects.create(**self.data)
        self.assertEqual(msg.contact, self.contact)
        # With no contact on the connection either, the message has none.
        self.connection.contact = None
        msg = Message.objects.create(**self.data)
        self.assertEqual(msg.contact, None)

    def test_no_connection(self):
        """A message may carry a contact but no connection."""
        del self.data['connection']
        msg = Message.objects.create(**self.data)
        self.assertEqual(msg.connection, None)

    def test_contact_mismatch(self):
        """A contact differing from the connection's contact is rejected."""
        self.data['contact'] = self.create_contact()
        with self.assertRaises(ValidationError):
            Message.objects.create(**self.data)
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Copy weights from a fairseq wav2vec2 checkpoint into an HF model.

    Conv feature-extractor and adapter tensors go through dedicated loaders;
    everything else is renamed via the global MAPPING table and assigned with
    set_recursively. Tensors matching nothing are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for (name, value) in fairseq_dict.items():
        is_used = False
        if ('conv_layers' in name):
            # Feature-extractor convolutions use their own naming scheme.
            load_conv_layer(name, value, feature_extractor, unused_weights, (hf_model.config.feat_extract_norm == 'group'))
            is_used = True
        elif any(((x in name) for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'])):
            # Adapter / projection tensors are handled by the adapter loader.
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for (key, mapped_key) in MAPPING.items():
                if ((key in name) or (key.split('w2v_model.')[(- 1)] == name.split('.')[0])):
                    is_used = True
                    if ('*' in mapped_key):
                        # Transformer layers are numbered; recover the index
                        # from the fairseq name and substitute it for '*'.
                        layer_index = name.split(key)[0].split('.')[(- 2)]
                        mapped_key = mapped_key.replace('*', layer_index)
                    # Classify which slot of the target module this tensor
                    # fills (weight-norm g/v, bias, plain weight, or whole).
                    if ('weight_g' in name):
                        weight_type = 'weight_g'
                    elif ('weight_v' in name):
                        weight_type = 'weight_v'
                    elif ('bias' in name):
                        weight_type = 'bias'
                    elif ('weight' in name):
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                # No-op as the last statement of the loop body; kept as-is.
                continue
        if (not is_used):
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--device', action='store', type=str, default='cuda')
parser.add_argument('--seed', type=int, default=1234, metavar='N', help='random seed (default: 1234)')
parser.add_argument('--evaluate', action='store', type=bool, default=True)
parser.add_argument('--testing_case_dir', action='store', type=str, default='testing_cases/')
parser.add_argument('--testing_case', action='store', type=str, default=None)
parser.add_argument('--load_model', action='store', type=bool, default=False)
parser.add_argument('--model_path', action='store', type=str, default='')
parser.add_argument('--num_episode', action='store', type=int, default=15)
parser.add_argument('--eval', type=bool, default=True, help='Evaluates a policy every 10 episode (default: True)')
parser.add_argument('--max_episode_step', type=int, default=15)
parser.add_argument('--patch_size', type=int, default=32)
parser.add_argument('--width', type=int, default=512)
parser.add_argument('--layers', type=int, default=1)
parser.add_argument('--heads', type=int, default=8)
parser.add_argument('--hidden_size', type=int, default=256, metavar='N', help='hidden size (default: 256)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G', help='learning rate (default: 0.0003)')
args = parser.parse_args()
return args |
def test_inbound_shipment_item_from_plan_constructor(create_inbound_shipment_plan_dummy_response):
    """from_plan_item must copy plan fields and accept case/date overrides."""
    parsed = create_inbound_shipment_plan_dummy_response.parsed
    plan_item = parsed.InboundShipmentPlans.member.Items.member

    # Default construction: everything comes straight from the plan item.
    first = InboundShipmentItem.from_plan_item(plan_item)
    assert first.sku == 'SKU00001'
    assert first.quantity == '1'
    assert first.quantity_in_case is None
    assert first.release_date is None
    assert first.prep_details_list[0].prep_instruction == 'Taping'
    assert first.prep_details_list[0].prep_owner == 'AMAZON'
    assert first.fnsku == 'FNSKU00001'

    # Optional keyword overrides are passed through verbatim.
    release = datetime.datetime(2020, 11, 3)
    second = InboundShipmentItem.from_plan_item(plan_item, quantity_in_case=4, release_date=release)
    assert second.quantity_in_case == 4
    assert second.release_date == release
class Timezone_TestCase(unittest.TestCase):
    """--isUtc must default to False everywhere; F18 stringifies to ''."""

    def runTest(self):
        for cmd_class in (FC6_Timezone, F25_Timezone, RHEL7_Timezone):
            parser = cmd_class()._getParser()
            # Only the --isUtc option is of interest here.
            for action in parser._actions:
                if '--isUtc' not in action.option_strings:
                    continue
                self.assertFalse(action.default)
        self.assertEqual(str(F18_Timezone()), '')
class Dict(TokenConverter):
    """Converter that re-keys a repetitive expression's results like a dict.

    Each matched sub-token contributes a key (its first element) and a value
    (the remainder), so the overall ParseResults can be accessed by name.
    """

    def __init__(self, expr):
        super().__init__(expr)
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        """Re-key `tokenlist` in place; each sub-token's first item is the key."""
        for (i, tok) in enumerate(tokenlist):
            if (len(tok) == 0):
                continue
            ikey = tok[0]
            if isinstance(ikey, int):
                # Integer keys would collide with positional indexing; coerce
                # them to stripped strings.
                ikey = str(tok[0]).strip()
            if (len(tok) == 1):
                # Key with no value -> empty-string placeholder.
                tokenlist[ikey] = _ParseResultsWithOffset('', i)
            elif ((len(tok) == 2) and (not isinstance(tok[1], ParseResults))):
                # Simple scalar key/value pair.
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
            else:
                # Multi-element value: keep everything after the key.
                dictvalue = tok.copy()
                del dictvalue[0]
                if ((len(dictvalue) != 1) or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys())):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
                else:
                    # Single unnamed element: store it unwrapped.
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
        if self.resultsName:
            return [tokenlist]
        else:
            return tokenlist
def repo_with_git_flow_and_release_channels_angular_commits_using_tag_format(git_repo_factory, file_in_repo):
    """Yield a git-flow style repository whose release tags use a 'vpy' prefix.

    History: main releases 0.1.0 -> 0.1.1-rc.1 -> 1.0.0-rc.1 -> 1.0.0, then a
    'dev' branch with 1.1.0-rc.1/2 and a 'feature' branch with
    1.1.0-alpha.1..3; the repo is yielded checked out on 'feature'.
    """
    git_repo = git_repo_factory()

    def commit(message):
        # Every commit touches the same file so there is always a diff.
        add_text_to_file(git_repo, file_in_repo)
        git_repo.git.commit(m=message)

    def release(version):
        # A release is a version-bump commit plus an annotated 'vpy' tag.
        commit(COMMIT_MESSAGE.format(version=version))
        tag = 'vpy{}'.format(version)
        git_repo.git.tag(tag, m=tag)

    commit('Initial commit')
    release('0.1.0')
    commit('fix: add some more text')
    release('0.1.1-rc.1')
    commit('feat!: add some more text')
    release('1.0.0-rc.1')
    commit('feat: add some more text')
    release('1.0.0')
    assert git_repo.commit('vpy1.0.0').hexsha == git_repo.head.commit.hexsha

    git_repo.create_head('dev')
    git_repo.heads.dev.checkout()
    commit('feat: (dev) add some more text')
    release('1.1.0-rc.1')
    commit('fix: (dev) add some more text')
    release('1.1.0-rc.2')
    assert git_repo.commit('vpy1.1.0-rc.2').hexsha == git_repo.head.commit.hexsha

    git_repo.create_head('feature')
    git_repo.heads.feature.checkout()
    commit('feat: (feature) add some more text')
    release('1.1.0-alpha.1')
    commit('feat: (feature) add some more text')
    release('1.1.0-alpha.2')
    commit('fix: (feature) add some more text')
    release('1.1.0-alpha.3')
    assert git_repo.commit('vpy1.1.0-alpha.3').hexsha == git_repo.head.commit.hexsha
    assert git_repo.active_branch.name == 'feature'

    yield git_repo
    git_repo.close()
def test_resnet3d_layer():
    """ResNet3dLayer: invalid configs raise; stage-3 layers emit 2048 channels."""
    # Unknown depth and out-of-range stage index are both rejected.
    with pytest.raises(AssertionError):
        ResNet3dLayer(22, None)
    with pytest.raises(AssertionError):
        ResNet3dLayer(50, None, stage=4)

    input_shape = (1, 1024, 1, 4, 4)
    expected_shape = torch.Size([1, 2048, 1, 2, 2])

    def check_forward(layer, inputs):
        # Parrots builds only run on GPU; plain PyTorch also runs on CPU.
        if torch.__version__ == 'parrots':
            if torch.cuda.is_available():
                out = layer.cuda()(inputs.cuda())
                assert out.shape == expected_shape
        else:
            assert layer(inputs).shape == expected_shape

    layer = ResNet3dLayer(50, None, stage=3, norm_eval=True)
    layer.init_weights()
    layer.train()
    check_forward(layer, generate_backbone_demo_inputs(input_shape))

    layer = ResNet3dLayer(50, 'torchvision://resnet50', stage=3, all_frozen=True)
    layer.init_weights()
    layer.train()
    check_forward(layer, generate_backbone_demo_inputs(input_shape))
class StochasticConvMLP(nn.Module):
    """Conv + MLP encoder with a stochastic (Gaussian) latent layer.

    arch = [in_channels, conv_channels, feat_dim, z_dim, num_classes]; the
    encoder predicts a mean and (Softplus) std per sample, draws
    z = mu + sigma * eps, and classifies from z. Modules are tagged via
    add_tag: group 0 = encoder/latent, group 1 = classifier head.
    """

    def __init__(self, arch, in_dim):
        super().__init__()
        self.arch = arch
        self.z_sz = arch[2]
        # Conv -> ReLU -> 2x2 max-pool (quarters H*W) -> flatten -> linear.
        layers = [nn.Conv2d(arch[0], arch[1], kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2), nn.Flatten(start_dim=1), nn.Linear((arch[1] * int(((in_dim[0] * in_dim[1]) / 4))), arch[2]), nn.ReLU()]
        self.pre_feats = nn.Sequential(*layers)
        self.pred_mu = nn.Linear(arch[2], arch[3])
        # Softplus keeps the predicted std strictly positive.
        self.pred_sigma = nn.Sequential(*([nn.Linear(arch[2], arch[3])] + [nn.Softplus()]))
        self.cls = nn.Sequential(nn.Linear(arch[3], arch[4]))
        # Tag encoder parameters with group 0 and the classifier with group 1.
        self.pre_feats.apply((lambda module: add_tag(module, 0)))
        self.pred_mu.apply((lambda module: add_tag(module, 0)))
        self.pred_sigma.apply((lambda module: add_tag(module, 0)))
        self.cls.apply((lambda module: add_tag(module, 1)))

    def forward(self, x, repr=False):
        """Encode x; return (z, log-prob) if repr, else (logits, mean std)."""
        feats = self.pre_feats(x)
        means = self.pred_mu(feats)
        # Clamp avoids a zero std, which would break Normal / log_prob.
        stds = self.pred_sigma(feats).clamp(min=EPS)
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).
        eps = torch.randn_like(means)
        z = (means + (stds * eps))
        if repr:
            distr = torch.distributions.normal.Normal(means, stds)
            z_prob = z
            if DETACH:
                # Optionally stop gradients flowing through the sample itself.
                z_prob = z.detach()
            logprob = distr.log_prob(z_prob).sum(dim=1)
            return (z, logprob)
        return (self.cls(z), stds.mean().item())

    def log_marg_prob(self, z, d_x, jensen):
        """Log marginal prob of each z under the mixture induced by batch d_x.

        jensen=True returns the Jensen lower bound (mean of component
        log-probs); otherwise log(1/M * sum_j p(z|x_j)) via logsumexp.
        """
        (batch_sz, L) = z.shape
        batch_sz2 = d_x.shape[0]
        feats = self.pre_feats(d_x)
        means = self.pred_mu(feats)
        stds = self.pred_sigma(feats).clamp(min=EPS)
        # Broadcast so every z is evaluated under every component from d_x.
        means = means.unsqueeze(0).expand(batch_sz, batch_sz2, L)
        stds = stds.unsqueeze(0).expand(batch_sz, batch_sz2, L)
        z = z.unsqueeze(1).expand(batch_sz, batch_sz2, L)
        distr = torch.distributions.normal.Normal(means, stds)
        z_prob = z
        if DETACH:
            z_prob = z.detach()
        logprob = distr.log_prob(z_prob)
        assert (logprob.shape == (batch_sz, batch_sz2, L))
        # Independent dimensions: the joint log-prob is the sum over L.
        logprob = logprob.sum(dim=2)
        if jensen:
            log_margprob = logprob.mean(dim=1)
        else:
            log_margprob = ((- np.log(batch_sz2)) + torch.logsumexp(logprob, dim=1))
        assert (log_margprob.shape == (batch_sz,))
        return log_margprob
def rotate_random_angle(game):
    """Turn the player until it faces a uniformly random angle (within 5 deg)."""
    target_angle = random.randint(0, 359)
    diff = util.get_angle_diff(game.get_game_variable(vzd.GameVariable.ANGLE), target_angle)
    while abs(diff) > 5:
        # Action layout: [turn_delta, move] -- turn against the remaining error.
        game.make_action([-diff, 0])
        diff = util.get_angle_diff(game.get_game_variable(vzd.GameVariable.ANGLE), target_angle)
def test_connections(rpaths):
    """Verify every remote connection registered in Globals.connections.

    Returns RET_CODE_OK when all remote connections test fine,
    RET_CODE_FILE_ERR for an invalid invocation, RET_CODE_ERR otherwise.
    """
    conn_len = len(Globals.connections)
    if (conn_len == 1):
        log.Log('No remote connections specified, only local one available', log.ERROR)
        return Globals.RET_CODE_FILE_ERR
    elif (conn_len != (len(rpaths) + 1)):
        # BUG FIX: this message previously went through print(), with
        # log.ERROR emitted as a stray positional argument; route it through
        # the logger like the branch above.
        log.Log("All {pa} parameters must be remote of the form 'server::path'".format(pa=len(rpaths)), log.ERROR)
        return Globals.RET_CODE_FILE_ERR
    # Connection indices start at 1 (index 0 is the local connection).
    results = (_test_connection(i, rpaths[(i - 1)]) for i in range(1, conn_len))
    if all(results):
        return Globals.RET_CODE_OK
    return Globals.RET_CODE_ERR
class Effect4216(BaseEffect):
    """Amarr Core Systems subsystem bonus applied to Energy Nosferatu modules."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        def is_energy_nosferatu(mod):
            return mod.item.group.name == 'Energy Nosferatu'

        fit.modules.filteredItemBoost(
            is_energy_nosferatu,
            'powerTransferAmount',
            src.getModifiedItemAttr('subsystemBonusAmarrCore2'),
            skill='Amarr Core Systems',
            **kwargs,
        )
class BaseDataset(object):
    """Hypergraph dataset loaded from pickled features/hypergraph/labels files.

    Builds a (node, hyperedge) incidence index, tensorizes all fields and
    moves them to the requested device.
    """

    def __init__(self, type: str, name: str, device: str='cpu'):
        # NOTE: `type` shadows the builtin; kept to preserve the public interface.
        self.type = type
        self.name = name
        self.device = device
        # cocitation/coauthorship datasets live one directory level deeper
        if (self.type in ['cocitation', 'coauthorship']):
            self.dataset_dir = osp.join('dataset', self.type, self.name)
        else:
            self.dataset_dir = osp.join('dataset', self.name)
        self.split_dir = osp.join(self.dataset_dir, 'splits')
        self.load_dataset()
        self.preprocess_dataset()

    def load_dataset(self):
        """Load pickled features, hypergraph (edge -> node list) and labels."""
        with open(osp.join(self.dataset_dir, 'features.pickle'), 'rb') as f:
            self.features = pickle.load(f)
        with open(osp.join(self.dataset_dir, 'hypergraph.pickle'), 'rb') as f:
            self.hypergraph = pickle.load(f)
        with open(osp.join(self.dataset_dir, 'labels.pickle'), 'rb') as f:
            self.labels = pickle.load(f)

    def load_splits(self, seed: int):
        """Load a stored train/val/test split dict keyed by `seed`."""
        with open(osp.join(self.split_dir, f'{seed}.pickle'), 'rb') as f:
            splits = pickle.load(f)
        return splits

    def preprocess_dataset(self):
        """Renumber hyperedges, build the incidence index and tensorize fields."""
        # Map arbitrary edge keys to contiguous integer ids (both directions).
        edge_set = set(self.hypergraph.keys())
        edge_to_num = {}
        num_to_edge = {}
        num = 0
        for edge in edge_set:
            edge_to_num[edge] = num
            num_to_edge[num] = edge
            num += 1
        # One [node, edge_id] pair per incidence.
        incidence_matrix = []
        processed_hypergraph = {}
        for edge in edge_set:
            nodes = self.hypergraph[edge]
            processed_hypergraph[edge_to_num[edge]] = nodes
            for node in nodes:
                incidence_matrix.append([node, edge_to_num[edge]])
        self.processed_hypergraph = processed_hypergraph
        # assumes self.features is a scipy sparse matrix (has .toarray) -- TODO confirm
        self.features = torch.as_tensor(self.features.toarray())
        # shape (2, num_incidences): row 0 = node ids, row 1 = edge ids
        self.hyperedge_index = torch.LongTensor(incidence_matrix).T.contiguous()
        self.labels = torch.LongTensor(self.labels)
        self.num_nodes = (int(self.hyperedge_index[0].max()) + 1)
        self.num_edges = (int(self.hyperedge_index[1].max()) + 1)
        self.edge_to_num = edge_to_num
        self.num_to_edge = num_to_edge
        # NOTE(review): Dn (node degrees) and De (edge degrees) are computed
        # but never stored or used -- looks like leftover code; confirm.
        weight = torch.ones(self.num_edges)
        Dn = scatter_add(weight[self.hyperedge_index[1]], self.hyperedge_index[0], dim=0, dim_size=self.num_nodes)
        De = scatter_add(torch.ones(self.hyperedge_index.shape[1]), self.hyperedge_index[1], dim=0, dim_size=self.num_edges)
        self.to(self.device)

    def to(self, device: str):
        """Move all tensor fields to `device`; returns self for chaining."""
        self.features = self.features.to(device)
        self.hyperedge_index = self.hyperedge_index.to(device)
        self.labels = self.labels.to(device)
        self.device = device
        return self

    def generate_random_split(self, train_ratio: float=0.1, val_ratio: float=0.1, seed: Optional[int]=None, use_stored_split: bool=True):
        """Return [train_mask, val_mask, test_mask] boolean node masks.

        Either loads a stored split for `seed`, or samples a fresh random
        split with the given ratios (seeded when `seed` is not None).
        """
        if use_stored_split:
            # NOTE(review): seed=None here would look for 'None.pickle';
            # presumably callers always pass a seed in this mode -- confirm.
            splits = self.load_splits(seed)
            train_mask = torch.tensor(splits['train_mask'], dtype=torch.bool, device=self.device)
            val_mask = torch.tensor(splits['val_mask'], dtype=torch.bool, device=self.device)
            test_mask = torch.tensor(splits['test_mask'], dtype=torch.bool, device=self.device)
        else:
            num_train = int((self.num_nodes * train_ratio))
            num_val = int((self.num_nodes * val_ratio))
            num_test = (self.num_nodes - (num_train + num_val))
            if (seed is not None):
                generator = torch.Generator().manual_seed(seed)
            else:
                generator = torch.default_generator
            (train_set, val_set, test_set) = random_split(torch.arange(0, self.num_nodes), (num_train, num_val, num_test), generator=generator)
            (train_idx, val_idx, test_idx) = (train_set.indices, val_set.indices, test_set.indices)
            train_mask = torch.zeros((self.num_nodes,), device=self.device).to(torch.bool)
            val_mask = torch.zeros((self.num_nodes,), device=self.device).to(torch.bool)
            test_mask = torch.zeros((self.num_nodes,), device=self.device).to(torch.bool)
            train_mask[train_idx] = True
            val_mask[val_idx] = True
            test_mask[test_idx] = True
        return [train_mask, val_mask, test_mask]
class TestFileSelectionDefaults():
    """Verify the builder config's default file-selection options."""

    def test_include(self, isolation):
        config = MockBuilder(str(isolation)).config
        assert config.default_include() == []

    def test_exclude(self, isolation):
        config = MockBuilder(str(isolation)).config
        assert config.default_exclude() == []

    def test_packages(self, isolation):
        config = MockBuilder(str(isolation)).config
        assert config.default_packages() == []

    def test_only_include(self, isolation):
        config = MockBuilder(str(isolation)).config
        assert config.default_only_include() == []

    def test_global_exclude(self, isolation):
        # compiled bytecode and the dist directory are excluded by default
        config = MockBuilder(str(isolation)).config
        assert config.default_global_exclude() == ['*.py[cdo]', '/dist']
class BatchNormalization(Layer):
    """Batch normalization layer (Keras-style).

    Normalizes the activations of the previous layer for each batch and
    maintains moving mean/variance statistics for use at inference time.
    """

    # NOTE(review): bare name expression -- looks like residue of a decorator
    # or legacy-interface hook from the upstream source; as written it only
    # evaluates and discards the name. Confirm against upstream.
    _batchnorm_support

    def __init__(self, axis=(- 1), momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, **kwargs):
        """Store normalization hyperparameters and resolve initializer/
        regularizer/constraint identifiers into objects."""
        super(BatchNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

    def build(self, input_shape):
        """Create gamma/beta (if enabled) and the non-trainable moving stats."""
        dim = input_shape[self.axis]
        if (dim is None):
            raise ValueError((((('Axis ' + str(self.axis)) + ' of input tensor should have a defined dimension but the layer received an input with shape ') + str(input_shape)) + '.'))
        self.input_spec = InputSpec(ndim=len(input_shape), axes={self.axis: dim})
        # all per-feature parameters are 1-D along the normalized axis
        shape = (dim,)
        if self.scale:
            self.gamma = self.add_weight(shape=shape, name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint)
        else:
            self.beta = None
        self.moving_mean = self.add_weight(shape=shape, name='moving_mean', initializer=self.moving_mean_initializer, trainable=False)
        self.moving_variance = self.add_weight(shape=shape, name='moving_variance', initializer=self.moving_variance_initializer, trainable=False)
        self.built = True

    def call(self, inputs, training=None):
        """Normalize `inputs`; in training mode also update moving statistics."""
        input_shape = K.int_shape(inputs)
        ndim = len(input_shape)
        # normalize over every axis except self.axis
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = ([1] * len(input_shape))
        broadcast_shape[self.axis] = input_shape[self.axis]
        # broadcasting is needed unless the normalized axis is the last one
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:(- 1)])

        def normalize_inference():
            # inference path: use the stored moving statistics
            if needs_broadcasting:
                broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
                broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape)
                if self.center:
                    broadcast_beta = K.reshape(self.beta, broadcast_shape)
                else:
                    broadcast_beta = None
                if self.scale:
                    broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                else:
                    broadcast_gamma = None
                return K.batch_normalization(inputs, broadcast_moving_mean, broadcast_moving_variance, broadcast_beta, broadcast_gamma, epsilon=self.epsilon)
            else:
                return K.batch_normalization(inputs, self.moving_mean, self.moving_variance, self.beta, self.gamma, epsilon=self.epsilon)
        if (training in {0, False}):
            return normalize_inference()
        # training path: normalize with batch statistics and schedule
        # moving-average updates of mean/variance
        (normed_training, mean, variance) = K.normalize_batch_in_training(inputs, self.gamma, self.beta, reduction_axes, epsilon=self.epsilon)
        self.add_update([K.moving_average_update(self.moving_mean, mean, self.momentum), K.moving_average_update(self.moving_variance, variance, self.momentum)], inputs)
        # pick the training or inference branch at graph-execution time
        return K.in_train_phase(normed_training, normalize_inference, training=training)

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'moving_mean_initializer': initializers.serialize(self.moving_mean_initializer), 'moving_variance_initializer': initializers.serialize(self.moving_variance_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint)}
        base_config = super(BatchNormalization, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
class StatisticsVisitor(TraverserVisitor):
    """AST visitor that collects per-line type-precision statistics.

    Walks a parsed (and optionally type-checked) module, classifying each
    relevant line as precise / imprecise / any / empty / unanalyzed and
    counting the kinds of types encountered along the way.
    """

    def __init__(self, inferred: bool, filename: str, modules: dict[(str, MypyFile)], typemap: (dict[(Expression, Type)] | None)=None, all_nodes: bool=False, visit_untyped_defs: bool=True) -> None:
        self.inferred = inferred
        self.filename = filename
        self.modules = modules
        self.typemap = typemap
        self.all_nodes = all_nodes
        self.visit_untyped_defs = visit_untyped_defs
        # expression precision counters
        self.num_precise_exprs = 0
        self.num_imprecise_exprs = 0
        self.num_any_exprs = 0
        # type-kind counters
        self.num_simple_types = 0
        self.num_generic_types = 0
        self.num_tuple_types = 0
        self.num_function_types = 0
        self.num_typevar_types = 0
        self.num_complex_types = 0
        self.num_any_types = 0
        # current source line being attributed
        self.line = (- 1)
        # line number -> best (max) precision seen for that line
        self.line_map: dict[(int, int)] = {}
        self.type_of_any_counter: Counter[int] = Counter()
        self.any_line_map: dict[(int, list[AnyType])] = {}
        # stack of "is this scope fully annotated" flags
        self.checked_scopes = [True]
        self.output: list[str] = []
        TraverserVisitor.__init__(self)

    def visit_mypy_file(self, o: MypyFile) -> None:
        self.cur_mod_node = o
        self.cur_mod_id = o.fullname
        super().visit_mypy_file(o)

    def visit_import_from(self, imp: ImportFrom) -> None:
        self.process_import(imp)

    def visit_import_all(self, imp: ImportAll) -> None:
        self.process_import(imp)

    def process_import(self, imp: (ImportFrom | ImportAll)) -> None:
        """Classify an import line: precise if the module is known, else Any."""
        (import_id, ok) = correct_relative_import(self.cur_mod_id, imp.relative, imp.id, self.cur_mod_node.is_package_init_file())
        if (ok and (import_id in self.modules)):
            kind = TYPE_PRECISE
        else:
            kind = TYPE_ANY
        self.record_line(imp.line, kind)

    def visit_import(self, imp: Import) -> None:
        # precise only if every imported module is known
        if all(((id in self.modules) for (id, _) in imp.ids)):
            kind = TYPE_PRECISE
        else:
            kind = TYPE_ANY
        self.record_line(imp.line, kind)

    def visit_func_def(self, o: FuncDef) -> None:
        """Record precision of a function's signature, then visit its body."""
        with self.enter_scope(o):
            self.line = o.line
            if ((len(o.expanded) > 1) and (o.expanded != ([o] * len(o.expanded)))):
                # overload/decorator expansion: visit each expanded variant
                if (o in o.expanded):
                    print('{}:{}: ERROR: cycle in function expansion; skipping'.format(self.filename, o.line))
                    return
                for defn in o.expanded:
                    assert isinstance(defn, FuncDef)
                    self.visit_func_def(defn)
            else:
                if o.type:
                    assert isinstance(o.type, CallableType)
                    sig = o.type
                    arg_types = sig.arg_types
                    if (sig.arg_names and (sig.arg_names[0] == 'self') and (not self.inferred)):
                        # skip 'self': its type is implicit, not annotated
                        arg_types = arg_types[1:]
                    for arg in arg_types:
                        self.type(arg)
                    self.type(sig.ret_type)
                elif self.all_nodes:
                    self.record_line(self.line, TYPE_ANY)
                if ((not o.is_dynamic()) or self.visit_untyped_defs):
                    super().visit_func_def(o)

    def enter_scope(self, o: FuncDef) -> Iterator[None]:
        # NOTE(review): used as `with self.enter_scope(o):` above, which only
        # works if this is wrapped with @contextlib.contextmanager -- the
        # decorator is not visible in this view; confirm upstream.
        self.checked_scopes.append(((o.type is not None) and self.checked_scopes[(- 1)]))
        (yield None)
        self.checked_scopes.pop()

    def is_checked_scope(self) -> bool:
        return self.checked_scopes[(- 1)]

    def visit_class_def(self, o: ClassDef) -> None:
        self.record_line(o.line, TYPE_PRECISE)
        for d in o.decorators:
            d.accept(self)
        o.defs.accept(self)

    def visit_type_application(self, o: TypeApplication) -> None:
        self.line = o.line
        for t in o.types:
            self.type(t)
        super().visit_type_application(o)

    def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
        """Record precision of explicit or inferred types on assignments."""
        self.line = o.line
        if (isinstance(o.rvalue, nodes.CallExpr) and isinstance(o.rvalue.analyzed, nodes.TypeVarExpr)):
            # TypeVar definitions are not interesting for precision stats
            return
        if o.type:
            self.type(o.type)
        elif (self.inferred and (not self.all_nodes)):
            # look up the inferred type of each assigned-to name
            for lvalue in o.lvalues:
                if isinstance(lvalue, nodes.TupleExpr):
                    items = lvalue.items
                else:
                    items = [lvalue]
                for item in items:
                    if (isinstance(item, RefExpr) and item.is_inferred_def):
                        if (self.typemap is not None):
                            self.type(self.typemap.get(item))
        super().visit_assignment_stmt(o)

    def visit_expression_stmt(self, o: ExpressionStmt) -> None:
        if isinstance(o.expr, (StrExpr, BytesExpr)):
            # docstring-like bare literals count as empty lines
            self.record_line(o.line, TYPE_EMPTY)
        else:
            super().visit_expression_stmt(o)

    def visit_pass_stmt(self, o: PassStmt) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_break_stmt(self, o: BreakStmt) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_continue_stmt(self, o: ContinueStmt) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_name_expr(self, o: NameExpr) -> None:
        if (o.fullname in ('builtins.None', 'builtins.True', 'builtins.False', 'builtins.Ellipsis')):
            # literal singletons are trivially precise in checked scopes
            self.record_precise_if_checked_scope(o)
        else:
            self.process_node(o)
            super().visit_name_expr(o)

    def visit_yield_from_expr(self, o: YieldFromExpr) -> None:
        if o.expr:
            o.expr.accept(self)

    def visit_call_expr(self, o: CallExpr) -> None:
        self.process_node(o)
        if o.analyzed:
            o.analyzed.accept(self)
        else:
            o.callee.accept(self)
            for a in o.args:
                a.accept(self)
            self.record_call_target_precision(o)

    def record_call_target_precision(self, o: CallExpr) -> None:
        """Record imprecision caused by the *callee's* argument types."""
        if ((not self.typemap) or (o.callee not in self.typemap)):
            return
        callee_type = get_proper_type(self.typemap[o.callee])
        if isinstance(callee_type, CallableType):
            self.record_callable_target_precision(o, callee_type)
        else:
            pass

    def record_callable_target_precision(self, o: CallExpr, callee: CallableType) -> None:
        """Flag the call line if any formal parameter it binds is Any/imprecise."""
        assert self.typemap
        typemap = self.typemap
        actual_to_formal = map_formals_to_actuals(o.arg_kinds, o.arg_names, callee.arg_kinds, callee.arg_names, (lambda n: typemap[o.args[n]]))
        for formals in actual_to_formal:
            for n in formals:
                formal = get_proper_type(callee.arg_types[n])
                if isinstance(formal, AnyType):
                    self.record_line(o.line, TYPE_ANY)
                elif is_imprecise(formal):
                    self.record_line(o.line, TYPE_IMPRECISE)

    def visit_member_expr(self, o: MemberExpr) -> None:
        self.process_node(o)
        super().visit_member_expr(o)

    def visit_op_expr(self, o: OpExpr) -> None:
        self.process_node(o)
        super().visit_op_expr(o)

    def visit_comparison_expr(self, o: ComparisonExpr) -> None:
        self.process_node(o)
        super().visit_comparison_expr(o)

    def visit_index_expr(self, o: IndexExpr) -> None:
        self.process_node(o)
        super().visit_index_expr(o)

    def visit_assignment_expr(self, o: AssignmentExpr) -> None:
        self.process_node(o)
        super().visit_assignment_expr(o)

    def visit_unary_expr(self, o: UnaryExpr) -> None:
        self.process_node(o)
        super().visit_unary_expr(o)

    def visit_str_expr(self, o: StrExpr) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_bytes_expr(self, o: BytesExpr) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_int_expr(self, o: IntExpr) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_float_expr(self, o: FloatExpr) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_complex_expr(self, o: ComplexExpr) -> None:
        self.record_precise_if_checked_scope(o)

    def visit_ellipsis(self, o: EllipsisExpr) -> None:
        self.record_precise_if_checked_scope(o)

    def process_node(self, node: Expression) -> None:
        """In all-nodes mode, attribute the node's mapped type to its line."""
        if self.all_nodes:
            if (self.typemap is not None):
                self.line = node.line
                self.type(self.typemap.get(node))

    def record_precise_if_checked_scope(self, node: Node) -> None:
        """Record a trivially-precise line, or unanalyzed/Any as appropriate."""
        if (isinstance(node, Expression) and self.typemap and (node not in self.typemap)):
            kind = TYPE_UNANALYZED
        elif self.is_checked_scope():
            kind = TYPE_PRECISE
        else:
            kind = TYPE_ANY
        self.record_line(node.line, kind)

    def type(self, t: (Type | None)) -> None:
        """Classify a type's precision for self.line and update type counters."""
        t = get_proper_type(t)
        if (not t):
            # an expression without a mapped type was never analyzed
            self.record_line(self.line, TYPE_UNANALYZED)
            return
        if (isinstance(t, AnyType) and is_special_form_any(t)):
            # special-form Anys (e.g. in type aliases) do not count against precision
            self.record_line(self.line, TYPE_PRECISE)
            return
        if isinstance(t, AnyType):
            self.log((' !! Any type around line %d' % self.line))
            self.num_any_exprs += 1
            self.record_line(self.line, TYPE_ANY)
        elif (((not self.all_nodes) and is_imprecise(t)) or (self.all_nodes and is_imprecise2(t))):
            self.log((' !! Imprecise type around line %d' % self.line))
            self.num_imprecise_exprs += 1
            self.record_line(self.line, TYPE_IMPRECISE)
        else:
            self.num_precise_exprs += 1
            self.record_line(self.line, TYPE_PRECISE)
        # classify the type itself and all its nested component types
        for typ in (get_proper_types(collect_all_inner_types(t)) + [t]):
            if isinstance(typ, AnyType):
                typ = get_original_any(typ)
                if is_special_form_any(typ):
                    continue
                self.type_of_any_counter[typ.type_of_any] += 1
                self.num_any_types += 1
                if (self.line in self.any_line_map):
                    self.any_line_map[self.line].append(typ)
                else:
                    self.any_line_map[self.line] = [typ]
            elif isinstance(typ, Instance):
                if typ.args:
                    if any((is_complex(arg) for arg in typ.args)):
                        self.num_complex_types += 1
                    else:
                        self.num_generic_types += 1
                else:
                    self.num_simple_types += 1
            elif isinstance(typ, FunctionLike):
                self.num_function_types += 1
            elif isinstance(typ, TupleType):
                if any((is_complex(item) for item in typ.items)):
                    self.num_complex_types += 1
                else:
                    self.num_tuple_types += 1
            elif isinstance(typ, TypeVarType):
                self.num_typevar_types += 1

    def log(self, string: str) -> None:
        self.output.append(string)

    def record_line(self, line: int, precision: int) -> None:
        # keep the best (max) precision observed for each line
        self.line_map[line] = max(precision, self.line_map.get(line, TYPE_EMPTY))
('iM_product_vect_batch')
def _iM_product_vect_batch(args, axes):
    """Batching rule for the `_iM_product_vect` primitive.

    Returns (result, batch_dim) where batch_dim is 0. For unbatched inputs
    (q with rank <= 2) the primitive is applied directly; otherwise it is
    applied per sample and the results are stacked along a new leading axis.
    """
    ((q, vect), size_batch) = check_batch_inputs(args, axes)
    if (len(q.shape) <= 2):
        # unbatched case: apply the primitive once
        return (_iM_product_vect_prim.bind(q, vect), 0)
    # PERF FIX: build all per-sample rows first and stack once, instead of
    # growing the result with a jnp.vstack per iteration (which was O(n^2)
    # in copies). The [None] keeps the original vstack row semantics.
    rows = [_iM_product_vect_prim.bind(q[i], vect[i])[None] for i in range(size_batch)]
    return (jnp.vstack(rows), 0)
class ProtocolClient(Protocol):
    """Client half of a queue-based request/response protocol.

    At most one request may be outstanding at a time; `_req_sent` holds the
    pending request (or None when idle).
    """

    _req_sent = None

    def __init__(self, request_queue, response_queue):
        self.request_queue = request_queue
        self.response_queue = response_queue
        self._req_sent = None

    def can_take_request(self):
        """Return True when no request is awaiting a response."""
        return (self._req_sent is None)

    def waiting_for_response(self):
        """Return True when a request has been sent but not yet served."""
        return (self._req_sent is not None)

    def request_sent(self, request=True):
        """Record `request` as the single outstanding request."""
        if (not self.can_take_request()):
            raise Exception('Protocol only supports one request in the Queue')
        self._req_sent = request

    def request_served(self, result=None):
        """Mark the outstanding request as served."""
        if (not self.waiting_for_response()):
            raise Exception('Expected no pending requests, but something got served', result)
        self._req_sent = None

    def discard_existing_request(self):
        """Block until the pending request's response arrives, then drop it."""
        if self.waiting_for_response():
            response = self.response_queue.get(block=True)
            self.request_served(response)

    def _check_can_send(self, verb):
        # DRY: shared guard for limit/pause/resume; the formatted message is
        # identical to the per-method messages it replaces.
        if (not self.can_take_request()):
            raise Exception('Can not `{}` while we are still waiting response for previous request'.format(verb))

    def _enqueue(self, request):
        # DRY: put the request on the queue and record it as outstanding.
        self.request_queue.put(request)
        self.request_sent(request)

    def request_limit(self, num_batches, limit_fn=None, worker_num_batches=None):
        """Send a LimitRequest; raises if a request is already pending."""
        self._check_can_send('limit')
        self._enqueue(communication.messages.LimitRequest(num_batches, limit_fn, worker_num_batches))

    def request_pause(self, pause_fn=None):
        """Send a PauseRequest; raises if a request is already pending."""
        self._check_can_send('pause')
        self._enqueue(communication.messages.PauseRequest(pause_fn))

    def request_resume(self, resume_fn=None):
        """Send a ResumeRequest; raises if a request is already pending."""
        self._check_can_send('resume')
        self._enqueue(communication.messages.ResumeRequest(resume_fn))

    def request_terminate(self):
        """Send a TerminateRequest, forcibly clearing any pending request first."""
        if (not self.can_take_request()):
            # terminate overrides whatever request is pending
            self._req_sent = None
        self._enqueue(communication.messages.TerminateRequest())
class TestLabelSmoothingCrossEntropyLoss(unittest.TestCase):
    """Tests for LabelSmoothingCrossEntropyLoss: construction, target
    smoothing, and loss values."""

    @staticmethod
    def _config(smoothing_param):
        # every test uses ignore_index=-1 and only varies the smoothing parameter
        return {'name': 'label_smoothing_cross_entropy', 'ignore_index': (- 1), 'smoothing_param': smoothing_param}

    def _build(self, smoothing_param):
        # build via the registry and confirm the concrete class
        crit = build_loss(self._config(smoothing_param))
        self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
        return crit

    def _assert_smoothing(self, crit, targets, num_classes, expected_valid, expected_smoothed):
        # check both stages: validity masking, then smoothing
        valid_targets = crit.compute_valid_targets(targets, num_classes)
        self.assertTrue(torch.allclose(valid_targets, expected_valid))
        smoothed_targets = crit.smooth_targets(valid_targets, num_classes)
        self.assertTrue(torch.allclose(smoothed_targets, expected_smoothed))

    def test_build_label_smoothing_cross_entropy(self):
        crit = self._build(0.1)
        self.assertEqual(crit._ignore_index, (- 1))

    def test_smoothing_one_hot_targets(self):
        crit = self._build(0.1)
        self._assert_smoothing(
            crit,
            torch.tensor([[0, 0, 0, 0, 1]]),
            5,
            torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0]]),
            torch.tensor([[(0.2 / 11), (0.2 / 11), (0.2 / 11), (0.2 / 11), (10.2 / 11)]]),
        )

    def test_smoothing_ignore_index_one_hot_targets(self):
        crit = self._build(0.5)
        self._assert_smoothing(
            crit,
            torch.tensor([[(- 1), 0, 0, 0, 1]]),
            5,
            torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0]]),
            torch.tensor([[(1 / 15), (1 / 15), (1 / 15), (1 / 15), (11 / 15)]]),
        )

    def test_smoothing_multilabel_one_hot_targets(self):
        crit = self._build(0.5)
        self._assert_smoothing(
            crit,
            torch.tensor([[1, 0, 0, 0, 1]]),
            5,
            torch.tensor([[1.0, 0.0, 0.0, 0.0, 1.0]]),
            torch.tensor([[(6 / 15), (1 / 15), (1 / 15), (1 / 15), (6 / 15)]]),
        )

    def test_smoothing_all_ones_one_hot_targets(self):
        crit = self._build(0.1)
        self._assert_smoothing(
            crit,
            torch.tensor([[1, 1, 1, 1]]),
            4,
            torch.tensor([[1.0, 1.0, 1.0, 1.0]]),
            torch.tensor([[0.25, 0.25, 0.25, 0.25]]),
        )

    def test_smoothing_mixed_one_hot_targets(self):
        crit = self._build(0.5)
        self._assert_smoothing(
            crit,
            torch.tensor([[1, 1, 1, 1, 1], [1, 0, 0, 0, 1]]),
            5,
            torch.tensor([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0]]),
            torch.tensor([[0.2, 0.2, 0.2, 0.2, 0.2], [(6 / 15), (1 / 15), (1 / 15), (1 / 15), (6 / 15)]]),
        )

    def test_smoothing_class_targets(self):
        crit = self._build(0.5)
        self._assert_smoothing(
            crit,
            torch.tensor([4, (- 1)]),
            5,
            torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]]),
            torch.tensor([[(1 / 15), (1 / 15), (1 / 15), (1 / 15), (11 / 15)], [0.2, 0.2, 0.2, 0.2, 0.2]]),
        )

    def test_unnormalized_label_smoothing_cross_entropy(self):
        # built directly via from_config rather than the build_loss registry
        crit = LabelSmoothingCrossEntropyLoss.from_config(self._config(0.5))
        outputs = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
        targets = torch.tensor([[0, 0, 0, 0, 1]])
        self.assertAlmostEqual(crit(outputs, targets).item(), 5.0, places=5)

    def test_ignore_index_label_smoothing_cross_entropy(self):
        crit = LabelSmoothingCrossEntropyLoss.from_config(self._config(0.2))
        outputs = torch.tensor([[0.0, 7.0]])
        targets = torch.tensor([[(- 1)]])
        self.assertAlmostEqual(crit(outputs, targets).item(), 3.0)

    def test_class_integer_label_smoothing_cross_entropy(self):
        crit = LabelSmoothingCrossEntropyLoss.from_config(self._config(0.2))
        outputs = torch.tensor([[1.0, 2.0], [0.0, 2.0]])
        targets = torch.tensor([[0], [1]])
        self.assertAlmostEqual(crit(outputs, targets).item(), 0.0)

    def test_deep_copy(self):
        crit = self._build(0.5)
        outputs = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
        targets = torch.tensor([[0, 0, 0, 0, 1]])
        crit(outputs, targets)
        crit2 = copy.deepcopy(crit)
        self.assertAlmostEqual(crit2(outputs, targets).item(), 5.0, places=5)
class Invite(models.Model):
    """Invitation of a user (or bare e-mail address) to join a project with a role."""

    # namespaced salt for token generation via salted_hmac
    key_salt = 'rdmo.projects.models.invite.Invite'
    project = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='invites', verbose_name=_('Project'), help_text=_('The project for this invite.'))
    # nullable: an invite may target an e-mail address with no account yet
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, verbose_name=_('User'), help_text=_('The user for this membership.'))
    email = models.EmailField(blank=True, verbose_name=_('E-mail'), help_text=_('The e-mail for this membership.'))
    role = models.CharField(max_length=12, choices=ROLE_CHOICES, verbose_name=_('Role'), help_text=_('The role for this invite.'))
    token = models.CharField(max_length=20, verbose_name=_('Token'), help_text=_('The token for this invite.'))
    timestamp = models.DateTimeField(verbose_name=_('Timestamp'), help_text=_('The timestamp for this invite.'))
    objects = InviteManager()

    class Meta():
        ordering = ('timestamp',)
        verbose_name = _('Invite')
        verbose_name_plural = _('Invites')

    def __str__(self):
        return f'{self.project.title} / {self.email} / {self.role}'

    def save(self, *args, **kwargs):
        # stamp creation time on first save
        if (self.timestamp is None):
            self.timestamp = now()
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        """Return the URL of the project this invite belongs to."""
        return reverse('project', kwargs={'pk': self.project.pk})

    def is_expired(self):
        """Return True when the invite is older than PROJECT_INVITE_TIMEOUT seconds.

        Never expires when the timeout setting is falsy.
        """
        if settings.PROJECT_INVITE_TIMEOUT:
            return ((now() - self.timestamp).total_seconds() > settings.PROJECT_INVITE_TIMEOUT)
        else:
            return False

    def make_token(self):
        """Derive and store a short token from the invite's hash value."""
        # [::2] halves the hex digest so it fits the 20-char token field
        self.token = salted_hmac(self.key_salt, self._make_hash_value()).hexdigest()[::2]

    def _make_hash_value(self):
        # token input: binds the token to project, e-mail, role and timestamp
        return (((str(self.project_id) + str(self.email)) + str(self.role)) + str(self.timestamp))
def pytest_addoption(parser: Parser) -> None:
    """Register command-line options and ini settings for Python test discovery."""
    group = parser.getgroup('general')
    # command-line flags for listing fixtures
    group.addoption('--fixtures', '--funcargs', action='store_true', dest='showfixtures', default=False, help="Show available fixtures, sorted by plugin appearance (fixtures with leading '_' are only shown with '-v')")
    group.addoption('--fixtures-per-test', action='store_true', dest='show_fixtures_per_test', default=False, help='Show fixtures per test')
    # ini settings controlling which files/classes/functions count as tests
    parser.addini('python_files', type='args', default=['test_*.py', '*_test.py'], help='Glob-style file patterns for Python test module discovery')
    parser.addini('python_classes', type='args', default=['Test'], help='Prefixes or glob names for Python test class discovery')
    parser.addini('python_functions', type='args', default=['test'], help='Prefixes or glob names for Python test function and method discovery')
    parser.addini('disable_test_id_escaping_and_forfeit_all_rights_to_community_support', type='bool', default=False, help='Disable string escape non-ASCII characters, might cause unwanted side effects(use at your own risk)')
def linear_from_cg_shape(self, x):
    """Repack a flat CG tensor `x` into an OrderedDict of 'weight'/'bias'
    torch Parameters matching this linear layer's layout."""
    params = OrderedDict()
    if self.bias is not None:
        # split along dim 0 at in_features (= self.weight.size(1)):
        # first chunk is the transposed weight, remainder is the bias
        w_part, b_part = torch.split(x, self.weight.size(1), dim=0)
        params['weight'] = torch.nn.Parameter(w_part.t())
        params['bias'] = torch.nn.Parameter(torch.squeeze(b_part))
    else:
        params['weight'] = torch.nn.Parameter(x)
    return params
def test_get_expansion():
    """get_expansion honours explicit values, defaults and class attributes,
    and rejects invalid inputs."""
    # an explicit expansion argument wins
    assert get_expansion(ViPNAS_Bottleneck, 2) == 2
    # default expansion for a known block type
    assert get_expansion(ViPNAS_Bottleneck) == 1

    class BlockWithAttr(nn.Module):
        expansion = 8

    # expansion is read off the class attribute
    assert get_expansion(BlockWithAttr) == 8

    # a non-integer expansion argument is rejected
    with pytest.raises(TypeError):
        get_expansion(ViPNAS_Bottleneck, '0')

    # a block class without an expansion attribute is rejected
    with pytest.raises(TypeError):
        class PlainBlock(nn.Module):
            pass
        get_expansion(PlainBlock)
class BaseTermination():
    """Base class for termination conditions holding a single threshold value.

    Subclasses must implement get_event().
    """

    def __init__(self, value):
        self.value = value

    def get_event(self, variables, step_value):
        """Subclass hook: produce the termination event for a step."""
        raise NotImplementedError

    def __eq__(self, other):
        # equal only to same-class instances with the same value
        return isinstance(other, self.__class__) and self.value == other.value
class TargetTask(TransferTask):
    """Transfer task for the target role of a payment.

    NOTE(review): the field(init=False, ...) usage indicates this is a
    dataclass whose @dataclass decorator is not visible in this view --
    confirm upstream.
    """

    # role is fixed for this task type, not per-instance
    role: ClassVar[TransferRole] = TransferRole.TARGET
    # derived from canonical_identifier in __post_init__, hence init=False
    token_network_address: TokenNetworkAddress = field(init=False, repr=False)
    canonical_identifier: CanonicalIdentifier
    target_state: TargetTransferState = field(repr=False)

    def __post_init__(self) -> None:
        self.token_network_address = self.canonical_identifier.token_network_address

    def channel_identifier(self) -> ChannelID:
        # NOTE(review): reads like a @property in the upstream source; the
        # decorator is not visible here -- confirm before calling style.
        return self.canonical_identifier.channel_identifier
class QlOsPath():
def __init__(self, rootfs: str, cwd: str, emulos: QL_OS) -> None:
nt_path_os = (QL_OS.WINDOWS, QL_OS.DOS)
posix_path_os = QL_OS_POSIX
self._rootfs_path = Path(rootfs).resolve(strict=True)
if (emulos in nt_path_os):
self.PureVirtualPath = PureWindowsPath
elif (emulos in posix_path_os):
self.PureVirtualPath = PurePosixPath
else:
raise ValueError(f'unexpected os type: {emulos}')
self.cwd = cwd
self.transform_to_relative_path = self.virtual_abspath
self.transform_to_real_path = self.virtual_to_host_path
def __strip_parent_refs(path: AnyPurePath) -> AnyPurePath:
if path.parts:
pardir = '..'
while (path.parts[0] == pardir):
path = path.relative_to(pardir)
return path
def root(self) -> str:
return str(self._cwd_anchor)
def cwd(self) -> str:
return str((self._cwd_anchor / self._cwd_vpath))
def cwd(self, virtpath: str) -> None:
vpath = self.PureVirtualPath(virtpath)
if (not vpath.is_absolute()):
raise ValueError(f'current working directory must be an absolute path: {virtpath}')
cwd_anchor = self.PureVirtualPath(vpath.anchor)
cwd_vpath = vpath.relative_to(cwd_anchor)
cwd_vpath = QlOsPath.__strip_parent_refs(cwd_vpath)
self._cwd_anchor = cwd_anchor
self._cwd_vpath = cwd_vpath
def __virtual_abspath(self, virtpath: Union[(str, AnyPurePath)]) -> AnyPurePath:
vpath = self.PureVirtualPath(virtpath)
if vpath.is_absolute():
return vpath
absvpath = (self._cwd_vpath / vpath)
absvpath = QlOsPath.__strip_parent_refs(absvpath)
return (self._cwd_anchor / absvpath)
def __resolved_vsymlink(self, basepath: Path, name: str):
fullpath = ((self._rootfs_path / basepath) / name)
vpath = None
if fullpath.is_symlink():
resolved = fullpath.resolve(strict=False)
try:
vpath = (self._cwd_anchor / resolved.relative_to(self._rootfs_path))
except ValueError:
vpath = resolved
return vpath
def __virtual_resolve(self, virtpath: Union[(str, AnyPurePath)]) -> AnyPurePath:
vpath = self.PureVirtualPath(virtpath)
if (not vpath.is_absolute()):
vpath = self.__virtual_abspath(vpath)
acc_hpath = Path()
acc_vpath = self.PureVirtualPath(vpath.anchor)
vpath = vpath.relative_to(vpath.anchor)
for part in vpath.parts:
if (part == '..'):
acc_hpath = acc_hpath.parent
acc_vpath = acc_vpath.parent
else:
vtemp = self.__resolved_vsymlink(acc_hpath, part)
if (vtemp is None):
acc_hpath = (acc_hpath / part)
acc_vpath = (acc_vpath / part)
else:
new_vpath = (acc_vpath / vtemp)
vres = self.__virtual_resolve(new_vpath)
acc_hpath = Path(vres)
acc_vpath = vres
return acc_vpath
def __virtual_to_host_path(self, virtpath: Union[(str, AnyPurePath)]) -> Path:
absvpath = self.__virtual_abspath(virtpath)
vpath = absvpath.relative_to(absvpath.anchor)
return (self._rootfs_path / vpath)
def __is_safe_host_path(self, hostpath: Path, strict: bool=False) -> bool:
hostpath = hostpath.resolve(strict=strict)
try:
_ = hostpath.relative_to(self._rootfs_path)
except ValueError:
return False
else:
return True
def host_to_virtual_path(self, hostpath: str) -> str:
resolved = Path(hostpath).resolve(strict=False)
virtpath = (self._cwd_anchor / resolved.relative_to(self._rootfs_path))
return str(virtpath)
def is_virtual_abspath(self, virtpath: str) -> bool:
vpath = self.PureVirtualPath(virtpath)
return vpath.is_absolute()
def virtual_abspath(self, virtpath: str) -> str:
absvpath = self.__virtual_abspath(virtpath)
return str(absvpath)
def virtual_to_host_path(self, virtpath: str) -> str:
    """Resolve a virtual path (following guest symlinks) and map it to the host."""
    resolved = self.__virtual_resolve(virtpath)
    return str(self.__virtual_to_host_path(resolved))
def is_safe_host_path(self, hostpath: str) -> bool:
    """Public wrapper: check rootfs containment without requiring existence."""
    return self.__is_safe_host_path(Path(hostpath), strict=False)
@staticmethod
def __host_casefold_path(hostpath: str) -> Optional[str]:
    """Case-insensitively match *hostpath* against the host filesystem.

    Walks the path one component at a time, replacing each component with
    the first on-disk entry whose casefolded name matches. Returns the
    matched path as a string, or None when some component has no match.

    Fix: this helper takes no ``self`` and is invoked through the class
    (see host_casefold_path), so it must be a @staticmethod — the
    decorator was missing.
    """
    p = PurePosixPath(hostpath)
    norm = Path(p.anchor)
    for elem in p.relative_to(norm).parts:
        folded = elem.casefold()
        try:
            # First directory entry matching case-insensitively wins.
            norm = next((entry for entry in norm.iterdir() if (entry.name.casefold() == folded)))
        except StopIteration:
            return None
    return str(norm)
def host_casefold_path(self, hostpath: str) -> Optional[str]:
    """Casefold host paths only when emulating a case-insensitive (Windows) guest."""
    if self.PureVirtualPath is not PureWindowsPath:
        # POSIX-style guests are case-sensitive; keep the path untouched.
        return hostpath
    return QlOsPath.__host_casefold_path(hostpath)
class SpiceLexer(RegexLexer):
    """Pygments lexer for the Spice programming language (``*.spice``)."""
    name = 'Spice'
    # Fix: the url literal was truncated to an unterminated string; restored
    # from the upstream Pygments SpiceLexer definition.
    url = 'https://www.spicelang.com'
    filenames = ['*.spice']
    aliases = ['spice', 'spicelang']
    mimetypes = ['text/x-spice']
    version_added = '2.11'
    tokens = {
        'root': [
            # whitespace and line continuations
            ('\\n', Whitespace),
            ('\\s+', Whitespace),
            ('\\\\\\n', Text),
            # comments and doc comments
            ('//(.*?)\\n', Comment.Single),
            ('/(\\\\\\n)?[*]{2}(.|\\n)*?[*](\\\\\\n)?/', String.Doc),
            ('/(\\\\\\n)?[*](.|\\n)*?[*](\\\\\\n)?/', Comment.Multiline),
            # keywords
            ('(import|as)\\b', Keyword.Namespace),
            ('(f|p|type|struct|interface|enum|alias|operator)\\b', Keyword.Declaration),
            (words(('if', 'else', 'for', 'foreach', 'do', 'while', 'break', 'continue', 'return', 'assert', 'unsafe', 'ext'), suffix='\\b'), Keyword),
            (words(('const', 'signed', 'unsigned', 'inline', 'public', 'heap'), suffix='\\b'), Keyword.Pseudo),
            (words(('new', 'switch', 'case', 'yield', 'stash', 'pick', 'sync', 'class'), suffix='\\b'), Keyword.Reserved),
            ('(true|false|nil)\\b', Keyword.Constant),
            (words(('double', 'int', 'short', 'long', 'byte', 'char', 'string', 'bool', 'dyn'), suffix='\\b'), Keyword.Type),
            (words(('printf', 'sizeof', 'alignof', 'len', 'panic'), suffix='\\b(\\()'), bygroups(Name.Builtin, Punctuation)),
            # numeric literals
            ('[-]?[0-9]*[.][0-9]+([eE][+-]?[0-9]+)?', Number.Double),
            ('0[bB][01]+[slu]?', Number.Bin),
            ('0[oO][0-7]+[slu]?', Number.Oct),
            ('0[xXhH][0-9a-fA-F]+[slu]?', Number.Hex),
            ('(0[dD])?[0-9]+[slu]?', Number.Integer),
            # string and char literals
            ('"(|\\\\[^\\\\]|[^"\\\\])*"', String),
            ("\\'(|\\\\[^\\\\]|[^\\'\\\\])\\'", String.Char),
            # operators and punctuation
            ('<<=|>>=|<<|>>|<=|>=|\\+=|-=|\\*=|/=|\\%=|\\|=|&=|\\^=|&&|\\|\\||&|\\||\\+\\+|--|\\%|\\^|\\~|==|!=|->|::|[.]{3}|#!|#|[+\\-*/&]', Operator),
            ('[|<>=!()\\[\\]{}.,;:\\?]', Punctuation),
            ('[^\\W\\d]\\w*', Name.Other),
        ],
    }
class ModuleListModel(nn.Module):
    """Toy CNN that wires layers out of declaration order from a ModuleList
    and a Sequential, finishing with a fully connected classifier."""

    def __init__(self, num_classes=3):
        super(ModuleListModel, self).__init__()
        self.mod_list = nn.ModuleList([
            nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 8, kernel_size=2, stride=2, padding=2),
            nn.ReLU(),
            nn.Conv2d(3, 16, kernel_size=2, stride=2, padding=2, bias=False),
        ])
        self.seq_list = nn.Sequential(
            nn.Conv2d(8, 4, kernel_size=2, stride=2, padding=2),
            nn.ReLU(),
            nn.BatchNorm2d(16),
        )
        self.fc = nn.Linear(64, num_classes)

    def forward(self, *inputs):
        # Layers are applied in a hand-picked order, not list order:
        # conv(3->16) -> BN(16) -> ReLU -> maxpool -> conv(16->8) -> conv(8->4)
        out = self.mod_list[4](inputs[0])
        out = self.seq_list[2](out)
        out = self.mod_list[1](out)
        out = self.mod_list[0](out)
        out = self.mod_list[2](out)
        out = self.seq_list[0](out)
        out = out.view(out.size(0), (- 1))
        return self.fc(out)
def get_all_binary_label(num_label, class_range):
    """Return an array of fixed-width binary codes for labels 0..num_label-1.

    Each row is the binary representation of the label index, zero-padded
    on the left to the code length implied by class_range.
    """
    coding_len = get_code_len(class_range)
    # Adding 10**coding_len forces an extra leading digit, so slicing [1:]
    # yields exactly coding_len zero-padded binary digits.
    pad = (10 ** coding_len)
    codes = []
    for label in range(num_label):
        padded = int(bin(label).split('0b')[(- 1)]) + pad
        codes.append(np.array(list(str(padded)[1:]), np.int32))
    return np.array(codes)
def check(app: TestApp, browser: Browser='firefox', html_report_dir: Optional[str]=None, interpreter_log_file: Optional[str]=None, driver_log_file: Optional[str]=None, headful: bool=False, stdout: TextIO=sys.stdout, stderr: TextIO=sys.stderr):
    """Run the quickstrom checker against *app* and return its result."""
    include_flags = [arg for path in include_paths for arg in ['-I', path]]

    def optional(name, value):
        # Emit a flag/value pair only when the value is present.
        return [name, value] if value is not None else []

    args = (
        ['quickstrom']
        + include_flags
        + ['--log-level=debug', 'check', app.module, app.origin_url(), f'--browser={browser}', '--reporter=console', '--capture-screenshots', ('--headful' if headful else '--headless')]
        + optional('--reporter', ('html' if (html_report_dir is not None) else None))
        + optional('--html-report-directory', html_report_dir)
        + optional('--interpreter-log-file', interpreter_log_file)
        + optional('--driver-log-file', driver_log_file)
    )
    click.echo(f"Command: {' '.join(args)}")
    click.echo('')
    proc = subprocess.Popen(args, stdout=stdout, stderr=stderr)
    return result_from_exit_code(proc.wait())
.unit()
def test_sort_tasks_topologically(dag):
sorter = TopologicalSorter.from_dag(dag)
topo_ordering = []
while sorter.is_active():
task_name = sorter.get_ready()[0]
topo_ordering.append(task_name)
sorter.done(task_name)
topo_names = [dag.nodes[sig]['task'].name for sig in topo_ordering]
assert (topo_names == [f'.::{i}' for i in range(5)]) |
class OrjsonConverter(Converter):
    """cattrs Converter wired to orjson for (de)serialization."""

    def dumps(self, obj: Any, unstructure_as: Any=None, **kwargs: Any) -> bytes:
        """Unstructure *obj* and serialize it to JSON bytes."""
        unstructured = self.unstructure(obj, unstructure_as=unstructure_as)
        return dumps(unstructured, **kwargs)

    def loads(self, data: Union[(bytes, bytearray, memoryview, str)], cl: Type[T]) -> T:
        """Deserialize JSON *data* and structure it as an instance of *cl*."""
        raw = loads(data)
        return self.structure(raw, cl)
.skipif(((((pg.Qt.QT_LIB == 'PySide2') and pg.Qt.QtVersion.startswith('5.15')) or (pg.Qt.QT_LIB == 'PySide6')) and (sys.version_info >= (3, 9))), reason='Unknown Issue')
.usefixtures('tmp_module')
def test_reload(tmp_module):
mod = os.path.join(tmp_module, 'reload_test_mod.py')
print('\nRELOAD FILE:', mod)
with open(mod, 'w') as file_:
file_.write(code.format(path_repr=pgpath_repr, msg='C.fn() Version1'))
import reload_test_mod
print('RELOAD MOD:', reload_test_mod.__file__)
c = reload_test_mod.C()
c.sig.connect(c.fn)
v1 = (reload_test_mod.C, reload_test_mod.C.sig, reload_test_mod.C.fn, c.sig, c.fn, c.fn.__func__)
with open(mod, 'w') as file_:
file_.write(code.format(path_repr=pgpath_repr, msg='C.fn() Version 2'))
time.sleep(1.1)
_ = pg.reload.reloadAll(tmp_module, debug=True)
v2 = (reload_test_mod.C, reload_test_mod.C.sig, reload_test_mod.C.fn, c.sig, c.fn, c.fn.__func__)
oldcfn = pg.reload.getPreviousVersion(c.fn)
if (oldcfn is None):
raise Exception('Function did not reload. (This can happen when using py.test with assertion rewriting; use --assert=plain for this test.)')
assert (oldcfn.__func__ is v1[2])
assert (oldcfn.__self__ is c)
with open(mod, 'w') as file_:
file_.write(code.format(path_repr=pgpath_repr, msg='C.fn() Version2'))
time.sleep(1.1)
_ = pg.reload.reloadAll(tmp_module, debug=True)
_ = (reload_test_mod.C, reload_test_mod.C.sig, reload_test_mod.C.fn, c.sig, c.fn, c.fn.__func__)
cfn1 = pg.reload.getPreviousVersion(c.fn)
cfn2 = pg.reload.getPreviousVersion(cfn1)
assert (cfn1.__func__ is v2[2])
assert (cfn2.__func__ is v1[2])
assert (cfn1.__self__ is c)
assert (cfn2.__self__ is c)
pg.functions.disconnect(c.sig, c.fn) |
def test_membership_stacked_nested_last(benchmark):
    # Benchmark membership testing of the deepest nested key of a stacked
    # TensorDict. First walk down the nesting to assemble the full key tuple.
    td = big_nested_stacked_td()[0][0]
    subtd = td
    key = []
    while True:
        for (_key, value) in subtd.items():
            # Keys encountered before the first nested collection at each
            # level are appended too (original behavior preserved as-is).
            key += [_key]
            if is_tensor_collection(value):
                # Descend into the first nested collection found.
                subtd = value
                break
        else:
            # No nested collection at this level: stop the walk entirely
            # (this `break` exits the `while` loop).
            subtd = None
            break
        if (subtd is None):
            # Defensive guard; subtd is never None on this path.
            break
    key = tuple(key)
    # The actual measurement: membership of the nested key tuple.
    benchmark((lambda : (key in td.keys(True))))
def test_init_factory_alias():
    """Tests pybind11 factory constructors that can return alias instances,
    tracking live-instance counts for both the base and the alias type."""
    cstats = [m.TestFactory6.get_cstats(), m.TestFactory6.get_alias_cstats()]
    cstats[0].alive()
    n_inst = ConstructorStats.detail_reg_inst()
    # Base tag: plain construction, no alias involved.
    a = m.TestFactory6(tag.base, 1)
    assert (a.get() == 1)
    assert (not a.has_alias())
    # Alias tag: the factory creates the alias type.
    b = m.TestFactory6(tag.alias, 'hi there')
    assert (b.get() == 8)
    assert b.has_alias()
    c = m.TestFactory6(tag.alias, 3)
    assert (c.get() == 3)
    assert c.has_alias()
    d = m.TestFactory6(tag.alias, tag.pointer, 4)
    assert (d.get() == 4)
    assert d.has_alias()
    e = m.TestFactory6(tag.base, tag.pointer, 5)
    assert (e.get() == 5)
    assert (not e.has_alias())
    f = m.TestFactory6(tag.base, tag.alias, tag.pointer, 6)
    assert (f.get() == 6)
    assert f.has_alias()
    # Six objects created; four of them carry an alias.
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 6))
    assert ([i.alive() for i in cstats] == [6, 4])
    del a, b, e
    assert ([i.alive() for i in cstats] == [3, 3])
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 3))
    del f, c, d
    assert ([i.alive() for i in cstats] == [0, 0])
    assert (ConstructorStats.detail_reg_inst() == n_inst)
    # A Python subclass must always be constructed through the alias.
    class MyTest(m.TestFactory6):
        def __init__(self, *args):
            m.TestFactory6.__init__(self, *args)
        def get(self):
            # Shift by -5 so virtual dispatch through the alias is observable.
            return ((- 5) + m.TestFactory6.get(self))
    z = MyTest(tag.base, 123)
    assert (z.get() == 118)
    assert z.has_alias()
    y = MyTest(tag.alias, 'why hello!')
    assert (y.get() == 5)
    assert y.has_alias()
    x = MyTest(tag.base, tag.pointer, 47)
    assert (x.get() == 42)
    assert x.has_alias()
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 3))
    assert ([i.alive() for i in cstats] == [3, 3])
    del x, y, z
    assert ([i.alive() for i in cstats] == [0, 0])
    assert (ConstructorStats.detail_reg_inst() == n_inst)
    assert ([i.values() for i in cstats] == [['1', '8', '3', '4', '5', '6', '123', '10', '47'], ['hi there', '3', '4', '6', 'move', '123', 'why hello!', 'move', '47']])
class HAM10000DatasetFast(Dataset):
    """In-memory HAM10000 skin-lesion segmentation dataset.

    Loads the prepared tensors up front via PrepareHAM10000 and slices them
    into train ('tr'), validation ('vl') and test ('te') splits. Samples
    can be one-hot encoded, augmented, and extended with boundary-mask /
    boundary-distance channels.
    """

    def __init__(self, mode, data_dir=None, one_hot=True, image_size=224, aug=None, aug_empty=None, transform=None, img_transform=None, msk_transform=None, add_boundary_mask=False, add_boundary_dist=False, logger=None, **kwargs):
        # mode: 'tr' (train), 'vl' (validation) or 'te' (test).
        self.print = (logger.info if logger else print)
        self.data_dir = (data_dir if data_dir else '/path/to/datasets/HAM10000')
        self.one_hot = one_hot
        self.image_size = image_size
        self.aug = aug  # augmentation pipeline used for training samples
        self.aug_empty = aug_empty  # pipeline applied when aug is skipped
        self.transform = transform
        self.img_transform = img_transform
        self.msk_transform = msk_transform
        self.mode = mode
        self.add_boundary_mask = add_boundary_mask
        self.add_boundary_dist = add_boundary_dist
        data_preparer = PrepareHAM10000(data_dir=self.data_dir, image_size=self.image_size, logger=logger)
        data = data_preparer.get_data()
        (X, Y) = (data['x'], data['y'])
        X = torch.tensor(X)
        Y = torch.tensor(Y)
        # data_scale shrinks only the training split; val/test stay fixed.
        if (kwargs.get('data_scale', 'full') == 'full'):
            (tr_length, vl_length) = (7200, 1800)
        elif (kwargs.get('data_scale') == 'medium'):
            (tr_length, vl_length) = ((7200 // 5), 1800)
        elif (kwargs.get('data_scale') == 'lite'):
            (tr_length, vl_length) = ((7200 // 10), 1800)
        elif (kwargs.get('data_scale') == 'ultra-lite'):
            (tr_length, vl_length) = ((7200 // 20), 1800)
        else:
            raise ValueError(f"the value of <data_scale> param ({kwargs.get('data_scale')}) is dataset is invalid. valid in (full, medium, lite, ultra-lite)")
        # Fixed split layout: [0:7200) train pool, [7200:9000) val, rest test.
        if (mode == 'tr'):
            self.imgs = X[:tr_length]
            self.msks = Y[:tr_length]
        elif (mode == 'vl'):
            self.imgs = X[7200:(7200 + vl_length)]
            self.msks = Y[7200:(7200 + vl_length)]
        elif (mode == 'te'):
            self.imgs = X[(7200 + vl_length):]
            self.msks = Y[(7200 + vl_length):]
        else:
            raise ValueError()

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        """Return a dict with keys 'image', 'mask' and 'id' for sample *idx*."""
        data_id = idx
        img = self.imgs[idx]
        msk = self.msks[idx]
        if self.one_hot:
            if (len(np.unique(msk)) > 1):
                # Min-max normalize first so the labels become {0, 1}.
                msk = ((msk - msk.min()) / (msk.max() - msk.min()))
            msk = F.one_hot(torch.squeeze(msk).to(torch.int64))
            msk = torch.moveaxis(msk, (- 1), 0).to(torch.float)
        if self.aug:
            if ((self.mode == 'tr') and (np.random.rand() > 0.5)):
                # Training augmentation, applied 50% of the time on HWC uint8 copies.
                img_ = np.uint8(torch.moveaxis((img * 255), 0, (- 1)).detach().numpy())
                msk_ = np.uint8(torch.moveaxis((msk * 255), 0, (- 1)).detach().numpy())
                augmented = self.aug(image=img_, mask=msk_)
                img = torch.moveaxis(torch.tensor(augmented['image'], dtype=torch.float32), (- 1), 0)
                msk = torch.moveaxis(torch.tensor(augmented['mask'], dtype=torch.float32), (- 1), 0)
            elif self.aug_empty:
                # Otherwise run the "empty" (no-op/resize-style) pipeline.
                img_ = np.uint8(torch.moveaxis((img * 255), 0, (- 1)).detach().numpy())
                msk_ = np.uint8(torch.moveaxis((msk * 255), 0, (- 1)).detach().numpy())
                augmented = self.aug_empty(image=img_, mask=msk_)
                img = torch.moveaxis(torch.tensor(augmented['image'], dtype=torch.float32), (- 1), 0)
                msk = torch.moveaxis(torch.tensor(augmented['mask'], dtype=torch.float32), (- 1), 0)
            # Guard against NaNs introduced by augmentation, then rescale.
            img = img.nan_to_num(127)
            img = normalize(img)
            msk = msk.nan_to_num(0)
            msk = normalize(msk)
        if (self.add_boundary_mask or self.add_boundary_dist):
            # Both extras derive from an HWC uint8 view of the mask.
            msk_ = np.uint8(torch.moveaxis((msk * 255), 0, (- 1)).detach().numpy())
            if self.add_boundary_mask:
                # Extra channel: normalized Canny edge map of the mask.
                boundary_mask = calc_edge(msk_, mode='canny')
                boundary_mask = np_normalize(boundary_mask)
                msk = torch.concatenate([msk, torch.tensor(boundary_mask).unsqueeze(0)], dim=0)
            if self.add_boundary_dist:
                # Extra channel: normalized L2 distance map from the boundary.
                boundary_mask = (boundary_mask if self.add_boundary_mask else calc_edge(msk_, mode='canny'))
                distance_map = calc_distance_map(boundary_mask, mode='l2')
                distance_map = np_normalize(distance_map)
                msk = torch.concatenate([msk, torch.tensor(distance_map).unsqueeze(0)], dim=0)
        if self.img_transform:
            img = self.img_transform(img)
        if self.msk_transform:
            msk = self.msk_transform(msk)
        # Final NaN safety net before handing the sample to the model.
        img = img.nan_to_num(0.5)
        msk = msk.nan_to_num((- 1))
        sample = {'image': img, 'mask': msk, 'id': data_id}
        return sample
class PlatformDirsABC(ABC):
    """Abstract base declaring the platform-dirs API.

    Concrete platform implementations provide the ``*_dir`` properties;
    the ``*_path`` properties wrap them in :class:`pathlib.Path` objects.

    Fix: the ``*_dir`` methods had no bodies and the ``*_path`` methods
    read ``self.user_data_dir`` etc. without calling them — the
    ``@property`` / ``@abstractmethod`` decorators were missing and have
    been restored (matching the upstream platformdirs API).
    """

    def __init__(self, appname: (str | None)=None, appauthor: ((str | None) | Literal[False])=None, version: (str | None)=None, roaming: bool=False, multipath: bool=False, opinion: bool=True):
        # appname: name of the application appended to the base directory
        self.appname = appname
        # appauthor: author/company name; False disables the author segment
        self.appauthor = appauthor
        # version: optional version segment appended after the app name
        self.version = version
        self.roaming = roaming
        self.multipath = multipath
        self.opinion = opinion

    def _append_app_name_and_version(self, *base: str) -> str:
        """Join base[0] with base[1:], appname and version (when set)."""
        params = list(base[1:])
        if self.appname:
            params.append(self.appname)
        if self.version:
            params.append(self.version)
        return os.path.join(base[0], *params)

    @property
    @abstractmethod
    def user_data_dir(self) -> str:
        """:return: data directory tied to the user"""

    @property
    @abstractmethod
    def site_data_dir(self) -> str:
        """:return: data directory shared by users"""

    @property
    @abstractmethod
    def user_config_dir(self) -> str:
        """:return: config directory tied to the user"""

    @property
    @abstractmethod
    def site_config_dir(self) -> str:
        """:return: config directory shared by users"""

    @property
    @abstractmethod
    def user_cache_dir(self) -> str:
        """:return: cache directory tied to the user"""

    @property
    @abstractmethod
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user"""

    @property
    @abstractmethod
    def user_log_dir(self) -> str:
        """:return: log directory tied to the user"""

    @property
    @abstractmethod
    def user_documents_dir(self) -> str:
        """:return: documents directory tied to the user"""

    @property
    @abstractmethod
    def user_runtime_dir(self) -> str:
        """:return: runtime directory tied to the user"""

    @property
    def user_data_path(self) -> Path:
        """:return: data path tied to the user"""
        return Path(self.user_data_dir)

    @property
    def site_data_path(self) -> Path:
        """:return: data path shared by users"""
        return Path(self.site_data_dir)

    @property
    def user_config_path(self) -> Path:
        """:return: config path tied to the user"""
        return Path(self.user_config_dir)

    @property
    def site_config_path(self) -> Path:
        """:return: config path shared by users"""
        return Path(self.site_config_dir)

    @property
    def user_cache_path(self) -> Path:
        """:return: cache path tied to the user"""
        return Path(self.user_cache_dir)

    @property
    def user_state_path(self) -> Path:
        """:return: state path tied to the user"""
        return Path(self.user_state_dir)

    @property
    def user_log_path(self) -> Path:
        """:return: log path tied to the user"""
        return Path(self.user_log_dir)

    @property
    def user_documents_path(self) -> Path:
        """:return: documents path tied to the user"""
        return Path(self.user_documents_dir)

    @property
    def user_runtime_path(self) -> Path:
        """:return: runtime path tied to the user"""
        return Path(self.user_runtime_dir)
class Migration(migrations.Migration):
    """Reset custom model managers on the benefit-configuration models."""

    dependencies = [('sponsors', '0095_auto__2025')]

    operations = [
        migrations.AlterModelManagers(
            name='benefitfeatureconfiguration',
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('non_polymorphic', django.db.models.manager.Manager()),
            ],
        ),
        migrations.AlterModelManagers(name='emailtargetableconfiguration', managers=[]),
        migrations.AlterModelManagers(name='logoplacementconfiguration', managers=[]),
        migrations.AlterModelManagers(name='providedfileassetconfiguration', managers=[]),
        migrations.AlterModelManagers(name='providedtextassetconfiguration', managers=[]),
        migrations.AlterModelManagers(name='requiredimgassetconfiguration', managers=[]),
        migrations.AlterModelManagers(name='requiredresponseassetconfiguration', managers=[]),
        migrations.AlterModelManagers(name='requiredtextassetconfiguration', managers=[]),
        migrations.AlterModelManagers(name='tieredbenefitconfiguration', managers=[]),
    ]
def test_files_from_regex_2(*args, **kwargs):
    """Glob for *co2* .par fixtures and compare against the known file list."""
    pattern = join(TEST_FOLDER_PATH, '*co2*.par')
    found = get_files_from_regex(pattern)
    expected = ['geisa_CO2_fragment.par', 'hitran_CO2_fragment.par', 'hitran_co2_626_bandhead_4165_4200nm.par']
    expected.sort()
    found.sort()
    # Compare basenames so the check is independent of the folder path.
    for idx in range(len(expected)):
        found[idx] = basename(found[idx])
        assert found[idx] == expected[idx]
def state_with_pickup(state: State, pickup: PickupEntry) -> State:
    """Return a copy of *state* with *pickup* applied.

    The copy keeps a back-reference to the original state; if the pickup
    raised the maximum energy, the new state is healed to full.
    """
    next_state = state.copy()
    next_state.previous_state = state
    add_pickup_to_state(next_state, pickup)
    # An energy-capacity upgrade also refills energy.
    if next_state.maximum_energy > state.maximum_energy:
        next_state.energy = next_state.maximum_energy
    return next_state
class Identity(nn.Module):
    """Shortcut branch: pass-through, or 1x1 conv + BN when shape changes."""

    def __init__(self, inp, oup, stride):
        super(Identity, self).__init__()
        needs_projection = (stride != 1) or (inp != oup)
        if needs_projection:
            # Match channel count / spatial size with a strided 1x1 conv.
            self.downsample = nn.Sequential(
                nn.Conv2d(inp, oup, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            self.downsample = None

    def forward(self, x):
        return x if self.downsample is None else self.downsample(x)
class MeanEllipticalSlice(EllipticalSliceSampler):
    """Elliptical slice sampler for a Gaussian prior with non-zero mean.

    The problem is demeaned: the initial point and the prior draws are
    shifted by the prior mean, the log-density is evaluated back in the
    original space, and the mean is re-added after sampling.
    """

    def __init__(self, f_init, dist, lnpdf, nsamples, pdf_params=()):
        mu = dist.mean
        # Evaluate the target density at the un-demeaned location.
        demeaned_lnpdf = (lambda g: lnpdf((g + mu), *pdf_params))
        prior_draws = dist.sample(sample_shape=torch.Size((nsamples,))).t()
        super(MeanEllipticalSlice, self).__init__((f_init - mu), (prior_draws - mu.unsqueeze(1)), demeaned_lnpdf, nsamples, pdf_params=())
        self.mean_vector = mu

    def run(self):
        """Run the sampler, then shift the samples back by the prior mean."""
        (self.f_sampled, self.ell) = super().run()
        self.f_sampled = (self.f_sampled + self.mean_vector.unsqueeze(1))
        return (self.f_sampled, self.ell)
def test_activate_activates_non_existing_virtualenv_no_envs_file(tmp_path: Path, manager: EnvManager, poetry: Poetry, config: Config, mocker: MockerFixture, venv_name: str, venv_flags_default: dict[(str, bool)]) -> None:
    # Activating a Python version with no existing venv and no envs.toml must
    # build a fresh virtualenv and record it in a newly created envs.toml.
    if ('VIRTUAL_ENV' in os.environ):
        # A leaked VIRTUAL_ENV from the surrounding environment would change behavior.
        del os.environ['VIRTUAL_ENV']
    config.merge({'virtualenvs': {'path': str(tmp_path)}})
    # Pretend any requested interpreter exists under /usr/bin.
    mocker.patch('shutil.which', side_effect=(lambda py: f'/usr/bin/{py}'))
    mocker.patch('subprocess.check_output', side_effect=check_output_wrapper())
    m = mocker.patch('poetry.utils.env.EnvManager.build_venv', side_effect=build_venv)
    env = manager.activate('python3.7')
    # The venv must be built in the configured path with the right flags/prompt.
    m.assert_called_with((tmp_path / f'{venv_name}-py3.7'), executable=Path('/usr/bin/python3.7'), flags=venv_flags_default, prompt='simple-project-py3.7')
    envs_file = TOMLFile((tmp_path / 'envs.toml'))
    assert envs_file.exists()
    envs: dict[(str, Any)] = envs_file.read()
    assert (envs[venv_name]['minor'] == '3.7')
    assert (envs[venv_name]['patch'] == '3.7.1')
    assert (env.path == (tmp_path / f'{venv_name}-py3.7'))
    assert (env.base == Path('/usr'))
class UpdateGrantInput(BaseGrantInput):
    """Input payload for updating an existing grant request.

    NOTE(review): the fields use strawberry types, so this class is
    presumably registered with a @strawberry.input-style decorator that is
    not visible here — confirm upstream.
    """
    instance: strawberry.ID  # ID of the grant being updated
    name: str
    full_name: str
    conference: strawberry.ID  # conference the grant applies to
    age_group: AgeGroup
    gender: str
    occupation: Occupation
    grant_type: GrantType
    python_usage: str
    been_to_other_events: str
    community_contribution: str
    interested_in_volunteering: InterestedInVolunteering
    needs_funds_for_travel: bool
    need_visa: bool
    need_accommodation: bool
    why: str
    notes: str
    travelling_from: str
    website: str
    twitter_handle: str
    github_handle: str
    linkedin_url: str
    mastodon_handle: str
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """Copy weights from *loaded_state_dict* into *model_state_dict* in place.

    Loaded keys are matched to current keys by longest suffix match, which
    tolerates prefixes such as "module." added by wrapper modules. Current
    keys with no matching loaded key are left untouched.
    """
    current_keys = sorted(model_state_dict.keys())
    loaded_keys = sorted(loaded_state_dict.keys())
    # scores[i*len(loaded)+j] = len(loaded_keys[j]) when it is a suffix of
    # current_keys[i], else 0; the per-row max picks the longest match.
    scores = [
        (len(old) if cur.endswith(old) else 0)
        for cur in current_keys
        for old in loaded_keys
    ]
    match_matrix = torch.as_tensor(scores).view(len(current_keys), len(loaded_keys))
    (max_match_size, idxs) = match_matrix.max(1)
    # Rows without any suffix match are marked -1 and skipped below.
    idxs[(max_match_size == 0)] = (- 1)
    for (idx_new, idx_old) in enumerate(idxs.tolist()):
        if (idx_old == (- 1)):
            continue
        model_state_dict[current_keys[idx_new]] = loaded_state_dict[loaded_keys[idx_old]]
def extract_transmit_timestamp(ntp_packet):
    """Decode the transmit timestamp (bytes 40-47) of an NTP packet.

    An NTP timestamp is seconds plus fraction/2**32 since the NTP epoch
    (1900-01-01). Returns the corresponding datetime.
    """
    (seconds, fraction) = struct.unpack('!II', ntp_packet[40:48])
    ntp_epoch = datetime.datetime(1900, 1, 1)
    return ntp_epoch + datetime.timedelta(seconds=(seconds + (fraction / (2 ** 32))))
class GroupEmitter():
    # Emits every string matched by a sequence (group) of sub-expressions,
    # i.e. the concatenated cross product of each sub-expression's strings.

    def __init__(self, exprs):
        # exprs: iterable of emitter objects, each providing make_generator().
        self.exprs = ParseResults(exprs)

    def make_generator(self):
        """Return a generator function yielding every concatenation of one
        string from each sub-expression, in left-to-right order."""
        def group_gen():
            def recurse_list(elist):
                # Base case: a single expression yields its own strings.
                if (len(elist) == 1):
                    (yield from elist[0].make_generator()())
                else:
                    # Cross product: prefix from the head, suffixes from the tail.
                    for s in elist[0].make_generator()():
                        for s2 in recurse_list(elist[1:]):
                            (yield (s + s2))
            if self.exprs:
                (yield from recurse_list(self.exprs))
        return group_gen
def test_unset_cert(tester: CommandTester, auth_config_source: DictConfigSource, mocker: MockerFixture) -> None:
    """Setting then unsetting a certificate must remove it from the config."""
    mocker.spy(ConfigSource, '__init__')
    tester.execute('certificates.foo.cert path/to/ca.pem')
    foo_certs = auth_config_source.config['certificates']['foo']
    assert 'cert' in foo_certs
    tester.execute('certificates.foo.cert --unset')
    foo_certs = auth_config_source.config['certificates']['foo']
    assert 'cert' not in foo_certs
def test_qt_arg(request, quteproc_new, tmp_path):
    """--qt-arg pairs must be forwarded to Qt (here: a bogus stylesheet path)."""
    missing = str(tmp_path / 'does-not-exist')
    args = ['--temp-basedir', '--qt-arg', 'stylesheet', missing]
    args += _base_args(request.config)
    quteproc_new.start(args)
    # Qt logs a CSS parser failure for the nonexistent stylesheet file.
    line = quteproc_new.wait_for(message='QCss::Parser - Failed to load file "*does-not-exist"')
    line.expected = True
    quteproc_new.send_cmd(':quit')
    quteproc_new.wait_for_quit()
def start(config_file, url_root='./translator', host='0.0.0.0', port=5000, debug=False):
    """Launch the Flask-based translation REST server.

    NOTE(review): the bare tuple lines such as ('/models', methods=['GET'])
    below look like route decorators whose '@app.route' prefix was lost in
    this copy — as written they are no-op expressions; confirm upstream.
    """
    def prefix_route(route_function, prefix='', mask='{0}{1}'):
        # Wrap app.route so every registered URL is prefixed with url_root.
        def newroute(route, *args, **kwargs):
            return route_function(mask.format(prefix, route), *args, **kwargs)
        return newroute
    if debug:
        # In debug mode, log request/response payloads to a rotating file.
        logger = logging.getLogger('main')
        log_format = logging.Formatter('[%(asctime)s %(levelname)s] %(message)s')
        file_handler = RotatingFileHandler('debug_requests.log', maxBytes=1000000, backupCount=10)
        file_handler.setFormatter(log_format)
        logger.addHandler(file_handler)
    app = Flask(__name__)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)
    ('/models', methods=['GET'])
    def get_models():
        # List the models known to the translation server.
        out = translation_server.list_models()
        return jsonify(out)
    ('/health', methods=['GET'])
    def health():
        # Trivial liveness probe.
        out = {}
        out['status'] = STATUS_OK
        return jsonify(out)
    ('/clone_model/<int:model_id>', methods=['POST'])
    def clone_model(model_id):
        out = {}
        data = request.get_json(force=True)
        timeout = (- 1)
        if ('timeout' in data):
            # 'timeout' is consumed here so it does not leak into opt.
            timeout = data['timeout']
            del data['timeout']
        opt = data.get('opt', None)
        try:
            (model_id, load_time) = translation_server.clone_model(model_id, opt, timeout)
        except ServerModelError as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
            out['model_id'] = model_id
            out['load_time'] = load_time
        return jsonify(out)
    ('/unload_model/<int:model_id>', methods=['GET'])
    def unload_model(model_id):
        out = {'model_id': model_id}
        try:
            translation_server.unload_model(model_id)
            out['status'] = STATUS_OK
        except Exception as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        return jsonify(out)
    ('/translate', methods=['POST'])
    def translate():
        inputs = request.get_json(force=True)
        if debug:
            logger.info(inputs)
        out = {}
        try:
            (trans, scores, n_best, _, aligns) = translation_server.run(inputs)
            assert (len(trans) == (len(inputs) * n_best))
            assert (len(scores) == (len(inputs) * n_best))
            assert (len(aligns) == (len(inputs) * n_best))
            # Regroup flat n-best results: out[k] holds the k-th best per input.
            out = [[] for _ in range(n_best)]
            for i in range(len(trans)):
                response = {'src': inputs[(i // n_best)]['src'], 'tgt': trans[i], 'n_best': n_best, 'pred_score': scores[i]}
                if (aligns[i] is not None):
                    response['align'] = aligns[i]
                out[(i % n_best)].append(response)
        except ServerModelError as e:
            # NOTE(review): if the error occurs after `out` became a list,
            # these item assignments would raise TypeError — verify upstream.
            out['error'] = str(e)
            out['status'] = STATUS_ERROR
        if debug:
            logger.info(out)
        return jsonify(out)
    ('/to_cpu/<int:model_id>', methods=['GET'])
    def to_cpu(model_id):
        out = {'model_id': model_id}
        translation_server.models[model_id].to_cpu()
        out['status'] = STATUS_OK
        return jsonify(out)
    ('/to_gpu/<int:model_id>', methods=['GET'])
    def to_gpu(model_id):
        out = {'model_id': model_id}
        translation_server.models[model_id].to_gpu()
        out['status'] = STATUS_OK
        return jsonify(out)
    serve(app, host=host, port=port)
def train_one_epoch(net, optimizer, config, master_bar, dataset=None):
    """Train `net` for one epoch on the TSP training data.

    Returns (elapsed_seconds, loss, err_edges, err_tour, err_tsp,
    pred_tour_len, gt_tour_len); the err_* values are placeholders (0).
    """
    net.train()
    num_nodes = config.num_nodes
    num_neighbors = config.num_neighbors
    batch_size = config.batch_size
    batches_per_epoch = config.batches_per_epoch
    accumulation_steps = config.accumulation_steps
    train_filepath = config.train_filepath
    train_target_filepath = config.train_filepath_solution
    if (dataset is None):
        dataset = DataReader(num_nodes, num_neighbors, batch_size, train_filepath, train_target_filepath, do_shuffle=True, do_prep=False)
    else:
        # Reuse the caller's reader; just reshuffle for the new epoch.
        dataset.shuffle()
    if (batches_per_epoch != (- 1)):
        batches_per_epoch = min(batches_per_epoch, dataset.max_iter)
    else:
        # -1 means "iterate the whole dataset".
        batches_per_epoch = dataset.max_iter
    dataset = iter(dataset)
    edge_cw = None  # edge class weights, computed lazily from the first batch
    running_loss = 0.0
    running_pred_tour_len = 0.0
    running_gt_tour_len = 0.0
    running_nb_data = 0
    running_nb_batch = 0
    start_epoch = time.time()
    for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):
        try:
            batch = next(dataset)
        except StopIteration:
            break
        x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)
        # Time windows only exist for the TSPTW variant.
        x_nodes_timew = (Variable(torch.FloatTensor(batch.nodes_timew).type(dtypeFloat), requires_grad=False) if is_tsptw else None)
        y_tour = Variable(torch.LongTensor(batch.tour_nodes).type(dtypeLong), requires_grad=False)
        if (type(edge_cw) != torch.Tensor):
            # Balance edge classes: a tour uses 2*num_nodes of num_nodes^2 edges.
            num_nodes = x_nodes_coord.size(1)
            num_edges = (num_nodes * num_nodes)
            num_edge_classes = 2
            edge_label_bincount = np.array([(num_edges - (2 * num_nodes)), (2 * num_nodes)])
            edge_cw = (num_edges / (num_edge_classes * edge_label_bincount))
        (y_preds, loss, x_edges_values) = net.forward(x_nodes_coord, x_nodes_timew, y_tour, edge_cw)
        loss = loss.mean()
        # Scale down so accumulated gradients average over accumulation_steps.
        loss = (loss / accumulation_steps)
        loss.backward()
        if (((batch_num + 1) % accumulation_steps) == 0):
            optimizer.step()
            optimizer.zero_grad()
        pred_tour_len = mean_tour_len_edges(x_edges_values, y_preds)
        gt_tour_len = np.mean(batch.tour_len)
        running_nb_data += batch_size
        # Undo the accumulation scaling when logging the loss.
        running_loss += ((batch_size * loss.data.item()) * accumulation_steps)
        running_pred_tour_len += (batch_size * pred_tour_len)
        running_gt_tour_len += (batch_size * gt_tour_len)
        running_nb_batch += 1
        result = 'loss:{loss:.4f} pred_tour_len:{pred_tour_len:.3f} gt_tour_len:{gt_tour_len:.3f}'.format(loss=(running_loss / running_nb_data), pred_tour_len=(running_pred_tour_len / running_nb_data), gt_tour_len=(running_gt_tour_len / running_nb_data))
        master_bar.child.comment = result
    loss = (running_loss / running_nb_data)
    err_edges = 0
    err_tour = 0
    err_tsp = 0
    pred_tour_len = (running_pred_tour_len / running_nb_data)
    gt_tour_len = (running_gt_tour_len / running_nb_data)
    return ((time.time() - start_epoch), loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len)
class MLP_Parrallel(nn.Module):
    """Two-branch MLP: one encoder per input view, plus a shared classifier
    used only on the supervised path."""

    def __init__(self):
        super(MLP_Parrallel, self).__init__()
        self.encoder_1 = MLP_encoder()
        self.encoder_2 = MLP_encoder()
        self.classifier = nn.Linear(128, 14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag != 'supervised':
            # Unsupervised path: return the raw encoder embeddings.
            return (self.encoder_1(x1), self.encoder_2(x2))
        # Supervised path: classify each view's embedding with the shared head.
        feat1 = self.encoder_1(x1, flag=flag)
        feat2 = self.encoder_2(x2, flag=flag)
        return (self.classifier(feat1), self.classifier(feat2))
def _translation_xgiga(params):
    """Round-trip-translation filter for one XGiga shard.

    Targets are translated with en2de then back with de2en; examples whose
    back-translation stays close to the original (ROUGE thresholds below)
    are kept and saved, together with the intermediate translation, as JSON.
    """
    (corpus_type, json_file, args, save_file, en2de, de2en) = params
    is_test = (corpus_type == 'test')  # NOTE(review): unused in this function
    if os.path.exists(save_file):
        # Shard already processed; skip it.
        logger.info(('Ignore %s' % save_file))
        return
    jobs = json.load(open(json_file))
    datasets = []
    sources = []
    tgts = []
    pos = 0
    for d in jobs:
        pos += 1
        # Lightweight animated progress indicator (0-4 dots, carriage return).
        print('processing', end='')
        for i in range((pos % 5)):
            print('.', end='')
        print(' ', end='\r')
        (source, tgt) = (d['src'], d['tgt'])
        sent_labels = None  # NOTE(review): assigned but never used
        if args.lower:
            source = [' '.join(s) for s in source]
            tgt = [' '.join(s) for s in tgt]
        sources.append(source)
        # Only the first target sentence is used for the round trip.
        tgts.append(tgt[0])
    print('translating tgts...')
    eng_tgts = en2de.translate(tgts)
    print('translating eng_tgts...')
    back_tgts = de2en.translate(eng_tgts)
    print('calculating rouge and construct the dataset...')
    for (i, each) in enumerate(sources):
        rouge = avg_rouge([tgts[i].lower()], [back_tgts[i].lower()])
        # Keep examples whose round trip preserves enough content.
        if ((rouge[0][0] >= 0.6) and (rouge[1][0] >= 0.2)):
            tgt_eng = [eng_tgts[i].lower().split()]
            tgt = [tgts[i].lower().split()]
            source = [s.lower().split() for s in sources[i]]
            tmp_json = {'src': source, 'tgt': tgt, 'tgt_eng': tgt_eng}
            datasets.append(tmp_json)
    logger.info(('Processed instances %d' % len(datasets)))
    logger.info(('Saving to %s' % save_file))
    with open(save_file, 'w') as save:
        save.write(json.dumps(datasets, ensure_ascii=False))
    return
(params=['v1', 'v2_1', 'v2_2', 'oci'])
def puller(request, data_model, jwk):
if (request.param == 'v1'):
return V1Protocol(jwk)
if (request.param == 'v2_2'):
return V2Protocol(jwk, schema='schema2')
if (request.param == 'oci'):
return V2Protocol(jwk, schema='oci')
return V2Protocol(jwk) |
class TrafficStopAction(_ActionType):
    """OpenSCENARIO TrafficStopAction: a GlobalAction stopping a named
    traffic flow. Introduced in OpenSCENARIO V1.1.
    """

    def __init__(self, name=None):
        # name: name of the traffic to stop (optional).
        self.name = name

    def __eq__(self, other):
        """Two actions are equal when their XML attributes match."""
        if isinstance(other, TrafficStopAction):
            if (self.get_attributes() == other.get_attributes()):
                return True
        return False

    @staticmethod
    def parse(element):
        """Build a TrafficStopAction from a GlobalAction XML element.

        Fix: this method takes the XML element as its only argument and is
        invoked without an instance, so it must be a @staticmethod — the
        decorator was missing.
        """
        trafficaction_element = element.find('TrafficAction')
        name = trafficaction_element.attrib['trafficName']
        return TrafficStopAction(name)

    def get_attributes(self):
        """Return the XML attributes; raises for OpenSCENARIO V1.0."""
        retdict = {}
        if (self.name and (not self.isVersion(minor=0))):
            retdict['trafficName'] = str(self.name)
        elif self.isVersion(minor=0):
            raise OpenSCENARIOVersionError('TrafficStopAction was introduced in OpenSCENARIO V1.1')
        return retdict

    def get_element(self):
        """Serialize this action as a GlobalAction ElementTree element."""
        element = ET.Element('GlobalAction')
        trafficaction = ET.SubElement(element, 'TrafficAction', attrib=self.get_attributes())
        ET.SubElement(trafficaction, 'TrafficStopAction')
        return element
class _FreeBSDBattery(_Battery):
    """Battery backend for FreeBSD, backed by ``acpiconf -i <battery>``."""

    def __init__(self, battery='0') -> None:
        # battery: ACPI battery index passed to acpiconf.
        self.battery = battery

    def update_status(self) -> BatteryStatus:
        """Parse ``acpiconf`` output into a BatteryStatus.

        Raises RuntimeError when acpiconf fails or when any expected field
        (state, capacity, rate, remaining time) cannot be parsed.
        """
        try:
            info = check_output(['acpiconf', '-i', self.battery]).decode('utf-8')
        except CalledProcessError:
            raise RuntimeError('acpiconf exited incorrectly')
        stat_match = re.search('State:\\t+([a-z]+)', info)
        if (stat_match is None):
            raise RuntimeError('Could not get battery state!')
        stat = stat_match.group(1)
        if (stat == 'charging'):
            state = BatteryState.CHARGING
        elif (stat == 'discharging'):
            state = BatteryState.DISCHARGING
        elif (stat == 'high'):
            # 'high' is treated as a full battery here.
            state = BatteryState.FULL
        else:
            state = BatteryState.UNKNOWN
        percent_re = re.search('Remaining capacity:\\t+([0-9]+)', info)
        if percent_re:
            # Convert from percent to a 0.0-1.0 fraction.
            percent = (int(percent_re.group(1)) / 100)
        else:
            raise RuntimeError('Could not get battery percentage!')
        power_re = re.search('Present rate:\\t+(?:[0-9]+ mA )*\\(?([0-9]+) mW', info)
        if power_re:
            # mW -> W.
            power = (float(power_re.group(1)) / 1000)
        else:
            raise RuntimeError('Could not get battery power!')
        time_re = re.search('Remaining time:\\t+([0-9]+:[0-9]+|unknown)', info)
        if time_re:
            if (time_re.group(1) == 'unknown'):
                time = 0
            else:
                # "H:MM" -> seconds.
                (hours, _, minutes) = time_re.group(1).partition(':')
                time = ((int(hours) * 3600) + (int(minutes) * 60))
        else:
            raise RuntimeError('Could not get remaining battery time!')
        return BatteryStatus(state, percent=percent, power=power, time=time)
def calc_inception(gen, batchsize=100):
    """Build a trainer extension that computes the Inception Score of `gen`.

    NOTE(review): the bare `.make_extension()` line below looks like a
    decorator whose prefix (presumably @chainer.training.make_extension)
    was lost in this copy — confirm against upstream.
    """
    .make_extension()
    def evaluation(trainer):
        model = load_inception_model()
        ims = []
        xp = gen.xp
        n_ims = 50000
        # Generate 50k samples in batches without building a graph.
        for i in range(0, n_ims, batchsize):
            z = Variable(xp.asarray(gen.make_hidden(batchsize)))
            with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
                x = gen(z)
            x = chainer.cuda.to_cpu(x.data)
            # Map generator output into uint8 pixel range via x*127.5+127.5.
            x = np.asarray(np.clip(((x * 127.5) + 127.5), 0.0, 255.0), dtype=np.uint8)
            ims.append(x)
        ims = np.asarray(ims)
        (_, _, _, h, w) = ims.shape
        # Flatten the batch dimension: (n_batches, b, 3, h, w) -> (n_ims, 3, h, w).
        ims = ims.reshape((n_ims, 3, h, w)).astype('f')
        (mean, _) = inception_score(model, ims)
        # Report under a name that identifies which generator was evaluated.
        if (gen.name == 'g'):
            chainer.reporter.report({'IS': mean})
        elif (gen.name == 'g_ema'):
            chainer.reporter.report({'IS_ema': mean})
        elif (gen.name == 'g_ma'):
            chainer.reporter.report({'IS_ma': mean})
    return evaluation
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Compute whole-word-mask reference positions for Chinese text.

    Each line is word-segmented with LTP and tokenized with BERT; the
    returned list holds, per line, the indices of '##'-prefixed single
    Chinese characters (sub-word continuations of a Chinese word).
    """
    chunk = 100  # both tokenizers are fed 100 lines at a time
    ltp_res = []
    for start in range(0, len(lines), chunk):
        cws = ltp_tokenizer.pipeline(lines[start:(start + chunk)], tasks=['cws']).cws
        ltp_res.extend(get_chinese_word(r) for r in cws)
    assert (len(ltp_res) == len(lines))
    bert_res = []
    for start in range(0, len(lines), chunk):
        encoded = bert_tokenizer(lines[start:(start + chunk)], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(encoded['input_ids'])
    assert (len(bert_res) == len(lines))
    ref_ids = []
    for (input_ids, chinese_word) in zip(bert_res, ltp_res):
        input_tokens = [bert_tokenizer._convert_id_to_token(token_id) for token_id in input_ids]
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # Record positions of '##x' continuations that are single Chinese chars.
        for (pos, token) in enumerate(input_tokens):
            if (token[:2] == '##'):
                clean_token = token[2:]
                if ((len(clean_token) == 1) and _is_chinese_char(ord(clean_token))):
                    ref_id.append(pos)
        ref_ids.append(ref_id)
    assert (len(ref_ids) == len(bert_res))
    return ref_ids
class AttentionGate(nn.Module):
    """Spatial attention gate: pool channels with ZPool, convolve to a
    single-channel map, squash with sigmoid, and scale the input by it."""

    def __init__(self):
        super(AttentionGate, self).__init__()
        kernel_size = 7
        self.compress = ZPool()
        # Same-padding 7x7 conv producing one attention channel (no ReLU).
        self.conv = BasicConv(2, 1, kernel_size, stride=1, padding=((kernel_size - 1) // 2), relu=False)

    def forward(self, x):
        pooled = self.compress(x)
        attention_map = self.conv(pooled)
        # In-place sigmoid on the conv output to form the gating weights.
        gate = torch.sigmoid_(attention_map)
        return x * gate
class PhysicMaterial():
    """Material with restitution/friction properties; optionally immutable.

    Bug fix: the original set ``self.__setattr__ = self._setattrException``
    on the instance, which never takes effect — Python looks up special
    methods on the *type*, not the instance — so ``immutable=True`` was
    never enforced. Immutability is now implemented with a class-level
    ``__setattr__`` guarded by a ``_immutable`` flag.
    """

    def _setattrException(self, name, value):
        # Raised for any assignment attempted on an immutable material.
        raise PyUnityException('Cannot modify properties of PhysicMaterial: it is immutable')

    def __init__(self, restitution=0.75, friction=1, immutable=False):
        # Bypass the guard while initializing.
        object.__setattr__(self, '_immutable', False)
        self.restitution = restitution
        self.friction = friction
        self.combine = (- 1)
        if immutable:
            # Flip the flag last, directly, so it cannot be unset later.
            object.__setattr__(self, '_immutable', True)

    def __setattr__(self, name, value):
        if getattr(self, '_immutable', False):
            self._setattrException(name, value)
        object.__setattr__(self, name, value)
def fmcw_tx():
    """Build an FMCW Transmitter with a cosine-shaped antenna pattern.

    NOTE(review): the frequency bounds ``[(.0 - .0), (.0 + .0)]`` both
    evaluate to 0.0 and look like garbled constants (e.g. a centre
    frequency +/- half-bandwidth) — confirm against the project history.
    """
    scan_angle = np.arange((- 90), 91, 1)
    # 20*log10(cos(theta) + 0.01) + 6 dB: cosine pattern with a small floor.
    gain_db = (20 * np.log10(np.cos((scan_angle / 180) * np.pi) + 0.01)) + 6
    channel = {
        'location': (0, 0, 0),
        'azimuth_angle': scan_angle,
        'azimuth_pattern': gain_db,
        'elevation_angle': scan_angle,
        'elevation_pattern': gain_db,
    }
    return Transmitter(f=[(.0 - .0), (.0 + .0)], t=8e-05, tx_power=10, prp=0.0001, pulses=256, channels=[channel])
def wrap_function_for_tracing(session: Session, task: PTask) -> None:
    """Replace ``task.function`` with a wrapper that runs it under pdb.

    The wrapper suspends output capturing and the live display, prints any
    captured stdout/stderr, executes the task under ``pdb.runcall``, and
    then resumes the live display and capturing.
    """
    import functools  # local import: the file's import block is outside this view

    _pdb = PytaskPDB._init_pdb('runcall')
    task_function = task.function

    # Bug fix: the bare ``(task_function)`` expression here was a stripped
    # decorator; restore ``functools.wraps`` so the wrapper preserves the
    # task function's name/docstring metadata.
    @functools.wraps(task_function)
    def wrapper(*args: Any, **kwargs: Any) -> None:
        capman = session.config['pm'].get_plugin('capturemanager')
        live_manager = session.config['pm'].get_plugin('live_manager')
        # Stop capturing and the live display so pdb owns the terminal.
        capman.suspend(in_=True)
        (out, err) = capman.read()
        live_manager.stop()
        if (out or err):
            console.print()
        if out:
            console.rule('Captured stdout', style='default')
            console.print(out)
        if err:
            console.rule('Captured stderr', style='default')
            console.print(err)
        _pdb.runcall(task_function, *args, **kwargs)
        live_manager.resume()
        capman.resume()
    task.function = wrapper
def downgrade(op, tables, tester):
    """Revert the build-trigger error-tracking migration: drop the two
    counter columns and remove their disable-reason rows."""
    columns = ('successive_internal_error_count', 'successive_failure_count')
    for column_name in columns:
        op.drop_column('repositorybuildtrigger', column_name)
    # Delete the matching disablereason rows in the same order.
    for reason_name in columns:
        condition = (tables.disablereason.c.name == op.inline_literal(reason_name))
        op.execute(tables.disablereason.delete().where(condition))
def load_class_from_name(fqcn):
    """Resolve a fully-qualified class name (e.g. ``pkg.mod.Class``).

    Raises ImportError if the module cannot be imported, AttributeError if
    the attribute is missing, and TypeError if it exists but is not a class.
    """
    import importlib  # local import: the file's import block is outside this view

    # Idiom fix: use importlib.import_module instead of the dated
    # __import__(..., ['*']) / sys.modules two-step.
    (modulename, _, classname) = fqcn.rpartition('.')
    module = importlib.import_module(modulename)
    cls = getattr(module, classname)
    if (not inspect.isclass(cls)):
        raise TypeError(('%s is not a class' % fqcn))
    return cls
def video_post_process(opt, video_list, video_dict):
    """Turn per-video PEM score CSVs into top-K temporal proposals.

    For each video: fuse the three confidence columns into one score, apply
    Soft-NMS, rescale proposal coordinates from [0, 1] to seconds, and store
    the top-K proposals.

    NOTE(review): results are written into ``result_dict``, which is not
    defined in this function — presumably a module-level (or shared
    multiprocessing) dict initialized by the caller; confirm before use.
    """
    for video_name in video_list:
        df = pd.read_csv((('./output/PEM_results/' + video_name) + '.csv'))
        # Fuse the three per-proposal confidences into a single score.
        df['score'] = ((df.iou_score.values[:] * df.xmin_score.values[:]) * df.xmax_score.values[:])
        if (len(df) > 1):
            df = Soft_NMS(df, opt)
        df = df.sort_values(by='score', ascending=False)
        video_info = video_dict[video_name]
        # Rescale [0, 1] coordinates to seconds; the frame count is first
        # truncated to a multiple of 16 (feature snippet length).
        video_duration = ((float(((video_info['duration_frame'] / 16) * 16)) / video_info['duration_frame']) * video_info['duration_second'])
        proposal_list = []
        for j in range(min(opt['post_process_top_K'], len(df))):
            tmp_proposal = {}
            tmp_proposal['score'] = df.score.values[j]
            # Clamp proposal bounds to [0, 1] before converting to seconds.
            tmp_proposal['segment'] = [(max(0, df.xmin.values[j]) * video_duration), (min(1, df.xmax.values[j]) * video_duration)]
            proposal_list.append(tmp_proposal)
        # NOTE(review): [2:] strips a 2-char prefix from the video name
        # (presumably 'v_') — confirm against the dataset naming.
        result_dict[video_name[2:]] = proposal_list
class WordsInContext(Task):
    """SuperGLUE Words in Context (WiC): decide whether a target word is
    used in the same sense in two sentences. Binary yes/no, scored by
    accuracy via yes/no log-likelihood comparison."""

    VERSION = 0
    DATASET_PATH = 'super_glue'
    DATASET_NAME = 'wic'

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        # The test split has no public labels.
        return False

    def training_docs(self):
        # Materialize and cache the training split on first access.
        if self._training_docs is None:
            self._training_docs = list(self.dataset['train'])
        return self._training_docs

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        target_word = doc['sentence1'][doc['start1']:doc['end1']]
        template = "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the two sentences above?\nAnswer:"
        return template.format(doc['sentence1'], doc['sentence2'], target_word)

    def doc_to_target(self, doc):
        label_names = {0: 'no', 1: 'yes'}
        return ' ' + label_names[doc['label']]

    def construct_requests(self, doc, ctx):
        # Query log-likelihoods of both answers for the same context.
        (ll_yes, _) = rf.loglikelihood(ctx, ' yes')
        (ll_no, _) = rf.loglikelihood(ctx, ' no')
        return (ll_yes, ll_no)

    def process_results(self, doc, results):
        (ll_yes, ll_no) = results
        predicted_yes = ll_yes > ll_no
        # True == 1 / False == 0, so comparing against the int label works.
        return {'acc': float(predicted_yes == doc['label'])}

    def higher_is_better(self):
        return {'acc': True}

    def aggregation(self):
        return {'acc': mean}
class Ui_ImageSettingsUi(object):
    """UI scaffold for the image settings dialog.

    NOTE(review): this matches the output style of Qt's ``pyuic5`` code
    generator; such files are normally regenerated from the ``.ui`` source
    rather than edited by hand.
    """

    def setupUi(self, ImageSettingsUi):
        """Create and lay out all widgets, then wire the button box signals."""
        ImageSettingsUi.setObjectName('ImageSettingsUi')
        ImageSettingsUi.resize(332, 270)
        self.gridLayout = QtWidgets.QGridLayout(ImageSettingsUi)
        self.gridLayout.setObjectName('gridLayout')
        # "Image" group: resolution combo, codec combo, quality slider.
        self.groupBox_2 = QtWidgets.QGroupBox(ImageSettingsUi)
        self.groupBox_2.setObjectName('groupBox_2')
        self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_2)
        self.gridLayout_2.setObjectName('gridLayout_2')
        self.label_8 = QtWidgets.QLabel(self.groupBox_2)
        self.label_8.setObjectName('label_8')
        self.gridLayout_2.addWidget(self.label_8, 0, 0, 1, 2)
        self.imageResolutionBox = QtWidgets.QComboBox(self.groupBox_2)
        self.imageResolutionBox.setObjectName('imageResolutionBox')
        self.gridLayout_2.addWidget(self.imageResolutionBox, 1, 0, 1, 2)
        self.label_6 = QtWidgets.QLabel(self.groupBox_2)
        self.label_6.setObjectName('label_6')
        self.gridLayout_2.addWidget(self.label_6, 2, 0, 1, 2)
        self.imageCodecBox = QtWidgets.QComboBox(self.groupBox_2)
        self.imageCodecBox.setObjectName('imageCodecBox')
        self.gridLayout_2.addWidget(self.imageCodecBox, 3, 0, 1, 2)
        self.label_7 = QtWidgets.QLabel(self.groupBox_2)
        self.label_7.setObjectName('label_7')
        self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
        # Quality slider has 5 discrete steps (0-4).
        self.imageQualitySlider = QtWidgets.QSlider(self.groupBox_2)
        self.imageQualitySlider.setMaximum(4)
        self.imageQualitySlider.setOrientation(QtCore.Qt.Horizontal)
        self.imageQualitySlider.setObjectName('imageQualitySlider')
        self.gridLayout_2.addWidget(self.imageQualitySlider, 4, 1, 1, 1)
        self.gridLayout.addWidget(self.groupBox_2, 0, 0, 1, 1)
        # Spacer pushes the button box to the bottom of the dialog.
        spacerItem = QtWidgets.QSpacerItem(20, 14, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
        self.buttonBox = QtWidgets.QDialogButtonBox(ImageSettingsUi)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons((QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok))
        self.buttonBox.setObjectName('buttonBox')
        self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
        self.retranslateUi(ImageSettingsUi)
        self.buttonBox.accepted.connect(ImageSettingsUi.accept)
        self.buttonBox.rejected.connect(ImageSettingsUi.reject)
        QtCore.QMetaObject.connectSlotsByName(ImageSettingsUi)

    def retranslateUi(self, ImageSettingsUi):
        """Set all user-visible strings (called once from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        ImageSettingsUi.setWindowTitle(_translate('ImageSettingsUi', 'Dialog'))
        self.groupBox_2.setTitle(_translate('ImageSettingsUi', 'Image'))
        self.label_8.setText(_translate('ImageSettingsUi', 'Resolution:'))
        self.label_6.setText(_translate('ImageSettingsUi', 'Image Format:'))
        self.label_7.setText(_translate('ImageSettingsUi', 'Quality:'))
class ScoreboardView(View):
    """Discord UI view presenting the Trivia Night scoreboard.

    NOTE(review): ``self.points`` (user id -> points) and ``self.speed``
    (user id -> timing data) are read but never assigned in this class;
    they are presumably attached by the caller before use — confirm.
    """

    def __init__(self, bot: Bot):
        super().__init__()
        self.bot = bot

    # Bug fix: this helper takes no ``self`` but is called via
    # ``self._int_to_ordinal(...)``; without @staticmethod that call passed
    # the instance as ``number`` and raised a TypeError.
    @staticmethod
    def _int_to_ordinal(number: int) -> str:
        """Return the ordinal string for ``number`` (1 -> '1st', 11 -> '11th')."""
        suffix = ['th', 'st', 'nd', 'rd', 'th'][min((number % 10), 4)]
        if ((number % 100) in {11, 12, 13}):
            suffix = 'th'
        return (str(number) + suffix)

    async def create_main_leaderboard(self) -> Embed:
        """Build the embed listing up to the top 30 users by points."""
        formatted_string = ''
        for (current_placement, (user, points)) in enumerate(self.points.items()):
            if ((current_placement + 1) > 30):
                break
            user = (await self.bot.fetch_user(int(user)))
            formatted_string += f'**{(current_placement + 1)}.** {user.mention} '
            formatted_string += f'({points:.1f} pts)\n'
            # Blank line between every group of ten placements.
            if (((current_placement + 1) % 10) == 0):
                formatted_string += '\n'
        main_embed = Embed(title='Winners of the Trivia Night', description=formatted_string, color=Colours.python_blue)
        return main_embed

    async def _create_speed_embed(self) -> Embed:
        """Build the embed listing up to the top 30 users by answer speed."""
        formatted_string = ''
        for (current_placement, (user, time_taken)) in enumerate(self.speed.items()):
            if ((current_placement + 1) > 30):
                break
            user = (await self.bot.fetch_user(int(user)))
            formatted_string += f'**{(current_placement + 1)}.** {user.mention} '
            # NOTE(review): assumes time_taken is (count, ..., total) so that
            # total / count is the average time — confirm the tuple layout.
            formatted_string += f'({(time_taken[(- 1)] / time_taken[0]):.1f}s)\n'
            if (((current_placement + 1) % 10) == 0):
                formatted_string += '\n'
        speed_embed = Embed(title='Average time taken to answer a question', description=formatted_string, color=Colours.python_blue)
        return speed_embed

    def _get_rank(self, member: Member) -> Embed:
        """Build an embed with the member's points rank and speed rank."""
        rank_embed = Embed(title=f'Ranks for {member.display_name}', color=Colours.python_blue)
        try:
            points_rank = str((list(self.points).index(member.id) + 1))
            speed_rank = str((list(self.speed).index(member.id) + 1))
        except ValueError:
            # Member has no entry in either mapping: they did not play.
            return Embed(title=choice(NEGATIVE_REPLIES), description="It looks like you didn't participate in the Trivia Night event!", color=Colours.soft_red)
        rank_embed.add_field(name='Total Points', value=f'You got {self._int_to_ordinal(int(points_rank))} place with {self.points[member.id]:.1f} points.', inline=False)
        rank_embed.add_field(name='Average Speed', value=f'You got {self._int_to_ordinal(int(speed_rank))} place with a time of {(self.speed[member.id][1] / self.speed[member.id][0]):.1f} seconds.', inline=False)
        return rank_embed

    # Bug fix: the bare '.button(...)' lines were stripped decorators;
    # restore them. NOTE(review): confirm the module is imported as
    # ``discord`` (otherwise use the project's actual import of ``button``).
    @discord.ui.button(label='Scoreboard for Speed', style=ButtonStyle.green)
    async def speed_leaderboard(self, interaction: Interaction, _: Button) -> None:
        """Send the speed leaderboard as an ephemeral reply."""
        (await interaction.response.send_message(embed=(await self._create_speed_embed()), ephemeral=True))

    @discord.ui.button(label="What's my rank?", style=ButtonStyle.blurple)
    async def rank_button(self, interaction: Interaction, _: Button) -> None:
        """Send the requesting user's ranks as an ephemeral reply."""
        (await interaction.response.send_message(embed=self._get_rank(interaction.user), ephemeral=True))
class FeedForwardNetworks(Model):
    """TF1 graph-mode harness for training/evaluating image-to-instruction models.

    Builds data iterators via ``Loader``, selects one of several model
    families (renderer / resnet / unet variants / autoencoder) in
    ``model_define``, and wires up losses, summaries and Adam optimizers.

    NOTE(review): depends on several project modules (``rendnet``,
    ``basenet``, ``danet``, ``layer_modules``) and constants (``REAL``,
    ``INST_SYNT``, ``INST_REAL``, ``UNSU``) whose contracts are not visible
    in this chunk.
    """

    def __init__(self, sess, tf_flag):
        """Copy options from the parsed flags, resolve parameters, and build
        either the training or the test graph."""
        self.sess = sess
        self.oparam = Parameters()
        self.oparam.learning_rate = tf_flag.learning_rate
        self.oparam.max_iter = tf_flag.max_iter
        self.oparam.batch_size = tf_flag.batch_size
        self.oparam.image_size = tf_flag.image_size
        self.oparam.component = tf_flag.component
        self.oparam.threads = tf_flag.threads
        self.oparam.dataset = tf_flag.dataset
        self.oparam.model_type = tf_flag.model_type
        self.oparam.checkpoint_dir = tf_flag.checkpoint_dir
        self.oparam.is_train = tf_flag.training
        # Default loss weights; names ending in '*' are expanded by prefix
        # matching in build_model's dispatch_weights.
        self.oparam.weights = {'loss_transfer': 1.0, 'loss_syntax*': 0.1, 'loss_AE': 0.1, 'loss_xentropy*': 2.0, 'loss_feedback*': 1.0, 'loss_disentgl*': 0.1, 'loss_vgg_percept*': 0.1, 'loss_unsup*': 0.01}
        self.oparam.weights.update(weight_map(tf_flag.weights, 'weights'))
        # Default hyper-parameters; overridden by command-line params below.
        self.oparam.params = {'decay_rate': 0.99, 'decay_steps': 10000, 'augment': 1, 'augment_src': 'best', 'augment_mirror': 0, 'resi_global': 0, 'resi_ch': 66, 'gen_passes': 1, 'decoder': 1, 'discr_img': 0, 'discr_latent': 0, 'discr_instr': 0, 'discr_type': 'l2', 'feedback': 0, 'mean_img': 0, 'runit': 'relu', 'syntax_binary': 0, 'bMILloss': 1, 'bloss_unsup': 0, 'bloss_ae': 1, 'bloss_disentangle': 0, 'bvggloss': 0, 'vgg16or19': '16', 'bg_type': 'local', 'bg_weight': 0.1, 'bunet_test': 0, 'use_resnet': 0, 'use_renderer': 0}
        self.oparam.params.update(weight_map(tf_flag.params, 'params'))
        self.oparam.params['gram_layers'] = tf_flag.gram_layers
        # A discriminator is used if any of the three discriminator variants is on.
        self.oparam.params['discr'] = (1 if (((self.oparam.params['discr_img'] + self.oparam.params['discr_latent']) + self.oparam.params['discr_instr']) >= 1) else 0)
        self.oparam.params['dataset'] = self.oparam.dataset
        # At test time, loading stored parameters is mandatory.
        self.load_params((not self.oparam.is_train))
        if self.oparam.params['mean_img']:
            print(('Using mean images in %s' % os.path.join(self.oparam.dataset, 'mean')))
            mean_path = (lambda name: os.path.join(self.oparam.dataset, 'mean', name))
            mean_imgs = {'mean_rend': mean_path('rendering.jpg'), 'mean_tran': mean_path('transfer.jpg'), 'mean_real': mean_path('real.jpg')}
            self.oparam.params.update(mean_imgs)
        if (not self.oparam.params.get('decoder', 1)):
            # No decoder: disable every decoder-dependent loss.
            self.oparam.params.update({'bloss_unsup': 0, 'bloss_ae': 0, 'bloss_disentangle': 0, 'bvggloss': 0, 'resi_ch': 0})
        if self.oparam.params.get('use_renderer', 0):
            self.oparam.params['use_tran'] = 0
        if self.oparam.params.get('use_resnet', 0):
            self.oparam.params['xfer_type'] = 'rgb'
        runit_type = self.oparam.params['runit']
        print('Rectifier unit:', runit_type)
        set_runit(runit_type)
        self.lr = self.oparam.learning_rate
        self.batch = self.oparam.batch_size
        self._attrs = ['model_type', 'lr', 'batch']
        self.options = []
        self.oparam.params['training'] = self.oparam.is_train
        if self.oparam.is_train:
            self.load_params(False)
        else:
            # Test mode only feeds real data; disable synthetic inputs.
            self.oparam.params['use_tran'] = False
            self.oparam.params['use_rend'] = False
        self.loader = Loader(self.oparam.dataset, self.oparam.batch_size, self.oparam.threads, self.oparam.params)
        if (len(self.loader.fakes) > 1):
            print('\n\n/!\\ Using multiple types of fake data.\nMake sure this is intended and not an error!\n', self.loader.fakes)
        if self.oparam.is_train:
            self.build_model()
        else:
            self.build_model_test()

    def model_define(self, X_in, Y_out, is_train=False):
        """Select and instantiate the network family, optionally with mirror
        augmentation; returns (net, disc losses, gen losses, metrics).

        At test time the three loss/metric slots are returned as None.
        """
        net = dict()
        self.oparam.params['is_train'] = is_train
        if (is_train and self.oparam.params.get('augment_mirror', 0)):
            # Randomly mirror half of the batch; real and synthetic streams
            # get independent coin flips, kept consistent between image and
            # instruction tensors.
            t_cond_real = tf.greater(tf.random_uniform([self.batch]), 0.5)
            t_cond_synt = tf.greater(tf.random_uniform([self.batch]), 0.5)
            for key in X_in.keys():
                t_img = X_in[key]
                if (key == 'real'):
                    t_cond = t_cond_real
                else:
                    t_cond = t_cond_synt
                X_in[key] = tf.where(t_cond, tf_mirror_image(t_img), t_img)
            for key in Y_out.keys():
                t_inst = Y_out[key]
                if (key == 'real'):
                    t_cond = t_cond_real
                else:
                    t_cond = t_cond_synt
                Y_out[key] = tf.where(t_cond, tf_mirror_instr(t_inst), t_inst)
        if (self.oparam.params.get('use_unsup', 0) == 0):
            # NOTE(review): checks for the string 'unsup' but deletes key
            # UNSU (a module constant) — confirm these refer to the same key.
            if ('unsup' in X_in.keys()):
                del X_in[UNSU]
        # Dispatch to exactly one model family.
        if self.oparam.params.get('use_renderer'):
            net = rendnet.model_composited(Y_out, X_in, self.oparam.params)
            if is_train:
                (loss_dict_Disc, loss_dict_Gene, metrics) = rendnet.total_loss(net, X_in, self.oparam.params)
        elif self.oparam.params.get('use_resnet'):
            net = basenet.model_composited(X_in, Y_out, self.oparam.params)
            if is_train:
                (loss_dict_Disc, loss_dict_Gene, metrics) = basenet.total_loss(net, Y_out, self.oparam.params)
        elif (self.oparam.params.get('bunet_test', 0) == 1):
            # Variant 1 uses only real inputs.
            if ('rend' in X_in.keys()):
                del X_in['rend']
            if ('tran' in X_in.keys()):
                del X_in['tran']
            net = danet.model_composited(X_in, Y_out, self.oparam.params)
            if is_train:
                (loss_dict_Disc, loss_dict_Gene, metrics) = danet.total_loss(net, Y_out[INST_SYNT], Y_out[INST_REAL], self.oparam.params)
        elif (self.oparam.params.get('bunet_test', 0) == 2):
            if self.oparam.params.get('use_cgan', 0):
                X_in['tran'] = X_in['cgan']
            net = danet.model_composited_RFI_2(X_in, Y_out, self.oparam.params)
            if is_train:
                (loss_dict_Disc, loss_dict_Gene, metrics) = danet.total_loss_RFI(net, Y_out, self.oparam.params)
        elif (self.oparam.params.get('bunet_test', 0) == 3):
            net = danet.model_composited_RFI_complexnet(X_in, Y_out, self.oparam.params)
            if is_train:
                (loss_dict_Disc, loss_dict_Gene, metrics) = danet.total_loss_RFI(net, Y_out, self.oparam.params)
        elif self.oparam.params.get('use_autoencoder', 0):
            net = layer_modules.model_composited(X_in, Y_out, self.oparam.params)
            if is_train:
                (loss_dict_Disc, loss_dict_Gene, metrics) = layer_modules.total_loss(net, Y_out, self.oparam.params)
        else:
            raise ValueError('No model selected (use_renderer | use_resnet | bunet_test | use_autoencoder)')
        if (not is_train):
            loss_dict_Disc = None
            loss_dict_Gene = None
            metrics = None
        return (net, loss_dict_Disc, loss_dict_Gene, metrics)

    def build_model(self):
        """Build the training graph: iterators, model, weighted losses and
        TensorBoard summaries."""
        print('Model build')
        train_iter = self.loader.iter(set_option='train')
        val_iter = self.loader.iter(set_option='val')
        self.train_handle = self.sess.run(train_iter.string_handle())
        self.val_handle = self.sess.run(val_iter.string_handle())
        # Feedable handle lets one graph consume train or val batches.
        self.batch_handle = tf.placeholder(tf.string, shape=[])
        batch_iter = tf.data.Iterator.from_string_handle(self.batch_handle, train_iter.output_types)
        curbatch = batch_iter.get_next()
        # NOTE(review): hard-coded 160x160 inputs / 20x20 instruction maps.
        img_size = [self.loader.batch_size, 160, 160, 1]
        lbl_size = [self.loader.batch_size, 20, 20, 1]
        inst_synt = curbatch['synt'][(- 1)]
        (real, inst_real) = curbatch['real']
        if ('unsup' in curbatch.keys()):
            unsup = curbatch['unsup'][0]
        for t_img in curbatch['synt'][0:(- 1)]:
            t_img.set_shape(img_size)
        real.set_shape(img_size)
        if ('unsup' in curbatch.keys()):
            unsup.set_shape(img_size)
        for t_lbl in [inst_synt, inst_real]:
            t_lbl.set_shape(lbl_size)
        self.tf_models = Parameters()
        print('Model build')
        self.tf_models.X = {REAL: real}
        self.tf_models.Y = {INST_SYNT: inst_synt, INST_REAL: inst_real}
        for i in range(len(self.loader.fakes)):
            name = self.loader.fakes[i]
            t_img = curbatch['synt'][i]
            self.tf_models.X[name] = t_img
        if self.oparam.params.get('replay_worst', 0):
            # Non-trainable variables holding the hardest examples, refreshed
            # each step in train() for hard-example replay.
            name = 'worst'
            self.tf_models.X[name] = tf.Variable(tf.ones_like(real), name='worst-input', dtype=tf.float32, trainable=False)
            self.tf_models.Y[name] = tf.Variable(tf.zeros_like(inst_real), name='worst-output', dtype=tf.int32, trainable=False)
        if self.oparam.is_train:
            with tf.device('/device:GPU:0'):
                (self.tf_models.net, self.tf_models.loss_dict_Disc, self.tf_models.loss_dict_Gene, self.tf_models.metrics) = self.model_define(X_in=self.tf_models.X, Y_out=self.tf_models.Y, is_train=self.oparam.is_train)
        else:
            return

        def dispatch_weights():
            # Expand wildcard weight names ('prefix*' and '*suffix') over the
            # concrete loss names, then let exact names take precedence.
            new_weights = dict()
            for (name, value) in self.oparam.weights.items():
                if name.endswith('*'):
                    prefix = name[:(- 1)]
                    for loss_name in self.tf_models.loss_dict_Gene.keys():
                        if loss_name.startswith(prefix):
                            new_weights[loss_name] = value
                    for loss_name in self.tf_models.loss_dict_Disc.keys():
                        if loss_name.startswith(prefix):
                            new_weights[loss_name] = value
                if name.startswith('*'):
                    suffix = name[1:]
                    for loss_name in self.tf_models.loss_dict_Gene.keys():
                        if loss_name.endswith(suffix):
                            new_weights[loss_name] = value
                    for loss_name in self.tf_models.loss_dict_Disc.keys():
                        if loss_name.endswith(suffix):
                            new_weights[loss_name] = value
            for (name, value) in self.oparam.weights.items():
                for loss_name in (list(self.tf_models.loss_dict_Gene.keys()) + list(self.tf_models.loss_dict_Disc.keys())):
                    if (name == loss_name):
                        new_weights[name] = value
            for (name, value) in new_weights.items():
                self.oparam.weights[name] = value
        dispatch_weights()
        if self.oparam.params.get('balance_weights', 1):
            if (len(self.loader.fakes) == 0):
                # Without synthetic data, double real-data losses to compensate.
                print('Balancing weights for real data only')
                for name in self.tf_models.loss_dict_Gene.keys():
                    if name.endswith('/real'):
                        weight = self.oparam.weights.get(name, 1.0)
                        self.oparam.weights[name] = (weight * 2)
                        print(('- %s: %f -> %f' % (name, weight, (weight * 2))))
        print('Losses:')
        for name in self.tf_models.loss_dict_Gene.keys():
            weight = self.oparam.weights.get(name, 1.0)
            if (weight > 0):
                print(('[gen] %s (%f)' % (name, weight)))
        for name in self.tf_models.loss_dict_Disc.keys():
            weight = self.oparam.weights.get(name, 1.0)
            if (weight > 0):
                print(('[dis] %s (%f)' % (name, weight)))
        # Weighted sums: total generator loss, and a "main" loss excluding
        # adaptation/generation/feedback terms.
        self.tf_models.loss_total_gene = tf.add_n([tf.reduce_mean((l * self.oparam.weights.get(i, 1.0))) for (i, l) in self.tf_models.loss_dict_Gene.items()])
        self.tf_models.loss_main_gene = tf.add_n([tf.reduce_mean((l * self.oparam.weights.get(i, 1.0))) for (i, l) in self.tf_models.loss_dict_Gene.items() if (('adapt' not in i) and ('gen' not in i) and ('feedback' not in i))])
        if self.oparam.params.get('discr', 1):
            self.tf_models.loss_total_disc = tf.add_n([tf.reduce_mean((l * self.oparam.weights.get(i, 1.0))) for (i, l) in self.tf_models.loss_dict_Disc.items()])
        else:
            self.tf_models.loss_total_disc = tf.constant(0)
        self.summaries = {}
        net = self.tf_models.net

        def res_dict_imgs(res_dict, target='real', src=None):
            # Select entries of res_dict matching `target` ('*suffix' matches
            # by suffix, otherwise exact key).
            if (src is None):
                src = target
            if (src not in net.mean_imgs):
                src = 'real'
            real_dict = dict()
            if target.startswith('*'):
                for (key, value) in res_dict.items():
                    if key.endswith(target[1:]):
                        real_dict[key] = value
            elif (target in res_dict):
                real_dict[target] = res_dict[target]
            return real_dict
        use_renderer = self.oparam.params.get('use_renderer', 0)
        self.summaries['images'] = dict()
        images = {'inputs': net.imgs, 'res-inps': net.resi_imgs, 'res-outs': net.resi_outs, 'ae': res_dict_imgs(net.resi_outs, 'real'), 'adapt': res_dict_imgs(net.resi_outs, '*_real'), 'generator': res_dict_imgs(net.resi_outs, '*_gen')}
        for name in net.discr.keys():
            images[('discr-' + name)] = net.discr[name]
        for (cat, data_dict) in images.items():
            for (name, tf_img) in data_dict.items():
                sum_name = ((cat + '/') + name)
                # Non-input images are stored centered at 0; shift for display.
                if ((cat != 'inputs') and (use_renderer == 0)):
                    tf_img = (tf_img + 0.5)
                self.summaries['images'][sum_name] = tf.summary.image(sum_name, fn_clipping01(tf_img), max_outputs=5)
        # Instruction-map summaries, grouped by source of the prediction.
        images = {'gt': self.tf_models.Y, 'outputs': dict(), 'outputs-adapt': dict(), 'outputs-gen': dict()}
        for (name, t_instr) in net.instr.items():
            if ('_real' in name):
                images['outputs-adapt'][name] = t_instr
            elif ('_gen' in name):
                images['outputs-gen'][name] = t_instr
            else:
                images['outputs'][name] = t_instr
        for (cat, data_dict) in images.items():
            for (name, tf_img) in data_dict.items():
                if ('feedback' in name):
                    sum_name = ('feedback/' + name.replace('_feedback', ''))
                else:
                    sum_name = ((cat + '/') + name)
                label = tf_ind_to_rgb(tf_img)
                self.summaries['images'][sum_name] = tf.summary.image(sum_name, label, max_outputs=5)
        for (name, t_bg) in net.bg.items():
            sum_name = ('bg/' + name)
            self.summaries['images'][sum_name] = tf.summary.image(sum_name, tf.cast(t_bg, tf.float32), max_outputs=5)
        self.summaries['scalar'] = dict()
        self.summaries['scalar']['total_loss'] = tf.summary.scalar('loss_total', self.tf_models.loss_total_gene)
        for (loss_name, tf_loss) in dict(self.tf_models.loss_dict_Gene, **self.tf_models.loss_dict_Disc).items():
            weight = self.oparam.weights.get(loss_name, 1.0)
            if (weight > 0.0):
                self.summaries['scalar'][loss_name] = tf.summary.scalar(loss_name, tf.reduce_mean((tf_loss * weight)))
        for (metric_name, tf_metric) in self.tf_models.metrics.items():
            if metric_name.startswith('confusionmat'):
                self.summaries['images'][metric_name] = tf.summary.image(metric_name, tf_summary_confusionmat(tf_metric, numlabel=layer_modules.prog_ch, tag=metric_name), max_outputs=5)
            else:
                self.summaries['scalar'][metric_name] = tf.summary.scalar(metric_name, tf_metric)

    def build_model_test(self):
        """Build the inference graph over the test split (real data only)."""
        print('Model build')
        test_iter = self.loader.iter(set_option='test')
        self.test_handle = self.sess.run(test_iter.string_handle())
        self.batch_handle = tf.placeholder(tf.string, shape=[])
        batch_iter = tf.data.Iterator.from_string_handle(self.batch_handle, test_iter.output_types)
        curbatch = batch_iter.get_next()
        img_size = [self.loader.batch_size, 160, 160, 1]
        lbl_size = [self.loader.batch_size, 20, 20, 1]
        # Test batches also carry the source file names for saving outputs.
        (real, inst_real, self.input_names) = curbatch['real']
        real.set_shape(img_size)
        inst_real.set_shape(lbl_size)
        self.tf_models = Parameters()
        print('Model build')
        self.tf_models.X = {REAL: real}
        self.tf_models.Y = {INST_REAL: inst_real}
        with tf.device('/device:GPU:0'):
            (self.tf_models.net, _, _, _) = self.model_define(X_in=self.tf_models.X, Y_out=self.tf_models.Y, is_train=False)

    def train(self):
        """Run the training loop with periodic validation, summaries and
        checkpointing; alternates generator and discriminator steps."""
        self.step = tf.train.get_or_create_global_step()
        lr = tf.train.exponential_decay(self.oparam.learning_rate, global_step=self.step, decay_steps=self.oparam.params.get('decay_steps', 50000), decay_rate=self.oparam.params.get('decay_rate', 0.3), staircase=True)
        use_discr = self.oparam.params.get('discr', 1)

        def create_train_op(lr, loss, tvars, global_step):
            # Adam with explicit gradient computation so ops stay colocated.
            optim = tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=0.0001)
            grads_and_vars = optim.compute_gradients(loss, tvars, colocate_gradients_with_ops=True)
            return optim.apply_gradients(grads_and_vars, global_step=global_step)
        base_deps = []
        runit_type = self.oparam.params.get('runit', 'relu')
        if 1:
            # Always run pending update ops (e.g. batch-norm moving stats)
            # before the optimizer steps.
            base_deps.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
        else:
            base_deps = None
        replay_worst = self.oparam.params.get('replay_worst', 0)
        if replay_worst:
            # Hard-example replay: keep the lowest-accuracy half of
            # (current batch + stored worst) in the 'worst' variables.
            replay_deps = []
            net = self.tf_models.net
            with tf.variable_scope('replay_worst', tf.AUTO_REUSE):
                worst_type = self.oparam.params.get('worst_type', 'fg')
                assert (worst_type in net.acc), 'Invalid worst type'
                acc = net.acc[worst_type]
                real_accs = tf.concat([acc[REAL], acc['worst']], axis=0)
                real_inps = tf.concat([self.tf_models.X[REAL], self.tf_models.X['worst']], axis=0)
                real_outs = tf.concat([self.tf_models.Y[INST_REAL], self.tf_models.Y['worst']], axis=0)
                (_, worst_idx) = tf.nn.top_k((- tf.squeeze(real_accs)), self.loader.batch_size)
                worst_inps = tf.gather(real_inps, worst_idx)
                dep = self.tf_models.X['worst'].assign(worst_inps, read_value=False)
                replay_deps.append(dep)
                worst_outs = tf.gather(real_outs, worst_idx)
                dep = self.tf_models.Y['worst'].assign(worst_outs, read_value=False)
                replay_deps.append(dep)
            if (base_deps is None):
                base_deps = replay_deps
            else:
                base_deps += replay_deps
        if self.oparam.params.get('use_hosyntax', 0):
            rendnet.load_weights(self.sess, self.oparam.params.get('render_type', 'dense'))
        with tf.name_scope('generator_train'):
            gen_tvars = [var for var in tf.trainable_variables() if (re.search('generator', var.name) != None)]
            for var in gen_tvars:
                print(('gen var %s' % var.name))
            gen_deps = []
            if (base_deps is not None):
                gen_deps.extend(base_deps)
            else:
                gen_deps = None
            with tf.control_dependencies(gen_deps):
                self.gen_train_op = create_train_op(lr, self.tf_models.loss_total_gene, gen_tvars, self.step)
        if use_discr:
            # Discriminator step runs after the generator step (and at half
            # the learning rate, without advancing the global step).
            dis_deps = []
            if (gen_deps is not None):
                dis_deps.extend(gen_deps)
            dis_deps.append(self.gen_train_op)
            with tf.name_scope('discriminator_train'):
                dis_tvars = [var for var in tf.trainable_variables() if (re.search('discriminator', var.name) != None)]
                for var in dis_tvars:
                    print(('dis var %s' % var.name))
                with tf.control_dependencies(dis_deps):
                    self.dis_train_op = create_train_op((lr * 0.5), self.tf_models.loss_total_disc, dis_tvars, None)
        self.summaries['scalar']['learning_rate'] = tf.summary.scalar('learning_rate', lr)
        loss_summary = tf.summary.merge(list(self.summaries['scalar'].values()))
        val1_summary = tf.summary.merge((list(self.summaries['images'].values()) + [loss_summary]))
        val2_summary = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter((self.oparam.checkpoint_dir + '/train'), self.sess.graph)
        val_writer = tf.summary.FileWriter((self.oparam.checkpoint_dir + '/val'), self.sess.graph)
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()
        self.load(self.oparam.checkpoint_dir)
        # Persist the resolved parameters next to the checkpoints.
        with open(os.path.join(self.oparam.checkpoint_dir, 'params.pkl'), 'wb') as f:
            pickle.dump(self.oparam.params, f)
        start_time = time.time()
        start_iter = int((self.step.eval() + 1))
        train_setup = {self.batch_handle: self.train_handle}
        val_setup = {self.batch_handle: self.val_handle}
        global_step = 0
        while (global_step < self.oparam.max_iter):
            try:
                global_step = tf.train.global_step(self.sess, tf.train.get_global_step())
                # Checkpoint every 10k steps; validate every 500 (full
                # summaries every 1000).
                if ((global_step != 0) and ((global_step % 10000) == 0)):
                    self.save(self.oparam.checkpoint_dir, tf.train.get_global_step())
                if ((global_step != 0) and ((global_step % 500) == 0)):
                    if ((global_step % 1000) == 0):
                        val_summary = val2_summary
                    else:
                        val_summary = val1_summary
                    if use_discr:
                        (summary_str, loss_probe_d, loss_probe_g) = self.sess.run([val_summary, self.tf_models.loss_total_disc, self.tf_models.loss_total_gene], feed_dict=val_setup)
                    else:
                        loss_probe_d = 0
                        (summary_str, loss_probe_g) = self.sess.run([val_summary, self.tf_models.loss_total_gene], feed_dict=val_setup)
                    val_writer.add_summary(summary_str, global_step)
                    print(('Iter: [%2d/%7d] time: %4.4f, vloss: [d %.4f, g %.4f]' % (global_step, self.oparam.max_iter, (time.time() - start_time), loss_probe_d, loss_probe_g)))
                if use_discr:
                    # Several generator passes per discriminator pass.
                    Ngen = int(self.oparam.params.get('gen_passes', 2.0))
                    for g in range(Ngen):
                        _ = self.sess.run([self.gen_train_op], feed_dict=train_setup)
                    _ = self.sess.run([self.dis_train_op], feed_dict=train_setup)
                    if ((global_step % 100) == 0):
                        (summary_str, loss_tr_d, loss_tr_g) = self.sess.run([loss_summary, self.tf_models.loss_total_disc, self.tf_models.loss_total_gene], feed_dict=train_setup)
                    else:
                        (loss_tr_d, loss_tr_g) = self.sess.run([self.tf_models.loss_total_disc, self.tf_models.loss_total_gene], feed_dict=train_setup)
                else:
                    loss_tr_d = 0.0
                    (summary_str, loss_tr_g, _) = self.sess.run([loss_summary, self.tf_models.loss_total_gene, self.gen_train_op], feed_dict=train_setup)
                if ((global_step % 10) < 1):
                    print(('Iter: [%2d/%7d] time: %4.4f, loss: [d %.4f, g %.4f]' % (global_step, self.oparam.max_iter, (time.time() - start_time), loss_tr_d, loss_tr_g)))
                if ((global_step % 100) == 0):
                    train_writer.add_summary(summary_str, global_step)
            except tf.errors.OutOfRangeError:
                # Input pipeline exhausted: stop training.
                break
        print('Training ends.')
        self.save(self.oparam.checkpoint_dir, global_step)
        train_writer.close()
        val_writer.close()

    def test_imgs(self, fnames_img, name='test_imgs'):
        """Placeholder for evaluating a list of image files (not implemented)."""
        pass

    def test(self, name='test'):
        """Run inference over the test split, saving predicted instruction
        maps (and, when available, regularized outputs) as PNG files."""
        tf.global_variables_initializer().run()
        self.load(self.oparam.checkpoint_dir, True)
        import cv2

        def fn_rescaleimg(x):
            # Map network output from roughly [-0.5, 0.5] to [0, 255].
            x += 0.5
            x[(x > 1)] = 1.0
            x[(x < 0)] = 0.0
            return (x * 255.0)
        svpath = os.path.join(self.oparam.checkpoint_dir, 'eval')
        fn_path = (lambda x: os.path.join(svpath, x))
        if (not os.path.exists(svpath)):
            os.makedirs(svpath)
        test_setup = {self.batch_handle: self.test_handle}
        lst_eval_tensors = [self.input_names, self.tf_models.net.instr['real'], tf.nn.softmax(self.tf_models.net.logits['real'])]
        if ('real' in self.tf_models.net.resi_outs.keys()):
            lst_eval_tensors.append(self.tf_models.net.resi_outs['real'])
        sgpath = os.path.join(svpath, 'gen')
        if (not os.path.exists(sgpath)):
            os.makedirs(sgpath)
        cnt1 = 0
        cnt2 = 0
        show_info = self.oparam.params.get('show_confidence', 0)
        while 1:
            try:
                rst = self.sess.run(lst_eval_tensors, feed_dict=test_setup)
                names = rst[0]
                labels = rst[1]
                probs = rst[2]
                for i in range(names.shape[0]):
                    fname = str(names[i], encoding='utf-8')
                    if show_info:
                        # Report mean/std of the per-pixel max softmax as a
                        # rough confidence estimate.
                        p = probs[i]
                        max_p = np.amax(p, axis=(- 1))
                        conf_mean = np.mean(max_p)
                        conf_std = np.std(max_p)
                        print(('%d %s (conf: m=%f, s=%f)' % ((cnt1 + 1), fname, conf_mean, conf_std)))
                    else:
                        sys.stdout.write(('\r%d %s' % ((cnt1 + 1), fname)))
                        sys.stdout.flush()
                    fpath = os.path.join(svpath, (fname + '.png'))
                    save_instr(fpath, labels[i])
                    cnt1 += 1
                    if ('real' in self.tf_models.net.resi_outs.keys()):
                        regul = rst[3]
                        fpath = os.path.join(sgpath, (fname + '.png'))
                        cv2.imwrite(fpath, fn_rescaleimg(regul[i]))
                        cnt2 += 1
            except tf.errors.OutOfRangeError:
                break
        print('\nProcessing Done!')
        return

    def load_params(self, needed=False):
        """Load pickled parameters from the checkpoint directory into
        ``self.oparam.params``; re-raise on failure only if ``needed``."""
        fname = os.path.join(self.oparam.checkpoint_dir, 'params.pkl')
        try:
            with open(fname, 'rb') as f:
                new_params = pickle.load(f)
            self.oparam.params.update(new_params)
            if needed:
                self.oparam.params['is_train'] = False
            print(('Loaded parameters from %s' % fname))
            for (key, value) in self.oparam.params.items():
                print('-', key, '=', value)
        # NOTE(review): bare except deliberately swallows any load error in
        # the best-effort (not needed) case — consider narrowing.
        except:
            if needed:
                print(('[!] Error loading parameters from %s' % fname))
                raise
class TestConfigurable(unittest.TestCase):
    """Tests for the ``@configurable`` config/explicit-argument mechanism.

    NOTE(review): several decorators appear to have been stripped from this
    chunk during extraction (e.g. the inner classes in ``testBadClass`` look
    like they should carry ``@configurable`` on ``__init__`` and
    ``@classmethod`` on ``from_config``); without them some assertions below
    cannot hold. Confirm against the original source.
    """

    def testInitWithArgs(self):
        """Plain explicit-argument construction works for all test classes."""
        _ = _TestClassA(arg1=1, arg2=2, arg3=3)
        _ = _TestClassB('shape', arg1=1, arg2=2)
        _ = _TestClassC('shape', arg1=1, arg2=2)
        _ = _TestClassD('shape', arg1=1, arg2=2, arg3=3)

    def testPatchedAttr(self):
        """The decorator preserves docstrings and type annotations."""
        self.assertTrue(('Doc' in _TestClassB.__init__.__doc__))
        self.assertEqual(_TestClassD.__init__.__annotations__['arg1'], int)

    def testInitWithCfg(self):
        """Construction from a config object, positionally or by keyword."""
        cfg = get_cfg()
        cfg.ARG1 = 1
        cfg.ARG2 = 2
        cfg.ARG3 = 3
        _ = _TestClassA(cfg)
        _ = _TestClassB(cfg, input_shape='shape')
        _ = _TestClassC(cfg, input_shape='shape')
        _ = _TestClassD(cfg, input_shape='shape')
        _ = _LegacySubClass(cfg, input_shape='shape')
        _ = _NewSubClassNewInit(cfg, input_shape='shape')
        _ = _LegacySubClassNotCfg(cfg, input_shape='shape')
        # Positional non-cfg argument after cfg is rejected for class D.
        with self.assertRaises(TypeError):
            _ = _TestClassD(cfg, 'shape')
        _ = _TestClassA(cfg=cfg)
        _ = _TestClassB(cfg=cfg, input_shape='shape')
        _ = _TestClassC(cfg=cfg, input_shape='shape')
        _ = _TestClassD(cfg=cfg, input_shape='shape')
        _ = _LegacySubClass(cfg=cfg, input_shape='shape')
        _ = _NewSubClassNewInit(cfg=cfg, input_shape='shape')
        # This subclass names its config parameter 'config' instead of 'cfg'.
        _ = _LegacySubClassNotCfg(config=cfg, input_shape='shape')

    def testInitWithCfgOverwrite(self):
        """Explicit keyword arguments override values taken from the config."""
        cfg = get_cfg()
        cfg.ARG1 = 1
        cfg.ARG2 = 999
        # Without overriding the bad ARG2, construction must fail.
        with self.assertRaises(AssertionError):
            _ = _TestClassA(cfg, arg3=3)
        _ = _TestClassA(cfg, arg2=2, arg3=3)
        _ = _TestClassB(cfg, input_shape='shape', arg2=2, arg3=3)
        _ = _TestClassC(cfg, input_shape='shape', arg2=2, arg3=3)
        _ = _TestClassD(cfg, input_shape='shape', arg2=2, arg3=3)
        _ = _TestClassA(cfg=cfg, arg2=2, arg3=3)
        _ = _TestClassB(cfg=cfg, input_shape='shape', arg2=2, arg3=3)
        _ = _TestClassC(cfg=cfg, input_shape='shape', arg2=2, arg3=3)
        _ = _TestClassD(cfg=cfg, input_shape='shape', arg2=2, arg3=3)

    def testInitWithCfgWrongArgs(self):
        """Unknown keyword arguments are rejected."""
        cfg = get_cfg()
        cfg.ARG1 = 1
        cfg.ARG2 = 2
        with self.assertRaises(TypeError):
            _ = _TestClassB(cfg, 'shape', not_exist=1)
        with self.assertRaises(TypeError):
            _ = _TestClassC(cfg, 'shape', not_exist=1)
        with self.assertRaises(TypeError):
            _ = _TestClassD(cfg, 'shape', not_exist=1)

    def testBadClass(self):
        """Misconfigured classes raise at construction time.

        NOTE(review): these inner classes look decorator-stripped (see the
        class docstring); as written they would construct without error.
        """
        class _BadClass1():
            def __init__(self, a=1, b=2):
                pass

        class _BadClass2():
            def __init__(self, a=1, b=2):
                pass

            def from_config(self, cfg):
                pass

        class _BadClass3():
            def __init__(self, a=1, b=2):
                pass

            def from_config(cls, config):
                pass
        with self.assertRaises(AttributeError):
            _ = _BadClass1(a=1)
        with self.assertRaises(TypeError):
            _ = _BadClass2(a=1)
        with self.assertRaises(TypeError):
            _ = _BadClass3(get_cfg())

    def testFuncWithCfg(self):
        """Free functions support the same cfg/explicit calling conventions."""
        cfg = get_cfg()
        cfg.ARG1 = 10
        cfg.ARG3 = 30
        self.assertEqual(_test_func(1), (1, 2, 3, 4))
        # cfg alone is not enough: arg2 has no config mapping.
        with self.assertRaises(TypeError):
            _test_func(cfg)
        self.assertEqual(_test_func(cfg, arg2=2), (10, 2, 30, 4))
        self.assertEqual(_test_func(cfg, arg1=100, arg2=20), (100, 20, 30, 4))
        self.assertEqual(_test_func(cfg, arg1=100, arg2=20, arg4=40), (100, 20, 30, 40))
        self.assertTrue(callable(_test_func.from_config))

    def testOmegaConf(self):
        """Configurable classes also accept an OmegaConf config object."""
        cfg = model_zoo.get_config('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml')
        cfg = OmegaConf.create(cfg.dump())
        if (not torch.cuda.is_available()):
            cfg.MODEL.DEVICE = 'cpu'
        build_model(cfg)
@_REGISTRY.register()
class ESRGANModel(SRGANModel):
    """ESRGAN model: SRGAN trained with a relativistic average GAN loss.

    NOTE(review): the extracted source showed ``_REGISTRY.register()`` as a
    bare (no-op) statement right before the class; the leading ``@`` restored
    here was evidently lost during extraction — confirm against the original.
    """

    def optimize_parameters(self, current_iter):
        """Run one optimisation step for the generator and the discriminator.

        The generator is only updated every ``net_d_iters`` iterations and
        only after ``net_d_init_iters`` discriminator warm-up iterations.
        Populates ``self.log_dict`` with the reduced per-loss values.
        """
        # --- generator step: freeze the discriminator's parameters ---
        for p in self.net_d.parameters():
            p.requires_grad = False
        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.lq)
        l_g_total = 0
        loss_dict = OrderedDict()
        if (((current_iter % self.net_d_iters) == 0) and (current_iter > self.net_d_init_iters)):
            if self.cri_pix:
                # pixel-wise reconstruction loss
                l_g_pix = self.cri_pix(self.output, self.gt)
                l_g_total += l_g_pix
                loss_dict['l_g_pix'] = l_g_pix
            if self.cri_perceptual:
                (l_g_percep, l_g_style) = self.cri_perceptual(self.output, self.gt)
                if (l_g_percep is not None):
                    l_g_total += l_g_percep
                    loss_dict['l_g_percep'] = l_g_percep
                if (l_g_style is not None):
                    l_g_total += l_g_style
                    loss_dict['l_g_style'] = l_g_style
            # Relativistic GAN loss for the generator: each prediction is
            # compared against the mean prediction of the other distribution.
            real_d_pred = self.net_d(self.gt).detach()
            fake_g_pred = self.net_d(self.output)
            l_g_real = self.cri_gan((real_d_pred - torch.mean(fake_g_pred)), False, is_disc=False)
            l_g_fake = self.cri_gan((fake_g_pred - torch.mean(real_d_pred)), True, is_disc=False)
            l_g_gan = ((l_g_real + l_g_fake) / 2)
            l_g_total += l_g_gan
            loss_dict['l_g_gan'] = l_g_gan
            l_g_total.backward()
            self.optimizer_g.step()
        # --- discriminator step: unfreeze and train on real/fake separately ---
        for p in self.net_d.parameters():
            p.requires_grad = True
        self.optimizer_d.zero_grad()
        # The fake prediction used in the "real" branch is detached so no
        # generator gradients flow; real and fake get separate backward calls.
        fake_d_pred = self.net_d(self.output).detach()
        real_d_pred = self.net_d(self.gt)
        l_d_real = (self.cri_gan((real_d_pred - torch.mean(fake_d_pred)), True, is_disc=True) * 0.5)
        l_d_real.backward()
        fake_d_pred = self.net_d(self.output.detach())
        l_d_fake = (self.cri_gan((fake_d_pred - torch.mean(real_d_pred.detach())), False, is_disc=True) * 0.5)
        l_d_fake.backward()
        self.optimizer_d.step()
        loss_dict['l_d_real'] = l_d_real
        loss_dict['l_d_fake'] = l_d_fake
        loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
        loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
        self.log_dict = self.reduce_loss_dict(loss_dict)
        if (self.ema_decay > 0):
            self.model_ema(decay=self.ema_decay)
def _new_policy_set(new_policy):
    """Validate *new_policy* and install it on the faked policy holder.

    Rejects TrioPolicy instances, any change made while inside a Trio
    context, and anything that is neither None nor an
    asyncio.AbstractEventLoopPolicy.
    """
    if isinstance(new_policy, TrioPolicy):
        raise RuntimeError("You can't set the Trio loop policy manually")
    if _in_trio_context():
        raise RuntimeError("You can't change the event loop policy in Trio context")
    acceptable = new_policy is None or isinstance(new_policy, asyncio.AbstractEventLoopPolicy)
    if not acceptable:
        # asyncio switched from assert to an explicit TypeError in 3.11.
        exc_type = TypeError if sys.version_info >= (3, 11) else AssertionError
        raise exc_type(
            f"policy must be an instance of AbstractEventLoopPolicy or None, "
            f"not '{type(new_policy).__name__}'"
        )
    _faked_policy.policy = new_policy
class TestWarningsWrapper():
    """Tests that utils.warn attributes the warning to the caller's file."""

    def test_warn(self):
        msg_text = 'short and stout'
        src = 'teapot'
        with warnings.catch_warnings(record=True) as recorded:
            utils.warn(message=msg_text, category=UserWarning, source=src)
            assert len(recorded) == 1
            caught = recorded[0]
            # The warning is attributed to this test file, not to utils.
            assert caught.filename == __file__
            assert caught.category == UserWarning
            assert isinstance(caught.message, UserWarning)
            rendered = str(caught.message)
            assert msg_text in rendered
            assert __file__ in rendered
            assert caught.source == src
class Effect5553(BaseEffect):
    """Passive hull bonus: boosts the max velocity of loaded charges that
    require the Heavy Assault Missiles skill, scaled by the ship's
    eliteBonusHeavyGunship1 attribute."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        bonus = ship.getModifiedItemAttr('eliteBonusHeavyGunship1')

        def uses_ham_charge(mod):
            return mod.charge.requiresSkill('Heavy Assault Missiles')

        fit.modules.filteredChargeBoost(uses_ham_charge, 'maxVelocity', bonus, skill='Heavy Assault Cruisers', **kwargs)
class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer encoder block: a residual multi-head attention
    branch followed by a residual feed-forward branch."""

    def __init__(self, emb_size=900, drop_p=0.0, forward_expansion=4, forward_drop_p=0.0, **kwargs):
        attention_branch = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            MultiHeadAttention(emb_size, **kwargs),
            nn.Dropout(drop_p),
        ))
        feedforward_branch = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            FeedForwardBlock(emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
            nn.Dropout(drop_p),
        ))
        super().__init__(attention_branch, feedforward_branch)
def initialize_vars():
    """Reset all module-level grammar/parser tables to their empty state.

    NOTE(review): ``md5`` and ``cStringIO`` are Python 2 standard-library
    modules, so this function targets a Python 2 runtime.
    """
    global Productions, Prodnames, Prodmap, Terminals
    global Nonterminals, First, Follow, Precedence, LRitems
    global Errorfunc, Signature, Requires
    Productions = [None]  # slot 0 reserved as a placeholder production
    Prodnames = {}
    Prodmap = {}
    Terminals = {}
    Nonterminals = {}
    First = {}
    Follow = {}
    Precedence = {}
    LRitems = []
    Errorfunc = None
    Signature = md5.new()  # rolling hash object used as the grammar signature
    Requires = {}
    # Verbose/debug output buffers.
    global _vf, _vfc
    _vf = cStringIO.StringIO()
    _vfc = cStringIO.StringIO()
@_arg_scope
def softmax(logits, scope=None):
    """Apply softmax over the last dimension of an N-D logits tensor.

    NOTE(review): the extracted source showed ``_arg_scope`` as a bare
    statement before the def; the leading ``@`` restored here was evidently
    lost during extraction — confirm the decorator against the original file.

    Args:
      logits: tensor of rank >= 2.
      scope: optional variable scope name (defaults to 'softmax').

    Returns:
      A tensor with the same shape as `logits`, softmax-normalised over the
      last dimension.
    """
    with variable_scope.variable_scope(scope, 'softmax', [logits]):
        num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
        # Collapse all leading dims so nn.softmax acts on the last axis only.
        logits_2d = array_ops.reshape(logits, [(- 1), num_logits])
        predictions = nn.softmax(logits_2d)
        predictions = array_ops.reshape(predictions, array_ops.shape(logits))
        if (not context.executing_eagerly()):
            # Reshape via dynamic shape loses static shape info in graph mode;
            # restore it from the input.
            predictions.set_shape(logits.get_shape())
        return predictions
class PolypTrainer(nnUNetTrainer):
    """nnU-Net trainer customised for the polyp/OCT segmentation experiments.

    Visible customisations: a custom network ('my_unet' with FusedMBConv
    blocks), deep supervision truncated to the 4 highest-resolution scales,
    a Dice-weighted loss, a two-group SGD optimizer with a much smaller
    backbone learning rate, wandb logging, and a per-epoch "real" validation
    that computes slice- and volume-level Dice scores.
    """

    # Class-level architecture/experiment knobs, read by
    # build_network_architecture and run_training.
    base_ch = 16
    block = 'FusedMBConv'
    use_my_unet = True
    network_name = 'my_unet'
    project_prefix = 'polyp'
    setting = 2

    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool=True, device: torch.device=torch.device('cuda'), debug=True, job_id=None):
        """Initialise the base trainer, then override LR and batch size."""
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device, debug, job_id)
        self.initial_lr = 0.001
        if self.debug:
            self.batch_size = 2
        else:
            pass
            # NOTE(review): redundant `pass` above — confirm this assignment
            # is really meant to run only in the non-debug branch.
            self.batch_size = 12

    def _get_deep_supervision_scales(self):
        """Return per-level downsampling scales, truncated to 4 levels."""
        pool_op_kernel_sizes = self.configuration_manager.pool_op_kernel_sizes
        deep_supervision_scales = list((list(i) for i in (1 / np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0))))
        # Only supervise the 4 highest-resolution outputs.
        deep_supervision_scales = deep_supervision_scales[:4]
        return deep_supervision_scales

    def _build_loss(self):
        """Build the deep-supervision-wrapped segmentation loss.

        Dice+BCE when the label manager defines regions, otherwise Dice+CE
        with the Dice term weighted 1.5x.
        """
        if self.label_manager.has_regions:
            loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-05, 'ddp': self.is_ddp}, use_ignore_label=(self.label_manager.ignore_label is not None), dice_class=MemoryEfficientSoftDiceLoss)
        else:
            loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-05, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1.5, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)
        deep_supervision_scales = self._get_deep_supervision_scales()
        # Exponentially decaying weights (1, 1/2, 1/4, ...), normalised to 1.
        weights = np.array([(1 / (2 ** i)) for i in range(len(deep_supervision_scales))])
        weights = (weights / weights.sum())
        print(f'ds wegihts: {weights}')  # (sic: "wegihts" typo is in the original log string)
        loss = DeepSupervisionWrapper(loss, weights)
        return loss

    def build_network_architecture(plans_manager: PlansManager, dataset_json, configuration_manager: ConfigurationManager, num_input_channels, enable_deep_supervision: bool=True) -> nn.Module:
        """Build the custom network using the class-level architecture knobs.

        NOTE(review): declared without ``self``/``@staticmethod``, matching
        nnU-Net's static builder convention — confirm it is only invoked
        statically.
        """
        return get_network_from_plans(plans_manager, dataset_json, configuration_manager, num_input_channels, deep_supervision=enable_deep_supervision, base_ch=PolypTrainer.base_ch, block=PolypTrainer.block, use_my_unet=PolypTrainer.use_my_unet, setting=PolypTrainer.setting)

    def configure_optimizers(self):
        """SGD with a reduced LR (1e-5) parameter group for the backbone."""
        # Identify backbone parameters by object id so they can be excluded
        # from the default-LR group.
        backbone_params = list(map(id, self.network.backbone.parameters()))
        other_params = filter((lambda p: (id(p) not in backbone_params)), self.network.parameters())
        optimizer = torch.optim.SGD([{'params': self.network.backbone.parameters(), 'lr': 1e-05}, {'params': other_params}], self.initial_lr, weight_decay=self.weight_decay, momentum=0.99, nesterov=True)
        lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)
        return (optimizer, lr_scheduler)

    def run_training(self):
        """Training loop: per-epoch training, "real" validation, then the
        standard validation pass, with optional wandb logging on rank 0."""
        self.on_train_start()
        if ((not self.debug) and (self.local_rank == 0)):
            # SECURITY(review): hard-coded wandb API key committed to source —
            # this credential should be revoked and read from the environment.
            wandb.login(key='66b58ac7004a123a43487d7a6cf34ebb4571a7ea')
            self.initialize_wandb(project=f'{self.project_prefix}_{self.fold}', name=f'{self.network.__class__.__name__}_{self.job_id}_{self.fold}_lr_{self.initial_lr}', dir=self.output_folder, id=None)
        print(f'debug: {self.debug}'.center(50, '='))
        for epoch in range(self.current_epoch, self.num_epochs):
            self.on_epoch_start()
            self.on_train_epoch_start()
            train_outputs = []
            self.print_to_log_file(f'start training, {self.num_iterations_per_epoch}')
            if self.debug:
                # Debug mode: a single batch per epoch as a fast smoke test.
                for batch_id in range(1):
                    train_outputs.append(self.train_step(next(self.dataloader_train)))
            else:
                print(f'num of epochs: {self.num_iterations_per_epoch}'.center(50, '='))
                for batch_id in range(self.num_iterations_per_epoch):
                    train_outputs.append(self.train_step(next(self.dataloader_train)))
            self.print_to_log_file(f'finished training')
            self.on_train_epoch_end(train_outputs)
            self.real_validation_retina()
            with torch.no_grad():
                self.on_validation_epoch_start()
                val_outputs = []
                for batch_id in range(self.num_val_iterations_per_epoch):
                    val_outputs.append(self.validation_step(next(self.dataloader_val)))
                self.on_validation_epoch_end(val_outputs)
            self.on_epoch_end()
            torch.cuda.empty_cache()
        self.on_train_end()

    def train_step(self, batch: dict) -> dict:
        """Run one optimisation step on a batch; returns the scalar loss."""
        data = batch['data']
        target = batch['target']
        # NOTE(review): inputs are cast to float16 *before* autocast — confirm
        # this is intended rather than letting autocast manage precision.
        data = data.to(torch.float16).to(self.device, non_blocking=True)
        if isinstance(target, list):
            # Deep supervision: one target per supervised output scale.
            target = [i.to(torch.float16).to(self.device, non_blocking=True) for i in target]
        else:
            target = target.to(torch.float16).to(self.device, non_blocking=True)
        self.optimizer.zero_grad()
        with (autocast(self.device.type, enabled=True) if (self.device.type == 'cuda') else dummy_context()):
            output = self.network(data)
            l = self.loss(output, target)
        if (self.grad_scaler is not None):
            # AMP path: scale, unscale for clipping, then step/update.
            self.grad_scaler.scale(l).backward()
            self.grad_scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
            self.grad_scaler.step(self.optimizer)
            self.grad_scaler.update()
        else:
            l.backward()
            torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
            self.optimizer.step()
        return {'loss': l.detach().cpu().numpy()}

    def real_validation_retina(self):
        """Run sliding-window inference on the validation split and log
        per-class and mean slice-/volume-level Dice scores.

        Also exports predictions (and next-cascade-stage resamples) through a
        worker pool, and checkpoints the best epoch by slice-level Dice.
        """
        self.set_deep_supervision_enabled(False)
        self.network.eval()
        predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True, perform_everything_on_gpu=True, device=self.device, verbose=False, verbose_preprocessing=False, allow_tqdm=False)
        predictor.manual_initialization(self.network, self.plans_manager, self.configuration_manager, None, self.dataset_json, self.__class__.__name__, self.inference_allowed_mirroring_axes)
        with multiprocessing.get_context('spawn').Pool(default_num_processes) as segmentation_export_pool:
            worker_list = segmentation_export_pool._pool
            validation_output_folder = join(self.output_folder, 'validation')
            maybe_mkdir_p(validation_output_folder)
            (tr_keys, val_keys) = self.do_split()
            if self.is_ddp:
                # Shard the cases across DDP ranks.
                val_keys = val_keys[self.local_rank::dist.get_world_size()]
                tr_keys = tr_keys[self.local_rank::dist.get_world_size()]
            dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys, folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage, num_images_properties_loading_threshold=0)
            dataset_all = nnUNetDataset(self.preprocessed_dataset_folder, (tr_keys + val_keys), folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage, num_images_properties_loading_threshold=0)
            next_stages = self.configuration_manager.next_stage_names
            if (next_stages is not None):
                _ = [maybe_mkdir_p(join(self.output_folder_base, 'predicted_next_stage', n)) for n in next_stages]
            results = []
            for k in list(dataset_val.keys()):
                # Throttle submission so the export pool queue stays bounded.
                proceed = (not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results, allowed_num_queued=(2 * len(segmentation_export_pool._pool))))
                while (not proceed):
                    sleep(0.1)
                    proceed = (not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results, allowed_num_queued=(2 * len(segmentation_export_pool._pool))))
                (data, seg, properties) = dataset_all.load_case(k)
                if self.is_cascaded:
                    # Append the previous-stage segmentation as one-hot channels.
                    data = np.vstack((data, convert_labelmap_to_one_hot(seg[(- 1)], self.label_manager.foreground_labels, output_dtype=data.dtype)))
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    data = torch.from_numpy(data)
                output_filename_truncated = join(validation_output_folder, k)
                start_time = end_time = (- 1)
                try:
                    start_time = time()
                    prediction = predictor.predict_sliding_window_return_logits(data)
                    end_time = time()
                except RuntimeError:
                    # Fall back to CPU once (likely OOM), then restore the flag.
                    # NOTE(review): start/end times are not re-measured on this
                    # path, so the assertion below would fail after a fallback.
                    predictor.perform_everything_on_gpu = False
                    prediction = predictor.predict_sliding_window_return_logits(data)
                    predictor.perform_everything_on_gpu = True
                assert ((start_time != (- 1)) and (end_time != (- 1)) and (start_time < end_time))
                self.print_to_log_file(f'predicting {k} took {(end_time - start_time):.2f} s')
                prediction = prediction.cpu()
                ref_file = join(self.preprocessed_dataset_folder_base, 'gt_segmentations', f'{k}.nii.gz')
                import SimpleITK as sitk
                # NOTE(review): ref_arr and pred are computed but never used.
                ref_arr = sitk.GetArrayFromImage(sitk.ReadImage(ref_file))
                pred = torch.softmax(prediction.float(), dim=0).argmax(dim=0).detach().cpu().numpy()
                results.append(segmentation_export_pool.starmap_async(export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, output_filename_truncated, True),)))
                if (next_stages is not None):
                    for n in next_stages:
                        next_stage_config_manager = self.plans_manager.get_configuration(n)
                        expected_preprocessed_folder = join(nnUNet_preprocessed, self.plans_manager.dataset_name, next_stage_config_manager.data_identifier)
                        try:
                            tmp = nnUNetDataset(expected_preprocessed_folder, [k], num_images_properties_loading_threshold=0)
                            (d, s, p) = tmp.load_case(k)
                        except FileNotFoundError:
                            self.print_to_log_file(f'Predicting next stage {n} failed for case {k} because the preprocessed file is missing! Run the preprocessing for this configuration first!')
                            continue
                        target_shape = d.shape[1:]
                        output_folder = join(self.output_folder_base, 'predicted_next_stage', n)
                        output_file = join(output_folder, (k + '.npz'))
                        results.append(segmentation_export_pool.starmap_async(resample_and_save, ((prediction, target_shape, output_file, self.plans_manager, self.configuration_manager, properties, self.dataset_json),)))
            # Block until all async export jobs have finished.
            _ = [r.get() for r in results]
        if self.is_ddp:
            dist.barrier()
        output_file = join(validation_output_folder, 'summary.json')
        if (output_file is not None):
            assert output_file.endswith('.json'), 'output_file should end with .json'
        folder_ref = join(self.preprocessed_dataset_folder_base, 'gt_segmentations')
        folder_pred = validation_output_folder
        files_ref = subfiles(join(self.preprocessed_dataset_folder_base, 'gt_segmentations'), suffix=self.dataset_json['file_ending'], join=False)
        files_pred = subfiles(validation_output_folder, suffix=self.dataset_json['file_ending'], join=False)
        # `chill` disables the completeness check below (kept from upstream).
        chill = True
        if (not chill):
            present = [isfile(join(folder_pred, i)) for i in files_ref]
            assert all(present), 'Not all files in folder_pred exist in folder_ref'
        files_ref = [join(folder_ref, os.path.basename(i)) for i in files_pred]
        files_pred = [join(folder_pred, i) for i in files_pred]
        all_dice_slice_level = []
        all_dice_vol_level = []

        def dice_score_slice(y_pred, y_true, num_classes):
            """Per-slice, per-class Dice (reduction over dims 1 and 2)."""
            y_pred = F.one_hot(y_pred, num_classes=num_classes).to(torch.uint8)
            y_true = F.one_hot(y_true, num_classes=num_classes).to(torch.uint8)
            eps = 0.0001
            # NOTE(review): FN/FP are computed but never used.
            FN = torch.sum(((1 - y_pred) * y_true), dim=(1, 2))
            FP = torch.sum(((1 - y_true) * y_pred), dim=(1, 2))
            Pred = y_pred
            GT = y_true
            inter = torch.sum((GT * Pred), dim=(1, 2))
            union = (torch.sum(GT, dim=(1, 2)) + torch.sum(Pred, dim=(1, 2)))
            dice = (((2 * inter) + eps) / (union + eps))
            return dice

        def dice_score_vol(y_pred, y_true, num_classes):
            """Whole-volume, per-class Dice (reduction over dims 0, 1, 2)."""
            y_pred = F.one_hot(y_pred, num_classes=num_classes).to(torch.uint8)
            y_true = F.one_hot(y_true, num_classes=num_classes).to(torch.uint8)
            eps = 0.0001
            # NOTE(review): FN/FP are computed but never used.
            FN = torch.sum(((1 - y_pred) * y_true), dim=(0, 1, 2))
            FP = torch.sum(((1 - y_true) * y_pred), dim=(0, 1, 2))
            Pred = y_pred
            GT = y_true
            inter = torch.sum((GT * Pred), dim=(0, 1, 2))
            union = (torch.sum(GT, dim=(0, 1, 2)) + torch.sum(Pred, dim=(0, 1, 2)))
            dice = (((2 * inter) + eps) / (union + eps))
            return dice

        def get_score(file_ref, file_pred, ref_reader, pred_reader):
            """Load one ref/pred pair; return (slice Dice, volume Dice) arrays."""
            (seg_ref, seg_ref_dict) = ref_reader.read_seg(seg_fname=file_ref)
            (seg_pred, seg_pred_dict) = pred_reader.read_seg(file_pred)
            (seg_ref, seg_pred) = (seg_ref.squeeze(axis=0), seg_pred.squeeze(axis=0))
            assert (seg_ref.shape == seg_pred.shape), f'invalid shape, seg: {seg_pred.shape}, ref: {seg_ref.shape}'
            seg_ref = torch.tensor(seg_ref, dtype=torch.int64)
            seg_pred = torch.tensor(seg_pred, dtype=torch.int64)
            case_slice_dice = dice_score_slice(seg_pred, seg_ref, num_classes=4)
            case_vol_dice = dice_score_vol(seg_pred, seg_ref, num_classes=4)
            return (case_slice_dice.detach().cpu().numpy(), case_vol_dice.detach().cpu().numpy())
        reader = self.plans_manager.image_reader_writer_class()
        # Score all cases in parallel threads.
        results = Parallel((- 1), prefer='threads')((delayed(get_score)(file_ref, file_pred, ref_reader, pred_reader) for (file_ref, file_pred, ref_reader, pred_reader) in zip(files_ref, files_pred, ([reader] * len(files_ref)), ([reader] * len(files_pred)))))
        for res in results:
            all_dice_slice_level.append(res[0])
            all_dice_vol_level.append(res[1])
        self.print_to_log_file(f'starting computing scores...')
        # NOTE(review): these four lists are never filled or read afterwards.
        final_dsc_slice = []
        final_dsc_v = []
        final_avd = []
        final_bacc = []
        all_dice_slice_level = np.concatenate(all_dice_slice_level, axis=0)
        all_dice_vol_level = np.array(all_dice_vol_level)
        dsc_slice = np.mean(all_dice_slice_level, axis=0)
        dsc_mean = np.mean(dsc_slice[1:])  # mean over foreground classes only
        dsc_v = np.mean(all_dice_vol_level, axis=0)
        dsc_v_mean = np.mean(dsc_v[1:])
        if (self.local_rank == 0):
            self.print_to_log_file(f'DSC: {dsc_mean:.2%}')
            self.print_to_log_file(f'DSC_v: {dsc_v_mean:.2%}')
            self.print_to_log_file(f'IRF: {dsc_slice[1]:.2%}, SRF: {dsc_slice[2]:.2%}, PED: {dsc_slice[3]:.2%}')
            if (not self.debug):
                wandb.log(data={'test/DSC': dsc_mean}, step=self.current_epoch)
                wandb.log(data={'test/DSC_v': dsc_v_mean}, step=self.current_epoch)
                wandb.log(data={'test/IRF': dsc_slice[1]}, step=self.current_epoch)
                wandb.log(data={'test/SRF': dsc_slice[2]}, step=self.current_epoch)
                wandb.log(data={'test/PED': dsc_slice[3]}, step=self.current_epoch)
            # NOTE(review): best_score tuples are (epoch, dsc_v, dsc); these
            # comparisons read index 1 (dsc_v) for both keys — confirm the
            # 'dsc' comparison should not use index 2 instead.
            if (dsc_mean > self.best_score['dsc'][1]):
                self.best_score['dsc'] = (self.current_epoch, float(dsc_v_mean), float(dsc_mean))
            if (dsc_v_mean > self.best_score['dsc_v'][1]):
                self.best_score['dsc_v'] = (self.current_epoch, float(dsc_v_mean), float(dsc_mean))
            self.print_to_log_file(f"current best dsc_v: {self.best_score['dsc_v'][1]} at epoch: {self.best_score['dsc_v'][0]}, {self.best_score['dsc_v']}")
            self.print_to_log_file(f"current best dsc: {self.best_score['dsc'][1]} at epoch: {self.best_score['dsc'][0]}, {self.best_score['dsc']}")
        if ((dsc_mean > self.best_metric) and (self.local_rank == 0)):
            self.best_metric = dsc_mean
            self.best_epoch = self.current_epoch
            self.save_checkpoint(join(self.output_folder, 'dsc_slice_best.pth'))
        self.print_to_log_file(f'finished real validation')
        self.set_deep_supervision_enabled(True)
        compute_gaussian.cache_clear()
class HITAN6(FinTS3Segment):
    """Bank-side TAN segment, version 6 (per the class name HITAN6).

    Declarative data-element fields; the ``_d`` strings are the official
    German element names from the FinTS data dictionary and are runtime
    data (not translated).
    """
    tan_process = DataElementField(type='code', length=1, _d='TAN-Prozess')  # TAN process step
    task_hash_value = DataElementField(type='bin', max_length=256, required=False, _d='Auftrags-Hashwert')  # order hash value
    task_reference = DataElementField(type='an', max_length=35, required=False, _d='Auftragsreferenz')  # order reference
    challenge = DataElementField(type='an', max_length=2048, required=False, _d='Challenge')
    challenge_hhduc = DataElementField(type='bin', required=False, _d='Challenge HHD_UC')
    challenge_valid_until = DataElementGroupField(type=ChallengeValidUntil, required=False, _d='Gultigkeitsdatum und -uhrzeit fur Challenge')  # challenge expiry date/time
    tan_medium_name = DataElementField(type='an', max_length=32, required=False, _d='Bezeichnung des TAN-Mediums')  # name of the TAN medium
class TestCitationsTracked(unittest.TestCase):
    """Checks that every citation touched during import or an action
    (framework, plugin, transformer, view, method) is recorded in the
    archiver and referenced via ``!cite`` in provenance's action.yaml."""

    def setUp(self):
        self.plugin = get_dummy_plugin()

    def test_import(self):
        """Importing data records framework/plugin/transformer/view citations."""
        data = qiime2.Artifact.import_data(IntSequence1, [1, 2, 3, 4])
        archiver = data._archiver
        # (key, title) pairs expected in the archiver's citation record, in order.
        expected = [(('framework|qiime2:%s|0' % qiime2.__version__), 'Reproducible, interactive, scalable and extensible microbiome data science using QIIME 2'), ('plugin|dummy-plugin:0.0.0-dev|0', 'Does knuckle cracking lead to arthritis of the fingers?'), ('plugin|dummy-plugin:0.0.0-dev|1', 'Of flying frogs and levitrons'), ('transformer|dummy-plugin:0.0.0-dev|builtins:list->IntSequenceDirectoryFormat|0', 'An in-depth analysis of a piece of shit: distribution of Schistosoma mansoni and hookworm eggs in human stool'), ('view|dummy-plugin:0.0.0-dev|IntSequenceDirectoryFormat|0', 'Walking with coffee: Why does it spill?')]
        obs = list(map((lambda item: (item[0], item[1].fields['title'])), archiver.citations.items()))
        self.assertEqual(obs, expected)
        # Every recorded citation key must be cited in action.yaml.
        with ((archiver.provenance_dir / 'action') / 'action.yaml').open() as fh:
            action_yaml = fh.read()
        for (key, _) in expected:
            self.assertIn(('!cite %r' % key), action_yaml)

    def test_action(self):
        """Running a method additionally records the method's own citations."""
        data = qiime2.Artifact.import_data(IntSequence1, [1, 2, 3, 4])
        action = self.plugin.methods['split_ints']
        (left, right) = action(data)
        archiver = left._archiver
        # (key, title) pairs expected in the archiver's citation record, in order.
        expected = [(('framework|qiime2:%s|0' % qiime2.__version__), 'Reproducible, interactive, scalable and extensible microbiome data science using QIIME 2'), ('action|dummy-plugin:0.0.0-dev|method:split_ints|0', 'Sword swallowing and its side effects'), ('action|dummy-plugin:0.0.0-dev|method:split_ints|1', 'Response behaviors of Svalbard reindeer towards humans and humans disguised as polar bears on Edgeya'), ('plugin|dummy-plugin:0.0.0-dev|0', 'Does knuckle cracking lead to arthritis of the fingers?'), ('plugin|dummy-plugin:0.0.0-dev|1', 'Of flying frogs and levitrons'), ('view|dummy-plugin:0.0.0-dev|IntSequenceDirectoryFormat|0', 'Walking with coffee: Why does it spill?'), ('transformer|dummy-plugin:0.0.0-dev|builtins:list->IntSequenceDirectoryFormat|0', 'An in-depth analysis of a piece of shit: distribution of Schistosoma mansoni and hookworm eggs in human stool')]
        obs = list(map((lambda item: (item[0], item[1].fields['title'])), archiver.citations.items()))
        self.assertEqual(obs, expected)
        # Every recorded citation key must be cited in action.yaml.
        with ((archiver.provenance_dir / 'action') / 'action.yaml').open() as fh:
            action_yaml = fh.read()
        for (key, _) in expected:
            self.assertIn(('!cite %r' % key), action_yaml)
@pytest.mark.parametrize('cli_flat_fee, expected_channel_flat_fee', [(FeeAmount(42), FeeAmount(21)), (FeeAmount(43), FeeAmount(21))])
def test_prepare_mediation_fee_config_flat_fee(cli_flat_fee, expected_channel_flat_fee):
    """The CLI-level flat fee is halved per channel (42 -> 21, 43 -> 21,
    i.e. integer division), and cap_mediation_fees is passed through.

    NOTE(review): the extracted source began with a bare ``.parametrize(...)``
    line (invalid syntax); the ``@pytest.mark`` prefix restored here was
    evidently lost during extraction — confirm against the original file.
    """
    token_address = factories.make_token_address()
    fee_config = prepare_mediation_fee_config(cli_token_to_flat_fee=((token_address, cli_flat_fee),), cli_token_to_proportional_fee=((token_address, ProportionalFeeAmount(0)),), cli_token_to_proportional_imbalance_fee=((token_address, ProportionalFeeAmount(0)),), cli_cap_mediation_fees=False)
    assert (fee_config.get_flat_fee(token_address) == expected_channel_flat_fee)
    assert (fee_config.cap_mediation_fees is False)
class Window(QtWidgets.QMainWindow):
    """Main window showing a plain and an animated toggle stacked vertically."""

    def __init__(self):
        super().__init__()
        plain_toggle = Toggle()
        animated_toggle = AnimatedToggle(checked_color='#FFB000', pulse_checked_color='#44FFB000')
        central = QtWidgets.QWidget()
        vbox = QtWidgets.QVBoxLayout()
        for widget in (plain_toggle, animated_toggle):
            vbox.addWidget(widget)
        central.setLayout(vbox)
        self.setCentralWidget(central)
class PerlinNoiseFactory(object):
    """Callable n-dimensional Perlin noise generator, output roughly [-1, 1].

    Gradient vectors are generated lazily per grid point and cached in
    ``self.gradient``, so repeated evaluations over the same region are cheap.
    """

    def __init__(self, dimension, octaves=1, tile=(), unbias=False, random_state=None):
        """Create a noise generator.

        Args:
            dimension: number of input coordinates per call.
            octaves: number of noise layers summed; each octave doubles the
                frequency and halves the amplitude of the previous one.
            tile: per-axis periods; 0 (or missing) means no tiling on that axis.
            unbias: if True, push the output distribution towards the extremes
                by repeatedly applying smoothstep.
            random_state: optional ``np.random.RandomState``. Fixed defect:
                the previous default (``np.random.RandomState()`` in the
                signature) was evaluated once at definition time and shared
                by every instance; now each instance gets its own fresh
                RandomState when none is supplied.
        """
        self.dimension = dimension
        self.octaves = octaves
        # Pad so self.tile has an entry (possibly 0) for every axis.
        self.tile = tile + (0,) * dimension
        self.unbias = unbias
        # Normalisation so the interpolated dot products span about [-1, 1].
        self.scale_factor = 2 * dimension ** (-0.5)
        self.gradient = {}
        self.random_state = np.random.RandomState() if random_state is None else random_state

    def _generate_gradient(self):
        """Return a random unit gradient vector for one grid point."""
        if self.dimension == 1:
            # In 1-D a uniform slope in [-1, 1] stands in for a unit vector.
            return (self.random_state.uniform(-1, 1),)
        random_point = self.random_state.normal(0, 1, size=self.dimension)
        scale = sum(n * n for n in random_point) ** (-0.5)
        return tuple(coord * scale for coord in random_point)

    def get_plain_noise(self, *point):
        """Evaluate a single octave of noise at *point* (no tiling/unbias).

        Raises:
            ValueError: if the number of coordinates differs from ``dimension``.
        """
        if len(point) != self.dimension:
            raise ValueError('Expected {} values, got {}'.format(self.dimension, len(point)))
        # The two surrounding integer grid coordinates along each axis.
        grid_coords = []
        for coord in point:
            min_coord = math.floor(coord)
            grid_coords.append((min_coord, min_coord + 1))
        # Dot product of each hypercube corner's gradient with the offset
        # from that corner to the point.
        dots = []
        for grid_point in product(*grid_coords):
            if grid_point not in self.gradient:
                self.gradient[grid_point] = self._generate_gradient()
            gradient = self.gradient[grid_point]
            dot = 0
            for i in range(self.dimension):
                dot += gradient[i] * (point[i] - grid_point[i])
            dots.append(dot)
        # Smoothly interpolate the 2**d corner values down to one value,
        # collapsing one axis at a time (last axis first).
        dim = self.dimension
        while len(dots) > 1:
            dim -= 1
            s = smoothstep(point[dim] - grid_coords[dim][0])
            next_dots = []
            while dots:
                next_dots.append(lerp(s, dots.pop(0), dots.pop(0)))
            dots = next_dots
        return dots[0] * self.scale_factor

    def __call__(self, *point):
        """Evaluate the full noise (all octaves, tiling and unbias applied)."""
        ret = 0
        for o in range(self.octaves):
            o2 = 1 << o  # frequency multiplier for this octave
            new_point = []
            for i, coord in enumerate(point):
                coord *= o2
                if self.tile[i]:
                    coord %= self.tile[i] * o2
                new_point.append(coord)
            ret += self.get_plain_noise(*new_point) / o2
        # Renormalise: the octave amplitudes form a geometric series.
        ret /= 2 - 2 ** (1 - self.octaves)
        if self.unbias:
            # Map to [0, 1], sharpen with smoothstep, then map back to [-1, 1].
            r = (ret + 1) / 2
            for _ in range(int(self.octaves / 2 + 0.5)):
                r = smoothstep(r)
            ret = r * 2 - 1
        return ret
class TestBagType(unittest.TestCase):
def _create(self, *fields, typename='abc'):
bt = BagType(typename, fields)
assert (bt._fields == fields)
assert (len(bt._fields) == len(bt._attrs))
return bt
def test_factory(self):
Point = BagType('Point', ('x', 'y'))
self.assertEqual(Point.__name__, 'Point')
self.assertEqual(Point.__slots__, ())
self.assertEqual(Point.__module__, __name__)
self.assertEqual(Point.__getitem__, tuple.__getitem__)
assert (Point._fields == ('x', 'y'))
assert (Point._attrs == ('x', 'y'))
self.assertRaises(ValueError, BagType, 'abc%', ('efg', 'ghi'))
self.assertRaises(ValueError, BagType, 'class', ('efg', 'ghi'))
self.assertRaises(ValueError, BagType, '9abc', ('efg', 'ghi'))
assert (self._create('efg', 'g%hi')._attrs == ('efg', 'g_hi'))
assert (self._create('abc', 'class')._attrs == ('abc', '_class'))
assert (self._create('8efg', '9ghi')._attrs == ('_8efg', '_9ghi'))
assert (self._create('_efg', 'ghi')._attrs == ('_efg', 'ghi'))
self.assertRaises(ValueError, BagType, 'abc', ('efg', 'efg', 'ghi'))
self._create('x1', 'y2', typename='Point0')
self._create('a', 'b', 'c', typename='_')
bt = self._create('a!', 'a?')
assert (bt._attrs == ('a0', 'a1'))
x = bt('foo', 'bar')
assert (x.get('a!') == 'foo')
assert (x.a0 == 'foo')
assert (x.get('a?') == 'bar')
assert (x.a1 == 'bar')
bt = self._create('the', 'quick', 'brown', 'fox')
assert ("u'" not in repr(bt._fields))
self.assertRaises(TypeError, Point._make, [11])
self.assertRaises(TypeError, Point._make, [11, 22, 33])
((sys.flags.optimize >= 2), 'Docstrings are omitted with -O2 and above')
def test_factory_doc_attr(self):
Point = BagType('Point', ('x', 'y'))
self.assertEqual(Point.__doc__, 'Point(x, y)')
((sys.flags.optimize >= 2), 'Docstrings are omitted with -O2 and above')
def test_doc_writable(self):
Point = BagType('Point', ('x', 'y'))
self.assertEqual(Point.x.__doc__, "Alias for 'x'")
Point.x.__doc__ = 'docstring for Point.x'
self.assertEqual(Point.x.__doc__, 'docstring for Point.x')
def test_name_fixer(self):
for (spec, renamed) in [[('efg', 'g%hi'), ('efg', 'g_hi')], [('abc', 'class'), ('abc', '_class')], [('8efg', '9ghi'), ('_8efg', '_9ghi')], [('abc', '_efg'), ('abc', '_efg')], [('abc', '', 'x'), ('abc', '_0', 'x')], [('&', ' ', '*'), ('_0', '_1', '_2')]]:
assert (self._create(*spec)._attrs == renamed)
def test_module_parameter(self):
NT = BagType('NT', ['x', 'y'], module=collections)
self.assertEqual(NT.__module__, collections)
def test_instance(self):
Point = self._create('x', 'y', typename='Point')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1)
self.assertRaises(TypeError, Point, 1, 2, 3)
self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals())
self.assertRaises(TypeError, eval, 'Point(x=1)', locals())
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assertNotIn('__weakref__', dir(p))
self.assertEqual(p, Point._make([11, 22]))
self.assertEqual(p._fields, ('x', 'y'))
self.assertEqual(p._replace(x=1), (1, 22))
self.assertEqual(p._asdict(), dict(x=11, y=22))
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
self._fail('Did not detect an incorrect fieldname')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = BagType('Point', ('x', 'y'))
p = Point(11, 22)
self.assertIsInstance(p, tuple)
self.assertEqual(p, (11, 22))
self.assertEqual(tuple(p), (11, 22))
self.assertEqual(list(p), [11, 22])
self.assertEqual(max(p), 22)
self.assertEqual(max(*p), 22)
(x, y) = p
self.assertEqual(p, (x, y))
self.assertEqual((p[0], p[1]), (11, 22))
self.assertRaises(IndexError, p.__getitem__, 3)
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
self.assertRaises(AttributeError, eval, 'p.z', locals())
def test_odd_sizes(self):
Zero = BagType('Zero', ())
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = BagType('Dot', ('d',))
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d': 1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
n = (5000 if (sys.version_info >= (3, 7)) else 254)
names = list(set((''.join([choice(string.ascii_letters) for j in range(10)]) for i in range(n))))
n = len(names)
Big = BagType('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for (pos, name) in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b)
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999), (names[(- 5)], 42)]))
b2_expected = list(range(n))
b2_expected[1] = 999
b2_expected[(- 5)] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TBag(x=10, y=20, z=30)
for module in (pickle,):
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in range((- 1), (module.HIGHEST_PROTOCOL + 1)):
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
self.assertNotIn(b'OrderedDict', dumps(p, protocol))
def test_copy(self):
    """Shallow and deep copies must preserve equality and field names."""
    original = TBag(x=10, y=20, z=30)
    for duplicate in (copy.copy(original), copy.deepcopy(original)):
        self.assertEqual(original, duplicate)
        self.assertEqual(original._fields, duplicate._fields)
def test_name_conflicts(self):
    """Field names may shadow every identifier the generated class uses internally."""
    # Start with the nastiest direct collisions.
    Bag = BagType('T', ('itemgetter', 'property', 'self', 'cls', 'tuple'))
    inst = Bag(1, 2, 3, 4, 5)
    self.assertEqual(inst, (1, 2, 3, 4, 5))
    swapped = inst._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
    self.assertEqual(swapped, (10, 20, 30, 40, 50))
    # Every word that appears in the class-generation template source.
    words = {'Alias', 'At', 'AttributeError', 'Build', 'Bypass', 'Create', 'Encountered', 'Expected', 'Field', 'For', 'Got', 'Helper', 'IronPython', 'Jython', 'KeyError', 'Make', 'Modify', 'Note', 'OrderedDict', 'Point', 'Return', 'Returns', 'Type', 'TypeError', 'Used', 'Validate', 'ValueError', 'Variables', 'a', 'accessible', 'add', 'added', 'all', 'also', 'an', 'arg_list', 'args', 'arguments', 'automatically', 'be', 'build', 'builtins', 'but', 'by', 'cannot', 'class_namespace', 'classmethod', 'cls', 'collections', 'convert', 'copy', 'created', 'creation', 'd', 'debugging', 'defined', 'dict', 'dictionary', 'doc', 'docstring', 'docstrings', 'duplicate', 'effect', 'either', 'enumerate', 'environments', 'error', 'example', 'exec', 'f', 'f_globals', 'field', 'field_names', 'fields', 'formatted', 'frame', 'function', 'functions', 'generate', 'getter', 'got', 'greater', 'has', 'help', 'identifiers', 'indexable', 'instance', 'instantiate', 'interning', 'introspection', 'isidentifier', 'isinstance', 'itemgetter', 'iterable', 'join', 'keyword', 'keywords', 'kwds', 'len', 'like', 'list', 'map', 'maps', 'message', 'metadata', 'method', 'methods', 'module', 'module_name', 'must', 'name', 'named', 'namedtuple', 'namedtuple_', 'names', 'namespace', 'needs', 'new', 'nicely', 'num_fields', 'number', 'object', 'of', 'operator', 'option', 'p', 'particular', 'pickle', 'pickling', 'plain', 'pop', 'positional', 'property', 'r', 'regular', 'rename', 'replace', 'replacing', 'repr', 'repr_fmt', 'representation', 'result', 'reuse_itemgetter', 's', 'seen', 'sequence', 'set', 'side', 'specified', 'split', 'start', 'startswith', 'step', 'str', 'string', 'strings', 'subclass', 'sys', 'targets', 'than', 'the', 'their', 'this', 'to', 'tuple_new', 'type', 'typename', 'underscore', 'unexpected', 'unpack', 'up', 'use', 'used', 'user', 'valid', 'values', 'variable', 'verbose', 'where', 'which', 'work', 'x', 'y', 'z', 'zip'}
    ordered = tuple(sorted(words))
    Bag = BagType('T', ordered)
    vals = tuple(range(len(words)))
    # Positional construction.
    inst = Bag(*vals)
    self.assertEqual(inst, vals)
    # Keyword construction through the (renamed) attribute names.
    inst = Bag(**dict(zip(Bag._attrs, vals)))
    self.assertEqual(inst, vals)
    # Construction via _make.
    inst = Bag._make(vals)
    self.assertEqual(inst, vals)
    repr(inst)  # must not raise
    self.assertEqual(inst._asdict(), dict(zip(Bag._fields, vals)))
    inst = Bag._make(vals)
    scaled = tuple(v * 10 for v in vals)
    self.assertEqual(inst._replace(**dict(zip(Bag._fields, scaled))), scaled)
    self.assertEqual(Bag._attrs, ordered)
    self.assertEqual(inst.__getnewargs__(), vals)
def test_repr(self):
    """repr() reflects the runtime class name, even for subclasses."""
    A = BagType('A', ('x',))
    base_inst = A(1)
    self.assertEqual(repr(base_inst), 'A(x=1)')

    class B(A):
        pass

    # A subclass reports its own name, not the base's.
    sub_inst = B(1)
    self.assertEqual(repr(sub_inst), 'B(x=1)')
def test_namedtuple_subclass_issue_24931(self):
    """Regression test mirroring CPython issue 24931: a subclass that grows an
    instance __dict__ must keep _asdict() driven by the fields, not __dict__."""
    class Point(BagType('_Point', ['x', 'y'])):
        pass

    pt = Point(3, 4)
    self.assertEqual(pt._asdict(), OrderedDict([('x', 3), ('y', 4)]))
    # Ad-hoc attributes land in the instance __dict__ without polluting _asdict().
    pt.w = 5
    self.assertEqual(pt.__dict__, {'w': 5})
def test_annoying_attribute_names(self):
    """Field names colliding with the implementation's own attributes must be accepted."""
    # Each of these shadows an attribute or helper the generated class itself uses.
    tricky = ('__slots__', '__getattr__', '_attrs', '_fields', '__new__',
              '__getnewargs__', '__repr__', '_make', 'get', '_replace',
              '_asdict', '_cls', 'self', 'tuple')
    self._create(*tricky)
def test_internal_errors_propagate_to_controller(pytester: pytest.Pytester) -> None:
    # An exception raised in a collection hook on a worker must surface in the
    # controller's output instead of being swallowed (pytest-xdist behavior).
    # NOTE(review): the conftest source string below looks whitespace-mangled —
    # the `raise` is not indented under its `def` — confirm against the
    # original test before relying on it.
    pytester.makeconftest('\n def pytest_collection_modifyitems():\n raise RuntimeError("Some runtime error")\n ')
    pytester.makepyfile('def test(): pass')
    # One worker is enough for the error to cross the worker/controller boundary.
    result = pytester.runpytest('-n1')
    result.stdout.fnmatch_lines(['*RuntimeError: Some runtime error*'])
# (extraction residue removed: Hugging Face dataset-viewer page boilerplate,
#  not part of this source file)