code stringlengths 281 23.7M |
|---|
def pwre(id, raw_buf):
    """Decode one PWRE record from *raw_buf* and append it to ``pwre_file``.

    The record header is unpacked as '<IQ'; only the 64-bit payload is used.
    Within the payload: bit 7 is a hardware flag, bits 8-11 the sub C-state
    and bits 12-15 the C-state. The re-serialized record (big-endian
    '!hiqiiiiiB') is written to the module-level ``pwre_file`` handle.
    """
    _, payload = struct.unpack_from('<IQ', raw_buf)
    hw_flag = (payload >> 7) & 1
    c_state = (payload >> 12) & 0xF
    sub_c_state = (payload >> 8) & 0xF
    record = struct.pack('!hiqiiiiiB', 4, 8, id, 4, c_state, 4, sub_c_state, 1, hw_flag)
    pwre_file.write(record)
class HRModule(nn.Module):
    """One multi-branch stage of an HRNet.

    Runs ``num_branches`` parallel residual branches (presumably at different
    spatial resolutions, as in HRNet) and then fuses them: for each output
    branch, lower-resolution inputs are upsampled (1x1 conv + bilinear
    upsample) and higher-resolution inputs are downsampled through chains of
    stride-2 3x3 convs before summation.
    """

    def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True)):
        super(HRModule, self).__init__()
        self._check_branches(num_branches, num_blocks, in_channels, num_channels)
        self.in_channels = in_channels
        self.num_branches = num_branches
        # When False, only the first (highest-resolution) branch gets a fuse layer.
        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        # Checkpointing flag, forwarded to the residual blocks.
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels, num_channels):
        """Validate that every per-branch config list has ``num_branches`` entries."""
        if (num_branches != len(num_blocks)):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)
        if (num_branches != len(in_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Build one branch: ``num_blocks[branch_index]`` residual blocks in sequence.

        A 1x1-conv downsample shortcut is added to the first block when the
        stride or channel count changes. NOTE: mutates
        ``self.in_channels[branch_index]`` to the branch's output width.
        """
        downsample = None
        if ((stride != 1) or (self.in_channels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.in_channels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (num_channels[branch_index] * block.expansion))[1])
        layers = []
        layers.append(block(self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        self.in_channels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches as a ModuleList."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build the cross-resolution fusion layers.

        fuse_layers[i][j] transforms branch j's output to branch i's
        resolution/width: j > i upsamples (1x1 conv + 2**(j-i) bilinear
        upsample), j == i is identity (stored as None), j < i downsamples via
        (i - j) stride-2 3x3 convs. Returns None for a single branch.
        """
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], Upsample(scale_factor=(2 ** (j - i)), mode='bilinear', align_corners=False)))
                elif (j == i):
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            # Last downsample step switches to the target width and has no ReLU.
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
                        else:
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Run each branch then fuse.

        ``x`` is a list of tensors, one per branch (mutated in place by the
        branch pass). Returns a list of fused tensors, one per output branch.
        """
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    y += x[j]
                elif (j > i):
                    # Upsampled features are resized to x[i]'s exact spatial size
                    # (scale_factor alone can be off by one for odd sizes).
                    y = (y + resize(self.fuse_layers[i][j](x[j]), size=x[i].shape[2:], mode='bilinear', align_corners=False))
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
def test_pass_extra_reporting(pytester: Pytester) -> None:
    """A passing test yields no summary by default; ``-rp`` adds a PASSED line."""
    pytester.makepyfile('def test_this(): assert 1')
    default_run = pytester.runpytest()
    default_run.stdout.no_fnmatch_line('*short test summary*')
    rp_run = pytester.runpytest('-rp')
    rp_run.stdout.fnmatch_lines(['*test summary*', 'PASS*test_pass_extra_reporting*'])
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck block (EfficientNet-style).

    Pipeline: optional 1x1 expansion -> depthwise conv -> optional
    squeeze-and-excite -> 1x1 projection, with an identity skip (plus optional
    drop-connect) when shape permits.
    """

    def __init__(self, block_args, global_params, image_size=None):
        super().__init__()
        self._block_args = block_args
        # Keras-style momentum from global_params converted to PyTorch convention.
        self._bn_mom = (1 - global_params.batch_norm_momentum)
        self._bn_eps = global_params.batch_norm_epsilon
        # Squeeze-and-excite only when a valid ratio in (0, 1] is configured.
        self.has_se = ((self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1))
        self.id_skip = block_args.id_skip
        inp = self._block_args.input_filters
        oup = (self._block_args.input_filters * self._block_args.expand_ratio)
        if (self._block_args.expand_ratio != 1):
            # Expansion phase: 1x1 conv widening inp -> oup.
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        k = self._block_args.kernel_size
        s = self._block_args.stride
        # Depthwise phase: groups == channels.
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(in_channels=oup, out_channels=oup, groups=oup, kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        image_size = calculate_output_image_size(image_size, s)
        if self.has_se:
            # SE operates on globally pooled 1x1 features.
            Conv2d = get_same_padding_conv2d(image_size=(1, 1))
            num_squeezed_channels = max(1, int((self._block_args.input_filters * self._block_args.se_ratio)))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Projection phase: 1x1 conv back down to the block's output width.
        final_oup = self._block_args.output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """Run the block; ``drop_connect_rate`` (0..1) enables stochastic depth on the skip path."""
        x = inputs
        if (self._block_args.expand_ratio != 1):
            x = self._expand_conv(inputs)
            x = self._bn0(x)
            x = self._swish(x)
        x = self._depthwise_conv(x)
        x = self._bn1(x)
        x = self._swish(x)
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_reduce(x_squeezed)
            x_squeezed = self._swish(x_squeezed)
            x_squeezed = self._se_expand(x_squeezed)
            x = (torch.sigmoid(x_squeezed) * x)
        x = self._project_conv(x)
        x = self._bn2(x)
        (input_filters, output_filters) = (self._block_args.input_filters, self._block_args.output_filters)
        # NOTE(review): this assumes stride is a scalar; some EfficientNet
        # configs pass it as a one-element list ([1]) — confirm upstream.
        if (self.id_skip and (self._block_args.stride == 1) and (input_filters == output_filters)):
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = (x + inputs)
        return x

    def set_swish(self, memory_efficient=True):
        """Swap the activation between memory-efficient and export-friendly Swish."""
        self._swish = (MemoryEfficientSwish() if memory_efficient else Swish())
class Solution:
    """In-place removal of all occurrences of a value from a list (LeetCode 27)."""

    def removeElement(self, nums: List[int], val: int) -> int:
        """Remove every occurrence of ``val`` from ``nums`` in place.

        Returns the new length of ``nums`` (which is truncated to exactly the
        kept elements, order preserved).

        The original implementation called ``list.remove`` and restarted the
        scan from index 0 after each removal — O(n^2) worst case — with
        redundant single-element special cases. A single compaction pass is
        O(n) with the same observable result.
        """
        write = 0  # index of the next slot to keep a non-val element in
        for value in nums:
            if value != val:
                nums[write] = value
                write += 1
        del nums[write:]  # truncate so len(nums) reflects only kept elements
        return len(nums)
class TypedDictTests(BaseTestCase):
    """Tests for the deprecated ``mypy_extensions.TypedDict`` factory."""

    # NOTE(review): this method yields inside a with-block and is used as a
    # context manager throughout the class, so it presumably carried
    # @contextlib.contextmanager originally — the decorator appears to have
    # been stripped during extraction. TODO confirm and restore.
    def assert_typeddict_deprecated(self):
        with self.assertWarnsRegex(DeprecationWarning, 'mypy_extensions.TypedDict is deprecated'):
            (yield)

    def test_basics_iterable_syntax(self):
        with self.assert_typeddict_deprecated():
            Emp = TypedDict('Emp', {'name': str, 'id': int})
        self.assertIsSubclass(Emp, dict)
        self.assertIsSubclass(Emp, typing.MutableMapping)
        if (sys.version_info[0] >= 3):
            import collections.abc
            self.assertNotIsSubclass(Emp, collections.abc.Sequence)
        jim = Emp(name='Jim', id=1)
        # Instances are plain dicts; TypedDict is purely a static-typing construct.
        self.assertIs(type(jim), dict)
        self.assertEqual(jim['name'], 'Jim')
        self.assertEqual(jim['id'], 1)
        self.assertEqual(Emp.__name__, 'Emp')
        self.assertEqual(Emp.__module__, __name__)
        self.assertEqual(Emp.__bases__, (dict,))
        self.assertEqual(Emp.__annotations__, {'name': str, 'id': int})
        self.assertEqual(Emp.__total__, True)

    def test_basics_keywords_syntax(self):
        with self.assert_typeddict_deprecated():
            Emp = TypedDict('Emp', name=str, id=int)
        self.assertIsSubclass(Emp, dict)
        self.assertIsSubclass(Emp, typing.MutableMapping)
        if (sys.version_info[0] >= 3):
            import collections.abc
            self.assertNotIsSubclass(Emp, collections.abc.Sequence)
        jim = Emp(name='Jim', id=1)
        self.assertIs(type(jim), dict)
        self.assertEqual(jim['name'], 'Jim')
        self.assertEqual(jim['id'], 1)
        self.assertEqual(Emp.__name__, 'Emp')
        self.assertEqual(Emp.__module__, __name__)
        self.assertEqual(Emp.__bases__, (dict,))
        self.assertEqual(Emp.__annotations__, {'name': str, 'id': int})
        self.assertEqual(Emp.__total__, True)

    def test_typeddict_errors(self):
        with self.assert_typeddict_deprecated():
            Emp = TypedDict('Emp', {'name': str, 'id': int})
        self.assertEqual(TypedDict.__module__, 'mypy_extensions')
        jim = Emp(name='Jim', id=1)
        # Runtime isinstance/issubclass checks against a TypedDict must be rejected.
        with self.assertRaises(TypeError):
            isinstance({}, Emp)
        with self.assertRaises(TypeError):
            isinstance(jim, Emp)
        with self.assertRaises(TypeError):
            issubclass(dict, Emp)
        with self.assertRaises(TypeError), self.assert_typeddict_deprecated():
            TypedDict('Hi', x=())
        with self.assertRaises(TypeError), self.assert_typeddict_deprecated():
            TypedDict('Hi', [('x', int), ('y', ())])
        with self.assertRaises(TypeError):
            TypedDict('Hi', [('x', int)], y=int)

    # NOTE(review): the orphaned tuple below is almost certainly the argument
    # list of a stripped @skipUnless decorator for the following test — confirm.
    (PY36, 'Python 3.6 required')
    def test_py36_class_syntax_usage(self):
        # LabelPoint2D/Point2D are class-syntax TypedDicts defined elsewhere in the file.
        self.assertEqual(LabelPoint2D.__name__, 'LabelPoint2D')
        self.assertEqual(LabelPoint2D.__module__, __name__)
        self.assertEqual(LabelPoint2D.__annotations__, {'x': int, 'y': int, 'label': str})
        self.assertEqual(LabelPoint2D.__bases__, (dict,))
        self.assertEqual(LabelPoint2D.__total__, True)
        self.assertNotIsSubclass(LabelPoint2D, typing.Sequence)
        not_origin = Point2D(x=0, y=1)
        self.assertEqual(not_origin['x'], 0)
        self.assertEqual(not_origin['y'], 1)
        other = LabelPoint2D(x=0, y=1, label='hi')
        self.assertEqual(other['label'], 'hi')

    # Class-body syntax only parses on 3.6+, hence the exec guard.
    if PY36:
        exec(dedent('\n    def test_py36_class_usage_emits_deprecations(self):\n        with self.assert_typeddict_deprecated():\n            class Foo(TypedDict):\n                bar: int\n    '))

    def test_pickle(self):
        # EmpD must be module-global for pickle to locate it by qualified name.
        global EmpD
        with self.assert_typeddict_deprecated():
            EmpD = TypedDict('EmpD', name=str, id=int)
        jane = EmpD({'name': 'jane', 'id': 37})
        for proto in range((pickle.HIGHEST_PROTOCOL + 1)):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)
            self.assertEqual(jane2, {'name': 'jane', 'id': 37})
            ZZ = pickle.dumps(EmpD, proto)
            EmpDnew = pickle.loads(ZZ)
            self.assertEqual(EmpDnew({'name': 'jane', 'id': 37}), jane)

    def test_optional(self):
        with self.assert_typeddict_deprecated():
            EmpD = TypedDict('EmpD', name=str, id=int)
        self.assertEqual(typing.Optional[EmpD], typing.Union[(None, EmpD)])
        self.assertNotEqual(typing.List[EmpD], typing.Tuple[EmpD])

    def test_total(self):
        with self.assert_typeddict_deprecated():
            D = TypedDict('D', {'x': int}, total=False)
        self.assertEqual(D(), {})
        self.assertEqual(D(x=1), {'x': 1})
        self.assertEqual(D.__total__, False)
        if PY36:
            # Options is a total=False class-syntax TypedDict defined elsewhere in the file.
            self.assertEqual(Options(), {})
            self.assertEqual(Options(log_level=2), {'log_level': 2})
            self.assertEqual(Options.__total__, False)
def save_command(command):
    """POST a command record to the Bashhub API (best-effort).

    Connection failures are reported and swallowed. Auth failures (401/403)
    prompt the user to re-run setup.

    Bug fix: the original checked ``r.status_code`` inside a generic
    ``except`` handler, where ``r`` is unbound whenever ``requests.post``
    itself raised — a latent NameError. The status check belongs on the
    success path.
    """
    url = BH_URL + '/api/v1/command'
    try:
        r = requests.post(url, data=command.to_JSON(), headers=json_auth_headers())
    except ConnectionError:
        print("Sorry, looks like there's a connection error")
        return
    if r.status_code in (403, 401):
        print('Permissons Issue. Run bashhub setup to re-login.')
class SourceConverter(commands.Converter):
    """Convert a command argument into a matching Cog or Command on the bot.

    Lookup order: cog name first, then command name; raises BadArgument when
    neither matches.

    NOTE(review): ``convert`` lacks a ``self`` parameter, yet discord.py
    invokes converters as ``await instance.convert(ctx, argument)`` — as
    written the instance would bind to ``ctx``. Either callers use it
    statically or ``self`` (or a @staticmethod decorator) was dropped —
    confirm against the call site.
    """
    async def convert(ctx: commands.Context, argument: str) -> SourceType:
        cog = ctx.bot.get_cog(argument)
        if cog:
            return cog
        cmd = ctx.bot.get_command(argument)
        if cmd:
            return cmd
        raise commands.BadArgument(f'Unable to convert `{argument}` to valid command or Cog.')
class Window(operator):
    """Query-plan operator for a PythonQL window clause.

    Attributes mirror the clause: ``var`` is the variable bound to each
    window, ``binding_seq`` the expression windows are drawn from,
    ``s_when``/``e_when`` the start/end conditions, ``tumbling``/``only``
    the window-mode flags, and ``vars`` the variables visible to the
    conditions.
    """

    def __init__(self, var, tumbling, only, binding_seq, s_when, e_when, vars):
        self.var = var
        self.tumbling = tumbling
        self.only = only
        self.binding_seq = binding_seq
        self.s_when = s_when
        self.e_when = e_when
        self.vars = vars

    def defined_vars(self):
        """The window clause introduces exactly one new variable."""
        return {self.var}

    def used_vars(self):
        """Variables referenced by the binding-sequence expression.

        Bug fix: the original read ``expr.binding_seq`` where ``expr`` is an
        undefined global (guaranteed NameError at runtime); the attribute
        lives on ``self``.
        """
        from pythonql.Ast import get_all_vars, get_ast
        return get_all_vars(get_ast(self.binding_seq))

    def execute(self, table, prior_locs, prior_globs):
        """Delegate evaluation to the executor's window-clause processor."""
        from pythonql.Executor import processWindowClause
        return processWindowClause(self, table, prior_locs, prior_globs)
def test_add_opening_quote_delimited_text_is_common_prefix(cmd2_app):
    """When matches share a prefix containing a space, completion must prepend an opening quote."""
    text = '/home/user/file'
    line = f'test_delimited {text}'
    endidx = len(line)
    begidx = endidx - len(text)
    expected_common_prefix = '"/home/user/file'
    expected_display = sorted(['file.txt', 'file space.txt'], key=cmd2_app.default_sort_key)
    first_match = complete_tester(text, line, begidx, endidx, cmd2_app)
    assert first_match is not None
    assert os.path.commonprefix(cmd2_app.completion_matches) == expected_common_prefix
    assert cmd2_app.display_matches == expected_display
def test_threadpolltext_force_update(minimal_conf_noscreen, manager_nospawn):
    """force_update() on a polling widget must trigger an immediate extra poll."""
    config = minimal_conf_noscreen
    polled_widget = PollingWidget('Not polled')
    config.screens = [libqtile.config.Screen(top=libqtile.bar.Bar([polled_widget], 10))]
    manager_nospawn.start(config)
    widget = manager_nospawn.c.widget['pollingwidget']
    # One poll happens automatically on startup.
    assert widget.info()['text'] == 'Poll count: 1'
    widget.force_update()
    assert widget.info()['text'] == 'Poll count: 2'
class TestVmap:
    """vmap integration tests for functional modules and TensorDictModules.

    Fix: the ``skipif``/``parametrize`` lines had lost their ``@pytest.mark.``
    prefix (bare ``.skipif(...)`` expressions are a syntax error); the
    decorators are restored. Test bodies are unchanged.
    """

    @pytest.mark.skipif((not _has_functorch), reason=f'functorch not found: err={FUNCTORCH_ERR}')
    @pytest.mark.parametrize('moduletype,batch_params', [['linear', False], ['bn1', True], ['linear', True]])
    def test_vmap_patch(self, moduletype, batch_params):
        """vmap over a functionalized plain nn module, with/without batched params."""
        if (moduletype == 'linear'):
            module = nn.Linear(3, 4)
        elif (moduletype == 'bn1'):
            module = nn.BatchNorm1d(3)
        else:
            raise NotImplementedError
        if (moduletype == 'linear'):
            params = make_functional(module)
            fmodule = module
            x = torch.randn(10, 1, 3)
            if batch_params:
                params = params.expand(10, *params.batch_size)
                y = vmap(fmodule, (0, 0))(x, params)
            else:
                y = vmap(fmodule, (0, None))(x, params)
            assert (y.shape == torch.Size([10, 1, 4]))
        elif (moduletype == 'bn1'):
            params = make_functional(module)
            fmodule = module
            x = torch.randn(10, 2, 3)
            if batch_params:
                # BN buffers must be contiguous and locked to be vmapped over dim 0.
                params = params.expand(10, *params.batch_size).contiguous().lock_()
                y = vmap(fmodule, (0, 0))(x, params)
            else:
                raise NotImplementedError
            assert (y.shape == torch.Size([10, 2, 3]))

    @pytest.mark.skipif((not _has_functorch), reason=f'functorch not found: err={FUNCTORCH_ERR}')
    @pytest.mark.parametrize('moduletype,batch_params', [['linear', False], ['bn1', True], ['linear', True]])
    def test_vmap_tdmodule_functorch(self, moduletype, batch_params):
        """vmap over a TensorDictModule functionalized through functorch."""
        if (moduletype == 'linear'):
            module = nn.Linear(3, 4)
        elif (moduletype == 'bn1'):
            module = nn.BatchNorm1d(3)
        else:
            raise NotImplementedError
        if (moduletype == 'linear'):
            tdmodule = TensorDictModule(module, in_keys=['x'], out_keys=['y'])
            (tdmodule, params, buffers) = functorch_make_functional_with_buffers(tdmodule)
            x = torch.randn(10, 1, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = expand_list(params, 10)
                buffers = expand_list(buffers, 10)
                td = vmap(tdmodule, (0, 0, 0))(params, buffers, td)
            else:
                td = vmap(tdmodule, (None, None, 0))(params, buffers, td)
            y = td['y']
            assert (y.shape == torch.Size([10, 1, 4]))
        elif (moduletype == 'bn1'):
            tdmodule = TensorDictModule(module, in_keys=['x'], out_keys=['y'])
            (tdmodule, params, buffers) = functorch_make_functional_with_buffers(tdmodule)
            x = torch.randn(10, 2, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = expand_list(params, 10)
                buffers = expand_list(buffers, 10)
                td = vmap(tdmodule, (0, 0, 0))(params, buffers, td)
            else:
                raise NotImplementedError
            y = td['y']
            assert (y.shape == torch.Size([10, 2, 3]))

    @pytest.mark.skipif((not _has_functorch), reason=f'functorch not found: err={FUNCTORCH_ERR}')
    @pytest.mark.parametrize('moduletype,batch_params', [['linear', False], ['bn1', True], ['linear', True]])
    def test_vmap_tdmodule_nativebuilt(self, moduletype, batch_params):
        """vmap over a TensorDictModule functionalized with the native make_functional."""
        if (moduletype == 'linear'):
            module = nn.Linear(3, 4)
        elif (moduletype == 'bn1'):
            module = nn.BatchNorm1d(3)
        else:
            raise NotImplementedError
        if (moduletype == 'linear'):
            tdmodule = TensorDictModule(module, in_keys=['x'], out_keys=['y'])
            params = make_functional(tdmodule)
            x = torch.randn(10, 1, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = params.expand(10, *params.batch_size).lock_()
                td = vmap(tdmodule, (0, 0))(td, params)
            else:
                td = vmap(tdmodule, (0, None))(td, params)
            y = td['y']
            assert (y.shape == torch.Size([10, 1, 4]))
        elif (moduletype == 'bn1'):
            tdmodule = TensorDictModule(module, in_keys=['x'], out_keys=['y'])
            params = make_functional(tdmodule)
            x = torch.randn(10, 2, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = params.expand(10, *params.batch_size).contiguous().lock_()
                td = vmap(tdmodule, (0, 0))(td, params)
            else:
                raise NotImplementedError
            y = td['y']
            assert (y.shape == torch.Size([10, 2, 3]))

    @pytest.mark.skipif((not _has_functorch), reason=f'functorch not found: err={FUNCTORCH_ERR}')
    @pytest.mark.parametrize('moduletype,batch_params', [['linear', False], ['bn1', True], ['linear', True]])
    def test_vmap_tdsequence_functorch(self, moduletype, batch_params):
        """vmap over a two-module TensorDictSequential functionalized through functorch."""
        if (moduletype == 'linear'):
            module1 = nn.Linear(3, 4)
            module2 = nn.Linear(4, 5)
        elif (moduletype == 'bn1'):
            module1 = nn.BatchNorm1d(3)
            module2 = nn.BatchNorm1d(3)
        else:
            raise NotImplementedError
        if (moduletype == 'linear'):
            tdmodule1 = TensorDictModule(module1, in_keys=['x'], out_keys=['y'])
            tdmodule2 = TensorDictModule(module2, in_keys=['y'], out_keys=['z'])
            tdmodule = TensorDictSequential(tdmodule1, tdmodule2)
            (tdmodule, params, buffers) = functorch_make_functional_with_buffers(tdmodule)
            x = torch.randn(10, 1, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = expand_list(params, 10)
                buffers = expand_list(buffers, 10)
                td = vmap(tdmodule, (0, 0, 0))(params, buffers, td)
            else:
                td = vmap(tdmodule, (None, None, 0))(params, buffers, td)
            z = td['z']
            assert (z.shape == torch.Size([10, 1, 5]))
        elif (moduletype == 'bn1'):
            tdmodule1 = TensorDictModule(module1, in_keys=['x'], out_keys=['y'])
            tdmodule2 = TensorDictModule(module2, in_keys=['y'], out_keys=['z'])
            tdmodule = TensorDictSequential(tdmodule1, tdmodule2)
            (tdmodule, params, buffers) = functorch_make_functional_with_buffers(tdmodule)
            x = torch.randn(10, 2, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = expand_list(params, 10)
                buffers = expand_list(buffers, 10)
                td = vmap(tdmodule, (0, 0, 0))(params, buffers, td)
            else:
                raise NotImplementedError
            z = td['z']
            assert (z.shape == torch.Size([10, 2, 3]))

    @pytest.mark.skipif((not _has_functorch), reason=f'functorch not found: err={FUNCTORCH_ERR}')
    @pytest.mark.parametrize('moduletype,batch_params', [['linear', False], ['bn1', True], ['linear', True]])
    def test_vmap_tdsequence_nativebuilt(self, moduletype, batch_params):
        """vmap over a TensorDictSequential functionalized with the native make_functional."""
        if (moduletype == 'linear'):
            module1 = nn.Linear(3, 4)
            module2 = nn.Linear(4, 5)
        elif (moduletype == 'bn1'):
            module1 = nn.BatchNorm1d(3)
            module2 = nn.BatchNorm1d(3)
        else:
            raise NotImplementedError
        if (moduletype == 'linear'):
            tdmodule1 = TensorDictModule(module1, in_keys=['x'], out_keys=['y'])
            tdmodule2 = TensorDictModule(module2, in_keys=['y'], out_keys=['z'])
            tdmodule = TensorDictSequential(tdmodule1, tdmodule2)
            params = make_functional(tdmodule)
            assert ({'0', '1'} == set(params['module'].keys()))
            x = torch.randn(10, 1, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = params.expand(10, *params.batch_size)
                td = vmap(tdmodule, (0, 0))(td, params)
            else:
                td = vmap(tdmodule, (0, None))(td, params)
            z = td['z']
            assert (z.shape == torch.Size([10, 1, 5]))
        elif (moduletype == 'bn1'):
            tdmodule1 = TensorDictModule(module1, in_keys=['x'], out_keys=['y'])
            tdmodule2 = TensorDictModule(module2, in_keys=['y'], out_keys=['z'])
            tdmodule = TensorDictSequential(tdmodule1, tdmodule2)
            params = make_functional(tdmodule)
            assert ({'0', '1'} == set(params['module'].keys()))
            x = torch.randn(10, 2, 3)
            td = TensorDict({'x': x}, [10])
            if batch_params:
                params = params.expand(10, *params.batch_size).contiguous().lock_()
                td = vmap(tdmodule, (0, 0))(td, params)
            else:
                raise NotImplementedError
            z = td['z']
            assert (z.shape == torch.Size([10, 2, 3]))

    def test_vmap_names(self):
        """Dimension names: the vmapped dim's name is dropped, others survive."""
        def fun(a, b):
            b['c'] = (a['a'] + b['b'])
            return b
        a = TensorDict({'a': torch.randn(3, 4)}, [3])
        b = TensorDict({'b': torch.randn(3, 5, 4)}, [3, 5])
        a.names = ['0']
        b.names = ['A', 'B']
        c = vmap(fun, (None, 1))(a, b)
        assert (c.names == [None, 'A'])
        a = TensorDict({'a': torch.randn(5, 4)}, [5])
        b = TensorDict({'b': torch.randn(3, 5, 4)}, [3, 5])
        a.names = ['0']
        b.names = ['A', 'B']
        c = vmap(fun, (None, 0))(a, b)
        assert (c.names == [None, 'B'])

    @pytest.mark.parametrize('out_dim', [0, 1])
    @pytest.mark.parametrize('in_dim', [0, 1])
    @pytest.mark.parametrize('stack_dim', [0, 1])
    @pytest.mark.parametrize('lock_x', [True, False])
    @pytest.mark.parametrize('lock_y', [True, False])
    @pytest.mark.parametrize('key', ['a', ('a', 'b')])
    def test_vmap_write_lazystack(self, in_dim, out_dim, stack_dim, lock_x, lock_y, key):
        """Writing into a (possibly locked) LazyStackedTensorDict under vmap."""
        fun = vmap((lambda x, y: x.set(key, (y.get(key) + x.get(key)))), (in_dim, in_dim), (out_dim,))
        td0 = TensorDict({key: [1.0]}, [1])
        td1 = TensorDict({key: [2.0]}, [1])
        x = torch.stack([td0, td0.clone()], stack_dim)
        y = torch.stack([td1, td1.clone()], stack_dim)
        if lock_x:
            x.lock_()
        if lock_y:
            y.lock_()
        out = fun(x, y)
        assert isinstance(out, LazyStackedTensorDict)
        if (out_dim == 0):
            assert (out.shape[out_dim] == x.shape[in_dim])
        else:
            assert (out.shape[out_dim] == x.shape[in_dim])
class SSData(BaseDbModel):
    """Screenshot-tracking row (table ``ss_data``): message location plus image hashes."""

    class Meta:
        table = 'ss_data'

    id = fields.IntField(pk=True)
    author_id = fields.BigIntField()
    channel_id = fields.BigIntField()
    message_id = fields.BigIntField()
    # Perceptual-hash strings used for duplicate detection; nullable.
    dhash = fields.CharField(max_length=1024, null=True)
    phash = fields.CharField(max_length=1024, null=True)
    submitted_at = fields.DatetimeField(auto_now=True)

    @property
    def author(self):
        # NOTE(review): assumes a ``bot`` reference is attached to the model
        # elsewhere in the application — confirm against the setup code.
        return self.bot.get_user(self.author_id)

    @property
    def jump_url(self):
        # Bug fix: the original return statement was a garbled/broken string
        # literal (syntax error). NOTE(review): Discord jump links normally
        # include a guild id (channels/<guild>/<channel>/<message>), which this
        # model does not store — confirm the intended prefix with the caller.
        return f'https://discord.com/channels/{self.channel_id}/{self.message_id}'
def random_integer_and_continuously_increasing_data(janela, trend, limit):
    """Build a noisy, generally increasing series, plot it, and run trend detection.

    ``janela`` is the detection window size; ``trend`` and ``limit`` are
    forwarded to ``detecttrend``. Returns the detection result.
    """
    noisy_values = [i + randrange(10) for i in range(1, 101)]
    random_series = pd.DataFrame(noisy_values)
    # NOTE(review): randint may produce duplicate day offsets, hence duplicate
    # index timestamps — presumably acceptable for this synthetic fixture.
    random_series.index = sorted(pd.to_datetime(np.random.randint(1, 101, size=100), unit='d').tolist())
    random_series.columns = ['random']
    random_series = random_series[['random']]
    random_series['random'].plot()
    detected = detecttrend.detecttrend(random_series, trend=trend, limit=limit, window=janela)
    vizplot.plot_trend(random_series, detected, 'random', trend)
    return detected
def test_loading_extension_which_raises_exceptions_init(extensionregistry, mocker):
    """An extension whose __init__ raises must propagate that error from load().

    NOTE(review): SimpleExtension is never registered explicitly here — the
    ``extensionregistry`` fixture presumably discovers locally defined
    extension classes; confirm against the fixture implementation.
    """
    class SimpleExtension(object):
        # Always eligible for loading, so load() reaches __init__.
        LOAD_IF = staticmethod((lambda config: True))

        def __init__(self):
            raise AssertionError('some error')
    with pytest.raises(AssertionError) as exc:
        extensionregistry.load(mocker.MagicMock())
    assert (str(exc.value) == 'some error')
class CloudGuruCourseDownload(object):
    """Lazy container for a downloadable course's id, title, and contents.

    Bug fix: ``id`` and ``title`` were plain methods, yet ``__repr__`` reads
    ``self.title`` without calling it — which would have formatted a bound
    method. Restoring them as read-only properties (the decorators were
    evidently stripped) makes ``__repr__`` render the actual title.
    """

    def __init__(self):
        self._id = None
        self._title = None
        self._course = None  # populated lazily by _process_course()

    def __repr__(self):
        course = '{title}'.format(title=self.title)
        return course

    @property
    def id(self):
        """Course identifier (read-only view of the private field)."""
        return self._id

    @property
    def title(self):
        """Course title (read-only view of the private field)."""
        return self._title

    def get_course(self, keep_alive=True):
        """Return the course contents, fetching them on first access.

        NOTE(review): relies on ``self._process_course`` being provided by a
        subclass/mixin — it is not defined in this class.
        """
        if (not self._course):
            self._process_course(keep_alive=keep_alive)
        return self._course
class TestInputFileWithRequest():
    """Round-trip tests: upload a document via the live bot, download it back,
    and check the payload survived unchanged."""

    async def test_send_bytes(self, bot, chat_id):
        """Sending raw bytes should round-trip byte-for-byte."""
        message = (await bot.send_document(chat_id, data_file('text_file.txt').read_bytes()))
        out = BytesIO()
        (await (await message.document.get_file()).download_to_memory(out=out))
        out.seek(0)
        assert (out.read().decode('utf-8') == 'PTB Rocks! 78')

    async def test_send_string(self, bot, chat_id):
        """Sending a str wrapped in InputFile should also round-trip."""
        message = (await bot.send_document(chat_id, InputFile(data_file('text_file.txt').read_text(encoding='utf-8'))))
        out = BytesIO()
        (await (await message.document.get_file()).download_to_memory(out=out))
        out.seek(0)
        assert (out.read().decode('utf-8') == 'PTB Rocks! 78')
class SaveScrim(ScrimsButton):
    """Green 'Save Scrim' button: persists the configured scrim record,
    schedules its open/autoclean timers, and returns to the main scrims view.

    Starts disabled; presumably enabled elsewhere once the configuration is
    complete.
    """

    def __init__(self, ctx: Context):
        super().__init__(style=discord.ButtonStyle.green, label='Save Scrim', disabled=True)
        self.ctx = ctx

    async def callback(self, interaction: Interaction):
        (await interaction.response.defer())
        # Log-channel setup runs in the background; no need to block the save.
        self.ctx.bot.loop.create_task(self.view.record.setup_logs())
        # Autoclean fires at 04:00 (bot-local time) on the following day.
        self.view.record.autoclean_time = (self.ctx.bot.current_time.replace(hour=4, minute=0, second=0, microsecond=0) + timedelta(days=1))
        (await self.view.record.save())
        (await self.ctx.bot.reminders.create_timer(self.view.record.open_time, 'scrim_open', scrim_id=self.view.record.id))
        (await self.ctx.bot.reminders.create_timer(self.view.record.autoclean_time, 'autoclean', scrim_id=self.view.record.id))
        self.view.stop()
        (await self.ctx.success(f'Scrim was successfully created. (Registration: {dt(self.view.record.open_time)})', 6))
        # Local import avoids a circular dependency with the main view module.
        from .main import ScrimsMain
        view = ScrimsMain(self.ctx)
        view.message = (await self.view.message.edit(embed=(await view.initial_embed()), view=view))
def smart_repr(x):
    """Return an eval-able source representation of ``x``.

    Tuples are rendered recursively (preserving the ``tuple()`` / ``(v,)``
    spellings for the 0- and 1-element cases); callables become a
    ``__import__('pydoc').locate(...)`` expression so generated source can
    re-import them by qualified name; anything else falls back to ``repr``.

    Changes vs. original: idiomatic ``callable()`` instead of
    ``hasattr(x, '__call__')`` and f-strings instead of %-formatting —
    output is byte-identical.
    """
    if isinstance(x, tuple):
        if not x:
            return 'tuple()'
        if len(x) == 1:
            return f'({smart_repr(x[0])},)'
        return '(' + ','.join(map(smart_repr, x)) + ')'
    if callable(x):
        # Assumes the callable has __module__/__name__ (functions, classes);
        # arbitrary callable instances would raise AttributeError, as before.
        return f"__import__('pydoc').locate('{x.__module__}.{x.__name__}')"
    return repr(x)
def _lines_to_gdf(net, points, node_id):
    """Convert a networkx-style graph's edges into a GeoDataFrame.

    When ``points`` is True, adds node_start/node_end columns resolved through
    each endpoint node's ``node_id`` attribute. Propagates the graph-level CRS
    when present.

    NOTE(review): ``zip(*net.edges(...))`` unpacks into three names, so an
    edgeless graph raises ValueError here rather than returning an empty
    frame — confirm callers guarantee at least one edge.
    """
    (starts, ends, edge_data) = zip(*net.edges(data=True), strict=True)
    gdf_edges = gpd.GeoDataFrame(list(edge_data))
    if (points is True):
        gdf_edges['node_start'] = [net.nodes[s][node_id] for s in starts]
        gdf_edges['node_end'] = [net.nodes[e][node_id] for e in ends]
    if ('crs' in net.graph):
        gdf_edges.crs = net.graph['crs']
    return gdf_edges
class SMPLMarket(Dataset):
    """Person re-id dataset pairing two images per identity with SMPL fits and part segmentations.

    Each item bundles, for two images of the same person id: the normalized
    image, SMPL vertices and camera translation, human part segmentation
    (float and long variants), SMPL-rendered part segmentation, and a
    normalized pixel-coordinate grid.
    """

    def __init__(self, data_dir, train_flag=True, random_pick=True):
        super().__init__()
        self.data_dir = data_dir
        # random_pick: sample the image pair randomly per access vs. always the first two.
        self.random_pick = random_pick
        paths_pkl_path = osp.join(data_dir, 'train_test_img_paths_pid.pkl')
        with open(paths_pkl_path, 'rb') as f:
            all_paths = pickle.load(f)
        if train_flag:
            self.img_paths_dict = all_paths['out_dict_train']
        else:
            self.img_paths_dict = all_paths['out_dict_test']
        self.pids = list(self.img_paths_dict.keys())
        self.smpl_dir = osp.join(data_dir, 'SMPL_RSC', 'pkl')
        self.smpl_part_seg_dir = osp.join(data_dir, 'SMPL_RSC', 'parts')
        # Collapse the raw SMPL part ids into 7 coarse body-part labels
        # (0 stays background).
        self.smpl_part_seg_mapping = {3: 1, 6: 2, 1: 3, 2: 3, 7: 4, 8: 4, 4: 5, 5: 5, 9: 6, 10: 6, 11: 7, 12: 7}
        self.part_seg_dir = osp.join(data_dir, 'part_seg_EANet')

    def __len__(self):
        # Dataset length is the number of identities, not images.
        return len(self.pids)

    def preprocess_img(self, img):
        """Scale to [-1, 1] and convert HWC -> CHW float tensor.

        NOTE(review): assumes ``img`` is a uint8 HxWx3 array — confirm loader.
        """
        img = (((img / 255.0) * 2) - 1)
        img = torch.from_numpy(img).float().permute(2, 0, 1)
        return img

    def preprocess_seg(self, seg):
        """Map part labels 0..7 into [-1, 1] and add a channel dim."""
        seg_float = (((seg / 7.0) * 2) - 1)
        seg_float = torch.from_numpy(seg_float).float().unsqueeze(0)
        return seg_float

    def preprocess_smpl_seg(self, smpl_seg):
        """Remap raw SMPL part ids to the coarse 7-label scheme as a long tensor."""
        smpl_seg_long = np.zeros(smpl_seg.shape, dtype=int)
        for k in self.smpl_part_seg_mapping.keys():
            smpl_seg_long[(smpl_seg == k)] = self.smpl_part_seg_mapping[k]
        smpl_seg_long = torch.from_numpy(smpl_seg_long).long().unsqueeze(0)
        return smpl_seg_long

    def get_coord(self, shape):
        """Build a (2, H, W) grid of normalized (y, x) coordinates in [-1, 1]."""
        y = np.linspace((- 1.0), 1.0, num=shape[0])
        x = np.linspace((- 1.0), 1.0, num=shape[1])
        (coord_y, coord_x) = np.meshgrid(y, x, indexing='ij')
        coord = np.concatenate((coord_y[None], coord_x[None]), axis=0)
        return torch.from_numpy(coord).float()

    def get_data(self, img_path, suffix=''):
        """Load every modality for one image; ``suffix`` disambiguates the pair's keys."""
        img_name = img_path.split('/')[(- 1)]
        img = imageio.imread(osp.join(self.data_dir, img_path))
        img = self.preprocess_img(img)
        coord = self.get_coord(img.shape[(- 2):])
        # SMPL fit: pickled [vertices, camera translation] keyed by image stem.
        pkl_path = osp.join(self.smpl_dir, (img_name[:(- 4)] + '.pkl'))
        with open(pkl_path, 'rb') as f:
            smpl_list = pickle.load(f)
        verts = torch.from_numpy(smpl_list[0])
        cam_t = torch.from_numpy(smpl_list[1])
        smpl_seg_path = osp.join(self.smpl_part_seg_dir, (img_name[:(- 4)] + '.png'))
        smpl_seg = imageio.imread(smpl_seg_path)
        smpl_seg = self.preprocess_smpl_seg(smpl_seg)
        seg_path = osp.join(self.part_seg_dir, (img_path.split('.')[0] + '.png'))
        try:
            seg = imageio.imread(seg_path)
        except:
            # Some segmentation files carry a doubled extension on disk.
            seg = imageio.imread((seg_path + '.png'))
        seg_long = torch.from_numpy(seg).long().unsqueeze(0)
        seg_float = self.preprocess_seg(seg)
        sample = {('img' + suffix): img, ('verts' + suffix): verts, ('cam_t' + suffix): cam_t, ('seg' + suffix): seg_float, ('seg_long' + suffix): seg_long, ('smpl_seg' + suffix): smpl_seg, ('coord' + suffix): coord, ('img_name' + suffix): img_name}
        return sample

    def __getitem__(self, idx):
        """Return a merged dict for two distinct images of identity ``idx``.

        NOTE(review): assumes each identity has at least two images — a
        single-image pid would fail both branches below; confirm upstream.
        """
        pid = self.pids[idx]
        pid_all_paths = self.img_paths_dict[pid]
        if self.random_pick:
            (img_path1, img_path2) = np.random.choice(a=pid_all_paths, size=2, replace=False)
        else:
            (img_path1, img_path2) = pid_all_paths[:2]
        sample = self.get_data(img_path1, '')
        sample2 = self.get_data(img_path2, '2')
        sample.update(sample2)
        return sample
class GlyphTextureAtlas(image.atlas.TextureAtlas):
    """Texture atlas whose backing texture is a GlyphTexture with configurable
    format and filtering.

    NOTE(review): deliberately does NOT call super().__init__ — it re-creates
    the texture/allocator pair itself so it can pass the extra GL parameters;
    confirm this stays in sync with the base class's attribute expectations.
    """
    texture_class = GlyphTexture

    def __init__(self, width=2048, height=2048, fmt=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR):
        self.texture = self.texture_class.create(width, height, GL_TEXTURE_2D, fmt, min_filter, mag_filter, fmt=fmt)
        self.allocator = image.atlas.Allocator(width, height)
class Effect2252(BaseEffect):
    """Passive effect: forces moduleReactivationDelay on all Cloaking-skill
    modules to the container's covertOpsAndReconOpsCloakModuleDelay attribute.

    NOTE(review): ``handler`` takes no ``self`` — the effect framework
    presumably invokes it as an unbound/static callable; confirm against
    how BaseEffect handlers are dispatched.
    """
    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        fit.modules.filteredItemForce((lambda mod: mod.item.requiresSkill('Cloaking')), 'moduleReactivationDelay', container.getModifiedItemAttr('covertOpsAndReconOpsCloakModuleDelay'), **kwargs)
def calculate_image_aggregate_size(ancestors_str, image_size, parent_image):
    """Compute an image's cumulative size: its own size plus all ancestors'.

    ``ancestors_str`` is a slash-delimited id path (presumably of the form
    '/id1/id2/.../' — the split drops the empty leading and trailing
    segments; confirm against how the column is written). Prefers the
    parent's precomputed aggregate; otherwise sums ancestor storage sizes
    with one query. Returns None when that sum is unavailable.
    """
    ancestors = ancestors_str.split('/')[1:(- 1)]
    if (not ancestors):
        # Root image: nothing to add.
        return image_size
    if (parent_image is None):
        raise DataModelException('Could not load parent image')
    # Fast path: parent already carries an aggregate covering all ancestors.
    ancestor_size = parent_image.aggregate_size
    if (ancestor_size is not None):
        return (ancestor_size + image_size)
    # Slow path: sum the ancestors' storage sizes directly.
    ancestor_size = ImageStorage.select(fn.Sum(ImageStorage.image_size)).join(Image).where((Image.id << ancestors)).scalar()
    if (ancestor_size is None):
        return None
    return (ancestor_size + image_size)
class DepthwiseConv2D(layers.DepthwiseConv2D):
    """Depthwise conv layer whose weights can be supplied externally via ``params``.

    """
    __doc__ += layers.DepthwiseConv2D.__doc__

    def call(self, inputs, params=None):
        """Apply the depthwise convolution, using ``params`` overrides when given.

        Bug fixes vs. original: (1) ``params[...]`` was indexed
        unconditionally, raising TypeError when ``params`` is None (its
        default) and KeyError when the kernel key is absent — the fallback to
        the stock implementation was unreachable; a None check plus
        ``params.get`` makes it work. (2) The class-level ``__doc__ +=``
        required a preceding docstring to exist; one is now provided.
        """
        kernel_key = self.name + '/depthwise_kernel:0'
        if params is None or params.get(kernel_key) is None:
            # No override supplied: defer to the framework implementation.
            # NOTE(review): super(layers.DepthwiseConv2D, self) deliberately
            # skips the immediate parent — confirm this matches the
            # surrounding patching convention.
            return super(layers.DepthwiseConv2D, self).call(inputs)
        depthwise_kernel = params.get(kernel_key)
        bias = params.get(self.name + '/bias:0')
        outputs = backend.depthwise_conv2d(inputs, depthwise_kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format)
        if self.use_bias:
            outputs = backend.bias_add(outputs, bias, data_format=self.data_format)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
class MultiChoiceBatchTransform(Transform):
    """Format a batch of multiple-choice questions into a single prompt string.

    NOTE(review): ``transform`` takes no ``self`` — presumably a stripped
    @staticmethod or a framework-invoked unbound callable; confirm against
    how Transform subclasses are dispatched.
    """
    def transform(x: List[Dict], y: List[Dict]=None, **kwargs: Any) -> str:
        """Render questions as Q[i]/Answer choices[i] lines, optionally ending with an answer prefix.

        NOTE(review): the trailing prefix is the literal 'A[1]: ' regardless of
        batch size — confirm whether a per-question index was intended.
        """
        if ((not isinstance(x[0], Dict)) or (y and (not isinstance(y[0], Dict)))):
            raise TypeError('x and y should be dict in multi-choice task.')
        transformed = ''
        for (idx, x_) in enumerate(x, 1):
            transformed += f'''Q[{idx}]: {x_['question']}
'''
            transformed += 'Answer choices[{}]: {}\n'.format(idx, ' '.join(['({}) {}'.format(label.lower(), text.lower()) for (label, text) in zip(x_['choices']['label'], x_['choices']['text'])]))
        if (not kwargs.get('drop_answer_prefix', False)):
            transformed += 'A[1]: '
        return transformed
class ConfigDict(Dict):
    """Attribute-access dict that raises instead of auto-creating missing keys.

    Unlike the addict-style base ``Dict``, a missing key is a hard error:
    ``__missing__`` raises KeyError, and attribute access converts that into
    AttributeError (so hasattr/getattr behave conventionally).
    """
    def __missing__(self, name):
        # Disable the base class's implicit creation of nested empty dicts.
        raise KeyError(name)

    def __getattr__(self, name):
        try:
            value = super(ConfigDict, self).__getattr__(name)
        except KeyError:
            # Translate to the exception type attribute protocols expect.
            ex = AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
        except Exception as e:
            ex = e
        else:
            return value
        # Raised outside the except block to avoid implicit exception chaining.
        raise ex
class TextEditBox(Gtk.HBox):
    """A scrollable multi-line text editor with Revert and Apply buttons."""

    def __init__(self, default=''):
        # default: the text the Revert button restores into the buffer.
        super().__init__(spacing=6)
        sw = Gtk.ScrolledWindow()
        sw.set_shadow_type(Gtk.ShadowType.IN)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        sw.add(TextView(buffer=TextBuffer()))
        self.pack_start(sw, True, True, 0)
        # Keep a handle on the TextView's buffer for the text accessors below.
        self.buffer = sw.get_child().get_buffer()
        box = Gtk.VBox(spacing=6)
        rev = Button(_('_Revert'), Icons.DOCUMENT_REVERT)
        app = Button(_('_Apply'))
        box.pack_start(rev, False, True, 0)
        box.pack_start(app, False, True, 0)
        self.pack_start(box, False, True, 0)
        # Revert rewrites the buffer with the original default text.
        connect_obj(rev, 'clicked', self.buffer.set_text, default)
        self.revert = rev
        self.apply = app

    # NOTE(review): the two `text` methods below share a name, so the second
    # definition shadows the first. Decorators elsewhere in this file look
    # stripped — these were presumably a @property getter/setter pair;
    # confirm against the original project before relying on `text()`.
    def text(self):
        # Return the entire buffer contents (include_hidden_chars=True).
        (start, end) = self.buffer.get_bounds()
        return self.buffer.get_text(start, end, True)

    def text(self, value):
        # Replace the buffer contents; -1 lets Gtk compute the byte length.
        self.buffer.set_text(value, (- 1))
class UnitTest(unittest.TestCase):
    """Checks TestUnit progress bookkeeping and the get_next_*_batch helpers."""

    def test_initialization_and_get_next_batch(self) -> None:
        unit = TestUnit()
        # Progress trackers must exist right after construction.
        self.assertIsNotNone(unit.train_progress)
        self.assertIsNotNone(unit.eval_progress)
        self.assertIsNotNone(unit.predict_progress)
        tensor_1 = torch.ones(1)
        tensor_2 = torch.zeros(1)
        state = get_dummy_train_state()
        # Batches must come back in iterator order.
        train_data_iter = iter([tensor_1, tensor_2])
        self.assertEqual(unit.get_next_train_batch(state, train_data_iter), tensor_1)
        self.assertEqual(unit.get_next_train_batch(state, train_data_iter), tensor_2)
        self.assertEqual(unit.get_next_predict_batch(state, iter([tensor_1, tensor_2])), tensor_1)
        data_iter = iter([tensor_1, tensor_2])
        next_eval_batch = unit.get_next_eval_batch(state, data_iter)
        # Bug fix: the eval batch must equal the first tensor drawn from the
        # iterator; the original compared it against the iterator object
        # itself, which can never be equal.
        self.assertEqual(next_eval_batch, tensor_1)
class Rotation(CGAThing):
    """A rotation element (rotor) of the conformal geometric algebra.

    With no extra arguments a random rotor is generated; with one argument,
    accepts either a rotor-like multivector (grades {0, 2}) used as-is, or a
    pure bivector generator (grade {2}) that is exponentiated into a rotor.
    Raises ValueError for any other input.
    """

    def __init__(self, cga, *args) -> None:
        super().__init__(cga)
        if (len(args) == 0):
            # Random grade-2 multivector, restricted to the base space,
            # then exponentiated into a rotor.
            U = self.layout.randomMV()(2)
            U = self.cga.I_base.project(U)
            self.mv = (e ** U)
        elif (len(args) == 1):
            arg = args[0]
            if isinstance(arg, MultiVector):
                if (arg.grades() == {0, 2}):
                    # Already a rotor: store directly.
                    self.mv = arg
                elif (arg.grades() == {2}):
                    # Pure bivector generator: project onto the base space
                    # if it has any component outside it, then exponentiate.
                    if ((arg ^ self.cga.I_base) != 0):
                        arg = self.cga.I_base.project(arg)
                    self.mv = (e ** arg)
                else:
                    raise ValueError('bad input')
            else:
                raise ValueError('bad input')
        else:
            raise ValueError('bad input')

    def __repr__(self) -> str:
        return 'Rotation'
class Transformer(Classifier):
    """Transformer text classifier (optionally the Star-Transformer variant).

    Builds a position-aware encoder stack over token or char embeddings and
    maps the pooled encoder output to label logits.
    """

    def __init__(self, dataset, config):
        super(Transformer, self).__init__(dataset, config)
        # Padding id, used both for masking and for position numbering.
        self.pad = dataset.token_map[dataset.VOCAB_PADDING]
        if (config.feature.feature_names[0] == 'token'):
            seq_max_len = config.feature.max_token_len
        else:
            seq_max_len = config.feature.max_char_len
        self.position_enc = PositionEmbedding(seq_max_len, config.embedding.dimension, self.pad)
        # Star-Transformer keeps a relay node; the plain variant is a stack
        # of standard encoder layers.
        if config.Transformer.use_star:
            self.layer_stack = nn.ModuleList([StarEncoderLayer(config.embedding.dimension, config.Transformer.n_head, config.Transformer.d_k, config.Transformer.d_v, dropout=config.Transformer.dropout) for _ in range(config.Transformer.n_layers)])
        else:
            self.layer_stack = nn.ModuleList([EncoderLayer(config.embedding.dimension, config.Transformer.d_inner, config.Transformer.n_head, config.Transformer.d_k, config.Transformer.d_v, dropout=config.Transformer.dropout) for _ in range(config.Transformer.n_layers)])
        hidden_size = config.embedding.dimension
        self.linear = torch.nn.Linear(hidden_size, len(dataset.label_map))
        self.dropout = torch.nn.Dropout(p=config.train.hidden_layer_dropout)

    def get_parameter_optimizer_dict(self):
        # Embedding groups come first so update_lr can freeze exactly them.
        params = list()
        params.append({'params': self.token_embedding.parameters()})
        params.append({'params': self.char_embedding.parameters()})
        for i in range(0, len(self.layer_stack)):
            params.append({'params': self.layer_stack[i].parameters()})
        params.append({'params': self.linear.parameters()})
        return params

    def update_lr(self, optimizer, epoch):
        # Keep embedding LR at 0 for the first num_epochs_static_embedding
        # epochs; the first two param groups are the embeddings (see
        # get_parameter_optimizer_dict).
        if (epoch > self.config.train.num_epochs_static_embedding):
            for param_group in optimizer.param_groups[:2]:
                param_group['lr'] = self.config.optimizer.learning_rate
        else:
            for param_group in optimizer.param_groups[:2]:
                param_group['lr'] = 0

    def forward(self, batch):
        def _get_non_pad_mask(seq, pad):
            # (batch, len) -> (batch, len, 1) float mask, 0.0 at padding.
            assert (seq.dim() == 2)
            return seq.ne(pad).type(torch.float).unsqueeze((- 1))

        def _get_attn_key_pad_mask(seq_k, seq_q, pad):
            # True where the key position is padding, broadcast over queries.
            len_q = seq_q.size(1)
            padding_mask = seq_k.eq(pad)
            padding_mask = padding_mask.unsqueeze(1).expand((- 1), len_q, (- 1))
            return padding_mask
        if (self.config.feature.feature_names[0] == 'token'):
            src_seq = batch[cDataset.DOC_TOKEN].to(self.config.device)
            embedding = self.token_embedding(src_seq)
        else:
            src_seq = batch[cDataset.DOC_CHAR].to(self.config.device)
            embedding = self.char_embedding(src_seq)
        slf_attn_mask = _get_attn_key_pad_mask(seq_k=src_seq, seq_q=src_seq, pad=self.pad)
        non_pad_mask = _get_non_pad_mask(src_seq, self.pad)
        # 1-based positions for real tokens; padding positions stay 0.
        batch_lens = (src_seq != self.pad).sum(dim=(- 1))
        src_pos = torch.zeros_like(src_seq, dtype=torch.long)
        for (row, length) in enumerate(batch_lens):
            src_pos[row][:length] = torch.arange(1, (length + 1))
        enc_output = (embedding + self.position_enc(src_pos))
        if self.config.Transformer.use_star:
            # Relay node s starts as the mean embedding; final representation
            # combines the max-pooled token states with the relay node.
            s = torch.mean(embedding, 1)
            h = enc_output
            for enc_layer in self.layer_stack:
                (h, s) = enc_layer(h, embedding, s, non_pad_mask=non_pad_mask, slf_attn_mask=None)
            (h_max, _) = torch.max(h, 1)
            enc_output = (h_max + s)
        else:
            for enc_layer in self.layer_stack:
                (enc_output, _) = enc_layer(enc_output, non_pad_mask=non_pad_mask, slf_attn_mask=slf_attn_mask)
            # Mean-pool over sequence positions.
            enc_output = torch.mean(enc_output, 1)
        return self.dropout(self.linear(enc_output))
def handle_transferreroute(payment_state: InitiatorPaymentState, state_change: ActionTransferReroute, channelidentifiers_to_channels: Dict[(ChannelID, NettingChannelState)], addresses_to_channel: Dict[(Tuple[(TokenNetworkAddress, Address)], NettingChannelState)], pseudo_random_generator: random.Random, block_number: BlockNumber) -> TransitionResult[InitiatorPaymentState]:
    """Handle a transfer reroute: validate the received refund transfer and,
    if valid, retry the payment over a different route with the new secret.

    Returns a TransitionResult with the (possibly unchanged) payment state
    and the events produced by processing the refund and the reroute attempt.
    """
    try:
        initiator_state = payment_state.initiator_transfers[state_change.transfer.lock.secrethash]
        channel_identifier = initiator_state.channel_identifier
        channel_state = channelidentifiers_to_channels[channel_identifier]
    except KeyError:
        # Unknown transfer or channel: ignore the state change.
        return TransitionResult(payment_state, [])
    refund_transfer = state_change.transfer
    original_transfer = initiator_state.transfer
    # The refund's lock must mirror the original lock exactly.
    is_valid_lock = ((refund_transfer.lock.secrethash == original_transfer.lock.secrethash) and (refund_transfer.lock.amount == original_transfer.lock.amount) and (refund_transfer.lock.expiration == original_transfer.lock.expiration))
    is_valid_refund = channel.refund_transfer_matches_transfer(refund_transfer, original_transfer)
    recipient_address = channel_state.partner_state.address
    recipient_metadata = get_address_metadata(recipient_address, payment_state.routes)
    (is_valid, channel_events, _) = channel.handle_receive_lockedtransfer(channel_state, refund_transfer, recipient_metadata=recipient_metadata)
    if ((not is_valid_lock) or (not is_valid_refund) or (not is_valid)):
        # Invalid refund: drop it without changing state.
        return TransitionResult(payment_state, [])
    events: List[Event] = []
    events.extend(channel_events)
    old_description = initiator_state.transfer_description
    # Exclude already-cancelled channels when picking the next route.
    filtered_route_states = routes.filter_acceptable_routes(route_states=payment_state.routes, blacklisted_channel_ids=payment_state.cancelled_channels, addresses_to_channel=addresses_to_channel, token_network_address=old_description.token_network_address, our_address=channel_state.our_state.address)
    # Same payment parameters, but with the new secret from the reroute.
    transfer_description = TransferDescriptionWithSecretState(token_network_registry_address=old_description.token_network_registry_address, payment_identifier=old_description.payment_identifier, amount=old_description.amount, token_network_address=old_description.token_network_address, initiator=old_description.initiator, target=old_description.target, secret=state_change.secret, secrethash=state_change.secrethash)
    sub_iteration = initiator.try_new_route(addresses_to_channel=addresses_to_channel, candidate_route_states=filtered_route_states, transfer_description=transfer_description, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
    events.extend(sub_iteration.events)
    if (sub_iteration.new_state is None):
        # No usable route left; keep the old state but surface the events.
        return TransitionResult(payment_state, events)
    new_transfer = sub_iteration.new_state.transfer
    payment_state.initiator_transfers[new_transfer.lock.secrethash] = sub_iteration.new_state
    return TransitionResult(payment_state, events)
class PageTests(TestCase):
    """Event page views: known pages render 200, unknown pages 404."""
    # NOTE(review): the bare `_settings(...)` calls below look like stripped
    # decorators (per-test settings overrides); as written they execute once
    # at class-definition time — confirm against the original project.
    _settings(EVENTS_PAGES_PATH=PAGES_PATH)
    def test_valid_event_page_reponse_200(self):
        # Both a top-level page and a subpage must resolve.
        pages = (reverse('events:page', args=('my-event',)), reverse('events:page', args=('my-event/subpage',)))
        for page in pages:
            with self.subTest(page=page):
                resp = self.client.get(page)
                self.assertEqual(resp.status_code, 200)
    _settings(EVENTS_PAGES_PATH=PAGES_PATH)
    def test_invalid_event_page_404(self):
        # Unknown slugs (nested or not) must 404.
        pages = (reverse('events:page', args=('invalid',)), reverse('events:page', args=('invalid/invalid',)))
        for page in pages:
            with self.subTest(page=page):
                resp = self.client.get(page)
                self.assertEqual(resp.status_code, 404)
class VersionConflict(ResolutionError):
    """Raised when an installed distribution clashes with a requirement.

    args[0] is the installed distribution, args[1] the requirement.
    """

    _template = '{self.dist} is installed but {self.req} is required'

    def dist(self):
        # The offending installed distribution (first constructor argument).
        return self.args[0]

    def req(self):
        # The unsatisfied requirement (second constructor argument).
        return self.args[1]

    def report(self):
        # Render the message template against this instance.
        return self._template.format(self=self)

    def with_context(self, required_by):
        """Return self, or a ContextualVersionConflict carrying the set of
        requirers when *required_by* is non-empty."""
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
class _FmtResult(GaPrintable):
    """Pairs a printable object with a label, rendering "label = <obj>"."""

    def __new__(cls, obj, label: str) -> GaPrintable:
        # No label means nothing to wrap: hand back the object untouched.
        if label is None:
            return obj
        inst = super().__new__(cls)
        inst._obj = obj
        inst._label = label
        return inst

    def _latex(self, printer):
        # LaTeX rendering delegates the object part to the printer.
        return f'{self._label} = {printer._print(self._obj)}'

    def _sympystr(self, printer):
        # Plain-string rendering mirrors the LaTeX form.
        return f'{self._label} = {printer._print(self._obj)}'
class TestRealWorldLocate():
    """MCLP integration tests on the San Francisco store/census-tract data."""

    def setup_method(self) -> None:
        # Precomputed network distances, pivoted into a facility x demand matrix.
        self.dirpath = os.path.join(os.path.dirname(__file__), './data/')
        network_distance = pandas.read_csv((self.dirpath + 'SF_network_distance_candidateStore_16_censusTract_205_new.csv'))
        ntw_dist_piv = network_distance.pivot_table(values='distance', index='DestinationName', columns='name')
        self.cost_matrix = ntw_dist_piv.to_numpy()
        demand_points = pandas.read_csv((self.dirpath + 'SF_demand_205_centroid_uniform_weight.csv'))
        facility_points = pandas.read_csv((self.dirpath + 'SF_store_site_16_longlat.csv'))
        # GeoDataFrames sorted by NAME so rows align with the cost matrix.
        self.facility_points_gdf = geopandas.GeoDataFrame(facility_points, geometry=geopandas.points_from_xy(facility_points.long, facility_points.lat)).sort_values(by=['NAME']).reset_index()
        self.demand_points_gdf = geopandas.GeoDataFrame(demand_points, geometry=geopandas.points_from_xy(demand_points.long, demand_points.lat)).sort_values(by=['NAME']).reset_index()
        # Service radius (matrix units), facility budget, and demand weights.
        self.service_dist = 5000.0
        self.p_facility = 4
        self.ai = self.demand_points_gdf['POP2000'].to_numpy()

    def test_optimality_mclp_from_cost_matrix(self):
        # Feasible instance must solve to optimality.
        mclp = MCLP.from_cost_matrix(self.cost_matrix, self.ai, self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False))
        assert (mclp.problem.status == pulp.LpStatusOptimal)

    def test_infeasibility_mclp_from_cost_matrix(self):
        # Demanding 1000 facilities from 16 candidates must be infeasible.
        mclp = MCLP.from_cost_matrix(self.cost_matrix, self.ai, self.service_dist, 1000)
        with pytest.raises(RuntimeError, match='Model is not solved: Infeasible.'):
            mclp.solve(pulp.PULP_CBC_CMD(msg=False))

    def test_mixin_mclp_get_uncovered_clients(self):
        # Known solution leaves exactly 21 demand points uncovered.
        uncovered_clients_expected = 21
        mclp = MCLP.from_cost_matrix(self.cost_matrix, self.ai, self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False))
        assert (mclp.n_cli_uncov == uncovered_clients_expected)

    def test_mixin_mclp_get_percentage(self):
        # ~89% coverage for the reference solution.
        percentage_expected = 89.
        mclp = MCLP.from_cost_matrix(self.cost_matrix, self.ai, self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False))
        assert (mclp.perc_cov == pytest.approx(percentage_expected))

    def test_optimality_mclp_from_geodataframe(self):
        # Same model built from geometries instead of the cost matrix.
        mclp = MCLP.from_geodataframe(self.demand_points_gdf, self.facility_points_gdf, 'geometry', 'geometry', 'POP2000', self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False))
        assert (mclp.problem.status == pulp.LpStatusOptimal)

    def test_infeasibility_mclp_from_geodataframe(self):
        mclp = MCLP.from_geodataframe(self.demand_points_gdf, self.facility_points_gdf, 'geometry', 'geometry', 'POP2000', self.service_dist, 1000)
        with pytest.raises(RuntimeError, match='Model is not solved: Infeasible.'):
            mclp.solve(pulp.PULP_CBC_CMD(msg=False))

    # The four tests below check that solving with results=False leaves the
    # post-processing attributes unset.
    def test_attribute_error_fac2cli_MCLP_facility_client_array(self):
        mclp = MCLP.from_geodataframe(self.demand_points_gdf, self.facility_points_gdf, 'geometry', 'geometry', 'POP2000', self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False), results=False)
        with pytest.raises(AttributeError, match="'MCLP' object has no attribute 'fac2cli'"):
            mclp.fac2cli

    def test_attribute_error_cli2fac_MCLP_facility_client_array(self):
        mclp = MCLP.from_geodataframe(self.demand_points_gdf, self.facility_points_gdf, 'geometry', 'geometry', 'POP2000', self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False), results=False)
        with pytest.raises(AttributeError, match="'MCLP' object has no attribute 'cli2fac'"):
            mclp.cli2fac

    def test_attribute_error_ncliuncov_MCLP_facility_client_array(self):
        mclp = MCLP.from_geodataframe(self.demand_points_gdf, self.facility_points_gdf, 'geometry', 'geometry', 'POP2000', self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False), results=False)
        with pytest.raises(AttributeError, match="'MCLP' object has no attribute 'n_cli_uncov'"):
            mclp.n_cli_uncov

    def test_attribute_error_percentage_MCLP_facility_client_array(self):
        mclp = MCLP.from_geodataframe(self.demand_points_gdf, self.facility_points_gdf, 'geometry', 'geometry', 'POP2000', self.service_dist, self.p_facility)
        mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False), results=False)
        with pytest.raises(AttributeError, match='The attribute `n_cli_uncov` is not set.'):
            mclp.get_percentage()
def Pulling(Loss_type, embedding, Jm):
    """HDML negative "pulling": synthesize harder negatives.

    Each negative is pulled towards its anchor by an amount controlled by the
    current metric loss ``Jm`` (larger loss -> weaker pull); negatives that
    are already closer to the anchor than the positive are kept unchanged.

    Args:
        Loss_type: 'NpairLoss' (embedding = [anchors; positives]) or
            'Triplet' (embedding = [anchors; positives; negatives]).
        embedding: stacked embedding tensor for the current batch.
        Jm: scalar metric-loss tensor scaling the interpolation.

    Returns:
        The recombined embedding tensor containing synthesized negatives.
    """
    if (Loss_type == 'NpairLoss'):
        embedding_split = tf.split(embedding, 2, axis=0)
        anc = embedding_split[0]
        pos = embedding_split[1]
        # In N-pair loss the other samples' positives act as negatives.
        neg = pos
        # Tile so every anchor is paired with every candidate negative.
        anc_tile = tf.reshape(tf.tile(anc, [1, int((FLAGS.batch_size / 2))]), [(- 1), int(FLAGS.embedding_size)])
        pos_tile = tf.reshape(tf.tile(pos, [1, int((FLAGS.batch_size / 2))]), [(- 1), int(FLAGS.embedding_size)])
        # Bug fix: tf.tile multiples must be integers; FLAGS.batch_size / 2
        # is a float under Python 3 (consistent with the int() casts above).
        neg_tile = tf.tile(neg, [int(FLAGS.batch_size / 2), 1])
        # Interpolate the negative towards the anchor, scaled by exp(-alpha/Jm).
        neg2_tile = (anc_tile + tf.multiply((neg_tile - anc_tile), tf.tile(((distance(anc_tile, pos_tile) + ((distance(anc_tile, neg_tile) - distance(anc_tile, pos_tile)) * tf.exp(((- FLAGS.alpha) / Jm)))) / distance(anc_tile, neg_tile)), [1, int(FLAGS.embedding_size)])))
        # Keep negatives that are already nearer to the anchor than the positive.
        neg_mask = tf.greater_equal(distance(anc_tile, pos_tile), distance(anc_tile, neg_tile))
        op_neg_mask = tf.logical_not(neg_mask)
        neg_mask = tf.cast(neg_mask, tf.float32)
        op_neg_mask = tf.cast(op_neg_mask, tf.float32)
        neg_tile = (tf.multiply(neg_tile, neg_mask) + tf.multiply(neg2_tile, op_neg_mask))
        embedding_z_quta = tf.concat([anc, neg_tile], axis=0)
        return embedding_z_quta
    elif (Loss_type == 'Triplet'):
        embedding_split = tf.split(embedding, 3, axis=0)
        anc = embedding_split[0]
        pos = embedding_split[1]
        neg = embedding_split[2]
        # Same interpolation as above, without tiling (one negative per anchor).
        neg2 = (anc + tf.multiply((neg - anc), tf.tile(((distance(anc, pos) + ((distance(anc, neg) - distance(anc, pos)) * tf.exp(((- FLAGS.alpha) / Jm)))) / distance(anc, neg)), [1, FLAGS.embedding_size])))
        neg_mask = tf.greater_equal(distance(anc, pos), distance(anc, neg))
        op_neg_mask = tf.logical_not(neg_mask)
        neg_mask = tf.cast(neg_mask, tf.float32)
        op_neg_mask = tf.cast(op_neg_mask, tf.float32)
        neg = (tf.multiply(neg, neg_mask) + tf.multiply(neg2, op_neg_mask))
        embedding_z_quta = tf.concat([anc, pos, neg], axis=0)
        return embedding_z_quta
    else:
        print('Your loss type is not suit for HDML')
        # Bug fix: os._exit() requires an exit status; calling it with no
        # argument raises TypeError instead of terminating the process.
        os._exit(1)
class Vacation():
    """A named trip composed of accommodations and planned events."""

    name: str
    accommodations: List[Accommodation]
    events: List[str]

    def __init__(self):
        # Start with empty itineraries; name is assigned via setName.
        self.accommodations = []
        self.events = []

    def setName(self, name: str) -> None:
        self.name = name

    def setAccommodations(self, accommodations: List[Accommodation]) -> None:
        self.accommodations = accommodations

    def setEvents(self, events: List[str]) -> None:
        self.events = events

    def __str__(self) -> str:
        """Render a header line, then accommodations, then events."""
        display: StringBuffer = StringBuffer()
        display.append(f'---- {self.name} ----\n')
        for accommodation in self.accommodations:
            display.append(accommodation)
        for event in self.events:
            display.append(f'{event}\n')
        return display.toString()

    def __repr__(self) -> str:
        return str(self)
class TemplateConfig(LazilyParsedConfig):
    """Lazily parsed template configuration (name, email, licenses, plugins).

    Fields are parsed from raw_data on first access, falling back to git /
    environment-derived defaults, and cached in the _field_* slots.

    NOTE(review): each field below has two same-named methods (getter then
    setter); decorators elsewhere in this file look stripped, so these were
    presumably @property / @<field>.setter pairs — as written the setter
    definition shadows the getter. Confirm against the original project.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # FIELD_TO_PARSE is a sentinel meaning "not parsed yet".
        self._field_name = FIELD_TO_PARSE
        self._field_email = FIELD_TO_PARSE
        self._field_licenses = FIELD_TO_PARSE
        self._field_plugins = FIELD_TO_PARSE

    def name(self):
        # Parse on first access; prefer raw_data, then GIT_AUTHOR_NAME,
        # then `git config user.name`, then a placeholder.
        if (self._field_name is FIELD_TO_PARSE):
            if ('name' in self.raw_data):
                name = self.raw_data['name']
                if (not isinstance(name, str)):
                    self.raise_error('must be a string')
                self._field_name = name
            else:
                name = os.environ.get('GIT_AUTHOR_NAME')
                if (name is None):
                    import subprocess
                    try:
                        name = subprocess.check_output(['git', 'config', '--get', 'user.name'], text=True).strip()
                    except Exception:
                        name = 'U.N. Owen'
                # Cache and write the resolved default back into raw_data.
                self._field_name = self.raw_data['name'] = name
        return self._field_name

    def name(self, value):
        # Setter: store raw value and force a re-parse on next access.
        self.raw_data['name'] = value
        self._field_name = FIELD_TO_PARSE

    def email(self):
        # Same resolution order as name: raw_data, GIT_AUTHOR_EMAIL,
        # `git config user.email`, then empty string.
        if (self._field_email is FIELD_TO_PARSE):
            if ('email' in self.raw_data):
                email = self.raw_data['email']
                if (not isinstance(email, str)):
                    self.raise_error('must be a string')
                self._field_email = email
            else:
                email = os.environ.get('GIT_AUTHOR_EMAIL')
                if (email is None):
                    import subprocess
                    try:
                        email = subprocess.check_output(['git', 'config', '--get', 'user.email'], text=True).strip()
                    except Exception:
                        email = ''
                self._field_email = self.raw_data['email'] = email
        return self._field_email

    def email(self, value):
        self.raw_data['email'] = value
        self._field_email = FIELD_TO_PARSE

    def licenses(self):
        # Wrap the raw table in a LicensesConfig; default to an empty table.
        if (self._field_licenses is FIELD_TO_PARSE):
            if ('licenses' in self.raw_data):
                licenses = self.raw_data['licenses']
                if (not isinstance(licenses, dict)):
                    self.raise_error('must be a table')
                self._field_licenses = LicensesConfig(licenses, (*self.steps, 'licenses'))
            else:
                licenses = {}
                self.raw_data['licenses'] = licenses
                self._field_licenses = LicensesConfig(licenses, (*self.steps, 'licenses'))
        return self._field_licenses

    def licenses(self, value):
        self.raw_data['licenses'] = value
        self._field_licenses = FIELD_TO_PARSE

    def plugins(self):
        # Validate that plugins is a table of tables; default to the
        # built-in "default" plugin configuration.
        if (self._field_plugins is FIELD_TO_PARSE):
            if ('plugins' in self.raw_data):
                plugins = self.raw_data['plugins']
                if (not isinstance(plugins, dict)):
                    self.raise_error('must be a table')
                for (name, data) in plugins.items():
                    if (not isinstance(data, dict)):
                        self.raise_error('must be a table', extra_steps=(name,))
                self._field_plugins = plugins
            else:
                self._field_plugins = self.raw_data['plugins'] = {'default': {'tests': True, 'ci': False, 'src-layout': True}}
        return self._field_plugins

    def plugins(self, value):
        self.raw_data['plugins'] = value
        self._field_plugins = FIELD_TO_PARSE
def convert_bip32_intpath_to_strpath(path: Sequence[int], *, hardened_char=BIP32_HARDENED_CHAR) -> str:
    """Render an integer BIP32 derivation path in "m/44'/0/..." notation.

    Hardened indices (BIP32_PRIME bit set) are suffixed with hardened_char.
    Raises TypeError for non-int entries and ValueError for out-of-range ones.
    """
    assert isinstance(hardened_char, str), hardened_char
    assert len(hardened_char) == 1, hardened_char
    segments = ['m']
    for child_index in path:
        if not isinstance(child_index, int):
            raise TypeError(f'bip32 path child index must be int: {child_index}')
        if not (0 <= child_index <= UINT32_MAX):
            raise ValueError(f'bip32 path child index out of range: {child_index}')
        if child_index & BIP32_PRIME:
            # Strip the hardening bit and mark the segment instead.
            segments.append(str(child_index ^ BIP32_PRIME) + hardened_char)
        else:
            segments.append(str(child_index))
    return '/'.join(segments)
# Bug fix: the dangling `.script` line was a mangled decorator (a bare
# syntax error); restored to the TorchScript decorator used upstream.
@torch.jit.script
def finalize_hypos_loop_scores(finalized_scores_list: List[Tensor], finalized_idxs, pad_idx: int, finalized_tokens, finalized_scores):
    """Scatter finished hypotheses' score rows into finalized_scores_list.

    For each finished hypothesis i, strips pad_idx entries from row i of
    finalized_scores and stores the remainder at slot finalized_idxs[i].
    finalized_tokens is accepted for signature parity but unused here.
    """
    for i in range(finalized_idxs.size(0)):
        # Keep only non-padding score entries of this hypothesis.
        cutoff = finalized_scores[i].ne(pad_idx)
        scores = finalized_scores[i][cutoff]
        finalized_scores_list[finalized_idxs[i]] = scores
    return finalized_scores_list
def fr_ssn(value: str):
    """Validate a French social security number (NIR).

    Returns True only when the value matches the NIR pattern, references a
    valid department, and (when present) its two-digit control key verifies.
    """
    if (not value):
        return False
    matched = re.match(_ssn_pattern(), value)
    if (not matched):
        return False
    groups = list(matched.groups())
    # Last capture group is the optional two-digit control key.
    control_key = groups[(- 1)]
    department = groups[3]
    # '99' denotes births abroad; any other department must be a real one.
    if ((department != '99') and (not fr_department(department))):
        return False
    if (control_key is None):
        # No key supplied: the pattern match alone suffices.
        return True
    if (len(department) == len(groups[4])):
        # NOTE(review): presumably rejects an inconsistent department/commune
        # width combination — confirm against the NIR format specification.
        return False
    if (department in ('2A', '2B')):
        # Corsican letter codes are replaced by numeric stand-ins ('19'/'18')
        # before computing the checksum.
        groups[3] = ('19' if (department == '2A') else '18')
    digits = int(''.join(groups[:(- 1)]))
    # The control key must equal 97 minus (number mod 97).
    return (int(control_key) == (97 - (digits % 97)))
class Slice3D_test_norm(torch.utils.data.Dataset):
    """Test-time dataset yielding a normalized (ED, ES) 3D image pair.

    Loads the first and last paths in ``image96``, clips intensities to
    [-400, 600] and normalizes by (x - mean) / (max - min).
    """
    # Jobs this dataset supports: segmentation or classification.
    suitableJobs = ['seg', 'cla']

    def __init__(self, image96, classes, job, spacing=None, crop=None, ratio=None, rotate=None, include_slices=None):
        # spacing/crop/ratio/rotate are accepted but unused in this
        # test-time variant.
        assert (job in self.suitableJobs), 'not suitable jobs'
        self.job = job
        if (job == 'seg'):
            # Segmentation label maps must start with background class 0.
            assert (classes[0] == 0)
        if (job == 'cla'):
            assert (len(classes) > 1)
        self.classes = classes
        self.image96 = image96
        # NOTE(review): slice_indice is 3 regardless of include_slices;
        # presumably a placeholder — confirm intended behavior.
        if (include_slices is None):
            self.slice_indice = 3
        else:
            assert (len(include_slices) > 0)
            self.slice_indice = 3
        self.imgdata96 = []
        self.imgdata48 = []
        self.imgdata24 = []
        print(self.image96[0])
        # ED volume: first path in the list.
        imgdata1 = sitk.ReadImage(self.image96[0])
        imgdata1 = sitk.GetArrayFromImage(imgdata1)
        imgdata1 = np.clip(imgdata1, (- 400), 600)
        imgdata1 = ((imgdata1 - imgdata1.mean()) / (imgdata1.max() - imgdata1.min()))
        self.imgdata96.append(imgdata1)
        # ES volume: last path in the list.
        imgdata1 = sitk.ReadImage(self.image96[(- 1)])
        imgdata1 = sitk.GetArrayFromImage(imgdata1)
        imgdata1 = np.clip(imgdata1, (- 400), 600)
        imgdata1 = ((imgdata1 - imgdata1.mean()) / (imgdata1.max() - imgdata1.min()))
        self.imgdata96.append(imgdata1)
        # Fixed per-class loss weights (unused within this class itself).
        self.loss_weights = [2.5, 1.5, 1.2, 1.0, 2.2, 1.5, 1.2, 3.5, 3, 5.0]
        # NOTE(review): temp_data is never used — likely leftover code.
        temp_data = range(5)

    def __len__(self):
        # Length is the fixed slice_indice (3), not the number of images.
        return self.slice_indice

    def __getitem__(self, index):
        # Always returns the same (ED, ES) pair irrespective of index,
        # each with a leading channel dimension, as float32 tensors.
        ED_num = 0
        ES_num = 1
        ED_image96 = self.imgdata96[ED_num]
        ES_image96 = self.imgdata96[ES_num]
        ED_image96 = ED_image96.reshape(((1,) + ED_image96.shape))
        ED_image96 = torch.from_numpy(ED_image96.astype(np.float32))
        ES_image96 = ES_image96.reshape(((1,) + ES_image96.shape))
        ES_image96 = torch.from_numpy(ES_image96.astype(np.float32))
        return (ED_image96, ES_image96)
class SolveDiscreteARE(pt.Op):
    """PyTensor Op solving the discrete algebraic Riccati equation (DARE)
    via scipy, with an analytic gradient.

    Inputs are the system matrices (A, B, Q, R); the output X solves the
    DARE. ``enforce_Q_symmetric`` symmetrizes Q before solving.
    """
    __props__ = ('enforce_Q_symmetric',)

    def __init__(self, enforce_Q_symmetric=False):
        self.enforce_Q_symmetric = enforce_Q_symmetric

    def make_node(self, A, B, Q, R):
        # Wrap the inputs as tensor variables and allocate the output
        # matrix in the common upcast dtype.
        A = as_tensor_variable(A)
        B = as_tensor_variable(B)
        Q = as_tensor_variable(Q)
        R = as_tensor_variable(R)
        out_dtype = pytensor.scalar.upcast(A.dtype, B.dtype, Q.dtype, R.dtype)
        X = pytensor.tensor.matrix(dtype=out_dtype)
        return pytensor.graph.basic.Apply(self, [A, B, Q, R], [X])

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation: delegate to scipy's DARE solver.
        (A, B, Q, R) = inputs
        X = output_storage[0]
        if self.enforce_Q_symmetric:
            # Guard against numerically asymmetric Q.
            Q = (0.5 * (Q + Q.T))
        X[0] = scipy.linalg.solve_discrete_are(A, B, Q, R).astype(node.outputs[0].type.dtype)

    def infer_shape(self, fgraph, node, shapes):
        # X has the same shape as A.
        return [shapes[0]]

    def grad(self, inputs, output_grads):
        # Implicit-function gradient: the adjoints are expressed through the
        # closed-loop matrix A_tilde = A - B K and a discrete Lyapunov solve.
        (A, B, Q, R) = inputs
        (dX,) = output_grads
        X = self(A, B, Q, R)
        # Feedback gain K = (R + B'XB)^{-1} B'XA.
        K_inner = (R + pt.linalg.matrix_dot(B.T, X, B))
        K_inner_inv = pt.linalg.solve(K_inner, pt.eye(R.shape[0]))
        K = matrix_dot(K_inner_inv, B.T, X, A)
        A_tilde = (A - B.dot(K))
        # Symmetrize the incoming cotangent before the Lyapunov solve.
        dX_symm = (0.5 * (dX + dX.T))
        S = solve_discrete_lyapunov(A_tilde, dX_symm).astype(dX.type.dtype)
        A_bar = (2 * matrix_dot(X, A_tilde, S))
        B_bar = ((- 2) * matrix_dot(X, A_tilde, S, K.T))
        Q_bar = S
        R_bar = matrix_dot(K, S, K.T)
        return [A_bar, B_bar, Q_bar, R_bar]
class MarkingDecorator():
    """Descriptor wrapping a fixture method, remembering its owning class."""

    def __init__(self, function):
        self.function = function
        self.fixture_class = None

    def bind_class(self, fixture_class):
        # Record the class this decorator was accessed through.
        self.fixture_class = fixture_class

    def __get__(self, instance, owner):
        # Descriptor protocol: class access returns the decorator itself,
        # instance access returns the wrapped function bound to the instance.
        self.bind_class(owner)
        if (instance is None):
            return self
        else:
            return types.MethodType(self.function, instance)

    # NOTE(review): `name` reads like a stripped @property — `method_for`
    # below passes self.name to getattr, which requires a string, not a
    # bound method. Confirm against the original project.
    def name(self):
        return self.function.__name__

    def method_for(self, fixture):
        # Look up the equivalently-named attribute on another fixture.
        return getattr(fixture, self.name)
class DeterminismTest(TestCase):
    """DataLoader2 determinism tests for multi-process and in-process
    reading services, plus graph-wide random-seed propagation."""
    # NOTE(review): the two bare tuples below look like stripped decorators
    # (a skip-on-Windows marker and a num_workers parametrization); as
    # written they are no-op expressions — confirm against the original.
    (IS_WINDOWS, 'Remove when is fixed')
    ('num_workers', [1, 8])
    def test_mprs_determinism(self, num_workers):
        data_length = 64
        exp = list(range(data_length))
        data_source = IterableWrapper(exp)
        dp = data_source.shuffle().sharding_filter().map(_random_fn)
        rs = MultiProcessingReadingService(num_workers=num_workers)
        dl = DataLoader2(dp, reading_service=rs)
        # All elements must come through exactly once (order may differ).
        res = []
        for (d, *_) in dl:
            res.append(d)
        self.assertEqual(sorted(res), exp)
        # Two epochs under the same seeds must be identical.
        results = []
        for _ in range(2):
            res = []
            ran_res = []
            torch.manual_seed(123)
            random.seed(123)
            np.random.seed(123)
            for (d, *ran_nums) in dl:
                res.append(d)
                ran_res.append(ran_nums)
            self.assertEqual(sorted(res), exp)
            results.append((res, ran_res))
        self.assertEqual(results[0], results[1])
        # A different seed must change both the order and the random numbers.
        res = []
        ran_res = []
        torch.manual_seed(321)
        random.seed(321)
        np.random.seed(321)
        for (d, *ran_nums) in dl:
            res.append(d)
            ran_res.append(ran_nums)
        self.assertEqual(sorted(res), exp)
        self.assertNotEqual(results[0][0], res)
        self.assertNotEqual(results[0][1], ran_res)
    def test_graph_random_settings(self):
        def _get_dp_seeds_after_setting(worker_id, seed=123):
            # Build a small datapipe graph with shuffles before and after
            # sharding, then seed it and report (shared, distinct) seeds.
            data_source = IterableWrapper(list(range(100)))
            dp0 = data_source.shuffle()
            (dp1, dp2, dp3) = dp0.fork(3)
            dp1 = dp1.sharding_filter()
            dp2 = dp2.shuffle()
            dp3 = dp3.shuffle()
            dp3_ = dp3.sharding_filter()
            dp4 = dp1.zip(dp2, dp3_).shuffle()
            sg = SeedGenerator(seed).spawn(worker_id)
            set_graph_random_seed(dp4, sg)
            # (shared-seed pipes, per-worker-seed pipes)
            return ((dp0._seed, dp3._seed), (dp2._seed, dp4._seed))
        # Pre-sharding seeds are shared across workers; post-sharding differ.
        (ss_0_123, ds_0_123) = _get_dp_seeds_after_setting(worker_id=0, seed=123)
        (ss_1_123, ds_1_123) = _get_dp_seeds_after_setting(worker_id=1, seed=123)
        self.assertEqual(ss_0_123, ss_1_123)
        self.assertNotEqual(ds_0_123, ds_1_123)
        # Re-seeding identically reproduces both seed sets.
        (ss_0_123_, ds_0_123_) = _get_dp_seeds_after_setting(worker_id=0, seed=123)
        self.assertEqual(ss_0_123, ss_0_123_)
        self.assertEqual(ds_0_123, ds_0_123_)
        # A different seed changes both seed sets.
        (ss_0_321, ds_0_321) = _get_dp_seeds_after_setting(worker_id=0, seed=321)
        self.assertNotEqual(ss_0_123, ss_0_321)
        self.assertNotEqual(ds_0_123, ds_0_321)
    def test_sprs_determinism(self):
        # Same checks as test_mprs_determinism, but with the single-process
        # InProcessReadingService.
        data_length = 64
        exp = list(range(data_length))
        data_source = IterableWrapper(exp)
        dp = data_source.shuffle().sharding_filter().map(_random_fn)
        rs = InProcessReadingService()
        dl = DataLoader2(dp, reading_service=rs)
        res = []
        for (d, *_) in dl:
            res.append(d)
        self.assertEqual(sorted(res), exp)
        results = []
        for _ in range(2):
            res = []
            ran_res = []
            torch.manual_seed(123)
            random.seed(123)
            np.random.seed(123)
            for (d, *ran_nums) in dl:
                res.append(d)
                ran_res.append(ran_nums)
            self.assertEqual(sorted(res), exp)
            results.append((res, ran_res))
        self.assertEqual(results[0], results[1])
        res = []
        ran_res = []
        torch.manual_seed(321)
        random.seed(321)
        np.random.seed(321)
        for (d, *ran_nums) in dl:
            res.append(d)
            ran_res.append(ran_nums)
        self.assertEqual(sorted(res), exp)
        self.assertNotEqual(results[0][0], res)
        self.assertNotEqual(results[0][1], ran_res)
class QLWinSingleTest():
    """Runs one test callable in a subprocess and reports its outcome."""

    def __init__(self, test):
        self._test = test

    def _run_test(self, results):
        # Record the outcome in the shared dict; tracebacks are captured
        # as formatted text so they survive the process boundary.
        try:
            results['result'] = self._test()
        except Exception:
            results['exception'] = traceback.format_exc()
            results['result'] = False

    def run(self):
        """Execute the test in a child process; re-raise failures as
        RuntimeError carrying the child's traceback text."""
        with mb.Manager() as manager:
            shared = manager.dict()
            proc = mb.Process(target=QLWinSingleTest._run_test, args=(self, shared))
            proc.start()
            proc.join()
            if 'exception' in shared:
                raise RuntimeError(
                    '\nGot an exception during subprocess:\n' + shared['exception'])
            return shared['result']
class TWavpackFile(TestCase):
    """Audio-property extraction tests for WavpackFile on a silence fixture."""
    def setUp(self):
        self.song = WavpackFile(get_data_path('silence-44-s.wv'))
    def test_length(self):
        # ~3.685 s, compared to 3 decimal places.
        self.assertAlmostEqual(self.song('~#length'), 3.68471, 3)
    def test_channels(self):
        assert (self.song('~#channels') == 2)
    def test_samplerate(self):
        assert (self.song('~#samplerate') == 44100)
    def test_bitrate(self):
        self.assertEqual(self.song('~#bitrate'), 76)
    def test_format_codec(self):
        # Format and codec both report 'WavPack'; encoding stays empty.
        self.assertEqual(self.song('~format'), 'WavPack')
        self.assertEqual(self.song('~codec'), 'WavPack')
        self.assertEqual(self.song('~encoding'), '')
class CustomPythonBuild(build_py):
    """build_py variant that pins the package version into mypy/version.py."""

    def pin_version(self):
        # Ensure mypy/ exists inside the build dir, then write a version
        # module containing the module-level `version` string.
        target_dir = os.path.join(self.build_lib, 'mypy')
        self.mkpath(target_dir)
        version_path = os.path.join(target_dir, 'version.py')
        with open(version_path, 'w') as fh:
            fh.write(f'__version__ = "{version}"\n')

    def run(self):
        # Pin the version first so the normal build copies it along.
        self.execute(self.pin_version, ())
        build_py.run(self)
class AvgMeter(object):
    """Tracks a running average plus a sliding-window mean of recent values."""

    def __init__(self, num=40):
        # num: window size used by show().
        self.num = num
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.losses = []

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times (appended once to history)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        self.losses.append(val)

    def show(self):
        """Mean of the most recent ``num`` recorded values."""
        window_start = np.maximum(len(self.losses) - self.num, 0)
        return np.mean(self.losses[window_start:])
def get_shot_to_precision(shots, logits, targets):
    """Bucket per-class precision values by each class's shot count.

    shots[i] gives, per class, the shot count in episode i; the matching
    logits/targets entries are scored with compute_class_precision and the
    score is appended under that shot count.
    """
    buckets = collections.defaultdict(list)
    for episode_idx, per_class_shots in enumerate(shots):
        ep_logits = logits[episode_idx]
        ep_targets = targets[episode_idx]
        for cls_id, shot_count in enumerate(per_class_shots):
            buckets[shot_count].append(
                compute_class_precision(cls_id, ep_logits, ep_targets))
    return buckets
def create_dataset(queryset_items, name):
    """Dump classification data for *queryset_items* to DATASET_FOLDER/name.

    Silently does nothing for an empty queryset; creates the target
    directory when missing.
    """
    if not queryset_items:
        return
    target_path = os.path.join(settings.DATASET_FOLDER, name)
    payload = {'links': [item.get_data4cls(status=True) for item in queryset_items]}
    target_dir = os.path.dirname(target_path)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    with open(target_path, 'w') as out_file:
        json.dump(payload, out_file)
class FakeFilesystemUnitTest(TestCase):
    def setUp(self):
        """Create a fresh fake filesystem plus unattached file/dir fixtures."""
        self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        self.root_name = self.filesystem.root_dir_name
        # These objects are created but NOT added to the filesystem yet.
        self.fake_file = fake_filesystem.FakeFile('foobar', filesystem=self.filesystem)
        self.fake_child = fake_filesystem.FakeDirectory('foobaz', filesystem=self.filesystem)
        self.fake_grandchild = fake_filesystem.FakeDirectory('quux', filesystem=self.filesystem)
    def test_new_filesystem(self):
        # A fresh filesystem: '/' separator, directory-typed root, no entries.
        self.assertEqual('/', self.filesystem.path_separator)
        self.assertTrue((stat.S_IFDIR & self.filesystem.root.st_mode))
        self.assertEqual({}, self.filesystem.root_dir.entries)
    def test_none_raises_type_error(self):
        # exists() must reject None paths with TypeError.
        with self.assertRaises(TypeError):
            self.filesystem.exists(None)
    def test_empty_string_does_not_exist(self):
        # The empty path is never considered existing.
        self.assertFalse(self.filesystem.exists(''))
    def test_exists_root(self):
        # The root directory always exists.
        self.assertTrue(self.filesystem.exists(self.root_name))
    def test_exists_unadded_file(self):
        # fake_file was constructed in setUp but never added to the tree.
        self.assertFalse(self.filesystem.exists(self.fake_file.name))
    def test_not_exists_subpath_named_like_file_contents(self):
        # A file's CONTENTS must not be traversable as a path component.
        file_path = '/foo/bar'
        self.filesystem.create_file(file_path, contents='baz')
        self.assertFalse(self.filesystem.exists((file_path + '/baz')))
    def test_get_root_object(self):
        # Looking up the root path yields the root directory object.
        self.assertEqual(self.filesystem.root_dir, self.filesystem.get_object(self.root_name))
    def test_add_object_to_root(self):
        # Adding the file places it in the root directory's entries map.
        self.filesystem.add_object(self.root_name, self.fake_file)
        self.assertEqual({'foobar': self.fake_file}, self.filesystem.root_dir.entries)
    def test_windows_root_dir_name(self):
        # On a Windows filesystem the root name follows the cwd's drive
        # letter, or the UNC share for UNC-style cwds.
        self.filesystem.is_windows_fs = True
        self.assertEqual('C:/', self.filesystem.root_dir_name)
        self.filesystem.cwd = 'E:/foo'
        self.assertEqual('E:/', self.filesystem.root_dir_name)
        self.filesystem.cwd = '//foo/bar'
        self.assertEqual('//foo/bar/', self.filesystem.root_dir_name)
    def test_exists_added_file(self):
        # Once added to the tree, the file exists by name.
        self.filesystem.add_object(self.root_name, self.fake_file)
        self.assertTrue(self.filesystem.exists(self.fake_file.name))
    def test_exists_relative_path_posix(self):
        # POSIX: '..' segments resolve, but only through directories that
        # actually exist (a nonexistent intermediate invalidates the path).
        self.filesystem.is_windows_fs = False
        self.filesystem.create_file('/a/b/file_one')
        self.filesystem.create_file('/a/c/file_two')
        self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
        self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
        self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
        self.assertFalse(self.filesystem.exists('a/b/../z/d'))
        self.assertFalse(self.filesystem.exists('a/b/../z/../c/file_two'))
        # Same resolution rules relative to a changed cwd.
        self.filesystem.cwd = '/a/c'
        self.assertTrue(self.filesystem.exists('../b/file_one'))
        self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
        self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
        self.assertFalse(self.filesystem.exists('../z/file_one'))
        self.assertFalse(self.filesystem.exists('../z/../c/file_two'))
def test_exists_relative_path_windows(self):
    # Windows collapses '..' lexically, so a path through a missing
    # component ('z') still exists when '..' cancels it out.
    self.filesystem.is_windows_fs = True
    self.filesystem.is_macos = False
    self.filesystem.create_file('/a/b/file_one')
    self.filesystem.create_file('/a/c/file_two')
    self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
    self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
    self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
    self.assertFalse(self.filesystem.exists('a/b/../z/d'))
    self.assertTrue(self.filesystem.exists('a/b/../z/../c/file_two'))
    self.filesystem.cwd = 'C:/a/c'
    self.assertTrue(self.filesystem.exists('../b/file_one'))
    self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
    self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
    self.assertFalse(self.filesystem.exists('../z/file_one'))
    self.assertTrue(self.filesystem.exists('../z/../c/file_two'))

def test_get_object_from_root(self):
    self.filesystem.add_object(self.root_name, self.fake_file)
    self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))

def test_get_nonexistent_object_from_root_error(self):
    self.filesystem.add_object(self.root_name, self.fake_file)
    self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.get_object('some_bogus_filename')

def test_remove_object_from_root(self):
    self.filesystem.add_object(self.root_name, self.fake_file)
    self.filesystem.remove_object(self.fake_file.name)
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.get_object(self.fake_file.name)

def test_remove_nonexisten_object_from_root_error(self):
    # NOTE(review): 'nonexisten' is a typo in the method name; kept as-is
    # because the name is the test's public identifier.
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.remove_object('some_bogus_filename')

def test_exists_removed_file(self):
    self.filesystem.add_object(self.root_name, self.fake_file)
    self.filesystem.remove_object(self.fake_file.name)
    self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_add_object_to_child(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    self.assertEqual({self.fake_file.name: self.fake_file}, self.filesystem.root_dir.get_entry(self.fake_child.name).entries)

def test_add_object_to_regular_file_error_posix(self):
    # POSIX reports ENOTDIR when a path component is a regular file...
    self.filesystem.is_windows_fs = False
    self.filesystem.add_object(self.filesystem.root_dir_name, self.fake_file)
    with self.raises_os_error(errno.ENOTDIR):
        self.filesystem.add_object(self.fake_file.name, self.fake_file)

def test_add_object_to_regular_file_error_windows(self):
    # ...while Windows reports ENOENT for the same situation.
    self.filesystem.is_windows_fs = True
    self.filesystem.add_object(self.root_name, self.fake_file)
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.add_object(self.fake_file.name, self.fake_file)

def test_exists_file_added_to_child(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    path = self.filesystem.joinpaths(self.fake_child.name, self.fake_file.name)
    self.assertTrue(self.filesystem.exists(path))

def test_get_object_from_child(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    self.assertEqual(self.fake_file, self.filesystem.get_object(self.filesystem.joinpaths(self.fake_child.name, self.fake_file.name)))

def test_get_nonexistent_object_from_child_error(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.get_object(self.filesystem.joinpaths(self.fake_child.name, 'some_bogus_filename'))

def test_remove_object_from_child(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    target_path = self.filesystem.joinpaths(self.fake_child.name, self.fake_file.name)
    self.filesystem.remove_object(target_path)
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.get_object(target_path)

def test_remove_object_from_child_error(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    with self.raises_os_error(errno.ENOENT):
        self.filesystem.remove_object(self.filesystem.joinpaths(self.fake_child.name, 'some_bogus_filename'))

def test_remove_object_from_non_directory_error(self):
    self.filesystem.add_object(self.root_name, self.fake_file)
    with self.raises_os_error(errno.ENOTDIR):
        self.filesystem.remove_object(self.filesystem.joinpaths(('%s' % self.fake_file.name), 'file_does_not_matter_since_parent_not_a_directory'))

def test_exists_file_removed_from_child(self):
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    path = self.filesystem.joinpaths(self.fake_child.name, self.fake_file.name)
    self.filesystem.remove_object(path)
    self.assertFalse(self.filesystem.exists(path))
def test_operate_on_grandchild_directory(self):
    # Full add / get / remove cycle two levels below the root.
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_grandchild)
    grandchild_directory = self.filesystem.joinpaths(self.fake_child.name, self.fake_grandchild.name)
    grandchild_file = self.filesystem.joinpaths(grandchild_directory, self.fake_file.name)
    with self.assertRaises(OSError):
        self.filesystem.get_object(grandchild_file)
    self.filesystem.add_object(grandchild_directory, self.fake_file)
    self.assertEqual(self.fake_file, self.filesystem.get_object(grandchild_file))
    self.assertTrue(self.filesystem.exists(grandchild_file))
    self.filesystem.remove_object(grandchild_file)
    with self.assertRaises(OSError):
        self.filesystem.get_object(grandchild_file)
    self.assertFalse(self.filesystem.exists(grandchild_file))

def test_create_directory_in_root_directory(self):
    path = 'foo'
    self.filesystem.create_dir(path)
    new_dir = self.filesystem.get_object(path)
    self.assertEqual(os.path.basename(path), new_dir.name)
    self.assertTrue((stat.S_IFDIR & new_dir.st_mode))

def test_create_directory_in_root_directory_already_exists_error(self):
    path = 'foo'
    self.filesystem.create_dir(path)
    with self.raises_os_error(errno.EEXIST):
        self.filesystem.create_dir(path)

def test_create_directory(self):
    # create_dir creates intermediate directories, like os.makedirs.
    path = 'foo/bar/baz'
    self.filesystem.create_dir(path)
    new_dir = self.filesystem.get_object(path)
    self.assertEqual(os.path.basename(path), new_dir.name)
    self.assertTrue((stat.S_IFDIR & new_dir.st_mode))
    path = ('%s/quux' % path)
    self.filesystem.create_dir(path)
    new_dir = self.filesystem.get_object(path)
    self.assertEqual(os.path.basename(path), new_dir.name)
    self.assertTrue((stat.S_IFDIR & new_dir.st_mode))

def test_create_directory_already_exists_error(self):
    path = 'foo/bar/baz'
    self.filesystem.create_dir(path)
    with self.raises_os_error(errno.EEXIST):
        self.filesystem.create_dir(path)

def test_create_file_in_read_only_directory_raises_in_posix(self):
    # perm_bits=365 is 0o555 (r-xr-xr-x): directory has no write permission.
    self.filesystem.is_windows_fs = False
    dir_path = '/foo/bar'
    self.filesystem.create_dir(dir_path, perm_bits=365)
    file_path = (dir_path + '/baz')
    if (not is_root()):
        with self.raises_os_error(errno.EACCES):
            self.filesystem.create_file(file_path)
    else:
        # root bypasses permission checks
        self.filesystem.create_file(file_path)
        self.assertTrue(self.filesystem.exists(file_path))

def test_create_file_in_read_only_directory_possible_in_windows(self):
    # On the Windows fake fs the read-only directory does not prevent
    # file creation.
    self.filesystem.is_windows_fs = True
    dir_path = 'C:/foo/bar'
    self.filesystem.create_dir(dir_path, perm_bits=365)
    file_path = (dir_path + '/baz')
    self.filesystem.create_file(file_path)
    self.assertTrue(self.filesystem.exists(file_path))

def test_create_file_in_current_directory(self):
    path = 'foo'
    contents = 'dummy data'
    self.filesystem.create_file(path, contents=contents)
    self.assertTrue(self.filesystem.exists(path))
    # dirname('foo') is '' which does not exist ...
    self.assertFalse(self.filesystem.exists(os.path.dirname(path)))
    path = ('./%s' % path)
    # ... but dirname('./foo') is '.' which does.
    self.assertTrue(self.filesystem.exists(os.path.dirname(path)))

def test_create_file_in_root_directory(self):
    path = '/foo'
    contents = 'dummy data'
    self.filesystem.create_file(path, contents=contents)
    new_file = self.filesystem.get_object(path)
    self.assertTrue(self.filesystem.exists(path))
    self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
    self.assertEqual(os.path.basename(path), new_file.name)
    self.assertTrue((stat.S_IFREG & new_file.st_mode))
    self.assertEqual(contents, new_file.contents)
def test_create_file_with_size_but_no_content_creates_large_file(self):
    # A "large"/sparse file: st_size is set without materializing contents.
    # BUG FIX: the size literal was lost in the original source
    # (`st_size=` / `self.assertEqual(, ...)`) leaving syntax errors;
    # restored with an explicit named constant.
    path = 'large_foo_bar'
    file_size = 100000000
    self.filesystem.create_file(path, st_size=file_size)
    new_file = self.filesystem.get_object(path)
    self.assertEqual(None, new_file.contents)
    self.assertEqual(file_size, new_file.st_size)
def test_create_file_in_root_directory_already_exists_error(self):
    path = 'foo'
    self.filesystem.create_file(path)
    with self.raises_os_error(errno.EEXIST):
        self.filesystem.create_file(path)

def test_create_file(self):
    path = 'foo/bar/baz'
    retval = self.filesystem.create_file(path, contents='dummy_data')
    self.assertTrue(self.filesystem.exists(path))
    self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
    new_file = self.filesystem.get_object(path)
    self.assertEqual(os.path.basename(path), new_file.name)
    if IS_WIN:
        # the fake fs uses fixed dummy ids on Windows (no os.getuid there)
        self.assertEqual(1, new_file.st_uid)
        self.assertEqual(1, new_file.st_gid)
    else:
        self.assertEqual(os.getuid(), new_file.st_uid)
        self.assertEqual(os.getgid(), new_file.st_gid)
    self.assertEqual(new_file, retval)

def test_create_file_with_changed_ids(self):
    # set_uid/set_gid override the ids reported for newly created files.
    path = 'foo/bar/baz'
    set_uid(42)
    set_gid(2)
    self.filesystem.create_file(path)
    self.assertTrue(self.filesystem.exists(path))
    new_file = self.filesystem.get_object(path)
    self.assertEqual(42, new_file.st_uid)
    self.assertEqual(2, new_file.st_gid)
    reset_ids()

def test_empty_file_created_for_none_contents(self):
    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
    path = 'foo/bar/baz'
    self.filesystem.create_file(path, contents=None)
    with fake_open(path) as f:
        self.assertEqual('', f.read())

def test_create_file_with_incorrect_mode_type(self):
    # second positional arg is st_mode; a string is rejected
    with self.assertRaises(TypeError):
        self.filesystem.create_file('foo', 'bar')

def test_create_file_already_exists_error(self):
    path = 'foo/bar/baz'
    self.filesystem.create_file(path, contents='dummy_data')
    with self.raises_os_error(errno.EEXIST):
        self.filesystem.create_file(path)
def test_create_link(self):
    # A dangling symlink can be created first; it "springs to life" once
    # the target exists.
    path = 'foo/bar/baz'
    target_path = 'foo/bar/quux'
    new_file = self.filesystem.create_symlink(path, 'quux')
    self.assertFalse(self.filesystem.exists(path))
    self.assertFalse(self.filesystem.exists(target_path))
    self.assertTrue((stat.S_IFLNK & new_file.st_mode))
    self.filesystem.create_file(target_path)
    self.assertTrue(self.filesystem.exists(path))
    self.assertTrue(self.filesystem.exists(target_path))

def test_resolve_object(self):
    # resolve() follows the symlink to the target object.
    target_path = 'dir/target'
    target_contents = 'ABCDEF'
    link_name = 'x'
    self.filesystem.create_dir('dir')
    self.filesystem.create_file('dir/target', contents=target_contents)
    self.filesystem.create_symlink(link_name, target_path)
    obj = self.filesystem.resolve(link_name)
    self.assertEqual('target', obj.name)
    self.assertEqual(target_contents, obj.contents)

def check_lresolve_object(self):
    # Shared helper (not itself a test): lresolve() returns the link
    # object, whose contents are the target path.
    target_path = 'dir/target'
    target_contents = 'ABCDEF'
    link_name = 'x'
    self.filesystem.create_dir('dir')
    self.filesystem.create_file('dir/target', contents=target_contents)
    self.filesystem.create_symlink(link_name, target_path)
    obj = self.filesystem.lresolve(link_name)
    self.assertEqual(link_name, obj.name)
    self.assertEqual(target_path, obj.contents)

def test_lresolve_object_windows(self):
    self.filesystem.is_windows_fs = True
    self.check_lresolve_object()

def test_lresolve_object_posix(self):
    self.filesystem.is_windows_fs = False
    self.check_lresolve_object()

def check_directory_access_on_file(self, error_subtype):
    # Shared helper: traversing *through* a regular file fails with an
    # OS-dependent errno supplied by the caller.
    self.filesystem.create_file('not_a_dir')
    with self.raises_os_error(error_subtype):
        self.filesystem.resolve('not_a_dir/foo')
    with self.raises_os_error(error_subtype):
        self.filesystem.lresolve('not_a_dir/foo/bar')

def test_directory_access_on_file_windows(self):
    self.filesystem.is_windows_fs = True
    self.check_directory_access_on_file(errno.ENOENT)

def test_directory_access_on_file_posix(self):
    self.filesystem.is_windows_fs = False
    self.check_directory_access_on_file(errno.ENOTDIR)

def test_pickle_fs(self):
    # The fake filesystem must survive a pickle round trip.
    import pickle
    # open file objects are not picklable
    self.filesystem.open_files = []
    p = pickle.dumps(self.filesystem)
    fs = pickle.loads(p)
    self.assertEqual(str(fs.root), str(self.filesystem.root))
    self.assertEqual(fs.mount_points, self.filesystem.mount_points)
class ResponsiveOption():
    """Holds an option value per device class and renders it as CSS classes.

    Keyword arguments xs..xl supply the value for each device class; every
    supplied value must be one of `allowed_options`.
    """
    def __init__(self, allowed_options, prefix=None, xs=None, sm=None, md=None, lg=None, xl=None):
        self.prefix = prefix
        self.allowed_options = allowed_options
        supplied = {'xs': xs, 'sm': sm, 'md': md, 'lg': lg, 'xl': xl}
        self.device_options = {}
        for class_name, option_value in supplied.items():
            if option_value:
                self.device_options[DeviceClass(class_name)] = option_value
        if not all(option in self.allowed_options for option in self.device_options.values()):
            raise ProgrammerError('Illegal option. Allowed options: %s, got: %s' % (self.allowed_options, self.device_options))

    def __len__(self):
        # Number of device classes that actually carry an option value.
        return len(self.device_options)

    def add_css_classes(self, html_widget, prefix=None):
        """Append one combined CSS class per configured device class to
        `html_widget` and return the list of classes added.

        A value of True contributes no value fragment to the class name.
        """
        added = []
        for device_class, value in self.device_options.items():
            effective_prefix = prefix or self.prefix
            prefix_fragments = [effective_prefix] if effective_prefix else []
            value_fragments = [] if value is True else [str(value)]
            css_class = device_class.as_combined_css_class(prefix_fragments, value_fragments)
            html_widget.append_class(css_class)
            added.append(css_class)
        return added
class GroupSampler(Sampler):
    """Sampler that yields dataset indices grouped by ``dataset.flag``.

    Each group is shuffled and padded (by repeating its own leading indices)
    so that it fills whole batches of ``samples_per_gpu``; the resulting
    batches are then emitted in random order.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.epoch = 0
        self.group_sizes = np.bincount(self.flag)
        if min(self.group_sizes) < self.samples_per_gpu:
            # Some group is smaller than one batch: reassign flags
            # alternately so both resulting groups are big enough.
            for idx in range(len(self.flag)):
                self.flag[idx] = idx % 2
            self.group_sizes = np.bincount(self.flag)
            print('\x1b[1;35m >>> group sampler randomly aranged!\x1b[0;0m')
        self.num_samples = 0
        for size in self.group_sizes:
            # Each group is rounded up to a whole number of batches.
            self.num_samples += int(np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        per_group = []
        for group_id, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            members = np.where(self.flag == group_id)[0]
            assert len(members) == size
            np.random.shuffle(members)
            # Pad with the first few (already shuffled) members so the group
            # length is a multiple of samples_per_gpu.
            num_extra = int(np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu - len(members)
            per_group.append(np.concatenate([members, members[:num_extra]]))
        flat = np.concatenate(per_group)
        batch_count = len(flat) // self.samples_per_gpu
        shuffled_batches = [
            flat[start * self.samples_per_gpu:(start + 1) * self.samples_per_gpu]
            for start in np.random.permutation(range(batch_count))
        ]
        flat = np.concatenate(shuffled_batches)
        flat = flat.astype(np.int64).tolist()
        assert len(flat) == self.num_samples
        return iter(flat)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
def _get_list_output(python_version: str, package_version: str, package_name: str, new_install: bool, exposed_binary_names: List[str], unavailable_binary_names: List[str], exposed_man_pages: List[str], unavailable_man_pages: List[str], injected_packages: Optional[Dict[(str, PackageInfo)]]=None, suffix: str='') -> str:
    """Build the expected multi-line pipx-style listing for one package.

    NOTE(review): the leading whitespace inside the string literals is part
    of the expected output — do not normalize it.
    """
    output = []
    # When a suffix is given, show the fully suffixed (quoted) name in parens.
    suffix = (f' ({bold(shlex.quote((package_name + suffix)))})' if suffix else '')
    output.append(f"   {('installed' if new_install else '')} package {bold(shlex.quote(package_name))} {bold(package_version)}{suffix}, installed using {python_version}")
    # Apps section: only shown on a fresh install with at least one app.
    if (new_install and (exposed_binary_names or unavailable_binary_names)):
        output.append('  These apps are now globally available')
        for name in exposed_binary_names:
            output.append(f'    - {name}')
        for name in unavailable_binary_names:
            output.append(f'    - {red(name)} (symlink missing or pointing to unexpected location)')
    # Man pages section mirrors the apps section.
    if (new_install and (exposed_man_pages or unavailable_man_pages)):
        output.append('  These manual pages are now globally available')
        for name in exposed_man_pages:
            output.append(f'    - {name}')
        for name in unavailable_man_pages:
            output.append(f'    - {red(name)} (symlink missing or pointing to unexpected location)')
    if injected_packages:
        output.append('  Injected Packages:')
        for name in injected_packages:
            output.append(f'    - {name} {injected_packages[name].package_version}')
    return '\n'.join(output)
class PauseTagHandler(EtreeTagHandler):
    """Tag handler for a <pause> element: pauses with the element's text."""

    def validate(self):
        # Dry run: exercise the handler without actually pausing.
        return self.__handler(validate=True)

    def process(self):
        self.__handler()

    def __handler(self, validate=False):
        message = self.element.text
        if validate:
            return True
        # presumably ops.pause blocks/prompts with the message — TODO confirm
        ops.pause(message)
        return True
def dePem(s, name):
    """Extract and base64-decode the body of the PEM block *name* in *s*.

    Finds the first '-----BEGIN name-----' ... '-----END name-----' pair
    and decodes the text between the markers.

    Raises:
        SyntaxError: if either marker is missing.
    Returns:
        bytes: the decoded DER payload.
    """
    prefix = ('-----BEGIN %s-----' % name)
    postfix = ('-----END %s-----' % name)
    start = s.find(prefix)
    if start == -1:
        raise SyntaxError('Missing PEM prefix')
    end = s.find(postfix, start + len(prefix))
    if end == -1:
        raise SyntaxError('Missing PEM postfix')
    # Reuse the prefix computed above instead of rebuilding the marker
    # string a second time (the original re-built '-----BEGIN %s-----').
    body = s[start + len(prefix):end]
    return a2b_base64(body)
def matrix_loads(explode: bool, name: str, schema_type: str, location: Mapping[(str, Any)]) -> Any:
    """Deserialize an OpenAPI 'matrix' style parameter from *location*.

    Non-exploded values live under the key ';name', exploded ones under
    ';name*'.

    Raises:
        KeyError: if the raw value does not match the ';name=value' shape
            (or the key is missing from *location*).
    """
    if not explode:  # idiomatic truth test instead of '== False'
        m = re.match(f'^;{name}=(.*)$', location[f';{name}'])
        if m is None:
            raise KeyError(name)
        value = m.group(1)
        if schema_type == 'array':
            return split(value)
        if schema_type == 'object':
            # Alternating key/value items: pair them up into a dict.
            return dict(map(split, split(value, step=2)))
        return value
    # Exploded form: every item repeats the ';name=' prefix.
    if schema_type == 'array':
        return re.findall(f';{name}=([^;]*)', location[f';{name}*'])
    if schema_type == 'object':
        value = location[f';{name}*']
        return dict(map(partial(split, separator='='), split(value[1:], separator=';')))
    m = re.match(f'^;{name}=(.*)$', location[f';{name}*'])
    if m is None:
        raise KeyError(name)
    return m.group(1)
class Window(ABCWindow, message='Cannot load example window provider'):
    """No-op example window provider.

    Every lifecycle method does nothing and every input query reports an
    inactive state; useful as a stub/fallback implementation.
    """
    def __init__(self, name):
        pass

    def refresh(self):
        pass

    def quit(self):
        pass

    def setResize(self, resize):
        pass

    def updateFunc(self):
        pass

    def getMouse(self, mousecode, mousestate):
        # No input device: never pressed.
        return False

    def getKey(self, keycode, keystate):
        return False

    def getMousePos(self):
        # Fixed origin since there is no real cursor.
        return (0, 0)
class TestPassportElementErrorReverseSideWithoutRequest(TestPassportElementErrorReverseSideBase):
    """Offline tests for PassportElementErrorReverseSide (no bot request)."""

    def test_slot_behaviour(self, passport_element_error_reverse_side):
        error = passport_element_error_reverse_side
        # Every declared slot must be an actual attribute ...
        for attr in error.__slots__:
            assert getattr(error, attr, 'err') != 'err', f"got extra slot '{attr}'"
        # ... and no slot may be declared twice across the MRO.
        assert len(mro_slots(error)) == len(set(mro_slots(error))), 'duplicate slot'

    def test_expected_values(self, passport_element_error_reverse_side):
        error = passport_element_error_reverse_side
        assert error.source == self.source
        assert error.type == self.type_
        assert error.file_hash == self.file_hash
        assert error.message == self.message

    def test_to_dict(self, passport_element_error_reverse_side):
        error = passport_element_error_reverse_side
        error_dict = error.to_dict()
        assert isinstance(error_dict, dict)
        for key in ('source', 'type', 'file_hash', 'message'):
            assert error_dict[key] == getattr(error, key)

    def test_equality(self):
        a = PassportElementErrorReverseSide(self.type_, self.file_hash, self.message)
        b = PassportElementErrorReverseSide(self.type_, self.file_hash, self.message)
        c = PassportElementErrorReverseSide(self.type_, '', '')
        d = PassportElementErrorReverseSide('', self.file_hash, '')
        e = PassportElementErrorReverseSide('', '', self.message)
        f = PassportElementErrorSelfie(self.type_, self.file_hash, self.message)
        # Equal field values: equal objects with equal hashes, distinct ids.
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        # Any field difference (or a different error class) breaks equality.
        for other in (c, d, e, f):
            assert a != other
            assert hash(a) != hash(other)
class Migration(migrations.Migration):
    # Adds Story.submitted_by as a nullable FK to the swappable user model;
    # SET_NULL keeps stories when the submitting user is deleted.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('successstories', '0009_auto__0352')]
    operations = [migrations.AddField(model_name='story', name='submitted_by', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL))]
class FitCapacitorGraph(FitGraph):
    """Declarative graph definition: fit capacitor amount/regen over time.

    Everything here is configuration data consumed by the FitGraph base;
    no behavior is defined in this class itself.
    """
    internalName = 'capacitorGraph'
    name = _t('Capacitor')
    # X axes: time in seconds, or cap amount in GJ or percent.
    xDefs = [XDef(handle='time', unit='s', label=_t('Time'), mainInput=('time', 's')), XDef(handle='capAmount', unit='GJ', label=_t('Cap amount'), mainInput=('capAmount', '%')), XDef(handle='capAmount', unit='%', label=_t('Cap amount'), mainInput=('capAmount', '%'))]
    # Y axes: absolute cap amount and cap regeneration rate.
    yDefs = [YDef(handle='capAmount', unit='GJ', label=_t('Cap amount')), YDef(handle='capRegen', unit='GJ/s', label=_t('Cap regen'))]
    # User inputs with defaults/ranges; 'conditions' restricts each input to
    # the axis combinations it applies to.
    inputs = [Input(handle='time', unit='s', label=_t('Time'), iconID=1392, defaultValue=120, defaultRange=(0, 300), conditions=[(('time', 's'), None)]), Input(handle='capAmount', unit='%', label=_t('Cap amount'), iconID=1668, defaultValue=25, defaultRange=(0, 100), conditions=[(('capAmount', 'GJ'), None), (('capAmount', '%'), None)]), Input(handle='capAmountT0', unit='%', label=_t('Starting cap amount'), iconID=1668, defaultValue=100, defaultRange=(0, 100), conditions=[(('time', 's'), None)])]
    checkboxes = [InputCheckbox(handle='useCapsim', label=_t('Use capacitor simulator'), defaultValue=True, conditions=[(('time', 's'), ('capAmount', 'GJ'))])]
    srcExtraCols = ('CapAmount', 'CapTime')
    # Converters between percent inputs and absolute GJ, based on the
    # source fit's capacitor capacity attribute.
    _normalizers = {('capAmount', '%'): (lambda v, src, tgt: ((v / 100) * src.item.ship.getModifiedItemAttr('capacitorCapacity'))), ('capAmountT0', '%'): (lambda v, src, tgt: (None if (v is None) else ((v / 100) * src.item.ship.getModifiedItemAttr('capacitorCapacity'))))}
    _limiters = {'time': (lambda src, tgt: (0, 3600)), 'capAmount': (lambda src, tgt: (0, src.item.ship.getModifiedItemAttr('capacitorCapacity'))), 'capAmountT0': (lambda src, tgt: (0, src.item.ship.getModifiedItemAttr('capacitorCapacity')))}
    # Mapping (x-handle, y-handle) -> getter class that computes the series.
    _getters = {('time', 'capAmount'): Time2CapAmountGetter, ('time', 'capRegen'): Time2CapRegenGetter, ('capAmount', 'capAmount'): CapAmount2CapAmountGetter, ('capAmount', 'capRegen'): CapAmount2CapRegenGetter}
    _denormalizers = {('capAmount', '%'): (lambda v, src, tgt: ((v * 100) / src.item.ship.getModifiedItemAttr('capacitorCapacity')))}
def test_hidden_false(tmp_path, tmp_env):
    # A command explicitly configured with `hidden = false` must appear in
    # the top-level --help output.
    text = '\n    [[command]]\n    name = "my-visible-command"\n    hidden = false\n\n    [[command.stages]]\n    command = "eval"\n    params = {code=\'1\'}\n    '
    (tmp_path / 'config.toml').write_text(text)
    result = helpers.run(['--help'], env=tmp_env).decode()
    assert ('my-visible-command' in result)
class TypeVarTupleExpr(TypeVarLikeExpr):
    """Expression node for a TypeVarTuple (variadic type variable)."""

    # BUG FIX: was a bare string; a tuple is the conventional, less
    # error-prone form (a plain str would also "work" as a single slot name,
    # so behavior is unchanged).
    __slots__ = ('tuple_fallback',)

    # Fallback instance type used where a concrete tuple type is required.
    tuple_fallback: mypy.types.Instance

    __match_args__ = ('name', 'upper_bound', 'default')

    def __init__(self, name: str, fullname: str, upper_bound: mypy.types.Type, tuple_fallback: mypy.types.Instance, default: mypy.types.Type, variance: int = INVARIANT) -> None:
        super().__init__(name, fullname, upper_bound, default, variance)
        self.tuple_fallback = tuple_fallback

    def accept(self, visitor: ExpressionVisitor[T]) -> T:
        return visitor.visit_type_var_tuple_expr(self)

    def serialize(self) -> JsonDict:
        return {'.class': 'TypeVarTupleExpr', 'name': self._name, 'fullname': self._fullname, 'upper_bound': self.upper_bound.serialize(), 'tuple_fallback': self.tuple_fallback.serialize(), 'default': self.default.serialize(), 'variance': self.variance}

    # BUG FIX: the method takes `cls` but was missing the @classmethod
    # decorator, so calling it on an instance or the class would mis-bind
    # the first argument.
    @classmethod
    def deserialize(cls, data: JsonDict) -> TypeVarTupleExpr:
        assert (data['.class'] == 'TypeVarTupleExpr')
        return TypeVarTupleExpr(data['name'], data['fullname'], mypy.types.deserialize_type(data['upper_bound']), mypy.types.Instance.deserialize(data['tuple_fallback']), mypy.types.deserialize_type(data['default']), data['variance'])
def test_get_group_symbol():
    """Group (thousands) separator lookup, including non-Latin numbering."""
    assert (numbers.get_group_symbol('en_US') == ',')
    assert (numbers.get_group_symbol('en_US', numbering_system='latn') == ',')
    assert (numbers.get_group_symbol('en_US', numbering_system='default') == ',')
    assert (numbers.get_group_symbol('ar_EG') == ',')
    # BUG FIX: the expected values below were empty strings — the
    # U+066C ARABIC THOUSANDS SEPARATOR literal was lost to an encoding
    # error; restored via an escape so the file stays ASCII-safe.
    assert (numbers.get_group_symbol('ar_EG', numbering_system='default') == '\u066c')
    assert (numbers.get_group_symbol('ar_EG', numbering_system='arab') == '\u066c')
    assert (numbers.get_group_symbol('ar_EG', numbering_system='latn') == ',')
def test(args):
    """Evaluate a saved section-based segmentation model on the test splits.

    Logs sample images and per-split/overall metrics to TensorBoard (next to
    the checkpoint) and saves the overall confusion matrix as CSV.
    """
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    (log_dir, model_name) = os.path.split(args.model_path)
    # NOTE(review): torch.load unpickles the full model object — only load
    # trusted checkpoints.
    model = torch.load(args.model_path)
    model = model.to(device)
    writer = SummaryWriter(log_dir=log_dir)
    class_names = ['upper_ns', 'middle_ns', 'lower_ns', 'rijnland_chalk', 'scruff', 'zechstein']
    running_metrics_overall = runningScore(6)
    if ('both' in args.split):
        splits = ['test1', 'test2']
    else:
        splits = args.split
    for (sdx, split) in enumerate(splits):
        # Build the section list (inlines and/or crosslines) for this split
        # and persist it so section_loader can read it back.
        labels = np.load(pjoin('data', 'test_once', (split + '_labels.npy')))
        (irange, xrange, depth) = labels.shape
        if args.inline:
            i_list = list(range(irange))
            i_list = [('i_' + str(inline)) for inline in i_list]
        else:
            i_list = []
        if args.crossline:
            x_list = list(range(xrange))
            x_list = [('x_' + str(crossline)) for crossline in x_list]
        else:
            x_list = []
        list_test = (i_list + x_list)
        file_object = open(pjoin('data', 'splits', (('section_' + split) + '.txt')), 'w')
        file_object.write('\n'.join(list_test))
        file_object.close()
        test_set = section_loader(is_transform=True, split=split, augmentations=None)
        n_classes = test_set.n_classes
        test_loader = data.DataLoader(test_set, batch_size=1, num_workers=4, shuffle=False)
        running_metrics_split = runningScore(n_classes)
        with torch.no_grad():
            model.eval()
            total_iteration = 0
            for (i, (images, labels)) in enumerate(test_loader):
                total_iteration = (total_iteration + 1)
                (image_original, labels_original) = (images, labels)
                (images, labels) = (images.to(device), labels.to(device))
                outputs = model(images)
                # Accumulate both per-split and overall metrics.
                pred = outputs.detach().max(1)[1].cpu().numpy()
                gt = labels.detach().cpu().numpy()
                running_metrics_split.update(gt, pred)
                running_metrics_overall.update(gt, pred)
                # Only these section indices are logged as images.
                numbers = [0, 99, 149, 399, 499]
                if (i in numbers):
                    tb_original_image = vutils.make_grid(image_original[0][0], normalize=True, scale_each=True)
                    writer.add_image('test/original_image', tb_original_image, i)
                    labels_original = labels_original.numpy()[0]
                    correct_label_decoded = test_set.decode_segmap(np.squeeze(labels_original))
                    writer.add_image('test/original_label', np_to_tb(correct_label_decoded), i)
                    out = F.softmax(outputs, dim=1)
                    prediction = out.max(1)[1].cpu().numpy()[0]
                    confidence = out.max(1)[0].cpu().detach()[0]
                    tb_confidence = vutils.make_grid(confidence, normalize=True, scale_each=True)
                    decoded = test_set.decode_segmap(np.squeeze(prediction))
                    writer.add_image('test/predicted', np_to_tb(decoded), i)
                    writer.add_image('test/confidence', tb_confidence, i)
                    # Min/max-normalize the raw logits before logging each
                    # class channel separately.
                    unary = outputs.cpu().detach()
                    unary_max = torch.max(unary)
                    unary_min = torch.min(unary)
                    unary = unary.add(((- 1) * unary_min))
                    unary = (unary / (unary_max - unary_min))
                    for channel in range(0, len(class_names)):
                        decoded_channel = unary[0][channel]
                        tb_channel = vutils.make_grid(decoded_channel, normalize=True, scale_each=True)
                        writer.add_image(f'test_classes/_{class_names[channel]}', tb_channel, i)
            # Per-split scores.
            (score, class_iou) = running_metrics_split.get_scores()
            writer.add_text(f'test__{split}/', f"Pixel Acc: {score['Pixel Acc: ']:.3f}", 0)
            for (cdx, class_name) in enumerate(class_names):
                writer.add_text(f'test__{split}/', f" {class_name}_accuracy {score['Class Accuracy: '][cdx]:.3f}", 0)
            writer.add_text(f'test__{split}/', f"Mean Class Acc: {score['Mean Class Acc: ']:.3f}", 0)
            writer.add_text(f'test__{split}/', f"Freq Weighted IoU: {score['Freq Weighted IoU: ']:.3f}", 0)
            writer.add_text(f'test__{split}/', f"Mean IoU: {score['Mean IoU: ']:0.3f}", 0)
            running_metrics_split.reset()
    # Overall scores across all splits.
    (score, class_iou) = running_metrics_overall.get_scores()
    writer.add_text('test_final', f"Pixel Acc: {score['Pixel Acc: ']:.3f}", 0)
    for (cdx, class_name) in enumerate(class_names):
        writer.add_text('test_final', f" {class_name}_accuracy {score['Class Accuracy: '][cdx]:.3f}", 0)
    writer.add_text('test_final', f"Mean Class Acc: {score['Mean Class Acc: ']:.3f}", 0)
    writer.add_text('test_final', f"Freq Weighted IoU: {score['Freq Weighted IoU: ']:.3f}", 0)
    writer.add_text('test_final', f"Mean IoU: {score['Mean IoU: ']:0.3f}", 0)
    print(' FINAL RESULTS ')
    print(f"Pixel Acc: {score['Pixel Acc: ']:.3f}")
    for (cdx, class_name) in enumerate(class_names):
        print(f" {class_name}_accuracy {score['Class Accuracy: '][cdx]:.3f}")
    print(f"Mean Class Acc: {score['Mean Class Acc: ']:.3f}")
    print(f"Freq Weighted IoU: {score['Freq Weighted IoU: ']:.3f}")
    print(f"Mean IoU: {score['Mean IoU: ']:0.3f}")
    confusion = score['confusion_matrix']
    np.savetxt(pjoin(log_dir, 'confusion.csv'), confusion, delimiter=' ')
    writer.close()
    return
class DjangoIntegration(UnmarshallingProcessor[(HttpRequest, HttpResponse)]):
    """Adapts Django request/response objects for openapi-core processing."""

    request_cls = DjangoOpenAPIRequest
    response_cls = DjangoOpenAPIResponse

    def get_openapi_request(self, request: HttpRequest) -> DjangoOpenAPIRequest:
        # Wrap the raw Django request in the openapi-core adapter.
        return self.request_cls(request)

    def get_openapi_response(self, response: HttpResponse) -> DjangoOpenAPIResponse:
        assert (self.response_cls is not None)
        return self.response_cls(response)

    def should_validate_response(self) -> bool:
        # Response validation is disabled by setting response_cls to None.
        return (self.response_cls is not None)

    def handle_response(self, request: HttpRequest, response: HttpResponse, errors_handler: ErrorsHandlerCallable[HttpResponse]) -> HttpResponse:
        # Pass through untouched when validation is disabled; otherwise
        # defer to the base processor.
        if self.should_validate_response():
            return super().handle_response(request, response, errors_handler)
        return response
class FCIDumpDriver(FermionicDriver):
    """Qiskit chemistry driver that reads molecular data from an FCIDump file."""

    def __init__(self, fcidump_input: str, atoms: Optional[List[str]] = None) -> None:
        """
        Args:
            fcidump_input: path to the FCIDump file to parse.
            atoms: optional atomic symbols (FCIDump stores no geometry).

        Raises:
            QiskitChemistryError: on invalid argument types.
        """
        super().__init__()
        if (not isinstance(fcidump_input, str)):
            raise QiskitChemistryError("The fcidump_input must be str, not '{}'".format(fcidump_input))
        self._fcidump_input = fcidump_input
        # NOTE(review): because of `and` short-circuiting, the symbol check
        # only runs when `atoms` is truthy *and not a list* — kept as-is to
        # preserve behavior; presumably it was meant to also validate list
        # entries. TODO confirm upstream intent.
        if (atoms and (not isinstance(atoms, list)) and (not all(((sym in QMolecule.symbols) for sym in atoms)))):
            raise QiskitChemistryError("The atoms must be a list of valid atomic symbols, not '{}'".format(atoms))
        self.atoms = atoms

    def run(self) -> QMolecule:
        """Parse the FCIDump file and populate a QMolecule with its data."""
        fcidump_data = parse(self._fcidump_input)
        q_mol = QMolecule()
        q_mol.nuclear_repulsion_energy = fcidump_data.get('ecore', None)
        q_mol.num_orbitals = fcidump_data.get('NORB')
        # MS2 is 2*S; multiplicity is 2*S + 1.
        q_mol.multiplicity = (fcidump_data.get('MS2', 0) + 1)
        q_mol.molecular_charge = 0
        q_mol.num_beta = ((fcidump_data.get('NELEC') - (q_mol.multiplicity - 1)) // 2)
        q_mol.num_alpha = (fcidump_data.get('NELEC') - q_mol.num_beta)
        if (self.atoms is not None):
            q_mol.num_atoms = len(self.atoms)
            q_mol.atom_symbol = self.atoms
            # No coordinates available in FCIDump: fill with NaNs.
            q_mol.atom_xyz = ([([float('NaN')] * 3)] * len(self.atoms))
        q_mol.mo_onee_ints = fcidump_data.get('hij', None)
        q_mol.mo_onee_ints_b = fcidump_data.get('hij_b', None)
        q_mol.mo_eri_ints = fcidump_data.get('hijkl', None)
        q_mol.mo_eri_ints_bb = fcidump_data.get('hijkl_bb', None)
        q_mol.mo_eri_ints_ba = fcidump_data.get('hijkl_ba', None)
        return q_mol

    # BUG FIX: the method takes no self/cls, so it must be a staticmethod —
    # without the decorator, calling it on an instance would bind q_mol to
    # the instance.
    @staticmethod
    def dump(q_mol: QMolecule, outpath: str, orbsym: Optional[List[str]] = None, isym: int = 1) -> None:
        """Write *q_mol* out as an FCIDump file at *outpath*."""
        dump(outpath, q_mol.num_orbitals, (q_mol.num_alpha + q_mol.num_beta), (q_mol.mo_onee_ints, q_mol.mo_onee_ints_b), (q_mol.mo_eri_ints, q_mol.mo_eri_ints_ba, q_mol.mo_eri_ints_bb), q_mol.nuclear_repulsion_energy, ms2=(q_mol.multiplicity - 1), orbsym=orbsym, isym=isym)
def refresh_suppressed_submodules(module: str, path: (str | None), deps: dict[(str, set[str])], graph: Graph, fscache: FileSystemCache, refresh_file: Callable[([str, str], list[str])]) -> (list[str] | None):
    """If *module* is a package, register its on-disk submodules as
    suppressed imports in any dependent module that imports them.

    Returns the messages produced by re-processing a dependent whose tree
    had to be reloaded, or None if nothing was re-processed.
    """
    messages = None
    if ((path is None) or (not path.endswith(INIT_SUFFIXES))):
        # Only a package (an __init__ file) can have submodules.
        return None
    pkgdir = os.path.dirname(path)
    try:
        entries = fscache.listdir(pkgdir)
    except FileNotFoundError:
        entries = []
    for fnam in entries:
        # Consider only plain .py/.pyi submodules; skip __init__ itself and
        # names containing extra dots.
        if ((not fnam.endswith(('.py', '.pyi'))) or fnam.startswith('__init__.') or (fnam.count('.') != 1)):
            continue
        shortname = fnam.split('.')[0]
        submodule = ((module + '.') + shortname)
        trigger = make_trigger(submodule)
        # Deps may be lazily loaded; make sure this module's are available.
        ensure_deps_loaded(module, deps, graph)
        if (trigger in deps):
            for dep in deps[trigger]:
                state = graph.get(dep)
                if (not state):
                    # 'dep' may name an attribute within a module; fall back
                    # to the longest module prefix present in the graph.
                    dep_module = module_prefix(graph, dep)
                    if (dep_module is not None):
                        state = graph.get(dep_module)
                if state:
                    if (state.tree is None):
                        # Tree was unloaded; re-process the file to get one.
                        assert (state.path is not None)
                        messages = refresh_file(state.id, state.path)
                    tree = state.tree
                    assert tree
                    for imp in tree.imports:
                        if isinstance(imp, ImportFrom):
                            # Record the submodule as suppressed for any
                            # 'from module import shortname' not yet tracked.
                            if ((imp.id == module) and any(((name == shortname) for (name, _) in imp.names)) and (submodule not in state.suppressed_set)):
                                state.suppressed.append(submodule)
                                state.suppressed_set.add(submodule)
    return messages
def main():
    """CLI entry point: configure and run PPO-based goal-directed molecule
    generation over a GuacaMol benchmark suite.

    Writes the run parameters to ``goal_directed_params.json`` and the
    benchmark results to ``goal_directed_results.json`` in the output dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_jobs', type=int, default=32)
    parser.add_argument('--episode_size', type=int, default=8192)
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--entropy_weight', type=int, default=1)
    parser.add_argument('--kl_div_weight', type=int, default=10)
    parser.add_argument('--output_dir', default=None)
    # BUG FIX: clip_param is a float (default 0.2) but was declared type=int,
    # so any explicit --clip_param value (e.g. "0.3") failed to parse.
    parser.add_argument('--clip_param', type=float, default=0.2)
    parser.add_argument('--num_epochs', type=int, default=20)
    parser.add_argument('--model_path', default=None)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--suite', default='test')
    args = parser.parse_args()
    np.random.seed(args.seed)
    setup_default_logger()
    # Default output dir and pretrained model path are relative to this file.
    if args.output_dir is None:
        args.output_dir = os.path.dirname(os.path.realpath(__file__))
    if args.model_path is None:
        dir_path = os.path.dirname(os.path.realpath(__file__))
        args.model_path = os.path.join(dir_path, 'pretrained_model', 'model_final_0.473.pt')
    # Record the exact run configuration next to the results.
    with open(os.path.join(args.output_dir, 'goal_directed_params.json'), 'w') as jf:
        json.dump(vars(args), jf, sort_keys=True, indent=4)
    optimiser = PPODirectedGenerator(pretrained_model_path=args.model_path, num_epochs=args.num_epochs, episode_size=args.episode_size, batch_size=args.batch_size, entropy_weight=args.entropy_weight, kl_div_weight=args.kl_div_weight, clip_param=args.clip_param)
    json_file_path = os.path.join(args.output_dir, 'goal_directed_results.json')
    assess_goal_directed_generation(optimiser, json_output_file=json_file_path, benchmark_version=args.suite)
def Weir_Goudet_beta(ds: Dataset, *, stat_identity_by_state: Hashable=variables.stat_identity_by_state, merge: bool=True) -> Dataset:
    """Compute the Weir-Goudet beta statistic from pairwise identity-by-state.

    Uses the mean IBS over the strict lower triangle (NaNs excluded) as the
    reference similarity and rescales the full IBS matrix against it.
    Returns a dataset with the beta matrix, optionally merged into *ds*.
    """
    ds = define_variable_if_absent(ds, variables.stat_identity_by_state, stat_identity_by_state, identity_by_state)
    variables.validate(ds, {stat_identity_by_state: variables.stat_identity_by_state_spec})
    ibs_matrix = da.asarray(ds[stat_identity_by_state].data)
    # Average similarity over distinct pairs only (strict lower triangle),
    # ignoring missing comparisons.
    pair_sum = da.nansum(da.tril(ibs_matrix, (- 1)))
    pair_count = da.nansum(da.tril((~ da.isnan(ibs_matrix)), (- 1)))
    mean_similarity = pair_sum / pair_count
    beta_matrix = (ibs_matrix - mean_similarity) / (1 - mean_similarity)
    result = create_dataset({variables.stat_Weir_Goudet_beta: (('samples_0', 'samples_1'), beta_matrix)})
    return conditional_merge_datasets(ds, result, merge)
class OptimizationApplication(ABC):
    """Abstract base for optimization applications that can be expressed as a
    QuadraticProgram and can interpret raw optimization results."""

    def to_quadratic_program(self) -> QuadraticProgram:
        """Convert this application into a QuadraticProgram.

        NOTE(review): abstract placeholder — presumably @abstractmethod in the
        original source (decorator lost in extraction); confirm.
        """
        pass

    def interpret(self, result: Union[(OptimizationResult, np.ndarray)]):
        """Interpret an optimization result in application-specific terms
        (abstract placeholder — see NOTE on to_quadratic_program)."""
        pass

    def _result_to_x(self, result: Union[(OptimizationResult, np.ndarray)]) -> np.ndarray:
        """Extract the binary solution vector: ``result.x`` for an
        OptimizationResult, the array itself for an ndarray; raises TypeError
        for anything else."""
        if isinstance(result, OptimizationResult):
            x = result.x
        elif isinstance(result, np.ndarray):
            x = result
        else:
            # NOTE(review): the comma makes TypeError carry two message parts,
            # and the first contains an ideographic space (\u3000) — likely a
            # typo; confirm before changing the message.
            raise TypeError('Unsupported format of result. Provide an\u3000OptimizationResult or a', f' binary array using np.ndarray instead of {type(result)}')
        return x

    def sample_most_likely(state_vector: Union[(QuasiDistribution, Statevector, np.ndarray, Dict)]) -> np.ndarray:
        """Return the most likely binary sample from several state formats.

        NOTE(review): takes no `self`/`cls` — presumably a @staticmethod whose
        decorator was lost in extraction; confirm.
        """
        if isinstance(state_vector, QuasiDistribution):
            probabilities = state_vector.binary_probabilities()
            # Highest-probability bitstring; reversed so bit 0 is index 0.
            binary_string = max(probabilities.items(), key=(lambda kv: kv[1]))[0]
            x = np.asarray([int(y) for y in reversed(list(binary_string))])
            return x
        elif isinstance(state_vector, Statevector):
            probabilities = state_vector.probabilities()
            n = state_vector.num_qubits
            # Decode the argmax basis-state index into its binary digits.
            k = np.argmax(np.abs(probabilities))
            x = np.zeros(n)
            for i in range(n):
                x[i] = (k % 2)
                k >>= 1
            return x
        elif isinstance(state_vector, (OrderedDict, dict)):
            binary_string = max(state_vector.items(), key=(lambda kv: kv[1]))[0]
            x = np.asarray([int(y) for y in reversed(list(binary_string))])
            return x
        elif isinstance(state_vector, np.ndarray):
            # Raw amplitude vector: length 2**n, argmax by magnitude.
            n = int(np.log2(state_vector.shape[0]))
            k = np.argmax(np.abs(state_vector))
            x = np.zeros(n)
            for i in range(n):
                x[i] = (k % 2)
                k >>= 1
            return x
        else:
            raise ValueError(f'state vector should be QuasiDistribution, Statevector, ndarray, or dict. But it is {type(state_vector)}.')
class DE(DE_yabox):
    """Differential-evolution wrapper that records the per-step best/mean
    fitness while iterating and returns per-generation histories."""

    def solve(self, show_progress=False):
        """Run the optimizer for up to ``self.maxiters`` generations.

        Returns a 5-tuple: (best params denormalized, best fitness,
        denormalized best-individual history, best-fitness history,
        mean-fitness history), with histories sampled once per generation
        (every ``popsize`` steps).
        """
        best_pop_evo = []
        best_fitn_evo = []
        mean_fitn_evo = []
        if show_progress:
            from tqdm import tqdm
            iterator = tqdm(self.iterator(), total=self.maxiters, desc='Optimizing ({0})'.format(self.name))
        else:
            iterator = self.iterator()
        for step in iterator:
            idx = step.best_idx
            P = step.population
            fitness = step.fitness
            # Stop once maxiters is exceeded; top up and close the progress
            # bar so it shows 100% before breaking.
            if (step.iteration > self.maxiters):
                if show_progress:
                    iterator.n = self.maxiters
                    iterator.refresh()
                    iterator.close()
                break
            best_pop_evo.append(P[idx])
            best_fitn_evo.append(fitness[idx])
            mean_fitn_evo.append(np.mean(fitness))
        # NOTE(review): P/idx/fitness come from the last loop iteration; an
        # iterator yielding no steps would leave them undefined.
        return (self.denormalize(P[idx]), fitness[idx], self.denormalize(best_pop_evo[(self.popsize - 1)::self.popsize]), np.array(best_fitn_evo[(self.popsize - 1)::self.popsize]), np.array(mean_fitn_evo[(self.popsize - 1)::self.popsize]))
# NOTE(review): this bare call is presumably a stripped decorator —
# @_funcify.register(CAReduce) on the function below; confirm.
_funcify.register(CAReduce)
def numba_funcify_CAReduce(op, node, **kwargs):
    """Build a Numba-jitted reduction function for a CAReduce op.

    Resolves the reduction axes and accumulator dtype, clamps an infinite
    identity value to the integer dtype's range, then compiles a multi-axis
    reducer for the node.
    """
    axes = op.axis
    # axis=None means reduce over all dimensions.
    if (axes is None):
        axes = list(range(node.inputs[0].ndim))
    if (hasattr(op, 'acc_dtype') and (op.acc_dtype is not None)):
        acc_dtype = op.acc_dtype
    else:
        acc_dtype = node.outputs[0].type.dtype
    np_acc_dtype = np.dtype(acc_dtype)
    scalar_op_identity = op.scalar_op.identity
    # +/-inf identities (e.g. for min/max) have no integer representation;
    # substitute the dtype's extreme values.
    if ((np_acc_dtype.kind == 'i') and (not np.isfinite(scalar_op_identity))):
        if np.isposinf(scalar_op_identity):
            scalar_op_identity = np.iinfo(np_acc_dtype).max
        else:
            scalar_op_identity = np.iinfo(np_acc_dtype).min
    scalar_op_identity = np.array(scalar_op_identity, dtype=np_acc_dtype)
    input_name = get_name_for_object(node.inputs[0])
    ndim = node.inputs[0].ndim
    careduce_py_fn = create_multiaxis_reducer(op.scalar_op, scalar_op_identity, axes, ndim, np.dtype(node.outputs[0].type.dtype), input_name=input_name)
    careduce_fn = jit_compile_reducer(node, careduce_py_fn, reduce_to_scalar=False)
    return careduce_fn
class ProtocolTestCase(FramesTestCase):
    """Assertion helpers for websocket protocol tests: check which frames a
    connection sent or received, and closing/failing handshakes."""

    def assertFrameSent(self, connection, frame, eof=False):
        """Assert the connection wrote exactly *frame* (or nothing when frame
        is None), optionally followed by EOF (represented as None)."""
        # SEND_EOF markers map to None; everything else is parsed back into a
        # Frame, unmasking client-to-server data.
        frames_sent = [(None if (write is SEND_EOF) else self.parse(write, mask=(connection.side is CLIENT), extensions=connection.extensions)) for write in connection.data_to_send()]
        frames_expected = ([] if (frame is None) else [frame])
        if eof:
            frames_expected += [None]
        self.assertEqual(frames_sent, frames_expected)

    def assertFrameReceived(self, connection, frame):
        """Assert the connection produced exactly *frame* as an event (or no
        events when frame is None)."""
        frames_received = connection.events_received()
        frames_expected = ([] if (frame is None) else [frame])
        self.assertEqual(frames_received, frames_expected)

    def assertConnectionClosing(self, connection, code=None, reason=''):
        """Assert a close frame was received and echoed back; the server side
        also sends EOF after its close frame."""
        close_frame = Frame(OP_CLOSE, (b'' if (code is None) else Close(code, reason).serialize()))
        self.assertFrameReceived(connection, close_frame)
        self.assertFrameSent(connection, close_frame, eof=(connection.side is SERVER))

    def assertConnectionFailing(self, connection, code=None, reason=''):
        """Assert the connection is failing: a close frame was sent but no
        event was surfaced to the application."""
        close_frame = Frame(OP_CLOSE, (b'' if (code is None) else Close(code, reason).serialize()))
        self.assertFrameReceived(connection, None)
        self.assertFrameSent(connection, close_frame, eof=(connection.side is SERVER))
class Trainer():
    """Trains a VAE-style encoder/decoder pair with KL-weight annealing,
    scheduled sampling (inverse-sigmoid teacher forcing) and periodic
    checkpointing under ``exp_dir``."""

    def __init__(self, exp_dir='experiment', score_type='exprsco', batch_size=64, random_seed=42, print_every=100, checkpoint_every=1000, samp_rate=2000, KL_rate=0.9999, free_bits=60):
        # Seed torch for reproducibility when requested.
        if (random_seed is not None):
            torch.manual_seed(random_seed)
        # Resolve the experiment directory to an absolute path and create it.
        if (not os.path.isabs(exp_dir)):
            exp_dir = os.path.join(os.getcwd(), exp_dir)
        if (not os.path.exists(exp_dir)):
            os.makedirs(exp_dir)
        self.exp_dir = exp_dir
        # Optimizer/scheduler are set in train() (fresh or from a checkpoint).
        self.optimizer = None
        self.print_every = print_every
        self.checkpoint_every = checkpoint_every
        self.loss_fn = nn.CrossEntropyLoss()
        self.scheduler = None
        self.batch_size = batch_size
        # NOTE(review): score_type is accepted but never stored/used here.
        self.samp_rate = samp_rate      # k of the inverse-sigmoid schedule
        self.KL_rate = KL_rate          # decay rate for KL-weight annealing
        self.free_bits = free_bits      # free-bits threshold for the KL term
        self.vocab_size = None          # set from the decoder in train()

    def inverse_sigmoid(self, step):
        """Teacher-forcing probability k/(k+e^(step/k)); decays from ~1 to 0.

        k=None disables teacher forcing; k=1.0 forces it always.
        """
        k = self.samp_rate
        # NOTE(review): `k is None` would be the idiomatic comparison.
        if (k == None):
            return 0
        if (k == 1.0):
            return 1
        return (k / (k + exp((step / k))))

    def KL_annealing(self, step, start, end):
        """Exponentially anneal the KL weight from *start* toward *end*."""
        return (end + ((start - end) * (self.KL_rate ** step)))

    def loss(self, step, encoder, decoder, batch, teacher_forcing):
        """Compute ELBO-style loss for one batch.

        Returns (loss, reconstruction accuracy in [0, 1], KL divergence per
        batch element).
        """
        batch_size = batch.shape[0]
        (mu, sig) = encoder(batch)
        # Reparameterization trick: sample latent = mu + sig * eps.
        latent = (mu + (sig * torch.randn_like(mu)))
        logits = decoder(latent, temp=None, x=batch, teacher_forcing=teacher_forcing, logits=True)
        # KL weight annealed from 0 to 0.2; KL_reg applies the free-bits floor.
        KL_weight = self.KL_annealing(step, 0, 0.2)
        (KL_div, KL_reg) = Gaussian_KL_div(mu, (sig ** 2), 1, self.free_bits)
        loss = (KL_weight * KL_reg)
        logit_loss = self.loss_fn(logits.view((- 1), self.vocab_size), batch.view((- 1)))
        loss = (loss + logit_loss)
        # Token-level accuracy over the whole batch.
        correct = (torch.argmax(logits.view((- 1), self.vocab_size), dim=1) == batch.view((- 1))).float().sum()
        reconstruction_acc = (correct / (batch_size * batch.shape[1]))
        return (loss, reconstruction_acc, (KL_div / batch_size))

    def train_batch(self, step, encoder, decoder, batch, teacher_forcing=True):
        """One optimization step; returns (loss, accuracy %, KL divergence)."""
        (loss, reconstruction_accuracy, KL_div) = self.loss(step, encoder, decoder, batch, teacher_forcing)
        # NOTE(review): PyTorch >= 1.1 expects optimizer.step() before
        # scheduler.step(); this order triggers a warning — confirm intent.
        self.scheduler.step()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return (loss.item(), (reconstruction_accuracy * 100), KL_div)

    def train_epochs(self, encoder, decoder, start_epoch, start_step, train_data, dev_data, end_epoch, log_file):
        """Run the main training loop over [start_epoch, end_epoch), logging
        progress, checkpointing periodically and validating after each epoch
        (with and without teacher forcing)."""
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        params = {'batch_size': self.batch_size, 'shuffle': True, 'num_workers': 4, 'drop_last': True}
        training_data = DataLoader(train_data, **params)
        val_data = DataLoader(dev_data, **params)
        steps_per_epoch = len(training_data)
        step = start_step
        tot_steps = (steps_per_epoch * (end_epoch - start_epoch))
        elapsed_steps = 0
        for epoch in range(start_epoch, end_epoch):
            # NOTE(review): open(log_file, 'a') per print leaks file handles;
            # consider opening the log once per epoch.
            print('Epoch: {:d} Step: {:d}'.format(epoch, step), file=open(log_file, 'a'))
            start = time.time()
            elapsed_steps = 0
            epoch_loss_total = 0.0
            reconstruction_accuracy_total = 0.0
            loss_total = 0.0
            KL_div_total = 0.0
            for batch in training_data:
                batch = batch.to(device)
                # Teacher-forcing probability decays with the global step.
                (loss, reconstruction_accuracy, KL_div) = self.train_batch(step, encoder, decoder, batch, self.inverse_sigmoid(step))
                loss_total += loss
                epoch_loss_total += loss
                reconstruction_accuracy_total += reconstruction_accuracy
                KL_div_total += KL_div
                step += 1
                elapsed_steps += 1
                if ((step % self.print_every) == 0):
                    # Average over the window since the last report (shorter
                    # at the start of an epoch).
                    if (elapsed_steps > self.print_every):
                        cnt = self.print_every
                    else:
                        cnt = elapsed_steps
                    loss_avg = (loss_total / cnt)
                    reconstruction_accuracy_avg = (reconstruction_accuracy_total / cnt)
                    KL_div_avg = (KL_div_total / cnt)
                    loss_total = 0
                    reconstruction_accuracy_total = 0
                    KL_div_total = 0
                    print('Progress: {:.2f}% Average Loss: {:2.2f} Reconstruction Accuracy: {:2.2f}% KL Divergence: {:2.2f} '.format(((elapsed_steps / steps_per_epoch) * 100), loss_avg, reconstruction_accuracy_avg, KL_div_avg), file=open(log_file, 'a'))
                if ((step % self.checkpoint_every) == 0):
                    print('Trying to checkpoint.')
                    Checkpoint(encoder=encoder, decoder=decoder, epoch=epoch, step=step, optimizer=self.optimizer, scheduler=self.scheduler, samp_rate=self.samp_rate, KL_rate=self.KL_rate, free_bits=self.free_bits).save(self.exp_dir)
                    print('Checkpoint successful!')
            print(('End of epoch. Time elapsed: ' + timer(start, time.time())), file=open(log_file, 'a'))
            print('Average loss for this epoch: {:2.2f} '.format((epoch_loss_total / elapsed_steps)), file=open(log_file, 'a'))
            # End-of-epoch checkpoint records epoch+1 so a resume starts fresh.
            Checkpoint(encoder=encoder, decoder=decoder, epoch=(epoch + 1), step=step, optimizer=self.optimizer, scheduler=self.scheduler, samp_rate=self.samp_rate, KL_rate=self.KL_rate, free_bits=self.free_bits).save(self.exp_dir)
            # Validation: teacher-forced (1) and free-running/no-forcing (0).
            with torch.no_grad():
                reconstruction_accuracy_val = 0.0
                reconstruction_accuracy_val_nf = 0.0
                val_loss = 0.0
                val_KL_tot = 0.0
                val_loss_nf = 0.0
                val_KL_tot_nf = 0.0
                count = 0
                for val_batch in val_data:
                    count += 1
                    val_batch = val_batch.to(device)
                    (batch_loss, batch_accuracy, val_KL) = self.loss(step, encoder, decoder, val_batch, 1)
                    (batch_loss_nf, batch_accuracy_nf, val_KL_nf) = self.loss(step, encoder, decoder, val_batch, 0)
                    val_loss += batch_loss
                    reconstruction_accuracy_val += batch_accuracy
                    val_KL_tot += val_KL
                    val_loss_nf += batch_loss_nf
                    reconstruction_accuracy_val_nf += batch_accuracy_nf
                    val_KL_tot_nf += val_KL_nf
                reconstruction_accuracy_val /= count
                val_loss /= count
                val_KL_tot /= count
                reconstruction_accuracy_val_nf /= count
                val_loss_nf /= count
                val_KL_tot_nf /= count
                print('Validation results: ', file=open(log_file, 'a'))
                print('Reconstruction Accuracy: {:2.2f}% Loss (Validation): {:2.2f} KL Divergence {:2.2f}'.format((100 * reconstruction_accuracy_val), val_loss, val_KL_tot), file=open(log_file, 'a'))
                print('Reconstruction Accuracy (NF): {:2.2f}% Loss (NF): {:2.2f} KL Divergence (NF) {:2.2f}'.format((100 * reconstruction_accuracy_val_nf), val_loss_nf, val_KL_tot_nf), file=open(log_file, 'a'))

    def train(self, encoder, decoder, n_epochs, train_data, dev_data, resume, optimizer, log_file):
        """Entry point: either resume training state from the latest checkpoint
        in exp_dir, or initialize optimizer/scheduler, then run n_epochs.

        Returns the (possibly checkpoint-restored) encoder and decoder.
        """
        if resume:
            latest_checkpoint_path = Checkpoint.get_latest_checkpoint(self.exp_dir)
            resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
            encoder = resume_checkpoint.encoder
            decoder = resume_checkpoint.decoder
            start_epoch = resume_checkpoint.epoch
            step = resume_checkpoint.step
            self.scheduler = resume_checkpoint.scheduler
            self.optimizer = resume_checkpoint.optimizer
            self.samp_rate = resume_checkpoint.samp_rate
            self.KL_rate = resume_checkpoint.KL_rate
            self.free_bits = resume_checkpoint.free_bits
            self.vocab_size = decoder.vocab_size
        else:
            self.optimizer = optimizer
            # Default optimizer: Adam over both networks' parameters.
            if (optimizer is None):
                params = (list(encoder.parameters()) + list(decoder.parameters()))
                self.optimizer = Adam(params, lr=0.001)
            self.scheduler = LambdaLR(self.optimizer, decay)
            self.vocab_size = decoder.vocab_size
            start_epoch = 1
            step = 0
        self.train_epochs(encoder, decoder, start_epoch, step, train_data, dev_data, (start_epoch + n_epochs), log_file)
        return (encoder, decoder)
def driver(request, driver_class, driver_kwargs):
    """pytest fixture: create a WebDriver with retries, optionally wrap it in
    an EventFiringWebDriver, yield it to the test, and quit on teardown."""
    retries = int(request.config.getini('max_driver_init_attempts'))
    # tenacity idiom: iterate attempts and enter each one as a context
    # manager; a driver_class failure inside the `with` triggers a retry
    # with exponential backoff, re-raising after the final attempt.
    for retry in Retrying(stop=stop_after_attempt(retries), wait=wait_exponential(), reraise=True):
        with retry:
            LOGGER.info(f'Driver init, attempt {retry.retry_state.attempt_number}/{retries}')
            driver = driver_class(**driver_kwargs)
    # Optional event listener given as a dotted "module.Class" path.
    event_listener = request.config.getoption('event_listener')
    if (event_listener is not None):
        (mod_name, class_name) = event_listener.rsplit('.', 1)
        mod = __import__(mod_name, fromlist=[class_name])
        event_listener = getattr(mod, class_name)
        if (not isinstance(driver, EventFiringWebDriver)):
            driver = EventFiringWebDriver(driver, event_listener())
    # Expose the driver on the test node for reporting hooks.
    request.node._driver = driver
    (yield driver)
    driver.quit()
class uvm_nonblocking_transport_port(uvm_port_base):
    """TLM port that forwards non-blocking transport calls to its export."""

    def __init__(self, name, parent):
        super().__init__(name, parent)

    def nb_transport(self, put_data):
        """Forward *put_data* to the connected export's nb_transport.

        Returns the (success, get_data) pair from the export; raises
        UVMTLMConnectionError when no suitable export is connected.
        """
        try:
            success, get_data = self.export.nb_transport(put_data)
        except AttributeError:
            raise UVMTLMConnectionError(f'Missing or wrong export in {self.get_full_name()}. Did you connect it?')
        return (success, get_data)
def register(parent):
    """Build a TestOperators case class (derived from *parent*) with one
    kernel test per operator kernel, registered for all available devices."""
    devices = wp.get_devices()

    class TestOperators(parent):
        pass

    # Register each operator kernel in the same order as before.
    operator_kernels = (
        test_operators_scalar_float,
        test_operators_scalar_int,
        test_operators_matrix_index,
        test_operators_vector_index,
        test_operators_vec3,
        test_operators_vec4,
        test_operators_mat22,
        test_operators_mat33,
        test_operators_mat44,
    )
    for kernel in operator_kernels:
        add_kernel_test(TestOperators, kernel, dim=1, devices=devices)
    return TestOperators
class DuckTestDrive():
    """Adapter-pattern demo: exercises a real Duck, a Turkey, and a Turkey
    adapted to the Duck interface."""

    def main(*args):
        duck: Duck = MallardDuck()
        turkey: Turkey = WildTurkey()
        adapted: Duck = TurkeyAdapter(turkey)
        print('The Turkey says...')
        turkey.gobble()
        turkey.fly()
        print('\nThe Duck says...')
        DuckTestDrive.testDuck(duck)
        print('\nThe TurkeyAdapter says...')
        DuckTestDrive.testDuck(adapted)

    def testDuck(duck: Duck) -> None:
        """Drive the Duck interface: quack, then fly."""
        duck.quack()
        duck.fly()
class TypeReplaceVisitor(SyntheticTypeVisitor[None]):
    """Replace SymbolNode references inside types in place, according to a
    replacement map.

    Each visit method recurses into a type's components; ``fixup`` swaps any
    node found in ``replacements`` and leaves everything else untouched.
    """

    def __init__(self, replacements: dict[(SymbolNode, SymbolNode)]) -> None:
        self.replacements = replacements

    def visit_instance(self, typ: Instance) -> None:
        typ.type = self.fixup(typ.type)
        for arg in typ.args:
            arg.accept(self)
        if typ.last_known_value:
            typ.last_known_value.accept(self)

    def visit_type_alias_type(self, typ: TypeAliasType) -> None:
        assert (typ.alias is not None)
        typ.alias = self.fixup(typ.alias)
        for arg in typ.args:
            arg.accept(self)

    def visit_any(self, typ: AnyType) -> None:
        pass

    def visit_none_type(self, typ: NoneType) -> None:
        pass

    def visit_callable_type(self, typ: CallableType) -> None:
        for arg in typ.arg_types:
            arg.accept(self)
        typ.ret_type.accept(self)
        # The definition node itself may also need replacing.
        if typ.definition:
            typ.definition = self.replacements.get(typ.definition, typ.definition)
        if (typ.fallback is not None):
            typ.fallback.accept(self)
        for tv in typ.variables:
            if isinstance(tv, TypeVarType):
                tv.upper_bound.accept(self)
                for value in tv.values:
                    value.accept(self)

    def visit_overloaded(self, t: Overloaded) -> None:
        for item in t.items:
            item.accept(self)
        if (t.fallback is not None):
            t.fallback.accept(self)

    def visit_erased_type(self, t: ErasedType) -> None:
        # Erased types should never survive to this phase.
        raise RuntimeError('Cannot handle erased type')

    def visit_deleted_type(self, typ: DeletedType) -> None:
        pass

    def visit_partial_type(self, typ: PartialType) -> None:
        raise RuntimeError('Cannot handle partial type')

    def visit_tuple_type(self, typ: TupleType) -> None:
        for item in typ.items:
            item.accept(self)
        if (typ.partial_fallback is not None):
            typ.partial_fallback.accept(self)

    def visit_type_type(self, typ: TypeType) -> None:
        typ.item.accept(self)

    def visit_type_var(self, typ: TypeVarType) -> None:
        typ.upper_bound.accept(self)
        typ.default.accept(self)
        for value in typ.values:
            value.accept(self)

    def visit_param_spec(self, typ: ParamSpecType) -> None:
        typ.upper_bound.accept(self)
        typ.default.accept(self)

    def visit_type_var_tuple(self, typ: TypeVarTupleType) -> None:
        typ.upper_bound.accept(self)
        typ.default.accept(self)

    def visit_unpack_type(self, typ: UnpackType) -> None:
        typ.type.accept(self)

    def visit_parameters(self, typ: Parameters) -> None:
        for arg in typ.arg_types:
            arg.accept(self)

    def visit_typeddict_type(self, typ: TypedDictType) -> None:
        for value_type in typ.items.values():
            value_type.accept(self)
        typ.fallback.accept(self)

    def visit_raw_expression_type(self, t: RawExpressionType) -> None:
        pass

    def visit_literal_type(self, typ: LiteralType) -> None:
        typ.fallback.accept(self)

    def visit_unbound_type(self, typ: UnboundType) -> None:
        for arg in typ.args:
            arg.accept(self)

    def visit_type_list(self, typ: TypeList) -> None:
        for item in typ.items:
            item.accept(self)

    def visit_callable_argument(self, typ: CallableArgument) -> None:
        typ.typ.accept(self)

    def visit_ellipsis_type(self, typ: EllipsisType) -> None:
        pass

    def visit_uninhabited_type(self, typ: UninhabitedType) -> None:
        pass

    def visit_union_type(self, typ: UnionType) -> None:
        for item in typ.items:
            item.accept(self)

    def visit_placeholder_type(self, t: PlaceholderType) -> None:
        for item in t.args:
            item.accept(self)

    def fixup(self, node: SN) -> SN:
        """Return the replacement for *node*, or *node* itself if unmapped."""
        if (node in self.replacements):
            new = self.replacements[node]
            return cast(SN, new)
        return node
def test_SKCImputerABC__impute_not_implemented(decision_matrix):
    """The base class's abstract _impute must raise NotImplementedError when
    a subclass delegates back to it via super()."""
    class Foo(impute.SKCImputerABC):
        _skcriteria_parameters = []

        def _impute(self, **kwargs):
            # Deliberately call the abstract base implementation.
            return super()._impute(**kwargs)
    transformer = Foo()
    dm = decision_matrix(seed=42)
    with pytest.raises(NotImplementedError):
        transformer.transform(dm)
class File(FileSystemObject):
    """A regular file in the file manager: caches its first bytes for binary
    detection and decides whether a preview should be generated."""
    is_file = True
    preview_data = None
    preview_known = False
    preview_loading = False
    # Cached set of the file's first N_FIRST_BYTES byte values.
    _firstbytes = None

    def firstbytes(self):
        """Return a set of the file's first bytes (cached), or None when the
        file cannot be read."""
        if (self._firstbytes is not None):
            return self._firstbytes
        try:
            with open(self.path, 'rb') as fobj:
                self._firstbytes = set(fobj.read(N_FIRST_BYTES))
        except (IOError, OSError):
            return None
        return self._firstbytes

    def is_binary(self):
        """Guess binariness: the first bytes contain control characters.

        BUG FIX: `self.firstbytes` was referenced without calling it, so the
        check operated on the bound method (always truthy) and the set
        intersection would fail. (If `firstbytes` was a lazy property in the
        original source, restore the decorator instead.)
        """
        firstbytes = self.firstbytes()
        if (firstbytes and (CONTROL_CHARACTERS & firstbytes)):
            return True
        return False

    def has_preview(self):
        """Decide whether this file is previewable, applying the settings'
        size limit, white/blacklists and the binary heuristic."""
        if (not self.fm.settings.preview_files):
            return False
        if (self.is_socket or self.is_fifo or self.is_device):
            return False
        if (not self.accessible):
            return False
        if (self.fm.settings.preview_max_size and (self.size > self.fm.settings.preview_max_size)):
            return False
        # An external preview script handles everything itself.
        if (self.fm.settings.preview_script and self.fm.settings.use_preview_script):
            return True
        if self.container:
            return False
        if PREVIEW_WHITELIST.search(self.basename):
            return True
        if PREVIEW_BLACKLIST.search(self.basename):
            return False
        # Special files that would hang or crash a reader.
        if (self.path in ('/dev/core', '/proc/kcore')):
            return False
        if self.is_binary():
            return False
        return True

    def get_preview_source(self, width, height):
        """Delegate preview generation to the file manager."""
        return self.fm.get_preview(self, width, height)

    def is_image_preview(self):
        """True if the cached preview for this file is an image preview."""
        try:
            return self.fm.previews[self.realpath]['imagepreview']
        except KeyError:
            return False

    def __eq__(self, other):
        return (isinstance(other, File) and (self.path == other.path))

    def __ne__(self, other):
        # BUG FIX: the original defined only `__neq__`, which Python never
        # calls; `__ne__` is the real inequality hook.
        return (not self.__eq__(other))

    # Kept for backward compatibility with any code calling __neq__ directly.
    __neq__ = __ne__

    def __hash__(self):
        return hash(self.path)
def collate_fn(batch):
    """Collate a batch of feature dicts: right-pad input_ids with 0 (mask 0.0)
    to the batch's max length and convert everything to tensors.

    Returns (input_ids, input_mask, labels, ss, os) where ss/os are the
    subject/object start positions.
    """
    max_len = max(len(f['input_ids']) for f in batch)
    padded_ids = []
    masks = []
    for f in batch:
        seq = f['input_ids']
        pad = max_len - len(seq)
        padded_ids.append(seq + [0] * pad)
        masks.append([1.0] * len(seq) + [0.0] * pad)
    input_ids = torch.tensor(padded_ids, dtype=torch.long)
    input_mask = torch.tensor(masks, dtype=torch.float)
    labels = torch.tensor([f['labels'] for f in batch], dtype=torch.long)
    ss = torch.tensor([f['ss'] for f in batch], dtype=torch.long)
    os = torch.tensor([f['os'] for f in batch], dtype=torch.long)
    return (input_ids, input_mask, labels, ss, os)
class RandomHorizontalFlip(object):
    """Transform: with probability 0.5, mirror both the image and its
    saliency map left-to-right (keeping them aligned)."""

    def __call__(self, sample):
        if random.random() < 0.5:
            # Flip image and saliency together so they stay in register.
            for key in ('image', 'sal'):
                sample[key] = sample[key].transpose(Image.FLIP_LEFT_RIGHT)
        return sample

    def __str__(self):
        return 'RandomHorizontalFlip'
def test_asyncio_mark_respects_parametrized_loop_policies(pytester: Pytester):
    """A package-scoped async test parametrized over two event-loop policies
    must run (and pass) once per policy — expect two passed outcomes."""
    pytester.makepyfile(__init__='', test_parametrization=dedent(' import asyncio\n\n import pytest\n\n pytestmark = pytest.mark.asyncio(scope="package")\n\n (\n scope="package",\n params=[\n asyncio.DefaultEventLoopPolicy(),\n asyncio.DefaultEventLoopPolicy(),\n ],\n )\n def event_loop_policy(request):\n return request.param\n\n async def test_parametrized_loop():\n pass\n '))
    result = pytester.runpytest_subprocess('--asyncio-mode=strict')
    result.assert_outcomes(passed=2)
class SnapshotMetadata():
    """Snapshot header: format version, writer world size, and the manifest
    mapping logical paths to typed entries.

    NOTE(review): the bare field declarations plus ``from_yaml(cls, ...)``
    suggest the original source had @dataclass and @classmethod decorators
    that were lost in extraction; confirm.
    """
    version: str
    world_size: int
    manifest: Manifest

    def to_yaml(self) -> str:
        """Serialize to YAML (JSON is a strict subset of YAML, so dumping
        JSON yields a valid YAML document)."""
        return json.dumps(asdict(self), sort_keys=False, indent=2)

    def from_yaml(cls, yaml_str: str) -> 'SnapshotMetadata':
        """Parse YAML text into a SnapshotMetadata, reconstructing each
        manifest entry according to its 'type' tag."""
        d = yaml.load(yaml_str, Loader=Loader)
        manifest: Manifest = {}
        for (path, yaml_obj) in d['manifest'].items():
            type_name = yaml_obj['type']
            # Dispatch on the serialized type tag to the matching entry class.
            if (type_name == 'list'):
                manifest[path] = ListEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'dict'):
                manifest[path] = DictEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'OrderedDict'):
                manifest[path] = OrderedDictEntry.from_yaml_obj(yaml_obj)
            elif (type_name in PrimitiveEntry.supported_types):
                manifest[path] = PrimitiveEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'Tensor'):
                manifest[path] = TensorEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'ShardedTensor'):
                manifest[path] = ShardedTensorEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'ChunkedTensor'):
                manifest[path] = ChunkedTensorEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'DTensor'):
                manifest[path] = DTensorEntry.from_yaml_obj(yaml_obj)
            elif (type_name == 'object'):
                manifest[path] = ObjectEntry.from_yaml_obj(yaml_obj)
            # NOTE(review): unrecognized type tags are silently dropped from
            # the manifest — confirm this is intended.
        d['manifest'] = manifest
        return cls(**d)
class Effect7061(BaseEffect):
    """Projected gang effect: applies the beacon's warfare buffs (slots 1-2)
    to the fit as early-run command bonuses."""
    runTime = 'early'
    type = ('projected', 'passive', 'gang')

    def handler(fit, beacon, context, projectionRange, **kwargs):
        for slot in range(1, 3):
            # Fetch the buff ID once (the original fetched it twice and
            # re-checked the same truthiness); falsy IDs mean "no buff".
            buff_id = beacon.getModifiedItemAttr('warfareBuff{}ID'.format(slot))
            if buff_id:
                value = beacon.getModifiedItemAttr('warfareBuff{}Value'.format(slot))
                fit.addCommandBonus(buff_id, value, beacon, kwargs['effect'], 'early')
def get_args_parser():
    """Build the argument parser for training the holistic edge attention
    transformer (add_help=False so it can compose with a parent parser)."""
    parser = argparse.ArgumentParser('Holistic edge attention transformer', add_help=False)
    # BUG FIX: removed a stray backtick at the end of the help text.
    parser.add_argument('--data_path', default='', help='path to the data')
    parser.add_argument('--lr', default=0.0002, type=float)
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--weight_decay', default=1e-05, type=float)
    parser.add_argument('--epochs', default=800, type=int)
    parser.add_argument('--lr_drop', default=600, type=int)
    parser.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm')
    parser.add_argument('--print_freq', default=40, type=int)
    parser.add_argument('--output_dir', default='./checkpoints/ckpts_s3d_256', help='path where to save, empty for no saving')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--image_size', default=256, type=int)
    parser.add_argument('--run_validation', action='store_true', help='Whether run validation or not, default: False')
    parser.add_argument('--distributed', action='store_true', help='Run distributed training')
    parser.add_argument('--hidden_dim', default=256, type=int, help='hidden dim of the core transformer-based model')
    parser.add_argument('--repeat_train', default=1, type=int, help='Repeat the training set for each epoch')
    return parser
def parse_gts(gts_list, num_classes):
    """Parse a ground-truth list file (as lines) into a nested dict.

    File layout per record, relative to a '#' marker line at offset 0:
    image path at +1, height at +3, width at +4, bbox count at +7, and one
    bbox per line from +8 ("label x1 y1 x2 y2").

    Returns a dict keyed by image basename (without extension) with
    height/width/bbox_num, per-label bbox lists and per-label detection
    flags, plus a global per-class box counter under the 'num' key.
    """
    logger.info('Start parsing gts list......')
    # (line_index, line) pairs for every record marker.
    index_info = [temp for temp in enumerate(gts_list) if temp[1].startswith('#')]
    gts = defaultdict(list)
    # NOTE(review): gts mixes the special 'num' counter array with per-image
    # dict entries under the same mapping.
    gts['num'] = np.zeros(num_classes)
    for i in range(len(index_info)):
        index = index_info[i][0]
        img_name = gts_list[(index + 1)].strip()
        # Basename without directory and without the 4-char extension.
        pure_name = img_name.split('/')[(- 1)][0:(- 4)]
        gts[pure_name] = defaultdict(list)
        gts[pure_name]['height'] = gts_list[(index + 3)].strip()
        gts[pure_name]['width'] = gts_list[(index + 4)].strip()
        gts[pure_name]['bbox_num'] = int(gts_list[(index + 7)])
        gts[pure_name]['bbox'] = defaultdict(list)
        for b in gts_list[(index + 8):((index + 8) + int(gts_list[(index + 7)]))]:
            b = b.split()
            label = int(b[0])
            x1 = int(b[1])
            y1 = int(b[2])
            x2 = int(b[3])
            y2 = int(b[4])
            gts[pure_name]['bbox'][label].append([x1, y1, x2, y2])
            gts['num'][label] += 1
        # One "already detected" flag per ground-truth box, per class.
        gts[pure_name]['is_det'] = defaultdict(list)
        for l in range(1, num_classes):
            gts[pure_name]['is_det'][l] = np.zeros(len(gts[pure_name]['bbox'][l]))
    logger.info('Done!')
    return gts
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 reduce, 3x3, 1x1 expand) followed by
    CBAM-style channel and spatial attention before the residual add.

    Attribute names are part of the state-dict layout and must not change.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.ca = ChannelAttention(planes * 4)
        self.sa = SpatialAttention()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        # Main path: conv-bn(-relu) x3, no relu after the expansion conv.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Attention: channel first, then spatial, each as a multiplicative gate.
        out = self.ca(out) * out
        out = self.sa(out) * out
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return self.relu(out)
class Log1mexp(UnaryScalarOp):
    """Elementwise log(1 - exp(x)) for x <= 0, computed stably by switching
    between log1p(-exp(x)) and log(-expm1(x)) at x = log(0.5) (Maechler's
    log1mexp technique)."""

    def static_impl(x):
        # NOTE(review): called as Log1mexp.static_impl(x); presumably a
        # @staticmethod whose decorator was lost in extraction.
        if (x < np.log(0.5)):
            return np.log1p((- np.exp(x)))
        else:
            return np.log((- np.expm1(x)))

    def impl(self, x):
        return Log1mexp.static_impl(x)

    def grad(self, inp, grads):
        """d/dx log(1-exp(x)) = -1/expm1(-x); guard the x=0 singularity by
        mapping an infinite result to -inf."""
        (x,) = inp
        (gz,) = grads
        res = true_div((- 1.0), expm1((- x)))
        res = switch(isinf(res), (- np.inf), res)
        return [(gz * res)]

    def c_code(self, node, name, inp, out, sub):
        (x,) = inp
        (z,) = out
        # NOTE(review): the C path switches branches at -0. while the Python
        # impl switches at log(0.5); both are mathematically equivalent but
        # differ in numerical behavior — confirm the threshold is intended.
        if (node.inputs[0].type in float_types):
            if (node.inputs[0].type == float64):
                return f'{z} = {x} < -0. ? log1p(-exp({x})) : log(-expm1({x}));'
            else:
                return f'{z} = {x} < -0.f ? log1p(-exp({x})) : log(-expm1({x}));'
        else:
            raise NotImplementedError('only floating point is implemented')
class MySortModel(QSortFilterProxyModel):
    """Proxy model sorting by a custom data role, with a numeric-then-text
    fallback when either item lacks role data."""

    def __init__(self, parent, *, sort_role):
        super().__init__(parent)
        # Role queried first when comparing two items.
        self._sort_role = sort_role

    def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
        """Compare two source items: by sort-role data when both provide it,
        otherwise numerically by their text, otherwise lexicographically."""
        from decimal import InvalidOperation
        item1 = self.sourceModel().itemFromIndex(source_left)
        item2 = self.sourceModel().itemFromIndex(source_right)
        data1 = item1.data(self._sort_role)
        data2 = item2.data(self._sort_role)
        if (data1 is not None) and (data2 is not None):
            return data1 < data2
        v1 = item1.text()
        v2 = item2.text()
        try:
            return Decimal(v1) < Decimal(v2)
        except (InvalidOperation, TypeError, ValueError):
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only catch conversion failures.
            return v1 < v2
# NOTE(review): this bare line appears to be a stripped decorator —
# presumably @pytest.mark.parametrize(...) in the original source; confirm.
.parametrize('value,order', [('bysource', ['Foo', 'decorator_okay', 'Bar']), ('alphabetical', ['Bar', 'Foo', 'decorator_okay']), ('groupwise', ['Bar', 'Foo', 'decorator_okay'])])
def test_order_members(builder, parse, value, order):
    """Build the pyexample docs with each autoapi_member_order setting and
    verify members appear in the generated HTML in the expected order."""
    confoverrides = {'autoapi_member_order': value, 'exclude_patterns': ['manualapi.rst']}
    builder('pyexample', warningiserror=True, confoverrides=confoverrides)
    example_file = parse('_build/html/autoapi/example/index.html')
    # The source line of each member anchor must increase monotonically.
    indexes = [example_file.find(id=f'example.{name}').sourceline for name in order]
    assert (indexes == sorted(indexes))
class Match2Match(nn.Module):
    """Transformer-based refinement of 4D source/target correlation tensors.

    Each cell of the (h1, w1, h2, w2) correlation grid is embedded, refined
    by a stack of attention + feed-forward blocks, projected back to a single
    correlation channel, then upsampled and flattened.
    """

    def __init__(self, feat_dims, luse):
        super(Match2Match, self).__init__()
        input_dim = 16      # embedding width per correlation cell
        layer_num = 6       # number of attention + FFN blocks
        expand_ratio = 4    # NOTE(review): defined but unused here
        bottlen = 26        # number of stacked correlation channels in
        # Flatten the 4D grid into a token sequence and embed each cell.
        self.to_embedding = nn.Sequential(Rearrange('b c h1 w1 h2 w2 -> b (h1 w1 h2 w2) c'), nn.Linear(bottlen, input_dim))
        # Learned positional encoding over the full 15^4 grid.
        self.posenc = nn.Parameter(torch.randn(15, 15, 15, 15, input_dim), requires_grad=True)
        layer_pos_emb = RotaryEmbedding(dim=4, freqs_for='pixel')
        # Project back to one channel and restore the 4D layout.
        self.to_original = nn.Sequential(nn.Linear(input_dim, 1), Rearrange('b (h1 w1 h2 w2) c -> b c h1 w1 h2 w2', h1=15, w1=15, h2=15, w2=15))
        self.trans_nc = nn.ModuleList([])
        for _ in range(layer_num):
            self.trans_nc.append(nn.ModuleList([PreNorm(input_dim, FastAttention(input_dim, heads=8, dim_head=4, pos_emb=layer_pos_emb)), PreNorm(input_dim, FeedForward(input_dim))]))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, src_feats, trg_feats):
        """Refine cosine-similarity correlations between the two feature sets;
        returns a (batch, side, side) flattened correlation matrix."""
        correlations = Geometry.cosine_similarity(src_feats, trg_feats)
        correlations = torch.stack(correlations, dim=1)
        correlations = correlations.squeeze(2)
        # Clamp negative similarities to zero before embedding.
        correlations = self.relu(correlations)
        (bsz, ch, side, _, _, _) = correlations.size()
        embedded_features = self.to_embedding(correlations)
        # Pre-norm transformer blocks with residual connections.
        for (attn, ff) in self.trans_nc:
            embedded_features = (attn(embedded_features) + embedded_features)
            embedded_features = (ff(embedded_features) + embedded_features)
        refined_corr = self.to_original(embedded_features)
        correlations = Geometry.interpolate4d(refined_corr.squeeze(1), Geometry.upsample_size).unsqueeze(1)
        # After upsampling, flatten both (h, w) pairs into one axis each.
        side = (correlations.size((- 1)) ** 2)
        correlations = correlations.view(bsz, side, side).contiguous()
        return correlations
class LineCounter():
    """Tracks absolute character position, line and column while consuming
    tokens, using a configurable newline character."""
    __slots__ = ('char_pos', 'line', 'column', 'line_start_pos', 'newline_char')

    def __init__(self, newline_char):
        self.newline_char = newline_char
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def __eq__(self, other):
        if not isinstance(other, LineCounter):
            return NotImplemented
        return self.char_pos == other.char_pos and self.newline_char == other.newline_char

    def feed(self, token: Token, test_newline=True):
        """Consume *token*, updating position bookkeeping.

        Pass test_newline=False when the token is known to contain no
        newline, to skip the newline scan.
        """
        if test_newline:
            newline_count = token.count(self.newline_char)
            if newline_count:
                self.line += newline_count
                # Column restarts after the last newline in the token.
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1
        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1
class Logger():
    """Verbosity-filtered logger bound to an output stream, with CPU and
    wall-clock reference times for the timer helpers.

    The logging methods are module-level functions bound as class attributes;
    each takes the logger instance as its first argument.
    """

    def __init__(self, stdout=sys.stdout, verbose=NOTE):
        self.stdout = stdout
        self.verbose = verbose
        # Reference times used by the timer methods.
        self._t0 = process_clock()
        self._w0 = perf_counter()
    # Module-level functions exposed as methods.
    log = log
    error = error
    warn = warn
    note = note
    info = info
    debug = debug
    debug1 = debug1
    debug2 = debug2
    debug3 = debug3
    debug4 = debug4
    timer = timer
    timer_debug1 = timer_debug1
class _IHDRChunk(_Chunk):
    """A PNG IHDR chunk, carrying the image pixel dimensions."""

    def __init__(self, chunk_type, px_width, px_height):
        super(_IHDRChunk, self).__init__(chunk_type)
        self._px_width = px_width
        self._px_height = px_height

    def from_offset(cls, chunk_type, stream_rdr, offset):
        """Construct from the stream: width is the long at *offset*, height
        the long 4 bytes after it.

        NOTE(review): takes `cls` — presumably a @classmethod whose decorator
        was lost in extraction; confirm.
        """
        px_width = stream_rdr.read_long(offset)
        px_height = stream_rdr.read_long(offset, 4)
        return cls(chunk_type, px_width, px_height)

    def px_width(self):
        # NOTE(review): likely a @property in the original source.
        return self._px_width

    def px_height(self):
        # NOTE(review): likely a @property in the original source.
        return self._px_height
class TCPClient(RawTCPClient):
    """ONC-RPC TCP client that resolves the server's port via the host's
    portmapper before connecting."""

    def __init__(self, host, prog, vers, open_timeout=5000):
        pmap = TCPPortMapperClient(host, open_timeout)
        try:
            # Ask the portmapper which TCP port serves (prog, vers).
            port = pmap.get_port((prog, vers, IPPROTO_TCP, 0))
        finally:
            # BUG FIX: the portmapper connection leaked when get_port raised;
            # always release it.
            pmap.close()
        if (port == 0):
            raise RPCError('program not registered')
        RawTCPClient.__init__(self, host, prog, vers, port, open_timeout)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.