def cal_train_time(log_dicts, args):
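"""Print per-epoch iteration-time statistics for each parsed json log.

Assumes each log_dict maps epoch -> {'time': [per-iteration times, ...]}; the
first iteration of every epoch is dropped unless --include-outliers is set,
since it usually carries warm-up overhead.
"""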
for (i, log_dict) in enumerate(log_dicts):
print('{}Analyze train time of {}{}'.format(('-' * 5), args.json_logs[i], ('-' * 5)))
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
all_times = np.array(all_times)
epoch_ave_time = all_times.mean((- 1))
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print('slowest epoch {}, average time is {:.4f}'.format((slowest_epoch + 1), epoch_ave_time[slowest_epoch]))
print('fastest epoch {}, average time is {:.4f}'.format((fastest_epoch + 1), epoch_ave_time[fastest_epoch]))
print('time std over epochs is {:.4f}'.format(std_over_epoch))
print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
print()

def check_any_dt(loader):
raises_exc(TypeLoadError(str, None), (lambda : loader(None)))
raises_exc(TypeLoadError(str, 10), (lambda : loader(10)))
raises_exc(TypeLoadError(str, datetime(2011, 11, 4, 0, 0)), (lambda : loader(datetime(2011, 11, 4, 0, 0))))
raises_exc(TypeLoadError(str, date(2019, 12, 4)), (lambda : loader(date(2019, 12, 4))))
raises_exc(TypeLoadError(str, time(4, 23, 1)), (lambda : loader(time(4, 23, 1))))

def test(arg=None):
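"""Smoke-test a Pool implementation's map/apply/imap APIs and their async
variants; pass '-v' to print progress."""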
if (arg == '-v'):
def say(*x):
print(*x)
else:
def say(*x):
pass
say('Start Pool testing')
get_tid = (lambda : threading.current_thread().ident)
def return42():
return 42
def f(x):
return (x * x)
def work(mseconds):
res = str(mseconds)
if (mseconds < 0):
mseconds = (- mseconds)
say(('[%d] Start to work for %fms...' % (get_tid(), (mseconds * 10))))
time.sleep((mseconds / 100.0))
say(('[%d] Work done (%fms).' % (get_tid(), (mseconds * 10))))
return res
pool = Pool(4)
assert (pool.map(return42, []) == [])
assert (pool.apply_async(return42, []).get() == 42)
assert (pool.apply(return42, []) == 42)
assert (list(pool.imap(return42, iter([]))) == [])
assert (list(pool.imap_unordered(return42, iter([]))) == [])
assert (pool.map_async(return42, []).get() == [])
assert (list(pool.imap_async(return42, iter([])).get()) == [])
assert (list(pool.imap_unordered_async(return42, iter([])).get()) == [])
result = pool.apply_async(f, (10,))
assert (result.get(timeout=1) == 100)
assert (list(pool.map(f, range(10))) == list(map(f, range(10))))
it = pool.imap(f, range(10))
assert (next(it) == 0)
assert (next(it) == 1)
assert (next(it) == 4)
result = pool.apply_async(time.sleep, (3,))
try:
say(result.get(timeout=1))
except TimeoutError:
say('Good. Got expected timeout exception.')
else:
assert False, 'Expected exception !'
assert (result.get() is None)
def cb(s):
say(('Result ready: %s' % s))
assert (list(pool.imap(work, range(10, 3, (- 1)), chunksize=4)) == list(map(str, range(10, 3, (- 1)))))
assert (sorted(pool.imap_unordered(work, range(10, 3, (- 1)))) == sorted(map(str, range(10, 3, (- 1)))))
result = pool.map_async(work, range(10), callback=cb)
try:
result.get(timeout=0.01)
except TimeoutError:
say('Good. Got expected timeout exception.')
else:
assert False, 'Expected exception !'
say(result.get())
result = pool.imap_async(work, range(3, 10), callback=cb)
try:
result.get(timeout=0.01)
except TimeoutError:
say('Good. Got expected timeout exception.')
else:
assert False, 'Expected exception !'
for i in result.get():
say('Item:', i)
say('### Loop again:')
for i in result.get():
say('Item2:', i)
result = pool.imap_unordered_async(work, range(10, 3, (- 1)), callback=cb)
try:
say(result.get(timeout=0.01))
except TimeoutError:
say('Good. Got expected timeout exception.')
else:
assert False, 'Expected exception !'
for i in result.get():
say('Item1:', i)
for i in result.get():
say('Item2:', i)
r = result.get()
for i in r:
say('Item3:', i)
for i in r:
say('Item4:', i)
for i in r:
say('Item5:', i)
result = pool.imap_unordered_async(work, range(2, (- 10), (- 1)), callback=cb)
time.sleep(3)
try:
for i in result.get():
say('Got item:', i)
except (IOError, ValueError):
say('Good. Got expected exception')
result = pool.imap_async(work, range(2, (- 10), (- 1)), callback=cb)
time.sleep(3)
try:
for i in result.get():
say('Got item:', i)
except (IOError, ValueError):
say('Good. Got expected exception')
pool.terminate()
pool.join()

class NordStyle(Style):
name = 'nord'
line_number_color = '#D8DEE9'
line_number_background_color = '#242933'
line_number_special_color = '#242933'
line_number_special_background_color = '#D8DEE9'
background_color = '#2E3440'
highlight_color = '#3B4252'
styles = {
Token: '#d8dee9', Whitespace: '#d8dee9', Punctuation: '#eceff4',
Comment: 'italic #616e87', Comment.Preproc: '#5e81ac',
Keyword: 'bold #81a1c1', Keyword.Pseudo: 'nobold #81a1c1', Keyword.Type: 'nobold #81a1c1',
Operator: 'bold #81a1c1', Operator.Word: 'bold #81a1c1',
Name: '#d8dee9', Name.Builtin: '#81a1c1', Name.Function: '#88c0d0', Name.Class: '#8fbcbb',
Name.Namespace: '#8fbcbb', Name.Exception: '#bf616a', Name.Variable: '#d8dee9',
Name.Constant: '#8fbcbb', Name.Entity: '#d08770', Name.Attribute: '#8fbcbb',
Name.Tag: '#81a1c1', Name.Decorator: '#d08770',
String: '#a3be8c', String.Doc: '#616e87', String.Interpol: '#a3be8c',
String.Escape: '#ebcb8b', String.Regex: '#ebcb8b', String.Symbol: '#a3be8c',
String.Other: '#a3be8c', Number: '#b48ead',
Generic.Heading: 'bold #88c0d0', Generic.Subheading: 'bold #88c0d0',
Generic.Deleted: '#bf616a', Generic.Inserted: '#a3be8c', Generic.Error: '#bf616a',
Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.EmphStrong: 'bold italic',
Generic.Prompt: 'bold #616e88', Generic.Output: '#d8dee9', Generic.Traceback: '#bf616a',
Error: '#bf616a', Text: '#d8dee9'}

@pytest.mark.parametrize('type', ['Error', 'Failure'])
def test_testcase_custom_exception_info(pytester: Pytester, type: str) -> None:
pytester.makepyfile(('\n from typing import Generic, TypeVar\n from unittest import TestCase\n import pytest, _pytest._code\n\n class MyTestCase(TestCase):\n def run(self, result):\n excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)\n # We fake an incompatible exception info.\n class FakeExceptionInfo(Generic[TypeVar("E")]):\n def __init__(self, *args, **kwargs):\n mp.undo()\n raise TypeError()\n \n def from_current(cls):\n return cls()\n \n def from_exc_info(cls, *args, **kwargs):\n return cls()\n mp = pytest.MonkeyPatch()\n mp.setattr(_pytest._code, \'ExceptionInfo\', FakeExceptionInfo)\n try:\n excinfo = excinfo._excinfo\n result.add%(type)s(self, excinfo)\n finally:\n mp.undo()\n\n def test_hello(self):\n pass\n ' % locals()))
result = pytester.runpytest()
result.stdout.fnmatch_lines(['NOTE: Incompatible Exception Representation*', '*ZeroDivisionError*', '*1 failed*'])

class DeterministicMLPRegressor(LayersPowered, Serializable):
def __init__(self, name, input_shape, output_dim, network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, output_nonlinearity=None, optimizer=None, normalize_inputs=True):
Serializable.quick_init(self, locals())
with tf.variable_scope(name):
if (optimizer is None):
optimizer = LbfgsOptimizer(name='optimizer')
self.output_dim = output_dim
self.optimizer = optimizer
if (network is None):
network = MLP(input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, name='network')
l_out = network.output_layer
LayersPowered.__init__(self, [l_out])
xs_var = network.input_layer.input_var
ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name='ys')
x_mean_var = tf.get_variable(name='x_mean', shape=((1,) + input_shape), initializer=tf.constant_initializer(0.0, dtype=tf.float32))
x_std_var = tf.get_variable(name='x_std', shape=((1,) + input_shape), initializer=tf.constant_initializer(1.0, dtype=tf.float32))
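# Inputs are whitened with stored statistics before the forward pass; x_mean/x_std
# default to 0/1 (a no-op) and are presumably updated by a fit routine not shown here.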
normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
fit_ys_var = L.get_output(l_out, {network.input_layer: normalized_xs_var})
loss = tf.reduce_mean(tf.square(fit_ys_var - ys_var))  # mean squared error to minimize
self.f_predict = tensor_utils.compile_function([xs_var], fit_ys_var)
optimizer_args = dict(loss=loss, target=self, network_outputs=[fit_ys_var])
optimizer_args['inputs'] = [xs_var, ys_var]
self.optimizer.update_opt(**optimizer_args)
self.name = name
self.l_out = l_out
self.normalize_inputs = normalize_inputs
self.x_mean_var = x_mean_var
self.x_std_var = x_std_var
def predict_sym(self, xs):
return L.get_output(self.l_out, xs)
def predict(self, xs):
return self.f_predict(np.asarray(xs))
def get_param_values(self, **tags):
return LayersPowered.get_param_values(self, **tags)
def set_param_values(self, flattened_params, **tags):
return LayersPowered.set_param_values(self, flattened_params, **tags)

@BACKBONES.register_module()
class TridentResNet(ResNet):
def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, **kwargs):
assert (num_branch == len(trident_dilations))
assert (depth in (50, 101, 152))
super(TridentResNet, self).__init__(depth, **kwargs)
assert (self.num_stages == 3)
self.test_branch_idx = test_branch_idx
self.num_branch = num_branch
last_stage_idx = (self.num_stages - 1)
stride = self.strides[last_stage_idx]
dilation = trident_dilations
dcn = (self.dcn if self.stage_with_dcn[last_stage_idx] else None)
if (self.plugins is not None):
stage_plugins = self.make_stage_plugins(self.plugins, last_stage_idx)
else:
stage_plugins = None
planes = (self.base_channels * (2 ** last_stage_idx))
res_layer = make_trident_res_layer(TridentBottleneck, inplanes=((self.block.expansion * self.base_channels) * (2 ** (last_stage_idx - 1))), planes=planes, num_blocks=self.stage_blocks[last_stage_idx], stride=stride, trident_dilations=dilation, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, test_branch_idx=self.test_branch_idx)
layer_name = f'layer{(last_stage_idx + 1)}'
self.__setattr__(layer_name, res_layer)
self.res_layers.pop(last_stage_idx)
self.res_layers.insert(last_stage_idx, layer_name)
self._freeze_stages()

class TestSetContent(BaseTestCase):
expectedOutput = '<html><head></head><body><div>hello</div></body></html>'
async def test_set_content(self):
(await self.page.setContent('<div>hello</div>'))
result = (await self.page.content())
self.assertEqual(result, self.expectedOutput)
async def test_with_doctype(self):
doctype = '<!DOCTYPE html>'
(await self.page.setContent((doctype + '<div>hello</div>')))
result = (await self.page.content())
self.assertEqual(result, (doctype + self.expectedOutput))
async def test_with_html4_doctype(self):
doctype = '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">'
(await self.page.setContent((doctype + '<div>hello</div>')))
result = (await self.page.content())
self.assertEqual(result, (doctype + self.expectedOutput))

class IncSubtensor(COp):
check_input = False
__props__ = ('idx_list', 'inplace', 'set_instead_of_inc')
def __init__(self, idx_list, inplace=False, set_instead_of_inc=False, destroyhandler_tolerate_aliased=None):
if (destroyhandler_tolerate_aliased is None):
destroyhandler_tolerate_aliased = []
self.idx_list = list(map(index_vars_to_types, idx_list))
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
self.destroyhandler_tolerate_aliased = list(destroyhandler_tolerate_aliased)
self.set_instead_of_inc = set_instead_of_inc
def __hash__(self):
idx_list = tuple((((entry.start, entry.stop, entry.step) if isinstance(entry, slice) else entry) for entry in self.idx_list))
return hash((type(self), idx_list, self.inplace, self.set_instead_of_inc))
def __str__(self):
name = ('SetSubtensor' if self.set_instead_of_inc else 'IncSubtensor')
return f'{name}{{{Subtensor.str_from_indices(self.idx_list)}}}'
def make_node(self, x, y, *inputs):
(x, y) = map(as_tensor_variable, [x, y])
if (y.ndim > x.ndim):
raise ValueError(f'Trying to increment a {int(x.ndim)}-dimensional subtensor with a {int(y.ndim)}-dimensional value.')
inputs = tuple(map(as_nontensor_scalar, inputs))
idx_list = list(self.idx_list)
if (len(idx_list) > x.type.ndim):
raise IndexError('too many indices for array')
input_types = get_slice_elements(idx_list, (lambda entry: isinstance(entry, Type)))
if (len(inputs) != len(input_types)):
raise IndexError('Not enough inputs to fill in the Subtensor template.', inputs, idx_list)
for (input, expected_type) in zip(inputs, input_types):
if (not expected_type.is_super(input.type)):
raise TypeError(f'Wrong type for Subtensor template. Expected {expected_type}, got {input.type}.')
return Apply(self, ((x, y) + inputs), [x.type()])
def decl_view(self):
return 'PyArrayObject * zview = NULL;'
def perform(self, node, inputs, out_):
(out,) = out_
(x, y) = inputs[:2]
indices = list(reversed(inputs[2:]))
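# The scalar index inputs are reversed so that _convert can pop() them back in
# their original order while walking idx_list.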
def _convert(entry):
if isinstance(entry, Type):
return indices.pop()
elif isinstance(entry, slice):
return slice(_convert(entry.start), _convert(entry.stop), _convert(entry.step))
else:
return entry
cdata = tuple(map(_convert, self.idx_list))
if (len(cdata) == 1):
cdata = cdata[0]
if (not self.inplace):
x = x.copy()
sub_x = x.__getitem__(cdata)
if sub_x.shape:
if (not self.set_instead_of_inc):
sub_x += y
else:
x.__setitem__(cdata, y)
elif (not self.set_instead_of_inc):
x.__setitem__(cdata, (sub_x + y))
else:
x.__setitem__(cdata, y)
out[0] = x
def c_code(self, node, name, inputs, outputs, sub):
self.do_type_checking(node)
if self.inplace:
inplace = 1
else:
inplace = 0
x = inputs[0]
y = inputs[1]
(z,) = outputs
if self.set_instead_of_inc:
op_is_set = 1
else:
op_is_set = 0
fail = sub['fail']
view_ndim = (node.inputs[0].ndim - sum(((not isinstance(idx, slice)) for idx in self.idx_list)))
copy_of_x = self.copy_of_x(x)
copy_input_if_necessary = ('\n if (%(inplace)s)\n {\n if (%(x)s != %(z)s)\n {\n Py_XDECREF(%(z)s);\n Py_INCREF(%(x)s);\n %(z)s = %(x)s;\n }\n }\n else\n {\n Py_XDECREF(%(z)s);\n %(z)s = %(copy_of_x)s;\n if (!%(z)s) {\n // Exception already set\n %(fail)s\n }\n }\n ' % locals())
helper_args = self.get_helper_c_code_args()
get_zview = Subtensor.helper_c_code(node=node, name=name, inputs=(outputs[:1] + inputs[2:]), outputs=outputs, sub=sub, idx_list=self.idx_list, view_ndim=view_ndim, **helper_args)
alloc_zview = self.make_view_array(z, view_ndim)
build_view = ('\n //TODO: give this Op a second output so that this view can be cached\n //TODO: alternatively, fix the memory leak on failure\n %(alloc_zview)s;\n if (!zview)\n {\n %(fail)s;\n }\n ' % locals())
copy_into = self.copy_into('zview', y)
add_to_zview = self.add_to_zview(name, y, fail)
make_modification = ('\n if (%(op_is_set)s)\n {\n if (%(copy_into)s) // does broadcasting\n {\n Py_DECREF(zview);\n %(fail)s;\n }\n }\n else\n {\n %(add_to_zview)s\n }\n ' % locals())
return (((((((self.decl_view() + copy_input_if_necessary) + '{') + get_zview) + build_view) + make_modification) + 'Py_DECREF(zview);') + '}')
def do_type_checking(self, node):
if (not isinstance(node.inputs[0].type, TensorType)):
raise NotImplementedError()
def c_code_cache_version(self):
hv = Subtensor.helper_c_code_cache_version()
if hv:
return (3, hv)
else:
return ()
def copy_of_x(self, x):
return f'''(PyArrayObject*)PyArray_FromAny(py_{x}, NULL, 0, 0,
NPY_ARRAY_ENSURECOPY, NULL)'''
def make_view_array(self, x, view_ndim):
return ('Py_INCREF(PyArray_DESCR(%(x)s));\n zview = (PyArrayObject*)PyArray_NewFromDescr(\n &PyArray_Type,\n PyArray_DESCR(%(x)s),\n %(view_ndim)s,\n xview_dims, //PyArray_DIMS(%(x)s),\n xview_strides, //PyArray_STRIDES(%(x)s),\n PyArray_BYTES(%(x)s) + xview_offset, //PyArray_DATA(%(x)s),\n PyArray_FLAGS(%(x)s),\n NULL);\n ' % locals())
def get_helper_c_code_args(self):
return Subtensor.default_helper_c_code_args()
def copy_into(self, view, source):
return f'PyArray_CopyInto({view}, {source})'
def add_to_zview(self, name, x, fail):
return ('\n PyArrayObject * add_rval = (PyArrayObject*)PyNumber_InPlaceAdd(\n (PyObject*)zview, py_%(x)s);\n if (add_rval)\n {\n assert (PyArray_Check((PyObject*)add_rval));\n assert (PyArray_DATA(add_rval) == PyArray_DATA(zview));\n Py_DECREF(add_rval);\n }\n else\n {\n Py_DECREF(zview);\n %(fail)s;\n }' % locals())
def infer_shape(self, fgraph, node, shapes):
return [shapes[0]]
def R_op(self, inputs, eval_points):
if ((eval_points[0] is None) or (eval_points[1] is None)):
return [None]
return self(eval_points[0], eval_points[1], *inputs[2:], return_list=True)
def connection_pattern(self, node):
rval = [[True], [True]]
for ipt in node.inputs[2:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
(g_output,) = grads
(x, y) = inputs[:2]
idx_list = inputs[2:]
if (x.dtype in discrete_dtypes):
gx = x.zeros_like(dtype=config.floatX)
if (y.dtype in discrete_dtypes):
gy = y.zeros_like(dtype=config.floatX)
else:
gy = y.zeros_like()
elif (x.dtype in complex_dtypes):
raise NotImplementedError('No support for complex grad yet')
else:
if self.set_instead_of_inc:
gx = set_subtensor(Subtensor(idx_list=self.idx_list)(g_output, *idx_list), pytensor.tensor.zeros_like(y))
else:
gx = g_output
gy = Subtensor(idx_list=self.idx_list)(g_output, *idx_list)
gy = _sum_grad_over_bcasted_dims(y, gy)
return ([gx, gy] + ([DisconnectedType()()] * len(idx_list)))

class UsersViewsTestCase(TestCase):
def setUp(self):
self.user = UserFactory(username='username', password='password', email='', search_visibility=User.SEARCH_PUBLIC, membership=None)
self.user2 = UserFactory(username='spameggs', password='password', search_visibility=User.SEARCH_PRIVATE, email_privacy=User.EMAIL_PRIVATE, public_profile=False)
def assertUserCreated(self, data=None, template_name='account/verification_sent.html'):
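"""Sign up a user through the account_signup view, assert the expected
template rendered, and return the final response."""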
post_data = {'username': 'guido', 'email': '', 'password1': 'password', 'password2': 'password', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
post_data.update((data or {}))
url = reverse('account_signup')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name)
user = User.objects.get(username=post_data['username'])
self.assertEqual(user.username, post_data['username'])
self.assertEqual(user.email, post_data['email'])
return response
def test_membership_create(self):
url = reverse('users:user_membership_create')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.client.login(username='username', password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
post_data = {'legal_name': 'Some Name', 'preferred_name': 'Sommy', 'email_address': '', 'city': 'Lawrence', 'region': 'Kansas', 'country': 'USA', 'postal_code': '66044', 'psf_code_of_conduct': True, 'psf_announcements': True, settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('users:user_membership_thanks'))
def test_membership_update(self):
url = reverse('users:user_membership_edit')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertTrue(self.user2.has_membership)
self.client.login(username=self.user2.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
post_data = {'legal_name': 'Some Name', 'preferred_name': 'Sommy', 'email_address': '', 'city': 'Lawrence', 'region': 'Kansas', 'country': 'USA', 'postal_code': '66044', 'psf_announcements': True, settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302)
def test_membership_update_404(self):
url = reverse('users:user_membership_edit')
self.assertFalse(self.user.has_membership)
self.client.login(username=self.user.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_user_has_already_have_membership(self):
url = reverse('users:user_membership_create')
self.assertTrue(self.user2.has_membership)
self.client.login(username=self.user2.username, password='password')
response = self.client.get(url)
self.assertRedirects(response, reverse('users:user_membership_edit'))
def test_user_update(self):
self.client.login(username='username', password='password')
url = reverse('users:user_profile_edit')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
post_data = {settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 200)
def test_user_update_redirect(self):
self.client.login(username='username', password='password')
url = reverse('users:user_profile_edit')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
post_data = {'username': 'username', 'search_visibility': 0, 'email_privacy': 1, 'public_profile': False, 'email': '', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
response = self.client.post(url, post_data)
profile_url = reverse('users:user_detail', kwargs={'slug': 'username'})
self.assertRedirects(response, profile_url)
another_user_url = reverse('users:user_detail', kwargs={'slug': 'spameggs'})
response = self.client.get(another_user_url)
self.assertEqual(response.status_code, 404)
self.client.logout()
response = self.client.get(profile_url)
self.assertEqual(response.status_code, 404)
def test_user_detail(self):
detail_url = reverse('users:user_detail', kwargs={'slug': self.user.username})
edit_url = reverse('users:user_profile_edit')
response = self.client.get(detail_url)
self.assertTrue(self.user.is_active)
self.assertNotContains(response, edit_url)
self.client.login(username='username', password='password')
response = self.client.get(detail_url)
self.assertContains(response, edit_url)
user = User.objects.create_user(username='foobar', password='baz', email='')
user.is_active = False
user.save()
self.assertFalse(user.is_active)
detail_url = reverse('users:user_detail', kwargs={'slug': user.username})
response = self.client.get(detail_url)
self.assertEqual(response.status_code, 404)
def test_special_usernames(self):
u1 = User.objects.create_user(username='user.name', password='password')
detail_url = reverse('users:user_detail', kwargs={'slug': u1.username})
edit_url = reverse('users:user_profile_edit')
self.client.login(username=u1.username, password='password')
response = self.client.get(detail_url)
self.assertEqual(response.status_code, 200)
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
u2 = User.objects.create_user(username='', password='password')
detail_url = reverse('users:user_detail', kwargs={'slug': u2.username})
edit_url = reverse('users:user_profile_edit')
self.client.login(username=u2.username, password='password')
response = self.client.get(detail_url)
self.assertEqual(response.status_code, 200)
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
def test_user_new_account(self):
self.assertUserCreated(data={'username': 'thisusernamedoesntexist', 'email': '', 'password1': 'password', 'password2': 'password'})
def test_user_duplicate_username_email(self):
post_data = {'username': 'thisusernamedoesntexist', 'email': '', 'password1': 'password', 'password2': 'password'}
self.assertUserCreated(data=post_data)
response = self.assertUserCreated(data=post_data, template_name='account/signup.html')
self.assertContains(response, 'A user with that username already exists.')
self.assertContains(response, 'A user is already registered with this e-mail address.')
def test_usernames(self):
url = reverse('account_signup')
usernames = ['foaso+bar', 'foo.barahgs', '', 'foo.baarBAZ']
post_data = {'username': 'thisusernamedoesntexist', 'email': '', 'password1': 'password', 'password2': 'password', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
for (i, username) in enumerate(usernames):
with self.subTest(i=i, username=username):
post_data.update({'username': username, 'email': f'foo{i}'})
response = self.client.post(url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'account/verification_sent.html')
def test_is_active_login(self):
url = reverse('account_login')
user = UserFactory(is_active=False)
data = {'login': user.username, 'password': 'password'}
response = self.client.post(url, data)
self.assertRedirects(response, reverse('account_inactive'))
url = reverse('users:user_membership_create')
response = self.client.get(url)
self.assertRedirects(response, '{}?next={}'.format(reverse('account_login'), url))
def test_user_delete_needs_to_be_logged_in(self):
url = reverse('users:user_delete', kwargs={'slug': self.user.username})
response = self.client.delete(url)
self.assertRedirects(response, '{}?next={}'.format(reverse('account_login'), url))
def test_user_delete_invalid_request_method(self):
url = reverse('users:user_delete', kwargs={'slug': self.user.username})
self.client.login(username=self.user.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 405)
def test_user_delete_different_user(self):
url = reverse('users:user_delete', kwargs={'slug': self.user.username})
self.client.login(username=self.user2.username, password='password')
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
def test_user_delete(self):
url = reverse('users:user_delete', kwargs={'slug': self.user.username})
self.client.login(username=self.user.username, password='password')
response = self.client.delete(url)
self.assertRedirects(response, reverse('home'))
self.assertRaises(User.DoesNotExist, User.objects.get, username=self.user.username)
self.assertRaises(Membership.DoesNotExist, Membership.objects.get, creator=self.user)
def test_membership_delete_needs_to_be_logged_in(self):
url = reverse('users:user_membership_delete', kwargs={'slug': self.user2.username})
response = self.client.delete(url)
self.assertRedirects(response, '{}?next={}'.format(reverse('account_login'), url))
def test_membership_delete_invalid_request_method(self):
url = reverse('users:user_membership_delete', kwargs={'slug': self.user2.username})
self.client.login(username=self.user2.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 405)
def test_membership_delete_different_user_membership(self):
user = UserFactory()
self.assertTrue(user.has_membership)
url = reverse('users:user_membership_delete', kwargs={'slug': user.username})
self.client.login(username=self.user2.username, password='password')
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
def test_membership_does_not_exist(self):
self.assertFalse(self.user.has_membership)
url = reverse('users:user_membership_delete', kwargs={'slug': self.user.username})
self.client.login(username=self.user.username, password='password')
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_membership_delete(self):
self.assertTrue(self.user2.has_membership)
url = reverse('users:user_membership_delete', kwargs={'slug': self.user2.username})
self.client.login(username=self.user2.username, password='password')
response = self.client.delete(url)
self.assertRedirects(response, reverse('users:user_detail', kwargs={'slug': self.user2.username}))
with self.assertRaises(Membership.DoesNotExist):
Membership.objects.get(pk=self.user2.membership.pk)
def test_password_change_honeypot(self):
url = reverse('account_change_password')
data = {'oldpassword': 'password', 'password1': 'newpassword', 'password2': 'newpassword'}
self.client.login(username=self.user.username, password='password')
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 400)
data[settings.HONEYPOT_FIELD_NAME] = settings.HONEYPOT_VALUE
response = self.client.post(url, data, follow=True)
self.assertRedirects(response, reverse('users:user_profile_edit'))
self.client.logout()
logged_in = self.client.login(username=self.user.username, password='newpassword')
self.assertTrue(logged_in)

def filter_by_aspect(dataset, aspect_filter, use_attribute=False):
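"""Yield copies of the examples with per-aspect sentiment reduced by majority
vote; ties between positive and negative become neutral. aspect_filter, when
given, whitelists the raw aspect labels; use_attribute controls whether the
'#'-separated attribute part of an aspect label is kept or stripped."""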
for example in dataset:
example = copy(example)
aspect_sentiment = defaultdict(list)
for (a, b) in example['aspect_sentiment']:
if ((aspect_filter is not None) and (a not in aspect_filter)):
continue
if (not use_attribute):
new_a = (a[:a.find('#')] if ('#' in a) else a)
else:
new_a = a.replace('#', ' ').replace('_', ' ')
aspect_sentiment[new_a].append(b)
correct_aspect_sentiment = dict()
for a in aspect_sentiment:
c = Counter(aspect_sentiment[a])
for s in ['positive', 'negative', 'neutral']:
if (s not in c):
c[s] = 0
if (c['positive'] == c['negative']):
correct_aspect_sentiment[a] = 'neutral'
elif (c['positive'] > c['negative']):
correct_aspect_sentiment[a] = 'positive'
else:
correct_aspect_sentiment[a] = 'negative'
example['aspect_sentiment'] = list(correct_aspect_sentiment.items())
if (len(example['aspect_sentiment']) > 0):
(yield example)

class Dilation2d(nn.Layer):
def __init__(self, m=1):
super(Dilation2d, self).__init__()
self.m = m
self.pad = [m, m, m, m]
def forward(self, x):
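# Morphological dilation: take the max over every (2m+1) x (2m+1) window via
# unfold. Note that zero padding is the identity for max only if the input is
# non-negative.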
(batch_size, c, h, w) = x.shape
x_pad = F.pad(x, pad=self.pad, mode='constant', value=(- .0))
channel = nn.functional.unfold(x_pad, ((2 * self.m) + 1), strides=1, paddings=0).reshape([batch_size, c, (- 1), h, w])
result = paddle.max(channel, axis=2)
return result

class LNChannelVerifier(NetworkJobOnDefaultServer):
def __init__(self, network: 'Network', channel_db: 'ChannelDB'):
self.channel_db = channel_db
self.lock = threading.Lock()
self.unverified_channel_info = {}
self.blacklist = set()
NetworkJobOnDefaultServer.__init__(self, network)
def _reset(self):
super()._reset()
self.started_verifying_channel = set()
def add_new_channel_info(self, short_channel_id: ShortChannelID, msg: dict) -> bool:
if (short_channel_id in self.unverified_channel_info):
return False
if (short_channel_id in self.blacklist):
return False
with self.lock:
self.unverified_channel_info[short_channel_id] = msg
return True
async def _start_tasks(self):
async with self.taskgroup as group:
(await group.spawn(self.main))
async def main(self):
while True:
(await self._verify_some_channels())
(await asyncio.sleep(0.1))
async def _verify_some_channels(self):
blockchain = self.network.blockchain()
local_height = blockchain.height()
with self.lock:
unverified_channel_info = list(self.unverified_channel_info)
for short_channel_id in unverified_channel_info:
if (short_channel_id in self.started_verifying_channel):
continue
block_height = short_channel_id.block_height
if ((block_height <= 0) or (block_height > local_height)):
continue
header = blockchain.read_header(block_height)
if (header is None):
if (block_height < constants.net.max_checkpoint()):
(await self.taskgroup.spawn(self.network.request_chunk(block_height, None, can_return_early=True)))
continue
self.started_verifying_channel.add(short_channel_id)
(await self.taskgroup.spawn(self.verify_channel(block_height, short_channel_id)))
async def verify_channel(self, block_height: int, short_channel_id: ShortChannelID):
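# Verify an announced channel: fetch the funding tx by (block height, tx position),
# check its merkle proof against our header, and confirm the funding output pays
# to the expected P2WSH script before marking the channel verified.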
try:
result = (await self.network.get_txid_from_txpos(block_height, short_channel_id.txpos, True))
except aiorpcx.jsonrpc.RPCError:
self._blacklist_short_channel_id(short_channel_id)
return
tx_hash = result['tx_hash']
merkle_branch = result['merkle']
async with self.network.bhi_lock:
header = self.network.blockchain().read_header(block_height)
try:
verify_tx_is_in_block(tx_hash, merkle_branch, short_channel_id.txpos, header, block_height)
except MerkleVerificationFailure as e:
raise GracefulDisconnect(e) from e
try:
raw_tx = (await self.network.get_transaction(tx_hash))
except aiorpcx.jsonrpc.RPCError as e:
raise GracefulDisconnect(e) from e
tx = Transaction(raw_tx)
try:
tx.deserialize()
except Exception:
self.logger.warning(f'cannot deserialize transaction, skipping {tx_hash}')
return
if (tx_hash != tx.txid()):
self.logger.info(f'received tx does not match expected txid ({tx_hash} != {tx.txid()})')
return
chan_ann_msg = self.unverified_channel_info[short_channel_id]
redeem_script = funding_output_script_from_keys(chan_ann_msg['bitcoin_key_1'], chan_ann_msg['bitcoin_key_2'])
expected_address = bitcoin.redeem_script_to_address('p2wsh', redeem_script)
try:
actual_output = tx.outputs()[short_channel_id.output_index]
except IndexError:
self._blacklist_short_channel_id(short_channel_id)
return
if (expected_address != actual_output.address):
self.logger.info(f'funding output script mismatch for {short_channel_id}')
self._remove_channel_from_unverified_db(short_channel_id)
return
self.channel_db.add_verified_channel_info(chan_ann_msg, capacity_sat=actual_output.value)
self._remove_channel_from_unverified_db(short_channel_id)
def _remove_channel_from_unverified_db(self, short_channel_id: ShortChannelID):
with self.lock:
self.unverified_channel_info.pop(short_channel_id, None)
self.started_verifying_channel.discard(short_channel_id)
def _blacklist_short_channel_id(self, short_channel_id: ShortChannelID) -> None:
self.blacklist.add(short_channel_id)
with self.lock:
self.unverified_channel_info.pop(short_channel_id, None)

class HerbertTokenizer(XLMTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, merges_file, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', do_lowercase_and_remove_accent=False, **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=None, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, **kwargs)
self.bert_pre_tokenizer = BasicTokenizer(do_lower_case=False, never_split=self.all_special_tokens, tokenize_chinese_chars=False, strip_accents=False)
def _tokenize(self, text):
pre_tokens = self.bert_pre_tokenizer.tokenize(text)
split_tokens = []
for token in pre_tokens:
if token:
split_tokens.extend(self.bpe(token).split(' '))
return split_tokens

class Effect7076(BaseEffect):
type = 'passive'
def handler(fit, container, context, projectionRange, **kwargs):
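# Skill effect: boosts damageMultiplier of fitted modules that require
# 'Large Disintegrator Specialization', scaled by the skill's trained level.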
level = (container.level if ('skill' in context) else 1)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Disintegrator Specialization')), 'damageMultiplier', (container.getModifiedItemAttr('damageMultiplierBonus') * level), **kwargs)

class RankSubmission(models.Model):
rank_request = models.ForeignKey(RankRequest, on_delete=models.CASCADE, verbose_name=_('rank request'), related_name='rank_submissions')
tag = models.ForeignKey('submissions.SubmissionTag', on_delete=models.CASCADE, verbose_name=_('tag'), null=True)
total_submissions_per_tag = models.PositiveSmallIntegerField(_('Total Submissions Per Tag'))
submission = models.ForeignKey('submissions.Submission', on_delete=models.CASCADE, verbose_name=_('submission'), related_name='rankings')
rank = models.PositiveIntegerField(_('rank'))
score = models.DecimalField(_('score'), decimal_places=6, max_digits=9)
def __str__(self):
return f'<{self.rank_request.conference.code}> | {self.submission.title} | {self.rank}'

class VGGMultiLayerEncoder(ModelMultiLayerEncoder):
def __init__(self, arch: str, **kwargs: Any) -> None:
_parse_arch(arch)
self.arch = arch
super().__init__(**kwargs)
def state_dict_url(self, framework: str) -> str:
return select_url(self.arch, framework)
def collect_modules(self, inplace: bool) -> Tuple[List[Tuple[str, nn.Module]], Dict[str, str]]:
model = MODELS[self.arch](pretrained=False)
modules = []
state_dict_key_map = {}
block = depth = 1
for (idx, module) in model.features.named_children():
if isinstance(module, nn.Conv2d):
name = f'conv{block}_{depth}'
elif isinstance(module, nn.BatchNorm2d):
name = f'bn{block}_{depth}'
elif isinstance(module, nn.ReLU):
module = nn.ReLU(inplace=inplace)
name = f'relu{block}_{depth}'
depth += 1
else:
name = f'pool{block}'
block += 1
depth = 1
modules.append((name, module))
state_dict_key_map.update({f'features.{idx}.{key}': f'{name}.{key}' for key in module.state_dict().keys()})
return (modules, state_dict_key_map)
def _properties(self) -> Dict[str, Any]:
dct = super()._properties()
dct['arch'] = self.arch
dct.move_to_end('arch', last=False)
return dct

def batching_list_instances(config: Config, insts: List[Instance]):
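"""Split insts into ceil(len(insts) / batch_size) batches and run
simple_batching over each one."""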
train_num = len(insts)
batch_size = config.batch_size
total_batch = (train_num + batch_size - 1) // batch_size  # ceil(train_num / batch_size)
batched_data = []
for batch_id in range(total_batch):
one_batch_insts = insts[(batch_id * batch_size):((batch_id + 1) * batch_size)]
batched_data.append(simple_batching(config, one_batch_insts))
return batched_data

class TwoStepParameters5(TwoStepParametersCommon):
zka_id = DataElementField(type='an', max_length=32, _d='ZKA TAN-Verfahren')
zka_version = DataElementField(type='an', max_length=10, _d='Version ZKA TAN-Verfahren')
name = DataElementField(type='an', max_length=30, _d='Name des Zwei-Schritt-Verfahrens')
max_length_input = DataElementField(type='num', max_length=2, _d='Maximale Länge des Eingabewertes im Zwei-Schritt-Verfahren')
allowed_format = CodeField(enum=AllowedFormat, length=1, _d='Erlaubtes Format im Zwei-Schritt-Verfahren')
text_return_value = DataElementField(type='an', max_length=30, _d='Text zur Belegung des Rückgabewertes im Zwei-Schritt-Verfahren')
max_length_return_value = DataElementField(type='num', max_length=4, _d='Maximale Länge des Rückgabewertes im Zwei-Schritt-Verfahren')
number_of_supported_lists = DataElementField(type='num', length=1, _d='Anzahl unterstützter aktiver TAN-Listen')
multiple_tans_allowed = DataElementField(type='jn', _d='Mehrfach-TAN erlaubt')
tan_time_dialog_association = CodeField(enum=TANTimeDialogAssociation, length=1, _d='TAN Zeit- und Dialogbezug')
tan_list_number_required = CodeField(enum=TANListNumberRequired, length=1, _d='TAN-Listennummer erforderlich')
cancel_allowed = DataElementField(type='jn', _d='Auftragsstorno erlaubt')
sms_charge_account_required = CodeField(enum=SMSChargeAccountRequired, length=1, _d='SMS-Abbuchungskonto erforderlich')
principal_account_required = CodeField(enum=PrincipalAccountRequired, length=1, _d='Auftraggeberkonto erforderlich')
challenge_class_required = DataElementField(type='jn', _d='Challenge-Klasse erforderlich')
challenge_structured = DataElementField(type='jn', _d='Challenge strukturiert')
initialization_mode = CodeField(enum=InitializationMode, _d='Initialisierungsmodus')
description_required = CodeField(enum=DescriptionRequired, length=1, _d='Bezeichnung des TAN-Medium erforderlich')
supported_media_number = DataElementField(type='num', length=1, required=False, _d='Anzahl unterstützter aktiver TAN-Medien')

def create_quant_sim_model(sess: tf.Session, start_op_names: List[str], output_op_names: List[str], use_cuda: bool, evaluator: Callable[[tf.Session, Any], None], logdir: str) -> QuantizationSimModel:
copied_sess = save_and_load_graph(sess=sess, meta_path=logdir)
quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init
rounding_mode = 'nearest'
default_output_bw = 8
default_param_bw = 8
quant_sim_model = QuantizationSimModel(session=copied_sess, starting_op_names=start_op_names, output_op_names=output_op_names, quant_scheme=quant_scheme, rounding_mode=rounding_mode, default_output_bw=default_output_bw, default_param_bw=default_param_bw, use_cuda=use_cuda)
iterations = 5
quant_sim_model.compute_encodings(forward_pass_callback=evaluator, forward_pass_callback_args=iterations)
return quant_sim_model

class Bool(BaseType):
def __init__(self, *, none_ok: bool=False, completions: _Completions=None) -> None:
super().__init__(none_ok=none_ok, completions=completions)
self.valid_values = ValidValues('true', 'false', generate_docs=False)
def to_py(self, value: Union[bool, str, None]) -> Optional[bool]:
self._basic_py_validation(value, bool)
assert (not isinstance(value, str))
return value
def from_str(self, value: str) -> Optional[bool]:
self._basic_str_validation(value)
if (not value):
return None
try:
return BOOLEAN_STATES[value.lower()]
except KeyError:
raise configexc.ValidationError(value, 'must be a boolean!')
def to_str(self, value: Optional[bool]) -> str:
mapping = {None: '', True: 'true', False: 'false'}
return mapping[value]

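# Minimal usage sketch for Bool (hypothetical values; assumes BOOLEAN_STATES
# follows the configparser convention of mapping '1'/'yes'/'true'/'on' to True):
#   Bool().from_str('true')          # -> True
#   Bool(none_ok=True).from_str('')  # -> None
#   Bool().to_str(False)             # -> 'false'
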
class PWGOptimizer():
def __init__(self, model: ParallelWaveGAN, generator_optimizer_params={'lr': 0.0001, 'eps': 1e-06}, generator_scheduler_params={'step_size': 200000, 'gamma': 0.5}, discriminator_optimizer_params={'lr': 5e-05, 'eps': 1e-06}, discriminator_scheduler_params={'step_size': 200000, 'gamma': 0.5}):
self.generator_optimizer = RAdam(model.generator.parameters(), **generator_optimizer_params)
self.generator_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=self.generator_optimizer, **generator_scheduler_params)
self.discriminator_optimizer = RAdam(model.discriminator.parameters(), **discriminator_optimizer_params)
self.discriminator_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=self.discriminator_optimizer, **discriminator_scheduler_params)
def state_dict(self):
return {'generator_optimizer': self.generator_optimizer.state_dict(), 'generator_scheduler': self.generator_scheduler.state_dict(), 'discriminator_optimizer': self.discriminator_optimizer.state_dict(), 'discriminator_scheduler': self.discriminator_scheduler.state_dict()}
def load_state_dict(self, state_dict):
self.generator_optimizer.load_state_dict(state_dict['generator_optimizer'])
self.generator_scheduler.load_state_dict(state_dict['generator_scheduler'])
self.discriminator_optimizer.load_state_dict(state_dict['discriminator_optimizer'])
self.discriminator_scheduler.load_state_dict(state_dict['discriminator_scheduler'])

def test_location_pool_row_actions(pickup_node, skip_qtbot):
widget = LocationPoolRowWidget(pickup_node, 'Fancy name for a pickup')
skip_qtbot.addWidget(widget)
signal_received = False
def edit_closure():
nonlocal signal_received
signal_received = True
widget.changed.connect(edit_closure)
assert (not signal_received)
widget.set_can_have_progression(True)
assert signal_received
assert widget.radio_shuffled.isChecked()

class FC4_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
self.assert_parse(('raid / --device=md0 --fstype="ext3" --level=6 --fsoptions "these=are,options"%s raid.01 raid.02' % self.bytesPerInode), ('raid / --device=0 --fstype="ext3" --level=RAID6 --fsoptions="these=are,options"%s raid.01 raid.02\n' % self.bytesPerInode))

class ELF32_Rela(ELF_Rela):
Rela_SIZE = (4 * 3)
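# An Elf32_Rela entry is three 32-bit words: r_offset, r_info, r_addend.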
def __init__(self, buf, endian=0, ptr=None):
if (len(buf) != self.Rela_SIZE):
raise ValueError(f'expected {self.Rela_SIZE} bytes for an Elf32_Rela entry, got {len(buf)}')
self.ptr = ptr
self.fmt = ('<IIi' if (endian == 0) else '>IIi')
(r_offset, r_info, r_addend) = struct.unpack(self.fmt, buf)
super(ELF32_Rela, self).__init__(r_offset, r_info, r_addend)
def r_type(self):
return (self.r_info & 255)
def r_sym(self):
return (self.r_info >> 8)
def pack(self):
return struct.pack(self.fmt, self.r_offset, self.r_info, self.r_addend)

class Config():
output_dir = 'outputs'
model_dir = os.path.join(output_dir, 'model_dump')
eval_dir = os.path.join(output_dir, 'eval_dump')
init_weights = '/data/model/resnet50_fbaug.pth'
image_mean = np.array([103.53, 116.28, 123.675])
image_std = np.array([57.375, 57.12, 58.395])
train_image_short_size = 800
train_image_max_size = 1400
eval_resize = True
eval_image_short_size = 800
eval_image_max_size = 1400
seed_dataprovider = 3
train_source = Crowd_human.train_source
eval_source = Crowd_human.eval_source
image_folder = Crowd_human.image_folder
class_names = Crowd_human.class_names
num_classes = Crowd_human.num_classes
class_names2id = dict(list(zip(class_names, list(range(num_classes)))))
gt_boxes_name = 'fbox'
backbone_freeze_at = 2
train_batch_per_gpu = 2
momentum = 0.9
weight_decay = 0.0001
base_lr = 0.0003125
focal_loss_alpha = 0.25
focal_loss_gamma = 2
warm_iter = 800
max_epoch = 50
lr_decay = [33, 43]
nr_images_epoch = 15000
log_dump_interval = 20
test_layer_topk = 1000
test_nms = 0.5
test_nms_method = 'normal_nms'
visulize_threshold = 0.3
pred_cls_threshold = 0.01
nr_box_dim = 5
max_boxes_of_image = 500
anchor_base_size = 32
anchor_base_scale = [(2 ** 0), (2 ** (1 / 3)), (2 ** (2 / 3))]
anchor_aspect_ratios = [1, 2, 3]
num_cell_anchors = (len(anchor_aspect_ratios) * len(anchor_base_scale))
smooth_l1_beta = 0.1
negative_thresh = 0.4
positive_thresh = 0.5
allow_low_quality = True

class Agent(object):
def __init__(self, *args, **kwargs):
pass
def init_states(self, *args, **kwargs):
raise NotImplementedError
def update_states(self, states, new_state):
raise NotImplementedError
def finish_eval(self, states, new_state):
raise NotImplementedError
def policy(self, state):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def decode(self, session, low=0, high=100000, num_thread=10):
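"""Decode sentences [low, high] from the session, optionally with a worker
pool; each sentence is driven by policy() until it emits DEFAULT_EOS."""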
corpus_info = session.corpus_info()
high = min((corpus_info['num_sentences'] - 1), high)
if (low >= high):
return
t0 = time.time()
if (num_thread > 1):
with Pool(num_thread) as p:
p.map(partial(self._decode_one, session), [sent_id for sent_id in range(low, (high + 1))])
else:
for sent_id in range(low, (high + 1)):
self._decode_one(session, sent_id)
print(f'Finished {low} to {high} in {(time.time() - t0)}s')
def _decode_one(self, session, sent_id):
action = {}
self.reset()
states = self.init_states()
while (action.get('value', None) != DEFAULT_EOS):
action = self.policy(states)
if (action['key'] == GET):
new_states = session.get_src(sent_id, action['value'])
states = self.update_states(states, new_states)
elif (action['key'] == SEND):
session.send_hypo(sent_id, action['value'])
print(' '.join(states['tokens']['tgt']))

class TestQuantizationSimTransformers(unittest.TestCase):
def test_gelu_static_quantization(self):
model = ConvGeLUNet()
model.eval()
input_shapes = (1, 3, 32, 32)
inp_tensor_list = create_rand_tensors_given_shapes(input_shapes, utils.get_device(model))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*inp_tensor_list)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=inp_tensor_list)
self.assertTrue(isinstance(sim.model.gelu, StaticGridQuantWrapper))
sim.compute_encodings(forward_pass, None)
self.assertTrue(sim.model.gelu.output_quantizers[0].encoding)
out_quantizer = sim.model.gelu.output_quantizers[0]
self.assertTrue(out_quantizer.enabled)
self.assertEqual(out_quantizer.round_mode, libpymo.RoundingMode.ROUND_NEAREST)
self.assertEqual(out_quantizer.quant_scheme, QuantScheme.post_training_tf)
self.assertEqual(out_quantizer.bitwidth, 8)
forward_pass(sim.model, None)
def test_compare_pt_with_bert_layernorm(self):
torch.manual_seed(10)
random_input = torch.rand(1, 4, 4)
bert_m = BertLayerNorm(4, eps=1e-05)
pt_m = nn.LayerNorm(4, eps=1e-05)
bert_m.eval()
pt_m.eval()
hugginface_bert_ln_output = bert_m(random_input).detach().numpy()
pytorch_ln_output = pt_m(random_input).detach().numpy()
self.assertTrue(np.allclose(hugginface_bert_ln_output, pytorch_ln_output))
def test_pytorch_layernorm_quantization(self):
torch.manual_seed(10)
model = ModelWithPtLayerNorm()
model.eval()
random_input = torch.rand(1, 4, 4)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*random_input)
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=random_input)
sim.compute_encodings(forward_pass, None)
self.assertTrue(isinstance(sim.model.ln1.param_quantizers['weight'], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.ln1.param_quantizers['bias'], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.ln1.input_quantizers[0], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.ln1.output_quantizers[0], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.ln1, StaticGridQuantWrapper))
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'two_input_model2', random_input)
self.assertTrue(sim.model.ln1.output_quantizers[0].encoding)
out_quantizer = sim.model.ln1.output_quantizers[0]
self.assertTrue(out_quantizer.enabled)
self.assertEqual(out_quantizer.round_mode, libpymo.RoundingMode.ROUND_NEAREST)
self.assertEqual(out_quantizer.quant_scheme, QuantScheme.post_training_tf)
self.assertEqual(out_quantizer.bitwidth, 8)
self.assertFalse(sim.model.ln1.input_quantizers[0].encoding)
self.assertTrue(sim.model.ln1.param_quantizers['weight'].encoding)
weight_quantizer = sim.model.ln1.param_quantizers['weight']
self.assertTrue(weight_quantizer.enabled)
self.assertEqual(weight_quantizer.round_mode, libpymo.RoundingMode.ROUND_NEAREST)
self.assertEqual(weight_quantizer.quant_scheme, QuantScheme.post_training_tf)
self.assertEqual(weight_quantizer.bitwidth, 8)
self.assertFalse(sim.model.ln1.param_quantizers['bias'].encoding)
forward_pass(sim.model, None)
def test_custom_bert_layernorm_quantization_custom_hw_config(self):
torch.manual_seed(10)
model = ModelWithBertCustomLayerNorm()
model.eval()
file_path = './data/quantsim_config.json'
generate_custom_quantsim_config(file_path)
random_input = torch.rand(1, 4, 4)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*random_input)
qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=random_input, config_file='./data/quantsim_config.json')
sim.compute_encodings(forward_pass, None)
self.assertTrue(isinstance(sim.model.customln1.param_quantizers['weight'], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.customln1.param_quantizers['bias'], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.customln1.input_quantizers[0], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.customln1.output_quantizers[0], StaticGridPerTensorQuantizer))
self.assertTrue(isinstance(sim.model.customln1, StaticGridQuantWrapper))
sim.compute_encodings(forward_pass, None)
self.assertTrue(sim.model.linear1.output_quantizers[0].encoding)
self.assertTrue((sim.model.linear1.output_quantizers[0].bitwidth == 8))
self.assertTrue((sim.model.linear1.output_quantizers[0].data_type == QuantizationDataType.int))
self.assertIsNone(sim.model.customln1.output_quantizers[0].encoding)
self.assertTrue((sim.model.customln1.output_quantizers[0].bitwidth == 16))
self.assertTrue((sim.model.customln1.output_quantizers[0].data_type == QuantizationDataType.float))
self.assertTrue((sim.model.customln1.param_quantizers['weight'].bitwidth == 16))
self.assertTrue((sim.model.customln1.param_quantizers['weight'].data_type == QuantizationDataType.float))
self.assertTrue((sim.model.customln1.param_quantizers['bias'].bitwidth == 16))
self.assertTrue((sim.model.customln1.param_quantizers['bias'].data_type == QuantizationDataType.float))
self.assertIsNone(sim.model.customln1.param_quantizers['weight'].encoding)
self.assertIsNone(sim.model.customln1.param_quantizers['bias'].encoding)
self.assertTrue((sim.model.gelu.output_quantizers[0].bitwidth == 16))
self.assertTrue((sim.model.gelu.output_quantizers[0].data_type == QuantizationDataType.float))
self.assertIsNone(sim.model.gelu.output_quantizers[0].encoding)
qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
if os.path.exists(file_path):
os.remove(file_path)
def test_custom_quantizable_multi_head_attn_unit(self):
seed = 10
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
embed_dim = 128
num_heads = 8
batch_size = 32
seq_size = 27
key = torch.rand(seq_size, batch_size, embed_dim)
query = torch.rand(seq_size, batch_size, embed_dim)
value = torch.rand(seq_size, batch_size, embed_dim)
weights_q = torch.rand(embed_dim, embed_dim)
weights_k = torch.rand(embed_dim, embed_dim)
weights_v = torch.rand(embed_dim, embed_dim)
weights_o = torch.rand(embed_dim, embed_dim)
def SimpleMHA():
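# Hand-rolled reference MHA: project q/k/v, split heads, scaled dot-product
# attention, merge heads, then the output projection.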
_q = torch.matmul(query, weights_q.T).transpose(0, 1)
_k = torch.matmul(key, weights_k.T).transpose(0, 1)
_v = torch.matmul(value, weights_v.T).transpose(0, 1)
def split_heads(v):
return v.reshape(batch_size, (seq_size * num_heads), (embed_dim // num_heads))
def merge_heads(v):
return v.reshape(batch_size, seq_size, embed_dim)
_q = split_heads(_q)
_k = split_heads(_k)
_v = split_heads(_v)
mm1 = (torch.matmul(_q, _k.transpose((- 1), (- 2))) / (_v.size((- 1)) ** 0.5))
w = nn.functional.softmax(mm1, dim=(- 1))
mm2 = torch.matmul(w, _v)
mm2 = merge_heads(mm2)
out = torch.matmul(mm2, weights_o.T)
return (out, w)
nn_mha = nn.MultiheadAttention(embed_dim, num_heads, bias=False)
with torch.no_grad():
nn_mha.in_proj_weight.copy_(torch.cat([weights_q, weights_k, weights_v], dim=0))
nn_mha.out_proj.weight.copy_(weights_o)
nn_outputs = nn_mha(query, key, value)
from aimet_torch.transformers.activation import QuantizableMultiheadAttention
nncq_mha = QuantizableMultiheadAttention(embed_dim, num_heads, bias=False)
with torch.no_grad():
nncq_mha.linear_Q.weight.copy_(weights_q)
nncq_mha.linear_K.weight.copy_(weights_k)
nncq_mha.linear_V.weight.copy_(weights_v)
nncq_mha.out_proj.weight.copy_(weights_o)
nncq_outputs = nncq_mha(query, key, value)
for outputs in zip(nn_outputs, nncq_outputs):
for i in range(1, len(outputs)):
self.assertTrue(np.allclose(outputs[0].detach().numpy(), outputs[i].detach()))
def test_pt_ops_with_modules(self):
seed = 10
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
src = torch.rand(10, 32, 512)
num_encoder_layers = 12
default_decoder_layers = 6
transformer_model = nn.Transformer(nhead=16, num_encoder_layers=num_encoder_layers)
transformer_model.eval()
for i in range(num_encoder_layers):
self.assertTrue(isinstance(transformer_model.encoder.layers[i].self_attn, torch.nn.MultiheadAttention))
for i in range(default_decoder_layers):
self.assertTrue(isinstance(transformer_model.decoder.layers[i].self_attn, torch.nn.MultiheadAttention))
utils.replace_modules_of_type1_using_constructor(transformer_model, torch.nn.MultiheadAttention, create_quantizable_multihead_attention)
for i in range(num_encoder_layers):
self.assertTrue(isinstance(transformer_model.encoder.layers[i].self_attn, QuantizableMultiheadAttention))
for i in range(default_decoder_layers):
self.assertTrue(isinstance(transformer_model.decoder.layers[i].self_attn, QuantizableMultiheadAttention))
_ = transformer_model(src=src, tgt=src)
def test_prepare_model_with_pytorch_transformer_layer_after_act_replacement(self):
seed = 10
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
src = torch.rand(10, 32, 512)
dummy_input = torch.rand(10, 32, 512)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input, dummy_input)
num_encoder_layers = 12
default_num_decoder_layers = 6
transformer_model = nn.Transformer(nhead=16, num_encoder_layers=num_encoder_layers)
transformer_model.eval()
from torch import fx
prepare_pt_transformer_for_quantsim(transformer_model)
utils.replace_modules_of_type1_using_constructor(transformer_model, torch.nn.MultiheadAttention, create_quantizable_multihead_attention)
ops_with_missing_modules = connectedgraph_utils.get_ops_with_missing_modules(transformer_model, (src, src))
sim = QuantizationSimModel(transformer_model, dummy_input=(src, src))
sim.compute_encodings(forward_pass, None)
for i in range(num_encoder_layers):
self.assertTrue(sim.model.encoder.layers[i].self_attn.linear_Q.output_quantizers[0].encoding)
self.assertTrue(sim.model.encoder.layers[i].self_attn.linear_K.output_quantizers[0].encoding)
self.assertTrue(sim.model.encoder.layers[i].self_attn.linear_V.output_quantizers[0].encoding)
self.assertTrue(sim.model.encoder.layers[i].self_attn.matmul_1.output_quantizers[0].encoding)
self.assertTrue(sim.model.encoder.layers[i].self_attn.matmul_2.output_quantizers[0].encoding)
self.assertTrue(sim.model.encoder.layers[i].self_attn.softmax.output_quantizers[0].encoding)
with tempfile.TemporaryDirectory() as tmpdir:
sim.export(tmpdir, 'transformer', dummy_input=(src, src))
onnx_path = os.path.join(tmpdir, 'transformer.onnx')
onnx_model = onnx.load(onnx_path)
mha_names = {'.'.join(n.name.split('#')[0].split('.')[:(- 1)]) for n in onnx_model.graph.node if ('self_attn.' in n.name)}
assert (len(mha_names) == (default_num_decoder_layers + num_encoder_layers))
@pytest.mark.skipif((version.parse(torch.__version__) < version.parse('1.13.1')), reason='torch1.13.1 is required.')
def test_transformer_transformation(self):
seed = 10
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
src = torch.rand(10, 32, 512)
transformer_model_1 = nn.Transformer(num_encoder_layers=2, num_decoder_layers=2)
transformer_model_1.eval()
transformer_model_2 = copy.deepcopy(transformer_model_1)
out_fp = transformer_model_1(src=copy.deepcopy(src), tgt=copy.deepcopy(src))
prepare_pt_transformer_for_quantsim(transformer_model_1)
replace_modules_of_type1_using_constructor(transformer_model_1, torch.nn.MultiheadAttention, create_quantizable_multihead_attention)
transformer_model_1.eval()
out_fp_1 = transformer_model_1(src=copy.deepcopy(src), tgt=copy.deepcopy(src))
diff = (out_fp_1 - out_fp)
print('max diff:', diff.max(), 'min diff:', diff.min())
assert torch.allclose(out_fp, out_fp_1, atol=0.0001)
prepare_pt_transformer_for_quantsim(transformer_model_2)
replace_modules_of_type1_using_constructor(transformer_model_2.encoder, nn.TransformerEncoderLayer, create_quantizable_transformer_encoder_layer)
replace_modules_of_type1_using_constructor(transformer_model_2.decoder, nn.TransformerDecoderLayer, create_quantizable_transformer_decoder_layer)
replace_modules_of_type1_using_constructor(transformer_model_2, torch.nn.MultiheadAttention, create_quantizable_multihead_attention)
transformer_model_2.eval()
out_fp_2 = transformer_model_2(src=copy.deepcopy(src), tgt=copy.deepcopy(src))
diff = (out_fp_2 - out_fp)
print('max diff:', diff.max(), 'min diff:', diff.min())
assert torch.allclose(out_fp, out_fp_2, atol=0.0001)
def test_mha_as_leaf(self):
seed = 10
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
x = torch.rand(15, 8, 128)
dummy_input = (x, x, x)
class MhaModel(torch.nn.Module):
def __init__(self):
super(MhaModel, self).__init__()
self.nn_mha = nn.MultiheadAttention(128, 1, bias=True)
def forward(self, x1, x2, x3):
return self.nn_mha(x1, x2, x3)
def forward_pass(m, _):
m.eval()
with torch.no_grad():
return m(*copy.deepcopy(dummy_input))
model = MhaModel().eval()
aimet_torch.utils.modules_to_treat_as_leaf = [torch.nn.MultiheadAttention, QuantizableMultiheadAttention]
utils.replace_modules_of_type1_using_constructor(model, torch.nn.MultiheadAttention, create_quantizable_multihead_attention)
aimet_sim = QuantizationSimModel(model, dummy_input=copy.deepcopy(dummy_input), quant_scheme='tf')
assert (len(aimet_sim.model._modules) == 1)
assert utils.is_leaf_module(aimet_sim.model.nn_mha._module_to_wrap)
aimet_sim.compute_encodings(forward_pass, None)
_ = forward_pass(aimet_sim.model, None)
aimet_torch.utils.modules_to_treat_as_leaf = [] |
def main():
if (len(sys.argv) < 2):
print((('usage: ' + sys.argv[0]) + ' [image]'))
sys.exit(1)
image = Image.open(sys.argv[1]).convert('RGBA')
lines = find_lines(image, 110, 35, 0)
for line in lines:
print(line)
draw(image, lines, 'test.png')
print(('lines: %d' % len(lines))) |
class Effect8154(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Drones')), 'maxRange', ship.getModifiedItemAttr('eliteBonusBlackOps2'), skill='Black Ops', **kwargs)
fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Drones')), 'trackingSpeed', ship.getModifiedItemAttr('eliteBonusBlackOps2'), skill='Black Ops', **kwargs) |
class Migration(migrations.Migration):
dependencies = [('adserver', '0002_image-upload')]
operations = [migrations.CreateModel(name='AdType', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Publication date')), ('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified date')), ('name', models.CharField(max_length=200, verbose_name='Name')), ('slug', models.SlugField(max_length=200, verbose_name='Slug')), ('has_image', models.BooleanField(default=True, verbose_name='Has image?')), ('image_width', models.PositiveIntegerField(blank=True, null=True)), ('image_height', models.PositiveIntegerField(blank=True, null=True)), ('has_text', models.BooleanField(default=True, verbose_name='Has text?')), ('max_text_length', models.PositiveIntegerField(blank=True, help_text='Max length does not include HTML tags', null=True)), ('allowed_html_tags', models.CharField(blank=True, default='a b strong i em code', help_text='Space separated list of allowed HTML tag names', max_length=255, verbose_name='Allowed HTML tags')), ('template', models.TextField(blank=True, help_text='Override the template for rendering this ad type', null=True, verbose_name='Ad template'))], options={'ordering': ('name',)}), migrations.CreateModel(name='Advertiser', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Publication date')), ('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified date')), ('name', models.CharField(max_length=200, verbose_name='Name')), ('slug', models.SlugField(max_length=200, verbose_name='Publisher Slug'))], options={'ordering': ('name',)}), migrations.CreateModel(name='Publisher', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Publication date')), ('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified date')), ('name', models.CharField(max_length=200, verbose_name='Name')), ('slug', models.SlugField(max_length=200, verbose_name='Publisher Slug'))], options={'ordering': ('name',)}), migrations.RemoveField(model_name='campaign', name='secret'), migrations.AlterField(model_name='advertisement', name='image', field=models.ImageField(blank=True, help_text='Sized according to the ad type', max_length=255, null=True, upload_to='images/%Y/%m/', verbose_name='Image')), migrations.AlterField(model_name='advertisement', name='link', field=models.URLField(default=' max_length=255, verbose_name='Link URL'), preserve_default=False), migrations.AlterField(model_name='advertisement', name='slug', field=models.SlugField(max_length=200, verbose_name='Slug')), migrations.AlterField(model_name='advertisement', name='text', field=models.TextField(blank=True, help_text='Different ad types have different text requirements', verbose_name='Text')), migrations.AddField(model_name='adtype', name='publisher', field=models.ForeignKey(blank=True, help_text='For publisher-specific ad types', null=True, on_delete=django.db.models.deletion.CASCADE, to='adserver.Publisher')), migrations.AddField(model_name='advertisement', name='ad_type', field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='adserver.AdType')), migrations.AddField(model_name='campaign', name='advertiser', field=models.ForeignKey(blank=True, default=None, help_text='The advertiser for this campaign. A campaign without an advertiser is run by the ad network.', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='campaigns', to='adserver.Advertiser')), migrations.AddField(model_name='campaign', name='publishers', field=models.ManyToManyField(blank=True, help_text='Ads for this campaign are eligible for display on these publishers', related_name='flights', to='adserver.Publisher'))] |
class LightningTxDialog(WindowModalDialog):
def __init__(self, parent: 'ElectrumWindow', tx_item: dict):
WindowModalDialog.__init__(self, parent, _('Lightning Payment'))
self.parent = parent
self.is_sent = bool((tx_item['direction'] == 'sent'))
self.label = tx_item['label']
self.timestamp = tx_item['timestamp']
self.amount = (Decimal(tx_item['amount_msat']) / 1000)
self.payment_hash = tx_item['payment_hash']
self.preimage = tx_item['preimage']
self.setMinimumWidth(700)
vbox = QVBoxLayout()
self.setLayout(vbox)
vbox.addWidget(QLabel(((_('Amount') + ': ') + self.parent.format_amount_and_units(self.amount))))
if self.is_sent:
fee = (Decimal(tx_item['fee_msat']) / 1000)
vbox.addWidget(QLabel(((_('Fee') + ': ') + self.parent.format_amount_and_units(fee))))
time_str = datetime.datetime.fromtimestamp(self.timestamp).isoformat(' ')[:(- 3)]
vbox.addWidget(QLabel(((_('Date') + ': ') + time_str)))
qr_icon = ('qrcode_white.png' if ColorScheme.dark_scheme else 'qrcode.png')
vbox.addWidget(QLabel((_('Payment hash') + ':')))
self.hash_e = ButtonsLineEdit(self.payment_hash)
self.hash_e.addCopyButton()
self.hash_e.addButton(qr_icon, self.show_qr(self.hash_e, _('Payment hash')), _('Show QR Code'))
self.hash_e.setReadOnly(True)
self.hash_e.setFont(QFont(MONOSPACE_FONT))
vbox.addWidget(self.hash_e)
vbox.addWidget(QLabel((_('Preimage') + ':')))
self.preimage_e = ButtonsLineEdit(self.preimage)
self.preimage_e.addCopyButton()
self.preimage_e.addButton(qr_icon, self.show_qr(self.preimage_e, _('Preimage')), _('Show QR Code'))
self.preimage_e.setReadOnly(True)
self.preimage_e.setFont(QFont(MONOSPACE_FONT))
vbox.addWidget(self.preimage_e)
vbox.addLayout(Buttons(CloseButton(self)))
def show_qr(self, line_edit, title=''):
def f():
text = line_edit.text()
try:
self.parent.show_qrcode(text, title, parent=self)
except Exception as e:
self.show_message(repr(e))
return f |
def _save_sample_stats(sample_settings, sample_stats, chains, trace: MultiTrace, return_inferencedata: bool, _t_sampling, idata_kwargs, model: Model) -> Tuple[(Optional[Any], Optional[InferenceData])]:
sample_settings_dict = sample_settings[0]
sample_settings_dict['_t_sampling'] = _t_sampling
sample_stats_dict = sample_stats[0]
if (chains > 1):
for stat in sample_stats[0].keys():
value_list = []
for chain_sample_stats in sample_stats:
value_list.append(chain_sample_stats[stat])
sample_stats_dict[stat] = value_list
idata: Optional[InferenceData] = None
if (not return_inferencedata):
for (stat, value) in sample_stats_dict.items():
setattr(trace.report, stat, value)
for (stat, value) in sample_settings_dict.items():
setattr(trace.report, stat, value)
else:
for (stat, value) in sample_stats_dict.items():
if (chains > 1):
sample_stats_dict[stat] = np.array(value, dtype=object)
else:
sample_stats_dict[stat] = np.array(value)
sample_stats = dict_to_dataset(sample_stats_dict, attrs=sample_settings_dict, library=pymc)
ikwargs: Dict[(str, Any)] = dict(model=model)
if (idata_kwargs is not None):
ikwargs.update(idata_kwargs)
idata = to_inference_data(trace, **ikwargs)
idata = InferenceData(**idata, sample_stats=sample_stats)
return (sample_stats, idata) |
def test_different_data_types():
a = np.zeros((10, 3), np.float16)
b = gfx.Buffer(a)
assert (b.format == '3xf2')
a = memoryview(np.zeros((10, 2), np.int16))
b = gfx.Buffer(a)
assert (b.format == '2xi2')
a = b''
b = gfx.Buffer(a)
assert (b.format == 'u1')
b = gfx.Buffer(a, format='2xf4')
assert (b.format == '2xf4')
assert (b.nitems == 16)
assert (b.itemsize == 1)
with pytest.raises(TypeError):
gfx.Buffer([1, 2, 3, 4, 5]) |
def CustomWindowProvider(cls):
if (not isinstance(cls, type)):
raise PyUnityException('Provided window provider is not a class')
if (not issubclass(cls, ABCWindow)):
raise PyUnityException('Provided window provider does not subclass Window.ABCWindow')
Logger.LogLine(Logger.DEBUG, 'Using window provider', cls.__name__)
config.windowProvider = cls
return cls |
class DenseNetBlock(nn.Module):
def __init__(self, in_channels=128, ks=3, padding=1, stride=1):
super(DenseNetBlock, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=16, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(nn.Conv2d(16, 16, 3, 1, 1), nn.ReLU(inplace=True))
self.conv3 = nn.Sequential(nn.Conv2d(32, 16, 3, 1, 1), nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(nn.Conv2d(48, 16, 3, 1, 1), nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(nn.Conv2d(64, 16, 3, 1, 1), nn.ReLU(inplace=True))
self.conv6 = nn.Sequential(nn.Conv2d(80, 16, 3, 1, 1), nn.ReLU(inplace=True))
self.conv7 = nn.Sequential(nn.Conv2d(96, 16, 3, 1, 1), nn.ReLU(inplace=True))
self.conv8 = nn.Sequential(nn.Conv2d(112, 16, 3, 1, 1), nn.ReLU(inplace=True))
def forward(self, x):
conv1_out = self.conv1(x)
conv2_out = self.conv2(conv1_out)
conv3_in = torch.cat([conv1_out, conv2_out], 1)
conv3_out = self.conv3(conv3_in)
conv4_in = torch.cat([conv1_out, conv2_out, conv3_out], 1)
conv4_out = self.conv4(conv4_in)
conv5_in = torch.cat([conv1_out, conv2_out, conv3_out, conv4_out], 1)
conv5_out = self.conv5(conv5_in)
conv6_in = torch.cat([conv1_out, conv2_out, conv3_out, conv4_out, conv5_out], 1)
conv6_out = self.conv6(conv6_in)
conv7_in = torch.cat([conv1_out, conv2_out, conv3_out, conv4_out, conv5_out, conv6_out], 1)
conv7_out = self.conv7(conv7_in)
conv8_in = torch.cat([conv1_out, conv2_out, conv3_out, conv4_out, conv5_out, conv6_out, conv7_out], 1)
conv8_out = self.conv8(conv8_in)
output = torch.cat([conv1_out, conv2_out, conv3_out, conv4_out, conv5_out, conv6_out, conv7_out, conv8_out], 1)
return output
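# Hedged usage sketch (not from the source): eight 16-channel stages
# concatenate to 8 * 16 = 128 output channels, matching the default
# in_channels, so blocks can be stacked back to back.
block = DenseNetBlock(in_channels=128)
dense_out = block(torch.randn(1, 128, 32, 32))
assert dense_out.shape == (1, 128, 32, 32) |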
class TfExampleDecoder(data_decoder.DataDecoder):
def __init__(self):
self.keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''), 'image/key/sha256': tf.FixedLenFeature((), tf.string, default_value=''), 'image/source_id': tf.FixedLenFeature((), tf.string, default_value=''), 'image/height': tf.FixedLenFeature((), tf.int64, 1), 'image/width': tf.FixedLenFeature((), tf.int64, 1), 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32), 'image/object/class/label': tf.VarLenFeature(tf.int64), 'image/object/area': tf.VarLenFeature(tf.float32), 'image/object/is_crowd': tf.VarLenFeature(tf.int64), 'image/object/difficult': tf.VarLenFeature(tf.int64), 'image/segmentation/object': tf.VarLenFeature(tf.int64), 'image/segmentation/object/class': tf.VarLenFeature(tf.int64)}
self.items_to_handlers = {fields.InputDataFields.image: slim_example_decoder.Image(image_key='image/encoded', format_key='image/format', channels=3), fields.InputDataFields.source_id: slim_example_decoder.Tensor('image/source_id'), fields.InputDataFields.key: slim_example_decoder.Tensor('image/key/sha256'), fields.InputDataFields.filename: slim_example_decoder.Tensor('image/filename'), fields.InputDataFields.groundtruth_boxes: slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), fields.InputDataFields.groundtruth_classes: slim_example_decoder.Tensor('image/object/class/label'), fields.InputDataFields.groundtruth_area: slim_example_decoder.Tensor('image/object/area'), fields.InputDataFields.groundtruth_is_crowd: slim_example_decoder.Tensor('image/object/is_crowd'), fields.InputDataFields.groundtruth_difficult: slim_example_decoder.Tensor('image/object/difficult'), fields.InputDataFields.groundtruth_instance_masks: slim_example_decoder.ItemHandlerCallback(['image/segmentation/object', 'image/height', 'image/width'], self._reshape_instance_masks), fields.InputDataFields.groundtruth_instance_classes: slim_example_decoder.Tensor('image/segmentation/object/class')}
def decode(self, tf_example_string_tensor):
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
return tensor_dict
def _reshape_instance_masks(self, keys_to_tensors):
masks = keys_to_tensors['image/segmentation/object']
if isinstance(masks, tf.SparseTensor):
masks = tf.sparse_tensor_to_dense(masks)
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([(- 1), height, width]), tf.int32)
return tf.cast(tf.reshape(masks, to_shape), tf.bool) |
def load_SQuAD1(detectLLM):
f = pd.read_csv('datasets/SQuAD1_LLMs.csv')
q = f['Question'].tolist()
a_human = [eval(_)['text'][0] for _ in f['answers'].tolist()]
a_chat = f[f'{detectLLM}_answer'].fillna('').tolist()
res = []
for i in range(len(q)):
if ((len(a_human[i].split()) > 1) and (len(a_chat[i].split()) > 1)):
a_human[i] = check_period(a_human[i])
res.append([q[i], a_human[i], a_chat[i]])
data_new = {'train': {'text': [], 'label': []}, 'test': {'text': [], 'label': []}}
index_list = list(range(len(res)))
random.seed(0)
random.shuffle(index_list)
total_num = len(res)
for i in tqdm.tqdm(range(total_num), desc='parsing data'):
if (i < (total_num * 0.8)):
data_partition = 'train'
else:
data_partition = 'test'
data_new[data_partition]['text'].append(process_spaces(res[index_list[i]][1]))
data_new[data_partition]['label'].append(0)
data_new[data_partition]['text'].append(process_spaces(res[index_list[i]][2]))
data_new[data_partition]['label'].append(1)
return data_new |
def _format_object_to_py(obj):
if (isinstance(obj, dict) and (obj.get('isObject') is True)):
object_type = obj.get('objectType')
available_subclasses = {cls.__name__: cls for cls in PymiereBaseObject.__subclasses__()}
available_subclasses.update({cls.__name__: cls for cls in PymiereBaseCollection.__subclasses__()})
if (object_type in available_subclasses):
return available_subclasses[object_type](pymiere_id=obj.get('pymiereId'))
elif ('ollection' in object_type):
raise NotImplementedError('Pymiere does not support collections as generic object...')
elif (object_type == '$'):
return available_subclasses['Dollar'](pymiere_id=obj.get('pymiereId'))
else:
return PymiereGenericObject(pymiere_id=obj['pymiereId'])
if (isinstance(obj, dict) and obj.get('pymiere_id')):
return PymiereGenericObject(pymiere_id=obj['pymiere_id'])
return obj |
def _migrate_v44(preset: dict) -> dict:
def add_node_name(location):
node_name = migration_data.get_node_name_for_area(preset['game'], location['world_name'], location['area_name'])
location['node_name'] = node_name
for loc in preset['configuration']['starting_location']:
add_node_name(loc)
if ('elevators' in preset['configuration']):
result = []
for loc in preset['configuration']['elevators']['excluded_teleporters']:
try:
add_node_name(loc)
result.append(loc)
except KeyError:
continue
preset['configuration']['elevators']['excluded_teleporters'] = result
for loc in preset['configuration']['elevators']['excluded_targets']:
add_node_name(loc)
return preset |
class MarginLoss(nn.Module):
def __init__(self, num_classes=10, margin=0.995, use_gpu=True):
super(MarginLoss, self).__init__()
self.margin = margin
self.num_classes = num_classes
self.use_gpu = use_gpu
def forward(self, x, labels):
batch_size = x.size(0)
classes = torch.arange(self.num_classes).long()
if self.use_gpu:
classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
x = F.softmax(x, dim=1)
p_gt = torch.mul(mask.float(), x).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes)
diff = ((self.margin + x) - p_gt)
zeros = torch.zeros(batch_size, self.num_classes, device=x.device)
loss = (((torch.max(zeros, diff).sum() / batch_size) - self.margin) / (self.num_classes - 1))
return loss
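# Hedged usage sketch (not from the source): a CPU-only check with
# use_gpu=False; the loss reduces to a single scalar per batch.
criterion = MarginLoss(num_classes=10, use_gpu=False)
margin_loss = criterion(torch.randn(4, 10), torch.randint(0, 10, (4,)))
assert margin_loss.dim() == 0 |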
@pytest.mark.filterwarnings('ignore:Trie.has_keys_with_prefix is deprecated')
def test_has_keys_with_prefix():
fruit_trie = marisa_trie.BytesTrie([('apple', b'foo'), ('pear', b'bar'), ('peach', b'baz')])
assert fruit_trie.has_keys_with_prefix('')
assert fruit_trie.has_keys_with_prefix('a')
assert fruit_trie.has_keys_with_prefix('pe')
assert fruit_trie.has_keys_with_prefix('pear')
assert (not fruit_trie.has_keys_with_prefix('x')) |
@dataclasses.dataclass
class SelectionInfo():
wrapper: Optional[str] = None
outcomes: Dict[(str, str)] = dataclasses.field(default_factory=dict)
reason: SelectionReason = SelectionReason.unknown
def set_module_error(self, name: str, error: Exception) -> None:
self.outcomes[name] = f'{type(error).__name__}: {error}'
def use_wrapper(self, wrapper: str) -> None:
self.wrapper = wrapper
self.outcomes[wrapper] = 'success'
def __str__(self) -> str:
if (not self.outcomes):
return f'Qt wrapper: {self.wrapper} (via {self.reason.value})'
lines = ['Qt wrapper info:']
for wrapper in WRAPPERS:
outcome = self.outcomes.get(wrapper, 'not imported')
lines.append(f' {wrapper}: {outcome}')
lines.append(f' -> selected: {self.wrapper} (via {self.reason.value})')
return '\n'.join(lines)
def to_html(self) -> str:
return html.escape(str(self)).replace('\n', '<br>') |
def get_input(caller, prompt, callback, session=None, *args, **kwargs):
if (not callable(callback)):
raise RuntimeError('get_input: input callback is not callable.')
caller.ndb._getinput = _Prompt()
caller.ndb._getinput._callback = callback
caller.ndb._getinput._prompt = prompt
caller.ndb._getinput._session = session
caller.ndb._getinput._args = args
caller.ndb._getinput._kwargs = kwargs
caller.cmdset.add(InputCmdSet)
caller.msg(prompt, session=session) |
class TlbLexer(RegexLexer):
name = 'Tl-b'
aliases = ['tlb']
filenames = ['*.tlb']
url = '
version_added = ''
tokens = {'root': [('\\s+', Whitespace), include('comments'), ('[0-9]+', Number), (words(('+', '-', '*', '=', '?', '~', '.', '^', '==', '<', '>', '<=', '>=', '!=')), Operator), (words(('##', '#<', '#<=')), Name.Tag), ('#[0-9a-f]*_?', Name.Tag), ('\\$[01]*_?', Name.Tag), ('[a-zA-Z_][0-9a-zA-Z_]*', Name), ('[;():\\[\\]{}]', Punctuation)], 'comments': [('//.*', Comment.Singleline), ('/\\*', Comment.Multiline, 'comment')], 'comment': [('[^/*]+', Comment.Multiline), ('/\\*', Comment.Multiline, '#push'), ('\\*/', Comment.Multiline, '#pop'), ('[*/]', Comment.Multiline)]} |
class WordEmbedding(nn.Module):
def __init__(self, args, vocab_size):
super(WordEmbedding, self).__init__()
self.dropout = nn.Dropout(args.dropout)
self.word_embedding = nn.Embedding(vocab_size, args.emb_size)
self.layer_norm = LayerNorm(args.emb_size)
def forward(self, src, _):
emb = self.word_embedding(src)
emb = self.dropout(self.layer_norm(emb))
return emb |
class ppc(QlCommonBaseCC):
_retreg = UC_PPC_REG_3
_argregs = ((UC_PPC_REG_3, UC_PPC_REG_4, UC_PPC_REG_5, UC_PPC_REG_6, UC_PPC_REG_7, UC_PPC_REG_8) + ((None,) * 10))
@staticmethod
def getNumSlots(argbits: int):
return 1
def setReturnAddress(self, addr: int):
self.arch.regs.lr = addr |
def job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=1, b=(- 0.5), null_sim=None):
if (null_sim is None):
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
c = 1.0
V0 = util.fit_gaussian_draw(Xtr, J, seed=(r + 1), reg=1e-06)
ops = {'reg': 1e-05, 'max_iter': 30, 'tol_fun': 1e-06, 'disp': True, 'locs_bounds_frac': 20.0}
(V_opt, info) = gof.IMQFSSD.optimize_locs(p, tr, b, c, V0, **ops)
k_imq = kernel.KIMQ(b=b, c=c)
fssd_imq = gof.FSSD(p, k_imq, V_opt, null_sim=null_sim, alpha=alpha)
fssd_imq_result = fssd_imq.perform_test(te)
return {'test_result': fssd_imq_result, 'time_secs': t.secs, 'goftest': fssd_imq, 'opt_info': info} |
def main():
read_cfg(args.cfg)
cfg.memonger = args.memonger
pprint.pprint(cfg)
aogs = []
for i in range(len(cfg.AOG.dims)):
aog = get_aog(dim=cfg.AOG.dims[i], min_size=1, tnode_max_size=cfg.AOG.dims[i], turn_off_unit_or_node=cfg.AOG.TURN_OFF_UNIT_OR_NODE)
aogs.append(aog)
symbol = AOGNet.get_symbol(aogs=aogs, cfg=cfg)
internals = symbol.get_internals()
if (cfg.dataset.data_type == 'imagenet'):
dshape = (cfg.batch_size, 3, 224, 224)
elif (cfg.dataset.data_type in ['cifar10', 'cifar100']):
dshape = (cfg.batch_size, 3, 32, 32)
(_, out_shapes, _) = internals.infer_shape(data=dshape)
shape_dict = dict(zip(internals.list_outputs(), out_shapes))
stages_kw = {'stage_0': 0.0, 'stage_1': 0.0, 'stage_2': 0.0, 'stage_3': 0.0}
sum = 0.0
for k in shape_dict.keys():
if (k.split('_')[(- 1)] in ['weight', 'bias', 'gamma', 'beta']):
size = 1
for val in shape_dict[k]:
size *= val
for key in stages_kw:
if (key in k):
stages_kw[key] += size
sum += size
print('total number of params: {} M'.format((sum / 1000000.0)))
for (k, v) in stages_kw.items():
if (v > 0):
print('{} has param size: {} M'.format(k, (v / 1000000.0)))
if args.memonger:
dshape_ = ((1,) + dshape[1:])
old_cost = memonger.get_cost(symbol, data=dshape_)
symbol = memonger.search_plan(symbol, data=dshape_)
new_cost = memonger.get_cost(symbol, data=dshape_)
print('batch size=1, old cost= {} MB, new cost= {} MB'.format(old_cost, new_cost))
kv = mx.kvstore.create(args.kv_store)
devs = (mx.cpu() if (args.gpus is None) else [mx.gpu(int(i)) for i in args.gpus.split(',')])
epoch_size = max(int(((cfg.dataset.num_examples / cfg.batch_size) / kv.num_workers)), 1)
if (not os.path.exists(args.modeldir)):
os.makedirs(args.modeldir)
model_prefix = os.path.join(args.modeldir, 'checkpoint')
checkpoint = mx.callback.do_checkpoint(model_prefix)
arg_params = None
aux_params = None
if args.resume:
(_, arg_params, aux_params) = mx.model.load_checkpoint(model_prefix, args.resume)
begin_epoch = args.resume
else:
begin_epoch = 0
(train, val) = eval((cfg.dataset.data_type + '_iterator'))(cfg, kv)
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2)
lr_scheduler = multi_factor_scheduler(begin_epoch, epoch_size, step=cfg.train.lr_steps, factor=0.1)
optimizer_params = {'learning_rate': cfg.train.lr, 'momentum': cfg.train.mom, 'wd': cfg.train.wd, 'lr_scheduler': lr_scheduler}
model = mx.mod.Module(context=devs, symbol=symbol)
if (cfg.dataset.data_type in ['cifar10', 'cifar100']):
eval_metric = ['acc', 'ce']
elif (cfg.dataset.data_type == 'imagenet'):
eval_metric = ['acc', mx.metric.create('top_k_accuracy', top_k=5)]
model.fit(train, begin_epoch=begin_epoch, num_epoch=cfg.num_epoch, eval_data=val, eval_metric=eval_metric, kvstore=kv, optimizer='sgd', optimizer_params=optimizer_params, arg_params=arg_params, aux_params=aux_params, initializer=initializer, allow_missing=True, batch_end_callback=mx.callback.Speedometer(cfg.batch_size, args.frequent), epoch_end_callback=checkpoint) |
def test_fixture_arg_ordering(pytester: Pytester) -> None:
p1 = pytester.makepyfile('\n import pytest\n\n suffixes = []\n\n @pytest.fixture\n def fix_1(): suffixes.append("fix_1")\n @pytest.fixture\n def fix_2(): suffixes.append("fix_2")\n @pytest.fixture\n def fix_3(): suffixes.append("fix_3")\n @pytest.fixture\n def fix_4(): suffixes.append("fix_4")\n @pytest.fixture\n def fix_5(): suffixes.append("fix_5")\n\n @pytest.fixture\n def fix_combined(fix_1, fix_2, fix_3, fix_4, fix_5): pass\n\n def test_suffix(fix_combined):\n assert suffixes == ["fix_1", "fix_2", "fix_3", "fix_4", "fix_5"]\n ')
result = pytester.runpytest('-vv', str(p1))
assert (result.ret == 0) |
class JsonToCsv():
def flattenjson(self, mp, delim='_'):
ret = []
if isinstance(mp, dict):
for k in mp.keys():
csvs = self.flattenjson(mp[k], delim)
for csv in csvs:
ret.append(((k + delim) + str(csv)))
elif isinstance(mp, list):
for k in mp:
csvs = self.flattenjson(k, delim)
for csv in csvs:
ret.append(str(csv))
else:
ret.append(mp)
return ret
def dumps(self, data):
flat_json = self.flattenjson(data)
output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(flat_json)
return output.getvalue()
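# Hedged usage sketch (assumes io and csv are imported): nested keys and
# values are merged into "key_value" strings on a single quoted CSV row.
converter = JsonToCsv()
csv_row = converter.dumps({'user': {'name': 'ada'}, 'active': 1})
assert csv_row.strip() == '"user_name_ada","active_1"' |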
@lru_cache(maxsize=2)
def compute_gaussian(tile_size: Union[(Tuple[(int, ...)], List[int])], sigma_scale: float=(1.0 / 8), value_scaling_factor: float=1, dtype=torch.float16, device=torch.device('cuda', 0)) -> torch.Tensor:
tmp = np.zeros(tile_size)
center_coords = [(i // 2) for i in tile_size]
sigmas = [(i * sigma_scale) for i in tile_size]
tmp[tuple(center_coords)] = 1
gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)
gaussian_importance_map = torch.from_numpy(gaussian_importance_map).type(dtype).to(device)
gaussian_importance_map = ((gaussian_importance_map / torch.max(gaussian_importance_map)) * value_scaling_factor)
gaussian_importance_map = gaussian_importance_map.type(dtype)
gaussian_importance_map[(gaussian_importance_map == 0)] = torch.min(gaussian_importance_map[(gaussian_importance_map != 0)])
return gaussian_importance_map
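# Hedged usage sketch (not from the source): override the CUDA default so
# the map can be built on CPU; the centre value normalises to exactly 1.0.
cpu_map = compute_gaussian((8, 8), dtype=torch.float32, device=torch.device('cpu'))
assert cpu_map.shape == (8, 8) and float(cpu_map.max()) == 1.0 |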
class DefaultGuest(DefaultAccount):
@classmethod
def create(cls, **kwargs):
return cls.authenticate(**kwargs)
@classmethod
def authenticate(cls, **kwargs):
errors = []
account = None
username = None
ip = kwargs.get('ip', '').strip()
zone = kwargs.get('zone', '').strip()
if (not settings.GUEST_ENABLED):
errors.append('Guest accounts are not enabled on this server.')
return (None, errors)
try:
for name in settings.GUEST_LIST:
if (not AccountDB.objects.filter(username__iexact=name).count()):
username = name
break
if (not username):
errors.append('All guest accounts are in use. Please try again later.')
if ip:
LOGIN_THROTTLE.update(ip, 'Too many requests for Guest access.')
return (None, errors)
else:
password = ('%016x' % getrandbits(64))
home = settings.GUEST_HOME
permissions = settings.PERMISSION_GUEST_DEFAULT
typeclass = settings.BASE_GUEST_TYPECLASS
(account, errs) = super(DefaultGuest, cls).create(guest=True, username=username, password=password, permissions=permissions, typeclass=typeclass, home=home, ip=ip, zone=zone)
errors.extend(errs)
return (account, errors)
except Exception as e:
errors.append('An error occurred. Please e-mail an admin if the problem persists.')
logger.log_trace()
return (None, errors)
return (account, errors)
def at_post_login(self, session=None, **kwargs):
self._send_to_connect_channel(f'|G{self.key} connected|n')
self.puppet_object(session, self.db._last_puppet)
def at_server_shutdown(self):
super().at_server_shutdown()
characters = self.db._playable_characters
for character in characters:
if character:
character.delete()
def at_post_disconnect(self, **kwargs):
super().at_post_disconnect()
characters = self.db._playable_characters
for character in characters:
if character:
character.delete()
self.delete() |
@pytest.mark.parametrize('n', [*range(2, 5)])
@pytest.mark.parametrize('val', [3, 4, 5, 7, 8, 9])
def test_less_than_consistent_protocols(n: int, val: int):
g = LessThanConstant(n, val)
assert_decompose_is_consistent_with_t_complexity(g)
u = cirq.unitary(g)
np.testing.assert_allclose((u @ u), np.eye((2 ** (n + 1))))
assert_valid_bloq_decomposition(g) |
def is_private_netaddress(host: str) -> bool:
if (str(host) in ('localhost', 'localhost.')):
return True
if ((host[0] == '[') and (host[(- 1)] == ']')):
host = host[1:(- 1)]
try:
ip_addr = ipaddress.ip_address(host)
return ip_addr.is_private
except ValueError:
pass
return False
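# Hedged sanity checks (not from the source):
assert is_private_netaddress('localhost')
assert is_private_netaddress('[::1]')
assert is_private_netaddress('192.168.1.10')
assert not is_private_netaddress('8.8.8.8') |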
class MultiItr(object):
def __init__(self, itr):
self.itr = itr
self._counts = [0 for x in itr]
def __len__(self):
return sum((len(itr) for itr in self.itr))
def __iter__(self):
return self
def __next__(self):
ratios = [(count / len(itr)) for (count, itr) in zip(self._counts, self.itr)]
idx = ratios.index(min(ratios))
self._counts[idx] += 1
return next(self.itr[idx])
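# Hedged usage sketch (not from the source): MultiItr expects items that
# support both len() and next(), so wrap plain sequences in a tiny adapter;
# it then draws from whichever source is proportionally least consumed.
class _SizedIter:
    def __init__(self, seq):
        (self._n, self._it) = (len(seq), iter(seq))
    def __len__(self):
        return self._n
    def __next__(self):
        return next(self._it)
merged = MultiItr([_SizedIter('abc'), _SizedIter([1, 2, 3, 4, 5, 6])])
assert [next(merged) for _ in range(6)] == ['a', 1, 2, 'b', 3, 4] |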
def test_update_matrix_world():
root = WorldObject()
root.local.position = ((- 5), 8, 0)
root.local.rotation = la.quat_from_euler(((pi / 4), 0, 0))
child1 = WorldObject()
child1.local.position = (0, 0, 5)
root.add(child1)
child2 = WorldObject()
child2.local.rotation = la.quat_from_euler((0, ((- pi) / 4), 0))
child1.add(child2)
expected = (((root.local @ child1.local) @ child2.local) @ la.vec_homogeneous((10, 10, 10)))
actual = (child2.world @ la.vec_homogeneous((10, 10, 10)))
assert np.allclose(actual, expected) |
class ExtraDiscordTokenSettings(BaseModel):
pings_for_bot_description: ClassVar[str] = 'A sequence. Who should be pinged if the token found belongs to a bot.'
pings_for_user_description: ClassVar[str] = 'A sequence. Who should be pinged if the token found belongs to a user.'
pings_for_bot: set[str] = Field(default_factory=set)
pings_for_user: set[str] = Field(default_factory=(lambda : {'Moderators'})) |
def test(oracle_file):
symexec = SimpleSymExec(ARCH.X86_64)
symexec.initialize_register('rip', RIP_ADDR)
symexec.initialize_register('rsp', RSP_ADDR)
symexec.execute_blob(blob, RIP_ADDR)
rax = symexec.get_register_ast('rax')
ltm = InputOutputOracleLevelDB.load(oracle_file)
synthesizer = TopDownSynthesizer(ltm)
(synt_rax, simp) = synthesizer.synthesize(rax)
print(f'simplified: {simp}')
print(f'synthesized expression: {synt_rax.pp_str}')
(sz, nsz) = (rax.node_count, synt_rax.node_count)
print(f'''size: {rax.node_count} -> {synt_rax.node_count}
size reduction:{(((sz - nsz) * 100) / sz):.2f}%''')
return (symexec, rax, synt_rax) |
class Constant(BaseModel):
def get_fundamental_variables(self):
eps_dict = {}
depsdt_dict = {}
for domain in self.options.whole_cell_domains:
eps_dict[domain] = self.param.domain_params[domain.split()[0]].epsilon_init
depsdt_dict[domain] = pybamm.FullBroadcast(0, domain, 'current collector')
variables = self._get_standard_porosity_variables(eps_dict)
variables.update(self._get_standard_porosity_change_variables(depsdt_dict))
return variables
def set_events(self, variables):
pass |
class CurrentUserEmailManager(RetrieveMixin, CreateMixin, DeleteMixin, RESTManager):
_path = '/user/emails'
_obj_cls = CurrentUserEmail
_create_attrs = RequiredOptional(required=('email',))
def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> CurrentUserEmail:
return cast(CurrentUserEmail, super().get(id=id, lazy=lazy, **kwargs)) |
class EncodedFastaDataset(FastaDataset):
def __init__(self, path, dictionary):
super().__init__(path, cache_indices=True)
self.dictionary = dictionary
def __getitem__(self, idx):
(desc, seq) = super().__getitem__(idx)
return self.dictionary.encode_line(seq, line_tokenizer=list).long() |
@dataclass
class ModelArguments():
model_name_or_path: str = field(default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'}) |
class SockTourney(BaseModel):
id: Optional[int] = None
guild_id: int
name: str = 'Quotient-Tourney'
registration_channel_id: int
confirm_channel_id: int
role_id: int
required_mentions: int = 4
total_slots: int
banned_users: List[int]
host_id: int
multiregister: bool = False
open_role_id: Optional[int] = None
teamname_compulsion: bool = False
ping_role_id: Optional[int] = None
no_duplicate_name: bool = True
autodelete_rejected: bool = True
success_message: Optional[str] = None |
def test_fileformatyaml_pass_with_encoding_ut32(fs):
in_path = './tests/testfiles/testsubst.yaml'
fs.create_file(in_path, contents='key: "{k1}value1 !$%# *"\n"key2{k2}": blah\n# there is a comment here\nkey3:\n- l1\n # and another\n- \'!$% * {k3}\'\n- l2\n- - l31{k4}\n - l32:\n - l321\n - l322{k5}\n', encoding='utf-32')
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'enc': 'utf-32', 'pathIn': 'testsubst', 'pathOut': 'outsubst', 'fileFormatYaml': {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml', 'encoding': '{enc}'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 9), 'context should have 9 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml', 'encoding': '{enc}'})
with open('./tests/testfiles/out/outsubst.yaml', encoding='utf-32') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !$%# *', 'key2v2': 'blah', 'key3': ['l1', '!$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected) |
def _super_impl(ctx: CallContext) -> Value:
typ = ctx.vars['type']
obj = ctx.vars['obj']
if (typ is _NO_ARG_SENTINEL):
if ctx.visitor.in_comprehension_body:
ctx.show_error('Zero-argument super() does not work inside a comprehension', ErrorCode.bad_super_call)
elif ctx.visitor.scopes.is_nested_function():
ctx.show_error('Zero-argument super() does not work inside a nested function', ErrorCode.bad_super_call)
current_class = ctx.visitor.asynq_checker.current_class
if (current_class is not None):
try:
first_arg = ctx.visitor.scopes.get('%first_arg', None, ctx.visitor.state)
except KeyError:
ctx.show_error('failed to find %first_arg', ErrorCode.bad_super_call)
return AnyValue(AnySource.error)
else:
if (isinstance(first_arg, SubclassValue) and isinstance(first_arg.typ, TypedValue)):
return KnownValue(super(current_class, first_arg.typ.typ))
elif isinstance(first_arg, KnownValue):
return KnownValue(super(current_class, first_arg.val))
elif isinstance(first_arg, TypedValue):
return TypedValue(super(current_class, first_arg.typ))
else:
return AnyValue(AnySource.inference)
return AnyValue(AnySource.inference)
if isinstance(typ, KnownValue):
if inspect.isclass(typ.val):
cls = typ.val
else:
ctx.show_error('First argument to super must be a class', ErrorCode.bad_super_call)
return AnyValue(AnySource.error)
else:
return AnyValue(AnySource.inference)
if (isinstance(obj, TypedValue) and (obj.typ is not type)):
tobj = obj.get_type_object(ctx.visitor)
is_value = True
elif (isinstance(obj, SubclassValue) and isinstance(obj.typ, TypedValue)):
tobj = obj.typ.get_type_object(ctx.visitor)
is_value = False
else:
return AnyValue(AnySource.inference)
if (not tobj.is_assignable_to_type(cls)):
ctx.show_error('Incompatible arguments to super', ErrorCode.bad_super_call)
current_class = ctx.visitor.asynq_checker.current_class
if ((current_class is not None) and (cls is not current_class)):
ctx.show_error('First argument to super() is not the current class', ErrorCode.bad_super_call)
if isinstance(tobj.typ, str):
return AnyValue(AnySource.inference)
try:
super_val = super(cls, tobj.typ)
except Exception:
ctx.show_error('Bad arguments to super', ErrorCode.bad_super_call)
return AnyValue(AnySource.error)
if is_value:
return TypedValue(super_val)
else:
return KnownValue(super_val) |
@attr.s
class Config(StoredObject):
payment_basepoint = attr.ib(type=OnlyPubkeyKeypair, converter=json_to_keypair)
multisig_key = attr.ib(type=OnlyPubkeyKeypair, converter=json_to_keypair)
htlc_basepoint = attr.ib(type=OnlyPubkeyKeypair, converter=json_to_keypair)
delayed_basepoint = attr.ib(type=OnlyPubkeyKeypair, converter=json_to_keypair)
revocation_basepoint = attr.ib(type=OnlyPubkeyKeypair, converter=json_to_keypair)
to_self_delay = attr.ib(type=int)
dust_limit_sat = attr.ib(type=int)
max_htlc_value_in_flight_msat = attr.ib(type=int)
max_accepted_htlcs = attr.ib(type=int)
initial_msat = attr.ib(type=int)
reserve_sat = attr.ib(type=int)
htlc_minimum_msat = attr.ib(type=int)
def validate_params(self, *, funding_sat: int) -> None:
conf_name = type(self).__name__
for key in (self.payment_basepoint, self.multisig_key, self.htlc_basepoint, self.delayed_basepoint, self.revocation_basepoint):
if (not ((len(key.pubkey) == 33) and ecc.ECPubkey.is_pubkey_bytes(key.pubkey))):
raise Exception(f'{conf_name}. invalid pubkey in channel config')
if (self.reserve_sat < self.dust_limit_sat):
raise Exception(f'{conf_name}. MUST set channel_reserve_satoshis greater than or equal to dust_limit_satoshis')
if (self.dust_limit_sat < bitcoin.DUST_LIMIT_DEFAULT_SAT_LEGACY):
raise Exception(f'{conf_name}. dust limit too low: {self.dust_limit_sat} sat')
if (self.reserve_sat > (funding_sat // 100)):
raise Exception(f'{conf_name}. reserve too high: {self.reserve_sat}, funding_sat: {funding_sat}')
if (self.htlc_minimum_msat > 1000):
raise Exception(f'{conf_name}. htlc_minimum_msat too high: {self.htlc_minimum_msat} msat')
HTLC_MINIMUM_MSAT_MIN = 0
if (self.htlc_minimum_msat < HTLC_MINIMUM_MSAT_MIN):
raise Exception(f'{conf_name}. htlc_minimum_msat too low: {self.htlc_minimum_msat} msat < {HTLC_MINIMUM_MSAT_MIN}')
if (self.max_accepted_htlcs < 1):
raise Exception(f'{conf_name}. max_accepted_htlcs too low: {self.max_accepted_htlcs}')
if (self.max_accepted_htlcs > 483):
raise Exception(f'{conf_name}. max_accepted_htlcs too high: {self.max_accepted_htlcs}')
if (self.to_self_delay > MAXIMUM_REMOTE_TO_SELF_DELAY_ACCEPTED):
raise Exception(f'{conf_name}. to_self_delay too high: {self.to_self_delay} > {MAXIMUM_REMOTE_TO_SELF_DELAY_ACCEPTED}')
if (self.max_htlc_value_in_flight_msat < min((1000 * funding_sat), 100000000)):  # second bound of min() elided in source; 100000000 is an assumption
raise Exception(f'{conf_name}. max_htlc_value_in_flight_msat is too small: {self.max_htlc_value_in_flight_msat}') |
class ClassificationCollator(Collator):
def __init__(self, conf, label_size):
super(ClassificationCollator, self).__init__(conf.device)
self.classification_type = conf.task_info.label_type
min_seq = 1
if (conf.model_name == 'TextCNN'):
min_seq = conf.TextCNN.top_k_max_pooling
elif (conf.model_name == 'DPCNN'):
min_seq = (conf.DPCNN.kernel_size * (2 ** conf.DPCNN.blocks))
elif (conf.model_name == 'RegionEmbedding'):
min_seq = conf.feature.max_token_len
self.min_token_max_len = min_seq
self.min_char_max_len = min_seq
self.label_size = label_size
def _get_multi_hot_label(self, doc_labels):
batch_size = len(doc_labels)
max_label_num = max([len(x) for x in doc_labels])
doc_labels_extend = [[doc_labels[i][0] for x in range(max_label_num)] for i in range(batch_size)]
for i in range(0, batch_size):
doc_labels_extend[i][0:len(doc_labels[i])] = doc_labels[i]
y = torch.Tensor(doc_labels_extend).long()
y_onehot = torch.zeros(batch_size, self.label_size).scatter_(1, y, 1)
return y_onehot
def _append_label(self, doc_labels, sample):
if (self.classification_type == ClassificationType.SINGLE_LABEL):
assert (len(sample[cDataset.DOC_LABEL]) == 1)
doc_labels.extend(sample[cDataset.DOC_LABEL])
elif (self.classification_type == ClassificationType.MULTI_LABEL):
doc_labels.append(sample[cDataset.DOC_LABEL])
else:
raise TypeError(('Unsupported classification type: %s. Supported classification type is: %s' % (self.classification_type, ClassificationType.str())))
def __call__(self, batch):
def _append_vocab(ori_vocabs, vocabs, max_len):
padding = ([cDataset.VOCAB_PADDING] * (max_len - len(ori_vocabs)))
vocabs.append((ori_vocabs + padding))
doc_labels = []
doc_token = []
doc_char = []
doc_char_in_token = []
doc_token_len = []
doc_char_len = []
doc_char_in_token_len = []
doc_token_max_len = self.min_token_max_len
doc_char_max_len = self.min_char_max_len
doc_char_in_token_max_len = 0
for (_, value) in enumerate(batch):
doc_token_max_len = max(doc_token_max_len, len(value[cDataset.DOC_TOKEN]))
doc_char_max_len = max(doc_char_max_len, len(value[cDataset.DOC_CHAR]))
for char_in_token in value[cDataset.DOC_CHAR_IN_TOKEN]:
doc_char_in_token_max_len = max(doc_char_in_token_max_len, len(char_in_token))
for (_, value) in enumerate(batch):
self._append_label(doc_labels, value)
_append_vocab(value[cDataset.DOC_TOKEN], doc_token, doc_token_max_len)
doc_token_len.append(len(value[cDataset.DOC_TOKEN]))
_append_vocab(value[cDataset.DOC_CHAR], doc_char, doc_char_max_len)
doc_char_len.append(len(value[cDataset.DOC_CHAR]))
doc_char_in_token_len_tmp = []
for char_in_token in value[cDataset.DOC_CHAR_IN_TOKEN]:
_append_vocab(char_in_token, doc_char_in_token, doc_char_in_token_max_len)
doc_char_in_token_len_tmp.append(len(char_in_token))
padding = ([cDataset.VOCAB_PADDING] * doc_char_in_token_max_len)
for _ in range(len(value[cDataset.DOC_CHAR_IN_TOKEN]), doc_token_max_len):
doc_char_in_token.append(padding)
doc_char_in_token_len_tmp.append(0)
doc_char_in_token_len.append(doc_char_in_token_len_tmp)
if (self.classification_type == ClassificationType.SINGLE_LABEL):
tensor_doc_labels = torch.tensor(doc_labels)
doc_label_list = [[x] for x in doc_labels]
elif (self.classification_type == ClassificationType.MULTI_LABEL):
tensor_doc_labels = self._get_multi_hot_label(doc_labels)
doc_label_list = doc_labels
batch_map = {cDataset.DOC_LABEL: tensor_doc_labels, cDataset.DOC_LABEL_LIST: doc_label_list, cDataset.DOC_TOKEN: torch.tensor(doc_token), cDataset.DOC_CHAR: torch.tensor(doc_char), cDataset.DOC_CHAR_IN_TOKEN: torch.tensor(doc_char_in_token), cDataset.DOC_TOKEN_MASK: torch.tensor(doc_token).gt(0).float(), cDataset.DOC_CHAR_MASK: torch.tensor(doc_char).gt(0).float(), cDataset.DOC_CHAR_IN_TOKEN_MASK: torch.tensor(doc_char_in_token).gt(0).float(), cDataset.DOC_TOKEN_LEN: torch.tensor(doc_token_len, dtype=torch.float32), cDataset.DOC_CHAR_LEN: torch.tensor(doc_char_len, dtype=torch.float32), cDataset.DOC_CHAR_IN_TOKEN_LEN: torch.tensor(doc_char_in_token_len, dtype=torch.float32), cDataset.DOC_TOKEN_MAX_LEN: torch.tensor([doc_token_max_len], dtype=torch.float32), cDataset.DOC_CHAR_MAX_LEN: torch.tensor([doc_char_max_len], dtype=torch.float32), cDataset.DOC_CHAR_IN_TOKEN_MAX_LEN: torch.tensor([doc_char_in_token_max_len], dtype=torch.float32)}
return batch_map |
@then('the image is {px_height_str} pixels high')
def then_image_is_cx_pixels_high(context, px_height_str):
expected_px_height = int(px_height_str)
px_height = context.image.px_height
assert (px_height == expected_px_height), ('expected pixel height %d, got %d' % (expected_px_height, px_height)) |
@pytest.mark.parametrize('untied', [True, False])
def test_RanksComparator_r2_score(untied):
rank0 = agg.RankResult('test', ['a', 'b'], [1, 1], {})
rank1 = agg.RankResult('test', ['a', 'b'], [1, 1], {})
r2 = ranks_cmp.mkrank_cmp(rank0, rank1).r2_score(untied=untied)
expected = pd.DataFrame.from_dict({'test_1': {'test_1': 1.0, 'test_2': 1.0}, 'test_2': {'test_1': 1.0, 'test_2': 1.0}})
expected.columns.name = 'Method'
expected.index.name = 'Method'
pd.testing.assert_frame_equal(r2, expected) |
class SPPLayer(nn.Module):
def __init__(self, pool_size, pool=nn.MaxPool2d):
super(SPPLayer, self).__init__()
self.pool_size = pool_size
self.pool = pool
self.out_length = np.sum((np.array(self.pool_size) ** 2))
def forward(self, x):
(B, C, H, W) = x.size()
for i in range(len(self.pool_size)):
h_wid = int(math.ceil((H / self.pool_size[i])))
w_wid = int(math.ceil((W / self.pool_size[i])))
h_pad = int(((((h_wid * self.pool_size[i]) - H) + 1) / 2))
w_pad = int(((((w_wid * self.pool_size[i]) - W) + 1) / 2))
out = self.pool((h_wid, w_wid), stride=(h_wid, w_wid), padding=(h_pad, w_pad))(x)
if (i == 0):
spp = out.view(B, (- 1))
else:
spp = torch.cat([spp, out.view(B, (- 1))], dim=1)
return spp
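# Hedged usage sketch (not from the source): with pool_size [1, 2, 4] every
# channel is pooled into 1 + 4 + 16 = 21 values regardless of input H and W.
spp = SPPLayer([1, 2, 4])
spp_out = spp(torch.randn(2, 8, 13, 13))
assert spp_out.shape == (2, 8 * 21) |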
def get_model(n_frames, n_mels, n_conditions, lr):
sub_model = MobileNetV2(input_shape=(n_frames, n_mels, 3), alpha=0.5, weights=None, classes=n_conditions)
x = Input(shape=(n_frames, n_mels, 1))
h = x
h = Concatenate()([h, h, h])
h = sub_model(h)
model = Model(x, h)
model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
return model |
class TestStreamsClosedByEndStream(object):
example_request_headers = [(':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET')]
example_response_headers = [(':status', '200'), ('server', 'fake-serv/0.1.0')]
server_config = h2.config.H2Configuration(client_side=False)
@pytest.mark.parametrize('frame', [(lambda self, ff: ff.build_headers_frame(self.example_request_headers, flags=['END_STREAM'])), (lambda self, ff: ff.build_headers_frame(self.example_request_headers))])
@pytest.mark.parametrize('clear_streams', [True, False])
def test_frames_after_recv_end_will_error(self, frame_factory, frame, clear_streams):
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.initiate_connection()
f = frame_factory.build_headers_frame(self.example_request_headers, flags=['END_STREAM'])
c.receive_data(f.serialize())
c.send_headers(stream_id=1, headers=self.example_response_headers, end_stream=True)
if clear_streams:
c.open_inbound_streams
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(f.serialize())
f = frame_factory.build_goaway_frame(last_stream_id=1, error_code=h2.errors.ErrorCodes.STREAM_CLOSED)
assert (c.data_to_send() == f.serialize())
@pytest.mark.parametrize('frame', [(lambda self, ff: ff.build_headers_frame(self.example_response_headers, flags=['END_STREAM'])), (lambda self, ff: ff.build_headers_frame(self.example_response_headers))])
@pytest.mark.parametrize('clear_streams', [True, False])
def test_frames_after_send_end_will_error(self, frame_factory, frame, clear_streams):
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers, end_stream=True)
f = frame_factory.build_headers_frame(self.example_response_headers, flags=['END_STREAM'])
c.receive_data(f.serialize())
if clear_streams:
c.open_outbound_streams
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(f.serialize())
f = frame_factory.build_goaway_frame(last_stream_id=0, error_code=h2.errors.ErrorCodes.STREAM_CLOSED)
assert (c.data_to_send() == f.serialize())
@pytest.mark.parametrize('frame', [(lambda self, ff: ff.build_window_update_frame(1, 1)), (lambda self, ff: ff.build_rst_stream_frame(1))])
def test_frames_after_send_end_will_be_ignored(self, frame_factory, frame):
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.initiate_connection()
f = frame_factory.build_headers_frame(self.example_request_headers, flags=['END_STREAM'])
c.receive_data(f.serialize())
c.send_headers(stream_id=1, headers=self.example_response_headers, end_stream=True)
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
events = c.receive_data(f.serialize())
assert (not events) |
class SentryBaseplateObserver(BaseplateObserver):
def on_server_span_created(self, context: RequestContext, server_span: Span) -> None:
sentry_hub = sentry_sdk.Hub.current
observer = _SentryServerSpanObserver(sentry_hub, server_span)
server_span.register(observer)
context.sentry = sentry_hub |
def values(m, *, sol=(- 1)):
if isinstance(m, Variable):
return value(m, sol=sol)
if isinstance(m, (list, tuple, set, frozenset, types.GeneratorType)):
g = [values(v, sol=sol) for v in m]
return (ListInt(g) if ((len(g) > 0) and (isinstance(g[0], (int, ListInt)) or (g[0] == ANY))) else g) |
class ArrayField(Field, list):
def __init__(self, field: Field, **kwargs) -> None:
super().__init__(**kwargs)
self.sub_field = field
self.SQL_TYPE = ('%s[]' % field.SQL_TYPE)
def to_python_value(self, value: Any) -> Any:
return list(map(self.sub_field.to_python_value, value))
def to_db_value(self, value: Any, instance: Any) -> Any:
return [self.sub_field.to_db_value(val, instance) for val in value] |
@pytest.mark.requires_internet
def test_scripts_no_environment(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
project_name = 'My.App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
project_path = (temp_dir / 'my-app')
data_path = (temp_dir / 'data')
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config['tool']['hatch']['scripts'] = {'py': 'python -c {args}'}
project.save_config(config)
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch('run', ':py', "import pathlib,sys;pathlib.Path('test.txt').write_text(sys.executable)")
assert (result.exit_code == 0), result.output
assert (result.output == helpers.dedent('\n Checking dependencies\n '))
output_file = (project_path / 'test.txt')
assert output_file.is_file()
env_data_path = ((data_path / 'env') / 'virtual')
assert (not env_data_path.exists())
assert (os.path.realpath(output_file.read_text().strip()).lower() == os.path.realpath(sys.executable).lower()) |
class CssSmartyLexer(DelegatingLexer):
name = 'CSS+Smarty'
aliases = ['css+smarty']
version_added = ''
alias_filenames = ['*.css', '*.tpl']
mimetypes = ['text/css+smarty']
url = '
def __init__(self, **options):
super().__init__(CssLexer, SmartyLexer, **options)
def analyse_text(text):
return (SmartyLexer.analyse_text(text) - 0.05) |
def test_dir_level1(fixture_path, capsys):
result = fixture_path.runpytest('-v', '--order-scope-level=1')
result.assert_outcomes(passed=12, failed=0)
result.stdout.fnmatch_lines(['feature0/test_b.py::test_one PASSED', 'feature0/test_b.py::test_two PASSED', 'feature0/test_a.py::test_three PASSED', 'feature0/test_a.py::test_four PASSED', 'feature1/test_b.py::test_one PASSED', 'feature1/test_b.py::test_two PASSED', 'feature1/test_a.py::test_three PASSED', 'feature1/test_a.py::test_four PASSED', 'feature2/test_b.py::test_one PASSED', 'feature2/test_b.py::test_two PASSED', 'feature2/test_a.py::test_three PASSED', 'feature2/test_a.py::test_four PASSED']) |
def binarize(databin_dir, direction, spm_vocab=SPM_VOCAB, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None):
def move_databin_files(from_folder, to_folder):
for bin_file in ((glob.glob(f'{from_folder}/*.bin') + glob.glob(f'{from_folder}/*.idx')) + glob.glob(f'{from_folder}/dict*')):
try:
shutil.move(bin_file, to_folder)
except OSError as error:
print(error)
bpe_databin_dir = f'{BPE_DIR}/{direction}{prefix}_databin'
bpe_dir = f'{BPE_DIR}/{direction}{prefix}'
if (pairs_per_shard is None):
binarize_(bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=splits)
move_databin_files(bpe_databin_dir, databin_dir)
else:
binarize_(bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=[s for s in splits if (s != 'train')])
for shard_bpe_dir in glob.glob(f'{bpe_dir}/shard*'):
path_strs = os.path.split(shard_bpe_dir)
shard_str = path_strs[(- 1)]
shard_folder = f'{bpe_databin_dir}/{shard_str}'
databin_shard_folder = f'{databin_dir}/{shard_str}'
print(f'working from {shard_folder} to {databin_shard_folder}')
os.makedirs(databin_shard_folder, exist_ok=True)
binarize_(shard_bpe_dir, shard_folder, direction, spm_vocab=spm_vocab, splits=['train'])
for test_data in (glob.glob(f'{bpe_databin_dir}/valid.*') + glob.glob(f'{bpe_databin_dir}/test.*')):
filename = os.path.split(test_data)[(- 1)]
try:
os.symlink(test_data, f'{databin_shard_folder}/{filename}')
except OSError as error:
print(error)
move_databin_files(shard_folder, databin_shard_folder) |
@patch('pypyr.retries.random.uniform', return_value=999)
def test_retries_linearjitter_jrc_down(mock_random):
lj = pypyr.retries.linearjitter(sleep=3, jrc=0.5)
assert (lj(1) == 999)
assert (lj(2) == 999)
assert (lj(3) == 999)
assert (lj(1) == 999)
assert (mock_random.mock_calls == [call(1.5, 3), call(3, 6), call(4.5, 9), call(1.5, 3)]) |
def test_fixture_disallow_marks_on_fixtures():
with pytest.warns(pytest.PytestRemovedIn9Warning, match='Marks applied to fixtures have no effect') as record:
@pytest.mark.parametrize('example', ['hello'])
@pytest.mark.usefixtures('tmp_path')
@pytest.fixture
def foo():
raise NotImplementedError()
assert (len(record) == 2) |
def attempt_load(weights, map_location=None, inplace=True, fuse=True):
from models.yolo import Detect, Model
model = Ensemble()
for w in (weights if isinstance(weights, list) else [weights]):
ckpt = torch.load(attempt_download(w), map_location=map_location)
if fuse:
model.append(ckpt[('ema' if ckpt.get('ema') else 'model')].float().fuse().eval())
else:
model.append(ckpt[('ema' if ckpt.get('ema') else 'model')].float().eval())
for m in model.modules():
if (type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]):
m.inplace = inplace
if (type(m) is Detect):
if (not isinstance(m.anchor_grid, list)):
delattr(m, 'anchor_grid')
setattr(m, 'anchor_grid', ([torch.zeros(1)] * m.nl))
elif (type(m) is Conv):
m._non_persistent_buffers_set = set()
if (len(model) == 1):
return model[(- 1)]
else:
print(f'''Ensemble created with {weights}
''')
for k in ['names']:
setattr(model, k, getattr(model[(- 1)], k))
model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride
return model |
class NetworkTests(util.TestCase):
signed_cla = 'brettcannon'
not_signed_cla = 'the-knights-who-say-ni'
def setUp(self):
self.bpo = bpo.Host(util.FakeServerHost())
self.loop = asyncio.get_event_loop()
self.session = SessionOnDemand(self.loop)
def test_signed(self):
result = self.run_awaitable(self.bpo.problems(self.session, [self.signed_cla]), loop=self.loop)
self.assertEqual(result, {})
def test_not_signed(self):
usernames = [self.signed_cla, self.not_signed_cla]
result = self.run_awaitable(self.bpo.problems(self.session, usernames), loop=self.loop)
self.assertEqual(result, {ni_abc.Status.not_signed: {self.not_signed_cla}})
def test_missing_username(self):
username_not_found = 'fdsfdsdooisadfsadnfasdfdsf'
usernames = [self.signed_cla, username_not_found]
result = self.run_awaitable(self.bpo.problems(self.session, usernames), loop=self.loop)
self.assertEqual(result, {ni_abc.Status.username_not_found: {username_not_found}}) |
def get_stars(repository_ids):
if (not repository_ids):
return {}
tuples = Star.select(Star.repository, fn.Count(Star.id)).where((Star.repository << repository_ids)).group_by(Star.repository).tuples()
star_map = {}
for record in tuples:
star_map[record[0]] = record[1]
return star_map |
@app.route('/api/conversations/register_conversation', methods=['POST'])  # route decorator restored; the Flask app object name is assumed
def register_conversation() -> Response:
request_json = request.get_json()
user_id = request_json.pop('user_id', DEFAULT_USER_ID)
conversation = request_json.get('conversation', None)
if conversation:
try:
db = get_user_conversation_storage()
conversation_id = conversation['id']
if ((conversation_id is not None) and db.conversation.find_one({'_id': ObjectId(conversation_id)})):
updates = {'name': conversation['name'], 'agent': conversation['agent'], 'prompt': conversation['prompt'], 'temperature': conversation['temperature'], 'folder_id': conversation['folderId'], 'bookmarked_message_ids': conversation.get('bookmarkedMessagesIds', None), 'selected_code_interpreter_plugins': conversation['selectedCodeInterpreterPlugins'], 'selected_plugins': conversation['selectedPlugins']}
db.conversation.update_one({'_id': ObjectId(conversation_id)}, {'$set': updates})
else:
conversation = db.conversation.insert_one({'name': conversation['name'], 'agent': conversation['agent'], 'prompt': conversation['prompt'], 'temperature': conversation['temperature'], 'folder_id': conversation['folderId'], 'bookmarked_message_ids': conversation.get('bookmarkedMessagesIds', None), 'hashed_api_key': '', 'user_id': user_id, 'selected_code_interpreter_plugins': conversation['selectedCodeInterpreterPlugins'], 'selected_plugins': conversation['selectedPlugins'], 'timestamp': datetime.datetime.utcnow()})
conversation_id = str(conversation.inserted_id)
return jsonify({'id': conversation_id})
except Exception as e:
return Response(response=None, status=f'{INTERNAL} error register conversation')
else:
return Response(response=None, status=f'{UNFOUND} missing conversation') |
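# Minimal client-side sketch for the endpoint above, assuming the Flask app listens
# on localhost:5000 (the host, port and field values are illustrative only).
import requests

payload = {'conversation': {'id': None, 'name': 'demo', 'agent': 'default', 'prompt': '', 'temperature': 0.7, 'folderId': None, 'selectedCodeInterpreterPlugins': [], 'selectedPlugins': []}}
resp = requests.post('http://localhost:5000/api/conversations/register_conversation', json=payload)
print(resp.json())  # {'id': '<ObjectId of the inserted conversation>'} |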
def download_cached_file(url, check_hash=True, progress=False):
if isinstance(url, (list, tuple)):
(url, filename) = url
else:
parts = urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(get_cache_dir(), filename)
if (not os.path.exists(cached_file)):
_logger.info('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = None
if check_hash:
r = HASH_REGEX.search(filename)
hash_prefix = (r.group(1) if r else None)
download_url_to_file(url, cached_file, hash_prefix, progress=progress)
return cached_file |
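# Usage sketch for download_cached_file (the URLs are illustrative). A bare URL
# derives the cache filename from the URL path; a (url, filename) pair overrides it.
path = download_cached_file('https://example.com/model-abc123.pth', progress=True)
path = download_cached_file(('https://example.com/m.pth', 'renamed.pth'), check_hash=False) |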
class VibrationalOp(SparseLabelOp):
_OPERATION_REGEX = re.compile('([\\+\\-]_\\d+_\\d+\\s)*[\\+\\-]_\\d+_\\d+(?!\\s)')
def __init__(self, data: Mapping[(str, _TCoeff)], num_modals: (Sequence[int] | None)=None, *, copy: bool=True, validate: bool=True) -> None:
self.num_modals = num_modals
super().__init__(data, copy=copy, validate=validate)
@property
def num_modals(self) -> (Sequence[int] | None):
return self._num_modals
@num_modals.setter
def num_modals(self, num_modals: (Sequence[int] | None)):
self._num_modals = (list(num_modals) if (num_modals is not None) else None)
@property
def register_length(self) -> int:
if (self._num_modals is None):
num_modals: list[int] = []
for key in self._data:
for term in key.split():
(_, mode_index_str, modal_index_str) = term.split('_')
mode_index = int(mode_index_str)
modal_index = int(modal_index_str)
if ((mode_index + 1) > len(num_modals)):
num_modals += ([0] * ((mode_index + 1) - len(num_modals)))
if (modal_index > (num_modals[mode_index] - 1)):
num_modals[mode_index] = (modal_index + 1)
return sum(num_modals)
return sum(self.num_modals)
def _new_instance(self, data: Mapping[(str, _TCoeff)], *, other: (VibrationalOp | None)=None) -> VibrationalOp:
num_modals = self.num_modals
if (other is not None):
other_num_modals = other.num_modals
def pad_to_length(a, b):
if (len(a) < len(b)):
(a, b) = (b, a)
return (a, (b + ([0] * (len(a) - len(b)))))
def elementwise_max(a, b):
return [max(i, j) for (i, j) in zip(*pad_to_length(a, b))]
if ((num_modals is not None) and (other_num_modals is not None)):
num_modals = elementwise_max(num_modals, other_num_modals)
return self.__class__(data, copy=False, num_modals=num_modals)
def _validate_keys(self, keys: Collection[str]) -> None:
super()._validate_keys(keys)
num_modals = (self._num_modals if (self._num_modals is not None) else [])
for key in keys:
if (key == ''):
continue
if (not re.fullmatch(VibrationalOp._OPERATION_REGEX, key)):
raise QiskitNatureError(f'{key} is not a valid VibrationalOp label.')
coeff_labels_split = key.split()
for label in coeff_labels_split:
(_, mode_index_str, modal_index_str) = label.split('_')
mode_index = int(mode_index_str)
modal_index = int(modal_index_str)
if ((mode_index + 1) > len(num_modals)):
num_modals += ([0] * ((mode_index + 1) - len(num_modals)))
if (modal_index > (num_modals[mode_index] - 1)):
num_modals[mode_index] = (modal_index + 1)
self.num_modals = num_modals
@classmethod
def _validate_polynomial_tensor_key(cls, keys: Collection[str]) -> None:
allowed = re.compile('(_\\+\\-)*')
for key in keys:
if (not re.fullmatch(allowed, key)):
raise QiskitNatureError(f"The key '{key}' is invalid. PolynomialTensor keys must be multiples of the '_+-' character sequence, for them to be expandable into a VibrationalOp.")
@classmethod
def from_polynomial_tensor(cls, tensor: PolynomialTensor) -> VibrationalOp:
cls._validate_polynomial_tensor_key(tensor.keys())
data: dict[(str, _TCoeff)] = {}
def _reshape_index(index):
new_index = []
for idx in range(0, len(index), 3):
new_index.extend([index[idx], index[(idx + 1)], index[idx], index[(idx + 2)]])
return new_index
for key in tensor:
if (key == ''):
data[''] = tensor[key].item()
continue
mat = tensor[key]
label_template = mat.label_template.format(*key.replace('_', ''))
for (value, index) in mat.coord_iter():
data[label_template.format(*_reshape_index(index))] = value
return cls(data)
def __repr__(self) -> str:
data_str = f'{dict(self.items())}'
return f'VibrationalOp({data_str}, num_modals={self.num_modals}, )'
def __str__(self) -> str:
pre = f'''Vibrational Operator
number modes={len(self.num_modals)}, number modals={self.num_modals}, number terms={len(self)}
'''
ret = (' ' + '\n+ '.join([(f'{coeff} * ( {label} )' if label else f'{coeff}') for (label, coeff) in self.items()]))
return (pre + ret)
def terms(self) -> Iterator[tuple[(list[tuple[(str, int)]], _TCoeff)]]:
num_modals = self.num_modals
partial_sum_modals = ([0] + list(itertools.accumulate(num_modals, operator.add)))
for label in iter(self):
if (not label):
(yield ([], self[label]))
continue
terms = [self._build_register_label(lbl, partial_sum_modals) for lbl in label.split()]
(yield (terms, self[label]))
@classmethod
def from_terms(cls, terms: Sequence[tuple[(list[tuple[(str, int)]], _TCoeff)]]) -> VibrationalOp:
raise NotImplementedError()
def _permute_term(self, term: list[tuple[(str, int)]], permutation: Sequence[int]) -> list[tuple[(str, int)]]:
raise NotImplementedError()
def _build_register_label(self, label: str, partial_sum_modals: list[int]) -> tuple[(str, int)]:
(op, mode_index, modal_index) = label.split('_')
index = (partial_sum_modals[int(mode_index)] + int(modal_index))
return (op, index)
def compose(self, other: VibrationalOp, qargs=None, front: bool=False) -> VibrationalOp:
if (not isinstance(other, VibrationalOp)):
raise TypeError(f"Unsupported operand type(s) for *: 'VibrationalOp' and '{type(other).__name__}'")
if front:
return self._tensor(self, other, offset=False)
else:
return self._tensor(other, self, offset=False)
def tensor(self, other: VibrationalOp) -> VibrationalOp:
return self._tensor(self, other)
def expand(self, other: VibrationalOp) -> VibrationalOp:
return self._tensor(other, self)
@classmethod
def _tensor(cls, a: VibrationalOp, b: VibrationalOp, *, offset: bool=True) -> VibrationalOp:
shift = (len(a.num_modals) if offset else 0)
new_data: dict[(str, _TCoeff)] = {}
for (a_labels, a_coeff) in a.items():
for (b_labels, b_coeff) in b.items():
if (b_labels == ''):
new_label = a_labels
else:
b_terms = [lbl.split('_') for lbl in b_labels.split()]
new_b_label = ' '.join((f'{op}_{(int(i) + shift)}_{j}' for (op, i, j) in b_terms))
new_label = f'{a_labels} {new_b_label}'.strip()
if (new_label in new_data):
new_data[new_label] += (a_coeff * b_coeff)
else:
new_data[new_label] = (a_coeff * b_coeff)
new_op = a._new_instance(new_data, other=b)
if offset:
new_op.num_modals = [*a.num_modals, *b.num_modals]
return new_op
def transpose(self) -> VibrationalOp:
data = {}
trans = ''.maketrans('+-', '-+')
for (label, coeff) in self.items():
data[' '.join((lbl.translate(trans) for lbl in reversed(label.split())))] = coeff
return self._new_instance(data)
def normal_order(self) -> VibrationalOp:
ordered_op = VibrationalOp.zero()
for (label, coeff) in self.items():
terms = []
for lbl in label.split():
(char, mode, modal) = lbl.split('_')
terms.append((char, int(mode), int(modal)))
ordered_op += self._normal_order(terms, coeff)
return self._new_instance({label: coeff for (label, coeff) in ordered_op.items() if (not np.isclose(_to_number(coeff), 0.0, atol=self.atol))})
def _normal_order(self, terms: list[tuple[(str, int, int)]], coeff: _TCoeff) -> VibrationalOp:
if (not terms):
return self._new_instance({'': coeff})
ordered_op = VibrationalOp.zero()
for i in range(1, len(terms)):
for j in range(i, 0, (- 1)):
right = terms[j]
left = terms[(j - 1)]
if ((right[0] == '+') and (left[0] == '-')):
terms[(j - 1)] = right
terms[j] = left
if ((right[1] == left[1]) and (right[2] == left[2])):
new_terms = (terms[:(j - 1)] + terms[(j + 1):])
ordered_op += self._normal_order(new_terms, coeff)
elif (right[0] == left[0]):
if ((right[1] == left[1]) and (right[2] == left[2])):
return ordered_op
elif ((left[1] > right[1]) or ((left[1] == right[1]) and (left[2] > right[2]))):
terms[(j - 1)] = right
terms[j] = left
new_label = ' '.join((f'{term[0]}_{term[1]}_{term[2]}' for term in terms))
ordered_op += self._new_instance({new_label: coeff})
return ordered_op
def index_order(self) -> VibrationalOp:
data = defaultdict(complex)
for (label, coeff) in self.items():
terms = []
for lbl in label.split():
(char, mode, modal) = lbl.split('_')
terms.append((char, int(mode), int(modal)))
(label, coeff) = self._index_order(terms, coeff)
data[label] += coeff
return self._new_instance({label: coeff for (label, coeff) in data.items() if (not np.isclose(_to_number(coeff), 0.0, atol=self.atol))})
def _index_order(self, terms: list[tuple[(str, int, int)]], coeff: _TCoeff) -> tuple[(str, _TCoeff)]:
if (not terms):
return ('', coeff)
for i in range(1, len(terms)):
for j in range(i, 0, (- 1)):
right = terms[j]
left = terms[(j - 1)]
if ((left[1] > right[1]) or ((left[1] == right[1]) and (left[2] > right[2]))):
terms[(j - 1)] = right
terms[j] = left
new_label = ' '.join((f'{term[0]}_{term[1]}_{term[2]}' for term in terms))
return (new_label, coeff)
def simplify(self, atol: (float | None)=None) -> VibrationalOp:
atol = (self.atol if (atol is None) else atol)
data = defaultdict(complex)
for (label, coeff) in self.items():
(label, coeff) = self._simplify_label(label, coeff)
data[label] += coeff
simplified_data = {label: coeff for (label, coeff) in data.items() if (not np.isclose(_to_number(coeff), 0.0, atol=self.atol))}
return self._new_instance(simplified_data)
def _simplify_label(self, label: str, coeff: _TCoeff) -> tuple[(str, _TCoeff)]:
bits = _BitsContainer[Tuple[(int, int)]]()
new_label: dict[(str, None)] = {}
for lbl in label.split():
(char, mode_index, modal_index) = lbl.split('_')
idx = (int(mode_index), int(modal_index))
char_b = (char == '+')
if (idx not in bits):
bits[idx] = int(f'{char_b:b}{(not char_b):b}{char_b:b}{char_b:b}', base=2)
new_label[lbl] = None
elif (bits.get_last(idx) == char_b):
return ('', 0)
elif (bits.get_plus(idx) and bits.get_minus(idx)):
pop_lbl = f"{('-' if char_b else '+')}_{idx[0]}_{idx[1]}"
new_label.pop(pop_lbl)
bits.set_plus_or_minus(idx, (not char_b), False)
bits.set_last(idx, char_b)
else:
bits.set_plus_or_minus(idx, char_b, True)
new_label[lbl] = None
bits.set_last(idx, char_b)
return (' '.join(new_label), coeff)
@staticmethod
def build_dual_index(num_modals: Sequence[int], index: int) -> str:
for (mode_index, num_modals_per_mode) in enumerate(num_modals):
if (index < num_modals_per_mode):
return f'{mode_index}_{index}'
else:
index -= num_modals_per_mode
raise ValueError('Invalid index: index > sum(num_modals) - 1.') |
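# Usage sketch for VibrationalOp: keys are space-separated '+_<mode>_<modal>' /
# '-_<mode>_<modal>' terms (the coefficients below are illustrative).
op = VibrationalOp({'+_0_0 -_0_0': 1.0, '+_0_1 -_0_1': -1.0}, num_modals=[2])
print(op.register_length)  # 2, i.e. sum(num_modals)
print(op.normal_order())   # reorders '+' operators in front of '-' operators |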
def test_dumping(retort, debug_trail):
retort = retort.replace(debug_trail=debug_trail).extend(recipe=[dumper(int, int_dumper)])
first_dumper = retort.get_dumper(Tuple[(str, str)])
assert (first_dumper(['a', 'b']) == ('a', 'b'))
assert (first_dumper({'a': 1, 'b': 2}) == ('a', 'b'))
assert (first_dumper(['a', 'b']) == ('a', 'b'))
assert (first_dumper(('a', 'b')) == ('a', 'b'))
assert (first_dumper(['1', '2']) == ('1', '2'))
assert (first_dumper({'a': 0, 'b': 0}) == ('a', 'b'))
second_dumper = retort.get_dumper(Tuple[(str, int)])
third_dumper = retort.get_dumper(Tuple[(int, str)])
if (debug_trail == DebugTrail.DISABLE):
raises_exc(TypeError(), (lambda : second_dumper([10, '20'])))
raises_exc(TypeError(), (lambda : third_dumper(['10', 20])))
elif (debug_trail == DebugTrail.FIRST):
raises_exc(with_trail(TypeError(), [0]), (lambda : second_dumper([10, '20'])))
raises_exc(with_trail(TypeError(), [0]), (lambda : third_dumper(['10', 20])))
elif (debug_trail == DebugTrail.ALL):
raises_exc(CompatExceptionGroup('while dumping tuple', [with_trail(TypeError(), [0]), with_trail(TypeError(), [1])]), (lambda : second_dumper([10, '20'])))
raises_exc(CompatExceptionGroup('while dumping tuple', [with_trail(TypeError(), [0]), with_trail(TypeError(), [1])]), (lambda : third_dumper(['10', 20]))) |
@specialize.arg(4)  # decorator restored; RPython's specialize.arg(4) (specializing on 'returns') is assumed from the stripped '.arg(4)'
def hash_iter_ref(ht, n, env, cont, returns):
from pycket.interpreter import return_value, return_multi_vals
try:
(w_key, w_val) = ht.get_item(n)
if (returns == _KEY):
return return_value(w_key, env, cont)
if (returns == _VALUE):
return return_value(w_val, env, cont)
if (returns == _KEY_AND_VALUE):
vals = values.Values._make2(w_key, w_val)
return return_multi_vals(vals, env, cont)
if (returns == _PAIR):
vals = values.W_Cons.make(w_key, w_val)
return return_value(vals, env, cont)
assert False, 'unknown return code'
except KeyError:
raise SchemeException('hash-iterate-key: invalid position')
except IndexError:
raise SchemeException('hash-iterate-key: invalid position') |
def _gui() -> game.GameGui:
from randovania.games.common.prime_family.gui.prime_trilogy_teleporter_details_tab import PrimeTrilogyTeleporterDetailsTab
from randovania.games.prime2 import gui
from randovania.games.prime2.pickup_database import progressive_items
return game.GameGui(tab_provider=gui.prime2_preset_tabs, cosmetic_dialog=gui.EchoesCosmeticPatchesDialog, export_dialog=gui.EchoesGameExportDialog, progressive_item_gui_tuples=progressive_items.tuples(), spoiler_visualizer=(PrimeTrilogyTeleporterDetailsTab, gui.TranslatorGateDetailsTab, gui.PortalDetailsTab, gui.EchoesHintDetailsTab), game_tab=gui.EchoesGameTabWidget) |
def contingency_matrix(ref_labels, sys_labels):
if (ref_labels.ndim != sys_labels.ndim):
raise ValueError(('ref_labels and sys_labels should either both be 1D arrays of labels or both be 2D arrays of one-hot encoded labels: shapes are %r, %r' % (ref_labels.shape, sys_labels.shape)))
if (ref_labels.shape[0] != sys_labels.shape[0]):
raise ValueError(('ref_labels and sys_labels must have same size: received %d and %d' % (ref_labels.shape[0], sys_labels.shape[0])))
if (ref_labels.ndim == 1):
(ref_classes, ref_class_inds) = np.unique(ref_labels, return_inverse=True)
(sys_classes, sys_class_inds) = np.unique(sys_labels, return_inverse=True)
n_frames = ref_labels.size
cm = coo_matrix((np.ones(n_frames), (ref_class_inds, sys_class_inds)), shape=(ref_classes.size, sys_classes.size), dtype=np.int64)  # np.int was removed in NumPy 1.24
cm = cm.toarray()
else:
ref_labels = ref_labels.astype('int64', copy=False)
sys_labels = sys_labels.astype('int64', copy=False)
cm = ref_labels.T.dot(sys_labels)
if issparse(cm):
cm = cm.toarray()
return cm |
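# Usage sketch for contingency_matrix with 1D label arrays (toy data): entry (i, j)
# counts frames labeled as reference class i and system class j.
import numpy as np

ref = np.array([0, 0, 1, 1, 2])
sys_lab = np.array([1, 1, 0, 0, 0])
print(contingency_matrix(ref, sys_lab))
# [[0 2]
#  [2 0]
#  [1 0]] |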
class TestInternAtom(EndianTest):
def setUp(self):
self.req_args_0 = {'name': 'fuzzy_prop', 'only_if_exists': 0}
self.req_bin_0 = b'\x10\x00\x00\x05\x00\n\x00\x00fuzzy_prop\x00\x00'
self.reply_args_0 = {'atom': 696457407, 'sequence_number': 45122}  # atom value recovered from the big-endian bytes 0x298318bf in reply_bin_0
self.reply_bin_0 = b'\x01\x00\xb0B\x00\x00\x00\x00)\x83\x18\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def testPackRequest0(self):
bin = request.InternAtom._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.InternAtom._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0)
def testPackReply0(self):
bin = request.InternAtom._reply.to_binary(*(), **self.reply_args_0)
self.assertBinaryEqual(bin, self.reply_bin_0)
def testUnpackReply0(self):
(args, remain) = request.InternAtom._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.reply_args_0) |
def load_nist_vectors(vector_data):
test_data = {}
data = []
for line in vector_data:
line = line.strip()
if ((not line) or line.startswith('#') or (line.startswith('[') and line.endswith(']'))):
continue
if (line.strip() == 'FAIL'):
test_data['fail'] = True
continue
(name, value) = (c.strip() for c in line.split('='))
value = value.replace('\\0', '\x00')
if (name.upper() == 'COUNT'):
test_data = {}
data.append(test_data)
continue
else:
test_data[name.lower()] = value.encode('ascii')
return data |
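# Usage sketch for load_nist_vectors: each COUNT line starts a fresh test-case dict
# (the sample lines are adapted from the NIST response-file format).
sample = [
    '# CAVS 11.1',
    '[ENCRYPT]',
    'COUNT = 0',
    'KEY = 00000000000000000000000000000000',
    'PLAINTEXT = f34481ec3cc627bacd5dc3fb08f273e6',
    'CIPHERTEXT = 0336763e966d92595a567cc9ce537f5e',
]
cases = load_nist_vectors(sample)
assert cases[0]['key'] == b'00000000000000000000000000000000' |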
def test_avro_schema():
mock_avro_schema_str = '\n {\n "type": "record",\n "name": "User",\n "fields": [\n {"name": "name", "type": "string"},\n {"name": "age", "type": "int"}\n ]\n }\n '
client = FilesystemClient(scheme='file')
with patch.object(client, '_read_file', return_value=mock_avro_schema_str):
result = client.schema('dummy_user.avsc')
assert isinstance(result, StructType)
assert (len(result.fields) == 2)
assert (result.fields[0].extra_attrs['name'] == 'name')
assert isinstance(result.fields[0], StringType)
assert (result.fields[1].extra_attrs['name'] == 'age')
assert isinstance(result.fields[1], IntType) |
@patch.object(QuickCheck, '_socket_send', autospec=True)
class TestQcMethods(TestCase):
def test_connect(self, _socket_send):
qc = QuickCheck('127.0.0.1')
_socket_send.side_effect = mock_socket_send
qc.connect()
self.assertTrue(qc.connected)
self.assertEqual('SER;1000', qc.data)
qc.close()
self.assertFalse(qc.connected)
def test_get_measurements(self, socket_send):
qc = QuickCheck('127.0.0.1')
socket_send.side_effect = mock_socket_send
qc.connect()
qc.get_measurements()
qc.close()
self.assertEqual((3, 73), qc.measurements.shape)
self.assertEqual(6, qc.measurements.iloc[0]['TASK_En'])
self.assertEqual(100.83, qc.measurements.iloc[1]['AV_CAX_Value'])
self.assertEqual('VERSA BETA', qc.measurements.iloc[2]['WORK_Name'])
def test_not_connected(self, socket_send):
qc = QuickCheck('127.0.0.1')
socket_send.side_effect = mock_socket_send
self.assertFalse(qc.connected)
with self.assertRaises(ValueError):
qc.get_measurements()
qc.close() |
class Network(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(Network, self).__init__()
self._layers = layers
self._auxiliary = auxiliary
stem_multiplier = 3
C_curr = (stem_multiplier * C)
self.stem = nn.Sequential(nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr))
(C_prev_prev, C_prev, C_curr) = (C_curr, C_curr, C)
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if (i in [(layers // 3), ((2 * layers) // 3)]):
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
(C_prev_prev, C_prev) = (C_prev, (cell.multiplier * C_curr))
if (i == ((2 * layers) // 3)):
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, num_classes)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self.drop_path_prob = 0.0  # default so forward() works standalone; the DARTS training loop overwrites this each epoch
def forward(self, input):
logits_aux = None
s0 = s1 = self.stem(input)
for (i, cell) in enumerate(self.cells):
(s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
if (i == ((2 * self._layers) // 3)):
if (self._auxiliary and self.training):
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), (- 1)))
return (logits, logits_aux) |
class TestTerminal():
def test_default(self):
config = RootConfig({})
assert (config.terminal.styles.info == config.terminal.styles.info == 'bold')
assert (config.terminal.styles.success == config.terminal.styles.success == 'bold cyan')
assert (config.terminal.styles.error == config.terminal.styles.error == 'bold red')
assert (config.terminal.styles.warning == config.terminal.styles.warning == 'bold yellow')
assert (config.terminal.styles.waiting == config.terminal.styles.waiting == 'bold magenta')
assert (config.terminal.styles.debug == config.terminal.styles.debug == 'bold')
assert (config.terminal.styles.spinner == config.terminal.styles.spinner == 'simpleDotsScrolling')
assert (config.raw_data == {'terminal': {'styles': {'info': 'bold', 'success': 'bold cyan', 'error': 'bold red', 'warning': 'bold yellow', 'waiting': 'bold magenta', 'debug': 'bold', 'spinner': 'simpleDotsScrolling'}}})
def test_not_table(self, helpers):
config = RootConfig({'terminal': 9000})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal\n must be a table')):
_ = config.terminal
def test_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal = 9000
assert (config.raw_data == {'terminal': 9000})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal\n must be a table')):
_ = config.terminal
def test_styles_not_table(self, helpers):
config = RootConfig({'terminal': {'styles': 9000}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles\n must be a table')):
_ = config.terminal.styles
def test_styles_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles = 9000
assert (config.raw_data == {'terminal': {'styles': 9000}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles\n must be a table')):
_ = config.terminal.styles
def test_styles_info(self):
config = RootConfig({'terminal': {'styles': {'info': 'foo'}}})
assert (config.terminal.styles.info == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'info': 'foo'}}})
def test_styles_info_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'info': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> info\n must be a string')):
_ = config.terminal.styles.info
def test_styles_info_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.info = 9000
assert (config.raw_data == {'terminal': {'styles': {'info': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> info\n must be a string')):
_ = config.terminal.styles.info
def test_styles_success(self):
config = RootConfig({'terminal': {'styles': {'success': 'foo'}}})
assert (config.terminal.styles.success == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'success': 'foo'}}})
def test_styles_success_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'success': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> success\n must be a string')):
_ = config.terminal.styles.success
def test_styles_success_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.success = 9000
assert (config.raw_data == {'terminal': {'styles': {'success': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> success\n must be a string')):
_ = config.terminal.styles.success
def test_styles_error(self):
config = RootConfig({'terminal': {'styles': {'error': 'foo'}}})
assert (config.terminal.styles.error == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'error': 'foo'}}})
def test_styles_error_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'error': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> error\n must be a string')):
_ = config.terminal.styles.error
def test_styles_error_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.error = 9000
assert (config.raw_data == {'terminal': {'styles': {'error': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> error\n must be a string')):
_ = config.terminal.styles.error
def test_styles_warning(self):
config = RootConfig({'terminal': {'styles': {'warning': 'foo'}}})
assert (config.terminal.styles.warning == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'warning': 'foo'}}})
def test_styles_warning_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'warning': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> warning\n must be a string')):
_ = config.terminal.styles.warning
def test_styles_warning_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.warning = 9000
assert (config.raw_data == {'terminal': {'styles': {'warning': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> warning\n must be a string')):
_ = config.terminal.styles.warning
def test_styles_waiting(self):
config = RootConfig({'terminal': {'styles': {'waiting': 'foo'}}})
assert (config.terminal.styles.waiting == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'waiting': 'foo'}}})
def test_styles_waiting_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'waiting': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> waiting\n must be a string')):
_ = config.terminal.styles.waiting
def test_styles_waiting_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.waiting = 9000
assert (config.raw_data == {'terminal': {'styles': {'waiting': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> waiting\n must be a string')):
_ = config.terminal.styles.waiting
def test_styles_debug(self):
config = RootConfig({'terminal': {'styles': {'debug': 'foo'}}})
assert (config.terminal.styles.debug == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'debug': 'foo'}}})
def test_styles_debug_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'debug': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> debug\n must be a string')):
_ = config.terminal.styles.debug
def test_styles_debug_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.debug = 9000
assert (config.raw_data == {'terminal': {'styles': {'debug': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> debug\n must be a string')):
_ = config.terminal.styles.debug
def test_styles_spinner(self):
config = RootConfig({'terminal': {'styles': {'spinner': 'foo'}}})
assert (config.terminal.styles.spinner == 'foo')
assert (config.raw_data == {'terminal': {'styles': {'spinner': 'foo'}}})
def test_styles_spinner_not_string(self, helpers):
config = RootConfig({'terminal': {'styles': {'spinner': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> spinner\n must be a string')):
_ = config.terminal.styles.spinner
def test_styles_spinner_set_lazy_error(self, helpers):
config = RootConfig({})
config.terminal.styles.spinner = 9000
assert (config.raw_data == {'terminal': {'styles': {'spinner': 9000}}})
with pytest.raises(ConfigurationError, match=helpers.dedent('\n Error parsing config:\n terminal -> styles -> spinner\n must be a string')):
_ = config.terminal.styles.spinner |
class ConvNormActAa(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, drop_layer=None):
super(ConvNormActAa, self).__init__()
use_aa = ((aa_layer is not None) and (stride == 2))
self.conv = create_conv2d(in_channels, out_channels, kernel_size, stride=(1 if use_aa else stride), padding=padding, dilation=dilation, groups=groups, bias=bias)
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
norm_kwargs = (dict(drop_layer=drop_layer) if (drop_layer is not None) else {})
self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs)
self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa)
@property
def in_channels(self):
return self.conv.in_channels
@property
def out_channels(self):
return self.conv.out_channels
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.aa(x)
return x |
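# Usage sketch for ConvNormActAa: with stride=2 and an aa_layer, the conv runs at
# stride 1 and the anti-aliasing layer performs the downsampling. The BlurPool2d
# import path is assumed and varies across timm versions.
import torch
from timm.layers import BlurPool2d

layer = ConvNormActAa(32, 64, kernel_size=3, stride=2, aa_layer=BlurPool2d)
out = layer(torch.randn(1, 32, 56, 56))
print(out.shape)  # torch.Size([1, 64, 28, 28]) |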
class executor_age(Executor):
def __init__(self, conf, model=None, comet_exp=None):
super(executor_age, self).__init__(conf, model, comet_exp)
def init_train_data(self):
loader = data_loader(self.conf)
(img_yng_tr, age_yng_tr, _, _, _, _, img_old_tr, age_old_tr, _, _, _, _) = loader.load_data()
self.train_img_yng = img_yng_tr[0:int(((len(img_yng_tr) // self.conf.batch_size) * self.conf.batch_size))]
self.train_age_yng = age_yng_tr[0:int(((len(age_yng_tr) // self.conf.batch_size) * self.conf.batch_size))]
self.train_img_old = img_old_tr[0:int(((len(img_old_tr) // self.conf.batch_size) * self.conf.batch_size))]
self.train_age_old = age_old_tr[0:int(((len(age_old_tr) // self.conf.batch_size) * self.conf.batch_size))]
self.conf.data_len = len(self.train_img_yng)
def get_loss_names(self):
return ['Discriminator_loss', 'Discriminator_real_loss', 'Discriminator_fake_loss', 'Generator_fake_loss', 'Generator_l1_reg_loss', 'Discriminator_gp_loss']
def train(self):
self.init_train_data()
gen_dict = self.get_datagen_params()
old_gen = ImageDataGenerator(**gen_dict).flow(x=self.train_img_old, y=self.train_age_old, batch_size=self.conf.batch_size)
yng_gen = ImageDataGenerator(**gen_dict).flow(x=self.train_img_yng, y=self.train_age_yng, batch_size=self.conf.batch_size)
batches = int(np.ceil((self.conf.data_len / self.conf.batch_size)))
progress_bar = Progbar(target=(batches * self.conf.batch_size))
sl = SaveLoss(self.conf.folder)
cl = CSVLogger((self.conf.folder + '/training.csv'))
cl.on_train_begin()
img_clb = ImageCallback(self.conf, self.model, self.comet_exp)
loss_names = self.get_loss_names()
total_loss = {n: [] for n in loss_names}
for epoch in range(self.conf.epochs):
log.info(('Train epoch %d/%d' % (epoch, self.conf.epochs)))
epoch_loss = {n: [] for n in loss_names}
epoch_loss_list = []
(pool_to_print_old, pool_to_print_yng) = ([], [])
for batch in range(batches):
(old_img, old_age) = next(old_gen)
(yng_img, yng_age) = next(yng_gen)
if (len(pool_to_print_old) < 30):
pool_to_print_old.append(old_img)
if (len(pool_to_print_yng) < 30):
pool_to_print_yng.append(yng_img)
real_pred = (- np.ones((old_img.shape[0], 1)))
fake_pred = np.ones((old_img.shape[0], 1))
dummy = np.zeros((old_img.shape[0], 1))
dummy_Img = np.ones(old_img.shape)
diff_age = get_age_ord_vector(calculate_age_diff(yng_age, old_age), expand_dim=1, con=self.conf.age_con, ord=self.conf.age_ord, age_dim=self.conf.age_dim)
gen_masks = self.model.generator.predict([yng_img, diff_age])
gen_old_img = (np.tanh((gen_masks + yng_img)) if self.conf.use_tanh else (gen_masks + yng_img))
if (epoch < 25):
for _ in range(self.conf.ncritic[0]):
epsilon = np.random.uniform(0, 1, size=(old_img.shape[0], 1, 1, 1))
interpolation = ((epsilon * old_img) + ((1 - epsilon) * gen_old_img))
h_d = self.model.critic_model.fit([old_img, old_age, gen_old_img, old_age, interpolation, old_age], [real_pred, fake_pred, dummy], epochs=1, verbose=0)
else:
for _ in range(self.conf.ncritic[1]):
epsilon = np.random.uniform(0, 1, size=(old_img.shape[0], 1, 1, 1))
interpolation = ((epsilon * old_img) + ((1 - epsilon) * gen_old_img))
h_d = self.model.critic_model.fit([old_img, old_age, gen_old_img, old_age, interpolation, old_age], [real_pred, fake_pred, dummy], epochs=1, verbose=0)
print('d_real_loss', np.mean(h_d.history['d_real_loss']), 'd_fake_loss', np.mean(h_d.history['d_fake_loss']))
d_loss_bce = np.mean([h_d.history['d_real_loss'], h_d.history['d_fake_loss']])
d_loss_real = np.mean(h_d.history['d_real_loss'])
d_loss_fake = np.mean(h_d.history['d_fake_loss'])
d_loss_gp = np.mean(h_d.history['gp_loss'])
epoch_loss['Discriminator_loss'].append(d_loss_bce)
epoch_loss['Discriminator_real_loss'].append(d_loss_real)
epoch_loss['Discriminator_fake_loss'].append(d_loss_fake)
epoch_loss['Discriminator_gp_loss'].append(d_loss_gp)
h = self.model.gan.fit([yng_img, old_age, diff_age], [real_pred, dummy_Img], epochs=1, verbose=0)
g_loss_bce = h.history['discriminator_loss']
g_loss_l1 = h.history['map_l1_reg_loss']
epoch_loss['Generator_fake_loss'].append(g_loss_bce)
epoch_loss['Generator_l1_reg_loss'].append(g_loss_l1)
progress_bar.update(((batch + 1) * self.conf.batch_size))
for n in loss_names:
epoch_loss_list.append((n, np.mean(epoch_loss[n])))
total_loss[n].append(np.mean(epoch_loss[n]))
log.info((str(('Epoch %d/%d: ' + ', '.join([(l + ' Loss = %.3f') for l in loss_names]))) % ((epoch, self.conf.epochs) + tuple((total_loss[l][(- 1)] for l in loss_names)))))
logs = {l: total_loss[l][(- 1)] for l in loss_names}
cl.model = self.model.discriminator
cl.model.stop_training = False
cl.on_epoch_end(epoch, logs)
sl.on_epoch_end(epoch, logs)
img_clb.on_epoch_end(epoch, yng_img, yng_age, old_img, old_age) |