def write_audacity_labels(dst_path, labels):
    # Write (start, end, label) triples as a tab-separated Audacity label file.
    with open(dst_path, 'w') as of:
        for (s, e, l) in labels:
            # Times arrive in 100 ns (HTK-style) units; convert to seconds.
            (s, e) = ((s * 1e-07), (e * 1e-07))
            if ('-' in l) and ('+' in l):
                # Reduce a 'left-center+right' triphone label to its center phone.
                ph = l.split('-')[1].split('+')[0]
            else:
                ph = l
            of.write('{:.4f}\t{:.4f}\t{}\n'.format(s, e, ph))

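# Usage sketch (hypothetical values): two labels given in 100 ns units; the
# triphone 'sil-ah+t' is written out as its center phone 'ah'.
example_labels = [(0, 1500000, 'sil'), (1500000, 32000000, 'sil-ah+t')]
write_audacity_labels('labels.txt', example_labels)
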
def patch_game_name_and_id(game_files_path: Path, new_name: str, publisher_id: str):
    b = new_name.encode('ASCII')
    if len(b) > 40:
        raise ValueError(f"Name '{new_name}' is bigger than 40 bytes")
    # Pad the name with NUL bytes to the full 40-byte field width.
    b = b + (b'\x00' * (40 - len(b)))
    pid = publisher_id.encode('ASCII')
    if len(pid) != 2:
        raise ValueError(f"Publisher ID '{publisher_id}' is not exactly 2 bytes")
    with game_files_path.joinpath('sys', 'boot.bin').open('r+b') as boot_bin:
        # Disc header: offset 0x04 holds the two-byte maker/publisher code,
        # offset 0x20 the game name.
        boot_bin.seek(4)
        boot_bin.write(pid)
        boot_bin.seek(32)
        boot_bin.write(b)
    with game_files_path.joinpath('files', 'opening.bnr').open('r+b') as banner:
        # 6240 == 0x1860, the full game title field in the opening banner.
        banner.seek(6240)
        banner.write(b)

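# Usage sketch (hypothetical path and values) against an extracted disc layout:
# patch_game_name_and_id(Path('extracted_game'), 'My Modded Game', '01')
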
def readable(tag, plural=False):
try:
if (tag[0] == '~'):
if (tag[1] == '#'):
tag = tag[2:]
else:
tag = tag[1:]
except IndexError:
return ngettext('Invalid tag', 'Invalid tags', 1)
def desc(tag):
if plural:
plural_desc = _TAGS[tag].plural
if plural_desc:
return plural_desc
return _TAGS[tag].desc
if (tag in _TAGS):
return desc(tag)
elif (tag == 'people:real'):
return desc('people')
else:
roles = False
if tag.endswith(':roles'):
roles = True
tag = tag[:(- 6)]
parts = []
if tag.endswith('sort'):
v = _TAGS.get(tag[:(- 4)])
if ((v is not None) and v.has_sort):
tag = tag[:(- 4)]
parts.append(_('sort'))
else:
v = _TAGS.get(tag[:(- 4)])
if roles:
v = _TAGS.get(tag)
if ((v is not None) and v.has_roles):
parts.append(_('roles'))
        if tag in _TAGS:
            text = desc(tag)
            if parts:
                text += ' (%s)' % ', '.join(parts)
            return text
    return tag

class TestDataID(unittest.TestCase):
def test_basic_init(self):
from satpy.dataset.dataid import DataID
from satpy.dataset.dataid import default_id_keys_config as dikc
from satpy.dataset.dataid import minimal_default_keys_config as mdkc
did = DataID(dikc, name='a')
assert (did['name'] == 'a')
assert (did['modifiers'] == tuple())
DataID(dikc, name='a', wavelength=0.86)
DataID(dikc, name='a', resolution=1000)
DataID(dikc, name='a', calibration='radiance')
DataID(dikc, name='a', wavelength=0.86, resolution=250, calibration='radiance')
DataID(dikc, name='a', wavelength=0.86, resolution=250, calibration='radiance', modifiers=('sunz_corrected',))
with pytest.raises(ValueError, match='Required field name missing.'):
DataID(dikc, wavelength=0.86)
did = DataID(mdkc, name='comp24', resolution=500)
assert (did['resolution'] == 500)
def test_init_bad_modifiers(self):
from satpy.dataset.dataid import DataID
from satpy.dataset.dataid import default_id_keys_config as dikc
with pytest.raises(TypeError):
DataID(dikc, name='a', modifiers='str')
def test_compare_no_wl(self):
from satpy.dataset.dataid import DataID
from satpy.dataset.dataid import default_id_keys_config as dikc
d1 = DataID(dikc, name='a', wavelength=(0.1, 0.2, 0.3))
d2 = DataID(dikc, name='a', wavelength=None)
assert (not (d1 < d2))
assert (d2 < d1)
def test_bad_calibration(self):
from satpy.dataset.dataid import DataID
from satpy.dataset.dataid import default_id_keys_config as dikc
with pytest.raises(ValueError, match="_bad_ invalid value for <enum 'calibration'>"):
DataID(dikc, name='C05', calibration='_bad_')
def test_is_modified(self):
from satpy.dataset.dataid import DataID
from satpy.dataset.dataid import default_id_keys_config as dikc
d1 = DataID(dikc, name='a', wavelength=(0.1, 0.2, 0.3), modifiers=('hej',))
d2 = DataID(dikc, name='a', wavelength=(0.1, 0.2, 0.3), modifiers=tuple())
assert d1.is_modified()
assert (not d2.is_modified())
def test_create_less_modified_query(self):
from satpy.dataset.dataid import DataID
from satpy.dataset.dataid import default_id_keys_config as dikc
d1 = DataID(dikc, name='a', wavelength=(0.1, 0.2, 0.3), modifiers=('hej',))
d2 = DataID(dikc, name='a', wavelength=(0.1, 0.2, 0.3), modifiers=tuple())
assert (not d1.create_less_modified_query()['modifiers'])
assert (not d2.create_less_modified_query()['modifiers']) |
@xl_menu('Selection to IPython', menu='IPython')
def set_selection_in_ipython(*args):
try:
if ((not getattr(sys, '_ipython_app', None)) or (not sys._ipython_kernel_running)):
raise Exception('IPython kernel not running')
xl = xl_app(com_package='win32com')
selection = xl.Selection
if (not selection):
raise Exception('Nothing selected')
value = selection.Value
pyxll_version = int(pyxll.__version__.split('.')[0])
if ((pyxll_version >= 4) and isinstance(value, str)):
try:
to_object = get_type_converter('var', 'object')
value = to_object(value)
except KeyError:
pass
sys._ipython_app.shell.user_ns['_'] = value
print('\n\n>>> Selected value set as _')
    except Exception:
        if win32api:
            win32api.MessageBox(None, 'Error setting selection in Excel')
        _log.error('Error setting selection in Excel', exc_info=True)

class ProxyPLoss(nn.Module):
def __init__(self, num_classes, scale):
super(ProxyPLoss, self).__init__()
self.soft_plus = nn.Softplus()
self.label = torch.LongTensor([i for i in range(num_classes)])
self.scale = scale
def forward(self, feature, target, proxy):
feature = F.normalize(feature, p=2, dim=1)
pred = F.linear(feature, F.normalize(proxy, p=2, dim=1))
label = (self.label.unsqueeze(1).to(feature.device) == target.unsqueeze(0))
pred = torch.masked_select(pred.transpose(1, 0), label)
pred = pred.unsqueeze(1)
feature = torch.matmul(feature, feature.transpose(1, 0))
label_matrix = (target.unsqueeze(1) == target.unsqueeze(0))
index_label = torch.LongTensor([i for i in range(feature.shape[0])])
index_matrix = (index_label.unsqueeze(1) == index_label.unsqueeze(0))
feature = (feature * (~ label_matrix))
feature = feature.masked_fill((feature < 1e-06), (- np.inf))
logits = torch.cat([pred, feature], dim=1)
label = torch.zeros(logits.size(0), dtype=torch.long).to(feature.device)
loss = F.nll_loss(F.log_softmax((self.scale * logits), dim=1), label)
        return loss

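# Usage sketch (hypothetical sizes): 8 samples, 10 classes, 128-dim embeddings;
# assumes torch, torch.nn.functional as F and numpy as np are imported as in
# the class above.
criterion = ProxyPLoss(num_classes=10, scale=12.0)
loss = criterion(torch.randn(8, 128), torch.randint(0, 10, (8,)), torch.randn(10, 128))
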
class TestEvaluate(BaseTestCase):
async def test_evaluate(self):
result = (await self.page.evaluate('() => 7 * 3'))
self.assertEqual(result, 21)
async def test_await_promise(self):
result = (await self.page.evaluate('() => Promise.resolve(8 * 7)'))
self.assertEqual(result, 56)
async def test_error_on_reload(self):
with self.assertRaises(Exception) as cm:
(await self.page.evaluate('() => {\n location.reload();\n return new Promise(resolve => {\n setTimeout(() => resolve(1), 0);\n }\n )}'))
self.assertIn('Protocol error', cm.exception.args[0])
async def test_after_framenavigation(self):
frameEvaluation = asyncio.get_event_loop().create_future()
async def evaluate_frame(frame):
frameEvaluation.set_result((await frame.evaluate('() => 6 * 7')))
self.page.on('framenavigated', (lambda frame: asyncio.ensure_future(evaluate_frame(frame))))
(await self.page.goto((self.url + 'empty')))
(await frameEvaluation)
self.assertEqual(frameEvaluation.result(), 42)
    @unittest.skip('Pyppeteer does not support async for exposeFunction')
async def test_inside_expose_function(self):
async def callController(a, b):
result = (await self.page.evaluate('(a, b) => a + b', a, b))
return result
(await self.page.exposeFunction('callController', (lambda *args: asyncio.ensure_future(callController(*args)))))
result = (await self.page.evaluate('async function() { return await callController(9, 3); }'))
self.assertEqual(result, 27)
async def test_promise_reject(self):
with self.assertRaises(ElementHandleError) as cm:
(await self.page.evaluate('() => not.existing.object.property'))
self.assertIn('not is not defined', cm.exception.args[0])
async def test_string_as_error_message(self):
with self.assertRaises(Exception) as cm:
(await self.page.evaluate('() => { throw "qwerty"; }'))
self.assertIn('qwerty', cm.exception.args[0])
async def test_number_as_error_message(self):
with self.assertRaises(Exception) as cm:
(await self.page.evaluate('() => { throw 100500; }'))
self.assertIn('100500', cm.exception.args[0])
async def test_return_complex_object(self):
obj = {'foo': 'bar!'}
result = (await self.page.evaluate('(a) => a', obj))
self.assertIsNot(result, obj)
self.assertEqual(result, obj)
async def test_return_nan(self):
result = (await self.page.evaluate('() => NaN'))
self.assertIsNone(result)
async def test_return_minus_zero(self):
result = (await self.page.evaluate('() => -0'))
self.assertEqual(result, (- 0))
async def test_return_infinity(self):
result = (await self.page.evaluate('() => Infinity'))
self.assertEqual(result, math.inf)
async def test_return_infinity_minus(self):
result = (await self.page.evaluate('() => -Infinity'))
self.assertEqual(result, (- math.inf))
async def test_accept_none(self):
result = (await self.page.evaluate('(a, b) => Object.is(a, null) && Object.is(b, "foo")', None, 'foo'))
self.assertTrue(result)
async def test_serialize_null_field(self):
result = (await self.page.evaluate('() => ({a: undefined})'))
self.assertEqual(result, {})
async def test_fail_window_object(self):
self.assertIsNone((await self.page.evaluate('() => window')))
self.assertIsNone((await self.page.evaluate('() => [Symbol("foo4")]')))
async def test_fail_for_circular_object(self):
result = (await self.page.evaluate('() => {\n const a = {};\n const b = {a};\n a.b = b;\n return a;\n }'))
self.assertIsNone(result)
async def test_accept_string(self):
result = (await self.page.evaluate('1 + 2'))
self.assertEqual(result, 3)
async def test_evaluate_force_expression(self):
result = (await self.page.evaluate('() => null;\n1 + 2;', force_expr=True))
self.assertEqual(result, 3)
async def test_accept_string_with_semicolon(self):
result = (await self.page.evaluate('1 + 5;'))
self.assertEqual(result, 6)
async def test_accept_string_with_comments(self):
result = (await self.page.evaluate('2 + 5;\n// do some math!'))
self.assertEqual(result, 7)
async def test_element_handle_as_argument(self):
(await self.page.setContent('<section>42</section>'))
element = (await self.page.J('section'))
text = (await self.page.evaluate('(e) => e.textContent', element))
self.assertEqual(text, '42')
async def test_element_handle_disposed(self):
(await self.page.setContent('<section>39</section>'))
element = (await self.page.J('section'))
self.assertTrue(element)
(await element.dispose())
with self.assertRaises(ElementHandleError) as cm:
(await self.page.evaluate('(e) => e.textContent', element))
self.assertIn('JSHandle is disposed', cm.exception.args[0])
async def test_element_handle_from_other_frame(self):
(await attachFrame(self.page, 'frame1', (self.url + 'empty')))
body = (await self.page.frames[1].J('body'))
with self.assertRaises(ElementHandleError) as cm:
(await self.page.evaluate('body => body.innerHTML', body))
self.assertIn('JSHandles can be evaluated only in the context they were created', cm.exception.args[0])
async def test_object_handle_as_argument(self):
navigator = (await self.page.evaluateHandle('() => navigator'))
self.assertTrue(navigator)
text = (await self.page.evaluate('(e) => e.userAgent', navigator))
self.assertIn('Mozilla', text)
async def test_object_handle_to_primitive_value(self):
aHandle = (await self.page.evaluateHandle('() => 5'))
isFive = (await self.page.evaluate('(e) => Object.is(e, 5)', aHandle))
self.assertTrue(isFive)
async def test_simulate_user_gesture(self):
playAudio = "function playAudio() {\n const audio = document.createElement('audio');\n audio.src = 'data:audio/wav;base64,UklGRiQAAABXQVZFZm10IBAAAAABAAEARKwAAIhYAQACABAAZGF0YQAAAAA=';\n return audio.play();\n }"
(await self.page.evaluate(playAudio))
(await self.page.evaluate('({})()'.format(playAudio), force_expr=True))
async def test_nice_error_after_navigation(self):
executionContext = (await self.page.mainFrame.executionContext())
(await asyncio.wait([self.page.waitForNavigation(), executionContext.evaluate('window.location.reload()')]))
with self.assertRaises(NetworkError) as cm:
(await executionContext.evaluate('() => null'))
        self.assertIn('navigation', cm.exception.args[0])

class CapacitorViewFull(StatsView):
name = 'capacitorViewFull'
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
def getHeaderText(self, fit):
return _t('Capacitor')
def getTextExtentW(self, text):
(width, height) = self.parent.GetTextExtent(text)
return width
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
parent = self.panel = contentPanel
self.headerPanel = headerPanel
panel = 'full'
sizerCapacitor = wx.GridSizer(1, 2, 0, 0)
contentSizer.Add(sizerCapacitor, 0, wx.EXPAND, 0)
baseBox = wx.BoxSizer(wx.HORIZONTAL)
sizerCapacitor.Add(baseBox, 0, wx.ALIGN_LEFT)
bitmap = BitmapLoader.getStaticBitmap('capacitorInfo_big', parent, 'gui')
tooltip = wx.ToolTip(_t('Capacitor stability'))
bitmap.SetToolTip(tooltip)
baseBox.Add(bitmap, 0, wx.ALIGN_CENTER)
box = wx.BoxSizer(wx.VERTICAL)
baseBox.Add(box, 0, wx.ALIGN_LEFT)
hbox = wx.BoxSizer(wx.HORIZONTAL)
box.Add(hbox, 0, wx.ALIGN_LEFT)
hbox.Add(wx.StaticText(parent, wx.ID_ANY, _t('Total: ')), 0, (wx.ALIGN_LEFT | wx.LEFT), 3)
lbl = wx.StaticText(parent, wx.ID_ANY, '0.0')
setattr(self, ('label%sCapacitorCapacity' % panel.capitalize()), lbl)
hbox.Add(lbl, 0, wx.ALIGN_LEFT)
hbox.Add(wx.StaticText(parent, wx.ID_ANY, ' GJ'), 0, wx.ALIGN_LEFT)
hbox = wx.BoxSizer(wx.HORIZONTAL)
box.Add(hbox, 0, wx.ALIGN_LEFT)
lbl = wx.StaticText(parent, wx.ID_ANY, _t('Lasts '))
hbox.Add(lbl, 0, (wx.ALIGN_LEFT | wx.LEFT), 3)
setattr(self, ('label%sCapacitorState' % panel.capitalize()), lbl)
lbl = wx.StaticText(parent, wx.ID_ANY, _t('0s'))
setattr(self, ('label%sCapacitorTime' % panel.capitalize()), lbl)
hbox.Add(lbl, 0, wx.ALIGN_LEFT)
baseBox = wx.BoxSizer(wx.HORIZONTAL)
sizerCapacitor.Add(baseBox, 0, wx.ALIGN_CENTER_HORIZONTAL)
tooltip = wx.ToolTip(_t('Extra stats'))
bitmap = BitmapLoader.getStaticBitmap('capacitorRecharge_big', parent, 'gui')
bitmap.SetToolTip(tooltip)
baseBox.Add(bitmap, 0, wx.ALIGN_CENTER)
chargeSizer = wx.BoxSizer(wx.VERTICAL)
baseBox.Add(chargeSizer, 0, wx.ALIGN_CENTER)
lbl = wx.StaticText(parent, wx.ID_ANY, '0 GJ/s')
setattr(self, ('label%sCapacitorDelta' % panel.capitalize()), lbl)
chargeSizer.Add(lbl, 0, wx.ALIGN_CENTER)
lbl = wx.StaticText(parent, wx.ID_ANY, '0%')
setattr(self, ('label%sCapacitorResist' % panel.capitalize()), lbl)
chargeSizer.Add(lbl, 0, wx.ALIGN_CENTER)
def refreshPanel(self, fit):
        stats = (
            ('label%sCapacitorCapacity', (lambda: fit.ship.getModifiedItemAttr('capacitorCapacity')), 3, 0, 9, False, ''),
            ('label%sCapacitorDelta', (lambda: fit.capDelta), 3, 0, 0, True, ' GJ/s'),
            ('label%sCapacitorResist', (lambda: ((1 - fit.ship.getModifiedItemAttr('energyWarfareResistance', 1)) * 100)), 3, 0, 0, False, '%'),
        )
if (fit is not None):
cap_amount = fit.ship.getModifiedItemAttr('capacitorCapacity')
cap_recharge = fit.capRecharge
cap_use = fit.capUsed
neut_res = fit.ship.getModifiedItemAttr('energyWarfareResistance', 1)
else:
cap_amount = 0
cap_recharge = 0
cap_use = 0
neut_res = 1
panel = 'Full'
for (labelName, value, prec, lowest, highest, forceSign, unit) in stats:
label = getattr(self, (labelName % panel))
value = (value() if (fit is not None) else 0)
value = (value if (value is not None) else 0)
if isinstance(value, str):
label.SetLabel(value)
label.SetToolTip(wx.ToolTip(value))
else:
label.SetLabel('{}{}'.format(formatAmount(value, prec, lowest, highest, forceSign=forceSign), unit))
label.SetToolTip(wx.ToolTip(('%.1f' % value)))
if ((labelName == 'label%sCapacitorDelta') and (cap_recharge or cap_use)):
lines = [_t('Capacitor delta:'), ' +{} GJ/s'.format(formatAmount(cap_recharge, 3, 0, 3)), ' -{} GJ/s'.format(formatAmount(cap_use, 3, 0, 3))]
delta = round((cap_recharge - cap_use), 3)
if ((delta > 0) and (0 < round(neut_res, 4) < 1)):
lines.append('')
lines.append('Effective excessive gain:')
lines.append(' +{} GJ/s'.format(formatAmount((delta / neut_res), 3, 0, 3)))
label.SetToolTip(wx.ToolTip('\n'.join(lines)))
if (labelName == 'label%sCapacitorResist'):
texts = [_t('Neutralizer resistance')]
if ((cap_amount > 0) and (0 < round(neut_res, 4) < 1)):
texts.append((_t('Effective capacity') + ': {} GJ'.format(formatAmount((cap_amount / neut_res), 3, 0, 9))))
label.SetToolTip(wx.ToolTip('\n'.join(texts)))
capState = (fit.capState if (fit is not None) else 0)
capStable = (fit.capStable if (fit is not None) else False)
lblNameTime = 'label%sCapacitorTime'
lblNameState = 'label%sCapacitorState'
if (isinstance(capState, tuple) and (len(capState) >= 2)):
            t = '{0}%-{1}%'.format(capState[0], capState[1])
s = ''
else:
if capStable:
t = ('%.1f%%' % capState)
elif (capState > 60):
t = ('%dm%ds' % divmod(capState, 60))
else:
t = ('%ds' % capState)
s = (_t('Stable: ') if capStable else _t('Lasts '))
getattr(self, (lblNameTime % panel)).SetLabel(t)
getattr(self, (lblNameState % panel)).SetLabel(s)
self.panel.Layout()
        self.headerPanel.Layout()

def get_comp_grnd_probs(model, pointer_logprobs, step_history, grounding):
assert (len(model.ids_to_grounding_choices) == len(pointer_logprobs))
keep_pointer_logprobs = []
refs = [idx for (rule, idx) in step_history if (rule == 'ref')]
assert (len(refs) == 2), refs
is_filter = (refs[0] == refs[1])
lhs_val_type = set([grnd.data_type for grnd in grounding[refs[1]]])
(col_name, val_type) = (None, None)
for (rule, idx) in step_history:
if (rule == 'grounding'):
column_grnd = model.ids_to_grounding_choices[idx]
assert (column_grnd.choice_type == 'column')
(tbl_name, col_name) = column_grnd.choice
val_type = model.column_data[tbl_name][col_name]
break
has_column = (val_type is not None)
    has_comp_op = any((rule == 'CompOp') for (rule, idx) in step_history)
for (idx, grnd_choice) in model.ids_to_grounding_choices.items():
if (grnd_choice.choice_type == 'value'):
assert (pointer_logprobs[idx][0] == idx)
if has_column:
for val_unit in grnd_choice.choice:
if (((val_unit.value_type == val_type) and (val_unit.column is None)) or ((val_unit.column == col_name) and (val_unit.table == tbl_name))):
keep_pointer_logprobs.append(pointer_logprobs[idx])
break
else:
for val_unit in grnd_choice.choice:
if ((val_unit.column is None) and (val_unit.value_type in lhs_val_type)):
keep_pointer_logprobs.append(pointer_logprobs[idx])
elif ((not has_column) and (not has_comp_op) and is_filter):
keep_pointer_logprobs.append(pointer_logprobs[idx])
assert keep_pointer_logprobs, (model.ids_to_grounding_choices, val_type, col_name)
    return keep_pointer_logprobs

def test_parse_tree():
problem = '(q-transform/hint (quote (lambda (cdr (cdr (var ()))))) (quote ((() y . 1) (#f y () . #t) (#f b () b . y) (x #f (#f . #f) . #t) (a #f y x s . a))))'
step = 0
print('Starting problem:', problem)
with Interaction(lisp.parse(problem)) as env:
signal = None
while (signal != 'solved'):
parsed_subtree = parse_split_trees(env.state)
print(len(parsed_subtree))
signal = env.follow_path(env.good_path)
step += 1
print('Step', step, 'Signal:', signal)
    print('Completed.')

def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[(img_id, ...)].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
    return imgs

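# Usage sketch (hypothetical batch): undo ImageNet-style normalization on two
# 32x32 images; the mean/std here are example values, and torch is assumed to
# be imported alongside mmcv/numpy above.
batch = torch.randn(2, 3, 32, 32)
imgs = tensor2imgs(batch, mean=(123.675, 116.28, 103.53), std=(58.395, 57.12, 57.375))
assert imgs[0].shape == (32, 32, 3)
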
@skipIf(enum is None, 'enum is not available')
class TestNestedStateEnums(TestEnumsAsStates):
def setUp(self):
super(TestNestedStateEnums, self).setUp()
self.machine_cls = HierarchicalMachine
def test_root_enums(self):
states = [self.States.RED, self.States.YELLOW, {'name': self.States.GREEN, 'children': ['tick', 'tock'], 'initial': 'tick'}]
m = self.machine_cls(states=states, initial=self.States.GREEN)
self.assertTrue(m.is_GREEN(allow_substates=True))
self.assertTrue(m.is_GREEN_tick())
m.to_RED()
self.assertTrue((m.state is self.States.RED))
def test_nested_enums(self):
states = ['A', self.States.GREEN, {'name': 'C', 'children': self.States, 'initial': self.States.GREEN}]
m1 = self.machine_cls(states=states, initial='C')
m2 = self.machine_cls(states=states, initial='A')
self.assertEqual(m1.state, self.States.GREEN)
self.assertTrue(m1.is_GREEN())
m2.to_GREEN()
self.assertTrue(m2.is_C_GREEN())
self.assertEqual(m1.state, m2.state)
m1.to_A()
self.assertNotEqual(m1.state, m2.state)
def test_initial_enum(self):
m1 = self.machine_cls(states=self.States, initial=self.States.GREEN)
self.assertEqual(self.States.GREEN, m1.state)
self.assertEqual(m1.state.name, self.States.GREEN.name)
def test_duplicate_states(self):
with self.assertRaises(ValueError):
self.machine_cls(states=['A', 'A'])
def test_duplicate_states_from_enum_members(self):
class Foo(enum.Enum):
A = 1
with self.assertRaises(ValueError):
self.machine_cls(states=[Foo.A, Foo.A])
def test_add_enum_transition(self):
class Foo(enum.Enum):
A = 0
B = 1
class Bar(enum.Enum):
FOO = Foo
C = 2
m = self.machine_cls(states=Bar, initial=Bar.C, auto_transitions=False)
m.add_transition('go', Bar.C, Foo.A, conditions=(lambda : False))
trans = m.events['go'].transitions['C']
self.assertEqual(1, len(trans))
self.assertEqual('FOO_A', trans[0].dest)
m.add_transition('go', Bar.C, 'FOO_B')
self.assertEqual(2, len(trans))
self.assertEqual('FOO_B', trans[1].dest)
m.go()
self.assertTrue(m.is_FOO_B())
m.add_transition('go', Foo.B, 'C')
trans = m.events['go'].transitions['FOO_B']
self.assertEqual(1, len(trans))
self.assertEqual('C', trans[0].dest)
m.go()
self.assertEqual(m.state, Bar.C)
def test_add_nested_enums_as_nested_state(self):
class Foo(enum.Enum):
A = 0
B = 1
class Bar(enum.Enum):
FOO = Foo
C = 2
m = self.machine_cls(states=Bar, initial=Bar.C)
self.assertEqual(sorted(m.states['FOO'].states.keys()), ['A', 'B'])
m.add_transition('go', 'FOO_A', 'C')
m.add_transition('go', 'C', 'FOO_B')
m.add_transition('foo', Bar.C, Bar.FOO)
m.to_FOO_A()
self.assertFalse(m.is_C())
self.assertTrue(m.is_FOO(allow_substates=True))
self.assertTrue(m.is_FOO_A())
self.assertTrue(m.is_FOO_A(allow_substates=True))
m.go()
self.assertEqual(Bar.C, m.state)
m.go()
self.assertEqual(Foo.B, m.state)
m.to_state(m, Bar.C.name)
self.assertEqual(Bar.C, m.state)
m.foo()
self.assertEqual(Bar.FOO, m.state)
def test_enum_model_conversion(self):
class Inner(enum.Enum):
I1 = 1
I2 = 2
I3 = 3
I4 = 0
class Middle(enum.Enum):
M1 = 10
M2 = 20
M3 = 30
M4 = Inner
class Outer(enum.Enum):
O1 = 100
O2 = 200
O3 = 300
O4 = Middle
m = self.machine_cls(states=Outer, initial=Outer.O1)
def test_enum_initial(self):
class Foo(enum.Enum):
A = 0
B = 1
class Bar(enum.Enum):
FOO = dict(children=Foo, initial=Foo.A)
C = 2
m = self.machine_cls(states=Bar, initial=Bar.FOO)
self.assertTrue(m.is_FOO_A())
def test_separator_naming_error(self):
class UnderscoreEnum(enum.Enum):
STATE_NAME = 0
with self.assertRaises(ValueError):
self.machine_cls(states=UnderscoreEnum)
class DotNestedState(self.machine_cls.state_cls):
separator = '.'
class DotMachine(self.machine_cls):
state_cls = DotNestedState
m = DotMachine(states=UnderscoreEnum)
def test_get_nested_transitions(self):
class Errors(enum.Enum):
NONE = self.States
UNKNOWN = 2
POWER = 3
m = self.machine_cls(states=Errors, initial=Errors.NONE.value.RED, auto_transitions=False)
m.add_transition('error', Errors.NONE, Errors.UNKNOWN)
m.add_transition('outage', [Errors.NONE, Errors.UNKNOWN], Errors.POWER)
m.add_transition('reset', '*', self.States.RED)
m.add_transition('toggle', self.States.RED, self.States.GREEN)
m.add_transition('toggle', self.States.GREEN, self.States.YELLOW)
m.add_transition('toggle', self.States.YELLOW, self.States.RED)
self.assertEqual(5, len(m.get_transitions(dest=self.States.RED)))
self.assertEqual(1, len(m.get_transitions(source=self.States.RED, dest=self.States.RED, delegate=True)))
self.assertEqual(1, len(m.get_transitions(source=self.States.RED, dest=self.States.GREEN)))
self.assertEqual(1, len(m.get_transitions(dest=self.States.GREEN)))
self.assertEqual(3, len(m.get_transitions(trigger='toggle')))
def test_multiple_deeper(self):
class X(enum.Enum):
X1 = 1
X2 = 2
class B(enum.Enum):
B1 = dict(parallel=X)
B2 = 2
class A(enum.Enum):
A1 = dict(parallel=B)
A2 = 2
class Q(enum.Enum):
Q1 = 1
Q2 = dict(parallel=A)
class P(enum.Enum):
P1 = 1
P2 = dict(parallel=Q)
class States(enum.Enum):
S1 = 1
S2 = dict(parallel=P)
m = self.machine_cls(states=States, initial=States.S1)
self.assertEqual(m.state, States.S1)
m.to_S2()
ref_state = [P.P1, [Q.Q1, [[[X.X1, X.X2], B.B2], A.A2]]]
        self.assertEqual(ref_state, m.state)

class BridgeTowerVisionConfig(PretrainedConfig):
model_type = 'bridgetower_vision_model'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.stop_gradient = stop_gradient
self.share_layernorm = share_layernorm
self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # When handed a full BridgeTower config, pull out the vision sub-config.
        if config_dict.get('model_type') == 'bridgetower':
            config_dict = config_dict['vision_config']
        if ('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)

class CalculatorForm(Form):
def __init__(self, view):
super().__init__(view, 'dynamic_content_error_form')
self.use_layout(FormLayout())
self.calculator = Calculator.for_current_session()
try:
self.enable_refresh(on_refresh=self.calculator.events.inputs_changed)
except DomainException as ex:
self.layout.add_alert_for_domain_exception(ex)
controls = self.add_child(FieldSet(view).use_layout(InlineFormLayout()))
self.add_inputs(controls)
self.display_result(controls)
def add_inputs(self, controls):
operand_a_input = TextInput(self, self.calculator.fields.operand_a, refresh_widget=self)
controls.layout.add_input(operand_a_input, hide_label=True)
operator_input = SelectInput(self, self.calculator.fields.operator, refresh_widget=self)
controls.layout.add_input(operator_input, hide_label=True)
operand_b_input = TextInput(self, self.calculator.fields.operand_b, refresh_widget=self)
controls.layout.add_input(operand_b_input, hide_label=True)
def display_result(self, controls):
if (self.calculator.result is not None):
message = ('= %s' % self.calculator.result)
else:
message = '= ---'
        controls.add_child(Span(self.view, text=message))

def put_html(html: Any, sanitize: bool=False, scope: str=None, position: int=OutputPosition.BOTTOM) -> Output:
if hasattr(html, '__html__'):
html = html.__html__()
elif hasattr(html, '_repr_html_'):
html = html._repr_html_()
spec = _get_output_spec('html', content=html, sanitize=sanitize, scope=scope, position=position)
    return Output(spec)

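# Usage sketch: objects exposing __html__ (e.g. markupsafe Markup) or
# _repr_html_ (e.g. pandas DataFrames) are converted before being sent out.
put_html('<h2>Report</h2><p>Done.</p>')
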
def _inner_fmt(k: str, v: Any, table: TableFmt) -> Iterator[str]:
quote_function = table.get('quote', (lambda a: a))
if isinstance(v, list):
for inner_v in v:
qv = quote_function(inner_v)
(yield table['item'].format(k=k, v=qv))
else:
qv = quote_function(v)
        (yield table['item'].format(k=k, v=qv))

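# Usage sketch (hypothetical table spec): 'item' is a format template and
# 'quote' an optional value quoter; list values yield one line per element.
fmt = {'item': '{k} = {v}', 'quote': (lambda a: '"%s"' % a)}
assert list(_inner_fmt('name', ['a', 'b'], fmt)) == ['name = "a"', 'name = "b"']
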
class ClientGenerator(BaseGenerator):
def __init__(self, keep_sync: Optional[List[str]]=None, class_replace_map: Optional[Dict[(str, str)]]=None, import_replace_map: Optional[Dict[(str, str)]]=None, exclude_methods: Optional[List[str]]=None):
super().__init__()
self._async_methods: Optional[List[str]] = None
self.transformers.append(ImportTransformer(import_replace_map=import_replace_map))
self.transformers.append(ImportFromTransformer(import_replace_map=import_replace_map))
self.transformers.append(ClientFunctionDefTransformer(keep_sync=keep_sync, class_replace_map=class_replace_map, exclude_methods=exclude_methods, async_methods=self.async_methods))
self.transformers.append(ClassDefTransformer(class_replace_map=class_replace_map))
self.transformers.append(CallTransformer(class_replace_map=class_replace_map, async_methods=self.async_methods))
self.transformers.append(NameTransformer(class_replace_map=class_replace_map, import_replace_map=import_replace_map))
    @property
    def async_methods(self) -> List[str]:
if (self._async_methods is None):
self._async_methods = self.get_async_methods(AsyncQdrantBase)
return self._async_methods
    @staticmethod
    def get_async_methods(class_obj: type) -> List[str]:
async_methods = []
for (name, method) in inspect.getmembers(class_obj):
if inspect.iscoroutinefunction(method):
async_methods.append(name)
        return async_methods

class TestUTCDateTimeAttribute():
def setup_method(self):
self.attr = UTCDateTimeAttribute()
self.dt = datetime(2047, 1, 6, 8, 21, 30, 2000, tzinfo=timezone.utc)
def test_utc_datetime_attribute(self):
attr = UTCDateTimeAttribute(default=self.dt)
assert (attr.attr_type == STRING)
assert (attr.default == self.dt)
def test_utc_date_time_serialize(self):
assert (self.attr.serialize(self.dt) == '2047-01-06T08:21:30.002000+0000')
def test_utc_date_time_serialize_pre_1000(self):
dt = self.dt.replace(year=1)
assert (self.attr.serialize(dt) == '0001-01-06T08:21:30.002000+0000')
def test_utc_date_time_deserialize(self):
assert (self.attr.deserialize('2047-01-06T08:21:30.002000+0000') == self.dt)
def test_utc_date_time_deserialize_pre_1000_not_padded(self):
assert (self.attr.deserialize('1-01-06T08:21:30.002000+0000') == self.dt.replace(year=1))
    @pytest.mark.parametrize('invalid_string', [
        '2047-01-06T08:21:30.002000',
        '2047-01-06T08:21:30+0000',
        '2047-01-06T08:21:30.001+0000',
        '2047-01-06T08:21:30.002000-0000',
        '2047-01-06T08:21:30.002000+0030',
        '2047-01-06 08:21:30.002000+0000',
        '2.47-01-06T08:21:30.002000+0000',
        'abcd-01-06T08:21:30.002000+0000',
        '2047-ab-06T08:21:30.002000+0000',
        '2047-01-abT08:21:30.002000+0000',
        '2047-01-06Tab:21:30.002000+0000',
        '2047-01-06T08:ab:30.002000+0000',
        '2047-01-06T08:21:ab.002000+0000',
        '2047-01-06T08:21:30.a00000+0000',
    ])
def test_utc_date_time_invalid(self, invalid_string):
with pytest.raises(ValueError, match="does not match format '%Y-%m-%dT%H:%M:%S.%f\\+0000'"):
            self.attr.deserialize(invalid_string)

@patch('beeref.selection.SelectableMixin.mousePressEvent')
def test_mouse_press_event_when_leftclick(mouse_mock):
item = MultiSelectItem()
item.fit_selection_area(QtCore.QRectF(0, 0, 100, 80))
event = MagicMock(button=MagicMock(return_value=Qt.MouseButton.LeftButton))
item.mousePressEvent(event)
event.ignore.assert_not_called()
    mouse_mock.assert_called_once_with(event)

class TestMWSResponseObject():
def test_mwsresponse_repr(self, simple_mwsresponse):
assert (repr(simple_mwsresponse) == '<MWSResponse [200]>')
def test_mwsresponse_base_attrs(self, simple_mwsresponse):
mws_response = simple_mwsresponse
assert isinstance(mws_response.original, Response)
assert (mws_response.headers == mws_response.original.headers)
assert (mws_response.text == mws_response.original.text)
assert (mws_response.content == mws_response.original.content)
assert (mws_response.status_code == mws_response.original.status_code)
assert (mws_response.encoding == mws_response.original.encoding)
assert (mws_response.reason == mws_response.original.reason)
assert (mws_response.cookies == mws_response.original.cookies)
assert (mws_response.elapsed == mws_response.original.elapsed)
assert (mws_response.request == mws_response.original.request)
print(mws_response.timestamp)
assert (mws_response.encoding == MWS_ENCODING)
def test_mwsresponse_with_key(self, simple_mwsresponse_with_resultkey):
mws_response = simple_mwsresponse_with_resultkey
assert (mws_response.metadata == DotDict({'RequestId': 'd384713e-7c79-4a6d-81cd-d0aa68c7b409'}))
assert (mws_response.request_id == 'd384713e-7c79-4a6d-81cd-d0aa68c7b409')
def test_mwsresponse_no_metadata(self, simple_mwsresponse_no_metadata):
mws_response = simple_mwsresponse_no_metadata
assert (mws_response.metadata is None)
assert (mws_response.request_id is None)
def test_mwsresponse_with_timestamp(self, simple_mwsresponse_with_timestamp):
mws_response = simple_mwsresponse_with_timestamp
        assert (mws_response.timestamp == datetime.datetime(2020, 8, 24, 16, 30))

def main():
utils.change_cwd()
out_filename = 'misc/file_version_info.txt'
filevers = (qutebrowser.__version_info__ + (0,))
prodvers = (qutebrowser.__version_info__ + (0,))
str_filevers = qutebrowser.__version__
str_prodvers = qutebrowser.__version__
comment_text = qutebrowser.__doc__
copyright_text = qutebrowser.__copyright__
trademark_text = 'qutebrowser is free software under the GNU General Public License'
en_us = 1033
utf_16 = 1200
ffi = vs.FixedFileInfo(filevers, prodvers)
kids = [vs.StringFileInfo([vs.StringTable('040904B0', [vs.StringStruct('Comments', comment_text), vs.StringStruct('CompanyName', 'qutebrowser.org'), vs.StringStruct('FileDescription', 'qutebrowser'), vs.StringStruct('FileVersion', str_filevers), vs.StringStruct('InternalName', 'qutebrowser'), vs.StringStruct('LegalCopyright', copyright_text), vs.StringStruct('LegalTrademarks', trademark_text), vs.StringStruct('OriginalFilename', 'qutebrowser.exe'), vs.StringStruct('ProductName', 'qutebrowser'), vs.StringStruct('ProductVersion', str_prodvers)])]), vs.VarFileInfo([vs.VarStruct('Translation', [en_us, utf_16])])]
file_version_info = vs.VSVersionInfo(ffi, kids)
with open(out_filename, 'w', encoding='utf-8') as f:
        f.write(str(file_version_info))

@click.option('--update', is_flag=True, help='Update shared modules everywhere?', default=False)
@click.option('--status', is_flag=True, help='Show status of shared modules everywhere.', default=False)
@click.option('--delete', is_flag=True, help='Delete shared modules everywhere?', default=False)
@_commands.command(name='git-submodule')
def git_submodule(update=False, status=False, delete=False):
if update:
for component in COMPONENTS_USING_SHARED_MODULE_COMMONS:
for cmd in ['rsync -az ../reana-commons modules']:
run_command(cmd, component)
for component in COMPONENTS_USING_SHARED_MODULE_DB:
for cmd in ['rsync -az ../reana-db modules']:
run_command(cmd, component)
elif delete:
for component in set((COMPONENTS_USING_SHARED_MODULE_COMMONS + COMPONENTS_USING_SHARED_MODULE_DB)):
for cmd in ['rm -rf ./modules/']:
run_command(cmd, component)
elif status:
for component in COMPONENTS_USING_SHARED_MODULE_COMMONS:
for cmd in ['git status -s']:
run_command(cmd, component)
for component in COMPONENTS_USING_SHARED_MODULE_DB:
for cmd in ['git status -s']:
run_command(cmd, component)
else:
click.echo('Unknown action. Please specify `--update`, `--status` or `--delete`. Exiting.')
        sys.exit(1)

class MixedNonTagRefTest(models.Model):
name = models.CharField(max_length=10)
singletag = tagulous.models.SingleTagField(MixedNonTagModel, blank=True, related_name='singletags')
tags = tagulous.models.TagField(MixedNonTagModel, blank=True, related_name='tags')
fk = models.ForeignKey(MixedNonTagModel, blank=True, null=True, related_name='fk', on_delete=models.CASCADE)
    mm = models.ManyToManyField(MixedNonTagModel, blank=True, related_name='mm')

def test_reduce_concatenations() -> None:
assert (str(parse('aa').reduce()) == 'a{2}')
assert (str(parse('bb').reduce()) == 'b{2}')
assert (str(parse('b*b').reduce()) == 'b+')
assert (str(parse('aa{2,}').reduce()) == 'a{3,}')
assert (str(parse('a*a{2}').reduce()) == 'a{2,}')
assert (str(parse('aa{0,8}').reduce()) == 'a{1,9}')
assert (str(parse('b{0,8}b').reduce()) == 'b{1,9}')
assert (str(parse('aab').reduce()) == 'a{2}b')
assert (str(parse('abb').reduce()) == 'ab{2}')
assert (str(parse('abb*').reduce()) == 'ab+')
assert (str(parse('abbc').reduce()) == 'ab{2}c')
assert (str(parse('a?ab').reduce()) == 'a{1,2}b')
assert (str(parse('(ac{2}|bc+)c').reduce()) == '(ac|bc*)c{2}')
assert (str(parse('a(a{2}b|a+c)').reduce()) == 'a{2}(a*c|ab)')
assert (str(parse('a{2,3}(a{2}b|a+c)').reduce()) == 'a{3,4}(a*c|ab)')
assert (str(parse('(ba{2}|ca+)a{2,3}').reduce()) == '(ba|ca*)a{3,4}')
assert (str(parse('za{2,3}(a{2}b|a+c)').reduce()) == 'za{3,4}(a*c|ab)')
assert (str(parse('(ba{2}|ca+)a{2,3}z').reduce()) == '(ba|ca*)a{3,4}z')
assert (str(parse('(a|bc)(a|bc)').reduce()) == '(a|bc){2}')
assert (str(parse('a+[ab]+').reduce()) == 'a[ab]+')
assert (str(parse('a{3,8}[ab]+').reduce()) == 'a{3}[ab]+')
assert (str(parse('[ab]+b+').reduce()) == '[ab]+b')
assert (str(parse('[ab]+a{3,8}').reduce()) == '[ab]+a{3}')
assert (str(parse('\\d+\\w+').reduce()) == '\\d\\w+')
    assert (str(parse('[ab]+a?').reduce()) == '[ab]+')

def test_to_recap_decimal():
converter = AvroConverter()
avro_schema = {'type': 'record', 'name': 'test_decimal', 'fields': [{'name': 'decimal', 'type': {'type': 'bytes', 'logicalType': 'decimal', 'precision': 5, 'scale': 2}}]}
schema = converter.to_recap(json.dumps(avro_schema))
field = schema.fields[0]
assert isinstance(field, BytesType)
assert (field.logical == 'build.recap.Decimal')
assert (field.bytes_ == avro_schema['fields'][0]['type'].get('size', ))
assert (field.variable == (avro_schema['fields'][0]['type']['type'] == 'fixed'))
assert (field.extra_attrs['precision'] == avro_schema['fields'][0]['type']['precision'])
    assert (field.extra_attrs['scale'] == avro_schema['fields'][0]['type'].get('scale', 0))

def uccsd_generator(single_amplitudes, double_amplitudes, anti_hermitian=True):
generator = FermionOperator()
if (isinstance(single_amplitudes, numpy.ndarray) or isinstance(double_amplitudes, numpy.ndarray)):
(single_amplitudes, double_amplitudes) = uccsd_convert_amplitude_format(single_amplitudes, double_amplitudes)
for ((i, j), t_ij) in single_amplitudes:
(i, j) = (int(i), int(j))
generator += FermionOperator(((i, 1), (j, 0)), t_ij)
if anti_hermitian:
generator += FermionOperator(((j, 1), (i, 0)), (- t_ij))
for ((i, j, k, l), t_ijkl) in double_amplitudes:
(i, j, k, l) = (int(i), int(j), int(k), int(l))
generator += FermionOperator(((i, 1), (j, 0), (k, 1), (l, 0)), t_ijkl)
if anti_hermitian:
generator += FermionOperator(((l, 1), (k, 0), (j, 1), (i, 0)), (- t_ijkl))
    return generator

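# Usage sketch (hypothetical amplitudes) in the list format the function
# consumes: [((i, j), t_ij), ...] and [((i, j, k, l), t_ijkl), ...].
singles = [((1, 0), 0.1)]
doubles = [((3, 2, 1, 0), 0.05)]
generator = uccsd_generator(singles, doubles)  # anti-Hermitian by default
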
def is_duplicate_mapping(mapping: list[int], actual_types: list[Type], actual_kinds: list[ArgKind]) -> bool:
    # A formal mapped by several actuals is not a duplicate when the two
    # actuals are a *args/**kwargs pair, or when every actual is a **kwargs of
    # non-TypedDict type (any of them may simply be empty at runtime).
    return (
        len(mapping) > 1
        and not (
            (len(mapping) == 2)
            and (actual_kinds[mapping[0]] == nodes.ARG_STAR)
            and (actual_kinds[mapping[1]] == nodes.ARG_STAR2)
        )
        and not all(
            (actual_kinds[m] == nodes.ARG_STAR2)
            and not isinstance(get_proper_type(actual_types[m]), TypedDictType)
            for m in mapping
        )
    )

class RustLexer(RegexLexer):
name = 'Rust'
    url = 'https://www.rust-lang.org/'
filenames = ['*.rs', '*.rs.in']
aliases = ['rust', 'rs']
mimetypes = ['text/rust', 'text/x-rust']
version_added = '1.6'
keyword_types = (words(('u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128', 'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool'), suffix='\\b'), Keyword.Type)
builtin_funcs_types = (words(('Copy', 'Send', 'Sized', 'Sync', 'Unpin', 'Drop', 'Fn', 'FnMut', 'FnOnce', 'drop', 'Box', 'ToOwned', 'Clone', 'PartialEq', 'PartialOrd', 'Eq', 'Ord', 'AsRef', 'AsMut', 'Into', 'From', 'Default', 'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator', 'ExactSizeIterator', 'Option', 'Some', 'None', 'Result', 'Ok', 'Err', 'String', 'ToString', 'Vec'), suffix='\\b'), Name.Builtin)
builtin_macros = (words(('asm', 'assert', 'assert_eq', 'assert_ne', 'cfg', 'column', 'compile_error', 'concat', 'concat_idents', 'dbg', 'debug_assert', 'debug_assert_eq', 'debug_assert_ne', 'env', 'eprint', 'eprintln', 'file', 'format', 'format_args', 'format_args_nl', 'global_asm', 'include', 'include_bytes', 'include_str', 'is_aarch64_feature_detected', 'is_arm_feature_detected', 'is_mips64_feature_detected', 'is_mips_feature_detected', 'is_powerpc64_feature_detected', 'is_powerpc_feature_detected', 'is_x86_feature_detected', 'line', 'llvm_asm', 'log_syntax', 'macro_rules', 'matches', 'module_path', 'option_env', 'panic', 'print', 'println', 'stringify', 'thread_local', 'todo', 'trace_macros', 'unimplemented', 'unreachable', 'vec', 'write', 'writeln'), suffix='!'), Name.Function.Magic)
    tokens = {
        'root': [('#![^[\\r\\n].*$', Comment.Preproc), default('base')],
        'base': [('\\n', Whitespace), ('\\s+', Whitespace), ('//!.*?\\n', String.Doc), ('///(\\n|[^/].*?\\n)', String.Doc), ('//(.*?)\\n', Comment.Single), ('/\\*\\*(\\n|[^/*])', String.Doc, 'doccomment'), ('/\\*!', String.Doc, 'doccomment'), ('/\\*', Comment.Multiline, 'comment'), ('\\$([a-zA-Z_]\\w*|\\(,?|\\),?|,?)', Comment.Preproc), (words(('as', 'async', 'await', 'box', 'const', 'crate', 'dyn', 'else', 'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move', 'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait', 'unsafe', 'use', 'where', 'while'), suffix='\\b'), Keyword), (words(('abstract', 'become', 'do', 'final', 'macro', 'override', 'priv', 'typeof', 'try', 'unsized', 'virtual', 'yield'), suffix='\\b'), Keyword.Reserved), ('(true|false)\\b', Keyword.Constant), ('self\\b', Name.Builtin.Pseudo), ('mod\\b', Keyword, 'modname'), ('let\\b', Keyword.Declaration), ('fn\\b', Keyword, 'funcname'), ('(struct|enum|type|union)\\b', Keyword, 'typename'), ('(default)(\\s+)(type|fn)\\b', bygroups(Keyword, Text, Keyword)), keyword_types, ('[sS]elf\\b', Name.Builtin.Pseudo), builtin_funcs_types, builtin_macros, ('::\\b', Text), ('(?::|->)', Text, 'typename'), ("(break|continue)(\\b\\s*)(\\'[A-Za-z_]\\w*)?", bygroups(Keyword, Text.Whitespace, Name.Label)), ('\'(\\\\[\'"\\\\nrt]|\\\\x[0-7][0-9a-fA-F]|\\\\0|\\\\u\\{[0-9a-fA-F]{1,6}\\}|.)\'', String.Char), ('b\'(\\\\[\'"\\\\nrt]|\\\\x[0-9a-fA-F]{2}|\\\\0|\\\\u\\{[0-9a-fA-F]{1,6}\\}|.)\'', String.Char), ('0b[01_]+', Number.Bin, 'number_lit'), ('0o[0-7_]+', Number.Oct, 'number_lit'), ('0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), ('[0-9][0-9_]*(\\.[0-9_]+[eE][+\\-]?[0-9_]+|\\.[0-9_]*(?!\\.)|[eE][+\\-]?[0-9_]+)', Number.Float, 'number_lit'), ('[0-9][0-9_]*', Number.Integer, 'number_lit'), ('b"', String, 'bytestring'), ('"', String, 'string'), ('(?s)b?r(#*)".*?"\\1', String), ("'", Operator, 'lifetime'), ('\\.\\.=?', Operator), ('[{}()\\[\\],.;]', Punctuation), ('[+\\-*/%&|<>^!~=:?]', Operator), ('[a-zA-Z_]\\w*', Name), ('r#[a-zA-Z_]\\w*', Name), ('#!?\\[', Comment.Preproc, 'attribute['), ('#', Text)],
        'comment': [('[^*/]+', Comment.Multiline), ('/\\*', Comment.Multiline, '#push'), ('\\*/', Comment.Multiline, '#pop'), ('[*/]', Comment.Multiline)],
        'doccomment': [('[^*/]+', String.Doc), ('/\\*', String.Doc, '#push'), ('\\*/', String.Doc, '#pop'), ('[*/]', String.Doc)],
        'modname': [('\\s+', Text), ('[a-zA-Z_]\\w*', Name.Namespace, '#pop'), default('#pop')],
        'funcname': [('\\s+', Text), ('[a-zA-Z_]\\w*', Name.Function, '#pop'), default('#pop')],
        'typename': [('\\s+', Text), ('&', Keyword.Pseudo), ("'", Operator, 'lifetime'), builtin_funcs_types, keyword_types, ('[a-zA-Z_]\\w*', Name.Class, '#pop'), default('#pop')],
        'lifetime': [('(static|_)', Name.Builtin), ('[a-zA-Z_]+\\w*', Name.Attribute), default('#pop')],
        'number_lit': [('[ui](8|16|32|64|size)', Keyword, '#pop'), ('f(32|64)', Keyword, '#pop'), default('#pop')],
        'string': [('"', String, '#pop'), ('\\\\[\'"\\\\nrt]|\\\\x[0-7][0-9a-fA-F]|\\\\0|\\\\u\\{[0-9a-fA-F]{1,6}\\}', String.Escape), ('[^\\\\"]+', String), ('\\\\', String)],
        'bytestring': [('\\\\x[89a-fA-F][0-9a-fA-F]', String.Escape), include('string')],
        'attribute_common': [('"', String, 'string'), ('\\[', Comment.Preproc, 'attribute[')],
        'attribute[': [include('attribute_common'), ('\\]', Comment.Preproc, '#pop'), ('[^"\\]\\[]+', Comment.Preproc)],
    }

def plot_rat_stats(rat_sim, save_root, fmt):
for rating in range(1, 6):
exp_sim = (rat_sim == rating).astype(np.int32)
name = 'rat_stats_{}.{}'.format(rating, fmt)
title = 'Item Rating {} Distribution'.format(rating)
        plot_exp_stats(exp_sim, save_root, fmt, name, title)

class MenuButton(TelegramObject):
__slots__ = ('type',)
def __init__(self, type: str, *, api_kwargs: Optional[JSONDict]=None):
super().__init__(api_kwargs=api_kwargs)
self.type: str = type
self._id_attrs = (self.type,)
self._freeze()
    @classmethod
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['MenuButton']:
data = cls._parse_data(data)
if (data is None):
return None
if ((not data) and (cls is MenuButton)):
return None
_class_mapping: Dict[(str, Type[MenuButton])] = {cls.COMMANDS: MenuButtonCommands, cls.WEB_APP: MenuButtonWebApp, cls.DEFAULT: MenuButtonDefault}
if ((cls is MenuButton) and (data.get('type') in _class_mapping)):
return _class_mapping[data.pop('type')].de_json(data, bot=bot)
return super().de_json(data=data, bot=bot)
COMMANDS: Final[str] = constants.MenuButtonType.COMMANDS
WEB_APP: Final[str] = constants.MenuButtonType.WEB_APP
    DEFAULT: Final[str] = constants.MenuButtonType.DEFAULT

def RunGUI(sdkpath, args):
root = tk.Tk()
style = ttk.Style(root)
style.theme_use('default')
ttk.Style().configure('TButton', padding=6, relief='groove', border=2, foreground=GetButtonTextColour(), background=GetButtonBackground())
ttk.Style().configure('TLabel', foreground=GetTextColour(), background=GetBackground())
ttk.Style().configure('TCheckbutton', foreground=GetTextColour(), background=GetBackground())
ttk.Style().configure('TRadiobutton', foreground=GetTextColour(), background=GetBackground())
ttk.Style().configure('TLabelframe', foreground=GetTextColour(), background=GetBackground())
ttk.Style().configure('TLabelframe.Label', foreground=GetTextColour(), background=GetBackground())
ttk.Style().configure('TCombobox', foreground=GetTextColour(), background=GetBackground())
ttk.Style().configure('TListbox', foreground=GetTextColour(), background=GetBackground())
ttk.Style().map('TCheckbutton', background=[('disabled', GetBackground())])
ttk.Style().map('TRadiobutton', background=[('disabled', GetBackground())])
ttk.Style().map('TButton', background=[('disabled', GetBackground())])
ttk.Style().map('TLabel', background=[('background', GetBackground())])
app = ProjectWindow(root, sdkpath, args)
app.configure(background=GetBackground())
root.mainloop()
    sys.exit(ExitCodes.SUCCESS)

class CallBackVerification(object):
def __init__(self, val_targets, rec_prefix, summary_writer=None, image_size=(112, 112)):
self.rank: int = distributed.get_rank()
self.highest_acc: float = 0.0
self.highest_acc_list: List[float] = ([0.0] * len(val_targets))
self.ver_list: List[object] = []
self.ver_name_list: List[str] = []
        if (self.rank == 0):
self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)
self.summary_writer = summary_writer
def ver_test(self, backbone: torch.nn.Module, global_step: int):
results = []
for i in range(len(self.ver_list)):
(acc1, std1, acc2, std2, xnorm, embeddings_list) = verification.test(self.ver_list[i], backbone, 10, 10)
logging.info(('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm)))
logging.info(('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2)))
self.summary_writer: SummaryWriter
self.summary_writer.add_scalar(tag=self.ver_name_list[i], scalar_value=acc2, global_step=global_step)
if (acc2 > self.highest_acc_list[i]):
self.highest_acc_list[i] = acc2
logging.info(('[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i])))
results.append(acc2)
def init_dataset(self, val_targets, data_dir, image_size):
for name in val_targets:
path = os.path.join(data_dir, (name + '.bin'))
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
self.ver_list.append(data_set)
self.ver_name_list.append(name)
    def __call__(self, num_update, backbone: torch.nn.Module):
        if ((self.rank == 0) and (num_update > 0)):
            backbone.eval()
            self.ver_test(backbone, num_update)
            backbone.train()

def attr_parse(stream, length, attr_type_cls):
    # Parse type/value attribute pairs from a big-endian binary stream. If the
    # high bit of the type field is set, the 16-bit value is stored inline;
    # otherwise it is the byte length of the value data that follows.
    values = collections.OrderedDict()
    while (length > 0):
        (attr_type, value) = struct.unpack('>HH', stream.read(4))
        length -= 4
        if (attr_type & 0x8000):
            attr_type &= 0x7fff
        else:
            length -= value
            value = stream.read(value)
        values[attr_type_cls(attr_type)] = value
    return values

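# Usage sketch (hypothetical attribute enum): one inline attribute (high bit
# set, value 7) followed by one 4-byte data attribute; struct and collections
# are assumed imported as in the function above.
import enum
import io
class ExampleAttr(enum.IntEnum):
    FOO = 1
    BAR = 2
payload = struct.pack('>HH', 0x8001, 7) + struct.pack('>HH', 2, 4) + b'data'
assert attr_parse(io.BytesIO(payload), len(payload), ExampleAttr) == {ExampleAttr.FOO: 7, ExampleAttr.BAR: b'data'}
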
class ValidationCallback(Callback):
def __init__(self, manager: Manager, summary_writer: SummaryWriter, args, training_state: 'TrainingState', last_model_path, validation_frequency):
super().__init__()
self._args = args
self._validation_frequency = validation_frequency
self._training_state = training_state
self._last_model_path = last_model_path
self._summary_writer = summary_writer
self._manager = manager
self._task_is_pending = False
def on_epoch_end(self, epoch, logs=None):
if (self._validation_frequency and (epoch != 0) and ((epoch % self._validation_frequency) == 0)):
self._collect_results_and_submit_validation_task(epoch)
def on_train_end(self, logs=None):
self._collect_results_if_pending()
self._manager.input_queue.put(ChildTask(do_exit=True, args=None))
def _collect_results_and_submit_validation_task(self, epoch):
self._collect_results_if_pending()
if os.path.exists(self._last_model_path):
self._manager.input_queue.put(ChildTask(do_exit=False, args=(self._args, self._training_state.best_metric, epoch)))
self._task_is_pending = True
def _collect_results_if_pending(self):
if self._task_is_pending:
self._collect_results()
def _collect_results(self):
validation_result: ValidationResult = self._manager.output_queue.get()
evaluation = validation_result.evaluation
self._task_is_pending = False
if (evaluation.main_metric > self._training_state.best_metric):
self._training_state.best_metric = evaluation.main_metric
self._training_state.metric_name = evaluation.main_metric_name
self._training_state.best_epoch = validation_result.epoch
scalars_to_write = evaluation.scalars_for_logging()
        _write_scalars(self._summary_writer, scalars_to_write, validation_result.epoch)

def test_geodesic_inv__string_init(scalar_and_array):
geod = Geod('+ellps=clrk66')
(az12, az21, dist) = geod.inv(scalar_and_array(_BOSTON_LON), scalar_and_array(_BOSTON_LAT), scalar_and_array(_PORTLAND_LON), scalar_and_array(_PORTLAND_LAT))
    assert_almost_equal((az12, az21, dist), (scalar_and_array((- 66.531)), scalar_and_array(75.654), scalar_and_array(4164192.708)), decimal=3)

@contextlib.contextmanager
def _expand_shape_to_4d(weight_tensor: libpymo.TensorParams):
    # Assumed usage as a context manager: both branches below yield the tensor
    # and the finally block restores the original shape on exit.
    dims = len(weight_tensor.shape)
    if (dims > 5):
        raise RuntimeError(f'Unsupported tensor rank: {dims}')
if (dims == 4):
(yield weight_tensor)
else:
orig_shape = weight_tensor.shape
if (dims < 4):
_4d_shape = np.append(orig_shape, [1 for _ in range((4 - dims))]).astype(int)
else:
_4d_shape = np.array((orig_shape[:3] + [math.prod(orig_shape[3:])]))
try:
weight_tensor.shape = _4d_shape
(yield weight_tensor)
finally:
            weight_tensor.shape = orig_shape

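# Usage sketch (hypothetical weight_tensor built via libpymo):
# with _expand_shape_to_4d(weight_tensor) as wt:
#     ...  # wt.shape is padded/collapsed to rank 4 inside the block
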
def get_version_and_doc(filename):
NS = dict(__version__='', __doc__='')
docStatus = 0
with open(filename, 'rb') as fd:
data = fd.read()
for line in data.decode().splitlines():
if line.startswith('__version__'):
exec(line.strip(), NS, NS)
elif line.startswith('"""'):
if (docStatus == 0):
docStatus = 1
line = line.lstrip('"')
elif (docStatus == 1):
docStatus = 2
if (docStatus == 1):
NS['__doc__'] += (line.rstrip() + '\n')
if (not NS['__version__']):
raise RuntimeError('Could not find __version__')
    return (NS['__version__'], NS['__doc__'])

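# Usage sketch (hypothetical path): read the metadata without importing the
# module itself.
# version, doc = get_version_and_doc('mypackage/__init__.py')
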
def recover_coef1(seed):
input_list = ['m', 'k', 'A0', 'c']
output_coef = 'm_coef'
D_in = np.mat('1, 0, 0; 1, 0, -2; 0, 1, 0; 1, 0, -1').T
    D_out = np.mat('0; 0; 1')
dimension_info = [D_in, D_out]
basis1_in = np.array([1, 1, 0, (- 2)]).reshape((- 1), 1)
basis2_in = np.array([1, 0, 0, (- 1)]).reshape((- 1), 1)
basis_list = [basis1_in, basis2_in]
dimensionless_learning = DimensionlessLearning(df, input_list, output_coef, dimension_info, basis_list)
(r2, coef, coef_w) = dimensionless_learning.fit_pattern_search(seed=seed)
if (r2 > 0.8):
        print('final r2', r2, coef.flatten(), coef_w)

class Effect6600(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, src, context, projectionRange, **kwargs):
fit.ship.boostItemAttr('shieldThermalDamageResonance', src.getModifiedItemAttr('shipBonusCarrierC1'), skill='Caldari Carrier', **kwargs)
fit.ship.boostItemAttr('shieldEmDamageResonance', src.getModifiedItemAttr('shipBonusCarrierC1'), skill='Caldari Carrier', **kwargs)
fit.ship.boostItemAttr('shieldKineticDamageResonance', src.getModifiedItemAttr('shipBonusCarrierC1'), skill='Caldari Carrier', **kwargs)
        fit.ship.boostItemAttr('shieldExplosiveDamageResonance', src.getModifiedItemAttr('shipBonusCarrierC1'), skill='Caldari Carrier', **kwargs)

class TestReadmeSample(QiskitMLTestCase):
def test_readme_sample(self):
def print(*args):
if args:
self.log.debug(args[0], *args[1:])
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.algorithms import VQC
from qiskit.aqua.components.optimizers import COBYLA
from qiskit.aqua.components.feature_maps import RawFeatureVector
from qiskit.ml.datasets import wine
from qiskit.circuit.library import TwoLocal
seed = 1376
aqua_globals.random_seed = seed
feature_dim = 4
(_, training_input, test_input, _) = wine(training_size=12, test_size=4, n=feature_dim)
feature_map = RawFeatureVector(feature_dimension=feature_dim)
vqc = VQC(COBYLA(maxiter=100), feature_map, TwoLocal(feature_map.num_qubits, ['ry', 'rz'], 'cz', reps=3), training_input, test_input)
result = vqc.run(QuantumInstance(BasicAer.get_backend('statevector_simulator'), shots=1024, seed_simulator=seed, seed_transpiler=seed))
print('Testing accuracy: {:0.2f}'.format(result['testing_accuracy']))
        self.assertGreater(result['testing_accuracy'], 0.8)

@_config
def test_maximize_with_move_to_screen(manager):
manager.test_window('one')
manager.c.window.toggle_maximize()
assert (manager.c.window.info()['width'] == 464)
assert (manager.c.window.info()['height'] == 316)
assert (manager.c.window.info()['x'] == 16)
assert (manager.c.window.info()['y'] == 0)
assert (manager.c.window.info()['group'] == 'a')
manager.c.to_screen(1)
assert (manager.c.screen.info() == {'y': 0, 'x': 500, 'index': 1, 'width': 300, 'height': 380})
assert (manager.c.group.info()['name'] == 'b')
manager.c.group['a'].toscreen()
assert (manager.c.window.info()['width'] == 288)
assert (manager.c.window.info()['height'] == 326)
assert (manager.c.window.info()['x'] == 512)
assert (manager.c.window.info()['y'] == 30)
    assert (manager.c.window.info()['group'] == 'a')

def get_matching_convtransp(conv_op: Type[_ConvNd]=None, dimension: int=None) -> Type[_ConvTransposeNd]:
assert (not ((conv_op is not None) and (dimension is not None))), 'You MUST set EITHER conv_op OR dimension. Do not set both!'
if (conv_op is not None):
dimension = convert_conv_op_to_dim(conv_op)
assert (dimension in [1, 2, 3]), 'Dimension must be 1, 2 or 3'
if (dimension == 1):
return nn.ConvTranspose1d
elif (dimension == 2):
return nn.ConvTranspose2d
elif (dimension == 3):
        return nn.ConvTranspose3d

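# Usage sketch: look up the transposed-conv counterpart by class or by
# dimensionality (the assert above forbids passing both at once).
assert get_matching_convtransp(conv_op=nn.Conv2d) is nn.ConvTranspose2d
assert get_matching_convtransp(dimension=3) is nn.ConvTranspose3d
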
def train(epoch, criterion_list, optimizer):
train_loss = AverageMeter('train_loss', ':.4e')
train_loss_cls = AverageMeter('train_loss_cls', ':.4e')
train_loss_div = AverageMeter('train_loss_div', ':.4e')
top1_num = 0
top5_num = 0
total = 0
if (epoch >= args.warmup_epoch):
lr = adjust_lr(optimizer, epoch, args)
start_time = time.time()
criterion_cls = criterion_list[0]
criterion_div = criterion_list[1]
if args.method.startswith('PSKD'):
if (epoch == 0):
all_predictions = torch.zeros(len(trainloader.dataset), num_classes, dtype=torch.float32)
else:
all_predictions = torch.load(os.path.join(args.checkpoint_dir, 'predictions.pth.tar'), map_location=torch.device('cpu'))['prev_pred']
net.train()
for (batch_idx, (inputs, targets)) in enumerate(trainloader):
batch_start_time = time.time()
        if (not isinstance(inputs, list)):
inputs = inputs.cuda()
batch_size = inputs.size(0)
else:
batch_size = inputs[0].size(0)
        if (not isinstance(targets, list)):
targets = targets.cuda()
else:
input_indices = targets[1].cuda()
targets = targets[0].cuda()
if (epoch < args.warmup_epoch):
lr = adjust_lr(optimizer, epoch, args, batch_idx, len(trainloader))
loss_div = torch.tensor(0.0).cuda()
loss_cls = torch.tensor(0.0).cuda()
if (args.method == 'cross_entropy'):
logit = net(inputs)
loss_cls += criterion_cls(logit, targets)
elif (args.method == 'mixup'):
(logit, mixup_loss) = Mixup(net, inputs, targets, criterion_cls, alpha=0.4)
loss_cls += mixup_loss
elif (args.method == 'manifold_mixup'):
(logit, manifold_mixup_loss) = ManifoldMixup(net, inputs, targets, criterion_cls, alpha=2.0)
loss_cls += manifold_mixup_loss
elif (args.method == 'cutmix'):
(logit, cutmix_loss) = CutMix(net, inputs, targets, criterion_cls, alpha=1.0)
loss_cls += cutmix_loss
elif (args.method == 'label_smooth'):
logit = net(inputs)
loss_cls += LabelSmooth(logit, targets, num_classes=num_classes)
elif (args.method == 'FocalLoss'):
logit = net(inputs)
loss_cls += FocalLoss(logit, targets)
elif (args.method == 'TF_KD_self_reg'):
logit = net(inputs)
loss_cls += criterion_cls(logit, targets)
loss_div += TF_KD_reg(logit, targets, num_classes, epsilon=0.1, T=20)
elif (args.method == 'virtual_softmax'):
logit = net(inputs, targets, loss_type='virtual_softmax')
loss_cls += criterion_cls(logit, targets)
elif (args.method == 'Maximum_entropy'):
logit = net(inputs, targets)
entropy = (F.softmax(logit, dim=1) * F.log_softmax(logit, dim=1)).mean()
loss_cls += (criterion_cls(logit, targets) + (0.5 * entropy))
elif (args.method == 'DKS'):
(logit, dks_loss_cls, dks_loss_div) = DKS(net, inputs, targets, criterion_cls, criterion_div)
loss_cls += dks_loss_cls
loss_div += dks_loss_div
elif (args.method == 'SAD'):
(logit, sad_loss_cls, sad_loss_div) = SAD(net, inputs, targets, criterion_cls, criterion_div)
loss_cls += sad_loss_cls
loss_div += sad_loss_div
elif (args.method == 'BYOT'):
(logit, byot_loss_cls, byot_loss_div) = BYOT(net, inputs, targets, criterion_cls, criterion_div)
loss_cls += byot_loss_cls
loss_div += byot_loss_div
elif (args.method == 'DDGSD'):
(logit, ddsgd_loss_cls, ddsgd_loss_div) = DDGSD(net, inputs, targets, criterion_cls, criterion_div)
loss_cls += ddsgd_loss_cls
loss_div += ddsgd_loss_div
elif (args.method == 'CS-KD'):
(logit, cs_kd_loss_cls, cs_kd_loss_div) = CS_KD(net, inputs, targets, criterion_cls, criterion_div)
targets = targets[:(batch_size // 2)]
batch_size = (batch_size // 2)
loss_cls += cs_kd_loss_cls
loss_div += cs_kd_loss_div
elif args.method.startswith('FRSKD'):
(logit, frskd_loss_cls, frskd_loss_div) = FRSKD(net, inputs, targets, criterion_cls, criterion_div)
loss_cls += frskd_loss_cls
loss_div += frskd_loss_div
elif args.method.startswith('PSKD'):
(logit, pskd_loss_cls) = PSKD(net, inputs, targets, input_indices, epoch, all_predictions, num_classes, args)
loss_cls += pskd_loss_cls
elif args.method.startswith('BAKE'):
(logit, bake_loss_cls, bake_loss_div) = BAKE(net, inputs, targets, criterion_cls, criterion_div, args)
loss_cls += bake_loss_cls
loss_div += bake_loss_div
else:
raise ValueError('Unknown method: {}'.format(args.method))
loss = (loss_cls + loss_div)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.update(loss.item(), batch_size)
train_loss_cls.update(loss_cls.item(), batch_size)
train_loss_div.update(loss_div.item(), batch_size)
(top1, top5) = correct_num(logit, targets, topk=(1, 5))
top1_num += top1
top5_num += top5
total += targets.size(0)
print('Epoch:{}, batch_idx:{}/{}, lr:{:.5f}, Acc:{:.4f}, Duration:{:.2f}'.format(epoch, batch_idx, len(trainloader), lr, (top1_num.item() / total), (time.time() - batch_start_time)))
train_info = 'Epoch:{}\t lr:{:.5f}\t duration:{:.3f}\ntrain_loss:{:.5f}\t train_loss_cls:{:.5f}\t train_loss_div:{:.5f}\ntrain top1_acc: {:.4f} \t train top5_acc:{:.4f}'.format(epoch, lr, (time.time() - start_time), train_loss.avg, train_loss_cls.avg, train_loss_div.avg, (top1_num / total).item(), (top5_num / total).item())
print(train_info)
with open(args.log_txt, 'a+') as f:
f.write((train_info + '\n'))
if args.method.startswith('PSKD'):
torch.save({'prev_pred': all_predictions.cpu()}, os.path.join(args.checkpoint_dir, 'predictions.pth.tar')) |
def _create_method_tolerance_ap_values(tolerance_ap_data_frame, method, ordered_class_names: List[str]):
method_tolerance_ap_data_frame = tolerance_ap_data_frame[(tolerance_ap_data_frame[METHOD] == method)]
return [method_tolerance_ap_data_frame[(method_tolerance_ap_data_frame[SpottingEvaluation.CLASS_NAME] == class_name)][SpottingEvaluation.AVERAGE_PRECISION].values[0] for class_name in ordered_class_names] |
class HSTPIPIER(IntEnum):
RXINES = (1 << 0)
TXOUTES = (1 << 1)
TXSTPES = (1 << 2)
UNDERFIES = (1 << 2)
PERRES = (1 << 3)
NAKEDES = (1 << 4)
OVERFIES = (1 << 5)
RXSTALLDES = (1 << 6)
CRCERRES = (1 << 6)
SHORTPACKETIES = (1 << 7)
NBUSYBKES = (1 << 12)
PDISHDMAS = (1 << 16)
PFREEZES = (1 << 17)
RSTDTS = (1 << 18) |
def init_network_weights(model, init_type='normal', gain=0.02):
def _init_func(m):
classname = m.__class__.__name__
if (hasattr(m, 'weight') and ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1)))):
if (init_type == 'normal'):
nn.init.normal_(m.weight.data, 0.0, gain)
elif (init_type == 'xavier'):
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif (init_type == 'kaiming'):
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif (init_type == 'orthogonal'):
nn.init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method {} is not implemented'.format(init_type))
if (hasattr(m, 'bias') and (m.bias is not None)):
nn.init.constant_(m.bias.data, 0.0)
elif (classname.find('BatchNorm') != (- 1)):
nn.init.constant_(m.weight.data, 1.0)
nn.init.constant_(m.bias.data, 0.0)
elif (classname.find('InstanceNorm') != (- 1)):
if ((m.weight is not None) and (m.bias is not None)):
nn.init.constant_(m.weight.data, 1.0)
nn.init.constant_(m.bias.data, 0.0)
model.apply(_init_func) |
def create_access_token(repo, role, kind=None, friendly_name=None):
role = Role.get((Role.name == role))
kind_ref = None
if (kind is not None):
kind_ref = AccessTokenKind.get((AccessTokenKind.name == kind))
new_token = AccessToken.create(repository=repo, temporary=True, role=role, kind=kind_ref, friendly_name=friendly_name)
return new_token |
def _dump_test(unit, test_type, test_files, timeout, test_dir, custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path='', old_pytest=False, test_cwd=None):
if (test_type == 'PY_TEST'):
script_rel_path = 'py.test'
elif (test_type == 'FLEUR'):
script_rel_path = 'ytest.py'
elif (test_type == 'PEP8'):
script_rel_path = 'py.test.pep8'
elif (test_type == 'PY_FLAKES'):
script_rel_path = 'py.test.flakes'
else:
script_rel_path = test_type
fork_test_files = unit.get('FORK_TEST_FILES_MODE')
fork_mode = (' '.join(fork_mode) if fork_mode else '')
use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
if test_cwd:
test_cwd = test_cwd.replace('$TEST_CWD_VALUE', '').replace('"MACRO_CALLS_DELIM"', '').strip()
if binary_path:
if (fork_test_files == 'on'):
tests = test_files
else:
tests = [os.path.basename(binary_path)]
else:
tests = test_files
for test_name in tests:
test_record = {'TEST-NAME': os.path.splitext(test_name)[0], 'TEST-TIMEOUT': timeout, 'SCRIPT-REL-PATH': script_rel_path, 'TESTED-PROJECT-NAME': test_name, 'SOURCE-FOLDER-PATH': test_dir, 'CUSTOM-DEPENDENCIES': ' '.join(custom_deps), 'TEST-DATA': serialize_list(test_data), 'SPLIT-FACTOR': split_factor, 'FORK-MODE': fork_mode, 'FORK-TEST-FILES': fork_test_files, 'TEST-FILES': serialize_list(tests), 'SIZE': test_size, 'TAG': serialize_list(tags), 'REQUIREMENTS': serialize_list(requirements), 'USE_ARCADIA_PYTHON': (use_arcadia_python or ''), 'OLD_PYTEST': ('yes' if old_pytest else 'no'), 'PYTHON-PATHS': serialize_list(python_paths), 'TEST-CWD': (test_cwd or ''), 'SKIP_TEST': (unit.get('SKIP_TEST_VALUE') or '')}
if binary_path:
test_record['BINARY-PATH'] = strip_roots(binary_path)
data = dump_test(test_record)
if data:
unit.set_property(['DART_DATA', data])
save_in_file(unit.get('TEST_DART_OUT_FILE'), data) |
class _DebuggingTips(SetuptoolsWarning):
_SUMMARY = 'Problem in editable installation.'
_DETAILS = '\n An error happened while installing `{project}` in editable mode.\n\n The following steps are recommended to help debug this problem:\n\n - Try to install the project normally, without using the editable mode.\n Does the error still persist?\n (If it does, try fixing the problem before attempting the editable mode).\n - If you are using binary extensions, make sure you have all OS-level\n dependencies installed (e.g. compilers, toolchains, binary libraries, ...).\n - Try the latest version of setuptools (maybe the error was already fixed).\n - If you (or your project dependencies) are using any setuptools extension\n or customization, make sure they support the editable mode.\n\n After following the steps above, if the problem still persists and\n you think this is related to how setuptools handles editable installations,\n please submit a reproducible example\n (see https://stackoverflow.com/help/minimal-reproducible-example) to:\n\n https://github.com/pypa/setuptools/issues\n '
_SEE_DOCS = 'userguide/development_mode.html' |
@jax_funcify.register(ptr.RandomVariable)
def jax_funcify_RandomVariable(op, node, **kwargs):
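    # Build a JAX sampler for this RandomVariable: if any static output dim is unknown, defer to the runtime size argument; otherwise bake the static shape in.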
rv = node.outputs[1]
out_dtype = rv.type.dtype
out_size = rv.type.shape
if (op.ndim_supp > 0):
out_size = node.outputs[1].type.shape[:(- op.ndim_supp)]
if (None in out_size):
assert_size_argument_jax_compatible(node)
def sample_fn(rng, size, dtype, *parameters):
return jax_sample_fn(op)(rng, size, out_dtype, *parameters)
else:
def sample_fn(rng, size, dtype, *parameters):
return jax_sample_fn(op)(rng, out_size, out_dtype, *parameters)
return sample_fn |
def test_direct_junction_offsets_suc_suc_2_left(direct_junction_left_lane_fixture):
(main_road, small_road, junction_creator) = direct_junction_left_lane_fixture
main_road.add_successor(xodr.ElementType.junction, junction_creator.id)
small_road.add_successor(xodr.ElementType.junction, junction_creator.id)
junction_creator.add_connection(main_road, small_road, (- 3), 1)
assert (main_road.succ_direct_junction == {small_road.id: 2})
assert (small_road.succ_direct_junction == {main_road.id: 2})
assert (junction_creator.junction.connections[0].links[0] == ((- 3), 1)) |
class MPRIS(EventPlugin):
PLUGIN_ID = 'mpris'
PLUGIN_NAME = _('MPRIS D-Bus Support')
PLUGIN_DESC_MARKUP = _('Allows control of Quod Libet using the <a href="https://specifications.freedesktop.org/mpris-spec/latest/">MPRIS 2</a> D-Bus Interface Specification. This allows various Linux desktop integrations (e.g. multimedia keys).')
PLUGIN_ICON = Icons.NETWORK_WORKGROUP
def PluginPreferences(self, parent):
box = Gtk.HBox()
ccb = ConfigCheckButton(_('Hide main window on close'), 'plugins', 'mpris_window_hide')
ccb.set_active(self.__do_hide())
box.pack_start(qltk.Frame(_('Preferences'), child=ccb), True, True, 0)
return box
def __do_hide(self):
return config.getboolean('plugins', 'mpris_window_hide', False)
def __window_delete(self, win, event):
if self.__do_hide():
win.hide()
return True
def enabled(self):
self.__sig = app.window.connect('delete-event', self.__window_delete)
self.objects = []
for service in [MPRIS2]:
try:
self.objects.append(service())
except dbus.DBusException:
pass
if indicate:
self.__indicate_server = s = indicate.indicate_server_ref_default()
s.set_type('music.quodlibet')
s.set_desktop_file('/usr/share/applications/io.github.quodlibet.QuodLibet.desktop')
s.show()
def disabled(self):
if indicate:
self.__indicate_server.hide()
for obj in self.objects:
obj.remove_from_connection()
self.objects = []
import gc
gc.collect()
app.window.disconnect(self.__sig)
def plugin_on_paused(self):
for obj in self.objects:
obj.paused()
def plugin_on_unpaused(self):
for obj in self.objects:
obj.unpaused()
def plugin_on_song_started(self, song):
for obj in self.objects:
obj.song_started(song)
def plugin_on_song_ended(self, song, skipped):
for obj in self.objects:
obj.song_ended(song, skipped) |
class KnownValues(unittest.TestCase):
def test_ea_adc2_k(self):
(e, v, p, x) = kadc.kernel(nroots=3, kptlist=[0])
self.assertAlmostEqual(e[0][0], 0., 4)
self.assertAlmostEqual(e[0][1], 1., 4)
self.assertAlmostEqual(e[0][2], 1., 4)
self.assertAlmostEqual(p[0][0], 1., 4)
self.assertAlmostEqual(p[0][1], 0., 4)
self.assertAlmostEqual(p[0][2], 2.99e-06, 4)
def test_ea_adc2x_k_high_cost(self):
nmp = [2, 2, 2]
kpts = cell.make_kpts(nmp)
kpts -= kpts[0]
kmf = scf.KRHF(cell, kpts, exxdiv=None).density_fit().run()
kadc = adc.KRADC(kmf)
kadc.method = 'adc(2)-x'
kadc.method_type = 'ea'
(e, v, p, x) = kadc.kernel(nroots=3, kptlist=[0])
self.assertAlmostEqual(e[0][0], 0., 4)
self.assertAlmostEqual(e[0][1], 1., 4)
self.assertAlmostEqual(e[0][2], 1., 4)
self.assertAlmostEqual(p[0][0], 1., 4)
self.assertAlmostEqual(p[0][1], 1e-08, 4)
self.assertAlmostEqual(p[0][2], 2e-08, 4)
def test_ea_adc3_k_skip(self):
kadc.method = 'adc(3)'
(e, v, p, x) = kadc.kernel(nroots=3, kptlist=[0])
self.assertAlmostEqual(e[0][0], 0., 4)
self.assertAlmostEqual(e[0][1], 1., 4)
self.assertAlmostEqual(e[0][2], 1., 4)
self.assertAlmostEqual(p[0][0], 1., 4)
self.assertAlmostEqual(p[0][1], 0.0011169, 4)
self.assertAlmostEqual(p[0][2], 0., 4) |
def _sanitize_args_with_chunks(*args):
new_args = []
for arg in args:
if (_is_chunk_tuple(arg) and _chunks_are_irregular(arg)):
new_chunks = _regular_chunks_from_irregular_chunks(arg)
new_args.append(new_chunks)
else:
new_args.append(arg)
return new_args |
def test_invalid_coverage_source(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.makeini('\n [pytest]\n console_output_style=classic\n ')
result = testdir.runpytest('-v', '--cov=non_existent_module', '--cov-report=term-missing', script)
result.stdout.fnmatch_lines(['*10 passed*'])
result.stderr.fnmatch_lines(['*No data was collected.*'])
result.stdout.fnmatch_lines(['*Failed to generate report: No data to report.'])
assert (result.ret == 0)
matching_lines = [line for line in result.outlines if ('%' in line)]
assert (not matching_lines) |
def test_pytest() -> None:
ast_node = builder.extract_node('\n import pytest\n pytest #\n ')
module = next(ast_node.infer())
attrs = ['deprecated_call', 'warns', 'exit', 'fail', 'skip', 'importorskip', 'xfail', 'mark', 'raises', 'freeze_includes', 'set_trace', 'fixture', 'yield_fixture']
for attr in attrs:
assert (attr in module) |
@dataclass
class TFBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
last_hidden_state: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
cross_attentions: Optional[Tuple[tf.Tensor]] = None |
def generate_model_output_with_no_positive_examples() -> Dict[(str, torch._tensor.Tensor)]:
return {'predictions': torch.tensor([[1.0, 0.0, 0.51, 0.8, 1.0, 0.0, 0.51, 0.8, 1.0, 0.0, 0.51, 0.8]]), 'session': torch.tensor([[1, 1, 1, 1, 1, 1, 1, (- 1), (- 1), (- 1), (- 1), (- 1)]]), 'labels': torch.tensor([([0.0] * 12)]), 'weights': torch.tensor([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]), 'expected_recall': torch.tensor([float('nan')])} |
def select_best2(acc_dict, sess):
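    # Scan per-epoch (base_acc, novel_acc) pairs: lock on at the first epoch where novel beats base, then keep the latest epoch that improves both; default to the final epoch.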
sess_all_acc = acc_dict[str(sess)]
f = 0
best_e = (len(sess_all_acc) - 1)
for (e, pr) in enumerate(sess_all_acc):
if ((pr[0] < pr[1]) and (f == 0)):
best_base = pr[0]
best_novel = pr[1]
best_e = e
f = 1
if (f == 1):
if ((pr[0] > best_base) and (pr[1] > best_novel)):
best_base = pr[0]
best_novel = pr[1]
best_e = e
return best_e |
class TimeGraph():
def setup(self):
test_file_path = mm.datasets.get_path('bubenec')
self.df_streets = gpd.read_file(test_file_path, layer='streets')
self.network = mm.gdf_to_nx(self.df_streets)
self.network = mm.node_degree(self.network)
self.dual = mm.gdf_to_nx(self.df_streets, approach='dual')
def time_node_degree(self):
mm.node_degree(graph=self.network)
def time_meshedness(self):
mm.meshedness(self.network)
def time_mean_node_dist(self):
mm.mean_node_dist(self.network)
def time_cds_length(self):
mm.cds_length(self.network)
def time_mean_node_degree(self):
mm.mean_node_degree(self.network)
def time_proportion(self):
mm.proportion(self.network, three='three', four='four', dead='dead')
def time_cyclomatic(self):
mm.cyclomatic(self.network)
def time_edge_node_ratio(self):
mm.edge_node_ratio(self.network)
def time_gamma(self):
mm.gamma(self.network)
def time_closeness_centrality(self):
mm.closeness_centrality(self.network, weight='mm_len')
def time_betweenness_centrality_nodes(self):
mm.betweenness_centrality(self.network)
def time_betweenness_centrality_edges(self):
mm.betweenness_centrality(self.network, mode='edges')
def time_betweenness_centrality_angular(self):
mm.betweenness_centrality(self.dual, weight='angle')
def time_betweenness_centrality_local(self):
mm.betweenness_centrality(self.network, radius=5, weight=None)
def time_straightness_centrality(self):
mm.straightness_centrality(self.network)
def time_clustering(self):
mm.clustering(self.network)
def time_subgraph(self):
mm.subgraph(self.network) |
def parse_data(in_file='../../data/GYAFC/em/trn.tsv'):
with open(in_file, 'r') as f:
data = f.read().split('\n')
data.remove('')
contexted = []
for (i, line) in enumerate(data):
source_txt = line.split('\t')[0]
target_txt = line.split('\t')[1]
row = (i, source_txt, target_txt)
contexted.append(row)
columns = ['id', 'source', 'target']
data_df = pd.DataFrame.from_records(contexted, columns=columns)
return data_df |
class Effect5125(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Remote Armor Repair Systems')), 'armorDamageAmount', src.getModifiedItemAttr('shipBonusGC2'), skill='Gallente Cruiser', **kwargs) |
def get_ibnbresnet(blocks, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
if (blocks == 50):
layers = [3, 4, 6, 3]
elif (blocks == 101):
layers = [3, 4, 23, 3]
elif (blocks == 152):
layers = [3, 8, 36, 3]
else:
raise ValueError('Unsupported IBN(b)-ResNet with number of blocks: {}'.format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [([ci] * li) for (ci, li) in zip(channels_per_layers, layers)]
net = IBNbResNet(channels=channels, init_block_channels=init_block_channels, **kwargs)
if pretrained:
if ((model_name is None) or (not model_name)):
raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
from .model_store import download_model
download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
return net |
def pdf_a(x, p, m):
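    # Vectorized wrapper: accepts a scalar or array; interior points sum the alpha and beta branch densities, while the endpoints 0 and 1 use only the alpha branch (alpha and beta come from the enclosing scope).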
scalar = False
if (not hasattr(x, '__len__')):
scalar = True
x = np.asarray([x])
pr = np.array([((pdf_a_single_angle(xi, p, m, alpha) + pdf_a_single_angle(xi, p, m, beta)) if (xi not in [0, 1]) else pdf_a_single_angle(xi, p, m, alpha)) for xi in x]).flatten()
return (pr[0] if scalar else pr) |
class RedHatUserApi(object):
def __init__(self, app_config):
self.cert = (MARKETPLACE_FILE, MARKETPLACE_SECRET)
self.user_endpoint = app_config.get('ENTITLEMENT_RECONCILIATION_USER_ENDPOINT')
def get_account_number(self, user):
email = user.email
account_number = entitlements.get_ebs_account_number(user.id)
if (account_number is None):
account_number = self.lookup_customer_id(email)
if account_number:
entitlements.save_ebs_account_number(user, account_number)
return account_number
def lookup_customer_id(self, email):
request_body_dict = {'by': {'emailStartsWith': email}, 'include': {'accountRelationships': [{'allOf': ['primary_email', 'is_supportable', 'account_details'], 'by': {'active': True}}]}}
request_url = f'{self.user_endpoint}/v2/findUsers'
try:
r = requests.request(method='post', url=request_url, cert=self.cert, json=request_body_dict, verify=True, timeout=REQUEST_TIMEOUT)
except requests.exceptions.ReadTimeout:
logger.info('request to %s timed out', self.user_endpoint)
return None
info = json.loads(r.content)
if (not info):
logger.debug('request to %s did not return any data', self.user_endpoint)
return None
for account in info:
if (account['accountRelationships'][0]['account']['type'] == 'person'):
account_number = account['accountRelationships'][0]['account'].get('ebsAccountNumber')
return account_number
return None |
class Jfif(Jpeg):
@classmethod
def from_stream(cls, stream):
markers = _JfifMarkers.from_stream(stream)
px_width = markers.sof.px_width
px_height = markers.sof.px_height
horz_dpi = markers.app0.horz_dpi
vert_dpi = markers.app0.vert_dpi
return cls(px_width, px_height, horz_dpi, vert_dpi) |
def test_skipif_has_precendence_over_ancestor_failed(runner, tmp_path):
source = '\n from pathlib import Path\n import pytask\n\n def task_example(produces=Path("file.txt")):\n raise Exception\n\n @pytask.mark.skipif(True, reason="God knows.")\n def task_example_2(path=Path("file.txt")): ...\n '
tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
assert (result.exit_code == ExitCode.FAILED)
assert ('1 Failed' in result.output)
assert ('1 Skipped' in result.output) |
class Gen_Data_loader():
def __init__(self, batch_size):
self.batch_size = batch_size
self.token_stream = []
def create_batches(self, data_file):
self.token_stream = []
with open(data_file, 'r') as f:
for line in f:
line = line.strip().split()
parse_line = [int(x) for x in line]
if (len(parse_line) == 20):
self.token_stream.append(parse_line)
self.num_batch = int((len(self.token_stream) / self.batch_size))
self.token_stream = self.token_stream[:(self.num_batch * self.batch_size)]
self.sequence_batch = np.split(np.array(self.token_stream), self.num_batch, 0)
self.pointer = 0
def next_batch(self):
ret = self.sequence_batch[self.pointer]
self.pointer = ((self.pointer + 1) % self.num_batch)
return ret
def reset_pointer(self):
self.pointer = 0 |
class uvm_nonblocking_peek_port(uvm_port_base):
def try_peek(self):
try:
(success, data) = self.export.try_peek()
except AttributeError:
raise UVMTLMConnectionError(f'Missing or wrong export in {self.get_full_name()}. Did you connect it?')
return (success, data)
def can_peek(self):
can = self.export.can_peek()
return can |
class TestMulticomp(TestCase):
def test_fdr(self):
(reject, pval_corr) = fdr(pvals)
assert_array_equal(reject, [False, False, True, False, False])
assert_array_almost_equal(pval_corr, [0.52, 0.175, 0.0005, 0.075, 0.175])
(_, pval_corr) = fdr(pvals2_NA)
assert_array_almost_equal(pval_corr, [0.52, np.nan, 0.28, 0.4, 0.28])
(_, pval_corr) = fdr(pvals_2d)
pval_corr = np.round(pval_corr.ravel(), 3)
assert_array_almost_equal(pval_corr, [0.52, 0.21, 0.001, 0.135, 0.21, 0.52, np.nan, 0.21, 0.386, 0.21])
(reject, pval_corr) = fdr(pvals, method='fdr_by')
assert_array_equal(reject, [False, False, True, False, False])
assert_array_almost_equal(pval_corr, [1.0, 0., 0., 0.17125, 0.])
(_, pval_corr) = fdr(pvals2_NA, method='fdr_by')
assert_array_almost_equal(pval_corr, [1.0, np.nan, 0.5833333, 0.8333333, 0.5833333])
(_, pval_corr) = fdr(pvals_2d, method='fdr_by')
pval_corr = np.round(pval_corr.ravel(), 3)
assert_array_almost_equal(pval_corr, [1.0, 0.594, 0.003, 0.382, 0.594, 1.0, np.nan, 0.594, 1.0, 0.594])
def test_bonf(self):
(reject, pval_corr) = bonf(pvals)
assert_array_equal(reject, [False, False, True, False, False])
assert_array_almost_equal(pval_corr, [1, 0.6, 0.0005, 0.15, 0.7])
(_, pval_corr) = bonf(pvals2_NA)
assert_array_almost_equal(pval_corr, [1, np.nan, 0.4, 1.0, 0.56])
(_, pval_corr) = bonf(pvals_2d)
pval_corr = np.round(pval_corr.ravel(), 3)
assert_array_almost_equal(pval_corr, [1, 1, 0.001, 0.27, 1, 1, np.nan, 0.9, 1.0, 1.0])
def test_sidak(self):
(reject, pval_corr) = sidak(pvals)
assert_array_equal(reject, [False, False, True, False, False])
assert_array_almost_equal(pval_corr, [0., 0., 0., 0., 0.])
(_, pval_corr) = sidak(pvals2_NA)
assert_array_almost_equal(pval_corr, [0., np.nan, 0.3439, 0.7599, 0.])
def test_holm(self):
(reject, pval_corr) = holm(pvals)
assert_array_equal(reject, [False, False, True, False, False])
assert_array_equal(pval_corr, [0.52, 0.36, 0.0005, 0.12, 0.36])
(_, pval_corr) = holm(pvals2)
assert_array_equal(pval_corr, [0.6, 0.5, 0.5, 0.6, 0.5])
(_, pval_corr) = holm(pvals2_NA)
assert_array_almost_equal(pval_corr, [0.6, np.nan, 0.4, 0.6, 0.42])
(_, pval_corr) = holm(pvals_2d)
pval_corr = np.round(pval_corr.ravel(), 3)
assert_array_almost_equal(pval_corr, [1, 0.72, 0.001, 0.24, 0.72, 1.0, np.nan, 0.7, 0.9, 0.72])
def test_multicomp(self):
(reject, pvals_corr) = multicomp(pvals, method='fdr_bh')
(reject, pvals_corr) = multicomp(pvals, method='fdr_by')
(reject, pvals_corr) = multicomp(pvals, method='h')
(reject, pvals_corr) = multicomp(pvals, method='b')
(reject, pvals_corr) = multicomp(pvals, method='sidak')
(reject, pvals_corr) = multicomp(pvals, method='none')
assert_array_equal(pvals, pvals_corr)
(reject, pvals_corr) = multicomp(pvals2, method='holm')
with pytest.raises(ValueError):
(reject, pvals_corr) = multicomp(pvals, method='wrong') |
def _print_preview(tab: apitypes.Tab) -> None:
def print_callback(ok: bool) -> None:
if (not ok):
message.error('Printing failed!')
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose)
diag.setWindowFlags(((diag.windowFlags() | Qt.WindowType.WindowMaximizeButtonHint) | Qt.WindowType.WindowMinimizeButtonHint))
diag.paintRequested.connect(functools.partial(tab.printing.to_printer, callback=print_callback))
diag.exec() |
class Info(OracleDatabase):
def __init__(self, args):
logging.debug('Info object created')
OracleDatabase.__init__(self, args)
self.version = ''
self.os = ''
def isVersion(self, version=None):
if (version in self.version):
return True
else:
return False
def __str__(self):
return 'Oracle Version: {0} and OS Version: {1}'.format(self.version, self.os) |
def process_camera_connector_chart():
connector_chart_dict = load_connector_chart()
res = subprocess.run(['v4l2-ctl', '--list-devices'], stdout=subprocess.PIPE)
output_string = res.stdout
providers = []
topic_names = []
for (topic_name, usb_id) in connector_chart_dict.items():
dev_number = get_dev(output_string, usb_id)
providers.append(dev_number)
topic_names.append(topic_name)
return (providers, topic_names) |
def test_solver_returns_extras_if_requested_in_dependencies_and_not_in_root_package(solver: Solver, repo: Repository, package: ProjectPackage) -> None:
package.add_dependency(Factory.create_dependency('A', '*'))
package.add_dependency(Factory.create_dependency('B', '*'))
package.add_dependency(Factory.create_dependency('C', '*'))
package_a = get_package('A', '1.0')
package_b = get_package('B', '1.0')
package_c = get_package('C', '1.0')
package_d = get_package('D', '1.0')
package_b.add_dependency(Factory.create_dependency('C', {'version': '^1.0', 'extras': ['foo']}))
package_c.add_dependency(Factory.create_dependency('D', {'version': '^1.0', 'optional': True}))
package_c.extras = {canonicalize_name('foo'): [Factory.create_dependency('D', '^1.0')]}
repo.add_package(package_a)
repo.add_package(package_b)
repo.add_package(package_c)
repo.add_package(package_d)
transaction = solver.solve()
check_solver_result(transaction, [{'job': 'install', 'package': package_d}, {'job': 'install', 'package': package_c}, {'job': 'install', 'package': package_a}, {'job': 'install', 'package': package_b}]) |
def main():
default_cass = import_module('settings.default_cassandra')
default_only_cass = import_module('settings.default_only_cassandra')
secondary_cassandra = import_module('settings.secondary_cassandra')
multi_cassandra = import_module('settings.multi_cassandra')
metadata_disabled = import_module('settings.metadata_disabled')
django.setup()
run_tests(default_cass)
run_tests(default_only_cass)
run_tests(secondary_cassandra)
run_tests(multi_cassandra)
run_tests(metadata_disabled)
sys.exit(0) |
class WhisperTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = MAX_MODEL_INPUT_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, merges_file, normalizer_file=None, errors='replace', unk_token='<|endoftext|>', bos_token='<|startoftranscript|>', eos_token='<|endoftext|>', pad_token=None, add_prefix_space=False, language=None, task=None, predict_timestamps=False, **kwargs):
bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, **kwargs)
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for (k, v) in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
if (normalizer_file is not None):
with open(normalizer_file, encoding='utf-8') as vocab_handle:
self.english_spelling_normalizer = json.load(vocab_handle)
else:
self.english_spelling_normalizer = None
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
self.language = language
self.task = task
self.predict_timestamps = predict_timestamps
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
@property
def vocab_size(self) -> int:
return len(self.encoder)
def bpe(self, token):
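        # Standard byte-pair encoding: repeatedly merge the lowest-ranked adjacent symbol pair until no known merge remains; results are memoized in self.cache.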
if (token in self.cache):
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if (not pairs):
return token
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def set_prefix_tokens(self, language: str=None, task: str=None, predict_timestamps: bool=None):
self.language = (language if (language is not None) else self.language)
self.task = (task if (task is not None) else self.task)
self.predict_timestamps = (predict_timestamps if (predict_timestamps is not None) else self.predict_timestamps)
@property
def prefix_tokens(self) -> List[int]:
all_special_ids = self.all_special_ids
bos_token_id = all_special_ids[(- 106)]
translate_token_id = all_special_ids[(- 6)]
transcribe_token_id = all_special_ids[(- 5)]
notimestamps_token_id = all_special_ids[(- 1)]
langs = tuple(LANGUAGES.keys())
if (self.language is not None):
self.language = self.language.lower()
if (self.language in TO_LANGUAGE_CODE):
language_id = TO_LANGUAGE_CODE[self.language]
elif (self.language in TO_LANGUAGE_CODE.values()):
language_id = self.language
else:
is_language_code = (len(self.language) == 2)
raise ValueError(f'Unsupported language: {self.language}. Language should be one of: {(list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys()))}.')
if (self.task is not None):
if (self.task not in TASK_IDS):
raise ValueError(f'Unsupported task: {self.task}. Task should be in: {TASK_IDS}')
bos_sequence = [bos_token_id]
if (self.language is not None):
bos_sequence.append(((bos_token_id + 1) + langs.index(language_id)))
if (self.task is not None):
bos_sequence.append((transcribe_token_id if (self.task == 'transcribe') else translate_token_id))
if (not self.predict_timestamps):
bos_sequence.append(notimestamps_token_id)
return bos_sequence
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
if (token_ids_1 is None):
return ((self.prefix_tokens + token_ids_0) + [self.eos_token_id])
return (((self.prefix_tokens + token_ids_0) + token_ids_1) + [self.eos_token_id])
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
prefix_ones = ([1] * len(self.prefix_tokens))
suffix_ones = [1]
if (token_ids_1 is None):
return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)
def _tokenize(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.decoder.get(index, '')
def _normalize(self, text):
normalizer = EnglishTextNormalizer(self.english_spelling_normalizer)
return normalizer(text)
def _decode_with_timestamps(self, token_ids, time_precision=0.02) -> str:
timestamp_begin = (self.all_special_ids[(- 1)] + 1)
outputs = [[]]
for token in token_ids:
if (token >= timestamp_begin):
timestamp = f'<|{((token - timestamp_begin) * time_precision):.2f}|>'
outputs.append(timestamp)
outputs.append([])
else:
outputs[(- 1)].append(token)
outputs = [(s if isinstance(s, str) else self.decode(s)) for s in outputs]
return ''.join(outputs)
def _compute_offsets(self, token_ids, time_precision=0.02):
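        # Split the token stream at paired timestamp tokens and emit {'text', 'timestamp': (start, end)} segments, with times in seconds.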
offsets = []
token_ids = np.array(token_ids)
if ((token_ids.shape[0] > 1) and (len(token_ids.shape) > 1)):
raise ValueError('Can only process a single input at a time')
timestamp_begin = (self.all_special_ids[(- 1)] + 1)
timestamp_tokens = (token_ids >= timestamp_begin)
consecutive = (np.where((timestamp_tokens[:(- 1)] & timestamp_tokens[1:]))[0] + 1)
if ((consecutive.shape[0] == 0) and (timestamp_tokens.sum() <= 1)):
return []
elif ((np.where(timestamp_tokens)[0][(- 1)] + 1) not in consecutive):
consecutive = np.append(consecutive, (np.where(timestamp_tokens)[0][(- 1)] + 1))
last_slice = np.where(timestamp_tokens)[0][0]
for current_slice in consecutive:
sliced_tokens = token_ids[last_slice:current_slice]
if (len(sliced_tokens) > 1):
start_timestamp_position = (sliced_tokens[0].item() - timestamp_begin)
end_timestamp_position = (sliced_tokens[(- 1)].item() - timestamp_begin)
offsets.append({'text': self._decode(sliced_tokens), 'timestamp': ((start_timestamp_position * time_precision), (end_timestamp_position * time_precision))})
last_slice = current_slice
return offsets
def decode(self, token_ids, skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True, output_offsets: bool=False, time_precision=0.02, decode_with_timestamps: bool=False, **kwargs) -> str:
text = super().decode(token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
if decode_with_timestamps:
text = self._decode_with_timestamps(token_ids, time_precision=time_precision)
if output_offsets:
offsets = self._compute_offsets(token_ids, time_precision=time_precision)
return {'text': text, 'offsets': offsets}
return text
def _decode(self, token_ids: Union[(int, List[int])], skip_special_tokens: bool=False, normalize: bool=False, **kwargs) -> str:
self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if (skip_special_tokens and (token in self.all_special_ids)):
continue
if (token in self.added_tokens_encoder):
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = ''.join(sub_texts)
if normalize:
clean_text = self._normalize(text)
return clean_text
else:
return text
def convert_tokens_to_string(self, tokens):
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
normalizer_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['normalizer_file']))
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write((' '.join(bpe_tokens) + '\n'))
index += 1
if (self.english_spelling_normalizer is not None):
with open(normalizer_file, 'w', encoding='utf-8') as f:
f.write((json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
return (vocab_file, merge_file, normalizer_file)
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if (is_split_into_words or add_prefix_space):
text = (' ' + text)
return (text, kwargs)
def _build_conversation_input_ids(self, conversation) -> List[int]:
input_ids = []
for (is_user, text) in conversation.iter_texts():
input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
if (len(input_ids) > self.model_max_length):
input_ids = input_ids[(- self.model_max_length):]
return input_ids
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
self.set_prefix_tokens(task=task, language=language, predict_timestamps=(not no_timestamps))
forced_tokens = self.prefix_tokens[1:]
forced_decoder_ids = [((rank + 1), token) for (rank, token) in enumerate(forced_tokens)]
return forced_decoder_ids
def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time_precision):
return _decode_asr(self, model_outputs, return_timestamps=return_timestamps, return_language=return_language, time_precision=time_precision) |
def monitor_kevent(ident: int, filter: int) -> ContextManager[_core.UnboundedQueue[select.kevent]]:
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return GLOBAL_RUN_CONTEXT.runner.io_manager.monitor_kevent(ident, filter)
except AttributeError:
raise RuntimeError('must be called from async context') from None |
def eval_train(model, device, train_loader):
model.eval()
train_loss = 0
correct = 0
with torch.no_grad():
for (data, target) in train_loader:
(data, target) = (data.to(device), target.to(device))
output = model(data)
train_loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
train_loss /= len(train_loader.dataset)
print('Training: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(train_loss, correct, len(train_loader.dataset), ((100.0 * correct) / len(train_loader.dataset))))
training_accuracy = (correct / len(train_loader.dataset))
return (train_loss, training_accuracy) |
@torch.inference_mode()
def binary_recall_at_fixed_precision(input: torch.Tensor, target: torch.Tensor, *, min_precision: float) -> Tuple[(torch.Tensor, torch.Tensor)]:
_binary_recall_at_fixed_precision_update_input_check(input, target, min_precision)
return _binary_recall_at_fixed_precision_compute(input, target, min_precision) |
class IdleTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, 0, None, s, r)
def fn(self, pkt, r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if (i.count == 0):
return self.hold()
elif ((i.control & 1) == 0):
i.control //= 2
return self.release(I_DEVA)
else:
i.control = ((i.control // 2) ^ 53256)
return self.release(I_DEVB) |
def expectation_counts(counts: Dict[(str, int)]) -> Dict[(str, int)]:
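    # For every non-empty qubit subset, accumulate the parity-weighted sum (-1)^popcount over the marginal counts; the output key marks subset membership with '1's.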
shots = np.sum(list(counts.values()))
numq = len(list(counts.keys())[0])
subsets = []
for r in range(numq):
subsets += list(combinations(range(numq), (r + 1)))
exp_data = {'00': shots}
for subset in subsets:
exp_counts = 0
exp_op = (numq * ['0'])
for qubit in subset:
exp_op[qubit] = '1'
for (key, val) in marginal_counts(counts, subset, pad_zeros=True).items():
exp_counts += (((- 1) ** key.count('1')) * val)
exp_data[''.join(exp_op)] = exp_counts
return exp_data |
def test_connection_request_key_header() -> None:
with pytest.raises(RemoteProtocolError) as excinfo:
_make_connection_request([(b'Host', b'localhost'), (b'Connection', b'Keep-Alive, Upgrade'), (b'Upgrade', b'websocket'), (b'Sec-WebSocket-Version', b'13')])
assert (str(excinfo.value) == "Missing header, 'Sec-WebSocket-Key'") |
def create_container(services=None, factory=Container):
container = (factory(services) if services else factory())
if (not ('fs' in container)):
import bonobo
container.setdefault('fs', bonobo.open_fs())
if (not ('http' in container)):
import requests
container.setdefault('http', requests)
return container |
@pytest.mark.parametrize('linesep', ['\n', '\r\n'])
def test_init_existing_pyproject_consistent_linesep(tester: CommandTester, source_dir: Path, init_basic_inputs: str, init_basic_toml: str, linesep: str) -> None:
pyproject_file = (source_dir / 'pyproject.toml')
existing_section = '\n[tool.black]\nline-length = 88\n'.replace('\n', linesep)
with open(pyproject_file, 'w', newline='') as f:
f.write(existing_section)
tester.execute(inputs=init_basic_inputs)
with open(pyproject_file, newline='') as f:
content = f.read()
init_basic_toml = init_basic_toml.replace('\n', linesep)
assert (f'{existing_section}{linesep}{init_basic_toml}' in content) |
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_default_mmcv_home():
os.environ.pop(ENV_MMCV_HOME, None)
os.environ.pop(ENV_XDG_CACHE_HOME, None)
assert (_get_mmcv_home() == os.path.expanduser(os.path.join(DEFAULT_CACHE_DIR, 'mmcv')))
model_urls = get_external_models()
assert (model_urls == mmcv.load(osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json'))) |
class TransformerLayer(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int((dim * mlp_ratio))
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
y = self.norm1(x)
x = (x + self.drop_path(self.attn(y)))
x = (x + self.drop_path(self.mlp(self.norm2(x))))
return x |
class TMVARegressor(TMVABase, Regressor):
def __init__(self, method='kBDT', features=None, factory_options='', **method_parameters):
TMVABase.__init__(self, factory_options=factory_options, method=method, **method_parameters)
Regressor.__init__(self, features=features)
def set_params(self, **params):
for (k, v) in params.items():
if hasattr(self, k):
setattr(self, k, v)
else:
if (k in _IGNORED_PARAMETERS):
continue
self.method_parameters[k] = v
def get_params(self, deep=True):
parameters = self.method_parameters.copy()
parameters['method'] = self.method
parameters['factory_options'] = self.factory_options
parameters['features'] = self.features
return parameters
def fit(self, X, y, sample_weight=None):
(X, y, sample_weight) = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False)
X = self._get_features(X).copy()
self.factory_options = '{}:AnalysisType=Regression'.format(self.factory_options)
return self._fit(X, y, sample_weight=sample_weight, model_type='regression')
fit.__doc__ = Regressor.fit.__doc__
def predict(self, X):
X = self._get_features(X)
return self._predict(X, model_type=('regression', None))
predict.__doc__ = Regressor.predict.__doc__
def staged_predict(self, X):
raise AttributeError("'staged_predict' is not supported by the TMVA library") |
class PyrockoSourceDialog(SourceEditDialog):
def __init__(self, delegate, ui_file, *args, **kwargs):
SourceEditDialog.__init__(self, delegate, ui_file, *args, **kwargs)
self.completer = QtWidgets.QCompleter()
self.completer_model = QtGui.QFileSystemModel(self.completer)
self.completer.setModel(self.completer_model)
self.completer.setMaxVisibleItems(8)
self.chooseStoreDirButton.released.connect(self.chooseStoreDir)
self.completer_model.setRootPath('')
self.completer.setParent(self.store_dir)
self.store_dir.setCompleter(self.completer)
self.store_dir.setValue = self.store_dir.setText
self.store_dir.value = self.store_dir.text
self.getSourceParameters()
@QtCore.pyqtSlot()
def chooseStoreDir(self):
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Open Pyrocko GF Store', os.getcwd())
if (folder != ''):
self.store_dir.setText(folder) |
def extrinsic_events(network, previous_state, current_state, next_state, indices=None, major_complex=None):
if major_complex:
mc_nodes = major_complex.subsystem.node_indices
elif indices:
mc_nodes = indices
else:
major_complex = compute.network.major_complex(network, current_state)
mc_nodes = major_complex.subsystem.node_indices
mechanisms = list(utils.powerset(mc_nodes, nonempty=True))
all_nodes = network.node_indices
return events(network, previous_state, current_state, next_state, all_nodes, mechanisms=mechanisms) |
def test_ddpg():
env = CartpoleEnv()
policy = DeterministicMLPPolicy(env.spec)
qf = ContinuousMLPQFunction(env.spec)
es = OUStrategy(env.spec)
algo = DDPG(env=env, policy=policy, qf=qf, es=es, n_epochs=1, epoch_length=100, batch_size=32, min_pool_size=50, replay_pool_size=1000, eval_samples=100)
algo.train() |
class Marcus(BaseKinetics):
def __init__(self, param, domain, reaction, options, phase='primary'):
super().__init__(param, domain, reaction, options, phase)
pybamm.citations.register('Sripad2020')
def _get_kinetics(self, j0, ne, eta_r, T, u):
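        # Marcus kinetics: oxidation/reduction rates carry Gaussian activation factors exp(-(lambda +/- F*eta/RT)^2 / (4*lambda*RT)).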
RT = (self.param.R * T)
Feta_RT = ((self.param.F * eta_r) / RT)
mhc_lambda = self.phase_param.mhc_lambda
exp_arg_ox = ((- ((mhc_lambda + Feta_RT) ** 2)) / ((4 * mhc_lambda) * RT))
exp_arg_red = ((- ((mhc_lambda - Feta_RT) ** 2)) / ((4 * mhc_lambda) * RT))
return ((u * j0) * (pybamm.exp(exp_arg_ox) - pybamm.exp(exp_arg_red))) |
@pytest.mark.parametrize('fill_color', [(255, 255, 255, 255), (60, 70, 80, 100), (255, 255, 255, 255), (0, 255, 255, 255), (255, 0, 255, 255), (255, 255, 0, 255)])
def test_render_page_fill_color(fill_color, sample_page):
kwargs = dict(fill_color=fill_color, scale=0.5)
image = sample_page.render(**kwargs).to_pil()
image_rev = sample_page.render(**kwargs, rev_byteorder=True).to_pil()
if (PyVersion > (3, 6)):
assert (image == image_rev)
bg_pixel = image.getpixel((0, 0))
if (fill_color[3] == 255):
fill_color = fill_color[:(- 1)]
assert (image.size == (298, 421))
assert (bg_pixel == fill_color) |
class LxDeviceFindByClassName(gdb.Function):
def __init__(self):
super(LxDeviceFindByClassName, self).__init__('lx_device_find_by_class_name')
def invoke(self, cls, name):
name = name.string()
cls = get_class_by_name(cls.string())
for dev in class_for_each_device(cls):
if (dev_name(dev) == name):
return dev |
class OPICHaircut(Haircut):
def __init__(self, source, min_weight: float=0.001, tendency: float=0.7):
super().__init__(source, min_weight)
self.tendency = tendency
def push(self, node, edges: list, **kwargs):
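        # OPIC-style cash redistribution: zero this node's weight and split it between out-neighbours (fraction tendency) and in-neighbours (fraction 1 - tendency), proportional to edge values and damped by the in/out flow ratio R (capped at 1).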
(in_sum, out_sum) = (0, 0)
(in_edges, out_edges) = (list(), list())
for e in edges:
if (e.get('from') == node):
out_sum += float(e.get('value', 0))
out_edges.append(e)
elif (e.get('to') == node):
in_sum += float(e.get('value', 0))
in_edges.append(e)
if ((out_sum == 0) or ((in_sum / out_sum) <= 0)):
return
R = (in_sum / out_sum)
R = (1 if (R > 1) else R)
node_weight = self.weight_map.get(node, 0)
self.weight_map[node] = 0
for oe in out_edges:
out_neibor = oe.get('to')
edge_value = float(oe.get('value'))
self.weight_map[out_neibor] = (self.weight_map.get(out_neibor, 0) + (((node_weight * (edge_value / out_sum)) * self.tendency) * R))
for ie in in_edges:
in_neibor = ie.get('from')
edge_value = float(ie.get('value'))
self.weight_map[in_neibor] = (self.weight_map.get(in_neibor, 0) + (((node_weight * (edge_value / in_sum)) * (1 - self.tendency)) * R)) |
class HTMLReporter(PythonTaReporter):
name = 'HTMLReporter'
_COLOURING = {'black': '<span class="black">', 'black-line': '<span class="black line-num">', 'bold': '<span>', 'code-heading': '<span>', 'style-heading': '<span>', 'code-name': '<span>', 'style-name': '<span>', 'highlight': '<span class="highlight-pyta">', 'grey': '<span class="grey">', 'grey-line': '<span class="grey line-num">', 'gbold': '<span class="gbold">', 'gbold-line': '<span class="gbold line-num">', 'reset': '</span>'}
_PRE_LINE_NUM_SPACES = 0
no_err_message = 'No problems detected, good job!'
no_snippet = 'No code to display for this message.'
code_err_title = 'Code Errors or Forbidden Usage (fix: high priority)'
style_err_title = 'Style or Convention Errors (fix: before submission)'
OUTPUT_FILENAME = 'pyta_report.html'
def print_messages(self, level='all'):
    # All messages are rendered through the HTML template, so there is nothing to print here.
    pass
def display_messages(self, layout: BaseLayout) -> None:
grouped_messages = {path: self.group_messages(msgs) for (path, msgs) in self.messages.items()}
template_f = self.linter.config.pyta_template_file
template = Environment(loader=FileSystemLoader(TEMPLATES_DIR)).get_template(template_f)
dt = str(datetime.now().strftime('%a. %b. %d %Y, %I:%M:%S %p'))
rendered_template = template.render(date_time=dt, reporter=self, grouped_messages=grouped_messages, os=os, enumerate=enumerate)
if (self.out is not sys.stdout):
self.writeln(rendered_template)
else:
rendered_template = rendered_template.encode('utf8')
self._open_html_in_browser(rendered_template)
def _open_html_in_browser(self, html: bytes) -> None:
class OneShotRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
buffer_size = (1024 * 1024)
for i in range(0, len(html), buffer_size):
self.wfile.write(html[i:(i + buffer_size)])
def log_message(self, format, *args):
pass
server = HTTPServer(('127.0.0.1', 0), OneShotRequestHandler)
webbrowser.open(f'http://127.0.0.1:{server.server_port}', new=2)
server.handle_request()
server.server_close()
print("[INFO] Your PythonTA report is being opened in your web browser.\n If it doesn't open, please add an output argument to python_ta.check_all\n as follows:\n\n check_all(..., output='pyta_report.html')\n\n This will cause PythonTA to save the report to a file, pyta_report.html,\n that you can open manually in a web browser.", file=sys.stderr)
@classmethod
def _colourify(cls, colour_class: str, text: str) -> str:
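        # Wrap text in the span tag for the given colour class; non line-number spans are additionally syntax-highlighted via Pygments.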
colour = cls._COLOURING[colour_class]
new_text = text.replace(' ', cls._SPACE)
if ('-line' not in colour_class):
new_text = highlight(new_text, PythonLexer(), HtmlFormatter(nowrap=True, lineseparator='', classprefix='pygments-'))
return ((colour + new_text) + cls._COLOURING['reset']) |
class CmdPose(RPCommand):
key = 'pose'
def parse(self):
args = self.args.strip()
default = args.startswith('default')
reset = args.startswith('reset')
if default:
args = re.sub('^default', '', args)
if reset:
args = re.sub('^reset', '', args)
target = None
if ('=' in args):
(target, args) = [part.strip() for part in args.split('=', 1)]
self.target = target
self.reset = reset
self.default = default
self.args = args.strip()
def func(self):
caller = self.caller
pose = self.args
target = self.target
if ((not pose) and (not self.reset)):
caller.msg('Usage: pose <pose-text> OR pose obj = <pose-text>')
return
if (not pose.endswith('.')):
pose = ('%s.' % pose)
if target:
target = caller.search(target)
if (not target):
return
if (not target.access(caller, 'edit')):
caller.msg("You can't pose that.")
return
else:
target = caller
if (not target.attributes.has('pose')):
caller.msg(('%s cannot be posed.' % target.key))
return
target_name = (target.sdesc.get() if hasattr(target, 'sdesc') else target.key)
if self.reset:
pose = target.db.pose_default
target.db.pose = pose
elif self.default:
target.db.pose_default = pose
caller.msg(("Default pose is now '%s %s'." % (target_name, pose)))
return
else:
(parsed, mapping) = parse_sdescs_and_recogs(caller, caller.location.contents, pose)
mapping = dict(((ref, (obj.sdesc.get() if hasattr(obj, 'sdesc') else obj.key)) for (ref, obj) in mapping.items()))
pose = parsed.format(**mapping)
if ((len(target_name) + len(pose)) > 60):
caller.msg(("Your pose '%s' is too long." % pose))
return
target.db.pose = pose
caller.msg(("Pose will read '%s %s'." % (target_name, pose))) |
def test_vertical_crs__from_methods():
assert_maker_inheritance_valid(VerticalCRS.from_epsg(5703), VerticalCRS)
assert_maker_inheritance_valid(VerticalCRS.from_string('EPSG:5703'), VerticalCRS)
with pytest.raises(CRSError, match='Invalid type'):
VerticalCRS.from_proj4('+proj=latlon')
assert_maker_inheritance_valid(VerticalCRS.from_user_input(VerticalCRS.from_string('EPSG:5703')), VerticalCRS)
assert_maker_inheritance_valid(VerticalCRS.from_json(CRS(5703).to_json()), VerticalCRS)
assert_maker_inheritance_valid(VerticalCRS.from_json_dict(CRS(5703).to_json_dict()), VerticalCRS) |
def read_required(validator: Validator, required: List[str], instance: Any, schema: Mapping[(Hashable, Any)]) -> Iterator[ValidationError]:
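    # OpenAPI-aware 'required' check: a missing writeOnly property is tolerated when validating in read (response) context.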
if (not validator.is_type(instance, 'object')):
return
for property in required:
if (property not in instance):
prop_schema = schema.get('properties', {}).get(property)
if prop_schema:
write_only = prop_schema.get('writeOnly', False)
if (getattr(validator, 'read', True) and write_only):
continue
(yield ValidationError(f'{property!r} is a required property')) |
class TypeHintingFactory(interfaces.ITypeHintingFactory):
def make_param_provider(self):
providers = [docstrings.ParamProvider(docstrings.DocstringParamParser(), self.make_resolver()), docstrings.ParamProvider(numpydocstrings.NumPyDocstringParamParser(), self.make_resolver())]
return inheritance.ParamProvider(composite.ParamProvider(*providers))
def make_return_provider(self):
providers = [docstrings.ReturnProvider(docstrings.DocstringReturnParser(), self.make_resolver())]
return inheritance.ReturnProvider(composite.ReturnProvider(*providers))
def make_assignment_provider(self):
providers = [pep0484_type_comments.AssignmentProvider(self.make_resolver()), docstrings.AssignmentProvider(docstrings.DocstringParamParser(), self.make_resolver()), docstrings.AssignmentProvider(numpydocstrings.NumPyDocstringParamParser(), self.make_resolver())]
return inheritance.AssignmentProvider(composite.AssignmentProvider(*providers))
def make_resolver(self):
resolvers = [types.Resolver()]
return composite_resolvers.Resolver(*resolvers) |
class Solution():
    def checkRecord(self, s: str) -> bool:
        # Attendance is rewardable with at most one 'A' (absent) and no three consecutive 'L's (late).
        return ((s.count('A') <= 1) and ('LLL' not in s))