code stringlengths 281 23.7M |
|---|
class Validator():
    """Parse check strings like ``integer(0, 100, default=5)`` and apply the
    named check function to a value.

    Check functions are looked up in ``self.functions`` (the built-in set plus
    any user-supplied extras); parsed check strings are memoised in
    ``self._cache``.
    """
    # Splits a check string into function name and raw argument string.
    _func_re = re.compile('(.+?)\\((.*)\\)', re.DOTALL)
    # Matches a single ``keyword=value`` argument.
    _key_arg = re.compile('^([a-zA-Z_][a-zA-Z0-9_]*)\\s*=\\s*(.*)$', re.DOTALL)
    # NOTE(review): these names and the pattern strings below come from module
    # level (outside this view) -- verify they are defined in this module.
    _list_arg = _list_arg
    _list_members = _list_members
    _paramfinder = re.compile(_paramstring, (re.VERBOSE | re.DOTALL))
    _matchfinder = re.compile(_matchstring, (re.VERBOSE | re.DOTALL))

    def __init__(self, functions=None):
        """Create a validator.

        functions: optional dict of ``name -> callable`` check functions;
        entries override built-ins of the same name.
        """
        self.functions = {'': self._pass, 'integer': is_integer, 'float': is_float, 'boolean': is_boolean, 'ip_addr': is_ip_addr, 'string': is_string, 'list': is_list, 'tuple': is_tuple, 'int_list': is_int_list, 'float_list': is_float_list, 'bool_list': is_bool_list, 'ip_addr_list': is_ip_addr_list, 'string_list': is_string_list, 'mixed_list': is_mixed_list, 'pass': self._pass, 'option': is_option, 'force_list': force_list}
        if (functions is not None):
            self.functions.update(functions)
        # Error class callers can use to catch any validation failure.
        self.baseErrorClass = ValidateError
        # check string -> (fun_name, fun_args, fun_kwargs, default)
        self._cache = {}

    def check(self, check, value, missing=False):
        """Validate *value* against the *check* string.

        When ``missing`` is true, the check's ``default`` (if any) replaces
        the value; a missing default raises VdtMissingValue. A value of
        ``'None'`` becomes Python ``None`` and skips the check entirely.
        """
        (fun_name, fun_args, fun_kwargs, default) = self._parse_with_caching(check)
        if missing:
            if (default is None):
                raise VdtMissingValue()
            value = self._handle_none(default)
        if (value is None):
            return None
        return self._check_value(value, fun_name, fun_args, fun_kwargs)

    def _handle_none(self, value):
        """Map the literal string 'None' to None; unquote quoted 'None'."""
        if (value == 'None'):
            return None
        elif (value in ("'None'", '"None"')):
            value = self._unquote(value)
        return value

    def _parse_with_caching(self, check):
        """Parse *check*, memoising the result in ``self._cache``.

        Copies of the cached args/kwargs are returned so callers cannot
        corrupt the cache by mutating them.
        """
        if (check in self._cache):
            (fun_name, fun_args, fun_kwargs, default) = self._cache[check]
            fun_args = list(fun_args)
            fun_kwargs = dict(fun_kwargs)
        else:
            (fun_name, fun_args, fun_kwargs, default) = self._parse_check(check)
            # Force keys to plain str before caching.
            fun_kwargs = {str(key): value for (key, value) in fun_kwargs.items()}
            self._cache[check] = (fun_name, list(fun_args), dict(fun_kwargs), default)
        return (fun_name, fun_args, fun_kwargs, default)

    def _check_value(self, value, fun_name, fun_args, fun_kwargs):
        """Run the named check function; unknown names raise VdtUnknownCheckError."""
        try:
            fun = self.functions[fun_name]
        except KeyError:
            raise VdtUnknownCheckError(fun_name)
        else:
            return fun(value, *fun_args, **fun_kwargs)

    def _parse_check(self, check):
        """Split a check string into (name, args, kwargs, default).

        A check with no parentheses is treated as a bare function name with
        no arguments. The ``default`` keyword argument is extracted and
        returned separately.
        """
        fun_match = self._func_re.match(check)
        if fun_match:
            fun_name = fun_match.group(1)
            arg_string = fun_match.group(2)
            # Validate overall argument syntax before splitting.
            arg_match = self._matchfinder.match(arg_string)
            if (arg_match is None):
                raise VdtParamError(('Bad syntax in check "%s".' % check))
            fun_args = []
            fun_kwargs = {}
            for arg in self._paramfinder.findall(arg_string):
                arg = arg.strip()
                # list-valued keyword argument, e.g. ``opts=list('a', 'b')``
                listmatch = self._list_arg.match(arg)
                if listmatch:
                    (key, val) = self._list_handle(listmatch)
                    fun_kwargs[key] = val
                    continue
                # plain keyword argument; quoted 'None' is kept verbatim
                keymatch = self._key_arg.match(arg)
                if keymatch:
                    val = keymatch.group(2)
                    if (not (val in ("'None'", '"None"'))):
                        val = self._unquote(val)
                    fun_kwargs[keymatch.group(1)] = val
                    continue
                # positional argument
                fun_args.append(self._unquote(arg))
        else:
            return (check, (), {}, None)
        default = fun_kwargs.pop('default', None)
        return (fun_name, fun_args, fun_kwargs, default)

    def _unquote(self, val):
        """Strip one layer of matching single or double quotes, if present."""
        if ((len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[(- 1)])):
            val = val[1:(- 1)]
        return val

    def _list_handle(self, listmatch):
        """Return (name, values) for a matched list-style keyword argument."""
        out = []
        name = listmatch.group(1)
        args = listmatch.group(2)
        for arg in self._list_members.findall(args):
            out.append(self._unquote(arg))
        return (name, out)

    def _pass(self, value):
        """No-op check: return the value unchanged."""
        return value

    def get_default_value(self, check):
        """Return the (validated) default value for *check*.

        Raises KeyError when the check declares no default.
        """
        (fun_name, fun_args, fun_kwargs, default) = self._parse_with_caching(check)
        if (default is None):
            raise KeyError(('Check "%s" has no default value.' % check))
        value = self._handle_none(default)
        if (value is None):
            return value
        return self._check_value(value, fun_name, fun_args, fun_kwargs)
class Exercises():
    """Exercise listing for a track, split into concept and practice exercises.

    Raw dict entries (e.g. parsed from config JSON) are normalised into
    ExerciseInfo objects in ``__post_init__``.
    """
    __init__ = _custom_dataclass_init
    concept: List[ExerciseInfo]
    practice: List[ExerciseInfo]
    foregone: List[str] = None

    def __post_init__(self):
        """Default ``foregone`` to [] and convert dict entries to ExerciseInfo."""
        if (self.foregone is None):
            self.foregone = []
        for attr_name in ['concept', 'practice']:
            # Exercise files live under exercises/<concept|practice>/<slug>.
            base_path = (Path('exercises') / attr_name)
            setattr(self, attr_name, [(ExerciseInfo(path=(base_path / e['slug']), type=attr_name, **e) if isinstance(e, dict) else e) for e in getattr(self, attr_name)])

    def all(self, status_filter=frozenset({ExerciseStatus.Active, ExerciseStatus.Beta})):
        """Return every exercise whose status is in *status_filter*.

        Fix: the default was a mutable ``set`` literal, which is shared across
        calls (classic mutable-default-argument pitfall). A ``frozenset`` is
        immutable and membership behaviour is identical.
        """
        return [e for e in chain(self.concept, self.practice) if (e.status in status_filter)]
def get_numbered_choice(sd_command, prompt, valid_choices, quit_message):
    """Keep prompting until the user picks a valid numbered choice or quits.

    Returns the chosen integer; raises SimpleDeployCommandError when the
    user enters ``q``/``quit``. Everything shown and typed is also logged.
    """
    full_prompt = prompt + '\n\nYou can quit by entering q.\n'
    while True:
        sd_command.log_info(full_prompt)
        response = input(full_prompt)
        sd_command.log_info(response)
        # Allow the user to bail out at any point.
        if response.lower() in ('q', 'quit'):
            raise SimpleDeployCommandError(sd_command, quit_message)
        try:
            choice = int(response)
        except ValueError:
            sd_command.write_output('Please enter a number from the list of choices.')
            continue
        if choice in valid_choices:
            return choice
        sd_command.write_output(' Invalid selection. Please try again.')
class ForumProfileForm(forms.ModelForm):
    """Form for editing a user's forum profile (avatar and signature)."""

    class Meta():
        model = ForumProfile
        fields = ['avatar', 'signature']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remove the avatar field entirely when the avatar feature is off.
        if not machina_settings.PROFILE_AVATARS_ENABLED:
            del self.fields['avatar']
class _DragableTabBar(QtGui.QTabBar):
    """Tab bar whose tabs can be dragged between splits and renamed in place.

    A hidden line edit (``self._title_edit``) overlays the current tab on
    double-click for renaming; drag state is tracked in ``self._drag_state``.
    """

    def __init__(self, root, parent):
        """Set up the tab bar under *parent*, reporting to the *root* widget."""
        QtGui.QTabBar.__init__(self, parent)
        if (sys.platform == 'darwin'):
            # Native-looking tabs on macOS.
            self.setDocumentMode(True)
        self._root = root
        self._drag_state = None
        # Overlay editor for renaming a tab; hidden until double-click.
        te = _IndependentLineEdit('', self)
        te.hide()
        te.editingFinished.connect(te.hide)
        te.returnPressed.connect(self._setCurrentTabText)
        self._title_edit = te

    def resizeEvent(self, e):
        """Keep the rename editor glued to the current tab while resizing."""
        if self._title_edit.isVisible():
            self._resize_title_edit_to_current_tab()
        QtGui.QTabBar.resizeEvent(self, e)

    def keyPressEvent(self, e):
        """Left/Right arrows move the current tab; other keys propagate."""
        if (e.key() == QtCore.Qt.Key.Key_Left):
            self._root._move_left(self.parent(), self.currentIndex())
        elif (e.key() == QtCore.Qt.Key.Key_Right):
            self._root._move_right(self.parent(), self.currentIndex())
        else:
            e.ignore()

    def mouseDoubleClickEvent(self, e):
        """Open the inline rename editor over the current tab."""
        self._resize_title_edit_to_current_tab()
        te = self._title_edit
        # NOTE(review): [1:] drops the first character of the tab text --
        # presumably a leading marker/space added elsewhere; confirm.
        te.setText(self.tabText(self.currentIndex())[1:])
        te.setFocus()
        te.selectAll()
        te.show()

    def mousePressEvent(self, e):
        """Select the pressed tab and, on left click, arm a potential drag."""
        # Suppress focus-change echoing while Qt processes the click.
        self._root._repeat_focus_changes = False
        QtGui.QTabBar.mousePressEvent(self, e)
        self._root._repeat_focus_changes = True
        self._root._set_current_tab(self.parent(), self.currentIndex())
        self._root._set_focus()
        if (e.button() != QtCore.Qt.MouseButton.LeftButton):
            return
        if (self._drag_state is not None):
            return
        tab = self._tab_at(e.pos())
        # Only drag the tab that is already current.
        if ((tab < 0) or (tab != self.currentIndex())):
            return
        self._drag_state = _DragState(self._root, self, tab, e.pos())

    def mouseMoveEvent(self, e):
        """Advance an armed drag; show the open-hand cursor once dragging."""
        QtGui.QTabBar.mouseMoveEvent(self, e)
        if (self._drag_state is None):
            return
        if self._drag_state.dragging:
            self._drag_state.drag(e.pos())
        else:
            self._drag_state.start_dragging(e.pos())
        if self._drag_state.dragging:
            QtGui.QApplication.setOverrideCursor(QtCore.Qt.CursorShape.OpenHandCursor)

    def mouseReleaseEvent(self, e):
        """Finish a drag on left release; middle click closes the tab."""
        QtGui.QTabBar.mouseReleaseEvent(self, e)
        if (e.button() != QtCore.Qt.MouseButton.LeftButton):
            if (e.button() == QtCore.Qt.MouseButton.MiddleButton):
                self.tabCloseRequested.emit(self.tabAt(e.pos()))
            return
        if ((self._drag_state is not None) and self._drag_state.dragging):
            QtGui.QApplication.restoreOverrideCursor()
            self._drag_state.drop(e.pos())
        self._drag_state = None

    def _tab_at(self, pos):
        """Return the index of the tab containing *pos*, or -1 if none."""
        for i in range(self.count()):
            if self.tabRect(i).contains(pos):
                return i
        return (- 1)

    def _setCurrentTabText(self):
        """Commit the rename editor's text to the current tab."""
        idx = self.currentIndex()
        text = self._title_edit.text()
        # NOTE(review): '' + text mirrors the [1:] strip in
        # mouseDoubleClickEvent -- possibly a lost leading-space prefix.
        self.setTabText(idx, ('' + text))
        self._root.tabTextChanged.emit(self.parent().widget(idx), text)

    def _resize_title_edit_to_current_tab(self):
        """Place the rename editor exactly over the current tab's text area."""
        idx = self.currentIndex()
        tab = QtGui.QStyleOptionTabV3()
        self.initStyleOption(tab, idx)
        rect = self.style().subElementRect(QtGui.QStyle.SubElement.SE_TabBarTabText, tab)
        # Inset vertically so the editor sits inside the tab chrome.
        self._title_edit.setGeometry(rect.adjusted(0, 8, 0, (- 8)))
class TestReduce(unittest.TestCase):
    """Checks that reduce() collapses a motif over the requested span."""

    def test(self):
        pitches = [80, 79, 77, 76, 77, None]
        durations = [1.5, 1 / 6, 1 / 6, 1 / 6, 1, 1]
        # Reduce positions 1..3 leftwards; durations are absorbed left.
        result = reduce(pitches, durations, 1, 3, 'left')
        self.assertEqual(result, ([80, 77, None], [2, 1, 1]))
class Solution():
    """LeetCode 662: maximum width of a binary tree."""

    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Return the maximum width over all levels of the tree.

        Each node gets a heap-style 1-based column index; for every level we
        record the smallest and largest index seen, and the level's width is
        ``max - min + 1``. Returns 0 for an empty tree.
        """
        def fill_levels(node, levels, ci, cl):
            # DFS recording per-level column extents as [min, max] pairs.
            if (node is None):
                return
            if (len(levels) == cl):
                levels.append([ci, ci])
            else:
                levels[cl][0] = min(ci, levels[cl][0])
                levels[cl][1] = max(ci, levels[cl][1])
            # Children take indices 2*ci - 1 and 2*ci; within a level the
            # relative distances match the usual 2i / 2i+1 numbering, so
            # widths are unaffected by the shift.
            fill_levels(node.left, levels, ((ci * 2) - 1), (cl + 1))
            fill_levels(node.right, levels, (ci * 2), (cl + 1))
        levels = []
        fill_levels(root, levels, 1, 0)
        # Fix: max() over an empty list raised ValueError when root is None.
        if not levels:
            return 0
        return max(((hi - lo) + 1) for (lo, hi) in levels)
class Return(Instruction):
    """Return instruction carrying a (possibly empty) list of return values."""

    def __init__(self, values, tags: Optional[Tuple[(Tag, ...)]]=None):
        """Wrap *values* in a ListOperation; *tags* are forwarded to Instruction."""
        super().__init__(tags)
        self._values = ListOperation(values)

    def __repr__(self) -> str:
        return f'return {repr(self._values)}'

    def __str__(self):
        return f'return {self._values}'

    def __iter__(self) -> Iterator[Expression]:
        """Yield each returned expression."""
        (yield from self._values)

    def complexity(self) -> int:
        # NOTE(review): returns the attribute without calling it -- ListOperation
        # presumably exposes ``complexity`` as a property; confirm.
        return self._values.complexity

    def requirements_iter(self) -> Iterator[Variable]:
        # NOTE(review): likewise returned uncalled -- presumably a property.
        return self._values.requirements_iter

    def values(self) -> ListOperation:
        """Return the underlying ListOperation of returned values."""
        return self._values

    def substitute(self, replacee: Expression, replacement: Expression) -> None:
        """Replace *replacee* with *replacement* inside the returned values."""
        self._values.substitute(replacee, replacement)

    def copy(self) -> Return:
        """Return a deep-ish copy (values copied, tags shared)."""
        return Return(self._values.copy(), self.tags)

    def accept(self, visitor: DataflowObjectVisitorInterface[T]) -> T:
        """Visitor-pattern dispatch."""
        return visitor.visit_return(self)
def test_run_transition_pass_arguments_to_sub_transitions(state_machine, event_mock):
    """Event kwargs must be forwarded to enter/exit callbacks of the transition."""
    workflow = MyModel(state='draft')
    sm = state_machine(workflow)
    # First event carries kwargs; both enter and exit callbacks receive them.
    sm.send('produce', param1='value1', param2='value2')
    assert workflow.state == 'producing'
    event_mock.on_enter_producing.assert_called_with(param1='value1', param2='value2')
    event_mock.on_exit_draft.assert_called_with(param1='value1', param2='value2')
    # Second event: the closed/producing callbacks are invoked without args.
    sm.send('deliver', param3='value3')
    event_mock.on_enter_closed.assert_called_with()
    event_mock.on_exit_producing.assert_called_with()
class TempStoreClient(DjangoClient):
    """Test client that captures emitted events in memory via DummyTransport."""

    def __init__(self, **inline) -> None:
        inline.setdefault('transport_class', 'tests.fixtures.DummyTransport')
        super(TempStoreClient, self).__init__(**inline)

    @property
    def events(self):
        # Fix: restored @property. ``spans_for_transaction`` below subscripts
        # ``self.events`` directly (``self.events[SPAN]``), which raises
        # TypeError on a plain bound method -- the decorator was evidently
        # lost; as a property the subscript hits the transport's event dict.
        return self._transport.events

    def spans_for_transaction(self, transaction):
        """Return all captured spans belonging to *transaction*."""
        return [span for span in self.events[SPAN] if (span['transaction_id'] == transaction['id'])]
class TestOFPEchoReply(unittest.TestCase):
    """Round-trip parse/serialize tests for OFPEchoReply messages."""

    version = ofproto.OFP_VERSION
    msg_type = ofproto.OFPT_ECHO_REPLY
    msg_len = ofproto.OFP_HEADER_SIZE
    # Fix: the xid value was lost in transit (bare ``xid =`` was a syntax
    # error). Any 32-bit transaction id works for these round-trip checks;
    # TODO(review): confirm the original value against upstream.
    xid = 2495926989

    def test_init(self):
        c = OFPEchoReply(_Datapath)
        eq_(c.data, None)

    def _test_parser(self, data):
        """Build a header (+ optional payload), parse it, verify all fields."""
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = pack(fmt, self.version, self.msg_type, self.msg_len, self.xid)
        if (data is not None):
            buf += data
        res = OFPEchoReply.parser(object, self.version, self.msg_type, self.msg_len, self.xid, buf)
        eq_(res.version, self.version)
        eq_(res.msg_type, self.msg_type)
        eq_(res.msg_len, self.msg_len)
        eq_(res.xid, self.xid)
        if (data is not None):
            eq_(res.data, data)

    def test_parser_mid(self):
        data = b'Reply Message.'
        self._test_parser(data)

    def test_parser_max(self):
        # Maximum payload: 65535 total minus the 8-byte OpenFlow header.
        data = b'Reply Message.'.ljust(65527)
        self._test_parser(data)

    def test_parser_min(self):
        data = None
        self._test_parser(data)

    def _test_serialize(self, data):
        """Serialize a reply carrying *data* and unpack-verify the buffer."""
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = (pack(fmt, self.version, self.msg_type, self.msg_len, self.xid) + data)
        c = OFPEchoReply(_Datapath)
        c.data = data
        c.serialize()
        eq_(ofproto.OFP_VERSION, c.version)
        eq_(ofproto.OFPT_ECHO_REPLY, c.msg_type)
        # serialize() assigns a fresh xid (0) since none was set on c.
        eq_(0, c.xid)
        fmt = ((('!' + ofproto.OFP_HEADER_PACK_STR.replace('!', '')) + str(len(c.data))) + 's')
        res = struct.unpack(fmt, six.binary_type(c.buf))
        eq_(res[0], ofproto.OFP_VERSION)
        eq_(res[1], ofproto.OFPT_ECHO_REPLY)
        eq_(res[2], len(buf))
        eq_(res[3], 0)
        eq_(res[4], data)

    def test_serialize_mid(self):
        data = b'Reply Message.'
        self._test_serialize(data)

    def test_serialize_max(self):
        data = b'Reply Message.'.ljust(65527)
        self._test_serialize(data)

    def test_serialize_check_data(self):
        # Fix: the original ``@raises(AssertionError)`` decorator was mangled
        # to a bare ``(AssertionError)`` line (syntax-breaking). Expressed
        # here with unittest's own assertRaises instead.
        c = OFPEchoReply(_Datapath)
        with self.assertRaises(AssertionError):
            c.serialize()
class OefSearchHandler(Handler):
    """Handles OEF search/registration responses for the agent's skill."""

    SUPPORTED_PROTOCOL = OefSearchMessage.protocol_id

    def setup(self) -> None:
        """Implement the setup (nothing to do).

        Fix: the method had no body at all (a syntax error -- presumably a
        docstring/pass was lost in transit).
        """

    def handle(self, message: Message) -> None:
        """Dispatch an incoming OEF search message to the right handler."""
        oef_search_msg = cast(OefSearchMessage, message)
        # Recover (or fail to recover) the dialogue this message belongs to.
        oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
        oef_search_dialogue = cast(Optional[OefSearchDialogue], oef_search_dialogues.update(oef_search_msg))
        if (oef_search_dialogue is None):
            self._handle_unidentified_dialogue(oef_search_msg)
            return
        if (oef_search_msg.performative == OefSearchMessage.Performative.SUCCESS):
            self._handle_success(oef_search_msg, oef_search_dialogue)
        elif (oef_search_msg.performative == OefSearchMessage.Performative.OEF_ERROR):
            self._handle_error(oef_search_msg, oef_search_dialogue)
        else:
            self._handle_invalid(oef_search_msg, oef_search_dialogue)

    def teardown(self) -> None:
        """Implement the teardown (nothing to do).

        Fix: empty body restored, as for setup().
        """

    def _handle_unidentified_dialogue(self, oef_search_msg: OefSearchMessage) -> None:
        """Log a message that does not belong to any known dialogue."""
        self.context.logger.info('received invalid oef_search message={}, unidentified dialogue.'.format(oef_search_msg))

    def _handle_success(self, oef_search_success_msg: OefSearchMessage, oef_search_dialogue: OefSearchDialogue) -> None:
        """Advance the multi-step SOEF registration on each SUCCESS reply.

        Registration proceeds location -> service key -> genus ->
        classification; the data model name of the acknowledged request tells
        us which step just completed.
        """
        self.context.logger.info('received oef_search success message={} in dialogue={}.'.format(oef_search_success_msg, oef_search_dialogue))
        target_message = cast(OefSearchMessage, oef_search_dialogue.get_message_by_id(oef_search_success_msg.target))
        if (target_message.performative == OefSearchMessage.Performative.REGISTER_SERVICE):
            description = target_message.service_description
            data_model_name = description.data_model.name
            registration_behaviour = cast(ServiceRegistrationBehaviour, self.context.behaviours.service_registration)
            if ('location_agent' in data_model_name):
                registration_behaviour.register_service()
            elif ('set_service_key' in data_model_name):
                registration_behaviour.register_genus()
            elif (('personality_agent' in data_model_name) and (description.values['piece'] == 'genus')):
                registration_behaviour.register_classification()
            elif (('personality_agent' in data_model_name) and (description.values['piece'] == 'classification')):
                # Final step acknowledged: mark registration complete.
                registration_behaviour.is_registered = True
                registration_behaviour.registration_in_progress = False
                self.context.logger.info('the agent, with its genus and classification, and its service are successfully registered on the SOEF.')
            else:
                self.context.logger.warning(f'received soef SUCCESS message as a reply to the following unexpected message: {target_message}')

    def _handle_error(self, oef_search_error_msg: OefSearchMessage, oef_search_dialogue: OefSearchDialogue) -> None:
        """Record a failed registration request so it can be retried."""
        self.context.logger.info('received oef_search error message={} in dialogue={}.'.format(oef_search_error_msg, oef_search_dialogue))
        target_message = cast(OefSearchMessage, oef_search_dialogue.get_message_by_id(oef_search_error_msg.target))
        if (target_message.performative == OefSearchMessage.Performative.REGISTER_SERVICE):
            registration_behaviour = cast(ServiceRegistrationBehaviour, self.context.behaviours.service_registration)
            registration_behaviour.failed_registration_msg = target_message

    def _handle_invalid(self, oef_search_msg: OefSearchMessage, oef_search_dialogue: OefSearchDialogue) -> None:
        """Log a message with an unhandled performative."""
        self.context.logger.warning('cannot handle oef_search message of performative={} in dialogue={}.'.format(oef_search_msg.performative, oef_search_dialogue))
def monitor_hoop():
    """Track a horizontally oscillating hoop and publish the x position to
    shoot at, leading the target to compensate for shot travel time.

    Blocks on input() so the user can mark the start of the hoop's cycle,
    then updates the module-level ``shoot_x``/``ready`` globals forever.
    """
    global ready
    global shoot_x
    # Wait for the user to press Enter at the start of the hoop's cycle.
    input('')
    start = time.time()
    while True:
        # One full right-then-left oscillation; hand-tuned constant --
        # TODO confirm against the target game.
        cycle = 6.25
        delta = (time.time() - start)
        # Phase within the current cycle, in [0, 1).
        perc1 = ((delta % cycle) / cycle)
        # First half of the cycle the hoop moves right, second half left.
        dir = ('r' if (perc1 < 0.5) else 'l')
        # Lead by 24% of a cycle to account for the shot's flight time.
        perc2 = (perc1 + 0.24)
        if ((dir == 'l') and (perc2 > 1)):
            perc2 = (perc2 - 1)
        # Fold the phase so it maps onto a single left-to-right sweep [0, 0.5].
        if (perc2 > 0.5):
            perc2 = (0.5 - (perc2 - 0.5))
        # NOTE(review): min_x is a module-level global defined elsewhere;
        # 250 looks like half the sweep width in pixels -- confirm.
        shoot_x = (min_x + ((perc2 * 2) * 250))
        ready = True
        print(shoot_x)
        time.sleep(0.005)
class ArgumentToStringTransformer(Transformer):
    """Lark transformer collapsing argument parse trees into flat tokens."""

    def arg(self, rule: List[FileContextToken]) -> FileContextToken:
        # A bare argument node carries exactly its token as the first child.
        return rule[0]

    def argument_value(self, rule: List[FileContextToken]) -> FileContextToken:
        # Concatenate the pieces of a value into one token (no separator).
        return FileContextToken.join_tokens(rule, separator='')

    def forward_model_arguments(self, kw_list) -> List[Tuple[(FileContextToken, FileContextToken)]]:
        """Return (key, value) token pairs, skipping empty entries."""
        children = (pair.children for pair in kw_list if pair is not None)
        return [(key, val) for (key, val) in children]
class LiteDRAMController(Module):
    """LiteDRAM memory controller: per-bank machines plus a command multiplexer.

    Wires a user-facing LiteDRAMInterface to the PHY's DFI interface, with a
    refresher module and one BankMachine per (rank, bank).
    """

    def __init__(self, phy_settings, geom_settings, timing_settings, clk_freq, controller_settings=ControllerSettings()):
        # Burst length: for SDR one beat per phase; otherwise fixed per memtype.
        if (phy_settings.memtype == 'SDR'):
            burst_length = phy_settings.nphases
        else:
            burst_length = burst_lengths[phy_settings.memtype]
        # Address bits consumed by a burst (addresses are burst-aligned).
        address_align = log2_int(burst_length)
        # Aggregate all settings onto one object for the submodules.
        self.settings = controller_settings
        self.settings.phy = phy_settings
        self.settings.geom = geom_settings
        self.settings.timing = timing_settings
        nranks = phy_settings.nranks
        nbanks = (2 ** geom_settings.bankbits)
        # User-side native interface.
        self.interface = interface = LiteDRAMInterface(address_align, self.settings)
        # PHY-side DFI interface.
        self.dfi = dfi.Interface(addressbits=geom_settings.addressbits, bankbits=geom_settings.bankbits, nranks=phy_settings.nranks, databits=phy_settings.dfi_databits, nphases=phy_settings.nphases)
        self.submodules.refresher = self.settings.refresh_cls(self.settings, clk_freq=clk_freq, zqcs_freq=self.settings.refresh_zqcs_freq, postponing=self.settings.refresh_postponing)
        # One BankMachine per (rank, bank), each wired to its request port.
        bank_machines = []
        for n in range((nranks * nbanks)):
            bank_machine = BankMachine(n, address_width=interface.address_width, address_align=address_align, nranks=nranks, settings=self.settings)
            bank_machines.append(bank_machine)
            self.submodules += bank_machine
            self.comb += getattr(interface, ('bank' + str(n))).connect(bank_machine.req)
        # Multiplexer arbitrates bank machines and refresher onto the DFI bus.
        self.submodules.multiplexer = Multiplexer(settings=self.settings, bank_machines=bank_machines, refresher=self.refresher, dfi=self.dfi, interface=interface)

    def get_csrs(self):
        """Expose the multiplexer's CSRs."""
        return self.multiplexer.get_csrs()
class StorageMix(Mix):
    """Storage production mix keyed by storage mode (battery, hydro, ...)."""

    battery: (float | None) = None
    hydro: (float | None) = None

    def __setattr__(self, name: str, value: (float | None)) -> None:
        """Reject assignment to anything that is not a known storage mode."""
        if (name not in STORAGE_MODES):
            raise AttributeError(f'Unknown storage mode: {name}')
        return super().__setattr__(name, value)

    @classmethod
    def merge(cls, storage_mixes: list['StorageMix']) -> 'StorageMix':
        """Combine several mixes by accumulating each explicitly-set mode.

        Fix: the method takes ``cls`` and instantiates ``cls()`` but was
        missing its ``@classmethod`` decorator, so calling it on the class
        would have bound the first mix as ``cls``.
        """
        merged = cls()
        for mix in storage_mixes:
            # Only merge modes the source mix actually set (pydantic tracks
            # these in __fields_set__), so unset modes stay unset.
            for mode in set(STORAGE_MODES).intersection(mix.__fields_set__):
                merged.add_value(mode, getattr(mix, mode))
        return merged
class VmScreenshot(APIView):
    """API view returning or refreshing a HVM guest's console screenshot."""

    def __init__(self, request, hostname_or_uuid, data):
        super(VmScreenshot, self).__init__(request)
        self.hostname_or_uuid = hostname_or_uuid
        self.data = data
        # Resolve the VM up front; raises if it does not exist.
        self.vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)

    def get(self):
        """Return the last captured screenshot (HVM guests only)."""
        vm = self.vm
        if (not vm.is_hvm()):
            raise OperationNotSupported
        result = {'image': vm.screenshot}
        # No stored image means the screenshot was never taken.
        if result['image']:
            return SuccessTaskResponse(self.request, result, vm=vm)
        else:
            return FailureTaskResponse(self.request, result, vm=vm)

    def post(self):
        """Trigger a fresh screenshot on the compute node (async task)."""
        (request, vm) = (self.request, self.vm)
        if (not self.vm.is_hvm()):
            raise OperationNotSupported
        # Screenshots require a live (or stopping) guest.
        if (vm.status not in (vm.RUNNING, vm.STOPPING)):
            raise VmIsNotOperational
        apiview = {'view': 'vm_screenshot', 'method': request.method, 'hostname': vm.hostname}
        # NMI wakes the console, then the screenshot is captured and the PPM
        # dumped to stdout for the task framework to collect.
        cmd = ('vmadm sysrq %s nmi >&2 && sleep 0.5 && vmadm sysrq %s screenshot >&2 && cat /%s/%s/root/tmp/vm.ppm' % (vm.uuid, vm.uuid, vm.zpool, vm.uuid))
        # Serialize concurrent screenshot requests per VM.
        lock = ('vm_screenshot vm:%s' % vm.uuid)
        meta = {'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'image'}, 'replace_stderr': ((vm.uuid, vm.hostname),), 'encode_stdout': True, 'compress_stdout': True, 'apiview': apiview}
        callback = ('api.vm.other.tasks.vm_screenshot_cb', {'vm_uuid': vm.uuid})
        (tid, err) = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, callback=callback, queue=vm.node.fast_queue)
        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request, tid, vm=vm, api_view=apiview, data=self.data)
def build_post_process(config, global_config=None):
    """Instantiate the post-processing class named in *config*.

    config: dict with a 'name' key naming the class plus its kwargs.
    global_config: optional dict of extra kwargs merged into config.
    Returns the constructed post-processor, or None when name == 'None'.
    """
    support_dict = ['DBPostProcess', 'EASTPostProcess', 'SASTPostProcess', 'CTCLabelDecode', 'AttnLabelDecode', 'ClsPostProcess', 'SRNLabelDecode', 'PGPostProcess', 'DistillationCTCLabelDecode', 'TableLabelDecode', 'DistillationDBPostProcess', 'NRTRLabelDecode', 'SARLabelDecode', 'SEEDLabelDecode']
    # PSE is imported lazily (it has optional compiled deps).
    if (config['name'] == 'PSEPostProcess'):
        from .pse_postprocess import PSEPostProcess
        support_dict.append('PSEPostProcess')
    config = copy.deepcopy(config)
    module_name = config.pop('name')
    if (module_name == 'None'):
        return
    if (global_config is not None):
        config.update(global_config)
    # NOTE(review): assert is stripped under ``python -O``; consider an
    # explicit raise if that matters for this deployment.
    assert (module_name in support_dict), Exception('post process only support {}'.format(support_dict))
    # NOTE(review): eval() resolves the class by name; the name is restricted
    # to support_dict by the assert above, so it is bounded, but a dict of
    # classes would be safer if config ever comes from untrusted input.
    module_class = eval(module_name)(**config)
    return module_class
def get_template(update: 'Update', use_template: str='fedora_errata_template') -> list:
    """Render update-notification mail bodies, one per build in the update.

    Returns a list of (subject, body) tuples produced by %-formatting the
    named template with per-build info gathered from the RPM header and the
    update's metadata.
    """
    from bodhi.server.models import UpdateStatus, UpdateType
    use_template = read_template(use_template)
    # Horizontal rule separating the notes/references/changelog sections.
    line = (str(('-' * 80)) + '\n')
    templates = []
    for build in update.builds:
        h = get_rpm_header(build.nvr)
        info = {}
        info['date'] = str(update.date_pushed)
        info['name'] = h['name']
        info['summary'] = h['summary']
        info['version'] = h['version']
        info['release'] = h['release']
        info['url'] = h['url']
        # Testing updates get a marker in the subject and a repo hint.
        if (update.status is UpdateStatus.testing):
            info['testing'] = ' Test'
            info['yum_repository'] = ' --enablerepo=updates-testing'
        else:
            info['testing'] = ''
            info['yum_repository'] = ''
        info['subject'] = ('%s%s%s Update: %s' % ((((update.type is UpdateType.security) and '[SECURITY] ') or ''), update.release.long_name, info['testing'], build.nvr))
        info['updateid'] = update.alias
        info['description'] = h['description']
        info['product'] = update.release.long_name
        info['notes'] = ''
        if (update.notes and len(update.notes)):
            # Notes are stored as markdown; mails are plain text.
            plaintext = markdown_to_text(update.notes)
            info['notes'] = f'''Update Information:
{wrap_text(plaintext)}
'''
            info['notes'] += line
        # Numbered references section for associated bugs.
        i = 1
        info['references'] = ''
        if update.bugs:
            info['references'] = 'References:\n\n'
            parent = (True in [bug.parent for bug in update.bugs])
            for bug in update.bugs:
                if (update.type is UpdateType.security):
                    # For security updates with a parent bug, list only the
                    # parent and skip the per-release tracker bugs.
                    if (parent and (not bug.parent)):
                        log.debug(('Skipping tracker bug %s' % bug))
                        continue
                # Only show the title when it was actually fetched.
                title = ((((bug.title != 'Unable to fetch title') and (bug.title != 'Invalid bug number')) and (' - %s' % bug.title)) or '')
                info['references'] += ('  [ %d ] Bug #%d%s\n        %s\n' % (i, bug.bug_id, title, bug.url))
                i += 1
            info['references'] += line
        info['changelog'] = ''
        changelog = build.get_changelog(lastupdate=True)
        if (changelog is not None):
            info['changelog'] = ('ChangeLog:\n\n%s%s' % (changelog, line))
        templates.append((info['subject'], (use_template % info)))
    return templates
def thread_gps():
    """Subscribe to GPS odometry and maintain a moving-average estimate of
    the loop frequency in ``rover.freq_gps`` until shutdown."""
    print('GPS: thread starting ..')
    rospy.Subscriber('uav_pos', Odometry, rover.ros_gps_callback)
    rate = rospy.Rate(10)
    window = 10
    freq_estimate = 10.0
    previous = datetime.datetime.now()
    while (not rospy.is_shutdown()) and rover.on:
        now = datetime.datetime.now()
        elapsed = (now - previous).total_seconds()
        # Skip pathologically short intervals to avoid huge 1/dt spikes.
        if elapsed < 0.001:
            continue
        # Exponential-style moving average over the last `window` samples.
        freq_estimate = ((freq_estimate * (window - 1)) + (1 / elapsed)) / window
        previous = now
        rover.freq_gps = freq_estimate
        rate.sleep()
    print('GPS: thread closed!')
class TestTimeline():
    """Behavioural spec for the Timeline epoch/round bookkeeping helper.

    NOTE(review): assertEqual/assertTrue/assertAlmostEqual/assertFalse are
    used as bare functions -- presumably imported from a test-helper module
    outside this view; confirm they exist there.
    """

    def test_global_round_num(self) -> None:
        """global_round_num derives from epoch/round unless explicitly given."""
        tl = Timeline(epoch=1, round=1)
        assertEqual(tl.global_round_num(), 1)
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=100)
        assertEqual(tl.global_round_num(), 1)
        tl = Timeline(epoch=2, round=3, rounds_per_epoch=4)
        assertEqual(tl.global_round_num(), 7)
        # An explicit global_round overrides the derived value.
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=1, global_round=123)
        assertEqual(tl.global_round_num(), tl.global_round)

    def test_as_float(self) -> None:
        """as_float = completed epochs plus fraction of the current epoch."""
        tl = Timeline(epoch=1, round=1)
        assertAlmostEqual(tl.as_float(), 1.0)
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=100)
        assertAlmostEqual(tl.as_float(), (1 / 100))
        tl = Timeline(epoch=10, round=1)
        assertAlmostEqual(tl.as_float(), 10.0)
        tl = Timeline(epoch=10, round=1, rounds_per_epoch=100)
        assertAlmostEqual(tl.as_float(), (9 + (1 / 100)))
        # as_float accepts a round offset argument.
        tl = Timeline(epoch=2, round=1, rounds_per_epoch=100)
        assertAlmostEqual(tl.as_float((- 1)), 1.0)
        tl = Timeline(global_round=12, rounds_per_epoch=10)
        assertAlmostEqual(tl.as_float(), 1.2)
        assertAlmostEqual(tl.as_float(1), 1.3)
        # global_round wins over epoch/round when both are supplied.
        tl = Timeline(global_round=12, rounds_per_epoch=10, epoch=3, round=4)
        assertAlmostEqual(tl.as_float(), 1.2)
        tl = Timeline(epoch=5, round=2, rounds_per_epoch=3)
        assertAlmostEqual(tl.as_float(), 4.66, delta=0.01)

    def test_string(self) -> None:
        """__format__/__str__ renders epoch, round and global round."""
        tl = Timeline(epoch=2, round=2, rounds_per_epoch=10)
        assertEqual(f'{tl}', '(epoch = 2, round = 2, global round = 12)')
        tl = Timeline(global_round=12, rounds_per_epoch=10)
        assertEqual(f'{tl}', '(epoch = 2, round = 2, global round = 12)')
        # Round 10 of 10 stays in epoch 1 (rounds are 1-based inclusive).
        tl = Timeline(global_round=10, rounds_per_epoch=10)
        assertEqual(f'{tl}', '(epoch = 1, round = 10, global round = 10)')

    def test_tick_simple(self) -> None:
        """tick fires when the timeline crosses a tick_interval boundary."""
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=1))
        tl = Timeline(epoch=1, round=10, rounds_per_epoch=10)
        assertTrue(tl.tick(tick_interval=1))
        tl = Timeline(epoch=2, round=1, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=1))
        tl = Timeline(epoch=2, round=10, rounds_per_epoch=10)
        assertTrue(tl.tick(tick_interval=1))
        # Interval of 2 epochs: only every second epoch boundary ticks.
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=2))
        tl = Timeline(epoch=1, round=10, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=2))
        tl = Timeline(epoch=2, round=1, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=2))
        tl = Timeline(epoch=2, round=10, rounds_per_epoch=10)
        assertTrue(tl.tick(tick_interval=2))
        # Fractional intervals tick multiple times within an epoch.
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=0.2))
        tl = Timeline(epoch=1, round=2, rounds_per_epoch=10)
        assertTrue(tl.tick(tick_interval=0.2))
        tl = Timeline(epoch=2, round=1, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=0.2))
        tl = Timeline(epoch=1, round=2, rounds_per_epoch=10)
        assertTrue(tl.tick(tick_interval=0.2))
        tl = Timeline(epoch=1, round=3, rounds_per_epoch=10)
        assertFalse(tl.tick(tick_interval=0.2))
        tl = Timeline(epoch=1, round=4, rounds_per_epoch=10)
        assertTrue(tl.tick(tick_interval=0.2))
        # Works when the position is given via global_round too.
        tl = Timeline(global_round=4, rounds_per_epoch=2)
        assertFalse(tl.tick(tick_interval=3))
        assertTrue(tl.tick(tick_interval=2))

    def test_tick_complex(self) -> None:
        """Count ticks across 100 epochs x 10 rounds for two intervals."""
        # NOTE(review): `sum` shadows the builtin -- harmless locally, but
        # worth renaming if this test is ever extended.
        sum = 0
        for e in range(1, 101):
            for r in range(1, 11):
                tl = Timeline(epoch=e, round=r, rounds_per_epoch=10)
                sum += tl.tick(10)
        assertEqual(sum, 10)
        sum = 0
        for e in range(1, 101):
            for r in range(1, 11):
                tl = Timeline(epoch=e, round=r, rounds_per_epoch=10)
                sum += tl.tick(0.9)
        assertEqual(sum, 111)

    def test_progress_fraction(self) -> None:
        """progress_fraction = elapsed fraction of total_epochs; requires it."""
        tl = Timeline(epoch=1, round=1, rounds_per_epoch=1, total_epochs=10)
        assertTrue((tl.progress_fraction() == 0.1), f'Expected progress fraction to be 0.1, but got {tl.progress_fraction()}')
        tl = Timeline(epoch=6, round=5, rounds_per_epoch=10, total_epochs=10)
        assertTrue((tl.progress_fraction() == 0.55), f'Expected progress fraction to be 0.55, but got {tl.progress_fraction()}')
        tl = Timeline(epoch=5, round=5, rounds_per_epoch=5, total_epochs=5)
        assertTrue((tl.progress_fraction() == 1.0), f'Expected progress fraction to be 1.0, but got {tl.progress_fraction()}')
        # Fractional total_epochs is supported.
        tl = Timeline(epoch=1, round=5, rounds_per_epoch=100, total_epochs=0.5)
        assertTrue((tl.progress_fraction() == 0.1), f'Expected progress fraction to be 0.1, but got {tl.progress_fraction()}')
        # Without total_epochs the fraction is undefined and must raise.
        tl = Timeline(epoch=1, round=5, rounds_per_epoch=100)
        with pytest.raises(Exception):
            tl.progress_fraction()
def get_role_arn(role_name, env, region):
    """Look up the ARN of an IAM role by name.

    Args:
        role_name: Name of the IAM role to look up.
        env: AWS profile name identifying the account/environment.
        region: AWS region for the boto3 session.

    Returns:
        The role's ARN string.
    """
    aws_session = boto3.Session(profile_name=env, region_name=region)
    iam = aws_session.client('iam')
    LOG.debug('Searching for %s.', role_name)
    response = iam.get_role(RoleName=role_name)
    arn = response['Role']['Arn']
    LOG.debug("Found role's %s ARN %s", role_name, arn)
    return arn
class TestIndexListPeriodFilterName(unittest.TestCase):
    """Tests for IndexList.filter_period using name-derived index ages."""

    def builder(self, key='2'):
        """Build an IndexList against mocked ES fixtures for dataset *key*."""
        self.client = Mock()
        self.client.info.return_value = get_es_ver()
        self.client.cat.indices.return_value = get_testvals(key, 'state')
        self.client.indices.get_settings.return_value = get_testvals(key, 'settings')
        self.client.indices.stats.return_value = get_testvals(key, 'stats')
        self.client.indices.exists_alias.return_value = False
        self.ilo = IndexList(self.client)
        self.timestring = '%Y.%m.%d'
        # Fix: the epoch value was lost in transit (bare ``self.epoch =`` was
        # a syntax error). The expectations below require an instant within
        # 2016-03-03 UTC, the newer test index's date.
        # TODO(review): confirm the exact original value against the fixtures.
        self.epoch = 1456963201
        self.unit = 'days'

    def test_get_name_based_age_in_range(self):
        # Period [-1, 0] days around the epoch covers 2016.03.02-2016.03.03.
        range_from = (- 1)
        range_to = 0
        expected = ['index-2016.03.03']
        self.builder()
        self.ilo.filter_period(unit=self.unit, range_from=range_from, range_to=range_to, source='name', timestring=self.timestring, epoch=self.epoch)
        self.assertEqual(expected, self.ilo.indices)

    def test_get_name_based_age_not_in_range(self):
        # Period [-3, -2] days contains neither fixture index.
        range_from = (- 3)
        range_to = (- 2)
        expected = []
        self.builder()
        self.ilo.filter_period(unit=self.unit, range_from=range_from, range_to=range_to, source='name', timestring=self.timestring, epoch=self.epoch)
        self.assertEqual(expected, self.ilo.indices)

    def test_bad_arguments(self):
        # range_from > range_to is rejected.
        range_from = (- 2)
        range_to = (- 3)
        self.builder()
        self.assertRaises(FailedExecution, self.ilo.filter_period, unit=self.unit, range_from=range_from, range_to=range_to, source='name', timestring=self.timestring, epoch=self.epoch)

    def test_missing_creation_date_raises(self):
        # Indices lacking a creation_date are silently excluded.
        range_from = (- 1)
        range_to = 0
        expected = []
        self.builder()
        self.ilo.get_index_state()
        self.ilo.get_index_settings()
        self.ilo.index_info['index-2016.03.03']['age'].pop('creation_date')
        self.ilo.index_info['index-2016.03.04']['age'].pop('creation_date')
        self.ilo.filter_period(unit=self.unit, range_from=range_from, range_to=range_to, source='creation_date', epoch=self.epoch)
        self.assertEqual(expected, self.ilo.indices)

    def test_non_integer_range_value(self):
        self.builder()
        self.assertRaises(ConfigurationError, self.ilo.filter_period, range_from='invalid')
class StackLine(Base):
    """One line of a profiled call stack attached to a Request.

    Composite primary key (request_id, position) orders the lines within a
    request's stack trace.
    """
    __tablename__ = '{}StackLine'.format(config.table_prefix)
    # Owning request; lines are reachable as ``request.stack_lines``.
    request_id = Column(Integer, ForeignKey(Request.id), primary_key=True)
    request = relationship(Request, backref='stack_lines')
    # Source code line this stack entry points at.
    code_id = Column(Integer, ForeignKey(CodeLine.id))
    code = relationship(CodeLine)
    # Ordering of this line within the request's stack.
    position = Column(Integer, primary_key=True)
    # Call-depth indentation level for display.
    indent = Column(Integer, nullable=False)
    # Time spent, in seconds -- presumably; confirm units against the writer.
    duration = Column(Float, nullable=False)
def _match_name(name, patterns):
if (not patterns):
return True
for pattern in patterns:
neg = False
if (pattern[:1] == '!'):
pattern = pattern[1:]
neg = True
result = fnmatch.fnmatch(name, (('*' + pattern) + '*'))
if neg:
if result:
return False
elif (not result):
return False
return True |
def generate(fields: Dict[(str, FieldEntry)], out_dir: str, default_dirs: bool) -> Tuple[(Dict[(str, FieldNestedEntry)], Dict[(str, Field)])]:
    """Write the ECS field YAML artifacts and return the (nested, flat) views.

    Always writes ecs_flat.yml and ecs_nested.yml under *out_dir*; the raw
    ecs.yml dump is only written when *default_dirs* is set.
    """
    ecs_helpers.make_dirs(join(out_dir))
    if default_dirs:
        ecs_helpers.yaml_dump(join(out_dir, 'ecs.yml'), fields)
    nested: Dict[(str, FieldNestedEntry)] = generate_nested_fields(fields)
    flat: Dict[(str, Field)] = generate_flat_fields(fields)
    ecs_helpers.yaml_dump(join(out_dir, 'ecs_flat.yml'), flat)
    ecs_helpers.yaml_dump(join(out_dir, 'ecs_nested.yml'), nested)
    return (nested, flat)
class TestVideoModel(unittest.TestCase):
    """Smoke test: build a VideoModel from config and check output shape."""

    def test_transformers_backbone(self):
        import torch
        from video_transformers import VideoModel
        # Fix: 'num_total_params' / 'num_trainable_params' had their values
        # lost in transit (bare ``:`` was a syntax error). None restores
        # valid syntax; TODO(review): restore the exact original counts and
        # confirm from_config tolerates None here.
        config = {'backbone': {'name': 'TransformersBackbone', 'framework': {'name': 'transformers', 'version': '4.21.1'}, 'mean': [0.485, 0.456, 0.406], 'model_name': 'microsoft/cvt-13', 'num_features': 384, 'num_total_params': None, 'num_trainable_params': None, 'std': [0.229, 0.224, 0.225], 'type': '2d_backbone'}, 'head': {'name': 'LinearHead', 'dropout_p': 0.0, 'hidden_size': 384, 'num_classes': 6}, 'neck': {'name': 'TransformerNeck', 'dropout_p': 0.1, 'num_features': 384, 'num_timesteps': 8, 'transformer_enc_act': 'gelu', 'transformer_enc_num_heads': 4, 'transformer_enc_num_layers': 2, 'return_mean': True}, 'preprocessor': {'means': [0.485, 0.456, 0.406], 'stds': [0.229, 0.224, 0.225], 'min_short_side': 256, 'input_size': 224, 'num_timesteps': 8}, 'labels': ['BodyWeightSquats', 'JumpRope', 'Lunges', 'PullUps', 'PushUps', 'WallPushups'], 'task': 'single_label_classification'}
        batch_size = 2
        model = VideoModel.from_config(config)
        # (batch, channels, time, height, width)
        input = torch.randn(batch_size, 3, config['preprocessor']['num_timesteps'], 224, 224)
        output = model(input)
        self.assertEqual(output.shape, (batch_size, model.head.num_classes))
class ScoringResult():
    """Result of scoring several continuations of one prompt.

    Stores the expanded token sequences per continuation and offers helpers
    to aggregate per-token log-probabilities into per-continuation scores,
    normalised distributions and an argmax choice.
    """

    def __init__(self, prompt, continuations: List[str], num_value_tokens: List[int], seqs: List[dc.seq], model_identifier: str):
        # Expand each sequence eagerly so logprobs are directly available.
        self.seqs = [seq.expand() for seq in seqs]
        self.prompt = prompt
        self.continuations = continuations
        self.num_value_tokens = num_value_tokens
        self.model_identifier = model_identifier

    def full_token_scores(self):
        """Per-token logprob arrays for every sequence (prompt tokens included)."""
        return [seq.logprobs for seq in self.seqs]

    def token_scores(self):
        """Per-token logprobs restricted to each continuation's trailing value tokens."""
        return [seq.logprobs[-self.num_value_tokens[idx]:] for idx, seq in enumerate(self.seqs)]

    def scores(self, agg='sum', **kwargs):
        """Aggregate each sequence's logprobs into one score per continuation.

        Supported *agg* values: 'sum', 'mean', and 'normalized*'
        (sum divided by length**alpha, alpha from kwargs, default 0.7).
        Raises ValueError for any other aggregation name.
        """
        if agg == 'sum':
            return np.array([seq.logprobs.sum() for seq in self.seqs])
        if agg == 'mean':
            return np.array([seq.logprobs.mean() for seq in self.seqs])
        if agg.startswith('normalized'):
            alpha = kwargs.get('alpha', 0.7)
            lengths = np.array([len(seq) for seq in self.seqs])
            totals = np.array([seq.logprobs.sum() for seq in self.seqs])
            return totals / (lengths ** alpha)
        raise ValueError('invalid aggregation: {}'.format(agg))

    def logprobs(self, agg='sum'):
        """Scores renormalised (log-softmax) into a distribution over continuations."""
        return nputil.log_softmax(self.scores(agg))

    def probs(self, agg='sum'):
        """Probabilities over continuations (exp of the normalised logprobs)."""
        return np.exp(self.logprobs(agg))

    def argmax(self, agg='sum') -> str:
        """Return the continuation with the highest aggregated score."""
        best = self.scores(agg=agg).argmax()
        return self.continuations[best]

    def __str__(self):
        header = "lmql.ScoringResult(model='{}')\n".format(self.model_identifier)
        rows = [f'-{str([c])[1:(- 1)]}: {score}' for c, score in zip(self.continuations, self.scores(agg='sum'))]
        return header + '\n'.join(rows)
class MockRedis(Database):
    """In-memory stand-in for a Redis-backed Database, intended for tests."""

    def __init__(self):
        # Plain dict acting as the key/value store.
        self.database = {}

    def set(self, key: str, value: str):
        """Store *value* under *key*, overwriting any existing entry."""
        self.database[key] = value

    def delete(self, key: str):
        """Remove *key*; raise SlugDoesNotExist when it was never stored."""
        try:
            del self.database[key]
        except KeyError:
            raise exc.SlugDoesNotExist('Model with this slug does not exist')

    def get(self, key: str):
        """Return the value stored under *key*; raise SlugDoesNotExist when absent."""
        try:
            return self.database[key]
        except KeyError:
            raise exc.SlugDoesNotExist('Model with this slug does not exist')
# NOTE(review): '_default' below looks like a mangled class decorator (likely
# an attrs decorator such as '@attrs_default' upstream) — confirm before
# relying on generated __init__/__eq__ behaviour.
_default
class Message():
    """A Facebook message handle: a thread plus a message id.

    Actions (delete, unsend, react, fetch) are issued through the owning
    thread's session.
    """
    # The thread this message belongs to; supplies the HTTP session.
    thread = attr.ib()
    # The message id, always coerced to str.
    id = attr.ib(converter=str, type=str)

    def session(self):
        # Convenience accessor for the thread's session.
        # NOTE(review): possibly a stripped @property upstream — confirm.
        return self.thread.session

    # NOTE(review): first parameter is 'session', not 'self' — likely a
    # stripped @staticmethod (called below as self._delete_many(self.session, ...)).
    def _delete_many(session, message_ids):
        data = {}
        # The endpoint expects indexed form fields: message_ids[0], message_ids[1], ...
        for (i, id_) in enumerate(message_ids):
            data['message_ids[{}]'.format(i)] = id_
        # Response payload is ignored; errors surface inside _payload_post.
        j = session._payload_post('/ajax/mercury/delete_messages.php?dpr=1', data)

    def delete(self):
        """Delete this message for the current user (does not retract it for others)."""
        self._delete_many(self.session, [self.id])

    def unsend(self):
        """Retract this message for everyone in the thread."""
        data = {'message_id': self.id}
        j = self.session._payload_post('/messaging/unsend_message/?dpr=1', data)

    def react(self, reaction: Optional[str]):
        """Add a reaction, or remove it when *reaction* is falsy.

        Raises ValueError for reactions not in SENDABLE_REACTIONS.
        """
        if (reaction and (reaction not in SENDABLE_REACTIONS)):
            raise ValueError('Invalid reaction! Please use one of: {}'.format(SENDABLE_REACTIONS))
        data = {'action': ('ADD_REACTION' if reaction else 'REMOVE_REACTION'), 'client_mutation_id': '1', 'actor_id': self.session.user.id, 'message_id': self.id, 'reaction': reaction}
        # NOTE(review): the 'doc_id' value is missing (corrupted literal — this
        # line is a syntax error as written).  Upstream uses a fixed GraphQL
        # doc id for the reaction mutation; restore it from the original source.
        data = {'doc_id': , 'variables': _util.json_minimal({'data': data})}
        j = self.session._payload_post('/webgraphql/mutation', data)
        _exception.handle_graphql_errors(j)

    def fetch(self) -> 'MessageData':
        """Fetch the full message data for this id from the thread."""
        message_info = self.thread._forced_fetch(self.id).get('message')
        return MessageData._from_graphql(self.thread, message_info)
def format_mentions(text, *args, **kwargs):
    """Format *text* like ``str.format``, collecting mentions along the way.

    Each field value must be a ``(thread_id, name)`` pair: *name* is
    substituted into the output and a Mention covering its span is recorded.
    Returns ``(formatted_text, mentions)``.  Raises ValueError when automatic
    ('{}') and manual ('{0}') field numbering are mixed.
    """
    formatter = Formatter()
    # Pre-scan to learn whether any automatic ('{}') fields are present.
    uses_auto = any(parsed[1] == '' for parsed in formatter.parse(text))
    parts = []
    mentions = []
    cursor = 0  # offset of the next character appended to the output
    auto_index = 0
    for literal, field, spec, conv in formatter.parse(text):
        parts.append(literal)
        cursor += len(literal)
        if field is None:
            continue
        if field == '':
            # Automatic numbering: consume positional args in order.
            field = str(auto_index)
            auto_index += 1
        elif uses_auto and field.isdigit():
            raise ValueError('cannot switch from automatic field numbering to manual field specification')
        thread_id, name = formatter.get_field(field, args, kwargs)[0]
        if spec:
            name = formatter.format_field(name, spec)
        if conv:
            name = formatter.convert_field(name, conv)
        parts.append(name)
        mentions.append(Mention(thread_id=thread_id, offset=cursor, length=len(name)))
        cursor += len(name)
    return (''.join(parts), mentions)
# NOTE(review): the line below looks like a mangled decorator (probably
# "@pytest.mark.param_file(FIXTURE_PATH / 'directive_options.md')") — confirm.
.param_file((FIXTURE_PATH / 'directive_options.md'))
def test_directive_options(file_params, sphinx_doctree_no_tr: CreateDoctree):
    """Render one parametrised MyST fixture and compare the resulting doctree
    against the expected output stored alongside it in the fixture file."""
    sphinx_doctree_no_tr.set_conf({'extensions': ['myst_parser']})
    result = sphinx_doctree_no_tr(file_params.content, 'index.md')
    # rstrip_lines avoids spurious trailing-whitespace differences.
    file_params.assert_expected(result.pformat('index'), rstrip_lines=True)
def test_validate_gas_limit_too_high(noproof_consensus_chain):
    """A header raising the gas limit by the maximum allowed delta (or more)
    relative to its parent must fail header validation."""
    parent_block = noproof_consensus_chain.mine_block()
    child_block = noproof_consensus_chain.mine_block()
    # The permitted change is strictly less than parent_limit // ADJUSTMENT_FACTOR,
    # so adding exactly that delta is already invalid.
    max_delta = parent_block.header.gas_limit // constants.GAS_LIMIT_ADJUSTMENT_FACTOR
    bad_header = child_block.header.copy(gas_limit=parent_block.header.gas_limit + max_delta)
    vm = noproof_consensus_chain.get_vm(child_block.header)
    with pytest.raises(ValidationError, match='[Gg]as limit'):
        vm.validate_header(bad_header, parent_block.header)
class ABIRegistry(Copyable, BaseRegistry):
    """Registry mapping ABI type strings to encoder/decoder implementations.

    Encoders and decoders live in separate predicate mappings.  NOTE(review):
    the bare '_cache(maxsize=None)' and '_clear_*_cache' lines below look like
    mangled decorators (an lru_cache-style lookup cache plus its invalidation
    on mutation) — confirm against the upstream source.
    """

    def __init__(self):
        self._encoders = PredicateMapping('encoder registry')
        self._decoders = PredicateMapping('decoder registry')

    def _get_registration(self, mapping, type_str):
        """Look up *type_str* in *mapping*, instantiating coder classes lazily."""
        coder = super()._get_registration(mapping, type_str)
        # A registered BaseCoder *class* (rather than an instance) is built on
        # demand from the concrete type string.
        if (isinstance(coder, type) and issubclass(coder, BaseCoder)):
            return coder.from_type_str(type_str, self)
        return coder

    _clear_encoder_cache
    def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
        """Register *encoder* under *lookup*, optionally tagged with *label*."""
        self._register(self._encoders, lookup, encoder, label=label)

    _clear_encoder_cache
    def unregister_encoder(self, lookup_or_label: Lookup) -> None:
        """Remove the encoder registered under the given lookup or label."""
        self._unregister(self._encoders, lookup_or_label)

    _clear_decoder_cache
    def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
        """Register *decoder* under *lookup*, optionally tagged with *label*."""
        self._register(self._decoders, lookup, decoder, label=label)

    _clear_decoder_cache
    def unregister_decoder(self, lookup_or_label: Lookup) -> None:
        """Remove the decoder registered under the given lookup or label."""
        self._unregister(self._decoders, lookup_or_label)

    def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
        """Register an encoder/decoder pair under the same lookup and label."""
        self.register_encoder(lookup, encoder, label=label)
        self.register_decoder(lookup, decoder, label=label)

    def unregister(self, label: str) -> None:
        """Remove both the encoder and the decoder registered under *label*."""
        self.unregister_encoder(label)
        self.unregister_decoder(label)

    _cache(maxsize=None)
    def get_encoder(self, type_str):
        """Return the encoder for *type_str* (cached; see note on the class)."""
        return self._get_registration(self._encoders, type_str)

    def has_encoder(self, type_str: abi.TypeStr) -> bool:
        """Return True when an encoder exists for *type_str*.

        Ambiguous registrations (MultipleEntriesFound) are re-raised rather
        than reported as absent; any other lookup failure yields False.
        """
        try:
            self.get_encoder(type_str)
        except Exception as e:
            if isinstance(e, MultipleEntriesFound):
                raise e
            return False
        return True

    _cache(maxsize=None)
    def get_decoder(self, type_str, strict=True):
        """Return the decoder for *type_str* (cached; see note on the class).

        For dynamic decoders the *strict* flag is pushed onto the instance.
        NOTE(review): because results are cached, mutating ``decoder.strict``
        here also affects later lookups that hit the cache — confirm intended.
        """
        decoder = self._get_registration(self._decoders, type_str)
        if (hasattr(decoder, 'is_dynamic') and decoder.is_dynamic):
            decoder.strict = strict
        return decoder

    def copy(self):
        """Return a copy with independently (shallow-)copied predicate mappings."""
        cpy = type(self)()
        cpy._encoders = copy.copy(self._encoders)
        cpy._decoders = copy.copy(self._decoders)
        return cpy
def test_set_get_vector_graphics():
    """Round-trip the drawing context's vector-graphics XML and check that a
    non-string assignment is rejected."""
    with Drawing() as ctx:
        ctx.stroke_width = 7
        xml = ctx.vector_graphics
        assert (xml.index('<stroke-width>7</stroke-width>') > 0)
        ctx.vector_graphics = '<wand><stroke-width>8</stroke-width></wand>'
        xml = ctx.vector_graphics
        assert (xml.index('<stroke-width>8</stroke-width>') > 0)
        with raises(TypeError):
            # BUG FIX: the original right-hand side literal was lost (syntax
            # error).  Any non-string value exercises the TypeError path;
            # TODO(review): restore the exact literal from the original test.
            ctx.vector_graphics = 12345
def register_command(argparse_parser, inspection):
    """Register *inspection*'s command — and, recursively, its subcommands —
    on the given argparse parser.

    Arguments belonging to a declared exclusive group are added to a
    mutually-exclusive argparse group (created lazily, one per declared
    group).  Returns the created subparser.  Raises ValueError when an
    argument appears in more than one exclusive group.
    """
    _command = inspection.command
    subparsers = _resolve_subparsers(argparse_parser)
    subparser = subparsers.add_parser(_command.name, aliases=_command.aliases, help=_command.help)
    exclusive_args = (_command.exclusive_arguments or [])
    # defaultdict factory creates an argparse mutually-exclusive group the
    # first time each declared group key is touched.
    mutually_exclusive_groups = defaultdict(subparser.add_mutually_exclusive_group)
    for arg in inspection.arguments.values():
        (add_argument_args, add_argument_kwargs) = _argument_to_argparse_input(arg)
        groups = [group for group in exclusive_args if (arg.name in group)]
        if (not groups):
            subparser.add_argument(*add_argument_args, **add_argument_kwargs)
        elif (len(groups) == 1):
            me_group = mutually_exclusive_groups[groups[0]]
            me_group.add_argument(*add_argument_args, **add_argument_kwargs)
        else:
            # groups is non-empty and not of length 1, i.e. more than one.
            msg = 'Argument {} is present in more than one exclusive group: {}. This should not be allowed by the decorator'.format(arg.name, groups)
            raise ValueError(msg)
    if (len(inspection.subcommands) > 0):
        # BUG FIX: the metavar string contained no '{}' placeholder, so the
        # trailing ".format(_command.name)" call was a no-op and has been
        # removed (behaviour unchanged).
        subcommand_parsers = subparser.add_subparsers(dest='_subcmd', help=_command.help, parser_class=create_subparser_class(subparser), metavar='[subcommand]')
        subcommand_parsers.required = True
        for (_, v) in inspection.subcommands:
            register_command(subcommand_parsers, v)
    return subparser
def get_distribution_for_column(*, column_type: ColumnType, column_name: str, current: SparkSeries, reference: Optional[SparkSeries]=None) -> Tuple[(Distribution, Optional[Distribution])]:
    """Compute the value distribution of *column_name* for the current data
    and, when provided, the reference data.

    Categorical columns yield category frequencies; numerical columns yield a
    histogram — when a reference is present, bins are computed over the union
    of both datasets so the two histograms share the same bin edges.
    Raises ValueError for any other column type.
    """
    reference_distribution: Optional[Distribution] = None
    if column_type == ColumnType.Categorical:
        current_distribution = get_distribution_for_category_column(current, column_name)
        if reference is not None:
            reference_distribution = get_distribution_for_category_column(reference, column_name)
    elif column_type == ColumnType.Numerical:
        if reference is None:
            (bins, dmax, dmin) = hist_bin_doane(current.dropna(subset=[column_name]), column_name)
        else:
            # Bin jointly over both datasets so the histograms are comparable.
            combined = current.dropna(subset=[column_name]).union(reference.dropna(subset=[column_name]))
            (bins, dmax, dmin) = hist_bin_doane(combined, column_name)
            reference_distribution = get_distribution_for_numerical_column(reference, column_name, bins=bins, dmax=dmax, dmin=dmin)
        current_distribution = get_distribution_for_numerical_column(current, column_name, bins=bins, dmax=dmax, dmin=dmin)
    else:
        raise ValueError(f'Cannot get distribution for a column with type {column_type}')
    return (current_distribution, reference_distribution)
def entryname_to_dbname(n):
    """Translate an entry/key name into its database column name.

    Aliases are first normalised to canonical names; file-part keys drop
    their leading underscore, statistics keys pass through unchanged, and
    every remaining name is treated as a MARS key and prefixed with 'i_'.
    """
    aliases = {
        'levellist': 'levelist',
        'level': 'levelist',
        'leveltype': 'levtype',
        'variable': 'param',
        'parameter': 'param',
        'realization': 'number',
        'realisation': 'number',
        'klass': 'class',
    }
    n = aliases.get(n, n)
    if n in FILEPARTS_KEY_NAMES or n in MORE_KEY_NAMES_WITH_UNDERSCORE:
        # These keys are stored without their leading underscore.
        assert (n[0] == '_'), n
        return n[1:]
    if n in STATISTICS_KEY_NAMES:
        return n
    return 'i_' + n
def serialize_block(block, transaction_serializer, is_pending):
    """Return *block* with each transaction run through
    *transaction_serializer* and the withdrawals carried over unchanged."""
    serialized_transactions = tuple(
        transaction_serializer(transaction, block, index, is_pending=is_pending)
        for index, transaction in enumerate(block['transactions'])
    )
    with_transactions = assoc(block, 'transactions', serialized_transactions)
    return assoc(with_transactions, 'withdrawals', block['withdrawals'])
def rich_format_error(self: click.ClickException, formatter: RichHelpFormatter) -> None:
    """Render a click error through rich: usage line, a help suggestion,
    the error message in a panel, and an optional epilogue."""
    console = formatter.console
    config = formatter.config
    if (getattr(self, 'ctx', None) is not None):
        if TYPE_CHECKING:
            assert hasattr(self, 'ctx')
        # NOTE(review): the usage string is computed but discarded here —
        # upstream presumably prints it, so this line may have been mangled
        # during extraction; confirm against the original source.
        self.ctx.get_usage()
    if config.errors_suggestion:
        # User-configured suggestion text takes precedence.
        console.print(Padding(config.errors_suggestion, (0, 1, 0, 1)), style=config.style_errors_suggestion)
    elif ((config.errors_suggestion is None) and (getattr(self, 'ctx', None) is not None) and (self.ctx.command.get_help_option(self.ctx) is not None)):
        # Default suggestion: point the user at the command's help option.
        cmd_path = self.ctx.command_path
        help_option = self.ctx.help_option_names[0]
        console.print(Padding(Columns((Text('Try'), Text(f"'{cmd_path} {help_option}'", style=config.style_errors_suggestion_command), Text('for help'))), (0, 1, 0, 1)), style=config.style_errors_suggestion)
    if hasattr(self, 'message'):
        # The error message itself, highlighted inside a bordered panel.
        console.print(Padding(Panel(formatter.highlighter(self.format_message()), border_style=config.style_errors_panel_border, title=config.errors_panel_title, title_align=config.align_errors_panel), (0, 0, 1, 0)))
    if config.errors_epilogue:
        console.print(Padding(config.errors_epilogue, (0, 1, 1, 1)))
# NOTE(review): the bare string below looks like a mangled registration
# decorator (e.g. "@registry.reg('cuda.bert_embeddings.func_decl')") — confirm.
('cuda.bert_embeddings.func_decl')
def bert_embeddings_gen_function_decl(func_attrs: Dict[(str, Any)]) -> str:
    """Render the function declaration for a bert_embeddings op.

    The signature's index type is derived from the dtype of the op's first
    input tensor.
    """
    dtype = python_int_dtype_to_c_dtype(func_attrs['inputs'][0]._attrs['dtype'])
    return FUNC_DECL.render(func_signature=FUNC_SIGNATURE.render(func_name=func_attrs['name'], index_type=dtype).strip())
class TestFBNetV3MaskRCNNFP32(RCNNBaseTestCases.TemplateTestCase):
    """Inference and export tests for the FBNetV3-A dsmask C4 Mask R-CNN config."""

    def setup_custom_test(self):
        super().setup_custom_test()
        self.cfg.merge_from_file('detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml')

    def test_inference(self):
        self._test_inference()

    # NOTE(review): the line below looks like a mangled parameterization
    # decorator (likely '@RCNNBaseTestCases.parameterized_test_export(...)')
    # — confirm against the original source.  Each entry is
    # [predictor_type, whether outputs must match the eager model].
    _parameterized_test_export([['_ops', True], ['torchscript', True], ['torchscript__ops', False], ['torchscript_int8', False]])
    def test_export(self, predictor_type, compare_match):
        # Skip predictor types unsupported in the current environment.
        _maybe_skip_test(self, predictor_type)
        self._test_export(predictor_type, compare_match=compare_match)
class Project(AbstractTableMeta, models.Model):
    """Django model for a project item with a title, link and level."""
    title = models.CharField(max_length=100)
    # NOTE(review): 'desription' is a typo for 'description', but the name is
    # part of the DB schema/API — renaming requires a migration and caller
    # updates, so it is deliberately left as-is here.
    desription = models.TextField(blank=True, default='')
    url = models.CharField(max_length=255)
    # Level tier, restricted to the two declared choices; defaults to 1.
    level = models.IntegerField(choices=((1, 'Level 1'), (2, 'Level 2')), default=1)
    # Presumably marks the project as mandatory — confirm intended semantics.
    required = models.BooleanField(default=True)

    def __str__(self):
        return self.title
def extractPhiarehereWordpressCom(item):
    """Parse a release item from phiarehere.wordpress.com.

    Returns None for previews or items with no chapter/volume information,
    a release message for recognised series tags, and False when no tag
    matches.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if ((not (chp or vol)) or ('preview' in item['title'].lower())):
        return None
    # Tag -> (canonical series name, translation type).
    known_series = {
        'Protagonist Harem Delusion': ('There Will Always Be Protagonists With Delusions of Starting a Harem', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in known_series.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_adding_a_extra_volume_with_volume_mount():
    """extraVolumes/extraVolumeMounts supplied as YAML strings must end up in
    the rendered statefulset pod spec."""
    config = '\nextraVolumes: |\n - name: extras\n emptyDir: {}\nextraVolumeMounts: |\n - name: extras\n mountPath: /usr/share/extras\n readOnly: true\n'
    rendered = helm_template(config)
    pod_spec = rendered['statefulset'][uname]['spec']['template']['spec']
    # The extra volume is appended to the pod's volume list ...
    assert ({'name': 'extras', 'emptyDir': {}} in pod_spec['volumes'])
    # ... and the matching mount appears on the first container.
    mounts = pod_spec['containers'][0]['volumeMounts']
    assert ({'name': 'extras', 'mountPath': '/usr/share/extras', 'readOnly': True} in mounts)
def decode_wire(frame):
    """Decode a gap-encoded serial protocol from a captured *frame*.

    Bytes are assembled 8 bits at a time from the spacing between a falling
    edge and the following edge: spacing below half the minimum bit period
    decodes as 1, otherwise 0; a gap longer than one bit period resets the
    partially-assembled byte.  Emits one WIRE(channel, x_start, x_stop, value)
    record per completed byte, with positions mapped to frame x-coordinates
    via the frame's sx/tx scaling.
    """
    results = []
    # Sample-index -> x-coordinate mapping for this frame.
    x_scale = frame.sx
    x_trans = frame.tx
    channel = frame.channel
    ttl = frame.to_ttl()
    # diff[i] == -1 marks a falling edge at sample i, +1 a rising edge.
    diff = (ttl[1:] - ttl[:(- 1)])
    edges = np.nonzero(diff)[0]
    edges_fall = np.where((diff == (- 1)))[0]
    # The minimum spacing between falling edges defines one bit period.
    bit_pts = (edges_fall[1:] - edges_fall[:(- 1)]).min()
    bit_half_pts = (bit_pts / 2)
    value = 0  # byte accumulator (fully replaced after 8 shifts below)
    n = 0      # number of bits collected toward the current byte
    for (i, start) in enumerate(edges):
        if (diff[start] != (- 1)):
            # Only falling edges start a bit cell.
            continue
        try:
            pts = (edges[(i + 1)] - start)
            if (pts > bit_pts):
                # Inter-byte gap: discard any partial byte.
                n = 0
                continue
        except IndexError:
            # No further edge — the capture ends mid-byte.
            break
        n += 1
        # Short spacing -> 1, long spacing -> 0.
        bit = int((pts < bit_half_pts))
        # Shift right and insert the new bit at the top, so the first-received
        # bit ends up in bit 0 once 8 bits have arrived.
        value = ((value >> 1) | (bit << 7))
        if (n == 1):
            # Remember where this byte started, in frame coordinates.
            x_start = (x_trans + (x_scale * (1 + start)))
        elif (n == 8):
            n = 0
            x_stop = (x_trans + (x_scale * ((1 + start) + bit_pts)))
            msg = WIRE(channel, x_start, x_stop, value)
            results.append(msg)
    return results
def main():
    """Ansible module entry point for configuring FortiOS CIFS profiles.

    Builds the argument spec from the versioned schema, connects over the
    httpapi socket, applies the configuration and exits with changed/diff
    status (warning, not failing, on schema/firmware version mismatch).
    """
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'name'
    # Standard FortiOS module arguments plus the cifs_profile payload itself.
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'cifs_profile': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Mirror the schema options into the cifs_profile suboptions; the mkey
    # ('name') becomes required.
    for attribute_name in module_spec['options']:
        fields['cifs_profile']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['cifs_profile']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            # Default to no logging when the option is absent.
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Version mismatch is reported (below) but does not abort execution.
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'cifs_profile')
        (is_error, has_changed, result, diff) = fortios_cifs(module.params, fos, module.check_mode)
    else:
        # No httpapi socket: this module only works over a connection plugin.
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
class PoliticalFaction(Base):
    """SQLAlchemy model for an in-game political faction belonging to a country."""
    __tablename__ = 'political_faction'
    faction_id = Column(Integer, primary_key=True)
    country_id = Column(ForeignKey(Country.country_id), index=True)
    faction_name = Column(String(80))
    # The faction's identifier as used inside the game's own data.
    faction_id_in_game = Column(Integer, index=True)
    faction_type_description_id = Column(ForeignKey(SharedDescription.description_id))
    db_faction_type = relationship('SharedDescription')
    country = relationship('Country', back_populates='political_factions')
    # Deleting a faction cascades to its historical events.
    historical_events = relationship('HistoricalEvent', back_populates='faction', cascade='all,delete,delete-orphan')

    def type(self):
        # Human-readable faction type text from the shared description row.
        # NOTE(review): likely a stripped @property upstream — confirm.
        return self.db_faction_type.text

    def rendered_name(self):
        # Faction name run through the game's name-rendering helper.
        rendered = game_info.render_name(self.faction_name)
        return rendered
def terminate(pid):
    """Forcefully terminate the process identified by *pid*.

    On Windows this calls TerminateProcess via ctypes; elsewhere it sends
    SIGTERM.  Raises OSError when the process cannot be opened (Windows) or
    does not exist (POSIX, via os.kill).
    """
    if WINDOWS:
        import ctypes
        PROCESS_TERMINATE = 1
        handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
        # BUG FIX: OpenProcess returns NULL on failure; the original passed
        # the invalid handle straight to TerminateProcess.  Also ensure the
        # handle is closed even if TerminateProcess raises.
        if not handle:
            raise OSError('OpenProcess failed for pid {}'.format(pid))
        try:
            ctypes.windll.kernel32.TerminateProcess(handle, (- 1))
        finally:
            ctypes.windll.kernel32.CloseHandle(handle)
    else:
        os.kill(pid, signal.SIGTERM)
class BaseRawItemized(db.Model):
    """Abstract base for raw itemized filing records.

    Declares the columns shared by the raw schedules; subclasses supply the
    table plus the attributes referenced below ('filing', 'pgo').
    NOTE(review): the bare '_property' lines look like stripped '@property'
    decorators — confirm against the original source.
    """
    __abstract__ = True
    transaction_id = db.Column('tran_id', db.String)
    image_number = db.Column('imageno', db.String, doc=docs.IMAGE_NUMBER)
    entity_type = db.Column('entity', db.String)
    amendment_indicator = db.Column('amend', db.String)
    memo_code = db.Column(db.String)
    memo_text = db.Column(db.String)
    back_reference_transaction_id = db.Column('br_tran_id', db.String)
    back_reference_schedule_name = db.Column('br_sname', db.String)
    load_timestamp = db.Column('create_dt', db.DateTime)

    _property
    def report_type(self):
        # Form type of the parent filing ('filing' is defined on subclasses).
        return self.filing.form_type

    _property
    def cycle(self):
        # Derived from the load timestamp's year — presumably a stand-in for
        # the election cycle; confirm intended semantics.
        return self.load_timestamp.year

    _property
    def memoed_subtotal(self):
        # An 'X' memo code marks the row as a memo (subtotal) line.
        return (self.memo_code == 'X')

    _property
    def fec_election_type_desc(self):
        """Map the first letter of the election code ('pgo', defined on
        subclasses) to a human-readable description, or None when unset/unknown."""
        election_map = {'P': 'PRIMARY', 'G': 'GENERAL', 'O': 'OTHER'}
        if self.pgo:
            return election_map.get(str(self.pgo).upper()[0])
        return None

    def pdf_url(self):
        # URL of the schedule PDF for this record's image number.
        # NOTE(review): possibly also a stripped @property — confirm.
        return utils.make_schedule_pdf_url(str(self.image_number))
def build_ir(ofinputs_by_version):
    """Build the loxi IR for every wire version, then the unified IR.

    Populates ``loxi_globals.ir`` (one protocol per version) and
    ``loxi_globals.unified`` as side effects; returns None.
    """
    # BUG FIX (cleanup): the original initialised unused locals
    # 'classes' and 'enums'; they have been removed.
    for (wire_version, ofinputs) in ofinputs_by_version.items():
        version = OFVersions.from_wire(wire_version)
        # Build the per-version protocol IR and register it globally.
        loxi_globals.ir[version] = loxi_ir.build_protocol(version, ofinputs)
    loxi_globals.unified = loxi_ir.build_unified_ir(loxi_globals.ir)
# NOTE(review): the line below looks like a mangled decorator (likely
# "@pytest.fixture(scope='session')") — confirm against the original.
(scope='session')
def project_dir_copy():
    """Yield a temporary copy of the dbt test project, removed afterwards.

    Working on a copy lets tests mutate project files without touching the
    source tree.
    """
    dbt_project_copy_dir = mkdtemp(prefix='integration_tests_project_')
    try:
        shutil.copytree(DBT_PROJECT_PATH, dbt_project_copy_dir, dirs_exist_ok=True, symlinks=True)
        (yield dbt_project_copy_dir)
    finally:
        # Always clean up, even when a consuming test fails.
        shutil.rmtree(dbt_project_copy_dir)
class FidesConfig(FidesSettings):
    """Top-level application settings aggregating all configuration sections.

    The leading fields are seeded from environment variables at import time
    and excluded from serialized output.
    """
    test_mode: bool = Field(default=get_test_mode(), description='Whether or not the application is being run in test mode.', exclude=True)
    hot_reloading: bool = Field(default=(getenv('FIDES__HOT_RELOAD', '').lower() == 'true'), description='Whether or not to enable hot reloading for the webserver.', exclude=True)
    dev_mode: bool = Field(default=(getenv('FIDES__DEV_MODE', '').lower() == 'true'), description="Similar to 'test_mode', enables certain features when true.", exclude=True)
    oauth_instance: Optional[str] = Field(default=getenv('FIDES__OAUTH_INSTANCE', None), description="A value that is prepended to the generated 'state' param in outbound OAuth2 authorization requests. Used during OAuth2 testing to associate callback responses back to this specific Fides instance.", exclude=True)
    # One settings object per configuration section.
    admin_ui: AdminUISettings
    consent: ConsentSettings
    cli: CLISettings
    celery: Dict = Field(description='This section can be used to pass config vars to Celery directly.')
    credentials: Dict = Field(description='This is a special section that is used to store arbitrary key/value pairs to be used as credentials.')
    database: DatabaseSettings
    execution: ExecutionSettings
    logging: LoggingSettings
    notifications: NotificationSettings
    redis: RedisSettings
    security: SecuritySettings
    user: UserSettings

    class Config():
        """Pydantic settings configuration."""
        case_sensitive = True

        # NOTE(review): pydantic expects customise_sources to be a
        # @classmethod; the decorator appears to have been stripped — confirm.
        def customise_sources(cls, init_settings: SettingsSourceCallable, env_settings: SettingsSourceCallable, file_secret_settings: SettingsSourceCallable) -> Tuple[(SettingsSourceCallable, ...)]:
            # Environment variables take precedence over init kwargs, which
            # take precedence over secrets files.
            return (env_settings, init_settings, file_secret_settings)

    def log_all_config_values(self) -> None:
        """Debug-log every key/value pair of the listed settings sections."""
        for settings in [self.cli, self.user, self.logging, self.database, self.notifications, self.redis, self.security, self.execution, self.admin_ui]:
            for (key, value) in settings.dict().items():
                log.debug(f'Using config: {settings.Config.env_prefix}{key.upper()} = {value}')
class OptionPlotoptionsItemSonificationDefaultinstrumentoptions(Options):
    """Generated option wrapper for
    plotOptions.<series>.sonification.defaultInstrumentOptions.

    NOTE(review): each getter/setter pair below shares one method name;
    upstream these are almost certainly '@property' / '@<name>.setter' pairs
    whose decorators were stripped.  As written, each later def shadows the
    earlier one — confirm against the generated original.
    """

    def activeWhen(self) -> 'OptionPlotoptionsItemSonificationDefaultinstrumentoptionsActivewhen':
        # Sub-options controlling when the instrument is active.
        return self._config_sub_data('activeWhen', OptionPlotoptionsItemSonificationDefaultinstrumentoptionsActivewhen)

    def instrument(self):
        # Instrument name; defaults to 'piano'.
        return self._config_get('piano')

    def instrument(self, text: str):
        self._config(text, js_type=False)

    def mapping(self) -> 'OptionPlotoptionsItemSonificationDefaultinstrumentoptionsMapping':
        # Sub-options mapping data values to sound parameters.
        return self._config_sub_data('mapping', OptionPlotoptionsItemSonificationDefaultinstrumentoptionsMapping)

    def midiName(self):
        # MIDI instrument name; no default.
        return self._config_get(None)

    def midiName(self, text: str):
        self._config(text, js_type=False)

    def pointGrouping(self) -> 'OptionPlotoptionsItemSonificationDefaultinstrumentoptionsPointgrouping':
        # Sub-options for grouping nearby points into one sound.
        return self._config_sub_data('pointGrouping', OptionPlotoptionsItemSonificationDefaultinstrumentoptionsPointgrouping)

    def roundToMusicalNotes(self):
        # Defaults to True.
        return self._config_get(True)

    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    def showPlayMarker(self):
        # Defaults to True.
        return self._config_get(True)

    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    def type(self):
        # Defaults to 'instrument'.
        return self._config_get('instrument')

    def type(self, text: str):
        self._config(text, js_type=False)
class ModelRepositoryGetter(BaseAction):
    """Fetch an Ersilia model repository into the local model path.

    Resolution order in get(): a local development copy, an explicit repo
    path, GitHub (when forced), otherwise an S3 zip with GitHub fallback.
    After fetching, the model's Dockerfile is adapted to the running
    environment (python version, sudo removal for root).
    """

    def __init__(self, model_id, config_json, force_from_github, force_from_s3, repo_path):
        BaseAction.__init__(self, model_id=model_id, config_json=config_json, credentials_json=None)
        self.token = self.cfg.HUB.TOKEN
        self.github_down = GitHubDownloader(self.token)
        self.s3_down = S3Downloader()
        self.org = self.cfg.HUB.ORG
        # Source-selection flags; see get() for how they interact.
        self.force_from_github = force_from_github
        self.force_from_s3 = force_from_s3
        self.repo_path = repo_path

    def _dev_model_path(self):
        """Return a local development copy of the model, or None when absent.

        Checks the models-development path first, then the ersilia
        development checkout's test/models directory.
        """
        pt = Paths()
        path = pt.models_development_path()
        if (path is not None):
            path = os.path.join(path, self.model_id)
        if pt.exists(path):
            return path
        else:
            path = pt.ersilia_development_path()
            if (path is not None):
                path = os.path.join(path, 'test', 'models', self.model_id)
            if pt.exists(path):
                return path
        return None

    # NOTE(review): no 'self' parameter — likely a stripped @staticmethod
    # (it is invoked as self._copy_from_local(src, dst) below) — confirm.
    def _copy_from_local(src, dst):
        shutil.copytree(src, dst)

    def _copy_from_github(self, dst):
        """Clone the model repository from the hub organisation on GitHub."""
        self.github_down.clone(org=self.org, repo=self.model_id, destination=dst)

    def _copy_zip_from_s3(self, dst):
        """Download the zipped model from S3 and extract it next to *dst*."""
        self.logger.debug('Downloading model from S3 in zipped format')
        tmp_file = os.path.join(tempfile.mkdtemp('ersilia-'), 'model.zip')
        self.s3_down.download_from_s3(bucket_url=S3_BUCKET_URL_ZIP, file_name=(self.model_id + '.zip'), destination=tmp_file)
        self.logger.debug('Extracting model from {0}'.format(tmp_file))
        # Extract into the parent directory; the zip contains the model folder.
        dst = '/'.join(dst.split('/')[:(- 1)])
        self.logger.debug('...to {0}'.format(dst))
        with zipfile.ZipFile(tmp_file, 'r') as zip_ref:
            zip_ref.extractall(dst)

    def _change_py_version_in_dockerfile_if_necessary(self):
        """Rewrite the Dockerfile FROM tag's py-version suffix to match the
        bentoml-required python version."""
        self.logger.debug('Changing python version if necessary')
        path = self._model_path(model_id=self.model_id)
        df = DockerfileFile(path=path)
        version = df.get_bentoml_version()
        self.logger.debug(version)
        dockerfile_path = os.path.join(path, 'Dockerfile')
        with open(dockerfile_path, 'r') as f:
            R = f.readlines()
        S = []
        for r in R:
            if r.startswith('FROM '):
                # Base-image tags look like '...-pyXY'; swap the trailing
                # py component when present.
                r = r.split('-')
                if r[(- 1)].startswith('py'):
                    p = version['python']
                    r = '-'.join((r[:(- 1)] + [p]))
                else:
                    r = '-'.join(r)
            S += [r]
        with open(dockerfile_path, 'w') as f:
            for s in S:
                f.write((s + os.linesep))

    # NOTE(review): no 'self' parameter — likely a stripped @staticmethod
    # (invoked as self._is_root_user() below) — confirm.
    def _is_root_user():
        return (os.geteuid() == 0)

    def _remove_sudo_if_root(self):
        """Strip 'sudo' from the Dockerfile when running as root, where the
        sudo binary is typically unavailable/unnecessary."""
        path = self._model_path(model_id=self.model_id)
        dockerfile_path = os.path.join(path, 'Dockerfile')
        if self._is_root_user():
            self.logger.debug('User is root! Removing sudo commands')
            with open(dockerfile_path, 'r') as f:
                content = f.read()
            # Cover the common separators preceding 'sudo'.
            content = content.replace('RUN sudo ', 'RUN ')
            content = content.replace(' sudo ', ' ')
            content = content.replace(';sudo ', '; ')
            content = content.replace('&sudo ', '& ')
            content = content.replace('|sudo ', '| ')
            with open(dockerfile_path, 'w') as f:
                f.write(content)
        else:
            self.logger.debug('User is not root')

    def _prepare_inner_template(self):
        self.logger.debug('Preparing inner template if necessary')
        TemplatePreparer(model_id=self.model_id, config_json=self.config_json).prepare()

    # NOTE(review): '_ersilia_exception' looks like a mangled decorator
    # (likely '@throw_ersilia_exception' or similar) — confirm.
    _ersilia_exception
    def get(self):
        """Fetch the model repository and post-process its Dockerfile.

        Tries, in order: local dev copy, explicit repo_path, GitHub (when
        forced), S3 zip with GitHub fallback.
        """
        folder = self._model_path(self.model_id)
        dev_model_path = self._dev_model_path()
        if (dev_model_path is not None):
            self.logger.debug('Copying from local {0} to {1}'.format(dev_model_path, folder))
            self._copy_from_local(dev_model_path, folder)
        elif (self.repo_path is not None):
            self._copy_from_local(self.repo_path, folder)
        elif self.force_from_github:
            self._copy_from_github(folder)
        else:
            try:
                self.logger.debug('Trying to download from S3')
                self._copy_zip_from_s3(folder)
            # NOTE(review): bare 'except' also swallows KeyboardInterrupt/
            # SystemExit — deliberate best-effort fallback, but consider
            # narrowing to Exception.
            except:
                self.logger.debug('Could not download in zip format in S3. Downloading from GitHub repository.')
                if self.force_from_s3:
                    raise S3DownloaderError(model_id=self.model_id)
                else:
                    self._copy_from_github(folder)
        self._prepare_inner_template()
        self._change_py_version_in_dockerfile_if_necessary()
        self._remove_sudo_if_root()
def test_describe_with_no_nodes():
    """'describe' on a pipeline with zero nodes reports empty counts and
    dependencies, in both human-readable and --json output."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        pipeline_file = 'pipeline_with_zero_nodes.pipeline'
        pipeline_file_path = (((Path(__file__).parent / 'resources') / 'pipelines') / pipeline_file)
        assert pipeline_file_path.is_file()

        # Human-readable output.
        result = runner.invoke(pipeline, ['describe', str(pipeline_file_path)])
        assert (result.exit_code == 0), result.output
        assert ('Pipeline name: pipeline_with_zero_nodes' in result.output)
        assert ('Description: None specified' in result.output)
        assert ('Pipeline type: None specified' in result.output)
        assert ('Pipeline runtime: Generic' in result.output)
        assert ('Pipeline format version: 7' in result.output)
        # BUG FIX: this assertion appeared twice verbatim; the duplicate was
        # removed.  The second copy was presumably meant to check the
        # custom-node count (cf. custom_node_count in the JSON checks below)
        # — TODO(review): add that assertion once its exact wording is known.
        assert ('Number of generic nodes: 0' in result.output)
        assert ('Script dependencies: None specified' in result.output)
        assert ('Notebook dependencies: None specified' in result.output)
        assert ('Local file dependencies: None specified' in result.output)
        assert ('Component dependencies: None specified' in result.output)
        assert ('Volume dependencies: None specified' in result.output)
        assert ('Container image dependencies: None specified' in result.output)
        assert ('Kubernetes secret dependencies: None specified' in result.output)

        # Machine-readable (--json) output.
        result = runner.invoke(pipeline, ['describe', str(pipeline_file_path), '--json'])
        assert (result.exit_code == 0), result.output
        result_json = json.loads(result.output)
        assert (result_json['name'] == 'pipeline_with_zero_nodes')
        assert (result_json['description'] is None)
        assert (result_json['pipeline_type'] is None)
        assert (result_json['pipeline_format_version'] == 7)
        assert (result_json['pipeline_runtime'] == 'Generic')
        assert (result_json['generic_node_count'] == 0)
        assert (result_json['custom_node_count'] == 0)
        # 'property' renamed to avoid shadowing the builtin.
        for dependency_key in ['scripts', 'notebooks', 'files', 'custom_components', 'container_images', 'volumes', 'kubernetes_secrets']:
            assert isinstance(result_json['dependencies'][dependency_key], list)
            assert (len(result_json['dependencies'][dependency_key]) == 0)
class callbacks(object):
    """Facade collecting the plugin-callback (un)registration hooks from the
    system, statistics and web modules under one namespace."""

    # BUG FIX (consistency): three handlers below were assigned without
    # staticmethod(), unlike their siblings.  A plain Python function stored
    # as a class attribute is re-bound as an instance method when accessed
    # through an instance, which would silently pass the instance as the
    # first argument; wrap them all uniformly.
    systemAddReinitHandler = staticmethod(system.registerReinitialisationCallback)
    systemRemoveReinitHandler = staticmethod(system.unregisterReinitialisationCallback)
    systemAddTickHandler = staticmethod(system.registerTickCallback)
    systemRemoveTickHandler = staticmethod(system.unregisterTickCallback)
    statsAddHandler = staticmethod(statistics.registerStatsCallback)
    statsRemoveHandler = staticmethod(statistics.unregisterStatsCallback)
    # Web method type constants, re-exported for convenience.
    WEB_METHOD_DASHBOARD = web.WEB_METHOD_DASHBOARD
    WEB_METHOD_TEMPLATE = web.WEB_METHOD_TEMPLATE
    WEB_METHOD_RAW = web.WEB_METHOD_RAW
    webAddHeader = staticmethod(web.registerHeaderCallback)
    webRemoveHeader = staticmethod(web.unregisterHeaderCallback)
    webAddDashboard = staticmethod(web.registerDashboardCallback)
    webRemoveDashboard = staticmethod(web.unregisterDashboardCallback)
    webAddMethod = staticmethod(web.registerMethodCallback)
    webRemoveMethod = staticmethod(web.unregisterMethodCallback)
class SensorManager(object):
_instance = None
SETTINGS_FILE = (os.getenv('HOME') + '/.indicator-sysmonitor.json')
digit_regex = re.compile('\\d+')
class __impl():
settings = {'custom_text': 'cpu: {cpu} mem: {mem}', 'interval': 2, 'on_startup': False, 'sensors': {}}
supported_sensors = None
def __init__(self):
self.sensor_instances = [CPUSensor(), AmdGpuSensor(), AmdGpu1Sensor(), NvGPUSensor(), MemSensor(), NetSensor(), NetCompSensor(), TotalNetSensor(), BatSensor(), FSSensor(), SwapSensor(), UporDownSensor(), PublicCountrySensor(), PublicCountryISOCodeSensor(), PublicIPSensor(), CPUTemp(), NvGPUTemp()]
for sensor in self.sensor_instances:
self.settings['sensors'][sensor.name] = (sensor.desc, sensor.cmd)
self._last_net_usage = [0, 0]
self._fetcher = None
def update_regex(self, names=None):
if (names is None):
names = list(self.settings['sensors'].keys())
reg = '|'.join(names)
reg = '\\A({})\\Z'.format(reg)
self.supported_sensors = re.compile('{}'.format(reg))
def get(self, name):
for sensor in self.sensor_instances:
if (sensor.check(name) is not None):
return sensor
return None
def exists(self, name):
return bool(self.supported_sensors.match(name))
def check(self, sensor_string):
for sensor in self.sensor_instances:
sensor.check(sensor_string)
def add(self, name, desc, cmd):
if self.exists(name):
raise ISMError(_('Sensor name already in use.'))
self.settings['sensors'][name] = (desc, cmd)
self.update_regex()
def delete(self, name):
sensors = self.settings['sensors']
names = list(sensors.keys())
if (name not in names):
raise ISMError(_('Sensor is not defined.'))
(_desc, default) = sensors[name]
if (default is True):
raise ISMError(_('Can not delete default sensors.'))
del sensors[name]
self.update_regex()
def edit(self, name, newname, desc, cmd):
try:
sensors = self.settings['sensors']
(_desc, default) = sensors[name]
except KeyError:
raise ISMError(_('Sensor does not exists.'))
if (default is True):
raise ISMError(_('Can not edit default sensors.'))
if (newname != name):
if (newname in list(sensors.keys())):
raise ISMError(_('Sensor name already in use.'))
sensors[newname] = (desc, cmd)
del sensors[name]
self.settings['custom_text'] = self.settings['custom_text'].replace(name, newname)
self.update_regex()
def load_settings(self):
    """Merge settings persisted in SETTINGS_FILE into the in-memory defaults.

    Keys absent from the file are skipped individually (the previous code
    indexed cfg[...] directly, so a single missing key raised KeyError and
    silently discarded every remaining persisted setting).  I/O and JSON
    errors are logged and otherwise ignored.
    """
    try:
        with open(SensorManager.SETTINGS_FILE, 'r') as f:
            cfg = json.load(f)
        if cfg.get('custom_text') is not None:
            self.settings['custom_text'] = cfg['custom_text']
        if cfg.get('interval') is not None:
            self.settings['interval'] = cfg['interval']
        if cfg.get('on_startup') is not None:
            self.settings['on_startup'] = cfg['on_startup']
        if cfg.get('sensors') is not None:
            # Keep the built-in defaults and overlay persisted custom sensors.
            merged = self.settings['sensors']
            merged.update(cfg['sensors'])
            self.settings['sensors'] = merged
        self.update_regex()
    except Exception as ex:
        logging.exception(ex)
        logging.error('Reading settings failed')
def save_settings(self):
    """Persist the current settings as JSON; failures are logged, not raised."""
    try:
        with open(SensorManager.SETTINGS_FILE, 'w') as handle:
            json.dump(self.settings, handle)
    except Exception as error:
        logging.exception(error)
        logging.error('Writing settings failed')
def get_guide(self):
    """Render the custom text template with fixed-width sample values.

    Used to preview the widest plausible label.  Fetched data is overridden
    with placeholder readings before formatting.  (The previous code rendered
    the template twice and discarded the first result.)
    """
    data = self._fetcher.fetch()
    # Replace the first filesystem sensor reading with a sample value.
    for key in data:
        if key.startswith('fs'):
            data[key] = '000gB'
            break
    data['mem'] = data['cpu'] = data['bat'] = '000%'
    data['net'] = '666kB/s 666kB/s'
    return self.settings['custom_text'].format(**data)
def get_label(self, data):
    """Render *data* through the custom text template, mapping every failure
    to a human-readable label instead of raising.

    Returns '(no output)' for empty data, an 'Invalid Sensor' label when the
    template references a key missing from *data*, and an 'Unknown error'
    label (with the exception text — the original format string lacked the
    '{}' placeholder, so the error detail was silently dropped) otherwise.
    """
    try:
        label = (self.settings['custom_text'].format(**data) if len(data) else _('(no output)'))
    except KeyError as ex:
        label = _('Invalid Sensor: {}').format(ex)
    except Exception as ex:
        logging.exception(ex)
        label = _('Unknown error: {}').format(ex)
    return label
def initiate_fetcher(self, parent):
    """(Re)start the background StatusFetcher owned by *parent*."""
    if (self._fetcher is not None):
        # Stop the previous fetcher before replacing it.
        self._fetcher.stop()
    self._fetcher = StatusFetcher(parent)
    self._fetcher.start()
    logging.info('Fetcher started')
def fill_liststore(self, list_store):
    """Append one [name, description] row per configured sensor."""
    for name, info in self.settings['sensors'].items():
        list_store.append([name, info[0]])
def get_command(self, name):
    """Return the command (or True for built-ins) stored for sensor *name*."""
    cmd = self.settings['sensors'][name][1]
    return cmd
def set_custom_text(self, custom_text):
    """Set the output format template."""
    self.settings['custom_text'] = custom_text
def get_custom_text(self):
    """Return the output format template."""
    return self.settings['custom_text']
def set_interval(self, interval):
    """Set the refresh interval (seconds)."""
    self.settings['interval'] = interval
def get_interval(self):
    """Return the refresh interval (seconds)."""
    return self.settings['interval']
def get_results(self):
    """Evaluate every sensor referenced by the custom text template.

    Returns {sensor_name: value}.  Built-in sensors are resolved through
    their instance; any other name is treated as a user-defined shell
    command and executed.
    """
    res = {}
    from preferences import Preferences  # local import — presumably to avoid a circular import; confirm
    global cpu_load
    # Sample CPU load once per run so all CPU-based sensors share one reading.
    cpu_load = ps.cpu_percent(interval=0, percpu=True)
    for sensor in Preferences.sensors_regex.findall(self.settings['custom_text']):
        # Strip the first/last characters — presumably the '{ }' of a
        # placeholder; confirm against Preferences.sensors_regex.
        sensor = sensor[1:(- 1)]
        instance = self.get(sensor)
        if instance:
            value = instance.get_value(sensor)
            if value:
                res[sensor] = value
        else:
            # Not a built-in: run the stored custom command.
            res[sensor] = BaseSensor.script_exec(self.settings['sensors'][sensor][1])
    return res
def __init__(self):
    """Singleton façade: create the real implementation once and share it."""
    if (SensorManager._instance is None):
        SensorManager._instance = SensorManager.__impl()
    # Store via __dict__ with the name-mangled key so that the __setattr__
    # override below is NOT triggered for this one attribute.
    self.__dict__['_SensorManager__instance'] = SensorManager._instance
def __getattr__(self, attr):
    """Delegate attribute reads to the shared implementation instance."""
    return getattr(self.__instance, attr)
def __setattr__(self, attr, value):
    """Delegate attribute writes to the shared implementation instance."""
    return setattr(self.__instance, attr, value)
def test_change_iter(full_snapshot):
    """RealListModel must follow the snapshot iteration selected via setIter():
    a partial update to iteration 1 must change the exposed realization status."""
    source_model = SnapshotModel()
    model = RealListModel(None, 0)
    model.setSourceModel(source_model)
    # Attach Qt's model tester so model-API contract violations surface as warnings.
    reporting_mode = qt_api.QtTest.QAbstractItemModelTester.FailureReportingMode.Warning
    tester = qt_api.QtTest.QAbstractItemModelTester(model, reporting_mode)
    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 0)
    assert (model.index(0, 0, QModelIndex()).data(NodeRole).data['status'] == REALIZATION_STATE_UNKNOWN)
    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 1)
    model.setIter(1)
    partial = partial_snapshot(full_snapshot)
    partial._realization_states['0'].update({'status': REALIZATION_STATE_FINISHED})
    source_model._add_partial_snapshot(SnapshotModel.prerender(partial), 1)
    # The partial update targeted iteration 1, which the model now tracks.
    assert (model.index(0, 0, QModelIndex()).data(NodeRole).data['status'] == REALIZATION_STATE_FINISHED)
def test_remove_stacktrace_locals():
    """Frame-local variables must be stripped from every stacktrace frame."""
    data = {'exception': {'stacktrace': [{'vars': {'foo': 'bar', 'password': 'hello', 'secret': 'hello'}}]}}
    result = processors.remove_stacktrace_locals(None, data)
    assert ('stacktrace' in result['exception'])
    frames = result['exception']['stacktrace']
    assert all('vars' not in frame for frame in frames)
def test_ciftify_dlabel_to_vol_custom_map(output_dir):
    """End-to-end: ciftify_dlabel_to_vol must write the requested NIfTI when
    given a custom dlabel atlas, a left mid-surface and a volume template."""
    output_nii = os.path.join(output_dir, 'custom_atlas.nii.gz')
    run(['ciftify_dlabel_to_vol', '--input-dlabel', custom_dlabel, '--left-mid-surface', hcp_Lmid, '--volume-template', test_nifti, '--output-nifti', output_nii])
    assert os.path.isfile(output_nii)
class DataAttrs(primitives.JsDataModel):
    """Mutable mapping of data attributes, rendered as a JS object literal."""

    def __init__(self, page: primitives.PageModel, attrs: dict = None, options: dict = None):
        self.page = page
        self.options = options
        self._attrs = attrs or {}

    def custom(self, name: str, value: types.JS_DATA_TYPES):
        """Store *value* converted to its JavaScript representation."""
        self._attrs[name] = JsUtils.jsConvertData(value, None)
        return self

    def attr(self, name: str, value: Any):
        """Store *value* verbatim under *name*."""
        self._attrs[name] = value
        return self

    def attrs(self, values: dict):
        """Merge the whole *values* mapping into the attributes."""
        self._attrs.update(values)
        return self

    def set_val(self, value: Any, name: str = None, js_type: bool = False):
        """Set an attribute named after the calling method unless *name* is given.

        Strings that look like JS function definitions, and values explicitly
        flagged via *js_type*, are wrapped so they are emitted as raw JS.
        """
        if (hasattr(value, 'startswith') and value.startswith('function(')) or js_type:
            value = JsUtils.jsWrap(value)
        # sys._getframe().f_back is the caller's frame; its function name
        # supplies the attribute name.  Call depth must not change.
        self.attr(name or sys._getframe().f_back.f_code.co_name, value)

    def __str__(self):
        rendered = ['%s: %s' % (key, val) for key, val in self._attrs.items()]
        return '{%s}' % ', '.join(rendered)

    def toStr(self) -> str:
        rendered = ['%s: %s' % (key, JsUtils.jsConvertData(val, None)) for key, val in self._attrs.items()]
        return '{%s}' % ', '.join(rendered)
('ecs_deploy.cli.get_client')  # NOTE(review): looks like a truncated decorator — presumably @patch('ecs_deploy.cli.get_client'); confirm upstream
def test_deploy_one_new_environment_variable(get_client, runner):
    """Each '-e container key value' triple must update only that container's
    environment; variables not mentioned must not be reported as changed."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key')
    result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'application', 'foo', 'bar', '-e', 'webserver', 'foo', 'baz'))
    assert (result.exit_code == 0)
    assert (not result.exception)
    assert (u'Deploying based on task definition: test-task:1' in result.output)
    assert (u'Updating task definition' in result.output)
    assert (u'Changed environment "foo" of container "application" to: "bar"' in result.output)
    assert (u'Changed environment "foo" of container "webserver" to: "baz"' in result.output)
    # An untouched variable must not appear in the change log.
    assert (u'Changed environment "lorem" of container "webserver" to: "ipsum"' not in result.output)
    assert (u'Successfully created revision: 2' in result.output)
    assert (u'Successfully deregistered revision: 1' in result.output)
    assert (u'Successfully changed task definition to: test-task:2' in result.output)
    assert (u'Deployment successful' in result.output)
def lambda_handler(event, context):
    """AWS Lambda entry point handling mailing-list unsubscribe requests.

    The request body carries 'cognito_id' and 'list'.  An empty cognito_id is
    resolved via look_up_cognito_id; an empty list name unsubscribes the user
    from every list.  Returns a CORS-enabled 200/502 API Gateway response.
    """
    print(event)
    # Shared 502 response for every failure path below.
    error_message = {'statusCode': 502, 'headers': {'Access-Control-Allow-Methods': 'POST,OPTIONS', 'Access-Control-Allow-Origin': '*'}, 'body': '{"success" : false}'}
    event_body = json.loads(event['body'])
    # Timestamp recorded with the unsubscription.  NOTE(review): naive local
    # time of the Lambda runtime — confirm whether UTC was intended.
    date = str(datetime.datetime.now().isoformat())
    if (event_body['cognito_id'] == ''):
        try:
            user_cognito_id = look_up_cognito_id(event_body)
        except Exception as e:
            print(f'Failed to find user Cognito ID - {event_body}, {e} ')
            return error_message
        event_body['cognito_id'] = user_cognito_id
    if (event_body['list'] == ''):
        try:
            unsubscribe_all(date, event_body['cognito_id'])
        except Exception as e:
            print(f'Failed to unsubscribe user - {event_body}, {e} ')
            return error_message
    else:
        try:
            unsubscribe_single_list(date, event_body['cognito_id'], event_body['list'])
        except Exception as e:
            print(f'Failed to unsubscribe user - {event_body}, {e} ')
            return error_message
    return {'statusCode': 200, 'headers': {'Access-Control-Allow-Methods': 'POST,OPTIONS', 'Access-Control-Allow-Origin': '*'}, 'body': '{"success" : true}'}
class OptionSeriesPyramid3dSonificationContexttracksMappingHighpassFrequency(Options):
    """Generated Highcharts option wrapper (sonification highpass frequency mapping).

    NOTE(review): every name below is defined twice — a getter followed by a
    setter of the same name, so at runtime the second def shadows the first.
    These were almost certainly @property / @<name>.setter pairs whose
    decorators were lost; confirm against the code generator.
    """
    def mapFunction(self):
        # Getter: proxy to the Options config store (default None).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store the value as a plain (non-JS) literal.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def get_args():
    """Parse command-line arguments for the debin binary-analysis driver.

    Returns the populated argparse.Namespace.  Exits with an argparse error
    when a required option is missing.
    """
    parser = argparse.ArgumentParser(description='Debin to hack binaries. This script takes an stripped binary as input and output a binary with predicted debug information.')
    parser.add_argument('--binary', dest='binary', type=str, default='', required=True, help='path of the binary you want to analyze.')
    parser.add_argument('--output', dest='output', type=str, default='', required=True, help='path of output binary.')
    parser.add_argument('--bap', dest='bap', type=str, default='', help='path of cached BAP-IR file.')
    parser.add_argument('--elf_modifier', dest='elf_modifier', type=str, default='', required=True, help='path of the library for modifying ELF binaries.')
    # NOTE(review): single-dash long option kept for backward compatibility.
    # Help text fixed: the original read "Setting it to false only will only invoke".
    parser.add_argument('-two_pass', dest='two_pass', action='store_true', default=False, help='whether to use two passes (variable classification and structured prediction). Setting it to false will only invoke structured prediction.')
    parser.add_argument('--fp_model', dest='fp_model', type=str, default='', help='Path of the models for the first pass (variable classification).')
    parser.add_argument('--n2p_url', dest='n2p_url', type=str, default='', required=True, help='URL of n2p server.')
    args = parser.parse_args()
    return args
class ConditionHandler():
    """Maps logic symbols to the Conditions they abbreviate, together with the
    parallel z3 form used for satisfiability reasoning."""

    def __init__(self, condition_map: Optional[Dict[(LogicCondition, ConditionSymbol)]]=None):
        """Initialize from an existing symbol map (or empty); the logic context
        is taken from the first symbol, or freshly generated."""
        self._condition_map: Dict[(LogicCondition, ConditionSymbol)] = (dict() if (condition_map is None) else condition_map)
        self._symbol_counter = 0
        self._logic_context = (next(iter(self._condition_map)).context if self._condition_map else LogicCondition.generate_new_context())

    def __eq__(self, other) -> bool:
        """Handlers are equal when their condition maps are equal."""
        return (isinstance(other, ConditionHandler) and (other._condition_map == self._condition_map))

    def __hash__(self) -> int:
        # Bug fix: dict objects are unhashable, so the previous
        # hash(self._condition_map) raised TypeError on every call.  Hashing
        # the frozenset of symbol keys stays consistent with __eq__
        # (equal maps imply equal key sets, hence equal hashes).
        return hash(frozenset(self._condition_map))

    def __len__(self) -> int:
        return len(self._condition_map)

    def __iter__(self) -> Iterable[LogicCondition]:
        """Iterate over the registered symbols."""
        (yield from self._condition_map)

    def logic_context(self):
        """Return the shared logic context.

        NOTE(review): reads like a @property whose decorator was lost —
        confirm against callers.
        """
        return self._logic_context

    def copy(self) -> ConditionHandler:
        """Copy the handler; conditions are copied, symbols and z3 conditions shared."""
        condition_map = {symbol: ConditionSymbol(condition_symbol.condition.copy(), condition_symbol.symbol, condition_symbol.z3_condition) for (symbol, condition_symbol) in self._condition_map.items()}
        return ConditionHandler(condition_map)

    def get_condition_of(self, symbol: LogicCondition) -> Condition:
        """Return the pseudo condition bound to *symbol*."""
        return self._condition_map[symbol].condition

    def get_z3_condition_of(self, symbol: LogicCondition) -> PseudoLogicCondition:
        """Return the z3 condition bound to *symbol*."""
        return self._condition_map[symbol].z3_condition

    def get_all_symbols(self) -> Set[LogicCondition]:
        """Return the set of all registered symbols."""
        return set(self._condition_map.keys())

    def get_condition_map(self) -> Dict[(LogicCondition, Condition)]:
        """Return {symbol: condition} for every entry."""
        return dict(((symbol, condition_symbol.condition) for (symbol, condition_symbol) in self._condition_map.items()))

    def get_z3_condition_map(self) -> Dict[(LogicCondition, PseudoLogicCondition)]:
        """Return {symbol: z3 condition} for every entry."""
        return dict(((symbol, condition_symbol.z3_condition) for (symbol, condition_symbol) in self._condition_map.items()))

    def get_reverse_z3_condition_map(self) -> Dict[(PseudoLogicCondition, LogicCondition)]:
        """Return {z3 condition: symbol} for every entry."""
        return dict(((condition_symbol.z3_condition, symbol) for (symbol, condition_symbol) in self._condition_map.items()))

    def update_z3_condition_of(self, symbol: LogicCondition, condition: Condition):
        """Re-derive and store the z3 form of *symbol* from *condition*;
        the stored pseudo condition itself is kept unchanged."""
        assert symbol.is_symbol, 'Input must be a symbol!'
        z3_condition = PseudoLogicCondition.initialize_from_condition(condition, self._logic_context)
        pseudo_condition = self.get_condition_of(symbol)
        self._condition_map[symbol] = ConditionSymbol(pseudo_condition, symbol, z3_condition)

    def add_condition(self, condition: Condition) -> LogicCondition:
        """Register *condition*, reusing an existing (possibly negated) symbol
        when an equivalent z3 condition is already present."""
        z3_condition = PseudoLogicCondition.initialize_from_condition(condition, self._logic_context)
        if (symbol := self._condition_already_exists(z3_condition)):
            return symbol
        symbol = self._get_next_symbol()
        condition_symbol = ConditionSymbol(condition, symbol, z3_condition)
        self._condition_map[symbol] = condition_symbol
        return symbol

    def _condition_already_exists(self, z3_condition: PseudoLogicCondition) -> Optional[ConditionSymbol]:
        """Return the (possibly negated) symbol of an equivalent stored condition, else None."""
        for value in self._condition_map.values():
            if value.z3_condition.is_equal_to(z3_condition):
                return value.symbol
            elif value.z3_condition.is_equal_to((~ z3_condition)):
                return (~ value.symbol)

    def _get_next_symbol(self) -> LogicCondition:
        """Create the next fresh symbol x1, x2, ... in the shared context."""
        self._symbol_counter += 1
        return LogicCondition.initialize_symbol(f'x{self._symbol_counter}', self._logic_context)

    def get_true_value(self) -> LogicCondition:
        """Return the constant true condition in this context."""
        return LogicCondition.initialize_true(self._logic_context)

    def get_false_value(self) -> LogicCondition:
        """Return the constant false condition in this context."""
        return LogicCondition.initialize_false(self._logic_context)
def with_analytics(func: Callable) -> Callable:
    """Decorator for click commands: report an analytics event after every
    invocation (unless the user opted out), then propagate the result or
    the exception unchanged."""
    def wrapper_func(ctx: click.Context, *args, **kwargs) -> Any:
        command = ' '.join(filter(None, [ctx.info_name, ctx.invoked_subcommand]))
        error = None
        executed_at = datetime.now(timezone.utc)
        status_code = 0
        try:
            return ctx.invoke(func, ctx, *args, **kwargs)
        except Exception as err:
            # Record the failure type/status, then re-raise for click to handle.
            error = type(err).__name__
            status_code = 1
            raise err
        finally:
            # Runs on success and failure alike; analytics must never break the CLI.
            if (ctx.obj['CONFIG'].user.analytics_opt_out is False):
                event = AnalyticsEvent('cli_command_executed', executed_at, command=command, docker=bool((getenv('RUNNING_IN_DOCKER') == 'TRUE')), error=error, flags=None, resource_counts=None, status_code=status_code)
                try:
                    ctx.meta['ANALYTICS_CLIENT'].send(event)
                except AnalyticsError:
                    pass
    # update_wrapper preserves func's name/docstring on the wrapper.
    return update_wrapper(wrapper_func, func)
class OptionPlotoptionsSeriesSonificationDefaultinstrumentoptionsMappingTremoloSpeed(Options):
    """Generated Highcharts option wrapper (sonification tremolo speed mapping).

    NOTE(review): every name below is defined twice — a getter followed by a
    setter of the same name, so at runtime the second def shadows the first.
    These were almost certainly @property / @<name>.setter pairs whose
    decorators were lost; confirm against the code generator.
    """
    def mapFunction(self):
        # Getter: proxy to the Options config store (default None).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store the value as a plain (non-JS) literal.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
.parametrize('input_features', [None, ['Age', 'Marks'], np.array(['Age', 'Marks'])])  # NOTE(review): truncated decorator — presumably @pytest.mark.parametrize; line is not valid standalone Python, confirm upstream
def test_remove_feature_names_pipe_and_skl_transformer_that_adds_features(df_vartypes, input_features):
    """A pipeline of PolynomialFeatures + a selector must report the surviving
    polynomial feature names, whatever form input_features takes."""
    features_in = ['Age', 'Marks']
    df = df_vartypes[features_in].copy()
    pipe = Pipeline([('poly', PolynomialFeatures()), ('transformer', MockSelector())])
    pipe.fit(df)
    assert (pipe.get_feature_names_out(input_features=input_features) == ['Marks', 'Age^2', 'Age Marks', 'Marks^2'])
def build_backbone(config, model_type):
    """Instantiate the backbone network named by config['name'] for *model_type*.

    Imports are local so only the requested family of backbones is loaded.
    Raises NotImplementedError for unknown model types and AssertionError when
    the requested backbone is not in the whitelist for that type.
    """
    if ((model_type == 'det') or (model_type == 'table')):
        from .det_mobilenet_v3 import MobileNetV3
        from .det_resnet_vd import ResNet
        from .det_resnet_vd_sast import ResNet_SAST
        support_dict = ['MobileNetV3', 'ResNet', 'ResNet_SAST']
    elif ((model_type == 'rec') or (model_type == 'cls')):
        from .rec_mobilenet_v3 import MobileNetV3
        from .rec_resnet_vd import ResNet
        from .rec_resnet_fpn import ResNetFPN
        from .rec_mv1_enhance import MobileNetV1Enhance
        from .rec_nrtr_mtb import MTB
        from .rec_resnet_31 import ResNet31
        from .rec_resnet_aster import ResNet_ASTER
        support_dict = ['MobileNetV1Enhance', 'MobileNetV3', 'ResNet', 'ResNetFPN', 'MTB', 'ResNet31', 'ResNet_ASTER']
    elif (model_type == 'e2e'):
        from .e2e_resnet_vd_pg import ResNet
        support_dict = ['ResNet']
    elif (model_type == 'kie'):
        from .kie_unet_sdmgr import Kie_backbone
        support_dict = ['Kie_backbone']
    elif (model_type == 'table'):
        # NOTE(review): unreachable — 'table' is already consumed by the first
        # branch above, so these table-specific backbones are never selected.
        # Kept as-is to preserve behavior; confirm intent upstream.
        from .table_resnet_vd import ResNet
        from .table_mobilenet_v3 import MobileNetV3
        support_dict = ['ResNet', 'MobileNetV3']
    else:
        raise NotImplementedError
    module_name = config.pop('name')
    assert (module_name in support_dict), Exception('when model type is {}, backbone only support {}'.format(model_type, support_dict))
    # eval() is acceptable only because module_name was just validated against
    # the whitelist above; config supplies the constructor kwargs.
    module_class = eval(module_name)(**config)
    return module_class
class ListContaining(_RichComparison):
    """Rich-comparison matcher: equal to any list that contains *needle*."""

    def __init__(self, needle: AnyType) -> None:
        self.needle = needle
        super().__init__(klass=List)

    def __eq__(self, other: List[AnyType]) -> bool:
        if not super().__eq__(other):
            return False
        return self.needle in other

    def __repr__(self) -> str:
        suffix = f' needle={self.needle}' if self.needle is not None else ''
        return '<{} 0x{:02X}{}>'.format(type(self).__name__, id(self), suffix)
class OptionPlotoptionsAreasplinerangeSonificationContexttracksMappingTremoloSpeed(Options):
    """Generated Highcharts option wrapper (areasplinerange tremolo speed mapping).

    NOTE(review): every name below is defined twice — a getter followed by a
    setter of the same name, so at runtime the second def shadows the first.
    These were almost certainly @property / @<name>.setter pairs whose
    decorators were lost; confirm against the code generator.
    """
    def mapFunction(self):
        # Getter: proxy to the Options config store (default None).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store the value as a plain (non-JS) literal.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_chaining(events, ball1, ball2, ball3, cushion):
    """filter_events applies the given filters conjunctively: only events of
    balls 2/3, after t=3, and of the three listed types survive — in order."""
    filter_result = filter_events(events, by_ball(['2', '3']), by_time(t=3), by_type([EventType.BALL_LINEAR_CUSHION, EventType.SLIDING_ROLLING, EventType.BALL_BALL]))
    assert (filter_result == [sliding_rolling_transition(ball2, 5), ball_ball_collision(ball1, ball3, 7), sliding_rolling_transition(ball3, 9), ball_linear_cushion_collision(ball3, cushion, 12)])
def parse_url(url):
    """Split *url* into protocol, netloc and path.

    Supports 'doi:' pseudo-URLs in addition to anything urlsplit handles;
    scheme-less URLs default to the 'file' protocol.

    Returns a dict with keys 'protocol', 'netloc' and 'path'.
    Raises ValueError for the malformed 'doi://' form.
    """
    if url.startswith('doi://'):
        raise ValueError(f"Invalid DOI link '{url}'. You must not use '//' after 'doi:'.")
    if url.startswith('doi:'):
        protocol = 'doi'
        parts = url[4:].split('/')
        # Zenodo DOIs keep the record segment inside the netloc
        # (doi:10.5281/zenodo.123/file -> netloc '10.5281/zenodo.123').
        if (len(parts) > 1 and 'zenodo' in parts[1].lower()):
            netloc = '/'.join(parts[:2])
            path = ('/' + '/'.join(parts[2:]))
        else:
            # Everything except the last segment forms the netloc.  The length
            # guard above also prevents an IndexError (parts[1]) for
            # single-segment DOIs, which previously crashed.
            netloc = '/'.join(parts[:(- 1)])
            path = ('/' + parts[(- 1)])
    else:
        parsed_url = urlsplit(url)
        protocol = (parsed_url.scheme or 'file')
        netloc = parsed_url.netloc
        path = parsed_url.path
    return {'protocol': protocol, 'netloc': netloc, 'path': path}
def _assert_route_tokens_equal(route: List[Swap], expected_token_in_out_pairs: List[Tuple[(str, str)]]) -> None:
    """Assert the route's swaps match the expected (token_in, token_out) pairs, in order."""
    assert len(route) == len(expected_token_in_out_pairs)
    for swap, (token_in, token_out) in zip(route, expected_token_in_out_pairs):
        assert token_in == swap.token_in_address
        assert token_out == swap.token_out_address
def update_node_exporter(release_data):
    """Download, install and restart the latest Node Exporter release.

    *release_data* must provide 'asset_name' (a .tar.gz archive) and
    'download_url'.  Returns True on success, False when the download
    response status is not 200.
    """
    print('Updating Node Exporter...')
    print(f"Downloading latest release archive {release_data['asset_name']} ...")
    with urlopen(release_data['download_url']) as response:
        if (response.status == 200):
            with TemporaryDirectory() as tmpdir:
                download_target = ((str(tmpdir) + '/') + release_data['asset_name'])
                with open(download_target, 'wb') as output_target:
                    shutil.copyfileobj(response, output_target)
                print(f"Extracting archive {release_data['asset_name']} ...")
                # tar runs with cwd=tmpdir, where the archive was just written.
                subprocess.run(['tar', 'xvf', release_data['asset_name']], cwd=str(tmpdir))
                # [:-7] strips the '.tar.gz' suffix to get the extracted dir name.
                extracted_directory = ((str(tmpdir) + '/') + release_data['asset_name'][:(- 7)])
                print('Stopping Node Exporter service...')
                subprocess.run(['sudo', 'systemctl', 'stop', NODE_EXPORTER_SERVICE_NAME])
                print('Updating installed Node Exporter binary...')
                extracted_node_exporter_path = ((extracted_directory + '/') + 'node_exporter')
                installed_node_exporter_path = (NODE_EXPORTER_INSTALLED_PATH + 'node_exporter')
                subprocess.run(['sudo', 'cp', extracted_node_exporter_path, NODE_EXPORTER_INSTALLED_PATH])
                subprocess.run(['sudo', 'chown', '-R', NODE_EXPORTER_USER_GROUP, installed_node_exporter_path])
                print('Restarting Node Exporter service...')
                subprocess.run(['sudo', 'systemctl', 'start', NODE_EXPORTER_SERVICE_NAME])
        else:
            print(f'Unexpected response status from Github: {response.status}')
            return False
    return True
class Event():
    """A GitHub event parsed from its JSON payload."""

    def __init__(self, json_payload: dict = None):
        """Populate event fields from *json_payload*; missing keys become None."""
        json_payload = json_payload or dict()
        self.public = json_payload.get('public')
        self.payload = json_payload.get('payload')
        repo_data = json_payload.get('repo', dict())
        self.repository = Repository(id=repo_data.get('id'), name=repo_data.get('name'), url=repo_data.get('url'))
        actor_data = json_payload.get('actor', dict())
        self.actor = Actor(id=actor_data.get('id'), login=actor_data.get('login'), display_login=actor_data.get('display_login'), url=actor_data.get('url'))
        org_data = json_payload.get('org', dict())
        self.organization = Organization(id=org_data.get('id'), login=org_data.get('login'), url=org_data.get('url'))
        self.created_at = parse_date_field(json_payload.get('created_at'))
        self.id = json_payload.get('id')
class TestEnumClassMissing():
    """Spock config parsing: a YAML enum value with no matching class must fail."""

    def test_enum_class_missing(self, monkeypatch):
        with monkeypatch.context() as m:
            # Point the builder at a config whose enum value has no backing class.
            m.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test_wrong_class_enum.yaml'])
            with pytest.raises(_SpockFieldHandlerError):
                ConfigArgBuilder(*all_configs, desc='Test Builder')
def patch_connection_configs(db: Session, configs: conlist(CreateConnectionConfigurationWithSecrets, max_items=50), system: Optional[System]=None) -> BulkPutConnectionConfiguration:
    """Bulk create/update up to 50 connection configurations.

    SaaS connectors get special handling: existing ones only re-validate their
    secrets, brand new ones are instantiated from their connector template and
    persisted immediately.  Per-item failures are collected into the response
    instead of aborting the whole batch.
    """
    created_or_updated: List[ConnectionConfigurationResponse] = []
    failed: List[BulkUpdateFailed] = []
    logger.info('Starting bulk upsert for {} connection configuration(s)', len(configs))
    for config in configs:
        existing_connection_config = ConnectionConfig.get_by(db, field='key', value=config.key)
        if (config.connection_type == 'saas'):
            if config.secrets:
                if existing_connection_config:
                    config.secrets = validate_secrets(db, config.secrets, existing_connection_config)
                else:
                    # New SaaS connector: build it from its registry template.
                    if (not config.saas_connector_type):
                        raise HTTPException(status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail='saas_connector_type is missing')
                    connector_template = ConnectorRegistry.get_connector_template(config.saas_connector_type)
                    if (not connector_template):
                        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"SaaS connector type '{config.saas_connector_type}' is not yet available in Fides. For a list of available SaaS connectors, refer to {CONNECTION_TYPES}.")
                    try:
                        template_values = SaasConnectionTemplateValues(name=config.name, key=config.key, description=config.description, secrets=config.secrets, instance_key=config.key)
                        if system:
                            connection_config = create_connection_config_from_template_no_save(db, connector_template, template_values, system_id=system.id)
                        else:
                            connection_config = create_connection_config_from_template_no_save(db, connector_template, template_values)
                    except KeyOrNameAlreadyExists as exc:
                        raise HTTPException(status_code=HTTP_400_BAD_REQUEST, detail=exc.args[0])
                    connection_config.secrets = validate_secrets(db, template_values.secrets, connection_config).dict()
                    connection_config.save(db=db)
                    created_or_updated.append(ConnectionConfigurationResponse(**connection_config.__dict__))
                    # Template path already persisted this config — skip the generic path.
                    continue
        orig_data = config.dict().copy()
        config_dict = config.dict()
        config_dict.pop('saas_connector_type', None)
        if existing_connection_config:
            # Merge over the existing row, dropping empty values while keeping
            # explicit booleans such as False.
            config_dict = {key: value for (key, value) in {**existing_connection_config.__dict__, **config.dict()}.items() if (isinstance(value, bool) or value)}
        if system:
            config_dict['system_id'] = system.id
        try:
            connection_config = ConnectionConfig.create_or_update(db, data=config_dict, check_name=False)
            created_or_updated.append(ConnectionConfigurationResponse(**connection_config.__dict__))
        except KeyOrNameAlreadyExists as exc:
            logger.warning("Create/update failed for connection config with key '{}': {}", config.key, exc)
            # Never echo secrets back in an error payload.
            orig_data.pop('secrets', None)
            orig_data.pop('saas_connector_type', None)
            failed.append(BulkUpdateFailed(message=exc.args[0], data=orig_data))
        except Exception as e:
            logger.warning("Create/update failed for connection config with key '{}'.", config.key)
            logger.error(e)
            orig_data.pop('secrets', None)
            orig_data.pop('saas_connector_type', None)
            failed.append(BulkUpdateFailed(message='This connection configuration could not be added.', data=orig_data))
    requeue_requires_input_requests(db)
    return BulkPutConnectionConfiguration(succeeded=created_or_updated, failed=failed)
.parametrize('alsa_manager', [{'theme_path': '/no/path', 'mode': 'icon'}], indirect=True)  # NOTE(review): truncated decorator — presumably @pytest.mark.parametrize; not valid standalone Python, confirm upstream
.flaky(reruns=5)  # NOTE(review): truncated decorator — presumably @pytest.mark.flaky; confirm upstream
def test_no_icons(alsa_manager, logger):
    """A nonexistent theme_path must produce the 'Could not find volume icons' setup log record."""
    (ignore_exceptions=(AssertionError,))  # NOTE(review): truncated decorator on the inner function (likely a retry/wait helper); confirm upstream
    def wait_for_failure():
        assert any((('Could not find volume icons at /no/path.' in r.msg) for r in logger.get_records('setup')))
    wait_for_failure()
def run50timespersecond():
    """Run the 20 ms (50 Hz) timer callback of every enabled task.

    Each qualifying task's timer_fifty_per_second runs in its own daemon
    thread; all started threads are then joined with a 1 s timeout.
    Always returns 0.
    """
    workers = []
    for task in Settings.Tasks:
        # Task slots may hold boolean placeholders instead of task objects.
        if not task or isinstance(task, bool):
            continue
        try:
            if task.enabled and task.timer20ms:
                worker = threading.Thread(target=task.timer_fifty_per_second)
                worker.daemon = True
                workers.append(worker)
                worker.start()
        except Exception:
            # Best-effort scheduling: a broken task must not stop the others.
            # (Was a bare 'except:', which also swallowed KeyboardInterrupt.)
            pass
    for worker in workers:
        worker.join(1)
    return 0
def _unpack_v4(message: bytes) -> Tuple[(datatypes.PublicKey, int, Tuple[(Any, ...)], Hash32)]:
    """Unpack a discovery-v4 datagram into (sender pubkey, cmd id, payload, hash).

    Wire layout: [MAC | signature | command id byte | RLP payload], where the
    MAC is the keccak of everything after itself.  Raises WrongMAC when the
    leading hash does not match the packet body.
    """
    message_hash = Hash32(message[:MAC_SIZE])
    if (message_hash != keccak(message[MAC_SIZE:])):
        raise WrongMAC('Wrong msg mac')
    signature = eth_keys.keys.Signature(message[MAC_SIZE:HEAD_SIZE])
    signed_data = message[HEAD_SIZE:]
    # The sender's identity is recovered from the signature, not transmitted.
    remote_pubkey = signature.recover_public_key_from_msg(signed_data)
    cmd_id = message[HEAD_SIZE]
    # strict=False tolerates trailing bytes after the RLP payload.
    payload = tuple(rlp.decode(message[(HEAD_SIZE + 1):], strict=False))
    return (remote_pubkey, cmd_id, payload, message_hash)
class Test(unittest.TestCase):
    """Integration tests for hmm_mapper.py against a small bacterial HMM DB.

    Every test follows the same recipe — clean the output dir, run the mapper,
    assert success, compare observed hits against a fixture, clean up — so the
    shared steps live in _run_mapper_test; each test method only supplies its
    command line, expected-hits fixture and comparison function.
    """

    _IN_FASTA = 'tests/fixtures/test_queries.fa'
    _HMM_DB = 'tests/fixtures/hmmer_custom_dbs/bact.hmm'
    _OUTDIR = 'tests/integration/out'
    _OUTPREFIX = 'test'
    _EXP_DIR = 'tests/fixtures/hmmer_expected_output/'

    def _run_mapper_test(self, cmd, exp_hits_name, check_fn):
        """Run *cmd*, assert success, and compare the observed hits file against
        the *exp_hits_name* fixture using *check_fn*."""
        obs_hmm_hits = os.path.join(self._OUTDIR, (self._OUTPREFIX + HMM_HITS_SUFFIX))
        exp_hmm_hits = os.path.join(self._EXP_DIR, exp_hits_name)
        if os.path.isdir(self._OUTDIR):
            shutil.rmtree(self._OUTDIR)
        os.mkdir(self._OUTDIR)
        (st, out, err) = run(cmd)
        if (st != 0):
            # Surface the mapper's own output to ease debugging of failures.
            print(out.decode('utf-8'))
            print(err.decode('utf-8'))
        assert (st == 0)
        check_fn(obs_hmm_hits, exp_hmm_hits)
        if os.path.isdir(self._OUTDIR):
            shutil.rmtree(self._OUTDIR)

    def test_hmm_mapper(self):
        """Default mode: sequence queries against an HMM database."""
        cmd = f'./hmm_mapper.py -i {self._IN_FASTA} -d {self._HMM_DB} --output_dir {self._OUTDIR} -o {self._OUTPREFIX}'
        self._run_mapper_test(cmd, 'bact.emapper.hmm_hits', check_hmm_hits)

    def test_usemem(self):
        """Default mode with the database loaded into memory (--usemem)."""
        cmd = f'./hmm_mapper.py --usemem -i {self._IN_FASTA} -d {self._HMM_DB} --output_dir {self._OUTDIR} -o {self._OUTPREFIX}'
        self._run_mapper_test(cmd, 'bact.emapper.hmm_hits', check_hmm_query_hit)

    def test_hmmsearch(self):
        """hmmsearch mode: HMM queries against a sequence database."""
        cmd = f'./hmm_mapper.py --qtype hmm -i {self._HMM_DB} --dbtype seqdb -d {self._IN_FASTA} --output_dir {self._OUTDIR} -o {self._OUTPREFIX}'
        self._run_mapper_test(cmd, 'bact.hmmsearch.hmm_hits', check_hmm_hits)

    def test_hmmsearch_usemem(self):
        """hmmsearch mode with --usemem."""
        cmd = f'./hmm_mapper.py --usemem --qtype hmm -i {self._HMM_DB} --dbtype seqdb -d {self._IN_FASTA} --output_dir {self._OUTDIR} -o {self._OUTPREFIX}'
        self._run_mapper_test(cmd, 'bact.hmmsearch.hmm_hits', check_hmm_query_hit)

    def test_phmmer(self):
        """phmmer mode: sequence queries against a sequence database."""
        cmd = f'./hmm_mapper.py --qtype seq -i {self._IN_FASTA} --dbtype seqdb -d {self._IN_FASTA} --output_dir {self._OUTDIR} -o {self._OUTPREFIX}'
        self._run_mapper_test(cmd, 'bact.phmmer.hmm_hits', check_hmm_hits)

    def test_phmmer_usemem(self):
        """phmmer mode with --usemem."""
        cmd = f'./hmm_mapper.py --usemem --qtype seq -i {self._IN_FASTA} --dbtype seqdb -d {self._IN_FASTA} --output_dir {self._OUTDIR} -o {self._OUTPREFIX}'
        self._run_mapper_test(cmd, 'bact.phmmer.hmm_hits', check_hmm_query_hit)
class IFLDataProvider(ABC):
    """Interface for federated-learning data providers.

    NOTE(review): the method bodies and decorators (presumably
    @abstractmethod) are missing from this excerpt; the docstring bodies
    below only make the stubs syntactically valid — confirm upstream.
    """
    def train_user_ids(self) -> List[int]:
        """Return the ids of all training users."""
    def num_train_users(self) -> int:
        """Return the number of training users."""
    def get_train_user(self, user_index: int) -> IFLUserData:
        """Return the training user at *user_index*."""
    def train_users(self) -> Iterable[IFLUserData]:
        """Iterate over all training users."""
    def eval_users(self) -> Iterable[IFLUserData]:
        """Iterate over all evaluation users."""
    def test_users(self) -> Iterable[IFLUserData]:
        """Iterate over all test users."""
def test_custom_media_handler():
    """A custom media handler registered for its content type must be used to
    serialize resp.media into the response body."""
    class PythonRepresentation(media.BaseHandler):
        # NOTE(review): no 'self' parameter — the first argument 'media' plays
        # the instance role here, which looks inconsistent with the asserted
        # body below; confirm the intended signature upstream.
        async def serialize_async(media, content_type):
            return repr(media).encode()
    class TestResource():
        async def on_get(self, req, resp):
            resp.content_type = 'text/x-python-repr'
            resp.media = {'something': True}
            body = (await resp.render_body())
            assert (body == b"{'something': True}")
    client = create_client(TestResource(), handlers={'text/x-python-repr': PythonRepresentation()})
    client.simulate_get('/')
class PDFResourceManager():
    """Repository of shared PDF resources (fonts and CMaps).

    Fonts are expensive to parse, so instances are cached per object id and
    shared across pages when *caching* is enabled.
    """
    debug = False

    def __init__(self, caching=True):
        self.caching = caching
        self._cached_fonts = {}  # maps objid -> font object
        return

    def get_procset(self, procs):
        """Accept a ProcSet resource; currently a no-op for every entry."""
        for proc in procs:
            if (proc is LITERAL_PDF):
                pass
            elif (proc is LITERAL_TEXT):
                pass
            else:
                pass
        return

    def get_cmap(self, cmapname, strict=False):
        """Look up a predefined CMap; unknown names yield an empty CMap
        unless *strict* is set, in which case the lookup error propagates."""
        try:
            return CMapDB.get_cmap(cmapname)
        except CMapDB.CMapNotFound:
            if strict:
                raise
            return CMap()

    def get_font(self, objid, spec):
        """Create (or fetch from cache) the font object described by *spec*.

        Dispatches on /Subtype; Type0 composite fonts recurse into their first
        descendant font.  Unknown subtypes fall back to Type1 unless STRICT
        parsing is enabled.
        """
        if (objid and (objid in self._cached_fonts)):
            font = self._cached_fonts[objid]
        else:
            if self.debug:
                logging.info(('get_font: create: objid=%r, spec=%r' % (objid, spec)))
            if STRICT:
                if (spec['Type'] is not LITERAL_FONT):
                    raise PDFFontError('Type is not /Font')
            if ('Subtype' in spec):
                subtype = literal_name(spec['Subtype'])
            else:
                if STRICT:
                    raise PDFFontError('Font Subtype is not specified.')
                subtype = 'Type1'  # permissive default for lenient parsing
            if (subtype in ('Type1', 'MMType1')):
                font = PDFType1Font(self, spec)
            elif (subtype == 'TrueType'):
                font = PDFTrueTypeFont(self, spec)
            elif (subtype == 'Type3'):
                font = PDFType3Font(self, spec)
            elif (subtype in ('CIDFontType0', 'CIDFontType2')):
                font = PDFCIDFont(self, spec)
            elif (subtype == 'Type0'):
                # Composite font: build from the first descendant font,
                # carrying over Encoding/ToUnicode from the parent spec.
                dfonts = list_value(spec['DescendantFonts'])
                assert dfonts
                subspec = dict_value(dfonts[0]).copy()
                for k in ('Encoding', 'ToUnicode'):
                    if (k in spec):
                        subspec[k] = resolve1(spec[k])
                font = self.get_font(None, subspec)
            else:
                if STRICT:
                    raise PDFFontError(('Invalid Font spec: %r' % spec))
                font = PDFType1Font(self, spec)  # last-resort fallback
            if (objid and self.caching):
                self._cached_fonts[objid] = font
        return font
def main(page: ft.Page):
    """Flet app entry point: render the module-level `table` markdown with a
    monospace code font and clickable links."""
    page.scroll = 'auto'
    # The font file must be resolvable by the Flet asset loader.
    page.fonts = {'Roboto Mono': 'RobotoMono-VariableFont_wght.ttf'}
    page.add(ft.Markdown(table, selectable=True, extension_set='gitHubWeb', code_theme='atom-one-dark', code_style=ft.TextStyle(font_family='Roboto Mono'), on_tap_link=(lambda e: page.launch_url(e.data))))
def replace_dependency(app: FastAPI, dependency: Callable, new_dependency: Callable):
    """Swap every occurrence of *dependency* in the app's security dependency
    tree for *new_dependency*, preserving each original sub-dependant's
    parameter name, path, cache setting and security scopes.

    NOTE(review): relies on the module-level ``_security_deps`` registry;
    ``collect_security_dependencies`` presumably populates it with
    (dependant, index) pairs — confirm against its definition.
    """
    if (dependency not in _security_deps):
        collect_security_dependencies(app, dependency)
    for (dep, i) in _security_deps[dependency]:
        # Rebuild the sub-dependant in place so FastAPI resolves the new
        # callable while the rest of the dependant graph stays untouched.
        old_dep = dep.dependencies[i]
        dep.dependencies[i] = get_param_sub_dependant(param_name=(old_dep.name or ''), depends=Depends(new_dependency, use_cache=old_dep.use_cache), path=(old_dep.path or ''), security_scopes=old_dep.security_scopes)
@_frequency(timedelta(days=1))
def fetch_consumption(zone_key: ZoneKey, session: (Session | None)=None, target_datetime: (datetime | None)=None, logger: Logger=getLogger(__name__)) -> list[dict]:
    """Return total electricity consumption events for *zone_key* sourced
    from demanda.ree.es.

    BUG FIX: the original had ``_frequency(timedelta(days=1))`` as a bare
    statement whose result was discarded — it was clearly meant to decorate
    this function; the ``@`` has been restored.

    Raises whatever ``check_valid_parameters`` raises for unsupported input.
    """
    check_valid_parameters(zone_key, session, target_datetime)
    ses = session or Session()
    data = fetch_and_preprocess_data(zone_key, ses, logger, target_datetime)
    consumption = TotalConsumptionList(logger)
    for event in data:
        consumption.append(zoneKey=zone_key, datetime=event['ts'], consumption=event['dem'], source='demanda.ree.es')
    return consumption.to_list()
class Generator(lg.Node):
    """Node that streams random noise on two topics.

    NOTE(review): the bare ``(OUTPUT)`` / ``(OUTPUT_2)`` expression
    statements below look like publisher decorators (e.g.
    ``@lg.publisher(OUTPUT)``) whose ``@`` prefix was lost; as written they
    are no-op statements — confirm against the original source.
    """
    OUTPUT = lg.Topic(AppendMessage)
    OUTPUT_2 = lg.Topic(UpdateMessage)
    config: GeneratorConfig
    (OUTPUT)
    async def generate_noise(self) -> lg.AsyncPublisher:
        # Emit one scalar sample in [0, 100) per tick, paced by sample_rate.
        while True:
            (yield (self.OUTPUT, AppendMessage(timestamp=time.time(), data=(random() * 100))))
            (await asyncio.sleep((1 / self.config.sample_rate)))
    (OUTPUT_2)
    async def generate_update_noise(self) -> lg.AsyncPublisher:
        # Emit a random vector over the configured feature domain every 0.5 s.
        while True:
            (yield (self.OUTPUT_2, UpdateMessage(domain=np.arange(self.config.num_features), range=np.random.rand(self.config.num_features))))
            (await asyncio.sleep(0.5))
def extractLazycattlBlogspotCom(item):
    """Parse a lazycattl.blogspot.com feed *item* into a release message.

    Returns None for previews or items without a chapter/volume number,
    False when no known tag matches, otherwise the built release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, release name, translation type)
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionPlotoptionsArearangeSonificationTracksMappingHighpass(Options):
    """Highpass-filter mapping options for arearange sonification tracks."""

    def frequency(self) -> 'OptionPlotoptionsArearangeSonificationTracksMappingHighpassFrequency':
        """Sub-configuration controlling the highpass cutoff frequency."""
        sub_cls = OptionPlotoptionsArearangeSonificationTracksMappingHighpassFrequency
        return self._config_sub_data('frequency', sub_cls)

    def resonance(self) -> 'OptionPlotoptionsArearangeSonificationTracksMappingHighpassResonance':
        """Sub-configuration controlling the highpass resonance."""
        sub_cls = OptionPlotoptionsArearangeSonificationTracksMappingHighpassResonance
        return self._config_sub_data('resonance', sub_cls)
class Challenge():
    """A named challenge with optional parameters, metadata and a task schedule.

    Equality and hashing cover name, description, default, selected,
    auto_generated, parameters, meta_data and schedule; ``user_info`` and
    ``serverless_info`` are deliberately excluded.
    """

    def __init__(self, name, description=None, user_info=None, default=False, selected=False, auto_generated=False, parameters=None, meta_data=None, schedule=None):
        self.name = name
        # Fresh containers per instance (no shared mutable defaults).
        self.parameters = (parameters if parameters else {})
        self.meta_data = (meta_data if meta_data else {})
        self.description = description
        self.user_info = user_info
        self.serverless_info = []
        self.default = default
        self.selected = selected
        self.auto_generated = auto_generated
        self.schedule = (schedule if schedule else [])

    def prepend_tasks(self, tasks):
        """Insert *tasks* (a list) in front of the current schedule."""
        self.schedule = (tasks + self.schedule)

    def remove_task(self, task):
        """Remove the first occurrence of *task*; raises ValueError if absent."""
        self.schedule.remove(task)

    def __str__(self):
        return self.name

    def __repr__(self):
        # One "attr = [value]" entry per instance attribute, comma-joined.
        return ', '.join('%s = [%s]' % (prop, repr(value)) for (prop, value) in vars(self).items())

    def __hash__(self):
        # BUG FIX: the previous implementation XOR-ed hash(self.parameters),
        # hash(self.meta_data) and hash(self.schedule) directly, which always
        # raised TypeError because dicts and lists are unhashable. Convert
        # them to immutable hashable forms first, over the same fields that
        # __eq__ compares.
        return hash((self.name, self.description, self.default, self.selected, self.auto_generated, tuple(sorted(self.parameters.items())), tuple(sorted(self.meta_data.items())), tuple(self.schedule)))

    def __eq__(self, othr):
        return (isinstance(othr, type(self)) and ((self.name, self.description, self.default, self.selected, self.auto_generated, self.parameters, self.meta_data, self.schedule) == (othr.name, othr.description, othr.default, othr.selected, othr.auto_generated, othr.parameters, othr.meta_data, othr.schedule)))
def _to_pitch(notation: str) -> Pitch:
    """Convert scientific pitch notation (e.g. 'C#4', 'B-2') to a pitch number.

    Layout: first character = note letter, last character = octave digit,
    everything in between = accidental ('' / '#' / '##' / '-' / '--').
    """
    base = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}[notation[0].upper()]
    shift = {'': 0, '#': 1, '##': 2, '-': -1, '--': -2}[notation[1:-1]]
    # Octave 0 starts at pitch 12, hence the +1 offset.
    octave = int(notation[-1]) + 1
    return base + (12 * octave) + shift
class NetworkServices():
    """Singleton catalogue of well-known network services.

    Parses /etc/services (or /usr/etc/services as a fallback) into parallel
    lists: human-readable service strings and their port numbers.
    """
    __instance = None
    def instance():
        # NOTE(review): no @staticmethod and no `self` parameter — this only
        # works when called as NetworkServices.instance() on the class
        # itself, never on an instance.
        if (NetworkServices.__instance == None):
            NetworkServices.__instance = NetworkServices()
        return NetworkServices.__instance
    # NOTE(review): class-level mutable lists shared by all instances —
    # running __init__ more than once appends duplicate entries. Safe only
    # because construction goes through the instance() singleton above.
    srv_array = []
    ports_list = []
    def __init__(self):
        etcServicesPath = '/etc/services'
        # Some distributions ship the services database under /usr/etc.
        if ((not os.path.isfile(etcServicesPath)) and os.path.isfile('/usr/etc/services')):
            etcServicesPath = '/usr/etc/services'
        try:
            etcServices = open(etcServicesPath)
            for line in etcServices:
                # Skip full-line comments.
                if (line[0] == '#'):
                    continue
                # Matches e.g. "ssh \t 22/tcp  comment" -> (name, sep, port, proto, rest).
                g = re.search('([a-zA-Z0-9\\-]+)( |\\t)+([0-9]+)\\/([a-zA-Z0-9\\-]+)(.*)\\n', line)
                if g:
                    self.srv_array.append('{0}/{1} {2}'.format(g.group(1), g.group(3), ('' if ((len(g.groups()) > 3) and (g.group(4) == '')) else '({0})'.format(g.group(4).replace('\t', '')))))
                    self.ports_list.append(g.group(3))
            # WireGuard is commonly absent from /etc/services; add it manually.
            self.srv_array.append('wireguard/51820 WireGuard VPN')
            self.ports_list.append('51820')
        except Exception as e:
            # Best-effort: an unreadable file leaves the lists empty.
            print('Error loading {0}: {1}'.format(etcServicesPath, e))
    def to_array(self):
        # Full list of "name/port (comment)" strings.
        return self.srv_array
    def service_by_index(self, idx):
        return self.srv_array[idx]
    def service_by_name(self, name):
        # Index of an exact service string; raises ValueError if absent.
        return self.srv_array.index(name)
    def port_by_index(self, idx):
        return self.ports_list[idx]
    def index_by_port(self, port):
        # Ports are stored as strings, so normalize the argument.
        return self.ports_list.index(str(port))
class DcGroupView(APIView):
    """API view for listing and attaching/detaching user groups (Roles)
    to/from a datacenter.

    NOTE(review): semantics of Role/Dc relations below are inferred from the
    visible calls only.
    """
    serializer = GroupSerializer
    order_by_default = order_by_fields = ('name',)

    def __init__(self, request, name, data):
        super(DcGroupView, self).__init__(request)
        self.data = data
        self.name = name
        self.dc = request.dc
        if name:
            attrs = {'name': name}
            # For non-POST requests the group must already be attached to this
            # datacenter; POST may attach a group that is not yet related.
            if (request.method != 'POST'):
                attrs['dc'] = request.dc
            roles = get_object(request, Role, attrs, sr=('dc_bound',), exists_ok=True, noexists_fail=True)
        else:
            # No name given: list all groups of this datacenter.
            roles = self.dc.roles.all().order_by(*self.order_by)
            if (self.full or self.extended):
                roles = roles.select_related('dc_bound').prefetch_related('permissions', 'user_set')
        self.role = roles

    def get(self, many=False):
        """Serialize one group or the whole queryset (when many=True)."""
        return self._get(self.role, many=many, field_name='name')

    def _remove_dc_binding(self, task_id):
        # A dc-bound group attached to another DC loses its binding.
        if self.role.dc_bound:
            remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, self.role, user=self.request.user)

    def _remove_user_dc_binding(self, task_id):
        # Users bound to a different DC than this one lose their binding too.
        for user in self.role.user_set.filter(dc_bound__isnull=False).exclude(dc_bound=self.dc):
            remove_user_dc_binding(task_id, user)

    def _update_affected_users(self, detach=False):
        """Invalidate cached admin ids and, on detach, reset users whose
        default DC was the one being detached (staff users excluded)."""
        User.clear_dc_admin_ids(self.dc)
        if (detach and (not self.dc.is_default())):
            for user in self.role.user_set.select_related('default_dc').filter(default_dc=self.dc).exclude(is_staff=True):
                user.reset_current_dc()

    def post(self):
        """Attach the group to this datacenter (201 on success)."""
        (dc, group) = (self.dc, self.role)
        if group.dc_set.filter(id=dc.id).exists():
            raise ObjectAlreadyExists(model=Role)
        ser = self.serializer(self.request, group)
        group.dc_set.add(dc)
        res = SuccessTaskResponse(self.request, ser.data, obj=group, status=status.HTTP_201_CREATED, detail_dict=ser.detail_dict(), msg=LOG_GROUP_ATTACH)
        task_id = res.data.get('task_id')
        # Signal listeners only after the DB transaction commits.
        connection.on_commit((lambda : group_relationship_changed.send(task_id, group_name=group.name, dc_name=dc.name)))
        self._remove_dc_binding(task_id)
        self._remove_user_dc_binding(task_id)
        self._update_affected_users()
        return res

    def delete(self):
        """Detach the group from this datacenter."""
        (dc, group) = (self.dc, self.role)
        if (not group.dc_set.filter(id=dc.id).exists()):
            raise ObjectNotFound(model=Role)
        ser = self.serializer(self.request, group)
        group.dc_set.remove(self.request.dc)
        res = SuccessTaskResponse(self.request, None, obj=group, detail_dict=ser.detail_dict(), msg=LOG_GROUP_DETACH)
        task_id = res.data.get('task_id')
        # Signal listeners only after the DB transaction commits.
        connection.on_commit((lambda : group_relationship_changed.send(task_id, group_name=group.name, dc_name=dc.name)))
        self._remove_dc_binding(task_id)
        self._update_affected_users(detach=True)
        return res
class DBSupportTest(TestCase):
    """Unit tests for DBID resolution semantics."""

    def test_dbid_basic(self) -> None:
        # A foreign key wrapping a primary key resolves to the same value.
        pk = DBID()
        fk = DBID(pk)
        pk.resolve(42)
        self.assertEqual(pk.resolved(), 42)
        self.assertEqual(fk.resolved(), 42)

    def test_dbid_reassign(self) -> None:
        # Repeated resolve() calls: the last value wins.
        pk = DBID()
        for value in (1, 2, 42):
            pk.resolve(value)
        self.assertEqual(pk.resolved(), 42)

    def test_dbid_reassign_after_resolved(self) -> None:
        # Re-resolving is allowed even after the value has been read.
        pk = DBID()
        pk.resolve(1)
        self.assertEqual(pk.resolved(), 1)
        pk.resolve(42)
        self.assertEqual(pk.resolved(), 42)

    def test_dbid_resolved_to_none(self) -> None:
        # An unresolved DBID reads back as None.
        pk = DBID()
        self.assertEqual(None, pk.resolved())
class AclAdmin(admin.ModelAdmin):
    """Django admin for MQTT ACL entries, with a custom password-change view."""
    search_fields = ('topic__name',)
    list_filter = ('acc', 'allow')
    ordering = ('topic',)
    list_display = ('topic', 'allow', 'acc', 'get_password')
    change_password_form = AdminPasswordChangeForm
    form = ACLChangeForm

    def get_password(self, obj):
        # Shown in list_display: whether a password is set, never the value.
        return (_('yes') if obj.password else _('no'))
    get_password.short_description = 'password'

    def get_urls(self):
        """Prepend the per-object password-change URL to the stock admin URLs."""
        return ([url('^(.+)/password/$', self.admin_site.admin_view(self.user_change_password), name='django_mqtt_acl_password_change')] + super(AclAdmin, self).get_urls())
    # NOTE(review): the bare `_post_parameters_m` expression below looks like
    # a decorator (e.g. `@sensitive_post_parameters_m`) whose '@' was lost in
    # extraction; as written it is a no-op statement — confirm upstream.
    _post_parameters_m
    def user_change_password(self, request, object_id, form_url=''):
        """Admin view: change the password on a single ACL object.

        GET renders the change form; valid POST saves, logs the change and
        redirects back to the object's change page.
        """
        if (not self.has_change_permission(request)):
            raise PermissionDenied
        acl = self.get_object(request, unquote(object_id))
        if (acl is None):
            raise Http404((_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_text(self.model._meta.verbose_name), 'key': escape(object_id)}))
        if (request.method == 'POST'):
            form = self.change_password_form(acl, request.POST)
            if form.is_valid():
                form.save()
                # Record the change in the admin log before redirecting.
                change_message = self.construct_change_message(request, form, None)
                self.log_change(request, acl, change_message)
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                return HttpResponseRedirect(reverse(('%s:%s_%s_change' % (self.admin_site.name, acl._meta.app_label, acl._meta.model_name)), args=(acl.pk,)))
        else:
            form = self.change_password_form(acl)
        # Render the (possibly invalid) form with standard admin chrome.
        fieldsets = [(None, {'fields': list(form.base_fields)})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        context = {'title': (_('Change password: %s') % escape(acl)), 'adminForm': adminForm, 'form_url': form_url, 'form': form, 'is_popup': ((IS_POPUP_VAR in request.POST) or (IS_POPUP_VAR in request.GET)), 'add': True, 'change': False, 'has_delete_permission': False, 'has_change_permission': True, 'has_absolute_url': False, 'opts': self.model._meta, 'original': acl, 'save_as': False, 'show_save': True}
        context.update(self.admin_site.each_context(request))
        request.current_app = self.admin_site.name
        return TemplateResponse(request, 'admin/django_mqtt/acl/change_password.html', context)
def main(args):
    """Round-trip each font file given on the command line, appending any
    failures to report.txt.

    args: raw command-line arguments (getopt options first, then file names).
    Calls usage() (which is expected to exit) on bad options or no files.
    """
    try:
        (rawOptions, files) = getopt.getopt(args, 'it:x:')
    except getopt.GetoptError:
        usage()
    if (not files):
        usage()
    # Append mode: accumulate failure reports across runs.
    with open('report.txt', 'a+') as report:
        options = ttx.Options(rawOptions, len(files))
        for ttFile in files:
            try:
                roundTrip(ttFile, options, report)
            except KeyboardInterrupt:
                print('(Cancelled)')
                break
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/GeneratorExit. Log the failure to stdout and to
                # the report, then continue with the next file.
                print('*** round tripping aborted ***')
                traceback.print_exc()
                report.write('\n')
                report.write(' An exception occurred while round tripping')
                report.write((' "%s"\n' % ttFile))
                traceback.print_exc(file=report)
                report.write('\n')
def parse_time(value: str) -> time:
    """Parse *value* into a ``datetime.time``.

    Accepts either a number of seconds since midnight (must be < 86400) or a
    time string matched by ``time_re``; raises TimeError on anything else.
    """
    seconds = get_numeric(value, 'time')
    if seconds is not None:
        if seconds >= 86400:
            # A numeric value must fit within a single day.
            raise TimeError()
        return (datetime.min + timedelta(seconds=seconds)).time()
    match = time_re.match(value)
    if match is None:
        raise TimeError()
    parts = match.groupdict()
    if parts['microsecond']:
        # Right-pad fractional seconds so '.5' means 500000 microseconds.
        parts['microsecond'] = parts['microsecond'].ljust(6, '0')
    tzinfo = _parse_timezone(parts.pop('tzinfo'), TimeError)
    kwargs: Dict[(str, Union[(None, int, timezone)])] = {name: int(text) for (name, text) in parts.items() if (text is not None)}
    kwargs['tzinfo'] = tzinfo
    try:
        return time(**kwargs)
    except ValueError:
        # Out-of-range components (e.g. hour 25) become a TimeError.
        raise TimeError()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.