code stringlengths 281 23.7M |
|---|
class TestAffiliationAddressTrainingTeiParser():
    """Tests that affiliation/address training TEI is parsed into BIO tag results."""

    def test_should_parse_single_token_labelled_training_tei_lines(self):
        tei_root = _get_training_tei_with_affiliations([TEI_E('affiliation', *[TEI_E('orgName', {'type': 'institution'}, TOKEN_1, TEI_E('lb')), '\n', TEI_E('address', TEI_E('country', TOKEN_2, TEI_E('lb'))), '\n'])])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<institution>'), (TOKEN_2, 'B-<country>')]])

    def test_should_parse_single_label_with_multiple_lines(self):
        tei_root = _get_training_tei_with_affiliations([TEI_E('affiliation', *[TEI_E('orgName', {'type': 'institution'}, TOKEN_1, TEI_E('lb'), '\n', TOKEN_2, TEI_E('lb')), '\n'])])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<institution>'), (TOKEN_2, 'I-<institution>')]])

    def test_should_interpret_text_in_address_as_unlabelled(self):
        tei_root = _get_training_tei_with_affiliations([TEI_E('affiliation', *[TEI_E('address', TOKEN_1, TEI_E('lb')), '\n'])])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'O')]])

    def test_should_output_multiple_tokens_of_each_unlabelled_lines(self):
        tei_root = _get_training_tei_with_affiliations([TEI_E('affiliation', *[TOKEN_1, ' ', TOKEN_2, TEI_E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, TEI_E('lb'), '\n'])])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'O'), (TOKEN_2, 'O'), (TOKEN_3, 'O'), (TOKEN_4, 'O')]])

    def test_should_parse_single_label_with_multiple_tokens_on_multiple_lines(self):
        tei_root = _get_training_tei_with_affiliations([TEI_E('affiliation', *[TEI_E('orgName', {'type': 'institution'}, TOKEN_1, ' ', TOKEN_2, TEI_E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, TEI_E('lb')), '\n'])])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'B-<institution>'), (TOKEN_2, 'I-<institution>'), (TOKEN_3, 'I-<institution>'), (TOKEN_4, 'I-<institution>')]])

    # Restored decorator prefix: the source had a bare `.parametrize(...)`
    # expression (a syntax error); `@pytest.mark` was evidently lost.
    @pytest.mark.parametrize('tei_label,element_path', list(TRAINING_XML_ELEMENT_PATH_BY_LABEL.items()))
    def test_should_parse_all_supported_labels(self, tei_label: str, element_path: Sequence[str]):
        xml_writer = XmlTreeWriter(TEI_E('tei'), element_maker=TEI_E)
        xml_writer.require_path(element_path)
        xml_writer.append_all(TOKEN_1, ' ', TOKEN_2, TEI_E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, TEI_E('lb'))
        tei_root = xml_writer.root
        LOGGER.debug('tei_root: %r', etree.tostring(tei_root))
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, f'B-{tei_label}'), (TOKEN_2, f'I-{tei_label}'), (TOKEN_3, f'I-{tei_label}'), (TOKEN_4, f'I-{tei_label}')]])
def run():
    """CLI entry point: load pysis trajectories and run physical-validation checks.

    Parses command-line arguments, converts each requested HDF5 group into a
    physical_validation result, validates the kinetic-energy distribution and
    estimates the ensemble interval per group, then (for exactly two groups)
    runs the ensemble check and (for more than one group) the integrator
    convergence check.
    """
    args = parse_args(sys.argv[1:])
    fn = args.fn
    groups = args.groups
    strict = args.strict
    verbosity = args.verbosity
    print(f"Loading data from '{fn}'.")
    # One result object per requested HDF5 group, in the same order as 'groups'.
    results = [pysis_to_pv(h5_fn=fn, h5_group=grp) for grp in groups]
    print()
    for (grp, res) in zip(groups, results):
        print(f'### Validating kinetic energy for {grp}')
        _ = pv.kinetic_energy.distribution(res, verbosity=verbosity, strict=strict, filename=f'{grp}_kinetic')
        print()
        print(f'### Estimating T for ensemble validation for {grp}')
        pv.ensemble.estimate_interval(res)
        print()
    if (len(results) == 2):
        print('### Validating ensemble')
        # NOTE(review): 'grp' here is the leftover loop variable (the last
        # group), so the output file is named after the last group only —
        # confirm this is intended rather than a combined name.
        quantiles = pv.ensemble.check(*results, screen=False, verbosity=verbosity, filename=f'{grp}_ensemble_test')
        print(quantiles)
    if (len(groups) > 1):
        print(f'### Validating integrator energy for {groups}')
        # NOTE(review): same leaked 'grp' as above — verify the filename.
        i = pv.integrator.convergence(results, verbose=True, filename=f'{grp}_integrator')
        print(i)
def prepare_launch_json():
    """Generate a VS Code ``launch.json`` next to the task's source file.

    Writes two launch configurations (interactive debugging and resuming the
    task) into ``<task source dir>/.vscode/launch.json``, creating the
    ``.vscode`` directory if needed.
    """
    task_function_source_dir = os.path.dirname(
        FlyteContextManager.current_context().user_space_params.TASK_FUNCTION_SOURCE_PATH
    )
    launch_json = {
        'version': '0.2.0',
        'configurations': [
            {
                'name': 'Interactive Debugging',
                'type': 'python',
                'request': 'launch',
                'program': os.path.join(task_function_source_dir, INTERACTIVE_DEBUGGING_FILE_NAME),
                'console': 'integratedTerminal',
                'justMyCode': True,
            },
            {
                'name': 'Resume Task',
                'type': 'python',
                'request': 'launch',
                'program': os.path.join(task_function_source_dir, RESUME_TASK_FILE_NAME),
                'console': 'integratedTerminal',
                'justMyCode': True,
            },
        ],
    }
    vscode_directory = os.path.join(task_function_source_dir, '.vscode')
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(vscode_directory, exist_ok=True)
    with open(os.path.join(vscode_directory, 'launch.json'), 'w') as file:
        json.dump(launch_json, file, indent=4)
# NOTE(review): the bare string below looks like the argument of a stripped
# decorator (e.g. dredd-hooks' @hooks.before(...)) — restore the decorator
# before use; as written this line is a no-op expression.
('Events > Get Event for a Discount Code > Event Details for a Discount Code')
def event_discount_code(transaction):
    """Seed the database with one event plus a discount code for that event.

    Runs inside the stashed Flask app context and commits both rows so the
    subsequent API transaction has data to read.
    """
    with stash['app'].app_context():
        event = EventFactoryBasic()
        db.session.add(event)
        # Assumes the event created above gets id 1 — TODO confirm.
        discount_code = DiscountCodeFactory(event_id=1)
        db.session.add(discount_code)
        db.session.commit()
class InlineQueryResultLocation(InlineQueryResultBase):
    """Telegram inline query result representing a location on a map."""

    def __init__(self, id, title, latitude, longitude, horizontal_accuracy, live_period=None, reply_markup=None, input_message_content=None, thumbnail_url=None, thumbnail_width=None, thumbnail_height=None, heading=None, proximity_alert_radius=None):
        super().__init__('location', id, title=title, input_message_content=input_message_content, reply_markup=reply_markup)
        self.latitude = latitude
        self.longitude = longitude
        self.horizontal_accuracy = horizontal_accuracy
        self.live_period = live_period
        self.heading: int = heading
        self.proximity_alert_radius: int = proximity_alert_radius
        self.thumbnail_url = thumbnail_url
        self.thumbnail_width = thumbnail_width
        self.thumbnail_height = thumbnail_height

    # NOTE(review): thumb_url/thumb_width/thumb_height look like deprecated
    # accessors that are normally decorated with @property — confirm whether
    # the decorators were lost in this copy.
    def thumb_url(self):
        """Deprecated accessor; use ``thumbnail_url``."""
        logger.warning('The parameter "thumb_url" is deprecated, use "thumbnail_url" instead')
        return self.thumbnail_url

    def thumb_width(self):
        """Deprecated accessor; use ``thumbnail_width``."""
        logger.warning('The parameter "thumb_width" is deprecated, use "thumbnail_width" instead')
        return self.thumbnail_width

    def thumb_height(self):
        """Deprecated accessor; use ``thumbnail_height``."""
        logger.warning('The parameter "thumb_height" is deprecated, use "thumbnail_height" instead')
        return self.thumbnail_height

    def to_dict(self):
        """Serialize to the Telegram API dict.

        Optional fields are included only when truthy, so explicit zero
        values (e.g. ``heading=0``) are omitted as well.
        """
        json_dict = super().to_dict()
        json_dict['latitude'] = self.latitude
        json_dict['longitude'] = self.longitude
        if self.horizontal_accuracy:
            json_dict['horizontal_accuracy'] = self.horizontal_accuracy
        if self.live_period:
            json_dict['live_period'] = self.live_period
        if self.heading:
            json_dict['heading'] = self.heading
        if self.proximity_alert_radius:
            json_dict['proximity_alert_radius'] = self.proximity_alert_radius
        if self.thumbnail_url:
            json_dict['thumbnail_url'] = self.thumbnail_url
        if self.thumbnail_width:
            json_dict['thumbnail_width'] = self.thumbnail_width
        if self.thumbnail_height:
            json_dict['thumbnail_height'] = self.thumbnail_height
        return json_dict
def test_fake_ase_opt():
    """BFGS driven through the FakeASE calculator wrapper converges on an Ar cluster."""
    atoms = Icosahedron('Ar', noshells=2, latticeconstant=3)
    atoms.calc = FakeASE(LennardJones())
    dyn = BFGS(atoms)
    dyn.run(fmax=0.0005)
    assert dyn.converged()
    # NOTE(review): pinning the exact step count is brittle across
    # optimizer/ASE versions — confirm this is intentional regression pinning.
    assert (dyn.get_number_of_steps() == 14)
    # Forces cached from the previous step should be (numerically) zero.
    assert (np.linalg.norm(dyn.f0) == pytest.approx(0.))
class HypreAMS(PCBase):
    """PETSc PC wrapping Hypre's Auxiliary-space Maxwell Solver (AMS).

    Valid only for lowest-order Nedelec (H(curl), form degree 1) spaces;
    builds the discrete gradient operator from a P1 space and supplies the
    mesh coordinates AMS requires.
    """

    def initialize(self, obj):
        """Create and configure the underlying 'hypre'/'ams' PC from ``obj``."""
        if complex_mode:
            raise NotImplementedError('HypreAMS preconditioner not yet implemented in complex mode')
        Citations().register('Kolev2009')
        (A, P) = obj.getOperators()
        appctx = self.get_appctx(obj)
        prefix = obj.getOptionsPrefix()
        V = get_function_space(obj.getDM())
        mesh = V.mesh()
        family = str(V.ufl_element().family())
        formdegree = V.finat_element.formdegree
        degree = V.ufl_element().degree()
        try:
            # Elements with per-direction degrees report a tuple; use the max.
            degree = max(degree)
        except TypeError:
            pass
        if ((formdegree != 1) or (degree != 1)):
            raise ValueError(('Hypre AMS requires lowest order Nedelec elements! (not %s of degree %d)' % (family, degree)))
        P1 = FunctionSpace(mesh, 'Lagrange', 1)
        # Discrete gradient P1 -> V; overridable via the 'get_gradient' appctx key.
        G_callback = appctx.get('get_gradient', None)
        if (G_callback is None):
            G = chop(Interpolator(grad(TestFunction(P1)), V).callable().handle)
        else:
            G = G_callback(P1, V)
        pc = PETSc.PC().create(comm=obj.comm)
        pc.incrementTabLevel(1, parent=obj)
        pc.setOptionsPrefix((prefix + 'hypre_ams_'))
        pc.setOperators(A, P)
        pc.setType('hypre')
        pc.setHYPREType('ams')
        pc.setHYPREDiscreteGradient(G)
        zero_beta = PETSc.Options(prefix).getBool('pc_hypre_ams_zero_beta_poisson', default=False)
        if zero_beta:
            pc.setHYPRESetBetaPoissonMatrix(None)
        # AMS needs the vertex coordinates of a vector-valued P1 space.
        VectorP1 = VectorFunctionSpace(mesh, 'Lagrange', 1)
        pc.setCoordinates(interpolate(SpatialCoordinate(mesh), VectorP1).dat.data_ro.copy())
        pc.setFromOptions()
        self.pc = pc

    def apply(self, pc, x, y):
        # Delegate to the wrapped hypre PC.
        self.pc.apply(x, y)

    def applyTranspose(self, pc, x, y):
        self.pc.applyTranspose(x, y)

    def view(self, pc, viewer=None):
        super().view(pc, viewer)
        if hasattr(self, 'pc'):
            viewer.printfASCII('PC to apply inverse\n')
            self.pc.view(viewer)

    def update(self, pc):
        # Operators may have changed; rebuild the wrapped PC.
        self.pc.setUp()
class DistributedRandomPolicy(RandomPolicy):
    """Random policy that produces a stacked batch of samples, one per worker.

    Behaves like :class:`RandomPolicy`, but ``compute_action`` returns
    ``concurrency`` independently sampled actions stacked into one batch.
    """

    def __init__(self, action_spaces_dict: Dict[(Union[(str, int)], spaces.Space)], concurrency: int):
        super().__init__(action_spaces_dict)
        # Fall back to the detected CPU count when no positive value is given.
        if concurrency > 0:
            self.concurrency = concurrency
        else:
            self.concurrency = query_cpu()

    def compute_action(self, observation: ObservationType, maze_state: Optional[MazeStateType], env: Optional[BaseEnv]=None, actor_id: Optional[ActorID]=None, deterministic: bool=False) -> ActionType:
        """Sample ``self.concurrency`` actions from the relevant space and stack them."""
        if actor_id:
            space = self.action_spaces_dict[actor_id.step_key]
        else:
            assert (len(self.action_spaces_dict) == 1), 'action spaces for multiple sub-steps are available, please specify actor ID explicitly'
            space = next(iter(self.action_spaces_dict.values()))
        samples = [space.sample() for _ in range(self.concurrency)]
        return stack_numpy_dict_list(samples)
def has_scope_subset(user_scopes: List[str], endpoint_scopes: SecurityScopes) -> bool:
    """Return True when every scope the endpoint requires is held by the user.

    On failure, logs which scopes were required versus provided and returns
    False.
    """
    required = set(endpoint_scopes.scopes)
    if required.issubset(user_scopes):
        return True
    logger.debug(
        'Auth token missing required scopes: {}. Scopes provided: {}.',
        ','.join(endpoint_scopes.scopes),
        ','.join(user_scopes),
    )
    return False
# NOTE(review): the bare string below looks like the argument of a stripped
# route decorator (e.g. @router.post('/knowledge/{space_name}/arguments')) —
# restore it before use; as written this line is a no-op expression.
('/knowledge/{space_name}/arguments')
def arguments(space_name: str):
    """Return the stored arguments of a knowledge space wrapped in a Result."""
    # f-string has no placeholders; kept as-is (documentation-only change).
    print(f'/knowledge/space/arguments params:')
    try:
        return Result.succ(knowledge_space_service.arguments(space_name))
    except Exception as e:
        # Convert any failure into an API error result instead of raising.
        return Result.failed(code='E000X', msg=f'space list error {e}')
class meter_config_stats_request(stats_request):
    """OpenFlow v1.3 (wire version 4) meter-config stats request message.

    NOTE(review): this looks like loxigen-generated code written for Python 2:
    pack() joins '\\x00' str padding with struct.pack() bytes (a TypeError on
    Python 3), and unpack() takes no self (upstream it is a @staticmethod) —
    confirm against the generator before changing.
    """
    version = 4
    type = 18
    stats_type = 10

    def __init__(self, xid=None, flags=None, meter_id=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (meter_id != None):
            self.meter_id = meter_id
        else:
            self.meter_id = 0
        return

    def pack(self):
        """Serialize to wire format; the length field is backpatched at the end."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # placeholder for total length
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(('\x00' * 4))  # pad
        packed.append(struct.pack('!L', self.meter_id))
        packed.append(('\x00' * 4))  # pad
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        """Deserialize from a loxi buffer reader; asserts the fixed header fields."""
        obj = meter_config_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 4)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's declared length.
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 10)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        obj.meter_id = reader.read('!L')[0]
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.meter_id != other.meter_id):
            return False
        return True

    def pretty_print(self, q):
        """Render a human-readable form through the pretty-printer ``q``."""
        q.text('meter_config_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REQ_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('meter_id = ')
                q.text(('%#x' % self.meter_id))
            q.breakable()
        q.text('}')
def imgur_recon(user_name):
    """Scrape public profile information for an Imgur user.

    Returns a dict with account-creation date, bio, profile URL and site
    name, or None when ``user_name`` is falsy or the scrape fails.
    """
    if not user_name:
        return None
    # NOTE(review): the original URL literal was truncated in this source;
    # reconstructed from Imgur's public profile URL scheme — confirm.
    url = f'https://imgur.com/user/{user_name}'
    try:
        soup = social_soup(url)
        try:
            bio = soup.find('div', id='account-bio').contents[0]
        except Exception:
            # Profile has no bio (or the markup changed); treat as absent.
            bio = None
        a_date = soup.find('div', class_='textbox bold')
        acct_date = a_date.contents[2].split('\n')[1].strip()
        return {'acct_date': acct_date, 'bio': bio, 'url': url, 'site': 'Imgur'}
    except Exception:
        # Network failure or unexpected markup: report "no data", don't crash.
        return None
class DictTransformer(object):
    """Transforms (possibly nested) dicts into typed grammar-production trees.

    Dicts carrying the configured type field are instantiated as the matching
    grammar production class; lists and plain values are transformed
    recursively.
    """

    def __init__(self, grammar, class_identifier=None, implicit_terminals=None, implicit_non_terminals=None, attribute_naming_scheme=None, class_naming_scheme=None):
        self.grammar = grammar
        # When True, attributes not declared on the production are still attached.
        self.implicit_terminals = (implicit_terminals or False)
        self.implicit_non_terminals = (implicit_non_terminals or False)
        self.attribute_naming_scheme = (attribute_naming_scheme or SnakeCase())
        self.class_naming_scheme = (class_naming_scheme or Identity())
        # Dict key that names the production type of a node (default 'nodeType').
        self.type_field = (class_identifier or 'nodeType')

    def transform(self, element):
        """Transform ``element`` and validate the resulting tree against the grammar."""
        tree = self.transform_element(element)
        if any([isinstance(tree, c) for c in self.grammar.productions]):
            self.grammar.validate_tree(tree)
        else:
            # Root did not map to any known production.
            raise GrammarError()
        return tree

    def transform_element(self, element):
        """Recursively transform dicts and lists; other values pass through unchanged."""
        if isinstance(element, dict):
            return self.__transform_dict(element)
        elif isinstance(element, list):
            res = []

            def get_dummy(element):
                # Build a stand-in for None entries from the first non-None
                # sibling: a copy with a 'dummy_'-prefixed name if it is a
                # dict, otherwise None.
                # NOTE(review): keys off 'name', not self.type_field — confirm
                # this is the intended attribute.
                for e in element:
                    if (e is not None):
                        if isinstance(e, dict):
                            dummy = dict(e)
                            dummy['name'] = ('dummy_' + dummy['name'])
                            return dummy
                        else:
                            return None
            for (i, e) in enumerate(element):
                if (e is None):
                    tmp = self.transform_element(get_dummy(element))
                else:
                    tmp = self.transform_element(e)
                res.append(tmp)
            return res
        else:
            return element

    def __transform_dict(self, dict_node):
        """Instantiate the production named by the type field and fill its attributes."""
        if (self.type_field not in dict_node):
            # Plain dict, not a typed node: return unchanged.
            return dict_node
        symbol_name = dict_node[self.type_field]
        symbol_name = self.class_naming_scheme(symbol_name)
        if (symbol_name not in self.grammar.productions_by_name):
            raise GrammarError(f"Symbol '{symbol_name}' not found in grammar.")
        symbol = self.grammar.productions_by_name[symbol_name]
        node = symbol()
        node_info = get_type_hints(symbol)
        for (attribute_name, value) in dict_node.items():
            attribute_name = self.attribute_naming_scheme(attribute_name)
            # A value is a (list of) nested node(s) when it carries the type field.
            is_node = any([(isinstance(value, dict) and (self.type_field in value)), (isinstance(value, list) and all([(isinstance(e, dict) and (self.type_field in e)) for e in value]))])
            is_declared = (attribute_name in node_info)
            is_addable = (is_declared or (self.implicit_non_terminals if is_node else self.implicit_terminals))
            if is_addable:
                setattr(node, attribute_name, self.transform_element(value))
        self.grammar.validate_node(node)
        return node
class AWSPolicyValidationService(PolicyValidationService):
    """Validates S3 bucket policy statements against configured policy rules."""

    def __init__(self) -> None:
        self.logger: logging.Logger = logging.getLogger(__name__)

    def _principal_match(self, principal_settings: str, principal: str) -> bool:
        """Match ``principal`` against a setting, literally or via ``re(<pattern>)``."""
        wrapped = re.fullmatch('re\\((.*?)\\)', principal_settings)
        if wrapped is None:
            return principal_settings == principal
        return bool(re.fullmatch(wrapped.group(1), principal))

    def _policy_exists_in_statements(self, statements: List[PolicyStatement], effect: str, principal: str, actions: List[str], resources: List[str]) -> bool:
        """Return True when some statement covers the effect, principal, actions and resources."""
        wanted_actions = set(actions)
        wanted_resources = set(resources)
        for statement in statements:
            if effect != statement.effect:
                continue
            if not any(self._principal_match(principal, p) for p in statement.principals):
                continue
            if wanted_actions.issubset(statement.actions) and wanted_resources.issubset(statement.resources):
                return True
        return False

    def is_bucket_policy_statements_valid(self, bucket: str, bucket_statements: List[PolicyStatement], policy_settings: List[PolicySettingsConfig]) -> bool:
        """Return True when every configured rule's existence expectation holds for the bucket."""
        for rule in policy_settings:
            found = self._policy_exists_in_statements(bucket_statements, rule.effect, rule.principal, rule.actions, rule.resources)
            if rule.exist != found:
                self.logger.error('The policy of bucket %s does not satisfy the following policy settings: %s.', bucket, str(rule))
                return False
        return True
class Cars(list):
    """A list of Car objects with VIN/vehicle-id lookup and JSON persistence."""

    def __init__(self, *args):
        list.__init__(self, *args)
        # Default file used by save_cars()/load_cars() when no name is given.
        self.config_filename = 'cars.json'

    def get_car_by_vin(self, vin) -> "Car":
        """Return the car with the given VIN, or None if absent."""
        for car in self:
            if car.vin == vin:
                return car
        return None

    def get_car_by_id(self, vehicle_id) -> "Car":
        """Return the car with the given vehicle id, or None if absent."""
        for car in self:
            if car.vehicle_id == vehicle_id:
                return car
        return None

    def add(self, car: "Car"):
        """Add a car, or refresh the vehicle id of an existing car with the same VIN."""
        car_with_same_vin = self.get_car_by_vin(car.vin)
        if not car_with_same_vin:
            self.append(car)
        elif car_with_same_vin.vehicle_id != car.vehicle_id:
            logger.warning('Vehicle ID changed !')
            car_with_same_vin.vehicle_id = car.vehicle_id

    @classmethod  # restored: the method clearly expects the class as first arg
    def from_json(cls, data: list):
        """Build a Cars collection from a list of JSON dicts."""
        cars = list(map(Car.from_json, data))
        return cls(cars)

    def __str__(self):
        return str(list(map(str, self)))

    def save_cars(self, name=None):
        """Serialize the collection as JSON to ``name`` (default: config_filename)."""
        if name is None:
            name = self.config_filename
        config_str = json.dumps(self, default=(lambda car: car.to_dict()), sort_keys=True, indent=4)
        with open(name, 'w', encoding='utf-8') as file:
            file.write(config_str)

    @staticmethod  # restored: the method takes no ``self``
    def load_cars(name=None):
        """Load cars from a JSON file; return an empty collection on failure."""
        if name is None:
            name = Cars().config_filename
        try:
            with open(name, 'r', encoding='utf-8') as file:
                json_str = file.read()
            cars = Cars.from_json(json.loads(json_str))
            cars.config_filename = name
            # Re-save immediately to normalize the on-disk format.
            cars.save_cars()
            return cars
        except (FileNotFoundError, TypeError) as ex:
            logger.debug(ex)
            return Cars()
# NOTE(review): the bare expression below looks like the argument of a stripped
# dispatch decorator (e.g. @coarsen.register(firedrake.NonlinearVariationalProblem))
# — restore it before use; as written this line is a no-op expression.
(firedrake.NonlinearVariationalProblem)
def coarsen_nlvp(problem, self, coefficient_mapping=None):
    """Build (and cache) the coarse-grid counterpart of a NonlinearVariationalProblem.

    ``self`` is the coarsening callable, applied recursively to coefficients,
    boundary conditions and forms.  The coarse problem is cached on
    ``problem._coarse`` so repeated coarsening is cheap.
    """
    if hasattr(problem, '_coarse'):
        return problem._coarse

    def inject_on_restrict(fine, restriction, rscale, injection, coarse):
        # DM restriction hook: push fine-grid coefficient and boundary values
        # down to their coarse ('_child') counterparts.
        from firedrake.bcs import DirichletBC
        manager = get_transfer_manager(fine)
        finectx = get_appctx(fine)
        forms = (finectx.F, finectx.J, finectx.Jp)
        coefficients = unique(chain.from_iterable((form.coefficients() for form in forms if (form is not None))))
        for c in coefficients:
            if hasattr(c, '_child'):
                if is_dual(c):
                    manager.restrict(c, c._child)
                else:
                    manager.inject(c, c._child)
        for bc in chain(*finectx._problem.bcs):
            if isinstance(bc, DirichletBC):
                bc.apply(finectx._x)
                g = bc.function_arg
                if (isinstance(g, firedrake.Function) and hasattr(g, '_child')):
                    manager.inject(g, g._child)
    V = problem.u.function_space()
    if (not hasattr(V, '_coarse')):
        # Register the hook only once per fine function space.
        V.dm.addCoarsenHook(None, inject_on_restrict)
    forms = (problem.F, problem.J, problem.Jp)
    coefficients = unique(chain.from_iterable((form.coefficients() for form in forms if (form is not None))))
    if (coefficient_mapping is None):
        coefficient_mapping = {}
    for c in coefficients:
        # Coarsen every coefficient appearing in any of the forms.
        coefficient_mapping[c] = self(c, self, coefficient_mapping=coefficient_mapping)
    u = coefficient_mapping[problem.u]
    bcs = [self(bc, self) for bc in problem.bcs]
    J = self(problem.J, self, coefficient_mapping=coefficient_mapping)
    Jp = self(problem.Jp, self, coefficient_mapping=coefficient_mapping)
    F = self(problem.F, self, coefficient_mapping=coefficient_mapping)
    fine = problem
    problem = firedrake.NonlinearVariationalProblem(F, u, bcs=bcs, J=J, Jp=Jp, form_compiler_parameters=problem.form_compiler_parameters)
    fine._coarse = problem
    return problem
class TestMonitorHandler(TestCase):
    """Unit tests for MonitorHandler's add/remove/all/clear behaviour.

    The deprecated ``assertEquals`` alias (removed in Python 3.12) has been
    replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        self.handler = MonitorHandler()

    def test_add(self):
        """Adding a monitor registers it under (obj, fieldname, idstring)."""
        obj = mock.Mock()
        fieldname = 'db_add'
        callback = dummy_func
        idstring = 'test'
        self.handler.add(obj, fieldname, callback, idstring=idstring)
        self.assertIn(fieldname, self.handler.monitors[obj])
        self.assertIn(idstring, self.handler.monitors[obj][fieldname])
        self.assertEqual(self.handler.monitors[obj][fieldname][idstring], (callback, False, {}))

    def test_remove(self):
        """Removing a monitor leaves an empty mapping for that field."""
        obj = mock.Mock()
        fieldname = 'db_remove'
        callback = dummy_func
        idstring = 'test_remove'
        self.handler.add(obj, fieldname, callback, idstring=idstring)
        self.handler.remove(obj, fieldname, idstring=idstring)
        self.assertEqual(self.handler.monitors[obj][fieldname], {})

    def test_add_with_invalid_function(self):
        """Non-callable callbacks must not be registered."""
        obj = mock.Mock()
        fieldname = 'db_key'
        callback = 'not_a_function'
        self.handler.add(obj, fieldname, callback)
        self.assertNotIn(fieldname, self.handler.monitors[obj])

    def test_all(self):
        """all() lists every registered monitor with its persistence flag."""
        obj = [mock.Mock(), mock.Mock()]
        fieldname = ['db_all1', 'db_all2']
        callback = dummy_func
        idstring = ['test_all1', 'test_all2']
        self.handler.add(obj[0], fieldname[0], callback, idstring=idstring[0])
        self.handler.add(obj[1], fieldname[1], callback, idstring=idstring[1], persistent=True)
        output = self.handler.all()
        self.assertEqual(output, [(obj[0], fieldname[0], idstring[0], False, {}), (obj[1], fieldname[1], idstring[1], True, {})])

    def test_clear(self):
        """clear() resets the handler to an empty monitor registry."""
        obj = mock.Mock()
        fieldname = 'db_add'
        callback = dummy_func
        idstring = 'test'
        self.handler.add(obj, fieldname, callback, idstring=idstring)
        self.assertIn(obj, self.handler.monitors)
        self.handler.clear()
        self.assertNotIn(obj, self.handler.monitors)
        self.assertEqual(defaultdict((lambda: defaultdict(dict))), self.handler.monitors)

    def test_add_remove_attribute(self):
        """Attribute monitors are keyed as 'db_value[<category>]' on the Attribute object."""
        obj = mock.Mock()
        obj.name = 'testaddattribute'
        fieldname = 'name'
        callback = dummy_func
        idstring = 'test'
        category = 'testattribute'
        self.handler.add(obj, fieldname, callback, idstring=idstring, category=category)
        index = obj.attributes.get(fieldname, return_obj=True)
        name = 'db_value[testattribute]'
        self.assertIn(name, self.handler.monitors[index])
        self.assertIn(idstring, self.handler.monitors[index][name])
        self.assertEqual(self.handler.monitors[index][name][idstring], (callback, False, {}))
        self.handler.remove(obj, fieldname, idstring=idstring, category=category)
        self.assertEqual(self.handler.monitors[index][name], {})
class PluginsManager():
    """Discovers, loads, enables/disables, installs and removes Exaile plugins."""

    def __init__(self, exaile, load=True):
        self.user_installed_plugindir = xdg.get_user_plugin_dir()
        # pluginname -> loaded module (or plugin_class instance).
        self.loaded_plugins = {}
        self.exaile = exaile
        # pluginname -> enabled plugin object.
        self.enabled_plugins = {}
        # When False, the enabled-plugins setting is never persisted.
        self.load = load

    def __findplugin(self, pluginname):
        """Return the directory of the named plugin, or None if not found."""
        for plugin_dir in xdg.get_plugin_dirs():
            path = os.path.join(plugin_dir, pluginname)
            if os.path.exists(path):
                return path
        return None

    def load_plugin(self, pluginname, reload_plugin=False):
        """Import a plugin package and return its module (or plugin_class instance).

        Returns False when the plugin cannot be found; raises
        InvalidPluginError when the name clashes with an existing module.
        """
        if ((not reload_plugin) and (pluginname in self.loaded_plugins)):
            return self.loaded_plugins[pluginname]
        path = self.__findplugin(pluginname)
        if (path is None):
            return False
        spec = importlib.util.spec_from_file_location(pluginname, os.path.join(path, '__init__.py'))
        plugin = importlib.util.module_from_spec(spec)
        if (pluginname in sys.modules):
            raise InvalidPluginError(_('Plugin is already loaded or has a conflicting name.'))
        sys.modules[pluginname] = plugin
        spec.loader.exec_module(plugin)
        # NOTE(review): the entry is set to None (not deleted) after execution,
        # so the name stays in sys.modules and the conflict check above fires
        # on a later load — confirm this is intentional.
        sys.modules[pluginname] = None
        if hasattr(plugin, 'plugin_class'):
            plugin = plugin.plugin_class()
        self.loaded_plugins[pluginname] = plugin
        return plugin

    def install_plugin(self, path, overwrite_existing: bool=False) -> str:
        """Extract a plugin tarball into the user plugin dir; return the plugin name."""
        try:
            tar = tarfile.open(path, 'r:*')
        except (tarfile.ReadError, OSError):
            raise InvalidPluginError(_('Plugin archive is not in the correct format.'))
        mems = tar.getmembers()
        # Plugin name is taken from the archive filename (up to the first dot).
        base = os.path.basename(path).split('.')[0]
        installed_plugins = self.list_installed_plugins()
        if ((not overwrite_existing) and (base in installed_plugins)):
            raise PluginExistsError((_('A plugin with the name "%s" is already installed.') % base))
        for m in mems:
            # NOTE(review): prefix check is a weak guard against path traversal
            # (it rejects '../x' but accepts '<base>/../../x') — verify.
            if (not m.name.startswith(base)):
                raise InvalidPluginError(_('Plugin archive contains an unsafe path.'))
        tar.extractall(self.user_installed_plugindir)
        return base

    def __on_new_plugin_loaded(self, eventname, exaile, maybe_name, fn):
        # One-shot event callback: unsubscribe first, then run the deferred hook.
        event.remove_callback(self.__on_new_plugin_loaded, eventname)
        fn()

    def __enable_new_plugin(self, plugin):
        """Run (or defer until loading finishes) the plugin's post-load hooks."""
        if hasattr(plugin, 'on_gui_loaded'):
            if self.exaile.loading:
                event.add_ui_callback(self.__on_new_plugin_loaded, 'gui_loaded', None, plugin.on_gui_loaded)
            else:
                plugin.on_gui_loaded()
        if hasattr(plugin, 'on_exaile_loaded'):
            if self.exaile.loading:
                event.add_ui_callback(self.__on_new_plugin_loaded, 'exaile_loaded', None, plugin.on_exaile_loaded)
            else:
                plugin.on_exaile_loaded()

    def uninstall_plugin(self, pluginname: str) -> None:
        """Disable (if needed) and delete a user-installed plugin from disk."""
        if (not self.is_user_installed(pluginname)):
            raise Exception('Cannot remove built-in plugins')
        if (pluginname in self.enabled_plugins):
            self.disable_plugin(pluginname)
        plugin_path = os.path.join(self.user_installed_plugindir, pluginname)
        try:
            shutil.rmtree(plugin_path)
        except Exception as e:
            raise e

    def enable_plugin(self, pluginname, installation: bool=False):
        """Load and enable a plugin, firing the install hook when ``installation``."""
        try:
            plugin = self.load_plugin(pluginname)
            if (not plugin):
                raise Exception('Error loading plugin')
            if (installation and hasattr(plugin, 'on_plugin_installed')):
                plugin.on_plugin_installed()
            plugin.enable(self.exaile)
            if (not inspect.ismodule(plugin)):
                # Class-based plugins also get the GUI/loaded hooks.
                self.__enable_new_plugin(plugin)
            self.enabled_plugins[pluginname] = plugin
            logger.debug('Loaded plugin %s', pluginname)
            self.save_enabled()
            event.log_event('plugin_enabled', self, pluginname)
        except Exception as e:
            logger.exception('Unable to enable plugin %s', pluginname)
            raise e

    def disable_plugin(self, pluginname):
        """Disable an enabled plugin; return True on success, False if not enabled."""
        try:
            plugin = self.enabled_plugins[pluginname]
            del self.enabled_plugins[pluginname]
        except KeyError:
            logger.exception('Plugin not found, possibly already disabled')
            return False
        try:
            plugin.disable(self.exaile)
            logger.debug('Unloaded plugin %s', pluginname)
            self.save_enabled()
        except Exception as e:
            logger.exception('Unable to fully disable plugin %s', pluginname)
            raise e
        finally:
            # Fired even when disable() raised.
            event.log_event('plugin_disabled', self, pluginname)
        return True

    def list_installed_plugins(self):
        """Return names of plugin directories that contain a PLUGININFO file."""
        pluginlist = []
        for directory in xdg.get_plugin_dirs():
            if (not os.path.exists(directory)):
                continue
            for name in os.listdir(directory):
                if ((name == '__pycache__') or (name in pluginlist) or (not os.path.exists(os.path.join(directory, name, 'PLUGININFO')))):
                    continue
                pluginlist.append(name)
        return pluginlist

    def list_available_plugins(self):
        # Not implemented.
        pass

    def list_updateable_plugins(self):
        # Not implemented.
        pass

    def is_user_installed(self, pluginname: str) -> bool:
        """True when the plugin lives in the per-user plugin directory."""
        if os.path.isdir(os.path.join(self.user_installed_plugindir, pluginname)):
            return True
        return False

    def get_plugin_info(self, pluginname):
        """Parse the plugin's PLUGININFO file into a dict of evaluated values."""
        path = os.path.join(self.__findplugin(pluginname), 'PLUGININFO')
        infodict = {}
        with open(path) as f:
            for line in f:
                try:
                    (key, val) = line.split('=', 1)
                    # Values are Python literals; eval runs with builtins
                    # disabled and only gettext ('_') available.
                    infodict[key] = eval(val, {'__builtins__': None, '_': _}, {})
                except ValueError:
                    pass
        return infodict

    def is_compatible(self, info):
        """True when the current platform matches the plugin's Platforms list."""
        platforms = info.get('Platforms', [])
        if (len(platforms) == 0):
            # No restriction declared: treat as compatible everywhere.
            platforms = [sys.platform]
        for platform in platforms:
            if sys.platform.startswith(platform):
                return True
        return False

    def is_potentially_broken(self, info):
        """True when a module listed in RequiredModules ('gi:Name' or plain) is missing."""
        import pkgutil
        from gi.repository import GIRepository
        gir = GIRepository.Repository.get_default()
        modules = info.get('RequiredModules', [])
        for module in modules:
            pair = module.split(':', 1)
            if (len(pair) > 1):
                (prefix, module) = pair
                if (prefix == 'gi'):
                    # gi-introspected modules are checked via the repository.
                    if (not gir.enumerate_versions(module)):
                        return True
            elif (not pkgutil.find_loader(module)):
                return True
        return False

    def save_enabled(self):
        # Persist the enabled-plugin list unless persistence is disabled.
        if self.load:
            settings.set_option('plugins/enabled', list(self.enabled_plugins.keys()))

    def load_enabled(self):
        """Enable every plugin recorded in settings, ignoring individual failures."""
        to_enable = settings.get_option('plugins/enabled', [])
        for plugin in to_enable:
            try:
                self.enable_plugin(plugin)
            except Exception:
                pass

    def teardown(self, main):
        """Call teardown(main) on every enabled plugin that provides it."""
        for (plugin_name, plugin) in self.enabled_plugins.items():
            if hasattr(plugin, 'teardown'):
                try:
                    plugin.teardown(main)
                except Exception:
                    logger.exception('Unable to tear down plugin %s', plugin_name)
# NOTE(review): the bare string below looks like the argument of a stripped
# @mock.patch('ecs_deploy.cli.get_client') decorator — restore it before use;
# as written this line is a no-op expression.
('ecs_deploy.cli.get_client')
def test_scale_ignore_warnings(get_client, runner):
    """scale --ignore-warnings succeeds despite deployment warnings in the output."""
    get_client.return_value = EcsTestClient('acces_key', 'secret_key', deployment_errors=True)
    result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2', '--ignore-warnings'))
    assert (not result.exception)
    assert (result.exit_code == 0)
    assert (u'Successfully changed desired count to: 2' in result.output)
    assert (u'WARNING: Service was unable to Lorem Ipsum' in result.output)
    assert (u'Continuing.' in result.output)
    assert (u'Scaling successful' in result.output)
class SlatwallXXX(Boxes):
    """Skeleton generator for a new slat-wall item.

    NOTE(review): 'XXX', '--XX' and 'DESCRIPTION' are template placeholders
    meant to be replaced by a concrete generator.
    """
    ui_group = 'SlatWall'

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.addSettingsArgs(edges.FingerJointSettings)
        self.addSettingsArgs(edges.SlatWallSettings)
        self.buildArgParser(x=100, sx='3*50', y=100, sy='3*50', h=100, hi=0)
        self.argparser.add_argument('--XX', action='store', type=float, default=0.5, help='DESCRIPTION')

    def render(self):
        # Instantiate slat-wall edge settings and register the edge objects,
        # keeping the hole-drawing callable for subclasses to use.
        s = edges.SlatWallSettings(self.thickness, True, **self.edgesettings.get('SlatWall', {}))
        s.edgeObjects(self)
        self.slatWallHolesAt = edges.SlatWallHoles(self, s)
def test_xdist_no_isolation(plugintester):
    """A trivial test passes in-process but fails to collect under xdist (-n 1)."""
    plugintester.makepyfile('def test_nothing(): assert True')
    result = plugintester.runpytest()
    result.assert_outcomes(passed=1)
    # Run again in a subprocess with one xdist worker; collection must fail.
    result = plugintester.runpytest_subprocess('-n 1')
    assert ('xdist workers failed to collect tests' in result.errlines[0])
def cmd_queue_pop(jobs: Jobs, queueid: str) -> None:
    """Pop the next request from a job queue and print its request id.

    Returns silently (after a warning) when the queue is paused and exits
    with status 1 when it is empty.  The popped request id is printed even
    when the job record's status looks inconsistent, so callers always learn
    which request was removed from the queue.
    """
    logger.info('Popping the next job from the queue...')
    try:
        reqid = jobs.queues[queueid].pop()
    except JobQueuePausedError:
        logger.warning('job queue is paused')
        return
    except JobQueueEmptyError:
        logger.error('job queue is empty')
        sys.exit(1)
    job = jobs.get(reqid)
    if not job:
        logger.warning('queued request (%s) not found', reqid)
        return
    status = job.get_status()
    if not status:
        # NOTE(review): same message as the missing-job case above — confirm
        # this should not say "has no status" instead.
        logger.warning('queued request (%s) not found', reqid)
    elif status is not Result.STATUS.PENDING:
        logger.warning('expected "pending" status for queued request %s, got %r', reqid, status)
    # Removed a dead `else: pass` branch; the request id is printed in all
    # remaining cases, as before.
    print(reqid)
def check_label_consistency_cot(task: SpanTask) -> List[SpanCoTExample]:
    """Check CoT span-task prompt examples for label consistency with the task config.

    Warns when negative examples use multiple distinct "null" labels or when
    example labels are missing from the task's label dict, then returns the
    prompt examples filtered down to spans whose normalized labels are known
    (configured labels plus the null labels), dropping examples left with no
    spans.
    """
    assert task.prompt_examples
    assert issubclass(task.prompt_example_type, SpanCoTExample)
    # Labels used by non-entity (negative) spans, keyed by normalized form.
    null_labels = {task.normalizer(entity.label): entity.label for example in task.prompt_examples for entity in example.spans if (not entity.is_entity)}
    if (len(null_labels) > 1):
        warnings.warn(f"Negative examples contain multiple negative labels: {', '.join(null_labels.keys())}.")
    # Labels used by positive (entity) spans in the examples.
    example_labels = {task.normalizer(entity.label): entity.label for example in task.prompt_examples for entity in example.spans if entity.is_entity}
    unspecified_labels = {example_labels[key] for key in (set(example_labels.keys()) - set(task.label_dict.keys()))}
    if (not (set(example_labels.keys()) <= set(task.label_dict.keys()))):
        warnings.warn(f'Examples contain labels that are not specified in the task configuration. The latter contains the following labels: {sorted(list(set(task.label_dict.values())))}. Labels in examples missing from the task configuration: {sorted(list(unspecified_labels))}. Please ensure your label specification and example labels are consistent.')
    # Accept configured labels plus the observed null labels.
    include_labels = dict(task.label_dict)
    include_labels.update(null_labels)
    return [example for example in [task.prompt_example_type(text=example.text, spans=[entity for entity in example.spans if (task.normalizer(entity.label) in include_labels)]) for example in task.prompt_examples] if len(example.spans)]
class TaskDetailWidget(QtWidgets.QWidget):
    """Widget displaying the details of a Stalker ``Task``.

    Shows name, type, creator/updater, timing and priority; for ``Shot``
    tasks it additionally shows sequence and cut in/out information.

    NOTE(review): ``task`` was defined twice in the original source (a getter
    and a setter body without decorators), so the second ``def`` silently
    replaced the first and ``self.task = task`` never ran the setter logic.
    The ``@property``/``@task.setter`` decorators are restored here.
    """

    def __init__(self, task=None, parent=None, **kwargs):
        self._task = None
        self.parent = parent
        super(TaskDetailWidget, self).__init__(parent=parent)
        # UI members, created in _setup_ui()
        self.vertical_layout = None
        self.form_layout = None
        self.name_label = None
        self.name_field = None
        self.type_label = None
        self.type_field = None
        # guards type_field_changed() while the combo box is being refilled
        self.type_field_is_updating = False
        self.created_by_label = None
        self.created_by_field = None
        self.updated_by_label = None
        self.updated_by_field = None
        self.timing_label = None
        self.timing_field = None
        self.priority_label = None
        self.priority_field = None
        self.cut_in_label = None
        self.cut_in_field = None
        self.cut_out_label = None
        self.cut_out_field = None
        self.sequence_label = None
        self.sequence_field = None
        self._setup_ui()
        # runs the task setter below, filling all fields
        self.task = task

    def _setup_ui(self):
        """Create the form layout and all label/field widget pairs."""
        self.setStyleSheet('\n            QLabel[labelField="true"] {\n                font-weight: bold;\n            }\n        ')
        self.vertical_layout = QtWidgets.QVBoxLayout(self)
        from anima.ui.lib import QtCore
        self.form_layout = QtWidgets.QFormLayout()
        self.form_layout.setLabelAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
        self.vertical_layout.addLayout(self.form_layout)
        # row counter for the form layout
        i = (- 1)
        i += 1
        self.name_label = QtWidgets.QLabel(self)
        self.name_label.setText('Name')
        self.name_label.setProperty('labelField', True)
        self.name_field = QtWidgets.QLineEdit(self)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.name_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.name_field)
        i += 1
        self.type_label = QtWidgets.QLabel(self)
        self.type_label.setText('Type')
        self.type_label.setProperty('labelField', True)
        self.type_field = QtWidgets.QComboBox(self)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.type_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.type_field)
        i += 1
        self.created_by_label = QtWidgets.QLabel(self)
        self.created_by_label.setText('Created By')
        self.created_by_label.setProperty('labelField', True)
        self.created_by_field = QtWidgets.QLabel(self)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.created_by_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.created_by_field)
        i += 1
        self.updated_by_label = QtWidgets.QLabel(self)
        self.updated_by_label.setText('Updated By')
        self.updated_by_label.setProperty('labelField', True)
        self.updated_by_field = QtWidgets.QLabel(self)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.updated_by_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.updated_by_field)
        i += 1
        self.timing_label = QtWidgets.QLabel(self)
        self.timing_label.setText('Timing')
        self.timing_label.setProperty('labelField', True)
        self.timing_field = QtWidgets.QLabel(self)
        # placeholder text; replaced by the task setter
        self.timing_field.setText('23 Hours ago -> an Hour ago!')
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.timing_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.timing_field)
        i += 1
        self.priority_label = QtWidgets.QLabel(self)
        self.priority_label.setText('Priority')
        self.priority_label.setProperty('labelField', True)
        self.priority_field = QtWidgets.QLabel(self)
        # placeholder text; replaced by the task setter
        self.priority_field.setText('950')
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.priority_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.priority_field)
        i += 1
        self.sequence_label = QtWidgets.QLabel(self)
        self.sequence_label.setText('Sequence')
        self.sequence_label.setProperty('labelField', True)
        from anima.ui.widgets.sequence import SequenceComboBox
        self.sequence_field = SequenceComboBox(self)
        self.sequence_field.currentIndexChanged.connect(self.sequence_changed)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.sequence_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.sequence_field)
        i += 1
        self.cut_in_label = QtWidgets.QLabel(self)
        self.cut_in_label.setText('Cut In')
        self.cut_in_label.setProperty('labelField', True)
        self.cut_in_field = QtWidgets.QLabel(self)
        self.cut_in_field.setText('1')
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.cut_in_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.cut_in_field)
        i += 1
        self.cut_out_label = QtWidgets.QLabel(self)
        self.cut_out_label.setText('Cut Out')
        self.cut_out_label.setProperty('labelField', True)
        self.cut_out_field = QtWidgets.QLabel(self)
        self.cut_out_field.setText('1')
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.LabelRole, self.cut_out_label)
        self.form_layout.setWidget(i, QtWidgets.QFormLayout.FieldRole, self.cut_out_field)
        # old-style signal connection (PyQt4/PySide1 API)
        QtCore.QObject.connect(self.type_field, QtCore.SIGNAL('currentIndexChanged(QString)'), self.type_field_changed)

    @property
    def task(self):
        """The currently displayed Stalker ``Task`` (or None)."""
        return self._task

    @task.setter
    def task(self, task):
        """Store the task and fill every UI field from it.

        Any non-``Task`` value (including None) clears the stored task.
        """
        from stalker import Task
        if isinstance(task, Task):
            self._task = task
            self.name_field.setText(task.name)
            self._fill_task_type_widget()
            if task.created_by:
                self.created_by_field.setText(task.created_by.name)
            if task.updated_by:
                self.updated_by_field.setText(task.updated_by.name)
            self.timing_field.setText(('%s - %s' % (task.start.strftime('%d-%m-%Y %H:%M'), task.end.strftime('%d-%m-%Y %H:%M'))))
            self.priority_field.setText(('%s' % task.priority))
            from stalker import Shot
            if isinstance(task, Shot):
                # Shots additionally expose sequence and cut information
                self.sequence_field.project = task.project
                self.sequence_field.set_current_sequence((task.sequences[0] if task.sequences else None))
                self.sequence_label.setVisible(True)
                self.sequence_field.setVisible(True)
                self.cut_in_field.setText(('%s' % task.cut_in))
                self.cut_out_field.setText(('%s' % task.cut_out))
                self.cut_in_label.setVisible(True)
                self.cut_in_field.setVisible(True)
                self.cut_out_label.setVisible(True)
                self.cut_out_field.setVisible(True)
            else:
                self.sequence_label.setVisible(False)
                self.sequence_field.setVisible(False)
                self.cut_in_label.setVisible(False)
                self.cut_in_field.setVisible(False)
                self.cut_out_label.setVisible(False)
                self.cut_out_field.setVisible(False)
        else:
            self._task = None

    def _fill_task_type_widget(self):
        """Populate the type combo box with all Types valid for this task."""
        if (self.task is None):
            return
        from stalker import Type
        from stalker.db.session import DBSession
        all_types = DBSession.query(Type.id, Type.name).filter((Type.target_entity_type == self.task.entity_type)).all()
        # suppress type_field_changed() while programmatically refilling
        self.type_field_is_updating = True
        self.type_field.clear()
        self.type_field.addItem('-- No Type --', (- 1))
        for type_data in all_types:
            self.type_field.addItem(type_data.name, type_data.id)
        if self.task.type:
            index = self.type_field.findData(self.task.type.id)
            if (index != (- 1)):
                self.type_field.setCurrentIndex(index)
        self.type_field_is_updating = False

    def type_field_changed(self):
        """Persist a user-selected type to the task."""
        if (self.task is None):
            return
        if self.type_field_is_updating:
            return
        assert isinstance(self.type_field, QtWidgets.QComboBox)
        index = self.type_field.currentIndex()
        type_id = self.type_field.itemData(index)
        if (type_id != (- 1)):
            from stalker import Type
            type_ = Type.query.get(type_id)
            self.task.type = type_
            from stalker.db.session import DBSession
            DBSession.save(self.task)

    def sequence_changed(self):
        """Persist a user-selected sequence to the shot."""
        from stalker import Shot
        if ((self.task is None) or (not isinstance(self.task, Shot))):
            return
        seq = self.sequence_field.get_current_sequence()
        if (seq is not None):
            self.task.sequences = [seq]
            from stalker.db.session import DBSession
            DBSession.commit()
class CssProperties():
    """Accessor for the CSS entries stored on a shared page context."""

    def __init__(self, context):
        self._context = context
        # __map_css maps a user-supplied id to the fragment's list index
        self.__map_css = {}
        self._dyn_cls = set()

    def text(self) -> str:
        """Return every registered CSS fragment joined by newlines."""
        return '\n'.join(self._context['css']['text'])

    def add_text(self, text: str, map_id: str = None, replace: bool = False):
        """Append a CSS fragment, optionally keyed by map_id.

        A fragment already registered under map_id is only overwritten when
        replace is True; otherwise the call is a no-op for known ids.
        """
        already_mapped = (map_id is not None) and (map_id in self.__map_css)
        if not already_mapped:
            fragments = self._context['css']['text']
            fragments.append(text)
            if map_id is not None:
                self.__map_css[map_id] = len(fragments) - 1
        elif replace:
            self._context['css']['text'][self.__map_css[map_id]] = text

    def remove_text(self, map_id: str):
        """Remove the fragment registered under map_id (no-op if unknown)."""
        if map_id not in self.__map_css:
            return
        removed_index = self.__map_css.pop(map_id)
        self._context['css']['text'].pop(removed_index)
        # every fragment stored after the removed one shifts down by one
        self.__map_css = {
            key: (pos - 1 if pos > removed_index else pos)
            for key, pos in self.__map_css.items()
        }

    def add_builders(self, builder_def: str):
        """Register one builder (or a list of builders) for CSS resources."""
        js_context = self._context['js']
        if 'builders_css' not in js_context:
            js_context['builders_css'] = OrderedSet()
        if isinstance(builder_def, list):
            js_context['builders_css'].extend(builder_def)
        else:
            js_context['builders_css'].append(builder_def)

    def container_style(self, css: dict):
        """Merge css declarations into the page container's style."""
        self._context['css']['container'].update(css)

    def font_face(self, font_family: str, src, stretch: str = 'normal', style: str = 'normal', weight: str = 'normal'):
        """Declare an @font-face rule for font_family loaded from src."""
        self._context['css']['font-face'][font_family] = {
            'src': ('url(%s)' % src),
            'font-stretch': stretch,
            'font-style': style,
            'font-weight': weight,
        }
def add_QueryServicer_to_server(servicer, server):
    # Auto-generated gRPC registration helper for ibc.core.port.v1.Query.
    # Wires the servicer's AppVersion unary-unary RPC into the given server
    # using the protobuf (de)serializers from the generated pb2 module.
    rpc_method_handlers = {'AppVersion': grpc.unary_unary_rpc_method_handler(servicer.AppVersion, request_deserializer=ibc_dot_core_dot_port_dot_v1_dot_query__pb2.QueryAppVersionRequest.FromString, response_serializer=ibc_dot_core_dot_port_dot_v1_dot_query__pb2.QueryAppVersionResponse.SerializeToString)}
    generic_handler = grpc.method_handlers_generic_handler('ibc.core.port.v1.Query', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
def get_version():
    """Return the version string parsed from ``pywhisper/__init__.py``.

    Reads the ``__version__ = "..."`` assignment with a regex instead of
    importing the package, so setup can run without triggering package
    imports.

    Raises:
        AttributeError: if no ``__version__`` assignment is found
            (``re.search`` returns None).
    """
    current_dir = os.path.abspath(os.path.dirname(__file__))
    version_file = os.path.join(current_dir, 'pywhisper', '__init__.py')
    # builtin open() replaces the dated io.open alias; raw string keeps the
    # quote-matching regex readable
    with open(version_file, encoding='utf-8') as f:
        match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M)
        return match.group(1)
def contains(list_one, list_two):
    """Return True if list_two occurs contiguously inside list_one.

    An empty list_two is contained in any sequence, including an empty one.
    Elements are compared pairwise with ==, so mixed sequence types work.
    """
    needle_len = len(list_two)
    if needle_len == 0:
        return True
    haystack_len = len(list_one)
    if needle_len > haystack_len:
        return False
    # slide a window over list_one and compare element-by-element
    return any(
        all(list_one[start + offset] == list_two[offset] for offset in range(needle_len))
        for start in range(haystack_len - needle_len + 1)
    )
class Instance():
    """A single passivbot instance (user/symbol pair) managed as an OS process.

    Process state (pid, running) is resolved lazily through ProcessManager
    and cached until reset_state() is called.
    """

    def __init__(self, config: Dict):
        self.user = config.get('user')
        self.symbol = config.get('symbol')
        self.config = config.get('config')
        self.flags = config.get('flags', {})
        self.is_in_config_ = bool(config.get('is_in_config', True))
        # lazily resolved / cached process state
        self.is_running_ = None
        self.pid_ = None
        # pid observed by the last start(); initialized here so the attribute
        # always exists (it was previously created only inside start())
        self.proc_id = None

    def get_args(self) -> List[str]:
        """Positional arguments passed to the bot process."""
        return [self.user, self.symbol, self.config]

    def get_flags(self) -> List[str]:
        """Flag/value pairs, skipping flags whose value is None."""
        flags = []
        for (k, v) in self.flags.items():
            if (v is not None):
                flags.extend([k, str(v)])
        return flags

    def get_id(self) -> str:
        """Unique instance identifier: '<user>-<symbol>'."""
        return f'{self.get_user()}-{self.get_symbol()}'

    def get_symbol(self) -> str:
        return self.symbol

    def get_user(self) -> str:
        return self.user

    def get_config(self) -> str:
        return self.config

    def get_pid_signature(self) -> str:
        """Regex anchored at start-of-command used to find this process."""
        signature = INSTANCE_SIGNATURE_BASE.copy()
        signature.extend([self.user, self.symbol])
        return f"^{' '.join(signature)}"

    def get_pid(self) -> int:
        """PID of the running process (cached; None when not running)."""
        if (self.pid_ is None):
            self.pid_ = ProcessManager.get_pid(self.get_pid_signature())
        return self.pid_

    def get_pid_str(self) -> str:
        """PID as a display string, '-' when not running."""
        pid = self.get_pid()
        return (str(pid) if (pid is not None) else '-')

    def get_cmd(self) -> List[str]:
        """Full command line used to launch this instance."""
        cmd = INSTANCE_SIGNATURE_BASE.copy()
        cmd.extend(self.get_args())
        cmd.extend(self.get_flags())
        return cmd

    def get_status(self) -> str:
        return ('running' if self.is_running() else 'stopped')

    def is_running(self) -> bool:
        """Whether a matching process exists (cached)."""
        if (self.is_running_ is None):
            self.is_running_ = ProcessManager.is_running(self.get_pid_signature())
        return self.is_running_

    def is_in_config(self, value=None) -> bool:
        """Get (or, when value is given, set) the in-config flag."""
        if (value is not None):
            self.is_in_config_ = bool(value)
        return self.is_in_config_

    def match(self, query: List[str], exact: bool=False) -> bool:
        """True when every query condition matches this instance.

        Conditions are either 'key=prefix' pairs (matched against id, pid,
        symbol, user, status) or free text searched in all values.
        Case-insensitive unless exact is True.
        """
        parameters = {'id': self.get_id(), 'pid': self.get_pid_str(), 'symbol': self.get_symbol(), 'user': self.get_user(), 'status': self.get_status()}
        if (not exact):
            parameters = {k: v.lower() for (k, v) in parameters.items()}
            query = [q.lower() for q in query]
        matches = 0
        for condition in query:
            if ('=' in condition):
                (k, v) = condition.split('=')
                if ((k in parameters) and parameters[k].startswith(v)):
                    matches += 1
                    continue
            if any(((condition in v) for v in parameters.values())):
                matches += 1
        return (matches == len(query))

    def apply_flags(self, flags: Dict[(str, Any)]):
        """Merge the given flags into this instance's flags (None = no-op)."""
        if (flags is None):
            return
        for (key, value) in flags.items():
            self.flags[key] = value

    def reset_state(self):
        """Drop cached pid/running state so it is re-resolved on next use."""
        self.is_running_ = None
        self.pid_ = None

    def start(self, silent: bool=False) -> bool:
        """Launch the instance as a detached (nohup) process.

        Returns True once the new process PID could be observed; False when
        the log directory cannot be created or the process did not appear.
        """
        self.reset_state()
        log_file = os.path.join(PASSIVBOT_PATH, f'logs/{self.get_user()}/{self.get_symbol()}.log')
        try:
            # exist_ok avoids the race between an exists() check and makedirs()
            os.makedirs(os.path.dirname(log_file), exist_ok=True)
        except OSError:
            # was a bare `except:`; only filesystem errors are expected here
            return False
        cmd = self.get_cmd()
        if (silent is True):
            log_file = '/dev/null'
        ProcessManager.add_nohup_process(cmd, log_file)
        self.proc_id = ProcessManager.wait_pid_start(self.get_pid_signature())
        if (self.proc_id is None):
            return False
        return True

    def stop(self, force=False) -> bool:
        """Stop the running process. Returns False when nothing was running."""
        self.reset_state()
        if (not self.is_running()):
            return False
        pid = ProcessManager.get_pid(self.get_pid_signature())
        if (pid is None):
            return False
        ProcessManager.kill(pid, force)
        return True

    def restart(self, force=False, silent=False) -> bool:
        """Stop (if running) and start again; False when the stop failed."""
        self.reset_state()
        if self.is_running():
            stopped = self.stop(force)
            if (not stopped):
                return False
        return self.start(silent)
def print_new_versions(strict=False):
    """Print available requirement upgrades in three groups.

    Groups: requirements with strictly newer releases, requirements with
    only equivalent releases (shown in parentheses), and pinned requirements.
    With strict=True the non-strict 'same version' comparison is skipped.
    """
    new_updates = []
    same_updates = []
    pinned_updates = []
    for req in everything_in(all_reqs):
        newer_versions = []
        equivalent_versions = []
        for ver_str in all_versions(req):
            candidate = ver_str_to_tuple(ver_str)
            if newer(candidate, min_versions[req], strict=True):
                newer_versions.append(ver_str)
            elif (not strict) and newer(candidate, min_versions[req]):
                equivalent_versions.append(ver_str)
        # e.g.  pkg = 1.2 -> 1.3, (1.2.1)   (parenthesised = same version)
        listed = newer_versions + [('(' + v + ')') for v in equivalent_versions]
        update_str = (
            pretty_req(req) + ' = ' + ver_tuple_to_str(min_versions[req])
            + ' -> ' + ', '.join(listed)
        )
        if req in pinned_min_versions:
            pinned_updates.append(update_str)
        elif newer_versions:
            new_updates.append(update_str)
        elif equivalent_versions:
            same_updates.append(update_str)
    print('\n'.join(new_updates))
    print()
    print('\n'.join(same_updates))
    print()
    print('\n'.join(pinned_updates))
class TestGenericFsUnpacker(TestUnpackerBase):
    """Integration tests for the 'genericFS' unpacker plugin.

    Each image fixture contains the standard unpack file set; compressed
    fixtures (.xz) are decompressed to a temporary file first.
    """

    def test_unpacker_selection_generic(self):
        # all of these filesystem MIME types must route to the genericFS plugin
        self.check_unpacker_selection('filesystem/btrfs', 'genericFS')
        self.check_unpacker_selection('filesystem/dosmbr', 'genericFS')
        self.check_unpacker_selection('filesystem/f2fs', 'genericFS')
        self.check_unpacker_selection('filesystem/jfs', 'genericFS')
        self.check_unpacker_selection('filesystem/minix', 'genericFS')
        self.check_unpacker_selection('filesystem/reiserfs', 'genericFS')
        self.check_unpacker_selection('filesystem/romfs', 'genericFS')
        self.check_unpacker_selection('filesystem/udf', 'genericFS')
        self.check_unpacker_selection('filesystem/xfs', 'genericFS')

    def test_extraction_romfs(self):
        # romfs fixture is stored uncompressed
        self.check_unpacking_of_standard_unpack_set((TEST_DATA_DIR / 'romfs.img'))

    def test_extraction_btrfs(self):
        with decompress_test_file((TEST_DATA_DIR / 'btrfs.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extraction_jfs(self):
        with decompress_test_file((TEST_DATA_DIR / 'jfs.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extraction_minix(self):
        with decompress_test_file((TEST_DATA_DIR / 'minix.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extraction_reiserfs(self):
        with decompress_test_file((TEST_DATA_DIR / 'reiserfs.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extraction_udf(self):
        with decompress_test_file((TEST_DATA_DIR / 'udf.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extraction_xfs(self):
        with decompress_test_file((TEST_DATA_DIR / 'xfs.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extraction_f2fs(self):
        with decompress_test_file((TEST_DATA_DIR / 'f2fs.img.xz')) as test_file:
            self.check_unpacking_of_standard_unpack_set(test_file, additional_prefix_folder='get_files_test')

    def test_extract_multiple_partitions(self):
        # an MBR image with three partitions must yield one file per partition,
        # each extracted into its own partition_<n> folder
        with decompress_test_file((TEST_DATA_DIR / 'mbr.img.xz')) as test_file:
            (files, meta_data) = self.unpacker.extract_files_from_file(str(test_file), self.tmp_dir.name)
            expected = [str(Path(self.tmp_dir.name, *file_path)) for file_path in [('partition_0', 'test_data_file.bin'), ('partition_1', 'yara_test_file'), ('partition_2', 'testfile1')]]
            assert ('output' in meta_data)
            assert (len(files) == 3), 'file number incorrect'
            assert (sorted(files) == sorted(expected)), 'wrong files extracted'
class SlackNotification(object):
    """Posts ECS deployment progress notifications to a Slack webhook.

    Notifications are suppressed when no webhook URL is configured or when
    the service/rule name does not match the configured regex.
    """

    def __init__(self, url, service_match):
        self.__url = url
        # empty pattern matches everything when no filter is configured
        self.__service_match_re = re.compile((service_match or ''))
        # start time used to report the deployment duration
        self.__timestamp_start = datetime.utcnow()

    def _should_notify(self, service, rule):
        """True when a URL is configured and the service/rule matches the filter.

        Fix: the original searched ``service or rule`` which is None when both
        are absent and made ``re.search`` raise TypeError.
        """
        if not self.__url:
            return False
        return bool(self.__service_match_re.search(service or rule or ''))

    def get_payload(self, title, messages, color=None):
        """Build the Slack webhook payload for a list of (title, value) pairs."""
        fields = [
            {'title': message[0], 'value': message[1], 'short': True}
            for message in messages
        ]
        payload = {'username': 'ECS Deploy', 'attachments': [{'pretext': title, 'color': color, 'fields': fields}]}
        return payload

    def notify_start(self, cluster, tag, task_definition, comment, user, service=None, rule=None):
        """Announce a starting deployment, including task-definition changes.

        Returns the webhook response, or None when notification is skipped.
        Raises SlackException when Slack does not answer with HTTP 200.
        """
        if not self._should_notify(service, rule):
            return
        messages = [('Cluster', cluster)]
        if service:
            messages.append(('Service', service))
        if rule:
            messages.append(('Scheduled Task', rule))
        if tag:
            messages.append(('Tag', tag))
        if user:
            messages.append(('User', user))
        if comment:
            messages.append(('Comment', comment))
        for diff in task_definition.diff:
            # the image diff is redundant when it only reflects the new tag
            if (tag and (diff.field == 'image') and diff.value.endswith((':' + tag))):
                continue
            if (diff.field == 'environment'):
                # never leak environment variables to Slack
                messages.append(('Environment', '_sensitive (therefore hidden)_'))
                continue
            messages.append((diff.field, diff.value))
        payload = self.get_payload('Deployment has started', messages)
        response = requests.post(self.__url, json=payload)
        if (response.status_code != 200):
            raise SlackException('Notifying deployment failed')
        return response

    def notify_success(self, cluster, revision, service=None, rule=None):
        """Announce a successful deployment with revision and duration.

        Returns the webhook response (now consistent with notify_start and
        notify_failure), or None when notification is skipped.
        """
        if not self._should_notify(service, rule):
            return
        duration = (datetime.utcnow() - self.__timestamp_start)
        messages = [('Cluster', cluster)]
        if service:
            messages.append(('Service', service))
        if rule:
            messages.append(('Scheduled Task', rule))
        messages.append(('Revision', revision))
        messages.append(('Duration', str(duration)))
        payload = self.get_payload('Deployment finished successfully', messages, 'good')
        response = requests.post(self.__url, json=payload)
        if (response.status_code != 200):
            raise SlackException('Notifying deployment failed')
        return response

    def notify_failure(self, cluster, error, service=None, rule=None):
        """Announce a failed deployment with the error and duration.

        Returns the webhook response, or None when notification is skipped.
        """
        if not self._should_notify(service, rule):
            return
        duration = (datetime.utcnow() - self.__timestamp_start)
        messages = [('Cluster', cluster)]
        if service:
            messages.append(('Service', service))
        if rule:
            messages.append(('Scheduled Task', rule))
        messages.append(('Duration', str(duration)))
        messages.append(('Error', error))
        payload = self.get_payload('Deployment failed', messages, 'danger')
        response = requests.post(self.__url, json=payload)
        if (response.status_code != 200):
            raise SlackException('Notifying deployment failed')
        return response
class JSONRenderer(BaseRenderer):
    """Render response payloads as JSON via the Flask app's JSON provider."""

    media_type = 'application/json'
    charset = None

    def render(self, data, media_type, **options):
        """Serialize data to JSON.

        The indent comes from the media type's ``indent`` parameter, clamped
        to the 0..8 range; an explicit ``indent`` option takes precedence.
        Malformed or missing parameters fall back to compact output.
        """
        try:
            requested = int(media_type.params['indent'])
        except (KeyError, ValueError, TypeError):
            indent = None
        else:
            indent = min(max(requested, 0), 8)
        indent = options.get('indent', indent)
        return current_app.json.dumps(data, ensure_ascii=False, indent=indent)
class OptionSeriesAreasplineSonificationDefaultspeechoptionsMappingPitch(Options):
    """Pitch-mapping options for areaspline sonification default speech.

    NOTE(review): every option was defined twice (getter and setter bodies
    without decorators), so each second ``def`` silently replaced the first
    and attribute assignment never reached ``_config``. The ``@property`` /
    setter decorators are restored here.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get('undefined')

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get('undefined')

    @max.setter
    def max(self, text: str):
        self._config(text, js_type=False)

    @property
    def min(self):
        return self._config_get('undefined')

    @min.setter
    def min(self, text: str):
        self._config(text, js_type=False)

    @property
    def within(self):
        return self._config_get('undefined')

    @within.setter
    def within(self, text: str):
        self._config(text, js_type=False)
class BijectiveTensor(Tensor):
    """A Tensor that remembers the bijector transform that produced it.

    Instances form a chain back to the original (non-bijective) tensor via
    ``parent``, allowing flows to reuse cached forward/inverse results.

    Fixes: ``parent`` is consumed as an attribute (``self.parent``,
    ``child.parent``) but was defined as a plain method — restored as a
    ``@property``. ``__torch_function__`` takes ``cls`` but lacked
    ``@classmethod`` — restored.
    """

    def __repr__(self, *, tensor_contents: Any=None) -> Any:
        r_str = super(BijectiveTensor, self).__repr__().replace('tensor', 'bijective_tensor')
        return r_str

    def register(self, input: Tensor, output: Tensor, context: Optional[Tensor], bijector: 'Bijector', log_detJ: Optional[Tensor], mode: str) -> 'BijectiveTensor':
        """Attach flow metadata to this tensor and return self.

        Raises:
            RuntimeError: if mode is neither 'forward' nor 'inverse'.
        """
        self._input = input
        self._output = output
        self._context = context
        self._bijector = bijector
        self._log_detJ = log_detJ
        self._mode = mode
        if (not (self.from_forward() or self.from_inverse())):
            raise RuntimeError(f"BijectiveTensor mode must be either `'forward'` or `'inverse'`. got {self._mode}")
        return self

    @classmethod
    def __torch_function__(cls: Type['BijectiveTensor'], func: Any, types: Any, args: Any=(), kwargs: Any=None) -> Union[(Any, Tensor)]:
        """Dispatch torch ops as plain Tensors so results drop the flow metadata."""
        if (kwargs is None):
            kwargs = {}
        types = tuple(((Tensor if (_type is BijectiveTensor) else _type) for _type in types))
        return Tensor.__torch_function__(func, types, args, kwargs)

    def check_bijector(self, bijector: 'Bijector') -> bool:
        """True if bijector appears anywhere in this tensor's flow chain."""
        is_bijector = (bijector in tuple(self.bijectors()))
        return is_bijector

    def bijectors(self) -> Iterator['Bijector']:
        """Yield this tensor's bijector, then every ancestor's bijector."""
        (yield self._bijector)
        for parent in self.parents():
            if isinstance(parent, BijectiveTensor):
                (yield parent._bijector)

    def get_parent_from_bijector(self, bijector: 'Bijector') -> Tensor:
        """Return the parent tensor produced/consumed by the given bijector.

        Raises:
            RuntimeError: when the bijector is not part of this flow chain.
        """
        if (self._bijector is bijector):
            return self.parent
        for parent in self.parents():
            if (not isinstance(parent, BijectiveTensor)):
                break
            if (parent._bijector is bijector):
                return parent.parent
        raise RuntimeError('bijector not found in flow')

    def check_context(self, context: Optional[Tensor]) -> bool:
        """True if this tensor was produced under the given context (identity)."""
        return (self._context is context)

    def from_forward(self) -> bool:
        return (self._mode == 'forward')

    def from_inverse(self) -> bool:
        return (self._mode == 'inverse')

    def detach_from_flow(self) -> Tensor:
        """Return the flow-side tensor, asserting it is not itself bijective.

        Raises:
            RuntimeError: when the detached tensor is still a BijectiveTensor.
        """
        detached_tensor = (self._output if self.from_forward() else self._input)
        if isinstance(detached_tensor, BijectiveTensor):
            raise RuntimeError('the detached tensor is an instance of BijectiveTensor.')
        return detached_tensor

    def has_ancestor(self, tensor: Tensor) -> bool:
        """True if tensor appears strictly earlier in this flow chain."""
        if (tensor is self):
            # self is not its own ancestor
            return False
        elif (self.from_forward() and (self._input is tensor)):
            return True
        elif (self.from_inverse() and (self._output is tensor)):
            return True
        elif (self.from_forward() and isinstance(self._input, BijectiveTensor)):
            return self._input.has_ancestor(tensor)
        elif (self.from_inverse() and isinstance(self._output, BijectiveTensor)):
            return self._output.has_ancestor(tensor)
        else:
            return False

    def log_detJ(self) -> Optional[Tensor]:
        """Cached log|det J| recorded at registration (may be None)."""
        return self._log_detJ

    @property
    def parent(self) -> Tensor:
        """The tensor this one was computed from (input if forward, else output)."""
        if self.from_forward():
            return self._input
        else:
            return self._output

    def parents(self) -> Iterator[Tensor]:
        """Yield the chain of parent tensors, ending at the first non-bijective one."""
        child: Union[(Tensor, BijectiveTensor)] = self
        while True:
            assert isinstance(child, BijectiveTensor)
            child = parent = child.parent
            (yield parent)
            if (not isinstance(child, BijectiveTensor)):
                break
class MoveNet(nn.Module):
    """MoveNet pose-estimation model: convolutional backbone plus task header."""

    def __init__(self, num_classes=17, width_mult=1.0, mode='train'):
        super(MoveNet, self).__init__()
        self.backbone = Backbone()
        self.header = Header(num_classes, mode)
        self._initialize_weights()

    def forward(self, x):
        """Run the backbone, then the header, on the input batch."""
        features = self.backbone(x)
        return self.header(features)

    def _initialize_weights(self):
        """Kaiming-init conv weights, zero conv biases, unit-init batch norms."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight.data, nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
class ComboboxSelectField(HiddenField):
    """Hidden form field backed by a remote-query combobox widget.

    The client-side widget queries ``query_endpoint_path`` (sending the text
    under ``query_parameter_name``) and uses each result's ``value_attr`` /
    ``label_attr`` attributes as option value and label.
    """

    def __init__(self, *args, query_endpoint_path: str, query_parameter_name: str = 'query', value_attr: str = 'id', label_attr: str = 'name', **kwargs):
        super().__init__(*args, **kwargs)
        self.query_endpoint_path = query_endpoint_path
        self.query_parameter_name = query_parameter_name
        self.value_attr = value_attr
        self.label_attr = label_attr

    def _value(self):
        """Return the bound object's value attribute, or '' when unavailable."""
        if self.data is None:
            return ''
        # missing attribute degrades to the empty string, as before
        return getattr(self.data, self.value_attr, '')
def get_fft_parameters(fft_grid, input_grid):
    """Recover the parameters relating `fft_grid` to the FFT grid of `input_grid`.

    Inverse of `make_fft_grid`: given a regular input grid and a grid assumed
    to be an FFT grid of it, reconstruct the zero-padding factor `q`, the
    field-of-view fraction `fov` and the origin `shift` per dimension.

    Returns
    -------
    tuple
        (q, fov, shift) arrays, one entry per grid dimension.

    Raises
    ------
    ValueError
        If either grid is not regular, or if `fft_grid` cannot be an FFT grid
        of `input_grid` (q < 1, non-integer zero-padding, or fov > 1).
    """
    if (not input_grid.is_regular):
        raise ValueError('The input grid must be regular to reconstruct an fft grid.')
    if (not fft_grid.is_regular):
        raise ValueError('The fft grid is not regular and therefore cannot be an fft grid.')
    # q = (natural FFT sample spacing 2*pi / (delta * dims)) / actual spacing
    q = (((2 * np.pi) / (input_grid.delta * input_grid.dims)) / fft_grid.delta)
    if np.any((q < 1)):
        raise ValueError(f'fft_grid is not an FFT grid of input_grid: q of {q} would be < 1.')
    # zero-padding must multiply the input dims by an (almost exact) integer
    zeropadded_dims = (q * input_grid.dims)
    if np.any((np.abs((zeropadded_dims - np.round(zeropadded_dims))) > 1e-10)):
        raise ValueError(f'fft_grid is not an FFT grid of input_grid: q of {q} does not correspond to an integer zeropadding.')
    # fraction of the zero-padded spectrum covered by fft_grid
    fov = (fft_grid.dims / zeropadded_dims)
    if np.any((fft_grid.dims > (zeropadded_dims + 0.5).astype('int'))):
        raise ValueError(f'fft_grid is not an FFT grid of input_grid: fov of {fov} would be > 1 .')
    # round-trip through make_fft_grid to detect dims lost to rounding, then
    # nudge fov so the reconstructed grid reproduces fft_grid.dims exactly
    dummy_fft_grid = make_fft_grid(input_grid, q, fov)
    wrong_dims = (fft_grid.dims != dummy_fft_grid.dims)
    fov[wrong_dims] = ((fft_grid.dims + 0.5) / (input_grid.dims * q))[wrong_dims]
    # offset of fft_grid.zero from a symmetric, centered grid origin
    # (the mod term handles odd/even dimension centering)
    shift = (fft_grid.zero - (fft_grid.delta * (((- fft_grid.dims) / 2) + (np.mod(fft_grid.dims, 2) * 0.5))))
    return (q, fov, shift)
class StoredFilter(Filter):
    """A named, persistable Filter with JSON/record serialization helpers.

    Fixes: ``from_file`` and ``from_record`` take no ``self``/``cls`` and are
    clearly factory helpers — ``@staticmethod`` restored. The bare
    ``-> StoredFilter`` return annotations are quoted so they do not raise
    NameError when evaluated during class-body execution.
    """

    def __init__(self, name: str, description: str='', **kwargs: Any) -> None:
        """Create a stored filter; name is mandatory, description may be ''."""
        self.name = name
        if (self.name is None):
            raise FilterValidationException('Error: A StoredFilter must have a name')
        self.description = description
        if (self.description is None):
            raise FilterValidationException('Error: A StoredFilter description cannot be `None`')
        super(StoredFilter, self).__init__(**kwargs)

    @staticmethod
    def from_file(input_path: Path) -> 'StoredFilter':
        """Load a StoredFilter from a JSON file on disk."""
        json_blob: Dict[(str, Any)] = json.loads(input_path.read_text())
        return StoredFilter(**json_blob)

    @staticmethod
    def from_record(record: 'FilterRecord') -> 'StoredFilter':
        """Build a StoredFilter from a database record (description may be NULL)."""
        return StoredFilter(record.name, (record.description or ''), **json.loads(record.json))

    def to_record(self) -> 'FilterRecord':
        """Serialize this filter into a FilterRecord row."""
        return FilterRecord(name=self.name, description=self.description, json=self.to_json())

    def to_file(self) -> str:
        """Return a pretty-printed JSON dump of the non-default attributes.

        Falsy values and the ['%'] wildcard default are omitted.
        """
        output_json: Dict[(str, Any)] = {attribute: value for (attribute, value) in self.__dict__.items() if (value and (value != ['%']))}
        return json.dumps(output_json, indent=4)
class Order(OrderBase):
    """Sorting helper: turns keyword specs into cmp-style comparators.

    Each keyword maps a field name to 'ascending' (or None), 'descending',
    a custom comparator callable, or an explicit list/tuple giving the
    desired order of values.
    """

    def build_actions(self, kwargs):
        """Build a dict of cmp-style comparators, one entry per kwargs field."""
        actions = {}

        def ascending(a, b):
            # classic cmp(): 0 equal, 1 greater, -1 less; raises for values
            # where none of the comparisons hold (e.g. NaN-like)
            if (a == b):
                return 0
            if (a > b):
                return 1
            if (a < b):
                return (- 1)
            raise ValueError(f'{a},{b}')

        def descending(a, b):
            if (a == b):
                return 0
            if (a > b):
                return (- 1)
            if (a < b):
                return 1
            raise ValueError(f'{a},{b}')

        class Compare():
            # comparator for explicit value orders: compares the positions of
            # the two values within the given order mapping.
            # NOTE(review): always delegates to `ascending`, so list/tuple
            # specs always sort in listed order — confirm descending lists
            # are not expected here.
            def __init__(self, order):
                self.order = order

            def __call__(self, a, b):
                return ascending(self.get(a), self.get(b))

            def get(self, x):
                return self.order[x]

        for (k, v) in kwargs.items():
            if ((v == 'ascending') or (v is None)):
                actions[k] = ascending
                continue
            if (v == 'descending'):
                actions[k] = descending
                continue
            if callable(v):
                actions[k] = v
                continue
            assert isinstance(v, (list, tuple)), f'Invalid argument for {k}: {v} ({type(v)})'
            # explicit order: register each value under its str/int/float
            # spellings so lookups succeed regardless of the input's type
            # (non-numeric strings simply skip the int/float registrations)
            order = {}
            for (i, key) in enumerate(v):
                order[str(key)] = i
                try:
                    order[int(key)] = i
                except ValueError:
                    pass
                except TypeError:
                    print(('Cannot convert "%s" to int (%s)' % (key, type(key))))
                    raise
                try:
                    order[float(key)] = i
                except ValueError:
                    pass
            actions[k] = Compare(order)
        return actions
def test_cli_print_version() -> None:
    """`--version` exits cleanly and reports uvicorn plus interpreter details."""
    runner = CliRunner()
    result = runner.invoke(cli, ['--version'])
    expected = 'Running uvicorn %s with %s %s on %s' % (
        uvicorn.__version__,
        platform.python_implementation(),
        platform.python_version(),
        platform.system(),
    )
    assert result.exit_code == 0
    assert expected in result.output
class RobotSkinEnv(RCareWorld):
    """RCareWorld environment with a Kinova Gen3 arm and a tactile skin sensor."""

    def __init__(self, executable_file: str=None, scene_file: str=None, custom_channels: list=[], assets: list=[], **kwargs):
        RCareWorld.__init__(self, executable_file=executable_file, scene_file=scene_file, custom_channels=custom_channels, assets=assets, **kwargs)
        self.robot = self.create_robot(315893, [3158930], 'kinova_gen3_7dof')
        # scene object marking the initial end-effector pose
        self.init_pose_obj = self.create_object(6666, 'Ini', True)
        ini_world_pose = self.init_pose_obj.getPosition()
        ini_world_rot = self.init_pose_obj.getQuaternion()
        self.robot.moveTo(ini_world_pose, ini_world_rot)
        self.robot.closeGripper()
        self.skin = self.create_skin(id=114514, name='Skin', is_in_scene=True)

    def step(self):
        """Track the init-pose object with the robot, print skin readings, tick sim."""
        pose = self.init_pose_obj.getPosition()
        rot = self.init_pose_obj.getQuaternion()
        self.robot.moveTo(pose, rot)
        skin_info = self.skin.getInfo()
        print(skin_info)
        self._step()

    def demo(self, num_steps: int = 500):
        """Run the environment for num_steps simulation steps.

        Fix: the original iterated ``for i in range():`` — ``range()`` with no
        argument raises TypeError — so the step count is now an explicit
        parameter with a default.
        """
        for _ in range(num_steps):
            self.step()
# NOTE(review): this test was preceded by a bare "_numba" token — almost
# certainly the tail of a truncated decorator (e.g. a numba-availability
# guard). As a bare expression it raises NameError at import time, so it is
# removed here; restore the exact decorator from upstream history.
def test_vector2d_predict_implementations(data2d):
    """The numpy and numba engines of VectorSpline2D must predict the same values."""
    (coords, data) = data2d
    pred_numpy = VectorSpline2D(engine='numpy').fit(coords, data).predict(coords)
    pred_numba = VectorSpline2D(engine='numba').fit(coords, data).predict(coords)
    npt.assert_allclose(pred_numpy, pred_numba, atol=0.0001)
def list_settings(proj_dir):
    """Print all settings for proj_dir, marking defaults and boolean values."""
    log.colored(log.YELLOW, '=== settings:')
    if not util.is_valid_project_dir(proj_dir):
        log.info('    currently not in a valid project directory')
        return
    for key, value in settings.get_all_settings(proj_dir).items():
        is_default = ' (default value)' if value == settings.get_default(key) else ''
        if isinstance(value, bool):
            # render booleans as on/off for readability
            value = 'on' if value else 'off'
        log.info('  {}{}:{} {}{}'.format(log.BLUE, key, log.DEF, value, is_default))
def fortios_firewall(data, fos, check_mode):
    """Dispatch the firewall/internet-service-extension task to the FortiOS API.

    Generated Ansible module glue. In check mode the raw check result is
    returned; otherwise returns the 4-tuple consumed by the module:
    (failed, changed, response, diff) — `failed` is True when the API status
    is not successful, `changed` mirrors the API's `revision_changed` flag
    (treated as True when the key is absent).
    """
    # member-operation bookkeeping for the 'firewall' path
    fos.do_member_operation('firewall', 'internet-service-extension')
    if data['firewall_internet_service_extension']:
        resp = firewall_internet_service_extension(data, fos, check_mode)
    else:
        # fail_json raises, so resp is never read on this branch
        fos._module.fail_json(msg=('missing task body: %s' % 'firewall_internet_service_extension'))
    if check_mode:
        return resp
    return ((not is_successful_status(resp)), (is_successful_status(resp) and (resp['revision_changed'] if ('revision_changed' in resp) else True)), resp, {})
def _correct_directories(file_details, subfolder_prefix=''):
    """Collapse a flat file listing into files plus top-level folder entries.

    Files directly below `subfolder_prefix` are passed through unchanged;
    deeper paths are aggregated into one entry per top-level folder, which
    carries the metadata of the earliest-modified object inside that folder
    and a size of 0.

    `file_details` items are dicts with `logicalPath`, `fileMetadata`
    (with `length`), `timeModified`, `transactionRid` and `isOpen`;
    `subfolder_prefix` restricts the view to a subtree ('' or None = root).

    Returns a list of dicts with keys: name, size, type, time_modified,
    transaction_rid, is_open.
    """
    if (subfolder_prefix is None):
        subfolder_prefix = ''
    result = []
    # top-level folders already emitted, to avoid duplicate entries
    visited_folders = []
    for file in file_details:
        file_type = _file_or_directory(file['logicalPath'], file['fileMetadata']['length'], subfolder_prefix)
        if (file_type == 'file'):
            result.append({'name': file['logicalPath'], 'size': file['fileMetadata']['length'], 'type': file_type, 'time_modified': file['timeModified'], 'transaction_rid': file['transactionRid'], 'is_open': file['isOpen']})
        else:
            top_level_folder = _get_top_level_folder(file['logicalPath'], subfolder_prefix)
            full_path = str(f"{subfolder_prefix.rstrip('/')}/{top_level_folder}")
            if (full_path in visited_folders):
                pass
            else:
                # NOTE(review): startswith() on the stripped path also matches
                # sibling folders sharing the prefix (e.g. 'foo' vs 'foobar');
                # confirm whether paths are guaranteed slash-delimited here.
                folders_with_same_top_level = [detail for detail in file_details if detail['logicalPath'].startswith(full_path.lstrip('/').rstrip('/'))]
                # the folder entry inherits the earliest-modified member's metadata
                first_created_object = min(folders_with_same_top_level, key=(lambda x: x['timeModified']))
                result.append({'name': full_path.lstrip('/').rstrip('/'), 'size': 0, 'type': file_type, 'time_modified': first_created_object['timeModified'], 'transaction_rid': first_created_object['transactionRid'], 'is_open': first_created_object['isOpen']})
                visited_folders.append(full_path)
    return result
class RNN(torch.nn.Module):
    """Conv2d feature extractor followed by an RNN/LSTM/GRU and a linear head.

    Input is a (batch, input_size, time) spectrogram-like tensor; the conv
    stack downsamples both the feature and time axes, the recurrent layer
    runs over the remaining time steps, and the linear layer maps each step
    to `output_size` classes.
    """

    def __init__(self, input_size, output_size, cell_type, hidden_size, num_layers, dropout=0.0, bidirectional=False, channels=[8, 8], kernel_sizes=[[5, 5], [5, 5]], strides=[[2, 2], [2, 2]]):
        """Build the conv stack, recurrent layer and output projection.

        Raises:
            ValueError: if cell_type is not one of RNN, LSTM, GRU
                (case-insensitive).
        """
        super(RNN, self).__init__()
        convs = []
        in_channels = 1
        # h_out tracks the feature ('height') dimension through the conv stack
        h_out = input_size
        for (out_channels, kernel, stride) in zip(channels, kernel_sizes, strides):
            # 'same'-style padding so downsampling is governed by the stride
            padding = ((kernel[0] // 2), (kernel[1] // 2))
            convs.append(torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel, stride=stride, padding=padding))
            convs.append(torch.nn.ReLU())
            if (dropout > 0):
                convs.append(torch.nn.Dropout(dropout))
            in_channels = out_channels
            # Fix: stride may be an (h, w) pair — the original `h_out //= stride`
            # raised TypeError (int // list) with the default nested strides.
            stride_h = stride[0] if isinstance(stride, (list, tuple)) else stride
            h_out //= stride_h
        self.convs = torch.nn.Sequential(*convs)
        # conv output (b, c, h, w) is flattened to (b, w, c*h) for the RNN
        rnn_input_size = (h_out * out_channels)
        if (cell_type.upper() not in ['RNN', 'LSTM', 'GRU']):
            raise ValueError(f'Unkown rnn cell type {cell_type}')
        self.rnn = getattr(torch.nn, cell_type.upper())(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)
        # bidirectional doubles the recurrent feature size
        self.linear = torch.nn.Linear((hidden_size + (bidirectional * hidden_size)), output_size)

    def forward(self, inputs):
        """Map (batch, input_size, time) inputs to (batch, time', output_size)."""
        outputs = inputs.unsqueeze(1)
        outputs = self.convs(outputs)
        (b, c, h, w) = outputs.shape
        # fold channels into features; time becomes the sequence axis
        outputs = outputs.reshape(b, (c * h), w).permute(0, 2, 1)
        (outputs, _) = self.rnn(outputs)
        return self.linear(outputs)
def _start():
    """Wire up configuration, the Qt application/window, the polling timer
    and one trigger thread per configured input channel.

    All state is kept in module-level globals because Qt callbacks
    (_stop, _loop_once) and the rest of the module access them directly.
    """
    global patch, name, path, monitor
    global delay, winx, winy, winwidth, winheight, input_channel, input_image, app, timer, window, triggers, channel, image, thread
    # Window geometry and refresh delay come from the patch configuration.
    delay = patch.getfloat('general', 'delay')
    winx = patch.getint('display', 'xpos')
    winy = patch.getint('display', 'ypos')
    winwidth = patch.getint('display', 'width')
    winheight = patch.getint('display', 'height')
    # Every (key, value) pair in the [input] section is a channel/image pair.
    (input_channel, input_image) = list(zip(*patch.config.items('input')))
    app = QApplication(sys.argv)
    app.setWindowIcon(QtGui.QIcon(os.path.join(path, '../../doc/figures/logo-128.ico')))
    app.aboutToQuit.connect(_stop)
    signal.signal(signal.SIGINT, _stop)
    window = Window()
    window.show()
    # Poll _loop_once on a 200 ms Qt timer.
    timer = QtCore.QTimer()
    timer.start(200)
    timer.timeout.connect(_loop_once)
    # One TriggerThread per configured input channel.
    triggers = []
    for (channel, image) in zip(input_channel, input_image):
        triggers.append(TriggerThread(channel, image))
    for thread in triggers:
        thread.start()
    # Debug aid: everything above is declared global, so locals() should be
    # empty here; anything printed indicates a missed global declaration.
    if len(locals()):
        print(('LOCALS: ' + ', '.join(locals().keys())))
class TestPaToolUnpacker(TestUnpackerBase):
    """Integration tests for the PaTool unpacker plugin.

    NOTE(review): the bare ``.parametrize(...)`` lines below appear to be
    ``@pytest.mark.parametrize`` decorators whose ``@pytest.mark`` prefix was
    lost during extraction — confirm against the original file.
    """

    def test_unpacker_selection_generic(self):
        # PaTool handles both cab and lzh MIME types.
        self.check_unpacker_selection('application/vnd.ms-cab-compressed', 'PaTool')
        self.check_unpacker_selection('application/x-lzh-compressed', 'PaTool')
    .parametrize('in_file, ignore', [('test.cab', None), ('test.cpio', None), ('test.jar', {'MANIFEST.MF'}), ('test.lha', None), ('test.shar', None), ('test.tar.Z', None), ('test.tar.bz2', None), ('test.tar.gz', None), ('test.tar.lz', None), ('test.tar.xz', None), ('test.tar.zip', None), ('test.zoo', None), ('test.zpaq', None)])
    def test_archive_extraction(self, in_file, ignore):
        # Each archive format must unpack the standard test file set.
        self.check_unpacking_of_standard_unpack_set((TEST_DATA_DIR / in_file), additional_prefix_folder='get_files_test', output=False, ignore=ignore)
    .parametrize('in_file', ['test.a', 'test.bz2', 'test.gz', 'test.lrz', 'test.lz', 'test.lzo', 'test.rz', 'test.xz'])
    def test_file_extraction(self, in_file):
        # Single-file compressors should yield exactly one file whose hash
        # starts with the sentinel prefix 'deadc0de'.
        (files, meta) = self.unpacker.extract_files_from_file((TEST_DATA_DIR / in_file), self.tmp_dir.name)
        assert (len(files) == 1), f'unpacking of {in_file} unsuccessful: {meta}'
        assert (meta['plugin_used'] == 'PaTool')
        assert get_sha256(Path(files[0]).read_bytes()).startswith('deadc0de')

    def test_extraction_arc(self):
        # arc archives are copied to a scratch dir first (patool unpacks
        # next to the input file for this format).
        with TemporaryDirectory() as tmp_dir:
            target_file = (Path(tmp_dir) / 'test.arc')
            target_file.write_bytes((TEST_DATA_DIR / 'test.arc').read_bytes())
            (files, _) = self.unpacker.extract_files_from_file(target_file, self.tmp_dir.name)
            assert (len(files) == 2)
            unpacked_files = sorted((Path(f).name for f in files))
            assert (unpacked_files == ['testfile1', 'testfile2'])

    def test_extract_deb(self):
        # Debian packages should expand into their three member archives.
        test_file = (TEST_DATA_DIR / 'test.deb')
        (files, meta_data) = self.unpacker.extract_files_from_file(test_file, self.tmp_dir.name)
        assert (len(files) == 3), f'file number incorrect: {meta_data}'
        assert ('extracted to' in meta_data['output'])
class OptionSeriesWaterfallSonificationTracksActivewhen(Options):
    """Generated Highcharts option wrapper for
    ``series.waterfall.sonification.tracks.activeWhen``.

    NOTE(review): every option appears twice — a getter followed by a setter
    with the same name. The upstream generator decorates these with
    ``@property`` / ``@<name>.setter``; those decorators are not visible
    here, so as written each later def overrides the earlier one. Confirm
    against the generated source.
    """

    def crossingDown(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def crossingDown(self, num: float):
        # Setter.
        self._config(num, js_type=False)

    def crossingUp(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def crossingUp(self, num: float):
        # Setter.
        self._config(num, js_type=False)

    def max(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)

    def min(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)

    def prop(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def prop(self, text: str):
        # Setter.
        self._config(text, js_type=False)
class slice_scatter(Operator):
    """Fused operator that replaces a ``concatenate`` whose inputs all come
    from ``dynamic_slice`` ops with a single slice+scatter kernel.

    Built via :meth:`make_op`; cannot be called directly.
    """

    # NOTE(review): is_valid/make_op take no ``self`` and are invoked as
    # ``slice_scatter.is_valid(...)``; marking them @staticmethod makes
    # instance-level calls equally safe without changing class-level calls.
    @staticmethod
    def is_valid(cat_op: Operator) -> bool:
        """Return True if *cat_op* is a concatenate whose every input has no
        strided accessor and is produced by exactly one dynamic_slice."""
        if (cat_op._attrs['op'] != 'concatenate'):
            return False
        if any(((input_accessor.stride_dim is not None) for input_accessor in cat_op._attrs['input_accessors'])):
            return False
        return all((((x._attrs['src_ops'] is not None) and (len(x._attrs['src_ops']) == 1) and (len(x._attrs['dst_ops']) == 1) and (list(x._attrs['src_ops'])[0]._attrs['op'] == 'dynamic_slice')) for x in cat_op._attrs['inputs']))

    def _update_inputs_outputs(self, cat_op):
        """Rewire the graph: adopt the slice ops' inputs and the concat's
        outputs, detaching the intermediate tensors."""
        self._attrs['inputs'] = []
        for slice_op in self._attrs['slice_ops']:
            assert (len(slice_op._attrs['inputs']) == 1), 'Slice op should only have 1 input! op: {}'.format(slice_op)
            input_tensor = slice_op._attrs['inputs'][0]
            # Replace the slice op with this fused op as the consumer.
            if (slice_op in input_tensor._attrs['dst_ops']):
                input_tensor._attrs['dst_ops'].remove(slice_op)
            input_tensor._attrs['dst_ops'].add(self)
            self._attrs['inputs'].append(input_tensor)
        self._attrs['output_accessors'] = [TensorAccessor(cat_op._attrs['outputs'][0])]
        self._attrs['outputs'] = cat_op._attrs['outputs']
        for y in self._attrs['outputs']:
            y._attrs['src_ops'] = StableSet({self})
        # Orphan the intermediate slice outputs and the concat inputs so the
        # old subgraph is fully disconnected.
        for op in self._attrs['slice_ops']:
            op._attrs['outputs'][0]._attrs['src_ops'] = StableSet()
            op._attrs['outputs'][0]._attrs['dst_ops'] = StableSet()
        for x in cat_op._attrs['inputs']:
            x._attrs['src_ops'] = StableSet()
            x._attrs['dst_ops'] = StableSet()

    def __init__(self, scatter_dim: int) -> None:
        super().__init__()
        self._attrs['op'] = 'slice_scatter'
        self._attrs['has_profiler'] = False
        self._attrs['scatter_dim'] = scatter_dim

    @staticmethod
    def make_op(cat_op: Operator) -> Operator:
        """Build a slice_scatter from a valid concatenate op and splice it
        into the graph in place of the slice+concat pattern."""
        assert slice_scatter.is_valid(cat_op)
        scatter_dim = cat_op._attrs['concat_dim']
        new_op = slice_scatter(scatter_dim)
        slice_ops = []
        for x in cat_op._attrs['inputs']:
            src_ops = x.src_ops()
            assert (len(src_ops) == 1)
            slice_op = list(src_ops)[0]
            slice_ops.append(slice_op)
        new_op._attrs['slice_ops'] = slice_ops
        new_op._update_inputs_outputs(cat_op)
        new_op._set_depth()
        return new_op

    def __call__(self):
        raise RuntimeError('op {} cannot be called directly'.format(self._attrs['op']))

    def _get_op_attributes(self):
        raise NotImplementedError('slice_scatter get op attribute not implemented')

    def _get_func(self, fmt_str):
        # Look up the backend implementation registered for the current target.
        target = backend.target.Target.current()
        func_key = fmt_str.format(target=target.name(), op=self._attrs['op'])
        return registry.get(func_key)

    def gen_function(self) -> str:
        """Generate the backend source code for this op."""
        func = self._get_func('{target}.{op}.gen_function')
        return func(self._attrs)

    def _args_for_pseudo_code(self):
        # BUG FIX: the original string contained a stray ']' inside the
        # f-string ("scatter_dim=...]"), corrupting pseudo-code output.
        return [f"scatter_dim={str(self._attrs['scatter_dim'])}"]
class NotificationServer(object):
    """gRPC server wrapper hosting a NotificationService on an insecure port."""

    def __init__(self, service, port=_PORT):
        # Bounded thread pool shared by the gRPC server for request handling.
        self.executor = Executor(futures.ThreadPoolExecutor(max_workers=10))
        self.grpc_server = grpc.server(self.executor)
        self.service = service
        notification_service_pb2_grpc.add_NotificationServiceServicer_to_server(service, self.grpc_server)
        self.grpc_server.add_insecure_port(('[::]:' + str(port)))

    def run(self, is_block=False):
        """Start the service and server.

        When *is_block* is True, keep the main thread alive (the gRPC server
        runs in daemon threads) until Ctrl-C, then shut down cleanly.
        """
        self.service.start()
        self.grpc_server.start()
        logging.info('Notification server started.')
        # (Removed a dead `else: pass` branch from the original.)
        if is_block:
            try:
                while True:
                    time.sleep(_ONE_DAY_IN_SECONDS)
            except KeyboardInterrupt:
                self.stop()

    def stop(self):
        """Shut down the thread pool, the service and the gRPC server."""
        self.executor.shutdown()
        self.service.stop()
        self.grpc_server.stop(0)
        logging.info('Notification server stopped.')
class TCPMappingTLSOriginationContextCrossNamespaceTest(AmbassadorTest):
    """TCPMapping test: TLS origination using a TLSContext (and its client
    cert Secret) that live in a different namespace than the mapping, to
    exercise cross-namespace secret resolution."""
    extra_ports = [6789]
    target: ServiceType

    def init(self) -> None:
        self.target = HTTP()

    def manifests(self) -> str:
        # The Secret and TLSContext are deliberately created in
        # 'other-namespace'; the TCPMapping references the context by name
        # and Ambassador must resolve it across namespaces. The TLSContext
        # sets SNI to 'my-hysterical-name', which check() verifies.
        return ((namespace_manifest('other-namespace') + f'''
---
apiVersion: v1
kind: Secret
metadata:
  name: {self.path.k8s}-clientcert
  namespace: other-namespace
type: kubernetes.io/tls
data:
  tls.crt: {TLSCerts['presto.example.com'].k8s_crt}
  tls.key: {TLSCerts['presto.example.com'].k8s_key}
---
apiVersion: getambassador.io/v2
kind: TLSContext
metadata:
  name: {self.path.k8s}-tlsclient
  namespace: other-namespace
spec:
  ambassador_id: [ {self.ambassador_id} ]
  secret: {self.path.k8s}-clientcert
  sni: my-hysterical-name
---
apiVersion: getambassador.io/v2
kind: TCPMapping
metadata:
  name: {self.path.k8s}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  port: 6789
  service: {self.target.path.fqdn}:443
  tls: {self.path.k8s}-tlsclient
''') + super().manifests())

    def queries(self):
        # Hit the raw TCP port exposed by the mapping.
        (yield Query(self.url('', port=6789)))

    def check(self):
        # The backend must see a TLS connection with the SNI configured on
        # the cross-namespace TLSContext.
        assert (self.results[0].json['backend'] == self.target.path.k8s)
        assert (self.results[0].json['request']['tls']['enabled'] == True)
        assert (self.results[0].json['request']['tls']['server-name'] == 'my-hysterical-name')
class LiveApp():
    """A named live endpoint: an async callable plus optional client-side
    script/HTML assets, dispatched via a small CLI.

    NOTE(review): `kill`, `ainput`, `send_input`, `cli` and `async_cli`
    take no `self` and are presumably `@staticmethod`s whose decorators are
    not visible here; likewise the class attributes `interrupt`,
    `stdin_queue` and `endpoint` referenced below must be defined elsewhere
    in the original file — confirm.
    """

    def __init__(self, name, fct, client_script=None, client_html=None):
        self.name = name
        self.fct = fct
        # Eagerly read the optional client assets into memory.
        if (client_script is not None):
            with open(client_script, 'r') as f:
                self.client_script = f.read()
        else:
            self.client_script = None
        if (client_html is not None):
            with open(client_html, 'r') as f:
                self.html = f.read()
        else:
            self.html = None

    def __repr__(self) -> str:
        return '<LiveApp: {}>'.format(self.name)

    def kill():
        # Signal all loops watching LiveApp.interrupt to stop.
        LiveApp.interrupt = True

    async def ainput(string: str='', web=False) -> str:
        """Await one line of input: from the web input queue when *web* is
        True, otherwise from stdin without blocking the event loop."""
        if web:
            fut = asyncio.get_event_loop().create_future()
            LiveApp.stdin_queue.put_nowait(fut)
            return (await fut)
        return (await asyncio.get_event_loop().run_in_executor(None, sys.stdin.readline)).rstrip('\n')

    async def send_input(data):
        """Fulfil the oldest pending ainput(web=True) request with *data*."""
        try:
            fut = LiveApp.stdin_queue.get_nowait()
            fut.set_result(data)
        except asyncio.QueueEmpty:
            raise Exception('No input request pending')

    def cli():
        # Synchronous entry point wrapping the async CLI.
        return asyncio.run(LiveApp.async_cli())

    async def async_cli(args=None):
        """CLI dispatcher.

        Subcommands: 'endpoints' lists apps; 'client-script'/'client-html'
        print an app's assets; otherwise args[1] names an endpoint to call
        with JSON-decoded input (args[2]) and args (args[3]).
        """
        if (args is None):
            args = sys.argv
        if ('endpoints' in set(args)):
            for (name, app) in LiveApp.endpoint.items():
                print(app.name)
        elif ('client-script' in set(args)):
            app_name = args[2]
            for (name, app) in LiveApp.endpoint.items():
                if (app.name == app_name):
                    if (app.client_script is not None):
                        print(app.client_script)
                    else:
                        print('')
        elif ('client-html' in set(args)):
            app_name = args[2]
            for (name, app) in LiveApp.endpoint.items():
                if (app.name == app_name):
                    if (app.html is not None):
                        print(app.html)
                    else:
                        print('')
        else:
            endpoint = LiveApp.endpoint[args[1]]
            input = json.loads(args[2])
            endpoint_args = json.loads(args[3])
            # Only pass web=True when the endpoint function accepts it.
            if ((args[0] == 'web') and ('web' in inspect.signature(endpoint.fct).parameters)):
                kwargs = {'web': True}
            else:
                kwargs = {}
            result = (await endpoint.fct(input, *endpoint_args, **kwargs))
            if (result is not None):
                print(result)
# NOTE(review): the bare `.parametrize(...)` lines below look like
# `@pytest.mark.parametrize` decorators whose `@pytest.mark` prefix was lost
# in extraction — confirm against the original file.
.parametrize('features_in', [['Age', 'Marks'], ['Name', 'dob']])
.parametrize('input_features', [None, variables_str, np.array(variables_str)])
def test_new_feature_names_within_pipeline(df_vartypes, features_in, input_features):
    """get_feature_names_out inside a Pipeline reflects the created '<f>_plus'
    features, keeping or dropping the originals per drop_original."""
    # drop_original=False: all original columns plus the new ones.
    transformer = Pipeline([('transformer', MockCreator(variables=features_in, drop_original=False))])
    transformer.fit(df_vartypes)
    features_out = (list(df_vartypes.columns) + [f'{i}_plus' for i in features_in])
    assert (transformer.get_feature_names_out(input_features=input_features) == features_out)
    # drop_original=True: the source columns are removed from the output.
    transformer = Pipeline([('transformer', MockCreator(variables=features_in, drop_original=True))])
    transformer.fit(df_vartypes)
    features_out = ([f for f in df_vartypes.columns if (f not in features_in)] + [f'{i}_plus' for i in features_in])
    assert (transformer.get_feature_names_out(input_features=input_features) == features_out)
class OptionPlotoptionsPieSonificationContexttracksMappingHighpassFrequency(Options):
    """Generated Highcharts option wrapper for
    ``plotOptions.pie.sonification.contextTracks.mapping.highpass.frequency``.

    NOTE(review): each option appears as a getter/setter pair with the same
    name; upstream these are decorated with ``@property`` /
    ``@<name>.setter``, decorators not visible here — confirm against the
    generated source.
    """

    def mapFunction(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)

    def max(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)

    def min(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)

    def within(self):
        # Getter: configured value, default None.
        return self._config_get(None)

    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
def perform_security_check(driver):
    """Pause so a human can solve a manual security check, if one is shown.

    When the driver's current page is a security check, wait for
    SECURITY_CHECK_DURATION seconds (sleeping one second at a time);
    otherwise continue immediately.
    """
    if not is_security_check(driver):
        logging.debug('Security check not asked, continuing')
        return
    logging.info('***** SECURITY CHECK IN PROGRESS *****')
    logging.info(f'Please perform the security check on selenium, you have {SECURITY_CHECK_DURATION} seconds...')
    # Sleep in one-second increments rather than a single long sleep.
    for _ in range(SECURITY_CHECK_DURATION):
        time.sleep(1)
    logging.info('***** SECURITY CHECK COMPLETED *****')
def do_statistics(seg_sizes, rom_bytes, seg_split, seg_cached):
    """Log a breakdown of how much of the ROM is covered by known segments.

    Writes one summary line, one line per known segment type (with split /
    cached counts), and a trailing line for unknown bin data.
    """
    total_size = len(rom_bytes)
    unk_size = seg_sizes.get('unk', 0)
    # Everything that is not 'unk' counts as known/defined.
    known_size = sum(size for typ, size in seg_sizes.items() if typ != 'unk')
    known_ratio = known_size / total_size
    unk_ratio = unk_size / total_size
    log.write(f'Split {fmt_size(known_size)} ({known_ratio:.2%}) in defined segments')
    for typ, typ_size in seg_sizes.items():
        if typ == 'unk':
            continue
        typ_ratio = typ_size / total_size
        log.write(f'{typ:>20}: {fmt_size(typ_size):>8} ({typ_ratio:.2%}) {Fore.GREEN}{seg_split[typ]} split{Style.RESET_ALL}, {Style.DIM}{seg_cached[typ]} cached')
    log.write(f"{'unknown':>20}: {fmt_size(unk_size):>8} ({unk_ratio:.2%}) from unknown bin files")
def _scan_block_scalar(stream: StreamBuffer, style: Literal[('|', '>')]) -> ValueToken:
    """Scan a YAML block scalar ('|' literal or '>' folded) from *stream*.

    Reads the header indicators (chomping/indentation), determines the
    content indent, then consumes content lines until the indentation drops,
    folding line breaks when the style is '>'. Returns a ValueToken covering
    the scalar.
    """
    indent = 0
    folded = (style == '>')
    chunks = []
    start_mark = stream.get_position()
    # Skip the '|' or '>' indicator itself.
    stream.forward()
    (chomping, increment) = _scan_block_scalar_indicators(stream, start_mark)
    _scan_block_scalar_ignored_line(stream, start_mark)
    min_indent = (indent + 1)
    if (min_indent < 1):
        min_indent = 1
    if (increment is None):
        # No explicit indentation indicator: detect it from the first
        # non-empty content line.
        (breaks, max_indent, end_mark) = _scan_block_scalar_indentation(stream)
        indent = max(min_indent, max_indent)
    else:
        # Explicit indentation indicator relative to the parent indent.
        indent = ((min_indent + increment) - 1)
        (breaks, end_mark) = _scan_block_scalar_breaks(stream, indent)
    line_break = ''
    # Consume content lines while they stay at the scalar's indent level.
    while ((stream.column == indent) and (stream.peek() != _CHARS_END)):
        chunks.extend(breaks)
        # A line starting with space/tab is "more indented" and must keep
        # its literal breaks even in folded style.
        leading_non_space = (stream.peek() not in ' \t')
        length = 0
        while (stream.peek(length) not in _CHARS_END_NEWLINE):
            length += 1
        chunks.append(stream.prefix(length))
        stream.forward(length)
        line_break = _scan_line_break(stream)
        (breaks, end_mark) = _scan_block_scalar_breaks(stream, indent)
        if ((stream.column == indent) and (stream.peek() != _CHARS_END)):
            # Folded style: a single '\n' between two normally-indented
            # lines becomes a space (unless blank lines intervene).
            if (folded and (line_break == '\n') and leading_non_space and (stream.peek() not in ' \t')):
                if (not breaks):
                    chunks.append(' ')
            else:
                chunks.append(line_break)
        else:
            break
    # Chomping: None ('clip') keeps the final break; True ('+') also keeps
    # trailing blank lines; False ('-') strips both.
    if (chomping is not False):
        chunks.append(line_break)
    if (chomping is True):
        chunks.extend(breaks)
    return ValueToken(start_mark, end_mark, ''.join(chunks), style)
def replace_package(package_type: str, public_id: PublicId, package_dir: Union[(Path, str)]) -> None:
    """Re-fetch a package and swap it in place of the existing directory.

    Downloads a fresh copy of the package into a temporary directory, then
    removes *package_dir* and moves the fresh copy there.
    """
    with TemporaryDirectory() as tmp_dir:
        fetched_dir = os.path.join(tmp_dir, public_id.name)
        os.mkdir(fetched_dir)
        # Fetch into the scratch location first so a failed download never
        # leaves package_dir half-removed.
        fetch_package(package_type, public_id=public_id, cwd=tmp_dir, dest=fetched_dir)
        shutil.rmtree(package_dir)
        shutil.move(fetched_dir, package_dir)
class OperatorYoutube(OperatorBase):
    """Pipeline operator for Youtube videos tracked in Notion.

    Stages: pull transcripts for new inbox pages, dedup against previously
    pushed items, summarize transcripts with an LLM, rank/categorize them,
    and push the results to a Notion "ToRead" database. Redis (via DBClient)
    caches watermarks, LLM summaries and rankings.
    """

    def pull(self, **kwargs):
        """Pull transcripts for inbox pages created since the last run.

        Returns a dict of page_id -> page enriched with '__transcript' and
        '__*' metadata fields; pages whose transcript pull fails are skipped.
        """
        print('')
        print('# Pulling Youtube video transcripts')
        print('')
        data_folder = kwargs.setdefault('data_folder', '')
        run_id = kwargs.setdefault('run_id', '')
        print(f'data_folder: {data_folder}, run_id: {run_id}')
        notion_api_key = os.getenv('NOTION_TOKEN')
        notion_agent = NotionAgent(notion_api_key)
        client = DBClient()
        # Watermark: only pull pages created after this timestamp.
        last_created_time = client.get_notion_inbox_created_time('youtube', 'default')
        last_created_time = utils.bytes2str(last_created_time)
        print(f'Get last_created_time from redis: {last_created_time}')
        if (not last_created_time):
            # No stored watermark: default to a one-day lookback.
            last_created_time = (datetime.now() - timedelta(days=1)).isoformat()
        op_notion = OperatorNotion()
        db_index_id = op_notion.get_index_inbox_dbid()
        db_pages = utils.get_notion_database_pages_inbox(notion_agent, db_index_id, 'Youtube')
        print(f'The database pages founded: {db_pages}')
        # Only look at the two most recent inbox databases.
        db_pages = db_pages[:2]
        print(f'The latest 2 databases: {db_pages}')
        pages = {}
        for db_page in db_pages:
            database_id = db_page['database_id']
            print(f'Pulling from database_id: {database_id}...')
            extracted_pages = notion_agent.queryDatabaseInbox_Youtube(database_id, filter_created_time=last_created_time)
            for (page_id, extracted_page) in extracted_pages.items():
                page = copy.deepcopy(extracted_page)
                title = page['title']
                # Fall back to the title when no source URL was captured.
                source_url = (page['source_url'] or title)
                print(f'====== [Pulling youtube transcript]: title: {title}, page_id: {page_id}, source_url: {source_url} ======')
                try:
                    (transcript, metadata) = utils.load_video_transcript(source_url, source_url, page_id=page_id, data_folder=data_folder, run_id=run_id)
                    print(f'Pulled youtube transcipt, metadata: {metadata}')
                    page['__transcript'] = transcript
                    page['__title'] = metadata.setdefault('title', '')
                    page['__description'] = metadata.setdefault('description', '')
                    page['__thumbnail_url'] = metadata.setdefault('thumbnail_url', '')
                    page['__publish_date'] = ''
                    if metadata.get('publish_date'):
                        pd = metadata['publish_date']
                        if isinstance(pd, str):
                            page['__publish_date'] = pd
                        else:
                            # datetime: normalize to US/Pacific ISO format.
                            pd_pdt = pd.astimezone(pytz.timezone('America/Los_Angeles'))
                            page['__publish_date'] = pd_pdt.isoformat()
                    page['__author'] = metadata.setdefault('author', '')
                    page['__view_count'] = metadata.setdefault('view_count', 0)
                    page['__length'] = metadata.setdefault('length', 0)
                    pages[page_id] = page
                    print(f'Page pulled succeed, title {title}, source_url: {source_url}')
                except Exception as e:
                    # Best-effort: a single failed video must not abort the run.
                    print(f'[ERROR] Exception occurred during pulling Youtube video: {title}, page_id: {page_id}, source_url: {source_url} : {e}')
        return pages

    def dedup(self, extractedPages, target='toread'):
        """Drop pages already recorded in the 'toread' store; return a list."""
        print('')
        print('# Dedup Youtube pages')
        print('')
        print(f'Number of pages: {len(extractedPages)}')
        client = DBClient()
        deduped_pages = []
        for (page_id, page) in extractedPages.items():
            title = page['title']
            print(f'Dedupping page, title: {title}')
            if client.get_notion_toread_item_id('youtube', 'default', page_id):
                print(f'Duplicated youtube found, skip. page_id: {page_id}')
            else:
                deduped_pages.append(page)
        return deduped_pages

    def summarize(self, pages):
        """Summarize each page's transcript via the LLM, with Redis caching.

        Pages with an empty transcript are skipped; transcripts are truncated
        to SUMMARY_MAX_LENGTH characters before summarization.
        """
        print('')
        print('# Summarize Youtube transcripts')
        print('')
        SUMMARY_MAX_LENGTH = int(os.getenv('SUMMARY_MAX_LENGTH', 20000))
        print(f'Number of pages: {len(pages)}')
        print(f'Summary max length: {SUMMARY_MAX_LENGTH}')
        llm_agent = LLMAgentSummary()
        llm_agent.init_prompt()
        llm_agent.init_llm()
        client = DBClient()
        # Cache TTL defaults to 7 days.
        redis_key_expire_time = os.getenv('BOT_REDIS_KEY_EXPIRE_TIME', 604800)
        summarized_pages = []
        for page in pages:
            title = page['title']
            page_id = page['id']
            print(f'====== Summarying page, title: {title} ======')
            content = page['__transcript']
            source_url = page['source_url']
            print(f'Summarying page, source_url: {source_url}')
            print(f'Page content ({len(content)} chars): {content[:200]}...')
            st = time.time()
            # Check the summary cache before calling the LLM.
            llm_summary_resp = client.get_notion_summary_item_id('youtube', 'default', page_id)
            if (not llm_summary_resp):
                if (not content):
                    print(f'[ERROR] Empty Youtube transcript loaded, title: {title}, source_url: {source_url}, skip it')
                    continue
                content = content[:SUMMARY_MAX_LENGTH]
                summary = llm_agent.run(content)
                print(f'Cache llm response for {redis_key_expire_time}s, page_id: {page_id}, summary: {summary}')
                client.set_notion_summary_item_id('youtube', 'default', page_id, summary, expired_time=int(redis_key_expire_time))
            else:
                print('Found llm summary from cache, decoding (utf-8) ...')
                summary = utils.bytes2str(llm_summary_resp)
            summarized_page = copy.deepcopy(page)
            summarized_page['__summary'] = summary
            print(f'Used {(time.time() - st):.3f}s, Summarized page_id: {page_id}, summary: {summary}')
            summarized_pages.append(summarized_page)
        return summarized_pages

    def rank(self, pages):
        """Categorize and score each summarized page via the LLM (cached).

        On an unparsable LLM response the page gets empty topics/categories
        and a sentinel rating of -0.01.
        """
        print('')
        print('# Rank Youtubes')
        print('')
        print(f'Number of pages: {len(pages)}')
        llm_agent = LLMAgentCategoryAndRanking()
        llm_agent.init_prompt()
        llm_agent.init_llm()
        client = DBClient()
        redis_key_expire_time = os.getenv('BOT_REDIS_KEY_EXPIRE_TIME', 604800)
        ranked = []
        for page in pages:
            title = page['title']
            page_id = page['id']
            text = page['__summary']
            print(f'Ranking page, title: {title}')
            st = time.time()
            # Check the ranking cache before calling the LLM.
            llm_ranking_resp = client.get_notion_ranking_item_id('youtube', 'default', page_id)
            category_and_rank_str = None
            if (not llm_ranking_resp):
                print('Not found category_and_rank_str in cache, fallback to llm_agent to rank')
                category_and_rank_str = llm_agent.run(text)
                print(f'Cache llm response for {redis_key_expire_time}s, page_id: {page_id}')
                client.set_notion_ranking_item_id('youtube', 'default', page_id, category_and_rank_str, expired_time=int(redis_key_expire_time))
            else:
                print('Found category_and_rank_str from cache')
                category_and_rank_str = utils.bytes2str(llm_ranking_resp)
            print(f'Used {(time.time() - st):.3f}s, Category and Rank: text: {text}, rank_resp: {category_and_rank_str}')
            category_and_rank = utils.fix_and_parse_json(category_and_rank_str)
            print(f'LLM ranked result (json parsed): {category_and_rank}')
            ranked_page = copy.deepcopy(page)
            if (not category_and_rank):
                print('[ERROR] Cannot parse json string, assign default rating -0.01')
                ranked_page['__topics'] = []
                ranked_page['__categories'] = []
                ranked_page['__rate'] = (- 0.01)
            else:
                # Missing per-topic scores default to 1.
                ranked_page['__topics'] = [(x['topic'], (x.get('score') or 1)) for x in category_and_rank['topics']]
                ranked_page['__categories'] = [(x['category'], (x.get('score') or 1)) for x in category_and_rank['topics']]
                ranked_page['__rate'] = category_and_rank['overall_score']
                ranked_page['__feedback'] = (category_and_rank.get('feedback') or '')
            ranked.append(ranked_page)
        print(f'Ranked pages: {ranked}')
        return ranked

    def push(self, ranked_data, targets, topk=3):
        """Push ranked pages to the given targets (only 'notion' supported).

        Returns a stat dict with 'total' and 'error' counters.
        """
        print('')
        print('# Push Youtubes')
        print('')
        print(f'Number of pages: {len(ranked_data)}')
        print(f'Targets: {targets}')
        print(f'Top-K: {topk}')
        print(f'input data: {ranked_data}')
        stat = {'total': 0, 'error': 0}
        for target in targets:
            print(f'Pushing data to target: {target} ...')
            if (target == 'notion'):
                notion_api_key = os.getenv('NOTION_TOKEN')
                notion_agent = NotionAgent(notion_api_key)
                op_notion = OperatorNotion()
                db_index_id = op_notion.get_index_toread_dbid()
                database_id = utils.get_notion_database_id_toread(notion_agent, db_index_id)
                print(f'Latest ToRead database id: {database_id}')
                if (not database_id):
                    print('[ERROR] no index db pages found... skip')
                    break
                for ranked_page in ranked_data:
                    stat['total'] += 1
                    try:
                        page_id = ranked_page['id']
                        title = (ranked_page.get('__title') or ranked_page['title'])
                        print(f'Pushing page, title: {title}')
                        # Keep only the top-k topics/categories, sanitized
                        # (no commas, max 20 chars) for Notion tag fields.
                        topics = ranked_page['__topics']
                        topics_topk = utils.get_top_items(topics, topk)
                        topics_topk = [x[0].replace(',', ' ')[:20] for x in topics_topk]
                        categories = ranked_page['__categories']
                        categories_topk = utils.get_top_items(categories, topk)
                        categories_topk = [x[0].replace(',', ' ')[:20] for x in categories_topk]
                        rating = ranked_page['__rate']
                        notion_agent.createDatabaseItem_ToRead_Youtube(database_id, ranked_page, topics_topk, categories_topk, rating)
                        # Record the push and advance the pull watermark.
                        self.markVisited(page_id, source='youtube', list_name='default')
                        created_time = ranked_page['created_time']
                        self.updateCreatedTime(created_time, source='youtube', list_name='default')
                    except Exception as e:
                        print(f'[ERROR]: Push to notion failed, skip: {e}')
                        stat['error'] += 1
                        traceback.print_exc()
            else:
                print(f'[ERROR]: Unknown target {target}, skip')
        return stat
def get_class_override(css_cls):
    """Apply any registered OVERRIDES entry to *css_cls*.

    Returns the (possibly modified) class object, or False when the
    override explicitly disables the class.
    """
    if OVERRIDES is None:
        return css_cls
    if css_cls.classname not in OVERRIDES:
        return css_cls
    override = OVERRIDES[css_cls.classname]
    # A falsy override means "suppress this class entirely".
    if not override:
        return False
    if isinstance(override, dict):
        # Dict overrides may patch css rules, hover rules and/or the name.
        if 'css' in override:
            css_cls.css(override['css'])
        if 'hover' in override:
            css_cls.hover.css(override['hover'])
        if 'name' in override:
            css_cls.classname = override['name']
            css_cls.cls_ref = override['name']
        css_cls.is_page_scope = override.get('defined', True)
    else:
        # A bare value is treated as a replacement class name.
        css_cls.classname = override
    return css_cls
def get_unique_name(path: PathIn, *, prefix: str='', suffix: str='', extension: str='', separator: str='-') -> str:
    """Return a file name that does not yet exist inside *path*.

    The name is built as ``[prefix<sep>]<uuid4>[<sep>suffix][.extension]``;
    on the (astronomically unlikely) collision a fresh UUID is tried.

    BUG FIX: the original never reset ``name`` inside the retry loop, so on
    a collision the next candidate was appended to the previous one,
    producing names like ``prefix-uuid1prefix-uuid2``.
    """
    path = _get_path(path)
    assert_dir(path)
    # Normalize the extension once, outside the retry loop.
    if extension:
        extension = extension.lstrip('.').lower()
    while True:
        name = ''
        if prefix:
            name += f'{prefix}{separator}'
        uid = uuid.uuid4()
        name += f'{uid}'
        if suffix:
            name += f'{separator}{suffix}'
        if extension:
            name += f'.{extension}'
        if not exists(join_path(path, name)):
            return name
class OptionSeriesVennSonificationDefaultspeechoptions(Options):
    """Generated Highcharts option wrapper for
    ``series.venn.sonification.defaultSpeechOptions``.

    NOTE(review): simple options come in getter/setter pairs with the same
    name; upstream these carry ``@property`` / ``@<name>.setter`` decorators
    not visible here — confirm against the generated source.
    """

    def activeWhen(self) -> 'OptionSeriesVennSonificationDefaultspeechoptionsActivewhen':
        # Nested option group.
        return self._config_sub_data('activeWhen', OptionSeriesVennSonificationDefaultspeechoptionsActivewhen)

    def language(self):
        # Getter: default 'en-US'.
        return self._config_get('en-US')

    def language(self, text: str):
        # Setter.
        self._config(text, js_type=False)

    def mapping(self) -> 'OptionSeriesVennSonificationDefaultspeechoptionsMapping':
        # Nested option group.
        return self._config_sub_data('mapping', OptionSeriesVennSonificationDefaultspeechoptionsMapping)

    def pointGrouping(self) -> 'OptionSeriesVennSonificationDefaultspeechoptionsPointgrouping':
        # Nested option group.
        return self._config_sub_data('pointGrouping', OptionSeriesVennSonificationDefaultspeechoptionsPointgrouping)

    def preferredVoice(self):
        # Getter: default None.
        return self._config_get(None)

    def preferredVoice(self, text: str):
        # Setter.
        self._config(text, js_type=False)

    def showPlayMarker(self):
        # Getter: default True.
        return self._config_get(True)

    def showPlayMarker(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)

    def type(self):
        # Getter: default 'speech'.
        return self._config_get('speech')

    def type(self, text: str):
        # Setter.
        self._config(text, js_type=False)
def bind_logger(logger, single_output, out_flex_key):
    """Decorator factory: clears *logger* before each wrapped call, injects
    it as ``dash_logger``, and appends the collected log output to the
    callback's return value."""
    def wrapper(f):
        # NOTE(review): this bare '(f)' expression is a no-op; it looks like
        # a stripped '@functools.wraps(f)' decorator — confirm upstream.
        (f)
        def decorated_function(*args, **kwargs):
            # Start from a clean log for every invocation.
            logger.clear()
            outputs = f(*args, **kwargs, dash_logger=logger)
            return _append_output(outputs, logger.get_output(), single_output, out_flex_key)
        return decorated_function
    return wrapper
def test_namespace_respected():
    """An explicitly namespaced record can reference a nested record by its
    namespaced full name.

    ``field2`` refers to ``explicit_namespace.bar``: the nested record
    ``bar`` inherits its parent's namespace, so parsing must succeed.
    (NOTE(review): the original docstring was truncated to a lone quote in
    extraction; restored here.)
    """
    schema = {'type': 'record', 'name': 'explicit_namespace.foo', 'fields': [{'name': 'field1', 'type': {'type': 'record', 'name': 'bar', 'fields': [{'name': 'bar_field', 'type': 'int'}]}}, {'name': 'field2', 'type': 'explicit_namespace.bar'}]}
    fastavro.parse_schema(schema)
class LineDecoder():
    """Incrementally decodes byte chunks into complete text lines.

    NOTE(review): the fields use ``attr.ib``, so the class is presumably
    decorated with ``@attr.s(auto_attribs=True)`` in the original source;
    the decorator is not visible here — confirm.
    """
    # Incremental UTF-8 decoder kept across update() calls so multi-byte
    # sequences split between chunks decode correctly.
    decoder: codecs.IncrementalDecoder = attr.ib(factory=(lambda : codecs.getincrementaldecoder(encoding='utf-8')()))
    # Carry-over text after the last complete newline seen so far.
    buffer: str = ''

    def update(self, chunk: bytes, final: bool=False) -> typing.List[str]:
        """Feed *chunk*; return the lines completed so far.

        With final=True, everything buffered (including a trailing partial
        line) is flushed and returned.
        """
        self.buffer += self.decoder.decode(input=chunk, final=final)
        if final:
            index = len(self.buffer)
        else:
            newline_index = self.buffer.rfind('\n')
            if (newline_index == (- 1)):
                # No complete line yet; keep buffering.
                return []
            index = (newline_index + 1)
        splittable = self.buffer[:index]
        self.buffer = self.buffer[index:]
        return splittable.splitlines()
# NOTE(review): the bare `.parametrize(...)` line below looks like a
# `@pytest.mark.parametrize` decorator whose `@pytest.mark` prefix was lost
# in extraction — confirm against the original file.
.parametrize('string,expected', [('', ([], {})), ('a', (['a'], {})), ('a,b', (['a', 'b'], {})), ('a,1', (['a', 1], {})), ('1,a', ([1, 'a'], {})), ('a,b=1', (['a'], {'b': 1})), ('a,b="1"', (['a'], {'b': '1'})), ('a , b = "1,2" ', (['a'], {'b': '1,2'})), ('a , b = "1,2", sdf=4 ', (['a'], {'b': '1,2', 'sdf': 4})), ('a,b="""', (['a'], {'b': '"""'}))])
def test_string_to_func_inputs(string, expected):
    """Comma-separated strings parse into (args, kwargs) with type coercion
    and quote handling."""
    assert (utils.string_to_func_inputs(string) == expected)
class Benchmark():
    """A single named micro-benchmark: wraps a callable plus its results,
    rendered in a Go-testing-style result line."""

    # Class-level defaults; __init__ overrides them per instance.
    func = None
    name = ''
    iters = 0
    ns_per_op = 0
    allocs_per_op = 0
    mb_per_s = 0

    def __init__(self, **kwargs):
        # Only attributes declared on the class may be set, to catch typos.
        for (k, v) in kwargs.items():
            if (not hasattr(self, k)):
                raise AttributeError(k)
            setattr(self, k, v)

    def __str__(self):
        kvs = ', '.join(('{}={}'.format(k, v) for (k, v) in self.__dict__.items() if (not k.startswith('_'))))
        return 'Benchmark<{}>'.format(kvs)
    __repr__ = __str__

    def format_result(self, name_pad_to=64):
        """Render a Go-style 'Benchmark_<name>  <iters>  <ns> ns/op' line."""
        return 'Benchmark_{b.name}{pad}\t{b.iters}\t{b.ns_per_op} ns/op'.format(b=self, pad=(' ' * ((name_pad_to + 1) - len(self.name))))

    def run(self, repeat=5):
        """Time self.func over ``self.iters`` iterations, ``repeat`` times,
        keeping the best run; stores the result in ``self.ns_per_op``."""
        # Calibrate the harness overhead with a zero-iteration run.
        wrapper_time = _run_timeit(self.func, 0)
        times = []
        for _ in range(repeat):
            t = _run_timeit(self.func, self.iters)
            if (t == 0.0):
                raise Exception('{} time=0'.format(repr(self)))
            times.append(t)
        best_time = (min(times) - wrapper_time)
        # BUG FIX: the original multiplied by ``.0`` (i.e. zero), so
        # ns_per_op was always 0. timeit returns seconds; convert the best
        # run to nanoseconds per operation.
        self.ns_per_op = int((best_time * 1e9) / self.iters)
class Ttype():
    """Telnet TTYPE (terminal-type / MTTS) negotiation handler.

    Runs up to three SEND/IS rounds with the client (client name, terminal
    name, MTTS capability bitmask), storing what it learns in
    ``protocol.protocol_flags`` before completing the handshake.
    """

    def __init__(self, protocol):
        # Negotiation advances through numbered steps; see will_ttype.
        self.ttype_step = 0
        self.protocol = protocol
        self.protocol.protocol_flags['FORCEDENDLINE'] = True
        self.protocol.protocol_flags['TTYPE'] = False
        # Assume ANSI support until told otherwise.
        self.protocol.protocol_flags['ANSI'] = True
        self.protocol.negotiationMap[TTYPE] = self.will_ttype
        self.protocol.do(TTYPE).addCallbacks(self.will_ttype, self.wont_ttype)

    def wont_ttype(self, option):
        """Client refused TTYPE; finish the handshake without it."""
        self.protocol.protocol_flags['TTYPE'] = False
        self.protocol.handshake_done()

    def will_ttype(self, option):
        """Handle one round of the TTYPE SEND/IS exchange.

        Step 0 requests the first reply; step 1 parses the client name
        (and infers xterm256/GA behavior); step 2 parses the terminal name;
        step 3 parses the MTTS bitmask and completes the handshake.
        """
        options = self.protocol.protocol_flags
        # Ignore replies once negotiation already finished or ran past
        # the final step.
        if ((options and options.get('TTYPE', False)) or (self.ttype_step > 3)):
            return
        try:
            # Strip the leading IS byte and decode the payload.
            option = b''.join(option).lstrip(IS).decode()
        except TypeError:
            pass
        if (self.ttype_step == 0):
            self.protocol.requestNegotiation(TTYPE, SEND)
        elif (self.ttype_step == 1):
            try:
                clientname = option.upper()
            except AttributeError:
                clientname = 'UNKNOWN'
            xterm256 = False
            if clientname.startswith('MUDLET'):
                # Mudlet >= 1.1 supports xterm256.
                # NOTE(review): this is a string comparison of version
                # numbers — fragile for multi-digit versions; confirm intent.
                xterm256 = (clientname.split('MUDLET', 1)[1].strip() >= '1.1')
                if (not self.protocol.protocol_flags['NOGOAHEAD']):
                    self.protocol.protocol_flags['NOGOAHEAD'] = True
                    self.protocol.protocol_flags['NOPROMPTGOAHEAD'] = False
            if (clientname.startswith('XTERM') or clientname.endswith('-256COLOR') or (clientname in ('ATLANTIS', 'CMUD', 'KILDCLIENT', 'MUDLET', 'MUSHCLIENT', 'PUTTY', 'BEIP', 'POTATO', 'TINYFUGUE'))):
                xterm256 = True
            if xterm256:
                self.protocol.protocol_flags['ANSI'] = True
                self.protocol.protocol_flags['XTERM256'] = xterm256
            self.protocol.protocol_flags['CLIENTNAME'] = clientname
            # Ask for the next TTYPE value (terminal name).
            self.protocol.requestNegotiation(TTYPE, SEND)
        elif (self.ttype_step == 2):
            term = option
            tupper = term.upper()
            xterm256 = (tupper.endswith('-256COLOR') or (tupper.endswith('XTERM') and (not tupper.endswith('-COLOR'))))
            if xterm256:
                self.protocol.protocol_flags['ANSI'] = True
                self.protocol.protocol_flags['XTERM256'] = xterm256
            self.protocol.protocol_flags['TERM'] = term
            # Ask for the final TTYPE value (MTTS bitmask).
            self.protocol.requestNegotiation(TTYPE, SEND)
        elif (self.ttype_step == 3):
            if option.startswith('MTTS'):
                option = option[4:].strip()
                if option.isdigit():
                    # Numeric MTTS: decode the capability bitmask.
                    option = int(option)
                    support = dict(((capability, True) for (bitval, capability) in MTTS if ((option & bitval) > 0)))
                    self.protocol.protocol_flags.update(support)
                else:
                    # Non-numeric MTTS payload: store it as a raw flag.
                    self.protocol.protocol_flags[option.upper()] = True
            self.protocol.protocol_flags['TTYPE'] = True
            self.protocol.handshake_done()
        self.ttype_step += 1
class DoctorMulti(Model):
    """ORM model with a composite primary key (foo, bar), used to exercise
    has_many relationships that traverse multi-column keys."""
    primary_keys = ['foo', 'bar']
    # Declarative relationships: appointments directly, patients through the
    # appointment join, and symptoms through the patients.
    has_many({'appointments': 'AppointmentMulti'}, {'patients': {'via': 'appointments.patient_multi'}}, {'symptoms_to_treat': {'via': 'patients.symptoms'}})
    # Random hex defaults keep composite keys unique per instance.
    foo = Field.string(default=(lambda : uuid4().hex))
    bar = Field.string(default=(lambda : uuid4().hex))
    name = Field.string()
def get_proof_indices(tree_indices):
    """Return the Merkle-tree node indices required to prove *tree_indices*.

    Nodes are numbered heap-style (root = 1, children of n are 2n and 2n+1).
    Siblings along each path to the root are collected, then any node whose
    two children are already present is dropped as derivable.
    """
    needed = set(tree_indices)
    # Walk each requested leaf up to the root, collecting every sibling.
    for start in tree_indices:
        node = start
        while node > 1:
            needed.add(node ^ 1)
            node //= 2
    # Process from the deepest index upward; when both children of a parent
    # are present, record the parent so the final filter can prune.
    ordered = sorted(needed, reverse=True)
    pos = 0
    while pos < len(ordered):
        node = ordered[pos]
        if (node in ordered) and ((node ^ 1) in ordered) and ((node // 2) not in ordered):
            ordered.append(node // 2)
        pos += 1
    # Keep only nodes that cannot be reconstructed from their children.
    return [node for node in ordered if not ((node * 2 in ordered) and ((node * 2 + 1) in ordered))]
def extractKaytranslationcomHomeBlog(item):
    """Map a kaytranslation feed item to a release message.

    Returns None for previews or items with no chapter/volume info, the
    built release message for known series tags, or False when no tag
    matches.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    # (tag, canonical series name, translation type)
    tagmap = [('TPOCTTWFP', 'The Price Of Confessing To The Wrong Female Protagonist', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for (tagname, name, tl_type) in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TestTFClusterConfig(unittest.TestCase):
    """Unit tests for the TFClusterConfig builder API."""

    def test_set_node_entry(self):
        # The entry file path and function name are recorded on the config.
        cfg = TFClusterConfig.new_builder().set_node_entry(entry).set_worker_count(1).build()
        self.assertEqual(__file__, cfg.get_entry_python_file_path())
        self.assertEqual('entry', cfg.get_entry_func_name())

    def test_add_node_type(self):
        # Re-adding a node type overwrites the earlier count.
        b = TFClusterConfig.new_builder().set_node_entry(entry)
        b.add_node_type('worker', 2).add_node_type('ps', 3).add_node_type('worker', 1)
        cfg = b.build()
        self.assertEqual(1, cfg.get_node_count('worker'))
        self.assertEqual(3, cfg.get_node_count('ps'))

    def test_set_property(self):
        # Arbitrary key/value properties round-trip through the builder.
        cfg = TFClusterConfig.new_builder().set_node_entry(entry).set_worker_count(1).set_property('k', 'v').build()
        self.assertEqual('v', cfg.get_property('k'))

    def test_set_worker_ps_count(self):
        # Worker and ps counts map to the corresponding node types.
        cfg = TFClusterConfig.new_builder().set_node_entry(entry).set_worker_count(2).set_ps_count(1).build()
        self.assertEqual(2, cfg.get_node_count('worker'))
        self.assertEqual(1, cfg.get_node_count('ps'))

    def test_set_worker_zero_chief(self):
        # The chief flag is stored as the string property 'true'.
        cfg = TFClusterConfig.new_builder().set_node_entry(entry).set_worker_count(1).set_is_worker_zero_chief(True).build()
        self.assertEqual('true', cfg.get_property('tf_is_worker_zero_chief'))

    def test__to_j_tf_cluster_config(self):
        # The Java-side config carries counts, merged properties and paths.
        b = TFClusterConfig.new_builder()
        b.set_node_entry(entry).set_worker_count(2).set_ps_count(1).set_property('k', 'v').set_is_worker_zero_chief(True)
        j_cfg = b.build()._to_j_tf_cluster_config()
        tf_default_properties = {'am_state_machine_class': 'org.flinkextended.flink.ml.tensorflow.cluster.TFAMStateMachineImpl', 'sys:ml_runner_class': 'org.flinkextended.flink.ml.tensorflow.cluster.node.runner.TFMLRunner', 'sys:record_reader_class': 'org.flinkextended.flink.ml.tensorflow.data.TFRecordReaderImpl', 'sys:record_writer_class': 'org.flinkextended.flink.ml.tensorflow.data.TFRecordWriterImpl', 'sys:decoding_class': 'org.flinkextended.flink.ml.operator.coding.RowCSVCoding', 'sys:encoding_class': 'org.flinkextended.flink.ml.operator.coding.RowCSVCoding'}
        self.assertDictEqual({'worker': 2, 'ps': 1}, dict(j_cfg.getNodeTypeCntMap()))
        self.assertDictEqual({**tf_default_properties, 'k': 'v', 'tf_is_worker_zero_chief': 'true'}, dict(j_cfg.getProperties()))
        self.assertEqual(__file__, j_cfg.getEntryPythonFilePath())
        self.assertIn(__file__, j_cfg.getPythonFilePaths())
class Section(object):
    """Base class for ELF sections.

    Wraps a parsed section header and provides access to the section's
    (possibly compressed) contents via data(). Header fields can be read
    with dict-style access (``section['sh_size']``).

    BUG FIX: ``compressed``, ``data_size`` and ``data_alignment`` were
    plain methods while the code consistently used them as attributes
    (``if self.compressed``, ``decomp.decompress(..., self.data_size)``) —
    a bound method is always truthy, so every section was treated as
    compressed. They are restored as properties.
    """

    def __init__(self, header, name, elffile):
        self.header = header
        self.name = name
        self.elffile = elffile
        self.stream = self.elffile.stream
        self.structs = self.elffile.structs
        self._compressed = (header['sh_flags'] & SH_FLAGS.SHF_COMPRESSED)
        if self.compressed:
            # A compressed section begins with an Elf_Chdr header that
            # describes the decompressed payload.
            header = struct_parse(self.structs.Elf_Chdr, self.stream,
                                  stream_pos=self['sh_offset'])
            self._compression_type = header['ch_type']
            self._decompressed_size = header['ch_size']
            self._decompressed_align = header['ch_addralign']
        else:
            self._decompressed_size = header['sh_size']
            self._decompressed_align = header['sh_addralign']

    @property
    def compressed(self):
        """Is this section compressed (SHF_COMPRESSED flag set)?"""
        return self._compressed

    @property
    def data_size(self):
        """Logical (decompressed) size of the section's data."""
        return self._decompressed_size

    @property
    def data_alignment(self):
        """Logical (decompressed) alignment of the section's data."""
        return self._decompressed_align

    def data(self):
        """Return the section's data, decompressing it when needed.

        Raises ELFCompressionError for an unknown compression type or when
        the decompressed length disagrees with the Chdr header.
        """
        if self.compressed:
            c_type = self._compression_type
            if (c_type == 'ELFCOMPRESS_ZLIB'):
                # The zlib stream starts immediately after the Elf_Chdr.
                hdr_size = self.structs.Elf_Chdr.sizeof()
                self.stream.seek((self['sh_offset'] + hdr_size))
                compressed = self.stream.read((self['sh_size'] - hdr_size))
                decomp = zlib.decompressobj()
                result = decomp.decompress(compressed, self.data_size)
            else:
                raise ELFCompressionError('Unknown compression type: {:#0x}'.format(c_type))
            if (len(result) != self._decompressed_size):
                raise ELFCompressionError('Decompressed data is {} bytes long, should be {} bytes long'.format(len(result), self._decompressed_size))
        else:
            self.stream.seek(self['sh_offset'])
            result = self.stream.read(self._decompressed_size)
        return result

    def is_null(self):
        """Is this a null section? (Overridden by NullSection.)"""
        return False

    def __getitem__(self, name):
        """Implement dict-like access to header entries."""
        return self.header[name]

    def __eq__(self, other):
        try:
            return (self.header == other.header)
        except AttributeError:
            return False

    def __hash__(self):
        return hash(self.header)
# NOTE(review): the leading `.parametrize(...)` call below has lost its
# receiver — presumably `@pytest.mark.parametrize(...)`; confirm against
# the original source before running.
.parametrize('options', [(), ('-d', 'std'), ('-o', 'doc'), ('-n', 'ref'), ('-l', 'index.html*')])
def test_inv_cli_v2(options, capsys, file_regression):
    # Run the inventory CLI over the v2 objects file, emitting YAML, plus
    # whichever extra flags this parametrization supplies.
    inventory_cli([str((STATIC / 'objects_v2.inv')), '-f', 'yaml', *options])
    # Normalize captured stdout to end with exactly one trailing newline.
    text = (capsys.readouterr().out.strip() + '\n')
    # Compare against the stored .yaml regression fixture.
    file_regression.check(text, extension='.yaml')
class EmmettGroup(click.Group):
    """Click group pre-wired with Emmett's default commands and app option."""

    def __init__(self, add_default_commands=True, add_app_option=True, add_debug_option=True, **extra):
        params = list(extra.pop('params', None) or ())
        if add_app_option:
            params.append(app_option)
        click.Group.__init__(self, params=params, **extra)
        if add_default_commands:
            for command in (develop_command, shell_command, routes_command, serve_command):
                self.add_command(command)

    def list_commands(self, ctx):
        # Merge statically registered commands with the commands exposed by
        # the loaded application's own CLI (best effort).
        names = super(EmmettGroup, self).list_commands(ctx)
        info = ctx.ensure_object(ScriptInfo)
        try:
            names = names + info.load_app().cli.list_commands(ctx)
        except Exception:
            pass
        return names

    def get_command(self, ctx, name):
        # Statically registered commands win; otherwise fall back to the
        # application's CLI. Returns None when neither has the command.
        command = click.Group.get_command(self, ctx, name)
        if command is not None:
            return command
        info = ctx.ensure_object(ScriptInfo)
        try:
            command = info.load_app().cli.get_command(ctx, name)
            if command is not None:
                return command
        except Exception:
            pass

    def main(self, *args, **kwargs):
        # Guarantee a ScriptInfo object is present in the click context.
        if kwargs.get('obj') is None:
            kwargs['obj'] = ScriptInfo()
        return super().main(*args, **kwargs)
def osci_change_ranking_ytd_df():
    """Build a one-row year-to-date OSCI change-ranking DataFrame fixture."""
    row = {
        OSCIChangeRankingSchema.company: 'Company',
        OSCIChangeRankingSchema.total: 100,
        OSCIChangeRankingSchema.active: 50,
        OSCIChangeRankingSchema.position: 40,
        OSCIChangeRankingSchema.position_change: 20,
        OSCIChangeRankingSchema.total_change: 70,
        OSCIChangeRankingSchema.active_change: 30,
    }
    return pd.DataFrame([row])
def _smooth(x, window_len=11, window='hanning'):
if (x.ndim != 1):
raise ValueError('smooth only accepts 1 dimension arrays.')
if (x.size < window_len):
raise ValueError('Input vector needs to be bigger than window size.')
if (window_len < 3):
return x
window_funcs = {'flat': (lambda _len: np.ones(_len, 'd')), 'hanning': np.hanning, 'hamming': np.hamming, 'bartlett': np.bartlett, 'blackman': np.blackman}
s = np.r_[(x[(window_len - 1):0:(- 1)], x, x[(- 1):(- window_len):(- 1)])]
try:
w = window_funcs[window](window_len)
except KeyError:
raise ValueError("Window is not one of '{0}', '{1}', '{2}', '{3}', '{4}'".format(*window_funcs.keys()))
y = np.convolve((w / w.sum()), s, mode='valid')
return y |
def test_kronos_new_gateway_checkin(client: TestClient):
    """Checking in an unknown gateway registers it; it then appears in the listing."""
    from tests.test_database_models import SAMPLE_GATEWAY_HID
    checkin = client.put(f'/api/v1/kronos/gateways/{SAMPLE_GATEWAY_HID}/checkin')
    assert checkin.status_code == 200
    listing = client.get('/api/v1/kronos/gateways')
    assert listing.status_code == 200
    gateways = listing.json()['data']
    # Exactly the freshly checked-in gateway should be listed.
    assert len(gateways) == 1
    assert gateways[0]['hid'] == SAMPLE_GATEWAY_HID
def get_mali_structure_stats(root):
    """Walk *root* for manually-curated PDB files and collect DSSP
    secondary-structure class counts for each structure.

    Returns a DataFrame with one row per matching file: per-class counts
    (columns prefixed with 'x'), plus 'pdb' name, file 'path' and residue
    count 'xlen'.
    """
    from Bio.PDB import PDBParser
    from Bio.PDB.DSSP import DSSP
    rows = []
    tool = 'manual'
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            if ('.pdb' not in filename) or (tool not in filename):
                continue
            fname = os.path.join(dirpath, filename)
            structure = PDBParser().get_structure('', fname)
            # DSSP assigns one secondary-structure class per residue.
            dssp = DSSP(structure[0], fname, dssp='mkdssp')
            classes = [entry[2] for entry in dssp]
            stats = pd.Series(Counter(classes))
            # Prefix class columns with 'x' to keep column names distinct
            # from the metadata columns below.
            stats.index = ['x' + c for c in stats.index]
            stats['pdb'] = os.path.basename(filename).split('.')[0]
            stats['path'] = fname
            stats['xlen'] = len(classes)
            rows.append(stats)
    return pd.DataFrame(rows)
class NoiseStandardDeviation():
    """Gibbs-sampled noise standard deviation with a Gamma prior on the precision.

    Args:
        prior_concentration: concentration (shape) of the Gamma prior; must be > 0.
        prior_rate: rate of the Gamma prior; must be > 0.
        val: optional initial value; when omitted, an initial value is drawn
            from the prior (empty data makes the posterior equal the prior).

    BUG FIX: the two ``val`` methods were missing their ``@property`` /
    ``@val.setter`` decorators, so the second definition silently replaced
    the first and ``self.val = ...`` in sample() merely shadowed the method.
    """

    def __init__(self, prior_concentration: float, prior_rate: float, val: Optional[float] = None):
        if (prior_concentration <= 0) or (prior_rate <= 0):
            raise ValueError('Invalid prior hyperparameters')
        self.prior_concentration = prior_concentration
        self.prior_rate = prior_rate
        if val is None:
            # Draw an initial value from the prior.
            self.sample(X=torch.Tensor([]), residual=torch.Tensor([]))
        else:
            self._val = val

    @property
    def val(self) -> float:
        """Current noise standard deviation."""
        return self._val

    @val.setter
    def val(self, val: float):
        self._val = val

    def sample(self, X: torch.Tensor, residual: torch.Tensor) -> float:
        """Draw a new value conditioned on the data, store it, and return it."""
        self.val = self._get_sample(X, residual)
        return self.val

    def _get_sample(self, X: torch.Tensor, residual: torch.Tensor) -> float:
        """Sample sigma from the conjugate posterior (Gamma on the precision)."""
        posterior_concentration = self.prior_concentration + (len(X) / 2.0)
        posterior_rate = self.prior_rate + (0.5 * torch.sum(torch.square(residual)))
        # sigma = precision ** -0.5
        draw = torch.pow(Gamma(posterior_concentration, posterior_rate).sample(), (- 0.5))
        return draw.item()
class Paillier():
    """Toy implementation of the Paillier additively-homomorphic cryptosystem.

    Public key: (n, g); private key: (lambda, u).
    NOTE: demonstration code only — the key sizes and randomness used here
    are not cryptographically safe.
    """

    def __init__(self, p=0, q=0, g=0):
        self.__p = p
        self.__q = q
        self.__n = (self.__p * self.__q)
        self.__lambda = sympy.lcm((self.__p - 1), (self.__q - 1))
        self.__g = g
        self.__u = 0
        if (self.__g != 0):
            # u = L(g^lambda mod n^2)^(-1) mod n
            self.__u = sympy.mod_inverse(self.__L(((g ** self.__lambda) % (self.__n ** 2))), self.__n)
        print('p,q,n,lambda,g,u = ', self.__p, self.__q, self.__n, self.__lambda, self.__g, self.__u)
        print('Paillier:: initialized')

    def __L(self, x):
        """L(x) = (x - 1) / n, defined for x ≡ 1 (mod n).

        BUG FIX: this must be *integer* floor division. True division
        produced a float, which loses precision for cryptographic-size
        integers and leaks floats into the modular arithmetic.
        """
        return ((x - 1) // self.__n)

    def generate_key(self, key_bits=10):
        """Generate fresh p, q, n, lambda, g and u."""
        while True:
            self.__p = sympy.ntheory.generate.randprime((2 ** key_bits), (2 ** (key_bits + 1)))
            self.__q = sympy.ntheory.generate.randprime((2 ** key_bits), (2 ** (key_bits + 1)))
            self.__n = (self.__p * self.__q)
            # Require gcd(n, phi(n)) == 1 so the scheme's inverses exist.
            if (sympy.gcd(self.__n, ((self.__p - 1) * (self.__q - 1))) == 1):
                break
        self.__lambda = sympy.lcm((self.__p - 1), (self.__q - 1))
        # (The original wrapped these two lines in a while-loop that always
        # broke after one iteration; flattened here.)
        self.__g = sympy.ntheory.generate.randprime(2, (self.__n ** 2))
        self.__u = sympy.mod_inverse(self.__L(((self.__g ** self.__lambda) % (self.__n ** 2))), self.__n)
        print('Paillier:: key generated')

    def encrypt(self, m, r=0):
        """Encrypt message m with randomness r (a fresh random prime when r == 0)."""
        r = (sympy.ntheory.randprime(3, self.__n) if (r == 0) else r)
        return (((self.__g ** m) * (r ** self.__n)) % (self.__n ** 2))

    def decrypt(self, c):
        """Decrypt ciphertext c: m = L(c^lambda mod n^2) * u mod n."""
        return ((self.__L(((c ** self.__lambda) % (self.__n ** 2))) * self.__u) % self.__n)

    def get_public_key(self):
        """Return the public key (n, g)."""
        return (self.__n, self.__g)

    def get_private_key(self):
        """Return the private key (lambda, u)."""
        return (self.__lambda, self.__u)

    def __str__(self):
        return 'ttt'

    def is_validate(self):
        return True
class SmartFormatter(argparse.HelpFormatter):
    """Help formatter that preserves manual line breaks.

    Help strings starting with 'R|' are split on their embedded newlines
    instead of being re-wrapped; default values are appended to help text
    as in argparse.ArgumentDefaultsHelpFormatter.
    """

    def _split_lines(self, text, width):
        # 'R|' marks "raw" help text: honor the author's own line breaks.
        is_raw = text.startswith('R|')
        if is_raw:
            return text[2:].splitlines()
        return argparse.HelpFormatter._split_lines(self, text, width)

    def _get_help_string(self, action):
        # Borrow the '(default: ...)' suffix behavior without inheriting
        # anything else from ArgumentDefaultsHelpFormatter.
        return argparse.ArgumentDefaultsHelpFormatter._get_help_string(self, action)
def foo_contract(eth_tester, w3):
    """Deploy the Foo test contract and return a Contract bound to its address."""
    deploy_address = eth_tester.get_accounts()[0]
    abi = '[{"anonymous":false,"inputs":[{"indexed":false,"name":"_bar","type":"string"}],"name":"barred","type":"event"},{"constant":false,"inputs":[{"name":"_bar","type":"string"}],"name":"setBar","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"constant":true,"inputs":[],"name":"bar","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"}]'
    bytecode = 'fd5bbf68656c6c6f20776f726ccbbff106100affd1565bdbdb5565b5bdee2565b5090565bbe8565b5090565b90565b6103bbf3fefd5bcbc14aafebb0f7eb600080fd5befd5bbfd5bdfd5bbffd5bff19601fb005b61011b61024c565bbbbfabf35bacea565b507f5f71ad82e16f082de5ff496b140e2fbc8621eeb37b36d59b185c3f1364bbdbff4565bfcaba150565bfef106102be2565bbcfbbff1061032bffbbd565b5ba565b5090565b61038c91905bb5090565b9056fea165627a7aae6ca683d45ee8a71bba45caee29e4815147cd308f772c853a20dfe08214dbb50029'
    factory = w3.eth.contract(abi=abi, bytecode=bytecode)
    deploy_txn = factory.constructor().transact({'from': deploy_address})
    # Wait up to 180 seconds for the deployment to be mined.
    receipt = w3.eth.wait_for_transaction_receipt(deploy_txn, 180)
    return factory(receipt.contractAddress)
class Group_():
    """Spec-style tests for fabric's Group: nested classes name the feature
    under test, methods name individual behaviors."""
    class init():
        # Construction behaviors of Group().
        def may_be_empty(self):
            assert (len(Group()) == 0)
        def takes_splat_arg_of_host_strings(self):
            g = Group('foo', 'bar')
            assert (g[0].host == 'foo')
            assert (g[1].host == 'bar')
        def takes_splat_kwargs_and_passes_them_to_Connections(self):
            # Keyword arguments apply to every member connection.
            g = Group('foo', 'bar', user='admin', forward_agent=True)
            assert (g[0].host == 'foo')
            assert (g[0].user == 'admin')
            assert (g[0].forward_agent is True)
            assert (g[1].host == 'bar')
            assert (g[1].user == 'admin')
            assert (g[1].forward_agent is True)
    class from_connections():
        def inits_from_iterable_of_Connections(self):
            g = Group.from_connections((Connection('foo'), Connection('bar')))
            assert (len(g) == 2)
            assert (g[1].host == 'bar')
        def acts_like_an_iterable_of_Connections(self):
            g = Group('foo', 'bar', 'biz')
            assert (g[0].host == 'foo')
            assert (g[(- 1)].host == 'biz')
            assert (len(g) == 3)
            for c in g:
                assert isinstance(c, Connection)
        # NOTE(review): the bare tuple below looks like a stripped
        # @pytest.mark.parametrize('method', ALL_METHODS) decorator for the
        # following test — confirm against the original source.
        ('method', ALL_METHODS)
        def abstract_methods_not_implemented(self, method):
            # The base Group leaves its task-running API abstract.
            group = Group()
            with raises(NotImplementedError):
                getattr(group, method)()
    class close_and_contextmanager_behavior():
        def close_closes_all_member_connections(self):
            cxns = [Mock(name=x) for x in ('foo', 'bar', 'biz')]
            g = Group.from_connections(cxns)
            g.close()
            for c in cxns:
                c.close.assert_called_once_with()
        def contextmanager_behavior_works_like_Connection(self):
            # Exiting the with-block closes every member connection.
            cxns = [Mock(name=x) for x in ('foo', 'bar', 'biz')]
            g = Group.from_connections(cxns)
            with g as my_g:
                assert (my_g is g)
            for c in cxns:
                c.close.assert_called_once_with()
    class get():
        class local_defaults_to_host_interpolated_path():
            # get() defaults the local path to '{host}/' so concurrent
            # downloads from multiple hosts don't clobber each other.
            def when_no_arg_or_kwarg_given(self):
                g = Group('host1', 'host2')
                g._do = Mock()
                g.get(remote='whatever')
                g._do.assert_called_with('get', remote='whatever', local='{host}/')
            def not_when_arg_given(self):
                g = Group('host1', 'host2')
                g._do = Mock()
                g.get('whatever', 'lol')
                g._do.assert_called_with('get', 'whatever', 'lol')
            def not_when_kwarg_given(self):
                g = Group('host1', 'host2')
                g._do = Mock()
                g.get(remote='whatever', local='lol')
                g._do.assert_called_with('get', remote='whatever', local='lol')
# NOTE(review): the bare tuple below looks like a stripped Flask route
# decorator — presumably @app.route('/backend/starting_build/',
# methods=['POST', 'PUT']); confirm against the original source.
('/backend/starting_build/', methods=['POST', 'PUT'])
def backend_starting_build():
    # Log the incoming update payload for debugging.
    debug_output(flask.request.json, 'RECEIVED:')
    update = flask.request.json
    # Build tasks are keyed by "<build_id>-<chroot>".
    task_id = '{0}-{1}'.format(update['build_id'], update['chroot'])
    build_task = build_task_dict.pop(task_id, None)
    if build_task:
        # Move the task from the pending map to the started map.
        started_build_task_dict[task_id] = build_task
    # Always permit the build to start, even for unknown tasks.
    response = {'can_start': True}
    debug_output(response, 'SENDING BACK:', delim=False)
    return flask.jsonify(response)
class TDMPCConfig():
    """Hyperparameters for a TD-MPC agent.

    NOTE(review): the annotated-fields-with-defaults style suggests this was
    declared as a @dataclass whose decorator was lost during extraction —
    confirm against the original source.
    """
    # Required (no defaults): schedules and optimizer supplied by the caller.
    std_schedule: optax.Schedule
    horizon_schedule: optax.Schedule
    optimizer: optax.GradientTransformation
    # Replay / sampling configuration.
    batch_size: int = 512
    samples_per_insert: float = 512.0
    samples_per_insert_tolerance_rate: float = 0.1
    max_replay_size: int = int(1000000.0)
    variable_update_period: int = 1
    # per_alpha / per_beta: presumably prioritized-replay exponents — confirm.
    per_alpha: float = 0.6
    per_beta: float = 0.4
    discount: float = 0.99
    # Planning parameters (sample counts, elite selection, iterations).
    num_samples: int = 512
    min_std: float = 0.05
    temperature: float = 0.5
    momentum: float = 0.1
    num_elites: int = 64
    iterations: int = 6
    # tau: presumably a target-network smoothing coefficient — confirm.
    tau: float = 0.01
    seed_steps: int = 5000
    mixture_coef: float = 0.05
    # Rollout horizon and loss-term weights.
    horizon: int = 5
    consistency_coef: float = 2
    reward_coef: float = 0.5
    value_coef: float = 0.1
    rho: float = 0.5
class Tmp(BaseEnsemble):
    """Minimal concrete BaseEnsemble used for testing; forwards all options unchanged."""

    def __init__(self, shuffle=False, random_state=None, scorer=None, verbose=False,
                 layers=None, array_check=None, model_selection=False, sample_size=20):
        super(Tmp, self).__init__(
            shuffle=shuffle,
            random_state=random_state,
            scorer=scorer,
            verbose=verbose,
            layers=layers,
            array_check=array_check,
            model_selection=model_selection,
            sample_size=sample_size,
        )
class TrinityConfig():
    """Top-level Trinity node configuration.

    Aggregates the filesystem layout (root/data/log/IPC directories),
    network identity (network id, port, bootstrap and preferred nodes),
    the node key, and per-application sub-configurations.

    BUG FIX: the original lost its decorators during extraction — the
    dangling ``_root_dir.setter``-style expressions were the remains of
    ``@<name>.setter`` lines, and accessors used throughout as attributes
    (e.g. ``self.app_suffix``, ``self.trinity_root_dir = ...`` in
    ``__init__``) were plain methods. The ``@property``/setter pairs and
    the ``@classmethod`` on ``from_parser_args`` are restored below.
    """

    _trinity_root_dir: Path = None
    _chain_config: Eth1ChainConfig = None
    _data_dir: Path = None
    _nodekey_path: Path = None
    _logfile_path: Path = None
    _nodekey = None
    _network_id: int = None
    port: int = None
    preferred_nodes: Tuple[(KademliaNode, ...)] = None
    bootstrap_nodes: Tuple[(KademliaNode, ...)] = None
    _genesis_config: Dict[(str, Any)] = None
    _app_configs: Dict[(Type['BaseAppConfig'], 'BaseAppConfig')] = None

    def __init__(self, network_id: int, app_identifier: str='', genesis_config: Dict[(str, Any)]=None, max_peers: int=25, trinity_root_dir: Path=None, trinity_tmp_root_dir: bool=False, data_dir: Path=None, nodekey_path: Path=None, nodekey: PrivateKey=None, port: int=30303, preferred_nodes: Tuple[(KademliaNode, ...)]=None, bootstrap_nodes: Tuple[(KademliaNode, ...)]=None) -> None:
        self.app_identifier = app_identifier
        self.network_id = network_id
        self.max_peers = max_peers
        self.port = port
        self._app_configs = {}
        # Genesis config: explicit > preconfigured network > error.
        if (genesis_config is not None):
            self.genesis_config = genesis_config
        elif (network_id in PRECONFIGURED_NETWORKS):
            self.genesis_config = _load_preconfigured_genesis_config(network_id)
        else:
            raise TypeError('No `genesis_config` was provided and the `network_id` is not in the known preconfigured networks. Cannot initialize ChainConfig')
        if (trinity_root_dir is not None):
            self.trinity_root_dir = trinity_root_dir
        self.trinity_tmp_root_dir = trinity_tmp_root_dir
        if ((not preferred_nodes) and (self.network_id in DEFAULT_PREFERRED_NODES)):
            self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
        else:
            self.preferred_nodes = preferred_nodes
        if (bootstrap_nodes is None):
            if (self.network_id in PRECONFIGURED_NETWORKS):
                bootnodes = PRECONFIGURED_NETWORKS[self.network_id].bootnodes
                self.bootstrap_nodes = tuple((KademliaNode.from_uri(enode) for enode in bootnodes))
            else:
                self.bootstrap_nodes = tuple()
        else:
            self.bootstrap_nodes = bootstrap_nodes
        if (data_dir is not None):
            self.data_dir = data_dir
        # A nodekey may be given directly or via a path, but not both.
        if ((nodekey is not None) and (nodekey_path is not None)):
            raise ValueError('It is invalid to provide both a `nodekey` and a `nodekey_path`')
        elif (nodekey_path is not None):
            self.nodekey_path = nodekey_path
        elif (nodekey is not None):
            self.nodekey = nodekey

    @property
    def app_suffix(self) -> str:
        """'-<app_identifier>' suffix, or '' when no identifier is set."""
        return ('' if (len(self.app_identifier) == 0) else f'-{self.app_identifier}')

    @property
    def logfile_path(self) -> Path:
        """Path to the log file within the log directory."""
        return (self.log_dir / LOG_FILE)

    @property
    def log_dir(self) -> Path:
        """App-suffixed directory for log files."""
        return self.with_app_suffix((self.data_dir / LOG_DIR))

    @property
    def trinity_root_dir(self) -> Path:
        """Explicitly-set root directory, or the XDG default."""
        if (self._trinity_root_dir is not None):
            return self._trinity_root_dir
        else:
            return get_xdg_trinity_root()

    @trinity_root_dir.setter
    def trinity_root_dir(self, value: str) -> None:
        self._trinity_root_dir = Path(value).resolve()

    @property
    def data_dir(self) -> Path:
        """Explicitly-set data directory, or the per-network default."""
        if (self._data_dir is not None):
            return self._data_dir
        else:
            return get_data_dir_for_network_id(self.network_id, self.trinity_root_dir)

    @data_dir.setter
    def data_dir(self, value: str) -> None:
        self._data_dir = Path(value).resolve()

    @property
    def database_ipc_path(self) -> Path:
        return get_database_socket_path(self.ipc_dir)

    @property
    def enr_db_dir(self) -> Path:
        return self.with_app_suffix((self.data_dir / ENR_DB_DIR))

    @property
    def logging_ipc_path(self) -> Path:
        return (self.ipc_dir / LOGGING_IPC_SOCKET_FILENAME)

    @property
    def ipc_dir(self) -> Path:
        return self.with_app_suffix((self.data_dir / IPC_DIR))

    @property
    def pid_dir(self) -> Path:
        return self.with_app_suffix((self.data_dir / PID_DIR))

    @property
    def jsonrpc_ipc_path(self) -> Path:
        return get_jsonrpc_socket_path(self.ipc_dir)

    @property
    def nodekey_path(self) -> Path:
        """Path to the nodekey file; None when a raw nodekey was provided."""
        if (self._nodekey_path is None):
            if (self._nodekey is not None):
                return None
            else:
                return get_nodekey_path(self.data_dir)
        else:
            return self._nodekey_path

    @nodekey_path.setter
    def nodekey_path(self, value: str) -> None:
        self._nodekey_path = Path(value).resolve()

    @property
    def nodekey(self) -> PrivateKey:
        """The node's private key, loaded from disk when not set directly."""
        if (self._nodekey is None):
            try:
                return load_nodekey(self.nodekey_path)
            except FileNotFoundError:
                return None
        else:
            if isinstance(self._nodekey, bytes):
                return keys.PrivateKey(self._nodekey)
            elif isinstance(self._nodekey, PrivateKey):
                return self._nodekey
            return self._nodekey

    @nodekey.setter
    def nodekey(self, value: Union[(bytes, PrivateKey)]) -> None:
        if isinstance(value, bytes):
            self._nodekey = keys.PrivateKey(value)
        elif isinstance(value, PrivateKey):
            self._nodekey = value
        else:
            raise TypeError(f'Nodekey must either be a raw byte-string or an eth_keys `PrivateKey` instance: got {type(self._nodekey)}')

    def process_id_file(self, process_name: str):
        """Return a context manager holding a PID file while the block runs."""
        # Local import keeps this file's visible imports unchanged; the
        # nested generator reproduces the original @contextmanager behavior.
        from contextlib import contextmanager

        @contextmanager
        def _pid_file():
            with PidFile(process_name, self.pid_dir):
                (yield)
        return _pid_file()

    @classmethod
    def from_parser_args(cls, parser_args: argparse.Namespace, app_identifier: str, app_config_types: Iterable[Type['BaseAppConfig']]) -> 'TrinityConfig':
        """Build a TrinityConfig from parsed CLI arguments."""
        constructor_kwargs = construct_trinity_config_params(parser_args)
        trinity_config = cls(app_identifier=app_identifier, **constructor_kwargs)
        trinity_config.initialize_app_configs(parser_args, app_config_types)
        return trinity_config

    def initialize_app_configs(self, parser_args: argparse.Namespace, app_config_types: Iterable[Type['BaseAppConfig']]) -> None:
        """Instantiate and register each application-level config type."""
        for app_config_type in app_config_types:
            self.add_app_config(app_config_type.from_parser_args(parser_args, self))

    def add_app_config(self, app_config: 'BaseAppConfig') -> None:
        """Register an app config, keyed by its concrete type."""
        self._app_configs[type(app_config)] = app_config

    def has_app_config(self, app_config_type: Type['BaseAppConfig']) -> bool:
        return (app_config_type in self._app_configs.keys())

    def get_app_config(self, app_config_type: Type[TAppConfig]) -> TAppConfig:
        return cast(TAppConfig, self._app_configs[app_config_type])

    def with_app_suffix(self, path: Path) -> Path:
        """Append the app suffix to the final component of *path*."""
        return path.with_name((path.name + self.app_suffix))
# NOTE(review): the bare tuple below looks like a stripped Dash callback
# decorator — presumably @app.callback([Output(...)], [Input(...)], [State(...)]);
# confirm against the original source.
([Output('run-power-body', 'style'), Output('cycle-power-body', 'style')], [Input('use-run-power-switch', 'on'), Input('use-cycle-power-switch', 'on')], [State('use-run-power-switch', 'on'), State('use-cycle-power-switch', 'on')])
def user_power_data(run_dummy, cycle_dummy, run, cycle):
    # Persist the user's run/cycle power toggles and show/hide the panels.
    athlete_info = app.session.query(athlete).filter((athlete.athlete_id == 1)).first()
    # Hide a panel when its switch is off, show it inline otherwise.
    run_style = ({'display': 'none'} if (not run) else {'display': 'inline'})
    cycle_style = ({'display': 'none'} if (not cycle) else {'display': 'inline'})
    try:
        athlete_info.use_run_power = run
        athlete_info.use_cycle_power = cycle
        app.session.commit()
        app.server.logger.debug(f'use-run-power set to {run}, use-cycle-power set to {cycle}')
    except BaseException as e:
        # On any failure, log and drop the session; styles are still returned.
        app.server.logger.error(e)
        app.session.remove()
    return (run_style, cycle_style)
class OptionSeriesBarOnpointConnectoroptions(Options):
    """Options for the connector drawn between a bar point and its on-point marker.

    BUG FIX: each getter/setter pair below shared a name but had lost its
    ``@property``/``@<name>.setter`` decorators, so the setter definition
    silently replaced the getter. The decorators are restored.
    """

    @property
    def dashstyle(self):
        """Dash style of the connector line (no default)."""
        return self._config_get(None)

    @dashstyle.setter
    def dashstyle(self, text: str):
        self._config(text, js_type=False)

    @property
    def stroke(self):
        """Stroke color of the connector line (no default)."""
        return self._config_get(None)

    @stroke.setter
    def stroke(self, text: str):
        self._config(text, js_type=False)

    @property
    def width(self):
        """Pixel width of the connector line (default 1)."""
        return self._config_get(1)

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)
def test_sort_alerts(alerts_api_mock: MockAlertsAPI):
    """Pending alerts are split into 'skip' and 'send' buckets by _sort_alerts.

    BUG FIX: the original asserted ``[...].sort() == [...].sort()`` —
    ``list.sort()`` returns None, so every comparison was ``None == None``
    and the assertions were vacuously true. Use ``sorted()`` so the
    contents are actually compared.
    """
    last_test_alert_sent_times = alerts_api_mock.alerts_fetcher.query_last_test_alert_times()
    last_model_alert_sent_times = alerts_api_mock.alerts_fetcher.query_last_model_alert_times()
    test_alerts = alerts_api_mock.alerts_fetcher.query_pending_test_alerts()
    model_alerts = alerts_api_mock.alerts_fetcher.query_pending_model_alerts()
    sorted_test_alerts = alerts_api_mock._sort_alerts(test_alerts, last_test_alert_sent_times)
    sorted_model_alerts = alerts_api_mock._sort_alerts(model_alerts, last_model_alert_sent_times)
    assert sorted([alert.id for alert in sorted_test_alerts.skip]) == ['alert_id_1', 'alert_id_5']
    assert sorted([alert.id for alert in sorted_test_alerts.send]) == ['alert_id_2', 'alert_id_3', 'alert_id_4']
    assert sorted([alert.id for alert in sorted_model_alerts.skip]) == ['alert_id_1', 'alert_id_5']
    assert sorted([alert.id for alert in sorted_model_alerts.send]) == ['alert_id_2', 'alert_id_3', 'alert_id_4']
def dataset(ds):
    """Build the training tf.data pipeline from *ds*.

    Returns at most 100 batches of 10240 examples, with a tuned private
    threadpool, non-deterministic element order and DATA auto-sharding.

    BUG FIX: the ``tf.data.Options`` object was previously built but never
    attached to the dataset, so none of the settings took effect; it is now
    applied via ``with_options()``.
    """
    options = tf.data.Options()
    options.threading.private_threadpool_size = 10
    options.deterministic = False
    options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
    return (
        ds.to_tfdataset()
        .with_options(options)
        .batch(10240, num_parallel_calls=tf.data.AUTOTUNE)
        .take(100)
    )
def extractClockworklilyTumblrCom(item):
    """Map a clockworklily tumblr post to a release message.

    Returns None for preview posts or posts without chapter/volume info,
    a release message for recognized tags, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        # Nothing to release: preview post or no chapter/volume info.
        return None
    # Known tags mapped to (series name, translation type).
    tagmap = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tagmap.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
def get_path_file(filename):
    """Resolve *filename* relative to a PyInstaller bundle, when frozen.

    PyInstaller exposes the bundle's extraction directory as
    ``sys._MEIPASS`` (or, in older versions, the ``_MEIPASS2`` environment
    variable). In either case this switches the working directory into the
    bundle and returns an absolute path; otherwise *filename* is returned
    unchanged.

    Cleanup: the original imported ``dirname`` without using it and
    duplicated ``chdir``/``join``/``environ`` imports already available
    through ``os``.
    """
    import os
    import sys
    if hasattr(sys, '_MEIPASS'):
        os.chdir(sys._MEIPASS)
        filename = os.path.join(sys._MEIPASS, filename)
    elif ('_MEIPASS2' in os.environ):
        os.chdir(os.environ['_MEIPASS2'])
        filename = os.path.join(os.environ['_MEIPASS2'], filename)
    return filename
class OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMapping(Options):
    """Mapping options for the default sonification instrument of a treegraph series.

    Each accessor lazily materializes its typed sub-options object via
    ``_config_sub_data``.

    BUG FIX: these accessors had lost their ``@property`` decorators and were
    plain methods, unlike the attribute-style access the Options classes use
    elsewhere in this file; the decorators are restored.
    """

    @property
    def frequency(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingFrequency':
        return self._config_sub_data('frequency', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingFrequency)

    @property
    def gapBetweenNotes(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingGapbetweennotes':
        return self._config_sub_data('gapBetweenNotes', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingGapbetweennotes)

    @property
    def highpass(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingHighpass':
        return self._config_sub_data('highpass', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingHighpass)

    @property
    def lowpass(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingLowpass':
        return self._config_sub_data('lowpass', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingLowpass)

    @property
    def noteDuration(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingNoteduration':
        return self._config_sub_data('noteDuration', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingNoteduration)

    @property
    def pan(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingPan':
        return self._config_sub_data('pan', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingPan)

    @property
    def pitch(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingPitch)

    @property
    def playDelay(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingPlaydelay)

    @property
    def time(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingTime':
        return self._config_sub_data('time', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingTime)

    @property
    def tremolo(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingTremolo':
        return self._config_sub_data('tremolo', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingTremolo)

    @property
    def volume(self) -> 'OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingVolume':
        return self._config_sub_data('volume', OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingVolume)
class ElasticsearchMock():
    """Minimal stand-in for an Elasticsearch client.

    Every bulk() call reports *bulk_size* successfully created documents
    and no errors, regardless of the arguments passed.
    """

    def __init__(self, bulk_size):
        items = [
            {'index': {'_index': 'test', '_type': 'type1', '_id': str(idx),
                       '_version': 1, 'result': 'created',
                       '_shards': {'total': 2, 'successful': 1, 'failed': 0},
                       'created': True, 'status': 201, '_seq_no': 0}}
            for idx in range(bulk_size)
        ]
        self.no_errors = {'took': 500, 'errors': False, 'items': items}

    def bulk(self, body=None, index=None, doc_type=None, params=None):
        # Arguments are accepted for API compatibility but ignored.
        return self.no_errors
# NOTE(review): the bare '()' below looks like a stripped decorator —
# presumably @pytest.fixture(); confirm against the original source.
()
def chained_on_sm_class():
    """Build a StateMachine subclass whose t1 handler chains further events,
    recording every callback invocation on a Mock spy."""
    class ChainedSM(StateMachine):
        # Linear chain s1 -> s2 -> s3 -> s4, with a self-loop on s2.
        s1 = State(initial=True)
        s2 = State()
        s3 = State()
        s4 = State(final=True)
        t1 = s1.to(s2)
        t2a = s2.to(s2)
        t2b = s2.to(s3)
        t3 = s3.to(s4)
        def __init__(self, rtc=True):
            self.spy = mock.Mock()
            super().__init__(rtc=rtc)
        def on_t1(self):
            # Firing t1 chains further transitions (directly and via send).
            return [self.t2a(), self.t2b(), self.send('t3')]
        def on_enter_state(self, event: str, state: State, source: State):
            self.spy('on_enter_state', event=event, state=state.id, source=getattr(source, 'id', ''))
        def on_exit_state(self, event: str, state: State, target: State):
            self.spy('on_exit_state', event=event, state=state.id, target=target.id)
        def on_transition(self, event: str, source: State, target: State):
            self.spy('on_transition', event=event, source=source.id, target=target.id)
            return event
        def after_transition(self, event: str, source: State, target: State):
            self.spy('after_transition', event=event, source=source.id, target=target.id)
    return ChainedSM
class TestOFPActionSetNwTtl(unittest.TestCase):
    """Tests for OFPActionSetNwTtl parsing and serialization."""

    type_ = ofproto.OFPAT_SET_NW_TTL
    len_ = ofproto.OFP_ACTION_NW_TTL_SIZE
    nw_ttl = 240
    fmt = ofproto.OFP_ACTION_NW_TTL_PACK_STR

    def test_init(self):
        action = OFPActionSetNwTtl(self.nw_ttl)
        eq_(self.nw_ttl, action.nw_ttl)

    def _test_parser(self, nw_ttl):
        # Build a raw action struct and check the parsed fields round-trip.
        raw = pack(self.fmt, self.type_, self.len_, nw_ttl)
        parsed = OFPActionSetNwTtl.parser(raw, 0)
        eq_(parsed.type, self.type_)
        eq_(parsed.len, self.len_)
        eq_(parsed.nw_ttl, nw_ttl)

    def test_parser_mid(self):
        self._test_parser(self.nw_ttl)

    def test_parser_max(self):
        # TTL is an 8-bit field.
        self._test_parser(255)

    def test_parser_min(self):
        self._test_parser(0)

    def _test_serialize(self, nw_ttl):
        action = OFPActionSetNwTtl(nw_ttl)
        raw = bytearray()
        action.serialize(raw, 0)
        fields = struct.unpack(self.fmt, six.binary_type(raw))
        eq_(fields[0], self.type_)
        eq_(fields[1], self.len_)
        eq_(fields[2], nw_ttl)

    def test_serialize_mid(self):
        self._test_serialize(self.nw_ttl)

    def test_serialize_max(self):
        self._test_serialize(255)

    def test_serialize_min(self):
        self._test_serialize(0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.