code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def sendLocalFiles(
    self, file_paths, message=None, thread_id=None, thread_type=ThreadType.USER
):
    """
    Sends local files to a thread
    :param file_paths: Paths of files to upload and send
    :param message: Additional message
    :param thread_id: User/Group ID to send to. See :ref:`intro_threads`
    :param thread_type: See :ref:`intro_threads`
    :type thread_type: models.ThreadType
    :return: :ref:`Message ID <intro_message_ids>` of the sent files
    :raises: FBchatException if request failed
    """
    # require_list presumably coerces a single path into a list so that one
    # or many paths are handled uniformly -- confirm against its definition.
    file_paths = require_list(file_paths)
    # Open the files through a context manager so all handles are closed as
    # soon as the upload completes; only the upload result escapes the block.
    with get_files_from_paths(file_paths) as x:
        files = self._upload(x)
    # Delegate the actual send (attaching the uploaded files plus the
    # optional extra message) to the shared _sendFiles helper.
    return self._sendFiles(
        files=files, message=message, thread_id=thread_id, thread_type=thread_type
    ) | def function[sendLocalFiles, parameter[self, file_paths, message, thread_id, thread_type]]:
constant[
Sends local files to a thread
:param file_paths: Paths of files to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed
]
variable[file_paths] assign[=] call[name[require_list], parameter[name[file_paths]]]
with call[name[get_files_from_paths], parameter[name[file_paths]]] begin[:]
variable[files] assign[=] call[name[self]._upload, parameter[name[x]]]
return[call[name[self]._sendFiles, parameter[]]] | keyword[def] identifier[sendLocalFiles] (
identifier[self] , identifier[file_paths] , identifier[message] = keyword[None] , identifier[thread_id] = keyword[None] , identifier[thread_type] = identifier[ThreadType] . identifier[USER]
):
literal[string]
identifier[file_paths] = identifier[require_list] ( identifier[file_paths] )
keyword[with] identifier[get_files_from_paths] ( identifier[file_paths] ) keyword[as] identifier[x] :
identifier[files] = identifier[self] . identifier[_upload] ( identifier[x] )
keyword[return] identifier[self] . identifier[_sendFiles] (
identifier[files] = identifier[files] , identifier[message] = identifier[message] , identifier[thread_id] = identifier[thread_id] , identifier[thread_type] = identifier[thread_type]
) | def sendLocalFiles(self, file_paths, message=None, thread_id=None, thread_type=ThreadType.USER):
"""
Sends local files to a thread
:param file_paths: Paths of files to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed
"""
file_paths = require_list(file_paths)
with get_files_from_paths(file_paths) as x:
files = self._upload(x) # depends on [control=['with'], data=['x']]
return self._sendFiles(files=files, message=message, thread_id=thread_id, thread_type=thread_type) |
def add_webhook(self, scaling_group, policy, name, metadata=None):
    """
    Adds a webhook to the specified policy.
    """
    # Thin pass-through: all validation and the API request live in the
    # manager object; this method only forwards its arguments unchanged.
    return self._manager.add_webhook(scaling_group, policy, name,
            metadata=metadata) | def function[add_webhook, parameter[self, scaling_group, policy, name, metadata]]:
constant[
Adds a webhook to the specified policy.
]
return[call[name[self]._manager.add_webhook, parameter[name[scaling_group], name[policy], name[name]]]] | keyword[def] identifier[add_webhook] ( identifier[self] , identifier[scaling_group] , identifier[policy] , identifier[name] , identifier[metadata] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_manager] . identifier[add_webhook] ( identifier[scaling_group] , identifier[policy] , identifier[name] ,
identifier[metadata] = identifier[metadata] ) | def add_webhook(self, scaling_group, policy, name, metadata=None):
"""
Adds a webhook to the specified policy.
"""
return self._manager.add_webhook(scaling_group, policy, name, metadata=metadata) |
def validateDocument(self, ctxt):
    """Try to validate the document instance basically it does
    the all the checks described by the XML Rec i.e. validates
    the internal and external subset (if present) and validate
    the document tree. """
    # A None validation context is passed straight through as None (NULL at
    # the C level); otherwise unwrap the Python wrapper's underlying object
    # (presumably the libxml2 C pointer held in ._o -- confirm in bindings).
    if ctxt is None: ctxt__o = None
    else: ctxt__o = ctxt._o
    # ret is whatever the libxml2 binding returns (an integer status code,
    # presumably 1 when the document is valid -- verify against libxml2 docs).
    ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)
    return ret | def function[validateDocument, parameter[self, ctxt]]:
constant[Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. ]
if compare[name[ctxt] is constant[None]] begin[:]
variable[ctxt__o] assign[=] constant[None]
variable[ret] assign[=] call[name[libxml2mod].xmlValidateDocument, parameter[name[ctxt__o], name[self]._o]]
return[name[ret]] | keyword[def] identifier[validateDocument] ( identifier[self] , identifier[ctxt] ):
literal[string]
keyword[if] identifier[ctxt] keyword[is] keyword[None] : identifier[ctxt__o] = keyword[None]
keyword[else] : identifier[ctxt__o] = identifier[ctxt] . identifier[_o]
identifier[ret] = identifier[libxml2mod] . identifier[xmlValidateDocument] ( identifier[ctxt__o] , identifier[self] . identifier[_o] )
keyword[return] identifier[ret] | def validateDocument(self, ctxt):
"""Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. """
if ctxt is None:
ctxt__o = None # depends on [control=['if'], data=[]]
else:
ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)
return ret |
def get_collection_for_cls(self, cls):
    """
    Returns the collection name for a given document class.
    :param cls: The document class for which to return the collection name.
    :returns: The collection name for the given class.
    """
    # Lazily auto-register Document subclasses the first time they are seen,
    # unless the class has been marked deprecated.
    if cls not in self.classes:
        if issubclass(cls, Document) and cls not in self.classes and cls not in self.deprecated_classes:
            self.autoregister(cls)
        else:
            # Not a registrable Document subclass: refuse the lookup.
            raise AttributeError("Unknown object type: %s" % cls.__name__)
    # The registry maps each class to a dict carrying its collection name.
    collection = self.classes[cls]['collection']
    return collection | def function[get_collection_for_cls, parameter[self, cls]]:
constant[
Returns the collection name for a given document class.
:param cls: The document class for which to return the collection name.
:returns: The collection name for the given class.
]
if compare[name[cls] <ast.NotIn object at 0x7da2590d7190> name[self].classes] begin[:]
if <ast.BoolOp object at 0x7da1b189cca0> begin[:]
call[name[self].autoregister, parameter[name[cls]]]
variable[collection] assign[=] call[call[name[self].classes][name[cls]]][constant[collection]]
return[name[collection]] | keyword[def] identifier[get_collection_for_cls] ( identifier[self] , identifier[cls] ):
literal[string]
keyword[if] identifier[cls] keyword[not] keyword[in] identifier[self] . identifier[classes] :
keyword[if] identifier[issubclass] ( identifier[cls] , identifier[Document] ) keyword[and] identifier[cls] keyword[not] keyword[in] identifier[self] . identifier[classes] keyword[and] identifier[cls] keyword[not] keyword[in] identifier[self] . identifier[deprecated_classes] :
identifier[self] . identifier[autoregister] ( identifier[cls] )
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] % identifier[cls] . identifier[__name__] )
identifier[collection] = identifier[self] . identifier[classes] [ identifier[cls] ][ literal[string] ]
keyword[return] identifier[collection] | def get_collection_for_cls(self, cls):
"""
Returns the collection name for a given document class.
:param cls: The document class for which to return the collection name.
:returns: The collection name for the given class.
"""
if cls not in self.classes:
if issubclass(cls, Document) and cls not in self.classes and (cls not in self.deprecated_classes):
self.autoregister(cls) # depends on [control=['if'], data=[]]
else:
raise AttributeError('Unknown object type: %s' % cls.__name__) # depends on [control=['if'], data=['cls']]
collection = self.classes[cls]['collection']
return collection |
def create_fields(self, base_model=models.Model, base_manager=models.Manager):
    """
    This method will create a model which will hold field types defined
    at runtime for each ContentType.
    :param base_model: base model class to inherit from
    :return:
    """
    CONTENT_TYPES = self.content_types_query

    # A fresh manager subclass is built per call so callers can inject their
    # own base_manager without mutating a shared class.
    class CustomContentTypeFieldManager(base_manager):
        pass

    @python_2_unicode_compatible
    class CustomContentTypeField(base_model):
        # Closed set of runtime data types a custom field may take.
        DATATYPE_CHOICES = (
            (CUSTOM_TYPE_TEXT, _('text')),
            (CUSTOM_TYPE_INTEGER, _('integer')),
            (CUSTOM_TYPE_FLOAT, _('float')),
            (CUSTOM_TYPE_TIME, _('time')),
            (CUSTOM_TYPE_DATE, _('date')),
            (CUSTOM_TYPE_DATETIME, _('datetime')),
            (CUSTOM_TYPE_BOOLEAN, _('boolean')),
        )
        # The concrete model this custom-field definition belongs to; the
        # selectable content types are restricted by the factory's query.
        content_type = models.ForeignKey(ContentType,
                                         verbose_name=_('content type'),
                                         related_name='+',
                                         limit_choices_to=CONTENT_TYPES)
        name = models.CharField(_('name'), max_length=100, db_index=True)
        label = models.CharField(_('label'), max_length=100)
        data_type = models.CharField(_('data type'), max_length=8, choices=DATATYPE_CHOICES, db_index=True)
        help_text = models.CharField(_('help text'), max_length=200, blank=True, null=True)
        required = models.BooleanField(_('required'), default=False)
        searchable = models.BooleanField(_('searchable'), default=True)
        initial = models.CharField(_('initial'), max_length=200, blank=True, null=True)
        min_length = models.PositiveIntegerField(_('min length'), blank=True, null=True)
        max_length = models.PositiveIntegerField(_('max length'), blank=True, null=True)
        min_value = models.FloatField(_('min value'), blank=True, null=True)
        max_value = models.FloatField(_('max value'), blank=True, null=True)
        objects = CustomContentTypeFieldManager()

        class Meta:
            verbose_name = _('custom field')
            verbose_name_plural = _('custom fields')
            # Abstract: the caller is expected to subclass this into a
            # concrete model of their own.
            abstract = True

        def save(self, *args, **kwargs):
            # No extra behavior yet; delegates straight to the base model.
            super(CustomContentTypeField, self).save(*args, **kwargs)

        def clean(self):
            # if field is required must issue a initial value
            if self.required:
                # TODO - must create values for all instances that have not
                #print model.objects.values_list('pk', flat=True)
                #print self.field.filter(content_type=self.content_type)
                #objs = self.field.filter(content_type=self.content_type) \
                #    .exclude(object_id__in=model.objects.values_list('pk', flat=True))
                #for obj in objs:
                #    print obj
                pass

        def _check_validate_already_defined_in_model(self):
            # Reject a custom field whose name collides with a concrete
            # model field on the target content type.
            model = self.content_type.model_class()
            if self.name in [f.name for f in model._meta.fields]:
                raise ValidationError({ 'name': (_('Custom field already defined as model field for content type %(model_name)s') % {'model_name': model.__name__},) })

        def _check_validate_already_defined_in_custom_fields(self):
            # Reject duplicate custom-field names for the same content type,
            # excluding this row itself when editing an existing record.
            model = self.content_type.model_class()
            qs = self.__class__._default_manager.filter(
                content_type=self.content_type,
                name=self.name,
            )
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                raise ValidationError({ 'name': (_('Custom field already defined for content type %(model_name)s') % {'model_name': model.__name__},) })

        def __str__(self):
            return "%s" % self.name

    return CustomContentTypeField | def function[create_fields, parameter[self, base_model, base_manager]]:
constant[
This method will create a model which will hold field types defined
at runtime for each ContentType.
:param base_model: base model class to inherit from
:return:
]
variable[CONTENT_TYPES] assign[=] name[self].content_types_query
class class[CustomContentTypeFieldManager, parameter[]] begin[:]
pass
class class[CustomContentTypeField, parameter[]] begin[:]
variable[DATATYPE_CHOICES] assign[=] tuple[[<ast.Tuple object at 0x7da18f09f6a0>, <ast.Tuple object at 0x7da18f09ebc0>, <ast.Tuple object at 0x7da18f09ffd0>, <ast.Tuple object at 0x7da18f09ce50>, <ast.Tuple object at 0x7da18f09df60>, <ast.Tuple object at 0x7da18f09eb30>, <ast.Tuple object at 0x7da18f09ee90>]]
variable[content_type] assign[=] call[name[models].ForeignKey, parameter[name[ContentType]]]
variable[name] assign[=] call[name[models].CharField, parameter[call[name[_], parameter[constant[name]]]]]
variable[label] assign[=] call[name[models].CharField, parameter[call[name[_], parameter[constant[label]]]]]
variable[data_type] assign[=] call[name[models].CharField, parameter[call[name[_], parameter[constant[data type]]]]]
variable[help_text] assign[=] call[name[models].CharField, parameter[call[name[_], parameter[constant[help text]]]]]
variable[required] assign[=] call[name[models].BooleanField, parameter[call[name[_], parameter[constant[required]]]]]
variable[searchable] assign[=] call[name[models].BooleanField, parameter[call[name[_], parameter[constant[searchable]]]]]
variable[initial] assign[=] call[name[models].CharField, parameter[call[name[_], parameter[constant[initial]]]]]
variable[min_length] assign[=] call[name[models].PositiveIntegerField, parameter[call[name[_], parameter[constant[min length]]]]]
variable[max_length] assign[=] call[name[models].PositiveIntegerField, parameter[call[name[_], parameter[constant[max length]]]]]
variable[min_value] assign[=] call[name[models].FloatField, parameter[call[name[_], parameter[constant[min value]]]]]
variable[max_value] assign[=] call[name[models].FloatField, parameter[call[name[_], parameter[constant[max value]]]]]
variable[objects] assign[=] call[name[CustomContentTypeFieldManager], parameter[]]
class class[Meta, parameter[]] begin[:]
variable[verbose_name] assign[=] call[name[_], parameter[constant[custom field]]]
variable[verbose_name_plural] assign[=] call[name[_], parameter[constant[custom fields]]]
variable[abstract] assign[=] constant[True]
def function[save, parameter[self]]:
call[call[name[super], parameter[name[CustomContentTypeField], name[self]]].save, parameter[<ast.Starred object at 0x7da18f09e8f0>]]
def function[clean, parameter[self]]:
if name[self].required begin[:]
pass
def function[_check_validate_already_defined_in_model, parameter[self]]:
variable[model] assign[=] call[name[self].content_type.model_class, parameter[]]
if compare[name[self].name in <ast.ListComp object at 0x7da18f09c070>] begin[:]
<ast.Raise object at 0x7da18f09ecb0>
def function[_check_validate_already_defined_in_custom_fields, parameter[self]]:
variable[model] assign[=] call[name[self].content_type.model_class, parameter[]]
variable[qs] assign[=] call[name[self].__class__._default_manager.filter, parameter[]]
if <ast.BoolOp object at 0x7da204963fa0> begin[:]
variable[qs] assign[=] call[name[qs].exclude, parameter[]]
if call[name[qs].exists, parameter[]] begin[:]
<ast.Raise object at 0x7da204963520>
def function[__str__, parameter[self]]:
return[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[self].name]]
return[name[CustomContentTypeField]] | keyword[def] identifier[create_fields] ( identifier[self] , identifier[base_model] = identifier[models] . identifier[Model] , identifier[base_manager] = identifier[models] . identifier[Manager] ):
literal[string]
identifier[CONTENT_TYPES] = identifier[self] . identifier[content_types_query]
keyword[class] identifier[CustomContentTypeFieldManager] ( identifier[base_manager] ):
keyword[pass]
@ identifier[python_2_unicode_compatible]
keyword[class] identifier[CustomContentTypeField] ( identifier[base_model] ):
identifier[DATATYPE_CHOICES] =(
( identifier[CUSTOM_TYPE_TEXT] , identifier[_] ( literal[string] )),
( identifier[CUSTOM_TYPE_INTEGER] , identifier[_] ( literal[string] )),
( identifier[CUSTOM_TYPE_FLOAT] , identifier[_] ( literal[string] )),
( identifier[CUSTOM_TYPE_TIME] , identifier[_] ( literal[string] )),
( identifier[CUSTOM_TYPE_DATE] , identifier[_] ( literal[string] )),
( identifier[CUSTOM_TYPE_DATETIME] , identifier[_] ( literal[string] )),
( identifier[CUSTOM_TYPE_BOOLEAN] , identifier[_] ( literal[string] )),
)
identifier[content_type] = identifier[models] . identifier[ForeignKey] ( identifier[ContentType] ,
identifier[verbose_name] = identifier[_] ( literal[string] ),
identifier[related_name] = literal[string] ,
identifier[limit_choices_to] = identifier[CONTENT_TYPES] )
identifier[name] = identifier[models] . identifier[CharField] ( identifier[_] ( literal[string] ), identifier[max_length] = literal[int] , identifier[db_index] = keyword[True] )
identifier[label] = identifier[models] . identifier[CharField] ( identifier[_] ( literal[string] ), identifier[max_length] = literal[int] )
identifier[data_type] = identifier[models] . identifier[CharField] ( identifier[_] ( literal[string] ), identifier[max_length] = literal[int] , identifier[choices] = identifier[DATATYPE_CHOICES] , identifier[db_index] = keyword[True] )
identifier[help_text] = identifier[models] . identifier[CharField] ( identifier[_] ( literal[string] ), identifier[max_length] = literal[int] , identifier[blank] = keyword[True] , identifier[null] = keyword[True] )
identifier[required] = identifier[models] . identifier[BooleanField] ( identifier[_] ( literal[string] ), identifier[default] = keyword[False] )
identifier[searchable] = identifier[models] . identifier[BooleanField] ( identifier[_] ( literal[string] ), identifier[default] = keyword[True] )
identifier[initial] = identifier[models] . identifier[CharField] ( identifier[_] ( literal[string] ), identifier[max_length] = literal[int] , identifier[blank] = keyword[True] , identifier[null] = keyword[True] )
identifier[min_length] = identifier[models] . identifier[PositiveIntegerField] ( identifier[_] ( literal[string] ), identifier[blank] = keyword[True] , identifier[null] = keyword[True] )
identifier[max_length] = identifier[models] . identifier[PositiveIntegerField] ( identifier[_] ( literal[string] ), identifier[blank] = keyword[True] , identifier[null] = keyword[True] )
identifier[min_value] = identifier[models] . identifier[FloatField] ( identifier[_] ( literal[string] ), identifier[blank] = keyword[True] , identifier[null] = keyword[True] )
identifier[max_value] = identifier[models] . identifier[FloatField] ( identifier[_] ( literal[string] ), identifier[blank] = keyword[True] , identifier[null] = keyword[True] )
identifier[objects] = identifier[CustomContentTypeFieldManager] ()
keyword[class] identifier[Meta] :
identifier[verbose_name] = identifier[_] ( literal[string] )
identifier[verbose_name_plural] = identifier[_] ( literal[string] )
identifier[abstract] = keyword[True]
keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[super] ( identifier[CustomContentTypeField] , identifier[self] ). identifier[save] (* identifier[args] ,** identifier[kwargs] )
keyword[def] identifier[clean] ( identifier[self] ):
keyword[if] identifier[self] . identifier[required] :
keyword[pass]
keyword[def] identifier[_check_validate_already_defined_in_model] ( identifier[self] ):
identifier[model] = identifier[self] . identifier[content_type] . identifier[model_class] ()
keyword[if] identifier[self] . identifier[name] keyword[in] [ identifier[f] . identifier[name] keyword[for] identifier[f] keyword[in] identifier[model] . identifier[_meta] . identifier[fields] ]:
keyword[raise] identifier[ValidationError] ({ literal[string] :( identifier[_] ( literal[string] )%{ literal[string] : identifier[model] . identifier[__name__] },)})
keyword[def] identifier[_check_validate_already_defined_in_custom_fields] ( identifier[self] ):
identifier[model] = identifier[self] . identifier[content_type] . identifier[model_class] ()
identifier[qs] = identifier[self] . identifier[__class__] . identifier[_default_manager] . identifier[filter] (
identifier[content_type] = identifier[self] . identifier[content_type] ,
identifier[name] = identifier[self] . identifier[name] ,
)
keyword[if] keyword[not] identifier[self] . identifier[_state] . identifier[adding] keyword[and] identifier[self] . identifier[pk] keyword[is] keyword[not] keyword[None] :
identifier[qs] = identifier[qs] . identifier[exclude] ( identifier[pk] = identifier[self] . identifier[pk] )
keyword[if] identifier[qs] . identifier[exists] ():
keyword[raise] identifier[ValidationError] ({ literal[string] :( identifier[_] ( literal[string] )%{ literal[string] : identifier[model] . identifier[__name__] },)})
keyword[def] identifier[__str__] ( identifier[self] ):
keyword[return] literal[string] % identifier[self] . identifier[name]
keyword[return] identifier[CustomContentTypeField] | def create_fields(self, base_model=models.Model, base_manager=models.Manager):
"""
This method will create a model which will hold field types defined
at runtime for each ContentType.
:param base_model: base model class to inherit from
:return:
"""
CONTENT_TYPES = self.content_types_query
class CustomContentTypeFieldManager(base_manager):
pass
@python_2_unicode_compatible
class CustomContentTypeField(base_model):
DATATYPE_CHOICES = ((CUSTOM_TYPE_TEXT, _('text')), (CUSTOM_TYPE_INTEGER, _('integer')), (CUSTOM_TYPE_FLOAT, _('float')), (CUSTOM_TYPE_TIME, _('time')), (CUSTOM_TYPE_DATE, _('date')), (CUSTOM_TYPE_DATETIME, _('datetime')), (CUSTOM_TYPE_BOOLEAN, _('boolean')))
content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name='+', limit_choices_to=CONTENT_TYPES)
name = models.CharField(_('name'), max_length=100, db_index=True)
label = models.CharField(_('label'), max_length=100)
data_type = models.CharField(_('data type'), max_length=8, choices=DATATYPE_CHOICES, db_index=True)
help_text = models.CharField(_('help text'), max_length=200, blank=True, null=True)
required = models.BooleanField(_('required'), default=False)
searchable = models.BooleanField(_('searchable'), default=True)
initial = models.CharField(_('initial'), max_length=200, blank=True, null=True)
min_length = models.PositiveIntegerField(_('min length'), blank=True, null=True)
max_length = models.PositiveIntegerField(_('max length'), blank=True, null=True)
min_value = models.FloatField(_('min value'), blank=True, null=True)
max_value = models.FloatField(_('max value'), blank=True, null=True)
objects = CustomContentTypeFieldManager()
class Meta:
verbose_name = _('custom field')
verbose_name_plural = _('custom fields')
abstract = True
def save(self, *args, **kwargs):
super(CustomContentTypeField, self).save(*args, **kwargs)
def clean(self):
# if field is required must issue a initial value
if self.required:
# TODO - must create values for all instances that have not
#print model.objects.values_list('pk', flat=True)
#print self.field.filter(content_type=self.content_type)
#objs = self.field.filter(content_type=self.content_type) \
# .exclude(object_id__in=model.objects.values_list('pk', flat=True))
#for obj in objs:
# print obj
pass # depends on [control=['if'], data=[]]
def _check_validate_already_defined_in_model(self):
model = self.content_type.model_class()
if self.name in [f.name for f in model._meta.fields]:
raise ValidationError({'name': (_('Custom field already defined as model field for content type %(model_name)s') % {'model_name': model.__name__},)}) # depends on [control=['if'], data=[]]
def _check_validate_already_defined_in_custom_fields(self):
model = self.content_type.model_class()
qs = self.__class__._default_manager.filter(content_type=self.content_type, name=self.name)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk) # depends on [control=['if'], data=[]]
if qs.exists():
raise ValidationError({'name': (_('Custom field already defined for content type %(model_name)s') % {'model_name': model.__name__},)}) # depends on [control=['if'], data=[]]
def __str__(self):
return '%s' % self.name
return CustomContentTypeField |
def get_triples(self, subject=None, predicate=None, object_=None):
    """Returns triples that correspond to the specified subject,
    predicates, and objects."""
    # Generator: lazily yields each stored triple that matches every
    # criterion supplied; a criterion left as None acts as a wildcard.
    for triple in self.triples:
        # Filter out non-matches
        if subject is not None and triple['subject'] != subject:
            continue
        if predicate is not None and triple['predicate'] != predicate:
            continue
        if object_ is not None and triple['object'] != object_:
            continue
        yield triple | def function[get_triples, parameter[self, subject, predicate, object_]]:
constant[Returns triples that correspond to the specified subject,
predicates, and objects.]
for taget[name[triple]] in starred[name[self].triples] begin[:]
if <ast.BoolOp object at 0x7da1b183a260> begin[:]
continue
if <ast.BoolOp object at 0x7da1b1839990> begin[:]
continue
if <ast.BoolOp object at 0x7da1b1839b10> begin[:]
continue
<ast.Yield object at 0x7da1b1b84d00> | keyword[def] identifier[get_triples] ( identifier[self] , identifier[subject] = keyword[None] , identifier[predicate] = keyword[None] , identifier[object_] = keyword[None] ):
literal[string]
keyword[for] identifier[triple] keyword[in] identifier[self] . identifier[triples] :
keyword[if] identifier[subject] keyword[is] keyword[not] keyword[None] keyword[and] identifier[triple] [ literal[string] ]!= identifier[subject] :
keyword[continue]
keyword[if] identifier[predicate] keyword[is] keyword[not] keyword[None] keyword[and] identifier[triple] [ literal[string] ]!= identifier[predicate] :
keyword[continue]
keyword[if] identifier[object_] keyword[is] keyword[not] keyword[None] keyword[and] identifier[triple] [ literal[string] ]!= identifier[object_] :
keyword[continue]
keyword[yield] identifier[triple] | def get_triples(self, subject=None, predicate=None, object_=None):
"""Returns triples that correspond to the specified subject,
predicates, and objects."""
for triple in self.triples:
# Filter out non-matches
if subject is not None and triple['subject'] != subject:
continue # depends on [control=['if'], data=[]]
if predicate is not None and triple['predicate'] != predicate:
continue # depends on [control=['if'], data=[]]
if object_ is not None and triple['object'] != object_:
continue # depends on [control=['if'], data=[]]
yield triple # depends on [control=['for'], data=['triple']] |
def collect_nonperiodic_features(
        featuresdir,
        magcol,
        outfile,
        pklglob='varfeatures-*.pkl',
        featurestouse=NONPERIODIC_FEATURES_TO_COLLECT,
        maxobjects=None,
        labeldict=None,
        labeltype='binary',
):
    '''This collects variability features into arrays for use with the classifer.
    Parameters
    ----------
    featuresdir : str
        This is the directory where all the varfeatures pickles are. Use
        `pklglob` to specify the glob to search for. The `varfeatures` pickles
        contain objectids, a light curve magcol, and features as dict
        key-vals. The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used
        to produce these.
    magcol : str
        This is the key in each varfeatures pickle corresponding to the magcol
        of the light curve the variability features were extracted from.
    outfile : str
        This is the filename of the output pickle that will be written
        containing a dict of all the features extracted into np.arrays.
    pklglob : str
        This is the UNIX file glob to use to search for varfeatures pickle files
        in `featuresdir`.
    featurestouse : list of str
        Each varfeatures pickle can contain any combination of non-periodic,
        stellar, and periodic features; these must have the same names as
        elements in the list of strings provided in `featurestouse`. This tries
        to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by
        default. If `featurestouse` is provided as a list, gets only the
        features listed in this kwarg instead.
    maxobjects : int or None
        The controls how many pickles from the featuresdir to process. If None,
        will process all varfeatures pickles.
    labeldict : dict or None
        If this is provided, it must be a dict with the following key:val list::
            '<objectid>':<label value>
        for each objectid collected from the varfeatures pickles. This will turn
        the collected information into a training set for classifiers.
        Example: to carry out non-periodic variable feature collection of fake
        LCS prepared by :py:mod:`astrobase.fakelcs.generation`, use the value
        of the 'isvariable' dict elem from the `fakelcs-info.pkl` here, like
        so::
            labeldict={x:y for x,y in zip(fakelcinfo['objectid'],
                                          fakelcinfo['isvariable'])}
    labeltype : {'binary', 'classes'}
        This is either 'binary' or 'classes' for binary/multi-class
        classification respectively.
    Returns
    -------
    dict
        This returns a dict with all of the features collected into np.arrays,
        ready to use as input to a scikit-learn classifier.
    '''

    # list of input pickles generated by varfeatures in lcproc.py
    pklist = glob.glob(os.path.join(featuresdir, pklglob))

    if maxobjects:
        pklist = pklist[:maxobjects]

    # fancy progress bar with tqdm if present
    if TQDM:
        listiterator = tqdm(pklist)
    else:
        listiterator = pklist

    # go through all the varfeatures arrays
    feature_dict = {'objectids':[],'magcol':magcol, 'availablefeatures':[]}

    LOGINFO('collecting features for magcol: %s' % magcol)

    for pkl in listiterator:

        with open(pkl,'rb') as infd:
            varf = pickle.load(infd)

        # update the objectid list
        objectid = varf['objectid']
        if objectid not in feature_dict['objectids']:
            feature_dict['objectids'].append(objectid)

        thisfeatures = varf[magcol]

        # an explicit featurestouse list overrides the module-level default
        if featurestouse and len(featurestouse) > 0:
            featurestoget = featurestouse
        else:
            featurestoget = NONPERIODIC_FEATURES_TO_COLLECT

        # collect all the features for this magcol/objectid combination
        for feature in featurestoget:

            # update the global feature list if necessary
            if ((feature not in feature_dict['availablefeatures']) and
                (feature in thisfeatures)):
                feature_dict['availablefeatures'].append(feature)
                feature_dict[feature] = []

            if feature in thisfeatures:
                feature_dict[feature].append(
                    thisfeatures[feature]
                )

    # now that we've collected all the objects and their features, turn the list
    # into arrays, and then concatenate them
    # NOTE(review): if some pickles lack a feature that others provide, the
    # per-feature lists end up with different lengths and np.column_stack
    # below will fail -- confirm the input pickles are homogeneous.
    for feat in feature_dict['availablefeatures']:
        feature_dict[feat] = np.array(feature_dict[feat])

    feature_dict['objectids'] = np.array(feature_dict['objectids'])

    feature_array = np.column_stack([feature_dict[feat] for feat in
                                     feature_dict['availablefeatures']])
    feature_dict['features_array'] = feature_array

    # if there's a labeldict available, use it to generate a label array. this
    # feature collection is now a training set.
    if isinstance(labeldict, dict):

        # objects absent from labeldict keep the default label 0
        labelarray = np.zeros(feature_dict['objectids'].size, dtype=np.int64)

        # populate the labels for each object in the training set
        for ind, objectid in enumerate(feature_dict['objectids']):

            if objectid in labeldict:

                # if this is a binary classifier training set, convert bools to
                # ones and zeros
                if labeltype == 'binary':

                    if labeldict[objectid]:
                        labelarray[ind] = 1

                # otherwise, use the actual class label integer
                elif labeltype == 'classes':
                    labelarray[ind] = labeldict[objectid]

        feature_dict['labels_array'] = labelarray

    # record the collection settings alongside the features for provenance
    feature_dict['kwargs'] = {'pklglob':pklglob,
                              'featurestouse':featurestouse,
                              'maxobjects':maxobjects,
                              'labeltype':labeltype}

    # write the info to the output pickle
    with open(outfile,'wb') as outfd:
        pickle.dump(feature_dict, outfd, pickle.HIGHEST_PROTOCOL)

    # return the feature_dict
    return feature_dict | def function[collect_nonperiodic_features, parameter[featuresdir, magcol, outfile, pklglob, featurestouse, maxobjects, labeldict, labeltype]]:
constant[This collects variability features into arrays for use with the classifer.
Parameters
----------
featuresdir : str
This is the directory where all the varfeatures pickles are. Use
`pklglob` to specify the glob to search for. The `varfeatures` pickles
contain objectids, a light curve magcol, and features as dict
key-vals. The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used
to produce these.
magcol : str
This is the key in each varfeatures pickle corresponding to the magcol
of the light curve the variability features were extracted from.
outfile : str
This is the filename of the output pickle that will be written
containing a dict of all the features extracted into np.arrays.
pklglob : str
This is the UNIX file glob to use to search for varfeatures pickle files
in `featuresdir`.
featurestouse : list of str
Each varfeatures pickle can contain any combination of non-periodic,
stellar, and periodic features; these must have the same names as
elements in the list of strings provided in `featurestouse`. This tries
to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by
default. If `featurestouse` is provided as a list, gets only the
features listed in this kwarg instead.
maxobjects : int or None
The controls how many pickles from the featuresdir to process. If None,
will process all varfeatures pickles.
labeldict : dict or None
If this is provided, it must be a dict with the following key:val list::
'<objectid>':<label value>
for each objectid collected from the varfeatures pickles. This will turn
the collected information into a training set for classifiers.
Example: to carry out non-periodic variable feature collection of fake
LCS prepared by :py:mod:`astrobase.fakelcs.generation`, use the value
of the 'isvariable' dict elem from the `fakelcs-info.pkl` here, like
so::
labeldict={x:y for x,y in zip(fakelcinfo['objectid'],
fakelcinfo['isvariable'])}
labeltype : {'binary', 'classes'}
This is either 'binary' or 'classes' for binary/multi-class
classification respectively.
Returns
-------
dict
This returns a dict with all of the features collected into np.arrays,
ready to use as input to a scikit-learn classifier.
]
variable[pklist] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[featuresdir], name[pklglob]]]]]
if name[maxobjects] begin[:]
variable[pklist] assign[=] call[name[pklist]][<ast.Slice object at 0x7da1b00b6ec0>]
if name[TQDM] begin[:]
variable[listiterator] assign[=] call[name[tqdm], parameter[name[pklist]]]
variable[feature_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b00b6b60>, <ast.Constant object at 0x7da1b00b6bc0>, <ast.Constant object at 0x7da1b00b6b90>], [<ast.List object at 0x7da1b00b6b30>, <ast.Name object at 0x7da1b00b6c50>, <ast.List object at 0x7da1b00b69b0>]]
call[name[LOGINFO], parameter[binary_operation[constant[collecting features for magcol: %s] <ast.Mod object at 0x7da2590d6920> name[magcol]]]]
for taget[name[pkl]] in starred[name[listiterator]] begin[:]
with call[name[open], parameter[name[pkl], constant[rb]]] begin[:]
variable[varf] assign[=] call[name[pickle].load, parameter[name[infd]]]
variable[objectid] assign[=] call[name[varf]][constant[objectid]]
if compare[name[objectid] <ast.NotIn object at 0x7da2590d7190> call[name[feature_dict]][constant[objectids]]] begin[:]
call[call[name[feature_dict]][constant[objectids]].append, parameter[name[objectid]]]
variable[thisfeatures] assign[=] call[name[varf]][name[magcol]]
if <ast.BoolOp object at 0x7da1b00b4850> begin[:]
variable[featurestoget] assign[=] name[featurestouse]
for taget[name[feature]] in starred[name[featurestoget]] begin[:]
if <ast.BoolOp object at 0x7da1b00b5f90> begin[:]
call[call[name[feature_dict]][constant[availablefeatures]].append, parameter[name[feature]]]
call[name[feature_dict]][name[feature]] assign[=] list[[]]
if compare[name[feature] in name[thisfeatures]] begin[:]
call[call[name[feature_dict]][name[feature]].append, parameter[call[name[thisfeatures]][name[feature]]]]
for taget[name[feat]] in starred[call[name[feature_dict]][constant[availablefeatures]]] begin[:]
call[name[feature_dict]][name[feat]] assign[=] call[name[np].array, parameter[call[name[feature_dict]][name[feat]]]]
call[name[feature_dict]][constant[objectids]] assign[=] call[name[np].array, parameter[call[name[feature_dict]][constant[objectids]]]]
variable[feature_array] assign[=] call[name[np].column_stack, parameter[<ast.ListComp object at 0x7da1b00b7df0>]]
call[name[feature_dict]][constant[features_array]] assign[=] name[feature_array]
if call[name[isinstance], parameter[name[labeldict], name[dict]]] begin[:]
variable[labelarray] assign[=] call[name[np].zeros, parameter[call[name[feature_dict]][constant[objectids]].size]]
for taget[tuple[[<ast.Name object at 0x7da1b00b5330>, <ast.Name object at 0x7da1b00b5870>]]] in starred[call[name[enumerate], parameter[call[name[feature_dict]][constant[objectids]]]]] begin[:]
if compare[name[objectid] in name[labeldict]] begin[:]
if compare[name[labeltype] equal[==] constant[binary]] begin[:]
if call[name[labeldict]][name[objectid]] begin[:]
call[name[labelarray]][name[ind]] assign[=] constant[1]
call[name[feature_dict]][constant[labels_array]] assign[=] name[labelarray]
call[name[feature_dict]][constant[kwargs]] assign[=] dictionary[[<ast.Constant object at 0x7da1b012e830>, <ast.Constant object at 0x7da1b012e860>, <ast.Constant object at 0x7da1b012f310>, <ast.Constant object at 0x7da1b012d150>], [<ast.Name object at 0x7da1b007feb0>, <ast.Name object at 0x7da1b007fe80>, <ast.Name object at 0x7da1b007fe50>, <ast.Name object at 0x7da1b007fe20>]]
with call[name[open], parameter[name[outfile], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[feature_dict], name[outfd], name[pickle].HIGHEST_PROTOCOL]]
return[name[feature_dict]] | keyword[def] identifier[collect_nonperiodic_features] (
identifier[featuresdir] ,
identifier[magcol] ,
identifier[outfile] ,
identifier[pklglob] = literal[string] ,
identifier[featurestouse] = identifier[NONPERIODIC_FEATURES_TO_COLLECT] ,
identifier[maxobjects] = keyword[None] ,
identifier[labeldict] = keyword[None] ,
identifier[labeltype] = literal[string] ,
):
literal[string]
identifier[pklist] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[featuresdir] , identifier[pklglob] ))
keyword[if] identifier[maxobjects] :
identifier[pklist] = identifier[pklist] [: identifier[maxobjects] ]
keyword[if] identifier[TQDM] :
identifier[listiterator] = identifier[tqdm] ( identifier[pklist] )
keyword[else] :
identifier[listiterator] = identifier[pklist]
identifier[feature_dict] ={ literal[string] :[], literal[string] : identifier[magcol] , literal[string] :[]}
identifier[LOGINFO] ( literal[string] % identifier[magcol] )
keyword[for] identifier[pkl] keyword[in] identifier[listiterator] :
keyword[with] identifier[open] ( identifier[pkl] , literal[string] ) keyword[as] identifier[infd] :
identifier[varf] = identifier[pickle] . identifier[load] ( identifier[infd] )
identifier[objectid] = identifier[varf] [ literal[string] ]
keyword[if] identifier[objectid] keyword[not] keyword[in] identifier[feature_dict] [ literal[string] ]:
identifier[feature_dict] [ literal[string] ]. identifier[append] ( identifier[objectid] )
identifier[thisfeatures] = identifier[varf] [ identifier[magcol] ]
keyword[if] identifier[featurestouse] keyword[and] identifier[len] ( identifier[featurestouse] )> literal[int] :
identifier[featurestoget] = identifier[featurestouse]
keyword[else] :
identifier[featurestoget] = identifier[NONPERIODIC_FEATURES_TO_COLLECT]
keyword[for] identifier[feature] keyword[in] identifier[featurestoget] :
keyword[if] (( identifier[feature] keyword[not] keyword[in] identifier[feature_dict] [ literal[string] ]) keyword[and]
( identifier[feature] keyword[in] identifier[thisfeatures] )):
identifier[feature_dict] [ literal[string] ]. identifier[append] ( identifier[feature] )
identifier[feature_dict] [ identifier[feature] ]=[]
keyword[if] identifier[feature] keyword[in] identifier[thisfeatures] :
identifier[feature_dict] [ identifier[feature] ]. identifier[append] (
identifier[thisfeatures] [ identifier[feature] ]
)
keyword[for] identifier[feat] keyword[in] identifier[feature_dict] [ literal[string] ]:
identifier[feature_dict] [ identifier[feat] ]= identifier[np] . identifier[array] ( identifier[feature_dict] [ identifier[feat] ])
identifier[feature_dict] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[feature_dict] [ literal[string] ])
identifier[feature_array] = identifier[np] . identifier[column_stack] ([ identifier[feature_dict] [ identifier[feat] ] keyword[for] identifier[feat] keyword[in]
identifier[feature_dict] [ literal[string] ]])
identifier[feature_dict] [ literal[string] ]= identifier[feature_array]
keyword[if] identifier[isinstance] ( identifier[labeldict] , identifier[dict] ):
identifier[labelarray] = identifier[np] . identifier[zeros] ( identifier[feature_dict] [ literal[string] ]. identifier[size] , identifier[dtype] = identifier[np] . identifier[int64] )
keyword[for] identifier[ind] , identifier[objectid] keyword[in] identifier[enumerate] ( identifier[feature_dict] [ literal[string] ]):
keyword[if] identifier[objectid] keyword[in] identifier[labeldict] :
keyword[if] identifier[labeltype] == literal[string] :
keyword[if] identifier[labeldict] [ identifier[objectid] ]:
identifier[labelarray] [ identifier[ind] ]= literal[int]
keyword[elif] identifier[labeltype] == literal[string] :
identifier[labelarray] [ identifier[ind] ]= identifier[labeldict] [ identifier[objectid] ]
identifier[feature_dict] [ literal[string] ]= identifier[labelarray]
identifier[feature_dict] [ literal[string] ]={ literal[string] : identifier[pklglob] ,
literal[string] : identifier[featurestouse] ,
literal[string] : identifier[maxobjects] ,
literal[string] : identifier[labeltype] }
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] ( identifier[feature_dict] , identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] )
keyword[return] identifier[feature_dict] | def collect_nonperiodic_features(featuresdir, magcol, outfile, pklglob='varfeatures-*.pkl', featurestouse=NONPERIODIC_FEATURES_TO_COLLECT, maxobjects=None, labeldict=None, labeltype='binary'):
"""This collects variability features into arrays for use with the classifer.
Parameters
----------
featuresdir : str
This is the directory where all the varfeatures pickles are. Use
`pklglob` to specify the glob to search for. The `varfeatures` pickles
contain objectids, a light curve magcol, and features as dict
key-vals. The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used
to produce these.
magcol : str
This is the key in each varfeatures pickle corresponding to the magcol
of the light curve the variability features were extracted from.
outfile : str
This is the filename of the output pickle that will be written
containing a dict of all the features extracted into np.arrays.
pklglob : str
This is the UNIX file glob to use to search for varfeatures pickle files
in `featuresdir`.
featurestouse : list of str
Each varfeatures pickle can contain any combination of non-periodic,
stellar, and periodic features; these must have the same names as
elements in the list of strings provided in `featurestouse`. This tries
to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by
default. If `featurestouse` is provided as a list, gets only the
features listed in this kwarg instead.
maxobjects : int or None
The controls how many pickles from the featuresdir to process. If None,
will process all varfeatures pickles.
labeldict : dict or None
If this is provided, it must be a dict with the following key:val list::
'<objectid>':<label value>
for each objectid collected from the varfeatures pickles. This will turn
the collected information into a training set for classifiers.
Example: to carry out non-periodic variable feature collection of fake
LCS prepared by :py:mod:`astrobase.fakelcs.generation`, use the value
of the 'isvariable' dict elem from the `fakelcs-info.pkl` here, like
so::
labeldict={x:y for x,y in zip(fakelcinfo['objectid'],
fakelcinfo['isvariable'])}
labeltype : {'binary', 'classes'}
This is either 'binary' or 'classes' for binary/multi-class
classification respectively.
Returns
-------
dict
This returns a dict with all of the features collected into np.arrays,
ready to use as input to a scikit-learn classifier.
"""
# list of input pickles generated by varfeatures in lcproc.py
pklist = glob.glob(os.path.join(featuresdir, pklglob))
if maxobjects:
pklist = pklist[:maxobjects] # depends on [control=['if'], data=[]]
# fancy progress bar with tqdm if present
if TQDM:
listiterator = tqdm(pklist) # depends on [control=['if'], data=[]]
else:
listiterator = pklist
# go through all the varfeatures arrays
feature_dict = {'objectids': [], 'magcol': magcol, 'availablefeatures': []}
LOGINFO('collecting features for magcol: %s' % magcol)
for pkl in listiterator:
with open(pkl, 'rb') as infd:
varf = pickle.load(infd) # depends on [control=['with'], data=['infd']]
# update the objectid list
objectid = varf['objectid']
if objectid not in feature_dict['objectids']:
feature_dict['objectids'].append(objectid) # depends on [control=['if'], data=['objectid']]
thisfeatures = varf[magcol]
if featurestouse and len(featurestouse) > 0:
featurestoget = featurestouse # depends on [control=['if'], data=[]]
else:
featurestoget = NONPERIODIC_FEATURES_TO_COLLECT
# collect all the features for this magcol/objectid combination
for feature in featurestoget:
# update the global feature list if necessary
if feature not in feature_dict['availablefeatures'] and feature in thisfeatures:
feature_dict['availablefeatures'].append(feature)
feature_dict[feature] = [] # depends on [control=['if'], data=[]]
if feature in thisfeatures:
feature_dict[feature].append(thisfeatures[feature]) # depends on [control=['if'], data=['feature', 'thisfeatures']] # depends on [control=['for'], data=['feature']] # depends on [control=['for'], data=['pkl']]
# now that we've collected all the objects and their features, turn the list
# into arrays, and then concatenate them
for feat in feature_dict['availablefeatures']:
feature_dict[feat] = np.array(feature_dict[feat]) # depends on [control=['for'], data=['feat']]
feature_dict['objectids'] = np.array(feature_dict['objectids'])
feature_array = np.column_stack([feature_dict[feat] for feat in feature_dict['availablefeatures']])
feature_dict['features_array'] = feature_array
# if there's a labeldict available, use it to generate a label array. this
# feature collection is now a training set.
if isinstance(labeldict, dict):
labelarray = np.zeros(feature_dict['objectids'].size, dtype=np.int64)
# populate the labels for each object in the training set
for (ind, objectid) in enumerate(feature_dict['objectids']):
if objectid in labeldict:
# if this is a binary classifier training set, convert bools to
# ones and zeros
if labeltype == 'binary':
if labeldict[objectid]:
labelarray[ind] = 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# otherwise, use the actual class label integer
elif labeltype == 'classes':
labelarray[ind] = labeldict[objectid] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['objectid', 'labeldict']] # depends on [control=['for'], data=[]]
feature_dict['labels_array'] = labelarray # depends on [control=['if'], data=[]]
feature_dict['kwargs'] = {'pklglob': pklglob, 'featurestouse': featurestouse, 'maxobjects': maxobjects, 'labeltype': labeltype}
# write the info to the output pickle
with open(outfile, 'wb') as outfd:
pickle.dump(feature_dict, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
# return the feature_dict
return feature_dict |
def set_time(self, value: float):
    """
    Set the current time. This can be used to jump in the timeline.
    Values below zero are clamped to 0.
    Args:
        value (float): The new time
    """
    # Timeline positions cannot be negative; clamp silently instead of raising.
    if value < 0:
        value = 0
    # Shift the accumulated offset by the difference between the current and
    # the requested position. NOTE(review): assumes get_time() subtracts
    # self.offset when reporting time, so that after this line
    # get_time() == value -- confirm against get_time()'s implementation.
    self.offset += self.get_time() - value | def function[set_time, parameter[self, value]]:
constant[
Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time
]
if compare[name[value] less[<] constant[0]] begin[:]
variable[value] assign[=] constant[0]
<ast.AugAssign object at 0x7da18dc98a60> | keyword[def] identifier[set_time] ( identifier[self] , identifier[value] : identifier[float] ):
literal[string]
keyword[if] identifier[value] < literal[int] :
identifier[value] = literal[int]
identifier[self] . identifier[offset] += identifier[self] . identifier[get_time] ()- identifier[value] | def set_time(self, value: float):
"""
Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time
"""
if value < 0:
value = 0 # depends on [control=['if'], data=['value']]
self.offset += self.get_time() - value |
def regex(self):
    """
    RFC822 Email Address Regex
    Originally written by Cal Henderson
    c.f. http://iamcal.com/publish/articles/php/parsing_email/
    Translated to Python by Tim Fletcher with changes suggested by Dan Kubb
    http://tfletcher.com/lib/rfc822.py
    Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
    http://creativecommons.org/licenses/by-sa/2.5/
    :return: compiled pattern matching a full RFC822 addr-spec, anchored
        to the whole string with \A ... \Z
    """
    # Character classes spelled out as hex escapes, following RFC 822:
    # qtext: chars allowed unescaped inside a quoted string
    # (anything except CR, '"', '\' and 8-bit chars)
    qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]'
    # dtext: chars allowed inside a domain literal
    # (anything except CR, '[', '\', ']' and 8-bit chars)
    dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]'
    # atom: one or more chars excluding controls, space, and the RFC
    # "specials" ( " ( ) , . : ; < > @ [ \ ] plus DEL/8-bit )
    atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40'
    atom += '\\x5b-\\x5d\\x7f-\\xff]+'
    # quoted-pair: a backslash followed by any 7-bit char
    quoted_pair = '\\x5c[\\x00-\\x7f]'
    # domain-literal: '[' (dtext | quoted-pair)* ']'
    domain_literal = "\\x5b(?:%s|%s)*\\x5d" % (dtext, quoted_pair)
    # quoted-string: '"' (qtext | quoted-pair)* '"'
    quoted_string = "\\x22(?:%s|%s)*\\x22" % (qtext, quoted_pair)
    domain_ref = atom
    sub_domain = "(?:%s|%s)" % (domain_ref, domain_literal)
    word = "(?:%s|%s)" % (atom, quoted_string)
    # domain and local-part are dot-separated sequences of the above
    domain = "%s(?:\\x2e%s)*" % (sub_domain, sub_domain)
    local_part = "%s(?:\\x2e%s)*" % (word, word)
    # addr-spec = local-part '@' domain
    addr_spec = "%s\\x40%s" % (local_part, domain)
    email_address = re.compile('\A%s\Z' % addr_spec)
    return email_address | def function[regex, parameter[self]]:
constant[
RFC822 Email Address Regex
Originally written by Cal Henderson
c.f. http://iamcal.com/publish/articles/php/parsing_email/
Translated to Python by Tim Fletcher with changes suggested by Dan Kubb
http://tfletcher.com/lib/rfc822.py
Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
:return:
]
variable[qtext] assign[=] constant[[^\x0d\x22\x5c\x80-\xff]]
variable[dtext] assign[=] constant[[^\x0d\x5b-\x5d\x80-\xff]]
variable[atom] assign[=] constant[[^\x00-\x20\x22\x28\x29\x2c\x2e\x3a-\x3c\x3e\x40]
<ast.AugAssign object at 0x7da18f00e650>
variable[quoted_pair] assign[=] constant[\x5c[\x00-\x7f]]
variable[domain_literal] assign[=] binary_operation[constant[\x5b(?:%s|%s)*\x5d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00ea10>, <ast.Name object at 0x7da18f00c1f0>]]]
variable[quoted_string] assign[=] binary_operation[constant[\x22(?:%s|%s)*\x22] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00e5c0>, <ast.Name object at 0x7da18f00d4b0>]]]
variable[domain_ref] assign[=] name[atom]
variable[sub_domain] assign[=] binary_operation[constant[(?:%s|%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2044c2650>, <ast.Name object at 0x7da2044c1390>]]]
variable[word] assign[=] binary_operation[constant[(?:%s|%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2044c33a0>, <ast.Name object at 0x7da2044c1570>]]]
variable[domain] assign[=] binary_operation[constant[%s(?:\x2e%s)*] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2044c2e90>, <ast.Name object at 0x7da2044c3640>]]]
variable[local_part] assign[=] binary_operation[constant[%s(?:\x2e%s)*] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2044c0a30>, <ast.Name object at 0x7da2044c1930>]]]
variable[addr_spec] assign[=] binary_operation[constant[%s\x40%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2044c14e0>, <ast.Name object at 0x7da2044c2680>]]]
variable[email_address] assign[=] call[name[re].compile, parameter[binary_operation[constant[\A%s\Z] <ast.Mod object at 0x7da2590d6920> name[addr_spec]]]]
return[name[email_address]] | keyword[def] identifier[regex] ( identifier[self] ):
literal[string]
identifier[qtext] = literal[string]
identifier[dtext] = literal[string]
identifier[atom] = literal[string]
identifier[atom] += literal[string]
identifier[quoted_pair] = literal[string]
identifier[domain_literal] = literal[string] %( identifier[dtext] , identifier[quoted_pair] )
identifier[quoted_string] = literal[string] %( identifier[qtext] , identifier[quoted_pair] )
identifier[domain_ref] = identifier[atom]
identifier[sub_domain] = literal[string] %( identifier[domain_ref] , identifier[domain_literal] )
identifier[word] = literal[string] %( identifier[atom] , identifier[quoted_string] )
identifier[domain] = literal[string] %( identifier[sub_domain] , identifier[sub_domain] )
identifier[local_part] = literal[string] %( identifier[word] , identifier[word] )
identifier[addr_spec] = literal[string] %( identifier[local_part] , identifier[domain] )
identifier[email_address] = identifier[re] . identifier[compile] ( literal[string] % identifier[addr_spec] )
keyword[return] identifier[email_address] | def regex(self):
"""
RFC822 Email Address Regex
Originally written by Cal Henderson
c.f. http://iamcal.com/publish/articles/php/parsing_email/
Translated to Python by Tim Fletcher with changes suggested by Dan Kubb
http://tfletcher.com/lib/rfc822.py
Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
:return:
"""
qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]'
dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]'
atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40'
atom += '\\x5b-\\x5d\\x7f-\\xff]+'
quoted_pair = '\\x5c[\\x00-\\x7f]'
domain_literal = '\\x5b(?:%s|%s)*\\x5d' % (dtext, quoted_pair)
quoted_string = '\\x22(?:%s|%s)*\\x22' % (qtext, quoted_pair)
domain_ref = atom
sub_domain = '(?:%s|%s)' % (domain_ref, domain_literal)
word = '(?:%s|%s)' % (atom, quoted_string)
domain = '%s(?:\\x2e%s)*' % (sub_domain, sub_domain)
local_part = '%s(?:\\x2e%s)*' % (word, word)
addr_spec = '%s\\x40%s' % (local_part, domain)
email_address = re.compile('\\A%s\\Z' % addr_spec)
return email_address |
def set_invite_only(self, invite_only):
    """Set how the room can be joined.
    Args:
        invite_only(bool): If True, users will have to be invited to join
            the room. If False, anyone who knows the room link can join.
    Returns:
        True if successful, False if not
    """
    # Matrix join rules: "invite" restricts joining to invited users,
    # "public" lets anyone who knows the room id/alias join.
    join_rule = "invite" if invite_only else "public"
    try:
        self.client.api.set_join_rule(self.room_id, join_rule)
        # Only mirror the new state locally once the server accepted it.
        self.invite_only = invite_only
        return True
    except MatrixRequestError:
        # API failure is reported via the return value, not propagated.
        return False | def function[set_invite_only, parameter[self, invite_only]]:
constant[Set how the room can be joined.
Args:
invite_only(bool): If True, users will have to be invited to join
the room. If False, anyone who knows the room link can join.
Returns:
True if successful, False if not
]
variable[join_rule] assign[=] <ast.IfExp object at 0x7da1b1736a70>
<ast.Try object at 0x7da1b16c0d90> | keyword[def] identifier[set_invite_only] ( identifier[self] , identifier[invite_only] ):
literal[string]
identifier[join_rule] = literal[string] keyword[if] identifier[invite_only] keyword[else] literal[string]
keyword[try] :
identifier[self] . identifier[client] . identifier[api] . identifier[set_join_rule] ( identifier[self] . identifier[room_id] , identifier[join_rule] )
identifier[self] . identifier[invite_only] = identifier[invite_only]
keyword[return] keyword[True]
keyword[except] identifier[MatrixRequestError] :
keyword[return] keyword[False] | def set_invite_only(self, invite_only):
"""Set how the room can be joined.
Args:
invite_only(bool): If True, users will have to be invited to join
the room. If False, anyone who knows the room link can join.
Returns:
True if successful, False if not
"""
join_rule = 'invite' if invite_only else 'public'
try:
self.client.api.set_join_rule(self.room_id, join_rule)
self.invite_only = invite_only
return True # depends on [control=['try'], data=[]]
except MatrixRequestError:
return False # depends on [control=['except'], data=[]] |
def embed_ising(source_h, source_J, embedding, target_adjacency, chain_strength=1.0):
    """Embed an Ising problem onto a target graph.
    Args:
        source_h (dict[variable, bias]/list[bias]):
            Linear biases of the Ising problem. If a list, the list's indices are used as
            variable labels.
        source_J (dict[(variable, variable), bias]):
            Quadratic biases of the Ising problem.
        embedding (dict):
            Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
            where s is a source-model variable and t is a target-model variable.
        target_adjacency (dict/:class:`networkx.Graph`):
            Adjacency of the target graph as a dict of form {t: Nt, ...},
            where t is a target-graph variable and Nt is its set of neighbours.
        chain_strength (float, optional):
            Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note
            that the energy penalty of chain breaks is 2 * `chain_strength`.
    Returns:
        tuple: A 2-tuple:
            dict[variable, bias]: Linear biases of the target Ising problem.
            dict[(variable, variable), bias]: Quadratic biases of the target Ising problem.
    Examples:
        This example embeds a fully connected :math:`K_3` graph onto a square target graph.
        Embedding is accomplished by an edge contraction operation on the target graph: target-nodes
        2 and 3 are chained to represent source-node c.
        >>> import dimod
        >>> import networkx as nx
        >>> # Ising problem for a triangular source graph
        >>> h = {}
        >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
        >>> # Target graph is a square graph
        >>> target = nx.cycle_graph(4)
        >>> # Embedding from source to target graph
        >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
        >>> # Embed the Ising problem
        >>> target_h, target_J = dimod.embed_ising(h, J, embedding, target)
        >>> target_J[(0, 1)] == J[('a', 'b')]
        True
        >>> target_J        # doctest: +SKIP
        {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}
        This example embeds a fully connected :math:`K_3` graph onto the target graph
        of a dimod reference structured sampler, `StructureComposite`, using the dimod reference
        `ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to
        represent source-node c.
        >>> import dimod
        >>> # Ising problem for a triangular source graph
        >>> h = {}
        >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
        >>> # Structured dimod sampler with a structure defined by a square graph
        >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
        >>> # Embedding from source to target graph
        >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
        >>> # Embed the Ising problem
        >>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency)
        >>> # Sample
        >>> samples = sampler.sample_ising(target_h, target_J)
        >>> for sample in samples.samples(n=3, sorted_by='energy'):   # doctest: +SKIP
        ...     print(sample)
        ...
        {0: 1, 1: -1, 2: -1, 3: -1}
        {0: 1, 1: 1, 2: -1, 3: -1}
        {0: -1, 1: 1, 2: -1, 3: -1}
    """
    # Express the (h, J) Ising problem as a BinaryQuadraticModel so the
    # generic BQM embedding routine can be reused.
    source_bqm = dimod.BinaryQuadraticModel.from_ising(source_h, source_J)
    # Map each source variable onto its chain of target variables; chain
    # members are coupled with quadratic biases of magnitude `chain_strength`.
    target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
    # Convert back to Ising form. The discarded third value is presumably the
    # constant energy offset -- not part of this function's contract; confirm
    # against dimod's to_ising() documentation.
    target_h, target_J, __ = target_bqm.to_ising()
    return target_h, target_J | def function[embed_ising, parameter[source_h, source_J, embedding, target_adjacency, chain_strength]]:
constant[Embed an Ising problem onto a target graph.
Args:
source_h (dict[variable, bias]/list[bias]):
Linear biases of the Ising problem. If a list, the list's indices are used as
variable labels.
source_J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
embedding (dict):
Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
where s is a source-model variable and t is a target-model variable.
target_adjacency (dict/:class:`networkx.Graph`):
Adjacency of the target graph as a dict of form {t: Nt, ...},
where t is a target-graph variable and Nt is its set of neighbours.
chain_strength (float, optional):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note
that the energy penalty of chain breaks is 2 * `chain_strength`.
Returns:
tuple: A 2-tuple:
dict[variable, bias]: Linear biases of the target Ising problem.
dict[(variable, variable), bias]: Quadratic biases of the target Ising problem.
Examples:
This example embeds a fully connected :math:`K_3` graph onto a square target graph.
Embedding is accomplished by an edge contraction operation on the target graph: target-nodes
2 and 3 are chained to represent source-node c.
>>> import dimod
>>> import networkx as nx
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Target graph is a square graph
>>> target = nx.cycle_graph(4)
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, target)
>>> target_J[(0, 1)] == J[('a', 'b')]
True
>>> target_J # doctest: +SKIP
{(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}
This example embeds a fully connected :math:`K_3` graph onto the target graph
of a dimod reference structured sampler, `StructureComposite`, using the dimod reference
`ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to
represent source-node c.
>>> import dimod
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Structured dimod sampler with a structure defined by a square graph
>>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency)
>>> # Sample
>>> samples = sampler.sample_ising(target_h, target_J)
>>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1, 2: -1, 3: -1}
{0: 1, 1: 1, 2: -1, 3: -1}
{0: -1, 1: 1, 2: -1, 3: -1}
]
variable[source_bqm] assign[=] call[name[dimod].BinaryQuadraticModel.from_ising, parameter[name[source_h], name[source_J]]]
variable[target_bqm] assign[=] call[name[embed_bqm], parameter[name[source_bqm], name[embedding], name[target_adjacency]]]
<ast.Tuple object at 0x7da18dc9acb0> assign[=] call[name[target_bqm].to_ising, parameter[]]
return[tuple[[<ast.Name object at 0x7da18dc9a200>, <ast.Name object at 0x7da18dc9b880>]]] | keyword[def] identifier[embed_ising] ( identifier[source_h] , identifier[source_J] , identifier[embedding] , identifier[target_adjacency] , identifier[chain_strength] = literal[int] ):
literal[string]
identifier[source_bqm] = identifier[dimod] . identifier[BinaryQuadraticModel] . identifier[from_ising] ( identifier[source_h] , identifier[source_J] )
identifier[target_bqm] = identifier[embed_bqm] ( identifier[source_bqm] , identifier[embedding] , identifier[target_adjacency] , identifier[chain_strength] = identifier[chain_strength] )
identifier[target_h] , identifier[target_J] , identifier[__] = identifier[target_bqm] . identifier[to_ising] ()
keyword[return] identifier[target_h] , identifier[target_J] | def embed_ising(source_h, source_J, embedding, target_adjacency, chain_strength=1.0):
"""Embed an Ising problem onto a target graph.
Args:
source_h (dict[variable, bias]/list[bias]):
Linear biases of the Ising problem. If a list, the list's indices are used as
variable labels.
source_J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
embedding (dict):
Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
where s is a source-model variable and t is a target-model variable.
target_adjacency (dict/:class:`networkx.Graph`):
Adjacency of the target graph as a dict of form {t: Nt, ...},
where t is a target-graph variable and Nt is its set of neighbours.
chain_strength (float, optional):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note
that the energy penalty of chain breaks is 2 * `chain_strength`.
Returns:
tuple: A 2-tuple:
dict[variable, bias]: Linear biases of the target Ising problem.
dict[(variable, variable), bias]: Quadratic biases of the target Ising problem.
Examples:
This example embeds a fully connected :math:`K_3` graph onto a square target graph.
Embedding is accomplished by an edge contraction operation on the target graph: target-nodes
2 and 3 are chained to represent source-node c.
>>> import dimod
>>> import networkx as nx
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Target graph is a square graph
>>> target = nx.cycle_graph(4)
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, target)
>>> target_J[(0, 1)] == J[('a', 'b')]
True
>>> target_J # doctest: +SKIP
{(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}
This example embeds a fully connected :math:`K_3` graph onto the target graph
of a dimod reference structured sampler, `StructureComposite`, using the dimod reference
`ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to
represent source-node c.
>>> import dimod
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Structured dimod sampler with a structure defined by a square graph
>>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency)
>>> # Sample
>>> samples = sampler.sample_ising(target_h, target_J)
>>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1, 2: -1, 3: -1}
{0: 1, 1: 1, 2: -1, 3: -1}
{0: -1, 1: 1, 2: -1, 3: -1}
"""
source_bqm = dimod.BinaryQuadraticModel.from_ising(source_h, source_J)
target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
(target_h, target_J, __) = target_bqm.to_ising()
return (target_h, target_J) |
def signature_validate_single(signature, error=None):
    """Return whether *signature* is a single valid D-Bus type signature."""
    error, my_error = _get_error(error)
    raw = dbus.dbus_signature_validate_single(signature.encode(), error._dbobj)
    my_error.raise_if_set()
    return raw != 0
constant[is signature a single valid type.]
<ast.Tuple object at 0x7da20c990cd0> assign[=] call[name[_get_error], parameter[name[error]]]
variable[result] assign[=] compare[call[name[dbus].dbus_signature_validate_single, parameter[call[name[signature].encode, parameter[]], name[error]._dbobj]] not_equal[!=] constant[0]]
call[name[my_error].raise_if_set, parameter[]]
return[name[result]] | keyword[def] identifier[signature_validate_single] ( identifier[signature] , identifier[error] = keyword[None] ):
literal[string]
identifier[error] , identifier[my_error] = identifier[_get_error] ( identifier[error] )
identifier[result] = identifier[dbus] . identifier[dbus_signature_validate_single] ( identifier[signature] . identifier[encode] (), identifier[error] . identifier[_dbobj] )!= literal[int]
identifier[my_error] . identifier[raise_if_set] ()
keyword[return] identifier[result] | def signature_validate_single(signature, error=None):
"""is signature a single valid type."""
(error, my_error) = _get_error(error)
result = dbus.dbus_signature_validate_single(signature.encode(), error._dbobj) != 0
my_error.raise_if_set()
return result |
def prior_transform(self, unit_coords, priors, prior_args=None):
    """An example of one way to use the `Prior` objects below to go from unit
    cube to parameter space, for nested sampling. This takes and returns a
    list instead of an array, to accomodate possible vector parameters. Thus
    one will need something like ``theta_array=np.concatenate(*theta)``
    :param unit_coords:
        Coordinates on the unit prior hyper-cube. Iterable.
    :param priors:
        A list of `Prior` objects, iterable of same length as `unit_coords`.
    :param prior_args: (optional)
        A list of dictionaries of prior function keyword arguments.
        Missing entries mean "no extra keyword arguments for that prior".
    :returns theta:
        A list of parameter values corresponding to the given coordinates on
        the prior unit hypercube.
    """
    # Use None as the default instead of a mutable [] to avoid the
    # shared-mutable-default pitfall; behavior for existing callers
    # is unchanged.
    if prior_args is None:
        prior_args = []
    theta = []
    for i, (u, p) in enumerate(zip(unit_coords, priors)):
        func = p.unit_transform
        # prior_args may be shorter than priors; fall back to no kwargs.
        try:
            kwargs = prior_args[i]
        except IndexError:
            kwargs = {}
        theta.append(func(u, **kwargs))
    return theta
constant[An example of one way to use the `Prior` objects below to go from unit
cube to parameter space, for nested sampling. This takes and returns a
list instead of an array, to accomodate possible vector parameters. Thus
one will need something like ``theta_array=np.concatenate(*theta)``
:param unit_coords:
Coordinates on the unit prior hyper-cube. Iterable.
:param priors:
A list of `Prior` objects, iterable of same length as `unit_coords`.
:param prior_args: (optional)
A list of dictionaries of prior function keyword arguments.
:returns theta:
A list of parameter values corresponding to the given coordinates on
the prior unit hypercube.
]
variable[theta] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1d4a230>, <ast.Tuple object at 0x7da1b1d4a1d0>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[unit_coords], name[priors]]]]]] begin[:]
variable[func] assign[=] name[p].unit_transform
<ast.Try object at 0x7da1b1d482b0>
call[name[theta].append, parameter[call[name[func], parameter[name[u]]]]]
return[name[theta]] | keyword[def] identifier[prior_transform] ( identifier[self] , identifier[unit_coords] , identifier[priors] , identifier[prior_args] =[]):
literal[string]
identifier[theta] =[]
keyword[for] identifier[i] ,( identifier[u] , identifier[p] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[unit_coords] , identifier[priors] )):
identifier[func] = identifier[p] . identifier[unit_transform]
keyword[try] :
identifier[kwargs] = identifier[prior_args] [ identifier[i] ]
keyword[except] ( identifier[IndexError] ):
identifier[kwargs] ={}
identifier[theta] . identifier[append] ( identifier[func] ( identifier[u] ,** identifier[kwargs] ))
keyword[return] identifier[theta] | def prior_transform(self, unit_coords, priors, prior_args=[]):
"""An example of one way to use the `Prior` objects below to go from unit
cube to parameter space, for nested sampling. This takes and returns a
list instead of an array, to accomodate possible vector parameters. Thus
one will need something like ``theta_array=np.concatenate(*theta)``
:param unit_coords:
Coordinates on the unit prior hyper-cube. Iterable.
:param priors:
A list of `Prior` objects, iterable of same length as `unit_coords`.
:param prior_args: (optional)
A list of dictionaries of prior function keyword arguments.
:returns theta:
A list of parameter values corresponding to the given coordinates on
the prior unit hypercube.
"""
theta = []
for (i, (u, p)) in enumerate(zip(unit_coords, priors)):
func = p.unit_transform
try:
kwargs = prior_args[i] # depends on [control=['try'], data=[]]
except IndexError:
kwargs = {} # depends on [control=['except'], data=[]]
theta.append(func(u, **kwargs)) # depends on [control=['for'], data=[]]
return theta |
def get_incorrect_names_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:
    """Return the set of all incorrect names from the given namespace in the graph.
    :return: The set of all incorrect names from the given namespace in the graph
    """
    incorrect = set()
    for _, exc, _ in graph.warnings:
        # Only name/regex lookup failures for the requested namespace count.
        if not isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):
            continue
        if exc.namespace == namespace:
            incorrect.add(exc.name)
    return incorrect
constant[Return the set of all incorrect names from the given namespace in the graph.
:return: The set of all incorrect names from the given namespace in the graph
]
return[<ast.SetComp object at 0x7da20c795780>] | keyword[def] identifier[get_incorrect_names_by_namespace] ( identifier[graph] : identifier[BELGraph] , identifier[namespace] : identifier[str] )-> identifier[Set] [ identifier[str] ]:
literal[string]
keyword[return] {
identifier[exc] . identifier[name]
keyword[for] identifier[_] , identifier[exc] , identifier[_] keyword[in] identifier[graph] . identifier[warnings]
keyword[if] identifier[isinstance] ( identifier[exc] ,( identifier[MissingNamespaceNameWarning] , identifier[MissingNamespaceRegexWarning] )) keyword[and] identifier[exc] . identifier[namespace] == identifier[namespace]
} | def get_incorrect_names_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:
"""Return the set of all incorrect names from the given namespace in the graph.
:return: The set of all incorrect names from the given namespace in the graph
"""
return {exc.name for (_, exc, _) in graph.warnings if isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)) and exc.namespace == namespace} |
def convert_complexFaultSource(self, node):
        """
        Convert the given node into a complex fault object.
        :param node: a node with tag complexFaultSource
            (NOTE(review): the previous docstring said ``areaGeometry``,
            but the code reads ``node.complexFaultGeometry`` — looks like
            a copy-paste error)
        :returns: a :class:`openquake.hazardlib.source.ComplexFaultSource`
            instance
        """
        geom = node.complexFaultGeometry
        # Geographic lines describing the fault edges.
        edges = self.geo_lines(geom)
        # Magnitude-frequency distribution attached to this source node.
        mfd = self.convert_mfdist(node)
        # ``~node.x`` presumably yields the text content of subnode ``x``
        # (confirm against the Node API); here it names the
        # magnitude-scaling-relationship class to instantiate.
        msr = valid.SCALEREL[~node.magScaleRel]()
        # ``context`` attaches file/node information to any error raised
        # while building the source.
        with context(self.fname, node):
            cmplx = source.ComplexFaultSource(
                source_id=node['id'],
                name=node['name'],
                tectonic_region_type=node.attrib.get('tectonicRegion'),
                mfd=mfd,
                rupture_mesh_spacing=self.complex_fault_mesh_spacing,
                magnitude_scaling_relationship=msr,
                rupture_aspect_ratio=~node.ruptAspectRatio,
                edges=edges,
                rake=~node.rake,
                temporal_occurrence_model=self.get_tom(node))
        return cmplx
constant[
Convert the given node into a complex fault object.
:param node: a node with tag areaGeometry
:returns: a :class:`openquake.hazardlib.source.ComplexFaultSource`
instance
]
variable[geom] assign[=] name[node].complexFaultGeometry
variable[edges] assign[=] call[name[self].geo_lines, parameter[name[geom]]]
variable[mfd] assign[=] call[name[self].convert_mfdist, parameter[name[node]]]
variable[msr] assign[=] call[call[name[valid].SCALEREL][<ast.UnaryOp object at 0x7da20c9930d0>], parameter[]]
with call[name[context], parameter[name[self].fname, name[node]]] begin[:]
variable[cmplx] assign[=] call[name[source].ComplexFaultSource, parameter[]]
return[name[cmplx]] | keyword[def] identifier[convert_complexFaultSource] ( identifier[self] , identifier[node] ):
literal[string]
identifier[geom] = identifier[node] . identifier[complexFaultGeometry]
identifier[edges] = identifier[self] . identifier[geo_lines] ( identifier[geom] )
identifier[mfd] = identifier[self] . identifier[convert_mfdist] ( identifier[node] )
identifier[msr] = identifier[valid] . identifier[SCALEREL] [~ identifier[node] . identifier[magScaleRel] ]()
keyword[with] identifier[context] ( identifier[self] . identifier[fname] , identifier[node] ):
identifier[cmplx] = identifier[source] . identifier[ComplexFaultSource] (
identifier[source_id] = identifier[node] [ literal[string] ],
identifier[name] = identifier[node] [ literal[string] ],
identifier[tectonic_region_type] = identifier[node] . identifier[attrib] . identifier[get] ( literal[string] ),
identifier[mfd] = identifier[mfd] ,
identifier[rupture_mesh_spacing] = identifier[self] . identifier[complex_fault_mesh_spacing] ,
identifier[magnitude_scaling_relationship] = identifier[msr] ,
identifier[rupture_aspect_ratio] =~ identifier[node] . identifier[ruptAspectRatio] ,
identifier[edges] = identifier[edges] ,
identifier[rake] =~ identifier[node] . identifier[rake] ,
identifier[temporal_occurrence_model] = identifier[self] . identifier[get_tom] ( identifier[node] ))
keyword[return] identifier[cmplx] | def convert_complexFaultSource(self, node):
"""
Convert the given node into a complex fault object.
:param node: a node with tag areaGeometry
:returns: a :class:`openquake.hazardlib.source.ComplexFaultSource`
instance
"""
geom = node.complexFaultGeometry
edges = self.geo_lines(geom)
mfd = self.convert_mfdist(node)
msr = valid.SCALEREL[~node.magScaleRel]()
with context(self.fname, node):
cmplx = source.ComplexFaultSource(source_id=node['id'], name=node['name'], tectonic_region_type=node.attrib.get('tectonicRegion'), mfd=mfd, rupture_mesh_spacing=self.complex_fault_mesh_spacing, magnitude_scaling_relationship=msr, rupture_aspect_ratio=~node.ruptAspectRatio, edges=edges, rake=~node.rake, temporal_occurrence_model=self.get_tom(node)) # depends on [control=['with'], data=[]]
return cmplx |
def from_hertz(self, hertz, standard_pitch=440):
        """Set the Note name and pitch, calculated from the hertz value.
        The standard_pitch argument can be used to set the pitch of A-4,
        from which the rest is calculated.
        """
        # Octave position (base-2 log) shifted by half a semitone
        # (1/24 octave) so integer truncation lands on the right note.
        octave_pos = log(float(hertz) * 1024 / standard_pitch, 2) + 1.0 / 24
        value = octave_pos * 12 + 9  # 9 == notes.note_to_int("A")
        self.name = notes.int_to_note(int(value) % 12)
        self.octave = int(value / 12) - 6
        return self
constant[Set the Note name and pitch, calculated from the hertz value.
The standard_pitch argument can be used to set the pitch of A-4,
from which the rest is calculated.
]
variable[value] assign[=] binary_operation[binary_operation[binary_operation[call[name[log], parameter[binary_operation[binary_operation[call[name[float], parameter[name[hertz]]] * constant[1024]] / name[standard_pitch]], constant[2]]] + binary_operation[constant[1.0] / constant[24]]] * constant[12]] + constant[9]]
name[self].name assign[=] call[name[notes].int_to_note, parameter[binary_operation[call[name[int], parameter[name[value]]] <ast.Mod object at 0x7da2590d6920> constant[12]]]]
name[self].octave assign[=] binary_operation[call[name[int], parameter[binary_operation[name[value] / constant[12]]]] - constant[6]]
return[name[self]] | keyword[def] identifier[from_hertz] ( identifier[self] , identifier[hertz] , identifier[standard_pitch] = literal[int] ):
literal[string]
identifier[value] =(( identifier[log] (( identifier[float] ( identifier[hertz] )* literal[int] )/ identifier[standard_pitch] , literal[int] )+
literal[int] / literal[int] )* literal[int] + literal[int] )
identifier[self] . identifier[name] = identifier[notes] . identifier[int_to_note] ( identifier[int] ( identifier[value] )% literal[int] )
identifier[self] . identifier[octave] = identifier[int] ( identifier[value] / literal[int] )- literal[int]
keyword[return] identifier[self] | def from_hertz(self, hertz, standard_pitch=440):
"""Set the Note name and pitch, calculated from the hertz value.
The standard_pitch argument can be used to set the pitch of A-4,
from which the rest is calculated.
"""
value = (log(float(hertz) * 1024 / standard_pitch, 2) + 1.0 / 24) * 12 + 9 # notes.note_to_int("A")
self.name = notes.int_to_note(int(value) % 12)
self.octave = int(value / 12) - 6
return self |
def classify_languages(self):
        """
        Return a new DataFrame with the language data of every blob
        added to its row.
        >>> blobs_lang_df = blobs_df.classify_languages
        :rtype: BlobsWithLanguageDataFrame
        """
        engine_df = self._engine_dataframe.classifyLanguages()
        return BlobsWithLanguageDataFrame(engine_df, self._session, self._implicits)
constant[
Returns a new DataFrame with the language data of any blob added to
its row.
>>> blobs_lang_df = blobs_df.classify_languages
:rtype: BlobsWithLanguageDataFrame
]
return[call[name[BlobsWithLanguageDataFrame], parameter[call[name[self]._engine_dataframe.classifyLanguages, parameter[]], name[self]._session, name[self]._implicits]]] | keyword[def] identifier[classify_languages] ( identifier[self] ):
literal[string]
keyword[return] identifier[BlobsWithLanguageDataFrame] ( identifier[self] . identifier[_engine_dataframe] . identifier[classifyLanguages] (),
identifier[self] . identifier[_session] , identifier[self] . identifier[_implicits] ) | def classify_languages(self):
"""
Returns a new DataFrame with the language data of any blob added to
its row.
>>> blobs_lang_df = blobs_df.classify_languages
:rtype: BlobsWithLanguageDataFrame
"""
return BlobsWithLanguageDataFrame(self._engine_dataframe.classifyLanguages(), self._session, self._implicits) |
def _infer_tz_from_endpoints(start, end, tz):
    """
    If a timezone is not explicitly given via `tz`, see if one can
    be inferred from the `start` and `end` endpoints. If more than one
    of these inputs provides a timezone, require that they all agree.
    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None
    Returns
    -------
    tz : tzinfo or None
    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        guessed = timezones.infer_tzinfo(start, end)
    except Exception:
        # Any inference failure is surfaced as a timezone conflict.
        raise TypeError('Start and end cannot both be tz-aware with '
                        'different timezones')
    guessed = timezones.maybe_get_tz(guessed)
    tz = timezones.maybe_get_tz(tz)
    if tz is None:
        # No explicit tz: fall back to the inferred one (possibly None).
        return guessed
    if guessed is not None and not timezones.tz_compare(guessed, tz):
        raise AssertionError("Inferred time zone not equal to passed "
                             "time zone")
    return tz
constant[
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints. If more than one
of these inputs provides a timezone, require that they all agree.
Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if start and end timezones do not agree
]
<ast.Try object at 0x7da1b1dd9660>
variable[inferred_tz] assign[=] call[name[timezones].maybe_get_tz, parameter[name[inferred_tz]]]
variable[tz] assign[=] call[name[timezones].maybe_get_tz, parameter[name[tz]]]
if <ast.BoolOp object at 0x7da1b1ddabf0> begin[:]
if <ast.UnaryOp object at 0x7da1b1df8fa0> begin[:]
<ast.Raise object at 0x7da1b1df92d0>
return[name[tz]] | keyword[def] identifier[_infer_tz_from_endpoints] ( identifier[start] , identifier[end] , identifier[tz] ):
literal[string]
keyword[try] :
identifier[inferred_tz] = identifier[timezones] . identifier[infer_tzinfo] ( identifier[start] , identifier[end] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
identifier[inferred_tz] = identifier[timezones] . identifier[maybe_get_tz] ( identifier[inferred_tz] )
identifier[tz] = identifier[timezones] . identifier[maybe_get_tz] ( identifier[tz] )
keyword[if] identifier[tz] keyword[is] keyword[not] keyword[None] keyword[and] identifier[inferred_tz] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[timezones] . identifier[tz_compare] ( identifier[inferred_tz] , identifier[tz] ):
keyword[raise] identifier[AssertionError] ( literal[string]
literal[string] )
keyword[elif] identifier[inferred_tz] keyword[is] keyword[not] keyword[None] :
identifier[tz] = identifier[inferred_tz]
keyword[return] identifier[tz] | def _infer_tz_from_endpoints(start, end, tz):
"""
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints. If more than one
of these inputs provides a timezone, require that they all agree.
Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if start and end timezones do not agree
"""
try:
inferred_tz = timezones.infer_tzinfo(start, end) # depends on [control=['try'], data=[]]
except Exception:
raise TypeError('Start and end cannot both be tz-aware with different timezones') # depends on [control=['except'], data=[]]
inferred_tz = timezones.maybe_get_tz(inferred_tz)
tz = timezones.maybe_get_tz(tz)
if tz is not None and inferred_tz is not None:
if not timezones.tz_compare(inferred_tz, tz):
raise AssertionError('Inferred time zone not equal to passed time zone') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif inferred_tz is not None:
tz = inferred_tz # depends on [control=['if'], data=['inferred_tz']]
return tz |
def set(self, key, value):
        """Sets the rules config for a given key.
        Args:
            key (str): rules config key to set
            value (str): value to set for the rules config key
        See: https://auth0.com/docs/api/management/v2#!/Rules_Configs/put_rules_configs_by_key
        """
        payload = {'value': value}
        return self.client.put(self._url('{}'.format(key)), data=payload)
constant[Sets the rules config for a given key.
Args:
key (str): rules config key to set
value (str): value to set for the rules config key
See: https://auth0.com/docs/api/management/v2#!/Rules_Configs/put_rules_configs_by_key
]
variable[url] assign[=] call[name[self]._url, parameter[call[constant[{}].format, parameter[name[key]]]]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b070f7f0>], [<ast.Name object at 0x7da1b070f880>]]
return[call[name[self].client.put, parameter[name[url]]]] | keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[url] = identifier[self] . identifier[_url] ( literal[string] . identifier[format] ( identifier[key] ))
identifier[body] ={ literal[string] : identifier[value] }
keyword[return] identifier[self] . identifier[client] . identifier[put] ( identifier[url] , identifier[data] = identifier[body] ) | def set(self, key, value):
"""Sets the rules config for a given key.
Args:
key (str): rules config key to set
value (str): value to set for the rules config key
See: https://auth0.com/docs/api/management/v2#!/Rules_Configs/put_rules_configs_by_key
"""
url = self._url('{}'.format(key))
body = {'value': value}
return self.client.put(url, data=body) |
def clean_deleted_sessions(cls):
        """Delete stale :class:`FederateSLO` objects whose session no longer exists."""
        for slo in cls.objects.all():
            # A dead/expired session has no 'authenticated' flag set.
            store = SessionStore(session_key=slo.session_key)
            if not store.get('authenticated'):
                slo.delete()
constant[remove old :class:`FederateSLO` object for which the session do not exists anymore]
for taget[name[federate_slo]] in starred[call[name[cls].objects.all, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0de5c90> begin[:]
call[name[federate_slo].delete, parameter[]] | keyword[def] identifier[clean_deleted_sessions] ( identifier[cls] ):
literal[string]
keyword[for] identifier[federate_slo] keyword[in] identifier[cls] . identifier[objects] . identifier[all] ():
keyword[if] keyword[not] identifier[SessionStore] ( identifier[session_key] = identifier[federate_slo] . identifier[session_key] ). identifier[get] ( literal[string] ):
identifier[federate_slo] . identifier[delete] () | def clean_deleted_sessions(cls):
"""remove old :class:`FederateSLO` object for which the session do not exists anymore"""
for federate_slo in cls.objects.all():
if not SessionStore(session_key=federate_slo.session_key).get('authenticated'):
federate_slo.delete() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['federate_slo']] |
def addCmdClass(self, ctor, **opts):
        '''
        Instantiate a Cmd subclass and register it on this cli under
        its command name.
        '''
        cmd = ctor(self, **opts)
        self.cmds[cmd.getCmdName()] = cmd
constant[
Add a Cmd subclass to this cli.
]
variable[item] assign[=] call[name[ctor], parameter[name[self]]]
variable[name] assign[=] call[name[item].getCmdName, parameter[]]
call[name[self].cmds][name[name]] assign[=] name[item] | keyword[def] identifier[addCmdClass] ( identifier[self] , identifier[ctor] ,** identifier[opts] ):
literal[string]
identifier[item] = identifier[ctor] ( identifier[self] ,** identifier[opts] )
identifier[name] = identifier[item] . identifier[getCmdName] ()
identifier[self] . identifier[cmds] [ identifier[name] ]= identifier[item] | def addCmdClass(self, ctor, **opts):
"""
Add a Cmd subclass to this cli.
"""
item = ctor(self, **opts)
name = item.getCmdName()
self.cmds[name] = item |
def add_amino_acid_to_json(code, description, letter='X', modified=None, force_add=False):
    """ Add an amino acid to the amino_acids.json file used to populate the amino_acid table.
    Parameters
    ----------
    code : str
        New code to be added to amino acid table.
    description : str
        Description of the amino acid, e.g. 'amidated terminal carboxy group'.
    letter : str, optional
        One letter code for the amino acid.
        Defaults to 'X'
    modified : str or None, optional
        Code of modified amino acid, e.g. 'ALA', or None.
        Defaults to None
    force_add : bool, optional
        If True, will over-write existing dictionary value for code if already in amino_acids.json.
        If False, then an IOError is raised if code is already in amino_acids.json.
    Raises
    ------
    IOError
        If code is already in amino_acids.json and force_add is False.
    Returns
    -------
    None
    """
    # Refuse to clobber an existing entry unless the caller opted in.
    if code in amino_acids_dict and not force_add:
        raise IOError("{0} is already in the amino_acids dictionary, with values: {1}".format(
            code, amino_acids_dict[code]))
    # Record the new entry in the in-memory dictionary...
    amino_acids_dict[code] = {
        'description': description,
        'letter': letter,
        'modified': modified,
    }
    # ...then persist the whole dictionary back to the json file.
    with open(_amino_acids_json_path, 'w') as json_file:
        json_file.write(json.dumps(amino_acids_dict))
    return
constant[ Add an amino acid to the amino_acids.json file used to populate the amino_acid table.
Parameters
----------
code : str
New code to be added to amino acid table.
description : str
Description of the amino acid, e.g. 'amidated terminal carboxy group'.
letter : str, optional
One letter code for the amino acid.
Defaults to 'X'
modified : str or None, optional
Code of modified amino acid, e.g. 'ALA', or None.
Defaults to None
force_add : bool, optional
If True, will over-write existing dictionary value for code if already in amino_acids.json.
If False, then an IOError is raised if code is already in amino_acids.json.
Raises
------
IOError
If code is already in amino_acids.json and force_add is False.
Returns
-------
None
]
if <ast.BoolOp object at 0x7da1b28448e0> begin[:]
<ast.Raise object at 0x7da1b2846fe0>
variable[add_code] assign[=] name[code]
variable[add_code_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b28535e0>, <ast.Constant object at 0x7da2041d9870>, <ast.Constant object at 0x7da2041d8fd0>], [<ast.Name object at 0x7da2041db8b0>, <ast.Name object at 0x7da2041d8c10>, <ast.Name object at 0x7da2041d9db0>]]
call[name[amino_acids_dict]][name[add_code]] assign[=] name[add_code_dict]
with call[name[open], parameter[name[_amino_acids_json_path], constant[w]]] begin[:]
call[name[foo].write, parameter[call[name[json].dumps, parameter[name[amino_acids_dict]]]]]
return[None] | keyword[def] identifier[add_amino_acid_to_json] ( identifier[code] , identifier[description] , identifier[letter] = literal[string] , identifier[modified] = keyword[None] , identifier[force_add] = keyword[False] ):
literal[string]
keyword[if] ( keyword[not] identifier[force_add] ) keyword[and] identifier[code] keyword[in] identifier[amino_acids_dict] . identifier[keys] ():
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] (
identifier[code] , identifier[amino_acids_dict] [ identifier[code] ]))
identifier[add_code] = identifier[code]
identifier[add_code_dict] ={ literal[string] : identifier[description] , literal[string] : identifier[letter] , literal[string] : identifier[modified] }
identifier[amino_acids_dict] [ identifier[add_code] ]= identifier[add_code_dict]
keyword[with] identifier[open] ( identifier[_amino_acids_json_path] , literal[string] ) keyword[as] identifier[foo] :
identifier[foo] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[amino_acids_dict] ))
keyword[return] | def add_amino_acid_to_json(code, description, letter='X', modified=None, force_add=False):
""" Add an amino acid to the amino_acids.json file used to populate the amino_acid table.
Parameters
----------
code : str
New code to be added to amino acid table.
description : str
Description of the amino acid, e.g. 'amidated terminal carboxy group'.
letter : str, optional
One letter code for the amino acid.
Defaults to 'X'
modified : str or None, optional
Code of modified amino acid, e.g. 'ALA', or None.
Defaults to None
force_add : bool, optional
If True, will over-write existing dictionary value for code if already in amino_acids.json.
If False, then an IOError is raised if code is already in amino_acids.json.
Raises
------
IOError
If code is already in amino_acids.json and force_add is False.
Returns
-------
None
"""
# If code is already in the dictionary, raise an error
if not force_add and code in amino_acids_dict.keys():
raise IOError('{0} is already in the amino_acids dictionary, with values: {1}'.format(code, amino_acids_dict[code])) # depends on [control=['if'], data=[]]
# Prepare data to be added.
add_code = code
add_code_dict = {'description': description, 'letter': letter, 'modified': modified}
# Check that data does not already exist, and if not, add it to the dictionary.
amino_acids_dict[add_code] = add_code_dict
# Write over json file with updated dictionary.
with open(_amino_acids_json_path, 'w') as foo:
foo.write(json.dumps(amino_acids_dict)) # depends on [control=['with'], data=['foo']]
return |
def _proxy(self):
        """
        Lazily build and cache the instance context. The context performs
        the actual actions; all instance actions are proxied to it.
        :returns: SettingsContext for this SettingsInstance
        :rtype: twilio.rest.voice.v1.dialing_permissions.settings.SettingsContext
        """
        context = self._context
        if context is None:
            context = SettingsContext(self._version)
            self._context = context
        return context
constant[
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SettingsContext for this SettingsInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.settings.SettingsContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[SettingsContext], parameter[name[self]._version]]
return[name[self]._context] | keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[SettingsContext] ( identifier[self] . identifier[_version] ,)
keyword[return] identifier[self] . identifier[_context] | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SettingsContext for this SettingsInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.settings.SettingsContext
"""
if self._context is None:
self._context = SettingsContext(self._version) # depends on [control=['if'], data=[]]
return self._context |
def _set_concurrent_future_state(concurrent, source):
    """Copy the completed state of *source* onto a concurrent.futures.Future."""
    assert source.done()
    if source.cancelled():
        concurrent.cancel()
    if not concurrent.set_running_or_notify_cancel():
        # Destination was cancelled; there is nothing to copy.
        return
    exc = source.exception()
    if exc is None:
        concurrent.set_result(source.result())
    else:
        concurrent.set_exception(exc)
constant[Copy state from a future to a concurrent.futures.Future.]
assert[call[name[source].done, parameter[]]]
if call[name[source].cancelled, parameter[]] begin[:]
call[name[concurrent].cancel, parameter[]]
if <ast.UnaryOp object at 0x7da20c6aa290> begin[:]
return[None]
variable[exception] assign[=] call[name[source].exception, parameter[]]
if compare[name[exception] is_not constant[None]] begin[:]
call[name[concurrent].set_exception, parameter[name[exception]]] | keyword[def] identifier[_set_concurrent_future_state] ( identifier[concurrent] , identifier[source] ):
literal[string]
keyword[assert] identifier[source] . identifier[done] ()
keyword[if] identifier[source] . identifier[cancelled] ():
identifier[concurrent] . identifier[cancel] ()
keyword[if] keyword[not] identifier[concurrent] . identifier[set_running_or_notify_cancel] ():
keyword[return]
identifier[exception] = identifier[source] . identifier[exception] ()
keyword[if] identifier[exception] keyword[is] keyword[not] keyword[None] :
identifier[concurrent] . identifier[set_exception] ( identifier[exception] )
keyword[else] :
identifier[result] = identifier[source] . identifier[result] ()
identifier[concurrent] . identifier[set_result] ( identifier[result] ) | def _set_concurrent_future_state(concurrent, source):
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurrent.cancel() # depends on [control=['if'], data=[]]
if not concurrent.set_running_or_notify_cancel():
return # depends on [control=['if'], data=[]]
exception = source.exception()
if exception is not None:
concurrent.set_exception(exception) # depends on [control=['if'], data=['exception']]
else:
result = source.result()
concurrent.set_result(result) |
def main():
    """
    Program entry point: apply the ttk 'clam' theme, build the form and
    start the Tk event loop.
    """
    style = ttk.Style()
    style.theme_use('clam')
    # Keep a reference to the form entries (built from the module-level
    # root window -- presumably defined at import time; confirm).
    entries = makeform(root)
    root.mainloop()
constant[
main function
]
variable[s] assign[=] call[name[ttk].Style, parameter[]]
call[name[s].theme_use, parameter[constant[clam]]]
variable[ents] assign[=] call[name[makeform], parameter[name[root]]]
call[name[root].mainloop, parameter[]] | keyword[def] identifier[main] ():
literal[string]
identifier[s] = identifier[ttk] . identifier[Style] ()
identifier[s] . identifier[theme_use] ( literal[string] )
identifier[ents] = identifier[makeform] ( identifier[root] )
identifier[root] . identifier[mainloop] () | def main():
"""
main function
"""
s = ttk.Style()
s.theme_use('clam')
ents = makeform(root)
root.mainloop() |
def __standardize_result(status, message, data=None, debug_msg=None):
    '''
    Build the uniform response dict shared by all commands.
    :param status: outcome flag of the operation
    :param message: human-readable description of the outcome
    :param data: optional payload, attached under the 'return' key
    :param debug_msg: optional diagnostics, attached only when the
        module-level ``debug`` flag is enabled
    :return: dict with 'status', 'message' and the optional extras
    '''
    result = {'status': status, 'message': message}
    if data is not None:
        result['return'] = data
    # only expose diagnostics when debugging is globally switched on
    if debug_msg is not None and debug:
        result['debug'] = debug_msg
    return result
constant[
Standardizes all responses
:param status:
:param message:
:param data:
:param debug_msg:
:return:
]
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c0f610>, <ast.Constant object at 0x7da1b1c0f700>], [<ast.Name object at 0x7da1b1c0c190>, <ast.Name object at 0x7da1b1c0c880>]]
if compare[name[data] is_not constant[None]] begin[:]
call[name[result]][constant[return]] assign[=] name[data]
if <ast.BoolOp object at 0x7da1b1c0e290> begin[:]
call[name[result]][constant[debug]] assign[=] name[debug_msg]
return[name[result]] | keyword[def] identifier[__standardize_result] ( identifier[status] , identifier[message] , identifier[data] = keyword[None] , identifier[debug_msg] = keyword[None] ):
literal[string]
identifier[result] ={
literal[string] : identifier[status] ,
literal[string] : identifier[message]
}
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[result] [ literal[string] ]= identifier[data]
keyword[if] identifier[debug_msg] keyword[is] keyword[not] keyword[None] keyword[and] identifier[debug] :
identifier[result] [ literal[string] ]= identifier[debug_msg]
keyword[return] identifier[result] | def __standardize_result(status, message, data=None, debug_msg=None):
"""
Standardizes all responses
:param status:
:param message:
:param data:
:param debug_msg:
:return:
"""
result = {'status': status, 'message': message}
if data is not None:
result['return'] = data # depends on [control=['if'], data=['data']]
if debug_msg is not None and debug:
result['debug'] = debug_msg # depends on [control=['if'], data=[]]
return result |
def calculate_best_chunk_size(self, data_length):
    """
    Derive the optimal chunk size from the number of dask workers
    available at execution time, aiming for five chunks per worker.
    :param data_length: A length which defines how many calculations there need to be.
    :type data_length: int
    """
    worker_count = len(self.client.scheduler_info()["workers"])
    # ceiling division: a trailing partial chunk still needs a slot,
    # which is exactly what divmod-plus-increment computed before
    chunk_size = -(-data_length // (worker_count * 5))
    return chunk_size
constant[
Uses the number of dask workers in the cluster (during execution time, meaning when you start the extraction)
to find the optimal chunk_size.
:param data_length: A length which defines how many calculations there need to be.
:type data_length: int
]
variable[n_workers] assign[=] call[name[len], parameter[call[call[name[self].client.scheduler_info, parameter[]]][constant[workers]]]]
<ast.Tuple object at 0x7da18bcc8490> assign[=] call[name[divmod], parameter[name[data_length], binary_operation[name[n_workers] * constant[5]]]]
if name[extra] begin[:]
<ast.AugAssign object at 0x7da18bccb880>
return[name[chunk_size]] | keyword[def] identifier[calculate_best_chunk_size] ( identifier[self] , identifier[data_length] ):
literal[string]
identifier[n_workers] = identifier[len] ( identifier[self] . identifier[client] . identifier[scheduler_info] ()[ literal[string] ])
identifier[chunk_size] , identifier[extra] = identifier[divmod] ( identifier[data_length] , identifier[n_workers] * literal[int] )
keyword[if] identifier[extra] :
identifier[chunk_size] += literal[int]
keyword[return] identifier[chunk_size] | def calculate_best_chunk_size(self, data_length):
"""
Uses the number of dask workers in the cluster (during execution time, meaning when you start the extraction)
to find the optimal chunk_size.
:param data_length: A length which defines how many calculations there need to be.
:type data_length: int
"""
n_workers = len(self.client.scheduler_info()['workers'])
(chunk_size, extra) = divmod(data_length, n_workers * 5)
if extra:
chunk_size += 1 # depends on [control=['if'], data=[]]
return chunk_size |
def cycle_dist(x, y, n):
    """Find Distance between x, y by means of a n-length cycle.
    Example:
        cycle_dist(1, 23, 24) = 2
        cycle_dist(5, 13, 24) = 8
        cycle_dist(0.0, 2.4, 1.0) = 0.4
        cycle_dist(0.0, 2.6, 1.0) = 0.4
    """
    # going one way around costs `raw`, the other way `n - raw`;
    # the cycle distance is whichever direction is shorter
    raw = abs(x - y) % n
    dist = min(raw, n - raw)
    return dist
constant[Find Distance between x, y by means of a n-length cycle.
Example:
cycle_dist(1, 23, 24) = 2
cycle_dist(5, 13, 24) = 8
cycle_dist(0.0, 2.4, 1.0) = 0.4
cycle_dist(0.0, 2.6, 1.0) = 0.4
]
variable[dist] assign[=] binary_operation[call[name[abs], parameter[binary_operation[name[x] - name[y]]]] <ast.Mod object at 0x7da2590d6920> name[n]]
if compare[name[dist] greater_or_equal[>=] binary_operation[constant[0.5] * name[n]]] begin[:]
variable[dist] assign[=] binary_operation[name[n] - name[dist]]
return[name[dist]] | keyword[def] identifier[cycle_dist] ( identifier[x] , identifier[y] , identifier[n] ):
literal[string]
identifier[dist] = identifier[abs] ( identifier[x] - identifier[y] )% identifier[n]
keyword[if] identifier[dist] >= literal[int] * identifier[n] :
identifier[dist] = identifier[n] - identifier[dist]
keyword[return] identifier[dist] | def cycle_dist(x, y, n):
"""Find Distance between x, y by means of a n-length cycle.
Example:
cycle_dist(1, 23, 24) = 2
cycle_dist(5, 13, 24) = 8
cycle_dist(0.0, 2.4, 1.0) = 0.4
cycle_dist(0.0, 2.6, 1.0) = 0.4
"""
dist = abs(x - y) % n
if dist >= 0.5 * n:
dist = n - dist # depends on [control=['if'], data=['dist']]
return dist |
def callsign(msg):
    """Aircraft callsign
    Args:
        msg (string): 28 bytes hexadecimal message string
    Returns:
        string: callsign
    """
    tc = common.typecode(msg)
    if tc < 1 or tc > 4:
        raise RuntimeError("%s: Not a identification message" % msg)
    # 6-bit character alphabet; '#' marks code points that never occur
    chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######'
    # the callsign occupies bits 40..95 of the message
    csbin = common.hex2bin(msg)[40:96]
    # decode eight 6-bit groups, then strip the '#' filler marks
    cs = ''.join(chars[common.bin2int(csbin[6 * k:6 * k + 6])]
                 for k in range(8))
    cs = cs.replace('#', '')
    return cs
constant[Aircraft callsign
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
string: callsign
]
if <ast.BoolOp object at 0x7da2054a5540> begin[:]
<ast.Raise object at 0x7da2054a41c0>
variable[chars] assign[=] constant[#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######]
variable[msgbin] assign[=] call[name[common].hex2bin, parameter[name[msg]]]
variable[csbin] assign[=] call[name[msgbin]][<ast.Slice object at 0x7da2054a4490>]
variable[cs] assign[=] constant[]
<ast.AugAssign object at 0x7da2054a70d0>
<ast.AugAssign object at 0x7da2054a5d20>
<ast.AugAssign object at 0x7da2054a42e0>
<ast.AugAssign object at 0x7da2054a5150>
<ast.AugAssign object at 0x7da2054a4580>
<ast.AugAssign object at 0x7da2054a7c40>
<ast.AugAssign object at 0x7da2054a71f0>
<ast.AugAssign object at 0x7da2054a4100>
variable[cs] assign[=] call[name[cs].replace, parameter[constant[#], constant[]]]
return[name[cs]] | keyword[def] identifier[callsign] ( identifier[msg] ):
literal[string]
keyword[if] identifier[common] . identifier[typecode] ( identifier[msg] )< literal[int] keyword[or] identifier[common] . identifier[typecode] ( identifier[msg] )> literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[msg] )
identifier[chars] = literal[string]
identifier[msgbin] = identifier[common] . identifier[hex2bin] ( identifier[msg] )
identifier[csbin] = identifier[msgbin] [ literal[int] : literal[int] ]
identifier[cs] = literal[string]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] += identifier[chars] [ identifier[common] . identifier[bin2int] ( identifier[csbin] [ literal[int] : literal[int] ])]
identifier[cs] = identifier[cs] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[cs] | def callsign(msg):
"""Aircraft callsign
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
string: callsign
"""
if common.typecode(msg) < 1 or common.typecode(msg) > 4:
raise RuntimeError('%s: Not a identification message' % msg) # depends on [control=['if'], data=[]]
chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######'
msgbin = common.hex2bin(msg)
csbin = msgbin[40:96]
cs = ''
cs += chars[common.bin2int(csbin[0:6])]
cs += chars[common.bin2int(csbin[6:12])]
cs += chars[common.bin2int(csbin[12:18])]
cs += chars[common.bin2int(csbin[18:24])]
cs += chars[common.bin2int(csbin[24:30])]
cs += chars[common.bin2int(csbin[30:36])]
cs += chars[common.bin2int(csbin[36:42])]
cs += chars[common.bin2int(csbin[42:48])]
# clean string, remove spaces and marks, if any.
# cs = cs.replace('_', '')
cs = cs.replace('#', '')
return cs |
def get_ccle_lines_for_mutation(gene, amino_acid_change):
    """Return cell lines with a given point mutation in a given gene.
    Checks which cell lines in CCLE have a particular point mutation
    in a given gene and returns their names in a list.
    Parameters
    ----------
    gene : str
        The HGNC symbol of the mutated gene in whose product the amino
        acid change occurs. Example: "BRAF"
    amino_acid_change : str
        The amino acid change of interest. Example: "V600E"
    Returns
    -------
    cell_lines : list
        A list of CCLE cell lines in which the given mutation occurs.
    """
    query = {
        'cmd': 'getMutationData',
        'case_set_id': ccle_study,
        'genetic_profile_id': ccle_study + '_mutations',
        'gene_list': gene,
        'skiprows': 1,
    }
    mutations = send_request(**query)
    # keep only the rows matching the requested amino-acid change
    matching = mutations[mutations['amino_acid_change'] == amino_acid_change]
    cell_lines = matching['case_id'].unique().tolist()
    return cell_lines
constant[Return cell lines with a given point mutation in a given gene.
Checks which cell lines in CCLE have a particular point mutation
in a given gene and return their names in a list.
Parameters
----------
gene : str
The HGNC symbol of the mutated gene in whose product the amino
acid change occurs. Example: "BRAF"
amino_acid_change : str
The amino acid change of interest. Example: "V600E"
Returns
-------
cell_lines : list
A list of CCLE cell lines in which the given mutation occurs.
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c990b50>, <ast.Constant object at 0x7da20c9908b0>, <ast.Constant object at 0x7da20c992230>, <ast.Constant object at 0x7da20c992380>, <ast.Constant object at 0x7da20c992620>], [<ast.Constant object at 0x7da20c993820>, <ast.Name object at 0x7da20c992bc0>, <ast.BinOp object at 0x7da20c991c90>, <ast.Name object at 0x7da20c991de0>, <ast.Constant object at 0x7da20c993460>]]
variable[df] assign[=] call[name[send_request], parameter[]]
variable[df] assign[=] call[name[df]][compare[call[name[df]][constant[amino_acid_change]] equal[==] name[amino_acid_change]]]
variable[cell_lines] assign[=] call[call[call[name[df]][constant[case_id]].unique, parameter[]].tolist, parameter[]]
return[name[cell_lines]] | keyword[def] identifier[get_ccle_lines_for_mutation] ( identifier[gene] , identifier[amino_acid_change] ):
literal[string]
identifier[data] ={ literal[string] : literal[string] ,
literal[string] : identifier[ccle_study] ,
literal[string] : identifier[ccle_study] + literal[string] ,
literal[string] : identifier[gene] ,
literal[string] : literal[int] }
identifier[df] = identifier[send_request] (** identifier[data] )
identifier[df] = identifier[df] [ identifier[df] [ literal[string] ]== identifier[amino_acid_change] ]
identifier[cell_lines] = identifier[df] [ literal[string] ]. identifier[unique] (). identifier[tolist] ()
keyword[return] identifier[cell_lines] | def get_ccle_lines_for_mutation(gene, amino_acid_change):
"""Return cell lines with a given point mutation in a given gene.
Checks which cell lines in CCLE have a particular point mutation
in a given gene and return their names in a list.
Parameters
----------
gene : str
The HGNC symbol of the mutated gene in whose product the amino
acid change occurs. Example: "BRAF"
amino_acid_change : str
The amino acid change of interest. Example: "V600E"
Returns
-------
cell_lines : list
A list of CCLE cell lines in which the given mutation occurs.
"""
data = {'cmd': 'getMutationData', 'case_set_id': ccle_study, 'genetic_profile_id': ccle_study + '_mutations', 'gene_list': gene, 'skiprows': 1}
df = send_request(**data)
df = df[df['amino_acid_change'] == amino_acid_change]
cell_lines = df['case_id'].unique().tolist()
return cell_lines |
def draw(data, format='auto', size=(400, 300), drawing_type='ball and stick',
         camera_type='perspective', shader='lambert', display_html=True,
         element_properties=None, show_save=False):
    """Draws an interactive 3D visualization of the inputted chemical.

    Args:
        data: A string or file representing a chemical.
        format: The format of the `data` variable (default is 'auto').
        size: Starting dimensions of visualization, in pixels.
        drawing_type: Specifies the molecular representation. Can be 'ball and
            stick', 'wireframe', or 'space filling'.
        camera_type: Can be 'perspective' or 'orthographic'.
        shader: Specifies shading algorithm to use. Can be 'toon', 'basic',
            'phong', or 'lambert'.
        display_html: If True (default), embed the html in a IPython display.
            If False, return the html as a string.
        element_properties: A dictionary providing color and radius information
            for custom elements or overriding the defaults in imolecule.js
        show_save: If True, displays a save icon for rendering molecule as an
            image.

    The `format` can be any value specified by Open Babel
    (http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
    option uses the extension for files (ie. my_file.mol -> mol) and defaults
    to SMILES (smi) for strings.
    """
    # Catch errors on string-based input before getting js involved
    draw_options = ['ball and stick', 'wireframe', 'space filling']
    camera_options = ['perspective', 'orthographic']
    shader_options = ['toon', 'basic', 'phong', 'lambert']
    if drawing_type not in draw_options:
        raise Exception("Invalid drawing type! Please use one of: " +
                        ", ".join(draw_options))
    if camera_type not in camera_options:
        raise Exception("Invalid camera type! Please use one of: " +
                        ", ".join(camera_options))
    if shader not in shader_options:
        raise Exception("Invalid shader! Please use one of: " +
                        ", ".join(shader_options))
    # Convert the input chemical to the JSON payload imolecule.js consumes
    json_mol = generate(data, format)
    if element_properties is None:
        element_properties = dict()
    json_element_properties = to_json(element_properties)
    # Unique per-call id so multiple visualizations can coexist on one page
    div_id = uuid.uuid4()
    # The %-placeholders below are filled, in order, by the tuple that follows
    # the template string (div id, js paths, sizes, render options, payloads)
    html = """<div id="molecule_%s"></div>
           <script type="text/javascript">
           require.config({baseUrl: '/',
                           paths: {imolecule: ['%s', '%s']}});
           require(['imolecule'], function () {
               var $d = $('#molecule_%s');
               $d.width(%d); $d.height(%d);
               $d.imolecule = jQuery.extend({}, imolecule);
               $d.imolecule.create($d, {drawingType: '%s',
                                        cameraType: '%s',
                                        shader: '%s',
                                        showSave: %s});
               $d.imolecule.addElements(%s);
               $d.imolecule.draw(%s);
               $d.resizable({
                   aspectRatio: %d / %d,
                   resize: function (evt, ui) {
                       $d.imolecule.renderer.setSize(ui.size.width,
                                                     ui.size.height);
                   }
               });
           });
           </script>""" % (div_id, local_path[:-3], remote_path[:-3],
                           div_id, size[0], size[1], drawing_type,
                           camera_type, shader,
                           'true' if show_save else 'false',
                           json_element_properties,
                           json_mol, size[0], size[1])
    # Execute js and display the results in a div (see script for more)
    if display_html:
        try:
            # __IPYTHON__ is injected only when running under IPython;
            # a NameError therefore means we are in a plain interpreter
            __IPYTHON__
        except NameError:
            # We're running outside ipython, let's generate a static HTML and
            # show it in the browser
            import shutil
            import webbrowser
            from tempfile import mkdtemp
            from time import time
            try:  # Python 3
                from urllib.parse import urljoin
                from urllib.request import pathname2url
            except ImportError:  # Python 2
                from urlparse import urljoin
                from urllib import pathname2url
            from tornado import template
            t = template.Loader(file_path).load('viewer.template')
            html = t.generate(title="imolecule", json_mol=json_mol,
                              drawing_type=drawing_type, shader=shader,
                              camera_type=camera_type,
                              json_element_properties=json_element_properties)
            # timestamped temp dir keeps repeated calls from colliding
            tempdir = mkdtemp(prefix='imolecule_{:.0f}_'.format(time()))
            html_filename = os.path.join(tempdir, 'index.html')
            # NOTE(review): t.generate() is assumed to return encoded bytes,
            # hence the binary mode here -- confirm against tornado docs
            with open(html_filename, 'wb') as f:
                f.write(html)
            # static assets the viewer page needs, copied next to index.html
            libs = (('server', 'css', 'chosen.css'),
                    ('server', 'css', 'server.css'),
                    ('js', 'jquery-1.11.1.min.js'),
                    ('server', 'js', 'chosen.jquery.min.js'),
                    ('js', 'build', 'imolecule.min.js'))
            for lib in libs:
                shutil.copy(os.path.join(file_path, *lib), tempdir)
            html_file_url = urljoin('file:', pathname2url(html_filename))
            print('Opening html file: {}'.format(html_file_url))
            webbrowser.open(html_file_url)
        else:
            # We're running in ipython: display widget
            display(HTML(html))
    else:
return html | def function[draw, parameter[data, format, size, drawing_type, camera_type, shader, display_html, element_properties, show_save]]:
constant[Draws an interactive 3D visualization of the inputted chemical.
Args:
data: A string or file representing a chemical.
format: The format of the `data` variable (default is 'auto').
size: Starting dimensions of visualization, in pixels.
drawing_type: Specifies the molecular representation. Can be 'ball and
stick', 'wireframe', or 'space filling'.
camera_type: Can be 'perspective' or 'orthographic'.
shader: Specifies shading algorithm to use. Can be 'toon', 'basic',
'phong', or 'lambert'.
display_html: If True (default), embed the html in a IPython display.
If False, return the html as a string.
element_properites: A dictionary providing color and radius information
for custom elements or overriding the defaults in imolecule.js
show_save: If True, displays a save icon for rendering molecule as an
image.
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (ie. my_file.mol -> mol) and defaults
to SMILES (smi) for strings.
]
variable[draw_options] assign[=] list[[<ast.Constant object at 0x7da1b109ba90>, <ast.Constant object at 0x7da1b109ba60>, <ast.Constant object at 0x7da1b109ba30>]]
variable[camera_options] assign[=] list[[<ast.Constant object at 0x7da1b109b970>, <ast.Constant object at 0x7da1b109b940>]]
variable[shader_options] assign[=] list[[<ast.Constant object at 0x7da1b109b880>, <ast.Constant object at 0x7da1b109b850>, <ast.Constant object at 0x7da1b109b820>, <ast.Constant object at 0x7da1b109b7f0>]]
if compare[name[drawing_type] <ast.NotIn object at 0x7da2590d7190> name[draw_options]] begin[:]
<ast.Raise object at 0x7da1b109b700>
if compare[name[camera_type] <ast.NotIn object at 0x7da2590d7190> name[camera_options]] begin[:]
<ast.Raise object at 0x7da1b109b490>
if compare[name[shader] <ast.NotIn object at 0x7da2590d7190> name[shader_options]] begin[:]
<ast.Raise object at 0x7da1b1014fa0>
variable[json_mol] assign[=] call[name[generate], parameter[name[data], name[format]]]
if compare[name[element_properties] is constant[None]] begin[:]
variable[element_properties] assign[=] call[name[dict], parameter[]]
variable[json_element_properties] assign[=] call[name[to_json], parameter[name[element_properties]]]
variable[div_id] assign[=] call[name[uuid].uuid4, parameter[]]
variable[html] assign[=] binary_operation[constant[<div id="molecule_%s"></div>
<script type="text/javascript">
require.config({baseUrl: '/',
paths: {imolecule: ['%s', '%s']}});
require(['imolecule'], function () {
var $d = $('#molecule_%s');
$d.width(%d); $d.height(%d);
$d.imolecule = jQuery.extend({}, imolecule);
$d.imolecule.create($d, {drawingType: '%s',
cameraType: '%s',
shader: '%s',
showSave: %s});
$d.imolecule.addElements(%s);
$d.imolecule.draw(%s);
$d.resizable({
aspectRatio: %d / %d,
resize: function (evt, ui) {
$d.imolecule.renderer.setSize(ui.size.width,
ui.size.height);
}
});
});
</script>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1017040>, <ast.Subscript object at 0x7da1b10142b0>, <ast.Subscript object at 0x7da1b1017220>, <ast.Name object at 0x7da1b1017370>, <ast.Subscript object at 0x7da1b1017280>, <ast.Subscript object at 0x7da1b1016440>, <ast.Name object at 0x7da1b10163b0>, <ast.Name object at 0x7da1b1016260>, <ast.Name object at 0x7da1b1014220>, <ast.IfExp object at 0x7da1b1014250>, <ast.Name object at 0x7da1b1014790>, <ast.Name object at 0x7da1b1014820>, <ast.Subscript object at 0x7da1b1016230>, <ast.Subscript object at 0x7da1b1016470>]]]
if name[display_html] begin[:]
<ast.Try object at 0x7da1b1017700> | keyword[def] identifier[draw] ( identifier[data] , identifier[format] = literal[string] , identifier[size] =( literal[int] , literal[int] ), identifier[drawing_type] = literal[string] ,
identifier[camera_type] = literal[string] , identifier[shader] = literal[string] , identifier[display_html] = keyword[True] ,
identifier[element_properties] = keyword[None] , identifier[show_save] = keyword[False] ):
literal[string]
identifier[draw_options] =[ literal[string] , literal[string] , literal[string] ]
identifier[camera_options] =[ literal[string] , literal[string] ]
identifier[shader_options] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[drawing_type] keyword[not] keyword[in] identifier[draw_options] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] . identifier[join] ( identifier[draw_options] ))
keyword[if] identifier[camera_type] keyword[not] keyword[in] identifier[camera_options] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] . identifier[join] ( identifier[camera_options] ))
keyword[if] identifier[shader] keyword[not] keyword[in] identifier[shader_options] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] . identifier[join] ( identifier[shader_options] ))
identifier[json_mol] = identifier[generate] ( identifier[data] , identifier[format] )
keyword[if] identifier[element_properties] keyword[is] keyword[None] :
identifier[element_properties] = identifier[dict] ()
identifier[json_element_properties] = identifier[to_json] ( identifier[element_properties] )
identifier[div_id] = identifier[uuid] . identifier[uuid4] ()
identifier[html] = literal[string] %( identifier[div_id] , identifier[local_path] [:- literal[int] ], identifier[remote_path] [:- literal[int] ],
identifier[div_id] , identifier[size] [ literal[int] ], identifier[size] [ literal[int] ], identifier[drawing_type] ,
identifier[camera_type] , identifier[shader] ,
literal[string] keyword[if] identifier[show_save] keyword[else] literal[string] ,
identifier[json_element_properties] ,
identifier[json_mol] , identifier[size] [ literal[int] ], identifier[size] [ literal[int] ])
keyword[if] identifier[display_html] :
keyword[try] :
identifier[__IPYTHON__]
keyword[except] identifier[NameError] :
keyword[import] identifier[shutil]
keyword[import] identifier[webbrowser]
keyword[from] identifier[tempfile] keyword[import] identifier[mkdtemp]
keyword[from] identifier[time] keyword[import] identifier[time]
keyword[try] :
keyword[from] identifier[urllib] . identifier[parse] keyword[import] identifier[urljoin]
keyword[from] identifier[urllib] . identifier[request] keyword[import] identifier[pathname2url]
keyword[except] identifier[ImportError] :
keyword[from] identifier[urlparse] keyword[import] identifier[urljoin]
keyword[from] identifier[urllib] keyword[import] identifier[pathname2url]
keyword[from] identifier[tornado] keyword[import] identifier[template]
identifier[t] = identifier[template] . identifier[Loader] ( identifier[file_path] ). identifier[load] ( literal[string] )
identifier[html] = identifier[t] . identifier[generate] ( identifier[title] = literal[string] , identifier[json_mol] = identifier[json_mol] ,
identifier[drawing_type] = identifier[drawing_type] , identifier[shader] = identifier[shader] ,
identifier[camera_type] = identifier[camera_type] ,
identifier[json_element_properties] = identifier[json_element_properties] )
identifier[tempdir] = identifier[mkdtemp] ( identifier[prefix] = literal[string] . identifier[format] ( identifier[time] ()))
identifier[html_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[tempdir] , literal[string] )
keyword[with] identifier[open] ( identifier[html_filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[html] )
identifier[libs] =(( literal[string] , literal[string] , literal[string] ),
( literal[string] , literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] , literal[string] ),
( literal[string] , literal[string] , literal[string] ))
keyword[for] identifier[lib] keyword[in] identifier[libs] :
identifier[shutil] . identifier[copy] ( identifier[os] . identifier[path] . identifier[join] ( identifier[file_path] ,* identifier[lib] ), identifier[tempdir] )
identifier[html_file_url] = identifier[urljoin] ( literal[string] , identifier[pathname2url] ( identifier[html_filename] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[html_file_url] ))
identifier[webbrowser] . identifier[open] ( identifier[html_file_url] )
keyword[else] :
identifier[display] ( identifier[HTML] ( identifier[html] ))
keyword[else] :
keyword[return] identifier[html] | def draw(data, format='auto', size=(400, 300), drawing_type='ball and stick', camera_type='perspective', shader='lambert', display_html=True, element_properties=None, show_save=False):
"""Draws an interactive 3D visualization of the inputted chemical.
Args:
data: A string or file representing a chemical.
format: The format of the `data` variable (default is 'auto').
size: Starting dimensions of visualization, in pixels.
drawing_type: Specifies the molecular representation. Can be 'ball and
stick', 'wireframe', or 'space filling'.
camera_type: Can be 'perspective' or 'orthographic'.
shader: Specifies shading algorithm to use. Can be 'toon', 'basic',
'phong', or 'lambert'.
display_html: If True (default), embed the html in a IPython display.
If False, return the html as a string.
element_properites: A dictionary providing color and radius information
for custom elements or overriding the defaults in imolecule.js
show_save: If True, displays a save icon for rendering molecule as an
image.
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (ie. my_file.mol -> mol) and defaults
to SMILES (smi) for strings.
"""
# Catch errors on string-based input before getting js involved
draw_options = ['ball and stick', 'wireframe', 'space filling']
camera_options = ['perspective', 'orthographic']
shader_options = ['toon', 'basic', 'phong', 'lambert']
if drawing_type not in draw_options:
raise Exception('Invalid drawing type! Please use one of: ' + ', '.join(draw_options)) # depends on [control=['if'], data=['draw_options']]
if camera_type not in camera_options:
raise Exception('Invalid camera type! Please use one of: ' + ', '.join(camera_options)) # depends on [control=['if'], data=['camera_options']]
if shader not in shader_options:
raise Exception('Invalid shader! Please use one of: ' + ', '.join(shader_options)) # depends on [control=['if'], data=['shader_options']]
json_mol = generate(data, format)
if element_properties is None:
element_properties = dict() # depends on [control=['if'], data=['element_properties']]
json_element_properties = to_json(element_properties)
div_id = uuid.uuid4()
html = '<div id="molecule_%s"></div>\n <script type="text/javascript">\n require.config({baseUrl: \'/\',\n paths: {imolecule: [\'%s\', \'%s\']}});\n require([\'imolecule\'], function () {\n var $d = $(\'#molecule_%s\');\n $d.width(%d); $d.height(%d);\n $d.imolecule = jQuery.extend({}, imolecule);\n $d.imolecule.create($d, {drawingType: \'%s\',\n cameraType: \'%s\',\n shader: \'%s\',\n showSave: %s});\n $d.imolecule.addElements(%s);\n $d.imolecule.draw(%s);\n\n $d.resizable({\n aspectRatio: %d / %d,\n resize: function (evt, ui) {\n $d.imolecule.renderer.setSize(ui.size.width,\n ui.size.height);\n }\n });\n });\n </script>' % (div_id, local_path[:-3], remote_path[:-3], div_id, size[0], size[1], drawing_type, camera_type, shader, 'true' if show_save else 'false', json_element_properties, json_mol, size[0], size[1])
# Execute js and display the results in a div (see script for more)
if display_html:
try:
__IPYTHON__ # depends on [control=['try'], data=[]]
except NameError:
# We're running outside ipython, let's generate a static HTML and
# show it in the browser
import shutil
import webbrowser
from tempfile import mkdtemp
from time import time
try: # Python 3
from urllib.parse import urljoin
from urllib.request import pathname2url # depends on [control=['try'], data=[]]
except ImportError: # Python 2
from urlparse import urljoin
from urllib import pathname2url # depends on [control=['except'], data=[]]
from tornado import template
t = template.Loader(file_path).load('viewer.template')
html = t.generate(title='imolecule', json_mol=json_mol, drawing_type=drawing_type, shader=shader, camera_type=camera_type, json_element_properties=json_element_properties)
tempdir = mkdtemp(prefix='imolecule_{:.0f}_'.format(time()))
html_filename = os.path.join(tempdir, 'index.html')
with open(html_filename, 'wb') as f:
f.write(html) # depends on [control=['with'], data=['f']]
libs = (('server', 'css', 'chosen.css'), ('server', 'css', 'server.css'), ('js', 'jquery-1.11.1.min.js'), ('server', 'js', 'chosen.jquery.min.js'), ('js', 'build', 'imolecule.min.js'))
for lib in libs:
shutil.copy(os.path.join(file_path, *lib), tempdir) # depends on [control=['for'], data=['lib']]
html_file_url = urljoin('file:', pathname2url(html_filename))
print('Opening html file: {}'.format(html_file_url))
webbrowser.open(html_file_url) # depends on [control=['except'], data=[]]
else:
# We're running in ipython: display widget
display(HTML(html)) # depends on [control=['if'], data=[]]
else:
return html |
def assign_mv_feeder_to_nodes(mv_grid):
    """
    Assigns an MV feeder to every generator, LV station, load, and branch tee
    Parameters
    -----------
    mv_grid : :class:`~.grid.grids.MVGrid`
    """
    station = mv_grid.station

    # Remove the MV station from a copy of the node list so that each
    # feeder becomes its own connected component of the remaining graph.
    nodes_without_station = mv_grid.graph.nodes()
    nodes_without_station.remove(station)
    feeder_graph = mv_grid.graph.subgraph(nodes_without_station)

    for first_node in mv_grid.graph.neighbors(station):
        # The line leaving the station towards `first_node` identifies
        # the feeder all downstream nodes belong to.
        feeder_line = mv_grid.graph.line_from_nodes(station, first_node)

        # Collect every node reachable from the feeder's first node via a
        # DFS in the station-free subgraph.
        for node in nx.dfs_tree(feeder_graph, source=first_node).nodes():
            if isinstance(node, LVStation):
                # Propagate the feeder assignment to the whole LV grid
                # hanging off this station.
                for lv_node in node.grid.graph.nodes():
                    lv_node.mv_feeder = feeder_line
            else:
                node.mv_feeder = feeder_line
constant[
Assigns an MV feeder to every generator, LV station, load, and branch tee
Parameters
-----------
mv_grid : :class:`~.grid.grids.MVGrid`
]
variable[mv_station_neighbors] assign[=] call[name[mv_grid].graph.neighbors, parameter[name[mv_grid].station]]
variable[mv_graph_nodes] assign[=] call[name[mv_grid].graph.nodes, parameter[]]
call[name[mv_graph_nodes].remove, parameter[name[mv_grid].station]]
variable[subgraph] assign[=] call[name[mv_grid].graph.subgraph, parameter[name[mv_graph_nodes]]]
for taget[name[neighbor]] in starred[name[mv_station_neighbors]] begin[:]
variable[mv_feeder] assign[=] call[name[mv_grid].graph.line_from_nodes, parameter[name[mv_grid].station, name[neighbor]]]
variable[subgraph_neighbor] assign[=] call[name[nx].dfs_tree, parameter[name[subgraph]]]
for taget[name[node]] in starred[call[name[subgraph_neighbor].nodes, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[node], name[LVStation]]] begin[:]
for taget[name[lv_node]] in starred[call[name[node].grid.graph.nodes, parameter[]]] begin[:]
name[lv_node].mv_feeder assign[=] name[mv_feeder] | keyword[def] identifier[assign_mv_feeder_to_nodes] ( identifier[mv_grid] ):
literal[string]
identifier[mv_station_neighbors] = identifier[mv_grid] . identifier[graph] . identifier[neighbors] ( identifier[mv_grid] . identifier[station] )
identifier[mv_graph_nodes] = identifier[mv_grid] . identifier[graph] . identifier[nodes] ()
identifier[mv_graph_nodes] . identifier[remove] ( identifier[mv_grid] . identifier[station] )
identifier[subgraph] = identifier[mv_grid] . identifier[graph] . identifier[subgraph] ( identifier[mv_graph_nodes] )
keyword[for] identifier[neighbor] keyword[in] identifier[mv_station_neighbors] :
identifier[mv_feeder] = identifier[mv_grid] . identifier[graph] . identifier[line_from_nodes] ( identifier[mv_grid] . identifier[station] , identifier[neighbor] )
identifier[subgraph_neighbor] = identifier[nx] . identifier[dfs_tree] ( identifier[subgraph] , identifier[source] = identifier[neighbor] )
keyword[for] identifier[node] keyword[in] identifier[subgraph_neighbor] . identifier[nodes] ():
keyword[if] identifier[isinstance] ( identifier[node] , identifier[LVStation] ):
keyword[for] identifier[lv_node] keyword[in] identifier[node] . identifier[grid] . identifier[graph] . identifier[nodes] ():
identifier[lv_node] . identifier[mv_feeder] = identifier[mv_feeder]
keyword[else] :
identifier[node] . identifier[mv_feeder] = identifier[mv_feeder] | def assign_mv_feeder_to_nodes(mv_grid):
"""
Assigns an MV feeder to every generator, LV station, load, and branch tee
Parameters
-----------
mv_grid : :class:`~.grid.grids.MVGrid`
"""
mv_station_neighbors = mv_grid.graph.neighbors(mv_grid.station)
# get all nodes in MV grid and remove MV station to get separate subgraphs
mv_graph_nodes = mv_grid.graph.nodes()
mv_graph_nodes.remove(mv_grid.station)
subgraph = mv_grid.graph.subgraph(mv_graph_nodes)
for neighbor in mv_station_neighbors:
# determine feeder
mv_feeder = mv_grid.graph.line_from_nodes(mv_grid.station, neighbor)
# get all nodes in that feeder by doing a DFS in the disconnected
# subgraph starting from the node adjacent to the MVStation `neighbor`
subgraph_neighbor = nx.dfs_tree(subgraph, source=neighbor)
for node in subgraph_neighbor.nodes():
# in case of an LV station assign feeder to all nodes in that LV
# grid
if isinstance(node, LVStation):
for lv_node in node.grid.graph.nodes():
lv_node.mv_feeder = mv_feeder # depends on [control=['for'], data=['lv_node']] # depends on [control=['if'], data=[]]
else:
node.mv_feeder = mv_feeder # depends on [control=['for'], data=['node']] # depends on [control=['for'], data=['neighbor']] |
def last_archive(self):
    '''
    Get the last available archive.

    Archive names are expected to carry a numeric suffix before the first
    dot, separated by a dash (e.g. ``backup-3.tgz``); the archive with the
    highest such number is returned.

    :return: name of the most recent archive, or None if there are none
    '''
    archives = {}
    for archive in self.archives():
        # key on the numeric suffix: 'backup-3.tgz' -> 3
        archives[int(archive.split('.')[0].split('-')[-1])] = archive
    # Explicit conditional instead of the fragile `x and y or z` idiom,
    # which would misbehave if an archive name were ever falsy.
    if not archives:
        return None
    return archives[max(archives)]
constant[
Get the last available archive
:return:
]
variable[archives] assign[=] dictionary[[], []]
for taget[name[archive]] in starred[call[name[self].archives, parameter[]]] begin[:]
call[name[archives]][call[name[int], parameter[call[call[call[call[name[archive].split, parameter[constant[.]]]][constant[0]].split, parameter[constant[-]]]][<ast.UnaryOp object at 0x7da18ede69e0>]]]] assign[=] name[archive]
return[<ast.BoolOp object at 0x7da18ede5e40>] | keyword[def] identifier[last_archive] ( identifier[self] ):
literal[string]
identifier[archives] ={}
keyword[for] identifier[archive] keyword[in] identifier[self] . identifier[archives] ():
identifier[archives] [ identifier[int] ( identifier[archive] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[- literal[int] ])]= identifier[archive]
keyword[return] identifier[archives] keyword[and] identifier[archives] [ identifier[max] ( identifier[archives] )] keyword[or] keyword[None] | def last_archive(self):
"""
Get the last available archive
:return:
"""
archives = {}
for archive in self.archives():
archives[int(archive.split('.')[0].split('-')[-1])] = archive # depends on [control=['for'], data=['archive']]
return archives and archives[max(archives)] or None |
def _run_program(self, bin, fastafile, params=None):
    """
    Run Weeder and predict motifs from a FASTA file.
    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.
    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    organism = params["organism"]
    # Map genome builds to Weeder's two-letter organism codes.
    weeder_organisms = {
        "hg18": "HS",
        "hg19": "HS",
        "hg38": "HS",
        "mm9": "MM",
        "mm10": "MM",
        "dm3": "DM",
        "dm5": "DM",
        "dm6": "DM",
        "yeast": "SC",
        "sacCer2": "SC",
        "sacCer3": "SC",
        "TAIR10": "AT",
        "TAIR11": "AT",
    }
    # Fall back to human if the genome build is unknown to Weeder.
    weeder_organism = weeder_organisms.get(organism, "HS")

    # Work on a copy inside tmpdir: Weeder writes its output files next
    # to the input FASTA.
    tmp = NamedTemporaryFile(dir=self.tmpdir)
    name = tmp.name
    tmp.close()
    shutil.copy(fastafile, name)
    fastafile = name

    # BUG FIX: the format string previously had only two placeholders, so
    # the organism argument was silently ignored and "-O" was passed
    # without a value; the third placeholder restores the intended
    # "<cmd> -f <fasta> -O <organism>" invocation.
    cmd = "{} -f {} -O {}".format(
        self.cmd,
        fastafile,
        weeder_organism,
    )
    if params["single"]:
        cmd += " -ss"

    stdout, stderr = "", ""
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir)
    out, err = p.communicate()
    stdout += out.decode()
    stderr += err.decode()

    motifs = []
    matrix_file = fastafile + ".matrix.w2"
    if os.path.exists(matrix_file):
        # Use a context manager so the handle is closed even if parsing
        # raises.
        with open(matrix_file) as f:
            motifs = self.parse(f)

    # Prefix motif ids with the tool name for traceability.
    for m in motifs:
        m.id = "{}_{}".format(self.name, m.id.split("\t")[0])

    # Remove Weeder's output files.
    for ext in [".w2", ".matrix.w2"]:
        if os.path.exists(fastafile + ext):
            os.unlink(fastafile + ext)

    return motifs, stdout, stderr
constant[
Run Weeder and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
]
variable[params] assign[=] call[name[self]._parse_params, parameter[name[params]]]
variable[organism] assign[=] call[name[params]][constant[organism]]
variable[weeder_organisms] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc95a0>, <ast.Constant object at 0x7da18bcca6b0>, <ast.Constant object at 0x7da18bcc9420>, <ast.Constant object at 0x7da18bcc9a80>, <ast.Constant object at 0x7da18bccbe50>, <ast.Constant object at 0x7da18bcc98d0>, <ast.Constant object at 0x7da18bcca4a0>, <ast.Constant object at 0x7da18bcca6e0>, <ast.Constant object at 0x7da18bcc85b0>, <ast.Constant object at 0x7da18bcc96c0>, <ast.Constant object at 0x7da18bccb580>, <ast.Constant object at 0x7da18bccb7c0>, <ast.Constant object at 0x7da18bccbd60>], [<ast.Constant object at 0x7da18bcc88e0>, <ast.Constant object at 0x7da18bccbfd0>, <ast.Constant object at 0x7da18bcc86d0>, <ast.Constant object at 0x7da18bcc8700>, <ast.Constant object at 0x7da18bcc8b50>, <ast.Constant object at 0x7da18bcc9ba0>, <ast.Constant object at 0x7da18bcc9ff0>, <ast.Constant object at 0x7da18bcca410>, <ast.Constant object at 0x7da18bcca230>, <ast.Constant object at 0x7da18bcc9de0>, <ast.Constant object at 0x7da18bcca020>, <ast.Constant object at 0x7da18bcca3e0>, <ast.Constant object at 0x7da18bcc8a00>]]
variable[weeder_organism] assign[=] call[name[weeder_organisms].get, parameter[name[organism], constant[HS]]]
variable[tmp] assign[=] call[name[NamedTemporaryFile], parameter[]]
variable[name] assign[=] name[tmp].name
call[name[tmp].close, parameter[]]
call[name[shutil].copy, parameter[name[fastafile], name[name]]]
variable[fastafile] assign[=] name[name]
variable[cmd] assign[=] call[constant[{} -f {} -O].format, parameter[name[self].cmd, name[fastafile], name[weeder_organism]]]
if call[name[params]][constant[single]] begin[:]
<ast.AugAssign object at 0x7da18bcc8ca0>
<ast.Tuple object at 0x7da18bccbf40> assign[=] tuple[[<ast.Constant object at 0x7da18bccab90>, <ast.Constant object at 0x7da18bcc8550>]]
variable[p] assign[=] call[name[Popen], parameter[name[cmd]]]
<ast.Tuple object at 0x7da18bcca3b0> assign[=] call[name[p].communicate, parameter[]]
<ast.AugAssign object at 0x7da18f09f2e0>
<ast.AugAssign object at 0x7da18f09d8a0>
variable[motifs] assign[=] list[[]]
if call[name[os].path.exists, parameter[binary_operation[name[fastafile] + constant[.matrix.w2]]]] begin[:]
variable[f] assign[=] call[name[open], parameter[binary_operation[name[fastafile] + constant[.matrix.w2]]]]
variable[motifs] assign[=] call[name[self].parse, parameter[name[f]]]
call[name[f].close, parameter[]]
for taget[name[m]] in starred[name[motifs]] begin[:]
name[m].id assign[=] call[constant[{}_{}].format, parameter[name[self].name, call[call[name[m].id.split, parameter[constant[ ]]]][constant[0]]]]
for taget[name[ext]] in starred[list[[<ast.Constant object at 0x7da18f09d3f0>, <ast.Constant object at 0x7da18f09c640>]]] begin[:]
if call[name[os].path.exists, parameter[binary_operation[name[fastafile] + name[ext]]]] begin[:]
call[name[os].unlink, parameter[binary_operation[name[fastafile] + name[ext]]]]
return[tuple[[<ast.Name object at 0x7da18f09d4e0>, <ast.Name object at 0x7da18f09cac0>, <ast.Name object at 0x7da18f09e3e0>]]] | keyword[def] identifier[_run_program] ( identifier[self] , identifier[bin] , identifier[fastafile] , identifier[params] = keyword[None] ):
literal[string]
identifier[params] = identifier[self] . identifier[_parse_params] ( identifier[params] )
identifier[organism] = identifier[params] [ literal[string] ]
identifier[weeder_organisms] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[weeder_organism] = identifier[weeder_organisms] . identifier[get] ( identifier[organism] , literal[string] )
identifier[tmp] = identifier[NamedTemporaryFile] ( identifier[dir] = identifier[self] . identifier[tmpdir] )
identifier[name] = identifier[tmp] . identifier[name]
identifier[tmp] . identifier[close] ()
identifier[shutil] . identifier[copy] ( identifier[fastafile] , identifier[name] )
identifier[fastafile] = identifier[name]
identifier[cmd] = literal[string] . identifier[format] (
identifier[self] . identifier[cmd] ,
identifier[fastafile] ,
identifier[weeder_organism] ,
)
keyword[if] identifier[params] [ literal[string] ]:
identifier[cmd] += literal[string]
identifier[stdout] , identifier[stderr] = literal[string] , literal[string]
identifier[p] = identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] , identifier[cwd] = identifier[self] . identifier[tmpdir] )
identifier[out] , identifier[err] = identifier[p] . identifier[communicate] ()
identifier[stdout] += identifier[out] . identifier[decode] ()
identifier[stderr] += identifier[err] . identifier[decode] ()
identifier[motifs] =[]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[fastafile] + literal[string] ):
identifier[f] = identifier[open] ( identifier[fastafile] + literal[string] )
identifier[motifs] = identifier[self] . identifier[parse] ( identifier[f] )
identifier[f] . identifier[close] ()
keyword[for] identifier[m] keyword[in] identifier[motifs] :
identifier[m] . identifier[id] = literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[m] . identifier[id] . identifier[split] ( literal[string] )[ literal[int] ])
keyword[for] identifier[ext] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[fastafile] + identifier[ext] ):
identifier[os] . identifier[unlink] ( identifier[fastafile] + identifier[ext] )
keyword[return] identifier[motifs] , identifier[stdout] , identifier[stderr] | def _run_program(self, bin, fastafile, params=None):
"""
Run Weeder and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
params = self._parse_params(params)
organism = params['organism']
weeder_organisms = {'hg18': 'HS', 'hg19': 'HS', 'hg38': 'HS', 'mm9': 'MM', 'mm10': 'MM', 'dm3': 'DM', 'dm5': 'DM', 'dm6': 'DM', 'yeast': 'SC', 'sacCer2': 'SC', 'sacCer3': 'SC', 'TAIR10': 'AT', 'TAIR11': 'AT'}
weeder_organism = weeder_organisms.get(organism, 'HS')
tmp = NamedTemporaryFile(dir=self.tmpdir)
name = tmp.name
tmp.close()
shutil.copy(fastafile, name)
fastafile = name
cmd = '{} -f {} -O'.format(self.cmd, fastafile, weeder_organism)
if params['single']:
cmd += ' -ss' # depends on [control=['if'], data=[]]
#print cmd
(stdout, stderr) = ('', '')
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir)
(out, err) = p.communicate()
stdout += out.decode()
stderr += err.decode()
motifs = []
if os.path.exists(fastafile + '.matrix.w2'):
f = open(fastafile + '.matrix.w2')
motifs = self.parse(f)
f.close() # depends on [control=['if'], data=[]]
for m in motifs:
m.id = '{}_{}'.format(self.name, m.id.split('\t')[0]) # depends on [control=['for'], data=['m']]
for ext in ['.w2', '.matrix.w2']:
if os.path.exists(fastafile + ext):
os.unlink(fastafile + ext) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ext']]
return (motifs, stdout, stderr) |
def recall(result, reference):
    """
    Recall.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    recall : float
        The recall between two binary datasets, here mostly binary objects in images,
        which is defined as the fraction of relevant instances that are retrieved. The
        recall is not symmetric.
    See also
    --------
    :func:`precision`
    Notes
    -----
    Not symmetric. The inverse of the recall is :func:`precision`.
    High recall means that an algorithm returned most of the relevant results.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Precision_and_recall
    .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
    """
    # BUG FIX: the deprecated alias ``numpy.bool`` was removed in
    # NumPy 1.24; the builtin ``bool`` is the supported, equivalent dtype.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))

    # true positives / false negatives of the binary masks
    tp = numpy.count_nonzero(result & reference)
    fn = numpy.count_nonzero(~result & reference)

    try:
        value = tp / float(tp + fn)
    except ZeroDivisionError:
        # no positives in the reference at all -> define recall as 0
        value = 0.0

    return value
constant[
Recall.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
recall : float
The recall between two binary datasets, here mostly binary objects in images,
which is defined as the fraction of relevant instances that are retrieved. The
recall is not symmetric.
See also
--------
:func:`precision`
Notes
-----
Not symmetric. The inverse of the recall is :func:`precision`.
High recall means that an algorithm returned most of the relevant results.
References
----------
.. [1] http://en.wikipedia.org/wiki/Precision_and_recall
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
]
variable[result] assign[=] call[name[numpy].atleast_1d, parameter[call[name[result].astype, parameter[name[numpy].bool]]]]
variable[reference] assign[=] call[name[numpy].atleast_1d, parameter[call[name[reference].astype, parameter[name[numpy].bool]]]]
variable[tp] assign[=] call[name[numpy].count_nonzero, parameter[binary_operation[name[result] <ast.BitAnd object at 0x7da2590d6b60> name[reference]]]]
variable[fn] assign[=] call[name[numpy].count_nonzero, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b12d95d0> <ast.BitAnd object at 0x7da2590d6b60> name[reference]]]]
<ast.Try object at 0x7da1b12d9450>
return[name[recall]] | keyword[def] identifier[recall] ( identifier[result] , identifier[reference] ):
literal[string]
identifier[result] = identifier[numpy] . identifier[atleast_1d] ( identifier[result] . identifier[astype] ( identifier[numpy] . identifier[bool] ))
identifier[reference] = identifier[numpy] . identifier[atleast_1d] ( identifier[reference] . identifier[astype] ( identifier[numpy] . identifier[bool] ))
identifier[tp] = identifier[numpy] . identifier[count_nonzero] ( identifier[result] & identifier[reference] )
identifier[fn] = identifier[numpy] . identifier[count_nonzero] (~ identifier[result] & identifier[reference] )
keyword[try] :
identifier[recall] = identifier[tp] / identifier[float] ( identifier[tp] + identifier[fn] )
keyword[except] identifier[ZeroDivisionError] :
identifier[recall] = literal[int]
keyword[return] identifier[recall] | def recall(result, reference):
"""
Recall.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
recall : float
The recall between two binary datasets, here mostly binary objects in images,
which is defined as the fraction of relevant instances that are retrieved. The
recall is not symmetric.
See also
--------
:func:`precision`
Notes
-----
Not symmetric. The inverse of the recall is :func:`precision`.
High recall means that an algorithm returned most of the relevant results.
References
----------
.. [1] http://en.wikipedia.org/wiki/Precision_and_recall
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
"""
result = numpy.atleast_1d(result.astype(numpy.bool))
reference = numpy.atleast_1d(reference.astype(numpy.bool))
tp = numpy.count_nonzero(result & reference)
fn = numpy.count_nonzero(~result & reference)
try:
recall = tp / float(tp + fn) # depends on [control=['try'], data=[]]
except ZeroDivisionError:
recall = 0.0 # depends on [control=['except'], data=[]]
return recall |
def loads(s, check_version=True):
    """
    Load VOEvent from bytes.
    This parses a VOEvent XML packet string, taking care of some subtleties.
    For Python 3 users, ``s`` should be a bytes object - see also
    http://lxml.de/FAQ.html,
    "Why can't lxml parse my XML from unicode strings?"
    (Python 2 users can stick with old-school ``str`` type if preferred)
    By default, will raise an exception if the VOEvent is not of version
    2.0. This can be disabled but voevent-parse routines are untested with
    other versions.
    Args:
        s (bytes): Bytes containing raw XML.
        check_version (bool): (Default=True) Checks that the VOEvent is of a
            supported schema version - currently only v2.0 is supported.
    Returns:
        :py:class:`Voevent`: Root-node of the etree.
    Raises:
        ValueError: If passed a VOEvent of wrong schema version
            (i.e. schema 1.1)
    """
    # NOTE: the namespace prefix is stripped from the root element tag so
    # that objectify attribute access works as expected (see
    # :py:func:`._remove_root_tag_prefix`); it has to be re-inserted when
    # conforming to the schema later.
    root = objectify.fromstring(s)
    _remove_root_tag_prefix(root)
    if check_version:
        version = root.attrib['version']
        if version != '2.0':
            raise ValueError('Unsupported VOEvent schema version:' + version)
    return root
constant[
Load VOEvent from bytes.
This parses a VOEvent XML packet string, taking care of some subtleties.
For Python 3 users, ``s`` should be a bytes object - see also
http://lxml.de/FAQ.html,
"Why can't lxml parse my XML from unicode strings?"
(Python 2 users can stick with old-school ``str`` type if preferred)
By default, will raise an exception if the VOEvent is not of version
2.0. This can be disabled but voevent-parse routines are untested with
other versions.
Args:
s (bytes): Bytes containing raw XML.
check_version (bool): (Default=True) Checks that the VOEvent is of a
supported schema version - currently only v2.0 is supported.
Returns:
:py:class:`Voevent`: Root-node of the etree.
Raises:
ValueError: If passed a VOEvent of wrong schema version
(i.e. schema 1.1)
]
variable[v] assign[=] call[name[objectify].fromstring, parameter[name[s]]]
call[name[_remove_root_tag_prefix], parameter[name[v]]]
if name[check_version] begin[:]
variable[version] assign[=] call[name[v].attrib][constant[version]]
if <ast.UnaryOp object at 0x7da1b0bcf430> begin[:]
<ast.Raise object at 0x7da1b0bcfd30>
return[name[v]] | keyword[def] identifier[loads] ( identifier[s] , identifier[check_version] = keyword[True] ):
literal[string]
identifier[v] = identifier[objectify] . identifier[fromstring] ( identifier[s] )
identifier[_remove_root_tag_prefix] ( identifier[v] )
keyword[if] identifier[check_version] :
identifier[version] = identifier[v] . identifier[attrib] [ literal[string] ]
keyword[if] keyword[not] identifier[version] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[version] )
keyword[return] identifier[v] | def loads(s, check_version=True):
"""
Load VOEvent from bytes.
This parses a VOEvent XML packet string, taking care of some subtleties.
For Python 3 users, ``s`` should be a bytes object - see also
http://lxml.de/FAQ.html,
"Why can't lxml parse my XML from unicode strings?"
(Python 2 users can stick with old-school ``str`` type if preferred)
By default, will raise an exception if the VOEvent is not of version
2.0. This can be disabled but voevent-parse routines are untested with
other versions.
Args:
s (bytes): Bytes containing raw XML.
check_version (bool): (Default=True) Checks that the VOEvent is of a
supported schema version - currently only v2.0 is supported.
Returns:
:py:class:`Voevent`: Root-node of the etree.
Raises:
ValueError: If passed a VOEvent of wrong schema version
(i.e. schema 1.1)
"""
# .. note::
#
# The namespace is removed from the root element tag to make
# objectify access work as expected,
# (see :py:func:`._remove_root_tag_prefix`)
# so we must re-insert it when we want to conform to schema.
v = objectify.fromstring(s)
_remove_root_tag_prefix(v)
if check_version:
version = v.attrib['version']
if not version == '2.0':
raise ValueError('Unsupported VOEvent schema version:' + version) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return v |
def save(self, name, file):
    """
    Saves new content to the file specified by name. The content should be
    a proper File object or any python file-like object, ready to be read
    from the beginning.
    """
    # Fall back to the file object's own name when none was supplied.
    if name is None:
        name = file.name

    # Wrap plain file-like objects so downstream code can rely on the
    # File API (e.g. ``chunks()``).
    if not hasattr(file, 'chunks'):
        file = File(file, name=name)

    available_name = self.get_available_name(name)
    saved_name = self._save(available_name, file)

    # Store filenames with forward slashes, even on Windows
    return saved_name.replace('\\', '/')
constant[
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] name[file].name
if <ast.UnaryOp object at 0x7da204961e70> begin[:]
variable[file] assign[=] call[name[File], parameter[name[file]]]
variable[name] assign[=] call[name[self].get_available_name, parameter[name[name]]]
variable[name] assign[=] call[name[self]._save, parameter[name[name], name[file]]]
return[call[name[name].replace, parameter[constant[\], constant[/]]]] | keyword[def] identifier[save] ( identifier[self] , identifier[name] , identifier[file] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[file] . identifier[name]
keyword[if] keyword[not] identifier[hasattr] ( identifier[file] , literal[string] ):
identifier[file] = identifier[File] ( identifier[file] , identifier[name] = identifier[name] )
identifier[name] = identifier[self] . identifier[get_available_name] ( identifier[name] )
identifier[name] = identifier[self] . identifier[_save] ( identifier[name] , identifier[file] )
keyword[return] identifier[name] . identifier[replace] ( literal[string] , literal[string] ) | def save(self, name, file):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = file.name # depends on [control=['if'], data=['name']]
if not hasattr(file, 'chunks'):
file = File(file, name=name) # depends on [control=['if'], data=[]]
name = self.get_available_name(name)
name = self._save(name, file)
# Store filenames with forward slashes, even on Windows
return name.replace('\\', '/') |
def get_plugin_class(self, plugin_name):
    """Returns the class registered under the given plugin name."""
    # Membership test instead of try/except keeps the unknown-plugin
    # error path explicit.
    if plugin_name in self.plugin_classes:
        return self.plugin_classes[plugin_name]
    raise RezPluginError("Unrecognised %s plugin: '%s'"
                         % (self.pretty_type_name, plugin_name))
constant[Returns the class registered under the given plugin name.]
<ast.Try object at 0x7da2054a60b0> | keyword[def] identifier[get_plugin_class] ( identifier[self] , identifier[plugin_name] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[plugin_classes] [ identifier[plugin_name] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[RezPluginError] ( literal[string]
%( identifier[self] . identifier[pretty_type_name] , identifier[plugin_name] )) | def get_plugin_class(self, plugin_name):
"""Returns the class registered under the given plugin name."""
try:
return self.plugin_classes[plugin_name] # depends on [control=['try'], data=[]]
except KeyError:
raise RezPluginError("Unrecognised %s plugin: '%s'" % (self.pretty_type_name, plugin_name)) # depends on [control=['except'], data=[]] |
def absolute_magnitude(distance_modulus, g, r, prob=None):
    """ Calculate the absolute magnitude from a set of bands """
    # ``prob`` is accepted for interface compatibility but is not used
    # in this computation.
    # Empirical linear transformation from the g, r bands to a V-band
    # magnitude.
    v_band = g - 0.487 * (g - r) - 0.0249
    # Sum the fluxes after shifting each magnitude by the distance
    # modulus, then convert the total flux back to a magnitude.
    total_flux = np.sum(10 ** (-(v_band - distance_modulus) / 2.5))
    return -2.5 * np.log10(total_flux)
constant[ Calculate the absolute magnitude from a set of bands ]
variable[V] assign[=] binary_operation[binary_operation[name[g] - binary_operation[constant[0.487] * binary_operation[name[g] - name[r]]]] - constant[0.0249]]
variable[flux] assign[=] call[name[np].sum, parameter[binary_operation[constant[10] ** binary_operation[<ast.UnaryOp object at 0x7da2047e9d20> / constant[2.5]]]]]
variable[Mv] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18dc07b50> * call[name[np].log10, parameter[name[flux]]]]
return[name[Mv]] | keyword[def] identifier[absolute_magnitude] ( identifier[distance_modulus] , identifier[g] , identifier[r] , identifier[prob] = keyword[None] ):
literal[string]
identifier[V] = identifier[g] - literal[int] *( identifier[g] - identifier[r] )- literal[int]
identifier[flux] = identifier[np] . identifier[sum] ( literal[int] **(-( identifier[V] - identifier[distance_modulus] )/ literal[int] ))
identifier[Mv] =- literal[int] * identifier[np] . identifier[log10] ( identifier[flux] )
keyword[return] identifier[Mv] | def absolute_magnitude(distance_modulus, g, r, prob=None):
""" Calculate the absolute magnitude from a set of bands """
V = g - 0.487 * (g - r) - 0.0249
flux = np.sum(10 ** (-(V - distance_modulus) / 2.5))
Mv = -2.5 * np.log10(flux)
return Mv |
def dict_to_serializable_repr(x):
    """
    Recursively convert values of dictionary to serializable representations.
    Convert non-string keys to JSON representations and replace them in the
    dictionary with indices of unique JSON strings (e.g. __1, __2, etc..).
    """
    # JSON representations of every non-string key, in first-seen order.
    key_json_list = []
    # Maps a key's JSON representation to its placeholder name (e.g. "__1")
    # so repeated keys reuse the same placeholder.
    json_to_placeholder = {}
    # Instantiate the same class as x (not plain dict) so subclasses such
    # as OrderedDict survive the conversion.
    result = type(x)()
    for key, value in x.items():
        if not isinstance(key, string_types):
            # JSON cannot represent complex key types (tuples, hashable
            # user objects) directly, so serialize the key and refer to it
            # by an index into key_json_list instead.
            key_json = to_json(key)
            try:
                key = json_to_placeholder[key_json]
            except KeyError:
                key = index_to_serialized_key_name(len(key_json_list))
                json_to_placeholder[key_json] = key
                key_json_list.append(key_json)
        result[key] = to_serializable_repr(value)
    if key_json_list:
        # Only attach the serialized-keys field when at least one
        # non-string key was encountered.
        result[SERIALIZED_DICTIONARY_KEYS_FIELD] = key_json_list
    return result
constant[
Recursively convert values of dictionary to serializable representations.
Convert non-string keys to JSON representations and replace them in the
dictionary with indices of unique JSON strings (e.g. __1, __2, etc..).
]
variable[serialized_key_list] assign[=] list[[]]
variable[serialized_keys_to_names] assign[=] dictionary[[], []]
variable[result] assign[=] call[call[name[type], parameter[name[x]]], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b28f52a0>, <ast.Name object at 0x7da1b28f4730>]]] in starred[call[name[x].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b28f5570> begin[:]
variable[serialized_key_repr] assign[=] call[name[to_json], parameter[name[k]]]
if compare[name[serialized_key_repr] in name[serialized_keys_to_names]] begin[:]
variable[k] assign[=] call[name[serialized_keys_to_names]][name[serialized_key_repr]]
call[name[result]][name[k]] assign[=] call[name[to_serializable_repr], parameter[name[v]]]
if compare[call[name[len], parameter[name[serialized_key_list]]] greater[>] constant[0]] begin[:]
call[name[result]][name[SERIALIZED_DICTIONARY_KEYS_FIELD]] assign[=] name[serialized_key_list]
return[name[result]] | keyword[def] identifier[dict_to_serializable_repr] ( identifier[x] ):
literal[string]
identifier[serialized_key_list] =[]
identifier[serialized_keys_to_names] ={}
identifier[result] = identifier[type] ( identifier[x] )()
keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[x] . identifier[items] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[k] , identifier[string_types] ):
identifier[serialized_key_repr] = identifier[to_json] ( identifier[k] )
keyword[if] identifier[serialized_key_repr] keyword[in] identifier[serialized_keys_to_names] :
identifier[k] = identifier[serialized_keys_to_names] [ identifier[serialized_key_repr] ]
keyword[else] :
identifier[k] = identifier[index_to_serialized_key_name] ( identifier[len] ( identifier[serialized_key_list] ))
identifier[serialized_keys_to_names] [ identifier[serialized_key_repr] ]= identifier[k]
identifier[serialized_key_list] . identifier[append] ( identifier[serialized_key_repr] )
identifier[result] [ identifier[k] ]= identifier[to_serializable_repr] ( identifier[v] )
keyword[if] identifier[len] ( identifier[serialized_key_list] )> literal[int] :
identifier[result] [ identifier[SERIALIZED_DICTIONARY_KEYS_FIELD] ]= identifier[serialized_key_list]
keyword[return] identifier[result] | def dict_to_serializable_repr(x):
"""
Recursively convert values of dictionary to serializable representations.
Convert non-string keys to JSON representations and replace them in the
dictionary with indices of unique JSON strings (e.g. __1, __2, etc..).
"""
# list of JSON representations of hashable objects which were
# used as keys in this dictionary
serialized_key_list = []
serialized_keys_to_names = {}
# use the class of x rather just dict since we might want to convert
# derived classes such as OrderedDict
result = type(x)()
for (k, v) in x.items():
if not isinstance(k, string_types):
# JSON does not support using complex types such as tuples
# or user-defined objects with implementations of __hash__ as
# keys in a dictionary so we must keep the serialized
# representations of such values in a list and refer to indices
# in that list
serialized_key_repr = to_json(k)
if serialized_key_repr in serialized_keys_to_names:
k = serialized_keys_to_names[serialized_key_repr] # depends on [control=['if'], data=['serialized_key_repr', 'serialized_keys_to_names']]
else:
k = index_to_serialized_key_name(len(serialized_key_list))
serialized_keys_to_names[serialized_key_repr] = k
serialized_key_list.append(serialized_key_repr) # depends on [control=['if'], data=[]]
result[k] = to_serializable_repr(v) # depends on [control=['for'], data=[]]
if len(serialized_key_list) > 0:
# only include this list of serialized keys if we had any non-string
# keys
result[SERIALIZED_DICTIONARY_KEYS_FIELD] = serialized_key_list # depends on [control=['if'], data=[]]
return result |
def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs):
    """ Simplified add for the most common options.
        Parameters:
            name (str): Name of the library
            agent (str): Example com.plexapp.agents.imdb
            type (str): movie, show, # check me
            location (str): /path/to/files
            language (str): Two letter language fx en
            kwargs (dict): Advanced options should be passed as a dict. where the id is the key.
        **Photo Preferences**
            * **agent** (str): com.plexapp.agents.none
            * **enableAutoPhotoTags** (bool): Tag photos. Default value false.
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Photo Scanner
        **Movie Preferences**
            * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
        **IMDB Movie Options** (com.plexapp.agents.imdb)
            * **title** (bool): Localized titles. Default value false.
            * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
            * **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
            * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
            * **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
            * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **ratings** (int): Ratings Source, Default value 0 Possible options:
              0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database.
            * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria,
              3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
              11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador,
              16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland,
              22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands,
              29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal,
              35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa,
              40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom,
              46:United States, 47:Uruguay, 48:Venezuela.
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **usage** (bool): Send anonymous usage data to Plex. Default value true.
        **TheMovieDB Movie Options** (com.plexapp.agents.themoviedb)
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **country** (int): Country (used for release date and content rating). Default value 47 Possible
              options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada,
              9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
              16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
              23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
              30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
              36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain,
              42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay,
              49:Venezuela.
        **Show Preferences**
            * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first.
            * **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Series Scanner
        **TheTVDB Show Options** (com.plexapp.agents.thetvdb)
            * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
            * **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
        **TheMovieDB Show Options** (com.plexapp.agents.themoviedb)
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **country** (int): Country (used for release date and content rating). Default value 47 options
              0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile,
              10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
              16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
              23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
              30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
              36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa,
              41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States,
              48:Uruguay, 49:Venezuela.
        **Other Video Preferences**
            * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
        **IMDB Other Video Options** (com.plexapp.agents.imdb)
            * **title** (bool): Localized titles. Default value false.
            * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
            * **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
            * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
            * **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
            * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **ratings** (int): Ratings Source Default value 0 Possible options:
              0:Rotten Tomatoes,1:IMDb,2:The Movie Database.
            * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria,
              3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
              11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France,
              17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica,
              24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua,
              31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico,
              37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad,
              45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela.
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **usage** (bool): Send anonymous usage data to Plex. Default value true.
        **TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb)
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **country** (int): Country (used for release date and content rating). Default
              value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize,
              6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic,
              13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany,
              19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica,
              25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand,
              31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
              36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore,
              40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad,
              46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela.
    """
    part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % (
        quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location))  # noqa E126
    if kwargs:
        # BUGFIX: join the advanced options onto the existing query string
        # with a '&' separator; without it the first kwarg fused directly
        # into the location value (e.g. 'location=/pathsort=...').
        part += '&' + urlencode(kwargs)
    return self._server.query(part, method=self._server._session.post)
constant[ Simplified add for the most common options.
Parameters:
name (str): Name of the library
agent (str): Example com.plexapp.agents.imdb
type (str): movie, show, # check me
location (str): /path/to/files
language (str): Two letter language fx en
kwargs (dict): Advanced options should be passed as a dict. where the id is the key.
**Photo Preferences**
* **agent** (str): com.plexapp.agents.none
* **enableAutoPhotoTags** (bool): Tag photos. Default value false.
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Photo Scanner
**Movie Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Movie Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source, Default value 0 Possible options:
0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador,
16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland,
22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands,
29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal,
35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa,
40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom,
46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Movie Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 Possible
options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada,
9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain,
42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay,
49:Venezuela.
**Show Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first.
* **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Series Scanner
**TheTVDB Show Options** (com.plexapp.agents.thetvdb)
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
**TheMovieDB Show Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 options
0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile,
10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa,
41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States,
48:Uruguay, 49:Venezuela.
**Other Video Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Other Video Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source Default value 0 Possible options:
0:Rotten Tomatoes,1:IMDb,2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France,
17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica,
24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua,
31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico,
37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad,
45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default
value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize,
6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic,
13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany,
19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica,
25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand,
31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore,
40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad,
46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela.
]
variable[part] assign[=] binary_operation[constant[/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18eb544f0>, <ast.Name object at 0x7da1b0608fd0>, <ast.Name object at 0x7da1b06081c0>, <ast.Call object at 0x7da1b060ad70>, <ast.Name object at 0x7da1b0608ee0>, <ast.Call object at 0x7da20c6a9f30>]]]
if name[kwargs] begin[:]
<ast.AugAssign object at 0x7da20c6a8b50>
return[call[name[self]._server.query, parameter[name[part]]]] | keyword[def] identifier[add] ( identifier[self] , identifier[name] = literal[string] , identifier[type] = literal[string] , identifier[agent] = literal[string] , identifier[scanner] = literal[string] , identifier[location] = literal[string] , identifier[language] = literal[string] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[part] = literal[string] %(
identifier[quote_plus] ( identifier[name] ), identifier[type] , identifier[agent] , identifier[quote_plus] ( identifier[scanner] ), identifier[language] , identifier[quote_plus] ( identifier[location] ))
keyword[if] identifier[kwargs] :
identifier[part] += identifier[urlencode] ( identifier[kwargs] )
keyword[return] identifier[self] . identifier[_server] . identifier[query] ( identifier[part] , identifier[method] = identifier[self] . identifier[_server] . identifier[_session] . identifier[post] ) | def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs):
""" Simplified add for the most common options.
Parameters:
name (str): Name of the library
agent (str): Example com.plexapp.agents.imdb
type (str): movie, show, # check me
location (str): /path/to/files
language (str): Two letter language fx en
kwargs (dict): Advanced options should be passed as a dict. where the id is the key.
**Photo Preferences**
* **agent** (str): com.plexapp.agents.none
* **enableAutoPhotoTags** (bool): Tag photos. Default value false.
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Photo Scanner
**Movie Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Movie Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source, Default value 0 Possible options:
0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador,
16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland,
22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands,
29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal,
35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa,
40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom,
46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Movie Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 Possible
options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada,
9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain,
42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay,
49:Venezuela.
**Show Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first.
* **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Series Scanner
**TheTVDB Show Options** (com.plexapp.agents.thetvdb)
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
**TheMovieDB Show Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 options
0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile,
10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa,
41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States,
48:Uruguay, 49:Venezuela.
**Other Video Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Other Video Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source Default value 0 Possible options:
0:Rotten Tomatoes,1:IMDb,2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France,
17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica,
24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua,
31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico,
37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad,
45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default
value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize,
6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic,
13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany,
19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica,
25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand,
31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore,
40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad,
46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela.
"""
part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % (quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location)) # noqa E126
if kwargs:
part += urlencode(kwargs) # depends on [control=['if'], data=[]]
return self._server.query(part, method=self._server._session.post) |
def current_pos(self):
    """Get circular buffer location where current data is being written."""
    ptr = _decode(
        self._read_fixed_block(0x0020), self.lo_fix_format['current_pos'])
    if ptr != self._current_ptr:
        # A move by anything other than exactly one record is suspicious.
        if self._current_ptr and ptr != self.inc_ptr(self._current_ptr):
            logger.error(
                'unexpected ptr change %06x -> %06x', self._current_ptr, ptr)
        self._current_ptr = ptr
    return self._current_ptr
constant[Get circular buffer location where current data is being written.]
variable[new_ptr] assign[=] call[name[_decode], parameter[call[name[self]._read_fixed_block, parameter[constant[32]]], call[name[self].lo_fix_format][constant[current_pos]]]]
if compare[name[new_ptr] equal[==] name[self]._current_ptr] begin[:]
return[name[self]._current_ptr]
if <ast.BoolOp object at 0x7da18f7232e0> begin[:]
call[name[logger].error, parameter[constant[unexpected ptr change %06x -> %06x], name[self]._current_ptr, name[new_ptr]]]
name[self]._current_ptr assign[=] name[new_ptr]
return[name[self]._current_ptr] | keyword[def] identifier[current_pos] ( identifier[self] ):
literal[string]
identifier[new_ptr] = identifier[_decode] (
identifier[self] . identifier[_read_fixed_block] ( literal[int] ), identifier[self] . identifier[lo_fix_format] [ literal[string] ])
keyword[if] identifier[new_ptr] == identifier[self] . identifier[_current_ptr] :
keyword[return] identifier[self] . identifier[_current_ptr]
keyword[if] identifier[self] . identifier[_current_ptr] keyword[and] identifier[new_ptr] != identifier[self] . identifier[inc_ptr] ( identifier[self] . identifier[_current_ptr] ):
identifier[logger] . identifier[error] (
literal[string] , identifier[self] . identifier[_current_ptr] , identifier[new_ptr] )
identifier[self] . identifier[_current_ptr] = identifier[new_ptr]
keyword[return] identifier[self] . identifier[_current_ptr] | def current_pos(self):
"""Get circular buffer location where current data is being written."""
new_ptr = _decode(self._read_fixed_block(32), self.lo_fix_format['current_pos'])
if new_ptr == self._current_ptr:
return self._current_ptr # depends on [control=['if'], data=[]]
if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr):
logger.error('unexpected ptr change %06x -> %06x', self._current_ptr, new_ptr) # depends on [control=['if'], data=[]]
self._current_ptr = new_ptr
return self._current_ptr |
def barlam(self, wavelengths=None):
    """Calculate :ref:`mean log wavelength <synphot-formula-barlam>`.

    Parameters
    ----------
    wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
        Wavelength values for sampling.
        If not a Quantity, assumed to be in Angstrom.
        If `None`, `waveset` is used.

    Returns
    -------
    bar_lam : `~astropy.units.quantity.Quantity`
        Mean log wavelength.

    """
    wave = self._validate_wavelengths(wavelengths).value
    thru = self(wave).value
    # Throughput-weighted integrals of ln(lambda)/lambda and 1/lambda.
    numerator = np.trapz(thru * np.log(wave) / wave, x=wave)
    denominator = np.trapz(thru / wave, x=wave)
    if numerator == 0 or denominator == 0:  # pragma: no cover
        bar_lam = 0.0
    else:
        bar_lam = np.exp(abs(numerator / denominator))
    return bar_lam * self._internal_wave_unit
constant[Calculate :ref:`mean log wavelength <synphot-formula-barlam>`.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
Returns
-------
bar_lam : `~astropy.units.quantity.Quantity`
Mean log wavelength.
]
variable[x] assign[=] call[name[self]._validate_wavelengths, parameter[name[wavelengths]]].value
variable[y] assign[=] call[name[self], parameter[name[x]]].value
variable[num] assign[=] call[name[np].trapz, parameter[binary_operation[binary_operation[name[y] * call[name[np].log, parameter[name[x]]]] / name[x]]]]
variable[den] assign[=] call[name[np].trapz, parameter[binary_operation[name[y] / name[x]]]]
if <ast.BoolOp object at 0x7da1b26acb20> begin[:]
variable[bar_lam] assign[=] constant[0.0]
return[binary_operation[name[bar_lam] * name[self]._internal_wave_unit]] | keyword[def] identifier[barlam] ( identifier[self] , identifier[wavelengths] = keyword[None] ):
literal[string]
identifier[x] = identifier[self] . identifier[_validate_wavelengths] ( identifier[wavelengths] ). identifier[value]
identifier[y] = identifier[self] ( identifier[x] ). identifier[value]
identifier[num] = identifier[np] . identifier[trapz] ( identifier[y] * identifier[np] . identifier[log] ( identifier[x] )/ identifier[x] , identifier[x] = identifier[x] )
identifier[den] = identifier[np] . identifier[trapz] ( identifier[y] / identifier[x] , identifier[x] = identifier[x] )
keyword[if] identifier[num] == literal[int] keyword[or] identifier[den] == literal[int] :
identifier[bar_lam] = literal[int]
keyword[else] :
identifier[bar_lam] = identifier[np] . identifier[exp] ( identifier[abs] ( identifier[num] / identifier[den] ))
keyword[return] identifier[bar_lam] * identifier[self] . identifier[_internal_wave_unit] | def barlam(self, wavelengths=None):
"""Calculate :ref:`mean log wavelength <synphot-formula-barlam>`.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
Returns
-------
bar_lam : `~astropy.units.quantity.Quantity`
Mean log wavelength.
"""
x = self._validate_wavelengths(wavelengths).value
y = self(x).value
num = np.trapz(y * np.log(x) / x, x=x)
den = np.trapz(y / x, x=x)
if num == 0 or den == 0: # pragma: no cover
bar_lam = 0.0 # depends on [control=['if'], data=[]]
else:
bar_lam = np.exp(abs(num / den))
return bar_lam * self._internal_wave_unit |
def get_lead(self, lead_id):
    """
    Get a specific lead saved on your account.

    :param lead_id: Id of the lead to search. Must be defined.
    :return: Lead found as a dict.
    """
    endpoint = self.base_endpoint.format('leads/{0}'.format(lead_id))
    return self._query_hunter(endpoint, self.base_params)
constant[
Get a specific lead saved on your account.
:param lead_id: Id of the lead to search. Must be defined.
:return: Lead found as a dict.
]
variable[params] assign[=] name[self].base_params
variable[endpoint] assign[=] call[name[self].base_endpoint.format, parameter[binary_operation[constant[leads/] + call[name[str], parameter[name[lead_id]]]]]]
return[call[name[self]._query_hunter, parameter[name[endpoint], name[params]]]] | keyword[def] identifier[get_lead] ( identifier[self] , identifier[lead_id] ):
literal[string]
identifier[params] = identifier[self] . identifier[base_params]
identifier[endpoint] = identifier[self] . identifier[base_endpoint] . identifier[format] ( literal[string] + identifier[str] ( identifier[lead_id] ))
keyword[return] identifier[self] . identifier[_query_hunter] ( identifier[endpoint] , identifier[params] ) | def get_lead(self, lead_id):
"""
Get a specific lead saved on your account.
:param lead_id: Id of the lead to search. Must be defined.
:return: Lead found as a dict.
"""
params = self.base_params
endpoint = self.base_endpoint.format('leads/' + str(lead_id))
return self._query_hunter(endpoint, params) |
def login_data_valid(self):
    """Check for working login data.

    Attempts a throwaway login session; a failed login raises LoginError.
    """
    try:
        with self._login(requests.Session()) as sess:
            sess.get(self._logout_url)
    except self.LoginError:
        return False
    return True
constant[Check for working login data.]
variable[login_working] assign[=] constant[False]
<ast.Try object at 0x7da18bcc80a0>
return[name[login_working]] | keyword[def] identifier[login_data_valid] ( identifier[self] ):
literal[string]
identifier[login_working] = keyword[False]
keyword[try] :
keyword[with] identifier[self] . identifier[_login] ( identifier[requests] . identifier[Session] ()) keyword[as] identifier[sess] :
identifier[sess] . identifier[get] ( identifier[self] . identifier[_logout_url] )
keyword[except] identifier[self] . identifier[LoginError] :
keyword[pass]
keyword[else] :
identifier[login_working] = keyword[True]
keyword[return] identifier[login_working] | def login_data_valid(self):
"""Check for working login data."""
login_working = False
try:
with self._login(requests.Session()) as sess:
sess.get(self._logout_url) # depends on [control=['with'], data=['sess']] # depends on [control=['try'], data=[]]
except self.LoginError:
pass # depends on [control=['except'], data=[]]
else:
login_working = True
return login_working |
def find_exe(name):
    """
    Find an executable with the given name on the system PATH.

    Each PATH directory is probed with a small set of common executable
    extensions, in order; the first existing, executable file wins.

    :param name: Base name of the executable (extension optional).
    :return: Full path to the executable, or None if not found.
    """
    # os.getenv('PATH') is None when PATH is unset in the environment;
    # fall back to an empty search path instead of crashing on None.split().
    search_path = os.getenv('PATH') or ''
    for directory in search_path.split(os.pathsep):
        if not directory:
            # Skip empty entries (stray separators) so we do not
            # accidentally match files relative to the current directory.
            continue
        for ext in ('', '.exe', '.cmd', '.bat', '.sh'):
            full_path = os.path.join(directory, name + ext)
            if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
                return full_path
    return None
constant[
Find an executable with the given name.
:param name:
:return:
]
for taget[name[path]] in starred[call[call[name[os].getenv, parameter[constant[PATH]]].split, parameter[name[os].pathsep]]] begin[:]
for taget[name[ext]] in starred[tuple[[<ast.Constant object at 0x7da2047eac80>, <ast.Constant object at 0x7da2047e8be0>, <ast.Constant object at 0x7da2047eb970>, <ast.Constant object at 0x7da2047ebac0>, <ast.Constant object at 0x7da2047e8c40>]]] begin[:]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[path], binary_operation[name[name] + name[ext]]]]
if <ast.BoolOp object at 0x7da2047e8c10> begin[:]
return[name[full_path]]
return[constant[None]] | keyword[def] identifier[find_exe] ( identifier[name] ):
literal[string]
keyword[for] identifier[path] keyword[in] identifier[os] . identifier[getenv] ( literal[string] ). identifier[split] ( identifier[os] . identifier[pathsep] ):
keyword[for] identifier[ext] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[name] + identifier[ext] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[full_path] ) keyword[and] identifier[os] . identifier[access] ( identifier[full_path] , identifier[os] . identifier[X_OK] ):
keyword[return] identifier[full_path]
keyword[return] keyword[None] | def find_exe(name):
"""
Find an executable with the given name.
:param name:
:return:
"""
for path in os.getenv('PATH').split(os.pathsep):
for ext in ('', '.exe', '.cmd', '.bat', '.sh'):
full_path = os.path.join(path, name + ext)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ext']] # depends on [control=['for'], data=['path']]
return None |
def add_condition(self, conkey, cond):
    """Add a condition, one of the addable ones.

    conkey: str
        One of 'cond', 'startcond' or 'stopcond'. 'start' and 'stop'
        are accepted as shorthand for 'startcond' and 'stopcond'. A
        conkey given with an explicit number (like 'stopcond3') that
        already exists is over-written, else created. With the trailing
        number implicit, the first condition whose value is None is
        taken; if no None value is found, a new condition is added.

    cond: str
        The condition string.

    .. note::
        Updates the mask if not no_auto.

    .. seealso::
        :meth:`~channelpack.ChannelPack.set_duration`
        :meth:`~channelpack.ChannelPack.set_samplerate`
        :meth:`~channelpack.ChannelPack.set_stopextend`
        :meth:`~channelpack.ChannelPack.clear_conditions`
    """
    # Accept the short forms for the start/stop conditions.
    if conkey in ('start', 'stop'):
        conkey += 'cond'
    # Reject keys that are not addable or not well-formed.
    if not any(conkey.startswith(prefix) for prefix in _ADDABLES):
        raise KeyError(conkey)
    if not self.conconf.valid_conkey(conkey):
        raise KeyError(conkey)
    self._parse_cond(cond)      # Raises on a malformed condition string.
    conkey = self.conconf.next_conkey(conkey)
    self.conconf.set_condition(conkey, cond)
    if not self.no_auto:
        self.make_mask()
constant[Add a condition, one of the addable ones.
conkey: str
One of 'cond', startcond' or 'stopcond'. 'start' or 'stop'
is accepted as shorts for 'startcond' or 'stopcond'. If the
conkey is given with an explicit number (like 'stopcond3')
and already exist, it will be over-written, else created.
When the trailing number is implicit, the first condition
with a value of None is taken. If no None value is found, a
new condition is added.
cond: str
The condition string. See ...
.. note::
Updates the mask if not no_auto.
.. seealso::
:meth:`~channelpack.ChannelPack.set_duration`
:meth:`~channelpack.ChannelPack.set_samplerate`
:meth:`~channelpack.ChannelPack.set_stopextend`
:meth:`~channelpack.ChannelPack.clear_conditions`
]
if <ast.BoolOp object at 0x7da204623d30> begin[:]
<ast.AugAssign object at 0x7da204620130>
if <ast.UnaryOp object at 0x7da204623b20> begin[:]
<ast.Raise object at 0x7da204621780>
if <ast.UnaryOp object at 0x7da2046225c0> begin[:]
<ast.Raise object at 0x7da204622380>
call[name[self]._parse_cond, parameter[name[cond]]]
variable[conkey] assign[=] call[name[self].conconf.next_conkey, parameter[name[conkey]]]
call[name[self].conconf.set_condition, parameter[name[conkey], name[cond]]]
if <ast.UnaryOp object at 0x7da204622230> begin[:]
call[name[self].make_mask, parameter[]] | keyword[def] identifier[add_condition] ( identifier[self] , identifier[conkey] , identifier[cond] ):
literal[string]
keyword[if] identifier[conkey] == literal[string] keyword[or] identifier[conkey] == literal[string] :
identifier[conkey] += literal[string]
keyword[if] keyword[not] identifier[any] ( identifier[conkey] . identifier[startswith] ( identifier[addable] ) keyword[for] identifier[addable] keyword[in] identifier[_ADDABLES] ):
keyword[raise] identifier[KeyError] ( identifier[conkey] )
keyword[if] keyword[not] identifier[self] . identifier[conconf] . identifier[valid_conkey] ( identifier[conkey] ):
keyword[raise] identifier[KeyError] ( identifier[conkey] )
identifier[self] . identifier[_parse_cond] ( identifier[cond] )
identifier[conkey] = identifier[self] . identifier[conconf] . identifier[next_conkey] ( identifier[conkey] )
identifier[self] . identifier[conconf] . identifier[set_condition] ( identifier[conkey] , identifier[cond] )
keyword[if] keyword[not] identifier[self] . identifier[no_auto] :
identifier[self] . identifier[make_mask] () | def add_condition(self, conkey, cond):
"""Add a condition, one of the addable ones.
conkey: str
One of 'cond', startcond' or 'stopcond'. 'start' or 'stop'
is accepted as shorts for 'startcond' or 'stopcond'. If the
conkey is given with an explicit number (like 'stopcond3')
and already exist, it will be over-written, else created.
When the trailing number is implicit, the first condition
with a value of None is taken. If no None value is found, a
new condition is added.
cond: str
The condition string. See ...
.. note::
Updates the mask if not no_auto.
.. seealso::
:meth:`~channelpack.ChannelPack.set_duration`
:meth:`~channelpack.ChannelPack.set_samplerate`
:meth:`~channelpack.ChannelPack.set_stopextend`
:meth:`~channelpack.ChannelPack.clear_conditions`
"""
# Audit:
if conkey == 'start' or conkey == 'stop':
conkey += 'cond' # depends on [control=['if'], data=[]]
if not any((conkey.startswith(addable) for addable in _ADDABLES)):
raise KeyError(conkey) # depends on [control=['if'], data=[]]
if not self.conconf.valid_conkey(conkey):
raise KeyError(conkey) # depends on [control=['if'], data=[]]
self._parse_cond(cond) # Checking
conkey = self.conconf.next_conkey(conkey)
self.conconf.set_condition(conkey, cond)
if not self.no_auto:
self.make_mask() # depends on [control=['if'], data=[]] |
def _record_count(self):
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: returns file position to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = (self.filepath_or_buffer.tell() -
self.record_start)
if total_records_length % 80 != 0:
warnings.warn("xport file may be corrupted")
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
# 8 byte blank
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length | def function[_record_count, parameter[self]]:
constant[
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: returns file position to record_start.
]
call[name[self].filepath_or_buffer.seek, parameter[constant[0], constant[2]]]
variable[total_records_length] assign[=] binary_operation[call[name[self].filepath_or_buffer.tell, parameter[]] - name[self].record_start]
if compare[binary_operation[name[total_records_length] <ast.Mod object at 0x7da2590d6920> constant[80]] not_equal[!=] constant[0]] begin[:]
call[name[warnings].warn, parameter[constant[xport file may be corrupted]]]
if compare[name[self].record_length greater[>] constant[80]] begin[:]
call[name[self].filepath_or_buffer.seek, parameter[name[self].record_start]]
return[binary_operation[name[total_records_length] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].record_length]]
call[name[self].filepath_or_buffer.seek, parameter[<ast.UnaryOp object at 0x7da18f00d270>, constant[2]]]
variable[last_card] assign[=] call[name[self].filepath_or_buffer.read, parameter[constant[80]]]
variable[last_card] assign[=] call[name[np].frombuffer, parameter[name[last_card]]]
variable[ix] assign[=] call[name[np].flatnonzero, parameter[compare[name[last_card] equal[==] constant[2314885530818453536]]]]
if compare[call[name[len], parameter[name[ix]]] equal[==] constant[0]] begin[:]
variable[tail_pad] assign[=] constant[0]
call[name[self].filepath_or_buffer.seek, parameter[name[self].record_start]]
return[binary_operation[binary_operation[name[total_records_length] - name[tail_pad]] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].record_length]] | keyword[def] identifier[_record_count] ( identifier[self] ):
literal[string]
identifier[self] . identifier[filepath_or_buffer] . identifier[seek] ( literal[int] , literal[int] )
identifier[total_records_length] =( identifier[self] . identifier[filepath_or_buffer] . identifier[tell] ()-
identifier[self] . identifier[record_start] )
keyword[if] identifier[total_records_length] % literal[int] != literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[if] identifier[self] . identifier[record_length] > literal[int] :
identifier[self] . identifier[filepath_or_buffer] . identifier[seek] ( identifier[self] . identifier[record_start] )
keyword[return] identifier[total_records_length] // identifier[self] . identifier[record_length]
identifier[self] . identifier[filepath_or_buffer] . identifier[seek] (- literal[int] , literal[int] )
identifier[last_card] = identifier[self] . identifier[filepath_or_buffer] . identifier[read] ( literal[int] )
identifier[last_card] = identifier[np] . identifier[frombuffer] ( identifier[last_card] , identifier[dtype] = identifier[np] . identifier[uint64] )
identifier[ix] = identifier[np] . identifier[flatnonzero] ( identifier[last_card] == literal[int] )
keyword[if] identifier[len] ( identifier[ix] )== literal[int] :
identifier[tail_pad] = literal[int]
keyword[else] :
identifier[tail_pad] = literal[int] * identifier[len] ( identifier[ix] )
identifier[self] . identifier[filepath_or_buffer] . identifier[seek] ( identifier[self] . identifier[record_start] )
keyword[return] ( identifier[total_records_length] - identifier[tail_pad] )// identifier[self] . identifier[record_length] | def _record_count(self):
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: returns file position to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = self.filepath_or_buffer.tell() - self.record_start
if total_records_length % 80 != 0:
warnings.warn('xport file may be corrupted') # depends on [control=['if'], data=[]]
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length # depends on [control=['if'], data=[]]
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
# 8 byte blank
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0 # depends on [control=['if'], data=[]]
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length |
async def prod(self, limit: int = None) -> int:
    """
    Service one scheduling quantum's worth of work for this node.

    This function is executed by the node each time it gets its share of
    CPU time from the event loop.

    :param limit: the number of items to be serviced in this attempt
    :return: total number of messages serviced by this node
    """
    c = 0  # running count of messages/actions serviced this pass
    # Record time elapsed since the previous prod() as looper run time.
    if self.last_prod_started:
        self.metrics.add_event(MetricsName.LOOPER_RUN_TIME_SPENT, time.perf_counter() - self.last_prod_started)
    self.last_prod_started = time.perf_counter()
    # Feed the current request backlog to the quota controller before servicing.
    self.quota_control.update_state({
        'request_queue_size': len(self.monitor.requestTracker.unordered())}
    )
    if self.status is not Status.stopped:
        # Service replicas first, then the node- and client-facing queues;
        # each service call honours the per-attempt `limit`.
        c += await self.serviceReplicas(limit)
        c += await self.serviceNodeMsgs(limit)
        c += await self.serviceClientMsgs(limit)
        with self.metrics.measure_time(MetricsName.SERVICE_NODE_ACTIONS_TIME):
            c += self._serviceActions()
        with self.metrics.measure_time(MetricsName.SERVICE_TIMERS_TIME):
            self.timer.service()
        with self.metrics.measure_time(MetricsName.SERVICE_MONITOR_ACTIONS_TIME):
            c += self.monitor._serviceActions()
        c += await self.serviceViewChanger(limit)
        c += await self.service_observable(limit)
        c += await self.service_observer(limit)
        # Flush any outbound node messages queued by the work above.
        with self.metrics.measure_time(MetricsName.FLUSH_OUTBOXES_TIME):
            self.nodestack.flushOutBoxes()
    if self.isGoing():
        # Node is up: keep the stack lifecycle and client connections serviced.
        with self.metrics.measure_time(MetricsName.SERVICE_NODE_LIFECYCLE_TIME):
            self.nodestack.serviceLifecycle()
        with self.metrics.measure_time(MetricsName.SERVICE_CLIENT_STACK_TIME):
            self.clientstack.serviceClientStack()
    return c
literal[string]
identifier[c] = literal[int]
keyword[if] identifier[self] . identifier[last_prod_started] :
identifier[self] . identifier[metrics] . identifier[add_event] ( identifier[MetricsName] . identifier[LOOPER_RUN_TIME_SPENT] , identifier[time] . identifier[perf_counter] ()- identifier[self] . identifier[last_prod_started] )
identifier[self] . identifier[last_prod_started] = identifier[time] . identifier[perf_counter] ()
identifier[self] . identifier[quota_control] . identifier[update_state] ({
literal[string] : identifier[len] ( identifier[self] . identifier[monitor] . identifier[requestTracker] . identifier[unordered] ())}
)
keyword[if] identifier[self] . identifier[status] keyword[is] keyword[not] identifier[Status] . identifier[stopped] :
identifier[c] += keyword[await] identifier[self] . identifier[serviceReplicas] ( identifier[limit] )
identifier[c] += keyword[await] identifier[self] . identifier[serviceNodeMsgs] ( identifier[limit] )
identifier[c] += keyword[await] identifier[self] . identifier[serviceClientMsgs] ( identifier[limit] )
keyword[with] identifier[self] . identifier[metrics] . identifier[measure_time] ( identifier[MetricsName] . identifier[SERVICE_NODE_ACTIONS_TIME] ):
identifier[c] += identifier[self] . identifier[_serviceActions] ()
keyword[with] identifier[self] . identifier[metrics] . identifier[measure_time] ( identifier[MetricsName] . identifier[SERVICE_TIMERS_TIME] ):
identifier[self] . identifier[timer] . identifier[service] ()
keyword[with] identifier[self] . identifier[metrics] . identifier[measure_time] ( identifier[MetricsName] . identifier[SERVICE_MONITOR_ACTIONS_TIME] ):
identifier[c] += identifier[self] . identifier[monitor] . identifier[_serviceActions] ()
identifier[c] += keyword[await] identifier[self] . identifier[serviceViewChanger] ( identifier[limit] )
identifier[c] += keyword[await] identifier[self] . identifier[service_observable] ( identifier[limit] )
identifier[c] += keyword[await] identifier[self] . identifier[service_observer] ( identifier[limit] )
keyword[with] identifier[self] . identifier[metrics] . identifier[measure_time] ( identifier[MetricsName] . identifier[FLUSH_OUTBOXES_TIME] ):
identifier[self] . identifier[nodestack] . identifier[flushOutBoxes] ()
keyword[if] identifier[self] . identifier[isGoing] ():
keyword[with] identifier[self] . identifier[metrics] . identifier[measure_time] ( identifier[MetricsName] . identifier[SERVICE_NODE_LIFECYCLE_TIME] ):
identifier[self] . identifier[nodestack] . identifier[serviceLifecycle] ()
keyword[with] identifier[self] . identifier[metrics] . identifier[measure_time] ( identifier[MetricsName] . identifier[SERVICE_CLIENT_STACK_TIME] ):
identifier[self] . identifier[clientstack] . identifier[serviceClientStack] ()
keyword[return] identifier[c] | async def prod(self, limit: int=None) -> int:
""".opened
This function is executed by the node each time it gets its share of
CPU time from the event loop.
:param limit: the number of items to be serviced in this attempt
:return: total number of messages serviced by this node
"""
c = 0
if self.last_prod_started:
self.metrics.add_event(MetricsName.LOOPER_RUN_TIME_SPENT, time.perf_counter() - self.last_prod_started) # depends on [control=['if'], data=[]]
self.last_prod_started = time.perf_counter()
self.quota_control.update_state({'request_queue_size': len(self.monitor.requestTracker.unordered())})
if self.status is not Status.stopped:
c += await self.serviceReplicas(limit)
c += await self.serviceNodeMsgs(limit)
c += await self.serviceClientMsgs(limit)
with self.metrics.measure_time(MetricsName.SERVICE_NODE_ACTIONS_TIME):
c += self._serviceActions() # depends on [control=['with'], data=[]]
with self.metrics.measure_time(MetricsName.SERVICE_TIMERS_TIME):
self.timer.service() # depends on [control=['with'], data=[]]
with self.metrics.measure_time(MetricsName.SERVICE_MONITOR_ACTIONS_TIME):
c += self.monitor._serviceActions() # depends on [control=['with'], data=[]]
c += await self.serviceViewChanger(limit)
c += await self.service_observable(limit)
c += await self.service_observer(limit)
with self.metrics.measure_time(MetricsName.FLUSH_OUTBOXES_TIME):
self.nodestack.flushOutBoxes() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
if self.isGoing():
with self.metrics.measure_time(MetricsName.SERVICE_NODE_LIFECYCLE_TIME):
self.nodestack.serviceLifecycle() # depends on [control=['with'], data=[]]
with self.metrics.measure_time(MetricsName.SERVICE_CLIENT_STACK_TIME):
self.clientstack.serviceClientStack() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
return c |
def component(self, extra_params=None):
    """
    The Component currently assigned to the Ticket, or None if unassigned
    or not found.
    """
    component_id = self.get('component_id', None)
    if not component_id:
        return None
    matches = self.space.components(id=component_id, extra_params=extra_params)
    return matches[0] if matches else None
return components[0] | def function[component, parameter[self, extra_params]]:
constant[
The Component currently assigned to the Ticket
]
if call[name[self].get, parameter[constant[component_id], constant[None]]] begin[:]
variable[components] assign[=] call[name[self].space.components, parameter[]]
if name[components] begin[:]
return[call[name[components]][constant[0]]] | keyword[def] identifier[component] ( identifier[self] , identifier[extra_params] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[get] ( literal[string] , keyword[None] ):
identifier[components] = identifier[self] . identifier[space] . identifier[components] ( identifier[id] = identifier[self] [ literal[string] ], identifier[extra_params] = identifier[extra_params] )
keyword[if] identifier[components] :
keyword[return] identifier[components] [ literal[int] ] | def component(self, extra_params=None):
"""
The Component currently assigned to the Ticket
"""
if self.get('component_id', None):
components = self.space.components(id=self['component_id'], extra_params=extra_params)
if components:
return components[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
    """
    Export entities from Cloud Datastore to Cloud Storage for backup.

    .. note::
        Keep in mind that this requests the Admin API not the Data API.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export

    :param bucket: The name of the Cloud Storage bucket.
    :type bucket: str
    :param namespace: The Cloud Storage namespace path.
    :type namespace: str
    :param entity_filter: Description of what data from the project is included in the export.
    :type entity_filter: dict
    :param labels: Client-assigned labels.
    :type labels: dict of str
    :return: a resource operation instance.
    :rtype: dict
    """
    admin_conn = self.get_conn()
    # Drop the namespace segment entirely when it is not given.
    output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
    body = {
        'outputUrlPrefix': output_uri_prefix,
        'entityFilter': entity_filter or {},
        'labels': labels or {},
    }
    return (admin_conn
            .projects()
            .export(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))
constant[
Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
]
variable[admin_conn] assign[=] call[name[self].get_conn, parameter[]]
variable[output_uri_prefix] assign[=] binary_operation[constant[gs://] + call[constant[/].join, parameter[call[name[filter], parameter[constant[None], list[[<ast.Name object at 0x7da20c6c6f20>, <ast.Name object at 0x7da20c6c5ea0>]]]]]]]
if <ast.UnaryOp object at 0x7da20c6c7460> begin[:]
variable[entity_filter] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da20c6c6830> begin[:]
variable[labels] assign[=] dictionary[[], []]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c56f0>, <ast.Constant object at 0x7da20c6c7790>, <ast.Constant object at 0x7da20c6c72e0>], [<ast.Name object at 0x7da20c6c4550>, <ast.Name object at 0x7da20c6c6620>, <ast.Name object at 0x7da20c6c7c10>]]
variable[resp] assign[=] call[call[call[name[admin_conn].projects, parameter[]].export, parameter[]].execute, parameter[]]
return[name[resp]] | keyword[def] identifier[export_to_storage_bucket] ( identifier[self] , identifier[bucket] , identifier[namespace] = keyword[None] , identifier[entity_filter] = keyword[None] , identifier[labels] = keyword[None] ):
literal[string]
identifier[admin_conn] = identifier[self] . identifier[get_conn] ()
identifier[output_uri_prefix] = literal[string] + literal[string] . identifier[join] ( identifier[filter] ( keyword[None] ,[ identifier[bucket] , identifier[namespace] ]))
keyword[if] keyword[not] identifier[entity_filter] :
identifier[entity_filter] ={}
keyword[if] keyword[not] identifier[labels] :
identifier[labels] ={}
identifier[body] ={
literal[string] : identifier[output_uri_prefix] ,
literal[string] : identifier[entity_filter] ,
literal[string] : identifier[labels] ,
}
identifier[resp] =( identifier[admin_conn]
. identifier[projects] ()
. identifier[export] ( identifier[projectId] = identifier[self] . identifier[project_id] , identifier[body] = identifier[body] )
. identifier[execute] ( identifier[num_retries] = identifier[self] . identifier[num_retries] ))
keyword[return] identifier[resp] | def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
"""
Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
if not entity_filter:
entity_filter = {} # depends on [control=['if'], data=[]]
if not labels:
labels = {} # depends on [control=['if'], data=[]]
body = {'outputUrlPrefix': output_uri_prefix, 'entityFilter': entity_filter, 'labels': labels}
resp = admin_conn.projects().export(projectId=self.project_id, body=body).execute(num_retries=self.num_retries)
return resp |
def insert(self, _values=None, **values):
"""
Insert a new record into the database
:param _values: The new record values
:type _values: dict or list
:param values: The new record values as keyword arguments
:type values: dict
:return: The result
:rtype: bool
"""
if not values and not _values:
return True
if not isinstance(_values, list):
if _values is not None:
values.update(_values)
values = [values]
else:
values = _values
for i, value in enumerate(values):
values[i] = OrderedDict(sorted(value.items()))
bindings = []
for record in values:
for value in record.values():
bindings.append(value)
sql = self._grammar.compile_insert(self, values)
bindings = self._clean_bindings(bindings)
return self._connection.insert(sql, bindings) | def function[insert, parameter[self, _values]]:
constant[
Insert a new record into the database
:param _values: The new record values
:type _values: dict or list
:param values: The new record values as keyword arguments
:type values: dict
:return: The result
:rtype: bool
]
if <ast.BoolOp object at 0x7da18eb54670> begin[:]
return[constant[True]]
if <ast.UnaryOp object at 0x7da18eb56c80> begin[:]
if compare[name[_values] is_not constant[None]] begin[:]
call[name[values].update, parameter[name[_values]]]
variable[values] assign[=] list[[<ast.Name object at 0x7da18eb54ac0>]]
variable[bindings] assign[=] list[[]]
for taget[name[record]] in starred[name[values]] begin[:]
for taget[name[value]] in starred[call[name[record].values, parameter[]]] begin[:]
call[name[bindings].append, parameter[name[value]]]
variable[sql] assign[=] call[name[self]._grammar.compile_insert, parameter[name[self], name[values]]]
variable[bindings] assign[=] call[name[self]._clean_bindings, parameter[name[bindings]]]
return[call[name[self]._connection.insert, parameter[name[sql], name[bindings]]]] | keyword[def] identifier[insert] ( identifier[self] , identifier[_values] = keyword[None] ,** identifier[values] ):
literal[string]
keyword[if] keyword[not] identifier[values] keyword[and] keyword[not] identifier[_values] :
keyword[return] keyword[True]
keyword[if] keyword[not] identifier[isinstance] ( identifier[_values] , identifier[list] ):
keyword[if] identifier[_values] keyword[is] keyword[not] keyword[None] :
identifier[values] . identifier[update] ( identifier[_values] )
identifier[values] =[ identifier[values] ]
keyword[else] :
identifier[values] = identifier[_values]
keyword[for] identifier[i] , identifier[value] keyword[in] identifier[enumerate] ( identifier[values] ):
identifier[values] [ identifier[i] ]= identifier[OrderedDict] ( identifier[sorted] ( identifier[value] . identifier[items] ()))
identifier[bindings] =[]
keyword[for] identifier[record] keyword[in] identifier[values] :
keyword[for] identifier[value] keyword[in] identifier[record] . identifier[values] ():
identifier[bindings] . identifier[append] ( identifier[value] )
identifier[sql] = identifier[self] . identifier[_grammar] . identifier[compile_insert] ( identifier[self] , identifier[values] )
identifier[bindings] = identifier[self] . identifier[_clean_bindings] ( identifier[bindings] )
keyword[return] identifier[self] . identifier[_connection] . identifier[insert] ( identifier[sql] , identifier[bindings] ) | def insert(self, _values=None, **values):
"""
Insert a new record into the database
:param _values: The new record values
:type _values: dict or list
:param values: The new record values as keyword arguments
:type values: dict
:return: The result
:rtype: bool
"""
if not values and (not _values):
return True # depends on [control=['if'], data=[]]
if not isinstance(_values, list):
if _values is not None:
values.update(_values) # depends on [control=['if'], data=['_values']]
values = [values] # depends on [control=['if'], data=[]]
else:
values = _values
for (i, value) in enumerate(values):
values[i] = OrderedDict(sorted(value.items())) # depends on [control=['for'], data=[]]
bindings = []
for record in values:
for value in record.values():
bindings.append(value) # depends on [control=['for'], data=['value']] # depends on [control=['for'], data=['record']]
sql = self._grammar.compile_insert(self, values)
bindings = self._clean_bindings(bindings)
return self._connection.insert(sql, bindings) |
def tearDown(self):
"""
Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass's tearDown():
super(SubClassOfBaseCase, self).tearDown()
"""
has_exception = False
if sys.version.startswith('3') and hasattr(self, '_outcome'):
if hasattr(self._outcome, 'errors') and self._outcome.errors:
has_exception = True
else:
has_exception = sys.exc_info()[1] is not None
if self.__delayed_assert_failures:
print(
"\nWhen using self.delayed_assert_*() methods in your tests, "
"remember to call self.process_delayed_asserts() afterwards. "
"Now calling in tearDown()...\nFailures Detected:")
if not has_exception:
self.process_delayed_asserts()
else:
self.process_delayed_asserts(print_only=True)
self.is_pytest = None
try:
# This raises an exception if the test is not coming from pytest
self.is_pytest = sb_config.is_pytest
except Exception:
# Not using pytest (probably nosetests)
self.is_pytest = False
if self.is_pytest:
# pytest-specific code
test_id = "%s.%s.%s" % (self.__class__.__module__,
self.__class__.__name__,
self._testMethodName)
try:
with_selenium = self.with_selenium
except Exception:
sub_class_name = str(
self.__class__.__bases__[0]).split('.')[-1].split("'")[0]
sub_file_name = str(self.__class__.__bases__[0]).split('.')[-2]
sub_file_name = sub_file_name + ".py"
class_name = str(self.__class__).split('.')[-1].split("'")[0]
file_name = str(self.__class__).split('.')[-2] + ".py"
class_name_used = sub_class_name
file_name_used = sub_file_name
if sub_class_name == "BaseCase":
class_name_used = class_name
file_name_used = file_name
fix_setup = "super(%s, self).setUp()" % class_name_used
fix_teardown = "super(%s, self).tearDown()" % class_name_used
message = ("You're overriding SeleniumBase's BaseCase setUp() "
"method with your own setUp() method, which breaks "
"SeleniumBase. You can fix this by going to your "
"%s class located in your %s file and adding the "
"following line of code AT THE BEGINNING of your "
"setUp() method:\n%s\n\nAlso make sure "
"you have added the following line of code AT THE "
"END of your tearDown() method:\n%s\n"
% (class_name_used, file_name_used,
fix_setup, fix_teardown))
raise Exception(message)
if with_selenium:
# Save a screenshot if logging is on when an exception occurs
if has_exception:
self.__add_pytest_html_extra()
if self.with_testing_base and not has_exception and (
self.save_screenshot_after_test):
test_logpath = self.log_path + "/" + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
self.__add_pytest_html_extra()
if self.with_testing_base and has_exception:
test_logpath = self.log_path + "/" + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
if ((not self.with_screen_shots) and (
not self.with_basic_test_info) and (
not self.with_page_source)):
# Log everything if nothing specified (if testing_base)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
log_helper.log_test_failure_data(
self, test_logpath, self.driver, self.browser)
log_helper.log_page_source(test_logpath, self.driver)
else:
if self.with_screen_shots:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
if self.with_basic_test_info:
log_helper.log_test_failure_data(
self, test_logpath, self.driver, self.browser)
if self.with_page_source:
log_helper.log_page_source(
test_logpath, self.driver)
# (Pytest) Finally close all open browser windows
self.__quit_all_drivers()
if self.headless:
if self.headless_active:
self.display.stop()
self.display = None
if self.with_db_reporting:
if has_exception:
self.__insert_test_result(constants.State.ERROR, True)
else:
self.__insert_test_result(constants.State.PASS, False)
runtime = int(time.time() * 1000) - self.execution_start_time
self.testcase_manager.update_execution_data(
self.execution_guid, runtime)
if self.with_s3_logging and has_exception:
""" If enabled, upload logs to S3 during test exceptions. """
from seleniumbase.core.s3_manager import S3LoggingBucket
s3_bucket = S3LoggingBucket()
guid = str(uuid.uuid4().hex)
path = "%s/%s" % (self.log_path, test_id)
uploaded_files = []
for logfile in os.listdir(path):
logfile_name = "%s/%s/%s" % (guid,
test_id,
logfile.split(path)[-1])
s3_bucket.upload_file(logfile_name,
"%s/%s" % (path, logfile))
uploaded_files.append(logfile_name)
s3_bucket.save_uploaded_file_names(uploaded_files)
index_file = s3_bucket.upload_index_file(test_id, guid)
print("\n\n*** Log files uploaded: ***\n%s\n" % index_file)
logging.error(
"\n\n*** Log files uploaded: ***\n%s\n" % index_file)
if self.with_db_reporting:
self.testcase_manager = TestcaseManager(self.database_env)
data_payload = TestcaseDataPayload()
data_payload.guid = self.testcase_guid
data_payload.logURL = index_file
self.testcase_manager.update_testcase_log_url(data_payload)
else:
# (Nosetests)
if has_exception:
test_id = "%s.%s.%s" % (self.__class__.__module__,
self.__class__.__name__,
self._testMethodName)
test_logpath = "latest_logs/" + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
log_helper.log_test_failure_data(
self, test_logpath, self.driver, self.browser)
if len(self._drivers_list) > 0:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
log_helper.log_page_source(test_logpath, self.driver)
elif self.save_screenshot_after_test:
test_id = "%s.%s.%s" % (self.__class__.__module__,
self.__class__.__name__,
self._testMethodName)
test_logpath = "latest_logs/" + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
if self.report_on:
self._last_page_screenshot = self.__last_page_screenshot_png
try:
self._last_page_url = self.get_current_url()
except Exception:
self._last_page_url = "(Error: Unknown URL)"
# Finally close all open browser windows
self.__quit_all_drivers() | def function[tearDown, parameter[self]]:
constant[
Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass's tearDown():
super(SubClassOfBaseCase, self).tearDown()
]
variable[has_exception] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1b9dea0> begin[:]
if <ast.BoolOp object at 0x7da1b1b9e290> begin[:]
variable[has_exception] assign[=] constant[True]
if name[self].__delayed_assert_failures begin[:]
call[name[print], parameter[constant[
When using self.delayed_assert_*() methods in your tests, remember to call self.process_delayed_asserts() afterwards. Now calling in tearDown()...
Failures Detected:]]]
if <ast.UnaryOp object at 0x7da1b1b199c0> begin[:]
call[name[self].process_delayed_asserts, parameter[]]
name[self].is_pytest assign[=] constant[None]
<ast.Try object at 0x7da1b1b19a20>
if name[self].is_pytest begin[:]
variable[test_id] assign[=] binary_operation[constant[%s.%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1b18400>, <ast.Attribute object at 0x7da1b1b18550>, <ast.Attribute object at 0x7da1b1b19180>]]]
<ast.Try object at 0x7da1b1b18760>
if name[with_selenium] begin[:]
if name[has_exception] begin[:]
call[name[self].__add_pytest_html_extra, parameter[]]
if <ast.BoolOp object at 0x7da1b1b9d720> begin[:]
variable[test_logpath] assign[=] binary_operation[binary_operation[name[self].log_path + constant[/]] + name[test_id]]
if <ast.UnaryOp object at 0x7da1b1b9f0a0> begin[:]
<ast.Try object at 0x7da1b1b9f250>
if <ast.UnaryOp object at 0x7da1b1b9eef0> begin[:]
call[name[self].__set_last_page_screenshot, parameter[]]
call[name[log_helper].log_screenshot, parameter[name[test_logpath], name[self].driver, name[self].__last_page_screenshot_png]]
call[name[self].__add_pytest_html_extra, parameter[]]
if <ast.BoolOp object at 0x7da1b1b9d4b0> begin[:]
variable[test_logpath] assign[=] binary_operation[binary_operation[name[self].log_path + constant[/]] + name[test_id]]
if <ast.UnaryOp object at 0x7da1b1b9c280> begin[:]
<ast.Try object at 0x7da1b1b9d270>
if <ast.BoolOp object at 0x7da1b1b9c460> begin[:]
if <ast.UnaryOp object at 0x7da1b1b9c0d0> begin[:]
call[name[self].__set_last_page_screenshot, parameter[]]
call[name[log_helper].log_screenshot, parameter[name[test_logpath], name[self].driver, name[self].__last_page_screenshot_png]]
call[name[log_helper].log_test_failure_data, parameter[name[self], name[test_logpath], name[self].driver, name[self].browser]]
call[name[log_helper].log_page_source, parameter[name[test_logpath], name[self].driver]]
call[name[self].__quit_all_drivers, parameter[]]
if name[self].headless begin[:]
if name[self].headless_active begin[:]
call[name[self].display.stop, parameter[]]
name[self].display assign[=] constant[None]
if name[self].with_db_reporting begin[:]
if name[has_exception] begin[:]
call[name[self].__insert_test_result, parameter[name[constants].State.ERROR, constant[True]]]
variable[runtime] assign[=] binary_operation[call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000]]]] - name[self].execution_start_time]
call[name[self].testcase_manager.update_execution_data, parameter[name[self].execution_guid, name[runtime]]]
if <ast.BoolOp object at 0x7da1b1b35a80> begin[:]
constant[ If enabled, upload logs to S3 during test exceptions. ]
from relative_module[seleniumbase.core.s3_manager] import module[S3LoggingBucket]
variable[s3_bucket] assign[=] call[name[S3LoggingBucket], parameter[]]
variable[guid] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]].hex]]
variable[path] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1bc3f70>, <ast.Name object at 0x7da1b1bc32b0>]]]
variable[uploaded_files] assign[=] list[[]]
for taget[name[logfile]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:]
variable[logfile_name] assign[=] binary_operation[constant[%s/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1bc1330>, <ast.Name object at 0x7da1b1bc0850>, <ast.Subscript object at 0x7da1b1bc2230>]]]
call[name[s3_bucket].upload_file, parameter[name[logfile_name], binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1bc1c30>, <ast.Name object at 0x7da1b1bc3bb0>]]]]]
call[name[uploaded_files].append, parameter[name[logfile_name]]]
call[name[s3_bucket].save_uploaded_file_names, parameter[name[uploaded_files]]]
variable[index_file] assign[=] call[name[s3_bucket].upload_index_file, parameter[name[test_id], name[guid]]]
call[name[print], parameter[binary_operation[constant[
*** Log files uploaded: ***
%s
] <ast.Mod object at 0x7da2590d6920> name[index_file]]]]
call[name[logging].error, parameter[binary_operation[constant[
*** Log files uploaded: ***
%s
] <ast.Mod object at 0x7da2590d6920> name[index_file]]]]
if name[self].with_db_reporting begin[:]
name[self].testcase_manager assign[=] call[name[TestcaseManager], parameter[name[self].database_env]]
variable[data_payload] assign[=] call[name[TestcaseDataPayload], parameter[]]
name[data_payload].guid assign[=] name[self].testcase_guid
name[data_payload].logURL assign[=] name[index_file]
call[name[self].testcase_manager.update_testcase_log_url, parameter[name[data_payload]]] | keyword[def] identifier[tearDown] ( identifier[self] ):
literal[string]
identifier[has_exception] = keyword[False]
keyword[if] identifier[sys] . identifier[version] . identifier[startswith] ( literal[string] ) keyword[and] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] identifier[hasattr] ( identifier[self] . identifier[_outcome] , literal[string] ) keyword[and] identifier[self] . identifier[_outcome] . identifier[errors] :
identifier[has_exception] = keyword[True]
keyword[else] :
identifier[has_exception] = identifier[sys] . identifier[exc_info] ()[ literal[int] ] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[self] . identifier[__delayed_assert_failures] :
identifier[print] (
literal[string]
literal[string]
literal[string] )
keyword[if] keyword[not] identifier[has_exception] :
identifier[self] . identifier[process_delayed_asserts] ()
keyword[else] :
identifier[self] . identifier[process_delayed_asserts] ( identifier[print_only] = keyword[True] )
identifier[self] . identifier[is_pytest] = keyword[None]
keyword[try] :
identifier[self] . identifier[is_pytest] = identifier[sb_config] . identifier[is_pytest]
keyword[except] identifier[Exception] :
identifier[self] . identifier[is_pytest] = keyword[False]
keyword[if] identifier[self] . identifier[is_pytest] :
identifier[test_id] = literal[string] %( identifier[self] . identifier[__class__] . identifier[__module__] ,
identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[self] . identifier[_testMethodName] )
keyword[try] :
identifier[with_selenium] = identifier[self] . identifier[with_selenium]
keyword[except] identifier[Exception] :
identifier[sub_class_name] = identifier[str] (
identifier[self] . identifier[__class__] . identifier[__bases__] [ literal[int] ]). identifier[split] ( literal[string] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[sub_file_name] = identifier[str] ( identifier[self] . identifier[__class__] . identifier[__bases__] [ literal[int] ]). identifier[split] ( literal[string] )[- literal[int] ]
identifier[sub_file_name] = identifier[sub_file_name] + literal[string]
identifier[class_name] = identifier[str] ( identifier[self] . identifier[__class__] ). identifier[split] ( literal[string] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[file_name] = identifier[str] ( identifier[self] . identifier[__class__] ). identifier[split] ( literal[string] )[- literal[int] ]+ literal[string]
identifier[class_name_used] = identifier[sub_class_name]
identifier[file_name_used] = identifier[sub_file_name]
keyword[if] identifier[sub_class_name] == literal[string] :
identifier[class_name_used] = identifier[class_name]
identifier[file_name_used] = identifier[file_name]
identifier[fix_setup] = literal[string] % identifier[class_name_used]
identifier[fix_teardown] = literal[string] % identifier[class_name_used]
identifier[message] =( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
%( identifier[class_name_used] , identifier[file_name_used] ,
identifier[fix_setup] , identifier[fix_teardown] ))
keyword[raise] identifier[Exception] ( identifier[message] )
keyword[if] identifier[with_selenium] :
keyword[if] identifier[has_exception] :
identifier[self] . identifier[__add_pytest_html_extra] ()
keyword[if] identifier[self] . identifier[with_testing_base] keyword[and] keyword[not] identifier[has_exception] keyword[and] (
identifier[self] . identifier[save_screenshot_after_test] ):
identifier[test_logpath] = identifier[self] . identifier[log_path] + literal[string] + identifier[test_id]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[test_logpath] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[test_logpath] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] keyword[not] identifier[self] . identifier[__last_page_screenshot_png] :
identifier[self] . identifier[__set_last_page_screenshot] ()
identifier[log_helper] . identifier[log_screenshot] (
identifier[test_logpath] ,
identifier[self] . identifier[driver] ,
identifier[self] . identifier[__last_page_screenshot_png] )
identifier[self] . identifier[__add_pytest_html_extra] ()
keyword[if] identifier[self] . identifier[with_testing_base] keyword[and] identifier[has_exception] :
identifier[test_logpath] = identifier[self] . identifier[log_path] + literal[string] + identifier[test_id]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[test_logpath] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[test_logpath] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] (( keyword[not] identifier[self] . identifier[with_screen_shots] ) keyword[and] (
keyword[not] identifier[self] . identifier[with_basic_test_info] ) keyword[and] (
keyword[not] identifier[self] . identifier[with_page_source] )):
keyword[if] keyword[not] identifier[self] . identifier[__last_page_screenshot_png] :
identifier[self] . identifier[__set_last_page_screenshot] ()
identifier[log_helper] . identifier[log_screenshot] (
identifier[test_logpath] ,
identifier[self] . identifier[driver] ,
identifier[self] . identifier[__last_page_screenshot_png] )
identifier[log_helper] . identifier[log_test_failure_data] (
identifier[self] , identifier[test_logpath] , identifier[self] . identifier[driver] , identifier[self] . identifier[browser] )
identifier[log_helper] . identifier[log_page_source] ( identifier[test_logpath] , identifier[self] . identifier[driver] )
keyword[else] :
keyword[if] identifier[self] . identifier[with_screen_shots] :
keyword[if] keyword[not] identifier[self] . identifier[__last_page_screenshot_png] :
identifier[self] . identifier[__set_last_page_screenshot] ()
identifier[log_helper] . identifier[log_screenshot] (
identifier[test_logpath] ,
identifier[self] . identifier[driver] ,
identifier[self] . identifier[__last_page_screenshot_png] )
keyword[if] identifier[self] . identifier[with_basic_test_info] :
identifier[log_helper] . identifier[log_test_failure_data] (
identifier[self] , identifier[test_logpath] , identifier[self] . identifier[driver] , identifier[self] . identifier[browser] )
keyword[if] identifier[self] . identifier[with_page_source] :
identifier[log_helper] . identifier[log_page_source] (
identifier[test_logpath] , identifier[self] . identifier[driver] )
identifier[self] . identifier[__quit_all_drivers] ()
keyword[if] identifier[self] . identifier[headless] :
keyword[if] identifier[self] . identifier[headless_active] :
identifier[self] . identifier[display] . identifier[stop] ()
identifier[self] . identifier[display] = keyword[None]
keyword[if] identifier[self] . identifier[with_db_reporting] :
keyword[if] identifier[has_exception] :
identifier[self] . identifier[__insert_test_result] ( identifier[constants] . identifier[State] . identifier[ERROR] , keyword[True] )
keyword[else] :
identifier[self] . identifier[__insert_test_result] ( identifier[constants] . identifier[State] . identifier[PASS] , keyword[False] )
identifier[runtime] = identifier[int] ( identifier[time] . identifier[time] ()* literal[int] )- identifier[self] . identifier[execution_start_time]
identifier[self] . identifier[testcase_manager] . identifier[update_execution_data] (
identifier[self] . identifier[execution_guid] , identifier[runtime] )
keyword[if] identifier[self] . identifier[with_s3_logging] keyword[and] identifier[has_exception] :
literal[string]
keyword[from] identifier[seleniumbase] . identifier[core] . identifier[s3_manager] keyword[import] identifier[S3LoggingBucket]
identifier[s3_bucket] = identifier[S3LoggingBucket] ()
identifier[guid] = identifier[str] ( identifier[uuid] . identifier[uuid4] (). identifier[hex] )
identifier[path] = literal[string] %( identifier[self] . identifier[log_path] , identifier[test_id] )
identifier[uploaded_files] =[]
keyword[for] identifier[logfile] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ):
identifier[logfile_name] = literal[string] %( identifier[guid] ,
identifier[test_id] ,
identifier[logfile] . identifier[split] ( identifier[path] )[- literal[int] ])
identifier[s3_bucket] . identifier[upload_file] ( identifier[logfile_name] ,
literal[string] %( identifier[path] , identifier[logfile] ))
identifier[uploaded_files] . identifier[append] ( identifier[logfile_name] )
identifier[s3_bucket] . identifier[save_uploaded_file_names] ( identifier[uploaded_files] )
identifier[index_file] = identifier[s3_bucket] . identifier[upload_index_file] ( identifier[test_id] , identifier[guid] )
identifier[print] ( literal[string] % identifier[index_file] )
identifier[logging] . identifier[error] (
literal[string] % identifier[index_file] )
keyword[if] identifier[self] . identifier[with_db_reporting] :
identifier[self] . identifier[testcase_manager] = identifier[TestcaseManager] ( identifier[self] . identifier[database_env] )
identifier[data_payload] = identifier[TestcaseDataPayload] ()
identifier[data_payload] . identifier[guid] = identifier[self] . identifier[testcase_guid]
identifier[data_payload] . identifier[logURL] = identifier[index_file]
identifier[self] . identifier[testcase_manager] . identifier[update_testcase_log_url] ( identifier[data_payload] )
keyword[else] :
keyword[if] identifier[has_exception] :
identifier[test_id] = literal[string] %( identifier[self] . identifier[__class__] . identifier[__module__] ,
identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[self] . identifier[_testMethodName] )
identifier[test_logpath] = literal[string] + identifier[test_id]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[test_logpath] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[test_logpath] )
keyword[except] identifier[Exception] :
keyword[pass]
identifier[log_helper] . identifier[log_test_failure_data] (
identifier[self] , identifier[test_logpath] , identifier[self] . identifier[driver] , identifier[self] . identifier[browser] )
keyword[if] identifier[len] ( identifier[self] . identifier[_drivers_list] )> literal[int] :
keyword[if] keyword[not] identifier[self] . identifier[__last_page_screenshot_png] :
identifier[self] . identifier[__set_last_page_screenshot] ()
identifier[log_helper] . identifier[log_screenshot] (
identifier[test_logpath] ,
identifier[self] . identifier[driver] ,
identifier[self] . identifier[__last_page_screenshot_png] )
identifier[log_helper] . identifier[log_page_source] ( identifier[test_logpath] , identifier[self] . identifier[driver] )
keyword[elif] identifier[self] . identifier[save_screenshot_after_test] :
identifier[test_id] = literal[string] %( identifier[self] . identifier[__class__] . identifier[__module__] ,
identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[self] . identifier[_testMethodName] )
identifier[test_logpath] = literal[string] + identifier[test_id]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[test_logpath] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[test_logpath] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] keyword[not] identifier[self] . identifier[__last_page_screenshot_png] :
identifier[self] . identifier[__set_last_page_screenshot] ()
identifier[log_helper] . identifier[log_screenshot] (
identifier[test_logpath] ,
identifier[self] . identifier[driver] ,
identifier[self] . identifier[__last_page_screenshot_png] )
keyword[if] identifier[self] . identifier[report_on] :
identifier[self] . identifier[_last_page_screenshot] = identifier[self] . identifier[__last_page_screenshot_png]
keyword[try] :
identifier[self] . identifier[_last_page_url] = identifier[self] . identifier[get_current_url] ()
keyword[except] identifier[Exception] :
identifier[self] . identifier[_last_page_url] = literal[string]
identifier[self] . identifier[__quit_all_drivers] () | def tearDown(self):
"""
Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass's tearDown():
super(SubClassOfBaseCase, self).tearDown()
"""
has_exception = False
if sys.version.startswith('3') and hasattr(self, '_outcome'):
if hasattr(self._outcome, 'errors') and self._outcome.errors:
has_exception = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
has_exception = sys.exc_info()[1] is not None
if self.__delayed_assert_failures:
print('\nWhen using self.delayed_assert_*() methods in your tests, remember to call self.process_delayed_asserts() afterwards. Now calling in tearDown()...\nFailures Detected:')
if not has_exception:
self.process_delayed_asserts() # depends on [control=['if'], data=[]]
else:
self.process_delayed_asserts(print_only=True) # depends on [control=['if'], data=[]]
self.is_pytest = None
try:
# This raises an exception if the test is not coming from pytest
self.is_pytest = sb_config.is_pytest # depends on [control=['try'], data=[]]
except Exception:
# Not using pytest (probably nosetests)
self.is_pytest = False # depends on [control=['except'], data=[]]
if self.is_pytest:
# pytest-specific code
test_id = '%s.%s.%s' % (self.__class__.__module__, self.__class__.__name__, self._testMethodName)
try:
with_selenium = self.with_selenium # depends on [control=['try'], data=[]]
except Exception:
sub_class_name = str(self.__class__.__bases__[0]).split('.')[-1].split("'")[0]
sub_file_name = str(self.__class__.__bases__[0]).split('.')[-2]
sub_file_name = sub_file_name + '.py'
class_name = str(self.__class__).split('.')[-1].split("'")[0]
file_name = str(self.__class__).split('.')[-2] + '.py'
class_name_used = sub_class_name
file_name_used = sub_file_name
if sub_class_name == 'BaseCase':
class_name_used = class_name
file_name_used = file_name # depends on [control=['if'], data=[]]
fix_setup = 'super(%s, self).setUp()' % class_name_used
fix_teardown = 'super(%s, self).tearDown()' % class_name_used
message = "You're overriding SeleniumBase's BaseCase setUp() method with your own setUp() method, which breaks SeleniumBase. You can fix this by going to your %s class located in your %s file and adding the following line of code AT THE BEGINNING of your setUp() method:\n%s\n\nAlso make sure you have added the following line of code AT THE END of your tearDown() method:\n%s\n" % (class_name_used, file_name_used, fix_setup, fix_teardown)
raise Exception(message) # depends on [control=['except'], data=[]]
if with_selenium:
# Save a screenshot if logging is on when an exception occurs
if has_exception:
self.__add_pytest_html_extra() # depends on [control=['if'], data=[]]
if self.with_testing_base and (not has_exception) and self.save_screenshot_after_test:
test_logpath = self.log_path + '/' + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath) # depends on [control=['try'], data=[]]
except Exception:
pass # Only reachable during multi-threaded runs # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot() # depends on [control=['if'], data=[]]
log_helper.log_screenshot(test_logpath, self.driver, self.__last_page_screenshot_png)
self.__add_pytest_html_extra() # depends on [control=['if'], data=[]]
if self.with_testing_base and has_exception:
test_logpath = self.log_path + '/' + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath) # depends on [control=['try'], data=[]]
except Exception:
pass # Only reachable during multi-threaded runs # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if not self.with_screen_shots and (not self.with_basic_test_info) and (not self.with_page_source):
# Log everything if nothing specified (if testing_base)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot() # depends on [control=['if'], data=[]]
log_helper.log_screenshot(test_logpath, self.driver, self.__last_page_screenshot_png)
log_helper.log_test_failure_data(self, test_logpath, self.driver, self.browser)
log_helper.log_page_source(test_logpath, self.driver) # depends on [control=['if'], data=[]]
else:
if self.with_screen_shots:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot() # depends on [control=['if'], data=[]]
log_helper.log_screenshot(test_logpath, self.driver, self.__last_page_screenshot_png) # depends on [control=['if'], data=[]]
if self.with_basic_test_info:
log_helper.log_test_failure_data(self, test_logpath, self.driver, self.browser) # depends on [control=['if'], data=[]]
if self.with_page_source:
log_helper.log_page_source(test_logpath, self.driver) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# (Pytest) Finally close all open browser windows
self.__quit_all_drivers() # depends on [control=['if'], data=[]]
if self.headless:
if self.headless_active:
self.display.stop()
self.display = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.with_db_reporting:
if has_exception:
self.__insert_test_result(constants.State.ERROR, True) # depends on [control=['if'], data=[]]
else:
self.__insert_test_result(constants.State.PASS, False)
runtime = int(time.time() * 1000) - self.execution_start_time
self.testcase_manager.update_execution_data(self.execution_guid, runtime) # depends on [control=['if'], data=[]]
if self.with_s3_logging and has_exception:
' If enabled, upload logs to S3 during test exceptions. '
from seleniumbase.core.s3_manager import S3LoggingBucket
s3_bucket = S3LoggingBucket()
guid = str(uuid.uuid4().hex)
path = '%s/%s' % (self.log_path, test_id)
uploaded_files = []
for logfile in os.listdir(path):
logfile_name = '%s/%s/%s' % (guid, test_id, logfile.split(path)[-1])
s3_bucket.upload_file(logfile_name, '%s/%s' % (path, logfile))
uploaded_files.append(logfile_name) # depends on [control=['for'], data=['logfile']]
s3_bucket.save_uploaded_file_names(uploaded_files)
index_file = s3_bucket.upload_index_file(test_id, guid)
print('\n\n*** Log files uploaded: ***\n%s\n' % index_file)
logging.error('\n\n*** Log files uploaded: ***\n%s\n' % index_file)
if self.with_db_reporting:
self.testcase_manager = TestcaseManager(self.database_env)
data_payload = TestcaseDataPayload()
data_payload.guid = self.testcase_guid
data_payload.logURL = index_file
self.testcase_manager.update_testcase_log_url(data_payload) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# (Nosetests)
if has_exception:
test_id = '%s.%s.%s' % (self.__class__.__module__, self.__class__.__name__, self._testMethodName)
test_logpath = 'latest_logs/' + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath) # depends on [control=['try'], data=[]]
except Exception:
pass # Only reachable during multi-threaded runs # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
log_helper.log_test_failure_data(self, test_logpath, self.driver, self.browser)
if len(self._drivers_list) > 0:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot() # depends on [control=['if'], data=[]]
log_helper.log_screenshot(test_logpath, self.driver, self.__last_page_screenshot_png)
log_helper.log_page_source(test_logpath, self.driver) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.save_screenshot_after_test:
test_id = '%s.%s.%s' % (self.__class__.__module__, self.__class__.__name__, self._testMethodName)
test_logpath = 'latest_logs/' + test_id
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath) # depends on [control=['try'], data=[]]
except Exception:
pass # Only reachable during multi-threaded runs # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot() # depends on [control=['if'], data=[]]
log_helper.log_screenshot(test_logpath, self.driver, self.__last_page_screenshot_png) # depends on [control=['if'], data=[]]
if self.report_on:
self._last_page_screenshot = self.__last_page_screenshot_png
try:
self._last_page_url = self.get_current_url() # depends on [control=['try'], data=[]]
except Exception:
self._last_page_url = '(Error: Unknown URL)' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Finally close all open browser windows
self.__quit_all_drivers() |
def classe(self, name):
    """Return the class whose AST node is named *name*.

    :param name: the class name to look up
    :raises KeyError: if no class with that name exists
    """
    match = next((k for k in self.classes() if k.node.name == name), None)
    if match is None:
        raise KeyError(name)
    return match
constant[return a class by its name, raise KeyError if not found
]
for taget[name[klass]] in starred[call[name[self].classes, parameter[]]] begin[:]
if compare[name[klass].node.name equal[==] name[name]] begin[:]
return[name[klass]]
<ast.Raise object at 0x7da1b025bb50> | keyword[def] identifier[classe] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[klass] keyword[in] identifier[self] . identifier[classes] ():
keyword[if] identifier[klass] . identifier[node] . identifier[name] == identifier[name] :
keyword[return] identifier[klass]
keyword[raise] identifier[KeyError] ( identifier[name] ) | def classe(self, name):
"""return a class by its name, raise KeyError if not found
"""
for klass in self.classes():
if klass.node.name == name:
return klass # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['klass']]
raise KeyError(name) |
def _debugPrint(hsp, queryLen, localDict, msg=''):
"""
Print debugging information showing the local variables used during
a call to normalizeHSP and the hsp and then raise an C{AssertionError}.
@param hsp: The HSP C{dict} passed to normalizeHSP.
@param queryLen: the length of the query sequence.
@param localDict: A C{dict} of local variables (as produced by locals()).
@param msg: A C{str} message to raise C{AssertionError} with.
@raise AssertionError: unconditionally.
"""
print('normalizeHSP error:', file=sys.stderr)
print(' queryLen: %d' % queryLen, file=sys.stderr)
print(' Original HSP:', file=sys.stderr)
for attr in ['bits', 'btop', 'expect', 'frame', 'query_end', 'query_start',
'sbjct', 'query', 'sbjct_end', 'sbjct_start']:
print(' %s: %r' % (attr, hsp[attr]), file=sys.stderr)
print(' Local variables:', file=sys.stderr)
for var in sorted(localDict):
if var != 'hsp':
print(' %s: %s' % (var, localDict[var]), file=sys.stderr)
raise AssertionError(msg) | def function[_debugPrint, parameter[hsp, queryLen, localDict, msg]]:
constant[
Print debugging information showing the local variables used during
a call to normalizeHSP and the hsp and then raise an C{AssertionError}.
@param hsp: The HSP C{dict} passed to normalizeHSP.
@param queryLen: the length of the query sequence.
@param localDict: A C{dict} of local variables (as produced by locals()).
@param msg: A C{str} message to raise C{AssertionError} with.
@raise AssertionError: unconditionally.
]
call[name[print], parameter[constant[normalizeHSP error:]]]
call[name[print], parameter[binary_operation[constant[ queryLen: %d] <ast.Mod object at 0x7da2590d6920> name[queryLen]]]]
call[name[print], parameter[constant[ Original HSP:]]]
for taget[name[attr]] in starred[list[[<ast.Constant object at 0x7da2041d9ed0>, <ast.Constant object at 0x7da2041db730>, <ast.Constant object at 0x7da2041d85b0>, <ast.Constant object at 0x7da2041da8f0>, <ast.Constant object at 0x7da2041dbe20>, <ast.Constant object at 0x7da2041db2e0>, <ast.Constant object at 0x7da20e956bf0>, <ast.Constant object at 0x7da20e957dc0>, <ast.Constant object at 0x7da20e957790>, <ast.Constant object at 0x7da20e955000>]]] begin[:]
call[name[print], parameter[binary_operation[constant[ %s: %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e955d80>, <ast.Subscript object at 0x7da20e9564d0>]]]]]
call[name[print], parameter[constant[ Local variables:]]]
for taget[name[var]] in starred[call[name[sorted], parameter[name[localDict]]]] begin[:]
if compare[name[var] not_equal[!=] constant[hsp]] begin[:]
call[name[print], parameter[binary_operation[constant[ %s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e955540>, <ast.Subscript object at 0x7da20e954340>]]]]]
<ast.Raise object at 0x7da20e956320> | keyword[def] identifier[_debugPrint] ( identifier[hsp] , identifier[queryLen] , identifier[localDict] , identifier[msg] = literal[string] ):
literal[string]
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( literal[string] % identifier[queryLen] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[for] identifier[attr] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[print] ( literal[string] %( identifier[attr] , identifier[hsp] [ identifier[attr] ]), identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[for] identifier[var] keyword[in] identifier[sorted] ( identifier[localDict] ):
keyword[if] identifier[var] != literal[string] :
identifier[print] ( literal[string] %( identifier[var] , identifier[localDict] [ identifier[var] ]), identifier[file] = identifier[sys] . identifier[stderr] )
keyword[raise] identifier[AssertionError] ( identifier[msg] ) | def _debugPrint(hsp, queryLen, localDict, msg=''):
"""
Print debugging information showing the local variables used during
a call to normalizeHSP and the hsp and then raise an C{AssertionError}.
@param hsp: The HSP C{dict} passed to normalizeHSP.
@param queryLen: the length of the query sequence.
@param localDict: A C{dict} of local variables (as produced by locals()).
@param msg: A C{str} message to raise C{AssertionError} with.
@raise AssertionError: unconditionally.
"""
print('normalizeHSP error:', file=sys.stderr)
print(' queryLen: %d' % queryLen, file=sys.stderr)
print(' Original HSP:', file=sys.stderr)
for attr in ['bits', 'btop', 'expect', 'frame', 'query_end', 'query_start', 'sbjct', 'query', 'sbjct_end', 'sbjct_start']:
print(' %s: %r' % (attr, hsp[attr]), file=sys.stderr) # depends on [control=['for'], data=['attr']]
print(' Local variables:', file=sys.stderr)
for var in sorted(localDict):
if var != 'hsp':
print(' %s: %s' % (var, localDict[var]), file=sys.stderr) # depends on [control=['if'], data=['var']] # depends on [control=['for'], data=['var']]
raise AssertionError(msg) |
def _nav_to_tree(root):
    """Given an etree containing a navigation document structure
    rooted from the 'nav' element, parse to a tree:
    {'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
    """
    def expath(elem, query):
        return elem.xpath(query, namespaces=HTML_DOCUMENT_NAMESPACES)

    for item in expath(root, 'xhtml:ol/xhtml:li'):
        # A nested <ol> child marks this <li> as a sub-tree, not a leaf.
        has_sublist = any(child.tag[child.tag.find('}') + 1:] == 'ol'
                          for child in item.getchildren())
        if has_sublist:
            # Sub-tree: a title element plus a nested <ol> of children.
            yield {
                'id': item.get('cnx-archive-uri', 'subcol'),
                # Title is wrapped in a span, div or some other element...
                'title': squash_xml_to_text(expath(item, '*')[0],
                                            remove_namespaces=True),
                'shortId': item.get('cnx-archive-shortid'),
                'contents': list(_nav_to_tree(item)),
            }
        else:
            # Leaf node: the <li> wraps a single anchor.
            # NOTE(review): key spelling differs between branches
            # ('shortId' above vs 'shortid' here) -- preserved as-is since
            # consumers may rely on either; confirm before unifying.
            anchor = expath(item, 'xhtml:a')[0]
            yield {
                'id': anchor.get('href'),
                'shortid': item.get('cnx-archive-shortid'),
                'title': squash_xml_to_text(anchor, remove_namespaces=True),
            }
def get_dataset_date_as_datetime(self):
    # type: () -> Optional[datetime]
    """Get dataset date as datetime.datetime object. For range returns start date.

    Returns:
        Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set
    """
    raw = self.data.get('dataset_date')
    if not raw:
        return None
    # A date range is stored as 'start-end'; keep only the start part.
    # (partition returns the whole string unchanged when no '-' is present.)
    return datetime.strptime(raw.partition('-')[0], '%m/%d/%Y')
constant[Get dataset date as datetime.datetime object. For range returns start date.
Returns:
Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set
]
variable[dataset_date] assign[=] call[name[self].data.get, parameter[constant[dataset_date], constant[None]]]
if name[dataset_date] begin[:]
if compare[constant[-] in name[dataset_date]] begin[:]
variable[dataset_date] assign[=] call[call[name[dataset_date].split, parameter[constant[-]]]][constant[0]]
return[call[name[datetime].strptime, parameter[name[dataset_date], constant[%m/%d/%Y]]]] | keyword[def] identifier[get_dataset_date_as_datetime] ( identifier[self] ):
literal[string]
identifier[dataset_date] = identifier[self] . identifier[data] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[dataset_date] :
keyword[if] literal[string] keyword[in] identifier[dataset_date] :
identifier[dataset_date] = identifier[dataset_date] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] identifier[datetime] . identifier[strptime] ( identifier[dataset_date] , literal[string] )
keyword[else] :
keyword[return] keyword[None] | def get_dataset_date_as_datetime(self):
# type: () -> Optional[datetime]
'Get dataset date as datetime.datetime object. For range returns start date.\n\n Returns:\n Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set\n '
dataset_date = self.data.get('dataset_date', None)
if dataset_date:
if '-' in dataset_date:
dataset_date = dataset_date.split('-')[0] # depends on [control=['if'], data=['dataset_date']]
return datetime.strptime(dataset_date, '%m/%d/%Y') # depends on [control=['if'], data=[]]
else:
return None |
def button_with_image(size='medium', icon=None, text=None, title=None, button=False,
                      name=None, target=False, disabled=False, extra_css=None, modal=None,
                      data_target='#modal', viewname=None, next=None, extra_href=False, href=False,
                      fancyajax=False, viewargs=None, **kwargs):
    """
    Output a button using Bingo's button html.

    @param size: The size that refers to the icon css class.
    @param icon: A css class that refers to an icon - for example 'send_mail'
    @param text: Text to display on the button
    @param title: Title parameter for the button or if button=False the data-tip for an a tag.
    @param button: If true a button tag will be returned instead of an a tag.
    @param name: A name for the button
    @param target: A target for the a tag
    @param disabled: If true, css class 'disabled' will be added to the container div.
    @param extra_css: A string of extra css classes.
    @param modal: If true, Bootstrap modal data attributes are added to the a tag.
    @param data_target: Modal target selector (only emitted when ``modal`` is set).
    @param viewname: The django viewname as defined in a urls.py file.
    @param next: If specified ?next=[NEXT] will be appended to any generated link.
        (Shadows the builtin ``next``; kept for backward compatibility.)
    @param extra_href: Extra text appended to the generated href.
    @param href: A literal href used when ``viewname`` is not given.
    @param fancyajax: If true, fancybox ajax classes and attributes are added.
    @param viewargs: Positional args passed to ``reverse`` (defaults to no args).
    @param **kwargs: Remaining keyword args are supplied to django.core.urlresolvers.reverse
    @return string
    """
    # Avoid the shared-mutable-default pitfall; None stands in for [].
    if viewargs is None:
        viewargs = []
    html = '<div class="button-std'
    if disabled:
        html += ' disabled'
    html += '">'
    if button:
        html += '<button type="submit" name="%s" value="%s" class="%s">' % (name, title, icon)
        if icon:
            html += '<span class="icon-%s icon-%s-%s"></span>' % (size, size, icon)
        html += '<span>%s</span></button>' % (title)
    else:
        html += '<a class="%s' % (icon)
        if fancyajax:
            html += ' fancybox fancybox.ajax'
        if extra_css:
            html += ' %s' % (extra_css)
        html += '"'
        if modal:
            html += ' role="button" data-toggle="modal" data-remoteinbody="false"'
            if data_target:
                html += ' data-target="#modal"'
        if fancyajax:
            html += ' data-spinner-append="1"'
        if viewname:
            html += ' href="%s' % (reverse(viewname, args=viewargs, kwargs=kwargs))
        elif href:
            html += ' href="%s' % (href)
        if viewname or href:
            if next:
                if next.startswith('/'):
                    html += '?next=%s' % next
                else:
                    html += '?next=%s' % reverse(next)
            if extra_href:
                html += extra_href
            html += '"'
        if not title and text:
            title = text
        if title:
            html += ' data-tip="%s"' % (title)
        if target:
            html += ' target="%s"' % (target)
        html += '>'
        if icon:
            html += '<span class="icon-%s icon-%s-%s"></span>' % (size, size, icon)
        if text:
            html += '<span class="title">%s</span>' % (text)
        html += '</a>'
    # Close the container div on BOTH branches -- the button=True branch
    # previously returned unbalanced html with an unclosed <div>.
    html += '</div>'
    return html
constant[
Output a button using Bingo's button html.
@param size: The size that refers to the icon css class.
@param icon: A css class that refers to an icon - for example 'send_mail'
@param text: Text to display on the button
@param title: Title parameter for the button or if button=False the data-tip for an a tag.
@param button: If true a button tag will be returned.
@param name: A name for the button
@param target: A target for the a tag
@param disabled: If true, css class 'disabled' will be added to the container div.
@param extra_css: A string of extra css classes.
@param model: If True the class 'model' is placed on the a tag.
@param viewname: The django viewname as defined in a urls.py file.
@param **kwargs: Remaining keyword args are supplied to django.core.urlresolvers.reverse
@param next: If specified ?next=[NEXT] will be appended to any generated link.
@return string
]
variable[html] assign[=] constant[<div class="button-std]
if name[disabled] begin[:]
<ast.AugAssign object at 0x7da18f7210c0>
<ast.AugAssign object at 0x7da18f7223e0>
if name[button] begin[:]
<ast.AugAssign object at 0x7da18f723e80>
if name[icon] begin[:]
<ast.AugAssign object at 0x7da18f721a80>
<ast.AugAssign object at 0x7da18f7222c0>
return[name[html]] | keyword[def] identifier[button_with_image] ( identifier[size] = literal[string] , identifier[icon] = keyword[None] , identifier[text] = keyword[None] , identifier[title] = keyword[None] , identifier[button] = keyword[False] ,
identifier[name] = keyword[None] , identifier[target] = keyword[False] , identifier[disabled] = keyword[False] , identifier[extra_css] = keyword[None] , identifier[modal] = keyword[None] ,
identifier[data_target] = literal[string] , identifier[viewname] = keyword[None] , identifier[next] = keyword[None] , identifier[extra_href] = keyword[False] , identifier[href] = keyword[False] ,
identifier[fancyajax] = keyword[False] , identifier[viewargs] =[],** identifier[kwargs] ):
literal[string]
identifier[html] = literal[string]
keyword[if] identifier[disabled] :
identifier[html] += literal[string]
identifier[html] += literal[string]
keyword[if] identifier[button] :
identifier[html] += literal[string] %( identifier[name] , identifier[title] , identifier[icon] )
keyword[if] identifier[icon] :
identifier[html] += literal[string] %( identifier[size] , identifier[size] , identifier[icon] )
identifier[html] += literal[string] %( identifier[title] )
keyword[else] :
identifier[html] += literal[string] %( identifier[icon] )
keyword[if] identifier[fancyajax] :
identifier[html] += literal[string]
keyword[if] identifier[extra_css] :
identifier[html] += literal[string] %( identifier[extra_css] )
identifier[html] += literal[string]
keyword[if] identifier[modal] :
identifier[html] += literal[string]
keyword[if] identifier[data_target] :
identifier[html] += literal[string]
keyword[if] identifier[fancyajax] :
identifier[html] += literal[string]
keyword[if] identifier[viewname] :
identifier[html] += literal[string] %( identifier[reverse] ( identifier[viewname] , identifier[args] = identifier[viewargs] , identifier[kwargs] = identifier[kwargs] ))
keyword[elif] identifier[href] :
identifier[html] += literal[string] %( identifier[href] )
keyword[if] identifier[viewname] keyword[or] identifier[href] :
keyword[if] identifier[next] :
keyword[if] identifier[next] . identifier[startswith] ( literal[string] ):
identifier[html] += literal[string] % identifier[next]
keyword[else] :
identifier[html] += literal[string] % identifier[reverse] ( identifier[next] )
keyword[if] identifier[extra_href] :
identifier[html] += identifier[extra_href]
identifier[html] += literal[string]
keyword[if] keyword[not] identifier[title] keyword[and] identifier[text] :
identifier[title] = identifier[text]
keyword[if] identifier[title] :
identifier[html] += literal[string] %( identifier[title] )
keyword[if] identifier[target] :
identifier[html] += literal[string] %( identifier[target] )
identifier[html] += literal[string]
keyword[if] identifier[icon] :
identifier[html] += literal[string] %( identifier[size] , identifier[size] , identifier[icon] )
keyword[if] identifier[text] :
identifier[html] += literal[string] %( identifier[text] )
identifier[html] += literal[string]
keyword[return] identifier[html] | def button_with_image(size='medium', icon=None, text=None, title=None, button=False, name=None, target=False, disabled=False, extra_css=None, modal=None, data_target='#modal', viewname=None, next=None, extra_href=False, href=False, fancyajax=False, viewargs=[], **kwargs):
"""
Output a button using Bingo's button html.
@param size: The size that refers to the icon css class.
@param icon: A css class that refers to an icon - for example 'send_mail'
@param text: Text to display on the button
@param title: Title parameter for the button or if button=False the data-tip for an a tag.
@param button: If true a button tag will be returned.
@param name: A name for the button
@param target: A target for the a tag
@param disabled: If true, css class 'disabled' will be added to the container div.
@param extra_css: A string of extra css classes.
@param model: If True the class 'model' is placed on the a tag.
@param viewname: The django viewname as defined in a urls.py file.
@param **kwargs: Remaining keyword args are supplied to django.core.urlresolvers.reverse
@param next: If specified ?next=[NEXT] will be appended to any generated link.
@return string
"""
html = '<div class="button-std'
if disabled:
html += ' disabled' # depends on [control=['if'], data=[]]
html += '">'
if button:
html += '<button type="submit" name="%s" value="%s" class="%s">' % (name, title, icon)
if icon:
html += '<span class="icon-%s icon-%s-%s"></span>' % (size, size, icon) # depends on [control=['if'], data=[]]
html += '<span>%s</span></button>' % title # depends on [control=['if'], data=[]]
else:
html += '<a class="%s' % icon
if fancyajax:
html += ' fancybox fancybox.ajax' # depends on [control=['if'], data=[]]
if extra_css:
html += ' %s' % extra_css # depends on [control=['if'], data=[]]
html += '"'
if modal:
html += ' role="button" data-toggle="modal" data-remoteinbody="false"'
if data_target:
html += ' data-target="#modal"' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if fancyajax:
html += ' data-spinner-append="1"' # depends on [control=['if'], data=[]]
if viewname:
html += ' href="%s' % reverse(viewname, args=viewargs, kwargs=kwargs) # depends on [control=['if'], data=[]]
elif href:
html += ' href="%s' % href # depends on [control=['if'], data=[]]
if viewname or href:
if next:
if next.startswith('/'):
html += '?next=%s' % next # depends on [control=['if'], data=[]]
else:
html += '?next=%s' % reverse(next) # depends on [control=['if'], data=[]]
if extra_href:
html += extra_href # depends on [control=['if'], data=[]]
html += '"' # depends on [control=['if'], data=[]]
if not title and text:
title = text # depends on [control=['if'], data=[]]
if title:
html += ' data-tip="%s"' % title # depends on [control=['if'], data=[]]
if target:
html += ' target="%s"' % target # depends on [control=['if'], data=[]]
html += '>'
if icon:
html += '<span class="icon-%s icon-%s-%s"></span>' % (size, size, icon) # depends on [control=['if'], data=[]]
if text:
html += '<span class="title">%s</span>' % text # depends on [control=['if'], data=[]]
html += '</a></div>'
return html |
def register_dde_task(self, *args, **kwargs):
    """Register a Dde task."""
    # Force the task class, overriding any caller-supplied value.
    merged = dict(kwargs, task_class=DdeTask)
    return self.register_task(*args, **merged)
constant[Register a Dde task.]
call[name[kwargs]][constant[task_class]] assign[=] name[DdeTask]
return[call[name[self].register_task, parameter[<ast.Starred object at 0x7da1b2187400>]]] | keyword[def] identifier[register_dde_task] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= identifier[DdeTask]
keyword[return] identifier[self] . identifier[register_task] (* identifier[args] ,** identifier[kwargs] ) | def register_dde_task(self, *args, **kwargs):
"""Register a Dde task."""
kwargs['task_class'] = DdeTask
return self.register_task(*args, **kwargs) |
def add_action_callback(self, key_value, modifier_mask, a_dict=False):
    """Callback method for add action"""
    # Evaluate the event check first (it may have side effects), then make
    # sure no inline editor is currently open before adding.
    triggered = react_to_event(self.view, self.tree_view,
                               event=(key_value, modifier_mask))
    if triggered and self.active_entry_widget is None:
        self.on_add(None, a_dict)
        return True
constant[Callback method for add action]
if <ast.BoolOp object at 0x7da1b1c7ffa0> begin[:]
call[name[self].on_add, parameter[constant[None], name[a_dict]]]
return[constant[True]] | keyword[def] identifier[add_action_callback] ( identifier[self] , identifier[key_value] , identifier[modifier_mask] , identifier[a_dict] = keyword[False] ):
literal[string]
keyword[if] identifier[react_to_event] ( identifier[self] . identifier[view] , identifier[self] . identifier[tree_view] , identifier[event] =( identifier[key_value] , identifier[modifier_mask] )) keyword[and] identifier[self] . identifier[active_entry_widget] keyword[is] keyword[None] :
identifier[self] . identifier[on_add] ( keyword[None] , identifier[a_dict] )
keyword[return] keyword[True] | def add_action_callback(self, key_value, modifier_mask, a_dict=False):
"""Callback method for add action"""
if react_to_event(self.view, self.tree_view, event=(key_value, modifier_mask)) and self.active_entry_widget is None:
self.on_add(None, a_dict)
return True # depends on [control=['if'], data=[]] |
def get_acls_recursive(self, path, depth, include_ephemerals):
    """A recursive generator wrapper for get_acls

    :param path: path from which to start
    :param depth: depth of the recursion (-1 no recursion, 0 means no limit)
    :param include_ephemerals: get ACLs for ephemerals too
    """
    yield path, self.get_acls(path)[0]
    if depth == -1:
        # No recursion requested: report only the starting node.
        return
    for child_path, _ in self.tree(path, depth, full_path=True):
        try:
            acls, stat = self.get_acls(child_path)
        except NoNodeError:
            # Node vanished between listing and fetching; skip it.
            continue
        # Report ephemerals only when explicitly requested.
        if stat.ephemeralOwner == 0 or include_ephemerals:
            yield child_path, acls
constant[A recursive generator wrapper for get_acls
:param path: path from which to start
:param depth: depth of the recursion (-1 no recursion, 0 means no limit)
:param include_ephemerals: get ACLs for ephemerals too
]
<ast.Yield object at 0x7da1b0780970>
if compare[name[depth] equal[==] <ast.UnaryOp object at 0x7da1b0782860>] begin[:]
return[None]
for taget[tuple[[<ast.Name object at 0x7da1b07802b0>, <ast.Name object at 0x7da1b0783d30>]]] in starred[call[name[self].tree, parameter[name[path], name[depth]]]] begin[:]
<ast.Try object at 0x7da1b0781ea0>
if <ast.BoolOp object at 0x7da20c990880> begin[:]
continue
<ast.Yield object at 0x7da20c990fa0> | keyword[def] identifier[get_acls_recursive] ( identifier[self] , identifier[path] , identifier[depth] , identifier[include_ephemerals] ):
literal[string]
keyword[yield] identifier[path] , identifier[self] . identifier[get_acls] ( identifier[path] )[ literal[int] ]
keyword[if] identifier[depth] ==- literal[int] :
keyword[return]
keyword[for] identifier[tpath] , identifier[_] keyword[in] identifier[self] . identifier[tree] ( identifier[path] , identifier[depth] , identifier[full_path] = keyword[True] ):
keyword[try] :
identifier[acls] , identifier[stat] = identifier[self] . identifier[get_acls] ( identifier[tpath] )
keyword[except] identifier[NoNodeError] :
keyword[continue]
keyword[if] keyword[not] identifier[include_ephemerals] keyword[and] identifier[stat] . identifier[ephemeralOwner] != literal[int] :
keyword[continue]
keyword[yield] identifier[tpath] , identifier[acls] | def get_acls_recursive(self, path, depth, include_ephemerals):
"""A recursive generator wrapper for get_acls
:param path: path from which to start
:param depth: depth of the recursion (-1 no recursion, 0 means no limit)
:param include_ephemerals: get ACLs for ephemerals too
"""
yield (path, self.get_acls(path)[0])
if depth == -1:
return # depends on [control=['if'], data=[]]
for (tpath, _) in self.tree(path, depth, full_path=True):
try:
(acls, stat) = self.get_acls(tpath) # depends on [control=['try'], data=[]]
except NoNodeError:
continue # depends on [control=['except'], data=[]]
if not include_ephemerals and stat.ephemeralOwner != 0:
continue # depends on [control=['if'], data=[]]
yield (tpath, acls) # depends on [control=['for'], data=[]] |
def set_mode(self, gpio, mode):
    """
    Sets the gpio mode.
    gpio:= 0-53.
    mode:= INPUT, OUTPUT, ALT0, ALT1, ALT2, ALT3, ALT4, ALT5.
    ...
    pi.set_mode( 4, apigpio.INPUT) # gpio 4 as input
    pi.set_mode(17, apigpio.OUTPUT) # gpio 17 as output
    pi.set_mode(24, apigpio.ALT2) # gpio 24 as ALT2
    ...
    """
    # Old-style (pre-async/await) coroutine: ``yield from`` delegates to the
    # pigpio command coroutine, so callers must await/drive this method.
    res = yield from self._pigpio_aio_command(_PI_CMD_MODES, gpio, mode)
    # _u2i presumably converts the unsigned reply to a signed int and maps
    # negative values to errors -- TODO confirm against _u2i's definition.
    return _u2i(res)
constant[
Sets the gpio mode.
gpio:= 0-53.
mode:= INPUT, OUTPUT, ALT0, ALT1, ALT2, ALT3, ALT4, ALT5.
...
pi.set_mode( 4, apigpio.INPUT) # gpio 4 as input
pi.set_mode(17, apigpio.OUTPUT) # gpio 17 as output
pi.set_mode(24, apigpio.ALT2) # gpio 24 as ALT2
...
]
variable[res] assign[=] <ast.YieldFrom object at 0x7da1b24ae4a0>
return[call[name[_u2i], parameter[name[res]]]] | keyword[def] identifier[set_mode] ( identifier[self] , identifier[gpio] , identifier[mode] ):
literal[string]
identifier[res] = keyword[yield] keyword[from] identifier[self] . identifier[_pigpio_aio_command] ( identifier[_PI_CMD_MODES] , identifier[gpio] , identifier[mode] )
keyword[return] identifier[_u2i] ( identifier[res] ) | def set_mode(self, gpio, mode):
"""
Sets the gpio mode.
gpio:= 0-53.
mode:= INPUT, OUTPUT, ALT0, ALT1, ALT2, ALT3, ALT4, ALT5.
...
pi.set_mode( 4, apigpio.INPUT) # gpio 4 as input
pi.set_mode(17, apigpio.OUTPUT) # gpio 17 as output
pi.set_mode(24, apigpio.ALT2) # gpio 24 as ALT2
...
"""
res = (yield from self._pigpio_aio_command(_PI_CMD_MODES, gpio, mode))
return _u2i(res) |
def data(self, data):
""" :type: numppy.ndarray """
self._assert_shape(data, self._x_indexes, self._y_indexes)
data[data == -np.inf] = 0.0
data[data == np.inf] = 0.0
self._data = data
self._min_value = np.nanmin(self.data)
self._max_value = np.nanmax(self.data)
self._data_x_indexes = list(range(data.shape[0]))
self._data_y_indexes = list(range(data.shape[1]))
self._dirty = False | def function[data, parameter[self, data]]:
constant[ :type: numppy.ndarray ]
call[name[self]._assert_shape, parameter[name[data], name[self]._x_indexes, name[self]._y_indexes]]
call[name[data]][compare[name[data] equal[==] <ast.UnaryOp object at 0x7da20c7969e0>]] assign[=] constant[0.0]
call[name[data]][compare[name[data] equal[==] name[np].inf]] assign[=] constant[0.0]
name[self]._data assign[=] name[data]
name[self]._min_value assign[=] call[name[np].nanmin, parameter[name[self].data]]
name[self]._max_value assign[=] call[name[np].nanmax, parameter[name[self].data]]
name[self]._data_x_indexes assign[=] call[name[list], parameter[call[name[range], parameter[call[name[data].shape][constant[0]]]]]]
name[self]._data_y_indexes assign[=] call[name[list], parameter[call[name[range], parameter[call[name[data].shape][constant[1]]]]]]
name[self]._dirty assign[=] constant[False] | keyword[def] identifier[data] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[_assert_shape] ( identifier[data] , identifier[self] . identifier[_x_indexes] , identifier[self] . identifier[_y_indexes] )
identifier[data] [ identifier[data] ==- identifier[np] . identifier[inf] ]= literal[int]
identifier[data] [ identifier[data] == identifier[np] . identifier[inf] ]= literal[int]
identifier[self] . identifier[_data] = identifier[data]
identifier[self] . identifier[_min_value] = identifier[np] . identifier[nanmin] ( identifier[self] . identifier[data] )
identifier[self] . identifier[_max_value] = identifier[np] . identifier[nanmax] ( identifier[self] . identifier[data] )
identifier[self] . identifier[_data_x_indexes] = identifier[list] ( identifier[range] ( identifier[data] . identifier[shape] [ literal[int] ]))
identifier[self] . identifier[_data_y_indexes] = identifier[list] ( identifier[range] ( identifier[data] . identifier[shape] [ literal[int] ]))
identifier[self] . identifier[_dirty] = keyword[False] | def data(self, data):
""" :type: numppy.ndarray """
self._assert_shape(data, self._x_indexes, self._y_indexes)
data[data == -np.inf] = 0.0
data[data == np.inf] = 0.0
self._data = data
self._min_value = np.nanmin(self.data)
self._max_value = np.nanmax(self.data)
self._data_x_indexes = list(range(data.shape[0]))
self._data_y_indexes = list(range(data.shape[1]))
self._dirty = False |
def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None):
"""Read the specified property of the object, with the optional array index,
and cast the result into an Any object."""
if _debug: read_property_to_result_element._debug("read_property_to_result_element %s %r %r", obj, propertyIdentifier, propertyArrayIndex)
# save the result in the property value
read_result = ReadAccessResultElementChoice()
try:
if not obj:
raise ExecutionError(errorClass='object', errorCode='unknownObject')
read_result.propertyValue = read_property_to_any(obj, propertyIdentifier, propertyArrayIndex)
if _debug: read_property_to_result_element._debug(" - success")
except PropertyError as error:
if _debug: read_property_to_result_element._debug(" - error: %r", error)
read_result.propertyAccessError = ErrorType(errorClass='property', errorCode='unknownProperty')
except ExecutionError as error:
if _debug: read_property_to_result_element._debug(" - error: %r", error)
read_result.propertyAccessError = ErrorType(errorClass=error.errorClass, errorCode=error.errorCode)
# make an element for this value
read_access_result_element = ReadAccessResultElement(
propertyIdentifier=propertyIdentifier,
propertyArrayIndex=propertyArrayIndex,
readResult=read_result,
)
if _debug: read_property_to_result_element._debug(" - read_access_result_element: %r", read_access_result_element)
# fini
return read_access_result_element | def function[read_property_to_result_element, parameter[obj, propertyIdentifier, propertyArrayIndex]]:
constant[Read the specified property of the object, with the optional array index,
and cast the result into an Any object.]
if name[_debug] begin[:]
call[name[read_property_to_result_element]._debug, parameter[constant[read_property_to_result_element %s %r %r], name[obj], name[propertyIdentifier], name[propertyArrayIndex]]]
variable[read_result] assign[=] call[name[ReadAccessResultElementChoice], parameter[]]
<ast.Try object at 0x7da1b084e650>
variable[read_access_result_element] assign[=] call[name[ReadAccessResultElement], parameter[]]
if name[_debug] begin[:]
call[name[read_property_to_result_element]._debug, parameter[constant[ - read_access_result_element: %r], name[read_access_result_element]]]
return[name[read_access_result_element]] | keyword[def] identifier[read_property_to_result_element] ( identifier[obj] , identifier[propertyIdentifier] , identifier[propertyArrayIndex] = keyword[None] ):
literal[string]
keyword[if] identifier[_debug] : identifier[read_property_to_result_element] . identifier[_debug] ( literal[string] , identifier[obj] , identifier[propertyIdentifier] , identifier[propertyArrayIndex] )
identifier[read_result] = identifier[ReadAccessResultElementChoice] ()
keyword[try] :
keyword[if] keyword[not] identifier[obj] :
keyword[raise] identifier[ExecutionError] ( identifier[errorClass] = literal[string] , identifier[errorCode] = literal[string] )
identifier[read_result] . identifier[propertyValue] = identifier[read_property_to_any] ( identifier[obj] , identifier[propertyIdentifier] , identifier[propertyArrayIndex] )
keyword[if] identifier[_debug] : identifier[read_property_to_result_element] . identifier[_debug] ( literal[string] )
keyword[except] identifier[PropertyError] keyword[as] identifier[error] :
keyword[if] identifier[_debug] : identifier[read_property_to_result_element] . identifier[_debug] ( literal[string] , identifier[error] )
identifier[read_result] . identifier[propertyAccessError] = identifier[ErrorType] ( identifier[errorClass] = literal[string] , identifier[errorCode] = literal[string] )
keyword[except] identifier[ExecutionError] keyword[as] identifier[error] :
keyword[if] identifier[_debug] : identifier[read_property_to_result_element] . identifier[_debug] ( literal[string] , identifier[error] )
identifier[read_result] . identifier[propertyAccessError] = identifier[ErrorType] ( identifier[errorClass] = identifier[error] . identifier[errorClass] , identifier[errorCode] = identifier[error] . identifier[errorCode] )
identifier[read_access_result_element] = identifier[ReadAccessResultElement] (
identifier[propertyIdentifier] = identifier[propertyIdentifier] ,
identifier[propertyArrayIndex] = identifier[propertyArrayIndex] ,
identifier[readResult] = identifier[read_result] ,
)
keyword[if] identifier[_debug] : identifier[read_property_to_result_element] . identifier[_debug] ( literal[string] , identifier[read_access_result_element] )
keyword[return] identifier[read_access_result_element] | def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None):
"""Read the specified property of the object, with the optional array index,
and cast the result into an Any object."""
if _debug:
read_property_to_result_element._debug('read_property_to_result_element %s %r %r', obj, propertyIdentifier, propertyArrayIndex) # depends on [control=['if'], data=[]]
# save the result in the property value
read_result = ReadAccessResultElementChoice()
try:
if not obj:
raise ExecutionError(errorClass='object', errorCode='unknownObject') # depends on [control=['if'], data=[]]
read_result.propertyValue = read_property_to_any(obj, propertyIdentifier, propertyArrayIndex)
if _debug:
read_property_to_result_element._debug(' - success') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except PropertyError as error:
if _debug:
read_property_to_result_element._debug(' - error: %r', error) # depends on [control=['if'], data=[]]
read_result.propertyAccessError = ErrorType(errorClass='property', errorCode='unknownProperty') # depends on [control=['except'], data=['error']]
except ExecutionError as error:
if _debug:
read_property_to_result_element._debug(' - error: %r', error) # depends on [control=['if'], data=[]]
read_result.propertyAccessError = ErrorType(errorClass=error.errorClass, errorCode=error.errorCode) # depends on [control=['except'], data=['error']]
# make an element for this value
read_access_result_element = ReadAccessResultElement(propertyIdentifier=propertyIdentifier, propertyArrayIndex=propertyArrayIndex, readResult=read_result)
if _debug:
read_property_to_result_element._debug(' - read_access_result_element: %r', read_access_result_element) # depends on [control=['if'], data=[]]
# fini
return read_access_result_element |
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command)) | def function[_list, parameter[self, args]]:
constant[
Process local commands
]
call[name[args].pop, parameter[constant[0]]]
variable[command] assign[=] call[name[args]][constant[0]]
if compare[name[command] equal[==] constant[packages]] begin[:]
call[name[self]._list_packages, parameter[name[args]]] | keyword[def] identifier[_list] ( identifier[self] , identifier[args] ):
literal[string]
identifier[args] . identifier[pop] ( literal[int] )
identifier[command] = identifier[args] [ literal[int] ]
keyword[if] identifier[command] == literal[string] :
identifier[self] . identifier[_list_packages] ( identifier[args] )
keyword[elif] identifier[command] == literal[string] :
identifier[self] . identifier[_list_files] ( identifier[args] )
keyword[elif] identifier[command] == literal[string] :
identifier[self] . identifier[_repo_list] ( identifier[args] )
keyword[else] :
keyword[raise] identifier[SPMInvocationError] ( literal[string] . identifier[format] ( identifier[command] )) | def _list(self, args):
"""
Process local commands
"""
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args) # depends on [control=['if'], data=[]]
elif command == 'files':
self._list_files(args) # depends on [control=['if'], data=[]]
elif command == 'repos':
self._repo_list(args) # depends on [control=['if'], data=[]]
else:
raise SPMInvocationError("Invalid list command '{0}'".format(command)) |
def detect_intent_with_model_selection(project_id, session_id, audio_file_path,
language_code):
"""Returns the result of detect intent with model selection on an audio file
as input
Using the same `session_id` between requests allows continuation
of the conversaion."""
import dialogflow_v2beta1 as dialogflow
session_client = dialogflow.SessionsClient()
# Note: hard coding audio_encoding and sample_rate_hertz for simplicity.
audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16
sample_rate_hertz = 16000
session_path = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session_path))
with open(audio_file_path, 'rb') as audio_file:
input_audio = audio_file.read()
# Which Speech model to select for the given request.
# Possible models: video, phone_call, command_and_search, default
model = 'phone_call'
audio_config = dialogflow.types.InputAudioConfig(
audio_encoding=audio_encoding, language_code=language_code,
sample_rate_hertz=sample_rate_hertz,
model=model)
query_input = dialogflow.types.QueryInput(audio_config=audio_config)
response = session_client.detect_intent(
session=session_path, query_input=query_input,
input_audio=input_audio)
print('=' * 20)
print('Query text: {}'.format(response.query_result.query_text))
print('Detected intent: {} (confidence: {})\n'.format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(
response.query_result.fulfillment_text)) | def function[detect_intent_with_model_selection, parameter[project_id, session_id, audio_file_path, language_code]]:
constant[Returns the result of detect intent with model selection on an audio file
as input
Using the same `session_id` between requests allows continuation
of the conversaion.]
import module[dialogflow_v2beta1] as alias[dialogflow]
variable[session_client] assign[=] call[name[dialogflow].SessionsClient, parameter[]]
variable[audio_encoding] assign[=] name[dialogflow].enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16
variable[sample_rate_hertz] assign[=] constant[16000]
variable[session_path] assign[=] call[name[session_client].session_path, parameter[name[project_id], name[session_id]]]
call[name[print], parameter[call[constant[Session path: {}
].format, parameter[name[session_path]]]]]
with call[name[open], parameter[name[audio_file_path], constant[rb]]] begin[:]
variable[input_audio] assign[=] call[name[audio_file].read, parameter[]]
variable[model] assign[=] constant[phone_call]
variable[audio_config] assign[=] call[name[dialogflow].types.InputAudioConfig, parameter[]]
variable[query_input] assign[=] call[name[dialogflow].types.QueryInput, parameter[]]
variable[response] assign[=] call[name[session_client].detect_intent, parameter[]]
call[name[print], parameter[binary_operation[constant[=] * constant[20]]]]
call[name[print], parameter[call[constant[Query text: {}].format, parameter[name[response].query_result.query_text]]]]
call[name[print], parameter[call[constant[Detected intent: {} (confidence: {})
].format, parameter[name[response].query_result.intent.display_name, name[response].query_result.intent_detection_confidence]]]]
call[name[print], parameter[call[constant[Fulfillment text: {}
].format, parameter[name[response].query_result.fulfillment_text]]]] | keyword[def] identifier[detect_intent_with_model_selection] ( identifier[project_id] , identifier[session_id] , identifier[audio_file_path] ,
identifier[language_code] ):
literal[string]
keyword[import] identifier[dialogflow_v2beta1] keyword[as] identifier[dialogflow]
identifier[session_client] = identifier[dialogflow] . identifier[SessionsClient] ()
identifier[audio_encoding] = identifier[dialogflow] . identifier[enums] . identifier[AudioEncoding] . identifier[AUDIO_ENCODING_LINEAR_16]
identifier[sample_rate_hertz] = literal[int]
identifier[session_path] = identifier[session_client] . identifier[session_path] ( identifier[project_id] , identifier[session_id] )
identifier[print] ( literal[string] . identifier[format] ( identifier[session_path] ))
keyword[with] identifier[open] ( identifier[audio_file_path] , literal[string] ) keyword[as] identifier[audio_file] :
identifier[input_audio] = identifier[audio_file] . identifier[read] ()
identifier[model] = literal[string]
identifier[audio_config] = identifier[dialogflow] . identifier[types] . identifier[InputAudioConfig] (
identifier[audio_encoding] = identifier[audio_encoding] , identifier[language_code] = identifier[language_code] ,
identifier[sample_rate_hertz] = identifier[sample_rate_hertz] ,
identifier[model] = identifier[model] )
identifier[query_input] = identifier[dialogflow] . identifier[types] . identifier[QueryInput] ( identifier[audio_config] = identifier[audio_config] )
identifier[response] = identifier[session_client] . identifier[detect_intent] (
identifier[session] = identifier[session_path] , identifier[query_input] = identifier[query_input] ,
identifier[input_audio] = identifier[input_audio] )
identifier[print] ( literal[string] * literal[int] )
identifier[print] ( literal[string] . identifier[format] ( identifier[response] . identifier[query_result] . identifier[query_text] ))
identifier[print] ( literal[string] . identifier[format] (
identifier[response] . identifier[query_result] . identifier[intent] . identifier[display_name] ,
identifier[response] . identifier[query_result] . identifier[intent_detection_confidence] ))
identifier[print] ( literal[string] . identifier[format] (
identifier[response] . identifier[query_result] . identifier[fulfillment_text] )) | def detect_intent_with_model_selection(project_id, session_id, audio_file_path, language_code):
"""Returns the result of detect intent with model selection on an audio file
as input
Using the same `session_id` between requests allows continuation
of the conversaion."""
import dialogflow_v2beta1 as dialogflow
session_client = dialogflow.SessionsClient()
# Note: hard coding audio_encoding and sample_rate_hertz for simplicity.
audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16
sample_rate_hertz = 16000
session_path = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session_path))
with open(audio_file_path, 'rb') as audio_file:
input_audio = audio_file.read() # depends on [control=['with'], data=['audio_file']]
# Which Speech model to select for the given request.
# Possible models: video, phone_call, command_and_search, default
model = 'phone_call'
audio_config = dialogflow.types.InputAudioConfig(audio_encoding=audio_encoding, language_code=language_code, sample_rate_hertz=sample_rate_hertz, model=model)
query_input = dialogflow.types.QueryInput(audio_config=audio_config)
response = session_client.detect_intent(session=session_path, query_input=query_input, input_audio=input_audio)
print('=' * 20)
print('Query text: {}'.format(response.query_result.query_text))
print('Detected intent: {} (confidence: {})\n'.format(response.query_result.intent.display_name, response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(response.query_result.fulfillment_text)) |
def remove(self, child):
"""
Remove the specified child element or attribute.
@param child: A child to remove.
@type child: L{Element}|L{Attribute}
@return: The detached I{child} when I{child} is an element, else None.
@rtype: L{Element}|None
"""
if isinstance(child, Element):
return child.detach()
if isinstance(child, Attribute):
self.attributes.remove(child)
return None | def function[remove, parameter[self, child]]:
constant[
Remove the specified child element or attribute.
@param child: A child to remove.
@type child: L{Element}|L{Attribute}
@return: The detached I{child} when I{child} is an element, else None.
@rtype: L{Element}|None
]
if call[name[isinstance], parameter[name[child], name[Element]]] begin[:]
return[call[name[child].detach, parameter[]]]
if call[name[isinstance], parameter[name[child], name[Attribute]]] begin[:]
call[name[self].attributes.remove, parameter[name[child]]]
return[constant[None]] | keyword[def] identifier[remove] ( identifier[self] , identifier[child] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[child] , identifier[Element] ):
keyword[return] identifier[child] . identifier[detach] ()
keyword[if] identifier[isinstance] ( identifier[child] , identifier[Attribute] ):
identifier[self] . identifier[attributes] . identifier[remove] ( identifier[child] )
keyword[return] keyword[None] | def remove(self, child):
"""
Remove the specified child element or attribute.
@param child: A child to remove.
@type child: L{Element}|L{Attribute}
@return: The detached I{child} when I{child} is an element, else None.
@rtype: L{Element}|None
"""
if isinstance(child, Element):
return child.detach() # depends on [control=['if'], data=[]]
if isinstance(child, Attribute):
self.attributes.remove(child) # depends on [control=['if'], data=[]]
return None |
def clear_grade_system(self):
"""Clears the grading system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_grade_system_metadata().is_read_only() or
self.get_grade_system_metadata().is_required()):
raise errors.NoAccess()
self._my_map['gradeSystemId'] = self._grade_system_default | def function[clear_grade_system, parameter[self]]:
constant[Clears the grading system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
]
if <ast.BoolOp object at 0x7da20c7c81c0> begin[:]
<ast.Raise object at 0x7da2046203a0>
call[name[self]._my_map][constant[gradeSystemId]] assign[=] name[self]._grade_system_default | keyword[def] identifier[clear_grade_system] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[get_grade_system_metadata] (). identifier[is_read_only] () keyword[or]
identifier[self] . identifier[get_grade_system_metadata] (). identifier[is_required] ()):
keyword[raise] identifier[errors] . identifier[NoAccess] ()
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[self] . identifier[_grade_system_default] | def clear_grade_system(self):
"""Clears the grading system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if self.get_grade_system_metadata().is_read_only() or self.get_grade_system_metadata().is_required():
raise errors.NoAccess() # depends on [control=['if'], data=[]]
self._my_map['gradeSystemId'] = self._grade_system_default |
def get_resources_for_api_version(self, prefix, group, version, preferred):
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
resources = defaultdict(list)
subresources = {}
path = '/'.join(filter(None, [prefix, group, version]))
resources_response = load_json(self.client.request('GET', path))['resources']
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
for subresource in subresources_raw:
resource, name = subresource['name'].split('/')
if not subresources.get(resource):
subresources[resource] = {}
subresources[resource][name] = subresource
for resource in resources_raw:
# Prevent duplicate keys
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
resource.pop(key, None)
resourceobj = Resource(
prefix=prefix,
group=group,
api_version=version,
client=self.client,
preferred=preferred,
subresources=subresources.get(resource['name']),
**resource
)
resources[resource['kind']].append(resourceobj)
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'])
resources[resource_list.kind].append(resource_list)
return resources | def function[get_resources_for_api_version, parameter[self, prefix, group, version, preferred]]:
constant[ returns a dictionary of resources associated with provided (prefix, group, version)]
variable[resources] assign[=] call[name[defaultdict], parameter[name[list]]]
variable[subresources] assign[=] dictionary[[], []]
variable[path] assign[=] call[constant[/].join, parameter[call[name[filter], parameter[constant[None], list[[<ast.Name object at 0x7da18dc99b40>, <ast.Name object at 0x7da18dc9a740>, <ast.Name object at 0x7da18dc99ae0>]]]]]]
variable[resources_response] assign[=] call[call[name[load_json], parameter[call[name[self].client.request, parameter[constant[GET], name[path]]]]]][constant[resources]]
variable[resources_raw] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da2047e8340>, name[resources_response]]]]]
variable[subresources_raw] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da2047ea560>, name[resources_response]]]]]
for taget[name[subresource]] in starred[name[subresources_raw]] begin[:]
<ast.Tuple object at 0x7da2047e9b40> assign[=] call[call[name[subresource]][constant[name]].split, parameter[constant[/]]]
if <ast.UnaryOp object at 0x7da2047ea230> begin[:]
call[name[subresources]][name[resource]] assign[=] dictionary[[], []]
call[call[name[subresources]][name[resource]]][name[name]] assign[=] name[subresource]
for taget[name[resource]] in starred[name[resources_raw]] begin[:]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da2047ea170>, <ast.Constant object at 0x7da2047e9c60>, <ast.Constant object at 0x7da2047ea140>, <ast.Constant object at 0x7da2047e9180>, <ast.Constant object at 0x7da2047e9e10>]]] begin[:]
call[name[resource].pop, parameter[name[key], constant[None]]]
variable[resourceobj] assign[=] call[name[Resource], parameter[]]
call[call[name[resources]][call[name[resource]][constant[kind]]].append, parameter[name[resourceobj]]]
variable[resource_list] assign[=] call[name[ResourceList], parameter[name[self].client]]
call[call[name[resources]][name[resource_list].kind].append, parameter[name[resource_list]]]
return[name[resources]] | keyword[def] identifier[get_resources_for_api_version] ( identifier[self] , identifier[prefix] , identifier[group] , identifier[version] , identifier[preferred] ):
literal[string]
identifier[resources] = identifier[defaultdict] ( identifier[list] )
identifier[subresources] ={}
identifier[path] = literal[string] . identifier[join] ( identifier[filter] ( keyword[None] ,[ identifier[prefix] , identifier[group] , identifier[version] ]))
identifier[resources_response] = identifier[load_json] ( identifier[self] . identifier[client] . identifier[request] ( literal[string] , identifier[path] ))[ literal[string] ]
identifier[resources_raw] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[resource] : literal[string] keyword[not] keyword[in] identifier[resource] [ literal[string] ], identifier[resources_response] ))
identifier[subresources_raw] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[resource] : literal[string] keyword[in] identifier[resource] [ literal[string] ], identifier[resources_response] ))
keyword[for] identifier[subresource] keyword[in] identifier[subresources_raw] :
identifier[resource] , identifier[name] = identifier[subresource] [ literal[string] ]. identifier[split] ( literal[string] )
keyword[if] keyword[not] identifier[subresources] . identifier[get] ( identifier[resource] ):
identifier[subresources] [ identifier[resource] ]={}
identifier[subresources] [ identifier[resource] ][ identifier[name] ]= identifier[subresource]
keyword[for] identifier[resource] keyword[in] identifier[resources_raw] :
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[resource] . identifier[pop] ( identifier[key] , keyword[None] )
identifier[resourceobj] = identifier[Resource] (
identifier[prefix] = identifier[prefix] ,
identifier[group] = identifier[group] ,
identifier[api_version] = identifier[version] ,
identifier[client] = identifier[self] . identifier[client] ,
identifier[preferred] = identifier[preferred] ,
identifier[subresources] = identifier[subresources] . identifier[get] ( identifier[resource] [ literal[string] ]),
** identifier[resource]
)
identifier[resources] [ identifier[resource] [ literal[string] ]]. identifier[append] ( identifier[resourceobj] )
identifier[resource_list] = identifier[ResourceList] ( identifier[self] . identifier[client] , identifier[group] = identifier[group] , identifier[api_version] = identifier[version] , identifier[base_kind] = identifier[resource] [ literal[string] ])
identifier[resources] [ identifier[resource_list] . identifier[kind] ]. identifier[append] ( identifier[resource_list] )
keyword[return] identifier[resources] | def get_resources_for_api_version(self, prefix, group, version, preferred):
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
resources = defaultdict(list)
subresources = {}
path = '/'.join(filter(None, [prefix, group, version]))
resources_response = load_json(self.client.request('GET', path))['resources']
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
for subresource in subresources_raw:
(resource, name) = subresource['name'].split('/')
if not subresources.get(resource):
subresources[resource] = {} # depends on [control=['if'], data=[]]
subresources[resource][name] = subresource # depends on [control=['for'], data=['subresource']]
for resource in resources_raw:
# Prevent duplicate keys
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
resource.pop(key, None) # depends on [control=['for'], data=['key']]
resourceobj = Resource(prefix=prefix, group=group, api_version=version, client=self.client, preferred=preferred, subresources=subresources.get(resource['name']), **resource)
resources[resource['kind']].append(resourceobj)
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'])
resources[resource_list.kind].append(resource_list) # depends on [control=['for'], data=['resource']]
return resources |
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, '__html__'):
value = value.__html__()
return escape(text_type(value)) | def function[do_forceescape, parameter[value]]:
constant[Enforce HTML escaping. This will probably double escape variables.]
if call[name[hasattr], parameter[name[value], constant[__html__]]] begin[:]
variable[value] assign[=] call[name[value].__html__, parameter[]]
return[call[name[escape], parameter[call[name[text_type], parameter[name[value]]]]]] | keyword[def] identifier[do_forceescape] ( identifier[value] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ):
identifier[value] = identifier[value] . identifier[__html__] ()
keyword[return] identifier[escape] ( identifier[text_type] ( identifier[value] )) | def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, '__html__'):
value = value.__html__() # depends on [control=['if'], data=[]]
return escape(text_type(value)) |
def loadFile(self, fileName):
"""
Display the file associated with the appletID.
"""
# Assign QFile object with the current name.
self.file = QtCore.QFile(fileName)
if self.file.exists():
self.qteText.append(open(fileName).read())
else:
msg = "File <b>{}</b> does not exist".format(self.qteAppletID())
self.qteLogger.info(msg) | def function[loadFile, parameter[self, fileName]]:
constant[
Display the file associated with the appletID.
]
name[self].file assign[=] call[name[QtCore].QFile, parameter[name[fileName]]]
if call[name[self].file.exists, parameter[]] begin[:]
call[name[self].qteText.append, parameter[call[call[name[open], parameter[name[fileName]]].read, parameter[]]]] | keyword[def] identifier[loadFile] ( identifier[self] , identifier[fileName] ):
literal[string]
identifier[self] . identifier[file] = identifier[QtCore] . identifier[QFile] ( identifier[fileName] )
keyword[if] identifier[self] . identifier[file] . identifier[exists] ():
identifier[self] . identifier[qteText] . identifier[append] ( identifier[open] ( identifier[fileName] ). identifier[read] ())
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[self] . identifier[qteAppletID] ())
identifier[self] . identifier[qteLogger] . identifier[info] ( identifier[msg] ) | def loadFile(self, fileName):
"""
Display the file associated with the appletID.
"""
# Assign QFile object with the current name.
self.file = QtCore.QFile(fileName)
if self.file.exists():
self.qteText.append(open(fileName).read()) # depends on [control=['if'], data=[]]
else:
msg = 'File <b>{}</b> does not exist'.format(self.qteAppletID())
self.qteLogger.info(msg) |
def get_next_occurrence(self) -> date:
""" Returns the next occurrence date for transaction """
result = get_next_occurrence(self.transaction)
assert isinstance(result, date)
return result | def function[get_next_occurrence, parameter[self]]:
constant[ Returns the next occurrence date for transaction ]
variable[result] assign[=] call[name[get_next_occurrence], parameter[name[self].transaction]]
assert[call[name[isinstance], parameter[name[result], name[date]]]]
return[name[result]] | keyword[def] identifier[get_next_occurrence] ( identifier[self] )-> identifier[date] :
literal[string]
identifier[result] = identifier[get_next_occurrence] ( identifier[self] . identifier[transaction] )
keyword[assert] identifier[isinstance] ( identifier[result] , identifier[date] )
keyword[return] identifier[result] | def get_next_occurrence(self) -> date:
""" Returns the next occurrence date for transaction """
result = get_next_occurrence(self.transaction)
assert isinstance(result, date)
return result |
def _get_specification(self, specification):
"""
Read the specification provided. It can either be a url or a file
location.
"""
result = six.moves.urllib.parse.urlparse(specification)
# If the specification has an http or an https scheme we can
# retrieve it via an HTTP get request, else we try to open it
# as a file.
if result.scheme in ['http', 'https']:
response = requests.get(specification)
spec_json = response.json()
else:
with open(specification, 'r') as spec_file:
spec_json = json.load(spec_file)
return spec_json | def function[_get_specification, parameter[self, specification]]:
constant[
Read the specification provided. It can either be a url or a file
location.
]
variable[result] assign[=] call[name[six].moves.urllib.parse.urlparse, parameter[name[specification]]]
if compare[name[result].scheme in list[[<ast.Constant object at 0x7da20c6c6ec0>, <ast.Constant object at 0x7da20c6c58d0>]]] begin[:]
variable[response] assign[=] call[name[requests].get, parameter[name[specification]]]
variable[spec_json] assign[=] call[name[response].json, parameter[]]
return[name[spec_json]] | keyword[def] identifier[_get_specification] ( identifier[self] , identifier[specification] ):
literal[string]
identifier[result] = identifier[six] . identifier[moves] . identifier[urllib] . identifier[parse] . identifier[urlparse] ( identifier[specification] )
keyword[if] identifier[result] . identifier[scheme] keyword[in] [ literal[string] , literal[string] ]:
identifier[response] = identifier[requests] . identifier[get] ( identifier[specification] )
identifier[spec_json] = identifier[response] . identifier[json] ()
keyword[else] :
keyword[with] identifier[open] ( identifier[specification] , literal[string] ) keyword[as] identifier[spec_file] :
identifier[spec_json] = identifier[json] . identifier[load] ( identifier[spec_file] )
keyword[return] identifier[spec_json] | def _get_specification(self, specification):
"""
Read the specification provided. It can either be a url or a file
location.
"""
result = six.moves.urllib.parse.urlparse(specification)
# If the specification has an http or an https scheme we can
# retrieve it via an HTTP get request, else we try to open it
# as a file.
if result.scheme in ['http', 'https']:
response = requests.get(specification)
spec_json = response.json() # depends on [control=['if'], data=[]]
else:
with open(specification, 'r') as spec_file:
spec_json = json.load(spec_file) # depends on [control=['with'], data=['spec_file']]
return spec_json |
def extract_yang_model(self, content):
"""
Extracts one or more YANG models from an RFC or draft text string in
which the models are specified. The function skips over page
formatting (Page Headers and Footers) and performs basic YANG module
syntax checking. In strict mode, the function also enforces the
<CODE BEGINS> / <CODE ENDS> tags - a model is not extracted unless
the tags are present.
:return: None
"""
model = []
output_file = None
in_model = False
example_match = False
i = 0
level = 0
quotes = 0
while i < len(content):
line = content[i]
# Try to match '<CODE ENDS>'
if self.CODE_ENDS_TAG.match(line):
if in_model is False:
self.warning("Line %d: misplaced <CODE ENDS>" % i)
in_model = False
if "\"" in line:
if line.count("\"") % 2 == 0:
quotes = 0
else:
if quotes == 1:
quotes = 0
else:
quotes = 1
# Try to match '(sub)module <module_name> {'
match = self.MODULE_STATEMENT.match(line)
if match:
# We're already parsing a module
if quotes == 0:
if level > 0:
self.error("Line %d - 'module' statement within another module" % i)
return
# Check if we should enforce <CODE BEGINS> / <CODE ENDS>
# if we do enforce, we ignore models not enclosed in <CODE BEGINS> / <CODE ENDS>
if match.groups()[1] or match.groups()[4]:
self.warning('Line %d - Module name should not be enclosed in quotes' % i)
# do the module name checking, etc.
example_match = self.EXAMPLE_TAG.match(match.groups()[2])
if in_model is True:
if example_match:
self.error("Line %d - YANG module '%s' with <CODE BEGINS> and starting with 'example-'" %
(i, match.groups()[2]))
else:
if not example_match:
self.error("Line %d - YANG module '%s' with no <CODE BEGINS> and not starting with 'example-'" %
(i, match.groups()[2]))
# now decide if we're allowed to set the level
# (i.e. signal that we're in a module) to 1 and if
# we're allowed to output the module at all with the
# strict examples flag
# if self.strict is True:
# if in_model is True:
# level = 1
# else:
# level = 1
# always set the level to 1; we decide whether or not
# to output at the end
if quotes == 0:
level = 1
if not output_file and level == 1 and quotes == 0:
print("\nExtracting '%s'" % match.groups()[2])
output_file = '%s.yang' % match.groups()[2].strip('"\'')
if self.debug_level > 0:
print(' Getting YANG file name from module name: %s' % output_file)
if level > 0:
self.debug_print_line(i, level, content[i])
# Try to match the Footer ('[Page <page_num>]')
# If match found, skip over page headers and footers
if self.PAGE_TAG.match(line):
self.strip_empty_lines_backward(model, 3)
self.debug_print_strip_msg(i, content[i])
i += 1 # Strip the
# Strip empty lines between the Footer and the next page Header
i = self.strip_empty_lines_forward(content, i)
if i < len(content):
self.debug_print_strip_msg(i, content[i])
i += 1 # Strip the next page Header
else:
self.error("<End of File> - EOF encountered while parsing the model")
return
# Strip empty lines between the page Header and real content on the page
i = self.strip_empty_lines_forward(content, i) - 1
if i >= len(content):
self.error("<End of File> - EOF encountered while parsing the model")
return
else:
model.append([line, i + 1])
counter = Counter(line)
if quotes == 0:
if "\"" in line and "}" in line:
if line.index("}") > line.rindex("\"") or line.index("}") < line.index("\""):
level += (counter['{'] - counter['}'])
else:
level += (counter['{'] - counter['}'])
if level == 1:
if self.strict:
if self.strict_examples:
if example_match and not in_model:
self.write_model_to_file(model, output_file)
elif in_model:
self.write_model_to_file(model, output_file)
else:
self.write_model_to_file(model, output_file)
self.max_line_len = 0
model = []
output_file = None
level = 0
# Try to match '<CODE BEGINS>'
match = self.CODE_BEGINS_TAG.match(line)
if match:
# Found the beginning of the YANG module code section; make sure we're not parsing a model already
if level > 0:
self.error("Line %d - <CODE BEGINS> within a model" % i)
return
if in_model is True:
self.error("Line %d - Misplaced <CODE BEGINS> or missing <CODE ENDS>" % i)
in_model = True
mg = match.groups()
# Get the YANG module's file name
if mg[2]:
print("\nExtracting '%s'" % match.groups()[2])
output_file = mg[2].strip()
else:
if mg[0] and mg[1] is None:
self.error('Line %d - Missing file name in <CODE BEGINS>' % i)
else:
self.error("Line %d - YANG file not specified in <CODE BEGINS>" % i)
i += 1
if level > 0:
self.error("<End of File> - EOF encountered while parsing the model")
return
if in_model is True:
self.error("Line %d - Missing <CODE ENDS>" % i) | def function[extract_yang_model, parameter[self, content]]:
constant[
Extracts one or more YANG models from an RFC or draft text string in
which the models are specified. The function skips over page
formatting (Page Headers and Footers) and performs basic YANG module
syntax checking. In strict mode, the function also enforces the
<CODE BEGINS> / <CODE ENDS> tags - a model is not extracted unless
the tags are present.
:return: None
]
variable[model] assign[=] list[[]]
variable[output_file] assign[=] constant[None]
variable[in_model] assign[=] constant[False]
variable[example_match] assign[=] constant[False]
variable[i] assign[=] constant[0]
variable[level] assign[=] constant[0]
variable[quotes] assign[=] constant[0]
while compare[name[i] less[<] call[name[len], parameter[name[content]]]] begin[:]
variable[line] assign[=] call[name[content]][name[i]]
if call[name[self].CODE_ENDS_TAG.match, parameter[name[line]]] begin[:]
if compare[name[in_model] is constant[False]] begin[:]
call[name[self].warning, parameter[binary_operation[constant[Line %d: misplaced <CODE ENDS>] <ast.Mod object at 0x7da2590d6920> name[i]]]]
variable[in_model] assign[=] constant[False]
if compare[constant["] in name[line]] begin[:]
if compare[binary_operation[call[name[line].count, parameter[constant["]]] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
variable[quotes] assign[=] constant[0]
variable[match] assign[=] call[name[self].MODULE_STATEMENT.match, parameter[name[line]]]
if name[match] begin[:]
if compare[name[quotes] equal[==] constant[0]] begin[:]
if compare[name[level] greater[>] constant[0]] begin[:]
call[name[self].error, parameter[binary_operation[constant[Line %d - 'module' statement within another module] <ast.Mod object at 0x7da2590d6920> name[i]]]]
return[None]
if <ast.BoolOp object at 0x7da1b26afeb0> begin[:]
call[name[self].warning, parameter[binary_operation[constant[Line %d - Module name should not be enclosed in quotes] <ast.Mod object at 0x7da2590d6920> name[i]]]]
variable[example_match] assign[=] call[name[self].EXAMPLE_TAG.match, parameter[call[call[name[match].groups, parameter[]]][constant[2]]]]
if compare[name[in_model] is constant[True]] begin[:]
if name[example_match] begin[:]
call[name[self].error, parameter[binary_operation[constant[Line %d - YANG module '%s' with <CODE BEGINS> and starting with 'example-'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26af670>, <ast.Subscript object at 0x7da1b26ac640>]]]]]
if compare[name[quotes] equal[==] constant[0]] begin[:]
variable[level] assign[=] constant[1]
if <ast.BoolOp object at 0x7da1b0fd1450> begin[:]
call[name[print], parameter[binary_operation[constant[
Extracting '%s'] <ast.Mod object at 0x7da2590d6920> call[call[name[match].groups, parameter[]]][constant[2]]]]]
variable[output_file] assign[=] binary_operation[constant[%s.yang] <ast.Mod object at 0x7da2590d6920> call[call[call[name[match].groups, parameter[]]][constant[2]].strip, parameter[constant["']]]]
if compare[name[self].debug_level greater[>] constant[0]] begin[:]
call[name[print], parameter[binary_operation[constant[ Getting YANG file name from module name: %s] <ast.Mod object at 0x7da2590d6920> name[output_file]]]]
if compare[name[level] greater[>] constant[0]] begin[:]
call[name[self].debug_print_line, parameter[name[i], name[level], call[name[content]][name[i]]]]
if call[name[self].PAGE_TAG.match, parameter[name[line]]] begin[:]
call[name[self].strip_empty_lines_backward, parameter[name[model], constant[3]]]
call[name[self].debug_print_strip_msg, parameter[name[i], call[name[content]][name[i]]]]
<ast.AugAssign object at 0x7da1b0fd06d0>
variable[i] assign[=] call[name[self].strip_empty_lines_forward, parameter[name[content], name[i]]]
if compare[name[i] less[<] call[name[len], parameter[name[content]]]] begin[:]
call[name[self].debug_print_strip_msg, parameter[name[i], call[name[content]][name[i]]]]
<ast.AugAssign object at 0x7da1b0fd1540>
variable[i] assign[=] binary_operation[call[name[self].strip_empty_lines_forward, parameter[name[content], name[i]]] - constant[1]]
if compare[name[i] greater_or_equal[>=] call[name[len], parameter[name[content]]]] begin[:]
call[name[self].error, parameter[constant[<End of File> - EOF encountered while parsing the model]]]
return[None]
variable[match] assign[=] call[name[self].CODE_BEGINS_TAG.match, parameter[name[line]]]
if name[match] begin[:]
if compare[name[level] greater[>] constant[0]] begin[:]
call[name[self].error, parameter[binary_operation[constant[Line %d - <CODE BEGINS> within a model] <ast.Mod object at 0x7da2590d6920> name[i]]]]
return[None]
if compare[name[in_model] is constant[True]] begin[:]
call[name[self].error, parameter[binary_operation[constant[Line %d - Misplaced <CODE BEGINS> or missing <CODE ENDS>] <ast.Mod object at 0x7da2590d6920> name[i]]]]
variable[in_model] assign[=] constant[True]
variable[mg] assign[=] call[name[match].groups, parameter[]]
if call[name[mg]][constant[2]] begin[:]
call[name[print], parameter[binary_operation[constant[
Extracting '%s'] <ast.Mod object at 0x7da2590d6920> call[call[name[match].groups, parameter[]]][constant[2]]]]]
variable[output_file] assign[=] call[call[name[mg]][constant[2]].strip, parameter[]]
<ast.AugAssign object at 0x7da1b10a6e00>
if compare[name[level] greater[>] constant[0]] begin[:]
call[name[self].error, parameter[constant[<End of File> - EOF encountered while parsing the model]]]
return[None]
if compare[name[in_model] is constant[True]] begin[:]
call[name[self].error, parameter[binary_operation[constant[Line %d - Missing <CODE ENDS>] <ast.Mod object at 0x7da2590d6920> name[i]]]] | keyword[def] identifier[extract_yang_model] ( identifier[self] , identifier[content] ):
literal[string]
identifier[model] =[]
identifier[output_file] = keyword[None]
identifier[in_model] = keyword[False]
identifier[example_match] = keyword[False]
identifier[i] = literal[int]
identifier[level] = literal[int]
identifier[quotes] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[content] ):
identifier[line] = identifier[content] [ identifier[i] ]
keyword[if] identifier[self] . identifier[CODE_ENDS_TAG] . identifier[match] ( identifier[line] ):
keyword[if] identifier[in_model] keyword[is] keyword[False] :
identifier[self] . identifier[warning] ( literal[string] % identifier[i] )
identifier[in_model] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[line] :
keyword[if] identifier[line] . identifier[count] ( literal[string] )% literal[int] == literal[int] :
identifier[quotes] = literal[int]
keyword[else] :
keyword[if] identifier[quotes] == literal[int] :
identifier[quotes] = literal[int]
keyword[else] :
identifier[quotes] = literal[int]
identifier[match] = identifier[self] . identifier[MODULE_STATEMENT] . identifier[match] ( identifier[line] )
keyword[if] identifier[match] :
keyword[if] identifier[quotes] == literal[int] :
keyword[if] identifier[level] > literal[int] :
identifier[self] . identifier[error] ( literal[string] % identifier[i] )
keyword[return]
keyword[if] identifier[match] . identifier[groups] ()[ literal[int] ] keyword[or] identifier[match] . identifier[groups] ()[ literal[int] ]:
identifier[self] . identifier[warning] ( literal[string] % identifier[i] )
identifier[example_match] = identifier[self] . identifier[EXAMPLE_TAG] . identifier[match] ( identifier[match] . identifier[groups] ()[ literal[int] ])
keyword[if] identifier[in_model] keyword[is] keyword[True] :
keyword[if] identifier[example_match] :
identifier[self] . identifier[error] ( literal[string] %
( identifier[i] , identifier[match] . identifier[groups] ()[ literal[int] ]))
keyword[else] :
keyword[if] keyword[not] identifier[example_match] :
identifier[self] . identifier[error] ( literal[string] %
( identifier[i] , identifier[match] . identifier[groups] ()[ literal[int] ]))
keyword[if] identifier[quotes] == literal[int] :
identifier[level] = literal[int]
keyword[if] keyword[not] identifier[output_file] keyword[and] identifier[level] == literal[int] keyword[and] identifier[quotes] == literal[int] :
identifier[print] ( literal[string] % identifier[match] . identifier[groups] ()[ literal[int] ])
identifier[output_file] = literal[string] % identifier[match] . identifier[groups] ()[ literal[int] ]. identifier[strip] ( literal[string] )
keyword[if] identifier[self] . identifier[debug_level] > literal[int] :
identifier[print] ( literal[string] % identifier[output_file] )
keyword[if] identifier[level] > literal[int] :
identifier[self] . identifier[debug_print_line] ( identifier[i] , identifier[level] , identifier[content] [ identifier[i] ])
keyword[if] identifier[self] . identifier[PAGE_TAG] . identifier[match] ( identifier[line] ):
identifier[self] . identifier[strip_empty_lines_backward] ( identifier[model] , literal[int] )
identifier[self] . identifier[debug_print_strip_msg] ( identifier[i] , identifier[content] [ identifier[i] ])
identifier[i] += literal[int]
identifier[i] = identifier[self] . identifier[strip_empty_lines_forward] ( identifier[content] , identifier[i] )
keyword[if] identifier[i] < identifier[len] ( identifier[content] ):
identifier[self] . identifier[debug_print_strip_msg] ( identifier[i] , identifier[content] [ identifier[i] ])
identifier[i] += literal[int]
keyword[else] :
identifier[self] . identifier[error] ( literal[string] )
keyword[return]
identifier[i] = identifier[self] . identifier[strip_empty_lines_forward] ( identifier[content] , identifier[i] )- literal[int]
keyword[if] identifier[i] >= identifier[len] ( identifier[content] ):
identifier[self] . identifier[error] ( literal[string] )
keyword[return]
keyword[else] :
identifier[model] . identifier[append] ([ identifier[line] , identifier[i] + literal[int] ])
identifier[counter] = identifier[Counter] ( identifier[line] )
keyword[if] identifier[quotes] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[line] keyword[and] literal[string] keyword[in] identifier[line] :
keyword[if] identifier[line] . identifier[index] ( literal[string] )> identifier[line] . identifier[rindex] ( literal[string] ) keyword[or] identifier[line] . identifier[index] ( literal[string] )< identifier[line] . identifier[index] ( literal[string] ):
identifier[level] +=( identifier[counter] [ literal[string] ]- identifier[counter] [ literal[string] ])
keyword[else] :
identifier[level] +=( identifier[counter] [ literal[string] ]- identifier[counter] [ literal[string] ])
keyword[if] identifier[level] == literal[int] :
keyword[if] identifier[self] . identifier[strict] :
keyword[if] identifier[self] . identifier[strict_examples] :
keyword[if] identifier[example_match] keyword[and] keyword[not] identifier[in_model] :
identifier[self] . identifier[write_model_to_file] ( identifier[model] , identifier[output_file] )
keyword[elif] identifier[in_model] :
identifier[self] . identifier[write_model_to_file] ( identifier[model] , identifier[output_file] )
keyword[else] :
identifier[self] . identifier[write_model_to_file] ( identifier[model] , identifier[output_file] )
identifier[self] . identifier[max_line_len] = literal[int]
identifier[model] =[]
identifier[output_file] = keyword[None]
identifier[level] = literal[int]
identifier[match] = identifier[self] . identifier[CODE_BEGINS_TAG] . identifier[match] ( identifier[line] )
keyword[if] identifier[match] :
keyword[if] identifier[level] > literal[int] :
identifier[self] . identifier[error] ( literal[string] % identifier[i] )
keyword[return]
keyword[if] identifier[in_model] keyword[is] keyword[True] :
identifier[self] . identifier[error] ( literal[string] % identifier[i] )
identifier[in_model] = keyword[True]
identifier[mg] = identifier[match] . identifier[groups] ()
keyword[if] identifier[mg] [ literal[int] ]:
identifier[print] ( literal[string] % identifier[match] . identifier[groups] ()[ literal[int] ])
identifier[output_file] = identifier[mg] [ literal[int] ]. identifier[strip] ()
keyword[else] :
keyword[if] identifier[mg] [ literal[int] ] keyword[and] identifier[mg] [ literal[int] ] keyword[is] keyword[None] :
identifier[self] . identifier[error] ( literal[string] % identifier[i] )
keyword[else] :
identifier[self] . identifier[error] ( literal[string] % identifier[i] )
identifier[i] += literal[int]
keyword[if] identifier[level] > literal[int] :
identifier[self] . identifier[error] ( literal[string] )
keyword[return]
keyword[if] identifier[in_model] keyword[is] keyword[True] :
identifier[self] . identifier[error] ( literal[string] % identifier[i] ) | def extract_yang_model(self, content):
"""
Extracts one or more YANG models from an RFC or draft text string in
which the models are specified. The function skips over page
formatting (Page Headers and Footers) and performs basic YANG module
syntax checking. In strict mode, the function also enforces the
<CODE BEGINS> / <CODE ENDS> tags - a model is not extracted unless
the tags are present.
:return: None
"""
model = []
output_file = None
in_model = False
example_match = False
i = 0
level = 0
quotes = 0
while i < len(content):
line = content[i]
# Try to match '<CODE ENDS>'
if self.CODE_ENDS_TAG.match(line):
if in_model is False:
self.warning('Line %d: misplaced <CODE ENDS>' % i) # depends on [control=['if'], data=[]]
in_model = False # depends on [control=['if'], data=[]]
if '"' in line:
if line.count('"') % 2 == 0:
quotes = 0 # depends on [control=['if'], data=[]]
elif quotes == 1:
quotes = 0 # depends on [control=['if'], data=['quotes']]
else:
quotes = 1 # depends on [control=['if'], data=['line']]
# Try to match '(sub)module <module_name> {'
match = self.MODULE_STATEMENT.match(line)
if match:
# We're already parsing a module
if quotes == 0:
if level > 0:
self.error("Line %d - 'module' statement within another module" % i)
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Check if we should enforce <CODE BEGINS> / <CODE ENDS>
# if we do enforce, we ignore models not enclosed in <CODE BEGINS> / <CODE ENDS>
if match.groups()[1] or match.groups()[4]:
self.warning('Line %d - Module name should not be enclosed in quotes' % i) # depends on [control=['if'], data=[]]
# do the module name checking, etc.
example_match = self.EXAMPLE_TAG.match(match.groups()[2])
if in_model is True:
if example_match:
self.error("Line %d - YANG module '%s' with <CODE BEGINS> and starting with 'example-'" % (i, match.groups()[2])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not example_match:
self.error("Line %d - YANG module '%s' with no <CODE BEGINS> and not starting with 'example-'" % (i, match.groups()[2])) # depends on [control=['if'], data=[]]
# now decide if we're allowed to set the level
# (i.e. signal that we're in a module) to 1 and if
# we're allowed to output the module at all with the
# strict examples flag
# if self.strict is True:
# if in_model is True:
# level = 1
# else:
# level = 1
# always set the level to 1; we decide whether or not
# to output at the end
if quotes == 0:
level = 1 # depends on [control=['if'], data=[]]
if not output_file and level == 1 and (quotes == 0):
print("\nExtracting '%s'" % match.groups()[2])
output_file = '%s.yang' % match.groups()[2].strip('"\'')
if self.debug_level > 0:
print(' Getting YANG file name from module name: %s' % output_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if level > 0:
self.debug_print_line(i, level, content[i])
# Try to match the Footer ('[Page <page_num>]')
# If match found, skip over page headers and footers
if self.PAGE_TAG.match(line):
self.strip_empty_lines_backward(model, 3)
self.debug_print_strip_msg(i, content[i])
i += 1 # Strip the
# Strip empty lines between the Footer and the next page Header
i = self.strip_empty_lines_forward(content, i)
if i < len(content):
self.debug_print_strip_msg(i, content[i])
i += 1 # Strip the next page Header # depends on [control=['if'], data=['i']]
else:
self.error('<End of File> - EOF encountered while parsing the model')
return
# Strip empty lines between the page Header and real content on the page
i = self.strip_empty_lines_forward(content, i) - 1
if i >= len(content):
self.error('<End of File> - EOF encountered while parsing the model')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
model.append([line, i + 1])
counter = Counter(line)
if quotes == 0:
if '"' in line and '}' in line:
if line.index('}') > line.rindex('"') or line.index('}') < line.index('"'):
level += counter['{'] - counter['}'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
level += counter['{'] - counter['}'] # depends on [control=['if'], data=[]]
if level == 1:
if self.strict:
if self.strict_examples:
if example_match and (not in_model):
self.write_model_to_file(model, output_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif in_model:
self.write_model_to_file(model, output_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
self.write_model_to_file(model, output_file)
self.max_line_len = 0
model = []
output_file = None
level = 0 # depends on [control=['if'], data=['level']] # depends on [control=['if'], data=['level']]
# Try to match '<CODE BEGINS>'
match = self.CODE_BEGINS_TAG.match(line)
if match:
# Found the beginning of the YANG module code section; make sure we're not parsing a model already
if level > 0:
self.error('Line %d - <CODE BEGINS> within a model' % i)
return # depends on [control=['if'], data=[]]
if in_model is True:
self.error('Line %d - Misplaced <CODE BEGINS> or missing <CODE ENDS>' % i) # depends on [control=['if'], data=[]]
in_model = True
mg = match.groups()
# Get the YANG module's file name
if mg[2]:
print("\nExtracting '%s'" % match.groups()[2])
output_file = mg[2].strip() # depends on [control=['if'], data=[]]
elif mg[0] and mg[1] is None:
self.error('Line %d - Missing file name in <CODE BEGINS>' % i) # depends on [control=['if'], data=[]]
else:
self.error('Line %d - YANG file not specified in <CODE BEGINS>' % i) # depends on [control=['if'], data=[]]
i += 1 # depends on [control=['while'], data=['i']]
if level > 0:
self.error('<End of File> - EOF encountered while parsing the model')
return # depends on [control=['if'], data=[]]
if in_model is True:
self.error('Line %d - Missing <CODE ENDS>' % i) # depends on [control=['if'], data=[]] |
def challenge():
"""Creates an enum for contest type"""
enums = dict(
ACTIVE="active",
UPCOMING="upcoming",
HIRING="hiring",
ALL="all",
SHORT="short",
)
return type('Enum', (), enums) | def function[challenge, parameter[]]:
constant[Creates an enum for contest type]
variable[enums] assign[=] call[name[dict], parameter[]]
return[call[name[type], parameter[constant[Enum], tuple[[]], name[enums]]]] | keyword[def] identifier[challenge] ():
literal[string]
identifier[enums] = identifier[dict] (
identifier[ACTIVE] = literal[string] ,
identifier[UPCOMING] = literal[string] ,
identifier[HIRING] = literal[string] ,
identifier[ALL] = literal[string] ,
identifier[SHORT] = literal[string] ,
)
keyword[return] identifier[type] ( literal[string] ,(), identifier[enums] ) | def challenge():
"""Creates an enum for contest type"""
enums = dict(ACTIVE='active', UPCOMING='upcoming', HIRING='hiring', ALL='all', SHORT='short')
return type('Enum', (), enums) |
def transferCoincidences(network, fromElementName, toElementName):
"""
Gets the coincidence matrix from one element and sets it on
another element
(using locked handles, a la nupic.bindings.research.lockHandle).
TODO: Generalize to more node types, parameter name pairs, etc.
Does not work across processes.
"""
coincidenceHandle = getLockedHandle(
runtimeElement=network.getElement(fromElementName),
# TODO: Re-purpose for use with nodes other than PMXClassifierNode.
expression="self._cd._W"
)
network.getElement(toElementName).setParameter("coincidencesAbove",
coincidenceHandle) | def function[transferCoincidences, parameter[network, fromElementName, toElementName]]:
constant[
Gets the coincidence matrix from one element and sets it on
another element
(using locked handles, a la nupic.bindings.research.lockHandle).
TODO: Generalize to more node types, parameter name pairs, etc.
Does not work across processes.
]
variable[coincidenceHandle] assign[=] call[name[getLockedHandle], parameter[]]
call[call[name[network].getElement, parameter[name[toElementName]]].setParameter, parameter[constant[coincidencesAbove], name[coincidenceHandle]]] | keyword[def] identifier[transferCoincidences] ( identifier[network] , identifier[fromElementName] , identifier[toElementName] ):
literal[string]
identifier[coincidenceHandle] = identifier[getLockedHandle] (
identifier[runtimeElement] = identifier[network] . identifier[getElement] ( identifier[fromElementName] ),
identifier[expression] = literal[string]
)
identifier[network] . identifier[getElement] ( identifier[toElementName] ). identifier[setParameter] ( literal[string] ,
identifier[coincidenceHandle] ) | def transferCoincidences(network, fromElementName, toElementName):
"""
Gets the coincidence matrix from one element and sets it on
another element
(using locked handles, a la nupic.bindings.research.lockHandle).
TODO: Generalize to more node types, parameter name pairs, etc.
Does not work across processes.
"""
# TODO: Re-purpose for use with nodes other than PMXClassifierNode.
coincidenceHandle = getLockedHandle(runtimeElement=network.getElement(fromElementName), expression='self._cd._W')
network.getElement(toElementName).setParameter('coincidencesAbove', coincidenceHandle) |
def set_trace(self, frame=None):
"""See https://github.com/python/cpython/blob/b02774f42108aaf18eb19865472c8d5cd95b5f11/Lib/bdb.py#L319-L332"""
self.reset()
if frame is None:
frame = sys._getframe().f_back
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
# Automatically proceed to next break point
self.set_continue()
sys.settrace(self.trace_dispatch) | def function[set_trace, parameter[self, frame]]:
constant[See https://github.com/python/cpython/blob/b02774f42108aaf18eb19865472c8d5cd95b5f11/Lib/bdb.py#L319-L332]
call[name[self].reset, parameter[]]
if compare[name[frame] is constant[None]] begin[:]
variable[frame] assign[=] call[name[sys]._getframe, parameter[]].f_back
while name[frame] begin[:]
name[frame].f_trace assign[=] name[self].trace_dispatch
name[self].botframe assign[=] name[frame]
variable[frame] assign[=] name[frame].f_back
call[name[self].set_continue, parameter[]]
call[name[sys].settrace, parameter[name[self].trace_dispatch]] | keyword[def] identifier[set_trace] ( identifier[self] , identifier[frame] = keyword[None] ):
literal[string]
identifier[self] . identifier[reset] ()
keyword[if] identifier[frame] keyword[is] keyword[None] :
identifier[frame] = identifier[sys] . identifier[_getframe] (). identifier[f_back]
keyword[while] identifier[frame] :
identifier[frame] . identifier[f_trace] = identifier[self] . identifier[trace_dispatch]
identifier[self] . identifier[botframe] = identifier[frame]
identifier[frame] = identifier[frame] . identifier[f_back]
identifier[self] . identifier[set_continue] ()
identifier[sys] . identifier[settrace] ( identifier[self] . identifier[trace_dispatch] ) | def set_trace(self, frame=None):
"""See https://github.com/python/cpython/blob/b02774f42108aaf18eb19865472c8d5cd95b5f11/Lib/bdb.py#L319-L332"""
self.reset()
if frame is None:
frame = sys._getframe().f_back # depends on [control=['if'], data=['frame']]
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back # depends on [control=['while'], data=[]]
# Automatically proceed to next break point
self.set_continue()
sys.settrace(self.trace_dispatch) |
def run(self, files, stack):
    """Convert raw date strings in post metadata to datetime objects.

    For every post whose metadata contains the configured date field
    (``self.date_field``), the raw value is replaced in place with the
    result of ``parse()``.

    :param files: mapping of filename -> post object; each post exposes a
        ``metadata`` mapping and supports item assignment
    :param stack: plugin stack (unused here — presumably required by the
        plugin API; confirm against the caller)
    """
    # Filenames are irrelevant for this conversion, so iterate values only.
    for post in files.values():
        if self.date_field in post.metadata:
            post[self.date_field] = parse(post[self.date_field])
constant[Convert dates]
for taget[tuple[[<ast.Name object at 0x7da1b0ab9c60>, <ast.Name object at 0x7da1b0abb430>]]] in starred[call[name[files].items, parameter[]]] begin[:]
if compare[name[self].date_field in name[post].metadata] begin[:]
call[name[post]][name[self].date_field] assign[=] call[name[parse], parameter[call[name[post]][name[self].date_field]]] | keyword[def] identifier[run] ( identifier[self] , identifier[files] , identifier[stack] ):
literal[string]
keyword[for] identifier[filename] , identifier[post] keyword[in] identifier[files] . identifier[items] ():
keyword[if] identifier[self] . identifier[date_field] keyword[in] identifier[post] . identifier[metadata] :
identifier[post] [ identifier[self] . identifier[date_field] ]= identifier[parse] ( identifier[post] [ identifier[self] . identifier[date_field] ]) | def run(self, files, stack):
"""Convert dates"""
for (filename, post) in files.items():
if self.date_field in post.metadata:
post[self.date_field] = parse(post[self.date_field]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def directives():
    '''
    Return the available Apache directives, mapping each directive name
    to its expected arguments and the places where it is valid
    (``apachectl -L``)
    CLI Example:
    .. code-block:: bash
        salt '*' apache.directives
    '''
    output = __salt__['cmd.run']('{0} -L'.format(_detect_os()))
    # Continuation lines are indented with a tab; fold them onto one line.
    output = output.replace('\n\t', '\t')
    ret = {}
    for entry in output.splitlines():
        if not entry:
            continue
        # Directive name is the text before the first tab; everything after
        # it (tab-separated) becomes the newline-joined description.
        name, _, rest = entry.partition('\t')
        ret[name] = rest.replace('\t', '\n')
    return ret
constant[
Return list of directives together with expected arguments
and places where the directive is valid (``apachectl -L``)
CLI Example:
.. code-block:: bash
salt '*' apache.directives
]
variable[cmd] assign[=] call[constant[{0} -L].format, parameter[call[name[_detect_os], parameter[]]]]
variable[ret] assign[=] dictionary[[], []]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]]
variable[out] assign[=] call[name[out].replace, parameter[constant[
], constant[ ]]]
for taget[name[line]] in starred[call[name[out].splitlines, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18f58dd80> begin[:]
continue
variable[comps] assign[=] call[name[line].split, parameter[constant[ ]]]
variable[desc] assign[=] call[constant[
].join, parameter[call[name[comps]][<ast.Slice object at 0x7da18f58c5e0>]]]
call[name[ret]][call[name[comps]][constant[0]]] assign[=] name[desc]
return[name[ret]] | keyword[def] identifier[directives] ():
literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[_detect_os] ())
identifier[ret] ={}
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] )
identifier[out] = identifier[out] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[line] keyword[in] identifier[out] . identifier[splitlines] ():
keyword[if] keyword[not] identifier[line] :
keyword[continue]
identifier[comps] = identifier[line] . identifier[split] ( literal[string] )
identifier[desc] = literal[string] . identifier[join] ( identifier[comps] [ literal[int] :])
identifier[ret] [ identifier[comps] [ literal[int] ]]= identifier[desc]
keyword[return] identifier[ret] | def directives():
"""
Return list of directives together with expected arguments
and places where the directive is valid (``apachectl -L``)
CLI Example:
.. code-block:: bash
salt '*' apache.directives
"""
cmd = '{0} -L'.format(_detect_os())
ret = {}
out = __salt__['cmd.run'](cmd)
out = out.replace('\n\t', '\t')
for line in out.splitlines():
if not line:
continue # depends on [control=['if'], data=[]]
comps = line.split('\t')
desc = '\n'.join(comps[1:])
ret[comps[0]] = desc # depends on [control=['for'], data=['line']]
return ret |
def upsample(self, factor):
    """Queue an ``upsample`` operation with the given factor.

    Returns self so calls can be chained fluently.
    """
    self.command.extend(["upsample", factor])
    return self
constant[TODO Add docstring.]
call[name[self].command.append, parameter[constant[upsample]]]
call[name[self].command.append, parameter[name[factor]]]
return[name[self]] | keyword[def] identifier[upsample] ( identifier[self] , identifier[factor] ):
literal[string]
identifier[self] . identifier[command] . identifier[append] ( literal[string] )
identifier[self] . identifier[command] . identifier[append] ( identifier[factor] )
keyword[return] identifier[self] | def upsample(self, factor):
"""TODO Add docstring."""
self.command.append('upsample')
self.command.append(factor)
return self |
def _scroll_up(self, cli):
" Scroll window up. "
info = self.render_info
if info.vertical_scroll > 0:
# TODO: not entirely correct yet in case of line wrapping and long lines.
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli)
self.vertical_scroll -= 1 | def function[_scroll_up, parameter[self, cli]]:
constant[ Scroll window up. ]
variable[info] assign[=] name[self].render_info
if compare[name[info].vertical_scroll greater[>] constant[0]] begin[:]
if compare[name[info].cursor_position.y greater_or_equal[>=] binary_operation[binary_operation[name[info].window_height - constant[1]] - name[info].configured_scroll_offsets.bottom]] begin[:]
call[name[self].content.move_cursor_up, parameter[name[cli]]]
<ast.AugAssign object at 0x7da1b26af880> | keyword[def] identifier[_scroll_up] ( identifier[self] , identifier[cli] ):
literal[string]
identifier[info] = identifier[self] . identifier[render_info]
keyword[if] identifier[info] . identifier[vertical_scroll] > literal[int] :
keyword[if] identifier[info] . identifier[cursor_position] . identifier[y] >= identifier[info] . identifier[window_height] - literal[int] - identifier[info] . identifier[configured_scroll_offsets] . identifier[bottom] :
identifier[self] . identifier[content] . identifier[move_cursor_up] ( identifier[cli] )
identifier[self] . identifier[vertical_scroll] -= literal[int] | def _scroll_up(self, cli):
""" Scroll window up. """
info = self.render_info
if info.vertical_scroll > 0:
# TODO: not entirely correct yet in case of line wrapping and long lines.
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli) # depends on [control=['if'], data=[]]
self.vertical_scroll -= 1 # depends on [control=['if'], data=[]] |
def load_config(file_, *args, **kwargs):
    """
    Does exactly the same as loads_config() but the json string is read from
    a file instead of being passed in directly.
    :param file_: Filename or a file like object with read() method.
    :param default_encoding: The encoding to be used if the file doesn't have
    a BOM prefix. Defaults to UTF-8.
    :param use_utf8_strings: Ignored in case of python3, in case of python2
    the default value of this is True. True means that the loaded json string
    should be handled as a utf-8 encoded str instead of a unicode object.
    """
    # Pop the file-reading options so only loads_config()'s kwargs remain.
    encoding = kwargs.pop('default_encoding', 'UTF-8')
    utf8_strings = kwargs.pop('use_utf8_strings', True)
    json_str = load_utf_text_file(file_, default_encoding=encoding,
                                  use_utf8_strings=utf8_strings)
    return loads_config(json_str, *args, **kwargs)
constant[
Does exactly the same as loads_config() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
]
variable[json_str] assign[=] call[name[load_utf_text_file], parameter[name[file_]]]
return[call[name[loads_config], parameter[name[json_str], <ast.Starred object at 0x7da207f001c0>]]] | keyword[def] identifier[load_config] ( identifier[file_] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[json_str] = identifier[load_utf_text_file] (
identifier[file_] ,
identifier[default_encoding] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] ),
identifier[use_utf8_strings] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] ),
)
keyword[return] identifier[loads_config] ( identifier[json_str] ,* identifier[args] ,** identifier[kwargs] ) | def load_config(file_, *args, **kwargs):
"""
Does exactly the same as loads_config() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(file_, default_encoding=kwargs.pop('default_encoding', 'UTF-8'), use_utf8_strings=kwargs.pop('use_utf8_strings', True))
return loads_config(json_str, *args, **kwargs) |
def create_namespaced_persistent_volume_claim(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_persistent_volume_claim  # noqa: E501
    create a PersistentVolumeClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_persistent_volume_claim(namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1PersistentVolumeClaim body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1PersistentVolumeClaim
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the delegate for just the deserialized response body.
    kwargs['_return_http_data_only'] = True
    delegate = self.create_namespaced_persistent_volume_claim_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous call: the delegate returns the request thread.
        return delegate(namespace, body, **kwargs)  # noqa: E501
    # Synchronous call: the delegate returns the response data directly.
    return delegate(namespace, body, **kwargs)  # noqa: E501
constant[create_namespaced_persistent_volume_claim # noqa: E501
create a PersistentVolumeClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_persistent_volume_claim(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1PersistentVolumeClaim body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1PersistentVolumeClaim
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].create_namespaced_persistent_volume_claim_with_http_info, parameter[name[namespace], name[body]]]] | keyword[def] identifier[create_namespaced_persistent_volume_claim] ( identifier[self] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[create_namespaced_persistent_volume_claim_with_http_info] ( identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[create_namespaced_persistent_volume_claim_with_http_info] ( identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def create_namespaced_persistent_volume_claim(self, namespace, body, **kwargs): # noqa: E501
"create_namespaced_persistent_volume_claim # noqa: E501\n\n create a PersistentVolumeClaim # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_namespaced_persistent_volume_claim(namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1PersistentVolumeClaim body: (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1PersistentVolumeClaim\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_persistent_volume_claim_with_http_info(namespace, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.create_namespaced_persistent_volume_claim_with_http_info(namespace, body, **kwargs) # noqa: E501
return data |
def get_foldrate(seq, secstruct):
    """Submit sequence and structural class to FOLD-RATE calculator
    (http://www.iitm.ac.in/bioinfo/fold-rate/) to calculate kinetic
    folding rate.
    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        secstruct (str): Structural class: ``all-alpha``, ``all-beta``,
            ``mixed``, or ``unknown``
    Returns:
        float: Kinetic folding rate k_f
    """
    # NOTE(review): the value is returned as a string slice of the HTML,
    # not converted to float — confirm what callers expect.
    seq = ssbio.protein.sequence.utils.cast_to_str(seq)
    url = 'http://www.iitm.ac.in/bioinfo/cgi-bin/fold-rate/foldrateCalculator.pl'
    form = urlencode({'sequence': seq, 'eqn': secstruct}).encode('ASCII')
    result = str(urlopen(url, form).read())
    # Slice a window around the sentence that reports the rate; 70 chars is
    # enough to contain the "= <value>/sec" fragment.
    start = result.find('The folding rate,')
    window = result[start:start + 70]
    # The numeric value sits between '= ' and '/sec'.
    rate = window[window.find('=') + 2:window.find('/sec')]
    return rate
constant[Submit sequence and structural class to FOLD-RATE calculator (http://www.iitm.ac.in/bioinfo/fold-rate/)
to calculate kinetic folding rate.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
secstruct (str): Structural class: `all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
Returns:
float: Kinetic folding rate k_f
]
variable[seq] assign[=] call[name[ssbio].protein.sequence.utils.cast_to_str, parameter[name[seq]]]
variable[url] assign[=] constant[http://www.iitm.ac.in/bioinfo/cgi-bin/fold-rate/foldrateCalculator.pl]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e83c70>, <ast.Constant object at 0x7da1b0e81060>], [<ast.Name object at 0x7da1b0e833d0>, <ast.Name object at 0x7da1b0e82c80>]]
variable[data] assign[=] call[name[urlencode], parameter[name[values]]]
variable[data] assign[=] call[name[data].encode, parameter[constant[ASCII]]]
variable[response] assign[=] call[name[urlopen], parameter[name[url], name[data]]]
variable[result] assign[=] call[name[str], parameter[call[name[response].read, parameter[]]]]
variable[ind] assign[=] call[name[str].find, parameter[name[result], constant[The folding rate,]]]
variable[result2] assign[=] call[name[result]][<ast.Slice object at 0x7da1b0e83340>]
variable[ind1] assign[=] call[name[str].find, parameter[name[result2], constant[=]]]
variable[ind2] assign[=] call[name[str].find, parameter[name[result2], constant[/sec]]]
variable[rate] assign[=] call[name[result2]][<ast.Slice object at 0x7da1b0e809d0>]
return[name[rate]] | keyword[def] identifier[get_foldrate] ( identifier[seq] , identifier[secstruct] ):
literal[string]
identifier[seq] = identifier[ssbio] . identifier[protein] . identifier[sequence] . identifier[utils] . identifier[cast_to_str] ( identifier[seq] )
identifier[url] = literal[string]
identifier[values] ={ literal[string] : identifier[seq] , literal[string] : identifier[secstruct] }
identifier[data] = identifier[urlencode] ( identifier[values] )
identifier[data] = identifier[data] . identifier[encode] ( literal[string] )
identifier[response] = identifier[urlopen] ( identifier[url] , identifier[data] )
identifier[result] = identifier[str] ( identifier[response] . identifier[read] ())
identifier[ind] = identifier[str] . identifier[find] ( identifier[result] , literal[string] )
identifier[result2] = identifier[result] [ identifier[ind] : identifier[ind] + literal[int] ]
identifier[ind1] = identifier[str] . identifier[find] ( identifier[result2] , literal[string] )
identifier[ind2] = identifier[str] . identifier[find] ( identifier[result2] , literal[string] )
identifier[rate] = identifier[result2] [ identifier[ind1] + literal[int] : identifier[ind2] ]
keyword[return] identifier[rate] | def get_foldrate(seq, secstruct):
"""Submit sequence and structural class to FOLD-RATE calculator (http://www.iitm.ac.in/bioinfo/fold-rate/)
to calculate kinetic folding rate.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
secstruct (str): Structural class: `all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
Returns:
float: Kinetic folding rate k_f
"""
seq = ssbio.protein.sequence.utils.cast_to_str(seq)
url = 'http://www.iitm.ac.in/bioinfo/cgi-bin/fold-rate/foldrateCalculator.pl'
values = {'sequence': seq, 'eqn': secstruct}
data = urlencode(values)
data = data.encode('ASCII')
response = urlopen(url, data)
result = str(response.read())
ind = str.find(result, 'The folding rate,')
result2 = result[ind:ind + 70]
ind1 = str.find(result2, '=')
ind2 = str.find(result2, '/sec')
rate = result2[ind1 + 2:ind2]
return rate |
def decrypt(self, pkey, cert, flags=0):
    """
    Decrypts message
    @param pkey - private key to decrypt
    @param cert - certificate of this private key (to find
        neccessary RecipientInfo
    @param flags - flags
    @returns - decrypted data
    """
    # The key must actually contain a private part and belong to the cert,
    # otherwise the RecipientInfo lookup cannot succeed.
    if not pkey.cansign:
        raise ValueError("Specified keypair has no private part")
    if pkey != cert.pubkey:
        raise ValueError("Certificate doesn't match private key")
    output = Membio()
    status = libcrypto.CMS_decrypt(self.ptr, pkey.key, cert.cert, None,
                                   output.bio, flags)
    if status <= 0:
        raise CMSError("decrypting CMS")
    return str(output)
constant[
Decrypts message
@param pkey - private key to decrypt
@param cert - certificate of this private key (to find
neccessary RecipientInfo
@param flags - flags
@returns - decrypted data
]
if <ast.UnaryOp object at 0x7da1b2839720> begin[:]
<ast.Raise object at 0x7da1b28397b0>
if compare[name[pkey] not_equal[!=] name[cert].pubkey] begin[:]
<ast.Raise object at 0x7da1b2838e20>
variable[bio] assign[=] call[name[Membio], parameter[]]
variable[res] assign[=] call[name[libcrypto].CMS_decrypt, parameter[name[self].ptr, name[pkey].key, name[cert].cert, constant[None], name[bio].bio, name[flags]]]
if compare[name[res] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b28390c0>
return[call[name[str], parameter[name[bio]]]] | keyword[def] identifier[decrypt] ( identifier[self] , identifier[pkey] , identifier[cert] , identifier[flags] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[pkey] . identifier[cansign] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[pkey] != identifier[cert] . identifier[pubkey] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[bio] = identifier[Membio] ()
identifier[res] = identifier[libcrypto] . identifier[CMS_decrypt] ( identifier[self] . identifier[ptr] , identifier[pkey] . identifier[key] , identifier[cert] . identifier[cert] , keyword[None] ,
identifier[bio] . identifier[bio] , identifier[flags] )
keyword[if] identifier[res] <= literal[int] :
keyword[raise] identifier[CMSError] ( literal[string] )
keyword[return] identifier[str] ( identifier[bio] ) | def decrypt(self, pkey, cert, flags=0):
"""
Decrypts message
@param pkey - private key to decrypt
@param cert - certificate of this private key (to find
neccessary RecipientInfo
@param flags - flags
@returns - decrypted data
"""
if not pkey.cansign:
raise ValueError('Specified keypair has no private part') # depends on [control=['if'], data=[]]
if pkey != cert.pubkey:
raise ValueError("Certificate doesn't match private key") # depends on [control=['if'], data=[]]
bio = Membio()
res = libcrypto.CMS_decrypt(self.ptr, pkey.key, cert.cert, None, bio.bio, flags)
if res <= 0:
raise CMSError('decrypting CMS') # depends on [control=['if'], data=[]]
return str(bio) |
def make_graph(schema):
    """ Construct the task graph (dag) from a given schema.
    Parses the graph schema definition and creates the task graph. Tasks are the
    vertices of the graph and the connections defined in the schema become the edges.
    A key in the schema dict represents a parent task and the value one or more
    children:
    {parent: [child]} or {parent: [child1, child2]}
    The data output of one task can be routed to a labelled input slot of successor
    tasks using a dictionary instead of a list for the children:
    {parent: {child1: 'positive', child2: 'negative'}}
    An empty slot name or None skips the creation of a labelled slot:
    {parent: {child1: '', child2: None}}
    The underlying graph library creates nodes automatically, when an edge between
    non-existing nodes is created.
    Args:
        schema (dict): A dictionary with the schema definition.
    Returns:
        DiGraph: A reference to the fully constructed graph object.
    Raises:
        DirectedAcyclicGraphUndefined: If the schema is not defined.
    """
    if schema is None:
        raise DirectedAcyclicGraphUndefined()

    def _normalize(children):
        # Normalise any supported children spec into a {child: slot} dict;
        # a {None: None} entry means "parent has no children".
        if children is None:
            return {None: None}
        if isinstance(children, list):
            return dict.fromkeys(children) if children else {None: None}
        if isinstance(children, dict):
            # An empty slot name is equivalent to no slot at all.
            return {c: (s if s != '' else None) for c, s in children.items()}
        # A bare scalar child.
        return {children: None}

    graph = nx.DiGraph()
    for parent, children in schema.items():
        for child, slot in _normalize(children).items():
            if child is None:
                # Childless parent: make sure the node still exists.
                graph.add_node(parent)
            else:
                graph.add_edge(parent, child, slot=slot)
    return graph
constant[ Construct the task graph (dag) from a given schema.
Parses the graph schema definition and creates the task graph. Tasks are the
vertices of the graph and the connections defined in the schema become the edges.
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
The data output of one task can be routed to a labelled input slot of successor
tasks using a dictionary instead of a list for the children:
{parent: {child1: 'positive', child2: 'negative'}}
An empty slot name or None skips the creation of a labelled slot:
{parent: {child1: '', child2: None}}
The underlying graph library creates nodes automatically, when an edge between
non-existing nodes is created.
Args:
schema (dict): A dictionary with the schema definition.
Returns:
DiGraph: A reference to the fully constructed graph object.
Raises:
DirectedAcyclicGraphUndefined: If the schema is not defined.
]
if compare[name[schema] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b11141f0>
variable[sanitized_schema] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1115300>, <ast.Name object at 0x7da1b1117610>]]] in starred[call[name[schema].items, parameter[]]] begin[:]
variable[child_dict] assign[=] dictionary[[], []]
if compare[name[children] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[children], name[list]]] begin[:]
if compare[call[name[len], parameter[name[children]]] greater[>] constant[0]] begin[:]
variable[child_dict] assign[=] <ast.DictComp object at 0x7da1b0f1d1e0>
call[name[sanitized_schema]][name[parent]] assign[=] name[child_dict]
variable[graph] assign[=] call[name[nx].DiGraph, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0f1c760>, <ast.Name object at 0x7da1b0f1c730>]]] in starred[call[name[sanitized_schema].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0f1c610>, <ast.Name object at 0x7da1b0f1c5e0>]]] in starred[call[name[children].items, parameter[]]] begin[:]
if compare[name[child] is_not constant[None]] begin[:]
call[name[graph].add_edge, parameter[name[parent], name[child]]]
return[name[graph]] | keyword[def] identifier[make_graph] ( identifier[schema] ):
literal[string]
keyword[if] identifier[schema] keyword[is] keyword[None] :
keyword[raise] identifier[DirectedAcyclicGraphUndefined] ()
identifier[sanitized_schema] ={}
keyword[for] identifier[parent] , identifier[children] keyword[in] identifier[schema] . identifier[items] ():
identifier[child_dict] ={}
keyword[if] identifier[children] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[children] , identifier[list] ):
keyword[if] identifier[len] ( identifier[children] )> literal[int] :
identifier[child_dict] ={ identifier[child] : keyword[None] keyword[for] identifier[child] keyword[in] identifier[children] }
keyword[else] :
identifier[child_dict] ={ keyword[None] : keyword[None] }
keyword[elif] identifier[isinstance] ( identifier[children] , identifier[dict] ):
keyword[for] identifier[child] , identifier[slot] keyword[in] identifier[children] . identifier[items] ():
identifier[child_dict] [ identifier[child] ]= identifier[slot] keyword[if] identifier[slot] != literal[string] keyword[else] keyword[None]
keyword[else] :
identifier[child_dict] ={ identifier[children] : keyword[None] }
keyword[else] :
identifier[child_dict] ={ keyword[None] : keyword[None] }
identifier[sanitized_schema] [ identifier[parent] ]= identifier[child_dict]
identifier[graph] = identifier[nx] . identifier[DiGraph] ()
keyword[for] identifier[parent] , identifier[children] keyword[in] identifier[sanitized_schema] . identifier[items] ():
keyword[for] identifier[child] , identifier[slot] keyword[in] identifier[children] . identifier[items] ():
keyword[if] identifier[child] keyword[is] keyword[not] keyword[None] :
identifier[graph] . identifier[add_edge] ( identifier[parent] , identifier[child] , identifier[slot] = identifier[slot] )
keyword[else] :
identifier[graph] . identifier[add_node] ( identifier[parent] )
keyword[return] identifier[graph] | def make_graph(schema):
""" Construct the task graph (dag) from a given schema.
Parses the graph schema definition and creates the task graph. Tasks are the
vertices of the graph and the connections defined in the schema become the edges.
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
The data output of one task can be routed to a labelled input slot of successor
tasks using a dictionary instead of a list for the children:
{parent: {child1: 'positive', child2: 'negative'}}
An empty slot name or None skips the creation of a labelled slot:
{parent: {child1: '', child2: None}}
The underlying graph library creates nodes automatically, when an edge between
non-existing nodes is created.
Args:
schema (dict): A dictionary with the schema definition.
Returns:
DiGraph: A reference to the fully constructed graph object.
Raises:
DirectedAcyclicGraphUndefined: If the schema is not defined.
"""
if schema is None:
raise DirectedAcyclicGraphUndefined() # depends on [control=['if'], data=[]]
# sanitize the input schema such that it follows the structure:
# {parent: {child_1: slot_1, child_2: slot_2, ...}, ...}
sanitized_schema = {}
for (parent, children) in schema.items():
child_dict = {}
if children is not None:
if isinstance(children, list):
if len(children) > 0:
child_dict = {child: None for child in children} # depends on [control=['if'], data=[]]
else:
child_dict = {None: None} # depends on [control=['if'], data=[]]
elif isinstance(children, dict):
for (child, slot) in children.items():
child_dict[child] = slot if slot != '' else None # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
child_dict = {children: None} # depends on [control=['if'], data=['children']]
else:
child_dict = {None: None}
sanitized_schema[parent] = child_dict # depends on [control=['for'], data=[]]
# build the graph from the sanitized schema
graph = nx.DiGraph()
for (parent, children) in sanitized_schema.items():
for (child, slot) in children.items():
if child is not None:
graph.add_edge(parent, child, slot=slot) # depends on [control=['if'], data=['child']]
else:
graph.add_node(parent) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return graph |
def _encrypt(cipher, key, data, iv, padding):
    """
    Encrypts plaintext with the requested cipher, dispatching to the
    platform-specific backend implementation.

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    if not isinstance(key, byte_cls):
        raise TypeError(pretty_message(
            '''
            key must be a byte string, not %s
            ''',
            type_name(key)
        ))

    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))

    # RC4 is a stream cipher and takes neither an IV nor padding, so the
    # remaining two validations only apply to the other (block) ciphers.
    is_block_cipher = cipher != 'rc4'

    if is_block_cipher and not isinstance(iv, byte_cls):
        raise TypeError(pretty_message(
            '''
            iv must be a byte string, not %s
            ''',
            type_name(iv)
        ))

    if is_block_cipher and not padding:
        raise ValueError('padding must be specified')

    # Select the backend implementation once, then make a single call.
    backend_encrypt = _advapi32_encrypt if _backend == 'winlegacy' else _bcrypt_encrypt
    return backend_encrypt(cipher, key, data, iv, padding)
constant[
Encrypts plaintext
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
]
if <ast.UnaryOp object at 0x7da1aff3e680> begin[:]
<ast.Raise object at 0x7da1aff3d720>
if <ast.UnaryOp object at 0x7da1aff3dd80> begin[:]
<ast.Raise object at 0x7da1aff3d630>
if <ast.BoolOp object at 0x7da1aff3dd20> begin[:]
<ast.Raise object at 0x7da1aff3d9f0>
if <ast.BoolOp object at 0x7da1aff3df90> begin[:]
<ast.Raise object at 0x7da1aff3dea0>
if compare[name[_backend] equal[==] constant[winlegacy]] begin[:]
return[call[name[_advapi32_encrypt], parameter[name[cipher], name[key], name[data], name[iv], name[padding]]]]
return[call[name[_bcrypt_encrypt], parameter[name[cipher], name[key], name[data], name[iv], name[padding]]]] | keyword[def] identifier[_encrypt] ( identifier[cipher] , identifier[key] , identifier[data] , identifier[iv] , identifier[padding] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[byte_cls] ):
keyword[raise] identifier[TypeError] ( identifier[pretty_message] (
literal[string] ,
identifier[type_name] ( identifier[key] )
))
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[byte_cls] ):
keyword[raise] identifier[TypeError] ( identifier[pretty_message] (
literal[string] ,
identifier[type_name] ( identifier[data] )
))
keyword[if] identifier[cipher] != literal[string] keyword[and] keyword[not] identifier[isinstance] ( identifier[iv] , identifier[byte_cls] ):
keyword[raise] identifier[TypeError] ( identifier[pretty_message] (
literal[string] ,
identifier[type_name] ( identifier[iv] )
))
keyword[if] identifier[cipher] != literal[string] keyword[and] keyword[not] identifier[padding] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[_backend] == literal[string] :
keyword[return] identifier[_advapi32_encrypt] ( identifier[cipher] , identifier[key] , identifier[data] , identifier[iv] , identifier[padding] )
keyword[return] identifier[_bcrypt_encrypt] ( identifier[cipher] , identifier[key] , identifier[data] , identifier[iv] , identifier[padding] ) | def _encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message('\n key must be a byte string, not %s\n ', type_name(key))) # depends on [control=['if'], data=[]]
if not isinstance(data, byte_cls):
raise TypeError(pretty_message('\n data must be a byte string, not %s\n ', type_name(data))) # depends on [control=['if'], data=[]]
if cipher != 'rc4' and (not isinstance(iv, byte_cls)):
raise TypeError(pretty_message('\n iv must be a byte string, not %s\n ', type_name(iv))) # depends on [control=['if'], data=[]]
if cipher != 'rc4' and (not padding):
raise ValueError('padding must be specified') # depends on [control=['if'], data=[]]
if _backend == 'winlegacy':
return _advapi32_encrypt(cipher, key, data, iv, padding) # depends on [control=['if'], data=[]]
return _bcrypt_encrypt(cipher, key, data, iv, padding) |
def set_commit(self, commit, logmsg=None):
    """As set_object, but restricts the type of object to be a Commit

    :raise ValueError: If commit is not a Commit object or doesn't point to
        a commit
    :return: self"""
    # Work out whether the given value resolves to a commit. Plain strings
    # are checked by rev-parsing them against the repository; the actual
    # string-to-object resolution is left to set_object below.
    if isinstance(commit, Object):
        points_at_commit = commit.type == Commit.type
    elif isinstance(commit, SymbolicReference):
        points_at_commit = commit.object.type == Commit.type
    else:
        try:
            points_at_commit = self.repo.rev_parse(commit).type == Commit.type
        except (BadObject, BadName):
            raise ValueError("Invalid object: %s" % commit)
        # END handle exception
    # END verify type

    if not points_at_commit:
        raise ValueError("Need commit, got %r" % commit)
    # END handle raise

    self.set_object(commit, logmsg)
    return self
constant[As set_object, but restricts the type of object to be a Commit
:raise ValueError: If commit is not a Commit object or doesn't point to
a commit
:return: self]
variable[invalid_type] assign[=] constant[False]
if call[name[isinstance], parameter[name[commit], name[Object]]] begin[:]
variable[invalid_type] assign[=] compare[name[commit].type not_equal[!=] name[Commit].type]
if name[invalid_type] begin[:]
<ast.Raise object at 0x7da1b1d00cd0>
call[name[self].set_object, parameter[name[commit], name[logmsg]]]
return[name[self]] | keyword[def] identifier[set_commit] ( identifier[self] , identifier[commit] , identifier[logmsg] = keyword[None] ):
literal[string]
identifier[invalid_type] = keyword[False]
keyword[if] identifier[isinstance] ( identifier[commit] , identifier[Object] ):
identifier[invalid_type] = identifier[commit] . identifier[type] != identifier[Commit] . identifier[type]
keyword[elif] identifier[isinstance] ( identifier[commit] , identifier[SymbolicReference] ):
identifier[invalid_type] = identifier[commit] . identifier[object] . identifier[type] != identifier[Commit] . identifier[type]
keyword[else] :
keyword[try] :
identifier[invalid_type] = identifier[self] . identifier[repo] . identifier[rev_parse] ( identifier[commit] ). identifier[type] != identifier[Commit] . identifier[type]
keyword[except] ( identifier[BadObject] , identifier[BadName] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[commit] )
keyword[if] identifier[invalid_type] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[commit] )
identifier[self] . identifier[set_object] ( identifier[commit] , identifier[logmsg] )
keyword[return] identifier[self] | def set_commit(self, commit, logmsg=None):
"""As set_object, but restricts the type of object to be a Commit
:raise ValueError: If commit is not a Commit object or doesn't point to
a commit
:return: self"""
# check the type - assume the best if it is a base-string
invalid_type = False
if isinstance(commit, Object):
invalid_type = commit.type != Commit.type # depends on [control=['if'], data=[]]
elif isinstance(commit, SymbolicReference):
invalid_type = commit.object.type != Commit.type # depends on [control=['if'], data=[]]
else:
try:
invalid_type = self.repo.rev_parse(commit).type != Commit.type # depends on [control=['try'], data=[]]
except (BadObject, BadName):
raise ValueError('Invalid object: %s' % commit) # depends on [control=['except'], data=[]]
# END handle exception
# END verify type
if invalid_type:
raise ValueError('Need commit, got %r' % commit) # depends on [control=['if'], data=[]]
# END handle raise
# we leave strings to the rev-parse method below
self.set_object(commit, logmsg)
return self |
def size(self):
    """Calculate and return the file size in bytes.

    The current read/write position of the underlying file is saved and
    restored, so calling this method does not disturb subsequent reads
    or writes.
    """
    old = self.__file.tell()  # remember the current position
    try:
        # whence=2 seeks relative to the end of the file; the resulting
        # offset is therefore the total size in bytes.
        self.__file.seek(0, 2)
        return self.__file.tell()
    finally:
        # Restore the position even if seek()/tell() above raised, so a
        # failure here cannot leave the file pointer at EOF.
        self.__file.seek(old)
constant[Calculate and return the file size in bytes.]
variable[old] assign[=] call[name[self].__file.tell, parameter[]]
call[name[self].__file.seek, parameter[constant[0], constant[2]]]
variable[n_bytes] assign[=] call[name[self].__file.tell, parameter[]]
call[name[self].__file.seek, parameter[name[old]]]
return[name[n_bytes]] | keyword[def] identifier[size] ( identifier[self] ):
literal[string]
identifier[old] = identifier[self] . identifier[__file] . identifier[tell] ()
identifier[self] . identifier[__file] . identifier[seek] ( literal[int] , literal[int] )
identifier[n_bytes] = identifier[self] . identifier[__file] . identifier[tell] ()
identifier[self] . identifier[__file] . identifier[seek] ( identifier[old] )
keyword[return] identifier[n_bytes] | def size(self):
"""Calculate and return the file size in bytes."""
old = self.__file.tell() # old position
self.__file.seek(0, 2) # end of file
n_bytes = self.__file.tell() # file size in bytes
self.__file.seek(old) # back to old position
return n_bytes |
def plot_movie2d_with_matplotlib(
        worlds, plane='xy', marker_size=3, figsize=6, grid=True,
        wireframe=False, species_list=None, max_count=None, angle=None, noaxis=False,
        interval=0.16, repeat_delay=3000, stride=1, rotate=None,
        legend=True, scale=1, output=None, crf=10, bitrate='1M', **kwargs):
    """
    Generate a movie projected on the given plane from the received list
    of instances of World, and show it on IPython notebook.
    This function may require ffmpeg.

    Parameters
    ----------
    worlds : list or FixedIntervalHDF5Observer
        A list of Worlds to render.
    plane : str, default 'xy'
        'xy', 'yz', 'zx'.
    marker_size : float, default 3
        Marker size for all species. Size is passed to scatter function
        as argument, s=(2 ** marker_size).
    figsize : float, default 6
        Size of the plotting area. Given in inch.
    grid : bool, default True
        Whether to draw a grid on the axes.
    species_list : array of string, default None
        If set, plot_world will not search the list of species.
    max_count : Integer, default None
        The maximum number of particles to show for each species.
        None means no limitation.
    noaxis : bool, default False
        If True, hide the axes entirely.
    interval : Integer, default 0.16
        Parameters for matplotlib.animation.ArtistAnimation.
    stride : Integer, default 1
        Stride per frame.
    legend : bool or dict, default True
        If a dict is given, it is merged into the matplotlib legend options.
    scale : float, default 1
        A length-scaling factor
    output : str, default None
        An output filename. '.webm' or '.mp4' is only accepted.
        If None, display a movie on IPython Notebook.
    crf : int, default 10
        The CRF value can be from 4-63. Lower values mean better quality.
    bitrate : str, default '1M'
        Target bitrate

    Notes
    -----
    ``wireframe``, ``angle``, ``repeat_delay``, ``rotate`` and ``**kwargs``
    are accepted for API compatibility with the 3D variant but are never
    referenced in this function's body.
    """
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation
    from ecell4_base.core import Species, FixedIntervalHDF5Observer
    from .simulation import load_world

    # Map the two plane characters to coordinate indices (x=0, y=1, z=2).
    plane = plane.lower()
    if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'):
        raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane)))
    xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2)
    yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2)

    if isinstance(worlds, FixedIntervalHDF5Observer):
        # Replay the observer's HDF5 snapshots into a list of Worlds,
        # skipping 'stride' steps per frame. A missing snapshot file is
        # covered by repeating the previous frame.
        obs = worlds
        worlds = []
        for i in range(0, obs.num_steps(), stride):
            filename = obs.filename(i)
            if os.path.isfile(filename):
                worlds.append(load_world(filename))
            elif len(worlds) >0:
                worlds.append(worlds[-1])
    else:
        worlds = worlds[:: stride]

    if species_list is None:
        # Collect serials across all frames so species appearing later
        # still get a stable color/legend entry.
        species_list = []
        for world in worlds:
            species_list.extend(
                [p.species().serial() for pid, p in world.list_particles()])
        species_list = sorted(
            set(species_list), key=species_list.index)  # XXX: pick unique ones

    # Axis limits come from the first frame only and are projected onto
    # the two selected coordinates.
    wrange = __get_range_of_world(worlds[0], scale)
    wrange = (wrange['x'], wrange['y'], wrange['z'])
    wrange = {'x': wrange[xidx], 'y': wrange[yidx]}

    fig = plt.figure(figsize=(figsize, figsize))
    ax = fig.gca()
    color_scale = matplotlib_color_scale()

    def _update_plot(i, worlds, species_list):
        # Frame callback: 'i' is the frame index supplied by FuncAnimation.
        ax.cla()
        ax.set_aspect('equal')
        ax.grid(grid)
        ax.set_xlim(*wrange['x'])
        ax.set_ylim(*wrange['y'])
        ax.set_xlabel(plane[0].upper())
        ax.set_ylabel(plane[1].upper())

        if noaxis:
            ax.set_axis_off()

        _legend = False

        world = worlds[i]
        # NOTE: the loop variable below shadows the frame index 'i'; that
        # is harmless here because 'world' was already taken above.
        for i, name in enumerate(species_list):
            offsets = ([], [])
            particles = world.list_particles_exact(Species(name))
            if len(particles) == 0:
                continue
            _legend = True

            # Randomly subsample when a species exceeds max_count.
            if max_count is not None and len(particles) > max_count:
                particles = random.sample(particles, max_count)
            for pid, p in particles:
                pos = p.position() * scale
                offsets[0].append(pos[xidx])
                offsets[1].append(pos[yidx])

            ax.scatter(
                offsets[0], offsets[1], marker='o', s=(2 ** marker_size),
                lw=0, facecolor=color_scale.get_color(name), label=name)

        # Only draw a legend when at least one species was plotted.
        if legend is not None and legend is not False and _legend:
            legend_opts = {"loc": "upper right", "shadow": True}
            if isinstance(legend, dict):
                legend_opts.update(legend)
            ax.legend(**legend_opts)
        fig.canvas.draw()

    ani = animation.FuncAnimation(
        fig, _update_plot, fargs=(worlds, species_list),
        frames=len(worlds), interval=interval, blit=False)
    # Close the figure so only the rendered animation is displayed.
    plt.close(ani._fig)
    display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate)
constant[
Generate a movie projected on the given plane from the received list
of instances of World, and show it on IPython notebook.
This function may require ffmpeg.
Parameters
----------
worlds : list or FixedIntervalHDF5Observer
A list of Worlds to render.
plane : str, default 'xy'
'xy', 'yz', 'zx'.
marker_size : float, default 3
Marker size for all species. Size is passed to scatter function
as argument, s=(2 ** marker_size).
figsize : float, default 6
Size of the plotting area. Given in inch.
species_list : array of string, default None
If set, plot_world will not search the list of species.
max_count : Integer, default None
The maximum number of particles to show for each species.
None means no limitation.
angle : tuple, default None
A tuple of view angle which is given as (azim, elev, dist).
If None, use default assumed to be (-60, 30, 10).
interval : Integer, default 0.16
Parameters for matplotlib.animation.ArtistAnimation.
stride : Integer, default 1
Stride per frame.
rotate : tuple, default None
A pair of rotation angles, elev and azim, for animation.
None means no rotation, same as (0, 0).
legend : bool, default True
scale : float, default 1
A length-scaling factor
output : str, default None
An output filename. '.webm' or '.mp4' is only accepted.
If None, display a movie on IPython Notebook.
crf : int, default 10
The CRF value can be from 4-63. Lower values mean better quality.
bitrate : str, default '1M'
Target bitrate
]
import module[matplotlib.pyplot] as alias[plt]
import module[matplotlib.animation] as alias[animation]
from relative_module[ecell4_base.core] import module[Species], module[FixedIntervalHDF5Observer]
from relative_module[simulation] import module[load_world]
variable[plane] assign[=] call[name[plane].lower, parameter[]]
if <ast.BoolOp object at 0x7da1b0e9f1f0> begin[:]
<ast.Raise object at 0x7da1b0e9edd0>
variable[xidx] assign[=] <ast.IfExp object at 0x7da1b0e9ebc0>
variable[yidx] assign[=] <ast.IfExp object at 0x7da1b0e9e890>
if call[name[isinstance], parameter[name[worlds], name[FixedIntervalHDF5Observer]]] begin[:]
variable[obs] assign[=] name[worlds]
variable[worlds] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[obs].num_steps, parameter[]], name[stride]]]] begin[:]
variable[filename] assign[=] call[name[obs].filename, parameter[name[i]]]
if call[name[os].path.isfile, parameter[name[filename]]] begin[:]
call[name[worlds].append, parameter[call[name[load_world], parameter[name[filename]]]]]
if compare[name[species_list] is constant[None]] begin[:]
variable[species_list] assign[=] list[[]]
for taget[name[world]] in starred[name[worlds]] begin[:]
call[name[species_list].extend, parameter[<ast.ListComp object at 0x7da1b0e9d7b0>]]
variable[species_list] assign[=] call[name[sorted], parameter[call[name[set], parameter[name[species_list]]]]]
variable[wrange] assign[=] call[name[__get_range_of_world], parameter[call[name[worlds]][constant[0]], name[scale]]]
variable[wrange] assign[=] tuple[[<ast.Subscript object at 0x7da1b0e9d0f0>, <ast.Subscript object at 0x7da1b0e9d060>, <ast.Subscript object at 0x7da1b0e9cfd0>]]
variable[wrange] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e9ceb0>, <ast.Constant object at 0x7da1b0e9ce80>], [<ast.Subscript object at 0x7da1b0e9ce50>, <ast.Subscript object at 0x7da1b0e9cdc0>]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[fig].gca, parameter[]]
variable[color_scale] assign[=] call[name[matplotlib_color_scale], parameter[]]
def function[_update_plot, parameter[i, worlds, species_list]]:
call[name[ax].cla, parameter[]]
call[name[ax].set_aspect, parameter[constant[equal]]]
call[name[ax].grid, parameter[name[grid]]]
call[name[ax].set_xlim, parameter[<ast.Starred object at 0x7da1b0e9c4f0>]]
call[name[ax].set_ylim, parameter[<ast.Starred object at 0x7da1b0e9c340>]]
call[name[ax].set_xlabel, parameter[call[call[name[plane]][constant[0]].upper, parameter[]]]]
call[name[ax].set_ylabel, parameter[call[call[name[plane]][constant[1]].upper, parameter[]]]]
if name[noaxis] begin[:]
call[name[ax].set_axis_off, parameter[]]
variable[_legend] assign[=] constant[False]
variable[world] assign[=] call[name[worlds]][name[i]]
for taget[tuple[[<ast.Name object at 0x7da1b0ec3b20>, <ast.Name object at 0x7da1b0ec3af0>]]] in starred[call[name[enumerate], parameter[name[species_list]]]] begin[:]
variable[offsets] assign[=] tuple[[<ast.List object at 0x7da1b0ec39a0>, <ast.List object at 0x7da1b0ec3970>]]
variable[particles] assign[=] call[name[world].list_particles_exact, parameter[call[name[Species], parameter[name[name]]]]]
if compare[call[name[len], parameter[name[particles]]] equal[==] constant[0]] begin[:]
continue
variable[_legend] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b0dbe590> begin[:]
variable[particles] assign[=] call[name[random].sample, parameter[name[particles], name[max_count]]]
for taget[tuple[[<ast.Name object at 0x7da1b0dbe6b0>, <ast.Name object at 0x7da1b0dbed10>]]] in starred[name[particles]] begin[:]
variable[pos] assign[=] binary_operation[call[name[p].position, parameter[]] * name[scale]]
call[call[name[offsets]][constant[0]].append, parameter[call[name[pos]][name[xidx]]]]
call[call[name[offsets]][constant[1]].append, parameter[call[name[pos]][name[yidx]]]]
call[name[ax].scatter, parameter[call[name[offsets]][constant[0]], call[name[offsets]][constant[1]]]]
if <ast.BoolOp object at 0x7da1b0dbe050> begin[:]
variable[legend_opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b0dbdb40>, <ast.Constant object at 0x7da1b0dbda20>], [<ast.Constant object at 0x7da1b0dbf5e0>, <ast.Constant object at 0x7da1b0dbe230>]]
if call[name[isinstance], parameter[name[legend], name[dict]]] begin[:]
call[name[legend_opts].update, parameter[name[legend]]]
call[name[ax].legend, parameter[]]
call[name[fig].canvas.draw, parameter[]]
variable[ani] assign[=] call[name[animation].FuncAnimation, parameter[name[fig], name[_update_plot]]]
call[name[plt].close, parameter[name[ani]._fig]]
call[name[display_anim], parameter[name[ani], name[output]]] | keyword[def] identifier[plot_movie2d_with_matplotlib] (
identifier[worlds] , identifier[plane] = literal[string] , identifier[marker_size] = literal[int] , identifier[figsize] = literal[int] , identifier[grid] = keyword[True] ,
identifier[wireframe] = keyword[False] , identifier[species_list] = keyword[None] , identifier[max_count] = keyword[None] , identifier[angle] = keyword[None] , identifier[noaxis] = keyword[False] ,
identifier[interval] = literal[int] , identifier[repeat_delay] = literal[int] , identifier[stride] = literal[int] , identifier[rotate] = keyword[None] ,
identifier[legend] = keyword[True] , identifier[scale] = literal[int] , identifier[output] = keyword[None] , identifier[crf] = literal[int] , identifier[bitrate] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[import] identifier[matplotlib] . identifier[animation] keyword[as] identifier[animation]
keyword[from] identifier[ecell4_base] . identifier[core] keyword[import] identifier[Species] , identifier[FixedIntervalHDF5Observer]
keyword[from] . identifier[simulation] keyword[import] identifier[load_world]
identifier[plane] = identifier[plane] . identifier[lower] ()
keyword[if] identifier[len] ( identifier[plane] )!= literal[int] keyword[or] identifier[plane] [ literal[int] ] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[or] identifier[plane] [ literal[int] ] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[plane] )))
identifier[xidx] = literal[int] keyword[if] identifier[plane] [ literal[int] ]== literal[string] keyword[else] ( literal[int] keyword[if] identifier[plane] [ literal[int] ]== literal[string] keyword[else] literal[int] )
identifier[yidx] = literal[int] keyword[if] identifier[plane] [ literal[int] ]== literal[string] keyword[else] ( literal[int] keyword[if] identifier[plane] [ literal[int] ]== literal[string] keyword[else] literal[int] )
keyword[if] identifier[isinstance] ( identifier[worlds] , identifier[FixedIntervalHDF5Observer] ):
identifier[obs] = identifier[worlds]
identifier[worlds] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[obs] . identifier[num_steps] (), identifier[stride] ):
identifier[filename] = identifier[obs] . identifier[filename] ( identifier[i] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ):
identifier[worlds] . identifier[append] ( identifier[load_world] ( identifier[filename] ))
keyword[elif] identifier[len] ( identifier[worlds] )> literal[int] :
identifier[worlds] . identifier[append] ( identifier[worlds] [- literal[int] ])
keyword[else] :
identifier[worlds] = identifier[worlds] [:: identifier[stride] ]
keyword[if] identifier[species_list] keyword[is] keyword[None] :
identifier[species_list] =[]
keyword[for] identifier[world] keyword[in] identifier[worlds] :
identifier[species_list] . identifier[extend] (
[ identifier[p] . identifier[species] (). identifier[serial] () keyword[for] identifier[pid] , identifier[p] keyword[in] identifier[world] . identifier[list_particles] ()])
identifier[species_list] = identifier[sorted] (
identifier[set] ( identifier[species_list] ), identifier[key] = identifier[species_list] . identifier[index] )
identifier[wrange] = identifier[__get_range_of_world] ( identifier[worlds] [ literal[int] ], identifier[scale] )
identifier[wrange] =( identifier[wrange] [ literal[string] ], identifier[wrange] [ literal[string] ], identifier[wrange] [ literal[string] ])
identifier[wrange] ={ literal[string] : identifier[wrange] [ identifier[xidx] ], literal[string] : identifier[wrange] [ identifier[yidx] ]}
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( identifier[figsize] , identifier[figsize] ))
identifier[ax] = identifier[fig] . identifier[gca] ()
identifier[color_scale] = identifier[matplotlib_color_scale] ()
keyword[def] identifier[_update_plot] ( identifier[i] , identifier[worlds] , identifier[species_list] ):
identifier[ax] . identifier[cla] ()
identifier[ax] . identifier[set_aspect] ( literal[string] )
identifier[ax] . identifier[grid] ( identifier[grid] )
identifier[ax] . identifier[set_xlim] (* identifier[wrange] [ literal[string] ])
identifier[ax] . identifier[set_ylim] (* identifier[wrange] [ literal[string] ])
identifier[ax] . identifier[set_xlabel] ( identifier[plane] [ literal[int] ]. identifier[upper] ())
identifier[ax] . identifier[set_ylabel] ( identifier[plane] [ literal[int] ]. identifier[upper] ())
keyword[if] identifier[noaxis] :
identifier[ax] . identifier[set_axis_off] ()
identifier[_legend] = keyword[False]
identifier[world] = identifier[worlds] [ identifier[i] ]
keyword[for] identifier[i] , identifier[name] keyword[in] identifier[enumerate] ( identifier[species_list] ):
identifier[offsets] =([],[])
identifier[particles] = identifier[world] . identifier[list_particles_exact] ( identifier[Species] ( identifier[name] ))
keyword[if] identifier[len] ( identifier[particles] )== literal[int] :
keyword[continue]
identifier[_legend] = keyword[True]
keyword[if] identifier[max_count] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[particles] )> identifier[max_count] :
identifier[particles] = identifier[random] . identifier[sample] ( identifier[particles] , identifier[max_count] )
keyword[for] identifier[pid] , identifier[p] keyword[in] identifier[particles] :
identifier[pos] = identifier[p] . identifier[position] ()* identifier[scale]
identifier[offsets] [ literal[int] ]. identifier[append] ( identifier[pos] [ identifier[xidx] ])
identifier[offsets] [ literal[int] ]. identifier[append] ( identifier[pos] [ identifier[yidx] ])
identifier[ax] . identifier[scatter] (
identifier[offsets] [ literal[int] ], identifier[offsets] [ literal[int] ], identifier[marker] = literal[string] , identifier[s] =( literal[int] ** identifier[marker_size] ),
identifier[lw] = literal[int] , identifier[facecolor] = identifier[color_scale] . identifier[get_color] ( identifier[name] ), identifier[label] = identifier[name] )
keyword[if] identifier[legend] keyword[is] keyword[not] keyword[None] keyword[and] identifier[legend] keyword[is] keyword[not] keyword[False] keyword[and] identifier[_legend] :
identifier[legend_opts] ={ literal[string] : literal[string] , literal[string] : keyword[True] }
keyword[if] identifier[isinstance] ( identifier[legend] , identifier[dict] ):
identifier[legend_opts] . identifier[update] ( identifier[legend] )
identifier[ax] . identifier[legend] (** identifier[legend_opts] )
identifier[fig] . identifier[canvas] . identifier[draw] ()
identifier[ani] = identifier[animation] . identifier[FuncAnimation] (
identifier[fig] , identifier[_update_plot] , identifier[fargs] =( identifier[worlds] , identifier[species_list] ),
identifier[frames] = identifier[len] ( identifier[worlds] ), identifier[interval] = identifier[interval] , identifier[blit] = keyword[False] )
identifier[plt] . identifier[close] ( identifier[ani] . identifier[_fig] )
identifier[display_anim] ( identifier[ani] , identifier[output] , identifier[fps] = literal[int] / identifier[interval] , identifier[crf] = identifier[crf] , identifier[bitrate] = identifier[bitrate] ) | def plot_movie2d_with_matplotlib(worlds, plane='xy', marker_size=3, figsize=6, grid=True, wireframe=False, species_list=None, max_count=None, angle=None, noaxis=False, interval=0.16, repeat_delay=3000, stride=1, rotate=None, legend=True, scale=1, output=None, crf=10, bitrate='1M', **kwargs):
"""
Generate a movie projected on the given plane from the received list
of instances of World, and show it on IPython notebook.
This function may require ffmpeg.
Parameters
----------
worlds : list or FixedIntervalHDF5Observer
A list of Worlds to render.
plane : str, default 'xy'
'xy', 'yz', 'zx'.
marker_size : float, default 3
Marker size for all species. Size is passed to scatter function
as argument, s=(2 ** marker_size).
figsize : float, default 6
Size of the plotting area. Given in inch.
species_list : array of string, default None
If set, plot_world will not search the list of species.
max_count : Integer, default None
The maximum number of particles to show for each species.
None means no limitation.
angle : tuple, default None
A tuple of view angle which is given as (azim, elev, dist).
If None, use default assumed to be (-60, 30, 10).
interval : Integer, default 0.16
Parameters for matplotlib.animation.ArtistAnimation.
stride : Integer, default 1
Stride per frame.
rotate : tuple, default None
A pair of rotation angles, elev and azim, for animation.
None means no rotation, same as (0, 0).
legend : bool, default True
scale : float, default 1
A length-scaling factor
output : str, default None
An output filename. '.webm' or '.mp4' is only accepted.
If None, display a movie on IPython Notebook.
crf : int, default 10
The CRF value can be from 4-63. Lower values mean better quality.
bitrate : str, default '1M'
Target bitrate
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from ecell4_base.core import Species, FixedIntervalHDF5Observer
from .simulation import load_world
plane = plane.lower()
if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'):
raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) # depends on [control=['if'], data=[]]
xidx = 0 if plane[0] == 'x' else 1 if plane[0] == 'y' else 2
yidx = 0 if plane[1] == 'x' else 1 if plane[1] == 'y' else 2
if isinstance(worlds, FixedIntervalHDF5Observer):
obs = worlds
worlds = []
for i in range(0, obs.num_steps(), stride):
filename = obs.filename(i)
if os.path.isfile(filename):
worlds.append(load_world(filename)) # depends on [control=['if'], data=[]]
elif len(worlds) > 0:
worlds.append(worlds[-1]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
worlds = worlds[::stride]
if species_list is None:
species_list = []
for world in worlds:
species_list.extend([p.species().serial() for (pid, p) in world.list_particles()])
species_list = sorted(set(species_list), key=species_list.index) # XXX: pick unique ones # depends on [control=['for'], data=['world']] # depends on [control=['if'], data=['species_list']]
wrange = __get_range_of_world(worlds[0], scale)
wrange = (wrange['x'], wrange['y'], wrange['z'])
wrange = {'x': wrange[xidx], 'y': wrange[yidx]}
fig = plt.figure(figsize=(figsize, figsize))
ax = fig.gca()
color_scale = matplotlib_color_scale()
def _update_plot(i, worlds, species_list):
ax.cla()
ax.set_aspect('equal')
ax.grid(grid)
ax.set_xlim(*wrange['x'])
ax.set_ylim(*wrange['y'])
ax.set_xlabel(plane[0].upper())
ax.set_ylabel(plane[1].upper())
if noaxis:
ax.set_axis_off() # depends on [control=['if'], data=[]]
_legend = False
world = worlds[i]
for (i, name) in enumerate(species_list):
offsets = ([], [])
particles = world.list_particles_exact(Species(name))
if len(particles) == 0:
continue # depends on [control=['if'], data=[]]
_legend = True
if max_count is not None and len(particles) > max_count:
particles = random.sample(particles, max_count) # depends on [control=['if'], data=[]]
for (pid, p) in particles:
pos = p.position() * scale
offsets[0].append(pos[xidx])
offsets[1].append(pos[yidx]) # depends on [control=['for'], data=[]]
ax.scatter(offsets[0], offsets[1], marker='o', s=2 ** marker_size, lw=0, facecolor=color_scale.get_color(name), label=name) # depends on [control=['for'], data=[]]
if legend is not None and legend is not False and _legend:
legend_opts = {'loc': 'upper right', 'shadow': True}
if isinstance(legend, dict):
legend_opts.update(legend) # depends on [control=['if'], data=[]]
ax.legend(**legend_opts) # depends on [control=['if'], data=[]]
fig.canvas.draw()
ani = animation.FuncAnimation(fig, _update_plot, fargs=(worlds, species_list), frames=len(worlds), interval=interval, blit=False)
plt.close(ani._fig)
display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate) |
def append_use_flags(atom, uses=None, overwrite=False):
    '''
    Append a list of use flags for a given package or DEPEND atom

    CLI Example:

    .. code-block:: bash

        salt '*' portage_config.append_use_flags "app-admin/salt[ldap, -libvirt]"
        salt '*' portage_config.append_use_flags ">=app-admin/salt-0.14.1" "['ldap', '-libvirt']"
    '''
    if not uses:
        # No explicit flags given: extract them from the atom itself,
        # e.g. "app-admin/salt[ldap,-libvirt]" -> ('ldap', '-libvirt')
        uses = portage.dep.dep_getusedeps(atom)
    if not uses:
        # Nothing to append
        return
    # Strip the "[...]" use-dep suffix, if any, so only the bare atom is
    # written to package.use.  The guard matters: when flags are passed
    # explicitly for a bare atom, rfind('[') returns -1 and the unguarded
    # atom[:-1] would silently chop off the atom's last character.
    if '[' in atom:
        atom = atom[:atom.rfind('[')]
    append_to_package_conf('use', atom=atom, flags=uses, overwrite=overwrite)
constant[
Append a list of use flags for a given package or DEPEND atom
CLI Example:
.. code-block:: bash
salt '*' portage_config.append_use_flags "app-admin/salt[ldap, -libvirt]"
salt '*' portage_config.append_use_flags ">=app-admin/salt-0.14.1" "['ldap', '-libvirt']"
]
if <ast.UnaryOp object at 0x7da1b219c3a0> begin[:]
variable[uses] assign[=] call[name[portage].dep.dep_getusedeps, parameter[name[atom]]]
if <ast.UnaryOp object at 0x7da1b210b1f0> begin[:]
return[None]
variable[atom] assign[=] call[name[atom]][<ast.Slice object at 0x7da1b210a6b0>]
call[name[append_to_package_conf], parameter[constant[use]]] | keyword[def] identifier[append_use_flags] ( identifier[atom] , identifier[uses] = keyword[None] , identifier[overwrite] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[uses] :
identifier[uses] = identifier[portage] . identifier[dep] . identifier[dep_getusedeps] ( identifier[atom] )
keyword[if] keyword[not] identifier[uses] :
keyword[return]
identifier[atom] = identifier[atom] [: identifier[atom] . identifier[rfind] ( literal[string] )]
identifier[append_to_package_conf] ( literal[string] , identifier[atom] = identifier[atom] , identifier[flags] = identifier[uses] , identifier[overwrite] = identifier[overwrite] ) | def append_use_flags(atom, uses=None, overwrite=False):
"""
Append a list of use flags for a given package or DEPEND atom
CLI Example:
.. code-block:: bash
salt '*' portage_config.append_use_flags "app-admin/salt[ldap, -libvirt]"
salt '*' portage_config.append_use_flags ">=app-admin/salt-0.14.1" "['ldap', '-libvirt']"
"""
if not uses:
uses = portage.dep.dep_getusedeps(atom) # depends on [control=['if'], data=[]]
if not uses:
return # depends on [control=['if'], data=[]]
atom = atom[:atom.rfind('[')]
append_to_package_conf('use', atom=atom, flags=uses, overwrite=overwrite) |
def qos_map_cos_mutation_cos5(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF ``config`` payload for qos/map/cos-mutation/cos5
    and passes it to the callback (``self._callback`` unless a ``callback``
    keyword is supplied).  Requires ``name`` and ``cos5`` keyword arguments.
    """
    config = ET.Element("config")
    qos_elem = ET.SubElement(config, "qos",
                             xmlns="urn:brocade.com:mgmt:brocade-qos")
    cos_mutation = ET.SubElement(ET.SubElement(qos_elem, "map"), "cos-mutation")
    # Key element first, then the leaf being configured.
    ET.SubElement(cos_mutation, "name").text = kwargs.pop('name')
    ET.SubElement(cos_mutation, "cos5").text = kwargs.pop('cos5')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
return callback(config) | def function[qos_map_cos_mutation_cos5, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[qos] assign[=] call[name[ET].SubElement, parameter[name[config], constant[qos]]]
variable[map] assign[=] call[name[ET].SubElement, parameter[name[qos], constant[map]]]
variable[cos_mutation] assign[=] call[name[ET].SubElement, parameter[name[map], constant[cos-mutation]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[cos_mutation], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[cos5] assign[=] call[name[ET].SubElement, parameter[name[cos_mutation], constant[cos5]]]
name[cos5].text assign[=] call[name[kwargs].pop, parameter[constant[cos5]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[qos_map_cos_mutation_cos5] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[qos] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[map] = identifier[ET] . identifier[SubElement] ( identifier[qos] , literal[string] )
identifier[cos_mutation] = identifier[ET] . identifier[SubElement] ( identifier[map] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[cos_mutation] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[cos5] = identifier[ET] . identifier[SubElement] ( identifier[cos_mutation] , literal[string] )
identifier[cos5] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def qos_map_cos_mutation_cos5(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
qos = ET.SubElement(config, 'qos', xmlns='urn:brocade.com:mgmt:brocade-qos')
map = ET.SubElement(qos, 'map')
cos_mutation = ET.SubElement(map, 'cos-mutation')
name_key = ET.SubElement(cos_mutation, 'name')
name_key.text = kwargs.pop('name')
cos5 = ET.SubElement(cos_mutation, 'cos5')
cos5.text = kwargs.pop('cos5')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def binary_op(data, op, other, blen=None, storage=None, create='array',
              **kwargs):
    """Compute a binary operation block-wise over `data`.

    `other` may be a scalar (including a 0-d array, which is unwrapped) or a
    sequence of the same length as `data`; anything else is rejected.
    """
    # Unwrap 0-d arrays so they are treated as plain scalars below.
    if hasattr(other, 'shape') and len(other.shape) == 0:
        other = other[()]

    if np.isscalar(other):
        # Scalar operand: apply op(block, other) to every block of `data`.
        def apply_scalar(block):
            return op(block, other)
        return map_blocks(data, apply_scalar, blen=blen, storage=storage,
                          create=create, **kwargs)

    if len(data) == len(other):
        # Equal-length operand: combine corresponding blocks pairwise.
        def apply_pairwise(a, b):
            return op(a, b)
        return map_blocks((data, other), apply_pairwise, blen=blen,
                          storage=storage, create=create, **kwargs)

    raise NotImplementedError('argument type not supported')
constant[Compute a binary operation block-wise over `data`.]
if <ast.BoolOp object at 0x7da2041dbc10> begin[:]
variable[other] assign[=] call[name[other]][tuple[[]]]
if call[name[np].isscalar, parameter[name[other]]] begin[:]
def function[f, parameter[block]]:
return[call[name[op], parameter[name[block], name[other]]]]
return[call[name[map_blocks], parameter[name[data], name[f]]]] | keyword[def] identifier[binary_op] ( identifier[data] , identifier[op] , identifier[other] , identifier[blen] = keyword[None] , identifier[storage] = keyword[None] , identifier[create] = literal[string] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[other] , literal[string] ) keyword[and] identifier[len] ( identifier[other] . identifier[shape] )== literal[int] :
identifier[other] = identifier[other] [()]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[other] ):
keyword[def] identifier[f] ( identifier[block] ):
keyword[return] identifier[op] ( identifier[block] , identifier[other] )
keyword[return] identifier[map_blocks] ( identifier[data] , identifier[f] , identifier[blen] = identifier[blen] , identifier[storage] = identifier[storage] , identifier[create] = identifier[create] ,** identifier[kwargs] )
keyword[elif] identifier[len] ( identifier[data] )== identifier[len] ( identifier[other] ):
keyword[def] identifier[f] ( identifier[a] , identifier[b] ):
keyword[return] identifier[op] ( identifier[a] , identifier[b] )
keyword[return] identifier[map_blocks] (( identifier[data] , identifier[other] ), identifier[f] , identifier[blen] = identifier[blen] , identifier[storage] = identifier[storage] , identifier[create] = identifier[create] ,
** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] ) | def binary_op(data, op, other, blen=None, storage=None, create='array', **kwargs):
"""Compute a binary operation block-wise over `data`."""
# normalise scalars
if hasattr(other, 'shape') and len(other.shape) == 0:
other = other[()] # depends on [control=['if'], data=[]]
if np.isscalar(other):
def f(block):
return op(block, other)
return map_blocks(data, f, blen=blen, storage=storage, create=create, **kwargs) # depends on [control=['if'], data=[]]
elif len(data) == len(other):
def f(a, b):
return op(a, b)
return map_blocks((data, other), f, blen=blen, storage=storage, create=create, **kwargs) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('argument type not supported') |
def Move(self, units):
    """Move some distance units from current position.

    Issues a Move command to the motor driver over SPI: one command byte
    (0x40 OR'd with a direction bit), then the absolute step count as three
    bytes, most significant byte first (a 24-bit value).

    The sign of ``units`` selects the travel direction relative to
    ``self.Dir``; positive moves send the *inverted* direction bit.
    NOTE(review): presumably 0x40 is the driver's Move opcode with the
    direction in bit 0 — confirm against the motor driver datasheet.
    """
    steps = units * self.SPU  # translate units to steps
    if steps > 0:  # look for direction
        # Positive move: command byte 0x40 with the direction bit inverted.
        spi.SPI_write_byte(self.CS, 0x40 | (~self.Dir & 1))
    else:
        # Negative (or zero) move: command byte 0x40 with the direction bit as-is.
        spi.SPI_write_byte(self.CS, 0x40 | (self.Dir & 1))
    # Magnitude only: direction was already encoded in the command byte.
    steps = int(abs(steps))
    # Send the 24-bit step count, high byte first.
    spi.SPI_write_byte(self.CS, (steps >> 16) & 0xFF)
    spi.SPI_write_byte(self.CS, (steps >> 8) & 0xFF)
    spi.SPI_write_byte(self.CS, steps & 0xFF)
constant[ Move some distance units from current position ]
variable[steps] assign[=] binary_operation[name[units] * name[self].SPU]
if compare[name[steps] greater[>] constant[0]] begin[:]
call[name[spi].SPI_write_byte, parameter[name[self].CS, binary_operation[constant[64] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[<ast.UnaryOp object at 0x7da18f00cbb0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]]]]]
variable[steps] assign[=] call[name[int], parameter[call[name[abs], parameter[name[steps]]]]]
call[name[spi].SPI_write_byte, parameter[name[self].CS, binary_operation[binary_operation[name[steps] <ast.RShift object at 0x7da2590d6a40> constant[16]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[spi].SPI_write_byte, parameter[name[self].CS, binary_operation[binary_operation[name[steps] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[spi].SPI_write_byte, parameter[name[self].CS, binary_operation[name[steps] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]] | keyword[def] identifier[Move] ( identifier[self] , identifier[units] ):
literal[string]
identifier[steps] = identifier[units] * identifier[self] . identifier[SPU]
keyword[if] identifier[steps] > literal[int] :
identifier[spi] . identifier[SPI_write_byte] ( identifier[self] . identifier[CS] , literal[int] |(~ identifier[self] . identifier[Dir] & literal[int] ))
keyword[else] :
identifier[spi] . identifier[SPI_write_byte] ( identifier[self] . identifier[CS] , literal[int] |( identifier[self] . identifier[Dir] & literal[int] ))
identifier[steps] = identifier[int] ( identifier[abs] ( identifier[steps] ))
identifier[spi] . identifier[SPI_write_byte] ( identifier[self] . identifier[CS] ,( identifier[steps] >> literal[int] )& literal[int] )
identifier[spi] . identifier[SPI_write_byte] ( identifier[self] . identifier[CS] ,( identifier[steps] >> literal[int] )& literal[int] )
identifier[spi] . identifier[SPI_write_byte] ( identifier[self] . identifier[CS] , identifier[steps] & literal[int] ) | def Move(self, units):
""" Move some distance units from current position """
steps = units * self.SPU # translate units to steps
if steps > 0: # look for direction
spi.SPI_write_byte(self.CS, 64 | ~self.Dir & 1) # depends on [control=['if'], data=[]]
else:
spi.SPI_write_byte(self.CS, 64 | self.Dir & 1)
steps = int(abs(steps))
spi.SPI_write_byte(self.CS, steps >> 16 & 255)
spi.SPI_write_byte(self.CS, steps >> 8 & 255)
spi.SPI_write_byte(self.CS, steps & 255) |
def write_additional(self, productversion, channel):
    """Write the additional information to the MAR header.

    Args:
        productversion (str): product and version string
        channel (str): channel string
    """
    # Single "additional" section carrying the channel and product/version
    # strings; size covers both strings plus fixed overhead (2 + 8 bytes).
    section = dict(
        channel=six.u(channel),
        productversion=six.u(productversion),
        size=len(channel) + len(productversion) + 2 + 8,
        padding=b'',
    )
    self.fileobj.seek(self.additional_offset)
    self.fileobj.write(extras_header.build(dict(count=1, sections=[section])))
    # Remember where the extras block ended for subsequent writes.
    self.last_offset = self.fileobj.tell()
constant[Write the additional information to the MAR header.
Args:
productversion (str): product and version string
channel (str): channel string
]
call[name[self].fileobj.seek, parameter[name[self].additional_offset]]
variable[extras] assign[=] call[name[extras_header].build, parameter[call[name[dict], parameter[]]]]
call[name[self].fileobj.write, parameter[name[extras]]]
name[self].last_offset assign[=] call[name[self].fileobj.tell, parameter[]] | keyword[def] identifier[write_additional] ( identifier[self] , identifier[productversion] , identifier[channel] ):
literal[string]
identifier[self] . identifier[fileobj] . identifier[seek] ( identifier[self] . identifier[additional_offset] )
identifier[extras] = identifier[extras_header] . identifier[build] ( identifier[dict] (
identifier[count] = literal[int] ,
identifier[sections] =[ identifier[dict] (
identifier[channel] = identifier[six] . identifier[u] ( identifier[channel] ),
identifier[productversion] = identifier[six] . identifier[u] ( identifier[productversion] ),
identifier[size] = identifier[len] ( identifier[channel] )+ identifier[len] ( identifier[productversion] )+ literal[int] + literal[int] ,
identifier[padding] = literal[string] ,
)],
))
identifier[self] . identifier[fileobj] . identifier[write] ( identifier[extras] )
identifier[self] . identifier[last_offset] = identifier[self] . identifier[fileobj] . identifier[tell] () | def write_additional(self, productversion, channel):
"""Write the additional information to the MAR header.
Args:
productversion (str): product and version string
channel (str): channel string
"""
self.fileobj.seek(self.additional_offset)
extras = extras_header.build(dict(count=1, sections=[dict(channel=six.u(channel), productversion=six.u(productversion), size=len(channel) + len(productversion) + 2 + 8, padding=b'')]))
self.fileobj.write(extras)
self.last_offset = self.fileobj.tell() |
def findcorrectionhandling(self, cls):
    """Find the proper correctionhandling given a textclass by looking in the underlying corrections where it is reused.

    Returns a ``CorrectionHandling`` member, or ``None`` when the
    corrections are inconsistent (the class appears in both a
    current/new branch and an original branch) or too complex
    (nested corrections) to analyse.
    """
    if cls == "current":
        return CorrectionHandling.CURRENT
    elif cls == "original":
        return CorrectionHandling.ORIGINAL #backward compatibility
    else:
        correctionhandling = None
        #but any other class may be anything
        #Do we have corrections at all? otherwise no need to bother
        for correction in self.select(Correction):
            #yes, in which branch is the text class found?
            if correction.hasnew():
                if correction.new().count(Correction) > 0:
                    return None #skipping text validation, correction is too complex (nested) to handle for now
                for t in correction.new().select(TextContent):
                    if t.cls == cls:
                        if correctionhandling is not None and correctionhandling != CorrectionHandling.CURRENT:
                            return None #inconsistent
                        correctionhandling = CorrectionHandling.CURRENT
                        break
            elif correction.hascurrent():
                if correction.current().count(Correction) > 0:
                    return None #skipping text validation, correction is too complex (nested) to handle for now
                for t in correction.current().select(TextContent):
                    if t.cls == cls:
                        if correctionhandling is not None and correctionhandling != CorrectionHandling.CURRENT:
                            return None #inconsistent
                        correctionhandling = CorrectionHandling.CURRENT
                        break
            #original() may coexist with new(), so this is a separate check
            if correction.hasoriginal():
                if correction.original().count(Correction) > 0:
                    return None #skipping text validation, correction is too complex (nested) to handle for now
                for t in correction.original().select(TextContent):
                    if t.cls == cls:
                        if correctionhandling is not None and correctionhandling != CorrectionHandling.ORIGINAL:
                            return None #inconsistent
                        correctionhandling = CorrectionHandling.ORIGINAL
                        break
        if correctionhandling is None:
            #well, we couldn't find our textclass in any correction, just fall back to current and let text validation fail if needed
            return CorrectionHandling.CURRENT
        #BUGFIX: return the handling we determined; previously the function
        #fell off the end here, implicitly returning None and discarding
        #the result of the whole search.
        return correctionhandling
constant[Find the proper correctionhandling given a textclass by looking in the underlying corrections where it is reused]
if compare[name[cls] equal[==] constant[current]] begin[:]
return[name[CorrectionHandling].CURRENT] | keyword[def] identifier[findcorrectionhandling] ( identifier[self] , identifier[cls] ):
literal[string]
keyword[if] identifier[cls] == literal[string] :
keyword[return] identifier[CorrectionHandling] . identifier[CURRENT]
keyword[elif] identifier[cls] == literal[string] :
keyword[return] identifier[CorrectionHandling] . identifier[ORIGINAL]
keyword[else] :
identifier[correctionhandling] = keyword[None]
keyword[for] identifier[correction] keyword[in] identifier[self] . identifier[select] ( identifier[Correction] ):
identifier[found] = keyword[False]
identifier[hastext] = keyword[False]
keyword[if] identifier[correction] . identifier[hasnew] ():
identifier[found] = keyword[True]
identifier[doublecorrection] = identifier[correction] . identifier[new] (). identifier[count] ( identifier[Correction] )> literal[int]
keyword[if] identifier[doublecorrection] : keyword[return] keyword[None]
keyword[for] identifier[t] keyword[in] identifier[correction] . identifier[new] (). identifier[select] ( identifier[TextContent] ):
identifier[hastext] = keyword[True]
keyword[if] identifier[t] . identifier[cls] == identifier[cls] :
keyword[if] identifier[correctionhandling] keyword[is] keyword[not] keyword[None] keyword[and] identifier[correctionhandling] != identifier[CorrectionHandling] . identifier[CURRENT] :
keyword[return] keyword[None]
keyword[else] :
identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[CURRENT]
keyword[break]
keyword[elif] identifier[correction] . identifier[hascurrent] ():
identifier[found] = keyword[True]
identifier[doublecorrection] = identifier[correction] . identifier[current] (). identifier[count] ( identifier[Correction] )> literal[int]
keyword[if] identifier[doublecorrection] : keyword[return] keyword[None]
keyword[for] identifier[t] keyword[in] identifier[correction] . identifier[current] (). identifier[select] ( identifier[TextContent] ):
identifier[hastext] = keyword[True]
keyword[if] identifier[t] . identifier[cls] == identifier[cls] :
keyword[if] identifier[correctionhandling] keyword[is] keyword[not] keyword[None] keyword[and] identifier[correctionhandling] != identifier[CorrectionHandling] . identifier[CURRENT] :
keyword[return] keyword[None]
keyword[else] :
identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[CURRENT]
keyword[break]
keyword[if] identifier[correction] . identifier[hasoriginal] ():
identifier[found] = keyword[True]
identifier[doublecorrection] = identifier[correction] . identifier[original] (). identifier[count] ( identifier[Correction] )> literal[int]
keyword[if] identifier[doublecorrection] : keyword[return] keyword[None]
keyword[for] identifier[t] keyword[in] identifier[correction] . identifier[original] (). identifier[select] ( identifier[TextContent] ):
identifier[hastext] = keyword[True]
keyword[if] identifier[t] . identifier[cls] == identifier[cls] :
keyword[if] identifier[correctionhandling] keyword[is] keyword[not] keyword[None] keyword[and] identifier[correctionhandling] != identifier[CorrectionHandling] . identifier[ORIGINAL] :
keyword[return] keyword[None]
keyword[else] :
identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[ORIGINAL]
keyword[break]
keyword[if] identifier[correctionhandling] keyword[is] keyword[None] :
keyword[return] identifier[CorrectionHandling] . identifier[CURRENT] | def findcorrectionhandling(self, cls):
"""Find the proper correctionhandling given a textclass by looking in the underlying corrections where it is reused"""
if cls == 'current':
return CorrectionHandling.CURRENT # depends on [control=['if'], data=[]]
elif cls == 'original':
return CorrectionHandling.ORIGINAL #backward compatibility # depends on [control=['if'], data=[]]
else:
correctionhandling = None
#but any other class may be anything
#Do we have corrections at all? otherwise no need to bother
for correction in self.select(Correction):
#yes, in which branch is the text class found?
found = False
hastext = False
if correction.hasnew():
found = True
doublecorrection = correction.new().count(Correction) > 0
if doublecorrection:
return None #skipping text validation, correction is too complex (nested) to handle for now # depends on [control=['if'], data=[]]
for t in correction.new().select(TextContent):
hastext = True
if t.cls == cls:
if correctionhandling is not None and correctionhandling != CorrectionHandling.CURRENT:
return None #inconsistent # depends on [control=['if'], data=[]]
else:
correctionhandling = CorrectionHandling.CURRENT
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] # depends on [control=['if'], data=[]]
elif correction.hascurrent():
found = True
doublecorrection = correction.current().count(Correction) > 0
if doublecorrection:
return None #skipping text validation, correction is too complex (nested) to handle for now # depends on [control=['if'], data=[]]
for t in correction.current().select(TextContent):
hastext = True
if t.cls == cls:
if correctionhandling is not None and correctionhandling != CorrectionHandling.CURRENT:
return None #inconsistent # depends on [control=['if'], data=[]]
else:
correctionhandling = CorrectionHandling.CURRENT
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] # depends on [control=['if'], data=[]]
if correction.hasoriginal():
found = True
doublecorrection = correction.original().count(Correction) > 0
if doublecorrection:
return None #skipping text validation, correction is too complex (nested) to handle for now # depends on [control=['if'], data=[]]
for t in correction.original().select(TextContent):
hastext = True
if t.cls == cls:
if correctionhandling is not None and correctionhandling != CorrectionHandling.ORIGINAL:
return None #inconsistent # depends on [control=['if'], data=[]]
else:
correctionhandling = CorrectionHandling.ORIGINAL
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['correction']]
if correctionhandling is None:
#well, we couldn't find our textclass in any correction, just fall back to current and let text validation fail if needed
return CorrectionHandling.CURRENT # depends on [control=['if'], data=[]] |
def sendMessage(self, message, thread_id=None, thread_type=ThreadType.USER):
    """
    Deprecated. Use :func:`fbchat.Client.send` instead
    """
    # Wrap the plain text in a Message object and delegate to send().
    msg = Message(text=message)
    return self.send(msg, thread_id=thread_id, thread_type=thread_type)
constant[
Deprecated. Use :func:`fbchat.Client.send` instead
]
return[call[name[self].send, parameter[call[name[Message], parameter[]]]]] | keyword[def] identifier[sendMessage] ( identifier[self] , identifier[message] , identifier[thread_id] = keyword[None] , identifier[thread_type] = identifier[ThreadType] . identifier[USER] ):
literal[string]
keyword[return] identifier[self] . identifier[send] (
identifier[Message] ( identifier[text] = identifier[message] ), identifier[thread_id] = identifier[thread_id] , identifier[thread_type] = identifier[thread_type]
) | def sendMessage(self, message, thread_id=None, thread_type=ThreadType.USER):
"""
Deprecated. Use :func:`fbchat.Client.send` instead
"""
return self.send(Message(text=message), thread_id=thread_id, thread_type=thread_type) |
def resolves_for(self, node):
    """
    Resolves this query relative to the given node.

    Args:
        node (node.Base): The node to be evaluated.

    Returns:
        bool: Whether every expected style matches the node's actual style.
    """
    self.node = node
    # Fetch all styles of interest from the node in one call.
    self.actual_styles = node.style(*self.expected_styles.keys())
    for style, expected in self.expected_styles.items():
        if not toregex(expected).search(self.actual_styles[style]):
            return False
    return True
constant[
Resolves this query relative to the given node.
Args:
node (node.Base): The node to be evaluated.
Returns:
int: The number of matches found.
]
name[self].node assign[=] name[node]
name[self].actual_styles assign[=] call[name[node].style, parameter[<ast.Starred object at 0x7da1b0328640>]]
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b03b8850>]]] | keyword[def] identifier[resolves_for] ( identifier[self] , identifier[node] ):
literal[string]
identifier[self] . identifier[node] = identifier[node]
identifier[self] . identifier[actual_styles] = identifier[node] . identifier[style] (* identifier[self] . identifier[expected_styles] . identifier[keys] ())
keyword[return] identifier[all] (
identifier[toregex] ( identifier[value] ). identifier[search] ( identifier[self] . identifier[actual_styles] [ identifier[style] ])
keyword[for] identifier[style] , identifier[value] keyword[in] identifier[iter] ( identifier[self] . identifier[expected_styles] . identifier[items] ())) | def resolves_for(self, node):
"""
Resolves this query relative to the given node.
Args:
node (node.Base): The node to be evaluated.
Returns:
int: The number of matches found.
"""
self.node = node
self.actual_styles = node.style(*self.expected_styles.keys())
return all((toregex(value).search(self.actual_styles[style]) for (style, value) in iter(self.expected_styles.items()))) |
def getPointsForInterpolation(self,EndOfPrdvP,aLvlNow):
    '''
    Finds endogenous interpolation points (x,m) for the expenditure function.

    Parameters
    ----------
    EndOfPrdvP : np.array
        Array of end-of-period marginal values.
    aLvlNow : np.array
        Array of end-of-period asset values that yield the marginal values
        in EndOfPrdvP.

    Returns
    -------
    x_for_interpolation : np.array
        Total expenditure points for interpolation.
    m_for_interpolation : np.array
        Corresponding market resource points for interpolation.
    p_for_interpolation : np.array
        Corresponding permanent income points for interpolation.
    '''
    # Get size of each state dimension.  Every 3D array built below is
    # indexed as (medical shock, permanent income, market resources).
    mCount = aLvlNow.shape[1]
    pCount = aLvlNow.shape[0]
    MedCount = self.MedShkVals.size

    # Calculate endogenous gridpoints and controls.
    # Consumption from the inverted marginal utility of consumption
    # (uPinv presumably is u'^{-1} — confirm against the solver setup),
    # replicated across the medical shock dimension.
    cLvlNow = np.tile(np.reshape(self.uPinv(EndOfPrdvP),(1,pCount,mCount)),(MedCount,1,1))
    # "Base" medical care from the inverted marginal utility of medical
    # care, priced at MedPrice; scaled below by the transformed shock.
    MedBaseNow = np.tile(np.reshape(self.uMedPinv(self.MedPrice*EndOfPrdvP),(1,pCount,mCount)),
                         (MedCount,1,1))
    # Medical need shocks raised to 1/CRRAmed, tiled over (pLvl, mLvl).
    MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals**(1.0/self.CRRAmed),(MedCount,1,1)),
                               (1,pCount,mCount))
    MedLvlNow = MedShkVals_tiled*MedBaseNow
    aLvlNow_tiled = np.tile(np.reshape(aLvlNow,(1,pCount,mCount)),(MedCount,1,1))
    # Total expenditure = consumption + spending on medical care; the
    # endogenous market-resources gridpoint is expenditure plus assets.
    xLvlNow = cLvlNow + self.MedPrice*MedLvlNow
    mLvlNow = xLvlNow + aLvlNow_tiled

    # Limiting consumption is zero as m approaches the natural borrowing
    # constraint, so prepend an x=0 point at the constraint boundary.
    x_for_interpolation = np.concatenate((np.zeros((MedCount,pCount,1)),xLvlNow),axis=-1)
    # The corresponding m point is the natural borrowing constraint at
    # each permanent income level.
    temp = np.tile(self.BoroCnstNat(np.reshape(self.pLvlGrid,(1,self.pLvlGrid.size,1))),
                   (MedCount,1,1))
    m_for_interpolation = np.concatenate((temp,mLvlNow),axis=-1)

    # Make a 3D array of permanent income for interpolation (mCount+1
    # matches the extra boundary column added above).
    p_for_interpolation = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(MedCount,1,mCount+1))

    # Store for use by cubic interpolator (note: untransformed shocks
    # here, unlike MedShkVals_tiled above).
    self.cLvlNow = cLvlNow
    self.MedLvlNow = MedLvlNow
    self.MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals,(MedCount,1,1)),(1,pCount,mCount))

    return x_for_interpolation, m_for_interpolation, p_for_interpolation
constant[
Finds endogenous interpolation points (x,m) for the expenditure function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
x_for_interpolation : np.array
Total expenditure points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
p_for_interpolation : np.array
Corresponding permanent income points for interpolation.
]
variable[mCount] assign[=] call[name[aLvlNow].shape][constant[1]]
variable[pCount] assign[=] call[name[aLvlNow].shape][constant[0]]
variable[MedCount] assign[=] name[self].MedShkVals.size
variable[cLvlNow] assign[=] call[name[np].tile, parameter[call[name[np].reshape, parameter[call[name[self].uPinv, parameter[name[EndOfPrdvP]]], tuple[[<ast.Constant object at 0x7da20e9b01f0>, <ast.Name object at 0x7da20e9b2080>, <ast.Name object at 0x7da20e9b3d30>]]]], tuple[[<ast.Name object at 0x7da20e9b2a10>, <ast.Constant object at 0x7da20e9b2f80>, <ast.Constant object at 0x7da20e9b26b0>]]]]
variable[MedBaseNow] assign[=] call[name[np].tile, parameter[call[name[np].reshape, parameter[call[name[self].uMedPinv, parameter[binary_operation[name[self].MedPrice * name[EndOfPrdvP]]]], tuple[[<ast.Constant object at 0x7da20e9b2590>, <ast.Name object at 0x7da20e9b3c70>, <ast.Name object at 0x7da20e9b20e0>]]]], tuple[[<ast.Name object at 0x7da20e9b0760>, <ast.Constant object at 0x7da20e9b1e10>, <ast.Constant object at 0x7da20e9b3340>]]]]
variable[MedShkVals_tiled] assign[=] call[name[np].tile, parameter[call[name[np].reshape, parameter[binary_operation[name[self].MedShkVals ** binary_operation[constant[1.0] / name[self].CRRAmed]], tuple[[<ast.Name object at 0x7da20e9b1360>, <ast.Constant object at 0x7da20e9b3ca0>, <ast.Constant object at 0x7da20e9b1150>]]]], tuple[[<ast.Constant object at 0x7da20e9b2e90>, <ast.Name object at 0x7da20e9b15a0>, <ast.Name object at 0x7da20e9b2bc0>]]]]
variable[MedLvlNow] assign[=] binary_operation[name[MedShkVals_tiled] * name[MedBaseNow]]
variable[aLvlNow_tiled] assign[=] call[name[np].tile, parameter[call[name[np].reshape, parameter[name[aLvlNow], tuple[[<ast.Constant object at 0x7da20e9b3700>, <ast.Name object at 0x7da20e9b2800>, <ast.Name object at 0x7da20e9b2cb0>]]]], tuple[[<ast.Name object at 0x7da20e9b18d0>, <ast.Constant object at 0x7da20e9b0070>, <ast.Constant object at 0x7da20e9b3070>]]]]
variable[xLvlNow] assign[=] binary_operation[name[cLvlNow] + binary_operation[name[self].MedPrice * name[MedLvlNow]]]
variable[mLvlNow] assign[=] binary_operation[name[xLvlNow] + name[aLvlNow_tiled]]
variable[x_for_interpolation] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Call object at 0x7da20e9b03a0>, <ast.Name object at 0x7da20e9b3c10>]]]]
variable[temp] assign[=] call[name[np].tile, parameter[call[name[self].BoroCnstNat, parameter[call[name[np].reshape, parameter[name[self].pLvlGrid, tuple[[<ast.Constant object at 0x7da204346500>, <ast.Attribute object at 0x7da204345720>, <ast.Constant object at 0x7da2043464a0>]]]]]], tuple[[<ast.Name object at 0x7da204344040>, <ast.Constant object at 0x7da204346830>, <ast.Constant object at 0x7da2043456c0>]]]]
variable[m_for_interpolation] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Name object at 0x7da204345870>, <ast.Name object at 0x7da2043460b0>]]]]
variable[p_for_interpolation] assign[=] call[name[np].tile, parameter[call[name[np].reshape, parameter[name[self].pLvlGrid, tuple[[<ast.Constant object at 0x7da2043452a0>, <ast.Name object at 0x7da2043459f0>, <ast.Constant object at 0x7da204344910>]]]], tuple[[<ast.Name object at 0x7da204344c40>, <ast.Constant object at 0x7da204347100>, <ast.BinOp object at 0x7da2043447f0>]]]]
name[self].cLvlNow assign[=] name[cLvlNow]
name[self].MedLvlNow assign[=] name[MedLvlNow]
name[self].MedShkVals_tiled assign[=] call[name[np].tile, parameter[call[name[np].reshape, parameter[name[self].MedShkVals, tuple[[<ast.Name object at 0x7da204345540>, <ast.Constant object at 0x7da2043476d0>, <ast.Constant object at 0x7da204346e00>]]]], tuple[[<ast.Constant object at 0x7da2043445b0>, <ast.Name object at 0x7da204345f30>, <ast.Name object at 0x7da204346ec0>]]]]
return[tuple[[<ast.Name object at 0x7da1b088df30>, <ast.Name object at 0x7da1b088e3b0>, <ast.Name object at 0x7da1b088e3e0>]]] | keyword[def] identifier[getPointsForInterpolation] ( identifier[self] , identifier[EndOfPrdvP] , identifier[aLvlNow] ):
literal[string]
identifier[mCount] = identifier[aLvlNow] . identifier[shape] [ literal[int] ]
identifier[pCount] = identifier[aLvlNow] . identifier[shape] [ literal[int] ]
identifier[MedCount] = identifier[self] . identifier[MedShkVals] . identifier[size]
identifier[cLvlNow] = identifier[np] . identifier[tile] ( identifier[np] . identifier[reshape] ( identifier[self] . identifier[uPinv] ( identifier[EndOfPrdvP] ),( literal[int] , identifier[pCount] , identifier[mCount] )),( identifier[MedCount] , literal[int] , literal[int] ))
identifier[MedBaseNow] = identifier[np] . identifier[tile] ( identifier[np] . identifier[reshape] ( identifier[self] . identifier[uMedPinv] ( identifier[self] . identifier[MedPrice] * identifier[EndOfPrdvP] ),( literal[int] , identifier[pCount] , identifier[mCount] )),
( identifier[MedCount] , literal[int] , literal[int] ))
identifier[MedShkVals_tiled] = identifier[np] . identifier[tile] ( identifier[np] . identifier[reshape] ( identifier[self] . identifier[MedShkVals] **( literal[int] / identifier[self] . identifier[CRRAmed] ),( identifier[MedCount] , literal[int] , literal[int] )),
( literal[int] , identifier[pCount] , identifier[mCount] ))
identifier[MedLvlNow] = identifier[MedShkVals_tiled] * identifier[MedBaseNow]
identifier[aLvlNow_tiled] = identifier[np] . identifier[tile] ( identifier[np] . identifier[reshape] ( identifier[aLvlNow] ,( literal[int] , identifier[pCount] , identifier[mCount] )),( identifier[MedCount] , literal[int] , literal[int] ))
identifier[xLvlNow] = identifier[cLvlNow] + identifier[self] . identifier[MedPrice] * identifier[MedLvlNow]
identifier[mLvlNow] = identifier[xLvlNow] + identifier[aLvlNow_tiled]
identifier[x_for_interpolation] = identifier[np] . identifier[concatenate] (( identifier[np] . identifier[zeros] (( identifier[MedCount] , identifier[pCount] , literal[int] )), identifier[xLvlNow] ), identifier[axis] =- literal[int] )
identifier[temp] = identifier[np] . identifier[tile] ( identifier[self] . identifier[BoroCnstNat] ( identifier[np] . identifier[reshape] ( identifier[self] . identifier[pLvlGrid] ,( literal[int] , identifier[self] . identifier[pLvlGrid] . identifier[size] , literal[int] ))),
( identifier[MedCount] , literal[int] , literal[int] ))
identifier[m_for_interpolation] = identifier[np] . identifier[concatenate] (( identifier[temp] , identifier[mLvlNow] ), identifier[axis] =- literal[int] )
identifier[p_for_interpolation] = identifier[np] . identifier[tile] ( identifier[np] . identifier[reshape] ( identifier[self] . identifier[pLvlGrid] ,( literal[int] , identifier[pCount] , literal[int] )),( identifier[MedCount] , literal[int] , identifier[mCount] + literal[int] ))
identifier[self] . identifier[cLvlNow] = identifier[cLvlNow]
identifier[self] . identifier[MedLvlNow] = identifier[MedLvlNow]
identifier[self] . identifier[MedShkVals_tiled] = identifier[np] . identifier[tile] ( identifier[np] . identifier[reshape] ( identifier[self] . identifier[MedShkVals] ,( identifier[MedCount] , literal[int] , literal[int] )),( literal[int] , identifier[pCount] , identifier[mCount] ))
keyword[return] identifier[x_for_interpolation] , identifier[m_for_interpolation] , identifier[p_for_interpolation] | def getPointsForInterpolation(self, EndOfPrdvP, aLvlNow):
"""
Finds endogenous interpolation points (x,m) for the expenditure function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
x_for_interpolation : np.array
Total expenditure points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
p_for_interpolation : np.array
Corresponding permanent income points for interpolation.
"""
# Get size of each state dimension
mCount = aLvlNow.shape[1]
pCount = aLvlNow.shape[0]
MedCount = self.MedShkVals.size
# Calculate endogenous gridpoints and controls
cLvlNow = np.tile(np.reshape(self.uPinv(EndOfPrdvP), (1, pCount, mCount)), (MedCount, 1, 1))
MedBaseNow = np.tile(np.reshape(self.uMedPinv(self.MedPrice * EndOfPrdvP), (1, pCount, mCount)), (MedCount, 1, 1))
MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals ** (1.0 / self.CRRAmed), (MedCount, 1, 1)), (1, pCount, mCount))
MedLvlNow = MedShkVals_tiled * MedBaseNow
aLvlNow_tiled = np.tile(np.reshape(aLvlNow, (1, pCount, mCount)), (MedCount, 1, 1))
xLvlNow = cLvlNow + self.MedPrice * MedLvlNow
mLvlNow = xLvlNow + aLvlNow_tiled
# Limiting consumption is zero as m approaches the natural borrowing constraint
x_for_interpolation = np.concatenate((np.zeros((MedCount, pCount, 1)), xLvlNow), axis=-1)
temp = np.tile(self.BoroCnstNat(np.reshape(self.pLvlGrid, (1, self.pLvlGrid.size, 1))), (MedCount, 1, 1))
m_for_interpolation = np.concatenate((temp, mLvlNow), axis=-1)
# Make a 3D array of permanent income for interpolation
p_for_interpolation = np.tile(np.reshape(self.pLvlGrid, (1, pCount, 1)), (MedCount, 1, mCount + 1))
# Store for use by cubic interpolator
self.cLvlNow = cLvlNow
self.MedLvlNow = MedLvlNow
self.MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals, (MedCount, 1, 1)), (1, pCount, mCount))
return (x_for_interpolation, m_for_interpolation, p_for_interpolation) |
def findpath(target, start=os.path.curdir):
r"""
Find a path from start to target where target is relative to start.
>>> orig_wd = os.getcwd()
>>> os.chdir('c:\\windows') # so we know what the working directory is
>>> findpath('d:\\')
'd:\\'
>>> findpath('d:\\', 'c:\\windows')
'd:\\'
>>> findpath('\\bar', 'd:\\')
'd:\\bar'
>>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
'd:\\bar'
>>> findpath('bar', 'd:\\foo')
'd:\\foo\\bar'
>>> findpath('bar\\baz', 'd:\\foo')
'd:\\foo\\bar\\baz'
>>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
'd:\\baz'
Since we're on the C drive, findpath may be allowed to return
relative paths for targets on the same drive. I use abspath to
confirm that the ultimate target is what we expect.
>>> os.path.abspath(findpath('\\bar'))
'c:\\bar'
>>> os.path.abspath(findpath('bar'))
'c:\\windows\\bar'
>>> findpath('..', 'd:\\foo\\bar')
'd:\\foo'
>>> findpath('..\\bar', 'd:\\foo')
'd:\\bar'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\')
'd:\\'
restore the original working directory
>>> os.chdir(orig_wd)
"""
return os.path.normpath(os.path.join(start, target)) | def function[findpath, parameter[target, start]]:
constant[
Find a path from start to target where target is relative to start.
>>> orig_wd = os.getcwd()
>>> os.chdir('c:\\windows') # so we know what the working directory is
>>> findpath('d:\\')
'd:\\'
>>> findpath('d:\\', 'c:\\windows')
'd:\\'
>>> findpath('\\bar', 'd:\\')
'd:\\bar'
>>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
'd:\\bar'
>>> findpath('bar', 'd:\\foo')
'd:\\foo\\bar'
>>> findpath('bar\\baz', 'd:\\foo')
'd:\\foo\\bar\\baz'
>>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
'd:\\baz'
Since we're on the C drive, findpath may be allowed to return
relative paths for targets on the same drive. I use abspath to
confirm that the ultimate target is what we expect.
>>> os.path.abspath(findpath('\\bar'))
'c:\\bar'
>>> os.path.abspath(findpath('bar'))
'c:\\windows\\bar'
>>> findpath('..', 'd:\\foo\\bar')
'd:\\foo'
>>> findpath('..\\bar', 'd:\\foo')
'd:\\bar'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\')
'd:\\'
restore the original working directory
>>> os.chdir(orig_wd)
]
return[call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[start], name[target]]]]]] | keyword[def] identifier[findpath] ( identifier[target] , identifier[start] = identifier[os] . identifier[path] . identifier[curdir] ):
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[start] , identifier[target] )) | def findpath(target, start=os.path.curdir):
"""
Find a path from start to target where target is relative to start.
>>> orig_wd = os.getcwd()
>>> os.chdir('c:\\\\windows') # so we know what the working directory is
>>> findpath('d:\\\\')
'd:\\\\'
>>> findpath('d:\\\\', 'c:\\\\windows')
'd:\\\\'
>>> findpath('\\\\bar', 'd:\\\\')
'd:\\\\bar'
>>> findpath('\\\\bar', 'd:\\\\foo') # fails with '\\\\bar'
'd:\\\\bar'
>>> findpath('bar', 'd:\\\\foo')
'd:\\\\foo\\\\bar'
>>> findpath('bar\\\\baz', 'd:\\\\foo')
'd:\\\\foo\\\\bar\\\\baz'
>>> findpath('\\\\baz', 'd:\\\\foo\\\\bar') # fails with '\\\\baz'
'd:\\\\baz'
Since we're on the C drive, findpath may be allowed to return
relative paths for targets on the same drive. I use abspath to
confirm that the ultimate target is what we expect.
>>> os.path.abspath(findpath('\\\\bar'))
'c:\\\\bar'
>>> os.path.abspath(findpath('bar'))
'c:\\\\windows\\\\bar'
>>> findpath('..', 'd:\\\\foo\\\\bar')
'd:\\\\foo'
>>> findpath('..\\\\bar', 'd:\\\\foo')
'd:\\\\bar'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\\\')
'd:\\\\'
restore the original working directory
>>> os.chdir(orig_wd)
"""
return os.path.normpath(os.path.join(start, target)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.