| docstring (string, lengths 52–499) | function (string, lengths 67–35.2k) | __index_level_0__ (int64, 52.6k–1.16M) |
|---|---|---|
checks the type (type name) of a uniquely named object (the name is not fully qualified)
Args:
root: start of search
name: name of object
class_name: the name of the type to be checked
Returns:
nothing (if not unique, raises an error)
|
def check_unique_named_object_has_class(root, name, class_name):
assert class_name == get_unique_named_object(root, name).__class__.__name__
| 353,504
|
get a list of all models stored within a model
(including the owning model).
Args:
model: the owning model
Returns:
a list of all models
|
def get_all_models_including_attached_models(model):
if (hasattr(model, "_tx_model_repository")):
models = list(
model._tx_model_repository.all_models.filename_to_model.values())
if model not in models:
models.append(model)
else:
models = [model]
return models
| 353,506
|
create a new repo for a model
Args:
all_models: models to be added to this new repository.
|
def __init__(self, all_models=None):
self.local_models = ModelRepository() # used for current model
if all_models:
self.all_models = all_models # used to reuse already loaded models
else:
self.all_models = ModelRepository()
| 353,509
|
load all models matching a file pattern and add them to all relevant objects
Args:
filename_pattern: glob pattern selecting the model files to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
glob_args: arguments passed to the glob.glob function.
Returns:
the list of loaded models
|
def load_models_using_filepattern(
self, filename_pattern, model, glob_args, is_main_model=False,
encoding='utf-8', add_to_local_models=True):
if (model):
self.update_model_in_repo_based_on_filename(model)
filenames = glob.glob(filename_pattern, **glob_args)
if len(filenames) == 0:
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename_pattern)
loaded_models = []
for filename in filenames:
the_metamodel = MetaModelProvider.get_metamodel(model, filename)
loaded_models.append(
self.load_model(the_metamodel, filename, is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models))
return loaded_models
| 353,510
|
load a model found via the search path and add it to all relevant objects
Args:
filename: the model file to be loaded (looked up in the search path)
model: model holding the loaded models in its _tx_model_repository
field (may be None).
search_path: list of search directories.
Returns:
the loaded model
|
def load_model_using_search_path(
self, filename, model, search_path, is_main_model=False,
encoding='utf8', add_to_local_models=True):
if (model):
self.update_model_in_repo_based_on_filename(model)
for the_path in search_path:
full_filename = join(the_path, filename)
# print(full_filename)
if exists(full_filename):
the_metamodel = \
MetaModelProvider.get_metamodel(model, full_filename)
return self.load_model(the_metamodel,
full_filename,
is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models)
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename)
| 353,511
|
load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model
|
def load_model(
self, the_metamodel, filename, is_main_model, encoding='utf-8',
add_to_local_models=True):
if not self.local_models.has_model(filename):
if self.all_models.has_model(filename):
new_model = self.all_models.filename_to_model[filename]
else:
# print("LOADING {}".format(filename))
# all models loaded here get their references resolved from the
# root model
new_model = the_metamodel.internal_model_from_file(
filename, pre_ref_resolution_callback=lambda
other_model: self.pre_ref_resolution_callback(other_model),
is_main_model=is_main_model, encoding=encoding)
self.all_models.filename_to_model[filename] = new_model
# print("ADDING {}".format(filename))
if add_to_local_models:
self.local_models.filename_to_model[filename] = new_model
assert self.all_models.has_model(filename) # to be sure...
return self.all_models.filename_to_model[filename]
| 353,512
|
Adds a model to the repo (not initially visible)
Args:
model: the model to be added. If the model
has no filename, a name is invented
Returns: the filename of the model added to the repo
|
def update_model_in_repo_based_on_filename(self, model):
if model._tx_filename is None:
for fn in self.all_models.filename_to_model:
if self.all_models.filename_to_model[fn] == model:
return fn
i = 0
while self.all_models.has_model("anonymous{}".format(i)):
i += 1
myfilename = "anonymous{}".format(i)
self.all_models.filename_to_model[myfilename] = model
else:
myfilename = model._tx_filename
if (not self.all_models.has_model(myfilename)):
self.all_models.filename_to_model[myfilename] = model
return myfilename
| 353,514
|
(internal: used to store a model after parsing into the repository)
Args:
other_model: the parsed model
Returns:
nothing
|
def pre_ref_resolution_callback(self, other_model):
# print("PRE-CALLBACK{}".format(filename))
filename = other_model._tx_filename
assert (filename)
other_model._tx_model_repository = \
GlobalModelRepository(self.all_models)
self.all_models.filename_to_model[filename] = other_model
| 353,515
|
Finds the first object of the given type up the parent chain.
If no parent of the given type exists, None is returned.
Args:
typ(str or python class): The type of the model object we are
looking for.
obj (model object): Python model object which is the start of the
search process.
|
def get_parent_of_type(typ, obj):
if type(typ) is not text:
typ = typ.__name__
while hasattr(obj, 'parent'):
obj = obj.parent
if obj.__class__.__name__ == typ:
return obj
| 353,546
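A minimal behavioural sketch, assuming `get_parent_of_type` above (and the `text` type it uses) is in scope; the toy classes and `parent` attributes are illustrative, not part of textX.

```python
# Toy object tree with explicit parent links (illustrative only).
class Model: pass
class Package: pass
class Entity: pass

model, pkg, ent = Model(), Package(), Entity()
pkg.parent = model
ent.parent = pkg

# Walks up the parent chain and compares class names.
print(get_parent_of_type(Package, ent) is pkg)    # True
print(get_parent_of_type(Model, ent) is model)    # True
print(get_parent_of_type(Entity, ent))            # None -- the start object itself is not considered
```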
|
Returns a list of all model elements for which the given 'decider' callable
returns True, starting from model element 'root'. The search process will
follow containment links only. Non-containing references shall not be followed.
Args:
decider(obj): a callable returning True if the object is of interest.
root (model object): Python model object which is the start of the
search process.
|
def get_children(decider, root):
collected = []
def follow(elem):
if elem in collected:
return
# Use meta-model to search for all contained child elements.
cls = elem.__class__
if hasattr(cls, '_tx_attrs') and decider(elem):
collected.append(elem)
if hasattr(cls, '_tx_attrs'):
for attr_name, attr in cls._tx_attrs.items():
# Follow only attributes with containment semantics
if attr.cont:
if attr.mult in (MULT_ONE, MULT_OPTIONAL):
new_elem = getattr(elem, attr_name)
if new_elem:
follow(new_elem)
else:
new_elem_list = getattr(elem, attr_name)
if new_elem_list:
for new_elem in new_elem_list:
follow(new_elem)
follow(root)
return collected
| 353,547
|
Returns a list of all model elements of type 'typ' starting from model
element 'root'. The search process will follow containment links only.
Non-containing references shall not be followed.
Args:
typ(str or python class): The type of the model object we are
looking for.
root (model object): Python model object which is the start of the
search process.
|
def get_children_of_type(typ, root):
if type(typ) is not text:
typ = typ.__name__
return get_children(lambda x: x.__class__.__name__ == typ, root)
| 353,548
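A small usage sketch for the two helpers above with a throw-away textX grammar; the grammar and model text are made up for illustration, and `get_children`/`get_children_of_type` are assumed importable from `textx` (the scope-provider code above already imports `get_children` that way).

```python
from textx import metamodel_from_str, get_children, get_children_of_type

grammar = r'''
Model: entities+=Entity;
Entity: 'entity' name=ID '{' attrs+=Attr '}';
Attr: name=ID ':' type=ID ';';
'''
mm = metamodel_from_str(grammar)
model = mm.model_from_str('entity A { x: int; } entity B { y: str; z: int; }')

# Decider-based search: every contained element that has a 'name' attribute.
named = get_children(lambda e: hasattr(e, 'name'), model)
print(len(named))  # 5 -- entities A and B plus their three attributes

# Type-name based search built on top of get_children.
print([a.name for a in get_children_of_type('Attr', model)])  # ['x', 'y', 'z']
```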
|
the default scope provider
Args:
obj: unused (used for multi_metamodel_support)
attr: unused
obj_ref: the cross reference to be resolved
Returns:
the resolved reference or None
|
def __call__(self, obj, attr, obj_ref):
from textx.const import RULE_COMMON, RULE_ABSTRACT
from textx.model import ObjCrossRef
from textx.scoping.tools import get_parser
if obj_ref is None:
return None # an error! (see model.py: resolve_refs (TODO check)
assert type(obj_ref) is ObjCrossRef, type(obj_ref)
if get_parser(obj).debug:
get_parser(obj).dprint("Resolving obj crossref: {}:{}"
.format(obj_ref.cls, obj_ref.obj_name))
def _inner_resolve_link_rule_ref(cls, obj_name):
if cls._tx_type is RULE_ABSTRACT:
for inherited in cls._tx_inh_by:
result = _inner_resolve_link_rule_ref(inherited,
obj_name)
if result:
return result
elif cls._tx_type == RULE_COMMON:
# TODO make this code exchangeable
# allow to know the current attribute (model location for
# namespace) and to navigate through the whole model...
# OR (with another scope provider) to make custom lookups in
# the model
#
# Scopeprovider
# - needs: .current reference (in the model)
# .the model (?)
# - provides: the resolved object or None
if id(cls) in get_parser(obj)._instances:
objs = get_parser(obj)._instances[id(cls)]
return objs.get(obj_name)
if self.multi_metamodel_support:
from textx import get_model, get_children
from textx import textx_isinstance
result_lst = get_children(
lambda x:
hasattr(x, "name") and x.name == obj_ref.obj_name
and textx_isinstance(x, obj_ref.cls), get_model(obj))
if len(result_lst) == 1:
result = result_lst[0]
elif len(result_lst) > 1:
line, col = get_parser(obj).pos_to_linecol(obj_ref.position)
raise TextXSemanticError(
"name {} is not unique.".format(obj_ref.obj_name),
line=line, col=col, filename=get_model(obj)._tx_filename)
else:
result = None
else:
result = _inner_resolve_link_rule_ref(obj_ref.cls,
obj_ref.obj_name)
if result:
return result
return None
| 353,557
|
find an object by its fully qualified name.
Use this callable as scope_provider in a meta-model:
my_metamodel.register_scope_provider(
{"*.*":textx.scoping.providers.FQN})
Args:
current_obj: the model object the reference originates from
(a rule instance)
attr: the referencing attribute (unused)
obj_ref: ObjCrossRef to be resolved
Returns: None or the referenced object
|
def __call__(self, current_obj, attr, obj_ref):
def _find_obj_fqn(p, fqn_name, cls):
def find_obj(parent, name):
if parent is not current_obj and \
self.scope_redirection_logic is not None:
from textx.scoping import Postponed
res = self.scope_redirection_logic(parent)
assert res is not None, \
"scope_redirection_logic must not return None"
if type(res) is Postponed:
return res
for m in res:
return_value = find_obj(m, name)
if return_value is not None:
return return_value
for attr in [a for a in parent.__dict__ if
not a.startswith('__') and not
a.startswith('_tx_') and not
callable(getattr(parent, a))]:
obj = getattr(parent, attr)
if isinstance(obj, (list, tuple)):
for innerobj in obj:
if hasattr(innerobj, "name") \
and innerobj.name == name:
return innerobj
else:
if hasattr(obj, "name") and obj.name == name:
return obj
return None
for n in fqn_name.split('.'):
obj = find_obj(p, n)
if obj:
if type(obj) is Postponed:
return obj
p = obj
else:
return None
from textx import textx_isinstance
if textx_isinstance(obj, cls):
return p
else:
return None
def _find_referenced_obj(p, name, cls):
ret = _find_obj_fqn(p, name, cls)
if ret:
return ret
while hasattr(p, "parent"):
p = p.parent
ret = _find_obj_fqn(p, name, cls)
if ret:
return ret
# else continue to next parent or return None
from textx.model import ObjCrossRef
assert type(obj_ref) is ObjCrossRef, type(obj_ref)
obj_cls, obj_name = obj_ref.cls, obj_ref.obj_name
return _find_referenced_obj(current_obj, obj_name, obj_cls)
| 353,558
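A hedged registration sketch following the docstring above; the grammar, the model text, and the `FQNID` match rule are illustrative, and `FQN` is assumed to be instantiable from `textx.scoping.providers` as the docstring indicates.

```python
from textx import metamodel_from_str
from textx.scoping.providers import FQN

grammar = r'''
Model: packages+=Package usages+=Usage;
Package: 'package' name=ID '{' classes+=Class '}';
Class: 'class' name=ID;
Usage: 'use' cls=[Class|FQNID];
FQNID: ID ('.' ID)*;
'''
mm = metamodel_from_str(grammar)
# Resolve every reference in every class with the FQN provider.
mm.register_scope_provider({"*.*": FQN()})

model = mm.model_from_str('package p { class A } use p.A')
print(model.usages[0].cls.name)  # A
```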
|
Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language.
|
def language_from_str(language_def, metamodel):
if type(language_def) is not text:
raise TextXError("textX accepts only unicode strings.")
if metamodel.debug:
metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")
# Check the cache for already constructed textX parser
if metamodel.debug in textX_parsers:
parser = textX_parsers[metamodel.debug]
else:
# Create parser for TextX grammars using
# the arpeggio grammar specified in this module
parser = ParserPython(textx_model, comment_def=comment,
ignore_case=False,
reduce_tree=False,
memoization=metamodel.memoization,
debug=metamodel.debug,
file=metamodel.file)
# Cache it for subsequent calls
textX_parsers[metamodel.debug] = parser
# Parse language description with textX parser
try:
parse_tree = parser.parse(language_def)
except NoMatch as e:
line, col = parser.pos_to_linecol(e.position)
raise TextXSyntaxError(text(e), line, col)
# Construct new parser and meta-model based on the given language
# description.
lang_parser = visit_parse_tree(parse_tree,
TextXVisitor(parser, metamodel))
# Meta-model is constructed. Validate its semantics.
metamodel.validate()
# Here we connect meta-model and language parser for convenience.
lang_parser.metamodel = metamodel
metamodel._parser_blueprint = lang_parser
if metamodel.debug:
# Create dot file for debugging purposes
PMDOTExporter().exportFile(
lang_parser.parser_model,
"{}_parser_model.dot".format(metamodel.rootcls.__name__))
return lang_parser
| 353,578
|
Creates a new metamodel from the textX description given as a string.
Args:
lang_desc(str): A textX language description.
metamodel(TextXMetaModel): A metamodel that should be used.
other params: See TextXMetaModel.
|
def metamodel_from_str(lang_desc, metamodel=None, **kwargs):
if not metamodel:
metamodel = TextXMetaModel(**kwargs)
language_from_str(lang_desc, metamodel)
return metamodel
| 353,614
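A minimal end-to-end sketch; the one-rule grammar and the model text are invented for illustration.

```python
from textx import metamodel_from_str

# The first rule of the grammar is the root rule.
mm = metamodel_from_str("Greeting: 'hello' name=ID;")
model = mm.model_from_str('hello World')
print(model.name)  # World
```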
|
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
|
def metamodel_from_file(file_name, **kwargs):
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel
| 353,615
|
Starts a new import.
Args:
import_name(str): A relative import in the dot syntax
(e.g. "first.second.expressions")
|
def _new_import(self, import_name):
# Import can't be used if meta-model is loaded from string
assert self.root_path is not None, \
'"import" statement can not be used if meta-model is ' \
'loaded from string.'
# Find the absolute file name of the import based on the relative
# import_name and current namespace
current_namespace = self._namespace_stack[-1]
if '.' in current_namespace:
root_namespace = current_namespace.rsplit('.', 1)[0]
import_name = "%s.%s" % (root_namespace, import_name)
import_file_name = "%s.tx" % os.path.join(self.root_path,
*import_name.split("."))
if import_name not in self.namespaces:
self._enter_namespace(import_name)
if self.debug:
self.dprint("*** IMPORTING FILE: %s" % import_file_name)
metamodel_from_file(import_file_name, metamodel=self)
self._leave_namespace()
# Add the import to the imported_namespaces for current namespace
# so that resolving of current grammar searches imported grammars
# in the order of import
self._imported_namespaces[current_namespace].append(
self.namespaces[import_name])
| 353,620
|
Initialize obj attributes.
Args:
obj(object): A python object to set attributes to.
user(bool): If this object is a user object mangle attribute names.
|
def _init_obj_attrs(self, obj, user=False):
for attr in obj.__class__._tx_attrs.values():
if user:
# Mangle name to prevent name clashing
attr_name = "_txa_%s" % attr.name
else:
attr_name = attr.name
if attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]:
# list
setattr(obj, attr_name, [])
elif attr.cls.__name__ in BASE_TYPE_NAMES:
# Instantiate base python type
if self.auto_init_attributes:
setattr(obj, attr_name,
python_type(attr.cls.__name__)())
else:
# See https://github.com/textX/textX/issues/11
if attr.bool_assignment:
# Only ?= assignments shall have default
# value of False.
setattr(obj, attr_name, False)
else:
# Set base type attribute to None initially
# in order to be able to detect if an optional
# values are given in the model. Default values
# can be specified using object processors.
setattr(obj, attr_name, None)
else:
# Reference to other obj
setattr(obj, attr_name, None)
| 353,624
|
Object processors are callables that will be called after
each successful model object construction.
Each callable receives the constructed model object as its parameter.
Registering new object processors replaces any previously registered ones.
Args:
obj_processors(dict): A dictionary where key=class name,
value=callable
|
def register_obj_processors(self, obj_processors):
self.obj_processors = obj_processors
self.type_convertors.update(obj_processors)
| 353,633
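A small sketch of registering an object processor on a metamodel built with `metamodel_from_str` (shown earlier in this section); the grammar and the callback are illustrative.

```python
from textx import metamodel_from_str

mm = metamodel_from_str("Point: '(' x=INT ',' y=INT ')';")

# Called once for every successfully constructed Point instance.
def report(point):
    print('constructed Point', point.x, point.y)

mm.register_obj_processors({'Point': report})
mm.model_from_str('(3, 4)')   # prints: constructed Point 3 4
```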
|
Initializes and runs the processor.
Arguments:
serializer: a DREST serializer
data: the serializer's representation
|
def __init__(self, serializer, data):
if isinstance(serializer, ListSerializer):
serializer = serializer.child
self.data = {}
self.seen = defaultdict(set)
self.plural_name = serializer.get_plural_name()
self.name = serializer.get_name()
# process the data, optionally sideloading
self.process(data)
# add the primary resource data into the response data
resource_name = self.name if isinstance(
data,
dict
) else self.plural_name
self.data[resource_name] = data
| 353,829
|
Return a field given a model and field name.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
A Django field if `field_name` is a valid field for `model`,
None otherwise.
|
def get_model_field(model, field_name):
meta = model._meta
try:
if DJANGO19:
field = meta.get_field(field_name)
else:
field = meta.get_field_by_name(field_name)[0]
return field
except:
if DJANGO19:
related_objs = (
f for f in meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
)
related_m2m_objs = (
f for f in meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
)
else:
related_objs = meta.get_all_related_objects()
related_m2m_objs = meta.get_all_related_many_to_many_objects()
related_objects = {
o.get_accessor_name(): o
for o in chain(related_objs, related_m2m_objs)
}
if field_name in related_objects:
return related_objects[field_name]
else:
# check virtual fields (1.7)
if hasattr(meta, 'virtual_fields'):
for field in meta.virtual_fields:
if field.name == field_name:
return field
raise AttributeError(
'%s is not a valid field for %s' % (field_name, model)
)
| 353,840
|
Check whether a given model field is a remote field.
A remote field is the inverse of a one-to-many or a
many-to-many relationship.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
True if `field_name` is a remote field, False otherwise.
|
def is_field_remote(model, field_name):
if not hasattr(model, '_meta'):
# ephemeral model with no metaclass
return False
model_field = get_model_field(model, field_name)
return isinstance(model_field, (ManyToManyField, RelatedObject))
| 353,842
|
Parses the request for a particular feature.
Arguments:
name: A feature name.
Returns:
A feature parsed from the URL if the feature is supported, or None.
|
def get_request_feature(self, name):
if '[]' in name:
# array-type
return self.request.query_params.getlist(
name) if name in self.features else None
elif '{}' in name:
# object-type (keys are not consistent)
return self._extract_object_params(
name) if name in self.features else {}
else:
# single-type
return self.request.query_params.get(
name) if name in self.features else None
| 353,905
|
Returns a queryset for this request.
Arguments:
queryset: Optional root-level queryset.
|
def get_queryset(self, queryset=None):
serializer = self.get_serializer()
return getattr(self, 'queryset', serializer.Meta.model.objects.all())
| 353,907
|
Modified to_representation with optimizations.
1) Returns a plain old dict as opposed to OrderedDict.
(Constructing ordered dict is ~100x slower than `{}`.)
2) Ensure we use a cached list of fields
(this optimization exists in DRF 3.2 but not 3.1)
Arguments:
instance: a model instance or data object
Returns:
Dict of primitive datatypes.
|
def _faster_to_representation(self, instance):
ret = {}
fields = self._readable_fields
is_fast = isinstance(instance, prefetch.FastObject)
id_fields = self._readable_id_fields
for field in fields:
attribute = None
# we exclude dynamic fields here because the proper fastquery
# dereferencing happens in the `get_attribute` method now
if (
is_fast and
not isinstance(
field,
(DynamicGenericRelationField, DynamicRelationField)
)
):
if field in id_fields and field.source not in instance:
# TODO - make better.
attribute = instance.get(field.source + '_id')
ret[field.field_name] = attribute
continue
else:
try:
attribute = instance[field.source]
except KeyError:
# slower, but does more stuff
# Also, some temp debugging
if hasattr(instance, field.source):
attribute = getattr(instance, field.source)
else:
# Fall back on DRF behavior
attribute = field.get_attribute(instance)
print(
'Missing %s from %s' % (
field.field_name,
self.__class__.__name__
)
)
else:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
| 353,948
|
Modified to_representation method. Optionally may cache objects.
Arguments:
instance: A model instance or data object.
Returns:
Instance ID if the serializer is meant to represent its ID.
Otherwise, a tagged data dict representation.
|
def to_representation(self, instance):
if self.id_only():
return instance.pk
pk = getattr(instance, 'pk', None)
if not settings.ENABLE_SERIALIZER_OBJECT_CACHE or pk is None:
return self._to_representation(instance)
else:
if pk not in self.obj_cache:
self.obj_cache[pk] = self._to_representation(instance)
return self.obj_cache[pk]
| 353,950
|
Provides post processing. Sub-classes should implement their own
to_representation method, but pass the resulting dict through
this function to get tagging and field selection.
Arguments:
instance: Serialized dict, or object. If object,
it will be serialized by the super class's
to_representation() method.
|
def to_representation(self, instance):
if not isinstance(instance, dict):
data = super(
DynamicEphemeralSerializer,
self
).to_representation(instance)
else:
data = instance
instance = EphemeralObject(data)
if self.id_only():
return data
else:
return tag_dict(data, serializer=self, instance=instance)
| 353,955
|
Register a viewset that should be considered the canonical
endpoint for a particular resource. In addition to generating
and registering the route, it adds the route in a reverse map
to allow DREST to build the canonical URL for a given resource.
Arguments:
viewset - viewset class, should have `serializer_class` attr.
namespace - (optional) URL namespace, e.g. 'v3'.
|
def register_resource(self, viewset, namespace=None):
# Try to extract resource name from viewset.
try:
serializer = viewset.serializer_class()
resource_key = serializer.get_resource_key()
resource_name = serializer.get_name()
path_name = serializer.get_plural_name()
except:
import traceback
traceback.print_exc()
raise Exception(
"Failed to extract resource name from viewset: '%s'."
" It, or its serializer, may not be DREST-compatible." % (
viewset
)
)
# Construct canonical path and register it.
if namespace:
namespace = namespace.rstrip('/') + '/'
base_path = namespace or ''
base_path = r'%s' % base_path + path_name
self.register(base_path, viewset)
# Make sure resource isn't already registered.
if resource_key in resource_map:
raise Exception(
"The resource '%s' has already been mapped to '%s'."
" Each resource can only be mapped to one canonical"
" path. " % (
resource_key,
resource_map[resource_key]['path']
)
)
# Register resource in reverse map.
resource_map[resource_key] = {
'path': base_path,
'viewset': viewset
}
# Make sure the resource name isn't registered, either
# TODO: Think of a better way to clean this up, there's a lot of
# duplicated effort here, between `resource_name` and `resource_key`
# This resource name -> key mapping is currently only used by
# the DynamicGenericRelationField
if resource_name in resource_name_map:
resource_key = resource_name_map[resource_name]
raise Exception(
"The resource name '%s' has already been mapped to '%s'."
" A resource name can only be used once." % (
resource_name,
resource_map[resource_key]['path']
)
)
# map the resource name to the resource key for easier lookup
resource_name_map[resource_name] = resource_key
| 353,961
|
Return canonical resource path.
Arguments:
resource_key - Canonical resource key
i.e. Serializer.get_resource_key().
pk - (Optional) Object's primary key for a single-resource URL.
Returns: Absolute URL as string.
|
def get_canonical_path(resource_key, pk=None):
if resource_key not in resource_map:
# Note: Maybe raise?
return None
base_path = get_script_prefix() + resource_map[resource_key]['path']
if pk:
return '%s/%s/' % (base_path, pk)
else:
return base_path
| 353,962
|
Return canonical serializer for a given resource name.
Arguments:
resource_key - Resource key, usually DB table for model-based
resources, otherwise the plural name.
model - (Optional) Model class to look up by.
instance - (Optional) Model object instance.
resource_name - (Optional) Resource name to look up by.
Returns: serializer class
|
def get_canonical_serializer(
resource_key,
model=None,
instance=None,
resource_name=None
):
if model:
resource_key = get_model_table(model)
elif instance:
resource_key = instance._meta.db_table
elif resource_name:
resource_key = resource_name_map[resource_name]
if resource_key not in resource_map:
return None
return resource_map[resource_key]['viewset'].serializer_class
| 353,963
|
Get the key that can be passed to Django's filter method.
To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF serializer
Returns:
A filter key.
|
def generate_query_key(self, serializer):
rewritten = []
last = len(self.field) - 1
s = serializer
field = None
for i, field_name in enumerate(self.field):
# Note: .fields can be empty for related serializers that aren't
# sideloaded. Fields that are deferred also won't be present.
# If field name isn't in serializer.fields, get full list from
# get_all_fields() method. This is somewhat expensive, so only do
# this if we have to.
fields = s.fields
if field_name not in fields:
fields = getattr(s, 'get_all_fields', lambda: {})()
if field_name == 'pk':
rewritten.append('pk')
continue
if field_name not in fields:
raise ValidationError(
"Invalid filter field: %s" % field_name
)
field = fields[field_name]
# For remote fields, strip off '_set' for filtering. This is a
# weird Django inconsistency.
model_field_name = field.source or field_name
model_field = get_model_field(s.get_model(), model_field_name)
if isinstance(model_field, RelatedObject):
model_field_name = model_field.field.related_query_name()
# If get_all_fields() was used above, field could be unbound,
# and field.source would be None
rewritten.append(model_field_name)
if i == last:
break
# Recurse into nested field
s = getattr(field, 'serializer', None)
if isinstance(s, serializers.ListSerializer):
s = s.child
if not s:
raise ValidationError(
"Invalid nested filter field: %s" % field_name
)
if self.operator:
rewritten.append(self.operator)
return ('__'.join(rewritten), field)
| 353,973
|
Stringifies input dict as toml
Args:
o: Object to dump into toml
encoder: Optional TomlEncoder used to build the output; a default encoder is created if omitted.
Returns:
String containing the toml corresponding to dict
|
def dumps(o, encoder=None):
retval = ""
if encoder is None:
encoder = TomlEncoder(o.__class__)
addtoretval, sections = encoder.dump_sections(o, "")
retval += addtoretval
outer_objs = [id(o)]
while sections:
section_ids = [id(section) for section in sections]
for outer_obj in outer_objs:
if outer_obj in section_ids:
raise ValueError("Circular reference detected")
outer_objs += section_ids
newsections = encoder.get_empty_table()
for section in sections:
addtoretval, addtosections = encoder.dump_sections(
sections[section], section)
if addtoretval or (not addtoretval and not addtosections):
if retval and retval[-2:] != "\n\n":
retval += "\n"
retval += "[" + section + "]\n"
if addtoretval:
retval += addtoretval
for s in addtosections:
newsections[section + "." + s] = addtosections[s]
sections = newsections
return retval
| 353,992
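A usage sketch, assuming this is the `dumps` of the `toml` package (the surrounding `TomlEncoder`/`dump_sections` names match it); the data dict is made up and the commented output is approximate.

```python
import toml

data = {"title": "example", "owner": {"name": "Tom", "dob_year": 1979}}
print(toml.dumps(data))
# title = "example"
#
# [owner]
# name = "Tom"
# dob_year = 1979
```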
|
This function returns the sum of the squared error between the histograms of two arrays.
Parameters:
arr1, arr2: arrays with values constrained to 0..1
bins: optional histogram bin edges (defaults to 11 evenly spaced edges over 0..1)
Returns:
sum of the squared error between the two normalized histograms
|
def histogram_distance(arr1, arr2, bins=None):
eps = 1e-6
assert arr1.min() > 0 - eps
assert arr1.max() < 1 + eps
assert arr2.min() > 0 - eps
assert arr2.max() < 1 + eps
if not bins:
bins = [x / 10 for x in range(11)]
hist1 = np.histogram(arr1, bins=bins)[0] / arr1.size
hist2 = np.histogram(arr2, bins=bins)[0] / arr2.size
assert abs(hist1.sum() - 1.0) < eps
assert abs(hist2.sum() - 1.0) < eps
sqerr = (hist1 - hist2) ** 2
return sqerr.sum()
| 354,040
|
Validates that the 'iss' claim is valid.
The "iss" (issuer) claim identifies the principal that issued the
JWT. The processing of this claim is generally application specific.
The "iss" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
issuer (str or iterable): Acceptable value(s) for the issuer that
signed the token.
|
def _validate_iss(claims, issuer=None):
if issuer is not None:
if isinstance(issuer, string_types):
issuer = (issuer,)
if claims.get('iss') not in issuer:
raise JWTClaimsError('Invalid issuer')
| 354,611
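A behavioural sketch, assuming `_validate_iss` above and its `JWTClaimsError`/`string_types` imports are in scope; the claim values are invented.

```python
claims = {'iss': 'https://issuer.example.com'}

# A single acceptable issuer, or any iterable of acceptable issuers, may be passed.
_validate_iss(claims, issuer='https://issuer.example.com')                   # passes silently
_validate_iss(claims, issuer=('https://issuer.example.com', 'backup-iss'))   # passes silently

try:
    _validate_iss(claims, issuer='https://other.example.com')
except Exception as exc:   # JWTClaimsError in the real library
    print(exc)             # Invalid issuer
```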
|
Helper method to base64url_decode a string.
Args:
input (bytes): A base64url-encoded byte string to decode.
|
def base64url_decode(input):
rem = len(input) % 4
if rem > 0:
input += b'=' * (4 - rem)
return base64.urlsafe_b64decode(input)
| 354,627
|
Helper for comparing string in constant time, independent
of the python version being used.
Args:
a (str): A string to compare
b (str): A string to compare
|
def constant_time_string_compare(a, b):
try:
return hmac.compare_digest(a, b)
except AttributeError:
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
| 354,628
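A quick sketch, assuming the helper above (and its `hmac` import) is in scope.

```python
print(constant_time_string_compare('secret-token', 'secret-token'))  # True
print(constant_time_string_compare('secret-token', 'Secret-token'))  # False
print(constant_time_string_compare('short', 'longer-string'))        # False
```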
|
Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings.
|
def generate_encodeable_characters(characters: Iterable[str],
encodings: Iterable[str]) -> Iterable[str]:
for c in characters:
for encoding in encodings:
try:
c.encode(encoding)
yield c
except UnicodeEncodeError:
pass
| 354,836
|
Return a mapping between each given character and its length.
Args:
measurer: The TextMeasurer used to measure the width of the text in
pixels.
characters: The characters to measure e.g. "ml".
Returns:
A mapping from the given characters to their length in pixels, as
determined by 'measurer' e.g. {'m': 5.2, 'l': 1.2}.
|
def calculate_character_to_length_mapping(
measurer: text_measurer.TextMeasurer,
characters: Iterable[str]) -> Mapping[str, float]:
char_to_length = {}
for c in characters:
char_to_length[c] = measurer.text_width(c)
return char_to_length
| 354,837
|
A container for an SVG document parsed with SAX into a light tree of objects.
This class provides functions for extracting SVG data into Path objects.
Args:
filename (str): The filename of the SVG file
|
def __init__(self, filename):
self.root_values = {}
self.tree = []
# remember location of original svg file
if filename is not None and os.path.dirname(filename) == '':
self.original_filename = os.path.join(os.getcwd(), filename)
else:
self.original_filename = filename
if filename is not None:
self.sax_parse(filename)
| 356,279
|
Returns the apriori candidates as a list.
Arguments:
prev_candidates -- Previous candidates as a list.
length -- The length of the next candidates.
|
def create_next_candidates(prev_candidates, length):
# Solve the items.
item_set = set()
for candidate in prev_candidates:
for item in candidate:
item_set.add(item)
items = sorted(item_set)
# Create the temporary candidates. These will be filtered below.
tmp_next_candidates = (frozenset(x) for x in combinations(items, length))
# Return all the candidates if the length of the next candidates is 2
# because their subsets are the same as items.
if length < 3:
return list(tmp_next_candidates)
# Filter candidates that all of their subsets are
# in the previous candidates.
next_candidates = [
candidate for candidate in tmp_next_candidates
if all(
True if frozenset(x) in prev_candidates else False
for x in combinations(candidate, length - 1))
]
return next_candidates
| 356,535
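A worked example of the candidate generation, assuming the function above (with `combinations` from `itertools`) is in scope.

```python
prev = [frozenset({'A', 'B'}), frozenset({'A', 'C'}), frozenset({'B', 'C'})]

# All three 2-item subsets of {A, B, C} are present, so the 3-item candidate survives.
print(create_next_candidates(prev, 3))      # one candidate: frozenset({'A', 'B', 'C'})

# Drop {B, C}: the 3-item candidate is filtered out because one subset is missing.
print(create_next_candidates(prev[:2], 3))  # []
```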
|
Returns a generator of support records with given transactions.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
min_support -- A minimum support (float).
Keyword arguments:
max_length -- The maximum length of relations (integer).
|
def gen_support_records(transaction_manager, min_support, **kwargs):
# Parse arguments.
max_length = kwargs.get('max_length')
# For testing.
_create_next_candidates = kwargs.get(
'_create_next_candidates', create_next_candidates)
# Process.
candidates = transaction_manager.initial_candidates()
length = 1
while candidates:
relations = set()
for relation_candidate in candidates:
support = transaction_manager.calc_support(relation_candidate)
if support < min_support:
continue
candidate_set = frozenset(relation_candidate)
relations.add(candidate_set)
yield SupportRecord(candidate_set, support)
length += 1
if max_length and length > max_length:
break
candidates = _create_next_candidates(relations, length)
| 356,536
|
Returns a generator of ordered statistics as OrderedStatistic instances.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
record -- A support record as a SupportRecord instance.
|
def gen_ordered_statistics(transaction_manager, record):
items = record.items
for combination_set in combinations(sorted(items), len(items) - 1):
items_base = frozenset(combination_set)
items_add = frozenset(items.difference(items_base))
confidence = (
record.support / transaction_manager.calc_support(items_base))
lift = confidence / transaction_manager.calc_support(items_add)
yield OrderedStatistic(
frozenset(items_base), frozenset(items_add), confidence, lift)
| 356,537
|
Filter OrderedStatistic objects.
Arguments:
ordered_statistics -- An OrderedStatistic iterable object.
Keyword arguments:
min_confidence -- The minimum confidence of relations (float).
min_lift -- The minimum lift of relations (float).
|
def filter_ordered_statistics(ordered_statistics, **kwargs):
min_confidence = kwargs.get('min_confidence', 0.0)
min_lift = kwargs.get('min_lift', 0.0)
for ordered_statistic in ordered_statistics:
if ordered_statistic.confidence < min_confidence:
continue
if ordered_statistic.lift < min_lift:
continue
yield ordered_statistic
| 356,538
|
Executes Apriori algorithm and returns a RelationRecord generator.
Arguments:
transactions -- A transaction iterable object
(eg. [['A', 'B'], ['B', 'C']]).
Keyword arguments:
min_support -- The minimum support of relations (float).
min_confidence -- The minimum confidence of relations (float).
min_lift -- The minimum lift of relations (float).
max_length -- The maximum length of the relation (integer).
|
def apriori(transactions, **kwargs):
# Parse the arguments.
min_support = kwargs.get('min_support', 0.1)
min_confidence = kwargs.get('min_confidence', 0.0)
min_lift = kwargs.get('min_lift', 0.0)
max_length = kwargs.get('max_length', None)
# Check arguments.
if min_support <= 0:
raise ValueError('minimum support must be > 0')
# For testing.
_gen_support_records = kwargs.get(
'_gen_support_records', gen_support_records)
_gen_ordered_statistics = kwargs.get(
'_gen_ordered_statistics', gen_ordered_statistics)
_filter_ordered_statistics = kwargs.get(
'_filter_ordered_statistics', filter_ordered_statistics)
# Calculate supports.
transaction_manager = TransactionManager.create(transactions)
support_records = _gen_support_records(
transaction_manager, min_support, max_length=max_length)
# Calculate ordered stats.
for support_record in support_records:
ordered_statistics = list(
_filter_ordered_statistics(
_gen_ordered_statistics(transaction_manager, support_record),
min_confidence=min_confidence,
min_lift=min_lift,
)
)
if not ordered_statistics:
continue
yield RelationRecord(
support_record.items, support_record.support, ordered_statistics)
| 356,539
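An end-to-end sketch, assuming `apriori` and the helper functions and named tuples above are importable together; the transactions are invented.

```python
transactions = [
    ['beer', 'nuts'],
    ['beer', 'cheese'],
    ['beer', 'nuts', 'cheese'],
]

for record in apriori(transactions, min_support=0.5, min_confidence=0.6):
    print(sorted(record.items), round(record.support, 2))
    for stat in record.ordered_statistics:
        print('  ', sorted(stat.items_base), '->', sorted(stat.items_add),
              'conf=%.2f' % stat.confidence)
```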
|
Parse commandline arguments.
Arguments:
argv -- An argument list without the program name.
|
def parse_args(argv):
output_funcs = {
'json': dump_as_json,
'tsv': dump_as_two_item_tsv,
}
default_output_func_key = 'json'
parser = argparse.ArgumentParser()
parser.add_argument(
'-v', '--version', action='version',
version='%(prog)s {0}'.format(__version__))
parser.add_argument(
'input', metavar='inpath', nargs='*',
help='Input transaction file (default: stdin).',
type=argparse.FileType('r'), default=[sys.stdin])
parser.add_argument(
'-o', '--output', metavar='outpath',
help='Output file (default: stdout).',
type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument(
'-l', '--max-length', metavar='int',
help='Max length of relations (default: infinite).',
type=int, default=None)
parser.add_argument(
'-s', '--min-support', metavar='float',
help='Minimum support ratio (must be > 0, default: 0.1).',
type=float, default=0.1)
parser.add_argument(
'-c', '--min-confidence', metavar='float',
help='Minimum confidence (default: 0.5).',
type=float, default=0.5)
parser.add_argument(
'-t', '--min-lift', metavar='float',
help='Minimum lift (default: 0.0).',
type=float, default=0.0)
parser.add_argument(
'-d', '--delimiter', metavar='str',
help='Delimiter for items of transactions (default: tab).',
type=str, default='\t')
parser.add_argument(
'-f', '--out-format', metavar='str',
help='Output format ({0}; default: {1}).'.format(
', '.join(output_funcs.keys()), default_output_func_key),
type=str, choices=output_funcs.keys(), default=default_output_func_key)
args = parser.parse_args(argv)
args.output_func = output_funcs[args.out_format]
return args
| 356,540
|
Load transactions and return a generator of transactions.
Arguments:
input_file -- An input file.
Keyword arguments:
delimiter -- The delimiter of the transaction.
|
def load_transactions(input_file, **kwargs):
delimiter = kwargs.get('delimiter', '\t')
for transaction in csv.reader(input_file, delimiter=delimiter):
yield transaction if transaction else ['']
| 356,541
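A small sketch using an in-memory file, assuming `load_transactions` above (with its `csv` import) is in scope.

```python
import io

tsv = io.StringIO('beer\tnuts\nbeer\tcheese\n')
print(list(load_transactions(tsv)))                       # [['beer', 'nuts'], ['beer', 'cheese']]

csv_file = io.StringIO('beer,nuts\n')
print(list(load_transactions(csv_file, delimiter=',')))   # [['beer', 'nuts']]
```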
|
Dump a relation record as a JSON value.
Arguments:
record -- A RelationRecord instance to dump.
output_file -- A file to output.
|
def dump_as_json(record, output_file):
def default_func(value):
if isinstance(value, frozenset):
return sorted(value)
raise TypeError(repr(value) + " is not JSON serializable")
converted_record = record._replace(
ordered_statistics=[x._asdict() for x in record.ordered_statistics])
json.dump(
converted_record._asdict(), output_file,
default=default_func, ensure_ascii=False)
output_file.write(os.linesep)
| 356,542
|
Dump a relation record as TSV only for 2 item relations.
Arguments:
record -- A RelationRecord instance to dump.
output_file -- A file to output.
|
def dump_as_two_item_tsv(record, output_file):
for ordered_stats in record.ordered_statistics:
if len(ordered_stats.items_base) != 1:
continue
if len(ordered_stats.items_add) != 1:
continue
output_file.write('{0}\t{1}\t{2:.8f}\t{3:.8f}\t{4:.8f}{5}'.format(
list(ordered_stats.items_base)[0], list(ordered_stats.items_add)[0],
record.support, ordered_stats.confidence, ordered_stats.lift,
os.linesep))
| 356,543
|
Initialize.
Arguments:
transactions -- A transaction iterable object
(eg. [['A', 'B'], ['B', 'C']]).
|
def __init__(self, transactions):
self.__num_transaction = 0
self.__items = []
self.__transaction_index_map = {}
for transaction in transactions:
self.add_transaction(transaction)
| 356,545
|
Add a transaction.
Arguments:
transaction -- A transaction as an iterable object (eg. ['A', 'B']).
|
def add_transaction(self, transaction):
for item in transaction:
if item not in self.__transaction_index_map:
self.__items.append(item)
self.__transaction_index_map[item] = set()
self.__transaction_index_map[item].add(self.__num_transaction)
self.__num_transaction += 1
| 356,546
|
Returns a support for items.
Arguments:
items -- Items as an iterable object (eg. ['A', 'B']).
|
def calc_support(self, items):
# Empty items is supported by all transactions.
if not items:
return 1.0
# With no transactions, no items are supported.
if not self.num_transaction:
return 0.0
# Create the transaction index intersection.
sum_indexes = None
for item in items:
indexes = self.__transaction_index_map.get(item)
if indexes is None:
# No support for any set that contains a not existing item.
return 0.0
if sum_indexes is None:
# Assign the indexes on the first time.
sum_indexes = indexes
else:
# On subsequent items, intersect with the accumulated indexes.
sum_indexes = sum_indexes.intersection(indexes)
# Calculate and return the support.
return float(len(sum_indexes)) / self.__num_transaction
| 356,547
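A small worked example, assuming the `__init__`, `add_transaction`, and `calc_support` methods above belong to the same `TransactionManager` class used by the apriori driver earlier in this section.

```python
tm = TransactionManager([['A', 'B'], ['B', 'C'], ['A', 'B', 'C']])

print(tm.calc_support(frozenset()))            # 1.0 -- the empty itemset is always supported
print(tm.calc_support(frozenset({'A', 'B'})))  # 0.666... -- present in 2 of 3 transactions
print(tm.calc_support(frozenset({'D'})))       # 0.0 -- unknown item
```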
|
Convert PIL image to numpy grayscale array and numpy alpha array.
Args:
img (PIL.Image): PIL Image object.
Returns:
(gray, alpha): both numpy arrays.
|
def to_grayscale(img):
gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float)
imbands = img.getbands()
alpha = None
if 'A' in imbands:
alpha = numpy.asarray(img.split()[-1]).astype(numpy.float)
return gray, alpha
| 356,830
|
Create an SSIMImage.
Args:
img (str or PIL.Image): PIL Image object or file name.
gaussian_kernel_1d (np.ndarray, optional): Gaussian kernel generated
with utils.get_gaussian_kernel; when given, it is used to precompute
common objects for SSIM computation
size (tuple, optional): New image size to resize image to.
|
def __init__(self, img, gaussian_kernel_1d=None, size=None):
# Use existing or create a new PIL.Image
self.img = img if not isinstance(img, compat.basestring) \
else compat.Image.open(img)
# Resize image if size is defined and different
# from original image
if size and size != self.img.size:
self.img = self.img.resize(size, Image.ANTIALIAS)
# Set the size of the image
self.size = self.img.size
# If gaussian kernel is defined we create
# common SSIM objects
if gaussian_kernel_1d is not None:
self.gaussian_kernel_1d = gaussian_kernel_1d
# np.array of grayscale and alpha image
self.img_gray, self.img_alpha = to_grayscale(self.img)
if self.img_alpha is not None:
self.img_gray[self.img_alpha == 255] = 0
# Squared grayscale
self.img_gray_squared = self.img_gray ** 2
# Convolve grayscale image with gaussian
self.img_gray_mu = convolve_gaussian_2d(
self.img_gray, self.gaussian_kernel_1d)
# Squared mu
self.img_gray_mu_squared = self.img_gray_mu ** 2
# Convolve squared grayscale with gaussian
self.img_gray_sigma_squared = convolve_gaussian_2d(
self.img_gray_squared, self.gaussian_kernel_1d)
# Subtract squared mu
self.img_gray_sigma_squared -= self.img_gray_mu_squared
# If we don't define gaussian kernel, we create
# common CW-SSIM objects
else:
# Grayscale PIL.Image
self.img_gray = ImageOps.grayscale(self.img)
| 356,832
|
Compute the SSIM value from the reference image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
Returns:
Computed SSIM float value.
|
def ssim_value(self, target):
# Performance boost if handed a compatible SSIMImage object.
if not isinstance(target, SSIMImage) \
or not np.array_equal(self.gaussian_kernel_1d,
target.gaussian_kernel_1d):
target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)
img_mat_12 = self.img.img_gray * target.img_gray
img_mat_sigma_12 = convolve_gaussian_2d(
img_mat_12, self.gaussian_kernel_1d)
img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu
img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
# Numerator of SSIM
num_ssim = ((2 * img_mat_mu_12 + self.c_1) *
(2 * img_mat_sigma_12 + self.c_2))
# Denominator of SSIM
den_ssim = (
(self.img.img_gray_mu_squared + target.img_gray_mu_squared +
self.c_1) *
(self.img.img_gray_sigma_squared +
target.img_gray_sigma_squared + self.c_2))
ssim_map = num_ssim / den_ssim
index = np.average(ssim_map)
return index
| 356,834
|
Compute the complex wavelet SSIM (CW-SSIM) value from the reference
image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
width: width for the wavelet convolution (default: 30)
Returns:
Computed CW-SSIM float value.
|
def cw_ssim_value(self, target, width=30):
if not isinstance(target, SSIMImage):
target = SSIMImage(target, size=self.img.size)
# Define a width for the wavelet convolution
widths = np.arange(1, width+1)
# Use the image data as arrays
sig1 = np.asarray(self.img.img_gray.getdata())
sig2 = np.asarray(target.img_gray.getdata())
# Convolution
cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)
# Compute the first term
c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
c1_2 = np.square(abs(cwtmatr1))
c2_2 = np.square(abs(cwtmatr2))
num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k
den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k
# Compute the second term
c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k
den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k
# Construct the result
ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)
# Average the per pixel results
index = np.average(ssim_map)
return index
| 356,835
|
Computes SSIM.
Args:
image1: First PIL Image object to compare.
image2: Second PIL Image object to compare.
gaussian_kernel_sigma: Sigma of the Gaussian kernel used for SSIM.
gaussian_kernel_width: Width of the Gaussian kernel used for SSIM.
Returns:
SSIM float value.
|
def compute_ssim(image1, image2, gaussian_kernel_sigma=1.5,
gaussian_kernel_width=11):
gaussian_kernel_1d = get_gaussian_kernel(
gaussian_kernel_width, gaussian_kernel_sigma)
return SSIM(image1, gaussian_kernel_1d).ssim_value(image2)
| 356,836
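A usage sketch; the file names are placeholders, and `compute_ssim` plus the `SSIM`/`get_gaussian_kernel` helpers above are assumed to be importable together.

```python
from PIL import Image

# 'reference.png' and 'candidate.png' are placeholder paths.
score = compute_ssim(Image.open('reference.png'), Image.open('candidate.png'))
print(score)   # 1.0 for identical images, lower as the images diverge
```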
|
Sort blocks by Logical Erase Block number.
Arguments:
List:blocks -- List of block objects to sort.
Returns:
List -- Indexes of blocks sorted by LEB.
|
def by_leb(blocks):
slist_len = len(blocks)
slist = ['x'] * slist_len
for block in blocks:
if blocks[block].leb_num >= slist_len:
add_elements = blocks[block].leb_num - slist_len + 1
slist += (['x'] * add_elements)
slist_len = len(slist)
slist[blocks[block].leb_num] = block
return slist
| 360,057
|
Sort blocks by volume id
Arguments:
Obj:blocks -- List of block objects.
List:slist -- (optional) List of block indexes.
Return:
Dict -- blocks grouped in lists with dict key as volume id.
|
def by_vol_id(blocks, slist=None):
vol_blocks = {}
# sort block by volume
# not reliable with multiple partitions (fifo)
for i in blocks:
if slist and i not in slist:
continue
elif not blocks[i].is_valid:
continue
if blocks[i].vid_hdr.vol_id not in vol_blocks:
vol_blocks[blocks[i].vid_hdr.vol_id] = []
vol_blocks[blocks[i].vid_hdr.vol_id].append(blocks[i].peb_num)
return vol_blocks
| 360,058
|
Extract UBIFS contents to out_path/
Arguments:
Obj:ubifs -- UBIFS object.
Str:out_path -- Path to extract contents to.
|
def extract_files(ubifs, out_path, perms=False):
try:
inodes = {}
bad_blocks = []
walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes, bad_blocks)
if len(inodes) < 2:
raise Exception('No inodes found')
for dent in inodes[1]['dent']:
extract_dents(ubifs, inodes, dent, out_path, perms)
if len(bad_blocks):
error(extract_files, 'Warning', 'Data may be missing or corrupted, bad blocks, LEB [%s]' % ','.join(map(str, bad_blocks)))
except Exception as e:
error(extract_files, 'Error', '%s' % e)
| 360,060
|
Filter out old layout blocks from list
Arguments:
List:blocks -- List of block objects
List:layout_blocks -- List of layout block indexes
Returns:
List -- Newest layout blocks in list
|
def get_newest(blocks, layout_blocks):
layout_temp = list(layout_blocks)
for i in range(0, len(layout_temp)):
for k in range(0, len(layout_blocks)):
if blocks[layout_temp[i]].ec_hdr.image_seq != blocks[layout_blocks[k]].ec_hdr.image_seq:
continue
if blocks[layout_temp[i]].leb_num != blocks[layout_blocks[k]].leb_num:
continue
if blocks[layout_temp[i]].vid_hdr.sqnum > blocks[layout_blocks[k]].vid_hdr.sqnum:
del layout_blocks[k]
break
return layout_blocks
| 360,077
|
Sort a list of layout blocks into pairs
Arguments:
List:blocks -- List of block objects
List:layout_blocks -- List of layout block indexes
Returns:
List -- Layout block pair indexes grouped in a list
|
def group_pairs(blocks, layout_blocks_list):
image_dict={}
for block_id in layout_blocks_list:
image_seq=blocks[block_id].ec_hdr.image_seq
if image_seq not in image_dict:
image_dict[image_seq]=[block_id]
else:
image_dict[image_seq].append(block_id)
log(group_pairs, 'Layout blocks found at PEBs: %s' % list(image_dict.values()))
return list(image_dict.values())
| 360,078
|
Group block indexes with appropriate layout pairs
Arguments:
List:blocks -- List of block objects
List:layout_pairs -- List of grouped layout blocks
Int:start_peb_num -- Number of the PEB to start from.
Returns:
List -- Layout block pairs grouped with associated block ranges.
|
def associate_blocks(blocks, layout_pairs, start_peb_num):
seq_blocks = []
for layout_pair in layout_pairs:
seq_blocks = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq)
layout_pair.append(seq_blocks)
return layout_pairs
| 360,079
|
Get a list of UBI volume objects from list of blocks
Arguments:
List:blocks -- List of layout block objects
List:layout_info -- Layout info (indexes of layout blocks and
associated data blocks.)
Returns:
Dict -- Of Volume objects by volume name, including any
relevant blocks.
|
def get_volumes(blocks, layout_info):
volumes = {}
vol_blocks_lists = sort.by_vol_id(blocks, layout_info[2])
for vol_rec in blocks[layout_info[0]].vtbl_recs:
vol_name = vol_rec.name.strip(b'\x00').decode('utf-8')
if vol_rec.rec_index not in vol_blocks_lists:
vol_blocks_lists[vol_rec.rec_index] = []
volumes[vol_name] = description(vol_rec.rec_index, vol_rec, vol_blocks_lists[vol_rec.rec_index])
return volumes
| 360,088
|
Parse node key
Arguments:
Str:key -- Hex string literal of node key.
Returns:
Dict -- with the following keys:
'type' (Int) -- Type of key: data, ino, dent, etc.
'ino_num' (Int) -- Inode number.
'khash' (Int) -- Key hash.
|
def parse_key(key):
hkey, lkey = struct.unpack('<II',key[0:UBIFS_SK_LEN])
ino_num = hkey & UBIFS_S_KEY_HASH_MASK
key_type = lkey >> UBIFS_S_KEY_BLOCK_BITS
khash = lkey
#if key_type < UBIFS_KEY_TYPES_CNT:
return {'type':key_type, 'ino_num':ino_num, 'khash': khash}
| 360,091
|
Decompress data.
Arguments:
Int:ctype -- Compression type: LZO or ZLIB.
Int:unc_len -- Uncompressed data length.
Str:data -- Data to be uncompressed.
Returns:
Uncompressed Data.
|
def decompress(ctype, unc_len, data):
if ctype == UBIFS_COMPR_LZO:
try:
return lzo.decompress(b''.join((b'\xf0', struct.pack('>I', unc_len), data)))
except Exception as e:
error(decompress, 'Warn', 'LZO Error: %s' % e)
elif ctype == UBIFS_COMPR_ZLIB:
try:
return zlib.decompress(data, -11)
except Exception as e:
error(decompress, 'Warn', 'ZLib Error: %s' % e)
else:
return data
| 360,092
|
Get LEB size from superblock
Arguments:
Str:path -- Path to file.
Returns:
Int -- LEB size.
Searches file for superblock and retrieves leb size.
|
def guess_leb_size(path):
f = open(path, 'rb')
f.seek(0,2)
file_size = f.tell()+1
f.seek(0)
block_size = None
for _ in range(0, file_size, FILE_CHUNK_SZ):
buf = f.read(FILE_CHUNK_SZ)
for m in re.finditer(UBIFS_NODE_MAGIC, buf):
start = m.start()
chdr = nodes.common_hdr(buf[start:start+UBIFS_COMMON_HDR_SZ])
if chdr and chdr.node_type == UBIFS_SB_NODE:
sb_start = start + UBIFS_COMMON_HDR_SZ
sb_end = sb_start + UBIFS_SB_NODE_SZ
if chdr.len != len(buf[sb_start:sb_end]):
f.seek(sb_start)
buf = f.read(UBIFS_SB_NODE_SZ)
else:
buf = buf[sb_start:sb_end]
sbn = nodes.sb_node(buf)
block_size = sbn.leb_size
f.close()
return block_size
f.close()
return block_size
| 360,104
|
Determine the most likely block size
Arguments:
Str:path -- Path to file.
Returns:
Int -- PEB size.
Searches file for Magic Number, picks most
common length between them.
|
def guess_peb_size(path):
file_offset = 0
offsets = []
f = open(path, 'rb')
f.seek(0,2)
file_size = f.tell()+1
f.seek(0)
for _ in range(0, file_size, FILE_CHUNK_SZ):
buf = f.read(FILE_CHUNK_SZ)
for m in re.finditer(UBI_EC_HDR_MAGIC, buf):
start = m.start()
if not file_offset:
file_offset = start
idx = start
else:
idx = start+file_offset
offsets.append(idx)
file_offset += FILE_CHUNK_SZ
f.close()
occurances = {}
for i in range(0, len(offsets)):
try:
diff = offsets[i] - offsets[i-1]
except:
diff = offsets[i]
if diff not in occurances:
occurances[diff] = 0
occurances[diff] += 1
most_frequent = 0
block_size = None
for offset in occurances:
if occurances[offset] > most_frequent:
most_frequent = occurances[offset]
block_size = offset
return block_size
| 360,105
|
Parse the image upload response to obtain status.
Args:
res: http_utils.FetchResponse instance, the upload response
Returns:
dict, sessionStatus of the response
Raises:
hangups.NetworkError: If the upload request failed.
|
def _get_upload_session_status(res):
response = json.loads(res.body.decode())
if 'sessionStatus' not in response:
try:
info = (
response['errorMessage']['additionalInfo']
['uploader_service.GoogleRupioAdditionalInfo']
['completionInfo']['customerSpecificInfo']
)
reason = '{} : {}'.format(info['status'], info['message'])
except KeyError:
reason = 'unknown reason'
raise exceptions.NetworkError('image upload failed: {}'.format(
reason
))
return response['sessionStatus']
| 360,378
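A behavioural sketch with a stand-in response object, assuming `_get_upload_session_status` above (and its `json`/`exceptions` imports) is in scope; `FakeResponse` is purely illustrative.

```python
import json

class FakeResponse:
    """Stand-in for http_utils.FetchResponse with only a body attribute."""
    def __init__(self, payload):
        self.body = json.dumps(payload).encode()

ok = FakeResponse({'sessionStatus': {'state': 'OPEN'}})
print(_get_upload_session_status(ok))   # {'state': 'OPEN'}

try:
    _get_upload_session_status(FakeResponse({'errorMessage': {}}))
except Exception as exc:   # hangups' exceptions.NetworkError
    print(exc)             # image upload failed: unknown reason
```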
|
Send a Protocol Buffer formatted chat API request.
Args:
endpoint (str): The chat API endpoint to use.
request_pb: The request body as a Protocol Buffer message.
response_pb: The response body as a Protocol Buffer message.
Raises:
NetworkError: If the request fails.
|
async def _pb_request(self, endpoint, request_pb, response_pb):
logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint,
request_pb)
res = await self._base_request(
'https://clients6.google.com/chat/v1/{}'.format(endpoint),
'application/x-protobuf', # Request body is Protocol Buffer.
'proto', # Response body is Protocol Buffer.
request_pb.SerializeToString()
)
try:
response_pb.ParseFromString(base64.b64decode(res.body))
except binascii.Error as e:
raise exceptions.NetworkError(
'Failed to decode base64 response: {}'.format(e)
)
except google.protobuf.message.DecodeError as e:
raise exceptions.NetworkError(
'Failed to decode Protocol Buffer response: {}'.format(e)
)
logger.debug('Received Protocol Buffer response:\n%s', response_pb)
status = response_pb.response_header.status
if status != hangouts_pb2.RESPONSE_STATUS_OK:
description = response_pb.response_header.error_description
raise exceptions.NetworkError(
'Request failed with status {}: \'{}\''
.format(status, description)
)
| 360,381
|
Build :class:`.UserList` and :class:`.ConversationList`.
This method requests data necessary to build the list of conversations and
users. Users that are not in the contact list but are participating in a
conversation will also be retrieved.
Args:
client (Client): Connected client.
Returns:
(:class:`.UserList`, :class:`.ConversationList`):
Tuple of built objects.
|
async def build_user_conversation_list(client):
conv_states, sync_timestamp = await _sync_all_conversations(client)
# Retrieve entities participating in all conversations.
required_user_ids = set()
for conv_state in conv_states:
required_user_ids |= {
user.UserID(chat_id=part.id.chat_id, gaia_id=part.id.gaia_id)
for part in conv_state.conversation.participant_data
}
required_entities = []
if required_user_ids:
logger.debug('Need to request additional users: {}'
.format(required_user_ids))
try:
response = await client.get_entity_by_id(
hangouts_pb2.GetEntityByIdRequest(
request_header=client.get_request_header(),
batch_lookup_spec=[
hangouts_pb2.EntityLookupSpec(
gaia_id=user_id.gaia_id,
create_offnetwork_gaia=True,
)
for user_id in required_user_ids
],
)
)
for entity_result in response.entity_result:
required_entities.extend(entity_result.entity)
except exceptions.NetworkError as e:
logger.warning('Failed to request missing users: {}'.format(e))
# Build list of conversation participants.
conv_part_list = []
for conv_state in conv_states:
conv_part_list.extend(conv_state.conversation.participant_data)
# Retrieve self entity.
get_self_info_response = await client.get_self_info(
hangouts_pb2.GetSelfInfoRequest(
request_header=client.get_request_header(),
)
)
self_entity = get_self_info_response.self_entity
user_list = user.UserList(client, self_entity, required_entities,
conv_part_list)
conversation_list = ConversationList(client, conv_states,
user_list, sync_timestamp)
return (user_list, conversation_list)
| 360,433
|
Sync all conversations by making paginated requests.
Conversations are ordered by ascending sort timestamp.
Args:
client (Client): Connected client.
Raises:
NetworkError: If the requests fail.
Returns:
Tuple of a list of ``ConversationState`` messages and the sync timestamp.
|
async def _sync_all_conversations(client):
conv_states = []
sync_timestamp = None
request = hangouts_pb2.SyncRecentConversationsRequest(
request_header=client.get_request_header(),
max_conversations=CONVERSATIONS_PER_REQUEST,
max_events_per_conversation=1,
sync_filter=[
hangouts_pb2.SYNC_FILTER_INBOX,
hangouts_pb2.SYNC_FILTER_ARCHIVED,
]
)
for _ in range(MAX_CONVERSATION_PAGES):
logger.info(
'Requesting conversations page %s', request.last_event_timestamp
)
response = await client.sync_recent_conversations(request)
conv_states = list(response.conversation_state) + conv_states
sync_timestamp = parsers.from_timestamp(
# SyncRecentConversations seems to return a sync_timestamp 4
            # minutes before the present. To prevent a later SyncAllNewEvents
            # request from asking for events older than what we already have,
            # use current_server_time instead.
response.response_header.current_server_time
)
if response.continuation_end_timestamp == 0:
logger.info('Reached final conversations page')
break
else:
request.last_event_timestamp = response.continuation_end_timestamp
else:
logger.warning('Exceeded maximum number of conversation pages')
logger.info('Synced %s total conversations', len(conv_states))
return conv_states, sync_timestamp
| 360,434
|
:class:`.Event` fired when an event occurs in this conversation.
Args:
conv_event: :class:`.ConversationEvent` that occurred.
|
def __init__(self, client, user_list, conversation, events=[],
event_cont_token=None):
# pylint: disable=dangerous-default-value
self._client = client # Client
self._user_list = user_list # UserList
self._conversation = conversation # hangouts_pb2.Conversation
self._events = [] # [hangouts_pb2.Event]
self._events_dict = {} # {event_id: ConversationEvent}
self._send_message_lock = asyncio.Lock()
self._watermarks = {} # {UserID: datetime.datetime}
self._event_cont_token = event_cont_token
for event_ in events:
# Workaround to ignore observed events returned from
# syncrecentconversations.
if event_.event_type != hangouts_pb2.EVENT_TYPE_OBSERVED_EVENT:
self.add_event(event_)
self.on_event = event.Event('Conversation.on_event')
self.on_typing = event.Event('Conversation.on_typing')
self.on_watermark_notification = event.Event(
'Conversation.on_watermark_notification'
)
self.on_watermark_notification.add_observer(
self._on_watermark_notification
)
| 360,435
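A sketch of subscribing to the per-conversation on_event, assuming conv is an existing Conversation obtained from a ConversationList; the observer may be a plain function or a coroutine.

import hangups

def handle_event(conv_event):
    # Only chat messages carry text; other event types are ignored here.
    if isinstance(conv_event, hangups.ChatMessageEvent):
        print('{}: {}'.format(conv_event.user_id, conv_event.text))

conv.on_event.add_observer(handle_event)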
|
Update the internal state of the conversation.
This method is used by :class:`.ConversationList` to maintain this
instance.
Args:
conversation: ``Conversation`` message.
|
def update_conversation(self, conversation):
# StateUpdate.conversation is actually a delta; fields that aren't
# specified are assumed to be unchanged. Until this class is
# refactored, hide this by saving and restoring previous values where
# necessary.
new_state = conversation.self_conversation_state
old_state = self._conversation.self_conversation_state
self._conversation = conversation
# delivery_medium_option
if not new_state.delivery_medium_option:
new_state.delivery_medium_option.extend(
old_state.delivery_medium_option
)
# latest_read_timestamp
old_timestamp = old_state.self_read_state.latest_read_timestamp
new_timestamp = new_state.self_read_state.latest_read_timestamp
if new_timestamp == 0:
new_state.self_read_state.latest_read_timestamp = old_timestamp
# user_read_state(s)
for new_entry in conversation.read_state:
tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp)
if tstamp == 0:
continue
uid = parsers.from_participantid(new_entry.participant_id)
if uid not in self._watermarks or self._watermarks[uid] < tstamp:
self._watermarks[uid] = tstamp
| 360,442
|
Add an event to the conversation.
This method is used by :class:`.ConversationList` to maintain this
instance.
Args:
event_: ``Event`` message.
Returns:
:class:`.ConversationEvent` representing the event.
|
def add_event(self, event_):
conv_event = self._wrap_event(event_)
if conv_event.id_ not in self._events_dict:
self._events.append(conv_event)
self._events_dict[conv_event.id_] = conv_event
else:
# If this happens, there's probably a bug.
logger.info('Conversation %s ignoring duplicate event %s',
self.id_, conv_event.id_)
return None
return conv_event
| 360,444
|
Rename this conversation.
Hangouts only officially supports renaming group conversations, so
custom names for one-to-one conversations may or may not appear in all
first party clients.
Args:
name (str): New name.
Raises:
.NetworkError: If conversation cannot be renamed.
|
async def rename(self, name):
await self._client.rename_conversation(
hangouts_pb2.RenameConversationRequest(
request_header=self._client.get_request_header(),
new_name=name,
event_request_header=self._get_event_request_header(),
)
)
| 360,449
|
Set the notification level of this conversation.
Args:
level: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or
``NOTIFICATION_LEVEL_RING`` to enable them.
Raises:
.NetworkError: If the request fails.
|
async def set_notification_level(self, level):
await self._client.set_conversation_notification_level(
hangouts_pb2.SetConversationNotificationLevelRequest(
request_header=self._client.get_request_header(),
conversation_id=hangouts_pb2.ConversationId(id=self.id_),
level=level,
)
)
| 360,450
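For instance, muting a conversation would look roughly like this sketch, assuming conv is a Conversation and the call happens inside a coroutine:

from hangups import hangouts_pb2

async def mute(conv):
    # NOTIFICATION_LEVEL_QUIET disables notifications for this conversation.
    await conv.set_notification_level(hangouts_pb2.NOTIFICATION_LEVEL_QUIET)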
|
Set your typing status in this conversation.
Args:
typing: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``,
or ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing,
respectively. Defaults to ``TYPING_TYPE_STARTED``.
Raises:
.NetworkError: If typing status cannot be set.
|
async def set_typing(self, typing=hangouts_pb2.TYPING_TYPE_STARTED):
# TODO: Add rate-limiting to avoid unnecessary requests.
try:
await self._client.set_typing(
hangouts_pb2.SetTypingRequest(
request_header=self._client.get_request_header(),
conversation_id=hangouts_pb2.ConversationId(id=self.id_),
type=typing,
)
)
except exceptions.NetworkError as e:
logger.warning('Failed to set typing status: {}'.format(e))
raise
| 360,451
|
Update the timestamp of the latest event which has been read.
This method will avoid making an API request if it will have no effect.
Args:
read_timestamp (datetime.datetime): (optional) Timestamp to set.
Defaults to the timestamp of the newest event.
Raises:
.NetworkError: If the timestamp cannot be updated.
|
async def update_read_timestamp(self, read_timestamp=None):
if read_timestamp is None:
read_timestamp = (self.events[-1].timestamp if self.events else
datetime.datetime.now(datetime.timezone.utc))
if read_timestamp > self.latest_read_timestamp:
logger.info(
'Setting {} latest_read_timestamp from {} to {}'
.format(self.id_, self.latest_read_timestamp, read_timestamp)
)
# Prevent duplicate requests by updating the conversation now.
state = self._conversation.self_conversation_state
state.self_read_state.latest_read_timestamp = (
parsers.to_timestamp(read_timestamp)
)
try:
await self._client.update_watermark(
hangouts_pb2.UpdateWatermarkRequest(
request_header=self._client.get_request_header(),
conversation_id=hangouts_pb2.ConversationId(
id=self.id_
),
last_read_timestamp=parsers.to_timestamp(
read_timestamp
),
)
)
except exceptions.NetworkError as e:
logger.warning('Failed to update read timestamp: {}'.format(e))
raise
| 360,452
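A sketch of the common "mark as read when a new event arrives" pattern, assuming conv is a Conversation; the watermark update is treated as best-effort here.

import asyncio
import hangups.exceptions

def on_event(conv_event):
    # Schedule the watermark update without blocking the event observer.
    asyncio.ensure_future(mark_read(conv))

async def mark_read(conv):
    try:
        await conv.update_read_timestamp()
    except hangups.exceptions.NetworkError:
        pass  # Best-effort: ignore transient failures.

conv.on_event.add_observer(on_event)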
|
Get the event following another event in this conversation.
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.ConversationEvent` is known.
Returns:
:class:`.ConversationEvent` or ``None`` if there is no following
event.
|
def next_event(self, event_id, prev=False):
i = self.events.index(self._events_dict[event_id])
if prev and i > 0:
return self.events[i - 1]
elif not prev and i + 1 < len(self.events):
return self.events[i + 1]
else:
return None
| 360,454
|
:class:`.Event` fired when an event occurs in any conversation.
Args:
conv_event: :class:`.ConversationEvent` that occurred.
|
def __init__(self, client, conv_states, user_list, sync_timestamp):
self._client = client # Client
self._conv_dict = {} # {conv_id: Conversation}
self._sync_timestamp = sync_timestamp # datetime
self._user_list = user_list # UserList
# Initialize the list of conversations from Client's list of
# hangouts_pb2.ConversationState.
for conv_state in conv_states:
self._add_conversation(conv_state.conversation, conv_state.event,
conv_state.event_continuation_token)
self._client.on_state_update.add_observer(self._on_state_update)
self._client.on_connect.add_observer(self._sync)
self._client.on_reconnect.add_observer(self._sync)
self.on_event = event.Event('ConversationList.on_event')
self.on_typing = event.Event('ConversationList.on_typing')
self.on_watermark_notification = event.Event(
'ConversationList.on_watermark_notification'
)
| 360,455
|
Get all the conversations.
Args:
include_archived (bool): (optional) Whether to include archived
conversations. Defaults to ``False``.
Returns:
List of all :class:`.Conversation` objects.
|
def get_all(self, include_archived=False):
return [conv for conv in self._conv_dict.values()
if not conv.is_archived or include_archived]
| 360,456
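A sketch of listing conversations, assuming conv_list came from build_user_conversation_list and that Conversation exposes id_, last_modified and name properties; name may be None for an unnamed one-to-one conversation.

convs = sorted(conv_list.get_all(include_archived=False),
               key=lambda conv: conv.last_modified, reverse=True)
for conv in convs:
    print(conv.id_, conv.name)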
|
Leave a conversation.
Args:
conv_id (str): ID of conversation to leave.
|
async def leave_conversation(self, conv_id):
logger.info('Leaving conversation: {}'.format(conv_id))
await self._conv_dict[conv_id].leave()
del self._conv_dict[conv_id]
| 360,457
|
Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance
|
async def _on_state_update(self, state_update):
# The state update will include some type of notification:
notification_type = state_update.WhichOneof('state_update')
# If conversation fields have been updated, the state update will have
# a conversation containing changed fields. Handle updating the
# conversation from this delta:
if state_update.HasField('conversation'):
try:
await self._handle_conversation_delta(
state_update.conversation
)
except exceptions.NetworkError:
logger.warning(
'Discarding %s for %s: Failed to fetch conversation',
notification_type.replace('_', ' '),
state_update.conversation.conversation_id.id
)
return
if notification_type == 'typing_notification':
await self._handle_set_typing_notification(
state_update.typing_notification
)
elif notification_type == 'watermark_notification':
await self._handle_watermark_notification(
state_update.watermark_notification
)
elif notification_type == 'event_notification':
await self._on_event(
state_update.event_notification.event
)
| 360,459
|
Get a cached conversation or fetch a missing conversation.
Args:
conv_id (str): Conversation identifier.
Raises:
NetworkError: If the request to fetch the conversation fails.
Returns:
:class:`.Conversation` with matching ID.
|
async def _get_or_fetch_conversation(self, conv_id):
conv = self._conv_dict.get(conv_id, None)
if conv is None:
logger.info('Fetching unknown conversation %s', conv_id)
res = await self._client.get_conversation(
hangouts_pb2.GetConversationRequest(
request_header=self._client.get_request_header(),
conversation_spec=hangouts_pb2.ConversationSpec(
conversation_id=hangouts_pb2.ConversationId(
id=conv_id
)
), include_event=False
)
)
conv_state = res.conversation_state
event_cont_token = None
if conv_state.HasField('event_continuation_token'):
event_cont_token = conv_state.event_continuation_token
return self._add_conversation(conv_state.conversation,
event_cont_token=event_cont_token)
else:
return conv
| 360,460
|
Receive a hangouts_pb2.Event and fan out to Conversations.
Args:
event_: hangouts_pb2.Event instance
|
async def _on_event(self, event_):
conv_id = event_.conversation_id.id
try:
conv = await self._get_or_fetch_conversation(conv_id)
except exceptions.NetworkError:
logger.warning(
'Failed to fetch conversation for event notification: %s',
conv_id
)
else:
self._sync_timestamp = parsers.from_timestamp(event_.timestamp)
conv_event = conv.add_event(event_)
# conv_event may be None if the event was a duplicate.
if conv_event is not None:
await self.on_event.fire(conv_event)
await conv.on_event.fire(conv_event)
| 360,461
|
Receive Conversation delta and create or update the conversation.
Args:
conversation: hangouts_pb2.Conversation instance
Raises:
NetworkError: A request to fetch the complete conversation failed.
|
async def _handle_conversation_delta(self, conversation):
conv_id = conversation.conversation_id.id
conv = self._conv_dict.get(conv_id, None)
if conv is None:
# Ignore the delta and fetch the complete conversation.
await self._get_or_fetch_conversation(conv_id)
else:
# Update conversation using the delta.
conv.update_conversation(conversation)
| 360,462
|
Receive SetTypingNotification and update the conversation.
Args:
set_typing_notification: hangouts_pb2.SetTypingNotification
instance
|
async def _handle_set_typing_notification(self, set_typing_notification):
conv_id = set_typing_notification.conversation_id.id
res = parsers.parse_typing_status_message(set_typing_notification)
await self.on_typing.fire(res)
try:
conv = await self._get_or_fetch_conversation(conv_id)
except exceptions.NetworkError:
logger.warning(
'Failed to fetch conversation for typing notification: %s',
conv_id
)
else:
await conv.on_typing.fire(res)
| 360,463
|
Receive WatermarkNotification and update the conversation.
Args:
watermark_notification: hangouts_pb2.WatermarkNotification instance
|
async def _handle_watermark_notification(self, watermark_notification):
conv_id = watermark_notification.conversation_id.id
res = parsers.parse_watermark_notification(watermark_notification)
await self.on_watermark_notification.fire(res)
try:
conv = await self._get_or_fetch_conversation(conv_id)
except exceptions.NetworkError:
logger.warning(
'Failed to fetch conversation for watermark notification: %s',
conv_id
)
else:
await conv.on_watermark_notification.fire(res)
| 360,464
|
Upgrade name type of this user.
Google Voice participants often first appear with no name at all, and
then get upgraded unpredictably to numbers ("+12125551212") or names.
Args:
user_ (~hangups.user.User): User to upgrade with.
|
def upgrade_name(self, user_):
if user_.name_type > self.name_type:
self.full_name = user_.full_name
self.first_name = user_.first_name
self.name_type = user_.name_type
logger.debug('Added %s name to User "%s": %s',
self.name_type.name.lower(), self.full_name, self)
| 360,467
|
Construct user from ``Entity`` message.
Args:
entity: ``Entity`` message.
self_user_id (~hangups.user.UserID or None): The ID of the current
user. If ``None``, assume ``entity`` is the current user.
Returns:
:class:`~hangups.user.User` object.
|
def from_entity(entity, self_user_id):
user_id = UserID(chat_id=entity.id.chat_id,
gaia_id=entity.id.gaia_id)
return User(user_id, entity.properties.display_name,
entity.properties.first_name,
entity.properties.photo_url,
entity.properties.email,
(self_user_id == user_id) or (self_user_id is None))
| 360,468
|
Construct user from ``ConversationParticipantData`` message.
Args:
conv_part_data: ``ConversationParticipantData`` message.
self_user_id (~hangups.user.UserID or None): The ID of the current
user. If ``None``, assume ``conv_part_data`` is the current user.
Returns:
:class:`~hangups.user.User` object.
|
def from_conv_part_data(conv_part_data, self_user_id):
user_id = UserID(chat_id=conv_part_data.id.chat_id,
gaia_id=conv_part_data.id.gaia_id)
return User(user_id, conv_part_data.fallback_name, None, None, [],
(self_user_id == user_id) or (self_user_id is None))
| 360,469
|
Get a user by its ID.
Args:
user_id (~hangups.user.UserID): The ID of the user.
Raises:
KeyError: If no such user is known.
Returns:
:class:`~hangups.user.User` with the given ID.
|
def get_user(self, user_id):
try:
return self._user_dict[user_id]
except KeyError:
logger.warning('UserList returning unknown User for UserID %s',
user_id)
return User(user_id, None, None, None, [], False)
| 360,471
|
Add an observer to this event.
Args:
callback: A function or coroutine callback to call when the event
is fired.
Raises:
ValueError: If the callback has already been added.
|
def add_observer(self, callback):
if callback in self._observers:
raise ValueError('{} is already an observer of {}'
.format(callback, self))
self._observers.append(callback)
| 360,473
|
Remove an observer from this event.
Args:
callback: A function or coroutine callback to remove from this
event.
Raises:
ValueError: If the callback is not an observer of this event.
|
def remove_observer(self, callback):
if callback not in self._observers:
raise ValueError('{} is not an observer of {}'
.format(callback, self))
self._observers.remove(callback)
| 360,474
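A sketch of the observer pattern these two methods implement, assuming client is a hangups.Client; both plain functions and coroutines may be registered, and adding the same callback twice raises ValueError.

async def on_state_update(state_update):
    print('Update type:', state_update.WhichOneof('state_update'))

client.on_state_update.add_observer(on_state_update)
# ... later, once the updates are no longer needed:
client.on_state_update.remove_observer(on_state_update)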
|
Run a hangups example coroutine.
Args:
example_coroutine (coroutine): Coroutine to run with a connected
hangups client and arguments namespace as arguments.
extra_args (str): Any extra command line arguments required by the
example.
|
def run_example(example_coroutine, *extra_args):
args = _get_parser(extra_args).parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)
# Obtain hangups authentication cookies, prompting for credentials from
# standard input if necessary.
cookies = hangups.auth.get_auth_stdin(args.token_path)
client = hangups.Client(cookies)
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(_async_main(example_coroutine, client, args),
loop=loop)
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
task.cancel()
loop.run_until_complete(task)
finally:
loop.close()
| 360,481
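A sketch of an example script built on run_example, modelled on the scripts in hangups' examples/ directory; the "from common import run_example" import is an assumption about where the helper lives, and the request flow reuses API calls shown elsewhere in this table.

from hangups import hangouts_pb2
from common import run_example  # assumed local module providing run_example

async def show_display_name(client, args):
    # Ask the chat API for the authenticated user's own entity.
    request = hangouts_pb2.GetSelfInfoRequest(
        request_header=client.get_request_header(),
    )
    response = await client.get_self_info(request)
    print(response.self_entity.properties.display_name)

if __name__ == '__main__':
    run_example(show_display_name)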
|
Print column headers and rows as a reStructuredText table.
Args:
col_tuple: Tuple of column name strings.
row_tuples: List of tuples containing row data.
|
def print_table(col_tuple, row_tuples):
col_widths = [max(len(str(row[col])) for row in [col_tuple] + row_tuples)
for col in range(len(col_tuple))]
format_str = ' '.join('{{:<{}}}'.format(col_width)
for col_width in col_widths)
header_border = ' '.join('=' * col_width for col_width in col_widths)
print(header_border)
print(format_str.format(*col_tuple))
print(header_border)
for row_tuple in row_tuples:
print(format_str.format(*row_tuple))
print(header_border)
print()
| 360,484
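For instance, this sketch prints a small reStructuredText simple table (trailing padding omitted from the expected output shown in the comments):

print_table(('Name', 'Number'), [('FOO', 1), ('BAR', 22)])
# ==== ======
# Name Number
# ==== ======
# FOO  1
# BAR  22
# ==== ======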
|
Generate doc for an enum.
Args:
enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the enum definition.
name_prefix: Optional prefix for this enum's name.
|
def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):
print(make_subsection(name_prefix + enum_descriptor.name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for value_index, value in enumerate(enum_descriptor.value):
field_location = locations[path + (2, value_index)]
row_tuples.append((
make_code(value.name),
value.number,
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Name', 'Number', 'Description'), row_tuples)
| 360,485
|
Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name.
|
def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
# message_type is 4
prefixed_name = name_prefix + message_descriptor.name
print(make_subsection(prefixed_name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for field_index, field in enumerate(message_descriptor.field):
field_location = locations[path + (2, field_index)]
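        # Field types 11 (TYPE_MESSAGE) and 14 (TYPE_ENUM) reference other
        # definitions, so render them as links instead of scalar type names.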
if field.type not in [11, 14]:
type_str = TYPE_TO_STR[field.type]
else:
type_str = make_link(field.type_name.lstrip('.'))
row_tuples.append((
make_code(field.name),
field.number,
type_str,
LABEL_TO_STR[field.label],
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Field', 'Number', 'Type', 'Label', 'Description'),
row_tuples)
# Generate nested messages
nested_types = enumerate(message_descriptor.nested_type)
for index, nested_message_desc in nested_types:
generate_message_doc(nested_message_desc, locations,
path + (3, index),
name_prefix=prefixed_name + '.')
# Generate nested enums
for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
generate_enum_doc(nested_enum_desc, locations, path + (4, index),
name_prefix=prefixed_name + '.')
| 360,486
|
Compile proto file to descriptor set.
Args:
proto_file_path: Path to proto file to compile.
Returns:
Path to file containing compiled descriptor set.
Raises:
SystemExit: If the compilation fails.
|
def compile_protofile(proto_file_path):
out_file = tempfile.mkstemp()[1]
try:
subprocess.check_output(['protoc', '--include_source_info',
'--descriptor_set_out', out_file,
proto_file_path])
except subprocess.CalledProcessError as e:
sys.exit('protoc returned status {}'.format(e.returncode))
return out_file
| 360,487
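A sketch of consuming the compiled output, assuming protoc is on PATH and the protobuf Python package is installed; 'hangouts.proto' is an arbitrary example path.

from google.protobuf import descriptor_pb2

descriptor_path = compile_protofile('hangouts.proto')
with open(descriptor_path, 'rb') as f:
    descriptor_set = descriptor_pb2.FileDescriptorSet()
    descriptor_set.ParseFromString(f.read())
print([file_proto.name for file_proto in descriptor_set.file])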
|
Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
|
def get_auth_stdin(refresh_token_filename, manual_login=False):
refresh_token_cache = RefreshTokenCache(refresh_token_filename)
return get_auth(
CredentialsPrompt(), refresh_token_cache, manual_login=manual_login
)
| 360,540
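A sketch of the typical authentication flow; the refresh-token path is an arbitrary example, and GoogleAuthError is the exception documented above (from hangups.auth).

import hangups
import hangups.auth

try:
    cookies = hangups.auth.get_auth_stdin('refresh_token.txt')
except hangups.auth.GoogleAuthError as e:
    print('Authentication failed: {}'.format(e))
else:
    client = hangups.Client(cookies)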