repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
APSL/transmanager
transmanager/manager.py
Manager.get_elements
def get_elements(self, object_list):
    """
    Recursively flatten a nested tree of children into a single list.

    Nested lists are descended into; only TranslatableModel instances are
    collected, anything else is silently skipped.

    :param object_list: possibly nested list of items
    :return: flat list of TranslatableModel instances
    """
    flattened = []
    for element in object_list:
        if isinstance(element, list):
            flattened.extend(self.get_elements(element))
        elif isinstance(element, TranslatableModel):
            flattened.append(element)
    return flattened
python
def get_elements(self, object_list): """ Recursive method to iterate the tree of children in order to flatten it :param object_list: :return: """ result = [] for item in object_list: if isinstance(item, list): result += self.get_elements(item) elif isinstance(item, TranslatableModel): result.append(item) return result
[ "def", "get_elements", "(", "self", ",", "object_list", ")", ":", "result", "=", "[", "]", "for", "item", "in", "object_list", ":", "if", "isinstance", "(", "item", ",", "list", ")", ":", "result", "+=", "self", ".", "get_elements", "(", "item", ")", ...
Recursive method to iterate the tree of children in order to flatten it :param object_list: :return:
[ "Recursive", "method", "to", "iterate", "the", "tree", "of", "children", "in", "order", "to", "flatten", "it" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L553-L566
APSL/transmanager
transmanager/manager.py
Manager.update_model_languages
def update_model_languages(self, model_class, languages):
    """
    Update the TransModelLanguage entry of ``model_class`` with the
    selected languages.

    :param model_class: model class whose language set is updated
    :param languages: iterable of language codes to add
    :return: None
    """
    # Languages that should be attached to the TransModelLanguage entry.
    new_langs = list(TransLanguage.objects.filter(code__in=languages))
    if not new_langs:
        return
    mod_lan, created = TransModelLanguage.objects.get_or_create(
        model='{} - {}'.format(model_class._meta.app_label,
                               model_class._meta.model.__name__.lower()),
    )
    # Use a set for O(1) membership tests instead of scanning a list.
    exist_langs_codes = {lang.code for lang in mod_lan.languages.all()}
    for lang in new_langs:
        if lang.code not in exist_langs_codes:
            try:
                mod_lan.languages.add(lang)
            except IntegrityError:
                # A concurrent writer may have added the same language;
                # safe to ignore.
                pass
python
def update_model_languages(self, model_class, languages): """ Update the TransModelLanguages model with the selected languages :param model_class: :param languages: :return: """ # get the langs we have to add to the TransModelLanguage qs = TransLanguage.objects.filter(code__in=languages) new_langs = [lang for lang in qs] if not new_langs: return mod_lan, created = TransModelLanguage.objects.get_or_create( model='{} - {}'.format(model_class._meta.app_label, model_class._meta.model.__name__.lower()), ) exist_langs_codes = [lang.code for lang in mod_lan.languages.all()] for lang in new_langs: if lang.code not in exist_langs_codes: try: mod_lan.languages.add(lang) except IntegrityError: pass
[ "def", "update_model_languages", "(", "self", ",", "model_class", ",", "languages", ")", ":", "# get the langs we have to add to the TransModelLanguage", "qs", "=", "TransLanguage", ".", "objects", ".", "filter", "(", "code__in", "=", "languages", ")", "new_langs", "=...
Update the TransModelLanguages model with the selected languages :param model_class: :param languages: :return:
[ "Update", "the", "TransModelLanguages", "model", "with", "the", "selected", "languages" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L568-L591
APSL/transmanager
transmanager/manager.py
Manager.add_item_languages
def add_item_languages(self, item, languages):
    """
    Update the TransItemLanguage entry of ``item`` with the selected
    languages.

    :param item: model instance the languages are attached to
    :param languages: iterable of language codes to add
    :return: None
    """
    # Languages that should be attached to the TransItemLanguage entry.
    new_langs = list(TransLanguage.objects.filter(code__in=languages))
    if not new_langs:
        return
    ct_item = ContentType.objects.get_for_model(item)
    item_lan, created = TransItemLanguage.objects.get_or_create(
        content_type_id=ct_item.id, object_id=item.id)
    # M2M .add() already ignores duplicates, so no pre-filtering is needed.
    item_lan.languages.add(*new_langs)
python
def add_item_languages(self, item, languages): """ Update the TransItemLanguage model with the selected languages :param item: :param languages: :return: """ # get the langs we have to add to the TransModelLanguage qs = TransLanguage.objects.filter(code__in=languages) new_langs = [lang for lang in qs] if not new_langs: return ct_item = ContentType.objects.get_for_model(item) item_lan, created = TransItemLanguage.objects.get_or_create(content_type_id=ct_item.id, object_id=item.id) item_lan.languages.add(*new_langs)
[ "def", "add_item_languages", "(", "self", ",", "item", ",", "languages", ")", ":", "# get the langs we have to add to the TransModelLanguage", "qs", "=", "TransLanguage", ".", "objects", ".", "filter", "(", "code__in", "=", "languages", ")", "new_langs", "=", "[", ...
Update the TransItemLanguage model with the selected languages :param item: :param languages: :return:
[ "Update", "the", "TransItemLanguage", "model", "with", "the", "selected", "languages" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L593-L609
APSL/transmanager
transmanager/manager.py
Manager.remove_item_languages
def remove_item_languages(self, item, languages):
    """
    Delete the selected languages from the TransItemLanguage entry of
    ``item``; when no languages remain afterwards, the entry itself is
    deleted.

    :param item: model instance the languages are detached from
    :param languages: iterable of language codes to remove
    :return: None
    """
    remove_langs = list(TransLanguage.objects.filter(code__in=languages))
    if not remove_langs:
        return
    ct_item = ContentType.objects.get_for_model(item)
    # NOTE(review): get_or_create may create an empty entry only to delete
    # it below; a filter(...).first() lookup would avoid the extra write —
    # kept as-is to preserve existing behavior.
    item_lan, created = TransItemLanguage.objects.get_or_create(
        content_type_id=ct_item.id, object_id=item.id)
    # .remove() accepts multiple objects, replacing the per-language loop.
    item_lan.languages.remove(*remove_langs)
    if item_lan.languages.count() == 0:
        item_lan.delete()
python
def remove_item_languages(self, item, languages): """ delete the selected languages from the TransItemLanguage model :param item: :param languages: :return: """ # get the langs we have to add to the TransModelLanguage qs = TransLanguage.objects.filter(code__in=languages) remove_langs = [lang for lang in qs] if not remove_langs: return ct_item = ContentType.objects.get_for_model(item) item_lan, created = TransItemLanguage.objects.get_or_create(content_type_id=ct_item.id, object_id=item.id) for lang in remove_langs: item_lan.languages.remove(lang) if item_lan.languages.count() == 0: item_lan.delete()
[ "def", "remove_item_languages", "(", "self", ",", "item", ",", "languages", ")", ":", "# get the langs we have to add to the TransModelLanguage", "qs", "=", "TransLanguage", ".", "objects", ".", "filter", "(", "code__in", "=", "languages", ")", "remove_langs", "=", ...
delete the selected languages from the TransItemLanguage model :param item: :param languages: :return:
[ "delete", "the", "selected", "languages", "from", "the", "TransItemLanguage", "model" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L611-L630
APSL/transmanager
transmanager/manager.py
Manager.get_translation_from_instance
def get_translation_from_instance(instance, lang):
    """
    Fetch the translation of ``instance`` in a specific language.

    Hits the database; returns None when the instance has no translation
    in that language (or does not support translations at all).

    :param instance: translatable model instance
    :param lang: language code
    :return: translation object or None
    """
    try:
        return get_translation(instance, lang)
    except (AttributeError, ObjectDoesNotExist):
        return None
python
def get_translation_from_instance(instance, lang): """ Get the translation from the instance in a specific language, hits the db :param instance: :param lang: :return: """ try: translation = get_translation(instance, lang) except (AttributeError, ObjectDoesNotExist): translation = None return translation
[ "def", "get_translation_from_instance", "(", "instance", ",", "lang", ")", ":", "try", ":", "translation", "=", "get_translation", "(", "instance", ",", "lang", ")", "except", "(", "AttributeError", ",", "ObjectDoesNotExist", ")", ":", "translation", "=", "None"...
Get the translation from the instance in a specific language, hits the db :param instance: :param lang: :return:
[ "Get", "the", "translation", "from", "the", "instance", "in", "a", "specific", "language", "hits", "the", "db" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L633-L645
APSL/transmanager
transmanager/manager.py
Manager.create_translations_for_item_and_its_children
def create_translations_for_item_and_its_children(self, item, languages=None):
    """
    Create translation tasks for an item and its children, returning the
    ids of the created tasks.

    :param item: (master) instance whose translations are created
    :param languages: language codes to create tasks for; defaults to
        ``self.get_languages()``
    :return: list of ids of the created tasks
    """
    if not self.master:
        self.set_master(item)
    if not languages:
        languages = self.get_languages()

    def _task_ids(obj):
        # Shared task-creation logic for the main object and each child
        # (the original duplicated this snippet twice).
        fields = self._get_translated_field_names(obj)
        tasks = self.create_from_item(languages, obj, fields)
        return [task.pk for task in tasks] if tasks else []

    # First process the main object...
    result_ids = _task_ids(item)
    # ...then its translatable children, preserving the original order.
    for child in self.get_translatable_children(item):
        result_ids += _task_ids(child)
    return result_ids
python
def create_translations_for_item_and_its_children(self, item, languages=None): """ Creates the translations from an item and defined languages and return the id's of the created tasks :param item: (master) :param languages: :return: """ if not self.master: self.set_master(item) if not languages: languages = self.get_languages() result_ids = [] # first process main object fields = self._get_translated_field_names(item) tasks = self.create_from_item(languages, item, fields) if tasks: result_ids += [task.pk for task in tasks] # then process child objects from main children = self.get_translatable_children(item) for child in children: fields = self._get_translated_field_names(child) tasks = self.create_from_item(languages, child, fields) if tasks: result_ids += [task.pk for task in tasks] return result_ids
[ "def", "create_translations_for_item_and_its_children", "(", "self", ",", "item", ",", "languages", "=", "None", ")", ":", "if", "not", "self", ".", "master", ":", "self", ".", "set_master", "(", "item", ")", "if", "not", "languages", ":", "languages", "=", ...
Creates the translations from an item and defined languages and return the id's of the created tasks :param item: (master) :param languages: :return:
[ "Creates", "the", "translations", "from", "an", "item", "and", "defined", "languages", "and", "return", "the", "id", "s", "of", "the", "created", "tasks" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L647-L678
APSL/transmanager
transmanager/manager.py
Manager.delete_translations_for_item_and_its_children
def delete_translations_for_item_and_its_children(self, item, languages=None):
    """
    Delete the pending (not done) translation tasks of an item and,
    recursively, of all its translatable children. Used when a model is
    not enabled anymore.

    :param item: instance whose tasks are removed
    :param languages: optional language codes restricting the deletion
    :return: None
    """
    self.log('--- Deleting translations ---')
    if not self.master:
        self.set_master(item)

    filter_by = {
        'object_class': item.__class__.__name__,
        'object_name': '{} - {}'.format(item._meta.app_label.lower(),
                                        item._meta.verbose_name),
        'object_pk': item.pk,
        'done': False,
    }
    if languages:
        filter_by['language__code__in'] = languages
    TransTask.objects.filter(**filter_by).delete()

    # Recurse into the child objects of the main item.
    for child in self.get_translatable_children(item):
        self.delete_translations_for_item_and_its_children(child, languages)
python
def delete_translations_for_item_and_its_children(self, item, languages=None): """ deletes the translations task of an item and its children used when a model is not enabled anymore :param item: :param languages: :return: """ self.log('--- Deleting translations ---') if not self.master: self.set_master(item) object_name = '{} - {}'.format(item._meta.app_label.lower(), item._meta.verbose_name) object_class = item.__class__.__name__ object_pk = item.pk filter_by = { 'object_class': object_class, 'object_name': object_name, 'object_pk': object_pk, 'done': False } if languages: filter_by.update({'language__code__in': languages}) TransTask.objects.filter(**filter_by).delete() # then process child objects from main children = self.get_translatable_children(item) for child in children: self.delete_translations_for_item_and_its_children(child, languages)
[ "def", "delete_translations_for_item_and_its_children", "(", "self", ",", "item", ",", "languages", "=", "None", ")", ":", "self", ".", "log", "(", "'--- Deleting translations ---'", ")", "if", "not", "self", ".", "master", ":", "self", ".", "set_master", "(", ...
deletes the translations task of an item and its children used when a model is not enabled anymore :param item: :param languages: :return:
[ "deletes", "the", "translations", "task", "of", "an", "item", "and", "its", "children", "used", "when", "a", "model", "is", "not", "enabled", "anymore", ":", "param", "item", ":", ":", "param", "languages", ":", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L680-L711
michal-stuglik/django-blastplus
blastplus/views.py
blast
def blast(request, blast_form, template_init, template_result, blast_commandline,
          sample_fasta_path, extra_context=None):
    """
    Process a blastn/tblastn (blast+) query or set up the initial blast form.

    :param request: Django request
    :param blast_form: form class used to validate the query
    :param template_init: template rendered for the empty or invalid form
    :param template_result: template rendered with blast results
    :param blast_commandline: blast+ command-line wrapper to run
    :param sample_fasta_path: path of the sample sequence shown in the form
    :param extra_context: optional callable applied to the parsed blast
        records before rendering (e.g. to join results with an external db)
    :return: HttpResponse
    """
    if request.method == 'POST':
        form = blast_form(request.POST)
        if form.is_valid():
            query_file_object_tmp = form.cleaned_data['sequence_in_form']
            evalue = float(form.cleaned_data['evalue_in_form'])
            word_size = int(form.cleaned_data['word_size_in_form'])
            database_path = str(form.cleaned_data['blast_db_in_form'])

            standard_opt_dic = {'query': query_file_object_tmp, 'evalue': evalue,
                                'outfmt': 5, 'db': database_path,
                                'word_size': word_size}

            annotated = utils.get_annotation(database_path, BLAST_DB_NUCL_LIST)

            # Non-standard option: the matrix field is not present on every
            # form, so its absence is expected (the original used a bare
            # "except: pass" here, which hid all other errors).
            matrix = form.cleaned_data.get('matrix_in_form')
            if matrix is not None:
                standard_opt_dic["matrix"] = str(matrix)

            sensitivity_opt_dic = ast.literal_eval(
                str(form.cleaned_data['search_sensitivity_in_form']))

            blast_records__file_xml = None
            try:
                # Run the blast search and parse results from the temp file.
                blast_records__file_xml, blast_error = utils.run_blast_commands(
                    blast_commandline,
                    **dict(standard_opt_dic, **sensitivity_opt_dic))

                if len(blast_error) > 0:
                    # BUGFIX: the original used the *variable* blast_error as
                    # the context key; the template expects the literal key.
                    return render(request=request, template_name=template_result,
                                  context={"blast_record": '',
                                           "blast_error": BLAST_CORRECT_PARAMS})
                else:
                    # Convert blast results into objects packed into a list.
                    blast_records_in_object_and_list = utils.blast_records_to_object(
                        list(NCBIXML.parse(blast_records__file_xml)))

                    # User-defined hook to modify blast results, e.g. join
                    # blast results with an external database in the template.
                    if extra_context is not None:
                        blast_records_in_object_and_list = extra_context(
                            blast_records_in_object_and_list)

                    return render(
                        request=request, template_name=template_result,
                        context={'application': blast_records_in_object_and_list[0].application,
                                 'version': blast_records_in_object_and_list[0].version,
                                 'blast_records': blast_records_in_object_and_list,
                                 'annotated': annotated})
            finally:
                # Remove the temporary result file.
                if blast_records__file_xml is not None:
                    os.remove(blast_records__file_xml.name)

        # BUGFIX: an invalid POST previously fell through and implicitly
        # returned None; redisplay the bound form so field errors are shown.
        return render(request=request, template_name=template_init,
                      context={'form': form,
                               'sequence_sample_in_fasta': utils.get_sample_data(sample_fasta_path),
                               "blast_max_number_seq_in_input": BLAST_MAX_NUMBER_SEQ_IN_INPUT})

    form = blast_form(initial={'sequence_in_form': '',
                               'evalue_in_form': EVALUE_BLAST_DEFAULT})
    return render(request=request, template_name=template_init,
                  context={'form': form,
                           'sequence_sample_in_fasta': utils.get_sample_data(sample_fasta_path),
                           "blast_max_number_seq_in_input": BLAST_MAX_NUMBER_SEQ_IN_INPUT})
python
def blast(request, blast_form, template_init, template_result, blast_commandline, sample_fasta_path, extra_context=None): """ Process blastn/tblastn (blast+) query or set up initial blast form. """ if request.method == 'POST': form = blast_form(request.POST) if form.is_valid(): query_file_object_tmp = form.cleaned_data['sequence_in_form'] evalue = float(form.cleaned_data['evalue_in_form']) word_size = int(form.cleaned_data['word_size_in_form']) database_path = str(form.cleaned_data['blast_db_in_form']) standard_opt_dic = {'query': query_file_object_tmp, 'evalue': evalue, 'outfmt': 5, 'db': database_path, 'word_size': word_size} annotated = utils.get_annotation(database_path, BLAST_DB_NUCL_LIST) # none standard options: try: matrix = str(form.cleaned_data['matrix_in_form']) standard_opt_dic["matrix"] = matrix except: pass sensitivity_opt_dic = ast.literal_eval(str(form.cleaned_data['search_sensitivity_in_form'])) blast_records__file_xml = None try: # blast search, parse results from temp file, put them into template for rendering. blast_records__file_xml, blast_error = utils.run_blast_commands(blast_commandline, **dict(standard_opt_dic, **sensitivity_opt_dic)) if len(blast_error) > 0: return render(request=request, template_name=template_result, context={"blast_record": '', blast_error: BLAST_CORRECT_PARAMS}) else: # converts blast results into objects and pack into list blast_records_in_object_and_list = utils.blast_records_to_object( list(NCBIXML.parse(blast_records__file_xml))) # user defined function to modify blast results # e.g. 
join blast results with external database in template if extra_context is not None: blast_records_in_object_and_list = extra_context(blast_records_in_object_and_list) return render(request=request, template_name=template_result, context={'application': blast_records_in_object_and_list[0].application, 'version': blast_records_in_object_and_list[0].version, 'blast_records': blast_records_in_object_and_list, 'annotated': annotated}) finally: # remove result - temporary file if blast_records__file_xml is not None: os.remove(blast_records__file_xml.name) else: form = blast_form(initial={'sequence_in_form': '', 'evalue_in_form': EVALUE_BLAST_DEFAULT}) return render(request=request, template_name=template_init, context={'form': form, 'sequence_sample_in_fasta': utils.get_sample_data(sample_fasta_path), "blast_max_number_seq_in_input": BLAST_MAX_NUMBER_SEQ_IN_INPUT, })
[ "def", "blast", "(", "request", ",", "blast_form", ",", "template_init", ",", "template_result", ",", "blast_commandline", ",", "sample_fasta_path", ",", "extra_context", "=", "None", ")", ":", "if", "request", ".", "method", "==", "'POST'", ":", "form", "=", ...
Process blastn/tblastn (blast+) query or set up initial blast form.
[ "Process", "blastn", "/", "tblastn", "(", "blast", "+", ")", "query", "or", "set", "up", "initial", "blast", "form", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/views.py#L17-L86
noxdafox/vminspect
vminspect/winreg.py
RegistryHive.keys
def keys(self):
    """Iterate over the hive's keys.

    Yields WinRegKey namedtuples containing:

        path: path of the key "RootKey\\Key\\..."
        timestamp: date and time of last modification
        values: list of values (("ValueKey", "ValueType", ValueValue), ...)
    """
    root_node = self.root()
    for child in self.node_children(root_node):
        yield from self._visit_registry(child, self._rootkey)
python
def keys(self): """Iterates over the hive's keys. Yields WinRegKey namedtuples containing: path: path of the key "RootKey\\Key\\..." timestamp: date and time of last modification values: list of values (("ValueKey", "ValueType", ValueValue), ... ) """ for node in self.node_children(self.root()): yield from self._visit_registry(node, self._rootkey)
[ "def", "keys", "(", "self", ")", ":", "for", "node", "in", "self", ".", "node_children", "(", "self", ".", "root", "(", ")", ")", ":", "yield", "from", "self", ".", "_visit_registry", "(", "node", ",", "self", ".", "_rootkey", ")" ]
Iterates over the hive's keys. Yields WinRegKey namedtuples containing: path: path of the key "RootKey\\Key\\..." timestamp: date and time of last modification values: list of values (("ValueKey", "ValueType", ValueValue), ... )
[ "Iterates", "over", "the", "hive", "s", "keys", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/winreg.py#L89-L100
noxdafox/vminspect
vminspect/winreg.py
RegistryHive._value_data
def _value_data(self, value): """Parses binary and unidentified values.""" return codecs.decode( codecs.encode(self.value_value(value)[1], 'base64'), 'utf8')
python
def _value_data(self, value): """Parses binary and unidentified values.""" return codecs.decode( codecs.encode(self.value_value(value)[1], 'base64'), 'utf8')
[ "def", "_value_data", "(", "self", ",", "value", ")", ":", "return", "codecs", ".", "decode", "(", "codecs", ".", "encode", "(", "self", ".", "value_value", "(", "value", ")", "[", "1", "]", ",", "'base64'", ")", ",", "'utf8'", ")" ]
Parses binary and unidentified values.
[ "Parses", "binary", "and", "unidentified", "values", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/winreg.py#L123-L126
rapidpro/dash
dash/utils/__init__.py
intersection
def intersection(*args):
    """
    Return the intersection of lists, using the first list to determine
    item order.

    :param args: any number of lists
    :return: ordered, de-duplicated intersection
    """
    if not args:
        return []
    # De-duplicate the first list while preserving its order.
    ordered = list(OrderedDict.fromkeys(args[0]))
    if len(args) == 1:
        return ordered
    common = set(args[1])
    for extra in args[2:]:
        common &= set(extra)
    return [element for element in ordered if element in common]
python
def intersection(*args): """ Return the intersection of lists, using the first list to determine item order """ if not args: return [] # remove duplicates from first list whilst preserving order base = list(OrderedDict.fromkeys(args[0])) if len(args) == 1: return base else: others = set(args[1]).intersection(*args[2:]) return [e for e in base if e in others]
[ "def", "intersection", "(", "*", "args", ")", ":", "if", "not", "args", ":", "return", "[", "]", "# remove duplicates from first list whilst preserving order", "base", "=", "list", "(", "OrderedDict", ".", "fromkeys", "(", "args", "[", "0", "]", ")", ")", "i...
Return the intersection of lists, using the first list to determine item order
[ "Return", "the", "intersection", "of", "lists", "using", "the", "first", "list", "to", "determine", "item", "order" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L16-L30
rapidpro/dash
dash/utils/__init__.py
union
def union(*args):
    """
    Return the union of lists, ordering by first seen in any list.

    :param args: any number of lists
    :return: de-duplicated union preserving first-seen order
    """
    if not args:
        return []
    # BUGFIX: copy the first list instead of extending it in place, which
    # mutated the caller's argument.
    combined = list(args[0])
    for other in args[1:]:
        combined.extend(other)
    return list(OrderedDict.fromkeys(combined))
python
def union(*args): """ Return the union of lists, ordering by first seen in any list """ if not args: return [] base = args[0] for other in args[1:]: base.extend(other) return list(OrderedDict.fromkeys(base))
[ "def", "union", "(", "*", "args", ")", ":", "if", "not", "args", ":", "return", "[", "]", "base", "=", "args", "[", "0", "]", "for", "other", "in", "args", "[", "1", ":", "]", ":", "base", ".", "extend", "(", "other", ")", "return", "list", "...
Return the union of lists, ordering by first seen in any list
[ "Return", "the", "union", "of", "lists", "ordering", "by", "first", "seen", "in", "any", "list" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L33-L44
rapidpro/dash
dash/utils/__init__.py
random_string
def random_string(length):
    """
    Generate a random alphanumeric string of the given length.

    Characters that could be mistaken for each other (e.g. 'I' and '1')
    are excluded from the alphabet.
    """
    alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
    return "".join([random.choice(alphabet) for _ in range(length)])
python
def random_string(length): """ Generates a random alphanumeric string """ # avoid things that could be mistaken ex: 'I' and '1' letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ" return "".join([random.choice(letters) for _ in range(length)])
[ "def", "random_string", "(", "length", ")", ":", "# avoid things that could be mistaken ex: 'I' and '1'", "letters", "=", "\"23456789ABCDEFGHJKLMNPQRSTUVWXYZ\"", "return", "\"\"", ".", "join", "(", "[", "random", ".", "choice", "(", "letters", ")", "for", "_", "in", ...
Generates a random alphanumeric string
[ "Generates", "a", "random", "alphanumeric", "string" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L47-L53
rapidpro/dash
dash/utils/__init__.py
filter_dict
def filter_dict(d, keys):
    """
    Create a new dict from an existing dict that only has the given keys.
    """
    filtered = {}
    for key, value in d.items():
        if key in keys:
            filtered[key] = value
    return filtered
python
def filter_dict(d, keys): """ Creates a new dict from an existing dict that only has the given keys """ return {k: v for k, v in d.items() if k in keys}
[ "def", "filter_dict", "(", "d", ",", "keys", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "if", "k", "in", "keys", "}" ]
Creates a new dict from an existing dict that only has the given keys
[ "Creates", "a", "new", "dict", "from", "an", "existing", "dict", "that", "only", "has", "the", "given", "keys" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L56-L60
rapidpro/dash
dash/utils/__init__.py
get_cacheable
def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False):
    """
    Get the result of a callable, using the given key and TTL as a cache.

    Values are stored JSON-serialized, so ``calculate`` must return
    JSON-compatible data.

    :param cache_key: cache key to read/write
    :param cache_ttl: expiry (seconds) for a freshly computed value
    :param calculate: zero-argument callable producing the value
    :param recalculate: when True, skip the cache read and recompute
    :return: the cached or freshly calculated value
    """
    if not recalculate:
        cached = cache.get(cache_key)
        if cached is not None:
            return json.loads(cached)
    fresh = calculate()
    cache.set(cache_key, json.dumps(fresh), cache_ttl)
    return fresh
python
def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False): """ Gets the result of a method call, using the given key and TTL as a cache """ if not recalculate: cached = cache.get(cache_key) if cached is not None: return json.loads(cached) calculated = calculate() cache.set(cache_key, json.dumps(calculated), cache_ttl) return calculated
[ "def", "get_cacheable", "(", "cache_key", ",", "cache_ttl", ",", "calculate", ",", "recalculate", "=", "False", ")", ":", "if", "not", "recalculate", ":", "cached", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "cached", "is", "not", "None", ":"...
Gets the result of a method call, using the given key and TTL as a cache
[ "Gets", "the", "result", "of", "a", "method", "call", "using", "the", "given", "key", "and", "TTL", "as", "a", "cache" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L63-L75
rapidpro/dash
dash/utils/__init__.py
get_obj_cacheable
def get_obj_cacheable(obj, attr_name, calculate, recalculate=False):
    """
    Get the result of a callable, caching it as an attribute on ``obj``.

    :param obj: object the value is cached on
    :param attr_name: attribute name used for the cache
    :param calculate: zero-argument callable producing the value
    :param recalculate: when True, recompute even if the attribute exists
    :return: the cached or freshly calculated value
    """
    if recalculate or not hasattr(obj, attr_name):
        setattr(obj, attr_name, calculate())
    return getattr(obj, attr_name)
python
def get_obj_cacheable(obj, attr_name, calculate, recalculate=False): """ Gets the result of a method call, using the given object and attribute name as a cache """ if not recalculate and hasattr(obj, attr_name): return getattr(obj, attr_name) calculated = calculate() setattr(obj, attr_name, calculated) return calculated
[ "def", "get_obj_cacheable", "(", "obj", ",", "attr_name", ",", "calculate", ",", "recalculate", "=", "False", ")", ":", "if", "not", "recalculate", "and", "hasattr", "(", "obj", ",", "attr_name", ")", ":", "return", "getattr", "(", "obj", ",", "attr_name",...
Gets the result of a method call, using the given object and attribute name as a cache
[ "Gets", "the", "result", "of", "a", "method", "call", "using", "the", "given", "object", "and", "attribute", "name", "as", "a", "cache" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L78-L89
rapidpro/dash
dash/utils/__init__.py
datetime_to_ms
def datetime_to_ms(dt):
    """
    Convert a datetime to a millisecond accuracy UTC timestamp.
    """
    whole_seconds = calendar.timegm(dt.utctimetuple())
    millis = dt.microsecond // 1000
    return whole_seconds * 1000 + millis
python
def datetime_to_ms(dt): """ Converts a datetime to a millisecond accuracy timestamp """ seconds = calendar.timegm(dt.utctimetuple()) return seconds * 1000 + int(dt.microsecond / 1000)
[ "def", "datetime_to_ms", "(", "dt", ")", ":", "seconds", "=", "calendar", ".", "timegm", "(", "dt", ".", "utctimetuple", "(", ")", ")", "return", "seconds", "*", "1000", "+", "int", "(", "dt", ".", "microsecond", "/", "1000", ")" ]
Converts a datetime to a millisecond accuracy timestamp
[ "Converts", "a", "datetime", "to", "a", "millisecond", "accuracy", "timestamp" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L92-L97
rapidpro/dash
dash/utils/__init__.py
ms_to_datetime
def ms_to_datetime(ms):
    """
    Convert a millisecond accuracy timestamp to an aware UTC datetime.
    """
    whole_seconds, millis = divmod(ms, 1000)
    naive = datetime.datetime.utcfromtimestamp(whole_seconds)
    return naive.replace(microsecond=millis * 1000, tzinfo=pytz.utc)
python
def ms_to_datetime(ms): """ Converts a millisecond accuracy timestamp to a datetime """ dt = datetime.datetime.utcfromtimestamp(ms / 1000) return dt.replace(microsecond=(ms % 1000) * 1000).replace(tzinfo=pytz.utc)
[ "def", "ms_to_datetime", "(", "ms", ")", ":", "dt", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "ms", "/", "1000", ")", "return", "dt", ".", "replace", "(", "microsecond", "=", "(", "ms", "%", "1000", ")", "*", "1000", ")", ".", ...
Converts a millisecond accuracy timestamp to a datetime
[ "Converts", "a", "millisecond", "accuracy", "timestamp", "to", "a", "datetime" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L100-L105
rapidpro/dash
dash/utils/__init__.py
get_month_range
def get_month_range(d=None):
    """
    Get the start (inclusive) and end (exclusive) datetimes of the month
    containing the given date, in the same timezone as that date.

    :param d: reference datetime; defaults to timezone.now()
    :return: (start, end) tuple
    """
    reference = d or timezone.now()
    month_start = reference.replace(day=1, hour=0, minute=0,
                                    second=0, microsecond=0)
    return month_start, month_start + relativedelta(months=1)
python
def get_month_range(d=None): """ Gets the start (inclusive) and end (exclusive) datetimes of the current month in the same timezone as the given date """ if not d: d = timezone.now() start = d.replace(day=1, hour=0, minute=0, second=0, microsecond=0) end = start + relativedelta(months=1) return start, end
[ "def", "get_month_range", "(", "d", "=", "None", ")", ":", "if", "not", "d", ":", "d", "=", "timezone", ".", "now", "(", ")", "start", "=", "d", ".", "replace", "(", "day", "=", "1", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "secon...
Gets the start (inclusive) and end (exclusive) datetimes of the current month in the same timezone as the given date
[ "Gets", "the", "start", "(", "inclusive", ")", "and", "end", "(", "exclusive", ")", "datetimes", "of", "the", "current", "month", "in", "the", "same", "timezone", "as", "the", "given", "date" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L108-L118
rapidpro/dash
dash/utils/__init__.py
chunks
def chunks(iterable, size):
    """
    Split a very large iterable into evenly sized chunks.

    Returns an iterator of lists that are no longer than the size passed in.
    """
    iterator = iter(iterable)
    while True:
        batch = list(islice(iterator, size))
        if not batch:
            return
        yield batch
python
def chunks(iterable, size): """ Splits a very large list into evenly sized chunks. Returns an iterator of lists that are no more than the size passed in. """ it = iter(iterable) item = list(islice(it, size)) while item: yield item item = list(islice(it, size))
[ "def", "chunks", "(", "iterable", ",", "size", ")", ":", "it", "=", "iter", "(", "iterable", ")", "item", "=", "list", "(", "islice", "(", "it", ",", "size", ")", ")", "while", "item", ":", "yield", "item", "item", "=", "list", "(", "islice", "("...
Splits a very large list into evenly sized chunks. Returns an iterator of lists that are no more than the size passed in.
[ "Splits", "a", "very", "large", "list", "into", "evenly", "sized", "chunks", ".", "Returns", "an", "iterator", "of", "lists", "that", "are", "no", "more", "than", "the", "size", "passed", "in", "." ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L121-L130
rapidpro/dash
dash/utils/__init__.py
is_dict_equal
def is_dict_equal(d1, d2, keys=None, ignore_none_values=True): """ Compares two dictionaries to see if they are equal :param d1: the first dictionary :param d2: the second dictionary :param keys: the keys to limit the comparison to (optional) :param ignore_none_values: whether to ignore none values :return: true if the dictionaries are equal, else false """ if keys or ignore_none_values: d1 = {k: v for k, v in d1.items() if (keys is None or k in keys) and (v is not None or not ignore_none_values)} d2 = {k: v for k, v in d2.items() if (keys is None or k in keys) and (v is not None or not ignore_none_values)} return d1 == d2
python
def is_dict_equal(d1, d2, keys=None, ignore_none_values=True): """ Compares two dictionaries to see if they are equal :param d1: the first dictionary :param d2: the second dictionary :param keys: the keys to limit the comparison to (optional) :param ignore_none_values: whether to ignore none values :return: true if the dictionaries are equal, else false """ if keys or ignore_none_values: d1 = {k: v for k, v in d1.items() if (keys is None or k in keys) and (v is not None or not ignore_none_values)} d2 = {k: v for k, v in d2.items() if (keys is None or k in keys) and (v is not None or not ignore_none_values)} return d1 == d2
[ "def", "is_dict_equal", "(", "d1", ",", "d2", ",", "keys", "=", "None", ",", "ignore_none_values", "=", "True", ")", ":", "if", "keys", "or", "ignore_none_values", ":", "d1", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d1", ".", "items", ...
Compares two dictionaries to see if they are equal :param d1: the first dictionary :param d2: the second dictionary :param keys: the keys to limit the comparison to (optional) :param ignore_none_values: whether to ignore none values :return: true if the dictionaries are equal, else false
[ "Compares", "two", "dictionaries", "to", "see", "if", "they", "are", "equal", ":", "param", "d1", ":", "the", "first", "dictionary", ":", "param", "d2", ":", "the", "second", "dictionary", ":", "param", "keys", ":", "the", "keys", "to", "limit", "the", ...
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L155-L168
brbsix/pip-utils
pip_utils/locate.py
command_locate
def command_locate(options): """Command launched by CLI.""" matches = find_owners(options.file.name) if matches: print(*matches, sep='\n')
python
def command_locate(options): """Command launched by CLI.""" matches = find_owners(options.file.name) if matches: print(*matches, sep='\n')
[ "def", "command_locate", "(", "options", ")", ":", "matches", "=", "find_owners", "(", "options", ".", "file", ".", "name", ")", "if", "matches", ":", "print", "(", "*", "matches", ",", "sep", "=", "'\\n'", ")" ]
Command launched by CLI.
[ "Command", "launched", "by", "CLI", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/locate.py#L21-L26
brbsix/pip-utils
pip_utils/locate.py
find_owners
def find_owners(path): """Return the package(s) that file belongs to.""" abspath = os.path.abspath(path) packages = search_packages_info( sorted((d.project_name for d in get_installed_distributions(user_only=ENABLE_USER_SITE)), key=lambda d: d.lower())) return [p['name'] for p in packages if is_owner(p, abspath)]
python
def find_owners(path): """Return the package(s) that file belongs to.""" abspath = os.path.abspath(path) packages = search_packages_info( sorted((d.project_name for d in get_installed_distributions(user_only=ENABLE_USER_SITE)), key=lambda d: d.lower())) return [p['name'] for p in packages if is_owner(p, abspath)]
[ "def", "find_owners", "(", "path", ")", ":", "abspath", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "packages", "=", "search_packages_info", "(", "sorted", "(", "(", "d", ".", "project_name", "for", "d", "in", "get_installed_distributions", "...
Return the package(s) that file belongs to.
[ "Return", "the", "package", "(", "s", ")", "that", "file", "belongs", "to", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/locate.py#L29-L38
brbsix/pip-utils
pip_utils/locate.py
is_owner
def is_owner(package, abspath): """Determine whether `abspath` belongs to `package`.""" try: files = package['files'] location = package['location'] except KeyError: return False paths = (os.path.abspath(os.path.join(location, f)) for f in files) return abspath in paths
python
def is_owner(package, abspath): """Determine whether `abspath` belongs to `package`.""" try: files = package['files'] location = package['location'] except KeyError: return False paths = (os.path.abspath(os.path.join(location, f)) for f in files) return abspath in paths
[ "def", "is_owner", "(", "package", ",", "abspath", ")", ":", "try", ":", "files", "=", "package", "[", "'files'", "]", "location", "=", "package", "[", "'location'", "]", "except", "KeyError", ":", "return", "False", "paths", "=", "(", "os", ".", "path...
Determine whether `abspath` belongs to `package`.
[ "Determine", "whether", "abspath", "belongs", "to", "package", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/locate.py#L41-L52
jasonbot/arcrest
arcrest/server.py
RestURL._get_subfolder
def _get_subfolder(self, foldername, returntype, params=None, file_data=None): """Return an object of the requested type with the path relative to the current object's URL. Optionally, query parameters may be set.""" newurl = compat.urljoin(self.url, compat.quote(foldername), False) params = params or {} file_data = file_data or {} # Add the key-value pairs sent in params to query string if they # are so defined. query_dict = {} url_tuple = compat.urlsplit(newurl) urllist = list(url_tuple) if params: # As above, pull out first element from parse_qs' values query_dict = dict((k, v[0]) for k, v in cgi.parse_qs(urllist[3]).items()) for key, val in params.items(): # Lowercase bool string if isinstance(val, bool): query_dict[key] = str(val).lower() # Special case: convert an envelope to .bbox in the bb # parameter elif isinstance(val, geometry.Envelope): query_dict[key] = val.bbox # Another special case: strings can't be quoted/escaped at the # top level elif isinstance(val, gptypes.GPString): query_dict[key] = val.value # Just use the wkid of SpatialReferences elif isinstance(val, geometry.SpatialReference): query_dict[key] = val.wkid # If it's a list, make it a comma-separated string elif isinstance(val, (list, tuple, set)): val = ",".join([str(v.id) if isinstance(v, Layer) else str(v) for v in val]) # If it's a dictionary, dump as JSON elif isinstance(val, dict): val = json.dumps(val) # Ignore null values, and coerce string values (hopefully # everything sent in to a query has a sane __str__) elif val is not None: query_dict[key] = str(val) if self.__token__ is not None: query_dict['token'] = self.__token__ query_dict[REQUEST_REFERER_MAGIC_NAME] = self._referer or self.url # Replace URL query component with newly altered component urllist[3] = compat.urlencode(query_dict) newurl = urllist # Instantiate new RestURL or subclass rt = returntype(newurl, file_data) # Remind the resource where it came from try: rt.parent = self except: rt._parent = self return rt
python
def _get_subfolder(self, foldername, returntype, params=None, file_data=None): """Return an object of the requested type with the path relative to the current object's URL. Optionally, query parameters may be set.""" newurl = compat.urljoin(self.url, compat.quote(foldername), False) params = params or {} file_data = file_data or {} # Add the key-value pairs sent in params to query string if they # are so defined. query_dict = {} url_tuple = compat.urlsplit(newurl) urllist = list(url_tuple) if params: # As above, pull out first element from parse_qs' values query_dict = dict((k, v[0]) for k, v in cgi.parse_qs(urllist[3]).items()) for key, val in params.items(): # Lowercase bool string if isinstance(val, bool): query_dict[key] = str(val).lower() # Special case: convert an envelope to .bbox in the bb # parameter elif isinstance(val, geometry.Envelope): query_dict[key] = val.bbox # Another special case: strings can't be quoted/escaped at the # top level elif isinstance(val, gptypes.GPString): query_dict[key] = val.value # Just use the wkid of SpatialReferences elif isinstance(val, geometry.SpatialReference): query_dict[key] = val.wkid # If it's a list, make it a comma-separated string elif isinstance(val, (list, tuple, set)): val = ",".join([str(v.id) if isinstance(v, Layer) else str(v) for v in val]) # If it's a dictionary, dump as JSON elif isinstance(val, dict): val = json.dumps(val) # Ignore null values, and coerce string values (hopefully # everything sent in to a query has a sane __str__) elif val is not None: query_dict[key] = str(val) if self.__token__ is not None: query_dict['token'] = self.__token__ query_dict[REQUEST_REFERER_MAGIC_NAME] = self._referer or self.url # Replace URL query component with newly altered component urllist[3] = compat.urlencode(query_dict) newurl = urllist # Instantiate new RestURL or subclass rt = returntype(newurl, file_data) # Remind the resource where it came from try: rt.parent = self except: rt._parent = self return rt
[ "def", "_get_subfolder", "(", "self", ",", "foldername", ",", "returntype", ",", "params", "=", "None", ",", "file_data", "=", "None", ")", ":", "newurl", "=", "compat", ".", "urljoin", "(", "self", ".", "url", ",", "compat", ".", "quote", "(", "folder...
Return an object of the requested type with the path relative to the current object's URL. Optionally, query parameters may be set.
[ "Return", "an", "object", "of", "the", "requested", "type", "with", "the", "path", "relative", "to", "the", "current", "object", "s", "URL", ".", "Optionally", "query", "parameters", "may", "be", "set", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L112-L172
jasonbot/arcrest
arcrest/server.py
RestURL.url
def url(self): """The URL as a string of the resource.""" urlparts = self._url if self.__post__: urlparts = list(urlparts) urlparts[3] = '' # Clear out query string on POST if self.__token__ is not None: # But not the token urlparts[3] = compat.urlencode({'token': self.__token__}) return compat.urlunsplit(urlparts)
python
def url(self): """The URL as a string of the resource.""" urlparts = self._url if self.__post__: urlparts = list(urlparts) urlparts[3] = '' # Clear out query string on POST if self.__token__ is not None: # But not the token urlparts[3] = compat.urlencode({'token': self.__token__}) return compat.urlunsplit(urlparts)
[ "def", "url", "(", "self", ")", ":", "urlparts", "=", "self", ".", "_url", "if", "self", ".", "__post__", ":", "urlparts", "=", "list", "(", "urlparts", ")", "urlparts", "[", "3", "]", "=", "''", "# Clear out query string on POST", "if", "self", ".", "...
The URL as a string of the resource.
[ "The", "URL", "as", "a", "string", "of", "the", "resource", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L177-L185
jasonbot/arcrest
arcrest/server.py
RestURL._contents
def _contents(self): """The raw contents of the URL as fetched, this is done lazily. For non-lazy fetching this is accessed in the object constructor.""" if self.__urldata__ is Ellipsis or self.__cache_request__ is False: if self._file_data: # Special-case: do a multipart upload if there's file data self.__post__ = True boundary = "-"*12+str(uuid.uuid4())+"$" multipart_data = '' for k, v in cgi.parse_qs(self.query).items(): if not isinstance(v, list): v = [v] for val in v: multipart_data += boundary + "\r\n" multipart_data += ('Content-Disposition: form-data; ' 'name="%s"\r\n\r\n' % k) multipart_data += val + "\r\n" for k, v in self._file_data.items(): fn = os.path.basename(getattr(v, 'name', 'file')) ct = (mimetypes.guess_type(fn) or ("application/octet-stream",))[0] multipart_data += boundary + "\r\n" multipart_data += ('Content-Disposition: form-data; ' 'name="%s"; filename="%s"\r\n' 'Content-Type:%s\r\n\r\n' % (k, fn, ct)) multipart_data += v.read() + "\r\n" multipart_data += boundary + "--\r\n\r\n" req_dict = {'User-Agent' : USER_AGENT, 'Content-Type': 'multipart/form-data; boundary='+boundary[2:], 'Content-Length': str(len(multipart_data)) } if self._referer: req_dict['Referer'] = self._referer request = compat.urllib2.Request(self.url, multipart_data, req_dict) else: req_dict = {'User-Agent' : USER_AGENT} if self._referer: req_dict['Referer'] = self._referer request = compat.urllib2.Request(self.url, self.query if self.__post__ else None, req_dict) handle = compat.urllib2.urlopen(request) # Handle the special case of a redirect (only follow once) -- # Note that only the first 3 components (protocol, hostname, path) # are altered as component 4 is the query string, which can get # clobbered by the server. fetched_url = list(compat.urlsplit(handle.url)[:3]) if fetched_url != list(self._url[:3]): self._url[:3] = fetched_url return self._contents # No redirect, proceed as usual. 
self.__headers__ = compat.get_headers(handle) self.__urldata__ = handle.read() data = self.__urldata__ if self.__cache_request__ is False: self.__urldata__ = Ellipsis return data
python
def _contents(self): """The raw contents of the URL as fetched, this is done lazily. For non-lazy fetching this is accessed in the object constructor.""" if self.__urldata__ is Ellipsis or self.__cache_request__ is False: if self._file_data: # Special-case: do a multipart upload if there's file data self.__post__ = True boundary = "-"*12+str(uuid.uuid4())+"$" multipart_data = '' for k, v in cgi.parse_qs(self.query).items(): if not isinstance(v, list): v = [v] for val in v: multipart_data += boundary + "\r\n" multipart_data += ('Content-Disposition: form-data; ' 'name="%s"\r\n\r\n' % k) multipart_data += val + "\r\n" for k, v in self._file_data.items(): fn = os.path.basename(getattr(v, 'name', 'file')) ct = (mimetypes.guess_type(fn) or ("application/octet-stream",))[0] multipart_data += boundary + "\r\n" multipart_data += ('Content-Disposition: form-data; ' 'name="%s"; filename="%s"\r\n' 'Content-Type:%s\r\n\r\n' % (k, fn, ct)) multipart_data += v.read() + "\r\n" multipart_data += boundary + "--\r\n\r\n" req_dict = {'User-Agent' : USER_AGENT, 'Content-Type': 'multipart/form-data; boundary='+boundary[2:], 'Content-Length': str(len(multipart_data)) } if self._referer: req_dict['Referer'] = self._referer request = compat.urllib2.Request(self.url, multipart_data, req_dict) else: req_dict = {'User-Agent' : USER_AGENT} if self._referer: req_dict['Referer'] = self._referer request = compat.urllib2.Request(self.url, self.query if self.__post__ else None, req_dict) handle = compat.urllib2.urlopen(request) # Handle the special case of a redirect (only follow once) -- # Note that only the first 3 components (protocol, hostname, path) # are altered as component 4 is the query string, which can get # clobbered by the server. fetched_url = list(compat.urlsplit(handle.url)[:3]) if fetched_url != list(self._url[:3]): self._url[:3] = fetched_url return self._contents # No redirect, proceed as usual. 
self.__headers__ = compat.get_headers(handle) self.__urldata__ = handle.read() data = self.__urldata__ if self.__cache_request__ is False: self.__urldata__ = Ellipsis return data
[ "def", "_contents", "(", "self", ")", ":", "if", "self", ".", "__urldata__", "is", "Ellipsis", "or", "self", ".", "__cache_request__", "is", "False", ":", "if", "self", ".", "_file_data", ":", "# Special-case: do a multipart upload if there's file data", "self", "...
The raw contents of the URL as fetched, this is done lazily. For non-lazy fetching this is accessed in the object constructor.
[ "The", "raw", "contents", "of", "the", "URL", "as", "fetched", "this", "is", "done", "lazily", ".", "For", "non", "-", "lazy", "fetching", "this", "is", "accessed", "in", "the", "object", "constructor", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L197-L258
jasonbot/arcrest
arcrest/server.py
RestURL._json_struct
def _json_struct(self): """The json data structure in the URL contents, it will cache this if it makes sense so it doesn't parse over and over.""" if self.__has_json__: if self.__cache_request__: if self.__json_struct__ is Ellipsis: if self._contents is not Ellipsis: self.__json_struct__ = json.loads( compat.ensure_string(self._contents) .strip() or '{}') else: return {} return self.__json_struct__ else: return json.loads(compat.ensure_string(self._contents)) else: # Return an empty dict for things so they don't have to special # case against a None value or anything return {}
python
def _json_struct(self): """The json data structure in the URL contents, it will cache this if it makes sense so it doesn't parse over and over.""" if self.__has_json__: if self.__cache_request__: if self.__json_struct__ is Ellipsis: if self._contents is not Ellipsis: self.__json_struct__ = json.loads( compat.ensure_string(self._contents) .strip() or '{}') else: return {} return self.__json_struct__ else: return json.loads(compat.ensure_string(self._contents)) else: # Return an empty dict for things so they don't have to special # case against a None value or anything return {}
[ "def", "_json_struct", "(", "self", ")", ":", "if", "self", ".", "__has_json__", ":", "if", "self", ".", "__cache_request__", ":", "if", "self", ".", "__json_struct__", "is", "Ellipsis", ":", "if", "self", ".", "_contents", "is", "not", "Ellipsis", ":", ...
The json data structure in the URL contents, it will cache this if it makes sense so it doesn't parse over and over.
[ "The", "json", "data", "structure", "in", "the", "URL", "contents", "it", "will", "cache", "this", "if", "it", "makes", "sense", "so", "it", "doesn", "t", "parse", "over", "and", "over", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L260-L278
jasonbot/arcrest
arcrest/server.py
RestURL.parent
def parent(self): "Get this object's parent" if self._parent: return self._parent # auto-compute parent if needed elif getattr(self, '__parent_type__', None): return self._get_subfolder('..' if self._url[2].endswith('/') else '.', self.__parent_type__) else: raise AttributeError("%r has no parent attribute" % type(self))
python
def parent(self): "Get this object's parent" if self._parent: return self._parent # auto-compute parent if needed elif getattr(self, '__parent_type__', None): return self._get_subfolder('..' if self._url[2].endswith('/') else '.', self.__parent_type__) else: raise AttributeError("%r has no parent attribute" % type(self))
[ "def", "parent", "(", "self", ")", ":", "if", "self", ".", "_parent", ":", "return", "self", ".", "_parent", "# auto-compute parent if needed", "elif", "getattr", "(", "self", ",", "'__parent_type__'", ",", "None", ")", ":", "return", "self", ".", "_get_subf...
Get this object's parent
[ "Get", "this", "object", "s", "parent" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L280-L289
jasonbot/arcrest
arcrest/server.py
Folder._register_service_type
def _register_service_type(cls, subclass): """Registers subclass handlers of various service-type-specific service implementations. Look for classes decorated with @Folder._register_service_type for hints on how this works.""" if hasattr(subclass, '__service_type__'): cls._service_type_mapping[subclass.__service_type__] = subclass if subclass.__service_type__: setattr(subclass, subclass.__service_type__, property(lambda x: x)) return subclass
python
def _register_service_type(cls, subclass): """Registers subclass handlers of various service-type-specific service implementations. Look for classes decorated with @Folder._register_service_type for hints on how this works.""" if hasattr(subclass, '__service_type__'): cls._service_type_mapping[subclass.__service_type__] = subclass if subclass.__service_type__: setattr(subclass, subclass.__service_type__, property(lambda x: x)) return subclass
[ "def", "_register_service_type", "(", "cls", ",", "subclass", ")", ":", "if", "hasattr", "(", "subclass", ",", "'__service_type__'", ")", ":", "cls", ".", "_service_type_mapping", "[", "subclass", ".", "__service_type__", "]", "=", "subclass", "if", "subclass", ...
Registers subclass handlers of various service-type-specific service implementations. Look for classes decorated with @Folder._register_service_type for hints on how this works.
[ "Registers", "subclass", "handlers", "of", "various", "service", "-", "type", "-", "specific", "service", "implementations", ".", "Look", "for", "classes", "decorated", "with" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L435-L445
jasonbot/arcrest
arcrest/server.py
Folder.servicenames
def servicenames(self): "Give the list of services available in this folder." return set([service['name'].rstrip('/').split('/')[-1] for service in self._json_struct.get('services', [])])
python
def servicenames(self): "Give the list of services available in this folder." return set([service['name'].rstrip('/').split('/')[-1] for service in self._json_struct.get('services', [])])
[ "def", "servicenames", "(", "self", ")", ":", "return", "set", "(", "[", "service", "[", "'name'", "]", ".", "rstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "for", "service", "in", "self", ".", "_json_struct", ".", "...
Give the list of services available in this folder.
[ "Give", "the", "list", "of", "services", "available", "in", "this", "folder", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L471-L474
jasonbot/arcrest
arcrest/server.py
Folder.services
def services(self): "Returns a list of Service objects available in this folder" return [self._get_subfolder("%s/%s/" % (s['name'].rstrip('/').split('/')[-1], s['type']), self._service_type_mapping.get(s['type'], Service)) for s in self._json_struct.get('services', [])]
python
def services(self): "Returns a list of Service objects available in this folder" return [self._get_subfolder("%s/%s/" % (s['name'].rstrip('/').split('/')[-1], s['type']), self._service_type_mapping.get(s['type'], Service)) for s in self._json_struct.get('services', [])]
[ "def", "services", "(", "self", ")", ":", "return", "[", "self", ".", "_get_subfolder", "(", "\"%s/%s/\"", "%", "(", "s", "[", "'name'", "]", ".", "rstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ",", "s", "[", "'ty...
Returns a list of Service objects available in this folder
[ "Returns", "a", "list", "of", "Service", "objects", "available", "in", "this", "folder" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L476-L481
jasonbot/arcrest
arcrest/server.py
Folder.url
def url(self): """The URL as a string of the resource.""" if not self._url[2].endswith('/'): self._url[2] += '/' return RestURL.url.__get__(self)
python
def url(self): """The URL as a string of the resource.""" if not self._url[2].endswith('/'): self._url[2] += '/' return RestURL.url.__get__(self)
[ "def", "url", "(", "self", ")", ":", "if", "not", "self", ".", "_url", "[", "2", "]", ".", "endswith", "(", "'/'", ")", ":", "self", ".", "_url", "[", "2", "]", "+=", "'/'", "return", "RestURL", ".", "url", ".", "__get__", "(", "self", ")" ]
The URL as a string of the resource.
[ "The", "URL", "as", "a", "string", "of", "the", "resource", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L483-L487
jasonbot/arcrest
arcrest/server.py
BinaryResult.save
def save(self, outfile): """Save the image data to a file or file-like object""" if isinstance(outfile, compat.string_type): outfile = open(outfile, 'wb') outfile.write(self._contents)
python
def save(self, outfile): """Save the image data to a file or file-like object""" if isinstance(outfile, compat.string_type): outfile = open(outfile, 'wb') outfile.write(self._contents)
[ "def", "save", "(", "self", ",", "outfile", ")", ":", "if", "isinstance", "(", "outfile", ",", "compat", ".", "string_type", ")", ":", "outfile", "=", "open", "(", "outfile", ",", "'wb'", ")", "outfile", ".", "write", "(", "self", ".", "_contents", "...
Save the image data to a file or file-like object
[ "Save", "the", "image", "data", "to", "a", "file", "or", "file", "-", "like", "object" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L633-L637
jasonbot/arcrest
arcrest/server.py
MapLayer.QueryLayer
def QueryLayer(self, text=None, Geometry=None, inSR=None, spatialRel='esriSpatialRelIntersects', where=None, outFields=None, returnGeometry=None, outSR=None, objectIds=None, time=None, maxAllowableOffset=None, returnIdsOnly=None): """The query operation is performed on a layer resource. The result of this operation is a resultset resource. This resource provides information about query results including the values for the fields requested by the user. If you request geometry information, the geometry of each result is also returned in the resultset. B{Spatial Relation Options:} - esriSpatialRelIntersects - esriSpatialRelContains - esriSpatialRelCrosses - esriSpatialRelEnvelopeIntersects - esriSpatialRelIndexIntersects - esriSpatialRelOverlaps - esriSpatialRelTouches - esriSpatialRelWithin""" if not inSR: if Geometry: inSR = Geometry.spatialReference out = self._get_subfolder("./query", JsonResult, { 'text': text, 'geometry': geometry, 'inSR': inSR, 'spatialRel': spatialRel, 'where': where, 'outFields': outFields, 'returnGeometry': returnGeometry, 'outSR': outSR, 'objectIds': objectIds, 'time': utils.pythonvaluetotime( time), 'maxAllowableOffset': maxAllowableOffset, 'returnIdsOnly': returnIdsOnly }) return gptypes.GPFeatureRecordSetLayer.fromJson(out._json_struct)
python
def QueryLayer(self, text=None, Geometry=None, inSR=None, spatialRel='esriSpatialRelIntersects', where=None, outFields=None, returnGeometry=None, outSR=None, objectIds=None, time=None, maxAllowableOffset=None, returnIdsOnly=None): """The query operation is performed on a layer resource. The result of this operation is a resultset resource. This resource provides information about query results including the values for the fields requested by the user. If you request geometry information, the geometry of each result is also returned in the resultset. B{Spatial Relation Options:} - esriSpatialRelIntersects - esriSpatialRelContains - esriSpatialRelCrosses - esriSpatialRelEnvelopeIntersects - esriSpatialRelIndexIntersects - esriSpatialRelOverlaps - esriSpatialRelTouches - esriSpatialRelWithin""" if not inSR: if Geometry: inSR = Geometry.spatialReference out = self._get_subfolder("./query", JsonResult, { 'text': text, 'geometry': geometry, 'inSR': inSR, 'spatialRel': spatialRel, 'where': where, 'outFields': outFields, 'returnGeometry': returnGeometry, 'outSR': outSR, 'objectIds': objectIds, 'time': utils.pythonvaluetotime( time), 'maxAllowableOffset': maxAllowableOffset, 'returnIdsOnly': returnIdsOnly }) return gptypes.GPFeatureRecordSetLayer.fromJson(out._json_struct)
[ "def", "QueryLayer", "(", "self", ",", "text", "=", "None", ",", "Geometry", "=", "None", ",", "inSR", "=", "None", ",", "spatialRel", "=", "'esriSpatialRelIntersects'", ",", "where", "=", "None", ",", "outFields", "=", "None", ",", "returnGeometry", "=", ...
The query operation is performed on a layer resource. The result of this operation is a resultset resource. This resource provides information about query results including the values for the fields requested by the user. If you request geometry information, the geometry of each result is also returned in the resultset. B{Spatial Relation Options:} - esriSpatialRelIntersects - esriSpatialRelContains - esriSpatialRelCrosses - esriSpatialRelEnvelopeIntersects - esriSpatialRelIndexIntersects - esriSpatialRelOverlaps - esriSpatialRelTouches - esriSpatialRelWithin
[ "The", "query", "operation", "is", "performed", "on", "a", "layer", "resource", ".", "The", "result", "of", "this", "operation", "is", "a", "resultset", "resource", ".", "This", "resource", "provides", "information", "about", "query", "results", "including", "...
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L704-L746
jasonbot/arcrest
arcrest/server.py
MapLayer.timeInfo
def timeInfo(self): """Return the time info for this Map Service""" time_info = self._json_struct.get('timeInfo', {}) if not time_info: return None time_info = time_info.copy() if 'timeExtent' in time_info: time_info['timeExtent'] = utils.timetopythonvalue( time_info['timeExtent']) return time_info
python
def timeInfo(self): """Return the time info for this Map Service""" time_info = self._json_struct.get('timeInfo', {}) if not time_info: return None time_info = time_info.copy() if 'timeExtent' in time_info: time_info['timeExtent'] = utils.timetopythonvalue( time_info['timeExtent']) return time_info
[ "def", "timeInfo", "(", "self", ")", ":", "time_info", "=", "self", ".", "_json_struct", ".", "get", "(", "'timeInfo'", ",", "{", "}", ")", "if", "not", "time_info", ":", "return", "None", "time_info", "=", "time_info", ".", "copy", "(", ")", "if", "...
Return the time info for this Map Service
[ "Return", "the", "time", "info", "for", "this", "Map", "Service" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L795-L804
jasonbot/arcrest
arcrest/server.py
ExportMapResult.save
def save(self, outfile): """Save the image data to a file or file-like object""" if isinstance(outfile, compat.string_type): outfile = open(outfile, 'wb') assert hasattr(outfile, 'write') and callable(outfile.write), \ "Expect a file or file-like object with a .write() method" outfile.write(self.data)
python
def save(self, outfile): """Save the image data to a file or file-like object""" if isinstance(outfile, compat.string_type): outfile = open(outfile, 'wb') assert hasattr(outfile, 'write') and callable(outfile.write), \ "Expect a file or file-like object with a .write() method" outfile.write(self.data)
[ "def", "save", "(", "self", ",", "outfile", ")", ":", "if", "isinstance", "(", "outfile", ",", "compat", ".", "string_type", ")", ":", "outfile", "=", "open", "(", "outfile", ",", "'wb'", ")", "assert", "hasattr", "(", "outfile", ",", "'write'", ")", ...
Save the image data to a file or file-like object
[ "Save", "the", "image", "data", "to", "a", "file", "or", "file", "-", "like", "object" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L844-L850
jasonbot/arcrest
arcrest/server.py
FindAddressCandidatesResult.candidates
def candidates(self): """A list of candidate addresses (as dictionaries) from a geocode operation""" # convert x['location'] to a point from a json point struct def cditer(): for candidate in self._json_struct['candidates']: newcandidate = candidate.copy() newcandidate['location'] = \ geometry.fromJson(newcandidate['location']) yield newcandidate return list(cditer())
python
def candidates(self): """A list of candidate addresses (as dictionaries) from a geocode operation""" # convert x['location'] to a point from a json point struct def cditer(): for candidate in self._json_struct['candidates']: newcandidate = candidate.copy() newcandidate['location'] = \ geometry.fromJson(newcandidate['location']) yield newcandidate return list(cditer())
[ "def", "candidates", "(", "self", ")", ":", "# convert x['location'] to a point from a json point struct", "def", "cditer", "(", ")", ":", "for", "candidate", "in", "self", ".", "_json_struct", "[", "'candidates'", "]", ":", "newcandidate", "=", "candidate", ".", ...
A list of candidate addresses (as dictionaries) from a geocode operation
[ "A", "list", "of", "candidate", "addresses", "(", "as", "dictionaries", ")", "from", "a", "geocode", "operation" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L1043-L1053
jasonbot/arcrest
arcrest/server.py
GPExecutionResult.results
def results(self): "Returns a dict of outputs from the GPTask execution." if self._results is None: results = self._json_struct['results'] def result_iterator(): for result in results: datatype = None conversion = None for param in self.parent.parameters: if param['name'] == result['paramName']: datatype = param['datatype'] if datatype is None: conversion = str else: conversion = datatype.fromJson dt = result['paramName'] val = conversion(result['value']) yield (dt, val) self._results = dict(res for res in result_iterator()) return self._results
python
def results(self): "Returns a dict of outputs from the GPTask execution." if self._results is None: results = self._json_struct['results'] def result_iterator(): for result in results: datatype = None conversion = None for param in self.parent.parameters: if param['name'] == result['paramName']: datatype = param['datatype'] if datatype is None: conversion = str else: conversion = datatype.fromJson dt = result['paramName'] val = conversion(result['value']) yield (dt, val) self._results = dict(res for res in result_iterator()) return self._results
[ "def", "results", "(", "self", ")", ":", "if", "self", ".", "_results", "is", "None", ":", "results", "=", "self", ".", "_json_struct", "[", "'results'", "]", "def", "result_iterator", "(", ")", ":", "for", "result", "in", "results", ":", "datatype", "...
Returns a dict of outputs from the GPTask execution.
[ "Returns", "a", "dict", "of", "outputs", "from", "the", "GPTask", "execution", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L1335-L1354
jasonbot/arcrest
arcrest/server.py
GPTask.Execute
def Execute(self, *params, **kw): """Synchronously execute the specified GP task. Parameters are passed in either in order or as keywords.""" fp = self.__expandparamstodict(params, kw) return self._get_subfolder('execute/', GPExecutionResult, fp)
python
def Execute(self, *params, **kw): """Synchronously execute the specified GP task. Parameters are passed in either in order or as keywords.""" fp = self.__expandparamstodict(params, kw) return self._get_subfolder('execute/', GPExecutionResult, fp)
[ "def", "Execute", "(", "self", ",", "*", "params", ",", "*", "*", "kw", ")", ":", "fp", "=", "self", ".", "__expandparamstodict", "(", "params", ",", "kw", ")", "return", "self", ".", "_get_subfolder", "(", "'execute/'", ",", "GPExecutionResult", ",", ...
Synchronously execute the specified GP task. Parameters are passed in either in order or as keywords.
[ "Synchronously", "execute", "the", "specified", "GP", "task", ".", "Parameters", "are", "passed", "in", "either", "in", "order", "or", "as", "keywords", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L1401-L1405
jasonbot/arcrest
arcrest/server.py
GPTask.SubmitJob
def SubmitJob(self, *params, **kw): """Asynchronously execute the specified GP task. This will return a Geoprocessing Job object. Parameters are passed in either in order or as keywords.""" fp = self.__expandparamstodict(params, kw) return self._get_subfolder('submitJob/', GPJob, fp)._jobstatus
python
def SubmitJob(self, *params, **kw): """Asynchronously execute the specified GP task. This will return a Geoprocessing Job object. Parameters are passed in either in order or as keywords.""" fp = self.__expandparamstodict(params, kw) return self._get_subfolder('submitJob/', GPJob, fp)._jobstatus
[ "def", "SubmitJob", "(", "self", ",", "*", "params", ",", "*", "*", "kw", ")", ":", "fp", "=", "self", ".", "__expandparamstodict", "(", "params", ",", "kw", ")", "return", "self", ".", "_get_subfolder", "(", "'submitJob/'", ",", "GPJob", ",", "fp", ...
Asynchronously execute the specified GP task. This will return a Geoprocessing Job object. Parameters are passed in either in order or as keywords.
[ "Asynchronously", "execute", "the", "specified", "GP", "task", ".", "This", "will", "return", "a", "Geoprocessing", "Job", "object", ".", "Parameters", "are", "passed", "in", "either", "in", "order", "or", "as", "keywords", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L1406-L1411
jasonbot/arcrest
arcrest/server.py
ExportImageResult.save
def save(self, outfile): """Save the image data to a file or file-like object""" if isinstance(outfile, compat.string_type): outfile = open(outfile, 'wb') outfile.write(compat.urllib2.urlopen(self.href).read())
python
def save(self, outfile): """Save the image data to a file or file-like object""" if isinstance(outfile, compat.string_type): outfile = open(outfile, 'wb') outfile.write(compat.urllib2.urlopen(self.href).read())
[ "def", "save", "(", "self", ",", "outfile", ")", ":", "if", "isinstance", "(", "outfile", ",", "compat", ".", "string_type", ")", ":", "outfile", "=", "open", "(", "outfile", ",", "'wb'", ")", "outfile", ".", "write", "(", "compat", ".", "urllib2", "...
Save the image data to a file or file-like object
[ "Save", "the", "image", "data", "to", "a", "file", "or", "file", "-", "like", "object" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L1883-L1887
jasonbot/arcrest
arcrest/server.py
NetworkLayer.SolveClosestFacility
def SolveClosestFacility(self, facilities=None, incidents=None, barriers=None, polylineBarriers=None, polygonBarriers=None, attributeParameterValues=None, returnDirections=None, directionsLanguage=None, directionsStyleName=None, directionsLengthUnits=None, directionsTimeAttributeName=None, returnCFRoutes=None, returnFacilities=None, returnIncidents=None, returnBarriers=None, returnPolylineBarriers=None, returnPolygonBarriers=None, facilityReturnType=None, outputLines=None, defaultCutoff=None, defaultTargetFacilityCount=None, travelDirection=None, outSR=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, useHierarchy=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits=None): """The solve operation is performed on a network layer resource of type closest facility.""" raise NotImplementedError()
python
def SolveClosestFacility(self, facilities=None, incidents=None, barriers=None, polylineBarriers=None, polygonBarriers=None, attributeParameterValues=None, returnDirections=None, directionsLanguage=None, directionsStyleName=None, directionsLengthUnits=None, directionsTimeAttributeName=None, returnCFRoutes=None, returnFacilities=None, returnIncidents=None, returnBarriers=None, returnPolylineBarriers=None, returnPolygonBarriers=None, facilityReturnType=None, outputLines=None, defaultCutoff=None, defaultTargetFacilityCount=None, travelDirection=None, outSR=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, useHierarchy=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits=None): """The solve operation is performed on a network layer resource of type closest facility.""" raise NotImplementedError()
[ "def", "SolveClosestFacility", "(", "self", ",", "facilities", "=", "None", ",", "incidents", "=", "None", ",", "barriers", "=", "None", ",", "polylineBarriers", "=", "None", ",", "polygonBarriers", "=", "None", ",", "attributeParameterValues", "=", "None", ",...
The solve operation is performed on a network layer resource of type closest facility.
[ "The", "solve", "operation", "is", "performed", "on", "a", "network", "layer", "resource", "of", "type", "closest", "facility", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2052-L2083
jasonbot/arcrest
arcrest/server.py
NetworkLayer.SolveServiceArea
def SolveServiceArea(self, facilities=None, barriers=None, polylineBarriers=None, polygonBarriers=None, attributeParameterValues=None, defaultBreaks=None, excludeSourcesFromPolygons=None, mergeSimilarPolygonRanges=None, outputLines=None, outputPolygons=None, overlapLines=None, overlapPolygons=None, splitLinesAtBreaks=None, splitPolygonsAtBreaks=None, travelDirection=None, trimOuterPolygon=None, trimPolygonDistance=None, trimPolygonDistanceUnits=None, accumulateAttributeNames=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits=None): """The solve operation is performed on a network layer resource of type service area (layerType is esriNAServerServiceArea).""" raise NotImplementedError()
python
def SolveServiceArea(self, facilities=None, barriers=None, polylineBarriers=None, polygonBarriers=None, attributeParameterValues=None, defaultBreaks=None, excludeSourcesFromPolygons=None, mergeSimilarPolygonRanges=None, outputLines=None, outputPolygons=None, overlapLines=None, overlapPolygons=None, splitLinesAtBreaks=None, splitPolygonsAtBreaks=None, travelDirection=None, trimOuterPolygon=None, trimPolygonDistance=None, trimPolygonDistanceUnits=None, accumulateAttributeNames=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits=None): """The solve operation is performed on a network layer resource of type service area (layerType is esriNAServerServiceArea).""" raise NotImplementedError()
[ "def", "SolveServiceArea", "(", "self", ",", "facilities", "=", "None", ",", "barriers", "=", "None", ",", "polylineBarriers", "=", "None", ",", "polygonBarriers", "=", "None", ",", "attributeParameterValues", "=", "None", ",", "defaultBreaks", "=", "None", ",...
The solve operation is performed on a network layer resource of type service area (layerType is esriNAServerServiceArea).
[ "The", "solve", "operation", "is", "performed", "on", "a", "network", "layer", "resource", "of", "type", "service", "area", "(", "layerType", "is", "esriNAServerServiceArea", ")", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2084-L2110
jasonbot/arcrest
arcrest/server.py
RouteNetworkLayer.Solve
def Solve(self, stops=None, barriers=None, returnDirections=None, returnRoutes=None, returnStops=None, returnBarriers=None, outSR=None, ignoreInvalidLocations=None, outputLines=None, findBestSequence=None, preserveFirstStop=None, preserveLastStop=None, useTimeWindows=None, startTime=None, accumulateAttributeNames=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, useHierarchy=None, directionsLanguage=None, outputGeometryPrecision=None, directionsLengthUnits=None, directionsTimeAttributeName=None, attributeParameterValues=None, polylineBarriers=None, polygonBarriers=None): """The solve operation is performed on a network layer resource. At 9.3.1, the solve operation is supported only on the route layer. Or specifically, on a network layer whose layerType is esriNAServerRouteLayer. You can provide arguments to the solve route operation as query parameters defined in the parameters table below. """ def ptlist_as_semilist(lst): if isinstance(lst, geometry.Point): lst = [lst] if isinstance(lst, (list, tuple)): return ";".join(','.join(str(x) for x in pt) for pt in lst) return lst if self.layerType != "esriNAServerRouteLayer": raise TypeError("Layer is of type %s; Solve is not available." 
% self.layerType) return self._get_subfolder('solve/', NetworkSolveResult, {'stops': ptlist_as_semilist(stops), 'barriers': ptlist_as_semilist(barriers), 'returnDirections': returnDirections, 'returnRoutes': returnRoutes, 'returnStops': returnStops, 'returnBarriers': returnBarriers, 'outSR': outSR, 'ignoreInvalidLocations': ignoreInvalidLocations, 'outputLines': outputLines, 'findBestSequence': findBestSequence, 'preserveFirstStop': preserveFirstStop, 'preserveLastStop': preserveLastStop, 'useTimeWindows': useTimeWindows, 'startTime': startTime, 'accumulateAttributeNames': accumulateAttributeNames, 'impedanceAttributeName': impedanceAttributeName, 'restrictionAttributeNames': restrictionAttributeNames, 'restrictUTurns': restrictUTurns, 'useHierarchy': useHierarchy, 'directionsLanguage': directionsLanguage, 'outputGeometryPrecision': outputGeometryPrecision, 'directionsLengthUnits': directionsLengthUnits, 'directionsTimeAttributeName': directionsTimeAttributeName, 'attributeParameterValues': attributeParameterValues, 'polylineBarriers': polylineBarriers, 'polygonBarriers': polygonBarriers})
python
def Solve(self, stops=None, barriers=None, returnDirections=None, returnRoutes=None, returnStops=None, returnBarriers=None, outSR=None, ignoreInvalidLocations=None, outputLines=None, findBestSequence=None, preserveFirstStop=None, preserveLastStop=None, useTimeWindows=None, startTime=None, accumulateAttributeNames=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, useHierarchy=None, directionsLanguage=None, outputGeometryPrecision=None, directionsLengthUnits=None, directionsTimeAttributeName=None, attributeParameterValues=None, polylineBarriers=None, polygonBarriers=None): """The solve operation is performed on a network layer resource. At 9.3.1, the solve operation is supported only on the route layer. Or specifically, on a network layer whose layerType is esriNAServerRouteLayer. You can provide arguments to the solve route operation as query parameters defined in the parameters table below. """ def ptlist_as_semilist(lst): if isinstance(lst, geometry.Point): lst = [lst] if isinstance(lst, (list, tuple)): return ";".join(','.join(str(x) for x in pt) for pt in lst) return lst if self.layerType != "esriNAServerRouteLayer": raise TypeError("Layer is of type %s; Solve is not available." 
% self.layerType) return self._get_subfolder('solve/', NetworkSolveResult, {'stops': ptlist_as_semilist(stops), 'barriers': ptlist_as_semilist(barriers), 'returnDirections': returnDirections, 'returnRoutes': returnRoutes, 'returnStops': returnStops, 'returnBarriers': returnBarriers, 'outSR': outSR, 'ignoreInvalidLocations': ignoreInvalidLocations, 'outputLines': outputLines, 'findBestSequence': findBestSequence, 'preserveFirstStop': preserveFirstStop, 'preserveLastStop': preserveLastStop, 'useTimeWindows': useTimeWindows, 'startTime': startTime, 'accumulateAttributeNames': accumulateAttributeNames, 'impedanceAttributeName': impedanceAttributeName, 'restrictionAttributeNames': restrictionAttributeNames, 'restrictUTurns': restrictUTurns, 'useHierarchy': useHierarchy, 'directionsLanguage': directionsLanguage, 'outputGeometryPrecision': outputGeometryPrecision, 'directionsLengthUnits': directionsLengthUnits, 'directionsTimeAttributeName': directionsTimeAttributeName, 'attributeParameterValues': attributeParameterValues, 'polylineBarriers': polylineBarriers, 'polygonBarriers': polygonBarriers})
[ "def", "Solve", "(", "self", ",", "stops", "=", "None", ",", "barriers", "=", "None", ",", "returnDirections", "=", "None", ",", "returnRoutes", "=", "None", ",", "returnStops", "=", "None", ",", "returnBarriers", "=", "None", ",", "outSR", "=", "None", ...
The solve operation is performed on a network layer resource. At 9.3.1, the solve operation is supported only on the route layer. Or specifically, on a network layer whose layerType is esriNAServerRouteLayer. You can provide arguments to the solve route operation as query parameters defined in the parameters table below.
[ "The", "solve", "operation", "is", "performed", "on", "a", "network", "layer", "resource", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2115-L2171
jasonbot/arcrest
arcrest/server.py
FeatureLayer.QueryRelatedRecords
def QueryRelatedRecords(self, objectIds=None, relationshipId=None, outFields=None, definitionExpression=None, returnGeometry=None, outSR=None): """The query operation is performed on a feature service layer resource. The result of this operation are featuresets grouped by source layer / table object IDs. Each featureset contains Feature objects including the values for the fields requested by the user. For related layers, if you request geometry information, the geometry of each feature is also returned in the featureset. For related tables, the featureset does not include geometries.""" out = self._get_subfolder("./queryRelatedRecords", JsonResult, { 'objectIds': objectIds, 'relationshipId': relationshipId, 'outFields': outFields, 'definitionExpression': definitionExpression, 'returnGeometry': returnGeometry, 'outSR': outSR }) return out._json_struct
python
def QueryRelatedRecords(self, objectIds=None, relationshipId=None, outFields=None, definitionExpression=None, returnGeometry=None, outSR=None): """The query operation is performed on a feature service layer resource. The result of this operation are featuresets grouped by source layer / table object IDs. Each featureset contains Feature objects including the values for the fields requested by the user. For related layers, if you request geometry information, the geometry of each feature is also returned in the featureset. For related tables, the featureset does not include geometries.""" out = self._get_subfolder("./queryRelatedRecords", JsonResult, { 'objectIds': objectIds, 'relationshipId': relationshipId, 'outFields': outFields, 'definitionExpression': definitionExpression, 'returnGeometry': returnGeometry, 'outSR': outSR }) return out._json_struct
[ "def", "QueryRelatedRecords", "(", "self", ",", "objectIds", "=", "None", ",", "relationshipId", "=", "None", ",", "outFields", "=", "None", ",", "definitionExpression", "=", "None", ",", "returnGeometry", "=", "None", ",", "outSR", "=", "None", ")", ":", ...
The query operation is performed on a feature service layer resource. The result of this operation are featuresets grouped by source layer / table object IDs. Each featureset contains Feature objects including the values for the fields requested by the user. For related layers, if you request geometry information, the geometry of each feature is also returned in the featureset. For related tables, the featureset does not include geometries.
[ "The", "query", "operation", "is", "performed", "on", "a", "feature", "service", "layer", "resource", ".", "The", "result", "of", "this", "operation", "are", "featuresets", "grouped", "by", "source", "layer", "/", "table", "object", "IDs", ".", "Each", "feat...
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2451-L2475
jasonbot/arcrest
arcrest/server.py
FeatureLayer.UpdateFeatures
def UpdateFeatures(self, features): """This operation updates features to the associated feature layer or table (POST only). The update features operation is performed on a feature service layer resource. The result of this operation is an array of edit results. Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.""" fd = {'features': ",".join(json.dumps( feature._json_struct_for_featureset) for feature in features)} return self._get_subfolder("./updateFeatures", JsonPostResult, fd)
python
def UpdateFeatures(self, features): """This operation updates features to the associated feature layer or table (POST only). The update features operation is performed on a feature service layer resource. The result of this operation is an array of edit results. Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.""" fd = {'features': ",".join(json.dumps( feature._json_struct_for_featureset) for feature in features)} return self._get_subfolder("./updateFeatures", JsonPostResult, fd)
[ "def", "UpdateFeatures", "(", "self", ",", "features", ")", ":", "fd", "=", "{", "'features'", ":", "\",\"", ".", "join", "(", "json", ".", "dumps", "(", "feature", ".", "_json_struct_for_featureset", ")", "for", "feature", "in", "features", ")", "}", "r...
This operation updates features to the associated feature layer or table (POST only). The update features operation is performed on a feature service layer resource. The result of this operation is an array of edit results. Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.
[ "This", "operation", "updates", "features", "to", "the", "associated", "feature", "layer", "or", "table", "(", "POST", "only", ")", ".", "The", "update", "features", "operation", "is", "performed", "on", "a", "feature", "service", "layer", "resource", ".", "...
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2487-L2497
jasonbot/arcrest
arcrest/server.py
FeatureLayer.DeleteFeatures
def DeleteFeatures(self, objectIds=None, where=None, geometry=None, inSR=None, spatialRel=None): """This operation deletes features in a feature layer or table (POST only). The delete features operation is performed on a feature service layer resource. The result of this operation is an array of edit results. Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.""" gt = geometry.__geometry_type__ if sr is None: sr = geometry.spatialReference.wkid geo_json = json.dumps(Geometry._json_struct_without_sr) return self._get_subfolder("./deleteFeatures", JsonPostResult, { 'objectIds': objectIds, 'where': where, 'geometry': geo_json, 'geometryType': geometryType, 'inSR': inSR, 'spatialRel': spatialRel })
python
def DeleteFeatures(self, objectIds=None, where=None, geometry=None, inSR=None, spatialRel=None): """This operation deletes features in a feature layer or table (POST only). The delete features operation is performed on a feature service layer resource. The result of this operation is an array of edit results. Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.""" gt = geometry.__geometry_type__ if sr is None: sr = geometry.spatialReference.wkid geo_json = json.dumps(Geometry._json_struct_without_sr) return self._get_subfolder("./deleteFeatures", JsonPostResult, { 'objectIds': objectIds, 'where': where, 'geometry': geo_json, 'geometryType': geometryType, 'inSR': inSR, 'spatialRel': spatialRel })
[ "def", "DeleteFeatures", "(", "self", ",", "objectIds", "=", "None", ",", "where", "=", "None", ",", "geometry", "=", "None", ",", "inSR", "=", "None", ",", "spatialRel", "=", "None", ")", ":", "gt", "=", "geometry", ".", "__geometry_type__", "if", "sr...
This operation deletes features in a feature layer or table (POST only). The delete features operation is performed on a feature service layer resource. The result of this operation is an array of edit results. Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.
[ "This", "operation", "deletes", "features", "in", "a", "feature", "layer", "or", "table", "(", "POST", "only", ")", ".", "The", "delete", "features", "operation", "is", "performed", "on", "a", "feature", "service", "layer", "resource", ".", "The", "result", ...
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2498-L2518
jasonbot/arcrest
arcrest/server.py
FeatureLayer.ApplyEdits
def ApplyEdits(self, adds=None, updates=None, deletes=None): """This operation adds, updates and deletes features to the associated feature layer or table in a single call (POST only). The apply edits operation is performed on a feature service layer resource. The result of this operation are 3 arrays of edit results (for adds, updates and deletes respectively). Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.""" add_str, update_str = None, None if adds: add_str = ",".join(json.dumps( feature._json_struct_for_featureset) for feature in adds) if updates: update_str = ",".join(json.dumps( feature._json_struct_for_featureset) for feature in updates) return self._get_subfolder("./applyEdits", JsonPostResult, {'adds': add_str, 'updates': update_str, 'deletes': deletes })
python
def ApplyEdits(self, adds=None, updates=None, deletes=None): """This operation adds, updates and deletes features to the associated feature layer or table in a single call (POST only). The apply edits operation is performed on a feature service layer resource. The result of this operation are 3 arrays of edit results (for adds, updates and deletes respectively). Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.""" add_str, update_str = None, None if adds: add_str = ",".join(json.dumps( feature._json_struct_for_featureset) for feature in adds) if updates: update_str = ",".join(json.dumps( feature._json_struct_for_featureset) for feature in updates) return self._get_subfolder("./applyEdits", JsonPostResult, {'adds': add_str, 'updates': update_str, 'deletes': deletes })
[ "def", "ApplyEdits", "(", "self", ",", "adds", "=", "None", ",", "updates", "=", "None", ",", "deletes", "=", "None", ")", ":", "add_str", ",", "update_str", "=", "None", ",", "None", "if", "adds", ":", "add_str", "=", "\",\"", ".", "join", "(", "j...
This operation adds, updates and deletes features to the associated feature layer or table in a single call (POST only). The apply edits operation is performed on a feature service layer resource. The result of this operation are 3 arrays of edit results (for adds, updates and deletes respectively). Each edit result identifies a single feature and indicates if the edit were successful or not. If not, it also includes an error code and an error description.
[ "This", "operation", "adds", "updates", "and", "deletes", "features", "to", "the", "associated", "feature", "layer", "or", "table", "in", "a", "single", "call", "(", "POST", "only", ")", ".", "The", "apply", "edits", "operation", "is", "performed", "on", "...
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2519-L2543
APSL/transmanager
transmanager/serializers.py
TaskBulksSerializer.save
def save(self, **kwargs): """ Method that creates the translations tasks for every selected instance :param kwargs: :return: """ try: # result_ids = [] manager = Manager() for item in self.model_class.objects.language(manager.get_main_language()).filter(pk__in=self.ids).all(): create_translations_for_item_and_its_children.delay(self.model_class, item.pk, self.languages, update_item_languages=True) # return TransTaskSerializer(TransTask.objects.filter(pk__in=result_ids), many=True).data return {'status': 'ok'} except Exception as e: raise serializers.ValidationError(detail=str(e))
python
def save(self, **kwargs): """ Method that creates the translations tasks for every selected instance :param kwargs: :return: """ try: # result_ids = [] manager = Manager() for item in self.model_class.objects.language(manager.get_main_language()).filter(pk__in=self.ids).all(): create_translations_for_item_and_its_children.delay(self.model_class, item.pk, self.languages, update_item_languages=True) # return TransTaskSerializer(TransTask.objects.filter(pk__in=result_ids), many=True).data return {'status': 'ok'} except Exception as e: raise serializers.ValidationError(detail=str(e))
[ "def", "save", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "# result_ids = []", "manager", "=", "Manager", "(", ")", "for", "item", "in", "self", ".", "model_class", ".", "objects", ".", "language", "(", "manager", ".", "get_main_language...
Method that creates the translations tasks for every selected instance :param kwargs: :return:
[ "Method", "that", "creates", "the", "translations", "tasks", "for", "every", "selected", "instance" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/serializers.py#L44-L60
deepgram/deepgram-brain-python
setup.py
get_version
def get_version(): """ Gets the current version of the package. """ version_py = os.path.join(os.path.dirname(__file__), 'deepgram', 'version.py') with open(version_py, 'r') as fh: for line in fh: if line.startswith('__version__'): return line.split('=')[-1].strip().replace('"', '') raise ValueError('Failed to parse version from: {}'.format(version_py))
python
def get_version(): """ Gets the current version of the package. """ version_py = os.path.join(os.path.dirname(__file__), 'deepgram', 'version.py') with open(version_py, 'r') as fh: for line in fh: if line.startswith('__version__'): return line.split('=')[-1].strip().replace('"', '') raise ValueError('Failed to parse version from: {}'.format(version_py))
[ "def", "get_version", "(", ")", ":", "version_py", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'deepgram'", ",", "'version.py'", ")", "with", "open", "(", "version_py", ",", "'r'", ")", "as...
Gets the current version of the package.
[ "Gets", "the", "current", "version", "of", "the", "package", "." ]
train
https://github.com/deepgram/deepgram-brain-python/blob/030ba241186a762a3faf73cec9af92063f3a7524/setup.py#L20-L28
dstufft/dj-redis-url
dj_redis_url.py
config
def config(env=DEFAULT_ENV, default=None, **overrides): """Returns configured REDIS dictionary from REDIS_URL.""" config = {} s = os.environ.get(env, default) if s: config = parse(s) overrides = dict([(k.upper(), v) for k, v in overrides.items()]) config.update(overrides) return config
python
def config(env=DEFAULT_ENV, default=None, **overrides): """Returns configured REDIS dictionary from REDIS_URL.""" config = {} s = os.environ.get(env, default) if s: config = parse(s) overrides = dict([(k.upper(), v) for k, v in overrides.items()]) config.update(overrides) return config
[ "def", "config", "(", "env", "=", "DEFAULT_ENV", ",", "default", "=", "None", ",", "*", "*", "overrides", ")", ":", "config", "=", "{", "}", "s", "=", "os", ".", "environ", ".", "get", "(", "env", ",", "default", ")", "if", "s", ":", "config", ...
Returns configured REDIS dictionary from REDIS_URL.
[ "Returns", "configured", "REDIS", "dictionary", "from", "REDIS_URL", "." ]
train
https://github.com/dstufft/dj-redis-url/blob/06da0cc45db48f3274baf4b83ae413a051c6d8b2/dj_redis_url.py#L17-L31
dstufft/dj-redis-url
dj_redis_url.py
parse
def parse(url): """Parses a database URL.""" config = {} url = urlparse.urlparse(url) # Remove query strings. path = url.path[1:] path = path.split('?', 2)[0] # Update with environment configuration. config.update({ "DB": int(path or 0), "PASSWORD": url.password or None, "HOST": url.hostname or "localhost", "PORT": int(url.port or 6379), }) return config
python
def parse(url): """Parses a database URL.""" config = {} url = urlparse.urlparse(url) # Remove query strings. path = url.path[1:] path = path.split('?', 2)[0] # Update with environment configuration. config.update({ "DB": int(path or 0), "PASSWORD": url.password or None, "HOST": url.hostname or "localhost", "PORT": int(url.port or 6379), }) return config
[ "def", "parse", "(", "url", ")", ":", "config", "=", "{", "}", "url", "=", "urlparse", ".", "urlparse", "(", "url", ")", "# Remove query strings.", "path", "=", "url", ".", "path", "[", "1", ":", "]", "path", "=", "path", ".", "split", "(", "'?'", ...
Parses a database URL.
[ "Parses", "a", "database", "URL", "." ]
train
https://github.com/dstufft/dj-redis-url/blob/06da0cc45db48f3274baf4b83ae413a051c6d8b2/dj_redis_url.py#L34-L53
noxdafox/vminspect
vminspect/filesystem.py
hash_filesystem
def hash_filesystem(filesystem, hashtype='sha1'): """Utility function for running the files iterator at once. Returns a dictionary. {'/path/on/filesystem': 'file_hash'} """ try: return dict(filesystem.checksums('/')) except RuntimeError: results = {} logging.warning("Error hashing disk %s contents, iterating over files.", filesystem.disk_path) for path in filesystem.nodes('/'): try: regular = stat.S_ISREG(filesystem.stat(path)['mode']) except RuntimeError: continue # unaccessible node if regular: try: results[path] = filesystem.checksum(path, hashtype=hashtype) except RuntimeError: logging.debug("Unable to hash %s.", path) return results
python
def hash_filesystem(filesystem, hashtype='sha1'): """Utility function for running the files iterator at once. Returns a dictionary. {'/path/on/filesystem': 'file_hash'} """ try: return dict(filesystem.checksums('/')) except RuntimeError: results = {} logging.warning("Error hashing disk %s contents, iterating over files.", filesystem.disk_path) for path in filesystem.nodes('/'): try: regular = stat.S_ISREG(filesystem.stat(path)['mode']) except RuntimeError: continue # unaccessible node if regular: try: results[path] = filesystem.checksum(path, hashtype=hashtype) except RuntimeError: logging.debug("Unable to hash %s.", path) return results
[ "def", "hash_filesystem", "(", "filesystem", ",", "hashtype", "=", "'sha1'", ")", ":", "try", ":", "return", "dict", "(", "filesystem", ".", "checksums", "(", "'/'", ")", ")", "except", "RuntimeError", ":", "results", "=", "{", "}", "logging", ".", "warn...
Utility function for running the files iterator at once. Returns a dictionary. {'/path/on/filesystem': 'file_hash'}
[ "Utility", "function", "for", "running", "the", "files", "iterator", "at", "once", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L195-L223
noxdafox/vminspect
vminspect/filesystem.py
FileSystem.fsroot
def fsroot(self): """Returns the file system root.""" if self.osname == 'windows': return '{}:\\'.format( self._handler.inspect_get_drive_mappings(self._root)[0][0]) else: return self._handler.inspect_get_mountpoints(self._root)[0][0]
python
def fsroot(self): """Returns the file system root.""" if self.osname == 'windows': return '{}:\\'.format( self._handler.inspect_get_drive_mappings(self._root)[0][0]) else: return self._handler.inspect_get_mountpoints(self._root)[0][0]
[ "def", "fsroot", "(", "self", ")", ":", "if", "self", ".", "osname", "==", "'windows'", ":", "return", "'{}:\\\\'", ".", "format", "(", "self", ".", "_handler", ".", "inspect_get_drive_mappings", "(", "self", ".", "_root", ")", "[", "0", "]", "[", "0",...
Returns the file system root.
[ "Returns", "the", "file", "system", "root", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L74-L80
noxdafox/vminspect
vminspect/filesystem.py
FileSystem.mount
def mount(self, readonly=True): """Mounts the given disk. It must be called before any other method. """ self._handler.add_drive_opts(self.disk_path, readonly=True) self._handler.launch() for mountpoint, device in self._inspect_disk(): if readonly: self._handler.mount_ro(device, mountpoint) else: self._handler.mount(device, mountpoint) if self._handler.inspect_get_type(self._root) == 'windows': self.path = self._windows_path else: self.path = posix_path
python
def mount(self, readonly=True): """Mounts the given disk. It must be called before any other method. """ self._handler.add_drive_opts(self.disk_path, readonly=True) self._handler.launch() for mountpoint, device in self._inspect_disk(): if readonly: self._handler.mount_ro(device, mountpoint) else: self._handler.mount(device, mountpoint) if self._handler.inspect_get_type(self._root) == 'windows': self.path = self._windows_path else: self.path = posix_path
[ "def", "mount", "(", "self", ",", "readonly", "=", "True", ")", ":", "self", ".", "_handler", ".", "add_drive_opts", "(", "self", ".", "disk_path", ",", "readonly", "=", "True", ")", "self", ".", "_handler", ".", "launch", "(", ")", "for", "mountpoint"...
Mounts the given disk. It must be called before any other method.
[ "Mounts", "the", "given", "disk", ".", "It", "must", "be", "called", "before", "any", "other", "method", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L82-L99
noxdafox/vminspect
vminspect/filesystem.py
FileSystem._inspect_disk
def _inspect_disk(self): """Inspects the disk and returns the mountpoints mapping as a list which order is the supposed one for correct mounting. """ roots = self._handler.inspect_os() if roots: self._root = roots[0] return sorted(self._handler.inspect_get_mountpoints(self._root), key=lambda m: len(m[0])) else: raise RuntimeError("No OS found on the given disk image.")
python
def _inspect_disk(self): """Inspects the disk and returns the mountpoints mapping as a list which order is the supposed one for correct mounting. """ roots = self._handler.inspect_os() if roots: self._root = roots[0] return sorted(self._handler.inspect_get_mountpoints(self._root), key=lambda m: len(m[0])) else: raise RuntimeError("No OS found on the given disk image.")
[ "def", "_inspect_disk", "(", "self", ")", ":", "roots", "=", "self", ".", "_handler", ".", "inspect_os", "(", ")", "if", "roots", ":", "self", ".", "_root", "=", "roots", "[", "0", "]", "return", "sorted", "(", "self", ".", "_handler", ".", "inspect_...
Inspects the disk and returns the mountpoints mapping as a list which order is the supposed one for correct mounting.
[ "Inspects", "the", "disk", "and", "returns", "the", "mountpoints", "mapping", "as", "a", "list", "which", "order", "is", "the", "supposed", "one", "for", "correct", "mounting", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L101-L113
noxdafox/vminspect
vminspect/filesystem.py
FileSystem.download
def download(self, source, destination): """Downloads the file on the disk at source into destination.""" self._handler.download(posix_path(source), destination)
python
def download(self, source, destination): """Downloads the file on the disk at source into destination.""" self._handler.download(posix_path(source), destination)
[ "def", "download", "(", "self", ",", "source", ",", "destination", ")", ":", "self", ".", "_handler", ".", "download", "(", "posix_path", "(", "source", ")", ",", "destination", ")" ]
Downloads the file on the disk at source into destination.
[ "Downloads", "the", "file", "on", "the", "disk", "at", "source", "into", "destination", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L123-L125
noxdafox/vminspect
vminspect/filesystem.py
FileSystem.nodes
def nodes(self, path): """Iterates over the files and directories contained within the disk starting from the given path. Yields the path of the nodes. """ path = posix_path(path) yield from (self.path(path, e) for e in self._handler.find(path))
python
def nodes(self, path): """Iterates over the files and directories contained within the disk starting from the given path. Yields the path of the nodes. """ path = posix_path(path) yield from (self.path(path, e) for e in self._handler.find(path))
[ "def", "nodes", "(", "self", ",", "path", ")", ":", "path", "=", "posix_path", "(", "path", ")", "yield", "from", "(", "self", ".", "path", "(", "path", ",", "e", ")", "for", "e", "in", "self", ".", "_handler", ".", "find", "(", "path", ")", ")...
Iterates over the files and directories contained within the disk starting from the given path. Yields the path of the nodes.
[ "Iterates", "over", "the", "files", "and", "directories", "contained", "within", "the", "disk", "starting", "from", "the", "given", "path", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L131-L140
noxdafox/vminspect
vminspect/filesystem.py
FileSystem.checksum
def checksum(self, path, hashtype='sha1'): """Returns the checksum of the given path.""" return self._handler.checksum(hashtype, posix_path(path))
python
def checksum(self, path, hashtype='sha1'): """Returns the checksum of the given path.""" return self._handler.checksum(hashtype, posix_path(path))
[ "def", "checksum", "(", "self", ",", "path", ",", "hashtype", "=", "'sha1'", ")", ":", "return", "self", ".", "_handler", ".", "checksum", "(", "hashtype", ",", "posix_path", "(", "path", ")", ")" ]
Returns the checksum of the given path.
[ "Returns", "the", "checksum", "of", "the", "given", "path", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L142-L144
noxdafox/vminspect
vminspect/filesystem.py
FileSystem.checksums
def checksums(self, path, hashtype='sha1'): """Iterates over the files hashes contained within the disk starting from the given path. The hashtype keyword allows to choose the file hashing algorithm. Yields the following values: "C:\\Windows\\System32\\NTUSER.DAT", "hash" for windows "/home/user/text.txt", "hash" for other FS """ with NamedTemporaryFile(buffering=0) as tempfile: self._handler.checksums_out(hashtype, posix_path(path), tempfile.name) yield from ((self.path(f[1].lstrip('.')), f[0]) for f in (l.decode('utf8').strip().split(None, 1) for l in tempfile))
python
def checksums(self, path, hashtype='sha1'): """Iterates over the files hashes contained within the disk starting from the given path. The hashtype keyword allows to choose the file hashing algorithm. Yields the following values: "C:\\Windows\\System32\\NTUSER.DAT", "hash" for windows "/home/user/text.txt", "hash" for other FS """ with NamedTemporaryFile(buffering=0) as tempfile: self._handler.checksums_out(hashtype, posix_path(path), tempfile.name) yield from ((self.path(f[1].lstrip('.')), f[0]) for f in (l.decode('utf8').strip().split(None, 1) for l in tempfile))
[ "def", "checksums", "(", "self", ",", "path", ",", "hashtype", "=", "'sha1'", ")", ":", "with", "NamedTemporaryFile", "(", "buffering", "=", "0", ")", "as", "tempfile", ":", "self", ".", "_handler", ".", "checksums_out", "(", "hashtype", ",", "posix_path",...
Iterates over the files hashes contained within the disk starting from the given path. The hashtype keyword allows to choose the file hashing algorithm. Yields the following values: "C:\\Windows\\System32\\NTUSER.DAT", "hash" for windows "/home/user/text.txt", "hash" for other FS
[ "Iterates", "over", "the", "files", "hashes", "contained", "within", "the", "disk", "starting", "from", "the", "given", "path", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L146-L164
jlmadurga/listenclosely
listenclosely/app.py
ListenCloselyApp.listen
def listen(self): """ Listen/Connect to message service loop to start receiving messages. Do not include in constructor, in this way it can be included in tasks """ self.listening = True try: self.service_backend.listen() except AuthenticationError: self.listening = False raise else: self.listening = False
python
def listen(self): """ Listen/Connect to message service loop to start receiving messages. Do not include in constructor, in this way it can be included in tasks """ self.listening = True try: self.service_backend.listen() except AuthenticationError: self.listening = False raise else: self.listening = False
[ "def", "listen", "(", "self", ")", ":", "self", ".", "listening", "=", "True", "try", ":", "self", ".", "service_backend", ".", "listen", "(", ")", "except", "AuthenticationError", ":", "self", ".", "listening", "=", "False", "raise", "else", ":", "self"...
Listen/Connect to message service loop to start receiving messages. Do not include in constructor, in this way it can be included in tasks
[ "Listen", "/", "Connect", "to", "message", "service", "loop", "to", "start", "receiving", "messages", ".", "Do", "not", "include", "in", "constructor", "in", "this", "way", "it", "can", "be", "included", "in", "tasks" ]
train
https://github.com/jlmadurga/listenclosely/blob/d6df9110c3ed6fd337e0236cccbe4d931bf217b0/listenclosely/app.py#L18-L30
jlmadurga/listenclosely
listenclosely/app.py
ListenCloselyApp.attend_pendings
def attend_pendings(self): """ Check all chats created with no agent assigned yet. Schedule a timer timeout to call it. """ chats_attended = [] pending_chats = Chat.pending.all() for pending_chat in pending_chats: free_agent = self.strategy.free_agent() if free_agent: pending_chat.attend_pending(free_agent, self) pending_chat.save() chats_attended.append(pending_chat) else: break return chats_attended
python
def attend_pendings(self): """ Check all chats created with no agent assigned yet. Schedule a timer timeout to call it. """ chats_attended = [] pending_chats = Chat.pending.all() for pending_chat in pending_chats: free_agent = self.strategy.free_agent() if free_agent: pending_chat.attend_pending(free_agent, self) pending_chat.save() chats_attended.append(pending_chat) else: break return chats_attended
[ "def", "attend_pendings", "(", "self", ")", ":", "chats_attended", "=", "[", "]", "pending_chats", "=", "Chat", ".", "pending", ".", "all", "(", ")", "for", "pending_chat", "in", "pending_chats", ":", "free_agent", "=", "self", ".", "strategy", ".", "free_...
Check all chats created with no agent assigned yet. Schedule a timer timeout to call it.
[ "Check", "all", "chats", "created", "with", "no", "agent", "assigned", "yet", ".", "Schedule", "a", "timer", "timeout", "to", "call", "it", "." ]
train
https://github.com/jlmadurga/listenclosely/blob/d6df9110c3ed6fd337e0236cccbe4d931bf217b0/listenclosely/app.py#L43-L58
jlmadurga/listenclosely
listenclosely/app.py
ListenCloselyApp.terminate_obsolete
def terminate_obsolete(self): """ Check chats can be considered as obsolete to terminate them """ chats_terminated = [] live_chats = Chat.live.all() for live_chat in live_chats: if live_chat.is_obsolete(self.time_obsolete_offset): live_chat.terminate() live_chat.save() chats_terminated.append(live_chat) return chats_terminated
python
def terminate_obsolete(self): """ Check chats can be considered as obsolete to terminate them """ chats_terminated = [] live_chats = Chat.live.all() for live_chat in live_chats: if live_chat.is_obsolete(self.time_obsolete_offset): live_chat.terminate() live_chat.save() chats_terminated.append(live_chat) return chats_terminated
[ "def", "terminate_obsolete", "(", "self", ")", ":", "chats_terminated", "=", "[", "]", "live_chats", "=", "Chat", ".", "live", ".", "all", "(", ")", "for", "live_chat", "in", "live_chats", ":", "if", "live_chat", ".", "is_obsolete", "(", "self", ".", "ti...
Check chats can be considered as obsolete to terminate them
[ "Check", "chats", "can", "be", "considered", "as", "obsolete", "to", "terminate", "them" ]
train
https://github.com/jlmadurga/listenclosely/blob/d6df9110c3ed6fd337e0236cccbe4d931bf217b0/listenclosely/app.py#L60-L71
jlmadurga/listenclosely
listenclosely/app.py
ListenCloselyApp.on_message
def on_message(self, message_id_service, contact_id_service, content): """ To use as callback in message service backend """ try: live_chat = Chat.live.get( Q(agent__id_service=contact_id_service) | Q(asker__id_service=contact_id_service)) except ObjectDoesNotExist: self._new_chat_processing(message_id_service, contact_id_service, content) else: live_chat.handle_message(message_id_service, contact_id_service, content, self)
python
def on_message(self, message_id_service, contact_id_service, content): """ To use as callback in message service backend """ try: live_chat = Chat.live.get( Q(agent__id_service=contact_id_service) | Q(asker__id_service=contact_id_service)) except ObjectDoesNotExist: self._new_chat_processing(message_id_service, contact_id_service, content) else: live_chat.handle_message(message_id_service, contact_id_service, content, self)
[ "def", "on_message", "(", "self", ",", "message_id_service", ",", "contact_id_service", ",", "content", ")", ":", "try", ":", "live_chat", "=", "Chat", ".", "live", ".", "get", "(", "Q", "(", "agent__id_service", "=", "contact_id_service", ")", "|", "Q", "...
To use as callback in message service backend
[ "To", "use", "as", "callback", "in", "message", "service", "backend" ]
train
https://github.com/jlmadurga/listenclosely/blob/d6df9110c3ed6fd337e0236cccbe4d931bf217b0/listenclosely/app.py#L97-L107
pudo/banal
banal/filesystem.py
decode_path
def decode_path(file_path): """Turn a path name into unicode.""" if file_path is None: return if isinstance(file_path, six.binary_type): file_path = file_path.decode(sys.getfilesystemencoding()) return file_path
python
def decode_path(file_path): """Turn a path name into unicode.""" if file_path is None: return if isinstance(file_path, six.binary_type): file_path = file_path.decode(sys.getfilesystemencoding()) return file_path
[ "def", "decode_path", "(", "file_path", ")", ":", "if", "file_path", "is", "None", ":", "return", "if", "isinstance", "(", "file_path", ",", "six", ".", "binary_type", ")", ":", "file_path", "=", "file_path", ".", "decode", "(", "sys", ".", "getfilesysteme...
Turn a path name into unicode.
[ "Turn", "a", "path", "name", "into", "unicode", "." ]
train
https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/filesystem.py#L5-L11
Contraz/pyrocket
rocket/connectors/socket.py
SocketConnector.read_command
def read_command(self): """ Attempt to read the next command from the editor/server :return: boolean. Did we actually read a command? """ # Do a non-blocking read here so the demo can keep running if there is no data comm = self.reader.byte(blocking=False) if comm is None: return False cmds = { SET_KEY: self.handle_set_key, DELETE_KEY: self.handle_delete_key, SET_ROW: self.handle_set_row, PAUSE: self.handle_pause, SAVE_TRACKS: self.handle_save_tracks } func = cmds.get(comm) if func: func() else: logger.error("Unknown command: %s", comm) return True
python
def read_command(self): """ Attempt to read the next command from the editor/server :return: boolean. Did we actually read a command? """ # Do a non-blocking read here so the demo can keep running if there is no data comm = self.reader.byte(blocking=False) if comm is None: return False cmds = { SET_KEY: self.handle_set_key, DELETE_KEY: self.handle_delete_key, SET_ROW: self.handle_set_row, PAUSE: self.handle_pause, SAVE_TRACKS: self.handle_save_tracks } func = cmds.get(comm) if func: func() else: logger.error("Unknown command: %s", comm) return True
[ "def", "read_command", "(", "self", ")", ":", "# Do a non-blocking read here so the demo can keep running if there is no data", "comm", "=", "self", ".", "reader", ".", "byte", "(", "blocking", "=", "False", ")", "if", "comm", "is", "None", ":", "return", "False", ...
Attempt to read the next command from the editor/server :return: boolean. Did we actually read a command?
[ "Attempt", "to", "read", "the", "next", "command", "from", "the", "editor", "/", "server", ":", "return", ":", "boolean", ".", "Did", "we", "actually", "read", "a", "command?" ]
train
https://github.com/Contraz/pyrocket/blob/97f4153c79030497b97fbaf43b1aa6dc1a6c7f7b/rocket/connectors/socket.py#L87-L112
Contraz/pyrocket
rocket/connectors/socket.py
SocketConnector.handle_set_key
def handle_set_key(self): """Read incoming key from server""" track_id = self.reader.int() row = self.reader.int() value = self.reader.float() kind = self.reader.byte() logger.info(" -> track=%s, row=%s, value=%s, type=%s", track_id, row, value, kind) # Add or update track value track = self.tracks.get_by_id(track_id) track.add_or_update(row, value, kind)
python
def handle_set_key(self): """Read incoming key from server""" track_id = self.reader.int() row = self.reader.int() value = self.reader.float() kind = self.reader.byte() logger.info(" -> track=%s, row=%s, value=%s, type=%s", track_id, row, value, kind) # Add or update track value track = self.tracks.get_by_id(track_id) track.add_or_update(row, value, kind)
[ "def", "handle_set_key", "(", "self", ")", ":", "track_id", "=", "self", ".", "reader", ".", "int", "(", ")", "row", "=", "self", ".", "reader", ".", "int", "(", ")", "value", "=", "self", ".", "reader", ".", "float", "(", ")", "kind", "=", "self...
Read incoming key from server
[ "Read", "incoming", "key", "from", "server" ]
train
https://github.com/Contraz/pyrocket/blob/97f4153c79030497b97fbaf43b1aa6dc1a6c7f7b/rocket/connectors/socket.py#L114-L124
Contraz/pyrocket
rocket/connectors/socket.py
SocketConnector.handle_delete_key
def handle_delete_key(self): """Read incoming delete key event from server""" track_id = self.reader.int() row = self.reader.int() logger.info(" -> track=%s, row=%s", track_id, row) # Delete the actual track value track = self.tracks.get_by_id(track_id) track.delete(row)
python
def handle_delete_key(self): """Read incoming delete key event from server""" track_id = self.reader.int() row = self.reader.int() logger.info(" -> track=%s, row=%s", track_id, row) # Delete the actual track value track = self.tracks.get_by_id(track_id) track.delete(row)
[ "def", "handle_delete_key", "(", "self", ")", ":", "track_id", "=", "self", ".", "reader", ".", "int", "(", ")", "row", "=", "self", ".", "reader", ".", "int", "(", ")", "logger", ".", "info", "(", "\" -> track=%s, row=%s\"", ",", "track_id", ",", "row...
Read incoming delete key event from server
[ "Read", "incoming", "delete", "key", "event", "from", "server" ]
train
https://github.com/Contraz/pyrocket/blob/97f4153c79030497b97fbaf43b1aa6dc1a6c7f7b/rocket/connectors/socket.py#L126-L134
Contraz/pyrocket
rocket/connectors/socket.py
SocketConnector.handle_set_row
def handle_set_row(self): """Read incoming row change from server""" row = self.reader.int() logger.info(" -> row: %s", row) self.controller.row = row
python
def handle_set_row(self): """Read incoming row change from server""" row = self.reader.int() logger.info(" -> row: %s", row) self.controller.row = row
[ "def", "handle_set_row", "(", "self", ")", ":", "row", "=", "self", ".", "reader", ".", "int", "(", ")", "logger", ".", "info", "(", "\" -> row: %s\"", ",", "row", ")", "self", ".", "controller", ".", "row", "=", "row" ]
Read incoming row change from server
[ "Read", "incoming", "row", "change", "from", "server" ]
train
https://github.com/Contraz/pyrocket/blob/97f4153c79030497b97fbaf43b1aa6dc1a6c7f7b/rocket/connectors/socket.py#L136-L140
Contraz/pyrocket
rocket/connectors/socket.py
SocketConnector.handle_pause
def handle_pause(self): """Read pause signal from server""" flag = self.reader.byte() if flag > 0: logger.info(" -> pause: on") self.controller.playing = False else: logger.info(" -> pause: off") self.controller.playing = True
python
def handle_pause(self): """Read pause signal from server""" flag = self.reader.byte() if flag > 0: logger.info(" -> pause: on") self.controller.playing = False else: logger.info(" -> pause: off") self.controller.playing = True
[ "def", "handle_pause", "(", "self", ")", ":", "flag", "=", "self", ".", "reader", ".", "byte", "(", ")", "if", "flag", ">", "0", ":", "logger", ".", "info", "(", "\" -> pause: on\"", ")", "self", ".", "controller", ".", "playing", "=", "False", "else...
Read pause signal from server
[ "Read", "pause", "signal", "from", "server" ]
train
https://github.com/Contraz/pyrocket/blob/97f4153c79030497b97fbaf43b1aa6dc1a6c7f7b/rocket/connectors/socket.py#L142-L150
APSL/transmanager
transmanager/filters/filters.py
TaskFilter.get_choices_for_models
def get_choices_for_models(): """ Get the select choices for models in optgroup mode :return: """ result = {} apps = [item.model.split(' - ')[0] for item in TransModelLanguage.objects.all()] qs = ContentType.objects.exclude(model__contains='translation').filter(app_label__in=apps).order_by( 'app_label', 'model' ) for ct in qs: if not issubclass(ct.model_class(), TranslatableModel): continue if ct.app_label not in result: result[ct.app_label] = [] result[ct.app_label].append(( ct.id, str(ct.model_class()._meta.verbose_name_plural) )) choices = [(str(_('Todas las clases')), (('', _('Todas las clases')),))] for key, value in result.items(): choices.append((key, tuple([it for it in sorted(value, key=lambda el: el[1])]))) return choices
python
def get_choices_for_models(): """ Get the select choices for models in optgroup mode :return: """ result = {} apps = [item.model.split(' - ')[0] for item in TransModelLanguage.objects.all()] qs = ContentType.objects.exclude(model__contains='translation').filter(app_label__in=apps).order_by( 'app_label', 'model' ) for ct in qs: if not issubclass(ct.model_class(), TranslatableModel): continue if ct.app_label not in result: result[ct.app_label] = [] result[ct.app_label].append(( ct.id, str(ct.model_class()._meta.verbose_name_plural) )) choices = [(str(_('Todas las clases')), (('', _('Todas las clases')),))] for key, value in result.items(): choices.append((key, tuple([it for it in sorted(value, key=lambda el: el[1])]))) return choices
[ "def", "get_choices_for_models", "(", ")", ":", "result", "=", "{", "}", "apps", "=", "[", "item", ".", "model", ".", "split", "(", "' - '", ")", "[", "0", "]", "for", "item", "in", "TransModelLanguage", ".", "objects", ".", "all", "(", ")", "]", "...
Get the select choices for models in optgroup mode :return:
[ "Get", "the", "select", "choices", "for", "models", "in", "optgroup", "mode", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/filters/filters.py#L97-L119
viraptor/wigle
wigle/__init__.py
Wigle.search
def search(self, lat_range=None, long_range=None, variance=None, bssid=None, ssid=None, last_update=None, address=None, state=None, zipcode=None, on_new_page=None, max_results=100): """ Search the Wigle wifi database for matching entries. The following criteria are supported: Args: lat_range ((float, float)): latitude range long_range ((float, float)): longitude range variance (float): radius tolerance in degrees bssid (str): BSSID/MAC of AP ssid (str): SSID of network last_update (datetime): when was the AP last seen address (str): location, address state (str): location, state zipcode (str): location, zip code on_new_page (func(int)): callback to notify when requesting new page of results Returns: [dict]: list of dicts describing matching wifis """ params = { 'latrange1': lat_range[0] if lat_range else "", 'latrange2': lat_range[1] if lat_range else "", 'longrange1': long_range[0] if long_range else "", 'longrange2': long_range[1] if long_range else "", 'variance': str(variance) if variance else "0.01", 'netid': bssid or "", 'ssid': ssid or "", 'lastupdt': last_update.strftime("%Y%m%d%H%M%S") if last_update else "", 'addresscode': address or "", 'statecode': state or "", 'zipcode': zipcode or "", 'Query': "Query", } wifis = [] while True: if on_new_page: on_new_page(params.get('first', 1)) resp = self._authenticated_request('jsonSearch', params=params) data = resp.json() if not data['success']: raise_wigle_error(data) for result in data['results'][:max_results-len(wifis)]: normalise_entry(result) wifis.append(result) if data['resultCount'] < WIGLE_PAGESIZE or len(wifis) >= max_results: break params['first'] = data['last'] + 1 return wifis
python
def search(self, lat_range=None, long_range=None, variance=None, bssid=None, ssid=None, last_update=None, address=None, state=None, zipcode=None, on_new_page=None, max_results=100): """ Search the Wigle wifi database for matching entries. The following criteria are supported: Args: lat_range ((float, float)): latitude range long_range ((float, float)): longitude range variance (float): radius tolerance in degrees bssid (str): BSSID/MAC of AP ssid (str): SSID of network last_update (datetime): when was the AP last seen address (str): location, address state (str): location, state zipcode (str): location, zip code on_new_page (func(int)): callback to notify when requesting new page of results Returns: [dict]: list of dicts describing matching wifis """ params = { 'latrange1': lat_range[0] if lat_range else "", 'latrange2': lat_range[1] if lat_range else "", 'longrange1': long_range[0] if long_range else "", 'longrange2': long_range[1] if long_range else "", 'variance': str(variance) if variance else "0.01", 'netid': bssid or "", 'ssid': ssid or "", 'lastupdt': last_update.strftime("%Y%m%d%H%M%S") if last_update else "", 'addresscode': address or "", 'statecode': state or "", 'zipcode': zipcode or "", 'Query': "Query", } wifis = [] while True: if on_new_page: on_new_page(params.get('first', 1)) resp = self._authenticated_request('jsonSearch', params=params) data = resp.json() if not data['success']: raise_wigle_error(data) for result in data['results'][:max_results-len(wifis)]: normalise_entry(result) wifis.append(result) if data['resultCount'] < WIGLE_PAGESIZE or len(wifis) >= max_results: break params['first'] = data['last'] + 1 return wifis
[ "def", "search", "(", "self", ",", "lat_range", "=", "None", ",", "long_range", "=", "None", ",", "variance", "=", "None", ",", "bssid", "=", "None", ",", "ssid", "=", "None", ",", "last_update", "=", "None", ",", "address", "=", "None", ",", "state"...
Search the Wigle wifi database for matching entries. The following criteria are supported: Args: lat_range ((float, float)): latitude range long_range ((float, float)): longitude range variance (float): radius tolerance in degrees bssid (str): BSSID/MAC of AP ssid (str): SSID of network last_update (datetime): when was the AP last seen address (str): location, address state (str): location, state zipcode (str): location, zip code on_new_page (func(int)): callback to notify when requesting new page of results Returns: [dict]: list of dicts describing matching wifis
[ "Search", "the", "Wigle", "wifi", "database", "for", "matching", "entries", ".", "The", "following", "criteria", "are", "supported", ":" ]
train
https://github.com/viraptor/wigle/blob/d87da28d58aaab546a277dfe919baac7804aaea8/wigle/__init__.py#L107-L166
APSL/transmanager
transmanager/models.py
TransLanguage.save
def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Overwrite of the save method in order that when setting the language as main we deactivate any other model selected as main before :param force_insert: :param force_update: :param using: :param update_fields: :return: """ super().save(force_insert, force_update, using, update_fields) if self.main_language: TransLanguage.objects.exclude(pk=self.pk).update(main_language=False)
python
def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Overwrite of the save method in order that when setting the language as main we deactivate any other model selected as main before :param force_insert: :param force_update: :param using: :param update_fields: :return: """ super().save(force_insert, force_update, using, update_fields) if self.main_language: TransLanguage.objects.exclude(pk=self.pk).update(main_language=False)
[ "def", "save", "(", "self", ",", "force_insert", "=", "False", ",", "force_update", "=", "False", ",", "using", "=", "None", ",", "update_fields", "=", "None", ")", ":", "super", "(", ")", ".", "save", "(", "force_insert", ",", "force_update", ",", "us...
Overwrite of the save method in order that when setting the language as main we deactivate any other model selected as main before :param force_insert: :param force_update: :param using: :param update_fields: :return:
[ "Overwrite", "of", "the", "save", "method", "in", "order", "that", "when", "setting", "the", "language", "as", "main", "we", "deactivate", "any", "other", "model", "selected", "as", "main", "before" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/models.py#L31-L44
rapidpro/dash
dash/orgs/tasks.py
trigger_org_task
def trigger_org_task(task_name, queue="celery"): """ Triggers the given org task to be run for all active orgs :param task_name: the full task name, e.g. 'myproj.myapp.tasks.do_stuff' :param queue: the name of the queue to send org sub-tasks to """ active_orgs = apps.get_model("orgs", "Org").objects.filter(is_active=True) for org in active_orgs: sig = signature(task_name, args=[org.pk]) sig.apply_async(queue=queue) logger.info("Requested task '%s' for %d active orgs" % (task_name, len(active_orgs)))
python
def trigger_org_task(task_name, queue="celery"): """ Triggers the given org task to be run for all active orgs :param task_name: the full task name, e.g. 'myproj.myapp.tasks.do_stuff' :param queue: the name of the queue to send org sub-tasks to """ active_orgs = apps.get_model("orgs", "Org").objects.filter(is_active=True) for org in active_orgs: sig = signature(task_name, args=[org.pk]) sig.apply_async(queue=queue) logger.info("Requested task '%s' for %d active orgs" % (task_name, len(active_orgs)))
[ "def", "trigger_org_task", "(", "task_name", ",", "queue", "=", "\"celery\"", ")", ":", "active_orgs", "=", "apps", ".", "get_model", "(", "\"orgs\"", ",", "\"Org\"", ")", ".", "objects", ".", "filter", "(", "is_active", "=", "True", ")", "for", "org", "...
Triggers the given org task to be run for all active orgs :param task_name: the full task name, e.g. 'myproj.myapp.tasks.do_stuff' :param queue: the name of the queue to send org sub-tasks to
[ "Triggers", "the", "given", "org", "task", "to", "be", "run", "for", "all", "active", "orgs", ":", "param", "task_name", ":", "the", "full", "task", "name", "e", ".", "g", ".", "myproj", ".", "myapp", ".", "tasks", ".", "do_stuff", ":", "param", "que...
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/orgs/tasks.py#L29-L40
rapidpro/dash
dash/orgs/tasks.py
org_task
def org_task(task_key, lock_timeout=None): """ Decorator to create an org task. :param task_key: the task key used for state storage and locking, e.g. 'do-stuff' :param lock_timeout: the lock timeout in seconds """ def _org_task(task_func): def _decorator(org_id): org = apps.get_model("orgs", "Org").objects.get(pk=org_id) maybe_run_for_org(org, task_func, task_key, lock_timeout) return shared_task(wraps(task_func)(_decorator)) return _org_task
python
def org_task(task_key, lock_timeout=None): """ Decorator to create an org task. :param task_key: the task key used for state storage and locking, e.g. 'do-stuff' :param lock_timeout: the lock timeout in seconds """ def _org_task(task_func): def _decorator(org_id): org = apps.get_model("orgs", "Org").objects.get(pk=org_id) maybe_run_for_org(org, task_func, task_key, lock_timeout) return shared_task(wraps(task_func)(_decorator)) return _org_task
[ "def", "org_task", "(", "task_key", ",", "lock_timeout", "=", "None", ")", ":", "def", "_org_task", "(", "task_func", ")", ":", "def", "_decorator", "(", "org_id", ")", ":", "org", "=", "apps", ".", "get_model", "(", "\"orgs\"", ",", "\"Org\"", ")", "....
Decorator to create an org task. :param task_key: the task key used for state storage and locking, e.g. 'do-stuff' :param lock_timeout: the lock timeout in seconds
[ "Decorator", "to", "create", "an", "org", "task", ".", ":", "param", "task_key", ":", "the", "task", "key", "used", "for", "state", "storage", "and", "locking", "e", ".", "g", ".", "do", "-", "stuff", ":", "param", "lock_timeout", ":", "the", "lock", ...
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/orgs/tasks.py#L43-L57
rapidpro/dash
dash/orgs/tasks.py
maybe_run_for_org
def maybe_run_for_org(org, task_func, task_key, lock_timeout): """ Runs the given task function for the specified org provided it's not already running :param org: the org :param task_func: the task function :param task_key: the task key :param lock_timeout: the lock timeout in seconds """ r = get_redis_connection() key = TaskState.get_lock_key(org, task_key) if r.get(key): logger.warning("Skipping task %s for org #%d as it is still running" % (task_key, org.id)) else: with r.lock(key, timeout=lock_timeout): state = org.get_task_state(task_key) if state.is_disabled: logger.info("Skipping task %s for org #%d as is marked disabled" % (task_key, org.id)) return logger.info("Started task %s for org #%d..." % (task_key, org.id)) prev_started_on = state.last_successfully_started_on this_started_on = timezone.now() state.started_on = this_started_on state.ended_on = None state.save(update_fields=("started_on", "ended_on")) num_task_args = len(inspect.getargspec(task_func).args) try: if num_task_args == 3: results = task_func(org, prev_started_on, this_started_on) elif num_task_args == 1: results = task_func(org) else: raise ValueError("Task signature must be foo(org) or foo(org, since, until)") # pragma: no cover state.ended_on = timezone.now() state.last_successfully_started_on = this_started_on state.last_results = json.dumps(results) state.is_failing = False state.save(update_fields=("ended_on", "last_successfully_started_on", "last_results", "is_failing")) logger.info("Finished task %s for org #%d with result: %s" % (task_key, org.id, json.dumps(results))) except Exception as e: state.ended_on = timezone.now() state.last_results = None state.is_failing = True state.save(update_fields=("ended_on", "last_results", "is_failing")) logger.exception("Task %s for org #%d failed" % (task_key, org.id)) raise e
python
def maybe_run_for_org(org, task_func, task_key, lock_timeout): """ Runs the given task function for the specified org provided it's not already running :param org: the org :param task_func: the task function :param task_key: the task key :param lock_timeout: the lock timeout in seconds """ r = get_redis_connection() key = TaskState.get_lock_key(org, task_key) if r.get(key): logger.warning("Skipping task %s for org #%d as it is still running" % (task_key, org.id)) else: with r.lock(key, timeout=lock_timeout): state = org.get_task_state(task_key) if state.is_disabled: logger.info("Skipping task %s for org #%d as is marked disabled" % (task_key, org.id)) return logger.info("Started task %s for org #%d..." % (task_key, org.id)) prev_started_on = state.last_successfully_started_on this_started_on = timezone.now() state.started_on = this_started_on state.ended_on = None state.save(update_fields=("started_on", "ended_on")) num_task_args = len(inspect.getargspec(task_func).args) try: if num_task_args == 3: results = task_func(org, prev_started_on, this_started_on) elif num_task_args == 1: results = task_func(org) else: raise ValueError("Task signature must be foo(org) or foo(org, since, until)") # pragma: no cover state.ended_on = timezone.now() state.last_successfully_started_on = this_started_on state.last_results = json.dumps(results) state.is_failing = False state.save(update_fields=("ended_on", "last_successfully_started_on", "last_results", "is_failing")) logger.info("Finished task %s for org #%d with result: %s" % (task_key, org.id, json.dumps(results))) except Exception as e: state.ended_on = timezone.now() state.last_results = None state.is_failing = True state.save(update_fields=("ended_on", "last_results", "is_failing")) logger.exception("Task %s for org #%d failed" % (task_key, org.id)) raise e
[ "def", "maybe_run_for_org", "(", "org", ",", "task_func", ",", "task_key", ",", "lock_timeout", ")", ":", "r", "=", "get_redis_connection", "(", ")", "key", "=", "TaskState", ".", "get_lock_key", "(", "org", ",", "task_key", ")", "if", "r", ".", "get", "...
Runs the given task function for the specified org provided it's not already running :param org: the org :param task_func: the task function :param task_key: the task key :param lock_timeout: the lock timeout in seconds
[ "Runs", "the", "given", "task", "function", "for", "the", "specified", "org", "provided", "it", "s", "not", "already", "running", ":", "param", "org", ":", "the", "org", ":", "param", "task_func", ":", "the", "task", "function", ":", "param", "task_key", ...
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/orgs/tasks.py#L60-L115
HDI-Project/RDT
rdt/transformers/base.py
BaseTransformer.check_data_type
def check_data_type(self): """Check the type of the transformer and column match. Args: column_metadata(dict): Metadata of the column. Raises a ValueError if the types don't match """ metadata_type = self.column_metadata.get('type') if self.type != metadata_type and metadata_type not in self.type: raise ValueError('Types of transformer don\'t match')
python
def check_data_type(self): """Check the type of the transformer and column match. Args: column_metadata(dict): Metadata of the column. Raises a ValueError if the types don't match """ metadata_type = self.column_metadata.get('type') if self.type != metadata_type and metadata_type not in self.type: raise ValueError('Types of transformer don\'t match')
[ "def", "check_data_type", "(", "self", ")", ":", "metadata_type", "=", "self", ".", "column_metadata", ".", "get", "(", "'type'", ")", "if", "self", ".", "type", "!=", "metadata_type", "and", "metadata_type", "not", "in", "self", ".", "type", ":", "raise",...
Check the type of the transformer and column match. Args: column_metadata(dict): Metadata of the column. Raises a ValueError if the types don't match
[ "Check", "the", "type", "of", "the", "transformer", "and", "column", "match", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/base.py#L60-L70
HDI-Project/RDT
rdt/transformers/datetime.py
DTTransformer.fit
def fit(self, col): """Prepare the transformer to convert data. Args: col(pandas.DataFrame): Data to transform. Returns: None """ dates = self.safe_datetime_cast(col) self.default_val = dates.groupby(dates).count().index[0].timestamp() * 1e9
python
def fit(self, col): """Prepare the transformer to convert data. Args: col(pandas.DataFrame): Data to transform. Returns: None """ dates = self.safe_datetime_cast(col) self.default_val = dates.groupby(dates).count().index[0].timestamp() * 1e9
[ "def", "fit", "(", "self", ",", "col", ")", ":", "dates", "=", "self", ".", "safe_datetime_cast", "(", "col", ")", "self", ".", "default_val", "=", "dates", ".", "groupby", "(", "dates", ")", ".", "count", "(", ")", ".", "index", "[", "0", "]", "...
Prepare the transformer to convert data. Args: col(pandas.DataFrame): Data to transform. Returns: None
[ "Prepare", "the", "transformer", "to", "convert", "data", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L23-L33
HDI-Project/RDT
rdt/transformers/datetime.py
DTTransformer.transform
def transform(self, col): """Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ out = pd.DataFrame() out[self.col_name] = self.safe_datetime_cast(col) out[self.col_name] = self.to_timestamp(out) return out
python
def transform(self, col): """Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ out = pd.DataFrame() out[self.col_name] = self.safe_datetime_cast(col) out[self.col_name] = self.to_timestamp(out) return out
[ "def", "transform", "(", "self", ",", "col", ")", ":", "out", "=", "pd", ".", "DataFrame", "(", ")", "out", "[", "self", ".", "col_name", "]", "=", "self", ".", "safe_datetime_cast", "(", "col", ")", "out", "[", "self", ".", "col_name", "]", "=", ...
Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
[ "Prepare", "the", "transformer", "to", "convert", "data", "and", "return", "the", "processed", "table", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L35-L48
HDI-Project/RDT
rdt/transformers/datetime.py
DTTransformer.reverse_transform
def reverse_transform(self, col): """Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ if isinstance(col, pd.Series): col = col.to_frame() output = pd.DataFrame(index=col.index) output[self.col_name] = col.apply(self.safe_date, axis=1) return output
python
def reverse_transform(self, col): """Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ if isinstance(col, pd.Series): col = col.to_frame() output = pd.DataFrame(index=col.index) output[self.col_name] = col.apply(self.safe_date, axis=1) return output
[ "def", "reverse_transform", "(", "self", ",", "col", ")", ":", "if", "isinstance", "(", "col", ",", "pd", ".", "Series", ")", ":", "col", "=", "col", ".", "to_frame", "(", ")", "output", "=", "pd", ".", "DataFrame", "(", "index", "=", "col", ".", ...
Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
[ "Converts", "data", "back", "into", "original", "format", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L53-L68
HDI-Project/RDT
rdt/transformers/datetime.py
DTTransformer.safe_datetime_cast
def safe_datetime_cast(self, col): """Parses string values into datetime. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.Series """ casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce') if len(casted_dates[casted_dates.isnull()]): # This will raise an error for bad formatted data # but not for out of bonds or missing dates. slice_ = casted_dates.isnull() & ~col[self.col_name].isnull() col[slice_][self.col_name].apply(self.strptime_format) return casted_dates
python
def safe_datetime_cast(self, col): """Parses string values into datetime. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.Series """ casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce') if len(casted_dates[casted_dates.isnull()]): # This will raise an error for bad formatted data # but not for out of bonds or missing dates. slice_ = casted_dates.isnull() & ~col[self.col_name].isnull() col[slice_][self.col_name].apply(self.strptime_format) return casted_dates
[ "def", "safe_datetime_cast", "(", "self", ",", "col", ")", ":", "casted_dates", "=", "pd", ".", "to_datetime", "(", "col", "[", "self", ".", "col_name", "]", ",", "format", "=", "self", ".", "date_format", ",", "errors", "=", "'coerce'", ")", "if", "le...
Parses string values into datetime. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.Series
[ "Parses", "string", "values", "into", "datetime", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L70-L87
HDI-Project/RDT
rdt/transformers/datetime.py
DTTransformer.to_timestamp
def to_timestamp(self, data): """Transform a datetime series into linux epoch. Args: data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`. Returns: pandas.Series """ result = pd.Series(index=data.index) _slice = ~data[self.col_name].isnull() result[_slice] = data[_slice][self.col_name].astype('int64') return result
python
def to_timestamp(self, data): """Transform a datetime series into linux epoch. Args: data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`. Returns: pandas.Series """ result = pd.Series(index=data.index) _slice = ~data[self.col_name].isnull() result[_slice] = data[_slice][self.col_name].astype('int64') return result
[ "def", "to_timestamp", "(", "self", ",", "data", ")", ":", "result", "=", "pd", ".", "Series", "(", "index", "=", "data", ".", "index", ")", "_slice", "=", "~", "data", "[", "self", ".", "col_name", "]", ".", "isnull", "(", ")", "result", "[", "_...
Transform a datetime series into linux epoch. Args: data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`. Returns: pandas.Series
[ "Transform", "a", "datetime", "series", "into", "linux", "epoch", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L89-L102
HDI-Project/RDT
rdt/transformers/datetime.py
DTTransformer.safe_date
def safe_date(self, x): """Transform x[self.col_name] into a date string. Args: x(dict like / pandas.Series): Row containing data to cast safely. Returns: str """ t = x[self.col_name] if np.isnan(t): return t elif np.isposinf(t): t = sys.maxsize elif np.isneginf(t): t = -sys.maxsize tmp = time.localtime(float(t) / 1e9) return time.strftime(self.date_format, tmp)
python
def safe_date(self, x): """Transform x[self.col_name] into a date string. Args: x(dict like / pandas.Series): Row containing data to cast safely. Returns: str """ t = x[self.col_name] if np.isnan(t): return t elif np.isposinf(t): t = sys.maxsize elif np.isneginf(t): t = -sys.maxsize tmp = time.localtime(float(t) / 1e9) return time.strftime(self.date_format, tmp)
[ "def", "safe_date", "(", "self", ",", "x", ")", ":", "t", "=", "x", "[", "self", ".", "col_name", "]", "if", "np", ".", "isnan", "(", "t", ")", ":", "return", "t", "elif", "np", ".", "isposinf", "(", "t", ")", ":", "t", "=", "sys", ".", "ma...
Transform x[self.col_name] into a date string. Args: x(dict like / pandas.Series): Row containing data to cast safely. Returns: str
[ "Transform", "x", "[", "self", ".", "col_name", "]", "into", "a", "date", "string", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L104-L125
brbsix/pip-utils
setup.py
long_description
def long_description(): """Return the contents of README.rst (with badging removed).""" # use re.compile() for flags support in Python 2.6 pattern = re.compile(r'\n^\.\. start-badges.*^\.\. end-badges\n', flags=re.M | re.S) return pattern.sub('', read('README.rst'))
python
def long_description(): """Return the contents of README.rst (with badging removed).""" # use re.compile() for flags support in Python 2.6 pattern = re.compile(r'\n^\.\. start-badges.*^\.\. end-badges\n', flags=re.M | re.S) return pattern.sub('', read('README.rst'))
[ "def", "long_description", "(", ")", ":", "# use re.compile() for flags support in Python 2.6", "pattern", "=", "re", ".", "compile", "(", "r'\\n^\\.\\. start-badges.*^\\.\\. end-badges\\n'", ",", "flags", "=", "re", ".", "M", "|", "re", ".", "S", ")", "return", "pa...
Return the contents of README.rst (with badging removed).
[ "Return", "the", "contents", "of", "README", ".", "rst", "(", "with", "badging", "removed", ")", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/setup.py#L32-L37
jlmadurga/listenclosely
listenclosely/services/console.py
ConsoleMessageService.send_message
def send_message(self, id_service, content): """Write all messages to the stream in a thread-safe way.""" if not content: return with self._lock: try: message = "Message: %s to %s" % (content, id_service) self.write_message(message) self.stream.flush() # flush after each message return "message_id" except Exception: if not self.fail_silently: raise
python
def send_message(self, id_service, content): """Write all messages to the stream in a thread-safe way.""" if not content: return with self._lock: try: message = "Message: %s to %s" % (content, id_service) self.write_message(message) self.stream.flush() # flush after each message return "message_id" except Exception: if not self.fail_silently: raise
[ "def", "send_message", "(", "self", ",", "id_service", ",", "content", ")", ":", "if", "not", "content", ":", "return", "with", "self", ".", "_lock", ":", "try", ":", "message", "=", "\"Message: %s to %s\"", "%", "(", "content", ",", "id_service", ")", "...
Write all messages to the stream in a thread-safe way.
[ "Write", "all", "messages", "to", "the", "stream", "in", "a", "thread", "-", "safe", "way", "." ]
train
https://github.com/jlmadurga/listenclosely/blob/d6df9110c3ed6fd337e0236cccbe4d931bf217b0/listenclosely/services/console.py#L16-L28
michal-stuglik/django-blastplus
blastplus/features/record.py
Hsp.chop_sequence
def chop_sequence(sequence, limit_length): """Input sequence is divided on smaller non-overlapping sequences with set length. """ return [sequence[i:i + limit_length] for i in range(0, len(sequence), limit_length)]
python
def chop_sequence(sequence, limit_length): """Input sequence is divided on smaller non-overlapping sequences with set length. """ return [sequence[i:i + limit_length] for i in range(0, len(sequence), limit_length)]
[ "def", "chop_sequence", "(", "sequence", ",", "limit_length", ")", ":", "return", "[", "sequence", "[", "i", ":", "i", "+", "limit_length", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "sequence", ")", ",", "limit_length", ")", "]" ]
Input sequence is divided on smaller non-overlapping sequences with set length.
[ "Input", "sequence", "is", "divided", "on", "smaller", "non", "-", "overlapping", "sequences", "with", "set", "length", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/features/record.py#L35-L37
michal-stuglik/django-blastplus
blastplus/features/record.py
Hsp.get_tabular_str
def get_tabular_str(self): """Creates table-like string from fields. """ hsp_string = "" try: hsp_list = [ {"length": self.align_length}, {"e-value": self.expect}, {"score": self.score}, {"identities": self.identities}, {"positives": self.positives}, {"bits": self.bits}, {"query start": self.query_start}, {"query end": self.query_end}, {"subject start": self.sbjct_start}, {"subject end": self.sbjct_end}, ] for h in hsp_list: for k, v in h.items(): hsp_string += "{}\t{}\n".format(k, v) except: pass return hsp_string
python
def get_tabular_str(self): """Creates table-like string from fields. """ hsp_string = "" try: hsp_list = [ {"length": self.align_length}, {"e-value": self.expect}, {"score": self.score}, {"identities": self.identities}, {"positives": self.positives}, {"bits": self.bits}, {"query start": self.query_start}, {"query end": self.query_end}, {"subject start": self.sbjct_start}, {"subject end": self.sbjct_end}, ] for h in hsp_list: for k, v in h.items(): hsp_string += "{}\t{}\n".format(k, v) except: pass return hsp_string
[ "def", "get_tabular_str", "(", "self", ")", ":", "hsp_string", "=", "\"\"", "try", ":", "hsp_list", "=", "[", "{", "\"length\"", ":", "self", ".", "align_length", "}", ",", "{", "\"e-value\"", ":", "self", ".", "expect", "}", ",", "{", "\"score\"", ":"...
Creates table-like string from fields.
[ "Creates", "table", "-", "like", "string", "from", "fields", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/features/record.py#L51-L75
michal-stuglik/django-blastplus
blastplus/features/record.py
Alignment.best_identities
def best_identities(self): """Returns identities of the best HSP in alignment. """ if len(self.hsp_list) > 0: return round(float(self.hsp_list[0].identities) / float(self.hsp_list[0].align_length) * 100, 1)
python
def best_identities(self): """Returns identities of the best HSP in alignment. """ if len(self.hsp_list) > 0: return round(float(self.hsp_list[0].identities) / float(self.hsp_list[0].align_length) * 100, 1)
[ "def", "best_identities", "(", "self", ")", ":", "if", "len", "(", "self", ".", "hsp_list", ")", ">", "0", ":", "return", "round", "(", "float", "(", "self", ".", "hsp_list", "[", "0", "]", ".", "identities", ")", "/", "float", "(", "self", ".", ...
Returns identities of the best HSP in alignment.
[ "Returns", "identities", "of", "the", "best", "HSP", "in", "alignment", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/features/record.py#L114-L117
michal-stuglik/django-blastplus
blastplus/features/record.py
Alignment.get_id
def get_id(self): """Returns unique id of an alignment. """ return hash(str(self.title) + str(self.best_score()) + str(self.hit_def))
python
def get_id(self): """Returns unique id of an alignment. """ return hash(str(self.title) + str(self.best_score()) + str(self.hit_def))
[ "def", "get_id", "(", "self", ")", ":", "return", "hash", "(", "str", "(", "self", ".", "title", ")", "+", "str", "(", "self", ".", "best_score", "(", ")", ")", "+", "str", "(", "self", ".", "hit_def", ")", ")" ]
Returns unique id of an alignment.
[ "Returns", "unique", "id", "of", "an", "alignment", "." ]
train
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/features/record.py#L119-L121
mattupstate/flask-stache
flask_stache.py
render_template
def render_template(template, **context): """Renders a given template and context. :param template: The template name :param context: the variables that should be available in the context of the template. """ parts = template.split('/') renderer = _get_renderer(parts[:-1]) return renderer.render(renderer.load_template(parts[-1:][0]), context)
python
def render_template(template, **context): """Renders a given template and context. :param template: The template name :param context: the variables that should be available in the context of the template. """ parts = template.split('/') renderer = _get_renderer(parts[:-1]) return renderer.render(renderer.load_template(parts[-1:][0]), context)
[ "def", "render_template", "(", "template", ",", "*", "*", "context", ")", ":", "parts", "=", "template", ".", "split", "(", "'/'", ")", "renderer", "=", "_get_renderer", "(", "parts", "[", ":", "-", "1", "]", ")", "return", "renderer", ".", "render", ...
Renders a given template and context. :param template: The template name :param context: the variables that should be available in the context of the template.
[ "Renders", "a", "given", "template", "and", "context", "." ]
train
https://github.com/mattupstate/flask-stache/blob/b3bbd326bdbcab04f7eec04dae20e74c1713942b/flask_stache.py#L41-L50
gersolar/netcdf
netcdf/netcdf.py
open
def open(pattern, read_only=False): """ Return a root descriptor to work with one or multiple NetCDF files. Keyword arguments: pattern -- a list of filenames or a string pattern. """ root = NCObject.open(pattern, read_only=read_only) return root, root.is_new
python
def open(pattern, read_only=False): """ Return a root descriptor to work with one or multiple NetCDF files. Keyword arguments: pattern -- a list of filenames or a string pattern. """ root = NCObject.open(pattern, read_only=read_only) return root, root.is_new
[ "def", "open", "(", "pattern", ",", "read_only", "=", "False", ")", ":", "root", "=", "NCObject", ".", "open", "(", "pattern", ",", "read_only", "=", "read_only", ")", "return", "root", ",", "root", ".", "is_new" ]
Return a root descriptor to work with one or multiple NetCDF files. Keyword arguments: pattern -- a list of filenames or a string pattern.
[ "Return", "a", "root", "descriptor", "to", "work", "with", "one", "or", "multiple", "NetCDF", "files", "." ]
train
https://github.com/gersolar/netcdf/blob/cae82225be98586d7516bbfc5aafa8f2a2b266c4/netcdf/netcdf.py#L276-L284
gersolar/netcdf
netcdf/netcdf.py
getvar
def getvar(root, name, vtype='', dimensions=(), digits=0, fill_value=None, source=None): """ Return a variable from a NCFile or NCPackage instance. If the variable doesn't exists create it. Keyword arguments: root -- the root descriptor returned by the 'open' function name -- the name of the variable vtype -- the type of each value, ex ['f4', 'i4', 'i1', 'S1'] (default '') dimensions -- the tuple with dimensions name of the variables (default ()) digits -- the precision required when using a 'f4' vtype (default 0) fill_value -- the initial value used in the creation time (default None) source -- the source variable to be copied (default None) """ return root.getvar(name, vtype, dimensions, digits, fill_value, source)
python
def getvar(root, name, vtype='', dimensions=(), digits=0, fill_value=None, source=None): """ Return a variable from a NCFile or NCPackage instance. If the variable doesn't exists create it. Keyword arguments: root -- the root descriptor returned by the 'open' function name -- the name of the variable vtype -- the type of each value, ex ['f4', 'i4', 'i1', 'S1'] (default '') dimensions -- the tuple with dimensions name of the variables (default ()) digits -- the precision required when using a 'f4' vtype (default 0) fill_value -- the initial value used in the creation time (default None) source -- the source variable to be copied (default None) """ return root.getvar(name, vtype, dimensions, digits, fill_value, source)
[ "def", "getvar", "(", "root", ",", "name", ",", "vtype", "=", "''", ",", "dimensions", "=", "(", ")", ",", "digits", "=", "0", ",", "fill_value", "=", "None", ",", "source", "=", "None", ")", ":", "return", "root", ".", "getvar", "(", "name", ","...
Return a variable from a NCFile or NCPackage instance. If the variable doesn't exists create it. Keyword arguments: root -- the root descriptor returned by the 'open' function name -- the name of the variable vtype -- the type of each value, ex ['f4', 'i4', 'i1', 'S1'] (default '') dimensions -- the tuple with dimensions name of the variables (default ()) digits -- the precision required when using a 'f4' vtype (default 0) fill_value -- the initial value used in the creation time (default None) source -- the source variable to be copied (default None)
[ "Return", "a", "variable", "from", "a", "NCFile", "or", "NCPackage", "instance", ".", "If", "the", "variable", "doesn", "t", "exists", "create", "it", "." ]
train
https://github.com/gersolar/netcdf/blob/cae82225be98586d7516bbfc5aafa8f2a2b266c4/netcdf/netcdf.py#L300-L315
gersolar/netcdf
netcdf/netcdf.py
loader
def loader(pattern, dimensions=None, distributed_dim='time', read_only=False): """ It provide a root descriptor to be used inside a with statement. It automatically close the root when the with statement finish. Keyword arguments: root -- the root descriptor returned by the 'open' function """ if dimensions: root = tailor(pattern, dimensions, distributed_dim, read_only=read_only) else: root, _ = open(pattern, read_only=read_only) yield root root.close()
python
def loader(pattern, dimensions=None, distributed_dim='time', read_only=False): """ It provide a root descriptor to be used inside a with statement. It automatically close the root when the with statement finish. Keyword arguments: root -- the root descriptor returned by the 'open' function """ if dimensions: root = tailor(pattern, dimensions, distributed_dim, read_only=read_only) else: root, _ = open(pattern, read_only=read_only) yield root root.close()
[ "def", "loader", "(", "pattern", ",", "dimensions", "=", "None", ",", "distributed_dim", "=", "'time'", ",", "read_only", "=", "False", ")", ":", "if", "dimensions", ":", "root", "=", "tailor", "(", "pattern", ",", "dimensions", ",", "distributed_dim", ","...
It provide a root descriptor to be used inside a with statement. It automatically close the root when the with statement finish. Keyword arguments: root -- the root descriptor returned by the 'open' function
[ "It", "provide", "a", "root", "descriptor", "to", "be", "used", "inside", "a", "with", "statement", ".", "It", "automatically", "close", "the", "root", "when", "the", "with", "statement", "finish", "." ]
train
https://github.com/gersolar/netcdf/blob/cae82225be98586d7516bbfc5aafa8f2a2b266c4/netcdf/netcdf.py#L342-L355
carver/web3utils.py
web3utils/contracts.py
dict_copy
def dict_copy(func): "copy dict args, to avoid modifying caller's copy" def proxy(*args, **kwargs): new_args = [] new_kwargs = {} for var in kwargs: if isinstance(kwargs[var], dict): new_kwargs[var] = dict(kwargs[var]) else: new_kwargs[var] = kwargs[var] for arg in args: if isinstance(arg, dict): new_args.append(dict(arg)) else: new_args.append(arg) return func(*new_args, **new_kwargs) return proxy
python
def dict_copy(func): "copy dict args, to avoid modifying caller's copy" def proxy(*args, **kwargs): new_args = [] new_kwargs = {} for var in kwargs: if isinstance(kwargs[var], dict): new_kwargs[var] = dict(kwargs[var]) else: new_kwargs[var] = kwargs[var] for arg in args: if isinstance(arg, dict): new_args.append(dict(arg)) else: new_args.append(arg) return func(*new_args, **new_kwargs) return proxy
[ "def", "dict_copy", "(", "func", ")", ":", "def", "proxy", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "new_args", "=", "[", "]", "new_kwargs", "=", "{", "}", "for", "var", "in", "kwargs", ":", "if", "isinstance", "(", "kwargs", "[", "va...
copy dict args, to avoid modifying caller's copy
[ "copy", "dict", "args", "to", "avoid", "modifying", "caller", "s", "copy" ]
train
https://github.com/carver/web3utils.py/blob/81aa6b55f64dc857c604d5d071a37e0de6cd63ab/web3utils/contracts.py#L74-L90
pudo/banal
banal/lists.py
is_listish
def is_listish(obj): """Check if something quacks like a list.""" if isinstance(obj, (list, tuple, set)): return True return is_sequence(obj)
python
def is_listish(obj): """Check if something quacks like a list.""" if isinstance(obj, (list, tuple, set)): return True return is_sequence(obj)
[ "def", "is_listish", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "return", "True", "return", "is_sequence", "(", "obj", ")" ]
Check if something quacks like a list.
[ "Check", "if", "something", "quacks", "like", "a", "list", "." ]
train
https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/lists.py#L12-L16
pudo/banal
banal/lists.py
unique_list
def unique_list(lst): """Make a list unique, retaining order of initial appearance.""" uniq = [] for item in lst: if item not in uniq: uniq.append(item) return uniq
python
def unique_list(lst): """Make a list unique, retaining order of initial appearance.""" uniq = [] for item in lst: if item not in uniq: uniq.append(item) return uniq
[ "def", "unique_list", "(", "lst", ")", ":", "uniq", "=", "[", "]", "for", "item", "in", "lst", ":", "if", "item", "not", "in", "uniq", ":", "uniq", ".", "append", "(", "item", ")", "return", "uniq" ]
Make a list unique, retaining order of initial appearance.
[ "Make", "a", "list", "unique", "retaining", "order", "of", "initial", "appearance", "." ]
train
https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/lists.py#L19-L25
APSL/transmanager
transmanager/management/commands/export_text_for_translations.py
Command._get_main_language
def _get_main_language(): """ returns the main language :return: """ try: main_language = TransLanguage.objects.filter(main_language=True).get() return main_language.code except TransLanguage.DoesNotExist: return 'es'
python
def _get_main_language(): """ returns the main language :return: """ try: main_language = TransLanguage.objects.filter(main_language=True).get() return main_language.code except TransLanguage.DoesNotExist: return 'es'
[ "def", "_get_main_language", "(", ")", ":", "try", ":", "main_language", "=", "TransLanguage", ".", "objects", ".", "filter", "(", "main_language", "=", "True", ")", ".", "get", "(", ")", "return", "main_language", ".", "code", "except", "TransLanguage", "."...
returns the main language :return:
[ "returns", "the", "main", "language", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/management/commands/export_text_for_translations.py#L51-L60
APSL/transmanager
transmanager/management/commands/export_text_for_translations.py
Command.fields_need_translation
def fields_need_translation(self, elem, destination_lang): """ Detect if the tuple needs translation and which fields has to be translated :param elem :param destination_lang: :return: """ fields = self._get_translated_field_names(elem) elem_langs = elem.get_available_languages() # if we don't have a translation for the destination lang we have to include the tuple if destination_lang not in elem_langs: return fields # we have the translation, we decide which fields we need to translate. we have to get the translation first translation = get_translation(elem, destination_lang) result = [] for field in fields: value = getattr(translation, field, '') if not value or value.strip() == '': result.append(field) return result
python
def fields_need_translation(self, elem, destination_lang): """ Detect if the tuple needs translation and which fields has to be translated :param elem :param destination_lang: :return: """ fields = self._get_translated_field_names(elem) elem_langs = elem.get_available_languages() # if we don't have a translation for the destination lang we have to include the tuple if destination_lang not in elem_langs: return fields # we have the translation, we decide which fields we need to translate. we have to get the translation first translation = get_translation(elem, destination_lang) result = [] for field in fields: value = getattr(translation, field, '') if not value or value.strip() == '': result.append(field) return result
[ "def", "fields_need_translation", "(", "self", ",", "elem", ",", "destination_lang", ")", ":", "fields", "=", "self", ".", "_get_translated_field_names", "(", "elem", ")", "elem_langs", "=", "elem", ".", "get_available_languages", "(", ")", "# if we don't have a tra...
Detect if the tuple needs translation and which fields has to be translated :param elem :param destination_lang: :return:
[ "Detect", "if", "the", "tuple", "needs", "translation", "and", "which", "fields", "has", "to", "be", "translated", ":", "param", "elem", ":", "param", "destination_lang", ":", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/management/commands/export_text_for_translations.py#L71-L93
raamana/pyradigm
pyradigm/pyradigm.py
check_compatibility
def check_compatibility(datasets, reqd_num_features=None): """ Checks whether the given MLdataset instances are compatible i.e. with same set of subjects, each beloning to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is perfomed), or list of corresponding dimensionalities for each input dataset """ from collections import Iterable if not isinstance(datasets, Iterable): raise TypeError('Input must be an iterable ' 'i.e. (list/tuple) of MLdataset/similar instances') datasets = list(datasets) # to make it indexable if coming from a set num_datasets = len(datasets) check_dimensionality = False dim_mismatch = False if reqd_num_features is not None: if isinstance(reqd_num_features, Iterable): if len(reqd_num_features) != num_datasets: raise ValueError('Specify dimensionality for exactly {} datasets.' 
' Given for a different number {}' ''.format(num_datasets, len(reqd_num_features))) reqd_num_features = list(map(int, reqd_num_features)) else: # same dimensionality for all reqd_num_features = [int(reqd_num_features)] * num_datasets check_dimensionality = True else: # to enable iteration reqd_num_features = [None,] * num_datasets pivot = datasets[0] if not isinstance(pivot, MLDataset): pivot = MLDataset(pivot) if check_dimensionality and pivot.num_features != reqd_num_features[0]: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_num_features[0], pivot.num_features)) dim_mismatch = True compatible = list() for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]): if not isinstance(ds, MLDataset): ds = MLDataset(ds) is_compatible = True # compound bool will short-circuit, not optim required if pivot.num_samples != ds.num_samples \ or pivot.keys != ds.keys \ or pivot.classes != ds.classes: is_compatible = False if check_dimensionality and reqd_dim != ds.num_features: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_dim, ds.num_features)) dim_mismatch = True compatible.append(is_compatible) return all(compatible), compatible, dim_mismatch, \ (pivot.num_samples, reqd_num_features)
python
def check_compatibility(datasets, reqd_num_features=None): """ Checks whether the given MLdataset instances are compatible i.e. with same set of subjects, each beloning to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is perfomed), or list of corresponding dimensionalities for each input dataset """ from collections import Iterable if not isinstance(datasets, Iterable): raise TypeError('Input must be an iterable ' 'i.e. (list/tuple) of MLdataset/similar instances') datasets = list(datasets) # to make it indexable if coming from a set num_datasets = len(datasets) check_dimensionality = False dim_mismatch = False if reqd_num_features is not None: if isinstance(reqd_num_features, Iterable): if len(reqd_num_features) != num_datasets: raise ValueError('Specify dimensionality for exactly {} datasets.' 
' Given for a different number {}' ''.format(num_datasets, len(reqd_num_features))) reqd_num_features = list(map(int, reqd_num_features)) else: # same dimensionality for all reqd_num_features = [int(reqd_num_features)] * num_datasets check_dimensionality = True else: # to enable iteration reqd_num_features = [None,] * num_datasets pivot = datasets[0] if not isinstance(pivot, MLDataset): pivot = MLDataset(pivot) if check_dimensionality and pivot.num_features != reqd_num_features[0]: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_num_features[0], pivot.num_features)) dim_mismatch = True compatible = list() for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]): if not isinstance(ds, MLDataset): ds = MLDataset(ds) is_compatible = True # compound bool will short-circuit, not optim required if pivot.num_samples != ds.num_samples \ or pivot.keys != ds.keys \ or pivot.classes != ds.classes: is_compatible = False if check_dimensionality and reqd_dim != ds.num_features: warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.' ''.format(reqd_dim, ds.num_features)) dim_mismatch = True compatible.append(is_compatible) return all(compatible), compatible, dim_mismatch, \ (pivot.num_samples, reqd_num_features)
[ "def", "check_compatibility", "(", "datasets", ",", "reqd_num_features", "=", "None", ")", ":", "from", "collections", "import", "Iterable", "if", "not", "isinstance", "(", "datasets", ",", "Iterable", ")", ":", "raise", "TypeError", "(", "'Input must be an iterab...
Checks whether the given MLdataset instances are compatible i.e. with same set of subjects, each beloning to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is perfomed), or list of corresponding dimensionalities for each input dataset
[ "Checks", "whether", "the", "given", "MLdataset", "instances", "are", "compatible" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1480-L1573