repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | AbstractParserCache.get_capabilities_by_type | def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]:
"""
For all types that are supported,
lists all extensions that can be parsed into such a type.
For each extension, provides the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# List all types that can be parsed
for typ in self.get_all_supported_types():
res[typ] = self.get_capabilities_for_type(typ, strict_type_matching)
return res | python | def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]:
"""
For all types that are supported,
lists all extensions that can be parsed into such a type.
For each extension, provides the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# List all types that can be parsed
for typ in self.get_all_supported_types():
res[typ] = self.get_capabilities_for_type(typ, strict_type_matching)
return res | [
"def",
"get_capabilities_by_type",
"(",
"self",
",",
"strict_type_matching",
":",
"bool",
"=",
"False",
")",
"->",
"Dict",
"[",
"Type",
",",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Parser",
"]",
"]",
"]",
":",
"check_var",
"(",
"strict_type_ma... | For all types that are supported,
lists all extensions that can be parsed into such a type.
For each extension, provides the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return: | [
"For",
"all",
"types",
"that",
"are",
"supported",
"lists",
"all",
"extensions",
"that",
"can",
"be",
"parsed",
"into",
"such",
"a",
"type",
".",
"For",
"each",
"extension",
"provides",
"the",
"list",
"of",
"parsers",
"supported",
".",
"The",
"order",
"is"... | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L279-L300 | train | 54,600 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | AbstractParserCache.get_capabilities_by_ext | def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]:
"""
For all extensions that are supported,
lists all types that can be parsed from this extension.
For each type, provide the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# For all extensions that are supported,
for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching):
res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching)
return res | python | def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]:
"""
For all extensions that are supported,
lists all types that can be parsed from this extension.
For each type, provide the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# For all extensions that are supported,
for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching):
res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching)
return res | [
"def",
"get_capabilities_by_ext",
"(",
"self",
",",
"strict_type_matching",
":",
"bool",
"=",
"False",
")",
"->",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"Type",
",",
"Dict",
"[",
"str",
",",
"Parser",
"]",
"]",
"]",
":",
"check_var",
"(",
"strict_type_mat... | For all extensions that are supported,
lists all types that can be parsed from this extension.
For each type, provide the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return: | [
"For",
"all",
"extensions",
"that",
"are",
"supported",
"lists",
"all",
"types",
"that",
"can",
"be",
"parsed",
"from",
"this",
"extension",
".",
"For",
"each",
"type",
"provide",
"the",
"list",
"of",
"parsers",
"supported",
".",
"The",
"order",
"is",
"mos... | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L337-L356 | train | 54,601 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | AbstractParserCache.get_capabilities_for_ext | def get_capabilities_for_ext(self, ext, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Parser]]:
"""
Utility method to return, for a given file extension, all known ways to parse a file with this extension,
organized by target object type.
:param ext:
:param strict_type_matching:
:return:
"""
r = dict()
# List all types that can be parsed from this extension.
for typ in self.get_all_supported_types_for_ext(ext):
# Use the query to fill
matching = self.find_all_matching_parsers(strict_type_matching, desired_type=typ, required_ext=ext)[0]
# matching_list = matching[0] + matching[1] + matching[2]
# insert_element_to_dict_of_dicts_of_list(res, ext, typ, list(reversed(matching_list)))
r[typ] = dict()
exact = list(reversed(matching[2]))
if len(exact) > 0:
r[typ]['1_exact_match'] = exact
approx = list(reversed(matching[1]))
if len(approx) > 0:
r[typ]['2_approx_match'] = approx
generic = list(reversed(matching[0]))
if len(generic) > 0:
r[typ]['3_generic'] = generic
# insert_element_to_dict_of_dicts(res, ext, typ, matching_dict)
return r | python | def get_capabilities_for_ext(self, ext, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Parser]]:
"""
Utility method to return, for a given file extension, all known ways to parse a file with this extension,
organized by target object type.
:param ext:
:param strict_type_matching:
:return:
"""
r = dict()
# List all types that can be parsed from this extension.
for typ in self.get_all_supported_types_for_ext(ext):
# Use the query to fill
matching = self.find_all_matching_parsers(strict_type_matching, desired_type=typ, required_ext=ext)[0]
# matching_list = matching[0] + matching[1] + matching[2]
# insert_element_to_dict_of_dicts_of_list(res, ext, typ, list(reversed(matching_list)))
r[typ] = dict()
exact = list(reversed(matching[2]))
if len(exact) > 0:
r[typ]['1_exact_match'] = exact
approx = list(reversed(matching[1]))
if len(approx) > 0:
r[typ]['2_approx_match'] = approx
generic = list(reversed(matching[0]))
if len(generic) > 0:
r[typ]['3_generic'] = generic
# insert_element_to_dict_of_dicts(res, ext, typ, matching_dict)
return r | [
"def",
"get_capabilities_for_ext",
"(",
"self",
",",
"ext",
",",
"strict_type_matching",
":",
"bool",
"=",
"False",
")",
"->",
"Dict",
"[",
"Type",
",",
"Dict",
"[",
"str",
",",
"Parser",
"]",
"]",
":",
"r",
"=",
"dict",
"(",
")",
"# List all types that ... | Utility method to return, for a given file extension, all known ways to parse a file with this extension,
organized by target object type.
:param ext:
:param strict_type_matching:
:return: | [
"Utility",
"method",
"to",
"return",
"for",
"a",
"given",
"file",
"extension",
"all",
"known",
"ways",
"to",
"parse",
"a",
"file",
"with",
"this",
"extension",
"organized",
"by",
"target",
"object",
"type",
"."
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L361-L391 | train | 54,602 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ParserCache.get_all_supported_types_for_ext | def get_all_supported_types_for_ext(self, ext_to_match: str, strict_type_matching: bool = False) -> Set[Type]:
"""
Utility method to return the set of all supported types that may be parsed from files with the given extension.
ext=JOKER is a joker that means all extensions
:param ext_to_match:
:param strict_type_matching:
:return:
"""
matching = self.find_all_matching_parsers(required_ext=ext_to_match, strict=strict_type_matching)[0]
return {typ for types in [p.supported_types for p in (matching[0] + matching[1] + matching[2])]
for typ in types} | python | def get_all_supported_types_for_ext(self, ext_to_match: str, strict_type_matching: bool = False) -> Set[Type]:
"""
Utility method to return the set of all supported types that may be parsed from files with the given extension.
ext=JOKER is a joker that means all extensions
:param ext_to_match:
:param strict_type_matching:
:return:
"""
matching = self.find_all_matching_parsers(required_ext=ext_to_match, strict=strict_type_matching)[0]
return {typ for types in [p.supported_types for p in (matching[0] + matching[1] + matching[2])]
for typ in types} | [
"def",
"get_all_supported_types_for_ext",
"(",
"self",
",",
"ext_to_match",
":",
"str",
",",
"strict_type_matching",
":",
"bool",
"=",
"False",
")",
"->",
"Set",
"[",
"Type",
"]",
":",
"matching",
"=",
"self",
".",
"find_all_matching_parsers",
"(",
"required_ext... | Utility method to return the set of all supported types that may be parsed from files with the given extension.
ext=JOKER is a joker that means all extensions
:param ext_to_match:
:param strict_type_matching:
:return: | [
"Utility",
"method",
"to",
"return",
"the",
"set",
"of",
"all",
"supported",
"types",
"that",
"may",
"be",
"parsed",
"from",
"files",
"with",
"the",
"given",
"extension",
".",
"ext",
"=",
"JOKER",
"is",
"a",
"joker",
"that",
"means",
"all",
"extensions"
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L505-L516 | train | 54,603 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ParserCache.get_all_supported_exts_for_type | def get_all_supported_exts_for_type(self, type_to_match: Type[Any], strict: bool) -> Set[str]:
"""
Utility method to return the set of all supported file extensions that may be converted to objects of the given
type. type=JOKER is a joker that means all types
:param type_to_match:
:param strict:
:return:
"""
matching = self.find_all_matching_parsers(desired_type=type_to_match, strict=strict)[0]
return {ext for exts in [p.supported_exts for p in (matching[0] + matching[1] + matching[2])]
for ext in exts} | python | def get_all_supported_exts_for_type(self, type_to_match: Type[Any], strict: bool) -> Set[str]:
"""
Utility method to return the set of all supported file extensions that may be converted to objects of the given
type. type=JOKER is a joker that means all types
:param type_to_match:
:param strict:
:return:
"""
matching = self.find_all_matching_parsers(desired_type=type_to_match, strict=strict)[0]
return {ext for exts in [p.supported_exts for p in (matching[0] + matching[1] + matching[2])]
for ext in exts} | [
"def",
"get_all_supported_exts_for_type",
"(",
"self",
",",
"type_to_match",
":",
"Type",
"[",
"Any",
"]",
",",
"strict",
":",
"bool",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"matching",
"=",
"self",
".",
"find_all_matching_parsers",
"(",
"desired_type",
"="... | Utility method to return the set of all supported file extensions that may be converted to objects of the given
type. type=JOKER is a joker that means all types
:param type_to_match:
:param strict:
:return: | [
"Utility",
"method",
"to",
"return",
"the",
"set",
"of",
"all",
"supported",
"file",
"extensions",
"that",
"may",
"be",
"converted",
"to",
"objects",
"of",
"the",
"given",
"type",
".",
"type",
"=",
"JOKER",
"is",
"a",
"joker",
"that",
"means",
"all",
"ty... | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L518-L529 | train | 54,604 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ParserCache.find_all_matching_parsers | def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \
-> Tuple[Tuple[List[Parser], List[Parser], List[Parser]],
List[Parser], List[Parser], List[Parser]]:
"""
Implementation of the parent method by lookin into the registry to find the most appropriate parsers to use in
order
:param strict:
:param desired_type: the desired type, or 'JOKER' for a wildcard
:param required_ext:
:return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
"""
# if desired_type is JOKER and required_ext is JOKER:
# # Easy : return everything (GENERIC first, SPECIFIC then) in order (make a copy first :) )
# matching_parsers_generic = self._generic_parsers.copy()
# matching_parsers_approx = []
# matching_parsers_exact = self._specific_parsers.copy()
# no_type_match_but_ext_match = []
# no_ext_match_but_type_match = []
# no_match = []
# else:
#
# Although the above could be thought as an easy way to accelerate the process, it does not any more since the
# JOKER special cases are handled in parser.is_able_to_parse and converter.is_able_to_convert functions.
#
# It was also dangerous since it prevented us to get consistency across views - hence parser/converter
# implementors could get the feeling that their parser was correctly registered where it wasn't
check_var(strict, var_types=bool, var_name='strict')
# first transform any 'Any' type requirement into the official class for that
desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
matching_parsers_generic = []
matching_parsers_approx = []
matching_parsers_exact = []
no_type_match_but_ext_match = []
no_ext_match_but_type_match = []
no_match = []
# handle generic parsers first - except if desired type is Any
for p in self._generic_parsers:
match = p.is_able_to_parse(desired_type=desired_type, desired_ext=required_ext, strict=strict)
if match:
# match
if is_any_type(desired_type):
# special case : what is required is Any, so put in exact match
matching_parsers_exact.append(p)
else:
matching_parsers_generic.append(p)
else:
# check if by releasing the constraint on ext it makes a match
if p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
no_ext_match_but_type_match.append(p)
else:
# there will be no way to use this: it is a generic parser that is not able to parse this type...
# no_type_match_but_ext_match.append(p)
pass
# then the specific
for p in self._specific_parsers:
match, exact_match = p.is_able_to_parse_detailed(desired_type=desired_type,
desired_ext=required_ext,
strict=strict)
if match:
if is_any_type(desired_type):
# special case: dont register as a type match
no_type_match_but_ext_match.append(p)
else:
if exact_match is None or exact_match:
matching_parsers_exact.append(p)
else:
matching_parsers_approx.append(p)
else:
# try to set the type to a supported type to see if that makes a match
if p.is_able_to_parse(desired_type=JOKER, desired_ext=required_ext, strict=strict):
no_type_match_but_ext_match.append(p)
# try to set the ext to a supported ext to see if that makes a match
elif p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
no_ext_match_but_type_match.append(p)
# no match at all
else:
no_match.append(p)
return (matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), \
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match | python | def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \
-> Tuple[Tuple[List[Parser], List[Parser], List[Parser]],
List[Parser], List[Parser], List[Parser]]:
"""
Implementation of the parent method by lookin into the registry to find the most appropriate parsers to use in
order
:param strict:
:param desired_type: the desired type, or 'JOKER' for a wildcard
:param required_ext:
:return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
"""
# if desired_type is JOKER and required_ext is JOKER:
# # Easy : return everything (GENERIC first, SPECIFIC then) in order (make a copy first :) )
# matching_parsers_generic = self._generic_parsers.copy()
# matching_parsers_approx = []
# matching_parsers_exact = self._specific_parsers.copy()
# no_type_match_but_ext_match = []
# no_ext_match_but_type_match = []
# no_match = []
# else:
#
# Although the above could be thought as an easy way to accelerate the process, it does not any more since the
# JOKER special cases are handled in parser.is_able_to_parse and converter.is_able_to_convert functions.
#
# It was also dangerous since it prevented us to get consistency across views - hence parser/converter
# implementors could get the feeling that their parser was correctly registered where it wasn't
check_var(strict, var_types=bool, var_name='strict')
# first transform any 'Any' type requirement into the official class for that
desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
matching_parsers_generic = []
matching_parsers_approx = []
matching_parsers_exact = []
no_type_match_but_ext_match = []
no_ext_match_but_type_match = []
no_match = []
# handle generic parsers first - except if desired type is Any
for p in self._generic_parsers:
match = p.is_able_to_parse(desired_type=desired_type, desired_ext=required_ext, strict=strict)
if match:
# match
if is_any_type(desired_type):
# special case : what is required is Any, so put in exact match
matching_parsers_exact.append(p)
else:
matching_parsers_generic.append(p)
else:
# check if by releasing the constraint on ext it makes a match
if p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
no_ext_match_but_type_match.append(p)
else:
# there will be no way to use this: it is a generic parser that is not able to parse this type...
# no_type_match_but_ext_match.append(p)
pass
# then the specific
for p in self._specific_parsers:
match, exact_match = p.is_able_to_parse_detailed(desired_type=desired_type,
desired_ext=required_ext,
strict=strict)
if match:
if is_any_type(desired_type):
# special case: dont register as a type match
no_type_match_but_ext_match.append(p)
else:
if exact_match is None or exact_match:
matching_parsers_exact.append(p)
else:
matching_parsers_approx.append(p)
else:
# try to set the type to a supported type to see if that makes a match
if p.is_able_to_parse(desired_type=JOKER, desired_ext=required_ext, strict=strict):
no_type_match_but_ext_match.append(p)
# try to set the ext to a supported ext to see if that makes a match
elif p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
no_ext_match_but_type_match.append(p)
# no match at all
else:
no_match.append(p)
return (matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), \
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match | [
"def",
"find_all_matching_parsers",
"(",
"self",
",",
"strict",
":",
"bool",
",",
"desired_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
",",
"required_ext",
":",
"str",
"=",
"JOKER",
")",
"->",
"Tuple",
"[",
"Tuple",
"[",
"List",
"[",
"Parser",
"... | Implementation of the parent method by lookin into the registry to find the most appropriate parsers to use in
order
:param strict:
:param desired_type: the desired type, or 'JOKER' for a wildcard
:param required_ext:
:return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match | [
"Implementation",
"of",
"the",
"parent",
"method",
"by",
"lookin",
"into",
"the",
"registry",
"to",
"find",
"the",
"most",
"appropriate",
"parsers",
"to",
"use",
"in",
"order"
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L531-L621 | train | 54,605 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ParserRegistry._build_parser_for_fileobject_and_desiredtype | def _build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T],
logger: Logger = None) -> Dict[Type, Parser]:
"""
Builds a parser for each subtype of object_typ
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
"""
parsers = OrderedDict()
errors = OrderedDict()
try:
p = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem,
object_typ=object_typ,
logger=logger)
parsers[object_typ] = p
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
# do not explore subclasses for collections
if is_collection(object_typ, strict=True):
if len(errors) > 0:
raise next(iter(errors.values()))
else:
return parsers
# Finally create one such parser for each subclass
subclasses = get_all_subclasses(object_typ)
# Then for each subclass also try (with a configurable limit in nb of subclasses)
for subclass in subclasses[0:GLOBAL_CONFIG.dict_to_object_subclass_limit]:
try:
parsers[subclass] = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem,
object_typ=subclass,
logger=logger)
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
if len(subclasses) > GLOBAL_CONFIG.dict_to_object_subclass_limit:
warn('Type {} has more than {} subclasses, only {} were tried to convert it, with no success. You '
'can raise this limit by setting the appropriate option with `parsyfiles_global_config()`'
''.format(object_typ, len(subclasses), GLOBAL_CONFIG.dict_to_object_subclass_limit))
return parsers | python | def _build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T],
logger: Logger = None) -> Dict[Type, Parser]:
"""
Builds a parser for each subtype of object_typ
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
"""
parsers = OrderedDict()
errors = OrderedDict()
try:
p = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem,
object_typ=object_typ,
logger=logger)
parsers[object_typ] = p
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
# do not explore subclasses for collections
if is_collection(object_typ, strict=True):
if len(errors) > 0:
raise next(iter(errors.values()))
else:
return parsers
# Finally create one such parser for each subclass
subclasses = get_all_subclasses(object_typ)
# Then for each subclass also try (with a configurable limit in nb of subclasses)
for subclass in subclasses[0:GLOBAL_CONFIG.dict_to_object_subclass_limit]:
try:
parsers[subclass] = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem,
object_typ=subclass,
logger=logger)
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
if len(subclasses) > GLOBAL_CONFIG.dict_to_object_subclass_limit:
warn('Type {} has more than {} subclasses, only {} were tried to convert it, with no success. You '
'can raise this limit by setting the appropriate option with `parsyfiles_global_config()`'
''.format(object_typ, len(subclasses), GLOBAL_CONFIG.dict_to_object_subclass_limit))
return parsers | [
"def",
"_build_parser_for_fileobject_and_desiredtype",
"(",
"self",
",",
"obj_on_filesystem",
":",
"PersistedObject",
",",
"object_typ",
":",
"Type",
"[",
"T",
"]",
",",
"logger",
":",
"Logger",
"=",
"None",
")",
"->",
"Dict",
"[",
"Type",
",",
"Parser",
"]",
... | Builds a parser for each subtype of object_typ
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return: | [
"Builds",
"a",
"parser",
"for",
"each",
"subtype",
"of",
"object_typ"
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L717-L770 | train | 54,606 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ConversionFinder.get_all_conversion_chains_to_type | def get_all_conversion_chains_to_type(self, to_type: Type[Any])\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters to a given type
:param to_type:
:return:
"""
return self.get_all_conversion_chains(to_type=to_type) | python | def get_all_conversion_chains_to_type(self, to_type: Type[Any])\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters to a given type
:param to_type:
:return:
"""
return self.get_all_conversion_chains(to_type=to_type) | [
"def",
"get_all_conversion_chains_to_type",
"(",
"self",
",",
"to_type",
":",
"Type",
"[",
"Any",
"]",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Converter",
"]",
",",
"List",
"[",
"Converter",
"]",
",",
"List",
"[",
"Converter",
"]",
"]",
":",
"return",
"... | Utility method to find all converters to a given type
:param to_type:
:return: | [
"Utility",
"method",
"to",
"find",
"all",
"converters",
"to",
"a",
"given",
"type"
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L922-L930 | train | 54,607 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ConversionFinder.get_all_conversion_chains_from_type | def get_all_conversion_chains_from_type(self, from_type: Type[Any]) \
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters from a given type.
:param from_type:
:return:
"""
return self.get_all_conversion_chains(from_type=from_type) | python | def get_all_conversion_chains_from_type(self, from_type: Type[Any]) \
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters from a given type.
:param from_type:
:return:
"""
return self.get_all_conversion_chains(from_type=from_type) | [
"def",
"get_all_conversion_chains_from_type",
"(",
"self",
",",
"from_type",
":",
"Type",
"[",
"Any",
"]",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Converter",
"]",
",",
"List",
"[",
"Converter",
"]",
",",
"List",
"[",
"Converter",
"]",
"]",
":",
"return",... | Utility method to find all converters from a given type.
:param from_type:
:return: | [
"Utility",
"method",
"to",
"find",
"all",
"converters",
"from",
"a",
"given",
"type",
"."
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L932-L940 | train | 54,608 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ConversionFinder.get_all_conversion_chains | def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
"""
pass | python | def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
"""
pass | [
"def",
"get_all_conversion_chains",
"(",
"self",
",",
"from_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
",",
"to_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Converter",
"]",
",",
"List",
"[",
"Conv... | Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact | [
"Utility",
"method",
"to",
"find",
"all",
"converters",
"or",
"conversion",
"chains",
"matching",
"the",
"provided",
"query",
"."
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L943-L958 | train | 54,609 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ConverterCache.get_all_conversion_chains | def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER) \
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find matching converters or conversion chains.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact.
The order of each list is from *less relevant* to *most relevant*
"""
if from_type is JOKER and to_type is JOKER:
matching_dest_generic = self._generic_nonstrict_conversion_chains.copy() + \
self._generic_conversion_chains.copy()
matching_dest_approx = []
matching_dest_exact = self._specific_non_strict_conversion_chains.copy() + \
self._specific_conversion_chains.copy()
else:
matching_dest_generic, matching_dest_approx, matching_dest_exact = [], [], []
# first transform any 'Any' type requirement into the official class for that
to_type = get_validated_type(to_type, 'to_type', enforce_not_joker=False)
# handle generic converters first
for c in (self._generic_nonstrict_conversion_chains + self._generic_conversion_chains):
match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
from_type=from_type,
to_type=to_type)
if match:
# match
if is_any_type(to_type):
# special case where desired to_type is already Any : in that case a generic converter will
# appear in 'exact match'
matching_dest_exact.append(c)
else:
# this is a match from a generic parser to a specific type : add in 'generic' cataegory
matching_dest_generic.append(c)
# then the specific
for c in (self._specific_non_strict_conversion_chains + self._specific_conversion_chains):
match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
from_type=from_type,
to_type=to_type)
if match:
if not is_any_type(to_type):
if dest_exact:
# we dont care if source is exact or approximate as long as dest is exact
matching_dest_exact.append(c)
else:
# this means that dest is approximate.
matching_dest_approx.append(c)
else:
# we only want to keep the generic ones, and they have already been added
pass
return matching_dest_generic, matching_dest_approx, matching_dest_exact | python | def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER) \
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find matching converters or conversion chains.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact.
The order of each list is from *less relevant* to *most relevant*
"""
if from_type is JOKER and to_type is JOKER:
matching_dest_generic = self._generic_nonstrict_conversion_chains.copy() + \
self._generic_conversion_chains.copy()
matching_dest_approx = []
matching_dest_exact = self._specific_non_strict_conversion_chains.copy() + \
self._specific_conversion_chains.copy()
else:
matching_dest_generic, matching_dest_approx, matching_dest_exact = [], [], []
# first transform any 'Any' type requirement into the official class for that
to_type = get_validated_type(to_type, 'to_type', enforce_not_joker=False)
# handle generic converters first
for c in (self._generic_nonstrict_conversion_chains + self._generic_conversion_chains):
match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
from_type=from_type,
to_type=to_type)
if match:
# match
if is_any_type(to_type):
# special case where desired to_type is already Any : in that case a generic converter will
# appear in 'exact match'
matching_dest_exact.append(c)
else:
# this is a match from a generic parser to a specific type : add in 'generic' cataegory
matching_dest_generic.append(c)
# then the specific
for c in (self._specific_non_strict_conversion_chains + self._specific_conversion_chains):
match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
from_type=from_type,
to_type=to_type)
if match:
if not is_any_type(to_type):
if dest_exact:
# we dont care if source is exact or approximate as long as dest is exact
matching_dest_exact.append(c)
else:
# this means that dest is approximate.
matching_dest_approx.append(c)
else:
# we only want to keep the generic ones, and they have already been added
pass
return matching_dest_generic, matching_dest_approx, matching_dest_exact | [
"def",
"get_all_conversion_chains",
"(",
"self",
",",
"from_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
",",
"to_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Converter",
"]",
",",
"List",
"[",
"Conv... | Utility method to find matching converters or conversion chains.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact.
The order of each list is from *less relevant* to *most relevant* | [
"Utility",
"method",
"to",
"find",
"matching",
"converters",
"or",
"conversion",
"chains",
"."
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L1351-L1413 | train | 54,610 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ParserRegistryWithConverters.find_all_matching_parsers | def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \
-> Tuple[Tuple[List[Parser], List[Parser], List[Parser]],
List[Parser], List[Parser], List[Parser]]:
"""
Overrides the parent method to find parsers appropriate to a given extension and type.
This leverages both the parser registry and the converter registry to propose parsing chains in a relevant order
:param strict:
:param desired_type: the type of object to match.
:param required_ext: the required extension.
:return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
"""
# transform any 'Any' type requirement into the official class for that
desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
# (1) call the super method to find all parsers
matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \
super(ParserRegistryWithConverters, self).find_all_matching_parsers(strict=self.is_strict,
desired_type=desired_type,
required_ext=required_ext)
# these are ordered with 'preferred last'
matching_p_generic, matching_p_approx, matching_p_exact = matching
if desired_type is JOKER:
# then we want to try to append every possible converter chain, even if we have already found an exact match
# (exact match will probably contain all parsers in that case?...)
parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx \
+ matching_p_exact
else:
# then we can try to complete all the ones matching the extension (even the generic because combining them
# with a conversion chain might provide another way to reach the result - different from using the generic
# alone to reach the to_type)
parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx
# (2) find all conversion chains that lead to the expected result
matching_c_generic_to_type, matching_c_approx_to_type, matching_c_exact_to_type = \
self.get_all_conversion_chains_to_type(to_type=desired_type)
all_matching_converters = matching_c_generic_to_type + matching_c_approx_to_type + matching_c_exact_to_type
# (3) combine both (parser + conversion chain), and append to the appropriate list depending on the match
# -- (a) first Parsers matching EXT (not type) + Converters matching their type
# for all parsers able to parse this extension, and for all the types they support
#
# (we have to reverse the list because now we want 'preferred first'. Indeed we wish to prepend to the match
# lists in order not to hide the parser direct matches)
matching_p_generic_with_approx_chain, matching_p_approx_with_approx_chain, matching_p_exact_with_approx_chain\
= [], [], []
for parser in reversed(parsers_to_complete_with_converters):
for typ in parser.supported_types:
match_results = self._complete_parsers_with_converters(parser, typ, desired_type,
matching_c_generic_to_type,
matching_c_approx_to_type,
matching_c_exact_to_type)
# prepend the existing lists with the new match
matching_p_generic = match_results[1] + matching_p_generic
matching_p_approx = match_results[3] + matching_p_approx
matching_p_exact = match_results[5] + matching_p_exact
# store the approximate matchs in the separate lists
matching_p_generic_with_approx_chain = match_results[0] + matching_p_generic_with_approx_chain
matching_p_approx_with_approx_chain = match_results[2] + matching_p_approx_with_approx_chain
matching_p_exact_with_approx_chain = match_results[4] + matching_p_exact_with_approx_chain
# finally prepend the approximate match lists
matching_p_generic = matching_p_generic_with_approx_chain + matching_p_generic
matching_p_approx = matching_p_approx_with_approx_chain + matching_p_approx
matching_p_exact = matching_p_exact_with_approx_chain + matching_p_exact
# -- (b) then parsers that do not match at all (not the file extension nor the type): we can find parsing chains
# that make them at least match the type
#
# (we have to reverse it because it was 'best last', now it will be 'best first')
for parser in reversed(no_match):
for typ in parser.supported_types:
for converter in reversed(all_matching_converters):
# if converter is able to source from this parser
if converter.is_able_to_convert(self.is_strict, from_type=typ, to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, typ, converter):
# insert it at the beginning since it should have less priority
no_ext_match_but_type_match.insert(0, ParsingChain(parser, converter, strict=self.is_strict,
base_parser_chosen_dest_type=typ))
# Finally sort by chain length
matching_p_generic = sorted(matching_p_generic, key=len, reverse=True)
matching_p_approx = sorted(matching_p_approx, key=len, reverse=True)
matching_p_exact = sorted(matching_p_exact, key=len, reverse=True)
# Return
return (matching_p_generic, matching_p_approx, matching_p_exact), no_type_match_but_ext_match, \
no_ext_match_but_type_match, no_match | python | def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \
-> Tuple[Tuple[List[Parser], List[Parser], List[Parser]],
List[Parser], List[Parser], List[Parser]]:
"""
Overrides the parent method to find parsers appropriate to a given extension and type.
This leverages both the parser registry and the converter registry to propose parsing chains in a relevant order
:param strict:
:param desired_type: the type of object to match.
:param required_ext: the required extension.
:return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
"""
# transform any 'Any' type requirement into the official class for that
desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
# (1) call the super method to find all parsers
matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \
super(ParserRegistryWithConverters, self).find_all_matching_parsers(strict=self.is_strict,
desired_type=desired_type,
required_ext=required_ext)
# these are ordered with 'preferred last'
matching_p_generic, matching_p_approx, matching_p_exact = matching
if desired_type is JOKER:
# then we want to try to append every possible converter chain, even if we have already found an exact match
# (exact match will probably contain all parsers in that case?...)
parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx \
+ matching_p_exact
else:
# then we can try to complete all the ones matching the extension (even the generic because combining them
# with a conversion chain might provide another way to reach the result - different from using the generic
# alone to reach the to_type)
parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx
# (2) find all conversion chains that lead to the expected result
matching_c_generic_to_type, matching_c_approx_to_type, matching_c_exact_to_type = \
self.get_all_conversion_chains_to_type(to_type=desired_type)
all_matching_converters = matching_c_generic_to_type + matching_c_approx_to_type + matching_c_exact_to_type
# (3) combine both (parser + conversion chain), and append to the appropriate list depending on the match
# -- (a) first Parsers matching EXT (not type) + Converters matching their type
# for all parsers able to parse this extension, and for all the types they support
#
# (we have to reverse the list because now we want 'preferred first'. Indeed we wish to prepend to the match
# lists in order not to hide the parser direct matches)
matching_p_generic_with_approx_chain, matching_p_approx_with_approx_chain, matching_p_exact_with_approx_chain\
= [], [], []
for parser in reversed(parsers_to_complete_with_converters):
for typ in parser.supported_types:
match_results = self._complete_parsers_with_converters(parser, typ, desired_type,
matching_c_generic_to_type,
matching_c_approx_to_type,
matching_c_exact_to_type)
# prepend the existing lists with the new match
matching_p_generic = match_results[1] + matching_p_generic
matching_p_approx = match_results[3] + matching_p_approx
matching_p_exact = match_results[5] + matching_p_exact
# store the approximate matchs in the separate lists
matching_p_generic_with_approx_chain = match_results[0] + matching_p_generic_with_approx_chain
matching_p_approx_with_approx_chain = match_results[2] + matching_p_approx_with_approx_chain
matching_p_exact_with_approx_chain = match_results[4] + matching_p_exact_with_approx_chain
# finally prepend the approximate match lists
matching_p_generic = matching_p_generic_with_approx_chain + matching_p_generic
matching_p_approx = matching_p_approx_with_approx_chain + matching_p_approx
matching_p_exact = matching_p_exact_with_approx_chain + matching_p_exact
# -- (b) then parsers that do not match at all (not the file extension nor the type): we can find parsing chains
# that make them at least match the type
#
# (we have to reverse it because it was 'best last', now it will be 'best first')
for parser in reversed(no_match):
for typ in parser.supported_types:
for converter in reversed(all_matching_converters):
# if converter is able to source from this parser
if converter.is_able_to_convert(self.is_strict, from_type=typ, to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, typ, converter):
# insert it at the beginning since it should have less priority
no_ext_match_but_type_match.insert(0, ParsingChain(parser, converter, strict=self.is_strict,
base_parser_chosen_dest_type=typ))
# Finally sort by chain length
matching_p_generic = sorted(matching_p_generic, key=len, reverse=True)
matching_p_approx = sorted(matching_p_approx, key=len, reverse=True)
matching_p_exact = sorted(matching_p_exact, key=len, reverse=True)
# Return
return (matching_p_generic, matching_p_approx, matching_p_exact), no_type_match_but_ext_match, \
no_ext_match_but_type_match, no_match | [
"def",
"find_all_matching_parsers",
"(",
"self",
",",
"strict",
":",
"bool",
",",
"desired_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
",",
"required_ext",
":",
"str",
"=",
"JOKER",
")",
"->",
"Tuple",
"[",
"Tuple",
"[",
"List",
"[",
"Parser",
"... | Overrides the parent method to find parsers appropriate to a given extension and type.
This leverages both the parser registry and the converter registry to propose parsing chains in a relevant order
:param strict:
:param desired_type: the type of object to match.
:param required_ext: the required extension.
:return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match | [
"Overrides",
"the",
"parent",
"method",
"to",
"find",
"parsers",
"appropriate",
"to",
"a",
"given",
"extension",
"and",
"type",
".",
"This",
"leverages",
"both",
"the",
"parser",
"registry",
"and",
"the",
"converter",
"registry",
"to",
"propose",
"parsing",
"c... | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L1440-L1530 | train | 54,611 |
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ParserRegistryWithConverters._complete_parsers_with_converters | def _complete_parsers_with_converters(self, parser, parser_supported_type, desired_type, matching_c_generic_to_type,
matching_c_approx_to_type, matching_c_exact_to_type):
"""
Internal method to create parsing chains made of a parser and converters from the provided lists.
Once again a JOKER for a type means 'joker' here.
:param parser:
:param parser_supported_type:
:param desired_type:
:param matching_c_generic_to_type:
:param matching_c_approx_to_type:
:param matching_c_exact_to_type:
:return:
"""
matching_p_generic, matching_p_generic_with_approx_chain, \
matching_p_approx, matching_p_approx_with_approx_chain,\
matching_p_exact, matching_p_exact_with_approx_chain = [], [], [], [], [], []
# resolve Union and TypeVar
desired_types = get_alternate_types_resolving_forwardref_union_and_typevar(desired_type)
for desired_type in desired_types:
# first transform any 'Any' type requirement into the official class for that
desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
# ---- Generic converters - only if the parsed type is not already 'any'
if not is_any_type(parser_supported_type):
for cv in matching_c_generic_to_type:
# if the converter can attach to this parser, we have a matching parser !
# --start from strict
if cv.is_able_to_convert(strict=True,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=True,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_generic.append(chain)
# --then non-strict
elif (not self.strict) \
and cv.is_able_to_convert(strict=False,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=False,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_generic_with_approx_chain.append(chain)
# ---- Approx to_type
for cv in matching_c_approx_to_type:
# if the converter can attach to this parser, we have a matching parser !
# -- start from strict
if cv.is_able_to_convert(strict=True,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=True,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_approx.append(chain)
# then non-strict
elif (not self.strict) \
and cv.is_able_to_convert(strict=False,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=False,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_approx_with_approx_chain.append(chain)
# ---- Exact to_type
for cv in matching_c_exact_to_type:
# if the converter can attach to this parser, we have a matching parser !
if cv.is_able_to_convert(strict=True,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=True,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_exact.append(chain)
elif (not self.strict) \
and cv.is_able_to_convert(strict=False,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=False,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_exact_with_approx_chain.append(chain)
# Preferred is LAST, so approx should be first
return matching_p_generic_with_approx_chain, matching_p_generic, \
matching_p_approx_with_approx_chain, matching_p_approx, \
matching_p_exact_with_approx_chain, matching_p_exact | python | def _complete_parsers_with_converters(self, parser, parser_supported_type, desired_type, matching_c_generic_to_type,
matching_c_approx_to_type, matching_c_exact_to_type):
"""
Internal method to create parsing chains made of a parser and converters from the provided lists.
Once again a JOKER for a type means 'joker' here.
:param parser:
:param parser_supported_type:
:param desired_type:
:param matching_c_generic_to_type:
:param matching_c_approx_to_type:
:param matching_c_exact_to_type:
:return:
"""
matching_p_generic, matching_p_generic_with_approx_chain, \
matching_p_approx, matching_p_approx_with_approx_chain,\
matching_p_exact, matching_p_exact_with_approx_chain = [], [], [], [], [], []
# resolve Union and TypeVar
desired_types = get_alternate_types_resolving_forwardref_union_and_typevar(desired_type)
for desired_type in desired_types:
# first transform any 'Any' type requirement into the official class for that
desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
# ---- Generic converters - only if the parsed type is not already 'any'
if not is_any_type(parser_supported_type):
for cv in matching_c_generic_to_type:
# if the converter can attach to this parser, we have a matching parser !
# --start from strict
if cv.is_able_to_convert(strict=True,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=True,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_generic.append(chain)
# --then non-strict
elif (not self.strict) \
and cv.is_able_to_convert(strict=False,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=False,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_generic_with_approx_chain.append(chain)
# ---- Approx to_type
for cv in matching_c_approx_to_type:
# if the converter can attach to this parser, we have a matching parser !
# -- start from strict
if cv.is_able_to_convert(strict=True,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=True,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_approx.append(chain)
# then non-strict
elif (not self.strict) \
and cv.is_able_to_convert(strict=False,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=False,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_approx_with_approx_chain.append(chain)
# ---- Exact to_type
for cv in matching_c_exact_to_type:
# if the converter can attach to this parser, we have a matching parser !
if cv.is_able_to_convert(strict=True,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=True,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_exact.append(chain)
elif (not self.strict) \
and cv.is_able_to_convert(strict=False,
from_type=parser_supported_type,
to_type=desired_type):
if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv):
chain = ParsingChain(parser, cv, strict=False,
base_parser_chosen_dest_type=parser_supported_type)
# insert it at the beginning since it should have less priority
matching_p_exact_with_approx_chain.append(chain)
# Preferred is LAST, so approx should be first
return matching_p_generic_with_approx_chain, matching_p_generic, \
matching_p_approx_with_approx_chain, matching_p_approx, \
matching_p_exact_with_approx_chain, matching_p_exact | [
"def",
"_complete_parsers_with_converters",
"(",
"self",
",",
"parser",
",",
"parser_supported_type",
",",
"desired_type",
",",
"matching_c_generic_to_type",
",",
"matching_c_approx_to_type",
",",
"matching_c_exact_to_type",
")",
":",
"matching_p_generic",
",",
"matching_p_ge... | Internal method to create parsing chains made of a parser and converters from the provided lists.
Once again a JOKER for a type means 'joker' here.
:param parser:
:param parser_supported_type:
:param desired_type:
:param matching_c_generic_to_type:
:param matching_c_approx_to_type:
:param matching_c_exact_to_type:
:return: | [
"Internal",
"method",
"to",
"create",
"parsing",
"chains",
"made",
"of",
"a",
"parser",
"and",
"converters",
"from",
"the",
"provided",
"lists",
".",
"Once",
"again",
"a",
"JOKER",
"for",
"a",
"type",
"means",
"joker",
"here",
"."
] | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L1532-L1632 | train | 54,612 |
NiklasRosenstein-Python/nr-deprecated | nr/tools/versionupgrade.py | get_changed_files | def get_changed_files(include_staged=False):
"""
Returns a list of the files that changed in the Git repository. This is
used to check if the files that are supposed to be upgraded have changed.
If so, the upgrade will be prevented.
"""
process = subprocess.Popen(['git', 'status', '--porcelain'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, __ = process.communicate()
if process.returncode != 0:
raise ValueError(stdout)
files = []
for line in stdout.decode().split('\n'):
if not line or line.startswith('#'): continue
assert line[2] == ' '
if not include_staged and line[1] == ' ': continue
files.append(line[3:])
return files | python | def get_changed_files(include_staged=False):
"""
Returns a list of the files that changed in the Git repository. This is
used to check if the files that are supposed to be upgraded have changed.
If so, the upgrade will be prevented.
"""
process = subprocess.Popen(['git', 'status', '--porcelain'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, __ = process.communicate()
if process.returncode != 0:
raise ValueError(stdout)
files = []
for line in stdout.decode().split('\n'):
if not line or line.startswith('#'): continue
assert line[2] == ' '
if not include_staged and line[1] == ' ': continue
files.append(line[3:])
return files | [
"def",
"get_changed_files",
"(",
"include_staged",
"=",
"False",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'git'",
",",
"'status'",
",",
"'--porcelain'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subpro... | Returns a list of the files that changed in the Git repository. This is
used to check if the files that are supposed to be upgraded have changed.
If so, the upgrade will be prevented. | [
"Returns",
"a",
"list",
"of",
"the",
"files",
"that",
"changed",
"in",
"the",
"Git",
"repository",
".",
"This",
"is",
"used",
"to",
"check",
"if",
"the",
"files",
"that",
"are",
"supposed",
"to",
"be",
"upgraded",
"have",
"changed",
".",
"If",
"so",
"t... | f9f8b89ea1b084841a8ab65784eaf68852686b2a | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/tools/versionupgrade.py#L126-L144 | train | 54,613 |
ponty/entrypoint2 | entrypoint2/__init__.py | _parse_doc | def _parse_doc(docs):
"""
Converts a well-formed docstring into documentation
to be fed into argparse.
See signature_parser for details.
shorts: (-k for --keyword -k, or "from" for "frm/from")
metavars: (FILE for --input=FILE)
helps: (docs for --keyword: docs)
description: the stuff before
epilog: the stuff after
"""
name = "(?:[a-zA-Z][a-zA-Z0-9-_]*)"
re_var = re.compile(r"^ *(%s)(?: */(%s))? *:(.*)$" % (name, name))
re_opt = re.compile(r"^ *(?:(-[a-zA-Z0-9]),? +)?--(%s)(?: *=(%s))? *:(.*)$"
% (name, name))
shorts, metavars, helps, description, epilog = {}, {}, {}, "", ""
if docs:
for line in docs.split("\n"):
line = line.strip()
# remove starting ':param'
if line.startswith(':param'):
line = line[len(':param'):]
# skip ':rtype:' row
if line.startswith(':rtype:'):
continue
if line.strip() == "----":
break
m = re_var.match(line)
if m:
if epilog:
helps[prev] += epilog.strip()
epilog = ""
if m.group(2):
shorts[m.group(1)] = m.group(2)
helps[m.group(1)] = m.group(3).strip()
prev = m.group(1)
previndent = len(line) - len(line.lstrip())
continue
m = re_opt.match(line)
if m:
if epilog:
helps[prev] += epilog.strip()
epilog = ""
name = m.group(2).replace("-", "_")
helps[name] = m.group(4)
prev = name
if m.group(1):
shorts[name] = m.group(1)
if m.group(3):
metavars[name] = m.group(3)
previndent = len(line) - len(line.lstrip())
continue
if helps:
if line.startswith(" " * (previndent + 1)):
helps[prev] += "\n" + line.strip()
else:
epilog += "\n" + line.strip()
else:
description += "\n" + line.strip()
if line.strip():
previndent = len(line) - len(line.lstrip())
return shorts, metavars, helps, description, epilog | python | def _parse_doc(docs):
"""
Converts a well-formed docstring into documentation
to be fed into argparse.
See signature_parser for details.
shorts: (-k for --keyword -k, or "from" for "frm/from")
metavars: (FILE for --input=FILE)
helps: (docs for --keyword: docs)
description: the stuff before
epilog: the stuff after
"""
name = "(?:[a-zA-Z][a-zA-Z0-9-_]*)"
re_var = re.compile(r"^ *(%s)(?: */(%s))? *:(.*)$" % (name, name))
re_opt = re.compile(r"^ *(?:(-[a-zA-Z0-9]),? +)?--(%s)(?: *=(%s))? *:(.*)$"
% (name, name))
shorts, metavars, helps, description, epilog = {}, {}, {}, "", ""
if docs:
for line in docs.split("\n"):
line = line.strip()
# remove starting ':param'
if line.startswith(':param'):
line = line[len(':param'):]
# skip ':rtype:' row
if line.startswith(':rtype:'):
continue
if line.strip() == "----":
break
m = re_var.match(line)
if m:
if epilog:
helps[prev] += epilog.strip()
epilog = ""
if m.group(2):
shorts[m.group(1)] = m.group(2)
helps[m.group(1)] = m.group(3).strip()
prev = m.group(1)
previndent = len(line) - len(line.lstrip())
continue
m = re_opt.match(line)
if m:
if epilog:
helps[prev] += epilog.strip()
epilog = ""
name = m.group(2).replace("-", "_")
helps[name] = m.group(4)
prev = name
if m.group(1):
shorts[name] = m.group(1)
if m.group(3):
metavars[name] = m.group(3)
previndent = len(line) - len(line.lstrip())
continue
if helps:
if line.startswith(" " * (previndent + 1)):
helps[prev] += "\n" + line.strip()
else:
epilog += "\n" + line.strip()
else:
description += "\n" + line.strip()
if line.strip():
previndent = len(line) - len(line.lstrip())
return shorts, metavars, helps, description, epilog | [
"def",
"_parse_doc",
"(",
"docs",
")",
":",
"name",
"=",
"\"(?:[a-zA-Z][a-zA-Z0-9-_]*)\"",
"re_var",
"=",
"re",
".",
"compile",
"(",
"r\"^ *(%s)(?: */(%s))? *:(.*)$\"",
"%",
"(",
"name",
",",
"name",
")",
")",
"re_opt",
"=",
"re",
".",
"compile",
"(",
"r\"^ ... | Converts a well-formed docstring into documentation
to be fed into argparse.
See signature_parser for details.
shorts: (-k for --keyword -k, or "from" for "frm/from")
metavars: (FILE for --input=FILE)
helps: (docs for --keyword: docs)
description: the stuff before
epilog: the stuff after | [
"Converts",
"a",
"well",
"-",
"formed",
"docstring",
"into",
"documentation",
"to",
"be",
"fed",
"into",
"argparse",
"."
] | d355dd1a6e0cabdd6751fc2f6016aee20755d332 | https://github.com/ponty/entrypoint2/blob/d355dd1a6e0cabdd6751fc2f6016aee20755d332/entrypoint2/__init__.py#L125-L206 | train | 54,614 |
scraperwiki/dumptruck | dumptruck/convert.py | quote | def quote(text):
'Handle quote characters'
# Convert to unicode.
if not isinstance(text, unicode):
text = text.decode('utf-8')
# Look for quote characters. Keep the text as is if it's already quoted.
for qp in QUOTEPAIRS:
if text[0] == qp[0] and text[-1] == qp[-1] and len(text) >= 2:
return text
# If it's not quoted, try quoting
for qp in QUOTEPAIRS:
if qp[1] not in text:
return qp[0] + text + qp[1]
#Darn
raise ValueError(u'The value "%s" is not quoted and contains too many quote characters to quote' % text) | python | def quote(text):
'Handle quote characters'
# Convert to unicode.
if not isinstance(text, unicode):
text = text.decode('utf-8')
# Look for quote characters. Keep the text as is if it's already quoted.
for qp in QUOTEPAIRS:
if text[0] == qp[0] and text[-1] == qp[-1] and len(text) >= 2:
return text
# If it's not quoted, try quoting
for qp in QUOTEPAIRS:
if qp[1] not in text:
return qp[0] + text + qp[1]
#Darn
raise ValueError(u'The value "%s" is not quoted and contains too many quote characters to quote' % text) | [
"def",
"quote",
"(",
"text",
")",
":",
"# Convert to unicode.",
"if",
"not",
"isinstance",
"(",
"text",
",",
"unicode",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"# Look for quote characters. Keep the text as is if it's already quoted.",
"fo... | Handle quote characters | [
"Handle",
"quote",
"characters"
] | ac5855e34d4dffc7e53a13ff925ccabda19604fc | https://github.com/scraperwiki/dumptruck/blob/ac5855e34d4dffc7e53a13ff925ccabda19604fc/dumptruck/convert.py#L72-L90 | train | 54,615 |
bniemczyk/automata | automata/automata.py | NFA.reltags | def reltags(self, src, cache=None):
'''
returns all the tags that are relevant at this state
cache should be a dictionary and it is updated
by the function
'''
if not self._tag_assocs:
return set()
# fucking python and it's terrible support for recursion makes this
# far more complicated than it needs to be
if cache == None:
cache = {}
q = _otq()
q.append(src)
updateq = _otq()
while q:
i = q.popleft()
if i in cache:
continue
cache[i] = set()
for (s,t) in self.transitions_to(i):
q.append(s)
if self.is_tagged(t,s,i):
cache[i].add((self.tag(t,s,i),s, i))
updateq.appendleft((i, s))
while updateq:
i = updateq.popleft()
cache[i[0]].update(cache[i[1]])
return cache[src] | python | def reltags(self, src, cache=None):
'''
returns all the tags that are relevant at this state
cache should be a dictionary and it is updated
by the function
'''
if not self._tag_assocs:
return set()
# fucking python and it's terrible support for recursion makes this
# far more complicated than it needs to be
if cache == None:
cache = {}
q = _otq()
q.append(src)
updateq = _otq()
while q:
i = q.popleft()
if i in cache:
continue
cache[i] = set()
for (s,t) in self.transitions_to(i):
q.append(s)
if self.is_tagged(t,s,i):
cache[i].add((self.tag(t,s,i),s, i))
updateq.appendleft((i, s))
while updateq:
i = updateq.popleft()
cache[i[0]].update(cache[i[1]])
return cache[src] | [
"def",
"reltags",
"(",
"self",
",",
"src",
",",
"cache",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_tag_assocs",
":",
"return",
"set",
"(",
")",
"# fucking python and it's terrible support for recursion makes this",
"# far more complicated than it needs to be",
... | returns all the tags that are relevant at this state
cache should be a dictionary and it is updated
by the function | [
"returns",
"all",
"the",
"tags",
"that",
"are",
"relevant",
"at",
"this",
"state",
"cache",
"should",
"be",
"a",
"dictionary",
"and",
"it",
"is",
"updated",
"by",
"the",
"function"
] | b4e21ba8b881f2cb1a07a813a4011209a3f1e017 | https://github.com/bniemczyk/automata/blob/b4e21ba8b881f2cb1a07a813a4011209a3f1e017/automata/automata.py#L60-L95 | train | 54,616 |
wearpants/instrument | instrument/output/logging.py | make_log_metric | def make_log_metric(level=logging.INFO, msg="%d items in %.2f seconds"):
"""Make a new metric function that logs at the given level
:arg int level: logging level, defaults to ``logging.INFO``
:arg string msg: logging message format string, taking ``count`` and ``elapsed``
:rtype: function
"""
def log_metric(name, count, elapsed):
log_name = 'instrument.{}'.format(name) if name else 'instrument'
logging.getLogger(log_name).log(level, msg, count, elapsed)
return log_metric | python | def make_log_metric(level=logging.INFO, msg="%d items in %.2f seconds"):
"""Make a new metric function that logs at the given level
:arg int level: logging level, defaults to ``logging.INFO``
:arg string msg: logging message format string, taking ``count`` and ``elapsed``
:rtype: function
"""
def log_metric(name, count, elapsed):
log_name = 'instrument.{}'.format(name) if name else 'instrument'
logging.getLogger(log_name).log(level, msg, count, elapsed)
return log_metric | [
"def",
"make_log_metric",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"msg",
"=",
"\"%d items in %.2f seconds\"",
")",
":",
"def",
"log_metric",
"(",
"name",
",",
"count",
",",
"elapsed",
")",
":",
"log_name",
"=",
"'instrument.{}'",
".",
"format",
"(",
... | Make a new metric function that logs at the given level
:arg int level: logging level, defaults to ``logging.INFO``
:arg string msg: logging message format string, taking ``count`` and ``elapsed``
:rtype: function | [
"Make",
"a",
"new",
"metric",
"function",
"that",
"logs",
"at",
"the",
"given",
"level"
] | a0f6103574ab58a82361a951e5e56b69aedfe294 | https://github.com/wearpants/instrument/blob/a0f6103574ab58a82361a951e5e56b69aedfe294/instrument/output/logging.py#L4-L14 | train | 54,617 |
memphis-iis/GLUDB | gludb/simple.py | DBObject | def DBObject(table_name, versioning=VersioningTypes.NONE):
"""Classes annotated with DBObject gain persistence methods."""
def wrapped(cls):
field_names = set()
all_fields = []
for name in dir(cls):
fld = getattr(cls, name)
if fld and isinstance(fld, Field):
fld.name = name
all_fields.append(fld)
field_names.add(name)
def add_missing_field(name, default='', insert_pos=None):
if name not in field_names:
fld = Field(default=default)
fld.name = name
all_fields.insert(
len(all_fields) if insert_pos is None else insert_pos,
fld
)
add_missing_field('id', insert_pos=0)
add_missing_field('_create_date')
add_missing_field('_last_update')
if versioning == VersioningTypes.DELTA_HISTORY:
add_missing_field('_version_hist', default=list)
# Things we count on as part of our processing
cls.__table_name__ = table_name
cls.__versioning__ = versioning
cls.__fields__ = all_fields
# Give them a ctor for free - but make sure we aren't clobbering one
if not ctor_overridable(cls):
raise TypeError(
'Classes with user-supplied __init__ should not be decorated '
'with DBObject. Use the setup method'
)
cls.__init__ = _auto_init
# Duck-type the class for our data methods
cls.get_table_name = classmethod(_get_table_name)
cls.get_id = _get_id
cls.set_id = _set_id
cls.to_data = _to_data
cls.from_data = classmethod(_from_data)
cls.index_names = classmethod(_index_names)
cls.indexes = _indexes
# Bonus methods they get for using gludb.simple
cls.get_version_hist = _get_version_hist
# Register with our abc since we actually implement all necessary
# functionality
Storable.register(cls)
# And now that we're registered, we can also get the database
# read/write functionality for free
cls = DatabaseEnabled(cls)
if versioning == VersioningTypes.DELTA_HISTORY:
cls.save = _delta_save(cls.save)
return cls
return wrapped | python | def DBObject(table_name, versioning=VersioningTypes.NONE):
"""Classes annotated with DBObject gain persistence methods."""
def wrapped(cls):
field_names = set()
all_fields = []
for name in dir(cls):
fld = getattr(cls, name)
if fld and isinstance(fld, Field):
fld.name = name
all_fields.append(fld)
field_names.add(name)
def add_missing_field(name, default='', insert_pos=None):
if name not in field_names:
fld = Field(default=default)
fld.name = name
all_fields.insert(
len(all_fields) if insert_pos is None else insert_pos,
fld
)
add_missing_field('id', insert_pos=0)
add_missing_field('_create_date')
add_missing_field('_last_update')
if versioning == VersioningTypes.DELTA_HISTORY:
add_missing_field('_version_hist', default=list)
# Things we count on as part of our processing
cls.__table_name__ = table_name
cls.__versioning__ = versioning
cls.__fields__ = all_fields
# Give them a ctor for free - but make sure we aren't clobbering one
if not ctor_overridable(cls):
raise TypeError(
'Classes with user-supplied __init__ should not be decorated '
'with DBObject. Use the setup method'
)
cls.__init__ = _auto_init
# Duck-type the class for our data methods
cls.get_table_name = classmethod(_get_table_name)
cls.get_id = _get_id
cls.set_id = _set_id
cls.to_data = _to_data
cls.from_data = classmethod(_from_data)
cls.index_names = classmethod(_index_names)
cls.indexes = _indexes
# Bonus methods they get for using gludb.simple
cls.get_version_hist = _get_version_hist
# Register with our abc since we actually implement all necessary
# functionality
Storable.register(cls)
# And now that we're registered, we can also get the database
# read/write functionality for free
cls = DatabaseEnabled(cls)
if versioning == VersioningTypes.DELTA_HISTORY:
cls.save = _delta_save(cls.save)
return cls
return wrapped | [
"def",
"DBObject",
"(",
"table_name",
",",
"versioning",
"=",
"VersioningTypes",
".",
"NONE",
")",
":",
"def",
"wrapped",
"(",
"cls",
")",
":",
"field_names",
"=",
"set",
"(",
")",
"all_fields",
"=",
"[",
"]",
"for",
"name",
"in",
"dir",
"(",
"cls",
... | Classes annotated with DBObject gain persistence methods. | [
"Classes",
"annotated",
"with",
"DBObject",
"gain",
"persistence",
"methods",
"."
] | 25692528ff6fe8184a3570f61f31f1a90088a388 | https://github.com/memphis-iis/GLUDB/blob/25692528ff6fe8184a3570f61f31f1a90088a388/gludb/simple.py#L188-L253 | train | 54,618 |
VikParuchuri/percept | percept/tasks/validate.py | Validate.train | def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
non_predictors = [i.replace(" ", "_").lower() for i in list(set(data['team']))] + ["team", "next_year_wins"]
self.column_names = [l for l in list(data.columns) if l not in non_predictors]
results, folds = self.cross_validate(data, non_predictors, **kwargs)
self.gather_results(results, folds, data) | python | def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
non_predictors = [i.replace(" ", "_").lower() for i in list(set(data['team']))] + ["team", "next_year_wins"]
self.column_names = [l for l in list(data.columns) if l not in non_predictors]
results, folds = self.cross_validate(data, non_predictors, **kwargs)
self.gather_results(results, folds, data) | [
"def",
"train",
"(",
"self",
",",
"data",
",",
"target",
",",
"*",
"*",
"kwargs",
")",
":",
"non_predictors",
"=",
"[",
"i",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
".",
"lower",
"(",
")",
"for",
"i",
"in",
"list",
"(",
"set",
"(",
"da... | Used in the training phase. Override. | [
"Used",
"in",
"the",
"training",
"phase",
".",
"Override",
"."
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/tasks/validate.py#L78-L85 | train | 54,619 |
frascoweb/frasco | frasco/commands.py | CommandDecorator.as_command | def as_command(self):
"""Creates the click command wrapping the function
"""
try:
params = self.unbound_func.__click_params__
params.reverse()
del self.unbound_func.__click_params__
except AttributeError:
params = []
help = inspect.getdoc(self.real_func)
if isinstance(help, bytes):
help = help.decode('utf-8')
self.options.setdefault('help', help)
@pass_script_info_decorator
def callback(info, *args, **kwargs):
if self.with_reloader:
app = info.load_app()
if app.debug:
def inner():
return self.command_callback(info, *args, **kwargs)
run_with_reloader(inner, extra_files=get_reloader_extra_files())
return
self.command_callback(info, *args, **kwargs)
return self.cls(name=self.name, callback=callback, params=params, **self.options) | python | def as_command(self):
"""Creates the click command wrapping the function
"""
try:
params = self.unbound_func.__click_params__
params.reverse()
del self.unbound_func.__click_params__
except AttributeError:
params = []
help = inspect.getdoc(self.real_func)
if isinstance(help, bytes):
help = help.decode('utf-8')
self.options.setdefault('help', help)
@pass_script_info_decorator
def callback(info, *args, **kwargs):
if self.with_reloader:
app = info.load_app()
if app.debug:
def inner():
return self.command_callback(info, *args, **kwargs)
run_with_reloader(inner, extra_files=get_reloader_extra_files())
return
self.command_callback(info, *args, **kwargs)
return self.cls(name=self.name, callback=callback, params=params, **self.options) | [
"def",
"as_command",
"(",
"self",
")",
":",
"try",
":",
"params",
"=",
"self",
".",
"unbound_func",
".",
"__click_params__",
"params",
".",
"reverse",
"(",
")",
"del",
"self",
".",
"unbound_func",
".",
"__click_params__",
"except",
"AttributeError",
":",
"pa... | Creates the click command wrapping the function | [
"Creates",
"the",
"click",
"command",
"wrapping",
"the",
"function"
] | ea519d69dd5ca6deaf3650175692ee4a1a02518f | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/commands.py#L76-L99 | train | 54,620 |
bioidiap/bob.ip.facedetect | bob/ip/facedetect/script/plot_froc.py | main | def main(command_line_arguments=None):
"""Reads score files, computes error measures and plots curves."""
args = command_line_options(command_line_arguments)
# get some colors for plotting
cmap = mpl.cm.get_cmap(name='hsv')
count = len(args.files) + (len(args.baselines) if args.baselines else 0)
colors = [cmap(i) for i in numpy.linspace(0, 1.0, count+1)]
# First, read the score files
logger.info("Loading %d score files" % len(args.files))
scores = [read_score_file(os.path.join(args.directory, f)) for f in args.files]
false_alarms = []
detection_rate = []
logger.info("Computing FROC curves")
for score in scores:
# compute some thresholds
tmin = min(score[2])
tmax = max(score[2])
count = 100
thresholds = [tmin + float(x)/count * (tmax - tmin) for x in range(count+2)]
false_alarms.append([])
detection_rate.append([])
for threshold in thresholds:
detection_rate[-1].append(numpy.count_nonzero(numpy.array(score[1]) >= threshold) / float(score[0]))
false_alarms[-1].append(numpy.count_nonzero(numpy.array(score[2]) >= threshold))
# to display 0 in a semilogx plot, we have to add a little
# false_alarms[-1][-1] += 1e-8
# also read baselines
if args.baselines is not None:
for baseline in args.baselines:
dr = []
fa = []
with open(os.path.join(args.baseline_directory, baseline)) as f:
for line in f:
splits = line.rstrip().split()
dr.append(float(splits[0]))
fa.append(int(splits[1]))
false_alarms.append(fa)
detection_rate.append(dr)
logger.info("Plotting FROC curves to file '%s'", args.output)
# create a multi-page PDF for the ROC curve
pdf = PdfPages(args.output)
figure = _plot_froc(false_alarms, detection_rate, colors, args.legends, args.title, args.max)
mpl.xlabel('False Alarm (of %d pruned)' % len(scores[0][2]))
mpl.ylabel('Detection Rate in \%% (total %d faces)' % scores[0][0])
pdf.savefig(figure)
pdf.close()
if args.count_detections:
for i, f in enumerate(args.files):
det, all = count_detections(f)
print("The number of detected faces for %s is %d out of %d" % (args.legends[i], det, all)) | python | def main(command_line_arguments=None):
"""Reads score files, computes error measures and plots curves."""
args = command_line_options(command_line_arguments)
# get some colors for plotting
cmap = mpl.cm.get_cmap(name='hsv')
count = len(args.files) + (len(args.baselines) if args.baselines else 0)
colors = [cmap(i) for i in numpy.linspace(0, 1.0, count+1)]
# First, read the score files
logger.info("Loading %d score files" % len(args.files))
scores = [read_score_file(os.path.join(args.directory, f)) for f in args.files]
false_alarms = []
detection_rate = []
logger.info("Computing FROC curves")
for score in scores:
# compute some thresholds
tmin = min(score[2])
tmax = max(score[2])
count = 100
thresholds = [tmin + float(x)/count * (tmax - tmin) for x in range(count+2)]
false_alarms.append([])
detection_rate.append([])
for threshold in thresholds:
detection_rate[-1].append(numpy.count_nonzero(numpy.array(score[1]) >= threshold) / float(score[0]))
false_alarms[-1].append(numpy.count_nonzero(numpy.array(score[2]) >= threshold))
# to display 0 in a semilogx plot, we have to add a little
# false_alarms[-1][-1] += 1e-8
# also read baselines
if args.baselines is not None:
for baseline in args.baselines:
dr = []
fa = []
with open(os.path.join(args.baseline_directory, baseline)) as f:
for line in f:
splits = line.rstrip().split()
dr.append(float(splits[0]))
fa.append(int(splits[1]))
false_alarms.append(fa)
detection_rate.append(dr)
logger.info("Plotting FROC curves to file '%s'", args.output)
# create a multi-page PDF for the ROC curve
pdf = PdfPages(args.output)
figure = _plot_froc(false_alarms, detection_rate, colors, args.legends, args.title, args.max)
mpl.xlabel('False Alarm (of %d pruned)' % len(scores[0][2]))
mpl.ylabel('Detection Rate in \%% (total %d faces)' % scores[0][0])
pdf.savefig(figure)
pdf.close()
if args.count_detections:
for i, f in enumerate(args.files):
det, all = count_detections(f)
print("The number of detected faces for %s is %d out of %d" % (args.legends[i], det, all)) | [
"def",
"main",
"(",
"command_line_arguments",
"=",
"None",
")",
":",
"args",
"=",
"command_line_options",
"(",
"command_line_arguments",
")",
"# get some colors for plotting",
"cmap",
"=",
"mpl",
".",
"cm",
".",
"get_cmap",
"(",
"name",
"=",
"'hsv'",
")",
"count... | Reads score files, computes error measures and plots curves. | [
"Reads",
"score",
"files",
"computes",
"error",
"measures",
"and",
"plots",
"curves",
"."
] | 601da5141ca7302ad36424d1421b33190ba46779 | https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/script/plot_froc.py#L168-L225 | train | 54,621 |
Robpol86/Flask-Statics-Helper | flask_statics/helpers.py | get_resources | def get_resources(minify=False):
"""Find all resources which subclass ResourceBase.
Keyword arguments:
minify -- select minified resources if available.
Returns:
Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts
with css and js keys, and tuples of resources as values.
"""
all_resources = dict()
subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()
for resource in subclasses:
obj = resource(minify)
all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))
return all_resources | python | def get_resources(minify=False):
"""Find all resources which subclass ResourceBase.
Keyword arguments:
minify -- select minified resources if available.
Returns:
Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts
with css and js keys, and tuples of resources as values.
"""
all_resources = dict()
subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()
for resource in subclasses:
obj = resource(minify)
all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))
return all_resources | [
"def",
"get_resources",
"(",
"minify",
"=",
"False",
")",
":",
"all_resources",
"=",
"dict",
"(",
")",
"subclasses",
"=",
"resource_base",
".",
"ResourceBase",
".",
"__subclasses__",
"(",
")",
"+",
"resource_definitions",
".",
"ResourceAngular",
".",
"__subclass... | Find all resources which subclass ResourceBase.
Keyword arguments:
minify -- select minified resources if available.
Returns:
Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts
with css and js keys, and tuples of resources as values. | [
"Find",
"all",
"resources",
"which",
"subclass",
"ResourceBase",
"."
] | b1771e65225f62b760b3ef841b710ff23ef6f83c | https://github.com/Robpol86/Flask-Statics-Helper/blob/b1771e65225f62b760b3ef841b710ff23ef6f83c/flask_statics/helpers.py#L23-L38 | train | 54,622 |
scieloorg/citedbyapi | citedby/client.py | ThriftClient.search | def search(self, dsl, params):
"""
Free queries to ES index.
dsl (string): with DSL query
params (list): [(key, value), (key, value)]
where key is a query parameter, and value is the value required for parameter, ex: [('size', '0'), ('search_type', 'count')]
"""
query_parameters = []
for key, value in params:
query_parameters.append(self.CITEDBY_THRIFT.kwargs(str(key), str(value)))
try:
result = self.client.search(dsl, query_parameters)
except self.CITEDBY_THRIFT.ServerError:
raise ServerError('you may trying to run a bad DSL Query')
try:
return json.loads(result)
except:
return None | python | def search(self, dsl, params):
"""
Free queries to ES index.
dsl (string): with DSL query
params (list): [(key, value), (key, value)]
where key is a query parameter, and value is the value required for parameter, ex: [('size', '0'), ('search_type', 'count')]
"""
query_parameters = []
for key, value in params:
query_parameters.append(self.CITEDBY_THRIFT.kwargs(str(key), str(value)))
try:
result = self.client.search(dsl, query_parameters)
except self.CITEDBY_THRIFT.ServerError:
raise ServerError('you may trying to run a bad DSL Query')
try:
return json.loads(result)
except:
return None | [
"def",
"search",
"(",
"self",
",",
"dsl",
",",
"params",
")",
":",
"query_parameters",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"params",
":",
"query_parameters",
".",
"append",
"(",
"self",
".",
"CITEDBY_THRIFT",
".",
"kwargs",
"(",
"str",
"("... | Free queries to ES index.
dsl (string): with DSL query
params (list): [(key, value), (key, value)]
where key is a query parameter, and value is the value required for parameter, ex: [('size', '0'), ('search_type', 'count')] | [
"Free",
"queries",
"to",
"ES",
"index",
"."
] | c614d6d1a3c3a5aebd8d79c18bcab444fb351b90 | https://github.com/scieloorg/citedbyapi/blob/c614d6d1a3c3a5aebd8d79c18bcab444fb351b90/citedby/client.py#L215-L236 | train | 54,623 |
nimbusproject/dashi | dashi/__init__.py | raise_error | def raise_error(error):
"""Intakes a dict of remote error information and raises a DashiError
"""
exc_type = error.get('exc_type')
if exc_type and exc_type.startswith(ERROR_PREFIX):
exc_type = exc_type[len(ERROR_PREFIX):]
exc_cls = ERROR_TYPE_MAP.get(exc_type, DashiError)
else:
exc_cls = DashiError
raise exc_cls(**error) | python | def raise_error(error):
"""Intakes a dict of remote error information and raises a DashiError
"""
exc_type = error.get('exc_type')
if exc_type and exc_type.startswith(ERROR_PREFIX):
exc_type = exc_type[len(ERROR_PREFIX):]
exc_cls = ERROR_TYPE_MAP.get(exc_type, DashiError)
else:
exc_cls = DashiError
raise exc_cls(**error) | [
"def",
"raise_error",
"(",
"error",
")",
":",
"exc_type",
"=",
"error",
".",
"get",
"(",
"'exc_type'",
")",
"if",
"exc_type",
"and",
"exc_type",
".",
"startswith",
"(",
"ERROR_PREFIX",
")",
":",
"exc_type",
"=",
"exc_type",
"[",
"len",
"(",
"ERROR_PREFIX",... | Intakes a dict of remote error information and raises a DashiError | [
"Intakes",
"a",
"dict",
"of",
"remote",
"error",
"information",
"and",
"raises",
"a",
"DashiError"
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L553-L563 | train | 54,624 |
nimbusproject/dashi | dashi/__init__.py | Dashi.fire | def fire(self, name, operation, args=None, **kwargs):
"""Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
"""
if args:
if kwargs:
raise TypeError("specify args dict or keyword arguments, not both")
else:
args = kwargs
d = dict(op=operation, args=args)
headers = {'sender': self.add_sysname(self.name)}
dest = self.add_sysname(name)
def _fire(channel):
with Producer(channel) as producer:
producer.publish(d, routing_key=dest,
headers=headers, serializer=self._serializer,
exchange=self._exchange, declare=[self._exchange])
log.debug("sending message to %s", dest)
with connections[self._pool_conn].acquire(block=True) as conn:
_, channel = self.ensure(conn, _fire)
conn.maybe_close_channel(channel) | python | def fire(self, name, operation, args=None, **kwargs):
"""Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
"""
if args:
if kwargs:
raise TypeError("specify args dict or keyword arguments, not both")
else:
args = kwargs
d = dict(op=operation, args=args)
headers = {'sender': self.add_sysname(self.name)}
dest = self.add_sysname(name)
def _fire(channel):
with Producer(channel) as producer:
producer.publish(d, routing_key=dest,
headers=headers, serializer=self._serializer,
exchange=self._exchange, declare=[self._exchange])
log.debug("sending message to %s", dest)
with connections[self._pool_conn].acquire(block=True) as conn:
_, channel = self.ensure(conn, _fire)
conn.maybe_close_channel(channel) | [
"def",
"fire",
"(",
"self",
",",
"name",
",",
"operation",
",",
"args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
":",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"\"specify args dict or keyword arguments, not both\"",
")",
"else",
... | Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation | [
"Send",
"a",
"message",
"without",
"waiting",
"for",
"a",
"reply"
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L101-L131 | train | 54,625 |
nimbusproject/dashi | dashi/__init__.py | Dashi.call | def call(self, name, operation, timeout=10, args=None, **kwargs):
"""Send a message and wait for reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param timeout: RPC timeout to await a reply
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
"""
if args:
if kwargs:
raise TypeError("specify args dict or keyword arguments, not both")
else:
args = kwargs
# create a direct queue for the reply. This may end up being a
# bottleneck for performance: each rpc call gets a brand new
# exclusive queue. However this approach is used nova.rpc and
# seems to have carried them pretty far. If/when this
# becomes a bottleneck we can set up a long-lived backend queue and
# use correlation_id to deal with concurrent RPC calls. See:
# http://www.rabbitmq.com/tutorials/tutorial-six-python.html
msg_id = uuid.uuid4().hex
# expire the reply queue shortly after the timeout. it will be
# (lazily) deleted by the broker if we don't clean it up first
queue_arguments = {'x-expires': int((timeout + 1) * 1000)}
queue = Queue(name=msg_id, exchange=self._exchange, routing_key=msg_id,
durable=False, queue_arguments=queue_arguments)
messages = []
event = threading.Event()
def _callback(body, message):
messages.append(body)
message.ack()
event.set()
d = dict(op=operation, args=args)
headers = {'reply-to': msg_id, 'sender': self.add_sysname(self.name)}
dest = self.add_sysname(name)
def _declare_and_send(channel):
consumer = Consumer(channel, (queue,), callbacks=(_callback,))
with Producer(channel) as producer:
producer.publish(d, routing_key=dest, headers=headers,
exchange=self._exchange, serializer=self._serializer)
return consumer
log.debug("sending call to %s:%s", dest, operation)
with connections[self._pool_conn].acquire(block=True) as conn:
consumer, channel = self.ensure(conn, _declare_and_send)
try:
self._consume(conn, consumer, timeout=timeout, until_event=event)
# try to delete queue, but don't worry if it fails (will expire)
try:
queue = queue.bind(channel)
queue.delete(nowait=True)
except Exception:
log.exception("error deleting queue")
finally:
conn.maybe_close_channel(channel)
msg_body = messages[0]
if msg_body.get('error'):
raise_error(msg_body['error'])
else:
return msg_body.get('result') | python | def call(self, name, operation, timeout=10, args=None, **kwargs):
"""Send a message and wait for reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param timeout: RPC timeout to await a reply
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
"""
if args:
if kwargs:
raise TypeError("specify args dict or keyword arguments, not both")
else:
args = kwargs
# create a direct queue for the reply. This may end up being a
# bottleneck for performance: each rpc call gets a brand new
# exclusive queue. However this approach is used nova.rpc and
# seems to have carried them pretty far. If/when this
# becomes a bottleneck we can set up a long-lived backend queue and
# use correlation_id to deal with concurrent RPC calls. See:
# http://www.rabbitmq.com/tutorials/tutorial-six-python.html
msg_id = uuid.uuid4().hex
# expire the reply queue shortly after the timeout. it will be
# (lazily) deleted by the broker if we don't clean it up first
queue_arguments = {'x-expires': int((timeout + 1) * 1000)}
queue = Queue(name=msg_id, exchange=self._exchange, routing_key=msg_id,
durable=False, queue_arguments=queue_arguments)
messages = []
event = threading.Event()
def _callback(body, message):
messages.append(body)
message.ack()
event.set()
d = dict(op=operation, args=args)
headers = {'reply-to': msg_id, 'sender': self.add_sysname(self.name)}
dest = self.add_sysname(name)
def _declare_and_send(channel):
consumer = Consumer(channel, (queue,), callbacks=(_callback,))
with Producer(channel) as producer:
producer.publish(d, routing_key=dest, headers=headers,
exchange=self._exchange, serializer=self._serializer)
return consumer
log.debug("sending call to %s:%s", dest, operation)
with connections[self._pool_conn].acquire(block=True) as conn:
consumer, channel = self.ensure(conn, _declare_and_send)
try:
self._consume(conn, consumer, timeout=timeout, until_event=event)
# try to delete queue, but don't worry if it fails (will expire)
try:
queue = queue.bind(channel)
queue.delete(nowait=True)
except Exception:
log.exception("error deleting queue")
finally:
conn.maybe_close_channel(channel)
msg_body = messages[0]
if msg_body.get('error'):
raise_error(msg_body['error'])
else:
return msg_body.get('result') | [
"def",
"call",
"(",
"self",
",",
"name",
",",
"operation",
",",
"timeout",
"=",
"10",
",",
"args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
":",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"\"specify args dict or keyword argument... | Send a message and wait for reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param timeout: RPC timeout to await a reply
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation | [
"Send",
"a",
"message",
"and",
"wait",
"for",
"reply"
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L133-L204 | train | 54,626 |
nimbusproject/dashi | dashi/__init__.py | Dashi.handle | def handle(self, operation, operation_name=None, sender_kwarg=None):
"""Handle an operation using the specified function
@param operation: function to call for this operation
@param operation_name: operation name. if unspecified operation.__name__ is used
@param sender_kwarg: optional keyword arg on operation to feed in sender name
"""
if not self._consumer:
self._consumer = DashiConsumer(self, self._conn,
self._name, self._exchange, sysname=self._sysname)
self._consumer.add_op(operation_name or operation.__name__, operation,
sender_kwarg=sender_kwarg) | python | def handle(self, operation, operation_name=None, sender_kwarg=None):
"""Handle an operation using the specified function
@param operation: function to call for this operation
@param operation_name: operation name. if unspecified operation.__name__ is used
@param sender_kwarg: optional keyword arg on operation to feed in sender name
"""
if not self._consumer:
self._consumer = DashiConsumer(self, self._conn,
self._name, self._exchange, sysname=self._sysname)
self._consumer.add_op(operation_name or operation.__name__, operation,
sender_kwarg=sender_kwarg) | [
"def",
"handle",
"(",
"self",
",",
"operation",
",",
"operation_name",
"=",
"None",
",",
"sender_kwarg",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_consumer",
":",
"self",
".",
"_consumer",
"=",
"DashiConsumer",
"(",
"self",
",",
"self",
".",
"... | Handle an operation using the specified function
@param operation: function to call for this operation
@param operation_name: operation name. if unspecified operation.__name__ is used
@param sender_kwarg: optional keyword arg on operation to feed in sender name | [
"Handle",
"an",
"operation",
"using",
"the",
"specified",
"function"
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L276-L287 | train | 54,627 |
nimbusproject/dashi | dashi/__init__.py | Dashi.link_exceptions | def link_exceptions(self, custom_exception=None, dashi_exception=None):
"""Link a custom exception thrown on the receiver to a dashi exception
"""
if custom_exception is None:
raise ValueError("custom_exception must be set")
if dashi_exception is None:
raise ValueError("dashi_exception must be set")
self._linked_exceptions[custom_exception] = dashi_exception | python | def link_exceptions(self, custom_exception=None, dashi_exception=None):
"""Link a custom exception thrown on the receiver to a dashi exception
"""
if custom_exception is None:
raise ValueError("custom_exception must be set")
if dashi_exception is None:
raise ValueError("dashi_exception must be set")
self._linked_exceptions[custom_exception] = dashi_exception | [
"def",
"link_exceptions",
"(",
"self",
",",
"custom_exception",
"=",
"None",
",",
"dashi_exception",
"=",
"None",
")",
":",
"if",
"custom_exception",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"custom_exception must be set\"",
")",
"if",
"dashi_exception",
"... | Link a custom exception thrown on the receiver to a dashi exception | [
"Link",
"a",
"custom",
"exception",
"thrown",
"on",
"the",
"receiver",
"to",
"a",
"dashi",
"exception"
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L313-L321 | train | 54,628 |
nimbusproject/dashi | dashi/__init__.py | Dashi.ensure | def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel()
return func(channel, *args, **kwargs), channel
except (connection.connection_errors, IOError):
self._call_errback()
channel = self.connect(connection) | python | def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel()
return func(channel, *args, **kwargs), channel
except (connection.connection_errors, IOError):
self._call_errback()
channel = self.connect(connection) | [
"def",
"ensure",
"(",
"self",
",",
"connection",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"channel",
"=",
"None",
"while",
"1",
":",
"try",
":",
"if",
"channel",
"is",
"None",
":",
"channel",
"=",
"connection",
".",
"channe... | Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy. | [
"Perform",
"an",
"operation",
"until",
"success"
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L379-L393 | train | 54,629 |
cwoebker/pen | pen/edit.py | re_tab | def re_tab(s):
"""Return a tabbed string from an expanded one."""
l = []
p = 0
for i in range(8, len(s), 8):
if s[i - 2:i] == " ":
# collapse two or more spaces into a tab
l.append(s[p:i].rstrip() + "\t")
p = i
if p == 0:
return s
else:
l.append(s[p:])
return "".join(l) | python | def re_tab(s):
"""Return a tabbed string from an expanded one."""
l = []
p = 0
for i in range(8, len(s), 8):
if s[i - 2:i] == " ":
# collapse two or more spaces into a tab
l.append(s[p:i].rstrip() + "\t")
p = i
if p == 0:
return s
else:
l.append(s[p:])
return "".join(l) | [
"def",
"re_tab",
"(",
"s",
")",
":",
"l",
"=",
"[",
"]",
"p",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"8",
",",
"len",
"(",
"s",
")",
",",
"8",
")",
":",
"if",
"s",
"[",
"i",
"-",
"2",
":",
"i",
"]",
"==",
"\" \"",
":",
"# collapse t... | Return a tabbed string from an expanded one. | [
"Return",
"a",
"tabbed",
"string",
"from",
"an",
"expanded",
"one",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L187-L201 | train | 54,630 |
cwoebker/pen | pen/edit.py | LineWalker.read_next_line | def read_next_line(self):
"""Read another line from the file."""
next_line = self.file.readline()
if not next_line or next_line[-1:] != '\n':
# no newline on last line of file
self.file = None
else:
# trim newline characters
next_line = next_line[:-1]
expanded = next_line.expandtabs()
edit = urwid.Edit("", expanded, allow_tab=True)
edit.set_edit_pos(0)
edit.original_text = next_line
self.lines.append(edit)
return next_line | python | def read_next_line(self):
"""Read another line from the file."""
next_line = self.file.readline()
if not next_line or next_line[-1:] != '\n':
# no newline on last line of file
self.file = None
else:
# trim newline characters
next_line = next_line[:-1]
expanded = next_line.expandtabs()
edit = urwid.Edit("", expanded, allow_tab=True)
edit.set_edit_pos(0)
edit.original_text = next_line
self.lines.append(edit)
return next_line | [
"def",
"read_next_line",
"(",
"self",
")",
":",
"next_line",
"=",
"self",
".",
"file",
".",
"readline",
"(",
")",
"if",
"not",
"next_line",
"or",
"next_line",
"[",
"-",
"1",
":",
"]",
"!=",
"'\\n'",
":",
"# no newline on last line of file",
"self",
".",
... | Read another line from the file. | [
"Read",
"another",
"line",
"from",
"the",
"file",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L31-L50 | train | 54,631 |
cwoebker/pen | pen/edit.py | LineWalker._get_at_pos | def _get_at_pos(self, pos):
"""Return a widget for the line number passed."""
if pos < 0:
# line 0 is the start of the file, no more above
return None, None
if len(self.lines) > pos:
# we have that line so return it
return self.lines[pos], pos
if self.file is None:
# file is closed, so there are no more lines
return None, None
assert pos == len(self.lines), "out of order request?"
self.read_next_line()
return self.lines[-1], pos | python | def _get_at_pos(self, pos):
"""Return a widget for the line number passed."""
if pos < 0:
# line 0 is the start of the file, no more above
return None, None
if len(self.lines) > pos:
# we have that line so return it
return self.lines[pos], pos
if self.file is None:
# file is closed, so there are no more lines
return None, None
assert pos == len(self.lines), "out of order request?"
self.read_next_line()
return self.lines[-1], pos | [
"def",
"_get_at_pos",
"(",
"self",
",",
"pos",
")",
":",
"if",
"pos",
"<",
"0",
":",
"# line 0 is the start of the file, no more above",
"return",
"None",
",",
"None",
"if",
"len",
"(",
"self",
".",
"lines",
")",
">",
"pos",
":",
"# we have that line so return... | Return a widget for the line number passed. | [
"Return",
"a",
"widget",
"for",
"the",
"line",
"number",
"passed",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L52-L71 | train | 54,632 |
cwoebker/pen | pen/edit.py | LineWalker.split_focus | def split_focus(self):
"""Divide the focus edit widget at the cursor location."""
focus = self.lines[self.focus]
pos = focus.edit_pos
edit = urwid.Edit("", focus.edit_text[pos:], allow_tab=True)
edit.original_text = ""
focus.set_edit_text(focus.edit_text[:pos])
edit.set_edit_pos(0)
self.lines.insert(self.focus + 1, edit) | python | def split_focus(self):
"""Divide the focus edit widget at the cursor location."""
focus = self.lines[self.focus]
pos = focus.edit_pos
edit = urwid.Edit("", focus.edit_text[pos:], allow_tab=True)
edit.original_text = ""
focus.set_edit_text(focus.edit_text[:pos])
edit.set_edit_pos(0)
self.lines.insert(self.focus + 1, edit) | [
"def",
"split_focus",
"(",
"self",
")",
":",
"focus",
"=",
"self",
".",
"lines",
"[",
"self",
".",
"focus",
"]",
"pos",
"=",
"focus",
".",
"edit_pos",
"edit",
"=",
"urwid",
".",
"Edit",
"(",
"\"\"",
",",
"focus",
".",
"edit_text",
"[",
"pos",
":",
... | Divide the focus edit widget at the cursor location. | [
"Divide",
"the",
"focus",
"edit",
"widget",
"at",
"the",
"cursor",
"location",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L73-L82 | train | 54,633 |
cwoebker/pen | pen/edit.py | LineWalker.combine_focus_with_prev | def combine_focus_with_prev(self):
"""Combine the focus edit widget with the one above."""
above, ignore = self.get_prev(self.focus)
if above is None:
# already at the top
return
focus = self.lines[self.focus]
above.set_edit_pos(len(above.edit_text))
above.set_edit_text(above.edit_text + focus.edit_text)
del self.lines[self.focus]
self.focus -= 1 | python | def combine_focus_with_prev(self):
"""Combine the focus edit widget with the one above."""
above, ignore = self.get_prev(self.focus)
if above is None:
# already at the top
return
focus = self.lines[self.focus]
above.set_edit_pos(len(above.edit_text))
above.set_edit_text(above.edit_text + focus.edit_text)
del self.lines[self.focus]
self.focus -= 1 | [
"def",
"combine_focus_with_prev",
"(",
"self",
")",
":",
"above",
",",
"ignore",
"=",
"self",
".",
"get_prev",
"(",
"self",
".",
"focus",
")",
"if",
"above",
"is",
"None",
":",
"# already at the top",
"return",
"focus",
"=",
"self",
".",
"lines",
"[",
"s... | Combine the focus edit widget with the one above. | [
"Combine",
"the",
"focus",
"edit",
"widget",
"with",
"the",
"one",
"above",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L84-L96 | train | 54,634 |
cwoebker/pen | pen/edit.py | LineWalker.combine_focus_with_next | def combine_focus_with_next(self):
"""Combine the focus edit widget with the one below."""
below, ignore = self.get_next(self.focus)
if below is None:
# already at bottom
return
focus = self.lines[self.focus]
focus.set_edit_text(focus.edit_text + below.edit_text)
del self.lines[self.focus + 1] | python | def combine_focus_with_next(self):
"""Combine the focus edit widget with the one below."""
below, ignore = self.get_next(self.focus)
if below is None:
# already at bottom
return
focus = self.lines[self.focus]
focus.set_edit_text(focus.edit_text + below.edit_text)
del self.lines[self.focus + 1] | [
"def",
"combine_focus_with_next",
"(",
"self",
")",
":",
"below",
",",
"ignore",
"=",
"self",
".",
"get_next",
"(",
"self",
".",
"focus",
")",
"if",
"below",
"is",
"None",
":",
"# already at bottom",
"return",
"focus",
"=",
"self",
".",
"lines",
"[",
"se... | Combine the focus edit widget with the one below. | [
"Combine",
"the",
"focus",
"edit",
"widget",
"with",
"the",
"one",
"below",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L98-L108 | train | 54,635 |
cwoebker/pen | pen/edit.py | EditDisplay.handle_keypress | def handle_keypress(self, k):
"""Last resort for keypresses."""
if k == "esc":
self.save_file()
raise urwid.ExitMainLoop()
elif k == "delete":
# delete at end of line
self.walker.combine_focus_with_next()
elif k == "backspace":
# backspace at beginning of line
self.walker.combine_focus_with_prev()
elif k == "enter":
# start new line
self.walker.split_focus()
# move the cursor to the new line and reset pref_col
self.view.keypress(size, "down")
self.view.keypress(size, "home") | python | def handle_keypress(self, k):
"""Last resort for keypresses."""
if k == "esc":
self.save_file()
raise urwid.ExitMainLoop()
elif k == "delete":
# delete at end of line
self.walker.combine_focus_with_next()
elif k == "backspace":
# backspace at beginning of line
self.walker.combine_focus_with_prev()
elif k == "enter":
# start new line
self.walker.split_focus()
# move the cursor to the new line and reset pref_col
self.view.keypress(size, "down")
self.view.keypress(size, "home") | [
"def",
"handle_keypress",
"(",
"self",
",",
"k",
")",
":",
"if",
"k",
"==",
"\"esc\"",
":",
"self",
".",
"save_file",
"(",
")",
"raise",
"urwid",
".",
"ExitMainLoop",
"(",
")",
"elif",
"k",
"==",
"\"delete\"",
":",
"# delete at end of line",
"self",
".",... | Last resort for keypresses. | [
"Last",
"resort",
"for",
"keypresses",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L134-L150 | train | 54,636 |
cwoebker/pen | pen/edit.py | EditDisplay.save_file | def save_file(self):
"""Write the file out to disk."""
l = []
walk = self.walker
for edit in walk.lines:
# collect the text already stored in edit widgets
if edit.original_text.expandtabs() == edit.edit_text:
l.append(edit.original_text)
else:
l.append(re_tab(edit.edit_text))
# then the rest
while walk.file is not None:
l.append(walk.read_next_line())
# write back to disk
outfile = open(self.save_name, "w")
l_iter = iter(l)
line = next(l_iter)
prefix = ""
while True:
try:
outfile.write(prefix + line)
prefix = "\n"
line = next(l_iter)
except StopIteration:
if line != "\n":
outfile.write("\n")
break | python | def save_file(self):
"""Write the file out to disk."""
l = []
walk = self.walker
for edit in walk.lines:
# collect the text already stored in edit widgets
if edit.original_text.expandtabs() == edit.edit_text:
l.append(edit.original_text)
else:
l.append(re_tab(edit.edit_text))
# then the rest
while walk.file is not None:
l.append(walk.read_next_line())
# write back to disk
outfile = open(self.save_name, "w")
l_iter = iter(l)
line = next(l_iter)
prefix = ""
while True:
try:
outfile.write(prefix + line)
prefix = "\n"
line = next(l_iter)
except StopIteration:
if line != "\n":
outfile.write("\n")
break | [
"def",
"save_file",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"walk",
"=",
"self",
".",
"walker",
"for",
"edit",
"in",
"walk",
".",
"lines",
":",
"# collect the text already stored in edit widgets",
"if",
"edit",
".",
"original_text",
".",
"expandtabs",
"("... | Write the file out to disk. | [
"Write",
"the",
"file",
"out",
"to",
"disk",
"."
] | 996dfcdc018f2fc14a376835a2622fb4a7230a2f | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/edit.py#L152-L184 | train | 54,637 |
moccu/django-markymark | markymark/widgets.py | MarkdownTextarea._media | def _media(self):
"""
Returns a forms.Media instance with the basic editor media and media
from all registered extensions.
"""
css = ['markymark/css/markdown-editor.css']
iconlibrary_css = getattr(
settings,
'MARKYMARK_FONTAWESOME_CSS',
'markymark/fontawesome/fontawesome.min.css'
)
if iconlibrary_css:
css.append(iconlibrary_css)
media = forms.Media(
css={'all': css},
js=('markymark/js/markdown-editor.js',)
)
# Use official extension loading to initialize all extensions
# and hook in extension-defined media files.
renderer = initialize_renderer()
for extension in renderer.registeredExtensions:
if hasattr(extension, 'media'):
media += extension.media
return media | python | def _media(self):
"""
Returns a forms.Media instance with the basic editor media and media
from all registered extensions.
"""
css = ['markymark/css/markdown-editor.css']
iconlibrary_css = getattr(
settings,
'MARKYMARK_FONTAWESOME_CSS',
'markymark/fontawesome/fontawesome.min.css'
)
if iconlibrary_css:
css.append(iconlibrary_css)
media = forms.Media(
css={'all': css},
js=('markymark/js/markdown-editor.js',)
)
# Use official extension loading to initialize all extensions
# and hook in extension-defined media files.
renderer = initialize_renderer()
for extension in renderer.registeredExtensions:
if hasattr(extension, 'media'):
media += extension.media
return media | [
"def",
"_media",
"(",
"self",
")",
":",
"css",
"=",
"[",
"'markymark/css/markdown-editor.css'",
"]",
"iconlibrary_css",
"=",
"getattr",
"(",
"settings",
",",
"'MARKYMARK_FONTAWESOME_CSS'",
",",
"'markymark/fontawesome/fontawesome.min.css'",
")",
"if",
"iconlibrary_css",
... | Returns a forms.Media instance with the basic editor media and media
from all registered extensions. | [
"Returns",
"a",
"forms",
".",
"Media",
"instance",
"with",
"the",
"basic",
"editor",
"media",
"and",
"media",
"from",
"all",
"registered",
"extensions",
"."
] | c1bf69f439981d6295e5b4d13c26dadf3dba2e9d | https://github.com/moccu/django-markymark/blob/c1bf69f439981d6295e5b4d13c26dadf3dba2e9d/markymark/widgets.py#L22-L48 | train | 54,638 |
NiklasRosenstein-Python/nr-deprecated | nr/path.py | getsuffix | def getsuffix(subject):
"""
Returns the suffix of a filename. If the file has no suffix, returns None.
Can return an empty string if the filenam ends with a period.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
return subject[index+1:]
return None | python | def getsuffix(subject):
"""
Returns the suffix of a filename. If the file has no suffix, returns None.
Can return an empty string if the filenam ends with a period.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
return subject[index+1:]
return None | [
"def",
"getsuffix",
"(",
"subject",
")",
":",
"index",
"=",
"subject",
".",
"rfind",
"(",
"'.'",
")",
"if",
"index",
">",
"subject",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
".",
"rfind",
"(",
"'/'",
")",
":",
"return",
"subject",
"[",
"inde... | Returns the suffix of a filename. If the file has no suffix, returns None.
Can return an empty string if the filenam ends with a period. | [
"Returns",
"the",
"suffix",
"of",
"a",
"filename",
".",
"If",
"the",
"file",
"has",
"no",
"suffix",
"returns",
"None",
".",
"Can",
"return",
"an",
"empty",
"string",
"if",
"the",
"filenam",
"ends",
"with",
"a",
"period",
"."
] | f9f8b89ea1b084841a8ab65784eaf68852686b2a | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/path.py#L257-L266 | train | 54,639 |
Robpol86/Flask-Statics-Helper | flask_statics/__init__.py | Statics.init_app | def init_app(self, app):
"""Initialize the extension."""
# Set default Flask config option.
app.config.setdefault('STATICS_MINIFY', False)
# Select resources.
self.all_resources = ALL_RESOURCES_MINIFIED if app.config.get('STATICS_MINIFY') else ALL_RESOURCES
self.all_variables = ALL_VARIABLES
# Add this instance to app.extensions.
if not hasattr(app, 'extensions'):
app.extensions = dict()
if 'statics' in app.extensions:
raise ValueError('Already registered extension STATICS.')
app.extensions['statics'] = _StaticsState(self, app)
# Initialize blueprint.
name = 'flask_statics_helper'
static_url_path = '{0}/{1}'.format(app.static_url_path, name)
self.blueprint = Blueprint(name, __name__, template_folder='templates', static_folder='static',
static_url_path=static_url_path)
self.blueprint.add_app_template_global(self.all_variables, '_flask_statics_helper_all_variables')
self.blueprint.add_app_template_global(self.all_resources, '_flask_statics_helper_all_resources')
app.register_blueprint(self.blueprint) | python | def init_app(self, app):
"""Initialize the extension."""
# Set default Flask config option.
app.config.setdefault('STATICS_MINIFY', False)
# Select resources.
self.all_resources = ALL_RESOURCES_MINIFIED if app.config.get('STATICS_MINIFY') else ALL_RESOURCES
self.all_variables = ALL_VARIABLES
# Add this instance to app.extensions.
if not hasattr(app, 'extensions'):
app.extensions = dict()
if 'statics' in app.extensions:
raise ValueError('Already registered extension STATICS.')
app.extensions['statics'] = _StaticsState(self, app)
# Initialize blueprint.
name = 'flask_statics_helper'
static_url_path = '{0}/{1}'.format(app.static_url_path, name)
self.blueprint = Blueprint(name, __name__, template_folder='templates', static_folder='static',
static_url_path=static_url_path)
self.blueprint.add_app_template_global(self.all_variables, '_flask_statics_helper_all_variables')
self.blueprint.add_app_template_global(self.all_resources, '_flask_statics_helper_all_resources')
app.register_blueprint(self.blueprint) | [
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"# Set default Flask config option.",
"app",
".",
"config",
".",
"setdefault",
"(",
"'STATICS_MINIFY'",
",",
"False",
")",
"# Select resources.",
"self",
".",
"all_resources",
"=",
"ALL_RESOURCES_MINIFIED",
"if",... | Initialize the extension. | [
"Initialize",
"the",
"extension",
"."
] | b1771e65225f62b760b3ef841b710ff23ef6f83c | https://github.com/Robpol86/Flask-Statics-Helper/blob/b1771e65225f62b760b3ef841b710ff23ef6f83c/flask_statics/__init__.py#L56-L79 | train | 54,640 |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/calibrate/hv_attenuator.py | measure_board_rms | def measure_board_rms(control_board, n_samples=10, sampling_ms=10,
delay_between_samples_ms=0):
'''
Read RMS voltage samples from control board high-voltage feedback circuit.
'''
try:
results = control_board.measure_impedance(n_samples, sampling_ms,
delay_between_samples_ms,
True, True, [])
except RuntimeError:
# `RuntimeError` may be raised if, for example, current limit was
# reached during measurement. In such cases, return an empty frame.
logger.warning('Error encountered during high-voltage RMS '
'measurement.', exc_info=True)
data = pd.DataFrame(None, columns=['board measured V',
'divider resistor index'])
else:
data = pd.DataFrame({'board measured V': results.V_hv})
data['divider resistor index'] = results.hv_resistor
return data | python | def measure_board_rms(control_board, n_samples=10, sampling_ms=10,
delay_between_samples_ms=0):
'''
Read RMS voltage samples from control board high-voltage feedback circuit.
'''
try:
results = control_board.measure_impedance(n_samples, sampling_ms,
delay_between_samples_ms,
True, True, [])
except RuntimeError:
# `RuntimeError` may be raised if, for example, current limit was
# reached during measurement. In such cases, return an empty frame.
logger.warning('Error encountered during high-voltage RMS '
'measurement.', exc_info=True)
data = pd.DataFrame(None, columns=['board measured V',
'divider resistor index'])
else:
data = pd.DataFrame({'board measured V': results.V_hv})
data['divider resistor index'] = results.hv_resistor
return data | [
"def",
"measure_board_rms",
"(",
"control_board",
",",
"n_samples",
"=",
"10",
",",
"sampling_ms",
"=",
"10",
",",
"delay_between_samples_ms",
"=",
"0",
")",
":",
"try",
":",
"results",
"=",
"control_board",
".",
"measure_impedance",
"(",
"n_samples",
",",
"sa... | Read RMS voltage samples from control board high-voltage feedback circuit. | [
"Read",
"RMS",
"voltage",
"samples",
"from",
"control",
"board",
"high",
"-",
"voltage",
"feedback",
"circuit",
"."
] | 1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/calibrate/hv_attenuator.py#L15-L34 | train | 54,641 |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/calibrate/hv_attenuator.py | find_good | def find_good(control_board, actuation_steps, resistor_index, start_index,
end_index):
'''
Use a binary search over the range of provided actuation_steps to find the
maximum actuation voltage that is measured by the board feedback circuit
using the specified feedback resistor.
'''
lower = start_index
upper = end_index
while lower < upper - 1:
index = lower + (upper - lower) / 2
v = actuation_steps[index]
control_board.set_waveform_voltage(v)
data = measure_board_rms(control_board)
valid_data = data[data['divider resistor index'] >= 0]
if (valid_data['divider resistor index'] < resistor_index).sum():
# We have some measurements from another resistor.
upper = index
else:
lower = index
control_board.set_waveform_voltage(actuation_steps[lower])
data = measure_board_rms(control_board)
return lower, data | python | def find_good(control_board, actuation_steps, resistor_index, start_index,
end_index):
'''
Use a binary search over the range of provided actuation_steps to find the
maximum actuation voltage that is measured by the board feedback circuit
using the specified feedback resistor.
'''
lower = start_index
upper = end_index
while lower < upper - 1:
index = lower + (upper - lower) / 2
v = actuation_steps[index]
control_board.set_waveform_voltage(v)
data = measure_board_rms(control_board)
valid_data = data[data['divider resistor index'] >= 0]
if (valid_data['divider resistor index'] < resistor_index).sum():
# We have some measurements from another resistor.
upper = index
else:
lower = index
control_board.set_waveform_voltage(actuation_steps[lower])
data = measure_board_rms(control_board)
return lower, data | [
"def",
"find_good",
"(",
"control_board",
",",
"actuation_steps",
",",
"resistor_index",
",",
"start_index",
",",
"end_index",
")",
":",
"lower",
"=",
"start_index",
"upper",
"=",
"end_index",
"while",
"lower",
"<",
"upper",
"-",
"1",
":",
"index",
"=",
"low... | Use a binary search over the range of provided actuation_steps to find the
maximum actuation voltage that is measured by the board feedback circuit
using the specified feedback resistor. | [
"Use",
"a",
"binary",
"search",
"over",
"the",
"range",
"of",
"provided",
"actuation_steps",
"to",
"find",
"the",
"maximum",
"actuation",
"voltage",
"that",
"is",
"measured",
"by",
"the",
"board",
"feedback",
"circuit",
"using",
"the",
"specified",
"feedback",
... | 1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/calibrate/hv_attenuator.py#L37-L60 | train | 54,642 |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/calibrate/hv_attenuator.py | resistor_max_actuation_readings | def resistor_max_actuation_readings(control_board, frequencies,
oscope_reading_func):
'''
For each resistor in the high-voltage feedback resistor bank, read the
board measured voltage and the oscilloscope measured voltage for an
actuation voltage that nearly saturates the feedback resistor.
By searching for an actuation voltage near saturation, the signal-to-noise
ratio is minimized.
'''
# Set board amplifier gain to 1.
# __NB__ This is likely _far_ lower than the actual gain _(which may be a
# factor of several hundred)_..
control_board.set_waveform_voltage(0)
control_board.auto_adjust_amplifier_gain = False
control_board.amplifier_gain = 1.
# Set waveform voltage to a low value and obtain the corresponding
# oscilloscope reading to calculate an approximate gain of the amplifier.
target_voltage = 0.1
control_board.set_waveform_voltage(target_voltage)
oscope_rms = oscope_reading_func()
estimated_amplifier_gain = oscope_rms / target_voltage
# Based on the maximum amplified RMS voltage, define a set of actuation
# voltages to search when performing calibration.
max_post_gain_V = 0.8 * control_board.max_waveform_voltage
max_actuation_V = max_post_gain_V / estimated_amplifier_gain
actuation_steps = np.linspace(0.005, max_actuation_V, num=50)
resistor_count = len(control_board.a0_series_resistance)
# Define frequency/resistor index pairs to take measurements at.
conditions = pd.DataFrame([[r, f] for r in range(resistor_count - 1, -1, -1)
for f in frequencies],
columns=['resistor index', 'frequency'])
# Define function to process each frequency/resistor index pair.
def max_actuation_reading(x):
'''
Measure maximum board RMS voltage using specified feedback resistor, at
the specified frequency.
Request corresponding oscilloscope RMS voltage reading.
'''
r = x['resistor index'].values[0]
f = x['frequency'].values[0]
control_board.set_waveform_frequency(f)
actuation_index, data = find_good(control_board, actuation_steps, r, 0,
len(actuation_steps) - 1)
board_measured_rms = data.loc[data['divider resistor index'] >= 0,
'board measured V'].mean()
oscope_rms = oscope_reading_func()
print 'R=%s, f=%s' % (r, f)
return pd.DataFrame([[r, f, actuation_index, board_measured_rms,
oscope_rms]],
columns=['resistor index', 'frequency',
'actuation index', 'board measured V',
'oscope measured V'])
# Return board-measured RMS voltage and oscilloscope-measured RMS voltage
# for each frequency/feedback resistor pair.
return (conditions.groupby(['resistor index', 'frequency'])
.apply(max_actuation_reading).reset_index(drop=True)) | python | def resistor_max_actuation_readings(control_board, frequencies,
oscope_reading_func):
'''
For each resistor in the high-voltage feedback resistor bank, read the
board measured voltage and the oscilloscope measured voltage for an
actuation voltage that nearly saturates the feedback resistor.
By searching for an actuation voltage near saturation, the signal-to-noise
ratio is minimized.
'''
# Set board amplifier gain to 1.
# __NB__ This is likely _far_ lower than the actual gain _(which may be a
# factor of several hundred)_..
control_board.set_waveform_voltage(0)
control_board.auto_adjust_amplifier_gain = False
control_board.amplifier_gain = 1.
# Set waveform voltage to a low value and obtain the corresponding
# oscilloscope reading to calculate an approximate gain of the amplifier.
target_voltage = 0.1
control_board.set_waveform_voltage(target_voltage)
oscope_rms = oscope_reading_func()
estimated_amplifier_gain = oscope_rms / target_voltage
# Based on the maximum amplified RMS voltage, define a set of actuation
# voltages to search when performing calibration.
max_post_gain_V = 0.8 * control_board.max_waveform_voltage
max_actuation_V = max_post_gain_V / estimated_amplifier_gain
actuation_steps = np.linspace(0.005, max_actuation_V, num=50)
resistor_count = len(control_board.a0_series_resistance)
# Define frequency/resistor index pairs to take measurements at.
conditions = pd.DataFrame([[r, f] for r in range(resistor_count - 1, -1, -1)
for f in frequencies],
columns=['resistor index', 'frequency'])
# Define function to process each frequency/resistor index pair.
def max_actuation_reading(x):
'''
Measure maximum board RMS voltage using specified feedback resistor, at
the specified frequency.
Request corresponding oscilloscope RMS voltage reading.
'''
r = x['resistor index'].values[0]
f = x['frequency'].values[0]
control_board.set_waveform_frequency(f)
actuation_index, data = find_good(control_board, actuation_steps, r, 0,
len(actuation_steps) - 1)
board_measured_rms = data.loc[data['divider resistor index'] >= 0,
'board measured V'].mean()
oscope_rms = oscope_reading_func()
print 'R=%s, f=%s' % (r, f)
return pd.DataFrame([[r, f, actuation_index, board_measured_rms,
oscope_rms]],
columns=['resistor index', 'frequency',
'actuation index', 'board measured V',
'oscope measured V'])
# Return board-measured RMS voltage and oscilloscope-measured RMS voltage
# for each frequency/feedback resistor pair.
return (conditions.groupby(['resistor index', 'frequency'])
.apply(max_actuation_reading).reset_index(drop=True)) | [
"def",
"resistor_max_actuation_readings",
"(",
"control_board",
",",
"frequencies",
",",
"oscope_reading_func",
")",
":",
"# Set board amplifier gain to 1.",
"# __NB__ This is likely _far_ lower than the actual gain _(which may be a",
"# factor of several hundred)_..",
"control_board",
"... | For each resistor in the high-voltage feedback resistor bank, read the
board measured voltage and the oscilloscope measured voltage for an
actuation voltage that nearly saturates the feedback resistor.
By searching for an actuation voltage near saturation, the signal-to-noise
ratio is minimized. | [
"For",
"each",
"resistor",
"in",
"the",
"high",
"-",
"voltage",
"feedback",
"resistor",
"bank",
"read",
"the",
"board",
"measured",
"voltage",
"and",
"the",
"oscilloscope",
"measured",
"voltage",
"for",
"an",
"actuation",
"voltage",
"that",
"nearly",
"saturates"... | 1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/calibrate/hv_attenuator.py#L63-L127 | train | 54,643 |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/calibrate/hv_attenuator.py | fit_feedback_params | def fit_feedback_params(calibration, max_resistor_readings):
'''
Fit model of control board high-voltage feedback resistor and
parasitic capacitance values based on measured voltage readings.
'''
R1 = 10e6
# Get transfer function to compute the amplitude of the high-voltage input
# to the control board _(i.e., the output of the amplifier)_ based on the
# attenuated voltage measured by the analog-to-digital converter on the
# control board.
#
# The signature of the transfer function is:
#
# H(V1, R1, C1, R2, C2, f)
#
# See the `z_transfer_functions` function docstring for definitions of the
# parameters based on the control board major version.
def fit_resistor_params(x):
resistor_index = x['resistor index'].values[0]
p0 = [calibration.R_hv[resistor_index],
calibration.C_hv[resistor_index]]
def error(p, df, R1):
v1 = compute_from_transfer_function(calibration.hw_version.major,
'V1',
V2=df['board measured V'],
R1=R1, R2=p[0], C2=p[1],
f=df['frequency'].values)
e = df['oscope measured V'] - v1
return e
p1, success = optimize.leastsq(error, p0, args=(x, R1))
# take the absolute value of the fitted values, since is possible
# for the fit to produce negative resistor and capacitor values
p1 = np.abs(p1)
return pd.DataFrame([p0 + p1.tolist()],
columns=['original R', 'original C',
'fitted R', 'fitted C']).T
results = (max_resistor_readings
[max_resistor_readings['resistor index'] >= 0]
.groupby(['resistor index']).apply(fit_resistor_params))
data = results.unstack()
data.columns = data.columns.droplevel()
return data | python | def fit_feedback_params(calibration, max_resistor_readings):
'''
Fit model of control board high-voltage feedback resistor and
parasitic capacitance values based on measured voltage readings.
'''
R1 = 10e6
# Get transfer function to compute the amplitude of the high-voltage input
# to the control board _(i.e., the output of the amplifier)_ based on the
# attenuated voltage measured by the analog-to-digital converter on the
# control board.
#
# The signature of the transfer function is:
#
# H(V1, R1, C1, R2, C2, f)
#
# See the `z_transfer_functions` function docstring for definitions of the
# parameters based on the control board major version.
def fit_resistor_params(x):
resistor_index = x['resistor index'].values[0]
p0 = [calibration.R_hv[resistor_index],
calibration.C_hv[resistor_index]]
def error(p, df, R1):
v1 = compute_from_transfer_function(calibration.hw_version.major,
'V1',
V2=df['board measured V'],
R1=R1, R2=p[0], C2=p[1],
f=df['frequency'].values)
e = df['oscope measured V'] - v1
return e
p1, success = optimize.leastsq(error, p0, args=(x, R1))
# take the absolute value of the fitted values, since is possible
# for the fit to produce negative resistor and capacitor values
p1 = np.abs(p1)
return pd.DataFrame([p0 + p1.tolist()],
columns=['original R', 'original C',
'fitted R', 'fitted C']).T
results = (max_resistor_readings
[max_resistor_readings['resistor index'] >= 0]
.groupby(['resistor index']).apply(fit_resistor_params))
data = results.unstack()
data.columns = data.columns.droplevel()
return data | [
"def",
"fit_feedback_params",
"(",
"calibration",
",",
"max_resistor_readings",
")",
":",
"R1",
"=",
"10e6",
"# Get transfer function to compute the amplitude of the high-voltage input",
"# to the control board _(i.e., the output of the amplifier)_ based on the",
"# attenuated voltage meas... | Fit model of control board high-voltage feedback resistor and
parasitic capacitance values based on measured voltage readings. | [
"Fit",
"model",
"of",
"control",
"board",
"high",
"-",
"voltage",
"feedback",
"resistor",
"and",
"parasitic",
"capacitance",
"values",
"based",
"on",
"measured",
"voltage",
"readings",
"."
] | 1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/calibrate/hv_attenuator.py#L130-L175 | train | 54,644 |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/calibrate/hv_attenuator.py | update_control_board_calibration | def update_control_board_calibration(control_board, fitted_params):
'''
Update the control board with the specified fitted parameters.
'''
# Update the control board with the new fitted capacitor and resistor
# values for the reference load analog input (channel 0).
control_board.a0_series_resistance = fitted_params['fitted R'].values
control_board.a0_series_capacitance = fitted_params['fitted C'].values | python | def update_control_board_calibration(control_board, fitted_params):
'''
Update the control board with the specified fitted parameters.
'''
# Update the control board with the new fitted capacitor and resistor
# values for the reference load analog input (channel 0).
control_board.a0_series_resistance = fitted_params['fitted R'].values
control_board.a0_series_capacitance = fitted_params['fitted C'].values | [
"def",
"update_control_board_calibration",
"(",
"control_board",
",",
"fitted_params",
")",
":",
"# Update the control board with the new fitted capacitor and resistor",
"# values for the reference load analog input (channel 0).",
"control_board",
".",
"a0_series_resistance",
"=",
"fitte... | Update the control board with the specified fitted parameters. | [
"Update",
"the",
"control",
"board",
"with",
"the",
"specified",
"fitted",
"parameters",
"."
] | 1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/calibrate/hv_attenuator.py#L249-L256 | train | 54,645 |
nimbusproject/dashi | dashi/bootstrap/config.py | Config.load | def load(self):
""" Load each path in order. Remember paths already loaded and only load new ones. """
data = self.dict_class()
for path in self.paths:
if path in self.paths_loaded: continue
try:
with open(path, 'r') as file:
path_data = yaml.load(file.read())
data = dict_merge(data, path_data)
self.paths_loaded.add(path)
except IOError:
# TODO: Log this correctly once logging is implemented
if not path.endswith('.local.yml'):
print 'CONFIG NOT FOUND: %s' % (path)
self.data = data | python | def load(self):
""" Load each path in order. Remember paths already loaded and only load new ones. """
data = self.dict_class()
for path in self.paths:
if path in self.paths_loaded: continue
try:
with open(path, 'r') as file:
path_data = yaml.load(file.read())
data = dict_merge(data, path_data)
self.paths_loaded.add(path)
except IOError:
# TODO: Log this correctly once logging is implemented
if not path.endswith('.local.yml'):
print 'CONFIG NOT FOUND: %s' % (path)
self.data = data | [
"def",
"load",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"dict_class",
"(",
")",
"for",
"path",
"in",
"self",
".",
"paths",
":",
"if",
"path",
"in",
"self",
".",
"paths_loaded",
":",
"continue",
"try",
":",
"with",
"open",
"(",
"path",
",",
... | Load each path in order. Remember paths already loaded and only load new ones. | [
"Load",
"each",
"path",
"in",
"order",
".",
"Remember",
"paths",
"already",
"loaded",
"and",
"only",
"load",
"new",
"ones",
"."
] | 368b3963ec8abd60aebe0f81915429b45cbf4b5a | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/bootstrap/config.py#L29-L46 | train | 54,646 |
VikParuchuri/percept | percept/conf/base.py | Settings._initialize | def _initialize(self, settings_module):
"""
Initialize the settings from a given settings_module
settings_module - path to settings module
"""
#Get the global settings values and assign them as self attributes
self.settings_list = []
for setting in dir(global_settings):
#Only get upper case settings
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
self.settings_list.append(setting)
#If a settings module was passed in, import it, and grab settings from it
#Overwrite global settings with theses
if settings_module is not None:
self.SETTINGS_MODULE = settings_module
#Try to import the settings module
try:
mod = import_module(self.SETTINGS_MODULE)
except ImportError:
error_message = "Could not import settings at {0}".format(self.SETTINGS_MODULE)
log.exception(error_message)
raise ImportError(error_message)
#Grab uppercased settings as set them as self attrs
for setting in dir(mod):
if setting == setting.upper():
if setting == "INSTALLED_APPS":
self.INSTALLED_APPS += getattr(mod, setting)
else:
setattr(self, setting, getattr(mod, setting))
self.settings_list.append(setting)
#If PATH_SETTINGS is in the settings file, extend the system path to include it
if hasattr(self, "PATH_SETTINGS"):
for path in self.PATH_SETTINGS:
sys.path.extend(getattr(self,path))
self.settings_list = list(set(self.settings_list)) | python | def _initialize(self, settings_module):
"""
Initialize the settings from a given settings_module
settings_module - path to settings module
"""
#Get the global settings values and assign them as self attributes
self.settings_list = []
for setting in dir(global_settings):
#Only get upper case settings
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
self.settings_list.append(setting)
#If a settings module was passed in, import it, and grab settings from it
#Overwrite global settings with theses
if settings_module is not None:
self.SETTINGS_MODULE = settings_module
#Try to import the settings module
try:
mod = import_module(self.SETTINGS_MODULE)
except ImportError:
error_message = "Could not import settings at {0}".format(self.SETTINGS_MODULE)
log.exception(error_message)
raise ImportError(error_message)
#Grab uppercased settings as set them as self attrs
for setting in dir(mod):
if setting == setting.upper():
if setting == "INSTALLED_APPS":
self.INSTALLED_APPS += getattr(mod, setting)
else:
setattr(self, setting, getattr(mod, setting))
self.settings_list.append(setting)
#If PATH_SETTINGS is in the settings file, extend the system path to include it
if hasattr(self, "PATH_SETTINGS"):
for path in self.PATH_SETTINGS:
sys.path.extend(getattr(self,path))
self.settings_list = list(set(self.settings_list)) | [
"def",
"_initialize",
"(",
"self",
",",
"settings_module",
")",
":",
"#Get the global settings values and assign them as self attributes",
"self",
".",
"settings_list",
"=",
"[",
"]",
"for",
"setting",
"in",
"dir",
"(",
"global_settings",
")",
":",
"#Only get upper case... | Initialize the settings from a given settings_module
settings_module - path to settings module | [
"Initialize",
"the",
"settings",
"from",
"a",
"given",
"settings_module",
"settings_module",
"-",
"path",
"to",
"settings",
"module"
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/conf/base.py#L20-L60 | train | 54,647 |
VikParuchuri/percept | percept/conf/base.py | Settings._setup | def _setup(self):
"""
Perform initial setup of the settings class, such as getting the settings module and setting the settings
"""
settings_module = None
#Get the settings module from the environment variables
try:
settings_module = os.environ[global_settings.MODULE_VARIABLE]
except KeyError:
error_message = "Settings not properly configured. Cannot find the environment variable {0}".format(global_settings.MODULE_VARIABLE)
log.exception(error_message)
self._initialize(settings_module)
self._configure_logging() | python | def _setup(self):
"""
Perform initial setup of the settings class, such as getting the settings module and setting the settings
"""
settings_module = None
#Get the settings module from the environment variables
try:
settings_module = os.environ[global_settings.MODULE_VARIABLE]
except KeyError:
error_message = "Settings not properly configured. Cannot find the environment variable {0}".format(global_settings.MODULE_VARIABLE)
log.exception(error_message)
self._initialize(settings_module)
self._configure_logging() | [
"def",
"_setup",
"(",
"self",
")",
":",
"settings_module",
"=",
"None",
"#Get the settings module from the environment variables",
"try",
":",
"settings_module",
"=",
"os",
".",
"environ",
"[",
"global_settings",
".",
"MODULE_VARIABLE",
"]",
"except",
"KeyError",
":",... | Perform initial setup of the settings class, such as getting the settings module and setting the settings | [
"Perform",
"initial",
"setup",
"of",
"the",
"settings",
"class",
"such",
"as",
"getting",
"the",
"settings",
"module",
"and",
"setting",
"the",
"settings"
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/conf/base.py#L62-L75 | train | 54,648 |
VikParuchuri/percept | percept/conf/base.py | Settings._configure_logging | def _configure_logging(self):
"""
Setting up logging from logging config in settings
"""
if not self.LOGGING_CONFIG:
#Fallback to default logging in global settings if needed
dictConfig(self.DEFAULT_LOGGING)
else:
dictConfig(self.LOGGING_CONFIG) | python | def _configure_logging(self):
"""
Setting up logging from logging config in settings
"""
if not self.LOGGING_CONFIG:
#Fallback to default logging in global settings if needed
dictConfig(self.DEFAULT_LOGGING)
else:
dictConfig(self.LOGGING_CONFIG) | [
"def",
"_configure_logging",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"LOGGING_CONFIG",
":",
"#Fallback to default logging in global settings if needed",
"dictConfig",
"(",
"self",
".",
"DEFAULT_LOGGING",
")",
"else",
":",
"dictConfig",
"(",
"self",
".",
"LO... | Setting up logging from logging config in settings | [
"Setting",
"up",
"logging",
"from",
"logging",
"config",
"in",
"settings"
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/conf/base.py#L91-L99 | train | 54,649 |
frascoweb/frasco | frasco/actions/context.py | ensure_context | def ensure_context(**vars):
"""Ensures that a context is in the stack, creates one otherwise.
"""
ctx = _context_stack.top
stacked = False
if not ctx:
ctx = Context()
stacked = True
_context_stack.push(ctx)
ctx.update(vars)
try:
yield ctx
finally:
if stacked:
_context_stack.pop() | python | def ensure_context(**vars):
"""Ensures that a context is in the stack, creates one otherwise.
"""
ctx = _context_stack.top
stacked = False
if not ctx:
ctx = Context()
stacked = True
_context_stack.push(ctx)
ctx.update(vars)
try:
yield ctx
finally:
if stacked:
_context_stack.pop() | [
"def",
"ensure_context",
"(",
"*",
"*",
"vars",
")",
":",
"ctx",
"=",
"_context_stack",
".",
"top",
"stacked",
"=",
"False",
"if",
"not",
"ctx",
":",
"ctx",
"=",
"Context",
"(",
")",
"stacked",
"=",
"True",
"_context_stack",
".",
"push",
"(",
"ctx",
... | Ensures that a context is in the stack, creates one otherwise. | [
"Ensures",
"that",
"a",
"context",
"is",
"in",
"the",
"stack",
"creates",
"one",
"otherwise",
"."
] | ea519d69dd5ca6deaf3650175692ee4a1a02518f | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/actions/context.py#L25-L39 | train | 54,650 |
frascoweb/frasco | frasco/actions/context.py | request_context | def request_context(app, request):
"""Creates a Context instance from the given request object
"""
vars = {}
if request.view_args is not None:
vars.update(request.view_args)
vars.update({
"request": request,
"GET": AttrDict(request.args.to_dict()),
"POST" : AttrDict(request.form.to_dict()),
"app": app,
"config": app.config,
"session": session,
"g": g,
"now": datetime.datetime.now,
"utcnow": datetime.datetime.utcnow,
"today": datetime.date.today})
context = Context(vars)
context.vars["current_context"] = context
return context | python | def request_context(app, request):
"""Creates a Context instance from the given request object
"""
vars = {}
if request.view_args is not None:
vars.update(request.view_args)
vars.update({
"request": request,
"GET": AttrDict(request.args.to_dict()),
"POST" : AttrDict(request.form.to_dict()),
"app": app,
"config": app.config,
"session": session,
"g": g,
"now": datetime.datetime.now,
"utcnow": datetime.datetime.utcnow,
"today": datetime.date.today})
context = Context(vars)
context.vars["current_context"] = context
return context | [
"def",
"request_context",
"(",
"app",
",",
"request",
")",
":",
"vars",
"=",
"{",
"}",
"if",
"request",
".",
"view_args",
"is",
"not",
"None",
":",
"vars",
".",
"update",
"(",
"request",
".",
"view_args",
")",
"vars",
".",
"update",
"(",
"{",
"\"requ... | Creates a Context instance from the given request object | [
"Creates",
"a",
"Context",
"instance",
"from",
"the",
"given",
"request",
"object"
] | ea519d69dd5ca6deaf3650175692ee4a1a02518f | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/actions/context.py#L111-L130 | train | 54,651 |
frascoweb/frasco | frasco/actions/context.py | Context.clone | def clone(self, **override_vars):
"""Creates a copy of this context"""
c = Context(self.vars, self.data)
c.executed_actions = set(self.executed_actions)
c.vars.update(override_vars)
return c | python | def clone(self, **override_vars):
"""Creates a copy of this context"""
c = Context(self.vars, self.data)
c.executed_actions = set(self.executed_actions)
c.vars.update(override_vars)
return c | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"override_vars",
")",
":",
"c",
"=",
"Context",
"(",
"self",
".",
"vars",
",",
"self",
".",
"data",
")",
"c",
".",
"executed_actions",
"=",
"set",
"(",
"self",
".",
"executed_actions",
")",
"c",
".",
"var... | Creates a copy of this context | [
"Creates",
"a",
"copy",
"of",
"this",
"context"
] | ea519d69dd5ca6deaf3650175692ee4a1a02518f | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/actions/context.py#L86-L91 | train | 54,652 |
m-weigand/sip_models | lib/sip_models/plot_helper.py | mpl_get_cb_bound_below_plot | def mpl_get_cb_bound_below_plot(ax):
"""
Return the coordinates for a colorbar axes below the provided axes object.
Take into account the changes of the axes due to aspect ratio settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
=======
"""
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
# the colorbar is set to 0.01 width
sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03]
return sizes | python | def mpl_get_cb_bound_below_plot(ax):
"""
Return the coordinates for a colorbar axes below the provided axes object.
Take into account the changes of the axes due to aspect ratio settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
=======
"""
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
# the colorbar is set to 0.01 width
sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03]
return sizes | [
"def",
"mpl_get_cb_bound_below_plot",
"(",
"ax",
")",
":",
"position",
"=",
"ax",
".",
"get_position",
"(",
")",
"figW",
",",
"figH",
"=",
"ax",
".",
"get_figure",
"(",
")",
".",
"get_size_inches",
"(",
")",
"fig_aspect",
"=",
"figH",
"/",
"figW",
"box_a... | Return the coordinates for a colorbar axes below the provided axes object.
Take into account the changes of the axes due to aspect ratio settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
======= | [
"Return",
"the",
"coordinates",
"for",
"a",
"colorbar",
"axes",
"below",
"the",
"provided",
"axes",
"object",
".",
"Take",
"into",
"account",
"the",
"changes",
"of",
"the",
"axes",
"due",
"to",
"aspect",
"ratio",
"settings",
"."
] | 917da5d956215d9df2bf65b24123ba020e3e17c0 | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/plot_helper.py#L129-L154 | train | 54,653 |
jkeyes/python-docraptor | example/basic_xls.py | main | def main():
"""Generate an XLS with specified content."""
table = """<table>
<thead>
<tr><th>First Name</th><th>Last Name</th></tr>
</thead>
<tbody>
<tr><td>Paul</td><td>McGrath</td></tr>
<tr><td>Liam</td><td>Brady</td></tr>
<tr><td>John</td><td>Giles</td></tr>
</tbody>
</table>"""
docraptor = DocRaptor()
print("Create test_basic.xls")
with open("test_basic.xls", "wb") as pdf_file:
pdf_file.write(
docraptor.create(
{"document_content": table, "document_type": "xls", "test": True}
).content
) | python | def main():
"""Generate an XLS with specified content."""
table = """<table>
<thead>
<tr><th>First Name</th><th>Last Name</th></tr>
</thead>
<tbody>
<tr><td>Paul</td><td>McGrath</td></tr>
<tr><td>Liam</td><td>Brady</td></tr>
<tr><td>John</td><td>Giles</td></tr>
</tbody>
</table>"""
docraptor = DocRaptor()
print("Create test_basic.xls")
with open("test_basic.xls", "wb") as pdf_file:
pdf_file.write(
docraptor.create(
{"document_content": table, "document_type": "xls", "test": True}
).content
) | [
"def",
"main",
"(",
")",
":",
"table",
"=",
"\"\"\"<table>\n <thead>\n <tr><th>First Name</th><th>Last Name</th></tr>\n </thead>\n <tbody>\n <tr><td>Paul</td><td>McGrath</td></tr>\n <tr><td>Liam</td><td>Brady</td></tr>\n <tr><td>John</td><td>Giles</td></tr>\n </tbody>\n </ta... | Generate an XLS with specified content. | [
"Generate",
"an",
"XLS",
"with",
"specified",
"content",
"."
] | 4be5b641f92820539b2c42165fec9251a6603dea | https://github.com/jkeyes/python-docraptor/blob/4be5b641f92820539b2c42165fec9251a6603dea/example/basic_xls.py#L5-L25 | train | 54,654 |
mdickinson/refcycle | refcycle/gc_utils.py | restore_gc_state | def restore_gc_state():
"""
Restore the garbage collector state on leaving the with block.
"""
old_isenabled = gc.isenabled()
old_flags = gc.get_debug()
try:
yield
finally:
gc.set_debug(old_flags)
(gc.enable if old_isenabled else gc.disable)() | python | def restore_gc_state():
"""
Restore the garbage collector state on leaving the with block.
"""
old_isenabled = gc.isenabled()
old_flags = gc.get_debug()
try:
yield
finally:
gc.set_debug(old_flags)
(gc.enable if old_isenabled else gc.disable)() | [
"def",
"restore_gc_state",
"(",
")",
":",
"old_isenabled",
"=",
"gc",
".",
"isenabled",
"(",
")",
"old_flags",
"=",
"gc",
".",
"get_debug",
"(",
")",
"try",
":",
"yield",
"finally",
":",
"gc",
".",
"set_debug",
"(",
"old_flags",
")",
"(",
"gc",
".",
... | Restore the garbage collector state on leaving the with block. | [
"Restore",
"the",
"garbage",
"collector",
"state",
"on",
"leaving",
"the",
"with",
"block",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/gc_utils.py#L23-L34 | train | 54,655 |
frascoweb/frasco | frasco/declarative/loaders.py | ViewsLoader.add_view_file_mapping | def add_view_file_mapping(self, pattern, cls):
"""Adds a mapping between a file and a view class.
Pattern can be an extension in the form .EXT or a filename.
"""
if isinstance(pattern, str):
if not pattern.endswith("*"):
_, ext = os.path.splitext(pattern)
self.allowed_extensions.add(ext)
pattern = re.compile("^" + re.escape(pattern).replace("\\*", ".+") + "$", re.I)
self.view_class_files_map.append((pattern, cls)) | python | def add_view_file_mapping(self, pattern, cls):
"""Adds a mapping between a file and a view class.
Pattern can be an extension in the form .EXT or a filename.
"""
if isinstance(pattern, str):
if not pattern.endswith("*"):
_, ext = os.path.splitext(pattern)
self.allowed_extensions.add(ext)
pattern = re.compile("^" + re.escape(pattern).replace("\\*", ".+") + "$", re.I)
self.view_class_files_map.append((pattern, cls)) | [
"def",
"add_view_file_mapping",
"(",
"self",
",",
"pattern",
",",
"cls",
")",
":",
"if",
"isinstance",
"(",
"pattern",
",",
"str",
")",
":",
"if",
"not",
"pattern",
".",
"endswith",
"(",
"\"*\"",
")",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
"... | Adds a mapping between a file and a view class.
Pattern can be an extension in the form .EXT or a filename. | [
"Adds",
"a",
"mapping",
"between",
"a",
"file",
"and",
"a",
"view",
"class",
".",
"Pattern",
"can",
"be",
"an",
"extension",
"in",
"the",
"form",
".",
"EXT",
"or",
"a",
"filename",
"."
] | ea519d69dd5ca6deaf3650175692ee4a1a02518f | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/declarative/loaders.py#L113-L122 | train | 54,656 |
frascoweb/frasco | frasco/declarative/loaders.py | ViewsLoader.get_file_view_cls | def get_file_view_cls(self, filename):
"""Returns the view class associated to a filename
"""
if filename is None:
return self.default_view_class
for pattern, cls in self.view_class_files_map:
if pattern.match(filename):
return cls
return self.default_view_class | python | def get_file_view_cls(self, filename):
"""Returns the view class associated to a filename
"""
if filename is None:
return self.default_view_class
for pattern, cls in self.view_class_files_map:
if pattern.match(filename):
return cls
return self.default_view_class | [
"def",
"get_file_view_cls",
"(",
"self",
",",
"filename",
")",
":",
"if",
"filename",
"is",
"None",
":",
"return",
"self",
".",
"default_view_class",
"for",
"pattern",
",",
"cls",
"in",
"self",
".",
"view_class_files_map",
":",
"if",
"pattern",
".",
"match",... | Returns the view class associated to a filename | [
"Returns",
"the",
"view",
"class",
"associated",
"to",
"a",
"filename"
] | ea519d69dd5ca6deaf3650175692ee4a1a02518f | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/declarative/loaders.py#L179-L187 | train | 54,657 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph.children | def children(self, vertex):
"""
Return the list of immediate children of the given vertex.
"""
return [self.head(edge) for edge in self.out_edges(vertex)] | python | def children(self, vertex):
"""
Return the list of immediate children of the given vertex.
"""
return [self.head(edge) for edge in self.out_edges(vertex)] | [
"def",
"children",
"(",
"self",
",",
"vertex",
")",
":",
"return",
"[",
"self",
".",
"head",
"(",
"edge",
")",
"for",
"edge",
"in",
"self",
".",
"out_edges",
"(",
"vertex",
")",
"]"
] | Return the list of immediate children of the given vertex. | [
"Return",
"the",
"list",
"of",
"immediate",
"children",
"of",
"the",
"given",
"vertex",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L141-L146 | train | 54,658 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph.parents | def parents(self, vertex):
"""
Return the list of immediate parents of this vertex.
"""
return [self.tail(edge) for edge in self.in_edges(vertex)] | python | def parents(self, vertex):
"""
Return the list of immediate parents of this vertex.
"""
return [self.tail(edge) for edge in self.in_edges(vertex)] | [
"def",
"parents",
"(",
"self",
",",
"vertex",
")",
":",
"return",
"[",
"self",
".",
"tail",
"(",
"edge",
")",
"for",
"edge",
"in",
"self",
".",
"in_edges",
"(",
"vertex",
")",
"]"
] | Return the list of immediate parents of this vertex. | [
"Return",
"the",
"list",
"of",
"immediate",
"parents",
"of",
"this",
"vertex",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L148-L153 | train | 54,659 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph.descendants | def descendants(self, start, generations=None):
"""
Return the subgraph of all nodes reachable
from the given start vertex, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
"""
visited = self.vertex_set()
visited.add(start)
to_visit = deque([(start, 0)])
while to_visit:
vertex, depth = to_visit.popleft()
if depth == generations:
continue
for child in self.children(vertex):
if child not in visited:
visited.add(child)
to_visit.append((child, depth+1))
return self.full_subgraph(visited) | python | def descendants(self, start, generations=None):
"""
Return the subgraph of all nodes reachable
from the given start vertex, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
"""
visited = self.vertex_set()
visited.add(start)
to_visit = deque([(start, 0)])
while to_visit:
vertex, depth = to_visit.popleft()
if depth == generations:
continue
for child in self.children(vertex):
if child not in visited:
visited.add(child)
to_visit.append((child, depth+1))
return self.full_subgraph(visited) | [
"def",
"descendants",
"(",
"self",
",",
"start",
",",
"generations",
"=",
"None",
")",
":",
"visited",
"=",
"self",
".",
"vertex_set",
"(",
")",
"visited",
".",
"add",
"(",
"start",
")",
"to_visit",
"=",
"deque",
"(",
"[",
"(",
"start",
",",
"0",
"... | Return the subgraph of all nodes reachable
from the given start vertex, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to. | [
"Return",
"the",
"subgraph",
"of",
"all",
"nodes",
"reachable",
"from",
"the",
"given",
"start",
"vertex",
"including",
"that",
"vertex",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L167-L187 | train | 54,660 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph.ancestors | def ancestors(self, start, generations=None):
"""
Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
"""
visited = self.vertex_set()
visited.add(start)
to_visit = deque([(start, 0)])
while to_visit:
vertex, depth = to_visit.popleft()
if depth == generations:
continue
for parent in self.parents(vertex):
if parent not in visited:
visited.add(parent)
to_visit.append((parent, depth+1))
return self.full_subgraph(visited) | python | def ancestors(self, start, generations=None):
"""
Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
"""
visited = self.vertex_set()
visited.add(start)
to_visit = deque([(start, 0)])
while to_visit:
vertex, depth = to_visit.popleft()
if depth == generations:
continue
for parent in self.parents(vertex):
if parent not in visited:
visited.add(parent)
to_visit.append((parent, depth+1))
return self.full_subgraph(visited) | [
"def",
"ancestors",
"(",
"self",
",",
"start",
",",
"generations",
"=",
"None",
")",
":",
"visited",
"=",
"self",
".",
"vertex_set",
"(",
")",
"visited",
".",
"add",
"(",
"start",
")",
"to_visit",
"=",
"deque",
"(",
"[",
"(",
"start",
",",
"0",
")"... | Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to. | [
"Return",
"the",
"subgraph",
"of",
"all",
"nodes",
"from",
"which",
"the",
"given",
"vertex",
"is",
"reachable",
"including",
"that",
"vertex",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L189-L209 | train | 54,661 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph._component_graph | def _component_graph(self):
"""
Compute the graph of strongly connected components.
Each strongly connected component is itself represented as a list of
pairs, giving information not only about the vertices belonging to
this strongly connected component, but also the edges leading from
this strongly connected component to other components.
Each pair is of the form ('EDGE', v) or ('VERTEX', v) for some vertex
v. In the first case, that indicates that there's an edge from this
strongly connected component to the given vertex v (which will belong
to another component); in the second, it indicates that v is a member
of this strongly connected component.
Each component will begin with a vertex (the *root* vertex of the
strongly connected component); the following edges are edges from that
vertex.
Algorithm is based on that described in "Path-based depth-first search
for strong and biconnected components" by Harold N. Gabow,
Inf.Process.Lett. 74 (2000) 107--114.
"""
sccs = []
stack = []
boundaries = []
identified = self.vertex_set()
index = self.vertex_dict()
to_do = []
def visit_vertex(v):
index[v] = len(stack)
stack.append(('VERTEX', v))
boundaries.append(index[v])
to_do.append((leave_vertex, v))
to_do.extend((visit_edge, w) for w in self.children(v))
def visit_edge(v):
if v in identified:
stack.append(('EDGE', v))
elif v in index:
while index[v] < boundaries[-1]:
boundaries.pop()
else:
to_do.append((visit_vertex, v))
def leave_vertex(v):
if boundaries[-1] == index[v]:
root = boundaries.pop()
scc = stack[root:]
del stack[root:]
for item_type, w in scc:
if item_type == 'VERTEX':
identified.add(w)
del index[w]
sccs.append(scc)
stack.append(('EDGE', v))
# Visit every vertex of the graph.
for v in self.vertices:
if v not in identified:
to_do.append((visit_vertex, v))
while to_do:
operation, v = to_do.pop()
operation(v)
stack.pop()
return sccs | python | def _component_graph(self):
"""
Compute the graph of strongly connected components.
Each strongly connected component is itself represented as a list of
pairs, giving information not only about the vertices belonging to
this strongly connected component, but also the edges leading from
this strongly connected component to other components.
Each pair is of the form ('EDGE', v) or ('VERTEX', v) for some vertex
v. In the first case, that indicates that there's an edge from this
strongly connected component to the given vertex v (which will belong
to another component); in the second, it indicates that v is a member
of this strongly connected component.
Each component will begin with a vertex (the *root* vertex of the
strongly connected component); the following edges are edges from that
vertex.
Algorithm is based on that described in "Path-based depth-first search
for strong and biconnected components" by Harold N. Gabow,
Inf.Process.Lett. 74 (2000) 107--114.
"""
sccs = []
stack = []
boundaries = []
identified = self.vertex_set()
index = self.vertex_dict()
to_do = []
def visit_vertex(v):
index[v] = len(stack)
stack.append(('VERTEX', v))
boundaries.append(index[v])
to_do.append((leave_vertex, v))
to_do.extend((visit_edge, w) for w in self.children(v))
def visit_edge(v):
if v in identified:
stack.append(('EDGE', v))
elif v in index:
while index[v] < boundaries[-1]:
boundaries.pop()
else:
to_do.append((visit_vertex, v))
def leave_vertex(v):
if boundaries[-1] == index[v]:
root = boundaries.pop()
scc = stack[root:]
del stack[root:]
for item_type, w in scc:
if item_type == 'VERTEX':
identified.add(w)
del index[w]
sccs.append(scc)
stack.append(('EDGE', v))
# Visit every vertex of the graph.
for v in self.vertices:
if v not in identified:
to_do.append((visit_vertex, v))
while to_do:
operation, v = to_do.pop()
operation(v)
stack.pop()
return sccs | [
"def",
"_component_graph",
"(",
"self",
")",
":",
"sccs",
"=",
"[",
"]",
"stack",
"=",
"[",
"]",
"boundaries",
"=",
"[",
"]",
"identified",
"=",
"self",
".",
"vertex_set",
"(",
")",
"index",
"=",
"self",
".",
"vertex_dict",
"(",
")",
"to_do",
"=",
... | Compute the graph of strongly connected components.
Each strongly connected component is itself represented as a list of
pairs, giving information not only about the vertices belonging to
this strongly connected component, but also the edges leading from
this strongly connected component to other components.
Each pair is of the form ('EDGE', v) or ('VERTEX', v) for some vertex
v. In the first case, that indicates that there's an edge from this
strongly connected component to the given vertex v (which will belong
to another component); in the second, it indicates that v is a member
of this strongly connected component.
Each component will begin with a vertex (the *root* vertex of the
strongly connected component); the following edges are edges from that
vertex.
Algorithm is based on that described in "Path-based depth-first search
for strong and biconnected components" by Harold N. Gabow,
Inf.Process.Lett. 74 (2000) 107--114. | [
"Compute",
"the",
"graph",
"of",
"strongly",
"connected",
"components",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L290-L358 | train | 54,662 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph.source_components | def source_components(self):
"""
Return the strongly connected components not reachable from any other
component. Any component in the graph is reachable from one of these.
"""
raw_sccs = self._component_graph()
# Construct a dictionary mapping each vertex to the root of its scc.
vertex_to_root = self.vertex_dict()
# And keep track of which SCCs have incoming edges.
non_sources = self.vertex_set()
# Build maps from vertices to roots, and identify the sccs that *are*
# reachable from other components.
for scc in raw_sccs:
root = scc[0][1]
for item_type, w in scc:
if item_type == 'VERTEX':
vertex_to_root[w] = root
elif item_type == 'EDGE':
non_sources.add(vertex_to_root[w])
sccs = []
for raw_scc in raw_sccs:
root = raw_scc[0][1]
if root not in non_sources:
sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX'])
return [self.full_subgraph(scc) for scc in sccs] | python | def source_components(self):
"""
Return the strongly connected components not reachable from any other
component. Any component in the graph is reachable from one of these.
"""
raw_sccs = self._component_graph()
# Construct a dictionary mapping each vertex to the root of its scc.
vertex_to_root = self.vertex_dict()
# And keep track of which SCCs have incoming edges.
non_sources = self.vertex_set()
# Build maps from vertices to roots, and identify the sccs that *are*
# reachable from other components.
for scc in raw_sccs:
root = scc[0][1]
for item_type, w in scc:
if item_type == 'VERTEX':
vertex_to_root[w] = root
elif item_type == 'EDGE':
non_sources.add(vertex_to_root[w])
sccs = []
for raw_scc in raw_sccs:
root = raw_scc[0][1]
if root not in non_sources:
sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX'])
return [self.full_subgraph(scc) for scc in sccs] | [
"def",
"source_components",
"(",
"self",
")",
":",
"raw_sccs",
"=",
"self",
".",
"_component_graph",
"(",
")",
"# Construct a dictionary mapping each vertex to the root of its scc.",
"vertex_to_root",
"=",
"self",
".",
"vertex_dict",
"(",
")",
"# And keep track of which SCC... | Return the strongly connected components not reachable from any other
component. Any component in the graph is reachable from one of these. | [
"Return",
"the",
"strongly",
"connected",
"components",
"not",
"reachable",
"from",
"any",
"other",
"component",
".",
"Any",
"component",
"in",
"the",
"graph",
"is",
"reachable",
"from",
"one",
"of",
"these",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L360-L390 | train | 54,663 |
mdickinson/refcycle | refcycle/i_directed_graph.py | IDirectedGraph.strongly_connected_components | def strongly_connected_components(self):
"""
Return list of strongly connected components of this graph.
Returns a list of subgraphs.
Algorithm is based on that described in "Path-based depth-first search
for strong and biconnected components" by Harold N. Gabow,
Inf.Process.Lett. 74 (2000) 107--114.
"""
raw_sccs = self._component_graph()
sccs = []
for raw_scc in raw_sccs:
sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX'])
return [self.full_subgraph(scc) for scc in sccs] | python | def strongly_connected_components(self):
"""
Return list of strongly connected components of this graph.
Returns a list of subgraphs.
Algorithm is based on that described in "Path-based depth-first search
for strong and biconnected components" by Harold N. Gabow,
Inf.Process.Lett. 74 (2000) 107--114.
"""
raw_sccs = self._component_graph()
sccs = []
for raw_scc in raw_sccs:
sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX'])
return [self.full_subgraph(scc) for scc in sccs] | [
"def",
"strongly_connected_components",
"(",
"self",
")",
":",
"raw_sccs",
"=",
"self",
".",
"_component_graph",
"(",
")",
"sccs",
"=",
"[",
"]",
"for",
"raw_scc",
"in",
"raw_sccs",
":",
"sccs",
".",
"append",
"(",
"[",
"v",
"for",
"vtype",
",",
"v",
"... | Return list of strongly connected components of this graph.
Returns a list of subgraphs.
Algorithm is based on that described in "Path-based depth-first search
for strong and biconnected components" by Harold N. Gabow,
Inf.Process.Lett. 74 (2000) 107--114. | [
"Return",
"list",
"of",
"strongly",
"connected",
"components",
"of",
"this",
"graph",
"."
] | 627fad74c74efc601209c96405f8118cd99b2241 | https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/i_directed_graph.py#L392-L409 | train | 54,664 |
presslabs/django-payu-ro | payu/forms.py | PayULiveUpdateForm.signature | def signature(self):
"""
Compute the ORDER_HASH of the request.
The hashable string is composed by getting the values from:
MERCHANT
ORDER_REF
ORDER_DATE
ORDER_PNAME[]
ORDER_PCODE[]
ORDER_PINFO[]
ORDER_PRICE[]
ORDER_QTY[]
ORDER_VAT[]
ORDER_SHIPPING
PRICES_CURRENCY
DISCOUNT
DESTINATION_CITY
DESTINATION_STATE
DESTINATION_COUNTRY
PAY_METHOD
ORDER_PRICE_TYPE[]
SELECTED_INSTALLMENTS_NO
TESTORDER
in this exact order. Next, we need to concatenate their lenghts with
thier values, resulting in a string like:
8PAYUDEMO9789456123192016-10-05 11:12:279CD Player12MobilePhone6Laptop
10PROD_0489110PROD_0740910PROD_0496527Extended Warranty - 5 Years8
Dual SIM1117"Display482.371945.7545230171311220220220103RON2559
Bucuresti9Bucuresti2RO8CCVISAMC5GROSS5GROSS5GROSS4TRUE
Using this string and the MERCHANT_KEY, we compute the HMAC.
"""
hashable_fields = ['MERCHANT', 'ORDER_REF', 'ORDER_DATE',
'ORDER_SHIPPING', 'PRICES_CURRENCY', 'DISCOUNT',
'DESTINATION_CITY', 'DESTINATION_STATE',
'DESTINATION_COUNTRY', 'PAY_METHOD',
'SELECTED_INSTALLMENTS_NO', 'TESTORDER']
result = text_type()
# We need this hack since payU is not consistent
# with the order of fields in hash string
suffix = text_type()
for field in self:
if field.name == 'ORDER_HASH':
continue
field_value = field.value()
if field.name in hashable_fields and field_value:
encoded_value = text_type('{length}{value}').format(
length=len(text_type(field_value).encode('utf-8')), value=field_value
)
if field.name == 'TESTORDER' or \
field.name == 'SELECTED_INSTALLMENTS_NO':
suffix += encoded_value
else:
result += encoded_value
if field.name == 'ORDER':
for detail in PAYU_ORDER_DETAILS:
if any([detail in order and order[detail]
for order in field_value]):
for order in field_value:
value = order.get(detail, '')
item = text_type('{length}{value}').format(
length=len(text_type(value).encode('utf-8')), value=value
)
if detail == 'PRICE_TYPE':
suffix += item
else:
result += item
result += suffix
result = result.encode('utf-8')
return hmac.new(PAYU_MERCHANT_KEY, result).hexdigest() | python | def signature(self):
"""
Compute the ORDER_HASH of the request.
The hashable string is composed by getting the values from:
MERCHANT
ORDER_REF
ORDER_DATE
ORDER_PNAME[]
ORDER_PCODE[]
ORDER_PINFO[]
ORDER_PRICE[]
ORDER_QTY[]
ORDER_VAT[]
ORDER_SHIPPING
PRICES_CURRENCY
DISCOUNT
DESTINATION_CITY
DESTINATION_STATE
DESTINATION_COUNTRY
PAY_METHOD
ORDER_PRICE_TYPE[]
SELECTED_INSTALLMENTS_NO
TESTORDER
in this exact order. Next, we need to concatenate their lenghts with
thier values, resulting in a string like:
8PAYUDEMO9789456123192016-10-05 11:12:279CD Player12MobilePhone6Laptop
10PROD_0489110PROD_0740910PROD_0496527Extended Warranty - 5 Years8
Dual SIM1117"Display482.371945.7545230171311220220220103RON2559
Bucuresti9Bucuresti2RO8CCVISAMC5GROSS5GROSS5GROSS4TRUE
Using this string and the MERCHANT_KEY, we compute the HMAC.
"""
hashable_fields = ['MERCHANT', 'ORDER_REF', 'ORDER_DATE',
'ORDER_SHIPPING', 'PRICES_CURRENCY', 'DISCOUNT',
'DESTINATION_CITY', 'DESTINATION_STATE',
'DESTINATION_COUNTRY', 'PAY_METHOD',
'SELECTED_INSTALLMENTS_NO', 'TESTORDER']
result = text_type()
# We need this hack since payU is not consistent
# with the order of fields in hash string
suffix = text_type()
for field in self:
if field.name == 'ORDER_HASH':
continue
field_value = field.value()
if field.name in hashable_fields and field_value:
encoded_value = text_type('{length}{value}').format(
length=len(text_type(field_value).encode('utf-8')), value=field_value
)
if field.name == 'TESTORDER' or \
field.name == 'SELECTED_INSTALLMENTS_NO':
suffix += encoded_value
else:
result += encoded_value
if field.name == 'ORDER':
for detail in PAYU_ORDER_DETAILS:
if any([detail in order and order[detail]
for order in field_value]):
for order in field_value:
value = order.get(detail, '')
item = text_type('{length}{value}').format(
length=len(text_type(value).encode('utf-8')), value=value
)
if detail == 'PRICE_TYPE':
suffix += item
else:
result += item
result += suffix
result = result.encode('utf-8')
return hmac.new(PAYU_MERCHANT_KEY, result).hexdigest() | [
"def",
"signature",
"(",
"self",
")",
":",
"hashable_fields",
"=",
"[",
"'MERCHANT'",
",",
"'ORDER_REF'",
",",
"'ORDER_DATE'",
",",
"'ORDER_SHIPPING'",
",",
"'PRICES_CURRENCY'",
",",
"'DISCOUNT'",
",",
"'DESTINATION_CITY'",
",",
"'DESTINATION_STATE'",
",",
"'DESTINA... | Compute the ORDER_HASH of the request.
The hashable string is composed by getting the values from:
MERCHANT
ORDER_REF
ORDER_DATE
ORDER_PNAME[]
ORDER_PCODE[]
ORDER_PINFO[]
ORDER_PRICE[]
ORDER_QTY[]
ORDER_VAT[]
ORDER_SHIPPING
PRICES_CURRENCY
DISCOUNT
DESTINATION_CITY
DESTINATION_STATE
DESTINATION_COUNTRY
PAY_METHOD
ORDER_PRICE_TYPE[]
SELECTED_INSTALLMENTS_NO
TESTORDER
in this exact order. Next, we need to concatenate their lenghts with
thier values, resulting in a string like:
8PAYUDEMO9789456123192016-10-05 11:12:279CD Player12MobilePhone6Laptop
10PROD_0489110PROD_0740910PROD_0496527Extended Warranty - 5 Years8
Dual SIM1117"Display482.371945.7545230171311220220220103RON2559
Bucuresti9Bucuresti2RO8CCVISAMC5GROSS5GROSS5GROSS4TRUE
Using this string and the MERCHANT_KEY, we compute the HMAC. | [
"Compute",
"the",
"ORDER_HASH",
"of",
"the",
"request",
"."
] | fda9432c57cee7a78789873b6e6f439d912e4c7b | https://github.com/presslabs/django-payu-ro/blob/fda9432c57cee7a78789873b6e6f439d912e4c7b/payu/forms.py#L143-L224 | train | 54,665 |
presslabs/django-payu-ro | payu/forms.py | PayULiveUpdateForm._prepare_orders | def _prepare_orders(self, orders):
"""
Each order needs to have all it's details filled with default value,
or None, in case those are not already filled.
"""
for detail in PAYU_ORDER_DETAILS:
if not any([detail in order for order in orders]):
for order in orders:
order[detail] = PAYU_ORDER_DETAILS_DEFAULTS.get(detail, None)
return orders | python | def _prepare_orders(self, orders):
"""
Each order needs to have all it's details filled with default value,
or None, in case those are not already filled.
"""
for detail in PAYU_ORDER_DETAILS:
if not any([detail in order for order in orders]):
for order in orders:
order[detail] = PAYU_ORDER_DETAILS_DEFAULTS.get(detail, None)
return orders | [
"def",
"_prepare_orders",
"(",
"self",
",",
"orders",
")",
":",
"for",
"detail",
"in",
"PAYU_ORDER_DETAILS",
":",
"if",
"not",
"any",
"(",
"[",
"detail",
"in",
"order",
"for",
"order",
"in",
"orders",
"]",
")",
":",
"for",
"order",
"in",
"orders",
":",... | Each order needs to have all it's details filled with default value,
or None, in case those are not already filled. | [
"Each",
"order",
"needs",
"to",
"have",
"all",
"it",
"s",
"details",
"filled",
"with",
"default",
"value",
"or",
"None",
"in",
"case",
"those",
"are",
"not",
"already",
"filled",
"."
] | fda9432c57cee7a78789873b6e6f439d912e4c7b | https://github.com/presslabs/django-payu-ro/blob/fda9432c57cee7a78789873b6e6f439d912e4c7b/payu/forms.py#L226-L237 | train | 54,666 |
WhyNotHugo/django-renderpdf | django_renderpdf/helpers.py | staticfiles_url_fetcher | def staticfiles_url_fetcher(url):
"""
Returns the file matching url.
This method will handle any URL resources that rendering HTML requires
(eg: images pointed my ``img`` tags, stylesheets, etc).
The default behaviour will fetch any http(s) files normally, and will
also attempt to resolve staticfiles internally (this should mostly
affect development scenarios, but also works if static files are served
under a relative url).
Returns a dictionary with two entries: ``string``, which is the
resources data as a string and ``mime_type``, which is the identified
mime type for the resource.
"""
if url.startswith('/'):
base_url = staticfiles_storage.base_url
filename = url.replace(base_url, '', 1)
path = finders.find(filename)
if path:
# This should match most cases. Manifest static files with relative
# URLs will only be picked up in DEBUG mode here.
with open(path, 'rb') as f:
data = f.read()
else:
# This should just match things like Manifest static files with
# relative URLs. While this code path will expect `collectstatic`
# to have run, it should only be reached on if DEBUG = False.
# XXX: Only Django >= 2.0 supports using this as a context manager:
f = staticfiles_storage.open(filename)
data = f.read()
f.close()
return {
'string': data,
'mime_type': mimetypes.guess_type(url)[0],
}
else:
return default_url_fetcher(url) | python | def staticfiles_url_fetcher(url):
"""
Returns the file matching url.
This method will handle any URL resources that rendering HTML requires
(eg: images pointed my ``img`` tags, stylesheets, etc).
The default behaviour will fetch any http(s) files normally, and will
also attempt to resolve staticfiles internally (this should mostly
affect development scenarios, but also works if static files are served
under a relative url).
Returns a dictionary with two entries: ``string``, which is the
resources data as a string and ``mime_type``, which is the identified
mime type for the resource.
"""
if url.startswith('/'):
base_url = staticfiles_storage.base_url
filename = url.replace(base_url, '', 1)
path = finders.find(filename)
if path:
# This should match most cases. Manifest static files with relative
# URLs will only be picked up in DEBUG mode here.
with open(path, 'rb') as f:
data = f.read()
else:
# This should just match things like Manifest static files with
# relative URLs. While this code path will expect `collectstatic`
# to have run, it should only be reached on if DEBUG = False.
# XXX: Only Django >= 2.0 supports using this as a context manager:
f = staticfiles_storage.open(filename)
data = f.read()
f.close()
return {
'string': data,
'mime_type': mimetypes.guess_type(url)[0],
}
else:
return default_url_fetcher(url) | [
"def",
"staticfiles_url_fetcher",
"(",
"url",
")",
":",
"if",
"url",
".",
"startswith",
"(",
"'/'",
")",
":",
"base_url",
"=",
"staticfiles_storage",
".",
"base_url",
"filename",
"=",
"url",
".",
"replace",
"(",
"base_url",
",",
"''",
",",
"1",
")",
"pat... | Returns the file matching url.
This method will handle any URL resources that rendering HTML requires
(eg: images pointed my ``img`` tags, stylesheets, etc).
The default behaviour will fetch any http(s) files normally, and will
also attempt to resolve staticfiles internally (this should mostly
affect development scenarios, but also works if static files are served
under a relative url).
Returns a dictionary with two entries: ``string``, which is the
resources data as a string and ``mime_type``, which is the identified
mime type for the resource. | [
"Returns",
"the",
"file",
"matching",
"url",
"."
] | 56de11326e61d317b5eb08c340790ef9955778e3 | https://github.com/WhyNotHugo/django-renderpdf/blob/56de11326e61d317b5eb08c340790ef9955778e3/django_renderpdf/helpers.py#L9-L50 | train | 54,667 |
WhyNotHugo/django-renderpdf | django_renderpdf/helpers.py | render_pdf | def render_pdf(
template,
file_,
url_fetcher=staticfiles_url_fetcher,
context=None,
):
"""
Writes the PDF data into ``file_``. Note that ``file_`` can actually be a
Django Response object as well.
This function may be used as a helper that can be used to save a PDF file
to a file (or anything else outside of a request/response cycle), eg::
:param str html: A rendered HTML.
:param file file_: A file like object (or a Response) where to output
the rendered PDF.
"""
context = context or {}
html = get_template(template).render(context)
HTML(
string=html,
base_url='not-used://',
url_fetcher=url_fetcher,
).write_pdf(
target=file_,
) | python | def render_pdf(
template,
file_,
url_fetcher=staticfiles_url_fetcher,
context=None,
):
"""
Writes the PDF data into ``file_``. Note that ``file_`` can actually be a
Django Response object as well.
This function may be used as a helper that can be used to save a PDF file
to a file (or anything else outside of a request/response cycle), eg::
:param str html: A rendered HTML.
:param file file_: A file like object (or a Response) where to output
the rendered PDF.
"""
context = context or {}
html = get_template(template).render(context)
HTML(
string=html,
base_url='not-used://',
url_fetcher=url_fetcher,
).write_pdf(
target=file_,
) | [
"def",
"render_pdf",
"(",
"template",
",",
"file_",
",",
"url_fetcher",
"=",
"staticfiles_url_fetcher",
",",
"context",
"=",
"None",
",",
")",
":",
"context",
"=",
"context",
"or",
"{",
"}",
"html",
"=",
"get_template",
"(",
"template",
")",
".",
"render",... | Writes the PDF data into ``file_``. Note that ``file_`` can actually be a
Django Response object as well.
This function may be used as a helper that can be used to save a PDF file
to a file (or anything else outside of a request/response cycle), eg::
:param str html: A rendered HTML.
:param file file_: A file like object (or a Response) where to output
the rendered PDF. | [
"Writes",
"the",
"PDF",
"data",
"into",
"file_",
".",
"Note",
"that",
"file_",
"can",
"actually",
"be",
"a",
"Django",
"Response",
"object",
"as",
"well",
"."
] | 56de11326e61d317b5eb08c340790ef9955778e3 | https://github.com/WhyNotHugo/django-renderpdf/blob/56de11326e61d317b5eb08c340790ef9955778e3/django_renderpdf/helpers.py#L53-L79 | train | 54,668 |
kcallin/mqtt-codec | mqtt_codec/io.py | encode_bytes | def encode_bytes(src_buf, dst_file):
"""Encode a buffer length followed by the bytes of the buffer
itself.
Parameters
----------
src_buf: bytes
Source bytes to be encoded. Function asserts that
0 <= len(src_buf) <= 2**16-1.
dst_file: file
File-like object with write method.
Returns
-------
int
Number of bytes written to `dst_file`.
"""
if not isinstance(src_buf, bytes):
raise TypeError('src_buf must by bytes.')
len_src_buf = len(src_buf)
assert 0 <= len_src_buf <= 2**16-1
num_written_bytes = len_src_buf + 2
len_buf = FIELD_U16.pack(len_src_buf)
dst_file.write(len_buf)
dst_file.write(src_buf)
return num_written_bytes | python | def encode_bytes(src_buf, dst_file):
"""Encode a buffer length followed by the bytes of the buffer
itself.
Parameters
----------
src_buf: bytes
Source bytes to be encoded. Function asserts that
0 <= len(src_buf) <= 2**16-1.
dst_file: file
File-like object with write method.
Returns
-------
int
Number of bytes written to `dst_file`.
"""
if not isinstance(src_buf, bytes):
raise TypeError('src_buf must by bytes.')
len_src_buf = len(src_buf)
assert 0 <= len_src_buf <= 2**16-1
num_written_bytes = len_src_buf + 2
len_buf = FIELD_U16.pack(len_src_buf)
dst_file.write(len_buf)
dst_file.write(src_buf)
return num_written_bytes | [
"def",
"encode_bytes",
"(",
"src_buf",
",",
"dst_file",
")",
":",
"if",
"not",
"isinstance",
"(",
"src_buf",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"'src_buf must by bytes.'",
")",
"len_src_buf",
"=",
"len",
"(",
"src_buf",
")",
"assert",
"0",
... | Encode a buffer length followed by the bytes of the buffer
itself.
Parameters
----------
src_buf: bytes
Source bytes to be encoded. Function asserts that
0 <= len(src_buf) <= 2**16-1.
dst_file: file
File-like object with write method.
Returns
-------
int
Number of bytes written to `dst_file`. | [
"Encode",
"a",
"buffer",
"length",
"followed",
"by",
"the",
"bytes",
"of",
"the",
"buffer",
"itself",
"."
] | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L14-L42 | train | 54,669 |
kcallin/mqtt-codec | mqtt_codec/io.py | decode_bytes | def decode_bytes(f):
"""Decode a buffer length from a 2-byte unsigned int then read the
subsequent bytes.
Parameters
----------
f: file
File-like object with read method.
Raises
------
UnderflowDecodeError
When the end of stream is encountered before the end of the
encoded bytes.
Returns
-------
int
Number of bytes read from `f`.
bytes
Value bytes decoded from `f`.
"""
buf = f.read(FIELD_U16.size)
if len(buf) < FIELD_U16.size:
raise UnderflowDecodeError()
(num_bytes,) = FIELD_U16.unpack_from(buf)
num_bytes_consumed = FIELD_U16.size + num_bytes
buf = f.read(num_bytes)
if len(buf) < num_bytes:
raise UnderflowDecodeError()
return num_bytes_consumed, buf | python | def decode_bytes(f):
"""Decode a buffer length from a 2-byte unsigned int then read the
subsequent bytes.
Parameters
----------
f: file
File-like object with read method.
Raises
------
UnderflowDecodeError
When the end of stream is encountered before the end of the
encoded bytes.
Returns
-------
int
Number of bytes read from `f`.
bytes
Value bytes decoded from `f`.
"""
buf = f.read(FIELD_U16.size)
if len(buf) < FIELD_U16.size:
raise UnderflowDecodeError()
(num_bytes,) = FIELD_U16.unpack_from(buf)
num_bytes_consumed = FIELD_U16.size + num_bytes
buf = f.read(num_bytes)
if len(buf) < num_bytes:
raise UnderflowDecodeError()
return num_bytes_consumed, buf | [
"def",
"decode_bytes",
"(",
"f",
")",
":",
"buf",
"=",
"f",
".",
"read",
"(",
"FIELD_U16",
".",
"size",
")",
"if",
"len",
"(",
"buf",
")",
"<",
"FIELD_U16",
".",
"size",
":",
"raise",
"UnderflowDecodeError",
"(",
")",
"(",
"num_bytes",
",",
")",
"=... | Decode a buffer length from a 2-byte unsigned int then read the
subsequent bytes.
Parameters
----------
f: file
File-like object with read method.
Raises
------
UnderflowDecodeError
When the end of stream is encountered before the end of the
encoded bytes.
Returns
-------
int
Number of bytes read from `f`.
bytes
Value bytes decoded from `f`. | [
"Decode",
"a",
"buffer",
"length",
"from",
"a",
"2",
"-",
"byte",
"unsigned",
"int",
"then",
"read",
"the",
"subsequent",
"bytes",
"."
] | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L45-L79 | train | 54,670 |
kcallin/mqtt-codec | mqtt_codec/io.py | encode_utf8 | def encode_utf8(s, f):
"""UTF-8 encodes string `s` to file-like object `f` according to
the MQTT Version 3.1.1 specification in section 1.5.3.
The maximum length for the encoded string is 2**16-1 (65535) bytes.
An assertion error will result if the encoded string is longer.
Parameters
----------
s: str
String to be encoded.
f: file
File-like object.
Returns
-------
int
Number of bytes written to f.
"""
encode = codecs.getencoder('utf8')
encoded_str_bytes, num_encoded_chars = encode(s)
num_encoded_str_bytes = len(encoded_str_bytes)
assert 0 <= num_encoded_str_bytes <= 2**16-1
num_encoded_bytes = num_encoded_str_bytes + 2
f.write(FIELD_U8.pack((num_encoded_str_bytes & 0xff00) >> 8))
f.write(FIELD_U8.pack(num_encoded_str_bytes & 0x00ff))
f.write(encoded_str_bytes)
return num_encoded_bytes | python | def encode_utf8(s, f):
"""UTF-8 encodes string `s` to file-like object `f` according to
the MQTT Version 3.1.1 specification in section 1.5.3.
The maximum length for the encoded string is 2**16-1 (65535) bytes.
An assertion error will result if the encoded string is longer.
Parameters
----------
s: str
String to be encoded.
f: file
File-like object.
Returns
-------
int
Number of bytes written to f.
"""
encode = codecs.getencoder('utf8')
encoded_str_bytes, num_encoded_chars = encode(s)
num_encoded_str_bytes = len(encoded_str_bytes)
assert 0 <= num_encoded_str_bytes <= 2**16-1
num_encoded_bytes = num_encoded_str_bytes + 2
f.write(FIELD_U8.pack((num_encoded_str_bytes & 0xff00) >> 8))
f.write(FIELD_U8.pack(num_encoded_str_bytes & 0x00ff))
f.write(encoded_str_bytes)
return num_encoded_bytes | [
"def",
"encode_utf8",
"(",
"s",
",",
"f",
")",
":",
"encode",
"=",
"codecs",
".",
"getencoder",
"(",
"'utf8'",
")",
"encoded_str_bytes",
",",
"num_encoded_chars",
"=",
"encode",
"(",
"s",
")",
"num_encoded_str_bytes",
"=",
"len",
"(",
"encoded_str_bytes",
")... | UTF-8 encodes string `s` to file-like object `f` according to
the MQTT Version 3.1.1 specification in section 1.5.3.
The maximum length for the encoded string is 2**16-1 (65535) bytes.
An assertion error will result if the encoded string is longer.
Parameters
----------
s: str
String to be encoded.
f: file
File-like object.
Returns
-------
int
Number of bytes written to f. | [
"UTF",
"-",
"8",
"encodes",
"string",
"s",
"to",
"file",
"-",
"like",
"object",
"f",
"according",
"to",
"the",
"MQTT",
"Version",
"3",
".",
"1",
".",
"1",
"specification",
"in",
"section",
"1",
".",
"5",
".",
"3",
"."
] | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L82-L112 | train | 54,671 |
kcallin/mqtt-codec | mqtt_codec/io.py | encode_varint | def encode_varint(v, f):
"""Encode integer `v` to file `f`.
Parameters
----------
v: int
Integer v >= 0.
f: file
Object containing a write method.
Returns
-------
int
Number of bytes written.
"""
assert v >= 0
num_bytes = 0
while True:
b = v % 0x80
v = v // 0x80
if v > 0:
b = b | 0x80
f.write(FIELD_U8.pack(b))
num_bytes += 1
if v == 0:
break
return num_bytes | python | def encode_varint(v, f):
"""Encode integer `v` to file `f`.
Parameters
----------
v: int
Integer v >= 0.
f: file
Object containing a write method.
Returns
-------
int
Number of bytes written.
"""
assert v >= 0
num_bytes = 0
while True:
b = v % 0x80
v = v // 0x80
if v > 0:
b = b | 0x80
f.write(FIELD_U8.pack(b))
num_bytes += 1
if v == 0:
break
return num_bytes | [
"def",
"encode_varint",
"(",
"v",
",",
"f",
")",
":",
"assert",
"v",
">=",
"0",
"num_bytes",
"=",
"0",
"while",
"True",
":",
"b",
"=",
"v",
"%",
"0x80",
"v",
"=",
"v",
"//",
"0x80",
"if",
"v",
">",
"0",
":",
"b",
"=",
"b",
"|",
"0x80",
"f",... | Encode integer `v` to file `f`.
Parameters
----------
v: int
Integer v >= 0.
f: file
Object containing a write method.
Returns
-------
int
Number of bytes written. | [
"Encode",
"integer",
"v",
"to",
"file",
"f",
"."
] | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L161-L192 | train | 54,672 |
kcallin/mqtt-codec | mqtt_codec/io.py | FileDecoder.unpack | def unpack(self, struct):
"""Read as many bytes as are required to extract struct then
unpack and return a tuple of the values.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to extract the bytes.
Parameters
----------
struct: struct.Struct
Returns
-------
tuple
Tuple of extracted values.
"""
v = struct.unpack(self.read(struct.size))
return v | python | def unpack(self, struct):
"""Read as many bytes as are required to extract struct then
unpack and return a tuple of the values.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to extract the bytes.
Parameters
----------
struct: struct.Struct
Returns
-------
tuple
Tuple of extracted values.
"""
v = struct.unpack(self.read(struct.size))
return v | [
"def",
"unpack",
"(",
"self",
",",
"struct",
")",
":",
"v",
"=",
"struct",
".",
"unpack",
"(",
"self",
".",
"read",
"(",
"struct",
".",
"size",
")",
")",
"return",
"v"
] | Read as many bytes as are required to extract struct then
unpack and return a tuple of the values.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to extract the bytes.
Parameters
----------
struct: struct.Struct
Returns
-------
tuple
Tuple of extracted values. | [
"Read",
"as",
"many",
"bytes",
"as",
"are",
"required",
"to",
"extract",
"struct",
"then",
"unpack",
"and",
"return",
"a",
"tuple",
"of",
"the",
"values",
"."
] | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L295-L315 | train | 54,673 |
kcallin/mqtt-codec | mqtt_codec/io.py | FileDecoder.unpack_bytes | def unpack_bytes(self):
"""Unpack a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Returns
-------
int
Number of bytes consumed
bytes
A bytes object extracted from the underlying stream.
"""
num_bytes_consumed, b = decode_bytes(self.__f)
self.__num_bytes_consumed += num_bytes_consumed
return num_bytes_consumed, b | python | def unpack_bytes(self):
"""Unpack a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Returns
-------
int
Number of bytes consumed
bytes
A bytes object extracted from the underlying stream.
"""
num_bytes_consumed, b = decode_bytes(self.__f)
self.__num_bytes_consumed += num_bytes_consumed
return num_bytes_consumed, b | [
"def",
"unpack_bytes",
"(",
"self",
")",
":",
"num_bytes_consumed",
",",
"b",
"=",
"decode_bytes",
"(",
"self",
".",
"__f",
")",
"self",
".",
"__num_bytes_consumed",
"+=",
"num_bytes_consumed",
"return",
"num_bytes_consumed",
",",
"b"
] | Unpack a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Returns
-------
int
Number of bytes consumed
bytes
A bytes object extracted from the underlying stream. | [
"Unpack",
"a",
"utf",
"-",
"8",
"string",
"encoded",
"as",
"described",
"in",
"MQTT",
"Version",
"3",
".",
"1",
".",
"1",
"section",
"1",
".",
"5",
".",
"3",
"line",
"177",
".",
"This",
"is",
"a",
"16",
"-",
"bit",
"unsigned",
"length",
"followed",... | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L341-L355 | train | 54,674 |
kcallin/mqtt-codec | mqtt_codec/io.py | FileDecoder.read | def read(self, num_bytes):
"""Read `num_bytes` and return them.
Parameters
----------
num_bytes : int
Number of bytes to extract from the underlying stream.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to extract the bytes.
Returns
-------
bytes
A bytes object extracted from underlying stream.
"""
buf = self.__f.read(num_bytes)
assert len(buf) <= num_bytes
if len(buf) < num_bytes:
raise UnderflowDecodeError()
self.__num_bytes_consumed += num_bytes
return buf | python | def read(self, num_bytes):
"""Read `num_bytes` and return them.
Parameters
----------
num_bytes : int
Number of bytes to extract from the underlying stream.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to extract the bytes.
Returns
-------
bytes
A bytes object extracted from underlying stream.
"""
buf = self.__f.read(num_bytes)
assert len(buf) <= num_bytes
if len(buf) < num_bytes:
raise UnderflowDecodeError()
self.__num_bytes_consumed += num_bytes
return buf | [
"def",
"read",
"(",
"self",
",",
"num_bytes",
")",
":",
"buf",
"=",
"self",
".",
"__f",
".",
"read",
"(",
"num_bytes",
")",
"assert",
"len",
"(",
"buf",
")",
"<=",
"num_bytes",
"if",
"len",
"(",
"buf",
")",
"<",
"num_bytes",
":",
"raise",
"Underflo... | Read `num_bytes` and return them.
Parameters
----------
num_bytes : int
Number of bytes to extract from the underlying stream.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to extract the bytes.
Returns
-------
bytes
A bytes object extracted from underlying stream. | [
"Read",
"num_bytes",
"and",
"return",
"them",
"."
] | 0f754250cc3f44f4376777e7e8b3676c5a4d413a | https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L388-L413 | train | 54,675 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.timeout | def timeout(self, value):
'''
Specifies a timeout on the search query
'''
if not self.params:
self.params = dict(timeout=value)
return self
self.params['timeout'] = value
return self | python | def timeout(self, value):
'''
Specifies a timeout on the search query
'''
if not self.params:
self.params = dict(timeout=value)
return self
self.params['timeout'] = value
return self | [
"def",
"timeout",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"params",
":",
"self",
".",
"params",
"=",
"dict",
"(",
"timeout",
"=",
"value",
")",
"return",
"self",
"self",
".",
"params",
"[",
"'timeout'",
"]",
"=",
"value",
"re... | Specifies a timeout on the search query | [
"Specifies",
"a",
"timeout",
"on",
"the",
"search",
"query"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L26-L34 | train | 54,676 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.filtered | def filtered(self, efilter):
'''
Applies a filter to the search
'''
if not self.params:
self.params={'filter' : efilter}
return self
if not self.params.has_key('filter'):
self.params['filter'] = efilter
return self
self.params['filter'].update(efilter)
return self | python | def filtered(self, efilter):
'''
Applies a filter to the search
'''
if not self.params:
self.params={'filter' : efilter}
return self
if not self.params.has_key('filter'):
self.params['filter'] = efilter
return self
self.params['filter'].update(efilter)
return self | [
"def",
"filtered",
"(",
"self",
",",
"efilter",
")",
":",
"if",
"not",
"self",
".",
"params",
":",
"self",
".",
"params",
"=",
"{",
"'filter'",
":",
"efilter",
"}",
"return",
"self",
"if",
"not",
"self",
".",
"params",
".",
"has_key",
"(",
"'filter'"... | Applies a filter to the search | [
"Applies",
"a",
"filter",
"to",
"the",
"search"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L37-L48 | train | 54,677 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.size | def size(self,value):
'''
The number of hits to return. Defaults to 10
'''
if not self.params:
self.params = dict(size=value)
return self
self.params['size'] = value
return self | python | def size(self,value):
'''
The number of hits to return. Defaults to 10
'''
if not self.params:
self.params = dict(size=value)
return self
self.params['size'] = value
return self | [
"def",
"size",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"params",
":",
"self",
".",
"params",
"=",
"dict",
"(",
"size",
"=",
"value",
")",
"return",
"self",
"self",
".",
"params",
"[",
"'size'",
"]",
"=",
"value",
"return",
... | The number of hits to return. Defaults to 10 | [
"The",
"number",
"of",
"hits",
"to",
"return",
".",
"Defaults",
"to",
"10"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L50-L58 | train | 54,678 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.from_offset | def from_offset(self, value):
'''
The starting from index of the hits to return. Defaults to 0.
'''
if not self.params:
self.params = dict({'from':value})
return self
self.params['from'] = value
return self | python | def from_offset(self, value):
'''
The starting from index of the hits to return. Defaults to 0.
'''
if not self.params:
self.params = dict({'from':value})
return self
self.params['from'] = value
return self | [
"def",
"from_offset",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"params",
":",
"self",
".",
"params",
"=",
"dict",
"(",
"{",
"'from'",
":",
"value",
"}",
")",
"return",
"self",
"self",
".",
"params",
"[",
"'from'",
"]",
"=",
... | The starting from index of the hits to return. Defaults to 0. | [
"The",
"starting",
"from",
"index",
"of",
"the",
"hits",
"to",
"return",
".",
"Defaults",
"to",
"0",
"."
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L60-L68 | train | 54,679 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.sorted | def sorted(self, fsort):
'''
Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for _score to sort by score.
'''
if not self.params:
self.params = dict()
self.params['sort'] = fsort
return self | python | def sorted(self, fsort):
'''
Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for _score to sort by score.
'''
if not self.params:
self.params = dict()
self.params['sort'] = fsort
return self | [
"def",
"sorted",
"(",
"self",
",",
"fsort",
")",
":",
"if",
"not",
"self",
".",
"params",
":",
"self",
".",
"params",
"=",
"dict",
"(",
")",
"self",
".",
"params",
"[",
"'sort'",
"]",
"=",
"fsort",
"return",
"self"
] | Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for _score to sort by score. | [
"Allows",
"to",
"add",
"one",
"or",
"more",
"sort",
"on",
"specific",
"fields",
".",
"Each",
"sort",
"can",
"be",
"reversed",
"as",
"well",
".",
"The",
"sort",
"is",
"defined",
"on",
"a",
"per",
"field",
"level",
"with",
"special",
"field",
"name",
"fo... | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L87-L95 | train | 54,680 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.doc_create | def doc_create(self,index,itype,value):
'''
Creates a document
'''
request = self.session
url = 'http://%s:%s/%s/%s/' % (self.host, self.port, index, itype)
if self.verbose:
print value
response = request.post(url,value)
return response | python | def doc_create(self,index,itype,value):
'''
Creates a document
'''
request = self.session
url = 'http://%s:%s/%s/%s/' % (self.host, self.port, index, itype)
if self.verbose:
print value
response = request.post(url,value)
return response | [
"def",
"doc_create",
"(",
"self",
",",
"index",
",",
"itype",
",",
"value",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/%s/'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index",
",",
"itype",
")... | Creates a document | [
"Creates",
"a",
"document"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L145-L154 | train | 54,681 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.search_index_simple | def search_index_simple(self,index,key,search_term):
'''
Search the index using a simple key and search_term
@param index Name of the index
@param key Search Key
@param search_term The term to be searched for
'''
request = self.session
url = 'http://%s:%s/%s/_search?q=%s:%s' % (self.host,self.port,index,key,search_term)
response = request.get(url)
return response | python | def search_index_simple(self,index,key,search_term):
'''
Search the index using a simple key and search_term
@param index Name of the index
@param key Search Key
@param search_term The term to be searched for
'''
request = self.session
url = 'http://%s:%s/%s/_search?q=%s:%s' % (self.host,self.port,index,key,search_term)
response = request.get(url)
return response | [
"def",
"search_index_simple",
"(",
"self",
",",
"index",
",",
"key",
",",
"search_term",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/_search?q=%s:%s'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index... | Search the index using a simple key and search_term
@param index Name of the index
@param key Search Key
@param search_term The term to be searched for | [
"Search",
"the",
"index",
"using",
"a",
"simple",
"key",
"and",
"search_term"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L157-L167 | train | 54,682 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.search_index_advanced | def search_index_advanced(self, index, query):
'''
Advanced search query against an entire index
> query = ElasticQuery().query_string(query='imchi')
> search = ElasticSearch()
'''
request = self.session
url = 'http://%s:%s/%s/_search' % (self.host, self.port, index)
if self.params:
content = dict(query=query, **self.params)
else:
content = dict(query=query)
if self.verbose:
print content
response = request.post(url,content)
return response | python | def search_index_advanced(self, index, query):
'''
Advanced search query against an entire index
> query = ElasticQuery().query_string(query='imchi')
> search = ElasticSearch()
'''
request = self.session
url = 'http://%s:%s/%s/_search' % (self.host, self.port, index)
if self.params:
content = dict(query=query, **self.params)
else:
content = dict(query=query)
if self.verbose:
print content
response = request.post(url,content)
return response | [
"def",
"search_index_advanced",
"(",
"self",
",",
"index",
",",
"query",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/_search'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index",
")",
"if",
"self",
... | Advanced search query against an entire index
> query = ElasticQuery().query_string(query='imchi')
> search = ElasticSearch() | [
"Advanced",
"search",
"query",
"against",
"an",
"entire",
"index"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L169-L185 | train | 54,683 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.map | def map(self,index_name, index_type, map_value):
'''
Enable a specific map for an index and type
'''
request = self.session
url = 'http://%s:%s/%s/%s/_mapping' % (self.host, self.port, index_name, index_type)
content = { index_type : { 'properties' : map_value } }
if self.verbose:
print content
response = request.put(url,content)
return response | python | def map(self,index_name, index_type, map_value):
'''
Enable a specific map for an index and type
'''
request = self.session
url = 'http://%s:%s/%s/%s/_mapping' % (self.host, self.port, index_name, index_type)
content = { index_type : { 'properties' : map_value } }
if self.verbose:
print content
response = request.put(url,content)
return response | [
"def",
"map",
"(",
"self",
",",
"index_name",
",",
"index_type",
",",
"map_value",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/%s/_mapping'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index_name",
... | Enable a specific map for an index and type | [
"Enable",
"a",
"specific",
"map",
"for",
"an",
"index",
"and",
"type"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L315-L325 | train | 54,684 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.list_types | def list_types(index_name, host='localhost',port='9200'):
'''
Lists the context types available in an index
'''
return ElasticSearch(host=host, port=port).type_list(index_name) | python | def list_types(index_name, host='localhost',port='9200'):
'''
Lists the context types available in an index
'''
return ElasticSearch(host=host, port=port).type_list(index_name) | [
"def",
"list_types",
"(",
"index_name",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"'9200'",
")",
":",
"return",
"ElasticSearch",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
".",
"type_list",
"(",
"index_name",
")"
] | Lists the context types available in an index | [
"Lists",
"the",
"context",
"types",
"available",
"in",
"an",
"index"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L328-L332 | train | 54,685 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.type_list | def type_list(self, index_name):
'''
List the types available in an index
'''
request = self.session
url = 'http://%s:%s/%s/_mapping' % (self.host, self.port, index_name)
response = request.get(url)
if request.status_code == 200:
return response[index_name].keys()
else:
return response | python | def type_list(self, index_name):
'''
List the types available in an index
'''
request = self.session
url = 'http://%s:%s/%s/_mapping' % (self.host, self.port, index_name)
response = request.get(url)
if request.status_code == 200:
return response[index_name].keys()
else:
return response | [
"def",
"type_list",
"(",
"self",
",",
"index_name",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/_mapping'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index_name",
")",
"response",
"=",
"request",
"... | List the types available in an index | [
"List",
"the",
"types",
"available",
"in",
"an",
"index"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L334-L344 | train | 54,686 |
ooici/elasticpy | elasticpy/search.py | ElasticSearch.raw | def raw(self, module, method='GET', data=None):
'''
Submits or requsts raw input
'''
request = self.session
url = 'http://%s:%s/%s' % (self.host, self.port, module)
if self.verbose:
print data
if method=='GET':
response = request.get(url)
elif method=='POST':
response = request.post(url,data)
elif method=='PUT':
response = request.put(url,data)
elif method=='DELETE':
response = request.delete(url)
else:
return {'error' : 'No such request method %s' % method}
return response | python | def raw(self, module, method='GET', data=None):
'''
Submits or requsts raw input
'''
request = self.session
url = 'http://%s:%s/%s' % (self.host, self.port, module)
if self.verbose:
print data
if method=='GET':
response = request.get(url)
elif method=='POST':
response = request.post(url,data)
elif method=='PUT':
response = request.put(url,data)
elif method=='DELETE':
response = request.delete(url)
else:
return {'error' : 'No such request method %s' % method}
return response | [
"def",
"raw",
"(",
"self",
",",
"module",
",",
"method",
"=",
"'GET'",
",",
"data",
"=",
"None",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"modul... | Submits or requsts raw input | [
"Submits",
"or",
"requsts",
"raw",
"input"
] | ec221800a80c39e80d8c31667c5b138da39219f2 | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L350-L369 | train | 54,687 |
polyledger/lattice | lattice/wallet.py | secp256k1.inverse | def inverse(self, N):
"""
Returns the modular inverse of an integer with respect to the field
characteristic, P.
Use the Extended Euclidean Algorithm:
https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
"""
if N == 0:
return 0
lm, hm = 1, 0
low, high = N % self.P, self.P
while low > 1:
r = high//low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % self.P | python | def inverse(self, N):
"""
Returns the modular inverse of an integer with respect to the field
characteristic, P.
Use the Extended Euclidean Algorithm:
https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
"""
if N == 0:
return 0
lm, hm = 1, 0
low, high = N % self.P, self.P
while low > 1:
r = high//low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % self.P | [
"def",
"inverse",
"(",
"self",
",",
"N",
")",
":",
"if",
"N",
"==",
"0",
":",
"return",
"0",
"lm",
",",
"hm",
"=",
"1",
",",
"0",
"low",
",",
"high",
"=",
"N",
"%",
"self",
".",
"P",
",",
"self",
".",
"P",
"while",
"low",
">",
"1",
":",
... | Returns the modular inverse of an integer with respect to the field
characteristic, P.
Use the Extended Euclidean Algorithm:
https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm | [
"Returns",
"the",
"modular",
"inverse",
"of",
"an",
"integer",
"with",
"respect",
"to",
"the",
"field",
"characteristic",
"P",
"."
] | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L47-L64 | train | 54,688 |
polyledger/lattice | lattice/wallet.py | secp256k1.is_on_curve | def is_on_curve(self, point):
"""
Checks whether a point is on the curve.
Args:
point (AffinePoint): Point to be checked.
Returns:
bool: True if point is on the curve, False otherwise.
"""
X, Y = point.X, point.Y
return (
pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b
) % self.P == 0 | python | def is_on_curve(self, point):
"""
Checks whether a point is on the curve.
Args:
point (AffinePoint): Point to be checked.
Returns:
bool: True if point is on the curve, False otherwise.
"""
X, Y = point.X, point.Y
return (
pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b
) % self.P == 0 | [
"def",
"is_on_curve",
"(",
"self",
",",
"point",
")",
":",
"X",
",",
"Y",
"=",
"point",
".",
"X",
",",
"point",
".",
"Y",
"return",
"(",
"pow",
"(",
"Y",
",",
"2",
",",
"self",
".",
"P",
")",
"-",
"pow",
"(",
"X",
",",
"3",
",",
"self",
"... | Checks whether a point is on the curve.
Args:
point (AffinePoint): Point to be checked.
Returns:
bool: True if point is on the curve, False otherwise. | [
"Checks",
"whether",
"a",
"point",
"is",
"on",
"the",
"curve",
"."
] | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L66-L79 | train | 54,689 |
polyledger/lattice | lattice/wallet.py | Wallet.generate_private_key | def generate_private_key(self):
"""
Generates a private key based on the password.
SHA-256 is a member of the SHA-2 cryptographic hash functions designed by
the NSA. SHA stands for Secure Hash Algorithm. The password is converted
to bytes and hashed with SHA-256. The binary output is converted to a hex
representation.
Args:
data (str): The data to be hashed with SHA-256.
Returns:
bytes: The hexadecimal representation of the hashed binary data.
"""
random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')
binary_data = bytes(random_string, 'utf-8')
hash_object = hashlib.sha256(binary_data)
message_digest_bin = hash_object.digest()
message_digest_hex = binascii.hexlify(message_digest_bin)
return message_digest_hex | python | def generate_private_key(self):
"""
Generates a private key based on the password.
SHA-256 is a member of the SHA-2 cryptographic hash functions designed by
the NSA. SHA stands for Secure Hash Algorithm. The password is converted
to bytes and hashed with SHA-256. The binary output is converted to a hex
representation.
Args:
data (str): The data to be hashed with SHA-256.
Returns:
bytes: The hexadecimal representation of the hashed binary data.
"""
random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')
binary_data = bytes(random_string, 'utf-8')
hash_object = hashlib.sha256(binary_data)
message_digest_bin = hash_object.digest()
message_digest_hex = binascii.hexlify(message_digest_bin)
return message_digest_hex | [
"def",
"generate_private_key",
"(",
"self",
")",
":",
"random_string",
"=",
"base64",
".",
"b64encode",
"(",
"os",
".",
"urandom",
"(",
"4096",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"binary_data",
"=",
"bytes",
"(",
"random_string",
",",
"'utf-8'",... | Generates a private key based on the password.
SHA-256 is a member of the SHA-2 cryptographic hash functions designed by
the NSA. SHA stands for Secure Hash Algorithm. The password is converted
to bytes and hashed with SHA-256. The binary output is converted to a hex
representation.
Args:
data (str): The data to be hashed with SHA-256.
Returns:
bytes: The hexadecimal representation of the hashed binary data. | [
"Generates",
"a",
"private",
"key",
"based",
"on",
"the",
"password",
"."
] | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L93-L113 | train | 54,690 |
polyledger/lattice | lattice/wallet.py | Wallet.generate_public_key | def generate_public_key(self):
"""
Generates a public key from the hex-encoded private key using elliptic
curve cryptography. The private key is multiplied by a predetermined point
on the elliptic curve called the generator point, G, resulting in the
corresponding private key. The generator point is always the same for all
Bitcoin users.
Jacobian coordinates are used to represent the elliptic curve point G.
https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates
The exponentiating by squaring (also known by double-and-add) method is
used for the elliptic curve multiplication that results in the public key.
https://en.wikipedia.org/wiki/Exponentiation_by_squaring
Bitcoin public keys are 65 bytes. The first byte is 0x04, next 32
bytes correspond to the X coordinate, and last 32 bytes correspond
to the Y coordinate. They are typically encoded as 130-length hex
characters.
Args:
private_key (bytes): UTF-8 encoded hexadecimal
Returns:
str: The public key in hexadecimal representation.
"""
private_key = int(self.private_key, 16)
if private_key >= self.N:
raise Exception('Invalid private key.')
G = JacobianPoint(self.Gx, self.Gy, 1)
public_key = G * private_key
x_hex = '{0:0{1}x}'.format(public_key.X, 64)
y_hex = '{0:0{1}x}'.format(public_key.Y, 64)
return '04' + x_hex + y_hex | python | def generate_public_key(self):
"""
Generates a public key from the hex-encoded private key using elliptic
curve cryptography. The private key is multiplied by a predetermined point
on the elliptic curve called the generator point, G, resulting in the
corresponding private key. The generator point is always the same for all
Bitcoin users.
Jacobian coordinates are used to represent the elliptic curve point G.
https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates
The exponentiating by squaring (also known by double-and-add) method is
used for the elliptic curve multiplication that results in the public key.
https://en.wikipedia.org/wiki/Exponentiation_by_squaring
Bitcoin public keys are 65 bytes. The first byte is 0x04, next 32
bytes correspond to the X coordinate, and last 32 bytes correspond
to the Y coordinate. They are typically encoded as 130-length hex
characters.
Args:
private_key (bytes): UTF-8 encoded hexadecimal
Returns:
str: The public key in hexadecimal representation.
"""
private_key = int(self.private_key, 16)
if private_key >= self.N:
raise Exception('Invalid private key.')
G = JacobianPoint(self.Gx, self.Gy, 1)
public_key = G * private_key
x_hex = '{0:0{1}x}'.format(public_key.X, 64)
y_hex = '{0:0{1}x}'.format(public_key.Y, 64)
return '04' + x_hex + y_hex | [
"def",
"generate_public_key",
"(",
"self",
")",
":",
"private_key",
"=",
"int",
"(",
"self",
".",
"private_key",
",",
"16",
")",
"if",
"private_key",
">=",
"self",
".",
"N",
":",
"raise",
"Exception",
"(",
"'Invalid private key.'",
")",
"G",
"=",
"Jacobian... | Generates a public key from the hex-encoded private key using elliptic
curve cryptography. The private key is multiplied by a predetermined point
on the elliptic curve called the generator point, G, resulting in the
corresponding private key. The generator point is always the same for all
Bitcoin users.
Jacobian coordinates are used to represent the elliptic curve point G.
https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates
The exponentiating by squaring (also known by double-and-add) method is
used for the elliptic curve multiplication that results in the public key.
https://en.wikipedia.org/wiki/Exponentiation_by_squaring
Bitcoin public keys are 65 bytes. The first byte is 0x04, next 32
bytes correspond to the X coordinate, and last 32 bytes correspond
to the Y coordinate. They are typically encoded as 130-length hex
characters.
Args:
private_key (bytes): UTF-8 encoded hexadecimal
Returns:
str: The public key in hexadecimal representation. | [
"Generates",
"a",
"public",
"key",
"from",
"the",
"hex",
"-",
"encoded",
"private",
"key",
"using",
"elliptic",
"curve",
"cryptography",
".",
"The",
"private",
"key",
"is",
"multiplied",
"by",
"a",
"predetermined",
"point",
"on",
"the",
"elliptic",
"curve",
... | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L115-L149 | train | 54,691 |
polyledger/lattice | lattice/wallet.py | JacobianPoint.to_affine | def to_affine(self):
"""
Converts this point to an affine representation.
Returns:
AffinePoint: The affine reprsentation.
"""
X, Y, Z = self.x, self.y, self.inverse(self.z)
return ((X * Z ** 2) % P, (Y * Z ** 3) % P) | python | def to_affine(self):
"""
Converts this point to an affine representation.
Returns:
AffinePoint: The affine reprsentation.
"""
X, Y, Z = self.x, self.y, self.inverse(self.z)
return ((X * Z ** 2) % P, (Y * Z ** 3) % P) | [
"def",
"to_affine",
"(",
"self",
")",
":",
"X",
",",
"Y",
",",
"Z",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"inverse",
"(",
"self",
".",
"z",
")",
"return",
"(",
"(",
"X",
"*",
"Z",
"**",
"2",
")",
"%",
"P",
",",
... | Converts this point to an affine representation.
Returns:
AffinePoint: The affine reprsentation. | [
"Converts",
"this",
"point",
"to",
"an",
"affine",
"representation",
"."
] | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L279-L287 | train | 54,692 |
polyledger/lattice | lattice/wallet.py | AffinePoint.slope | def slope(self, other):
"""
Determines the slope between this point and another point.
Args:
other (AffinePoint): The second point.
Returns:
int: Slope between self and other.
"""
X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y
Y3 = Y1 - Y2
X3 = X1 - X2
return (Y3 * self.inverse(X3)) % self.P | python | def slope(self, other):
"""
Determines the slope between this point and another point.
Args:
other (AffinePoint): The second point.
Returns:
int: Slope between self and other.
"""
X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y
Y3 = Y1 - Y2
X3 = X1 - X2
return (Y3 * self.inverse(X3)) % self.P | [
"def",
"slope",
"(",
"self",
",",
"other",
")",
":",
"X1",
",",
"Y1",
",",
"X2",
",",
"Y2",
"=",
"self",
".",
"X",
",",
"self",
".",
"Y",
",",
"other",
".",
"X",
",",
"other",
".",
"Y",
"Y3",
"=",
"Y1",
"-",
"Y2",
"X3",
"=",
"X1",
"-",
... | Determines the slope between this point and another point.
Args:
other (AffinePoint): The second point.
Returns:
int: Slope between self and other. | [
"Determines",
"the",
"slope",
"between",
"this",
"point",
"and",
"another",
"point",
"."
] | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L378-L391 | train | 54,693 |
polyledger/lattice | lattice/wallet.py | AffinePoint.to_jacobian | def to_jacobian(self):
"""
Converts this point to a Jacobian representation.
Returns:
JacobianPoint: The Jacobian representation.
"""
if not self:
return JacobianPoint(X=0, Y=0, Z=0)
return JacobianPoint(X=self.X, Y=self.Y, Z=1) | python | def to_jacobian(self):
"""
Converts this point to a Jacobian representation.
Returns:
JacobianPoint: The Jacobian representation.
"""
if not self:
return JacobianPoint(X=0, Y=0, Z=0)
return JacobianPoint(X=self.X, Y=self.Y, Z=1) | [
"def",
"to_jacobian",
"(",
"self",
")",
":",
"if",
"not",
"self",
":",
"return",
"JacobianPoint",
"(",
"X",
"=",
"0",
",",
"Y",
"=",
"0",
",",
"Z",
"=",
"0",
")",
"return",
"JacobianPoint",
"(",
"X",
"=",
"self",
".",
"X",
",",
"Y",
"=",
"self"... | Converts this point to a Jacobian representation.
Returns:
JacobianPoint: The Jacobian representation. | [
"Converts",
"this",
"point",
"to",
"a",
"Jacobian",
"representation",
"."
] | d68d27c93b1634ee29f5c1a1dbcd67397481323b | https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/wallet.py#L393-L402 | train | 54,694 |
nikcub/floyd | floyd/db/__init__.py | DataStore.import_model | def import_model(self, name, path="floyd.db.models"):
"""imports a model of name from path, returning from local model
cache if it has been previously loaded otherwise importing"""
if name in self._model_cache:
return self._model_cache[name]
try:
model = getattr(__import__(path, None, None, [name]), name)
self._model_cache[name] = model
except ImportError:
return False
return model | python | def import_model(self, name, path="floyd.db.models"):
"""imports a model of name from path, returning from local model
cache if it has been previously loaded otherwise importing"""
if name in self._model_cache:
return self._model_cache[name]
try:
model = getattr(__import__(path, None, None, [name]), name)
self._model_cache[name] = model
except ImportError:
return False
return model | [
"def",
"import_model",
"(",
"self",
",",
"name",
",",
"path",
"=",
"\"floyd.db.models\"",
")",
":",
"if",
"name",
"in",
"self",
".",
"_model_cache",
":",
"return",
"self",
".",
"_model_cache",
"[",
"name",
"]",
"try",
":",
"model",
"=",
"getattr",
"(",
... | imports a model of name from path, returning from local model
cache if it has been previously loaded otherwise importing | [
"imports",
"a",
"model",
"of",
"name",
"from",
"path",
"returning",
"from",
"local",
"model",
"cache",
"if",
"it",
"has",
"been",
"previously",
"loaded",
"otherwise",
"importing"
] | 5772d0047efb11c9ce5f7d234a9da4576ce24edc | https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/db/__init__.py#L76-L87 | train | 54,695 |
nikcub/floyd | floyd/db/__init__.py | Model.parse_md | def parse_md(self):
"""Takes a post path and returns a dictionary of variables"""
post_content = _MARKDOWN.convert(self.raw_src)
if hasattr(_MARKDOWN, 'Meta'):
# 'Meta' in _MARKDOWN and _MARKDOWN.Meta:
for key in _MARKDOWN.Meta:
print "\t meta: %s: %s (%s)" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0]))
if key == 'pubdate':
setattr(self, key, datetime.datetime.fromtimestamp(float(_MARKDOWN.Meta[key][0])))
else:
setattr(self, key, _MARKDOWN.Meta[key][0])
self.content = post_content
self.stub = self.__key__
# set required fields
# @TODO required in schema rather than here
if not hasattr(self, 'pubdate'):
print '\t Notice: setting default pubdate'
setattr(self, 'pubdate', datetime.datetime.now()) | python | def parse_md(self):
"""Takes a post path and returns a dictionary of variables"""
post_content = _MARKDOWN.convert(self.raw_src)
if hasattr(_MARKDOWN, 'Meta'):
# 'Meta' in _MARKDOWN and _MARKDOWN.Meta:
for key in _MARKDOWN.Meta:
print "\t meta: %s: %s (%s)" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0]))
if key == 'pubdate':
setattr(self, key, datetime.datetime.fromtimestamp(float(_MARKDOWN.Meta[key][0])))
else:
setattr(self, key, _MARKDOWN.Meta[key][0])
self.content = post_content
self.stub = self.__key__
# set required fields
# @TODO required in schema rather than here
if not hasattr(self, 'pubdate'):
print '\t Notice: setting default pubdate'
setattr(self, 'pubdate', datetime.datetime.now()) | [
"def",
"parse_md",
"(",
"self",
")",
":",
"post_content",
"=",
"_MARKDOWN",
".",
"convert",
"(",
"self",
".",
"raw_src",
")",
"if",
"hasattr",
"(",
"_MARKDOWN",
",",
"'Meta'",
")",
":",
"# 'Meta' in _MARKDOWN and _MARKDOWN.Meta:",
"for",
"key",
"in",
"_MARKDOW... | Takes a post path and returns a dictionary of variables | [
"Takes",
"a",
"post",
"path",
"and",
"returns",
"a",
"dictionary",
"of",
"variables"
] | 5772d0047efb11c9ce5f7d234a9da4576ce24edc | https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/db/__init__.py#L140-L161 | train | 54,696 |
VikParuchuri/percept | percept/workflows/base.py | BaseWorkflow.execute_train_task_with_dependencies | def execute_train_task_with_dependencies(self, task_cls, **kwargs):
"""
Run the training, as well as any dependencies of the training
task_cls - class of a task
"""
log.info("Task {0}".format(get_task_name(task_cls)))
#Instantiate the task
task_inst = task_cls()
#Grab arguments from the task instance and set them
for arg in task_inst.args:
if arg not in kwargs:
kwargs[arg] = task_inst.args[arg]
#Check for dependencies defined by the task
if hasattr(task_inst, "dependencies"):
deps = task_inst.dependencies
dep_results = []
#Run the dependencies through recursion (in case of dependencies of dependencies, etc)
for dep in deps:
log.info("Dependency {0}".format(get_task_name(dep)))
dep_results.append(self.execute_train_task_with_dependencies(dep.cls, **dep.args))
trained_dependencies = []
#Add executed dependency to trained_dependencies list on the task
for i in xrange(0,len(deps)):
dep = deps[i]
dep_result = dep_results[i]
name = dep.name
namespace = dep.namespace
category = dep.category
trained_dependencies.append(TrainedDependency(category=category, namespace=namespace, name = name, inst = dep))
task_inst.trained_dependencies = trained_dependencies
#Finally, run the task
task_inst.train(**kwargs)
return task_inst | python | def execute_train_task_with_dependencies(self, task_cls, **kwargs):
"""
Run the training, as well as any dependencies of the training
task_cls - class of a task
"""
log.info("Task {0}".format(get_task_name(task_cls)))
#Instantiate the task
task_inst = task_cls()
#Grab arguments from the task instance and set them
for arg in task_inst.args:
if arg not in kwargs:
kwargs[arg] = task_inst.args[arg]
#Check for dependencies defined by the task
if hasattr(task_inst, "dependencies"):
deps = task_inst.dependencies
dep_results = []
#Run the dependencies through recursion (in case of dependencies of dependencies, etc)
for dep in deps:
log.info("Dependency {0}".format(get_task_name(dep)))
dep_results.append(self.execute_train_task_with_dependencies(dep.cls, **dep.args))
trained_dependencies = []
#Add executed dependency to trained_dependencies list on the task
for i in xrange(0,len(deps)):
dep = deps[i]
dep_result = dep_results[i]
name = dep.name
namespace = dep.namespace
category = dep.category
trained_dependencies.append(TrainedDependency(category=category, namespace=namespace, name = name, inst = dep))
task_inst.trained_dependencies = trained_dependencies
#Finally, run the task
task_inst.train(**kwargs)
return task_inst | [
"def",
"execute_train_task_with_dependencies",
"(",
"self",
",",
"task_cls",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"info",
"(",
"\"Task {0}\"",
".",
"format",
"(",
"get_task_name",
"(",
"task_cls",
")",
")",
")",
"#Instantiate the task",
"task_inst",
... | Run the training, as well as any dependencies of the training
task_cls - class of a task | [
"Run",
"the",
"training",
"as",
"well",
"as",
"any",
"dependencies",
"of",
"the",
"training",
"task_cls",
"-",
"class",
"of",
"a",
"task"
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/workflows/base.py#L54-L86 | train | 54,697 |
VikParuchuri/percept | percept/workflows/base.py | BaseWorkflow.execute_predict_task | def execute_predict_task(self, task_inst, predict_data, **kwargs):
"""
Do a prediction
task_inst - instance of a task
"""
result = task_inst.predict(predict_data, **task_inst.args)
return result | python | def execute_predict_task(self, task_inst, predict_data, **kwargs):
"""
Do a prediction
task_inst - instance of a task
"""
result = task_inst.predict(predict_data, **task_inst.args)
return result | [
"def",
"execute_predict_task",
"(",
"self",
",",
"task_inst",
",",
"predict_data",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"task_inst",
".",
"predict",
"(",
"predict_data",
",",
"*",
"*",
"task_inst",
".",
"args",
")",
"return",
"result"
] | Do a prediction
task_inst - instance of a task | [
"Do",
"a",
"prediction",
"task_inst",
"-",
"instance",
"of",
"a",
"task"
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/workflows/base.py#L88-L94 | train | 54,698 |
VikParuchuri/percept | percept/workflows/base.py | BaseWorkflow.train | def train(self, **kwargs):
"""
Do the workflow training
"""
log.info("Starting to train...")
if not self.setup_run:
self.setup()
self.trained_tasks = []
for task in self.tasks:
data = self.reformatted_input[task.data_format]['data']
target = self.reformatted_input[task.data_format]['target']
if data is None:
raise Exception("Data cannot be none. Check the config file to make sure the right input is being read.")
kwargs['data']=data
kwargs['target']=target
trained_task = self.execute_train_task_with_dependencies(task, **kwargs)
self.trained_tasks.append(trained_task)
#If the trained task alters the data in any way, pass it down the chain to the next task
if hasattr(trained_task, 'data'):
self.reformatted_input[task.data_format]['data'] = trained_task.data
log.info("Finished training.") | python | def train(self, **kwargs):
"""
Do the workflow training
"""
log.info("Starting to train...")
if not self.setup_run:
self.setup()
self.trained_tasks = []
for task in self.tasks:
data = self.reformatted_input[task.data_format]['data']
target = self.reformatted_input[task.data_format]['target']
if data is None:
raise Exception("Data cannot be none. Check the config file to make sure the right input is being read.")
kwargs['data']=data
kwargs['target']=target
trained_task = self.execute_train_task_with_dependencies(task, **kwargs)
self.trained_tasks.append(trained_task)
#If the trained task alters the data in any way, pass it down the chain to the next task
if hasattr(trained_task, 'data'):
self.reformatted_input[task.data_format]['data'] = trained_task.data
log.info("Finished training.") | [
"def",
"train",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"info",
"(",
"\"Starting to train...\"",
")",
"if",
"not",
"self",
".",
"setup_run",
":",
"self",
".",
"setup",
"(",
")",
"self",
".",
"trained_tasks",
"=",
"[",
"]",
"for",
... | Do the workflow training | [
"Do",
"the",
"workflow",
"training"
] | 90304ba82053e2a9ad2bacaab3479403d3923bcf | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/workflows/base.py#L96-L116 | train | 54,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.