Dataset schema (one row per function):

    repo              string (lengths 7 to 54)
    path              string (lengths 4 to 192)
    url               string (lengths 87 to 284)
    code              string (lengths 78 to 104k)
    code_tokens       list
    docstring         string (lengths 1 to 46.9k)
    docstring_tokens  list
    language          string (1 distinct value)
    partition         string (3 distinct values)
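For orientation, a minimal sketch of iterating over rows with this schema. It assumes the split has been exported as a local JSON-lines file; the filename data.jsonl is a hypothetical placeholder, not part of the dataset card.

    import json

    # Hypothetical local dump; each line is one row with the columns above.
    with open("data.jsonl", "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            assert row["language"] == "python"                      # 1 distinct value
            assert row["partition"] in ("train", "valid", "test")   # 3 distinct values
            print(row["repo"], row["path"], len(row["code_tokens"]))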
numenta/htmresearch
htmresearch/support/expsuite.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/expsuite.py#L90-L99
def parse_cfg(self):
    """ parses the given config file for experiments.
    """
    self.cfgparser = ConfigParser()
    if not self.cfgparser.read(self.options.config):
        raise SystemExit('config file %s not found.' % self.options.config)

    # Change the current working directory to be relative to 'experiments.cfg'
    projectDir = os.path.dirname(self.options.config)
    projectDir = os.path.abspath(projectDir)
    os.chdir(projectDir)
[ "def", "parse_cfg", "(", "self", ")", ":", "self", ".", "cfgparser", "=", "ConfigParser", "(", ")", "if", "not", "self", ".", "cfgparser", ".", "read", "(", "self", ".", "options", ".", "config", ")", ":", "raise", "SystemExit", "(", "'config file %s not...
parses the given config file for experiments.
[ "parses", "the", "given", "config", "file", "for", "experiments", "." ]
python
train
mikedh/trimesh
trimesh/primitives.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/primitives.py#L274-L296
def buffer(self, distance):
    """
    Return a cylinder primitive which covers the source cylinder
    by distance: radius is inflated by distance, height by twice
    the distance.

    Parameters
    ------------
    distance : float
      Distance to inflate cylinder radius and height

    Returns
    -------------
    buffered : Cylinder
      Cylinder primitive inflated by distance
    """
    distance = float(distance)
    buffered = Cylinder(
        height=self.primitive.height + distance * 2,
        radius=self.primitive.radius + distance,
        transform=self.primitive.transform.copy())
    return buffered
[ "def", "buffer", "(", "self", ",", "distance", ")", ":", "distance", "=", "float", "(", "distance", ")", "buffered", "=", "Cylinder", "(", "height", "=", "self", ".", "primitive", ".", "height", "+", "distance", "*", "2", ",", "radius", "=", "self", ...
Return a cylinder primitive which covers the source cylinder by distance: radius is inflated by distance, height by twice the distance. Parameters ------------ distance : float Distance to inflate cylinder radius and height Returns ------------- buffered : Cylinder Cylinder primitive inflated by distance
[ "Return", "a", "cylinder", "primitive", "which", "covers", "the", "source", "cylinder", "by", "distance", ":", "radius", "is", "inflated", "by", "distance", "height", "by", "twice", "the", "distance", "." ]
python
train
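A short usage sketch for the buffer method above. The radius, height, and distance values are illustrative; per the code shown, the result grows by distance in radius and by twice distance in height.

    import trimesh

    cyl = trimesh.primitives.Cylinder(radius=1.0, height=2.0)  # illustrative sizes
    buffered = cyl.buffer(0.5)

    print(buffered.primitive.radius)  # 1.5 = radius + distance
    print(buffered.primitive.height)  # 3.0 = height + 2 * distance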
chaoss/grimoirelab-perceval
perceval/backends/core/git.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/git.py#L134-L164
def fetch_items(self, category, **kwargs):
    """Fetch the commits

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    to_date = kwargs['to_date']
    branches = kwargs['branches']
    latest_items = kwargs['latest_items']
    no_update = kwargs['no_update']

    ncommits = 0

    try:
        if os.path.isfile(self.gitpath):
            commits = self.__fetch_from_log()
        else:
            commits = self.__fetch_from_repo(from_date, to_date, branches,
                                             latest_items, no_update)

        for commit in commits:
            yield commit
            ncommits += 1
    except EmptyRepositoryError:
        pass

    logger.info("Fetch process completed: %s commits fetched", ncommits)
[ "def", "fetch_items", "(", "self", ",", "category", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", "[", "'from_date'", "]", "to_date", "=", "kwargs", "[", "'to_date'", "]", "branches", "=", "kwargs", "[", "'branches'", "]", "latest_items", ...
Fetch the commits :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items
[ "Fetch", "the", "commits" ]
python
test
KelSolaar/Foundations
foundations/strings.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/strings.py#L348-L370
def get_normalized_path(path):
    """
    Normalizes a path, escaping slashes if needed on Windows.

    Usage::

        >>> get_normalized_path("C:\\Users/johnDoe\\Documents")
        u'C:\\Users\\JohnDoe\\Documents'

    :param path: Path to normalize.
    :type path: unicode
    :return: Normalized path.
    :rtype: unicode
    """

    if platform.system() == "Windows" or platform.system() == "Microsoft":
        path = os.path.normpath(path).replace("\\", "\\\\")
        LOGGER.debug("> Path: '{0}', normalized path.".format(path))
        return path
    else:
        path = os.path.normpath(path)
        LOGGER.debug("> Path: '{0}', normalized path.".format(path))
        return path
[ "def", "get_normalized_path", "(", "path", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "\"Windows\"", "or", "platform", ".", "system", "(", ")", "==", "\"Microsoft\"", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ")...
Normalizes a path, escaping slashes if needed on Windows. Usage:: >>> get_normalized_path("C:\\Users/johnDoe\\Documents") u'C:\\Users\\JohnDoe\\Documents' :param path: Path to normalize. :type path: unicode :return: Normalized path. :rtype: unicode
[ "Normalizes", "a", "path", "escaping", "slashes", "if", "needed", "on", "Windows", "." ]
python
train
pennersr/django-allauth
allauth/socialaccount/providers/oauth/client.py
https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/socialaccount/providers/oauth/client.py#L167-L178
def _get_at_from_session(self):
    """
    Get the saved access token for private resources from the session.
    """
    try:
        return self.request.session['oauth_%s_access_token'
                                    % get_token_prefix(self.request_token_url)]
    except KeyError:
        raise OAuthError(
            _('No access token saved for "%s".')
            % get_token_prefix(self.request_token_url))
[ "def", "_get_at_from_session", "(", "self", ")", ":", "try", ":", "return", "self", ".", "request", ".", "session", "[", "'oauth_%s_access_token'", "%", "get_token_prefix", "(", "self", ".", "request_token_url", ")", "]", "except", "KeyError", ":", "raise", "O...
Get the saved access token for private resources from the session.
[ "Get", "the", "saved", "access", "token", "for", "private", "resources", "from", "the", "session", "." ]
python
train
TrafficSenseMSD/SumoTools
traci/_vehicle.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L1073-L1079
def setMinGap(self, vehID, minGap):
    """setMinGap(string, double) -> None

    Sets the offset (gap to front vehicle if halting) for this vehicle.
    """
    self._connection._sendDoubleCmd(
        tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_MINGAP, vehID, minGap)
[ "def", "setMinGap", "(", "self", ",", "vehID", ",", "minGap", ")", ":", "self", ".", "_connection", ".", "_sendDoubleCmd", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "VAR_MINGAP", ",", "vehID", ",", "minGap", ")" ]
setMinGap(string, double) -> None Sets the offset (gap to front vehicle if halting) for this vehicle.
[ "setMinGap", "(", "string", "double", ")", "-", ">", "None" ]
python
train
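A usage sketch for the setMinGap call above. It assumes a SUMO simulation is already running under TraCI and that a vehicle with id "veh0" exists; both are assumptions for illustration.

    import traci

    # Assumes something like traci.start(["sumo", "-c", "scenario.sumocfg"])
    # has already been called and vehicle "veh0" has been inserted.
    traci.vehicle.setMinGap("veh0", 2.5)    # 2.5 m gap to the leader when halting
    print(traci.vehicle.getMinGap("veh0"))  # 2.5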
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/graphql_schema.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/graphql_schema.py#L228-L383
def get_graphql_schema_from_schema_graph(schema_graph, class_to_field_type_overrides,
                                         hidden_classes):
    """Return a GraphQL schema object corresponding to the schema of the given schema graph.

    Args:
        schema_graph: SchemaGraph
        class_to_field_type_overrides: dict, class name -> {field name -> field type},
                                       (string -> {string -> GraphQLType}). Used to override
                                       the type of a field in the class where it's first
                                       defined and all the class's subclasses.
        hidden_classes: set of strings, classes to not include in the GraphQL schema.

    Returns:
        tuple of (GraphQL schema object, GraphQL type equivalence hints dict).
        The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
    """
    _validate_overriden_fields_are_not_defined_in_superclasses(class_to_field_type_overrides,
                                                               schema_graph)

    # The field types of subclasses must also be overridden.
    # Remember that the result returned by get_subclass_set(class_name) includes class_name itself.
    inherited_field_type_overrides = _get_inherited_field_types(class_to_field_type_overrides,
                                                                schema_graph)

    # We remove the base vertex class from the schema if it has no properties.
    # If it has no properties, it's meaningless and makes the schema less syntactically sweet.
    if not schema_graph.get_element_by_class_name(ORIENTDB_BASE_VERTEX_CLASS_NAME).properties:
        hidden_classes.add(ORIENTDB_BASE_VERTEX_CLASS_NAME)

    graphql_types = OrderedDict()
    type_equivalence_hints = OrderedDict()

    # For each vertex class, construct its analogous GraphQL type representation.
    for vertex_cls_name in sorted(schema_graph.vertex_class_names):
        vertex_cls = schema_graph.get_element_by_class_name(vertex_cls_name)
        if vertex_cls_name in hidden_classes:
            continue

        inherited_field_type_overrides.setdefault(vertex_cls_name, dict())
        field_type_overrides = inherited_field_type_overrides[vertex_cls_name]

        # We have to use delayed type binding here, because some of the type references
        # are circular: if an edge connects vertices of types A and B, then
        # GraphQL type A has a List[B] field, and type B has a List[A] field.
        # To avoid the circular dependency, GraphQL allows us to initialize the types
        # initially without their field information, and fill in their field information
        # later using a lambda function as the second argument to GraphQLObjectType.
        # This lambda function will be called on each type after all types are created
        # in their initial blank state.
        #
        # However, 'cls_name' is a variable that would not be correctly bound
        # if we naively tried to construct a lambda in-place, because Python lambdas
        # are not closures. Instead, call a function with 'cls_name' as an argument,
        # and have that function construct and return the required lambda.
        field_specification_lambda = _create_field_specification(
            schema_graph, graphql_types, field_type_overrides, hidden_classes,
            vertex_cls_name)

        # Abstract classes are interfaces, concrete classes are object types.
        current_graphql_type = None
        if vertex_cls.abstract:
            # "fields" is a kwarg in the interface constructor, even though
            # it's a positional arg in the object type constructor.
            current_graphql_type = GraphQLInterfaceType(vertex_cls_name,
                                                        fields=field_specification_lambda)
        else:
            # For similar reasons as the field_specification_lambda,
            # we need to create an interface specification lambda function that
            # specifies the interfaces implemented by this type.
            interface_specification_lambda = _create_interface_specification(
                schema_graph, graphql_types, hidden_classes, vertex_cls_name)

            # N.B.: Ignore the "is_type_of" argument below, it is simply a circumvention of
            # a sanity check inside the GraphQL library. The library assumes that we'll use
            # its execution system, so it complains that we don't provide a means to
            # differentiate between different implementations of the same interface.
            # We don't care, because we compile the GraphQL query to a database query.
            current_graphql_type = GraphQLObjectType(vertex_cls_name,
                                                     field_specification_lambda,
                                                     interfaces=interface_specification_lambda,
                                                     is_type_of=lambda: None)

        graphql_types[vertex_cls_name] = current_graphql_type

    # For each vertex class, construct all union types representations.
    for vertex_cls_name in sorted(schema_graph.vertex_class_names):
        vertex_cls = schema_graph.get_element_by_class_name(vertex_cls_name)
        if vertex_cls_name in hidden_classes:
            continue

        vertex_cls_subclasses = schema_graph.get_subclass_set(vertex_cls_name)
        if not vertex_cls.abstract and len(vertex_cls_subclasses) > 1:
            # In addition to creating this class' corresponding GraphQL type, we'll need a
            # union type to represent it when it appears as the endpoint of an edge.
            union_type_name = _get_union_type_name(vertex_cls_subclasses)

            # For similar reasons as the field_specification_lambda,
            # we need to create a union type specification lambda function that specifies
            # the types that this union type is composed of.
            type_specification_lambda = _create_union_types_specification(
                schema_graph, graphql_types, hidden_classes, vertex_cls_name)

            union_type = GraphQLUnionType(union_type_name, types=type_specification_lambda)
            graphql_types[union_type_name] = union_type
            type_equivalence_hints[graphql_types[vertex_cls_name]] = union_type

    # Include all abstract non-vertex classes whose only non-abstract subclasses are vertices.
    for non_graph_cls_name in sorted(schema_graph.non_graph_class_names):
        if non_graph_cls_name in hidden_classes:
            continue
        if not schema_graph.get_element_by_class_name(non_graph_cls_name).abstract:
            continue

        cls_subclasses = schema_graph.get_subclass_set(non_graph_cls_name)
        # No need to add the possible abstract class if it doesn't have subclasses besides itself.
        if len(cls_subclasses) > 1:
            all_non_abstract_subclasses_are_vertices = True

            # Check all non-abstract subclasses are vertices.
            for subclass_name in cls_subclasses:
                subclass = schema_graph.get_element_by_class_name(subclass_name)
                if subclass_name != non_graph_cls_name:
                    if not subclass.abstract and not subclass.is_vertex:
                        all_non_abstract_subclasses_are_vertices = False
                        break

            if all_non_abstract_subclasses_are_vertices:
                # Add abstract class as an interface.
                inherited_field_type_overrides.setdefault(non_graph_cls_name, dict())
                field_type_overrides = inherited_field_type_overrides[non_graph_cls_name]
                field_specification_lambda = _create_field_specification(
                    schema_graph, graphql_types, field_type_overrides, hidden_classes,
                    non_graph_cls_name)

                graphql_type = GraphQLInterfaceType(non_graph_cls_name,
                                                    fields=field_specification_lambda)
                graphql_types[non_graph_cls_name] = graphql_type

    if not graphql_types:
        raise EmptySchemaError(u'After evaluating all subclasses of V, we were not able to find '
                               u'visible schema data to import into the GraphQL schema object')

    # Create the root query GraphQL type. Consists of all non-union classes, i.e.
    # all non-abstract classes (as GraphQL types) and all abstract classes (as GraphQL interfaces).
    RootSchemaQuery = GraphQLObjectType('RootSchemaQuery', OrderedDict([
        (name, GraphQLField(value))
        for name, value in sorted(six.iteritems(graphql_types), key=lambda x: x[0])
        if not isinstance(value, GraphQLUnionType)
    ]))

    schema = GraphQLSchema(RootSchemaQuery, directives=DIRECTIVES)

    # Note that the GraphQLSchema reconstructs the set of types in the schema by recursively
    # searching through the fields of the RootSchemaQuery. Since union types can only appear in the
    # fields of other types as edges, union types with no in or out edges will not appear in the
    # schema. Therefore, we remove these unions and their keys from the type equivalence hints.
    return schema, _get_referenced_type_equivalences(graphql_types, type_equivalence_hints)
[ "def", "get_graphql_schema_from_schema_graph", "(", "schema_graph", ",", "class_to_field_type_overrides", ",", "hidden_classes", ")", ":", "_validate_overriden_fields_are_not_defined_in_superclasses", "(", "class_to_field_type_overrides", ",", "schema_graph", ")", "# The field types ...
Return a GraphQL schema object corresponding to the schema of the given schema graph. Args: schema_graph: SchemaGraph class_to_field_type_overrides: dict, class name -> {field name -> field type}, (string -> {string -> GraphQLType}). Used to override the type of a field in the class where it's first defined and all the class's subclasses. hidden_classes: set of strings, classes to not include in the GraphQL schema. Returns: tuple of (GraphQL schema object, GraphQL type equivalence hints dict). The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
[ "Return", "a", "GraphQL", "schema", "object", "corresponding", "to", "the", "schema", "of", "the", "given", "schema", "graph", "." ]
python
train
mozillazg/python-shanbay
shanbay/api.py
https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L58-L63
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
    """添加单词"""
    data = {
        'id': word_id
    }
    return self._request(url, method='post', data=data).json()
[ "def", "add_word", "(", "self", ",", "word_id", ",", "url", "=", "'https://api.shanbay.com/bdc/learning/'", ")", ":", "data", "=", "{", "'id'", ":", "word_id", "}", "return", "self", ".", "_request", "(", "url", ",", "method", "=", "'post'", ",", "data", ...
添加单词
[ "添加单词" ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/edf.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/edf.py#L401-L419
def remove_datetime(filename):
    """Remove datetime from filename by overwriting the date / time info.

    Parameters
    ----------
    filename : Path
        path to edf file

    Notes
    -----
    It modifies the file.

    TODO
    ----
    This function might be part of a large anonymization procedure for edf
    """
    with Path(filename).open('r+b') as f:
        f.seek(168)
        f.write(16 * b' ')
[ "def", "remove_datetime", "(", "filename", ")", ":", "with", "Path", "(", "filename", ")", ".", "open", "(", "'r+b'", ")", "as", "f", ":", "f", ".", "seek", "(", "168", ")", "f", ".", "write", "(", "16", "*", "b' '", ")" ]
Remove datetime from filename by overwriting the date / time info. Parameters ---------- filename : Path path to edf file Notes ----- It modifies the file. TODO ---- This function might be part of a large anonymization procedure for edf
[ "Remove", "datetime", "from", "filename", "by", "overwriting", "the", "date", "/", "time", "info", "." ]
python
train
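A usage sketch for remove_datetime above. In an EDF header the 8-byte start date and 8-byte start time sit at byte offset 168, which is exactly the 16 bytes the function blanks out. The file path is a placeholder, and note that the file is modified in place.

    from pathlib import Path
    from wonambi.ioeeg.edf import remove_datetime

    edf_file = Path("recording.edf")  # placeholder; the file is modified in place!
    remove_datetime(edf_file)

    with edf_file.open("rb") as f:
        f.seek(168)
        print(f.read(16))  # b'                ' - date/time fields now blank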
UCL-INGI/INGInious
inginious/common/course_factory.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/course_factory.py#L84-L95
def get_all_courses(self):
    """
    :return: a table containing courseid=>Course pairs
    """
    course_ids = [f[0:len(f)-1] for f in self._filesystem.list(
        folders=True, files=False, recursive=False)]  # remove trailing "/"
    output = {}
    for courseid in course_ids:
        try:
            output[courseid] = self.get_course(courseid)
        except Exception:
            get_course_logger(courseid).warning("Cannot open course", exc_info=True)
    return output
[ "def", "get_all_courses", "(", "self", ")", ":", "course_ids", "=", "[", "f", "[", "0", ":", "len", "(", "f", ")", "-", "1", "]", "for", "f", "in", "self", ".", "_filesystem", ".", "list", "(", "folders", "=", "True", ",", "files", "=", "False", ...
:return: a table containing courseid=>Course pairs
[ ":", "return", ":", "a", "table", "containing", "courseid", "=", ">", "Course", "pairs" ]
python
train
limodou/uliweb
uliweb/orm/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L3192-L3207
def with_relation(self, relation_name=None):
    """
    if relation is not None, when fetch manytomany result, also
    fetch relation record and saved them to manytomany object,
    and named them as relation.

    If relation_name is not given, then default value is 'relation'
    """
    if not relation_name:
        relation_name = 'relation'
    if hasattr(self.modelb, relation_name):
        raise Error("The attribute name %s has already existed in Model %s!"
                    % (relation_name, self.modelb.__name__))
    if not self.through_model:
        raise Error("Only with through style in ManyToMany supports "
                    "with_relation function of Model %s!" % self.modelb.__name__)
    self.with_relation_name = relation_name
    return self
[ "def", "with_relation", "(", "self", ",", "relation_name", "=", "None", ")", ":", "if", "not", "relation_name", ":", "relation_name", "=", "'relation'", "if", "hasattr", "(", "self", ".", "modelb", ",", "relation_name", ")", ":", "raise", "Error", "(", "\"...
if relation is not None, when fetch manytomany result, also fetch relation record and saved them to manytomany object, and named them as relation. If relation_name is not given, then default value is 'relation'
[ "if", "relation", "is", "not", "None", "when", "fetch", "manytomany", "result", "also", "fetch", "relation", "record", "and", "saved", "them", "to", "manytomany", "object", "and", "named", "them", "as", "relation", ".", "If", "relation_name", "is", "not", "g...
python
train
nficano/python-lambda
aws_lambda/aws_lambda.py
https://github.com/nficano/python-lambda/blob/b0bd25404df70212d7fa057758760366406d64f2/aws_lambda/aws_lambda.py#L124-L158
def deploy_s3(
    src, requirements=None, local_package=None,
    config_file='config.yaml', profile_name=None,
    preserve_vpc=False
):
    """Deploys a new function via AWS S3.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str local_package:
        The path to a local package with should be included in the deploy as
        well (and/or is not available on PyPi)
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the dist
    # directory.
    path_to_zip_file = build(
        src, config_file=config_file, requirements=requirements,
        local_package=local_package,
    )

    use_s3 = True
    s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
    existing_config = get_function_config(cfg)
    if existing_config:
        update_function(cfg, path_to_zip_file, existing_config, use_s3=use_s3,
                        s3_file=s3_file, preserve_vpc=preserve_vpc)
    else:
        create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)
[ "def", "deploy_s3", "(", "src", ",", "requirements", "=", "None", ",", "local_package", "=", "None", ",", "config_file", "=", "'config.yaml'", ",", "profile_name", "=", "None", ",", "preserve_vpc", "=", "False", ")", ":", "# Load and parse the config file.", "pa...
Deploys a new function via AWS S3. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package with should be included in the deploy as well (and/or is not available on PyPi)
[ "Deploys", "a", "new", "function", "via", "AWS", "S3", "." ]
python
valid
PyGithub/PyGithub
github/Repository.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L1481-L1503
def get_file_contents(self, path, ref=github.GithubObject.NotSet):
    """
    :calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
    :param path: string
    :param ref: string
    :rtype: :class:`github.ContentFile.ContentFile`
    """
    assert isinstance(path, (str, unicode)), path
    assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
    url_parameters = dict()
    if ref is not github.GithubObject.NotSet:
        url_parameters["ref"] = ref
    headers, data = self._requester.requestJsonAndCheck(
        "GET",
        self.url + "/contents/" + urllib.quote(path),
        parameters=url_parameters
    )
    if isinstance(data, list):
        return [
            github.ContentFile.ContentFile(self._requester, headers, item, completed=False)
            for item in data
        ]
    return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
[ "def", "get_file_contents", "(", "self", ",", "path", ",", "ref", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "isinstance", "(", "path", ",", "(", "str", ",", "unicode", ")", ")", ",", "path", "assert", "ref", "is", "github",...
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_ :param path: string :param ref: string :rtype: :class:`github.ContentFile.ContentFile`
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "contents", "/", ":", "path", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", "/", "contents", ">", "_", ":", "param", "path", ...
python
train
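A usage sketch matching the PyGithub version pinned in the URL above (newer PyGithub releases renamed this method to get_contents). The token is a placeholder.

    from github import Github

    gh = Github("<personal-access-token>")  # placeholder credential
    repo = gh.get_repo("PyGithub/PyGithub")

    readme = repo.get_file_contents("README.md")
    print(readme.decoded_content[:80])

    # Per the code above, a directory path makes the call return a list of ContentFile.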
gdub/python-simpleldap
simpleldap/__init__.py
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L276-L283
def compare(self, dn, attr, value):
    """
    Compare the ``attr`` of the entry ``dn`` with given ``value``.

    This is a convenience wrapper for the ldap library's ``compare``
    function that returns a boolean value instead of 1 or 0.
    """
    return self.connection.compare_s(dn, attr, value) == 1
[ "def", "compare", "(", "self", ",", "dn", ",", "attr", ",", "value", ")", ":", "return", "self", ".", "connection", ".", "compare_s", "(", "dn", ",", "attr", ",", "value", ")", "==", "1" ]
Compare the ``attr`` of the entry ``dn`` with given ``value``. This is a convenience wrapper for the ldap library's ``compare`` function that returns a boolean value instead of 1 or 0.
[ "Compare", "the", "attr", "of", "the", "entry", "dn", "with", "given", "value", "." ]
python
train
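A usage sketch for compare above, assuming a reachable LDAP server; the host, DN, and values are placeholders.

    import simpleldap

    conn = simpleldap.Connection('ldap.example.com')  # placeholder host
    # True if the entry's cn attribute equals 'John Doe', False otherwise.
    print(conn.compare('cn=John Doe,ou=people,dc=example,dc=com', 'cn', 'John Doe'))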
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L59-L70
def qos_map_dscp_traffic_class_dscp_traffic_class_map_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map = ET.SubElement(qos, "map")
    dscp_traffic_class = ET.SubElement(map, "dscp-traffic-class")
    dscp_traffic_class_map_name = ET.SubElement(dscp_traffic_class,
                                                "dscp-traffic-class-map-name")
    dscp_traffic_class_map_name.text = kwargs.pop('dscp_traffic_class_map_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "qos_map_dscp_traffic_class_dscp_traffic_class_map_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "=...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
thebjorn/pydeps
pydeps/pystdlib.py
https://github.com/thebjorn/pydeps/blob/1e6715b7bea47a40e8042821b57937deaaa0fdc3/pydeps/pystdlib.py#L7-L26
def pystdlib():
    """Return a set of all module-names in the Python standard library.
    """
    curver = '.'.join(str(x) for x in sys.version_info[:2])
    return (set(stdlib_list.stdlib_list(curver)) | {
        '_LWPCookieJar', '_MozillaCookieJar', '_abcoll', 'email._parseaddr',
        'email.base64mime', 'email.feedparser', 'email.quoprimime',
        'encodings', 'genericpath', 'ntpath', 'nturl2path', 'os2emxpath',
        'posixpath', 'sre_compile', 'sre_parse', 'unittest.case',
        'unittest.loader', 'unittest.main', 'unittest.result',
        'unittest.runner', 'unittest.signals', 'unittest.suite',
        'unittest.util', '_threading_local', 'sre_constants', 'strop',
        'repr', 'opcode', 'nt', 'encodings.aliases', '_bisect', '_codecs',
        '_collections', '_functools', '_hashlib', '_heapq', '_io', '_locale',
        '_LWPCookieJar', '_md5', '_MozillaCookieJar', '_random', '_sha',
        '_sha256', '_sha512', '_socket', '_sre', '_ssl', '_struct',
        '_subprocess', '_threading_local', '_warnings', '_weakref',
        '_weakrefset', '_winreg'
    }) - {'__main__'}
[ "def", "pystdlib", "(", ")", ":", "curver", "=", "'.'", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "sys", ".", "version_info", "[", ":", "2", "]", ")", "return", "(", "set", "(", "stdlib_list", ".", "stdlib_list", "(", "curver", ")...
Return a set of all module-names in the Python standard library.
[ "Return", "a", "set", "of", "all", "module", "-", "names", "in", "the", "Python", "standard", "library", "." ]
python
train
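A quick usage sketch for pystdlib above; it needs the stdlib_list package that the function calls into.

    from pydeps.pystdlib import pystdlib

    std = pystdlib()
    print('os' in std, 'json' in std)  # True True
    print('numpy' in std)              # False - third-party, not stdlib
    print('__main__' in std)           # False - explicitly removed by the function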
OnroerendErfgoed/crabpy
crabpy/gateway/crab.py
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/crab.py#L652-L688
def get_straat_by_id(self, id):
    '''
    Retrieve a `straat` by the Id.

    :param integer id: The id of the `straat`.
    :rtype: :class:`Straat`
    '''
    def creator():
        res = crab_gateway_request(
            self.client, 'GetStraatnaamWithStatusByStraatnaamId', id
        )
        if res == None:
            raise GatewayResourceNotFoundException()
        return Straat(
            res.StraatnaamId,
            res.StraatnaamLabel,
            res.GemeenteId,
            res.StatusStraatnaam,
            res.Straatnaam,
            res.TaalCode,
            res.StraatnaamTweedeTaal,
            res.TaalCodeTweedeTaal,
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['long'].is_configured:
        key = 'GetStraatnaamWithStatusByStraatnaamId#%s' % (id)
        straat = self.caches['long'].get_or_create(key, creator)
    else:
        straat = creator()
    straat.set_gateway(self)
    return straat
[ "def", "get_straat_by_id", "(", "self", ",", "id", ")", ":", "def", "creator", "(", ")", ":", "res", "=", "crab_gateway_request", "(", "self", ".", "client", ",", "'GetStraatnaamWithStatusByStraatnaamId'", ",", "id", ")", "if", "res", "==", "None", ":", "r...
Retrieve a `straat` by the Id. :param integer id: The id of the `straat`. :rtype: :class:`Straat`
[ "Retrieve", "a", "straat", "by", "the", "Id", "." ]
python
train
twilio/twilio-python
twilio/twiml/voice_response.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/twiml/voice_response.py#L953-L979
def number(self, phone_number, send_digits=None, url=None, method=None,
           status_callback_event=None, status_callback=None,
           status_callback_method=None, **kwargs):
    """
    Create a <Number> element

    :param phone_number: Phone Number to dial
    :param send_digits: DTMF tones to play when the call is answered
    :param url: TwiML URL
    :param method: TwiML URL method
    :param status_callback_event: Events to call status callback
    :param status_callback: Status callback URL
    :param status_callback_method: Status callback URL method
    :param kwargs: additional attributes

    :returns: <Number> element
    """
    return self.nest(Number(
        phone_number,
        send_digits=send_digits,
        url=url,
        method=method,
        status_callback_event=status_callback_event,
        status_callback=status_callback,
        status_callback_method=status_callback_method,
        **kwargs
    ))
[ "def", "number", "(", "self", ",", "phone_number", ",", "send_digits", "=", "None", ",", "url", "=", "None", ",", "method", "=", "None", ",", "status_callback_event", "=", "None", ",", "status_callback", "=", "None", ",", "status_callback_method", "=", "None...
Create a <Number> element :param phone_number: Phone Number to dial :param send_digits: DTMF tones to play when the call is answered :param url: TwiML URL :param method: TwiML URL method :param status_callback_event: Events to call status callback :param status_callback: Status callback URL :param status_callback_method: Status callback URL method :param kwargs: additional attributes :returns: <Number> element
[ "Create", "a", "<Number", ">", "element" ]
python
train
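A usage sketch for the number helper above. It lives on nestable TwiML elements such as <Dial>, so a typical call looks like this; the phone number and DTMF digits are placeholders.

    from twilio.twiml.voice_response import VoiceResponse, Dial

    response = VoiceResponse()
    dial = Dial()
    dial.number('+15551234567', send_digits='ww1234')  # placeholders
    response.append(dial)

    # Renders TwiML like:
    # <Response><Dial><Number sendDigits="ww1234">+15551234567</Number></Dial></Response>
    print(response)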
ConsenSys/mythril-classic
mythril/laser/ethereum/plugins/implementations/mutation_pruner.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/ethereum/plugins/implementations/mutation_pruner.py#L36-L63
def initialize(self, symbolic_vm: LaserEVM):
    """Initializes the mutation pruner

    Introduces hooks for SSTORE operations
    :param symbolic_vm:
    :return:
    """

    @symbolic_vm.pre_hook("SSTORE")
    def mutator_hook(global_state: GlobalState):
        global_state.annotate(MutationAnnotation())

    @symbolic_vm.laser_hook("add_world_state")
    def world_state_filter_hook(global_state: GlobalState):
        if And(
            *global_state.mstate.constraints[:]
            + [
                global_state.environment.callvalue
                > symbol_factory.BitVecVal(0, 256)
            ]
        ).is_false:
            return
        if isinstance(
            global_state.current_transaction, ContractCreationTransaction
        ):
            return
        if len(list(global_state.get_annotations(MutationAnnotation))) == 0:
            raise PluginSkipWorldState
[ "def", "initialize", "(", "self", ",", "symbolic_vm", ":", "LaserEVM", ")", ":", "@", "symbolic_vm", ".", "pre_hook", "(", "\"SSTORE\"", ")", "def", "mutator_hook", "(", "global_state", ":", "GlobalState", ")", ":", "global_state", ".", "annotate", "(", "Mut...
Initializes the mutation pruner Introduces hooks for SSTORE operations :param symbolic_vm: :return:
[ "Initializes", "the", "mutation", "pruner" ]
python
train
earlye/nephele
nephele/AwsStack.py
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsStack.py#L142-L150
def do_resource(self, args):
    """Go to the specified resource. resource -h for detailed help"""
    parser = CommandArgumentParser("resource")
    parser.add_argument('-i', '--logical-id', dest='logical-id',
                        help='logical id of the child resource')
    args = vars(parser.parse_args(args))

    stackName = self.wrappedStack['rawStack'].name
    logicalId = args['logical-id']
    self.stackResource(stackName, logicalId)
[ "def", "do_resource", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"resource\"", ")", "parser", ".", "add_argument", "(", "'-i'", ",", "'--logical-id'", ",", "dest", "=", "'logical-id'", ",", "help", "=", "'logical id of t...
Go to the specified resource. resource -h for detailed help
[ "Go", "to", "the", "specified", "resource", ".", "resource", "-", "h", "for", "detailed", "help" ]
python
train
openstack/networking-cisco
tools/saf_prepare_setup.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/tools/saf_prepare_setup.py#L103-L148
def get_mysql_credentials(cfg_file):
    """Get the credentials and database name from options in config file."""

    try:
        parser = ConfigParser.ConfigParser()
        cfg_fp = open(cfg_file)
        parser.readfp(cfg_fp)
        cfg_fp.close()
    except ConfigParser.NoOptionError:
        cfg_fp.close()
        print('Failed to find mysql connections credentials.')
        sys.exit(1)
    except IOError:
        print('ERROR: Cannot open %s.', cfg_file)
        sys.exit(1)

    value = parser.get('dfa_mysql', 'connection')

    try:
        # Find location of pattern in connection parameter as shown below:
        # http://username:password@host/databasename?characterset=encoding'
        sobj = re.search(r"(://).*(@).*(/).*(\?)", value)

        # The list parameter contains:
        # indices[0], is the index of '://'
        # indices[1], is the index of '@'
        # indices[2], is the index of '/'
        # indices[3], is the index of '?'
        indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]

        # Get the credentials
        cred = value[indices[0] + 3:indices[1]].split(':')

        # Get the host name
        host = value[indices[1] + 1:indices[2]]

        # Get the database name
        db_name = value[indices[2] + 1:indices[3]]

        # Get the character encoding
        charset = value[indices[3] + 1:].split('=')[1]

        return cred[0], cred[1], host, db_name, charset
    except (ValueError, IndexError, AttributeError):
        print('Failed to find mysql connections credentials.')
        sys.exit(1)
[ "def", "get_mysql_credentials", "(", "cfg_file", ")", ":", "try", ":", "parser", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "cfg_fp", "=", "open", "(", "cfg_file", ")", "parser", ".", "readfp", "(", "cfg_fp", ")", "cfg_fp", ".", "close", "(", ")...
Get the credentials and database name from options in config file.
[ "Get", "the", "credentials", "and", "database", "name", "from", "options", "in", "config", "file", "." ]
python
train
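Because the full parsing logic is shown above, a standalone sketch of the same regex on a made-up connection string illustrates what the index arithmetic extracts:

    import re

    value = 'mysql://dfa_user:secret@10.0.0.5/dfa_db?charset=utf8'  # made-up string
    sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
    indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]

    cred = value[indices[0] + 3:indices[1]].split(':')  # ['dfa_user', 'secret']
    host = value[indices[1] + 1:indices[2]]             # '10.0.0.5'
    db_name = value[indices[2] + 1:indices[3]]          # 'dfa_db'
    charset = value[indices[3] + 1:].split('=')[1]      # 'utf8'
    print(cred[0], cred[1], host, db_name, charset)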
xiyouMc/ncmbot
ncmbot/core.py
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L150-L185
def send(self):
    """Sens the request."""
    success = False
    if self.method is None:
        raise ParamsError()
    try:
        if self.method == 'SEARCH':
            req = self._get_requests()
            _url = self.__NETEAST_HOST + self._METHODS[self.method]
            resp = req.post(_url, data=self.data)
            self._build_response(resp)
            self.response.ok = True
        else:
            if isinstance(self.data, dict):
                data = encrypted_request(self.data)
            req = self._get_webapi_requests()
            _url = self.__NETEAST_HOST + self._METHODS[self.method]
            if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
                _url = _url % self.params['uid']
            if self.method in ('LYRIC', 'MUSIC_COMMENT'):
                _url = _url % self.params['id']
            # GET
            if self.method in ('LYRIC'):
                resp = req.get(_url)
            else:
                resp = req.post(_url, data=data)
            self._build_response(resp)
            self.response.ok = True
    except Exception as why:
        traceback.print_exc()
        print 'Requests Exception', why
        # self._build_response(why)
        self.response.error = why
[ "def", "send", "(", "self", ")", ":", "success", "=", "False", "if", "self", ".", "method", "is", "None", ":", "raise", "ParamsError", "(", ")", "try", ":", "if", "self", ".", "method", "==", "'SEARCH'", ":", "req", "=", "self", ".", "_get_requests",...
Sens the request.
[ "Sens", "the", "request", "." ]
python
train
angr/angr
angr/simos/linux.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/simos/linux.py#L359-L378
def initialize_gdt_x86(self, state, concrete_target):
    """
    Create a GDT in the state memory and populate the segment registers.
    Rehook the vsyscall address using the real value in the concrete process memory

    :param state:            state which will be modified
    :param concrete_target:  concrete target that will be used to read the fs register
    :return:
    """
    _l.debug("Creating fake Global Descriptor Table and synchronizing gs segment register")
    gs = self._read_gs_register_x86(concrete_target)
    gdt = self.generate_gdt(0x0, gs)
    self.setup_gdt(state, gdt)

    # Synchronize the address of vsyscall in simprocedures dictionary with the concrete value
    _vsyscall_address = concrete_target.read_memory(gs + 0x10, state.project.arch.bits / 8)
    _vsyscall_address = struct.unpack(state.project.arch.struct_fmt(), _vsyscall_address)[0]
    state.project.rehook_symbol(_vsyscall_address, '_vsyscall')

    return gdt
[ "def", "initialize_gdt_x86", "(", "self", ",", "state", ",", "concrete_target", ")", ":", "_l", ".", "debug", "(", "\"Creating fake Global Descriptor Table and synchronizing gs segment register\"", ")", "gs", "=", "self", ".", "_read_gs_register_x86", "(", "concrete_targe...
Create a GDT in the state memory and populate the segment registers. Rehook the vsyscall address using the real value in the concrete process memory :param state: state which will be modified :param concrete_target: concrete target that will be used to read the fs register :return:
[ "Create", "a", "GDT", "in", "the", "state", "memory", "and", "populate", "the", "segment", "registers", ".", "Rehook", "the", "vsyscall", "address", "using", "the", "real", "value", "in", "the", "concrete", "process", "memory" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1820-L1846
def _add_arg_python(self, key, value=None, mask=False):
    """Add CLI Arg formatted specifically for Python.

    Args:
        key (string): The CLI Args key (e.g., --name).
        value (string): The CLI Args value (e.g., bob).
        mask (boolean, default:False): Indicates whether no mask value.
    """
    self._data[key] = value
    if not value:
        # both false boolean values (flags) and empty values should not be added.
        pass
    elif value is True:
        # true boolean values are flags and should not contain a value
        self._args.append('--{}'.format(key))
        self._args_quoted.append('--{}'.format(key))
        self._args_masked.append('--{}'.format(key))
    else:
        self._args.append('--{}={}'.format(key, value))
        if mask:
            # mask sensitive values
            value = 'x' * len(str(value))
        else:
            # quote all values that would get displayed
            value = self.quote(value)
        self._args_quoted.append('--{}={}'.format(key, value))
        self._args_masked.append('--{}={}'.format(key, value))
[ "def", "_add_arg_python", "(", "self", ",", "key", ",", "value", "=", "None", ",", "mask", "=", "False", ")", ":", "self", ".", "_data", "[", "key", "]", "=", "value", "if", "not", "value", ":", "# both false boolean values (flags) and empty values should not ...
Add CLI Arg formatted specifically for Python. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob). mask (boolean, default:False): Indicates whether no mask value.
[ "Add", "CLI", "Arg", "formatted", "specifically", "for", "Python", "." ]
python
train
Kortemme-Lab/klab
klab/benchmarking/analysis/ssm.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ssm.py#L461-L479
def parse_csv_file(csv_filepath, expect_negative_correlation = False, STDev_cutoff = 1.0,
                   headers_start_with = 'ID', comments_start_with = None, separator = ','):
    """ Analyzes a CSV file. Expects a CSV file with a header line starting with headers_start_with e.g.
            "ID,experimental value, prediction 1 value, prediction 2 value,"
        Record IDs are expected in the first column.
        Experimental values are expected in the second column.
        Predicted values are expected in the subsequent columns.

        :param csv_filepath: The path to a CSV file containing experimental and predicted data for some dataset.
        :param expect_negative_correlation: See parse_csv.
        :param STDev_cutoff: See parse_csv.
        :param headers_start_with: See parse_csv.
        :param comments_start_with: See parse_csv.
        :param separator: See parse_csv.
    """
    assert (os.path.exists(csv_filepath))
    return parse_csv(get_file_lines(csv_filepath),
                     expect_negative_correlation = expect_negative_correlation,
                     STDev_cutoff = STDev_cutoff,
                     headers_start_with = headers_start_with,
                     comments_start_with = comments_start_with,
                     separator = separator)
[ "def", "parse_csv_file", "(", "csv_filepath", ",", "expect_negative_correlation", "=", "False", ",", "STDev_cutoff", "=", "1.0", ",", "headers_start_with", "=", "'ID'", ",", "comments_start_with", "=", "None", ",", "separator", "=", "','", ")", ":", "assert", "(...
Analyzes a CSV file. Expects a CSV file with a header line starting with headers_start_with e.g. "ID,experimental value, prediction 1 value, prediction 2 value," Record IDs are expected in the first column. Experimental values are expected in the second column. Predicted values are expected in the subsequent columns. :param csv_filepath: The path to a CSV file containing experimental and predicted data for some dataset. :param expect_negative_correlation: See parse_csv. :param STDev_cutoff: See parse_csv. :param headers_start_with: See parse_csv. :param comments_start_with: See parse_csv. :param separator: See parse_csv.
[ "Analyzes", "a", "CSV", "file", ".", "Expects", "a", "CSV", "file", "with", "a", "header", "line", "starting", "with", "headers_start_with", "e", ".", "g", ".", "ID", "experimental", "value", "prediction", "1", "value", "prediction", "2", "value", "Record", ...
python
train
klmitch/bark
bark/format.py
https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/format.py#L290-L365
def parse(cls, format):
    """
    Parse a format string.  Factory function for the Format class.

    :param format: The format string to parse.

    :returns: An instance of class Format.
    """
    fmt = cls()

    # Return an empty Format if format is empty
    if not format:
        return fmt

    # Initialize the state for parsing
    state = ParseState(fmt, format)

    # Loop through the format string with a state-based parser
    for idx, char in enumerate(format):
        # Some characters get ignored
        if state.check_ignore():
            continue

        if state == 'string':
            if char == '%':
                # Handle '%%'
                if format[idx:idx + 2] == '%%':
                    # Add one % to the string context
                    state.add_text(idx + 1, idx + 2)
                    state.set_ignore(1)
                else:
                    state.add_text(idx)
                    state.conversion(idx)
            elif char == '\\':
                state.add_text(idx)
                state.escape(idx)
        elif state == 'escape':
            state.add_escape(idx + 1, char)
            state.pop_state(idx + 1)
        elif state == 'param':
            if char == '}':
                state.set_param(idx)
                state.pop_state()
        elif state == 'conv':
            if char == ')':
                state.set_conversion(idx)
                state.pop_state()  # now in 'conversion'
                state.pop_state(idx + 1)  # now in 'string'
        else:  # state == 'conversion'
            if char in '<>':
                # Allowed for Apache compatibility, but ignored
                continue
            elif char == '!':
                state.set_reject()
                continue
            elif char == ',' and state.code_last:
                # Syntactically allowed ','
                continue
            elif char.isdigit():
                # True if the code is valid
                if state.set_code(idx):
                    continue
            elif char == '{' and state.param_begin is None:
                state.param(idx + 1)
                continue
            elif char == '(' and state.conv_begin is None:
                state.conv(idx + 1)
                continue

            # OK, we have a complete conversion
            state.set_conversion(idx)
            state.pop_state(idx + 1)

    # Finish the parse and return the completed format
    return state.end_state()
[ "def", "parse", "(", "cls", ",", "format", ")", ":", "fmt", "=", "cls", "(", ")", "# Return an empty Format if format is empty", "if", "not", "format", ":", "return", "fmt", "# Initialize the state for parsing", "state", "=", "ParseState", "(", "fmt", ",", "form...
Parse a format string. Factory function for the Format class. :param format: The format string to parse. :returns: An instance of class Format.
[ "Parse", "a", "format", "string", ".", "Factory", "function", "for", "the", "Format", "class", "." ]
python
train
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L194-L206
def Copy(self, name=None):
    """Returns a copy.

    Make a shallow copy of d.  If you want a deep copy of d,
    use copy.deepcopy on the whole object.

    Args:
        name: string name for the new Hist
    """
    new = copy.copy(self)
    new.d = copy.copy(self.d)
    new.name = name if name is not None else self.name
    return new
[ "def", "Copy", "(", "self", ",", "name", "=", "None", ")", ":", "new", "=", "copy", ".", "copy", "(", "self", ")", "new", ".", "d", "=", "copy", ".", "copy", "(", "self", ".", "d", ")", "new", ".", "name", "=", "name", "if", "name", "is", "...
Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist
[ "Returns", "a", "copy", "." ]
python
train
cltk/cltk
cltk/prosody/latin/syllabifier.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/syllabifier.py#L317-L351
def _move_consonant(self, letters: list, positions: List[int]) -> List[str]:
    """
    Given a list of consonant positions, move the consonants according to
    certain consonant syllable behavioral rules for gathering and grouping.

    :param letters:
    :param positions:
    :return:
    """
    for pos in positions:
        previous_letter = letters[pos - 1]
        consonant = letters[pos]
        next_letter = letters[pos + 1]
        if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter):
            return string_utils.move_consonant_right(letters, [pos])
        if self._contains_vowels(previous_letter) and self._ends_with_vowel(
                previous_letter) and len(previous_letter) == 1:
            return string_utils.move_consonant_left(letters, [pos])
        if previous_letter + consonant in self.constants.ASPIRATES:
            return string_utils.move_consonant_left(letters, [pos])
        if consonant + next_letter in self.constants.ASPIRATES:
            return string_utils.move_consonant_right(letters, [pos])
        if next_letter[0] == consonant:
            return string_utils.move_consonant_left(letters, [pos])
        if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS:
            return string_utils.move_consonant_right(letters, [pos])
        if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']:
            return string_utils.move_consonant_right(letters, [pos])
        if self._contains_consonants(next_letter[0]) and self._starts_with_vowel(
                previous_letter[-1]):
            return string_utils.move_consonant_left(letters, [pos])
        # fall through case
        if self._contains_consonants(next_letter[0]):
            return string_utils.move_consonant_right(letters, [pos])
    return letters
[ "def", "_move_consonant", "(", "self", ",", "letters", ":", "list", ",", "positions", ":", "List", "[", "int", "]", ")", "->", "List", "[", "str", "]", ":", "for", "pos", "in", "positions", ":", "previous_letter", "=", "letters", "[", "pos", "-", "1"...
Given a list of consonant positions, move the consonants according to certain consonant syllable behavioral rules for gathering and grouping. :param letters: :param positions: :return:
[ "Given", "a", "list", "of", "consonant", "positions", "move", "the", "consonants", "according", "to", "certain", "consonant", "syllable", "behavioral", "rules", "for", "gathering", "and", "grouping", "." ]
python
train
waqasbhatti/astrobase
astrobase/hatsurveys/hplc.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hplc.py#L642-L784
def read_hatpi_binnedlc(binnedpklf, textlcf, timebinsec):
    '''This reads a binnedlc pickle produced by the HATPI prototype pipeline.

    Converts it into a standard lcdict as produced by the read_hatpi_textlc
    function above by using the information in unbinnedtextlc for the same
    object.

    Adds a 'binned' key to the standard lcdict containing the binned mags, etc.

    '''

    LOGINFO('reading binned LC %s' % binnedpklf)

    # read the textlc
    lcdict = read_hatpi_textlc(textlcf)

    # read the binned LC
    if binnedpklf.endswith('.gz'):
        infd = gzip.open(binnedpklf, 'rb')
    else:
        infd = open(binnedpklf, 'rb')

    try:
        binned = pickle.load(infd)
    except Exception as e:
        infd.seek(0)
        binned = pickle.load(infd, encoding='latin1')

    infd.close()

    # now that we have both, pull out the required columns from the binnedlc
    blckeys = binned.keys()

    lcdict['binned'] = {}

    for key in blckeys:

        # get EPD stuff
        if (key == 'epdlc' and
                'AP0' in binned[key] and
                'AP1' in binned[key] and
                'AP2' in binned[key]):

            # we'll have to generate errors because we don't have any in the
            # generated binned LC.
            ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
                                         np.nanmedian(binned[key]['AP0'])))
            ap1mad = np.nanmedian(np.abs(binned[key]['AP1'] -
                                         np.nanmedian(binned[key]['AP1'])))
            ap2mad = np.nanmedian(np.abs(binned[key]['AP2'] -
                                         np.nanmedian(binned[key]['AP2'])))

            lcdict['binned']['iep1'] = {'times': binned[key]['RJD'],
                                        'mags': binned[key]['AP0'],
                                        'errs': np.full_like(binned[key]['AP0'], ap0mad),
                                        'nbins': binned[key]['nbins'],
                                        'timebins': binned[key]['jdbins'],
                                        'timebinsec': timebinsec}
            lcdict['binned']['iep2'] = {'times': binned[key]['RJD'],
                                        'mags': binned[key]['AP1'],
                                        'errs': np.full_like(binned[key]['AP1'], ap1mad),
                                        'nbins': binned[key]['nbins'],
                                        'timebins': binned[key]['jdbins'],
                                        'timebinsec': timebinsec}
            lcdict['binned']['iep3'] = {'times': binned[key]['RJD'],
                                        'mags': binned[key]['AP2'],
                                        'errs': np.full_like(binned[key]['AP2'], ap2mad),
                                        'nbins': binned[key]['nbins'],
                                        'timebins': binned[key]['jdbins'],
                                        'timebinsec': timebinsec}

        # get TFA stuff for aperture 1
        if ((key == 'tfalc.TF1' or key == 'tfalc.TF1.gz') and
                'AP0' in binned[key]):

            # we'll have to generate errors because we don't have any in the
            # generated binned LC.
            ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
                                         np.nanmedian(binned[key]['AP0'])))

            lcdict['binned']['itf1'] = {'times': binned[key]['RJD'],
                                        'mags': binned[key]['AP0'],
                                        'errs': np.full_like(binned[key]['AP0'], ap0mad),
                                        'nbins': binned[key]['nbins'],
                                        'timebins': binned[key]['jdbins'],
                                        'timebinsec': timebinsec}

        # get TFA stuff for aperture 2
        if ((key == 'tfalc.TF2' or key == 'tfalc.TF2.gz') and
                'AP0' in binned[key]):

            # we'll have to generate errors because we don't have any in the
            # generated binned LC.
            ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
                                         np.nanmedian(binned[key]['AP0'])))

            lcdict['binned']['itf2'] = {'times': binned[key]['RJD'],
                                        'mags': binned[key]['AP0'],
                                        'errs': np.full_like(binned[key]['AP0'], ap0mad),
                                        'nbins': binned[key]['nbins'],
                                        'timebins': binned[key]['jdbins'],
                                        'timebinsec': timebinsec}

        # get TFA stuff for aperture 3
        if ((key == 'tfalc.TF3' or key == 'tfalc.TF3.gz') and
                'AP0' in binned[key]):

            # we'll have to generate errors because we don't have any in the
            # generated binned LC.
            ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
                                         np.nanmedian(binned[key]['AP0'])))

            lcdict['binned']['itf3'] = {'times': binned[key]['RJD'],
                                        'mags': binned[key]['AP0'],
                                        'errs': np.full_like(binned[key]['AP0'], ap0mad),
                                        'nbins': binned[key]['nbins'],
                                        'timebins': binned[key]['jdbins'],
                                        'timebinsec': timebinsec}

    # all done, check if we succeeded
    if lcdict['binned']:
        return lcdict
    else:
        LOGERROR('no binned measurements found in %s!' % binnedpklf)
        return None
[ "def", "read_hatpi_binnedlc", "(", "binnedpklf", ",", "textlcf", ",", "timebinsec", ")", ":", "LOGINFO", "(", "'reading binned LC %s'", "%", "binnedpklf", ")", "# read the textlc", "lcdict", "=", "read_hatpi_textlc", "(", "textlcf", ")", "# read the binned LC", "if", ...
This reads a binnedlc pickle produced by the HATPI prototype pipeline. Converts it into a standard lcdict as produced by the read_hatpi_textlc function above by using the information in unbinnedtextlc for the same object. Adds a 'binned' key to the standard lcdict containing the binned mags, etc.
[ "This", "reads", "a", "binnedlc", "pickle", "produced", "by", "the", "HATPI", "prototype", "pipeline", "." ]
python
valid
materialsproject/pymatgen
pymatgen/io/feff/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/feff/outputs.py#L158-L236
def charge_transfer_from_file(feff_inp_file, ldos_file):
    """
    Get charge transfer from file.

    Args:
        feff_inp_file (str): name of feff.inp file for run
        ldos_file (str): ldos filename for run, assume consequetive order,
            i.e., ldos01.dat, ldos02.dat....

    Returns:
        dictionary of dictionaries in order of potential sites
        ({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
    """
    cht = OrderedDict()
    parameters = Tags.from_file(feff_inp_file)

    if 'RECIPROCAL' in parameters:
        dicts = [dict()]
        pot_dict = dict()
        dos_index = 1
        begin = 0
        pot_inp = re.sub(r'feff.inp', r'pot.inp', feff_inp_file)
        pot_readstart = re.compile('.*iz.*lmaxsc.*xnatph.*xion.*folp.*')
        pot_readend = re.compile('.*ExternalPot.*switch.*')
        with zopen(pot_inp, "r") as potfile:
            for line in potfile:
                if len(pot_readend.findall(line)) > 0:
                    break

                if begin == 1:
                    z_number = int(line.strip().split()[0])
                    ele_name = Element.from_Z(z_number).name
                    if len(pot_dict) == 0:
                        pot_dict[0] = ele_name
                    elif len(pot_dict) > 0:
                        pot_dict[max(pot_dict.keys()) + 1] = ele_name
                    begin += 1
                    continue

                if begin == 2:
                    z_number = int(line.strip().split()[0])
                    ele_name = Element.from_Z(z_number).name
                    dicts[0][ele_name] = dos_index
                    dos_index += 1
                    if len(pot_dict) == 0:
                        pot_dict[0] = ele_name
                    elif len(pot_dict) > 0:
                        pot_dict[max(pot_dict.keys()) + 1] = ele_name

                if len(pot_readstart.findall(line)) > 0:
                    begin = 1
    else:
        pot_string = Potential.pot_string_from_file(feff_inp_file)
        dicts = Potential.pot_dict_from_string(pot_string)
        pot_dict = dicts[1]

    for i in range(0, len(dicts[0]) + 1):
        if len(str(i)) == 1:
            with zopen("{}0{}.dat".format(ldos_file, i), "rt") as fobject:
                f = fobject.readlines()
                s = float(f[3].split()[2])
                p = float(f[4].split()[2])
                d = float(f[5].split()[2])
                f1 = float(f[6].split()[2])
                tot = float(f[1].split()[4])
                cht[str(i)] = {pot_dict[i]: {'s': s, 'p': p, 'd': d, 'f': f1,
                                             'tot': tot}}
        else:
            with zopen(ldos_file + str(i) + ".dat", "rt") as fid:
                f = fid.readlines()
                s = float(f[3].split()[2])
                p = float(f[4].split()[2])
                d = float(f[5].split()[2])
                f1 = float(f[6].split()[2])
                tot = float(f[1].split()[4])
                cht[str(i)] = {pot_dict[i]: {'s': s, 'p': p, 'd': d, 'f': f1,
                                             'tot': tot}}

    return cht
[ "def", "charge_transfer_from_file", "(", "feff_inp_file", ",", "ldos_file", ")", ":", "cht", "=", "OrderedDict", "(", ")", "parameters", "=", "Tags", ".", "from_file", "(", "feff_inp_file", ")", "if", "'RECIPROCAL'", "in", "parameters", ":", "dicts", "=", "[",...
Get charge transfer from file. Args: feff_inp_file (str): name of feff.inp file for run ldos_file (str): ldos filename for run, assume consequetive order, i.e., ldos01.dat, ldos02.dat.... Returns: dictionary of dictionaries in order of potential sites ({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
[ "Get", "charge", "transfer", "from", "file", "." ]
python
train
markovmodel/PyEMMA
pyemma/coordinates/data/featurization/featurizer.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/featurization/featurizer.py#L197-L234
def pairs(sel, excluded_neighbors=0):
    """
    Creates all pairs between indexes. Will exclude closest neighbors up to
    :py:obj:`excluded_neighbors` The self-pair (i,i) is always excluded

    Parameters
    ----------
    sel : ndarray((n), dtype=int)
        array with selected atom indexes

    excluded_neighbors: int, default = 0
        number of neighbors that will be excluded when creating the pairs

    Returns
    -------
    sel : ndarray((m,2), dtype=int)
        m x 2 array with all pair indexes between different atoms that are at
        least :obj:`excluded_neighbors` indexes apart, i.e. if i is the index
        of an atom, the pairs [i,i-2], [i,i-1], [i,i], [i,i+1], [i,i+2], will
        not be in :py:obj:`sel` (n=excluded_neighbors) if
        :py:obj:`excluded_neighbors` = 2. Moreover, the list is
        non-redundant, i.e. if [i,j] is in sel, then [j,i] is not.
    """
    assert isinstance(excluded_neighbors, int)

    p = []
    for i in range(len(sel)):
        for j in range(i + 1, len(sel)):
            # get ordered pair
            I = sel[i]
            J = sel[j]
            if (I > J):
                I = sel[j]
                J = sel[i]
            # exclude 1 and 2 neighbors
            if (J > I + excluded_neighbors):
                p.append([I, J])
    return np.array(p)
[ "def", "pairs", "(", "sel", ",", "excluded_neighbors", "=", "0", ")", ":", "assert", "isinstance", "(", "excluded_neighbors", ",", "int", ")", "p", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sel", ")", ")", ":", "for", "j", "in", ...
Creates all pairs between indexes. Will exclude closest neighbors up to :py:obj:`excluded_neighbors`
The self-pair (i,i) is always excluded

Parameters
----------
sel : ndarray((n), dtype=int)
    array with selected atom indexes

excluded_neighbors: int, default = 0
    number of neighbors that will be excluded when creating the pairs

Returns
-------
sel : ndarray((m,2), dtype=int)
    m x 2 array with all pair indexes between different atoms that are at least
    :obj:`excluded_neighbors` indexes apart, i.e. if i is the index of an atom,
    the pairs [i,i-2], [i,i-1], [i,i], [i,i+1], [i,i+2], will not be in :py:obj:`sel`
    (n=excluded_neighbors) if :py:obj:`excluded_neighbors` = 2.
    Moreover, the list is non-redundant, i.e. if [i,j] is in sel, then [j,i] is not.
[ "Creates", "all", "pairs", "between", "indexes", ".", "Will", "exclude", "closest", "neighbors", "up", "to", ":", "py", ":", "obj", ":", "excluded_neighbors", "The", "self", "-", "pair", "(", "i", "i", ")", "is", "always", "excluded" ]
python
train
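A quick check of the pair-generation logic above, using nothing beyond numpy and the function as shown:

import numpy as np

sel = np.array([0, 1, 2, 3, 4])
# with excluded_neighbors=1, pairs fewer than 2 indexes apart are dropped
print(pairs(sel, excluded_neighbors=1))
# -> [[0 2] [0 3] [0 4] [1 3] [1 4] [2 4]]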
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L315-L322
def rotateX(self, angle, axis_point=(0, 0, 0), rad=False): """Rotate around x-axis. If angle is in radians set ``rad=True``.""" if rad: angle *= 57.29578 self.RotateX(angle) if self.trail: self.updateTrail() return self
[ "def", "rotateX", "(", "self", ",", "angle", ",", "axis_point", "=", "(", "0", ",", "0", ",", "0", ")", ",", "rad", "=", "False", ")", ":", "if", "rad", ":", "angle", "*=", "57.29578", "self", ".", "RotateX", "(", "angle", ")", "if", "self", "....
Rotate around x-axis. If angle is in radians set ``rad=True``.
[ "Rotate", "around", "x", "-", "axis", ".", "If", "angle", "is", "in", "radians", "set", "rad", "=", "True", "." ]
python
train
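The 57.29578 constant in rotateX above is just the degrees-per-radian factor; a one-line sketch of the conversion applied when rad=True:

import math
angle_deg = angle_rad * 180.0 / math.pi  # == angle_rad * 57.29578...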
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L1721-L1751
def process_json_rec(self, data, name, idx, sub_idx): """ Processes json rec - json object :param data: :param name: :param idx: :param sub_idx: :return: """ ret = [] if isinstance(data, list): for kidx, rec in enumerate(data): sub = self.process_json_rec(rec, name, idx, list(sub_idx + [kidx])) ret.append(sub) return ret if isinstance(data, dict): for key in data: rec = data[key] sub = self.process_json_rec(rec, name, idx, list(sub_idx + [rec])) ret.append(sub) if 'n' in data: ret.append(self.process_js_mod(data['n'], name, idx, sub_idx)) if 'mod' in data: ret.append(self.process_js_mod(data['mod'], name, idx, sub_idx)) if 'cert' in data: ret.append(self.process_js_certs([data['cert']], name, idx, sub_idx)) if 'certs' in data: ret.append(self.process_js_certs(data['certs'], name, idx, sub_idx)) return ret
[ "def", "process_json_rec", "(", "self", ",", "data", ",", "name", ",", "idx", ",", "sub_idx", ")", ":", "ret", "=", "[", "]", "if", "isinstance", "(", "data", ",", "list", ")", ":", "for", "kidx", ",", "rec", "in", "enumerate", "(", "data", ")", ...
Processes json rec - json object :param data: :param name: :param idx: :param sub_idx: :return:
[ "Processes", "json", "rec", "-", "json", "object", ":", "param", "data", ":", ":", "param", "name", ":", ":", "param", "idx", ":", ":", "param", "sub_idx", ":", ":", "return", ":" ]
python
train
tslight/treepick
treepick/paths.py
https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L68-L76
def traverse(self): ''' Recursive generator that lazily unfolds the filesystem. ''' yield self, 0 if self.name in self.expanded: for path in self.getpaths(): for child, depth in path.traverse(): yield child, depth + 1
[ "def", "traverse", "(", "self", ")", ":", "yield", "self", ",", "0", "if", "self", ".", "name", "in", "self", ".", "expanded", ":", "for", "path", "in", "self", ".", "getpaths", "(", ")", ":", "for", "child", ",", "depth", "in", "path", ".", "tra...
Recursive generator that lazily unfolds the filesystem.
[ "Recursive", "generator", "that", "lazily", "unfolds", "the", "filesystem", "." ]
python
train
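A sketch of how such a lazily unfolding generator is typically consumed. It assumes a root Paths node with name, expanded, and getpaths() set up as the class expects:

for node, depth in root.traverse():
    # depth grows by one per directory level, so it doubles as indentation
    print('  ' * depth + node.name)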
pytroll/satpy
satpy/readers/seviri_l1b_native.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/seviri_l1b_native.py#L147-L157
def _get_memmap(self): """Get the memory map for the SEVIRI data""" with open(self.filename) as fp: data_dtype = self._get_data_dtype() hdr_size = native_header.itemsize return np.memmap(fp, dtype=data_dtype, shape=(self.mda['number_of_lines'],), offset=hdr_size, mode="r")
[ "def", "_get_memmap", "(", "self", ")", ":", "with", "open", "(", "self", ".", "filename", ")", "as", "fp", ":", "data_dtype", "=", "self", ".", "_get_data_dtype", "(", ")", "hdr_size", "=", "native_header", ".", "itemsize", "return", "np", ".", "memmap"...
Get the memory map for the SEVIRI data
[ "Get", "the", "memory", "map", "for", "the", "SEVIRI", "data" ]
python
train
airspeed-velocity/asv
asv/step_detect.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/step_detect.py#L441-L496
def detect_regressions(steps, threshold=0): """Detect regressions in a (noisy) signal. A regression means an upward step in the signal. The value 'before' a regression is the value immediately preceding the upward step. The value 'after' a regression is the minimum of values after the upward step. Parameters ---------- steps : list of (left, right, value, min, error) List of steps computed by detect_steps, or equivalent threshold : float Relative threshold for reporting regressions. Filter out jumps whose relative size is smaller than threshold, if they are not necessary to explain the difference between the best and the latest values. Returns ------- latest_value Latest value best_value Best value regression_pos : list of (before, after, value_before, value_after) List of positions between which the value increased. The first item corresponds to the last position at which the best value was obtained. """ if not steps: # No data: no regressions return None, None, None regression_pos = [] last_v = steps[-1][2] best_v = last_v best_err = steps[-1][4] prev_l = None # Find upward steps that resulted to worsened value afterward for l, r, cur_v, cur_min, cur_err in reversed(steps): if best_v - cur_v > max(cur_err, best_err, threshold * cur_v): regression_pos.append((r - 1, prev_l, cur_v, best_v)) prev_l = l if cur_v < best_v: best_v = cur_v best_err = cur_err regression_pos.reverse() # Return results if regression_pos: return (last_v, best_v, regression_pos) else: return (None, None, None)
[ "def", "detect_regressions", "(", "steps", ",", "threshold", "=", "0", ")", ":", "if", "not", "steps", ":", "# No data: no regressions", "return", "None", ",", "None", ",", "None", "regression_pos", "=", "[", "]", "last_v", "=", "steps", "[", "-", "1", "...
Detect regressions in a (noisy) signal. A regression means an upward step in the signal. The value 'before' a regression is the value immediately preceding the upward step. The value 'after' a regression is the minimum of values after the upward step. Parameters ---------- steps : list of (left, right, value, min, error) List of steps computed by detect_steps, or equivalent threshold : float Relative threshold for reporting regressions. Filter out jumps whose relative size is smaller than threshold, if they are not necessary to explain the difference between the best and the latest values. Returns ------- latest_value Latest value best_value Best value regression_pos : list of (before, after, value_before, value_after) List of positions between which the value increased. The first item corresponds to the last position at which the best value was obtained.
[ "Detect", "regressions", "in", "a", "(", "noisy", ")", "signal", "." ]
python
train
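A worked example for detect_regressions on a synthetic step list in the (left, right, value, min, error) format the docstring describes; the expected output below follows a hand trace of the code as shown:

steps = [(0, 5, 1.0, 0.9, 0.05),   # early segment: good (low) values
         (5, 10, 1.5, 1.4, 0.05)]  # later segment: values worsened
latest, best, positions = detect_regressions(steps)
# -> latest == 1.5, best == 1.0, positions == [(4, 5, 1.0, 1.5)],
#    i.e. the upward jump sits between indexes 4 and 5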
albu/albumentations
albumentations/augmentations/functional.py
https://github.com/albu/albumentations/blob/b31393cd6126516d37a84e44c879bd92c68ffc93/albumentations/augmentations/functional.py#L35-L44
def preserve_shape(func): """Preserve shape of the image.""" @wraps(func) def wrapped_function(img, *args, **kwargs): shape = img.shape result = func(img, *args, **kwargs) result = result.reshape(shape) return result return wrapped_function
[ "def", "preserve_shape", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_function", "(", "img", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "shape", "=", "img", ".", "shape", "result", "=", "func", "(", "img", ",", ...
Preserve shape of the image.
[ "Preserve", "shape", "of", "the", "image", "." ]
python
train
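Usage sketch for the preserve_shape decorator above; normalize is a made-up example function that flattens its input, which the decorator then undoes:

import numpy as np

@preserve_shape
def normalize(img):
    # the wrapped op changes the array's shape; here we flatten it
    return (img.astype(np.float32) / 255.0).reshape(-1)

img = np.zeros((4, 4, 3), dtype=np.uint8)
assert normalize(img).shape == (4, 4, 3)  # original shape restored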
awacha/sastool
sastool/utils2d/corrections.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L63-L83
def angledependentabsorption(twotheta, transmission):
    """Correction for angle-dependent absorption of the sample

    Inputs:
        twotheta: matrix of two-theta values
        transmission: the transmission of the sample (I_after/I_before, or
            exp(-mu*d))

    The output matrix is of the same shape as twotheta. The scattering
    intensity matrix should be multiplied by it. Note that this does not
    correct for sample transmission by itself, as the 2*theta -> 0 limit of
    this matrix is unity. Twotheta==0 and transmission==1 cases are handled
    correctly (the limit is 1 in both cases).
    """
    cor = np.ones(twotheta.shape)
    if transmission == 1:
        return cor
    mud = -np.log(transmission)

    cor[twotheta > 0] = transmission * mud * (1 - 1 / np.cos(twotheta[twotheta > 0])) / (np.exp(-mud / np.cos(twotheta[twotheta > 0])) - np.exp(-mud))
    return cor
[ "def", "angledependentabsorption", "(", "twotheta", ",", "transmission", ")", ":", "cor", "=", "np", ".", "ones", "(", "twotheta", ".", "shape", ")", "if", "transmission", "==", "1", ":", "return", "cor", "mud", "=", "-", "np", ".", "log", "(", "transm...
Correction for angle-dependent absorption of the sample

Inputs:
    twotheta: matrix of two-theta values
    transmission: the transmission of the sample (I_after/I_before, or
        exp(-mu*d))

The output matrix is of the same shape as twotheta. The scattering
intensity matrix should be multiplied by it. Note that this does not
correct for sample transmission by itself, as the 2*theta -> 0 limit of
this matrix is unity. Twotheta==0 and transmission==1 cases are handled
correctly (the limit is 1 in both cases).
[ "Correction", "for", "angle", "-", "dependent", "absorption", "of", "the", "sample" ]
python
train
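Usage sketch for angledependentabsorption above; the angles and transmission value are illustrative:

import numpy as np

tth = np.deg2rad([[0.0, 5.0], [10.0, 20.0]])  # two-theta in radians
cor = angledependentabsorption(tth, transmission=0.5)
# cor[0, 0] == 1 (the 2*theta -> 0 limit); apply as intensity *= cor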
googledatalab/pydatalab
google/datalab/bigquery/commands/_bigquery.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L456-L477
def _dryrun_cell(args, cell_body): """Implements the BigQuery cell magic used to dry run BQ queries. The supported syntax is: %%bq dryrun [-q|--sql <query identifier>] [<YAML or JSON cell_body or inline SQL>] Args: args: the argument following '%bq dryrun'. cell_body: optional contents of the cell interpreted as YAML or JSON. Returns: The response wrapped in a DryRunStats object """ query = _get_query_argument(args, cell_body, google.datalab.utils.commands.notebook_environment()) if args['verbose']: print(query.sql) context = google.datalab.utils._utils._construct_context_for_args(args) result = query.dry_run(context=context) return bigquery._query_stats.QueryStats( total_bytes=result['totalBytesProcessed'], is_cached=result['cacheHit'])
[ "def", "_dryrun_cell", "(", "args", ",", "cell_body", ")", ":", "query", "=", "_get_query_argument", "(", "args", ",", "cell_body", ",", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "notebook_environment", "(", ")", ")", "if", "args", "[",...
Implements the BigQuery cell magic used to dry run BQ queries. The supported syntax is: %%bq dryrun [-q|--sql <query identifier>] [<YAML or JSON cell_body or inline SQL>] Args: args: the argument following '%bq dryrun'. cell_body: optional contents of the cell interpreted as YAML or JSON. Returns: The response wrapped in a DryRunStats object
[ "Implements", "the", "BigQuery", "cell", "magic", "used", "to", "dry", "run", "BQ", "queries", "." ]
python
train
MozillaSecurity/fuzzfetch
src/fuzzfetch/fetch.py
https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L444-L448
def moz_info(self): """Return the build's mozinfo""" if 'moz_info' not in self._memo: self._memo['moz_info'] = _get_url(self.artifact_url('mozinfo.json')).json() return self._memo['moz_info']
[ "def", "moz_info", "(", "self", ")", ":", "if", "'moz_info'", "not", "in", "self", ".", "_memo", ":", "self", ".", "_memo", "[", "'moz_info'", "]", "=", "_get_url", "(", "self", ".", "artifact_url", "(", "'mozinfo.json'", ")", ")", ".", "json", "(", ...
Return the build's mozinfo
[ "Return", "the", "build", "s", "mozinfo" ]
python
train
FNNDSC/chrisapp
chrisapp/base.py
https://github.com/FNNDSC/chrisapp/blob/b176655f97206240fe173dfe86736f82f0d85bc4/chrisapp/base.py#L283-L290
def save_json_representation(self, dir_path): """ Save the app's JSON representation object to a JSON file. """ file_name = self.__class__.__name__+ '.json' file_path = os.path.join(dir_path, file_name) with open(file_path, 'w') as outfile: json.dump(self.get_json_representation(), outfile)
[ "def", "save_json_representation", "(", "self", ",", "dir_path", ")", ":", "file_name", "=", "self", ".", "__class__", ".", "__name__", "+", "'.json'", "file_path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "file_name", ")", "with", "open"...
Save the app's JSON representation object to a JSON file.
[ "Save", "the", "app", "s", "JSON", "representation", "object", "to", "a", "JSON", "file", "." ]
python
train
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L182-L200
def all(self): """Return a dictionary containing all preferences by section Loaded from cache or from db in case of cold cache """ if not preferences_settings.ENABLE_CACHE: return self.load_from_db() preferences = self.registry.preferences() # first we hit the cache once for all existing preferences a = self.many_from_cache(preferences) if len(a) == len(preferences): return a # avoid database hit if not necessary # then we fill those that miss, but exist in the database # (just hit the database for all of them, filtering is complicated, and # in most cases you'd need to grab the majority of them anyway) a.update(self.load_from_db(cache=True)) return a
[ "def", "all", "(", "self", ")", ":", "if", "not", "preferences_settings", ".", "ENABLE_CACHE", ":", "return", "self", ".", "load_from_db", "(", ")", "preferences", "=", "self", ".", "registry", ".", "preferences", "(", ")", "# first we hit the cache once for all...
Return a dictionary containing all preferences by section Loaded from cache or from db in case of cold cache
[ "Return", "a", "dictionary", "containing", "all", "preferences", "by", "section", "Loaded", "from", "cache", "or", "from", "db", "in", "case", "of", "cold", "cache" ]
python
train
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L73-L97
def shorten_go_name_ptbl3(self, name, dcnt): """Shorten GO description for Table 3 in manuscript.""" if self._keep_this(name): return name name = name.replace("positive regulation of immune system process", "+ reg. of immune sys. process") name = name.replace("positive regulation of immune response", "+ reg. of immune response") name = name.replace("positive regulation of cytokine production", "+ reg. of cytokine production") if dcnt < 40: name = name.replace("antigen processing and presentation", "a.p.p.") if dcnt < 10: name = name.replace("negative", "-") name = name.replace("positive", "+") #name = name.replace("tumor necrosis factor production", "tumor necrosis factor prod.") name = name.replace("tumor necrosis factor production", "TNF production") if dcnt < 4: name = name.replace("regulation", "reg.") name = name.replace("exogenous ", "") name = name.replace(" via ", " w/") name = name.replace("T cell mediated cytotoxicity", "cytotoxicity via T cell") name = name.replace('involved in', 'in') name = name.replace('-positive', '+') return name
[ "def", "shorten_go_name_ptbl3", "(", "self", ",", "name", ",", "dcnt", ")", ":", "if", "self", ".", "_keep_this", "(", "name", ")", ":", "return", "name", "name", "=", "name", ".", "replace", "(", "\"positive regulation of immune system process\"", ",", "\"+ r...
Shorten GO description for Table 3 in manuscript.
[ "Shorten", "GO", "description", "for", "Table", "3", "in", "manuscript", "." ]
python
train
atlassian-api/atlassian-python-api
atlassian/bitbucket.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/bitbucket.py#L315-L330
def delete_branch(self, project, repository, name, end_point): """ Delete branch from related repo :param self: :param project: :param repository: :param name: :param end_point: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branches'.format(project=project, repository=repository) data = {"name": str(name), "endPoint": str(end_point)} return self.delete(url, data=data)
[ "def", "delete_branch", "(", "self", ",", "project", ",", "repository", ",", "name", ",", "end_point", ")", ":", "url", "=", "'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branches'", ".", "format", "(", "project", "=", "project", ",", "repository", "...
Delete branch from related repo :param self: :param project: :param repository: :param name: :param end_point: :return:
[ "Delete", "branch", "from", "related", "repo", ":", "param", "self", ":", ":", "param", "project", ":", ":", "param", "repository", ":", ":", "param", "name", ":", ":", "param", "end_point", ":", ":", "return", ":" ]
python
train
earlye/nephele
nephele/AwsAutoScalingGroup.py
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L126-L136
def do_setDesiredCapacity(self,args): """Set the desired capacity""" parser = CommandArgumentParser("setDesiredCapacity") parser.add_argument(dest='value',type=int,help='new value'); args = vars(parser.parse_args(args)) value = int(args['value']) print "Setting desired capacity to {}".format(value) client = AwsConnectionFactory.getAsgClient() client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True) print "Scaling activity in progress"
[ "def", "do_setDesiredCapacity", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"setDesiredCapacity\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'value'", ",", "type", "=", "int", ",", "help", "=", "'new value'"...
Set the desired capacity
[ "Set", "the", "desired", "capacity" ]
python
train
Hundemeier/sacn
sacn/receiving/receiver_thread.py
https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiving/receiver_thread.py#L129-L140
def is_legal_priority(self, packet: DataPacket): """ Check if the given packet has high enough priority for the stored values for the packet's universe. :param packet: the packet to check :return: returns True if the priority is good. Otherwise False """ # check if the packet's priority is high enough to get processed if packet.universe not in self.callbacks.keys() or \ packet.priority < self.priorities[packet.universe][0]: return False # return if the universe is not interesting else: return True
[ "def", "is_legal_priority", "(", "self", ",", "packet", ":", "DataPacket", ")", ":", "# check if the packet's priority is high enough to get processed", "if", "packet", ".", "universe", "not", "in", "self", ".", "callbacks", ".", "keys", "(", ")", "or", "packet", ...
Check if the given packet has high enough priority for the stored values for the packet's universe. :param packet: the packet to check :return: returns True if the priority is good. Otherwise False
[ "Check", "if", "the", "given", "packet", "has", "high", "enough", "priority", "for", "the", "stored", "values", "for", "the", "packet", "s", "universe", ".", ":", "param", "packet", ":", "the", "packet", "to", "check", ":", "return", ":", "returns", "Tru...
python
train
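The E1.31 priority gate above boils down to the comparison below. This is a simplified stand-alone mirror for illustration, not the sACN library itself (the two dict lookups of the original are collapsed into one hypothetical priorities dict):

priorities = {1: (100, 1234.5)}  # universe -> (stored priority, timestamp)

def priority_ok(universe, priority):
    # unknown universes are ignored; lower-priority packets are dropped
    return universe in priorities and priority >= priorities[universe][0]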
fabioz/PyDev.Debugger
_pydev_imps/_pydev_inspect.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_inspect.py#L155-L164
def getmembers(object, predicate=None): """Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.""" results = [] for key in dir(object): value = getattr(object, key) if not predicate or predicate(value): results.append((key, value)) results.sort() return results
[ "def", "getmembers", "(", "object", ",", "predicate", "=", "None", ")", ":", "results", "=", "[", "]", "for", "key", "in", "dir", "(", "object", ")", ":", "value", "=", "getattr", "(", "object", ",", "key", ")", "if", "not", "predicate", "or", "pre...
Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.
[ "Return", "all", "members", "of", "an", "object", "as", "(", "name", "value", ")", "pairs", "sorted", "by", "name", ".", "Optionally", "only", "return", "members", "that", "satisfy", "a", "given", "predicate", "." ]
python
train
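Usage sketch for getmembers above; Point is a throwaway example class:

class Point:
    x = 1
    def norm(self):
        return abs(self.x)

print(getmembers(Point, callable))  # only callable attributes (incl. inherited dunders)
print(getmembers(Point))            # every attribute, sorted by name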
awslabs/aws-greengrass-group-setup
gg_group_setup/cmd.py
https://github.com/awslabs/aws-greengrass-group-setup/blob/06189ceccb794fedf80e0e7649938c18792e16c9/gg_group_setup/cmd.py#L73-L180
def create(self, group_type, config_file, group_name=None, region=None, profile_name=None): """ Create a Greengrass group in the given region. :param group_type: the type of group to create. Must match a `key` in the `group_types` dict :param config_file: config file of the group to create :param group_name: the name of the group. If no name is given, then group_type will be used. :param region: the region in which to create the new group. [default: us-west-2] :param profile_name: the name of the `awscli` profile to use. [default: None] """ logging.info("[begin] create command using group_types:{0}".format( self.group_types)) config = GroupConfigFile(config_file=config_file) if config.is_fresh() is False: raise ValueError( "Config file already tracking previously created group" ) if group_type not in self.group_types.keys(): raise ValueError("Can only create {0} groups.".format( self.group_types) ) if region is None: region = self._region # create an instance of the requested group type that uses the given # config file and region gt = self.group_types[group_type](config=config, region=region) # get and store the account's IoT endpoint for future use ep = _get_iot_session(region=region).describe_endpoint() misc = config['misc'] misc['iot_endpoint'] = ep['endpointAddress'] config['misc'] = misc # Create a Group logging.info("[begin] Creating a Greengrass Group") if group_name is None: group_name = group_type gg_client = _get_gg_session(region=region, profile_name=profile_name) group_info = gg_client.create_group(Name="{0}".format(group_name)) config['group'] = {"id": group_info['Id']} # setup the policies and roles gt.create_and_attach_thing_policy() gt.create_and_attach_iam_role() cl_arn = self._create_core_definition( gg_client=gg_client, group_type=gt, config=config, group_name=group_name ) dl_arn = self._create_device_definition( gg_client=gg_client, group_type=gt, config=config, group_name=group_name ) lv_arn = self._create_function_definition( gg_client=gg_client, group_type=gt, config=config ) log_arn = self._create_logger_definition( gg_client=gg_client, group_type=gt, config=config ) sub_arn = self._create_subscription_definition( gg_client=gg_client, group_type=gt, config=config ) logging.info( 'Group details, core_def:{0} device_def:{1} func_def:{2} ' 'logger_def:{3} subs_def:{4}'.format( cl_arn, dl_arn, lv_arn, log_arn, sub_arn) ) # Add all the constituent parts to the Greengrass Group group_args = {'GroupId': group_info['Id']} if cl_arn: group_args['CoreDefinitionVersionArn'] = cl_arn if dl_arn: group_args['DeviceDefinitionVersionArn'] = dl_arn if lv_arn: group_args['FunctionDefinitionVersionArn'] = lv_arn if log_arn: group_args['LoggerDefinitionVersionArn'] = log_arn if sub_arn: group_args['SubscriptionDefinitionVersionArn'] = sub_arn grp = gg_client.create_group_version( **group_args ) # store info about the provisioned artifacts into the local config file config['group'] = { "id": group_info['Id'], "version_arn": grp['Arn'], "version": grp['Version'], "name": group_name } logging.info( "[end] Created Greengrass Group {0}".format(group_info['Id']))
[ "def", "create", "(", "self", ",", "group_type", ",", "config_file", ",", "group_name", "=", "None", ",", "region", "=", "None", ",", "profile_name", "=", "None", ")", ":", "logging", ".", "info", "(", "\"[begin] create command using group_types:{0}\"", ".", "...
Create a Greengrass group in the given region. :param group_type: the type of group to create. Must match a `key` in the `group_types` dict :param config_file: config file of the group to create :param group_name: the name of the group. If no name is given, then group_type will be used. :param region: the region in which to create the new group. [default: us-west-2] :param profile_name: the name of the `awscli` profile to use. [default: None]
[ "Create", "a", "Greengrass", "group", "in", "the", "given", "region", "." ]
python
train
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L1026-L1035
def lsf(self, astr_path=""): """ List only the "files" in the astr_path. :param astr_path: path to list :return: "files" in astr_path, empty list if no files """ d_files = self.ls(astr_path, nodes=False, data=True) l_files = d_files.keys() return l_files
[ "def", "lsf", "(", "self", ",", "astr_path", "=", "\"\"", ")", ":", "d_files", "=", "self", ".", "ls", "(", "astr_path", ",", "nodes", "=", "False", ",", "data", "=", "True", ")", "l_files", "=", "d_files", ".", "keys", "(", ")", "return", "l_files...
List only the "files" in the astr_path. :param astr_path: path to list :return: "files" in astr_path, empty list if no files
[ "List", "only", "the", "files", "in", "the", "astr_path", "." ]
python
train
hydpy-dev/hydpy
hydpy/core/timetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/timetools.py#L582-L610
def wateryear(self):
    """The actual hydrological year according to the selected
    reference month.

    The reference month |Date.refmonth| defaults to November:

    >>> october = Date('1996.10.01')
    >>> november = Date('1996.11.01')
    >>> october.wateryear
    1996
    >>> november.wateryear
    1997

    Note that changing |Date.refmonth| affects all |Date| objects:

    >>> october.refmonth = 10
    >>> october.wateryear
    1997
    >>> november.wateryear
    1997
    >>> october.refmonth = 'November'
    >>> october.wateryear
    1996
    >>> november.wateryear
    1997
    """
    if self.month < self._firstmonth_wateryear:
        return self.year
    return self.year + 1
[ "def", "wateryear", "(", "self", ")", ":", "if", "self", ".", "month", "<", "self", ".", "_firstmonth_wateryear", ":", "return", "self", ".", "year", "return", "self", ".", "year", "+", "1" ]
The actual hydrological year according to the selected
reference month.

The reference month |Date.refmonth| defaults to November:

>>> october = Date('1996.10.01')
>>> november = Date('1996.11.01')
>>> october.wateryear
1996
>>> november.wateryear
1997

Note that changing |Date.refmonth| affects all |Date| objects:

>>> october.refmonth = 10
>>> october.wateryear
1997
>>> november.wateryear
1997
>>> october.refmonth = 'November'
>>> october.wateryear
1996
>>> november.wateryear
1997
[ "The", "actual", "hydrological", "year", "according", "to", "the", "selected", "reference", "month", "." ]
python
train
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L585-L615
def parse_rdf_payload(self, data, headers): ''' small function to parse RDF payloads from various repository endpoints Args: data (response.data): data from requests response headers (response.headers): headers from requests response Returns: (rdflib.Graph): parsed graph ''' # handle edge case for content-types not recognized by rdflib parser if headers['Content-Type'].startswith('text/plain'): logger.debug('text/plain Content-Type detected, using application/n-triples for parser') parse_format = 'application/n-triples' else: parse_format = headers['Content-Type'] # clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types) if ';charset' in parse_format: parse_format = parse_format.split(';')[0] # parse graph graph = rdflib.Graph().parse( data=data.decode('utf-8'), format=parse_format) # return graph return graph
[ "def", "parse_rdf_payload", "(", "self", ",", "data", ",", "headers", ")", ":", "# handle edge case for content-types not recognized by rdflib parser", "if", "headers", "[", "'Content-Type'", "]", ".", "startswith", "(", "'text/plain'", ")", ":", "logger", ".", "debug...
small function to parse RDF payloads from various repository endpoints Args: data (response.data): data from requests response headers (response.headers): headers from requests response Returns: (rdflib.Graph): parsed graph
[ "small", "function", "to", "parse", "RDF", "payloads", "from", "various", "repository", "endpoints" ]
python
train
taleinat/fuzzysearch
src/fuzzysearch/substitutions_only.py
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/substitutions_only.py#L211-L226
def has_near_match_substitutions_ngrams(subsequence, sequence, max_substitutions): """search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the number of character substitutions must be less than max_substitutions * no deletions or insertions are allowed """ _check_arguments(subsequence, sequence, max_substitutions) for match in _find_near_matches_substitutions_ngrams(subsequence, sequence, max_substitutions): return True return False
[ "def", "has_near_match_substitutions_ngrams", "(", "subsequence", ",", "sequence", ",", "max_substitutions", ")", ":", "_check_arguments", "(", "subsequence", ",", "sequence", ",", "max_substitutions", ")", "for", "match", "in", "_find_near_matches_substitutions_ngrams", ...
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the number of character substitutions must be less than max_substitutions * no deletions or insertions are allowed
[ "search", "for", "near", "-", "matches", "of", "subsequence", "in", "sequence" ]
python
train
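Usage sketch for has_near_match_substitutions_ngrams above; 'PATTERM' is one substitution away from the pattern, while 'PATXXXN' needs three:

assert has_near_match_substitutions_ngrams('PATTERN', 'xxPATTERMxx', 1)
assert not has_near_match_substitutions_ngrams('PATTERN', 'xxPATXXXNxx', 1)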
ml4ai/delphi
delphi/AnalysisGraph.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/AnalysisGraph.py#L655-L665
def to_dict(self) -> Dict: """ Export the CAG to a dict that can be serialized to JSON. """ return { "name": self.name, "dateCreated": str(self.dateCreated), "variables": lmap( lambda n: self.export_node(n), self.nodes(data=True) ), "timeStep": str(self.Δt), "edge_data": lmap(export_edge, self.edges(data=True)), }
[ "def", "to_dict", "(", "self", ")", "->", "Dict", ":", "return", "{", "\"name\"", ":", "self", ".", "name", ",", "\"dateCreated\"", ":", "str", "(", "self", ".", "dateCreated", ")", ",", "\"variables\"", ":", "lmap", "(", "lambda", "n", ":", "self", ...
Export the CAG to a dict that can be serialized to JSON.
[ "Export", "the", "CAG", "to", "a", "dict", "that", "can", "be", "serialized", "to", "JSON", "." ]
python
train
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1423-L1427
def active(self, include=None): """ Return all active views. """ return self._get(self._build_url(self.endpoint.active(include=include)))
[ "def", "active", "(", "self", ",", "include", "=", "None", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", ".", "active", "(", "include", "=", "include", ")", ")", ")" ]
Return all active views.
[ "Return", "all", "active", "views", "." ]
python
train
MDAnalysis/GridDataFormats
gridData/OpenDX.py
https://github.com/MDAnalysis/GridDataFormats/blob/3eeb0432f8cf856912436e4f3e7aba99d3c916be/gridData/OpenDX.py#L827-L852
def __gridconnections(self): """Level-2 parser for gridconnections. pattern: object 2 class gridconnections counts 97 93 99 """ try: tok = self.__consume() except DXParserNoTokens: return if tok.equals('counts'): shape = [] try: while True: # raises exception if not an int self.__peek().value('INTEGER') tok = self.__consume() shape.append(tok.value('INTEGER')) except (DXParserNoTokens, ValueError): pass if len(shape) == 0: raise DXParseError('gridconnections: no shape parameters') self.currentobject['shape'] = shape else: raise DXParseError('gridconnections: '+str(tok)+' not recognized.')
[ "def", "__gridconnections", "(", "self", ")", ":", "try", ":", "tok", "=", "self", ".", "__consume", "(", ")", "except", "DXParserNoTokens", ":", "return", "if", "tok", ".", "equals", "(", "'counts'", ")", ":", "shape", "=", "[", "]", "try", ":", "wh...
Level-2 parser for gridconnections. pattern: object 2 class gridconnections counts 97 93 99
[ "Level", "-", "2", "parser", "for", "gridconnections", "." ]
python
valid
gwastro/pycbc
pycbc/waveform/ringdown.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L700-L809
def get_td_from_final_mass_spin(template=None, taper=None, distance=None, **kwargs): """Return time domain ringdown with all the modes specified. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. Each mode and overtone will have a different taper depending on its tau, the final taper being the superposition of all the tapers. distance : {None, float}, optional Luminosity distance of the system. If specified, the returned ringdown will contain the factor (final_mass/distance). final_mass : float Mass of the final black hole. final_spin : float Spin of the final black hole. lmns : list Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55). The n specifies the number of overtones desired for the corresponding lm pair (maximum n=8). Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330 amp220 : float Amplitude of the fundamental 220 mode. Note that if distance is given, this parameter will have a completely different order of magnitude. See table II in https://arxiv.org/abs/1107.0854 for an estimate. amplmn : float Fraction of the amplitude of the lmn overtone relative to the fundamental mode, as many as the number of subdominant modes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : float Inclination of the system in radians. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). t_final : {None, float}, optional The ending time of the output frequency series. If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplus: TimeSeries The plus phase of a ringdown with the lm modes specified and n overtones in time domain. hcross: TimeSeries The cross phase of a ringdown with the lm modes specified and n overtones in time domain. 
""" input_params = props(template, mass_spin_required_args, **kwargs) # Get required args final_mass = input_params['final_mass'] final_spin = input_params['final_spin'] lmns = input_params['lmns'] for lmn in lmns: if int(lmn[2]) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # following may not be in input_params delta_t = input_params.pop('delta_t', None) t_final = input_params.pop('t_final', None) f_0, tau = get_lm_f0tau_allmodes(final_mass, final_spin, lmns) if not delta_t: delta_t = lm_deltat(f_0, tau, lmns) if not t_final: t_final = lm_tfinal(tau, lmns) kmax = int(t_final / delta_t) + 1 # Different overtones will have different tapering window-size # Find maximum window size to create long enough output vector if taper: taper_window = int(taper*max(tau.values())/delta_t) kmax += taper_window outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) if taper: start = - taper * max(tau.values()) outplus._epoch, outcross._epoch = start, start for lmn in lmns: l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) hplus, hcross = get_td_lm(taper=taper, freqs=f_0, taus=tau, l=l, m=m, nmodes=nmodes, delta_t=delta_t, t_final=t_final, **input_params) if not taper: outplus.data += hplus.data outcross.data += hcross.data else: outplus = taper_shift(hplus, outplus) outcross = taper_shift(hcross, outcross) norm = Kerr_factor(final_mass, distance) if distance else 1. return norm*outplus, norm*outcross
[ "def", "get_td_from_final_mass_spin", "(", "template", "=", "None", ",", "taper", "=", "None", ",", "distance", "=", "None", ",", "*", "*", "kwargs", ")", ":", "input_params", "=", "props", "(", "template", ",", "mass_spin_required_args", ",", "*", "*", "k...
Return time domain ringdown with all the modes specified. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. Each mode and overtone will have a different taper depending on its tau, the final taper being the superposition of all the tapers. distance : {None, float}, optional Luminosity distance of the system. If specified, the returned ringdown will contain the factor (final_mass/distance). final_mass : float Mass of the final black hole. final_spin : float Spin of the final black hole. lmns : list Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55). The n specifies the number of overtones desired for the corresponding lm pair (maximum n=8). Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330 amp220 : float Amplitude of the fundamental 220 mode. Note that if distance is given, this parameter will have a completely different order of magnitude. See table II in https://arxiv.org/abs/1107.0854 for an estimate. amplmn : float Fraction of the amplitude of the lmn overtone relative to the fundamental mode, as many as the number of subdominant modes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : float Inclination of the system in radians. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). t_final : {None, float}, optional The ending time of the output frequency series. If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplus: TimeSeries The plus phase of a ringdown with the lm modes specified and n overtones in time domain. hcross: TimeSeries The cross phase of a ringdown with the lm modes specified and n overtones in time domain.
[ "Return", "time", "domain", "ringdown", "with", "all", "the", "modes", "specified", "." ]
python
train
jahuth/litus
spikes.py
https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/spikes.py#L282-L301
def len(self, resolution=1.0, units=None, conversion_function=convert_time, end_at_end=True):
    """
    Calculates the length of the Label Dimension from its minimum,
    maximum and whether it is discrete.

        `resolution`:
        `units`: output units
        `conversion_function`:
        `end_at_end`: additional switch for continuous behaviour
    """
    if units is not None:
        resolution = conversion_function(resolution, from_units=units, to_units=self.units)
    else:
        units = self.units
    if self.min is None:
        return int(self.max / resolution)
    if self.max is None:
        return 0
    if units != '1' and end_at_end:
        return int(np.ceil((self.max - self.min) / resolution))
    return int(np.ceil((self.max - self.min) / resolution) + 1)
[ "def", "len", "(", "self", ",", "resolution", "=", "1.0", ",", "units", "=", "None", ",", "conversion_function", "=", "convert_time", ",", "end_at_end", "=", "True", ")", ":", "if", "units", "is", "not", "None", ":", "resolution", "=", "conversion_function...
Calculates the length of the Label Dimension from its minimum,
maximum and whether it is discrete.

    `resolution`:
    `units`: output units
    `conversion_function`:
    `end_at_end`: additional switch for continuous behaviour
[ "Calculates", "the", "length", "of", "the", "Label", "Dimension", "from", "its", "minimum", "maximum", "and", "wether", "it", "is", "discrete", "." ]
python
train
dswah/pyGAM
pygam/pygam.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1027-L1047
def _estimate_AIC(self, y, mu, weights=None):
    """
    estimate the Akaike Information Criterion

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        output data vector
    mu : array-like of shape (n_samples,),
        expected value of the targets given the model and inputs
    weights : array-like shape (n_samples,) or None, optional
        containing sample weights
        if None, defaults to array of ones

    Returns
    -------
    AIC : float
    """
    estimated_scale = not(self.distribution._known_scale) # if we estimate the scale, that adds 2 dof
    return -2*self._loglikelihood(y=y, mu=mu, weights=weights) + \
            2*self.statistics_['edof'] + 2*estimated_scale
[ "def", "_estimate_AIC", "(", "self", ",", "y", ",", "mu", ",", "weights", "=", "None", ")", ":", "estimated_scale", "=", "not", "(", "self", ".", "distribution", ".", "_known_scale", ")", "# if we estimate the scale, that adds 2 dof", "return", "-", "2", "*", ...
estimate the Akaike Information Criterion

Parameters
----------
y : array-like of shape (n_samples,)
    output data vector
mu : array-like of shape (n_samples,),
    expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
    containing sample weights
    if None, defaults to array of ones

Returns
-------
AIC : float
[ "estimate", "the", "Akaike", "Information", "Criterion" ]
python
train
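The formula being applied above, restated standalone as a sketch (edof stands for the effective degrees of freedom taken from the fit statistics):

def aic(loglikelihood, edof, scale_estimated=False):
    # AIC = -2*logL + 2*dof, with 2 extra dof when the scale is estimated
    return -2.0 * loglikelihood + 2.0 * edof + 2.0 * bool(scale_estimated)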
click-contrib/click-configfile
tasks/_setup.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/tasks/_setup.py#L61-L85
def require_invoke_minversion(min_version, verbose=False):
    """Ensures that :mod:`invoke` has at least the :param:`min_version`.
    Otherwise, a :class:`VersionRequirementError` is raised.

    :param min_version: Minimal acceptable invoke version (as string).
    :param verbose: Indicates if invoke.version should be shown.
    :raises: VersionRequirementError=SystemExit if requirement fails.
    """
    # -- REQUIRES: sys.path is setup and contains invoke
    try:
        import invoke
        invoke_version = invoke.__version__
    except ImportError:
        invoke_version = "__NOT_INSTALLED"

    if invoke_version < min_version:
        message = "REQUIRE: invoke.version >= %s (but was: %s)" % \
                  (min_version, invoke_version)
        message += "\nUSE: pip install invoke>=%s" % min_version
        raise VersionRequirementError(message)

    INVOKE_VERSION = os.environ.get("INVOKE_VERSION", None)
    if verbose and not INVOKE_VERSION:
        os.environ["INVOKE_VERSION"] = invoke_version
        print("USING: invoke.version=%s" % invoke_version)
[ "def", "require_invoke_minversion", "(", "min_version", ",", "verbose", "=", "False", ")", ":", "# -- REQUIRES: sys.path is setup and contains invoke", "try", ":", "import", "invoke", "invoke_version", "=", "invoke", ".", "__version__", "except", "ImportError", ":", "in...
Ensures that :mod:`invoke` has at least the :param:`min_version`.
Otherwise, a :class:`VersionRequirementError` is raised.

:param min_version: Minimal acceptable invoke version (as string).
:param verbose: Indicates if invoke.version should be shown.
:raises: VersionRequirementError=SystemExit if requirement fails.
[ "Ensures", "that", ":", "mod", ":", "invoke", "has", "at", "the", "least", "the", ":", "param", ":", "min_version", ".", "Otherwise" ]
python
train
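One caveat worth noting about the function above: it compares version strings lexicographically, so '1.10.0' sorts before '1.9.0'. A tuple-based comparison avoids that; the sketch below is an illustration, not part of the original module:

def version_tuple(version):
    # '1.10.0' -> (1, 10, 0); non-numeric parts are simply skipped
    return tuple(int(part) for part in version.split('.') if part.isdigit())

assert version_tuple('1.10.0') > version_tuple('1.9.0')  # correct ordering
assert '1.10.0' < '1.9.0'                                # string compare gets it wrong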
ejeschke/ginga
ginga/ImageView.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L2908-L2941
def auto_orient(self): """Set the orientation for the image to a reasonable default.""" image = self.get_image() if image is None: return invert_y = not isinstance(image, AstroImage.AstroImage) # Check for various things to set based on metadata header = image.get_header() if header: # Auto-orientation orient = header.get('Orientation', None) if orient is None: orient = header.get('Image Orientation', None) if orient is not None: self.logger.debug("orientation [%s]" % orient) try: orient = int(str(orient)) self.logger.info( "setting orientation from metadata [%d]" % (orient)) flip_x, flip_y, swap_xy = self.orient_map[orient] self.transform(flip_x, flip_y, swap_xy) invert_y = False except Exception as e: # problems figuring out orientation--let it be self.logger.error("orientation error: %s" % str(e)) if invert_y: flip_x, flip_y, swap_xy = self.get_transforms() #flip_y = not flip_y flip_y = True self.transform(flip_x, flip_y, swap_xy)
[ "def", "auto_orient", "(", "self", ")", ":", "image", "=", "self", ".", "get_image", "(", ")", "if", "image", "is", "None", ":", "return", "invert_y", "=", "not", "isinstance", "(", "image", ",", "AstroImage", ".", "AstroImage", ")", "# Check for various t...
Set the orientation for the image to a reasonable default.
[ "Set", "the", "orientation", "for", "the", "image", "to", "a", "reasonable", "default", "." ]
python
train
GNS3/gns3-server
gns3server/utils/asyncio/input_stream.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/input_stream.py#L342-L398
def feed(self, data): """ Feed the input stream. :param data: Input string (unicode). """ assert isinstance(data, six.text_type) if _DEBUG_RENDERER_INPUT: self.LOG.write(repr(data).encode('utf-8') + b'\n') self.LOG.flush() # Handle bracketed paste. (We bypass the parser that matches all other # key presses and keep reading input until we see the end mark.) # This is much faster then parsing character by character. if self._in_bracketed_paste: self._paste_buffer += data end_mark = '\x1b[201~' if end_mark in self._paste_buffer: end_index = self._paste_buffer.index(end_mark) # Feed content to key bindings. paste_content = self._paste_buffer[:end_index] self.feed_key_callback(KeyPress(Keys.BracketedPaste, paste_content)) # Quit bracketed paste mode and handle remaining input. self._in_bracketed_paste = False remaining = self._paste_buffer[end_index + len(end_mark):] self._paste_buffer = '' self.feed(remaining) # Handle normal input character by character. else: for i, c in enumerate(data): if self._in_bracketed_paste: # Quit loop and process from this position when the parser # entered bracketed paste. self.feed(data[i:]) break else: # Replace \r by \n. (Some clients send \r instead of \n # when enter is pressed. E.g. telnet and some other # terminals.) # XXX: We should remove this in a future version. It *is* # now possible to recognise the difference. # (We remove ICRNL/INLCR/IGNCR below.) # However, this breaks IPython and maybe other applications, # because they bind ControlJ (\n) for handling the Enter key. # When this is removed, replace Enter=ControlJ by # Enter=ControlM in keys.py. if c == '\r': c = '\n' self._input_parser.send(c)
[ "def", "feed", "(", "self", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "six", ".", "text_type", ")", "if", "_DEBUG_RENDERER_INPUT", ":", "self", ".", "LOG", ".", "write", "(", "repr", "(", "data", ")", ".", "encode", "(", "'utf-...
Feed the input stream. :param data: Input string (unicode).
[ "Feed", "the", "input", "stream", "." ]
python
train
mapbox/mapbox-sdk-py
mapbox/services/uploads.py
https://github.com/mapbox/mapbox-sdk-py/blob/72d19dbcf2d254a6ea08129a726471fd21f13023/mapbox/services/uploads.py#L177-L199
def list(self, account=None, username=None): """List of all uploads Returns a Response object, the json() method of which returns a list of uploads Parameters ---------- username : str Account username, defaults to the service's username. account : str, **deprecated** Alias for username. Will be removed in version 1.0. Returns ------- requests.Response """ username = self._resolve_username(account, username) uri = URITemplate(self.baseuri + '/{username}').expand( username=username) resp = self.session.get(uri) self.handle_http_error(resp) return resp
[ "def", "list", "(", "self", ",", "account", "=", "None", ",", "username", "=", "None", ")", ":", "username", "=", "self", ".", "_resolve_username", "(", "account", ",", "username", ")", "uri", "=", "URITemplate", "(", "self", ".", "baseuri", "+", "'/{u...
List of all uploads Returns a Response object, the json() method of which returns a list of uploads Parameters ---------- username : str Account username, defaults to the service's username. account : str, **deprecated** Alias for username. Will be removed in version 1.0. Returns ------- requests.Response
[ "List", "of", "all", "uploads" ]
python
train
sryza/spark-timeseries
python/sparkts/models/GARCH.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/models/GARCH.py#L40-L49
def gradient(self, ts):
    """
    Find the gradient of the log likelihood with respect to the given time series.

    Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf

    Returns a 3-element array containing the gradient
    for the alpha, beta, and omega parameters.
    """
    gradient = self._jmodel.gradient(_py2java(self._ctx, Vectors.dense(ts)))
    return _java2py(self._ctx, gradient)
[ "def", "gradient", "(", "self", ",", "ts", ")", ":", "gradient", "=", "self", ".", "_jmodel", ".", "gradient", "(", "_py2java", "(", "self", ".", "_ctx", ",", "Vectors", ".", "dense", "(", "ts", ")", ")", ")", "return", "_java2py", "(", "self", "."...
Find the gradient of the log likelihood with respect to the given time series.
Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf
Returns a 3-element array containing the gradient
for the alpha, beta, and omega parameters.
[ "Find", "the", "gradient", "of", "the", "log", "likelihood", "with", "respect", "to", "the", "given", "time", "series", ".", "Based", "on", "http", ":", "//", "www", ".", "unc", ".", "edu", "/", "~jbhill", "/", "Bollerslev_GARCH_1986", ".", "pdf", "Retur...
python
train
collectiveacuity/labPack
labpack/storage/google/drive.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L975-L1004
def remove(self):
    ''' a method to remove all records in the collection

        NOTE:   this method removes all the files in the collection, but
                the collection folder itself created by oauth2 cannot be
                removed. Only the user can remove access to the app folder

    :return: string with confirmation of deletion
    '''
    title = '%s.remove' % self.__class__.__name__

    # get contents of root
    for id, name, mimetype in self._list_directory():
        try:
            self.drive.delete(fileId=id).execute()
        except Exception as err:
            if str(err).find('File not found') > -1:
                pass
            else:
                raise DriveConnectionError(title)

    # return outcome
    insert = 'collection'
    if self.collection_name:
        insert = self.collection_name
    exit_msg = 'Contents of %s will be removed from Google Drive.' % insert
    return exit_msg
[ "def", "remove", "(", "self", ")", ":", "title", "=", "'%s.remove'", "%", "self", ".", "__class__", ".", "__name__", "# get contents of root", "for", "id", ",", "name", ",", "mimetype", "in", "self", ".", "_list_directory", "(", ")", ":", "try", ":", "se...
a method to remove all records in the collection

NOTE:   this method removes all the files in the collection, but the
        collection folder itself created by oauth2 cannot be removed.
        Only the user can remove access to the app folder

:return: string with confirmation of deletion
[ "a", "method", "to", "remove", "all", "records", "in", "the", "collection" ]
python
train
google/grr
grr/server/grr_response_server/databases/mem_hunts.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_hunts.py#L356-L364
def CountHuntOutputPluginLogEntries(self, hunt_id, output_plugin_id, with_type=None): """Counts hunt output plugin log entries.""" return len( self.ReadHuntOutputPluginLogEntries( hunt_id, output_plugin_id, 0, sys.maxsize, with_type=with_type))
[ "def", "CountHuntOutputPluginLogEntries", "(", "self", ",", "hunt_id", ",", "output_plugin_id", ",", "with_type", "=", "None", ")", ":", "return", "len", "(", "self", ".", "ReadHuntOutputPluginLogEntries", "(", "hunt_id", ",", "output_plugin_id", ",", "0", ",", ...
Counts hunt output plugin log entries.
[ "Counts", "hunt", "output", "plugin", "log", "entries", "." ]
python
train
dlon/html2markdown
html2markdown.py
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L163-L330
def _markdownify(tag, _listType=None, _blockQuote=False, _listIndex=1): """recursively converts a tag into markdown""" children = tag.find_all(recursive=False) if tag.name == '[document]': for child in children: _markdownify(child) return if tag.name not in _supportedTags or not _supportedAttrs(tag): if tag.name not in _inlineTags: tag.insert_before('\n\n') tag.insert_after('\n\n') else: _escapeCharacters(tag) for child in children: _markdownify(child) return if tag.name not in ('pre', 'code'): _escapeCharacters(tag) _breakRemNewlines(tag) if tag.name == 'p': if tag.string != None: if tag.string.strip() == u'': tag.string = u'\xa0' tag.unwrap() return if not _blockQuote: tag.insert_before('\n\n') tag.insert_after('\n\n') else: tag.insert_before('\n') tag.insert_after('\n') tag.unwrap() for child in children: _markdownify(child) elif tag.name == 'br': tag.string = ' \n' tag.unwrap() elif tag.name == 'img': alt = '' title = '' if tag.has_attr('alt'): alt = tag['alt'] if tag.has_attr('title') and tag['title']: title = ' "%s"' % tag['title'] tag.string = '![%s](%s%s)' % (alt, tag['src'], title) tag.unwrap() elif tag.name == 'hr': tag.string = '\n---\n' tag.unwrap() elif tag.name == 'pre': tag.insert_before('\n\n') tag.insert_after('\n\n') if tag.code: if not _supportedAttrs(tag.code): return for child in tag.code.find_all(recursive=False): if child.name != 'br': return # code block for br in tag.code.find_all('br'): br.string = '\n' br.unwrap() tag.code.unwrap() lines = unicode(tag).strip().split('\n') lines[0] = lines[0][5:] lines[-1] = lines[-1][:-6] if not lines[-1]: lines.pop() for i,line in enumerate(lines): line = line.replace(u'\xa0', ' ') lines[i] = ' %s' % line tag.replace_with(BeautifulSoup('\n'.join(lines), 'html.parser')) return elif tag.name == 'code': # inline code if children: return tag.insert_before('`` ') tag.insert_after(' ``') tag.unwrap() elif _recursivelyValid(tag): if tag.name == 'blockquote': # ! FIXME: hack tag.insert_before('<<<BLOCKQUOTE: ') tag.insert_after('>>>') tag.unwrap() for child in children: _markdownify(child, _blockQuote=True) return elif tag.name == 'a': # process children first for child in children: _markdownify(child) if not tag.has_attr('href'): return if tag.string != tag.get('href') or tag.has_attr('title'): title = '' if tag.has_attr('title'): title = ' "%s"' % tag['title'] tag.string = '[%s](%s%s)' % (BeautifulSoup(unicode(tag), 'html.parser').string, tag.get('href', ''), title) else: # ! FIXME: hack tag.string = '<<<FLOATING LINK: %s>>>' % tag.string tag.unwrap() return elif tag.name == 'h1': tag.insert_before('\n\n# ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h2': tag.insert_before('\n\n## ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h3': tag.insert_before('\n\n### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h4': tag.insert_before('\n\n#### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h5': tag.insert_before('\n\n##### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h6': tag.insert_before('\n\n###### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name in ('ul', 'ol'): tag.insert_before('\n\n') tag.insert_after('\n\n') tag.unwrap() for i, child in enumerate(children): _markdownify(child, _listType=tag.name, _listIndex=i+1) return elif tag.name == 'li': if not _listType: # <li> outside of list; ignore return if _listType == 'ul': tag.insert_before('* ') else: tag.insert_before('%d. ' % _listIndex) for child in children: _markdownify(child) for c in tag.contents: if type(c) != bs4.element.NavigableString: continue c.replace_with('\n    '.join(c.split('\n'))) tag.insert_after('\n') tag.unwrap() return elif tag.name in ('strong','b'): tag.insert_before('__') tag.insert_after('__') tag.unwrap() elif tag.name in ('em','i'): tag.insert_before('_') tag.insert_after('_') tag.unwrap() for child in children: _markdownify(child)
[ "def", "_markdownify", "(", "tag", ",", "_listType", "=", "None", ",", "_blockQuote", "=", "False", ",", "_listIndex", "=", "1", ")", ":", "children", "=", "tag", ".", "find_all", "(", "recursive", "=", "False", ")", "if", "tag", ".", "name", "==", "...
recursively converts a tag into markdown
[ "recursively", "converts", "a", "tag", "into", "markdown" ]
python
train
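For orientation, a minimal usage sketch of the converter this record belongs to. It assumes html2markdown's public convert() entry point (which parses the HTML with BeautifulSoup and drives _markdownify over the resulting tree); the input string is illustrative.

import html2markdown

html = '<h2>Title</h2><p>Some <strong>bold</strong> text and a <a href="https://example.com">link</a>.</p>'
print(html2markdown.convert(html))
# Roughly:
# ## Title
#
# Some __bold__ text and a [link](https://example.com).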
esheldon/fitsio
fitsio/fitslib.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L452-L462
def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0): """ Move to the indicated HDU by name In general, it is not necessary to use this method explicitly. returns the one-offset extension number """ extname = mks(extname) hdu = self._FITS.movnam_hdu(hdutype, extname, extver) return hdu
[ "def", "movnam_hdu", "(", "self", ",", "extname", ",", "hdutype", "=", "ANY_HDU", ",", "extver", "=", "0", ")", ":", "extname", "=", "mks", "(", "extname", ")", "hdu", "=", "self", ".", "_FITS", ".", "movnam_hdu", "(", "hdutype", ",", "extname", ",",...
Move to the indicated HDU by name In general, it is not necessary to use this method explicitly. returns the one-offset extension number
[ "Move", "to", "the", "indicated", "HDU", "by", "name" ]
python
train
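A hedged usage sketch for movnam_hdu; the file name and extension name below are placeholders.

import fitsio

fits = fitsio.FITS('image.fits')  # placeholder file
extnum = fits.movnam_hdu('SCI')   # move to the HDU named 'SCI'
print(extnum)                     # the one-offset extension number

In everyday use, fits['SCI'] reaches the same HDU without calling this method explicitly, as the docstring notes.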
aaren/notedown
notedown/notedown.py
https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L305-L319
def create_code_cell(block): """Create a notebook code cell from a block.""" code_cell = nbbase.new_code_cell(source=block['content']) attr = block['attributes'] if not attr.is_empty: code_cell.metadata \ = nbbase.NotebookNode({'attributes': attr.to_dict()}) execution_count = attr.kvs.get('n') if not execution_count: code_cell.execution_count = None else: code_cell.execution_count = int(execution_count) return code_cell
[ "def", "create_code_cell", "(", "block", ")", ":", "code_cell", "=", "nbbase", ".", "new_code_cell", "(", "source", "=", "block", "[", "'content'", "]", ")", "attr", "=", "block", "[", "'attributes'", "]", "if", "not", "attr", ".", "is_empty", ":", "code...
Create a notebook code cell from a block.
[ "Create", "a", "notebook", "code", "cell", "from", "a", "block", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/script_editor/search_in_files.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/search_in_files.py#L557-L570
def default_filter_out(self, value): """ Setter for **self.__default_filter_out** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "default_filter_out", value) assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format( "default_filter_out", value) self.__default_filter_out = value
[ "def", "default_filter_out", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"default_filter_out\"",...
Setter for **self.__default_filter_out** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__default_filter_out", "**", "attribute", "." ]
python
train
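The record above shows a defensive property-setter pattern: assert the type, then assert the file exists, then store. A generic re-creation (class and attribute names are illustrative, not Umbra's):

import os

class Settings(object):

    def __init__(self):
        self.__filter_file = None

    @property
    def filter_file(self):
        return self.__filter_file

    @filter_file.setter
    def filter_file(self, value):
        if value is not None:
            assert isinstance(value, str), \
                "'filter_file' attribute: '{0}' type is not 'str'!".format(value)
            assert os.path.exists(value), \
                "'filter_file' attribute: '{0}' file doesn't exist!".format(value)
        self.__filter_file = value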
mdsol/rwslib
rwslib/builders/clinicaldata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/clinicaldata.py#L900-L910
def build(self, builder): """ Build XML by appending to builder """ params = dict(LocationOID=self.oid) # mixins self.mixin() self.mixin_params(params) builder.start("SiteRef", params) builder.end("SiteRef")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "params", "=", "dict", "(", "LocationOID", "=", "self", ".", "oid", ")", "# mixins", "self", ".", "mixin", "(", ")", "self", ".", "mixin_params", "(", "params", ")", "builder", ".", "start", "(",...
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
python
train
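The builder protocol used by build() matches xml.etree.ElementTree's TreeBuilder (start a tag with an attribute dict, then end it). A stand-in sketch — the LocationOID value is a placeholder, and using TreeBuilder here is an assumption about the builder rwslib receives:

import xml.etree.ElementTree as ET

builder = ET.TreeBuilder()
builder.start("SiteRef", {"LocationOID": "SITE-001"})
builder.end("SiteRef")
element = builder.close()
print(ET.tostring(element))  # b'<SiteRef LocationOID="SITE-001" />'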
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/__init__.py#L315-L336
def _set_lsp_reoptimize_timer(self, v, load=False): """ Setter method for lsp_reoptimize_timer, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_reoptimize_timer (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_reoptimize_timer is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_reoptimize_timer() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'300..65535']}), is_leaf=True, yang_name="lsp-reoptimize-timer", rest_name="reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Reoptimization timer', u'cli-full-no': None, u'alt-name': u'reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """lsp_reoptimize_timer must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'300..65535']}), is_leaf=True, yang_name="lsp-reoptimize-timer", rest_name="reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Reoptimization timer', u'cli-full-no': None, u'alt-name': u'reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""", }) self.__lsp_reoptimize_timer = t if hasattr(self, '_set'): self._set()
[ "def", "_set_lsp_reoptimize_timer", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", "...
Setter method for lsp_reoptimize_timer, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_reoptimize_timer (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_reoptimize_timer is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_reoptimize_timer() directly.
[ "Setter", "method", "for", "lsp_reoptimize_timer", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "lsp", "/", "secondary_path", "/", "lsp_reoptimize_timer", "(", "uint32", ")", "If", "this", "v...
python
train
bids-standard/pybids
bids/layout/layout.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L800-L817
def get_fieldmap(self, path, return_list=False): """ Get fieldmap(s) for specified path. """ fieldmaps = self._get_fieldmaps(path) if return_list: return fieldmaps else: if len(fieldmaps) == 1: return fieldmaps[0] elif len(fieldmaps) > 1: raise ValueError("More than one fieldmap found, but the " "'return_list' argument was set to False. " "Either ensure that there is only one " "fieldmap for this image, or set the " "'return_list' argument to True and handle " "the result as a list.") else: # len(fieldmaps) == 0 return None
[ "def", "get_fieldmap", "(", "self", ",", "path", ",", "return_list", "=", "False", ")", ":", "fieldmaps", "=", "self", ".", "_get_fieldmaps", "(", "path", ")", "if", "return_list", ":", "return", "fieldmaps", "else", ":", "if", "len", "(", "fieldmaps", "...
Get fieldmap(s) for specified path.
[ "Get", "fieldmap", "(", "s", ")", "for", "specified", "path", "." ]
python
train
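A hedged usage sketch; the dataset root and BOLD path are placeholders. Passing return_list=True always yields a list, which sidesteps the ValueError raised above when several fieldmaps match one image.

from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
bold = '/data/bids/sub-01/func/sub-01_task-rest_bold.nii.gz'
fieldmaps = layout.get_fieldmap(bold, return_list=True)
print(fieldmaps)  # [] when no fieldmap applies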
gmr/tinman
tinman/process.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/process.py#L90-L115
def on_sighup(self, signal_unused, frame_unused): """Reload the configuration :param int signal_unused: Unused signal number :param frame frame_unused: Unused frame the signal was caught in """ # Update HTTP configuration for setting in self.http_config: if getattr(self.http_server, setting) != self.http_config[setting]: LOGGER.debug('Changing HTTPServer %s setting', setting) setattr(self.http_server, setting, self.http_config[setting]) # Update Application Settings for setting in self.settings: if self.app.settings[setting] != self.settings[setting]: LOGGER.debug('Changing Application %s setting', setting) self.app.settings[setting] = self.settings[setting] # Update the routes self.app.handlers = [] self.app.named_handlers = {} routes = self.namespace.config.get(config.ROUTES) self.app.add_handlers(".*$", self.app.prepare_routes(routes)) LOGGER.info('Configuration reloaded')
[ "def", "on_sighup", "(", "self", ",", "signal_unused", ",", "frame_unused", ")", ":", "# Update HTTP configuration", "for", "setting", "in", "self", ".", "http_config", ":", "if", "getattr", "(", "self", ".", "http_server", ",", "setting", ")", "!=", "self", ...
Reload the configuration :param int signal_unused: Unused signal number :param frame frame_unused: Unused frame the signal was caught in
[ "Reload", "the", "configuration" ]
python
train
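on_sighup is the kind of handler that gets wired to SIGHUP with the standard signal module. A generic, Unix-only sketch (the handler body is illustrative, not tinman's):

import signal

def on_sighup(signum, frame):
    print('SIGHUP received; reloading configuration')

signal.signal(signal.SIGHUP, on_sighup)  # kill -HUP <pid> now triggers a reload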
Clarify/clarify_brightcove_sync
clarify_brightcove_sync/brightcove_api_client.py
https://github.com/Clarify/clarify_brightcove_sync/blob/cda4443a40e72f1fb02af3d671d8f3f5f9644d24/clarify_brightcove_sync/brightcove_api_client.py#L186-L205
def get_all_videos(self, search_q=None): ''' Gets all the videos in an account by automatically paginating through getVideos(). WARNING: Use with caution if you have thousands of videos! WARNING: If deletes are being done during this iteration, the list may be missing videos. ''' page_size = 10 current_page = 0 total_count = self.get_video_count(search_q=search_q) total_page = math.ceil(total_count / page_size) # We sort by 'created_at' so any newly added videos will be on the last page. # We don't currently handle deletes during the iteration which could cause videos to be missed. sort = 'created_at' result = [] while current_page < total_page: page_result = self.get_videos(limit=page_size, offset=current_page * page_size, search_q=search_q, sort=sort) result += page_result current_page += 1 return result
[ "def", "get_all_videos", "(", "self", ",", "search_q", "=", "None", ")", ":", "page_size", "=", "10", "current_page", "=", "0", "total_count", "=", "self", ".", "get_video_count", "(", "search_q", "=", "search_q", ")", "total_page", "=", "math", ".", "ceil...
Gets all the videos in an account by automatically paginating through getVideos(). WARNING: Use with caution if you have thousands of videos! WARNING: If deletes are being done during this iteration, the list may be missing videos.
[ "Gets", "all", "the", "videos", "in", "an", "account", "by", "automatically", "paginating", "through", "getVideos", "()", ".", "WARNING", ":", "Use", "with", "caution", "if", "you", "have", "thousands", "of", "videos!", "WARNING", ":", "If", "deletes", "are"...
python
train
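The pagination loop above reduces to a small offset/limit pattern. A self-contained re-creation with a fake backend; fetch_page is a hypothetical stand-in for the Brightcove getVideos call:

import math

def get_all(fetch_page, total_count, page_size=10):
    pages = math.ceil(total_count / page_size)
    result = []
    for page in range(pages):
        result += fetch_page(limit=page_size, offset=page * page_size)
    return result

items = list(range(25))
fetch = lambda limit, offset: items[offset:offset + limit]
assert get_all(fetch, len(items)) == items  # 3 pages: 10 + 10 + 5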
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/shaders/parsing.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/shaders/parsing.py#L118-L133
def find_program_variables(code): """ Return a dict describing program variables:: {'var_name': ('uniform|attribute|varying', type), ...} """ vars = {} lines = code.split('\n') for line in lines: m = re.match(r"\s*" + re_prog_var_declaration + r"\s*(=|;)", line) if m is not None: vtype, dtype, names = m.groups()[:3] for name in names.split(','): vars[name.strip()] = (vtype, dtype) return vars
[ "def", "find_program_variables", "(", "code", ")", ":", "vars", "=", "{", "}", "lines", "=", "code", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "m", "=", "re", ".", "match", "(", "r\"\\s*\"", "+", "re_prog_var_declaration", "+"...
Return a dict describing program variables:: {'var_name': ('uniform|attribute|varying', type), ...}
[ "Return", "a", "dict", "describing", "program", "variables", "::" ]
python
train
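A usage sketch for the parser; the import path assumes the upstream vispy module this file was vendored from, and the GLSL snippet is illustrative:

from vispy.visuals.shaders.parsing import find_program_variables

code = '''
uniform mat4 transform;
attribute vec3 position;
varying vec4 v_color;
'''
print(find_program_variables(code))
# Roughly: {'transform': ('uniform', 'mat4'),
#           'position': ('attribute', 'vec3'),
#           'v_color': ('varying', 'vec4')}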
sckott/pygbif
pygbif/occurrences/get.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/get.py#L41-L58
def get_fragment(key, **kwargs): ''' Get a single occurrence fragment in its raw form (xml or json) :param key: [int] A GBIF occurrence key :return: A dictionary of results Usage:: from pygbif import occurrences occurrences.get_fragment(key = 1052909293) occurrences.get_fragment(key = 1227768771) occurrences.get_fragment(key = 1227769518) ''' url = gbif_baseurl + 'occurrence/' + str(key) + '/fragment' out = gbif_GET(url, {}, **kwargs) return out
[ "def", "get_fragment", "(", "key", ",", "*", "*", "kwargs", ")", ":", "url", "=", "gbif_baseurl", "+", "'occurrence/'", "+", "str", "(", "key", ")", "+", "'/fragment'", "out", "=", "gbif_GET", "(", "url", ",", "{", "}", ",", "*", "*", "kwargs", ")"...
Get a single occurrence fragment in its raw form (xml or json) :param key: [int] A GBIF occurrence key :return: A dictionary of results Usage:: from pygbif import occurrences occurrences.get_fragment(key = 1052909293) occurrences.get_fragment(key = 1227768771) occurrences.get_fragment(key = 1227769518)
[ "Get", "a", "single", "occurrence", "fragment", "in", "its", "raw", "form", "(", "xml", "or", "json", ")" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/cli/repomanager.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/cli/repomanager.py#L203-L235
def addReadGroupSet(self): """ Adds a new ReadGroupSet into this repo. """ self._openRepo() dataset = self._repo.getDatasetByName(self._args.datasetName) dataUrl = self._args.dataFile indexFile = self._args.indexFile parsed = urlparse.urlparse(dataUrl) # TODO, add https support and others when they have been # tested. if parsed.scheme in ['http', 'ftp']: if indexFile is None: raise exceptions.MissingIndexException(dataUrl) else: if indexFile is None: indexFile = dataUrl + ".bai" dataUrl = self._getFilePath(self._args.dataFile, self._args.relativePath) indexFile = self._getFilePath(indexFile, self._args.relativePath) name = self._args.name if self._args.name is None: name = getNameFromPath(dataUrl) readGroupSet = reads.HtslibReadGroupSet(dataset, name) readGroupSet.populateFromFile(dataUrl, indexFile) referenceSetName = self._args.referenceSetName if referenceSetName is None: # Try to find a reference set name from the BAM header. referenceSetName = readGroupSet.getBamHeaderReferenceSetName() referenceSet = self._repo.getReferenceSetByName(referenceSetName) readGroupSet.setReferenceSet(referenceSet) readGroupSet.setAttributes(json.loads(self._args.attributes)) self._updateRepo(self._repo.insertReadGroupSet, readGroupSet)
[ "def", "addReadGroupSet", "(", "self", ")", ":", "self", ".", "_openRepo", "(", ")", "dataset", "=", "self", ".", "_repo", ".", "getDatasetByName", "(", "self", ".", "_args", ".", "datasetName", ")", "dataUrl", "=", "self", ".", "_args", ".", "dataFile",...
Adds a new ReadGroupSet into this repo.
[ "Adds", "a", "new", "ReadGroupSet", "into", "this", "repo", "." ]
python
train
Esri/ArcREST
src/arcrest/manageorg/_portals.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_portals.py#L1000-L1028
def featureServers(self): """gets the hosting feature AGS Server""" if self.urls == {}: return {} featuresUrls = self.urls['urls']['features'] if 'https' in featuresUrls: res = featuresUrls['https'] elif 'http' in featuresUrls: res = featuresUrls['http'] else: return None services = [] for urlHost in res: if self.isPortal: services.append(AGSAdministration( url='%s/admin' % urlHost, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) else: services.append(Services( url='https://%s/%s/ArcGIS/admin' % (urlHost, self.portalId), securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return services
[ "def", "featureServers", "(", "self", ")", ":", "if", "self", ".", "urls", "==", "{", "}", ":", "return", "{", "}", "featuresUrls", "=", "self", ".", "urls", "[", "'urls'", "]", "[", "'features'", "]", "if", "'https'", "in", "featuresUrls", ":", "res...
gets the hosting feature AGS Server
[ "gets", "the", "hosting", "feature", "AGS", "Server" ]
python
train
crytic/pyevmasm
pyevmasm/evmasm.py
https://github.com/crytic/pyevmasm/blob/d27daf19a36d630a31499e783b716cf1165798d8/pyevmasm/evmasm.py#L151-L165
def parse_operand(self, buf): """ Parses an operand from buf :param buf: a buffer :type buf: iterator/generator/string """ buf = iter(buf) try: operand = 0 for _ in range(self.operand_size): operand <<= 8 operand |= next(buf) self._operand = operand except StopIteration: raise ParseError("Not enough data for decoding")
[ "def", "parse_operand", "(", "self", ",", "buf", ")", ":", "buf", "=", "iter", "(", "buf", ")", "try", ":", "operand", "=", "0", "for", "_", "in", "range", "(", "self", ".", "operand_size", ")", ":", "operand", "<<=", "8", "operand", "|=", "next", ...
Parses an operand from buf :param buf: a buffer :type buf: iterator/generator/string
[ "Parses", "an", "operand", "from", "buf" ]
python
valid
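The loop accumulates operand bytes big-endian: shift left eight bits, then OR in the next byte. A standalone re-creation, using a 2-byte PUSH2 operand as the example:

def decode_operand(buf, operand_size):
    it = iter(buf)
    operand = 0
    for _ in range(operand_size):
        operand <<= 8
        operand |= next(it)  # StopIteration here maps to ParseError above
    return operand

assert decode_operand(bytes([0x01, 0x00]), 2) == 0x0100  # 256
# Equivalent to int.from_bytes(bytes([0x01, 0x00]), 'big').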
aboSamoor/polyglot
polyglot/mapping/embeddings.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/mapping/embeddings.py#L128-L146
def distances(self, word, words): """Calculate Euclidean pairwise distances between `word` and `words`. Args: word (string): single word. words (list): list of strings. Returns: numpy array of the distances. Note: L2 metric is used to calculate distances. """ point = self[word] vectors = np.asarray([self[w] for w in words]) diff = vectors - point distances = np.linalg.norm(diff, axis=1) return distances
[ "def", "distances", "(", "self", ",", "word", ",", "words", ")", ":", "point", "=", "self", "[", "word", "]", "vectors", "=", "np", ".", "asarray", "(", "[", "self", "[", "w", "]", "for", "w", "in", "words", "]", ")", "diff", "=", "vectors", "-...
Calculate Euclidean pairwise distances between `word` and `words`. Args: word (string): single word. words (list): list of strings. Returns: numpy array of the distances. Note: L2 metric is used to calculate distances.
[ "Calculate", "Euclidean", "pairwise", "distances", "between", "word", "and", "words", "." ]
python
train
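The same L2 computation with plain numpy arrays standing in for the embedding lookups:

import numpy as np

point = np.array([0.0, 0.0])                  # stand-in for self[word]
vectors = np.array([[3.0, 4.0], [1.0, 0.0]])  # stand-ins for self[w]
print(np.linalg.norm(vectors - point, axis=1))  # [5. 1.]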
tBuLi/symfit
symfit/core/minimizers.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L477-L517
def scipy_constraints(self, constraints): """ Returns all constraints in a scipy compatible format. :param constraints: List of either MinimizeModel instances (this is what is provided by :class:`~symfit.core.fit.Fit`), :class:`~symfit.core.fit.BaseModel`, or :class:`sympy.core.relational.Relational`. :return: dict of scipy compatible statements. """ cons = [] types = { # scipy only distinguishes two types of constraint. sympy.Eq: 'eq', sympy.Ge: 'ineq', } for constraint in constraints: if isinstance(constraint, MinimizeModel): # Typically the case when called by `Fit constraint_type = constraint.model.constraint_type elif hasattr(constraint, 'constraint_type'): # Model object, not provided by `Fit`. Do the best we can. if self.parameters != constraint.params: raise AssertionError('The constraint should accept the same' ' parameters as used for the fit.') constraint_type = constraint.constraint_type constraint = MinimizeModel(constraint, data=self.objective.data) elif isinstance(constraint, sympy.Rel): constraint_type = constraint.__class__ constraint = self.objective.model.__class__.as_constraint( constraint, self.objective.model ) constraint = MinimizeModel(constraint, data=self.objective.data) else: raise TypeError('Unknown type for a constraint.') con = { 'type': types[constraint_type], 'fun': constraint, } cons.append(con) cons = tuple(cons) return cons
[ "def", "scipy_constraints", "(", "self", ",", "constraints", ")", ":", "cons", "=", "[", "]", "types", "=", "{", "# scipy only distinguishes two types of constraint.", "sympy", ".", "Eq", ":", "'eq'", ",", "sympy", ".", "Ge", ":", "'ineq'", ",", "}", "for", ...
Returns all constraints in a scipy compatible format. :param constraints: List of either MinimizeModel instances (this is what is provided by :class:`~symfit.core.fit.Fit`), :class:`~symfit.core.fit.BaseModel`, or :class:`sympy.core.relational.Relational`. :return: dict of scipy compatible statements.
[ "Returns", "all", "constraints", "in", "a", "scipy", "compatible", "format", "." ]
python
train
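The dicts this method emits are the constraint format scipy.optimize.minimize consumes. A minimal sketch fed straight to scipy, with illustrative constraints:

from scipy.optimize import minimize

cons = (
    {'type': 'eq',   'fun': lambda x: x[0] + x[1] - 1.0},  # x0 + x1 == 1
    {'type': 'ineq', 'fun': lambda x: x[0]},               # x0 >= 0
)
res = minimize(lambda x: x[0] ** 2 + x[1] ** 2, x0=[0.5, 0.5], constraints=cons)
print(res.x)  # approximately [0.5, 0.5]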
HPAC/matchpy
matchpy/utils.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L431-L474
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]: r"""Yield non-negative integer solutions of a linear Diophantine equation of the format :math:`c_1 x_1 + \dots + c_n x_n = total`. If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions. Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion: 1. Compute :math:`d := gcd(c_2, \dots , c_n)` 2. Solve :math:`c_1 x + d y = total` 3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y` 4. Combine these solutions to form a solution for the whole equation Args: total: The constant of the equation. *coeffs: The coefficients :math:`c_i` of the equation. Yields: The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`. """ if len(coeffs) == 0: if total == 0: yield tuple() return if len(coeffs) == 1: if total % coeffs[0] == 0: yield (total // coeffs[0], ) return if len(coeffs) == 2: yield from base_solution_linear(coeffs[0], coeffs[1], total) return # calculate gcd(coeffs[1:]) remainder_gcd = math.gcd(coeffs[1], coeffs[2]) for coeff in coeffs[3:]: remainder_gcd = math.gcd(remainder_gcd, coeff) # solve coeffs[0] * x + remainder_gcd * y = total for coeff0_solution, remainder_gcd_solution in base_solution_linear(coeffs[0], remainder_gcd, total): new_coeffs = [c // remainder_gcd for c in coeffs[1:]] # use the solutions for y to solve the remaining variables recursively for remainder_solution in solve_linear_diop(remainder_gcd_solution, *new_coeffs): yield (coeff0_solution, ) + remainder_solution
[ "def", "solve_linear_diop", "(", "total", ":", "int", ",", "*", "coeffs", ":", "int", ")", "->", "Iterator", "[", "Tuple", "[", "int", ",", "...", "]", "]", ":", "if", "len", "(", "coeffs", ")", "==", "0", ":", "if", "total", "==", "0", ":", "y...
r"""Yield non-negative integer solutions of a linear Diophantine equation of the format :math:`c_1 x_1 + \dots + c_n x_n = total`. If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions. Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion: 1. Compute :math:`d := gcd(c_2, \dots , c_n)` 2. Solve :math:`c_1 x + d y = total` 3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y` 4. Combine these solutions to form a solution for the whole equation Args: total: The constant of the equation. *coeffs: The coefficients :math:`c_i` of the equation. Yields: The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`.
[ "r", "Yield", "non", "-", "negative", "integer", "solutions", "of", "a", "linear", "Diophantine", "equation", "of", "the", "format", ":", "math", ":", "c_1", "x_1", "+", "\\", "dots", "+", "c_n", "x_n", "=", "total", "." ]
python
train
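A worked example of the recursion: 2a + 3b + 5c = 10, cross-checked against a brute-force search over a small box (the ranges cover every feasible value):

from matchpy.utils import solve_linear_diop

solutions = set(solve_linear_diop(10, 2, 3, 5))
brute = {(a, b, c)
         for a in range(6) for b in range(4) for c in range(3)
         if 2 * a + 3 * b + 5 * c == 10}
assert solutions == brute
print(sorted(solutions))  # [(0, 0, 2), (1, 1, 1), (2, 2, 0), (5, 0, 0)]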
OTL/jps
jps/tools.py
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L11-L28
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT): '''publishes the data to the topic :param topic_name: name of the topic :param json_msg: data to be published :param repeat_rate: if None, publishes once. if not None, it is used as [Hz]. ''' pub = jps.Publisher(topic_name, host=host, pub_port=pub_port) time.sleep(0.1) if repeat_rate is None: pub.publish(json_msg) else: try: while True: pub.publish(json_msg) time.sleep(1.0 / repeat_rate) except KeyboardInterrupt: pass
[ "def", "pub", "(", "topic_name", ",", "json_msg", ",", "repeat_rate", "=", "None", ",", "host", "=", "jps", ".", "env", ".", "get_master_host", "(", ")", ",", "pub_port", "=", "jps", ".", "DEFAULT_PUB_PORT", ")", ":", "pub", "=", "jps", ".", "Publisher...
publishes the data to the topic :param topic_name: name of the topic :param json_msg: data to be published :param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
[ "publishes", "the", "data", "to", "the", "topic" ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/pstatsloader.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/pstatsloader.py#L44-L54
def load( self, stats ): """Build a squaremap-compatible model from a pstats class""" rows = self.rows for func, raw in stats.iteritems(): try: rows[func] = row = PStatRow( func,raw ) except ValueError, err: log.info( 'Null row: %s', func ) for row in rows.itervalues(): row.weave( rows ) return self.find_root( rows )
[ "def", "load", "(", "self", ",", "stats", ")", ":", "rows", "=", "self", ".", "rows", "for", "func", ",", "raw", "in", "stats", ".", "iteritems", "(", ")", ":", "try", ":", "rows", "[", "func", "]", "=", "row", "=", "PStatRow", "(", "func", ","...
Build a squaremap-compatible model from a pstats class
[ "Build", "a", "squaremap", "-", "compatible", "model", "from", "a", "pstats", "class" ]
python
train
rodricios/eatiht
eatiht/v2.py
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L177-L190
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False): """In an effort to avoid using external libraries (like scipy, numpy, etc), I've written some harmless code for basic statistical calculations """ ttl = 0 for _, tnodes in pars_tnodes: ttl += tnodes[3] # index #3 holds the avg strlen crd = len(pars_tnodes) avg = ttl/crd if dbg is True: print(avg) # avg = ttl/crd return (avg, ttl, crd)
[ "def", "calc_avgstrlen_pathstextnodes", "(", "pars_tnodes", ",", "dbg", "=", "False", ")", ":", "ttl", "=", "0", "for", "_", ",", "tnodes", "in", "pars_tnodes", ":", "ttl", "+=", "tnodes", "[", "3", "]", "# index #3 holds the avg strlen\r", "crd", "=", "len"...
In an effort to avoid using external libraries (like scipy, numpy, etc), I've written some harmless code for basic statistical calculations
[ "In", "an", "effort", "to", "avoid", "using", "external", "libraries", "(", "like", "scipy", "numpy", "etc", ")", "I", "ve", "written", "some", "harmless", "code", "for", "basic", "statistical", "calculations" ]
python
train
adafruit/Adafruit_Python_MCP3008
Adafruit_MCP3008/MCP3008.py
https://github.com/adafruit/Adafruit_Python_MCP3008/blob/03232cad6b888faf3dc2ff76e548c5fa3ffe3ae0/Adafruit_MCP3008/MCP3008.py#L68-L92
def read_adc_difference(self, differential): """Read the difference between two channels. Differential should be a value of: - 0: Return channel 0 minus channel 1 - 1: Return channel 1 minus channel 0 - 2: Return channel 2 minus channel 3 - 3: Return channel 3 minus channel 2 - 4: Return channel 4 minus channel 5 - 5: Return channel 5 minus channel 4 - 6: Return channel 6 minus channel 7 - 7: Return channel 7 minus channel 6 """ assert 0 <= differential <= 7, 'Differential number must be a value of 0-7!' # Build a difference channel read command. command = 0b10 << 6 # Start bit, differential read command |= (differential & 0x07) << 3 # Channel number (in 3 bits) # Note the bottom 3 bits of command are 0, this is to account for the # extra clock to do the conversion, and the low null bit returned at # the start of the response. resp = self._spi.transfer([command, 0x0, 0x0]) # Parse out the 10 bits of response data and return it. result = (resp[0] & 0x01) << 9 result |= (resp[1] & 0xFF) << 1 result |= (resp[2] & 0x80) >> 7 return result & 0x3FF
[ "def", "read_adc_difference", "(", "self", ",", "differential", ")", ":", "assert", "0", "<=", "differential", "<=", "7", ",", "'Differential number must be a value of 0-7!'", "# Build a difference channel read command.", "command", "=", "0b10", "<<", "6", "# Start bit, d...
Read the difference between two channels. Differential should be a value of: - 0: Return channel 0 minus channel 1 - 1: Return channel 1 minus channel 0 - 2: Return channel 2 minus channel 3 - 3: Return channel 3 minus channel 2 - 4: Return channel 4 minus channel 5 - 5: Return channel 5 minus channel 4 - 6: Return channel 6 minus channel 7 - 7: Return channel 7 minus channel 6
[ "Read", "the", "difference", "between", "two", "channels", ".", "Differential", "should", "be", "a", "value", "of", ":", "-", "0", ":", "Return", "channel", "0", "minus", "channel", "1", "-", "1", ":", "Return", "channel", "1", "minus", "channel", "0", ...
python
train
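A standalone walk-through of the bit packing above, for differential pair 1 (channel 1 minus channel 0); the 3-byte SPI response is fabricated to show the 10-bit parse:

differential = 1
command = 0b10 << 6                    # start bit + differential-read mode
command |= (differential & 0x07) << 3  # channel number in 3 bits
print(bin(command))                    # 0b10001000

resp = [0b00000001, 0b10110010, 0b10000000]  # fabricated response bytes
result = (resp[0] & 0x01) << 9
result |= (resp[1] & 0xFF) << 1
result |= (resp[2] & 0x80) >> 7
print(result & 0x3FF)                  # 869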
HumanBrainProject/hbp-service-client
hbp_service_client/storage_service/api.py
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/storage_service/api.py#L271-L300
def get_metadata(self, entity_type, entity_id): '''Get metadata of an entity. Args: entity_type (str): Type of the entity. Admitted values: ['project', 'folder', 'file']. entity_id (str): The UUID of the entity to be modified. Returns: A dictionary of the metadata:: { u'bar': u'200', u'foo': u'100' } Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes ''' if not is_valid_uuid(entity_id): raise StorageArgumentException( 'Invalid UUID for entity_id: {0}'.format(entity_id)) return self._authenticated_request \ .to_endpoint('{}/{}/metadata/'.format(entity_type, entity_id)) \ .return_body() \ .get()
[ "def", "get_metadata", "(", "self", ",", "entity_type", ",", "entity_id", ")", ":", "if", "not", "is_valid_uuid", "(", "entity_id", ")", ":", "raise", "StorageArgumentException", "(", "'Invalid UUID for entity_id: {0}'", ".", "format", "(", "entity_id", ")", ")", ...
Get metadata of an entity. Args: entity_type (str): Type of the entity. Admitted values: ['project', 'folder', 'file']. entity_id (str): The UUID of the entity to be modified. Returns: A dictionary of the metadata:: { u'bar': u'200', u'foo': u'100' } Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
[ "Get", "metadata", "of", "an", "entity", "." ]
python
test
streamlink/streamlink
src/streamlink/plugins/vk.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/vk.py#L50-L102
def _get_streams(self): """ Find the streams for vk.com :return: """ self.session.http.headers.update({'User-Agent': useragents.IPHONE_6}) # If this is a 'videos' catalog URL # with an video ID in the GET request, get that instead url = self.follow_vk_redirect(self.url) m = self._url_re.match(url) if not m: log.error('URL is not compatible: {0}'.format(url)) return video_id = m.group('video_id') log.debug('video ID: {0}'.format(video_id)) params = { 'act': 'show_inline', 'al': '1', 'video': video_id, } res = self.session.http.post(self.API_URL, params=params) for _i in itertags(res.text, 'iframe'): if _i.attributes.get('src'): iframe_url = update_scheme(self.url, _i.attributes['src']) log.debug('Found iframe: {0}'.format(iframe_url)) for s in self.session.streams(iframe_url).items(): yield s for _i in itertags(res.text, 'source'): if _i.attributes.get('type') == 'application/vnd.apple.mpegurl': video_url = _i.attributes['src'] # Remove invalid URL if video_url.startswith('https://vk.com/'): continue streams = HLSStream.parse_variant_playlist(self.session, video_url) if not streams: yield 'live', HLSStream(self.session, video_url) else: for s in streams.items(): yield s elif _i.attributes.get('type') == 'video/mp4': q = 'vod' video_url = _i.attributes['src'] m = self._vod_quality_re.search(video_url) if m: q = '{0}p'.format(m.group(1)) yield q, HTTPStream(self.session, video_url)
[ "def", "_get_streams", "(", "self", ")", ":", "self", ".", "session", ".", "http", ".", "headers", ".", "update", "(", "{", "'User-Agent'", ":", "useragents", ".", "IPHONE_6", "}", ")", "# If this is a 'videos' catalog URL", "# with an video ID in the GET request, g...
Find the streams for vk.com :return:
[ "Find", "the", "streams", "for", "vk", ".", "com", ":", "return", ":" ]
python
test
pybel/pybel
src/pybel/io/gpickle.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/io/gpickle.py#L51-L64
def to_pickle(graph: BELGraph, file: Union[str, BinaryIO], protocol: int = HIGHEST_PROTOCOL) -> None: """Write this graph to a pickle object with :func:`networkx.write_gpickle`. Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable pickle, choose 0, 1, or 2. :param graph: A BEL graph :param file: A file or filename to write to :param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``. .. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format """ raise_for_not_bel(graph) nx.write_gpickle(graph, file, protocol=protocol)
[ "def", "to_pickle", "(", "graph", ":", "BELGraph", ",", "file", ":", "Union", "[", "str", ",", "BinaryIO", "]", ",", "protocol", ":", "int", "=", "HIGHEST_PROTOCOL", ")", "->", "None", ":", "raise_for_not_bel", "(", "graph", ")", "nx", ".", "write_gpickl...
Write this graph to a pickle object with :func:`networkx.write_gpickle`. Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable pickle, choose 0, 1, or 2. :param graph: A BEL graph :param file: A file or filename to write to :param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``. .. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format
[ "Write", "this", "graph", "to", "a", "pickle", "object", "with", ":", "func", ":", "networkx", ".", "write_gpickle", "." ]
python
train
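A round-trip sketch; it assumes pybel's matching from_pickle reader and uses a throwaway file name:

from pybel import BELGraph, from_pickle, to_pickle

graph = BELGraph(name='demo', version='0.0.1')
to_pickle(graph, 'demo.gpickle')
restored = from_pickle('demo.gpickle')
print(restored.name)  # demo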
kelproject/pykube
pykube/http.py
https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L274-L282
def head(self, *args, **kwargs): """ Executes an HTTP HEAD. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments """ return self.session.head(*args, **self.get_kwargs(**kwargs))
[ "def", "head", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "session", ".", "head", "(", "*", "args", ",", "*", "*", "self", ".", "get_kwargs", "(", "*", "*", "kwargs", ")", ")" ]
Executes an HTTP HEAD. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
[ "Executes", "an", "HTTP", "HEAD", "." ]
python
train
DataBiosphere/toil
src/toil/jobStores/abstractJobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L179-L196
def loadRootJob(self): """ Loads the root job in the current job store. :raises toil.job.JobException: If no root job is set or if the root job doesn't exist in this job store :return: The root job. :rtype: toil.jobGraph.JobGraph """ try: with self.readSharedFileStream(self.rootJobStoreIDFileName) as f: rootJobStoreID = f.read().decode('utf-8') except NoSuchFileException: raise JobException('No job has been set as the root in this job store') if not self.exists(rootJobStoreID): raise JobException("The root job '%s' doesn't exist. Either the Toil workflow " "is finished or has never been started" % rootJobStoreID) return self.load(rootJobStoreID)
[ "def", "loadRootJob", "(", "self", ")", ":", "try", ":", "with", "self", ".", "readSharedFileStream", "(", "self", ".", "rootJobStoreIDFileName", ")", "as", "f", ":", "rootJobStoreID", "=", "f", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ...
Loads the root job in the current job store. :raises toil.job.JobException: If no root job is set or if the root job doesn't exist in this job store :return: The root job. :rtype: toil.jobGraph.JobGraph
[ "Loads", "the", "root", "job", "in", "the", "current", "job", "store", "." ]
python
train
chimera0/accel-brain-code
Reinforcement-Learning/pyqlearning/annealing_model.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/annealing_model.py#L124-L129
def set_var_log_arr(self, value): ''' setter ''' if isinstance(value, np.ndarray): self.__var_log_arr = value else: raise TypeError()
[ "def", "set_var_log_arr", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", ":", "self", ".", "__var_log_arr", "=", "value", "else", ":", "raise", "TypeError", "(", ")" ]
setter
[ "setter" ]
python
train
mayfield/shellish
shellish/eventing.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/eventing.py#L48-L58
def fire_event(self, event, *args, **kwargs): """ Execute the listeners for this event passing any arguments along. """ remove = [] event_stack = self._events[event] for x in event_stack: x['callback'](*args, **kwargs) if x['single']: remove.append(x) for x in remove: event_stack.remove(x)
[ "def", "fire_event", "(", "self", ",", "event", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "remove", "=", "[", "]", "event_stack", "=", "self", ".", "_events", "[", "event", "]", "for", "x", "in", "event_stack", ":", "x", "[", "'callback'...
Execute the listeners for this event passing any arguments along.
[ "Execute", "the", "listeners", "for", "this", "event", "passing", "any", "arguments", "along", "." ]
python
train
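A generic re-creation of the single-shot listener handling in fire_event; the Emitter class is illustrative, not shellish's. Iterating over a copy of the stack lets single-shot entries be removed on the spot instead of being collected into a separate remove list:

import collections

class Emitter(object):

    def __init__(self):
        self._events = collections.defaultdict(list)

    def add_listener(self, event, callback, single=False):
        self._events[event].append({'callback': callback, 'single': single})

    def fire_event(self, event, *args, **kwargs):
        for entry in list(self._events[event]):
            entry['callback'](*args, **kwargs)
            if entry['single']:
                self._events[event].remove(entry)

e = Emitter()
e.add_listener('tick', lambda n: print('tick', n), single=True)
e.fire_event('tick', 1)  # prints: tick 1
e.fire_event('tick', 2)  # nothing; the single-shot listener is gone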
pantsbuild/pants
src/python/pants/pantsd/process_manager.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/process_manager.py#L110-L141
def _deadline_until(cls, closure, action_msg, timeout=FAIL_WAIT_SEC, wait_interval=WAIT_INTERVAL_SEC, info_interval=INFO_INTERVAL_SEC): """Execute a function/closure repeatedly until a True condition or timeout is met. :param func closure: the function/closure to execute (should not block for long periods of time and must return True on success). :param str action_msg: a description of the action that is being executed, to be rendered as info while we wait, and as part of any rendered exception. :param float timeout: the maximum amount of time to wait for a true result from the closure in seconds. N.B. this is timing based, so won't be exact if the runtime of the closure exceeds the timeout. :param float wait_interval: the amount of time to sleep between closure invocations. :param float info_interval: the amount of time to wait before and between reports via info logging that we're still waiting for the closure to succeed. :raises: :class:`ProcessManager.Timeout` on execution timeout. """ now = time.time() deadline = now + timeout info_deadline = now + info_interval while 1: if closure(): return True now = time.time() if now > deadline: raise cls.Timeout('exceeded timeout of {} seconds while waiting for {}'.format(timeout, action_msg)) if now > info_deadline: logger.info('waiting for {}...'.format(action_msg)) info_deadline = info_deadline + info_interval elif wait_interval: time.sleep(wait_interval)
[ "def", "_deadline_until", "(", "cls", ",", "closure", ",", "action_msg", ",", "timeout", "=", "FAIL_WAIT_SEC", ",", "wait_interval", "=", "WAIT_INTERVAL_SEC", ",", "info_interval", "=", "INFO_INTERVAL_SEC", ")", ":", "now", "=", "time", ".", "time", "(", ")", ...
Execute a function/closure repeatedly until a True condition or timeout is met. :param func closure: the function/closure to execute (should not block for long periods of time and must return True on success). :param str action_msg: a description of the action that is being executed, to be rendered as info while we wait, and as part of any rendered exception. :param float timeout: the maximum amount of time to wait for a true result from the closure in seconds. N.B. this is timing based, so won't be exact if the runtime of the closure exceeds the timeout. :param float wait_interval: the amount of time to sleep between closure invocations. :param float info_interval: the amount of time to wait before and between reports via info logging that we're still waiting for the closure to succeed. :raises: :class:`ProcessManager.Timeout` on execution timeout.
[ "Execute", "a", "function", "/", "closure", "repeatedly", "until", "a", "True", "condition", "or", "timeout", "is", "met", "." ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L6081-L6104
def _from_dict(cls, _dict): """Initialize a MessageRequest object from a json dictionary.""" args = {} if 'input' in _dict: args['input'] = MessageInput._from_dict(_dict.get('input')) if 'intents' in _dict: args['intents'] = [ RuntimeIntent._from_dict(x) for x in (_dict.get('intents')) ] if 'entities' in _dict: args['entities'] = [ RuntimeEntity._from_dict(x) for x in (_dict.get('entities')) ] if 'alternate_intents' in _dict: args['alternate_intents'] = _dict.get('alternate_intents') if 'context' in _dict: args['context'] = Context._from_dict(_dict.get('context')) if 'output' in _dict: args['output'] = OutputData._from_dict(_dict.get('output')) if 'actions' in _dict: args['actions'] = [ DialogNodeAction._from_dict(x) for x in (_dict.get('actions')) ] return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'input'", "in", "_dict", ":", "args", "[", "'input'", "]", "=", "MessageInput", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'input'", ")", ")", "if", "'in...
Initialize a MessageRequest object from a json dictionary.
[ "Initialize", "a", "MessageRequest", "object", "from", "a", "json", "dictionary", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/compat/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/compat/__init__.py#L78-L87
def rename_module(new, old): """ Attempts to import the old module and load it under the new name. Used for purely cosmetic name changes in Python 3.x. """ try: sys.modules[new] = imp.load_module(old, *imp.find_module(old)) return True except ImportError: return False
[ "def", "rename_module", "(", "new", ",", "old", ")", ":", "try", ":", "sys", ".", "modules", "[", "new", "]", "=", "imp", ".", "load_module", "(", "old", ",", "*", "imp", ".", "find_module", "(", "old", ")", ")", "return", "True", "except", "Import...
Attempts to import the old module and load it under the new name. Used for purely cosmetic name changes in Python 3.x.
[ "Attempts", "to", "import", "the", "old", "module", "and", "load", "it", "under", "the", "new", "name", ".", "Used", "for", "purely", "cosmetic", "name", "changes", "in", "Python", "3", ".", "x", "." ]
python
train
Julian/Filesystems
filesystems/common.py
https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L157-L170
def _exists(fs, path): """ Check that the given path exists on the filesystem. Note that unlike `os.path.exists`, we *do* propagate file system errors other than a non-existent path or non-existent directory component. E.g., should EPERM or ELOOP be raised, an exception will bubble up. """ try: fs.stat(path) except (exceptions.FileNotFound, exceptions.NotADirectory): return False return True
[ "def", "_exists", "(", "fs", ",", "path", ")", ":", "try", ":", "fs", ".", "stat", "(", "path", ")", "except", "(", "exceptions", ".", "FileNotFound", ",", "exceptions", ".", "NotADirectory", ")", ":", "return", "False", "return", "True" ]
Check that the given path exists on the filesystem. Note that unlike `os.path.exists`, we *do* propagate file system errors other than a non-existent path or non-existent directory component. E.g., should EPERM or ELOOP be raised, an exception will bubble up.
[ "Check", "that", "the", "given", "path", "exists", "on", "the", "filesystem", "." ]
python
train
zarr-developers/zarr
zarr/n5.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/n5.py#L357-L379
def array_metadata_to_zarr(array_metadata): '''Convert array metadata from N5 to zarr format.''' for t, f in zarr_to_n5_keys: array_metadata[t] = array_metadata[f] del array_metadata[f] array_metadata['zarr_format'] = ZARR_FORMAT array_metadata['shape'] = array_metadata['shape'][::-1] array_metadata['chunks'] = array_metadata['chunks'][::-1] array_metadata['fill_value'] = 0 # also if None was requested array_metadata['order'] = 'C' array_metadata['filters'] = [] compressor_config = array_metadata['compressor'] compressor_config = compressor_config_to_zarr(compressor_config) array_metadata['compressor'] = { 'id': N5ChunkWrapper.codec_id, 'compressor_config': compressor_config, 'dtype': array_metadata['dtype'], 'chunk_shape': array_metadata['chunks'] } return array_metadata
[ "def", "array_metadata_to_zarr", "(", "array_metadata", ")", ":", "for", "t", ",", "f", "in", "zarr_to_n5_keys", ":", "array_metadata", "[", "t", "]", "=", "array_metadata", "[", "f", "]", "del", "array_metadata", "[", "f", "]", "array_metadata", "[", "'zarr...
Convert array metadata from N5 to zarr format.
[ "Convert", "array", "metadata", "from", "N5", "to", "zarr", "format", "." ]
python
train
econ-ark/HARK
HARK/interpolation.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/interpolation.py#L1832-L1848
def _derY(self,x,y): ''' Returns the first derivative of the function with respect to Y at each value in (x,y). Only called internally by HARKinterpolator2D._derY. ''' m = len(x) temp = np.zeros((m,self.funcCount)) for j in range(self.funcCount): temp[:,j] = self.functions[j](x,y) temp[np.isnan(temp)] = np.inf i = np.argmin(temp,axis=1) y = temp[np.arange(m),i] dfdy = np.zeros_like(x) for j in range(self.funcCount): c = i == j dfdy[c] = self.functions[j].derivativeY(x[c],y[c]) return dfdy
[ "def", "_derY", "(", "self", ",", "x", ",", "y", ")", ":", "m", "=", "len", "(", "x", ")", "temp", "=", "np", ".", "zeros", "(", "(", "m", ",", "self", ".", "funcCount", ")", ")", "for", "j", "in", "range", "(", "self", ".", "funcCount", ")...
Returns the first derivative of the function with respect to Y at each value in (x,y). Only called internally by HARKinterpolator2D._derY.
[ "Returns", "the", "first", "derivative", "of", "the", "function", "with", "respect", "to", "Y", "at", "each", "value", "in", "(", "x", "y", ")", ".", "Only", "called", "internally", "by", "HARKinterpolator2D", ".", "_derY", "." ]
python
train
grycap/cpyutils
evaluate.py
https://github.com/grycap/cpyutils/blob/fa966fc6d2ae1e1e799e19941561aa79b617f1b1/evaluate.py#L368-L377
def p_kwl_kwl(self, p): ''' kwl : kwl SEPARATOR kwl ''' _LOGGER.debug("kwl -> kwl ; kwl") if p[3] is not None: p[0] = p[3] elif p[1] is not None: p[0] = p[1] else: p[0] = TypedClass(None, TypedClass.UNKNOWN)
[ "def", "p_kwl_kwl", "(", "self", ",", "p", ")", ":", "_LOGGER", ".", "debug", "(", "\"kwl -> kwl ; kwl\"", ")", "if", "p", "[", "3", "]", "is", "not", "None", ":", "p", "[", "0", "]", "=", "p", "[", "3", "]", "elif", "p", "[", "1", "]", "is",...
kwl : kwl SEPARATOR kwl
[ "kwl", ":", "kwl", "SEPARATOR", "kwl" ]
python
train