repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Arkq/flake8-requirements
src/flake8_requirements/checker.py
https://github.com/Arkq/flake8-requirements/blob/d7cb84af2429a63635528b531111a5da527bf2d1/src/flake8_requirements/checker.py#L205-L243
def visit_Call(self, node):
    """Visit a Call node and redirect probable setup() invocations.

    A keyword-only call whose argument names sufficiently resemble the
    known setuptools keywords is rewritten to invoke the ``__f8r_setup``
    tap function instead, and ``self.redirected`` is set.
    """
    self.generic_visit(node)

    # setup() is a keywords-only function: any positional argument
    # rules the call out immediately.
    if node.args:
        return

    names = set()
    for keyword in node.keywords:
        if keyword.arg is not None:
            names.add(keyword.arg)
        # Dictionary expansion (**{...}) for Python >= 3.5.
        elif isinstance(keyword.value, ast.Dict):
            names.update(key.s for key in keyword.value.keys)
    # Dictionary expansion for Python <= 3.4 (legacy Call.kwargs slot).
    if getattr(node, 'kwargs', ()) and isinstance(node.kwargs, ast.Dict):
        names.update(key.s for key in node.kwargs.keys)

    # The bare minimum number of arguments seems to be around five, which
    # includes author, name, version, module/package and something extra.
    if len(names) < 5:
        return

    score = sum(self.attributes.get(name, 0) for name in names) / len(names)
    if score < 0.5:
        LOG.debug(
            "Scoring for setup%r below 0.5: %.2f", tuple(names), score)
        return

    # Redirect call to our setup() tap function.
    node.func = ast.Name(id='__f8r_setup', ctx=node.func.ctx)
    self.redirected = True
[ "def", "visit_Call", "(", "self", ",", "node", ")", ":", "self", ".", "generic_visit", "(", "node", ")", "# Setup() is a keywords-only function.", "if", "node", ".", "args", ":", "return", "keywords", "=", "set", "(", ")", "for", "k", "in", "node", ".", ...
Call visitor - used for finding setup() call.
[ "Call", "visitor", "-", "used", "for", "finding", "setup", "()", "call", "." ]
python
train
worstcase/blockade
blockade/config.py
https://github.com/worstcase/blockade/blob/3dc6ad803f0b0d56586dec9542a6a06aa06cf569/blockade/config.py#L122-L161
def from_dict(values):
    '''
    Build a BlockadeConfig from a dictionary of configuration values.
    Raises BlockadeConfigError on any missing or invalid entry.
    '''
    try:
        containers = values['containers']
        parsed_containers = {}
        for name, container_dict in containers.items():
            try:
                # One config entry may expand into several container
                # instances (controlled by the 'count' config value).
                for cnt in BlockadeContainerConfig.from_dict(name, container_dict):
                    # Reject duplicate 'container_name' definitions.
                    cname = cnt.container_name
                    if cname:
                        clashes = [c for c in parsed_containers.values()
                                   if c.container_name == cname]
                        if clashes:
                            raise BlockadeConfigError("Duplicate 'container_name' definition: %s" % (cname))
                    parsed_containers[cnt.name] = cnt
            except Exception as err:
                raise BlockadeConfigError(
                    "Container '%s' config problem: %s" % (name, err))

        network = values.get('network')
        if network:
            # Overlay user-provided network settings on the defaults.
            defaults = _DEFAULT_NETWORK_CONFIG.copy()
            defaults.update(network)
            network = defaults
        else:
            network = _DEFAULT_NETWORK_CONFIG.copy()

        return BlockadeConfig(parsed_containers, network=network)

    except KeyError as err:
        raise BlockadeConfigError("Config missing value: " + str(err))

    except Exception as err:
        # TODO log this to some debug stream?
        raise BlockadeConfigError("Failed to load config: " + str(err))
[ "def", "from_dict", "(", "values", ")", ":", "try", ":", "containers", "=", "values", "[", "'containers'", "]", "parsed_containers", "=", "{", "}", "for", "name", ",", "container_dict", "in", "containers", ".", "items", "(", ")", ":", "try", ":", "# one ...
Instantiate a BlockadeConfig instance based on a given dictionary of configuration values
[ "Instantiate", "a", "BlockadeConfig", "instance", "based", "on", "a", "given", "dictionary", "of", "configuration", "values" ]
python
valid
scanny/python-pptx
pptx/shapes/freeform.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/freeform.py#L193-L203
def _local_to_shape(self, local_x, local_y): """Translate local coordinates point to shape coordinates. Shape coordinates have the same unit as local coordinates, but are offset such that the origin of the shape coordinate system (0, 0) is located at the top-left corner of the shape bounding box. """ return ( local_x - self.shape_offset_x, local_y - self.shape_offset_y )
[ "def", "_local_to_shape", "(", "self", ",", "local_x", ",", "local_y", ")", ":", "return", "(", "local_x", "-", "self", ".", "shape_offset_x", ",", "local_y", "-", "self", ".", "shape_offset_y", ")" ]
Translate local coordinates point to shape coordinates. Shape coordinates have the same unit as local coordinates, but are offset such that the origin of the shape coordinate system (0, 0) is located at the top-left corner of the shape bounding box.
[ "Translate", "local", "coordinates", "point", "to", "shape", "coordinates", "." ]
python
train
AltSchool/dynamic-rest
dynamic_rest/fields/fields.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L161-L176
def root_serializer(self):
    """Return the root serializer (serializer for the primary resource)."""
    if not self.parent:
        # Don't cache, so that we'd recompute if parent is set.
        return None

    visited = set()
    current = self
    while True:
        visited.add(current)
        parent = getattr(current, 'parent', None)
        if not parent:
            # No parent left: this is the root.
            return current
        if parent in visited:
            # Parent cycle -- bail out instead of looping forever.
            return None
        current = parent
[ "def", "root_serializer", "(", "self", ")", ":", "if", "not", "self", ".", "parent", ":", "# Don't cache, so that we'd recompute if parent is set.", "return", "None", "node", "=", "self", "seen", "=", "set", "(", ")", "while", "True", ":", "seen", ".", "add", ...
Return the root serializer (serializer for the primary resource).
[ "Return", "the", "root", "serializer", "(", "serializer", "for", "the", "primary", "resource", ")", "." ]
python
train
mikemaccana/python-docx
docx.py
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L759-L907
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks. Then scan those
    text blocks for replace. Since the text to search could be spawned
    across multiple text blocks, we need to adopt some sort of algorithm to
    handle this situation. The smaller matching group of blocks (up to bs)
    is then adopted. If the matching group has more than one block, blocks
    other than first are cleared and all the replacement text is put on
    first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # searchels holds the last n (1 < n < bs) text elements seen while
    # walking the document; candidate matches are assembled from them.
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels) + 1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s + l <= len(searchels):
                            e = range(s, s + l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the combined blocks
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s", searchre.pattern)
                                    log.debug("Requested replacement: %s", replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s", match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: was `isinstance(replace(list, tuple))`,
                                    # which *called* replace and raised TypeError.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: message lacked a %s placeholder,
                                        # so the computed value was never logged.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace, txtsearch))

                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # Replacing with a list of etree
                                            # elements: clear the text in the tag
                                            # and append the elements after the
                                            # parent paragraph (because t elements
                                            # cannot have children).
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p) + 1
                                            for r in replace:
                                                p.getparent().insert(insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug("Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
[ "def", "advReplace", "(", "document", ",", "search", ",", "replace", ",", "bs", "=", "3", ")", ":", "# Enables debug output", "DEBUG", "=", "False", "newdocument", "=", "document", "# Compile the search regexp", "searchre", "=", "re", ".", "compile", "(", "sea...
Replace all occurences of string with a different string, return updated document This is a modified version of python-docx.replace() that takes into account blocks of <bs> elements at a time. The replace element can also be a string or an xml etree element. What it does: It searches the entire document body for text blocks. Then scan thos text blocks for replace. Since the text to search could be spawned across multiple text blocks, we need to adopt some sort of algorithm to handle this situation. The smaller matching group of blocks (up to bs) is then adopted. If the matching group has more than one block, blocks other than first are cleared and all the replacement text is put on first block. Examples: original text blocks : [ 'Hel', 'lo,', ' world!' ] search / replace: 'Hello,' / 'Hi!' output blocks : [ 'Hi!', '', ' world!' ] original text blocks : [ 'Hel', 'lo,', ' world!' ] search / replace: 'Hello, world' / 'Hi!' output blocks : [ 'Hi!!', '', '' ] original text blocks : [ 'Hel', 'lo,', ' world!' ] search / replace: 'Hel' / 'Hal' output blocks : [ 'Hal', 'lo,', ' world!' ] @param instance document: The original document @param str search: The text to search for (regexp) @param mixed replace: The replacement text or lxml.etree element to append, or a list of etree elements @param int bs: See above @return instance The document with replacement applied
[ "Replace", "all", "occurences", "of", "string", "with", "a", "different", "string", "return", "updated", "document" ]
python
train
BlackEarth/bf
bf/styles.py
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/styles.py#L14-L23
def styleProperties(Class, style):
    """return a properties dict from a given cssutils style

    Each entry maps "<name>:" to the property value; a non-empty
    priority (e.g. "important") is appended to the value as
    " !<priority>".
    """
    properties = Dict()
    for property in style.getProperties(all=True):
        stylename = property.name + ':'
        properties[stylename] = property.value
        if property.priority != '':
            # BUGFIX: append the priority to the value; the original
            # assignment overwrote the value, losing it entirely.
            properties[stylename] += ' !' + property.priority
    return properties
[ "def", "styleProperties", "(", "Class", ",", "style", ")", ":", "properties", "=", "Dict", "(", ")", "for", "property", "in", "style", ".", "getProperties", "(", "all", "=", "True", ")", ":", "stylename", "=", "property", ".", "name", "+", "':'", "prop...
return a properties dict from a given cssutils style
[ "return", "a", "properties", "dict", "from", "a", "given", "cssutils", "style" ]
python
train
cos-archives/modular-odm
modularodm/cache.py
https://github.com/cos-archives/modular-odm/blob/8a34891892b8af69b21fdc46701c91763a5c1cf9/modularodm/cache.py#L3-L21
def set_nested(data, value, *keys):
    """Set a value deep inside a nested dictionary, creating levels as needed.

    :param dict data: Dictionary to mutate
    :param value: Value to set
    :param list *keys: List of nested keys

    >>> data = {}
    >>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
    >>> data
    {'k0': {'k1': {'k2': 'hi'}}}
    """
    head, rest = keys[0], keys[1:]
    if not rest:
        data[head] = value
    else:
        # Create the intermediate dict on first visit, then recurse.
        child = data.setdefault(head, {})
        set_nested(child, value, *rest)
[ "def", "set_nested", "(", "data", ",", "value", ",", "*", "keys", ")", ":", "if", "len", "(", "keys", ")", "==", "1", ":", "data", "[", "keys", "[", "0", "]", "]", "=", "value", "else", ":", "if", "keys", "[", "0", "]", "not", "in", "data", ...
Assign to a nested dictionary. :param dict data: Dictionary to mutate :param value: Value to set :param list *keys: List of nested keys >>> data = {} >>> set_nested(data, 'hi', 'k0', 'k1', 'k2') >>> data {'k0': {'k1': {'k2': 'hi'}}}
[ "Assign", "to", "a", "nested", "dictionary", "." ]
python
valid
kragniz/python-etcd3
etcd3/client.py
https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/client.py#L531-L553
def status(self):
    """Get the status of the responding member."""
    response = self.maintenancestub.Status(
        etcdrpc.StatusRequest(),
        self.timeout,
        credentials=self.call_credentials,
        metadata=self.metadata
    )

    # Resolve the leader id reported by the member to a known Member
    # object; None if no member matches.
    leader = None
    for member in self.members:
        if member.id == response.leader:
            leader = member
            break

    return Status(response.version,
                  response.dbSize,
                  leader,
                  response.raftIndex,
                  response.raftTerm)
[ "def", "status", "(", "self", ")", ":", "status_request", "=", "etcdrpc", ".", "StatusRequest", "(", ")", "status_response", "=", "self", ".", "maintenancestub", ".", "Status", "(", "status_request", ",", "self", ".", "timeout", ",", "credentials", "=", "sel...
Get the status of the responding member.
[ "Get", "the", "status", "of", "the", "responding", "member", "." ]
python
train
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/plugins/views/roles.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/views/roles.py#L40-L54
def post(self):
    """Create a new role from the 'name' and 'color' request arguments."""
    self.reqparse.add_argument('name', type=str, required=True)
    self.reqparse.add_argument('color', type=str, required=True)
    args = self.reqparse.parse_args()

    new_role = Role()
    new_role.name, new_role.color = args['name'], args['color']
    db.session.add(new_role)
    db.session.commit()

    # Record who created the role and with what arguments.
    auditlog(event='role.create', actor=session['user'].username, data=args)

    message = 'Role {} has been created'.format(new_role.role_id)
    return self.make_response(message, HTTP.CREATED)
[ "def", "post", "(", "self", ")", ":", "self", ".", "reqparse", ".", "add_argument", "(", "'name'", ",", "type", "=", "str", ",", "required", "=", "True", ")", "self", ".", "reqparse", ".", "add_argument", "(", "'color'", ",", "type", "=", "str", ",",...
Create a new role
[ "Create", "a", "new", "role" ]
python
train
saltstack/salt
salt/modules/lxd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L3607-L3643
def _set_property_dict_item(obj, prop, key, value):
    '''
    Sets the dict item key of the attr from obj.

    Basicaly it does getattr(obj, prop)[key] = value.

    For the disk device we added some checks to make
    device changes on the CLI saver.
    '''
    attr = getattr(obj, prop)
    if prop == 'devices':
        device_type = value['type']

        if device_type == 'disk':
            if 'path' not in value:
                raise SaltInvocationError(
                    "path must be given as parameter"
                )

            if value['path'] != '/' and 'source' not in value:
                raise SaltInvocationError(
                    "source must be given as parameter"
                )

        # BUGFIX: iterate over a snapshot of the keys. Deleting from a
        # dict while iterating its live keys() view raises RuntimeError
        # on Python 3; list() is also safe on Python 2.
        for k in list(value.keys()):
            if k.startswith('__'):
                del value[k]

        attr[key] = value

    else:  # config
        attr[key] = six.text_type(value)

    pylxd_save_object(obj)

    return _pylxd_model_to_dict(obj)
[ "def", "_set_property_dict_item", "(", "obj", ",", "prop", ",", "key", ",", "value", ")", ":", "attr", "=", "getattr", "(", "obj", ",", "prop", ")", "if", "prop", "==", "'devices'", ":", "device_type", "=", "value", "[", "'type'", "]", "if", "device_ty...
Sets the dict item key of the attr from obj. Basicaly it does getattr(obj, prop)[key] = value. For the disk device we added some checks to make device changes on the CLI saver.
[ "Sets", "the", "dict", "item", "key", "of", "the", "attr", "from", "obj", "." ]
python
train
google/openhtf
openhtf/util/conf.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/conf.py#L544-L616
def inject_positional_args(self, method):
    """Decorator for injecting positional arguments from the configuration.

    This decorator wraps the given method, so that any positional arguments are
    passed with corresponding values from the configuration.  The name of the
    positional argument must match the configuration key.

    Keyword arguments are *NEVER* modified, even if their names match
    configuration keys.  Avoid naming keyword args names that are also
    configuration keys to avoid confusion.

    Additional positional arguments may be used that do not appear in the
    configuration, but those arguments *MUST* be specified as keyword arguments
    upon invocation of the method.  This is to avoid ambiguity in which
    positional arguments are getting which values.

    Args:
      method: The method to wrap.

    Returns:
      A wrapper that, when invoked, will call the wrapped method, passing in
    configuration values for positional arguments.
    """
    inspect = self._modules['inspect']
    argspec = inspect.getargspec(method)
    # Index in argspec.args of the first keyword argument. This index is a
    # negative number if there are any kwargs, or 0 if there are no kwargs.
    keyword_arg_index = -1 * len(argspec.defaults or [])
    # `or None` makes a zero index slice the whole list instead of nothing.
    arg_names = argspec.args[:keyword_arg_index or None]
    kwarg_names = argspec.args[len(arg_names):]
    functools = self._modules['functools']  # pylint: disable=redefined-outer-name

    # Create the actual method wrapper, all we do is update kwargs.  Note we
    # don't pass any *args through because there can't be any - we've filled
    # them all in with values from the configuration.  Any positional args that
    # are missing from the configuration *must* be explicitly specified as
    # kwargs.
    @functools.wraps(method)
    def method_wrapper(**kwargs):
        """Wrapper that pulls values from openhtf.util.conf."""
        # Check for keyword args with names that are in the config so we can
        # warn: keyword args are never injected, so a name collision with a
        # configuration key is likely a mistake.
        for kwarg in kwarg_names:
            if kwarg in self:
                self._logger.warning('Keyword arg %s not set from configuration, but '
                                     'is a configuration key', kwarg)

        # Set positional args from configuration values.
        final_kwargs = {name: self[name] for name in arg_names if name in self}

        # Explicit caller-supplied kwargs win over configuration values;
        # warn so the override is visible in the logs.
        for overridden in set(kwargs) & set(final_kwargs):
            self._logger.warning('Overriding configuration value for kwarg %s (%s) '
                                 'with provided kwarg value: %s', overridden,
                                 self[overridden], kwargs[overridden])

        final_kwargs.update(kwargs)
        if inspect.ismethod(method):
            name = '%s.%s' % (method.__self__.__class__.__name__,
                              method.__name__)
        else:
            name = method.__name__
        self._logger.debug('Invoking %s with %s', name, final_kwargs)
        return method(**final_kwargs)

    # We have to check for a 'self' parameter explicitly because Python doesn't
    # pass it as a keyword arg, it passes it as the first positional arg.
    # NOTE(review): assumes at least one declared argument; argspec.args[0]
    # would raise IndexError on a zero-arg function -- confirm callers.
    if argspec.args[0] == 'self':
        @functools.wraps(method)
        def self_wrapper(self, **kwargs):  # pylint: disable=invalid-name
            """Wrapper that pulls values from openhtf.util.conf."""
            # Re-route the bound instance through kwargs so method_wrapper
            # can treat it like any other argument.
            kwargs['self'] = self
            return method_wrapper(**kwargs)
        return self_wrapper
    return method_wrapper
[ "def", "inject_positional_args", "(", "self", ",", "method", ")", ":", "inspect", "=", "self", ".", "_modules", "[", "'inspect'", "]", "argspec", "=", "inspect", ".", "getargspec", "(", "method", ")", "# Index in argspec.args of the first keyword argument. This index...
Decorator for injecting positional arguments from the configuration. This decorator wraps the given method, so that any positional arguments are passed with corresponding values from the configuration. The name of the positional argument must match the configuration key. Keyword arguments are *NEVER* modified, even if their names match configuration keys. Avoid naming keyword args names that are also configuration keys to avoid confusion. Additional positional arguments may be used that do not appear in the configuration, but those arguments *MUST* be specified as keyword arguments upon invocation of the method. This is to avoid ambiguity in which positional arguments are getting which values. Args: method: The method to wrap. Returns: A wrapper that, when invoked, will call the wrapped method, passing in configuration values for positional arguments.
[ "Decorator", "for", "injecting", "positional", "arguments", "from", "the", "configuration", "." ]
python
train
mardix/Mocha
mocha/decorators.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/decorators.py#L42-L78
def cors(*args, **kwargs):
    """
    A wrapper around flask-cors cross_origin, to also act on classes

    **An extra note about cors, a response must be available before the
    cors is applied. Dynamic return is applied after the fact, so use
    the decorators, json, xml, or return self.render() for txt/html

    ie:
        @cors()
        class Index(Mocha):
            def index(self):
                return self.render()

            @json
            def json(self):
                return {}

        class Index2(Mocha):
            def index(self):
                return self.render()

            @cors()
            @json
            def json(self):
                return {}

    :return:
    """
    def decorator(fn):
        cors_fn = flask_cors.cross_origin(automatic_options=False, *args, **kwargs)
        # Plain functions are wrapped directly.
        if not inspect.isclass(fn):
            return cors_fn(fn)
        # Classes get the wrapper applied to each member in place.
        apply_function_to_members(fn, cors_fn)
        return fn
    return decorator
[ "def", "cors", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "decorator", "(", "fn", ")", ":", "cors_fn", "=", "flask_cors", ".", "cross_origin", "(", "automatic_options", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ...
A wrapper around flask-cors cross_origin, to also act on classes **An extra note about cors, a response must be available before the cors is applied. Dynamic return is applied after the fact, so use the decorators, json, xml, or return self.render() for txt/html ie: @cors() class Index(Mocha): def index(self): return self.render() @json def json(self): return {} class Index2(Mocha): def index(self): return self.render() @cors() @json def json(self): return {} :return:
[ "A", "wrapper", "around", "flask", "-", "cors", "cross_origin", "to", "also", "act", "on", "classes" ]
python
train
graphql-python/graphql-core-next
graphql/type/validate.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/type/validate.py#L40-L68
def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
    """Validate a GraphQL schema.

    Implements the "Type Validation" sub-sections of the specification's
    "Type System" section.

    Validation runs synchronously, returning a list of encountered errors, or
    an empty list if no errors were encountered and the Schema is valid.
    """
    # First check to ensure the provided value is in fact a GraphQLSchema.
    assert_schema(schema)

    # Reuse cached results when this schema has been validated before.
    # noinspection PyProtectedMember
    cached = schema._validation_errors
    if cached is not None:
        return cached

    # Run the validation passes, collecting any errors they produce.
    context = SchemaValidationContext(schema)
    context.validate_root_types()
    context.validate_directives()
    context.validate_types()

    # Persist the outcome so validation never runs twice for this schema.
    errors = context.errors
    schema._validation_errors = errors
    return errors
[ "def", "validate_schema", "(", "schema", ":", "GraphQLSchema", ")", "->", "List", "[", "GraphQLError", "]", ":", "# First check to ensure the provided value is in fact a GraphQLSchema.", "assert_schema", "(", "schema", ")", "# If this Schema has already been validated, return the...
Validate a GraphQL schema. Implements the "Type Validation" sub-sections of the specification's "Type System" section. Validation runs synchronously, returning a list of encountered errors, or an empty list if no errors were encountered and the Schema is valid.
[ "Validate", "a", "GraphQL", "schema", "." ]
python
train
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/spv.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/spv.py#L542-L590
def get_target(cls, path, index, chain=None):
    """
    Calculate the target difficulty at a particular difficulty interval (index).
    Return (bits, target) on success

    NOTE(review): the arithmetic below relies on Python 2 integer division
    semantics (`bits/MM`, `(...)/nTargetTimespan`, `c /= 256`); under
    Python 3 these would yield floats and change the result -- confirm
    this module targets Python 2 only.
    """
    if chain is None:
        chain = []  # Do not use mutables as default values!

    # Maximum (easiest) proof-of-work target.
    max_target = 0x00000000FFFF0000000000000000000000000000000000000000000000000000
    if index == 0:
        # Genesis interval uses the fixed starting difficulty.
        return 0x1d00ffff, max_target

    # Headers bracketing the difficulty interval.
    first = SPVClient.read_header( path, (index-1)*BLOCK_DIFFICULTY_CHUNK_SIZE)
    last = SPVClient.read_header( path, index*BLOCK_DIFFICULTY_CHUNK_SIZE - 1, allow_none=True)
    if last is None:
        # Last header not on disk yet; fall back to the in-memory chain.
        # NOTE(review): `last` stays None if the chain lacks it too, and
        # last.get() below would raise AttributeError -- confirm callers
        # always pass a chain covering this interval.
        for h in chain:
            if h.get('block_height') == index*BLOCK_DIFFICULTY_CHUNK_SIZE - 1:
                last = h

    # Clamp the actual timespan to [target/4, target*4], as Bitcoin does.
    nActualTimespan = last.get('timestamp') - first.get('timestamp')
    nTargetTimespan = BLOCK_DIFFICULTY_INTERVAL
    nActualTimespan = max(nActualTimespan, nTargetTimespan/4)
    nActualTimespan = min(nActualTimespan, nTargetTimespan*4)

    bits = last.get('bits')
    # convert to bignum: 'bits' is compact form, mantissa in the low 3
    # bytes and exponent in the high byte.
    MM = 256*256*256
    a = bits%MM
    if a < 0x8000:
        a *= 256

    target = (a) * pow(2, 8 * (bits/MM - 3))

    # new target, retargeted by the (clamped) timespan ratio and capped.
    new_target = min( max_target, (target * nActualTimespan)/nTargetTimespan )

    # convert it to bits (compact representation): strip leading zero
    # bytes, keep a 3-byte mantissa and track the exponent in i.
    c = ("%064X"%new_target)[2:]
    i = 31
    while c[0:2]=="00":
        c = c[2:]
        i -= 1

    c = int('0x'+c[0:6],16)
    if c >= 0x800000:
        # Mantissa would look negative; shift one byte into the exponent.
        c /= 256
        i += 1

    new_bits = c + MM * i
    return new_bits, new_target
[ "def", "get_target", "(", "cls", ",", "path", ",", "index", ",", "chain", "=", "None", ")", ":", "if", "chain", "is", "None", ":", "chain", "=", "[", "]", "# Do not use mutables as default values!", "max_target", "=", "0x00000000FFFF0000000000000000000000000000000...
Calculate the target difficulty at a particular difficulty interval (index). Return (bits, target) on success
[ "Calculate", "the", "target", "difficulty", "at", "a", "particular", "difficulty", "interval", "(", "index", ")", ".", "Return", "(", "bits", "target", ")", "on", "success" ]
python
train
googleads/googleads-python-lib
googleads/common.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/common.py#L1218-L1240
def _PackArguments(self, method_name, args, set_type_attrs=False):
    """Properly pack input dictionaries for zeep.

    Pack a list of python dictionaries into XML objects. Dictionaries which
    contain an 'xsi_type' entry are converted into that type instead of the
    argument default. This allows creation of complex objects which include
    inherited types.

    Args:
      method_name: The name of the method that will be called.
      args: A list of dictionaries containing arguments to the method.
      set_type_attrs: A boolean indicating whether or not attributes that end
        in .Type should be set. This is only necessary for batch job service.

    Returns:
      A list of XML objects that can be passed to zeep.
    """
    # Look up the method's parameter elements so we know which XML types
    # to instantiate for each argument.
    element_name = '{%s}%s' % (self._GetBindingNamespace(), method_name)
    op_params = self.zeep_client.get_element(element_name).type.elements

    packed = []
    for (_, param), param_data in izip(op_params, args):
        packed.append(
            self._PackArgumentsHelper(param, param_data, set_type_attrs))
    return packed
[ "def", "_PackArguments", "(", "self", ",", "method_name", ",", "args", ",", "set_type_attrs", "=", "False", ")", ":", "# Get the params for the method to find the initial types to instantiate.", "op_params", "=", "self", ".", "zeep_client", ".", "get_element", "(", "'{%...
Properly pack input dictionaries for zeep. Pack a list of python dictionaries into XML objects. Dictionaries which contain an 'xsi_type' entry are converted into that type instead of the argument default. This allows creation of complex objects which include inherited types. Args: method_name: The name of the method that will be called. args: A list of dictionaries containing arguments to the method. set_type_attrs: A boolean indicating whether or not attributes that end in .Type should be set. This is only necessary for batch job service. Returns: A list of XML objects that can be passed to zeep.
[ "Properly", "pack", "input", "dictionaries", "for", "zeep", "." ]
python
train
saltstack/salt
salt/cli/daemons.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L534-L568
def prepare(self):
    '''
    Run the preparation sequence required to start a salt syndic minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(Syndic, self).prepare()
    try:
        if self.config['verify_env']:
            # Ensure the runtime directories exist with the ownership and
            # permissions appropriate for the configured user.
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                root_dir=self.config['root_dir'],
                pki_dir=self.config['pki_dir'],
            )
    except OSError as error:
        # Directory/permission problems are fatal for the daemon.
        self.environment_failure(error)

    # Logging must be configured and verified before anything chatty runs.
    self.setup_logfile_logger()
    verify_log(self.config)
    self.action_log_info('Setting up "{0}"'.format(self.config['id']))

    # Late import so logging works correctly
    import salt.minion
    # Daemonize (if requested) before creating the SyndicManager so its
    # resources belong to the daemonized process.
    self.daemonize_if_required()
    self.syndic = salt.minion.SyndicManager(self.config)
    self.set_pidfile()
[ "def", "prepare", "(", "self", ")", ":", "super", "(", "Syndic", ",", "self", ")", ".", "prepare", "(", ")", "try", ":", "if", "self", ".", "config", "[", "'verify_env'", "]", ":", "verify_env", "(", "[", "self", ".", "config", "[", "'pki_dir'", "]...
Run the preparation sequence required to start a salt syndic minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare()
[ "Run", "the", "preparation", "sequence", "required", "to", "start", "a", "salt", "syndic", "minion", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/install.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L128-L134
def _set_pip_ssl(anaconda_dir): """Set PIP SSL certificate to installed conda certificate to avoid SSL errors """ if anaconda_dir: cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem") if os.path.exists(cert_file): os.environ["PIP_CERT"] = cert_file
[ "def", "_set_pip_ssl", "(", "anaconda_dir", ")", ":", "if", "anaconda_dir", ":", "cert_file", "=", "os", ".", "path", ".", "join", "(", "anaconda_dir", ",", "\"ssl\"", ",", "\"cert.pem\"", ")", "if", "os", ".", "path", ".", "exists", "(", "cert_file", ")...
Set PIP SSL certificate to installed conda certificate to avoid SSL errors
[ "Set", "PIP", "SSL", "certificate", "to", "installed", "conda", "certificate", "to", "avoid", "SSL", "errors" ]
python
train
linkhub-sdk/popbill.py
popbill/statementService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L204-L235
def issue(self, CorpNum, ItemCode, MgtKey, Memo=None, EmailSubject=None, UserID=None): """ 발행 args CorpNum : 팝빌회원 사업자번호 ItemCode : 명세서 종류 코드 [121 - 거래명세서], [122 - 청구서], [123 - 견적서], [124 - 발주서], [125 - 입금표], [126 - 영수증] MgtKey : 파트너 문서관리번호 Memo : 처리메모 EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송) UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException """ if MgtKey == None or MgtKey == "": raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") if ItemCode == None or ItemCode == "": raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.") req = {} postData = "" if Memo != None and Memo != '': req["memo"] = Memo if EmailSubject != None and EmailSubject != '': req["emailSubject"] = EmailSubject postData = self._stringtify(req) return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "ISSUE")
[ "def", "issue", "(", "self", ",", "CorpNum", ",", "ItemCode", ",", "MgtKey", ",", "Memo", "=", "None", ",", "EmailSubject", "=", "None", ",", "UserID", "=", "None", ")", ":", "if", "MgtKey", "==", "None", "or", "MgtKey", "==", "\"\"", ":", "raise", ...
발행 args CorpNum : 팝빌회원 사업자번호 ItemCode : 명세서 종류 코드 [121 - 거래명세서], [122 - 청구서], [123 - 견적서], [124 - 발주서], [125 - 입금표], [126 - 영수증] MgtKey : 파트너 문서관리번호 Memo : 처리메모 EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송) UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException
[ "발행", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "ItemCode", ":", "명세서", "종류", "코드", "[", "121", "-", "거래명세서", "]", "[", "122", "-", "청구서", "]", "[", "123", "-", "견적서", "]", "[", "124", "-", "발주서", "]", "[", "125", "-", "입금표", "]", "[", "126", ...
python
train
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L671-L678
def delete_issue_remote_link_by_id(self, issue_key, link_id): """ Deletes Remote Link on Issue :param issue_key: str :param link_id: str """ url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id) return self.delete(url)
[ "def", "delete_issue_remote_link_by_id", "(", "self", ",", "issue_key", ",", "link_id", ")", ":", "url", "=", "'rest/api/2/issue/{issue_key}/remotelink/{link_id}'", ".", "format", "(", "issue_key", "=", "issue_key", ",", "link_id", "=", "link_id", ")", "return", "se...
Deletes Remote Link on Issue :param issue_key: str :param link_id: str
[ "Deletes", "Remote", "Link", "on", "Issue", ":", "param", "issue_key", ":", "str", ":", "param", "link_id", ":", "str" ]
python
train
quintusdias/glymur
glymur/jp2box.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2box.py#L1255-L1277
def _validate(self, writing=False): """ Validate the box before writing to file. """ if self.brand not in ['jp2 ', 'jpx ']: msg = ("The file type brand was '{brand}'. " "It should be either 'jp2 ' or 'jpx '.") msg = msg.format(brand=self.brand) if writing: raise IOError(msg) else: warnings.warn(msg, UserWarning) for item in self.compatibility_list: if item not in self._valid_cls: msg = ("The file type compatibility list {items} is " "not valid. All items should be members of " "{valid_entries}.") msg = msg.format(items=self.compatibility_list, valid_entries=self._valid_cls) if writing: raise IOError(msg) else: warnings.warn(msg, UserWarning)
[ "def", "_validate", "(", "self", ",", "writing", "=", "False", ")", ":", "if", "self", ".", "brand", "not", "in", "[", "'jp2 '", ",", "'jpx '", "]", ":", "msg", "=", "(", "\"The file type brand was '{brand}'. \"", "\"It should be either 'jp2 ' or 'jpx '.\"", ")...
Validate the box before writing to file.
[ "Validate", "the", "box", "before", "writing", "to", "file", "." ]
python
train
Hackerfleet/hfos
hfos/ui/clientmanager.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/clientmanager.py#L223-L245
def who(self, *args): """Display a table of connected users and clients""" if len(self._users) == 0: self.log('No users connected') if len(self._clients) == 0: self.log('No clients connected') return Row = namedtuple("Row", ['User', 'Client', 'IP']) rows = [] for user in self._users.values(): for key, client in self._clients.items(): if client.useruuid == user.uuid: row = Row(user.account.name, key, client.ip) rows.append(row) for key, client in self._clients.items(): if client.useruuid is None: row = Row('ANON', key, client.ip) rows.append(row) self.log("\n" + std_table(rows))
[ "def", "who", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "self", ".", "_users", ")", "==", "0", ":", "self", ".", "log", "(", "'No users connected'", ")", "if", "len", "(", "self", ".", "_clients", ")", "==", "0", ":", "self", "...
Display a table of connected users and clients
[ "Display", "a", "table", "of", "connected", "users", "and", "clients" ]
python
train
elsampsa/valkka-live
valkka/mvision/alpr/openalpr_fix.py
https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/mvision/alpr/openalpr_fix.py#L252-L261
def set_default_region(self, region): """ This sets the default region for detecting license plates. For example, setting region to "md" for Maryland or "fr" for France. :param region: A unicode/ascii string (Python 2/3) or bytes array (Python 3) :return: None """ region = _convert_to_charp(region) self._set_default_region_func(self.alpr_pointer, region)
[ "def", "set_default_region", "(", "self", ",", "region", ")", ":", "region", "=", "_convert_to_charp", "(", "region", ")", "self", ".", "_set_default_region_func", "(", "self", ".", "alpr_pointer", ",", "region", ")" ]
This sets the default region for detecting license plates. For example, setting region to "md" for Maryland or "fr" for France. :param region: A unicode/ascii string (Python 2/3) or bytes array (Python 3) :return: None
[ "This", "sets", "the", "default", "region", "for", "detecting", "license", "plates", ".", "For", "example", "setting", "region", "to", "md", "for", "Maryland", "or", "fr", "for", "France", "." ]
python
train
zqfang/GSEApy
gseapy/algorithm.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L191-L253
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes, ascending, rs=np.random.RandomState()): """Build shuffled ranking matrix when permutation_type eq to phenotype. :param exprs: gene_expression DataFrame, gene_name indexed. :param str method: calculate correlation or ranking. methods including: 1. 'signal_to_noise'. 2. 't_test'. 3. 'ratio_of_classes' (also referred to as fold change). 4. 'diff_of_classes'. 5. 'log2_ratio_of_classes'. :param int permuation_num: how many times of classes is being shuffled :param str pos: one of labels of phenotype's names. :param str neg: one of labels of phenotype's names. :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what class of phenotype. :param bool ascending: bool. Sort ascending vs. descending. :return: returns two 2d ndarray with shape (nperm, gene_num). | cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix. | cor_mat: sorted and permutated (exclude last row) ranking matrix. 
""" # S: samples, G: gene number G, S = exprs.shape # genes = exprs.index.values expr_mat = exprs.values.T perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1)) # random shuffle on the first dim, last matrix is not shuffled for arr in perm_cor_tensor[:-1]: rs.shuffle(arr) classes = np.array(classes) pos = classes == pos neg = classes == neg pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1) neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1) pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1) neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1) if method == 'signal_to_noise': cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std) elif method == 't_test': denom = 1.0/G cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2) elif method == 'ratio_of_classes': cor_mat = pos_cor_mean / neg_cor_mean elif method == 'diff_of_classes': cor_mat = pos_cor_mean - neg_cor_mean elif method == 'log2_ratio_of_classes': cor_mat = np.log2(pos_cor_mean / neg_cor_mean) else: logging.error("Please provide correct method name!!!") sys.exit(0) # return matix[nperm+1, perm_cors] cor_mat_ind = cor_mat.argsort() # ndarray: sort in place cor_mat.sort() # genes_mat = genes.take(cor_mat_ind) if ascending: return cor_mat_ind, cor_mat # descending order of ranking and genes # return genes_mat[:,::-1], cor_mat[:,::-1] return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
[ "def", "ranking_metric_tensor", "(", "exprs", ",", "method", ",", "permutation_num", ",", "pos", ",", "neg", ",", "classes", ",", "ascending", ",", "rs", "=", "np", ".", "random", ".", "RandomState", "(", ")", ")", ":", "# S: samples, G: gene number", "G", ...
Build shuffled ranking matrix when permutation_type eq to phenotype. :param exprs: gene_expression DataFrame, gene_name indexed. :param str method: calculate correlation or ranking. methods including: 1. 'signal_to_noise'. 2. 't_test'. 3. 'ratio_of_classes' (also referred to as fold change). 4. 'diff_of_classes'. 5. 'log2_ratio_of_classes'. :param int permuation_num: how many times of classes is being shuffled :param str pos: one of labels of phenotype's names. :param str neg: one of labels of phenotype's names. :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what class of phenotype. :param bool ascending: bool. Sort ascending vs. descending. :return: returns two 2d ndarray with shape (nperm, gene_num). | cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix. | cor_mat: sorted and permutated (exclude last row) ranking matrix.
[ "Build", "shuffled", "ranking", "matrix", "when", "permutation_type", "eq", "to", "phenotype", "." ]
python
test
Contraz/demosys-py
demosys/scene/scene.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/scene/scene.py#L76-L95
def draw_bbox(self, projection_matrix=None, camera_matrix=None, all=True): """Draw scene and mesh bounding boxes""" projection_matrix = projection_matrix.astype('f4').tobytes() camera_matrix = camera_matrix.astype('f4').tobytes() # Scene bounding box self.bbox_program["m_proj"].write(projection_matrix) self.bbox_program["m_view"].write(self._view_matrix.astype('f4').tobytes()) self.bbox_program["m_cam"].write(camera_matrix) self.bbox_program["bb_min"].write(self.bbox_min.astype('f4').tobytes()) self.bbox_program["bb_max"].write(self.bbox_max.astype('f4').tobytes()) self.bbox_program["color"].value = (1.0, 0.0, 0.0) self.bbox_vao.render(self.bbox_program) if not all: return # Draw bounding box for children for node in self.root_nodes: node.draw_bbox(projection_matrix, camera_matrix, self.bbox_program, self.bbox_vao)
[ "def", "draw_bbox", "(", "self", ",", "projection_matrix", "=", "None", ",", "camera_matrix", "=", "None", ",", "all", "=", "True", ")", ":", "projection_matrix", "=", "projection_matrix", ".", "astype", "(", "'f4'", ")", ".", "tobytes", "(", ")", "camera_...
Draw scene and mesh bounding boxes
[ "Draw", "scene", "and", "mesh", "bounding", "boxes" ]
python
valid
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L194-L214
def mark_quoted_strings(sql): """Mark all quoted strings in the SOQL by '@' and get them as params, with respect to all escaped backslashes and quotes. """ # pattern of a string parameter (pm), a char escaped by backslash (bs) # out_pattern: characters valid in SOQL pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'") bs_pattern = re.compile(r"\\([\\'])") out_pattern = re.compile(r"^(?:[-!()*+,.:<=>\w\s|%s])*$") missing_apostrophe = "invalid character in SOQL or a missing apostrophe" start = 0 out = [] params = [] for match in pm_pattern.finditer(sql): out.append(sql[start:match.start()]) assert out_pattern.match(sql[start:match.start()]), missing_apostrophe params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() - 1])) start = match.end() out.append(sql[start:]) assert out_pattern.match(sql[start:]), missing_apostrophe return '@'.join(out), params
[ "def", "mark_quoted_strings", "(", "sql", ")", ":", "# pattern of a string parameter (pm), a char escaped by backslash (bs)", "# out_pattern: characters valid in SOQL", "pm_pattern", "=", "re", ".", "compile", "(", "r\"'[^\\\\']*(?:\\\\[\\\\'][^\\\\']*)*'\"", ")", "bs_pattern", "="...
Mark all quoted strings in the SOQL by '@' and get them as params, with respect to all escaped backslashes and quotes.
[ "Mark", "all", "quoted", "strings", "in", "the", "SOQL", "by" ]
python
train
tmontaigu/pylas
pylas/point/dims.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L271-L278
def min_file_version_for_point_format(point_format_id): """ Returns the minimum file version that supports the given point_format_id """ for version, point_formats in sorted(VERSION_TO_POINT_FMT.items()): if point_format_id in point_formats: return version else: raise errors.PointFormatNotSupported(point_format_id)
[ "def", "min_file_version_for_point_format", "(", "point_format_id", ")", ":", "for", "version", ",", "point_formats", "in", "sorted", "(", "VERSION_TO_POINT_FMT", ".", "items", "(", ")", ")", ":", "if", "point_format_id", "in", "point_formats", ":", "return", "ver...
Returns the minimum file version that supports the given point_format_id
[ "Returns", "the", "minimum", "file", "version", "that", "supports", "the", "given", "point_format_id" ]
python
test
apache/incubator-superset
superset/utils/dict_import_export.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dict_import_export.py#L66-L82
def import_from_dict(session, data, sync=[]): """Imports databases and druid clusters from dictionary""" if isinstance(data, dict): logging.info('Importing %d %s', len(data.get(DATABASES_KEY, [])), DATABASES_KEY) for database in data.get(DATABASES_KEY, []): Database.import_from_dict(session, database, sync=sync) logging.info('Importing %d %s', len(data.get(DRUID_CLUSTERS_KEY, [])), DRUID_CLUSTERS_KEY) for datasource in data.get(DRUID_CLUSTERS_KEY, []): DruidCluster.import_from_dict(session, datasource, sync=sync) session.commit() else: logging.info('Supplied object is not a dictionary.')
[ "def", "import_from_dict", "(", "session", ",", "data", ",", "sync", "=", "[", "]", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "logging", ".", "info", "(", "'Importing %d %s'", ",", "len", "(", "data", ".", "get", "(", "DATABAS...
Imports databases and druid clusters from dictionary
[ "Imports", "databases", "and", "druid", "clusters", "from", "dictionary" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/object_storage.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/object_storage.py#L66-L78
def delete_credential(self, identifier, credential_id=None): """Delete the object storage credential. :param int id: The object storage account identifier. :param int credential_id: The credential id to be deleted. """ credential = { 'id': credential_id } return self.client.call('SoftLayer_Network_Storage_Hub_Cleversafe_Account', 'credentialDelete', credential, id=identifier)
[ "def", "delete_credential", "(", "self", ",", "identifier", ",", "credential_id", "=", "None", ")", ":", "credential", "=", "{", "'id'", ":", "credential_id", "}", "return", "self", ".", "client", ".", "call", "(", "'SoftLayer_Network_Storage_Hub_Cleversafe_Accoun...
Delete the object storage credential. :param int id: The object storage account identifier. :param int credential_id: The credential id to be deleted.
[ "Delete", "the", "object", "storage", "credential", "." ]
python
train
razorpay/razorpay-python
razorpay/resources/payment.py
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/payment.py#L37-L50
def capture(self, payment_id, amount, data={}, **kwargs): """" Capture Payment for given Id Args: payment_id : Id for which payment object has to be retrieved Amount : Amount for which the payment has to be retrieved Returns: Payment dict after getting captured """ url = "{}/{}/capture".format(self.base_url, payment_id) data['amount'] = amount return self.post_url(url, data, **kwargs)
[ "def", "capture", "(", "self", ",", "payment_id", ",", "amount", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "url", "=", "\"{}/{}/capture\"", ".", "format", "(", "self", ".", "base_url", ",", "payment_id", ")", "data", "[", "'amoun...
Capture Payment for given Id Args: payment_id : Id for which payment object has to be retrieved Amount : Amount for which the payment has to be retrieved Returns: Payment dict after getting captured
[ "Capture", "Payment", "for", "given", "Id" ]
python
train
CyberZHG/keras-word-char-embd
keras_wc_embd/word_char_embd.py
https://github.com/CyberZHG/keras-word-char-embd/blob/cca6ddff01b6264dd0d12613bb9ed308e1367b8c/keras_wc_embd/word_char_embd.py#L201-L251
def get_dicts_generator(word_min_freq=4, char_min_freq=2, word_ignore_case=False, char_ignore_case=False): """Get word and character dictionaries from sentences. :param word_min_freq: The minimum frequency of a word. :param char_min_freq: The minimum frequency of a character. :param word_ignore_case: Word will be transformed to lower case before saving to dictionary. :param char_ignore_case: Character will be transformed to lower case before saving to dictionary. :return gen: A closure that accepts sentences and returns the dictionaries. """ word_count, char_count = {}, {} def get_dicts(sentence=None, return_dict=False): """Update and return dictionaries for each sentence. :param sentence: A list of strings representing the sentence. :param return_dict: Returns the dictionaries if it is True. :return word_dict, char_dict, max_word_len: """ if sentence is not None: for word in sentence: if not word: continue if word_ignore_case: word_key = word.lower() else: word_key = word word_count[word_key] = word_count.get(word_key, 0) + 1 for char in word: if char_ignore_case: char_key = char.lower() else: char_key = char char_count[char_key] = char_count.get(char_key, 0) + 1 if not return_dict: return None word_dict, char_dict = {'': 0, '<UNK>': 1}, {'': 0, '<UNK>': 1} max_word_len = 0 for word, count in word_count.items(): if count >= word_min_freq: word_dict[word] = len(word_dict) max_word_len = max(max_word_len, len(word)) for char, count in char_count.items(): if count >= char_min_freq: char_dict[char] = len(char_dict) return word_dict, char_dict, max_word_len return get_dicts
[ "def", "get_dicts_generator", "(", "word_min_freq", "=", "4", ",", "char_min_freq", "=", "2", ",", "word_ignore_case", "=", "False", ",", "char_ignore_case", "=", "False", ")", ":", "word_count", ",", "char_count", "=", "{", "}", ",", "{", "}", "def", "get...
Get word and character dictionaries from sentences. :param word_min_freq: The minimum frequency of a word. :param char_min_freq: The minimum frequency of a character. :param word_ignore_case: Word will be transformed to lower case before saving to dictionary. :param char_ignore_case: Character will be transformed to lower case before saving to dictionary. :return gen: A closure that accepts sentences and returns the dictionaries.
[ "Get", "word", "and", "character", "dictionaries", "from", "sentences", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/history.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/history.py#L257-L280
def search(self, pattern="*", raw=True, search_raw=True, output=False): """Search the database using unix glob-style matching (wildcards * and ?). Parameters ---------- pattern : str The wildcarded pattern to match when searching search_raw : bool If True, search the raw input, otherwise, the parsed input raw, output : bool See :meth:`get_range` Returns ------- Tuples as :meth:`get_range` """ tosearch = "source_raw" if search_raw else "source" if output: tosearch = "history." + tosearch self.writeout_cache() return self._run_sql("WHERE %s GLOB ?" % tosearch, (pattern,), raw=raw, output=output)
[ "def", "search", "(", "self", ",", "pattern", "=", "\"*\"", ",", "raw", "=", "True", ",", "search_raw", "=", "True", ",", "output", "=", "False", ")", ":", "tosearch", "=", "\"source_raw\"", "if", "search_raw", "else", "\"source\"", "if", "output", ":", ...
Search the database using unix glob-style matching (wildcards * and ?). Parameters ---------- pattern : str The wildcarded pattern to match when searching search_raw : bool If True, search the raw input, otherwise, the parsed input raw, output : bool See :meth:`get_range` Returns ------- Tuples as :meth:`get_range`
[ "Search", "the", "database", "using", "unix", "glob", "-", "style", "matching", "(", "wildcards", "*", "and", "?", ")", "." ]
python
test
CityOfZion/neo-python
neo/Network/NodeLeader.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Network/NodeLeader.py#L243-L293
def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None: """ Start connecting to the seed list. Args: seed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json` skip_seeds: skip connecting to seed list """ if not seed_list: seed_list = settings.SEED_LIST logger.debug("Starting up nodeleader") if not skip_seeds: logger.debug("Attempting to connect to seed list...") for bootstrap in seed_list: if not is_ip_address(bootstrap): host, port = bootstrap.split(':') bootstrap = f"{hostname_to_ip(host)}:{port}" addr = Address(bootstrap) self.KNOWN_ADDRS.append(addr) self.SetupConnection(addr) logger.debug("Starting up nodeleader: starting peer, mempool, and blockheight check loops") # check in on peers every 10 seconds self.start_peer_check_loop() self.start_memcheck_loop() self.start_blockheight_loop() if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running: class OneShotFactory(Factory): def __init__(self, leader): self.leader = leader def buildProtocol(self, addr): print(f"building new protocol for addr: {addr}") self.leader.AddKnownAddress(Address(f"{addr.host}:{addr.port}")) p = NeoNode(incoming_client=True) p.factory = self return p def listen_err(err): print(f"Failed start listening server for reason: {err.value}") def listen_ok(value): self.incoming_server_running = True logger.debug(f"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}") server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT) listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self)) listenport_deferred.addCallback(listen_ok) listenport_deferred.addErrback(listen_err)
[ "def", "Start", "(", "self", ",", "seed_list", ":", "List", "[", "str", "]", "=", "None", ",", "skip_seeds", ":", "bool", "=", "False", ")", "->", "None", ":", "if", "not", "seed_list", ":", "seed_list", "=", "settings", ".", "SEED_LIST", "logger", "...
Start connecting to the seed list. Args: seed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json` skip_seeds: skip connecting to seed list
[ "Start", "connecting", "to", "the", "seed", "list", "." ]
python
train
stevelittlefish/littlefish
littlefish/lfsmailer.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/lfsmailer.py#L69-L85
def parse_address(formatted_address): """ :param formatted_address: A string like "email@address.com" or "My Email <email@address.com>" :return: Tuple: (address, name) """ if email_regex.match(formatted_address): # Just a raw address return (formatted_address, None) match = formatted_address_regex.match(formatted_address) if match: (name, email) = match.group(1, 2) return email.strip(), name.strip() raise ValueError('"{}" is not a valid formatted address'.format(formatted_address))
[ "def", "parse_address", "(", "formatted_address", ")", ":", "if", "email_regex", ".", "match", "(", "formatted_address", ")", ":", "# Just a raw address", "return", "(", "formatted_address", ",", "None", ")", "match", "=", "formatted_address_regex", ".", "match", ...
:param formatted_address: A string like "email@address.com" or "My Email <email@address.com>" :return: Tuple: (address, name)
[ ":", "param", "formatted_address", ":", "A", "string", "like", "email" ]
python
test
benley/butcher
butcher/buildfile.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile.py#L116-L119
def crossref_paths(self): """Just like crossrefs, but all the targets are munged to :all.""" return set( [address.new(repo=x.repo, path=x.path) for x in self.crossrefs])
[ "def", "crossref_paths", "(", "self", ")", ":", "return", "set", "(", "[", "address", ".", "new", "(", "repo", "=", "x", ".", "repo", ",", "path", "=", "x", ".", "path", ")", "for", "x", "in", "self", ".", "crossrefs", "]", ")" ]
Just like crossrefs, but all the targets are munged to :all.
[ "Just", "like", "crossrefs", "but", "all", "the", "targets", "are", "munged", "to", ":", "all", "." ]
python
train
aaugustin/websockets
src/websockets/server.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/server.py#L436-L469
def select_subprotocol( self, client_subprotocols: Sequence[Subprotocol], server_subprotocols: Sequence[Subprotocol], ) -> Optional[Subprotocol]: """ Pick a subprotocol among those offered by the client. If several subprotocols are supported by the client and the server, the default implementation selects the preferred subprotocols by giving equal value to the priorities of the client and the server. If no subprotocols are supported by the client and the server, it proceeds without a subprotocol. This is unlikely to be the most useful implementation in practice, as many servers providing a subprotocol will require that the client uses that subprotocol. Such rules can be implemented in a subclass. This method may be overridden by passing a ``select_subprotocol`` argument to the :class:`WebSocketServerProtocol` constructor or the :func:`serve` function. """ if self._select_subprotocol is not None: return self._select_subprotocol(client_subprotocols, server_subprotocols) subprotocols = set(client_subprotocols) & set(server_subprotocols) if not subprotocols: return None priority = lambda p: ( client_subprotocols.index(p) + server_subprotocols.index(p) ) return sorted(subprotocols, key=priority)[0]
[ "def", "select_subprotocol", "(", "self", ",", "client_subprotocols", ":", "Sequence", "[", "Subprotocol", "]", ",", "server_subprotocols", ":", "Sequence", "[", "Subprotocol", "]", ",", ")", "->", "Optional", "[", "Subprotocol", "]", ":", "if", "self", ".", ...
Pick a subprotocol among those offered by the client. If several subprotocols are supported by the client and the server, the default implementation selects the preferred subprotocols by giving equal value to the priorities of the client and the server. If no subprotocols are supported by the client and the server, it proceeds without a subprotocol. This is unlikely to be the most useful implementation in practice, as many servers providing a subprotocol will require that the client uses that subprotocol. Such rules can be implemented in a subclass. This method may be overridden by passing a ``select_subprotocol`` argument to the :class:`WebSocketServerProtocol` constructor or the :func:`serve` function.
[ "Pick", "a", "subprotocol", "among", "those", "offered", "by", "the", "client", "." ]
python
train
pauleveritt/kaybee
kaybee/plugins/articles/handlers.py
https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/articles/handlers.py#L114-L140
def stamp_excerpt(kb_app: kb, sphinx_app: Sphinx, doctree: doctree): """ Walk the tree and extract excert into resource.excerpt """ # First, find out which resource this is. Won't be easy. resources = sphinx_app.env.resources confdir = sphinx_app.confdir source = PurePath(doctree.attributes['source']) # Get the relative path inside the docs dir, without .rst, then # get the resource docname = str(source.relative_to(confdir)).split('.rst')[0] resource = resources.get(docname) if resource: # Stamp the excerpt on the resource excerpt = getattr(resource.props, 'excerpt', False) auto_excerpt = getattr(resource.props, 'auto_excerpt', False) if excerpt: resource.excerpt = excerpt elif not auto_excerpt: resource.excerpt = None else: # Extract the excerpt based on the number of paragraphs # in auto_excerpt resource.excerpt = get_rst_excerpt(doctree, auto_excerpt)
[ "def", "stamp_excerpt", "(", "kb_app", ":", "kb", ",", "sphinx_app", ":", "Sphinx", ",", "doctree", ":", "doctree", ")", ":", "# First, find out which resource this is. Won't be easy.", "resources", "=", "sphinx_app", ".", "env", ".", "resources", "confdir", "=", ...
Walk the tree and extract excert into resource.excerpt
[ "Walk", "the", "tree", "and", "extract", "excert", "into", "resource", ".", "excerpt" ]
python
train
buildbot/buildbot
master/buildbot/steps/source/git.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/steps/source/git.py#L437-L447
def _fullCloneOrFallback(self): """Wrapper for _fullClone(). In the case of failure, if clobberOnFailure is set to True remove the build directory and try a full clone again. """ res = yield self._fullClone() if res != RC_SUCCESS: if not self.clobberOnFailure: raise buildstep.BuildStepFailed() res = yield self.clobber() return res
[ "def", "_fullCloneOrFallback", "(", "self", ")", ":", "res", "=", "yield", "self", ".", "_fullClone", "(", ")", "if", "res", "!=", "RC_SUCCESS", ":", "if", "not", "self", ".", "clobberOnFailure", ":", "raise", "buildstep", ".", "BuildStepFailed", "(", ")",...
Wrapper for _fullClone(). In the case of failure, if clobberOnFailure is set to True remove the build directory and try a full clone again.
[ "Wrapper", "for", "_fullClone", "()", ".", "In", "the", "case", "of", "failure", "if", "clobberOnFailure", "is", "set", "to", "True", "remove", "the", "build", "directory", "and", "try", "a", "full", "clone", "again", "." ]
python
train
projectshift/shift-schema
shiftschema/schema.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/schema.py#L278-L306
def validate(self, model=None, context=None): """ Validate model and return validation result object :param model: object or dict :param context: object, dict or None :return: shiftschema.result.Result """ # inject with settings result = Result(translator=self.translator, locale=self.locale) # validate state state_result = self.validate_state(model, context=context) result.merge(state_result) # validate simple properties props_result = self.validate_properties(model, context=context) result.merge(props_result) # validate nested entity properties entities_result = self.validate_entities(model, context=context) result.merge(entities_result) # validate collection properties collections_result = self.validate_collections(model, context=context) result.merge(collections_result) # and return return result
[ "def", "validate", "(", "self", ",", "model", "=", "None", ",", "context", "=", "None", ")", ":", "# inject with settings", "result", "=", "Result", "(", "translator", "=", "self", ".", "translator", ",", "locale", "=", "self", ".", "locale", ")", "# val...
Validate model and return validation result object :param model: object or dict :param context: object, dict or None :return: shiftschema.result.Result
[ "Validate", "model", "and", "return", "validation", "result", "object", ":", "param", "model", ":", "object", "or", "dict", ":", "param", "context", ":", "object", "dict", "or", "None", ":", "return", ":", "shiftschema", ".", "result", ".", "Result" ]
python
train
ejeschke/ginga
ginga/rv/plugins/ChangeHistory.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ChangeHistory.py#L238-L257
def remove_image_info_cb(self, gshell, channel, iminfo): """Delete entries related to deleted image.""" chname = channel.name if chname not in self.name_dict: return fileDict = self.name_dict[chname] name = iminfo.name if name not in fileDict: return del fileDict[name] self.logger.debug('{0} removed from ChangeHistory'.format(name)) if not self.gui_up: return False self.clear_selected_history() self.recreate_toc()
[ "def", "remove_image_info_cb", "(", "self", ",", "gshell", ",", "channel", ",", "iminfo", ")", ":", "chname", "=", "channel", ".", "name", "if", "chname", "not", "in", "self", ".", "name_dict", ":", "return", "fileDict", "=", "self", ".", "name_dict", "[...
Delete entries related to deleted image.
[ "Delete", "entries", "related", "to", "deleted", "image", "." ]
python
train
ktbyers/netmiko
netmiko/_textfsm/_clitable.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_clitable.py#L344-L360
def AddKeys(self, key_list): """Mark additional columns as being part of the superkey. Supplements the Keys already extracted from the FSM template. Useful when adding new columns to existing tables. Note: This will impact attempts to further 'extend' the table as the superkey must be common between tables for successful extension. Args: key_list: list of header entries to be included in the superkey. Raises: KeyError: If any entry in list is not a valid header entry. """ for keyname in key_list: if keyname not in self.header: raise KeyError("'%s'" % keyname) self._keys = self._keys.union(set(key_list))
[ "def", "AddKeys", "(", "self", ",", "key_list", ")", ":", "for", "keyname", "in", "key_list", ":", "if", "keyname", "not", "in", "self", ".", "header", ":", "raise", "KeyError", "(", "\"'%s'\"", "%", "keyname", ")", "self", ".", "_keys", "=", "self", ...
Mark additional columns as being part of the superkey. Supplements the Keys already extracted from the FSM template. Useful when adding new columns to existing tables. Note: This will impact attempts to further 'extend' the table as the superkey must be common between tables for successful extension. Args: key_list: list of header entries to be included in the superkey. Raises: KeyError: If any entry in list is not a valid header entry.
[ "Mark", "additional", "columns", "as", "being", "part", "of", "the", "superkey", ".", "Supplements", "the", "Keys", "already", "extracted", "from", "the", "FSM", "template", ".", "Useful", "when", "adding", "new", "columns", "to", "existing", "tables", ".", ...
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/security/security_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/security/security_client.py#L205-L223
def query_security_namespaces(self, security_namespace_id=None, local_only=None): """QuerySecurityNamespaces. List all security namespaces or just the specified namespace. :param str security_namespace_id: Security namespace identifier. :param bool local_only: If true, retrieve only local security namespaces. :rtype: [SecurityNamespaceDescription] """ route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') query_parameters = {} if local_only is not None: query_parameters['localOnly'] = self._serialize.query('local_only', local_only, 'bool') response = self._send(http_method='GET', location_id='ce7b9f95-fde9-4be8-a86d-83b366f0b87a', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[SecurityNamespaceDescription]', self._unwrap_collection(response))
[ "def", "query_security_namespaces", "(", "self", ",", "security_namespace_id", "=", "None", ",", "local_only", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "security_namespace_id", "is", "not", "None", ":", "route_values", "[", "'securityNamespaceId...
QuerySecurityNamespaces. List all security namespaces or just the specified namespace. :param str security_namespace_id: Security namespace identifier. :param bool local_only: If true, retrieve only local security namespaces. :rtype: [SecurityNamespaceDescription]
[ "QuerySecurityNamespaces", ".", "List", "all", "security", "namespaces", "or", "just", "the", "specified", "namespace", ".", ":", "param", "str", "security_namespace_id", ":", "Security", "namespace", "identifier", ".", ":", "param", "bool", "local_only", ":", "If...
python
train
theislab/scanpy
scanpy/plotting/_anndata.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_anndata.py#L1624-L1822
def matrixplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7, figsize=None, dendrogram=False, gene_symbols=None, var_group_positions=None, var_group_labels=None, var_group_rotation=None, layer=None, standard_scale=None, swap_axes=False, show=None, save=None, **kwds): """\ Creates a heatmap of the mean expression values per cluster of each var_names If groupby is not given, the matrixplot assumes that all data belongs to a single category. Parameters ---------- {common_plot_args} standard_scale : {{'var', 'group'}}, optional (default: None) Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. {show_save_ax} **kwds : keyword arguments Are passed to `matplotlib.pyplot.pcolor`. Returns ------- List of :class:`~matplotlib.axes.Axes` Examples -------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.pl.matrixplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'], ... 
groupby='bulk_labels', dendrogram=True) """ if use_raw is None and adata.raw is not None: use_raw = True if isinstance(var_names, str): var_names = [var_names] categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories, gene_symbols=gene_symbols, layer=layer) if groupby is None or len(categories) <= 1: # dendrogram can only be computed between groupby categories dendrogram = False mean_obs = obs_tidy.groupby(level=0).mean() if standard_scale == 'group': mean_obs = mean_obs.sub(mean_obs.min(1), axis=0) mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0) elif standard_scale == 'var': mean_obs -= mean_obs.min(0) mean_obs = (mean_obs / mean_obs.max(0)).fillna(0) elif standard_scale is None: pass else: logg.warn('Unknown type for standard_scale, ignored') if dendrogram: dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram, var_names=var_names, var_group_labels=var_group_labels, var_group_positions=var_group_positions) var_group_labels = dendro_data['var_group_labels'] var_group_positions = dendro_data['var_group_positions'] # reorder matrix if dendro_data['var_names_idx_ordered'] is not None: # reorder columns (usually genes) if needed. 
This only happens when # var_group_positions and var_group_labels is set mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']] # reorder rows (categories) to match the dendrogram order mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :] colorbar_width = 0.2 if not swap_axes: dendro_width = 0.8 if dendrogram else 0 if figsize is None: height = len(categories) * 0.2 + 1 # +1 for labels heatmap_width = len(var_names) * 0.32 width = heatmap_width + dendro_width + colorbar_width # +1.6 to account for the colorbar and + 1 to account for labels else: width, height = figsize heatmap_width = width - (dendro_width + colorbar_width) if var_group_positions is not None and len(var_group_positions) > 0: # add some space in case 'brackets' want to be plotted on top of the image height_ratios = [0.5, 10] height += 0.5 else: height_ratios = [0, 10.5] # define a layout of 2 rows x 3 columns # first row is for 'brackets' (if no brackets needed, the height of this row is zero) # second row is for main content. 
This second row # is divided into three axes: # first ax is for the main matrix figure # second ax is for the dendrogram # third ax is for the color bar legend fig = pl.figure(figsize=(width, height)) axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.02, hspace=0.04, width_ratios=[heatmap_width, dendro_width, colorbar_width], height_ratios=height_ratios) matrix_ax = fig.add_subplot(axs[1, 0]) y_ticks = np.arange(mean_obs.shape[0]) + 0.5 matrix_ax.set_yticks(y_ticks) matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])]) if dendrogram: dendro_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax) _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks) pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds) # invert y axis to show categories ordered from top to bottom matrix_ax.set_ylim(mean_obs.shape[0], 0) x_ticks = np.arange(mean_obs.shape[1]) + 0.5 matrix_ax.set_xticks(x_ticks) matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90) matrix_ax.tick_params(axis='both', labelsize='small') matrix_ax.grid(False) matrix_ax.set_xlim(-0.5, len(var_names) + 0.5) matrix_ax.set_ylabel(groupby) matrix_ax.set_xlim(0, mean_obs.shape[1]) # plot group legends on top of matrix_ax (if given) if var_group_positions is not None and len(var_group_positions) > 0: gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax) _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions, group_labels=var_group_labels, rotation=var_group_rotation, left_adjustment=0.2, right_adjustment=0.8) # plot colorbar _plot_colorbar(pc, fig, axs[1, 2]) else: dendro_height = 0.5 if dendrogram else 0 if var_group_positions is not None and len(var_group_positions) > 0: # add some space in case 'color blocks' want to be plotted on the right of the image vargroups_width = 0.4 else: vargroups_width = 0 if figsize is None: heatmap_height = len(var_names) * 0.2 height = dendro_height + 
heatmap_height + 1 # +1 for labels heatmap_width = len(categories) * 0.3 width = heatmap_width + vargroups_width + colorbar_width else: width, height = figsize heatmap_width = width - (vargroups_width + colorbar_width) heatmap_height = height - dendro_height # define a layout of 2 rows x 3 columns # first row is for 'dendrogram' (if no dendrogram is plotted, the height of this row is zero) # second row is for main content. This row # is divided into three axes: # first ax is for the main matrix figure # second ax is for the groupby categories (eg. brackets) # third ax is for the color bar legend fig = pl.figure(figsize=(width, height)) axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.05, hspace=0.005, width_ratios=[heatmap_width, vargroups_width, colorbar_width], height_ratios=[dendro_height, heatmap_height]) mean_obs = mean_obs.T matrix_ax = fig.add_subplot(axs[1, 0]) pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds) y_ticks = np.arange(mean_obs.shape[0]) + 0.5 matrix_ax.set_yticks(y_ticks) matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])]) x_ticks = np.arange(mean_obs.shape[1]) + 0.5 matrix_ax.set_xticks(x_ticks) matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90) matrix_ax.tick_params(axis='both', labelsize='small') matrix_ax.grid(False) matrix_ax.set_xlim(0, len(categories)) matrix_ax.set_xlabel(groupby) # invert y axis to show var_names ordered from top to bottom matrix_ax.set_ylim(mean_obs.shape[0], 0) if dendrogram: dendro_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax) _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=x_ticks, orientation='top') # plot group legends on top of matrix_ax (if given) if var_group_positions is not None and len(var_group_positions) > 0: gene_groups_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax) _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions, group_labels=var_group_labels, 
rotation=var_group_rotation, left_adjustment=0.2, right_adjustment=0.8, orientation='right') # plot colorbar _plot_colorbar(pc, fig, axs[1, 2]) utils.savefig_or_show('matrixplot', show=show, save=save) return axs
[ "def", "matrixplot", "(", "adata", ",", "var_names", ",", "groupby", "=", "None", ",", "use_raw", "=", "None", ",", "log", "=", "False", ",", "num_categories", "=", "7", ",", "figsize", "=", "None", ",", "dendrogram", "=", "False", ",", "gene_symbols", ...
\ Creates a heatmap of the mean expression values per cluster of each var_names If groupby is not given, the matrixplot assumes that all data belongs to a single category. Parameters ---------- {common_plot_args} standard_scale : {{'var', 'group'}}, optional (default: None) Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. {show_save_ax} **kwds : keyword arguments Are passed to `matplotlib.pyplot.pcolor`. Returns ------- List of :class:`~matplotlib.axes.Axes` Examples -------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.pl.matrixplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'], ... groupby='bulk_labels', dendrogram=True)
[ "\\", "Creates", "a", "heatmap", "of", "the", "mean", "expression", "values", "per", "cluster", "of", "each", "var_names", "If", "groupby", "is", "not", "given", "the", "matrixplot", "assumes", "that", "all", "data", "belongs", "to", "a", "single", "category...
python
train
novopl/peltak
src/peltak/extra/changelog/commands.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/changelog/commands.py#L24-L32
def changelog_cli(ctx): # type: () -> None """ Generate changelog from commit messages. """ if ctx.invoked_subcommand: return from peltak.core import shell from . import logic shell.cprint(logic.changelog())
[ "def", "changelog_cli", "(", "ctx", ")", ":", "# type: () -> None", "if", "ctx", ".", "invoked_subcommand", ":", "return", "from", "peltak", ".", "core", "import", "shell", "from", ".", "import", "logic", "shell", ".", "cprint", "(", "logic", ".", "changelog...
Generate changelog from commit messages.
[ "Generate", "changelog", "from", "commit", "messages", "." ]
python
train
chaosmail/python-fs
fs/fs.py
https://github.com/chaosmail/python-fs/blob/2567922ced9387e327e65f3244caff3b7af35684/fs/fs.py#L271-L276
def join(*args, **kwargs): """Join parts of a path together""" import os.path if _is_list(args[0]): return os.path.join(*args[0]) return os.path.join(*args, **kwargs)
[ "def", "join", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "os", ".", "path", "if", "_is_list", "(", "args", "[", "0", "]", ")", ":", "return", "os", ".", "path", ".", "join", "(", "*", "args", "[", "0", "]", ")", "return",...
Join parts of a path together
[ "Join", "parts", "of", "a", "path", "together" ]
python
train
basho/riak-python-client
riak/node.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/node.py#L46-L54
def incr(self, d): """ Increases the value by the argument. :param d: the value to increase by :type d: float """ with self.lock: self.p = self.value() + d
[ "def", "incr", "(", "self", ",", "d", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "p", "=", "self", ".", "value", "(", ")", "+", "d" ]
Increases the value by the argument. :param d: the value to increase by :type d: float
[ "Increases", "the", "value", "by", "the", "argument", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L17021-L17040
def find_host_network_interface_by_name(self, name): """Searches through all host network interfaces for an interface with the given @c name. The method returns an error if the given @c name does not correspond to any host network interface. in name of type str Name of the host network interface to search for. return network_interface of type :class:`IHostNetworkInterface` Found host network interface object. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") network_interface = self._call("findHostNetworkInterfaceByName", in_p=[name]) network_interface = IHostNetworkInterface(network_interface) return network_interface
[ "def", "find_host_network_interface_by_name", "(", "self", ",", "name", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"name can only be an instance of type basestring\"", ")", "network_interface", "=", "sel...
Searches through all host network interfaces for an interface with the given @c name. The method returns an error if the given @c name does not correspond to any host network interface. in name of type str Name of the host network interface to search for. return network_interface of type :class:`IHostNetworkInterface` Found host network interface object.
[ "Searches", "through", "all", "host", "network", "interfaces", "for", "an", "interface", "with", "the", "given", "@c", "name", ".", "The", "method", "returns", "an", "error", "if", "the", "given", "@c", "name", "does", "not", "correspond", "to", "any", "ho...
python
train
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L2955-L2985
def tocimxmlstr(self, indent=None, ignore_path=False): """ Return the CIM-XML representation of this CIM instance, as a :term:`unicode string`. *New in pywbem 0.9.* For the returned CIM-XML representation, see :meth:`~pywbem.CIMInstance.tocimxml`. Parameters: indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation of the object, as a :term:`unicode string`. """ xml_elem = self.tocimxml(ignore_path) return tocimxmlstr(xml_elem, indent)
[ "def", "tocimxmlstr", "(", "self", ",", "indent", "=", "None", ",", "ignore_path", "=", "False", ")", ":", "xml_elem", "=", "self", ".", "tocimxml", "(", "ignore_path", ")", "return", "tocimxmlstr", "(", "xml_elem", ",", "indent", ")" ]
Return the CIM-XML representation of this CIM instance, as a :term:`unicode string`. *New in pywbem 0.9.* For the returned CIM-XML representation, see :meth:`~pywbem.CIMInstance.tocimxml`. Parameters: indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation of the object, as a :term:`unicode string`.
[ "Return", "the", "CIM", "-", "XML", "representation", "of", "this", "CIM", "instance", "as", "a", ":", "term", ":", "unicode", "string", "." ]
python
train
ssato/python-anytemplate
anytemplate/utils.py
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/utils.py#L159-L169
def load_context(ctx_path, ctx_type, scm=None): """ :param ctx_path: context file path or '-' (read from stdin) :param ctx_type: context file type :param scm: JSON schema file in any formats anyconfig supports, to validate given context files """ if ctx_path == '-': return loads(sys.stdin.read(), ac_parser=ctx_type, ac_schema=scm) return load(ctx_path, ac_parser=ctx_type, ac_schema=scm)
[ "def", "load_context", "(", "ctx_path", ",", "ctx_type", ",", "scm", "=", "None", ")", ":", "if", "ctx_path", "==", "'-'", ":", "return", "loads", "(", "sys", ".", "stdin", ".", "read", "(", ")", ",", "ac_parser", "=", "ctx_type", ",", "ac_schema", "...
:param ctx_path: context file path or '-' (read from stdin) :param ctx_type: context file type :param scm: JSON schema file in any formats anyconfig supports, to validate given context files
[ ":", "param", "ctx_path", ":", "context", "file", "path", "or", "-", "(", "read", "from", "stdin", ")", ":", "param", "ctx_type", ":", "context", "file", "type", ":", "param", "scm", ":", "JSON", "schema", "file", "in", "any", "formats", "anyconfig", "...
python
train
benvanwerkhoven/kernel_tuner
kernel_tuner/strategies/firefly_algorithm.py
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/firefly_algorithm.py#L8-L90
def tune(runner, kernel_options, device_options, tuning_options): """ Find the best performing kernel configuration in the parameter space :params runner: A runner from kernel_tuner.runners :type runner: kernel_tuner.runner :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: dict :param device_options: A dictionary with all options for the device on which the kernel should be tuned. :type device_options: dict :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: dict :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains a information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict() """ results = [] cache = {} #scale variables in x because PSO works with velocities to visit different configurations tuning_options["scaling"] = True #using this instead of get_bounds because scaling is used bounds, _, _ = get_bounds_x0_eps(tuning_options) args = (kernel_options, tuning_options, runner, results, cache) num_particles = 20 maxiter = 100 #parameters needed by the Firefly Algorithm B0 = 1.0 gamma = 1.0 alpha = 0.20 best_time_global = 1e20 best_position_global = [] # init particle swarm swarm = [] for i in range(0, num_particles): swarm.append(Firefly(bounds, args)) # compute initial intensities for j in range(num_particles): swarm[j].compute_intensity(_cost_func) for c in range(maxiter): if tuning_options.verbose: print("start iteration ", c, "best time global", best_time_global) # compare all to all and compute attractiveness for i in range(num_particles): for j in range(num_particles): if swarm[i].intensity < swarm[j].intensity: dist = swarm[i].distance_to(swarm[j]) beta = B0 * np.exp(-gamma * dist * dist) swarm[i].move_towards(swarm[j], beta, alpha) swarm[i].compute_intensity(_cost_func) # update global best if needed, actually only used for printing if 
swarm[i].time <= best_time_global: best_position_global = swarm[i].position best_time_global = swarm[i].time swarm.sort(key=lambda x: x.time) if tuning_options.verbose: print('Final result:') print(best_position_global) print(best_time_global) return results, runner.dev.get_environment()
[ "def", "tune", "(", "runner", ",", "kernel_options", ",", "device_options", ",", "tuning_options", ")", ":", "results", "=", "[", "]", "cache", "=", "{", "}", "#scale variables in x because PSO works with velocities to visit different configurations", "tuning_options", "[...
Find the best performing kernel configuration in the parameter space :params runner: A runner from kernel_tuner.runners :type runner: kernel_tuner.runner :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: dict :param device_options: A dictionary with all options for the device on which the kernel should be tuned. :type device_options: dict :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: dict :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains a information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict()
[ "Find", "the", "best", "performing", "kernel", "configuration", "in", "the", "parameter", "space" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/campbell_bozorgnia_2008.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_bozorgnia_2008.py#L323-L338
def _compute_intra_event_std(self, C, vs30, pga1100, sigma_pga): """ Returns the intra-event standard deviation at the site, as defined in equation 15, page 147 """ # Get intra-event standard deviation at the base of the site profile sig_lnyb = np.sqrt(C['s_lny'] ** 2. - C['s_lnAF'] ** 2.) sig_lnab = np.sqrt(sigma_pga ** 2. - C['s_lnAF'] ** 2.) # Get linearised relationship between f_site and ln PGA alpha = self._compute_intra_event_alpha(C, vs30, pga1100) return np.sqrt( (sig_lnyb ** 2.) + (C['s_lnAF'] ** 2.) + ((alpha ** 2.) * (sig_lnab ** 2.)) + (2.0 * alpha * C['rho'] * sig_lnyb * sig_lnab))
[ "def", "_compute_intra_event_std", "(", "self", ",", "C", ",", "vs30", ",", "pga1100", ",", "sigma_pga", ")", ":", "# Get intra-event standard deviation at the base of the site profile", "sig_lnyb", "=", "np", ".", "sqrt", "(", "C", "[", "'s_lny'", "]", "**", "2."...
Returns the intra-event standard deviation at the site, as defined in equation 15, page 147
[ "Returns", "the", "intra", "-", "event", "standard", "deviation", "at", "the", "site", "as", "defined", "in", "equation", "15", "page", "147" ]
python
train
openid/python-openid
openid/server/server.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/server.py#L1234-L1259
def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'): """Make a new association. @param dumb: Is this association for a dumb-mode transaction? @type dumb: bool @param assoc_type: The type of association to create. Currently there is only one type defined, C{HMAC-SHA1}. @type assoc_type: str @returns: the new association. @returntype: L{openid.association.Association} """ secret = cryptutil.getBytes(getSecretSize(assoc_type)) uniq = oidutil.toBase64(cryptutil.getBytes(4)) handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq) assoc = Association.fromExpiresIn( self.SECRET_LIFETIME, handle, secret, assoc_type) if dumb: key = self._dumb_key else: key = self._normal_key self.store.storeAssociation(key, assoc) return assoc
[ "def", "createAssociation", "(", "self", ",", "dumb", "=", "True", ",", "assoc_type", "=", "'HMAC-SHA1'", ")", ":", "secret", "=", "cryptutil", ".", "getBytes", "(", "getSecretSize", "(", "assoc_type", ")", ")", "uniq", "=", "oidutil", ".", "toBase64", "("...
Make a new association. @param dumb: Is this association for a dumb-mode transaction? @type dumb: bool @param assoc_type: The type of association to create. Currently there is only one type defined, C{HMAC-SHA1}. @type assoc_type: str @returns: the new association. @returntype: L{openid.association.Association}
[ "Make", "a", "new", "association", "." ]
python
train
nschloe/accupy
accupy/sums.py
https://github.com/nschloe/accupy/blob/63a031cab7f4d3b9ba1073f9328c10c1862d1c4d/accupy/sums.py#L10-L30
def knuth_sum(a, b): """Error-free transformation of the sum of two floating point numbers according to D.E. Knuth. The Art of Computer Programming: Seminumerical Algorithms, volume 2. Addison Wesley, Reading, Massachusetts, second edition, 1981. The underlying problem is that the exact sum a+b of two floating point number a and b is not necessarily a floating point number; for example if you add a very large and a very small number. It is however known that the difference between the best floating point approximation of a+b and the exact a+b is again a floating point number. This routine returns the sum and the error. Algorithm 3.1 in <https://doi.org/10.1137/030601818>. """ x = a + b z = x - a y = (a - (x - z)) + (b - z) return x, y
[ "def", "knuth_sum", "(", "a", ",", "b", ")", ":", "x", "=", "a", "+", "b", "z", "=", "x", "-", "a", "y", "=", "(", "a", "-", "(", "x", "-", "z", ")", ")", "+", "(", "b", "-", "z", ")", "return", "x", ",", "y" ]
Error-free transformation of the sum of two floating point numbers according to D.E. Knuth. The Art of Computer Programming: Seminumerical Algorithms, volume 2. Addison Wesley, Reading, Massachusetts, second edition, 1981. The underlying problem is that the exact sum a+b of two floating point number a and b is not necessarily a floating point number; for example if you add a very large and a very small number. It is however known that the difference between the best floating point approximation of a+b and the exact a+b is again a floating point number. This routine returns the sum and the error. Algorithm 3.1 in <https://doi.org/10.1137/030601818>.
[ "Error", "-", "free", "transformation", "of", "the", "sum", "of", "two", "floating", "point", "numbers", "according", "to" ]
python
train
artisanofcode/python-broadway
broadway/app.py
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/broadway/app.py#L128-L146
def add_extension(self, extension): """ Specify a broadway extension to initialise .. code-block:: python factory = Factory() factory.add_extension('broadway_sqlalchemy') :param extension: import path to extension :type extension: str """ instance = werkzeug.utils.import_string(extension) if hasattr(instance, 'register'): instance.register(self) self._extensions.append(instance)
[ "def", "add_extension", "(", "self", ",", "extension", ")", ":", "instance", "=", "werkzeug", ".", "utils", ".", "import_string", "(", "extension", ")", "if", "hasattr", "(", "instance", ",", "'register'", ")", ":", "instance", ".", "register", "(", "self"...
Specify a broadway extension to initialise .. code-block:: python factory = Factory() factory.add_extension('broadway_sqlalchemy') :param extension: import path to extension :type extension: str
[ "Specify", "a", "broadway", "extension", "to", "initialise" ]
python
train
ryukinix/decorating
decorating/stream.py
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L148-L155
def write(self, message, flush=True): if isinstance(message, bytes): # pragma: no cover message = message.decode('utf-8') """A Writting like write method, delayed at each char""" for char in message: time.sleep(self.delay * (4 if char == '\n' else 1)) super(Writting, self).write(char, flush)
[ "def", "write", "(", "self", ",", "message", ",", "flush", "=", "True", ")", ":", "if", "isinstance", "(", "message", ",", "bytes", ")", ":", "# pragma: no cover", "message", "=", "message", ".", "decode", "(", "'utf-8'", ")", "for", "char", "in", "mes...
A Writting like write method, delayed at each char
[ "A", "Writting", "like", "write", "method", "delayed", "at", "each", "char" ]
python
train
mikedh/trimesh
trimesh/scene/cameras.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/cameras.py#L256-L270
def fov(self): """ Get the field of view in degrees. Returns ------------- fov : (2,) float XY field of view in degrees """ if self._fov is None: fov = [2.0 * np.degrees(np.arctan((px / 2.0) / f)) for px, f in zip(self._resolution, self._focal)] fov = np.asanyarray(fov, dtype=np.float64) self._fov = fov return self._fov
[ "def", "fov", "(", "self", ")", ":", "if", "self", ".", "_fov", "is", "None", ":", "fov", "=", "[", "2.0", "*", "np", ".", "degrees", "(", "np", ".", "arctan", "(", "(", "px", "/", "2.0", ")", "/", "f", ")", ")", "for", "px", ",", "f", "i...
Get the field of view in degrees. Returns ------------- fov : (2,) float XY field of view in degrees
[ "Get", "the", "field", "of", "view", "in", "degrees", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/execution/execution_engine.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/execution/execution_engine.py#L164-L183
def join(self, timeout=None): """Blocking wait for the execution to finish :param float timeout: Maximum time to wait or None for infinitely :return: True if the execution finished, False if no state machine was started or a timeout occurred :rtype: bool """ if self.__wait_for_finishing_thread: if not timeout: # signal handlers won't work if timeout is None and the thread is joined while True: self.__wait_for_finishing_thread.join(0.5) if not self.__wait_for_finishing_thread.isAlive(): break else: self.__wait_for_finishing_thread.join(timeout) return not self.__wait_for_finishing_thread.is_alive() else: logger.warning("Cannot join as state machine was not started yet.") return False
[ "def", "join", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "__wait_for_finishing_thread", ":", "if", "not", "timeout", ":", "# signal handlers won't work if timeout is None and the thread is joined", "while", "True", ":", "self", ".", "__w...
Blocking wait for the execution to finish :param float timeout: Maximum time to wait or None for infinitely :return: True if the execution finished, False if no state machine was started or a timeout occurred :rtype: bool
[ "Blocking", "wait", "for", "the", "execution", "to", "finish" ]
python
train
Legobot/Legobot
Legobot/Connectors/Slack.py
https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/Slack.py#L75-L93
def on_message(self, event): '''Runs when a message event is received Args: event: RTM API event. Returns: Legobot.messge ''' metadata = self._parse_metadata(event) message = Message(text=metadata['text'], metadata=metadata).__dict__ if message.get('text'): message['text'] = self.find_and_replace_userids(message['text']) message['text'] = self.find_and_replace_channel_refs( message['text'] ) return message
[ "def", "on_message", "(", "self", ",", "event", ")", ":", "metadata", "=", "self", ".", "_parse_metadata", "(", "event", ")", "message", "=", "Message", "(", "text", "=", "metadata", "[", "'text'", "]", ",", "metadata", "=", "metadata", ")", ".", "__di...
Runs when a message event is received Args: event: RTM API event. Returns: Legobot.messge
[ "Runs", "when", "a", "message", "event", "is", "received" ]
python
train
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L613-L616
def create_checkout_order(self, checkout_id, **params): """https://developers.coinbase.com/api/v2#create-a-new-order-for-a-checkout""" response = self._post('v2', 'checkouts', checkout_id, 'orders', data=params) return self._make_api_object(response, Order)
[ "def", "create_checkout_order", "(", "self", ",", "checkout_id", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_post", "(", "'v2'", ",", "'checkouts'", ",", "checkout_id", ",", "'orders'", ",", "data", "=", "params", ")", "return", "s...
https://developers.coinbase.com/api/v2#create-a-new-order-for-a-checkout
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#create", "-", "a", "-", "new", "-", "order", "-", "for", "-", "a", "-", "checkout" ]
python
train
CSchoel/nolds
nolds/measures.py
https://github.com/CSchoel/nolds/blob/8a5ecc472d67ac08b571bd68967287668ca9058e/nolds/measures.py#L345-L370
def lyap_e_len(**kwargs): """ Helper function that calculates the minimum number of data points required to use lyap_e. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb and min_tsep) Returns: minimum number of data points required to call lyap_e with the given parameters """ m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1) # minimum length required to find single orbit vector min_len = kwargs['emb_dim'] # we need to follow each starting point of an orbit vector for m more steps min_len += m # we need min_tsep * 2 + 1 orbit vectors to find neighbors for each min_len += kwargs['min_tsep'] * 2 # we need at least min_nb neighbors for each orbit vector min_len += kwargs['min_nb'] return min_len
[ "def", "lyap_e_len", "(", "*", "*", "kwargs", ")", ":", "m", "=", "(", "kwargs", "[", "'emb_dim'", "]", "-", "1", ")", "//", "(", "kwargs", "[", "'matrix_dim'", "]", "-", "1", ")", "# minimum length required to find single orbit vector", "min_len", "=", "k...
Helper function that calculates the minimum number of data points required to use lyap_e. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb and min_tsep) Returns: minimum number of data points required to call lyap_e with the given parameters
[ "Helper", "function", "that", "calculates", "the", "minimum", "number", "of", "data", "points", "required", "to", "use", "lyap_e", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/Ethnicity/find_outliers.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/Ethnicity/find_outliers.py#L34-L117
def main(argString=None): """The main function. :param argString: the options. :type argString: list of strings These are the steps of the modules: 1. Prints the options. 2. Reads the population file (:py:func:`read_population_file`). 3. Reads the ``mds`` file (:py:func:`read_mds_file`). 4. Computes the three reference population clusters' centers (:py:func:`find_ref_centers`). 5. Computes three clusters according to the reference population clusters' centers, and finds the outliers of a given reference population (:py:func:`find_outliers`). This steps also produce three different plots. 6. Writes outliers in a file (``prefix.outliers``). """ # Getting and checking the options args = parseArgs(argString) checkArgs(args) logger.info("Options used:") for key, value in vars(args).iteritems(): logger.info(" --{} {}".format(key.replace("_", "-"), value)) # Reads the population file logger.info("Reading population file") populations = read_population_file(args.population_file) # Reads the MDS file logger.info("Reading MDS file") mds = read_mds_file(args.mds, args.xaxis, args.yaxis, populations) # Finds the population centers logger.info("Finding reference population centers") centers, center_info = find_ref_centers(mds) # Computes three clusters using KMeans and the reference cluster centers logger.info("Finding outliers") outliers = find_outliers(mds, centers, center_info, args.outliers_of, args) logger.info(" - There are {} outliers for the {} population".format( len(outliers), args.outliers_of, )) # Printing the outlier file try: with open(args.out + ".outliers", 'w') as output_file: for sample_id in outliers: print >>output_file, "\t".join(sample_id) except IOError: msg = "{}: can't write file".format(args.out + ".outliers") raise ProgramError(msg) # Printing the outlier population file try: with open(args.out + ".population_file_outliers", "w") as output_file: for sample_id, population in populations.iteritems(): if sample_id in outliers: population = "OUTLIER" 
print >>output_file, "\t".join(list(sample_id) + [population]) except IOError: msg = "{}: can't write file".format( args.out + ".population_file_outliers", ) raise ProgramError(msg) # If there is a summary file in the working directory (for LaTeX), we want # to modify it, because it means that this script is run after the pipeline # (to modify the multiplier, for example). if args.overwrite_tex: summary_file = glob(os.path.join(os.getcwd(), "*.summary.tex")) if len(summary_file) == 0: logger.warning("No TeX summary file found") if len(summary_file) > 1: raise ProgramError("More than one TeX summary file found") summary_file = summary_file[0] # Overwriting overwrite_tex(summary_file, len(outliers), args)
[ "def", "main", "(", "argString", "=", "None", ")", ":", "# Getting and checking the options", "args", "=", "parseArgs", "(", "argString", ")", "checkArgs", "(", "args", ")", "logger", ".", "info", "(", "\"Options used:\"", ")", "for", "key", ",", "value", "i...
The main function. :param argString: the options. :type argString: list of strings These are the steps of the modules: 1. Prints the options. 2. Reads the population file (:py:func:`read_population_file`). 3. Reads the ``mds`` file (:py:func:`read_mds_file`). 4. Computes the three reference population clusters' centers (:py:func:`find_ref_centers`). 5. Computes three clusters according to the reference population clusters' centers, and finds the outliers of a given reference population (:py:func:`find_outliers`). This steps also produce three different plots. 6. Writes outliers in a file (``prefix.outliers``).
[ "The", "main", "function", "." ]
python
train
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1704-L1726
def get_repository_search_session(self): """Gets the repository search session. return: (osid.repository.RepositorySearchSession) - a RepositorySearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_search() is false compliance: optional - This method must be implemented if supports_repository_search() is true. """ if not self.supports_repository_search(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.RepositorySearchSession(proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_repository_search_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_repository_search", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# O...
Gets the repository search session. return: (osid.repository.RepositorySearchSession) - a RepositorySearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_search() is false compliance: optional - This method must be implemented if supports_repository_search() is true.
[ "Gets", "the", "repository", "search", "session", "." ]
python
train
TUNE-Archive/freight_forwarder
freight_forwarder/container_ship.py
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container_ship.py#L663-L679
def _update_container_host_config(self, service): """ :param service: :return None: """ if not isinstance(service, Service): raise TypeError("service must be an instance of Service") if service.dependencies: self._load_dependency_containers(service) if service.host_config.links: self._update_links(service) if service.host_config.volumes_from: self._update_volumes_from(service)
[ "def", "_update_container_host_config", "(", "self", ",", "service", ")", ":", "if", "not", "isinstance", "(", "service", ",", "Service", ")", ":", "raise", "TypeError", "(", "\"service must be an instance of Service\"", ")", "if", "service", ".", "dependencies", ...
:param service: :return None:
[ ":", "param", "service", ":", ":", "return", "None", ":" ]
python
train
IBM-Cloud/gp-python-client
gpclient/gpclient.py
https://github.com/IBM-Cloud/gp-python-client/blob/082c6cdc250fb61bea99cba8ac3ee855ee77a410/gpclient/gpclient.py#L387-L392
def __has_language(self, bundleId, languageId): """Returns ``True`` if the bundle has the language, ``False`` otherwise """ return True if self.__get_language_data(bundleId=bundleId, languageId=languageId) \ else False
[ "def", "__has_language", "(", "self", ",", "bundleId", ",", "languageId", ")", ":", "return", "True", "if", "self", ".", "__get_language_data", "(", "bundleId", "=", "bundleId", ",", "languageId", "=", "languageId", ")", "else", "False" ]
Returns ``True`` if the bundle has the language, ``False`` otherwise
[ "Returns", "True", "if", "the", "bundle", "has", "the", "language", "False", "otherwise" ]
python
train
pybel/pybel
src/pybel/struct/mutation/induction/random_subgraph.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/mutation/induction/random_subgraph.py#L168-L216
def get_random_subgraph(graph, number_edges=None, number_seed_edges=None, seed=None, invert_degrees=None): """Generate a random subgraph based on weighted random walks from random seed edges. :type graph: pybel.BELGraph graph :param Optional[int] number_edges: Maximum number of edges. Defaults to :data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250). :param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5). :param Optional[int] seed: A seed for the random state :param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true. :rtype: pybel.BELGraph """ if number_edges is None: number_edges = SAMPLE_RANDOM_EDGE_COUNT if number_seed_edges is None: number_seed_edges = SAMPLE_RANDOM_EDGE_SEED_COUNT if seed is not None: random.seed(seed) # Check if graph will sample full graph, and just return it if it would if graph.number_of_edges() <= number_edges: log.info('sampled full graph') return graph.copy() log.debug('getting random sub-graph with %d seed edges, %d final edges, and seed=%s', number_seed_edges, number_edges, seed) # Get initial graph with `number_seed_edges` edges result = get_graph_with_random_edges(graph, number_seed_edges) number_edges_remaining = number_edges - result.number_of_edges() _helper( result, graph, number_edges_remaining, node_blacklist=set(), # This is the set of nodes that should no longer be chosen to grow from invert_degrees=invert_degrees, ) log.debug('removing isolated nodes') remove_isolated_nodes(result) # update metadata update_node_helper(graph, result) update_metadata(graph, result) return result
[ "def", "get_random_subgraph", "(", "graph", ",", "number_edges", "=", "None", ",", "number_seed_edges", "=", "None", ",", "seed", "=", "None", ",", "invert_degrees", "=", "None", ")", ":", "if", "number_edges", "is", "None", ":", "number_edges", "=", "SAMPLE...
Generate a random subgraph based on weighted random walks from random seed edges. :type graph: pybel.BELGraph graph :param Optional[int] number_edges: Maximum number of edges. Defaults to :data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250). :param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5). :param Optional[int] seed: A seed for the random state :param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true. :rtype: pybel.BELGraph
[ "Generate", "a", "random", "subgraph", "based", "on", "weighted", "random", "walks", "from", "random", "seed", "edges", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/interface/ethernet/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/interface/ethernet/__init__.py#L768-L789
def _set_vepa(self, v, load=False): """ Setter method for vepa, mapped from YANG variable /interface/ethernet/vepa (container) If this variable is read-only (config: false) in the source YANG file, then _set_vepa is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vepa() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vepa.vepa, is_container='container', presence=False, yang_name="vepa", rest_name="vepa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable vepa to support U-turn of the traffic on the \nselected interface', u'hidden': u'foscmd'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vepa must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vepa.vepa, is_container='container', presence=False, yang_name="vepa", rest_name="vepa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable vepa to support U-turn of the traffic on the \nselected interface', u'hidden': u'foscmd'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""", }) self.__vepa = t if hasattr(self, '_set'): self._set()
[ "def", "_set_vepa", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for vepa, mapped from YANG variable /interface/ethernet/vepa (container) If this variable is read-only (config: false) in the source YANG file, then _set_vepa is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vepa() directly.
[ "Setter", "method", "for", "vepa", "mapped", "from", "YANG", "variable", "/", "interface", "/", "ethernet", "/", "vepa", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", ...
python
train
inasafe/inasafe
safe/definitions/minimum_needs.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/minimum_needs.py#L47-L92
def _initializes_minimum_needs_fields(): """Initialize minimum needs fields. Minimum needs definitions are taken from currently used profile. """ needs_profile = NeedsProfile() needs_profile.load() fields = [] needs_parameters = needs_profile.get_needs_parameters() for need_parameter in needs_parameters: if isinstance(need_parameter, ResourceParameter): format_args = { 'namespace': minimum_needs_namespace, 'key': _normalize_field_name(need_parameter.name), 'name': need_parameter.name, 'field_name': _normalize_field_name(need_parameter.name), } key = '{namespace}__{key}_count_field'.format(**format_args) name = '{name}'.format(**format_args) field_name = '{namespace}__{field_name}'.format(**format_args) field_type = QVariant.LongLong # See issue #4039 length = 11 # See issue #4039 precision = 0 absolute = True replace_null = False description = need_parameter.description field_definition = { 'key': key, 'name': name, 'field_name': field_name, 'type': field_type, 'length': length, 'precision': precision, 'absolute': absolute, 'description': description, 'replace_null': replace_null, 'unit_abbreviation': need_parameter.unit.abbreviation, # Link to need_parameter 'need_parameter': need_parameter } fields.append(field_definition) return fields
[ "def", "_initializes_minimum_needs_fields", "(", ")", ":", "needs_profile", "=", "NeedsProfile", "(", ")", "needs_profile", ".", "load", "(", ")", "fields", "=", "[", "]", "needs_parameters", "=", "needs_profile", ".", "get_needs_parameters", "(", ")", "for", "n...
Initialize minimum needs fields. Minimum needs definitions are taken from currently used profile.
[ "Initialize", "minimum", "needs", "fields", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/emit_match.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/emit_match.py#L21-L50
def _first_step_to_match(match_step): """Transform the very first MATCH step into a MATCH query string.""" parts = [] if match_step.root_block is not None: if not isinstance(match_step.root_block, QueryRoot): raise AssertionError(u'Expected None or QueryRoot root block, received: ' u'{} {}'.format(match_step.root_block, match_step)) match_step.root_block.validate() start_class = get_only_element_from_collection(match_step.root_block.start_class) parts.append(u'class: %s' % (start_class,)) # MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'. if match_step.coerce_type_block is not None: raise AssertionError(u'Invalid MATCH step: {}'.format(match_step)) if match_step.where_block: match_step.where_block.validate() parts.append(u'where: (%s)' % (match_step.where_block.predicate.to_match(),)) if match_step.as_block is None: raise AssertionError(u'Found a MATCH step without a corresponding Location. ' u'This should never happen: {}'.format(match_step)) else: match_step.as_block.validate() parts.append(u'as: %s' % (_get_vertex_location_name(match_step.as_block.location),)) return u'{{ %s }}' % (u', '.join(parts),)
[ "def", "_first_step_to_match", "(", "match_step", ")", ":", "parts", "=", "[", "]", "if", "match_step", ".", "root_block", "is", "not", "None", ":", "if", "not", "isinstance", "(", "match_step", ".", "root_block", ",", "QueryRoot", ")", ":", "raise", "Asse...
Transform the very first MATCH step into a MATCH query string.
[ "Transform", "the", "very", "first", "MATCH", "step", "into", "a", "MATCH", "query", "string", "." ]
python
train
fossasia/knittingpattern
knittingpattern/Parser.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Parser.py#L164-L172
def instruction_in_row(self, row, specification): """Parse an instruction. :param row: the row of the instruction :param specification: the specification of the instruction :return: the instruction in the row """ whole_instruction_ = self._as_instruction(specification) return self._spec.new_instruction_in_row(row, whole_instruction_)
[ "def", "instruction_in_row", "(", "self", ",", "row", ",", "specification", ")", ":", "whole_instruction_", "=", "self", ".", "_as_instruction", "(", "specification", ")", "return", "self", ".", "_spec", ".", "new_instruction_in_row", "(", "row", ",", "whole_ins...
Parse an instruction. :param row: the row of the instruction :param specification: the specification of the instruction :return: the instruction in the row
[ "Parse", "an", "instruction", "." ]
python
valid
chaoss/grimoirelab-cereslib
cereslib/dfutils/filter.py
https://github.com/chaoss/grimoirelab-cereslib/blob/5110e6ca490a4f24bec3124286ebf51fd4e08bdd/cereslib/dfutils/filter.py#L54-L75
def filter_(self, columns, value): """ This method filter some of the rows where the 'value' is found in each of the 'columns'. :param column: list of strings :param value: any type :returns: filtered dataframe :rtype: pandas.DataFrame """ for column in columns: if column not in self.data.columns: raise ValueError("Column %s not in DataFrame columns: %s" % (column, list(self.data))) for column in columns: # Filtering on empty data series doesn't make sense at all and also would raise an error column_len = len(self.data[column]) if column_len > 0 and column_len != self.data[column].isnull().sum(): self.data = self.data[self.data[column] != value] return self.data
[ "def", "filter_", "(", "self", ",", "columns", ",", "value", ")", ":", "for", "column", "in", "columns", ":", "if", "column", "not", "in", "self", ".", "data", ".", "columns", ":", "raise", "ValueError", "(", "\"Column %s not in DataFrame columns: %s\"", "%"...
This method filter some of the rows where the 'value' is found in each of the 'columns'. :param column: list of strings :param value: any type :returns: filtered dataframe :rtype: pandas.DataFrame
[ "This", "method", "filter", "some", "of", "the", "rows", "where", "the", "value", "is", "found", "in", "each", "of", "the", "columns", "." ]
python
train
bwesterb/mirte
src/mirteFile.py
https://github.com/bwesterb/mirte/blob/c58db8c993cd15ffdc64b52703cd466213913200/src/mirteFile.py#L52-L92
def module_definition_from_mirteFile_dict(man, d): """ Creates a ModuleDefinition instance from the dictionary <d> from a mirte-file for the Manager instance <man>. """ m = ModuleDefinition() if 'inherits' not in d: d['inherits'] = list() if 'settings' not in d: d['settings'] = dict() if 'implementedBy' in d: m.implementedBy = d['implementedBy'] m.inherits = set(d['inherits']) for p in d['inherits']: if p not in man.modules: raise ValueError("No such module %s" % p) m.deps.update(man.modules[p].deps) m.vsettings.update(man.modules[p].vsettings) m.inherits.update(man.modules[p].inherits) m.run = m.run or man.modules[p].run if 'run' in d: m.run = d['run'] if len(m.inherits) == 0: m.inherits = set(['module']) for k, v in six.iteritems(d['settings']): if 'type' not in v: if k not in m.vsettings: raise ValueError("No such existing vsetting %s" % k) if 'default' in v: m.vsettings[k] = copy.copy(m.vsettings[k]) m.vsettings[k].default = v['default'] continue if v['type'] in man.modules: m.deps[k] = DepDefinition(v['type'], v.get('allownull', False)) elif v['type'] in man.valueTypes: m.vsettings[k] = VSettingDefinition( v['type'], (man.valueTypes[v['type']](v['default']) if 'default' in v else None) ) else: raise ValueError("No such module or valuetype %s" % v) return m
[ "def", "module_definition_from_mirteFile_dict", "(", "man", ",", "d", ")", ":", "m", "=", "ModuleDefinition", "(", ")", "if", "'inherits'", "not", "in", "d", ":", "d", "[", "'inherits'", "]", "=", "list", "(", ")", "if", "'settings'", "not", "in", "d", ...
Creates a ModuleDefinition instance from the dictionary <d> from a mirte-file for the Manager instance <man>.
[ "Creates", "a", "ModuleDefinition", "instance", "from", "the", "dictionary", "<d", ">", "from", "a", "mirte", "-", "file", "for", "the", "Manager", "instance", "<man", ">", "." ]
python
train
ttinoco/OPTALG
optalg/opt_solver/opt_solver.py
https://github.com/ttinoco/OPTALG/blob/d4f141292f281eea4faa71473258139e7f433001/optalg/opt_solver/opt_solver.py#L173-L236
def line_search(self,x,p,F,GradF,func,smax=np.inf,maxiter=40): """ Finds steplength along search direction p that satisfies the strong Wolfe conditions. Parameters ---------- x : current point (ndarray) p : search direction (ndarray) F : function value at `x` (float) GradF : gradient of function at `x` (ndarray) func : function of `x` that returns function object with attributes `F` and `GradF` (function) smax : maximum allowed steplength (float) Returns ------- s : stephlength that satisfies the Wolfe conditions (float). """ # Parameters of line search c1 = 1e-4 c2 = 5e-1 # Initialize lower bound, upper bound and step l = 0. if 1. < smax: s = 1. else: s = smax u = np.NaN phi = F dphi = np.dot(GradF,p) # Check that p is descent direction if dphi >= 0: raise OptSolverError_BadSearchDir(self) # Bisection for i in range(0,maxiter): xsp = x+s*p fdata = func(xsp) phis = fdata.F dphis = np.dot(fdata.GradF,p) if phis > phi + c1*s*dphi: u = s elif dphis > 0 and dphis > -c2*dphi: u = s elif dphis < 0 and -dphis > -c2*dphi: l = s if s >= smax: return s,fdata else: return s,fdata if np.isnan(u): s = np.min([2.*s,smax]) else: s = (l + u)/2. raise OptSolverError_LineSearch(self)
[ "def", "line_search", "(", "self", ",", "x", ",", "p", ",", "F", ",", "GradF", ",", "func", ",", "smax", "=", "np", ".", "inf", ",", "maxiter", "=", "40", ")", ":", "# Parameters of line search", "c1", "=", "1e-4", "c2", "=", "5e-1", "# Initialize lo...
Finds steplength along search direction p that satisfies the strong Wolfe conditions. Parameters ---------- x : current point (ndarray) p : search direction (ndarray) F : function value at `x` (float) GradF : gradient of function at `x` (ndarray) func : function of `x` that returns function object with attributes `F` and `GradF` (function) smax : maximum allowed steplength (float) Returns ------- s : stephlength that satisfies the Wolfe conditions (float).
[ "Finds", "steplength", "along", "search", "direction", "p", "that", "satisfies", "the", "strong", "Wolfe", "conditions", ".", "Parameters", "----------", "x", ":", "current", "point", "(", "ndarray", ")", "p", ":", "search", "direction", "(", "ndarray", ")", ...
python
train
petl-developers/petlx
petlx/bio/vcf.py
https://github.com/petl-developers/petlx/blob/54039e30388c7da12407d6b5c3cb865b00436004/petlx/bio/vcf.py#L9-L37
def fromvcf(filename, chrom=None, start=None, stop=None, samples=True): """ Returns a table providing access to data from a variant call file (VCF). E.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = etl.fromvcf('fixture/sample.vcf') >>> table1.look(truncate=20) +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | CHROM | POS | ID | REF | ALT | QUAL | FILTER | INFO | NA00001 | NA00002 | NA00003 | +=======+=========+=============+=====+========+======+=========+======================+======================+======================+======================+ | '19' | 111 | None | 'A' | [C] | 9.6 | None | {} | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '19' | 112 | None | 'A' | [G] | 10 | None | {} | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '20' | 14370 | 'rs6054257' | 'G' | [A] | 29 | [] | {'DP': 14, 'H2': Tru | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '20' | 17330 | None | 'T' | [A] | 3 | ['q10'] | {'DP': 11, 'NS': 3, | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '20' | 1110696 | 'rs6040355' | 'A' | [G, T] | 67 | [] | {'DP': 10, 'AA': 'T' | Call(sample=NA00001, | Call(sample=NA00002, | 
Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ ... """ return VCFView(filename, chrom=chrom, start=start, stop=stop, samples=samples)
[ "def", "fromvcf", "(", "filename", ",", "chrom", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "samples", "=", "True", ")", ":", "return", "VCFView", "(", "filename", ",", "chrom", "=", "chrom", ",", "start", "=", "start", ...
Returns a table providing access to data from a variant call file (VCF). E.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = etl.fromvcf('fixture/sample.vcf') >>> table1.look(truncate=20) +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | CHROM | POS | ID | REF | ALT | QUAL | FILTER | INFO | NA00001 | NA00002 | NA00003 | +=======+=========+=============+=====+========+======+=========+======================+======================+======================+======================+ | '19' | 111 | None | 'A' | [C] | 9.6 | None | {} | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '19' | 112 | None | 'A' | [G] | 10 | None | {} | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '20' | 14370 | 'rs6054257' | 'G' | [A] | 29 | [] | {'DP': 14, 'H2': Tru | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '20' | 17330 | None | 'T' | [A] | 3 | ['q10'] | {'DP': 11, 'NS': 3, | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | +-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ | '20' | 1110696 | 'rs6040355' | 'A' | [G, T] | 67 | [] | {'DP': 10, 'AA': 'T' | Call(sample=NA00001, | Call(sample=NA00002, | Call(sample=NA00003, | 
+-------+---------+-------------+-----+--------+------+---------+----------------------+----------------------+----------------------+----------------------+ ...
[ "Returns", "a", "table", "providing", "access", "to", "data", "from", "a", "variant", "call", "file", "(", "VCF", ")", ".", "E", ".", "g", ".", "::" ]
python
train
tgbugs/pyontutils
nifstd/nifstd_tools/parcellation/__init__.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/nifstd/nifstd_tools/parcellation/__init__.py#L43-L269
def swanson(): """ not really a parcellation scheme NOTE: the defining information up here is now deprecated it is kept around to keep the code further down happy """ source = Path(devconfig.resources, 'swanson_aligned.txt').as_posix() ONT_PATH = 'http://ontology.neuinfo.org/NIF/ttl/generated/' filename = 'swanson_hierarchies' ontid = ONT_PATH + filename + '.ttl' PREFIXES = SwansonLabels.prefixes new_graph = makeGraph(filename, PREFIXES, writeloc='/tmp/') new_graph.add_ont(ontid, 'Swanson brain partomies', 'Swanson 2014 Partonomies', 'This file is automatically generated from ' + source + '.' + '**FIXME**', 'now') # FIXME citations should really go on the ... anatomy? scheme artifact definingCitation = 'Swanson, Larry W. Neuroanatomical Terminology: a lexicon of classical origins and historical foundations. Oxford University Press, USA, 2014.' definingCitationID = 'ISBN:9780195340624' new_graph.add_trip(ontid, 'NIFRID:definingCitation', definingCitation) new_graph.add_trip(ontid, 'NIFRID:definingCitationID', definingCitationID) with open(source, 'rt') as f: lines = [l.strip() for l in f.readlines()] # join header on page 794 lines[635] += ' ' + lines.pop(636) #fix for capitalization since this header is reused fixed = ' or '.join([' ('.join([n.capitalize() for n in _.split(' (')]) for _ in lines[635].lower().split(' or ')]).replace('human','HUMAN') lines[635] = fixed data = [] for l in lines: if not l.startswith('#'): level = l.count('.'*5) l = l.strip('.') if ' (' in l: if ') or' in l: n1, l = l.split(') or') area_name, citationP = n1.strip().split(' (') citation = citationP.rstrip(')') d = (level, area_name, citation, 'NEXT SYN') data.append(d) #print(tc.red(tc.bold(repr(d)))) area_name, citationP = l.strip().split(' (') citation = citationP.rstrip(')') else: area_name = l citation = None d = (level, area_name, citation, None) #print(d) data.append(d) results = async_getter(sgv.findByTerm, [(d[1],) for d in data]) #results = [None] * len(data) curies = 
[[r['curie'] for r in _ if 'UBERON' in r['curie']] if _ else [] for _ in results] output = [_[0] if _ else None for _ in curies] header = ['Depth', 'Name', 'Citation', 'NextSyn', 'Uberon'] zoop = [header] + [r for r in zip(*zip(*data), output)] + \ [(0, 'Appendix END None', None, None, None)] # needed to add last appendix # TODO annotate the appendicies and the classes with these appendix_root_mapping = (1, 1, 1, 1, 30, 83, 69, 70, 74, 1) # should generate? class SP(rowParse): def __init__(self): self.nodes = defaultdict(dict) self._appendix = 0 self.appendicies = {} self._last_at_level = {} self.names = defaultdict(set) self.children = defaultdict(set) self.parents = defaultdict(set) self.next_syn = False super().__init__(zoop) def Depth(self, value): if self.next_syn: self.synonym = self.next_syn else: self.synonym = False self.depth = value def Name(self, value): self.name = value def Citation(self, value): self.citation = value def NextSyn(self, value): if value: self.next_syn = self._rowind else: self.next_syn = False def Uberon(self, value): self.uberon = value def _row_post(self): # check if we are in the next appendix # may want to xref ids between appendicies as well... 
if self.depth == 0: if self.name.startswith('Appendix'): if self._appendix: self.appendicies[self._appendix]['children'] = dict(self.children) self.appendicies[self._appendix]['parents'] = dict(self.parents) self._last_at_level = {} self.children = defaultdict(set) self.parents = defaultdict(set) _, num, apname = self.name.split(' ', 2) if num == 'END': return self._appendix = int(num) self.appendicies[self._appendix] = { 'name':apname.capitalize(), 'type':self.citation.capitalize() if self.citation else None} return else: if ' [' in self.name: name, taxonB = self.name.split(' [') self.name = name self.appendicies[self._appendix]['taxon'] = taxonB.rstrip(']').capitalize() else: # top level is animalia self.appendicies[self._appendix]['taxon'] = 'ANIMALIA'.capitalize() self.name = self.name.capitalize() self.citation = self.citation.capitalize() # nodes if self.synonym: self.nodes[self.synonym]['synonym'] = self.name self.nodes[self.synonym]['syn-cite'] = self.citation self.nodes[self.synonym]['syn-uberon'] = self.uberon return else: if self.citation: # Transverse Longitudinal etc all @ lvl4 self.names[self.name + ' ' + self.citation].add(self._rowind) else: self.name += str(self._appendix) + self.nodes[self._last_at_level[self.depth - 1]]['label'] #print(level, self.name) # can't return here because they are their own level # replace with actually doing something... 
self.nodes[self._rowind]['label'] = self.name self.nodes[self._rowind]['citation'] = self.citation self.nodes[self._rowind]['uberon'] = self.uberon # edges self._last_at_level[self.depth] = self._rowind # TODO will need something to deal with the Lateral/ if self.depth > 0: try: parent = self._last_at_level[self.depth - 1] except: embed() self.children[parent].add(self._rowind) self.parents[self._rowind].add(parent) def _end(self): replace = {} for asdf in [sorted(n) for k,n in self.names.items() if len(n) > 1]: replace_with, to_replace = asdf[0], asdf[1:] for r in to_replace: replace[r] = replace_with for r, rw in replace.items(): #print(self.nodes[rw]) o = self.nodes.pop(r) #print(o) for vals in self.appendicies.values(): children = vals['children'] parents = vals['parents'] # need reversed so children are corrected before swap for r, rw in reversed(sorted(replace.items())): if r in parents: child = r new_child = rw parent = parents.pop(child) parents[new_child] = parent parent = list(parent)[0] children[parent].remove(child) children[parent].add(new_child) if r in children: parent = r new_parent = rw childs = children.pop(parent) children[new_parent] = childs for child in childs: parents[child] = {new_parent} self.nodes = dict(self.nodes) sp = SP() tp = [_ for _ in sorted(['{: <50}'.format(n['label']) + n['uberon'] if n['uberon'] else n['label'] for n in sp.nodes.values()])] #print('\n'.join(tp)) #print(sp.appendicies[1].keys()) #print(sp.nodes[1].keys()) nbase = PREFIXES['SWAN'] + '%s' json_ = {'nodes':[],'edges':[]} parent = ilxtr.swansonBrainRegionConcept for node, anns in sp.nodes.items(): nid = nbase % node new_graph.add_class(nid, parent, label=anns['label']) new_graph.add_trip(nid, 'NIFRID:definingCitation', anns['citation']) json_['nodes'].append({'lbl':anns['label'],'id':'SWA:' + str(node)}) #if anns['uberon']: #new_graph.add_trip(nid, owl.equivalentClass, anns['uberon']) # issues arrise here... 
for appendix, data in sp.appendicies.items(): aid = PREFIXES['SWAA'] + str(appendix) new_graph.add_class(aid, label=data['name'].capitalize()) new_graph.add_trip(aid, 'ilxtr:hasTaxonRank', data['taxon']) # FIXME appendix is the data artifact... children = data['children'] ahp = 'swanr:hasPart' + str(appendix) apo = 'swanr:partOf' + str(appendix) new_graph.add_op(ahp, transitive=True) new_graph.add_op(apo, inverse=ahp, transitive=True) for parent, childs in children.items(): # FIXME does this give complete coverage? pid = nbase % parent for child in childs: cid = nbase % child new_graph.add_restriction(pid, ahp, cid) # note hierarhcy inverts direction new_graph.add_restriction(cid, apo, pid) json_['edges'].append({'sub':'SWA:' + str(child),'pred':apo,'obj':'SWA:' + str(parent)}) return new_graph
[ "def", "swanson", "(", ")", ":", "source", "=", "Path", "(", "devconfig", ".", "resources", ",", "'swanson_aligned.txt'", ")", ".", "as_posix", "(", ")", "ONT_PATH", "=", "'http://ontology.neuinfo.org/NIF/ttl/generated/'", "filename", "=", "'swanson_hierarchies'", "...
not really a parcellation scheme NOTE: the defining information up here is now deprecated it is kept around to keep the code further down happy
[ "not", "really", "a", "parcellation", "scheme", "NOTE", ":", "the", "defining", "information", "up", "here", "is", "now", "deprecated", "it", "is", "kept", "around", "to", "keep", "the", "code", "further", "down", "happy" ]
python
train
scottjbarr/bitfinex
bitfinex/client.py
https://github.com/scottjbarr/bitfinex/blob/03f7c71615fe38c2e28be0ebb761d3106ef0a51a/bitfinex/client.py#L56-L89
def place_order(self, amount, price, side, ord_type, symbol='btcusd', exchange='bitfinex'): """ Submit a new order. :param amount: :param price: :param side: :param ord_type: :param symbol: :param exchange: :return: """ payload = { "request": "/v1/order/new", "nonce": self._nonce, "symbol": symbol, "amount": amount, "price": price, "exchange": exchange, "side": side, "type": ord_type } signed_payload = self._sign_payload(payload) r = requests.post(self.URL + "/order/new", headers=signed_payload, verify=True) json_resp = r.json() try: json_resp['order_id'] except: return json_resp['message'] return json_resp
[ "def", "place_order", "(", "self", ",", "amount", ",", "price", ",", "side", ",", "ord_type", ",", "symbol", "=", "'btcusd'", ",", "exchange", "=", "'bitfinex'", ")", ":", "payload", "=", "{", "\"request\"", ":", "\"/v1/order/new\"", ",", "\"nonce\"", ":",...
Submit a new order. :param amount: :param price: :param side: :param ord_type: :param symbol: :param exchange: :return:
[ "Submit", "a", "new", "order", ".", ":", "param", "amount", ":", ":", "param", "price", ":", ":", "param", "side", ":", ":", "param", "ord_type", ":", ":", "param", "symbol", ":", ":", "param", "exchange", ":", ":", "return", ":" ]
python
train
moreati/subresource-integrity
subresource_integrity.py
https://github.com/moreati/subresource-integrity/blob/c9f6cecddea85f1c7bb5562551a41b9678fbda21/subresource_integrity.py#L118-L130
def render(data, algorithms=(DEFAULT_ALOGRITHM,), seperator=' '): """Returns a subresource integrity string for the given data & algorithms >>> data = b"alert('Hello, world.');" >>> render(data) 'sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO' >>> print(render(data, ['sha256', 'sha384'], seperator='\\n')) sha256-qznLcsROx4GACP2dm0UCKCzCG+HiZ1guq6ZZDob/Tng= sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO """ return seperator.join(str(ihash) for ihash in generate(data, algorithms))
[ "def", "render", "(", "data", ",", "algorithms", "=", "(", "DEFAULT_ALOGRITHM", ",", ")", ",", "seperator", "=", "' '", ")", ":", "return", "seperator", ".", "join", "(", "str", "(", "ihash", ")", "for", "ihash", "in", "generate", "(", "data", ",", "...
Returns a subresource integrity string for the given data & algorithms >>> data = b"alert('Hello, world.');" >>> render(data) 'sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO' >>> print(render(data, ['sha256', 'sha384'], seperator='\\n')) sha256-qznLcsROx4GACP2dm0UCKCzCG+HiZ1guq6ZZDob/Tng= sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
[ "Returns", "a", "subresource", "integrity", "string", "for", "the", "given", "data", "&", "algorithms" ]
python
train
pyroscope/pyrocore
src/pyrocore/torrent/formatting.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/formatting.py#L231-L274
def expand_template(template, namespace): """ Expand the given (preparsed) template. Currently, only Tempita templates are supported. @param template: The template, in preparsed form, or as a string (which then will be preparsed). @param namespace: Custom namespace that is added to the predefined defaults and takes precedence over those. @return: The expanded template. @raise LoggableError: In case of typical errors during template execution. """ # Create helper namespace formatters = dict((name[4:], method) for name, method in globals().items() if name.startswith("fmt_") ) helpers = Bunch() helpers.update(formatters) # Default templating namespace variables = dict(h=helpers, c=config.custom_template_helpers) variables.update(formatters) # redundant, for backwards compatibility # Provided namespace takes precedence variables.update(namespace) # Expand template try: template = preparse(template) return template.substitute(**variables) except (AttributeError, ValueError, NameError, TypeError) as exc: hint = '' if "column" in str(exc): try: col = int(str(exc).split("column")[1].split()[0]) except (TypeError, ValueError): pass else: hint = "%svVv\n" % (' ' * (col+4)) content = getattr(template, "content", template) raise error.LoggableError("%s: %s in template:\n%s%s" % ( type(exc).__name__, exc, hint, "\n".join("%3d: %s" % (i+1, line) for i, line in enumerate(content.splitlines())) ))
[ "def", "expand_template", "(", "template", ",", "namespace", ")", ":", "# Create helper namespace", "formatters", "=", "dict", "(", "(", "name", "[", "4", ":", "]", ",", "method", ")", "for", "name", ",", "method", "in", "globals", "(", ")", ".", "items"...
Expand the given (preparsed) template. Currently, only Tempita templates are supported. @param template: The template, in preparsed form, or as a string (which then will be preparsed). @param namespace: Custom namespace that is added to the predefined defaults and takes precedence over those. @return: The expanded template. @raise LoggableError: In case of typical errors during template execution.
[ "Expand", "the", "given", "(", "preparsed", ")", "template", ".", "Currently", "only", "Tempita", "templates", "are", "supported", "." ]
python
train
pypa/pipenv
pipenv/vendor/pyparsing.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L1042-L1067
def pprint(self, *args, **kwargs): """ Pretty-printer for parsed results as a list, using the `pprint <https://docs.python.org/3/library/pprint.html>`_ module. Accepts additional positional or keyword args as defined for `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] """ pprint.pprint(self.asList(), *args, **kwargs)
[ "def", "pprint", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pprint", ".", "pprint", "(", "self", ".", "asList", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Pretty-printer for parsed results as a list, using the `pprint <https://docs.python.org/3/library/pprint.html>`_ module. Accepts additional positional or keyword args as defined for `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']]
[ "Pretty", "-", "printer", "for", "parsed", "results", "as", "a", "list", "using", "the", "pprint", "<https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "library", "/", "pprint", ".", "html", ">", "_", "module", ".", "Accepts", "addi...
python
train
influxdata/influxdb-python
influxdb/influxdb08/client.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/client.py#L607-L621
def add_cluster_admin(self, new_username, new_password): """Add cluster admin.""" data = { 'name': new_username, 'password': new_password } self.request( url="cluster_admins", method='POST', data=data, expected_response_code=200 ) return True
[ "def", "add_cluster_admin", "(", "self", ",", "new_username", ",", "new_password", ")", ":", "data", "=", "{", "'name'", ":", "new_username", ",", "'password'", ":", "new_password", "}", "self", ".", "request", "(", "url", "=", "\"cluster_admins\"", ",", "me...
Add cluster admin.
[ "Add", "cluster", "admin", "." ]
python
train
tumblr/pytumblr
pytumblr/__init__.py
https://github.com/tumblr/pytumblr/blob/4a5cd7c4b8ae78d12811d9fd52620afa1692a415/pytumblr/__init__.py#L514-L532
def _send_post(self, blogname, params): """ Formats parameters and sends the API request off. Validates common and per-post-type parameters and formats your tags for you. :param blogname: a string, the blogname of the blog you are posting to :param params: a dict, the key-value of the parameters for the api request :param valid_options: a list of valid options that the request allows :returns: a dict parsed from the JSON response """ url = "/v2/blog/{}/post".format(blogname) valid_options = self._post_valid_options(params.get('type', None)) if len(params.get("tags", [])) > 0: # Take a list of tags and make them acceptable for upload params['tags'] = ",".join(params['tags']) return self.send_api_request("post", url, params, valid_options)
[ "def", "_send_post", "(", "self", ",", "blogname", ",", "params", ")", ":", "url", "=", "\"/v2/blog/{}/post\"", ".", "format", "(", "blogname", ")", "valid_options", "=", "self", ".", "_post_valid_options", "(", "params", ".", "get", "(", "'type'", ",", "N...
Formats parameters and sends the API request off. Validates common and per-post-type parameters and formats your tags for you. :param blogname: a string, the blogname of the blog you are posting to :param params: a dict, the key-value of the parameters for the api request :param valid_options: a list of valid options that the request allows :returns: a dict parsed from the JSON response
[ "Formats", "parameters", "and", "sends", "the", "API", "request", "off", ".", "Validates", "common", "and", "per", "-", "post", "-", "type", "parameters", "and", "formats", "your", "tags", "for", "you", "." ]
python
train
theonion/django-bulbs
bulbs/contributions/email.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L80-L87
def contributions(self): """Apply a datetime filter against the contributor's contribution queryset.""" if self._contributions is None: self._contributions = self.contributor.contributions.filter( content__published__gte=self.start, content__published__lt=self.end ) return self._contributions
[ "def", "contributions", "(", "self", ")", ":", "if", "self", ".", "_contributions", "is", "None", ":", "self", ".", "_contributions", "=", "self", ".", "contributor", ".", "contributions", ".", "filter", "(", "content__published__gte", "=", "self", ".", "sta...
Apply a datetime filter against the contributor's contribution queryset.
[ "Apply", "a", "datetime", "filter", "against", "the", "contributor", "s", "contribution", "queryset", "." ]
python
train
ska-sa/purr
Purr/LogEntry.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/LogEntry.py#L16-L21
def _copy_update(sourcepath, destname): """Copy source to dest only if source is newer.""" if sys.platform.startswith('linux'): return os.system("/bin/cp -ua '%s' '%s'" % (sourcepath, destname)) else: return os.system("rsync -ua '%s' '%s'" % (sourcepath, destname))
[ "def", "_copy_update", "(", "sourcepath", ",", "destname", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", ":", "return", "os", ".", "system", "(", "\"/bin/cp -ua '%s' '%s'\"", "%", "(", "sourcepath", ",", "destname", ")", "...
Copy source to dest only if source is newer.
[ "Copy", "source", "to", "dest", "only", "if", "source", "is", "newer", "." ]
python
train
CalebBell/fluids
fluids/friction.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/friction.py#L3042-L3124
def friction_plate_Martin_1999(Re, plate_enlargement_factor): r'''Calculates Darcy friction factor for single-phase flow in a Chevron-style plate heat exchanger according to [1]_. .. math:: \frac{1}{\sqrt{f_f}} = \frac{\cos \phi}{\sqrt{0.045\tan\phi + 0.09\sin\phi + f_0/\cos(\phi)}} + \frac{1-\cos\phi}{\sqrt{3.8f_1}} .. math:: f_0 = 16/Re \text{ for } Re < 2000 .. math:: f_0 = (1.56\ln Re - 3)^{-2} \text{ for } Re \ge 2000 .. math:: f_1 = \frac{149}{Re} + 0.9625 \text{ for } Re < 2000 .. math:: f_1 = \frac{9.75}{Re^{0.289}} \text{ for } Re \ge 2000 Parameters ---------- Re : float Reynolds number with respect to the hydraulic diameter of the channels, [-] plate_enlargement_factor : float The extra surface area multiplier as compared to a flat plate caused the corrugations, [-] Returns ------- fd : float Darcy friction factor [-] Notes ----- Based on experimental data from Re from 200 - 10000 and enhancement factors calculated with chevron angles of 0 to 80 degrees. See `PlateExchanger` for further clarification on the definitions. The length the friction factor gets multiplied by is not the flow path length, but rather the straight path length from port to port as if there were no chevrons. Note there is a discontinuity at Re = 2000 for the transition from laminar to turbulent flow, although the literature suggests the transition is actually smooth. This was first developed in [2]_ and only minor modifications by the original author were made before its republication in [1]_. This formula is also suggested in [3]_ Examples -------- >>> friction_plate_Martin_1999(Re=20000, plate_enlargement_factor=1.15) 2.284018089834134 References ---------- .. [1] Martin, Holger. "Economic optimization of compact heat exchangers." EF-Conference on Compact Heat Exchangers and Enhancement Technology for the Process Industries, Banff, Canada, July 18-23, 1999, 1999. https://publikationen.bibliothek.kit.edu/1000034866. .. [2] Martin, Holger. 
"A Theoretical Approach to Predict the Performance of Chevron-Type Plate Heat Exchangers." Chemical Engineering and Processing: Process Intensification 35, no. 4 (January 1, 1996): 301-10. https://doi.org/10.1016/0255-2701(95)04129-X. .. [3] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002. ''' phi = plate_enlargement_factor if Re < 2000.: f0 = 16./Re f1 = 149./Re + 0.9625 else: f0 = (1.56*log(Re) - 3.0)**-2 f1 = 9.75*Re**-0.289 rhs = cos(phi)*(0.045*tan(phi) + 0.09*sin(phi) + f0/cos(phi))**-0.5 rhs += (1. - cos(phi))*(3.8*f1)**-0.5 ff = rhs**-2. return ff*4.0
[ "def", "friction_plate_Martin_1999", "(", "Re", ",", "plate_enlargement_factor", ")", ":", "phi", "=", "plate_enlargement_factor", "if", "Re", "<", "2000.", ":", "f0", "=", "16.", "/", "Re", "f1", "=", "149.", "/", "Re", "+", "0.9625", "else", ":", "f0", ...
r'''Calculates Darcy friction factor for single-phase flow in a Chevron-style plate heat exchanger according to [1]_. .. math:: \frac{1}{\sqrt{f_f}} = \frac{\cos \phi}{\sqrt{0.045\tan\phi + 0.09\sin\phi + f_0/\cos(\phi)}} + \frac{1-\cos\phi}{\sqrt{3.8f_1}} .. math:: f_0 = 16/Re \text{ for } Re < 2000 .. math:: f_0 = (1.56\ln Re - 3)^{-2} \text{ for } Re \ge 2000 .. math:: f_1 = \frac{149}{Re} + 0.9625 \text{ for } Re < 2000 .. math:: f_1 = \frac{9.75}{Re^{0.289}} \text{ for } Re \ge 2000 Parameters ---------- Re : float Reynolds number with respect to the hydraulic diameter of the channels, [-] plate_enlargement_factor : float The extra surface area multiplier as compared to a flat plate caused the corrugations, [-] Returns ------- fd : float Darcy friction factor [-] Notes ----- Based on experimental data from Re from 200 - 10000 and enhancement factors calculated with chevron angles of 0 to 80 degrees. See `PlateExchanger` for further clarification on the definitions. The length the friction factor gets multiplied by is not the flow path length, but rather the straight path length from port to port as if there were no chevrons. Note there is a discontinuity at Re = 2000 for the transition from laminar to turbulent flow, although the literature suggests the transition is actually smooth. This was first developed in [2]_ and only minor modifications by the original author were made before its republication in [1]_. This formula is also suggested in [3]_ Examples -------- >>> friction_plate_Martin_1999(Re=20000, plate_enlargement_factor=1.15) 2.284018089834134 References ---------- .. [1] Martin, Holger. "Economic optimization of compact heat exchangers." EF-Conference on Compact Heat Exchangers and Enhancement Technology for the Process Industries, Banff, Canada, July 18-23, 1999, 1999. https://publikationen.bibliothek.kit.edu/1000034866. .. [2] Martin, Holger. "A Theoretical Approach to Predict the Performance of Chevron-Type Plate Heat Exchangers." 
Chemical Engineering and Processing: Process Intensification 35, no. 4 (January 1, 1996): 301-10. https://doi.org/10.1016/0255-2701(95)04129-X. .. [3] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
[ "r", "Calculates", "Darcy", "friction", "factor", "for", "single", "-", "phase", "flow", "in", "a", "Chevron", "-", "style", "plate", "heat", "exchanger", "according", "to", "[", "1", "]", "_", ".", "..", "math", "::", "\\", "frac", "{", "1", "}", "{...
python
train
Azure/azure-cli-extensions
src/aks-preview/azext_aks_preview/_validators.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/aks-preview/azext_aks_preview/_validators.py#L71-L83
def validate_linux_host_name(namespace): """Validates a string as a legal host name component. This validation will also occur server-side in the ARM API, but that may take a minute or two before the user sees it. So it's more user-friendly to validate in the CLI pre-flight. """ # https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long found = rfc1123_regex.findall(namespace.name) if not found: raise CLIError('--name cannot exceed 63 characters and can only contain ' 'letters, numbers, or dashes (-).')
[ "def", "validate_linux_host_name", "(", "namespace", ")", ":", "# https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address", "rfc1123_regex", "=", "re", ".", "compile", "(", "r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9...
Validates a string as a legal host name component. This validation will also occur server-side in the ARM API, but that may take a minute or two before the user sees it. So it's more user-friendly to validate in the CLI pre-flight.
[ "Validates", "a", "string", "as", "a", "legal", "host", "name", "component", "." ]
python
train
knipknap/exscript
Exscript/util/file.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/file.py#L206-L236
def load_lib(filename): """ Loads a Python file containing functions, and returns the content of the __lib__ variable. The __lib__ variable must contain a dictionary mapping function names to callables. Returns a dictionary mapping the namespaced function names to callables. The namespace is the basename of the file, without file extension. The result of this function can later be passed to run_template:: functions = load_lib('my_library.py') run_template(conn, 'foo.exscript', **functions) :type filename: string :param filename: A full filename. :rtype: dict[string->object] :return: The loaded functions. """ # Open the file. if not os.path.exists(filename): raise IOError('No such file: %s' % filename) name = os.path.splitext(os.path.basename(filename))[0] if sys.version_info[0] < 3: module = imp.load_source(name, filename) else: module = importlib.machinery.SourceFileLoader(name, filename).load_module() return dict((name + '.' + k, v) for (k, v) in list(module.__lib__.items()))
[ "def", "load_lib", "(", "filename", ")", ":", "# Open the file.", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "IOError", "(", "'No such file: %s'", "%", "filename", ")", "name", "=", "os", ".", "path", ".", "splitex...
Loads a Python file containing functions, and returns the content of the __lib__ variable. The __lib__ variable must contain a dictionary mapping function names to callables. Returns a dictionary mapping the namespaced function names to callables. The namespace is the basename of the file, without file extension. The result of this function can later be passed to run_template:: functions = load_lib('my_library.py') run_template(conn, 'foo.exscript', **functions) :type filename: string :param filename: A full filename. :rtype: dict[string->object] :return: The loaded functions.
[ "Loads", "a", "Python", "file", "containing", "functions", "and", "returns", "the", "content", "of", "the", "__lib__", "variable", ".", "The", "__lib__", "variable", "must", "contain", "a", "dictionary", "mapping", "function", "names", "to", "callables", "." ]
python
train
SBRG/ssbio
ssbio/protein/sequence/properties/scratch.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L68-L78
def sspro_results(self): """Parse the SSpro output file and return a dict of secondary structure compositions. Returns: dict: Keys are sequence IDs, values are the lists of secondary structure predictions. H: helix E: strand C: the rest """ return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro)
[ "def", "sspro_results", "(", "self", ")", ":", "return", "ssbio", ".", "protein", ".", "sequence", ".", "utils", ".", "fasta", ".", "load_fasta_file_as_dict_of_seqs", "(", "self", ".", "out_sspro", ")" ]
Parse the SSpro output file and return a dict of secondary structure compositions. Returns: dict: Keys are sequence IDs, values are the lists of secondary structure predictions. H: helix E: strand C: the rest
[ "Parse", "the", "SSpro", "output", "file", "and", "return", "a", "dict", "of", "secondary", "structure", "compositions", "." ]
python
train
swharden/SWHLab
swhlab/core.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/core.py#L379-L385
def sweepYfiltered(self): """ Get the filtered sweepY of the current sweep. Only works if self.kernel has been generated. """ assert self.kernel is not None return swhlab.common.convolve(self.sweepY,self.kernel)
[ "def", "sweepYfiltered", "(", "self", ")", ":", "assert", "self", ".", "kernel", "is", "not", "None", "return", "swhlab", ".", "common", ".", "convolve", "(", "self", ".", "sweepY", ",", "self", ".", "kernel", ")" ]
Get the filtered sweepY of the current sweep. Only works if self.kernel has been generated.
[ "Get", "the", "filtered", "sweepY", "of", "the", "current", "sweep", ".", "Only", "works", "if", "self", ".", "kernel", "has", "been", "generated", "." ]
python
valid
FutunnOpen/futuquant
futuquant/trade/open_trade_context.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/trade/open_trade_context.py#L663-L727
def acctradinginfo_query(self, order_type, code, price, order_id=None, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0): """ 查询账户下最大可买卖数量 :param order_type: 订单类型,参见OrderType :param code: 证券代码,例如'HK.00700' :param price: 报价,3位精度 :param order_id: 订单号。如果是新下单,则可以传None。如果是改单则要传单号。 :param adjust_limit: 调整方向和调整幅度百分比限制,正数代表向上调整,负数代表向下调整,具体值代表调整幅度限制,如:0.015代表向上调整且幅度不超过1.5%;-0.01代表向下调整且幅度不超过1%。默认0表示不调整 :param trd_env: 交易环境,参见TrdEnv :param acc_id: 业务账号,默认0表示第1个 :param acc_index: int,交易业务子账户ID列表所对应的下标,默认0,表示第1个业务ID :return: (ret, data) ret == RET_OK, data为pd.DataFrame,数据列如下 ret != RET_OK, data为错误信息 ======================= =========== ====================================================================================== 参数 类型 说明 ======================= =========== ====================================================================================== max_cash_buy float 不使用融资,仅自己的现金最大可买整手股数 max_cash_and_margin_buy float 使用融资,自己的现金 + 融资资金总共的最大可买整手股数 max_position_sell float 不使用融券(卖空),仅自己的持仓最大可卖整手股数 max_sell_short float 使用融券(卖空),最大可卖空整手股数,不包括多仓 max_buy_back float 卖空后,需要买回的最大整手股数。因为卖空后,必须先买回已卖空的股数,还掉股票,才能再继续买多。 ======================= =========== ====================================================================================== """ ret, msg = self._check_trd_env(trd_env) if ret != RET_OK: return ret, msg ret, msg, acc_id = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index) if ret != RET_OK: return ret, msg ret, content = self._split_stock_code(code) if ret != RET_OK: return ret, content market_str, stock_code = content query_processor = self._get_sync_query_processor( AccTradingInfoQuery.pack_req, AccTradingInfoQuery.unpack_rsp) kargs = { 'order_type': order_type, 'code': str(stock_code), 'price': price, 'order_id': order_id, 'adjust_limit': adjust_limit, 'trd_mkt': self.__trd_mkt, 'sec_mkt_str': market_str, 'trd_env': trd_env, 'acc_id': acc_id, 'conn_id': self.get_sync_conn_id() } ret_code, msg, data = query_processor(**kargs) if ret_code != RET_OK: return 
RET_ERROR, msg col_list = ['max_cash_buy', 'max_cash_and_margin_buy', 'max_position_sell', 'max_sell_short', 'max_buy_back'] acctradinginfo_table = pd.DataFrame(data, columns=col_list) return RET_OK, acctradinginfo_table
[ "def", "acctradinginfo_query", "(", "self", ",", "order_type", ",", "code", ",", "price", ",", "order_id", "=", "None", ",", "adjust_limit", "=", "0", ",", "trd_env", "=", "TrdEnv", ".", "REAL", ",", "acc_id", "=", "0", ",", "acc_index", "=", "0", ")",...
查询账户下最大可买卖数量 :param order_type: 订单类型,参见OrderType :param code: 证券代码,例如'HK.00700' :param price: 报价,3位精度 :param order_id: 订单号。如果是新下单,则可以传None。如果是改单则要传单号。 :param adjust_limit: 调整方向和调整幅度百分比限制,正数代表向上调整,负数代表向下调整,具体值代表调整幅度限制,如:0.015代表向上调整且幅度不超过1.5%;-0.01代表向下调整且幅度不超过1%。默认0表示不调整 :param trd_env: 交易环境,参见TrdEnv :param acc_id: 业务账号,默认0表示第1个 :param acc_index: int,交易业务子账户ID列表所对应的下标,默认0,表示第1个业务ID :return: (ret, data) ret == RET_OK, data为pd.DataFrame,数据列如下 ret != RET_OK, data为错误信息 ======================= =========== ====================================================================================== 参数 类型 说明 ======================= =========== ====================================================================================== max_cash_buy float 不使用融资,仅自己的现金最大可买整手股数 max_cash_and_margin_buy float 使用融资,自己的现金 + 融资资金总共的最大可买整手股数 max_position_sell float 不使用融券(卖空),仅自己的持仓最大可卖整手股数 max_sell_short float 使用融券(卖空),最大可卖空整手股数,不包括多仓 max_buy_back float 卖空后,需要买回的最大整手股数。因为卖空后,必须先买回已卖空的股数,还掉股票,才能再继续买多。 ======================= =========== ======================================================================================
[ "查询账户下最大可买卖数量", ":", "param", "order_type", ":", "订单类型,参见OrderType", ":", "param", "code", ":", "证券代码,例如", "HK", ".", "00700", ":", "param", "price", ":", "报价,3位精度", ":", "param", "order_id", ":", "订单号。如果是新下单,则可以传None。如果是改单则要传单号。", ":", "param", "adjust_limit", ...
python
train
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L105-L108
def from_columns_dict(cls, columns): """Create a table from a mapping of column labels to column values. [Deprecated]""" warnings.warn("Table.from_columns_dict is deprecated. Use Table().with_columns(...)", FutureWarning) return cls().with_columns(columns.items())
[ "def", "from_columns_dict", "(", "cls", ",", "columns", ")", ":", "warnings", ".", "warn", "(", "\"Table.from_columns_dict is deprecated. Use Table().with_columns(...)\"", ",", "FutureWarning", ")", "return", "cls", "(", ")", ".", "with_columns", "(", "columns", ".", ...
Create a table from a mapping of column labels to column values. [Deprecated]
[ "Create", "a", "table", "from", "a", "mapping", "of", "column", "labels", "to", "column", "values", ".", "[", "Deprecated", "]" ]
python
train
arista-eosplus/pyeapi
pyeapi/api/stp.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/stp.py#L282-L312
def set_portfast(self, name, value=None, default=False, disable=False): """Configures the portfast value for the specified interface Args: name (string): The interface identifier to configure. The name must be the full interface name (eg Ethernet1, not Et1) value (bool): True if portfast is enabled otherwise False default (bool): Configures the portfast parameter to its default value using the EOS CLI default config command disable (bool): Negates the portfast parameter using the EOS CLI no config command Returns: True if the command succeeds, otherwise False Raises: ValueError: Rasied if an invalid interface name is specified TypeError: Raised if the value keyword argument does not evaluate to a valid boolean """ if value is False: disable = True string = 'spanning-tree portfast' cmds = self.command_builder(string, value=value, default=default, disable=disable) return self.configure_interface(name, cmds)
[ "def", "set_portfast", "(", "self", ",", "name", ",", "value", "=", "None", ",", "default", "=", "False", ",", "disable", "=", "False", ")", ":", "if", "value", "is", "False", ":", "disable", "=", "True", "string", "=", "'spanning-tree portfast'", "cmds"...
Configures the portfast value for the specified interface Args: name (string): The interface identifier to configure. The name must be the full interface name (eg Ethernet1, not Et1) value (bool): True if portfast is enabled otherwise False default (bool): Configures the portfast parameter to its default value using the EOS CLI default config command disable (bool): Negates the portfast parameter using the EOS CLI no config command Returns: True if the command succeeds, otherwise False Raises: ValueError: Rasied if an invalid interface name is specified TypeError: Raised if the value keyword argument does not evaluate to a valid boolean
[ "Configures", "the", "portfast", "value", "for", "the", "specified", "interface" ]
python
train
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2147-L2158
def get_album(self): """Returns the album object of this track.""" doc = self._request(self.ws_prefix + ".getInfo", True) albums = doc.getElementsByTagName("album") if len(albums) == 0: return node = doc.getElementsByTagName("album")[0] return Album(_extract(node, "artist"), _extract(node, "title"), self.network)
[ "def", "get_album", "(", "self", ")", ":", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "True", ")", "albums", "=", "doc", ".", "getElementsByTagName", "(", "\"album\"", ")", "if", "len", "(", "albums", ...
Returns the album object of this track.
[ "Returns", "the", "album", "object", "of", "this", "track", "." ]
python
train
openvax/mhcnames
mhcnames/mouse.py
https://github.com/openvax/mhcnames/blob/71694b9d620db68ceee44da1b8422ff436f15bd3/mhcnames/mouse.py#L21-L52
def parse_mouse_allele_name(name): """Parses mouse MHc alleles such as H2-Kd, H-2-Db, H2-IAb. Returns pair of (gene, allele_code). """ original = name if name.upper().startswith("H2"): name = name[2:] elif name.upper().startswith("H-2"): name = name[3:] _, name = parse_separator(name) # special logic for mouse alleles if name.upper().startswith("I"): # class II mouse allele if len(name) < 2: raise AlleleParseError("Incomplete mouse MHC allele: %s" % original) gene_name = name[:2] name = name[2:] else: # class I mouse allele if len(name) < 1: raise AlleleParseError("Incomplete mouse MHC allele: %s" % original) gene_name = name[0] name = name[1:] _, name = parse_separator(name) if len(name) != 1: raise AlleleParseError( "Malformed mouse MHC allele: %s, parse error at %s" % ( original, name)) allele = name[0] return gene_name.upper(), allele.lower()
[ "def", "parse_mouse_allele_name", "(", "name", ")", ":", "original", "=", "name", "if", "name", ".", "upper", "(", ")", ".", "startswith", "(", "\"H2\"", ")", ":", "name", "=", "name", "[", "2", ":", "]", "elif", "name", ".", "upper", "(", ")", "."...
Parses mouse MHc alleles such as H2-Kd, H-2-Db, H2-IAb. Returns pair of (gene, allele_code).
[ "Parses", "mouse", "MHc", "alleles", "such", "as", "H2", "-", "Kd", "H", "-", "2", "-", "Db", "H2", "-", "IAb", ".", "Returns", "pair", "of", "(", "gene", "allele_code", ")", "." ]
python
train
tgsmith61591/pmdarima
pmdarima/utils/visualization.py
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/visualization.py#L178-L261
def plot_pacf(series, ax=None, lags=None, alpha=None, method='yw', use_vlines=True, title='Partial Autocorrelation', zero=True, vlines_kwargs=None, show=True, **kwargs): """Plot a series' partial auto-correlation as a line plot. A wrapper method for the statsmodels ``plot_pacf`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. lags : int, array-like or None, optional (default=None) int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used. alpha : scalar, optional (default=None) If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted. method : str, optional (default='yw') Specifies which method for the calculations to use. One of {'ywunbiased', 'ywmle', 'ols', 'ld', 'ldb', 'ldunbiased', 'ldbiased'}: - yw or ywunbiased : yule walker with bias correction in denominator for acovf. Default. - ywm or ywmle : yule walker without bias correction - ols - regression of time series on lags of it and on constant - ld or ldunbiased : Levinson-Durbin recursion with bias correction - ldb or ldbiased : Levinson-Durbin recursion without bias correction use_vlines : bool, optional (default=True) If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg. title : str, optional (default='Partial Autocorrelation') Title to place on plot. Default is 'Partial Autocorrelation' zero : bool, optional (default=True) Flag indicating whether to include the 0-lag autocorrelation. 
Default is True. vlines_kwargs : dict, optional (default=None) Optional dictionary of keyword arguments that are passed to vlines. show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. **kwargs : kwargs, optional Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> plot_pacf([1, 2, 3, 4], show=False) # doctest: +SKIP <matplotlib.figure.Figure object at 0x129df1630> Returns ------- plt : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object. """ _err_for_no_mpl() res = ppacf(x=series, ax=ax, lags=lags, alpha=alpha, method=method, use_vlines=use_vlines, title=title, zero=zero, vlines_kwargs=vlines_kwargs, **kwargs) return _show_or_return(res, show)
[ "def", "plot_pacf", "(", "series", ",", "ax", "=", "None", ",", "lags", "=", "None", ",", "alpha", "=", "None", ",", "method", "=", "'yw'", ",", "use_vlines", "=", "True", ",", "title", "=", "'Partial Autocorrelation'", ",", "zero", "=", "True", ",", ...
Plot a series' partial auto-correlation as a line plot. A wrapper method for the statsmodels ``plot_pacf`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. lags : int, array-like or None, optional (default=None) int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used. alpha : scalar, optional (default=None) If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted. method : str, optional (default='yw') Specifies which method for the calculations to use. One of {'ywunbiased', 'ywmle', 'ols', 'ld', 'ldb', 'ldunbiased', 'ldbiased'}: - yw or ywunbiased : yule walker with bias correction in denominator for acovf. Default. - ywm or ywmle : yule walker without bias correction - ols - regression of time series on lags of it and on constant - ld or ldunbiased : Levinson-Durbin recursion with bias correction - ldb or ldbiased : Levinson-Durbin recursion without bias correction use_vlines : bool, optional (default=True) If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg. title : str, optional (default='Partial Autocorrelation') Title to place on plot. Default is 'Partial Autocorrelation' zero : bool, optional (default=True) Flag indicating whether to include the 0-lag autocorrelation. Default is True. vlines_kwargs : dict, optional (default=None) Optional dictionary of keyword arguments that are passed to vlines. 
show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. **kwargs : kwargs, optional Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> plot_pacf([1, 2, 3, 4], show=False) # doctest: +SKIP <matplotlib.figure.Figure object at 0x129df1630> Returns ------- plt : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object.
[ "Plot", "a", "series", "partial", "auto", "-", "correlation", "as", "a", "line", "plot", "." ]
python
train
quantopian/pgcontents
pgcontents/query.py
https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/query.py#L419-L451
def rename_file(db, user_id, old_api_path, new_api_path): """ Rename a file. """ # Overwriting existing files is disallowed. if file_exists(db, user_id, new_api_path): raise FileExists(new_api_path) old_dir, old_name = split_api_filepath(old_api_path) new_dir, new_name = split_api_filepath(new_api_path) if old_dir != new_dir: raise ValueError( dedent( """ Can't rename object to new directory. Old Path: {old_api_path} New Path: {new_api_path} """.format( old_api_path=old_api_path, new_api_path=new_api_path ) ) ) db.execute( files.update().where( _file_where(user_id, old_api_path), ).values( name=new_name, created_at=func.now(), ) )
[ "def", "rename_file", "(", "db", ",", "user_id", ",", "old_api_path", ",", "new_api_path", ")", ":", "# Overwriting existing files is disallowed.", "if", "file_exists", "(", "db", ",", "user_id", ",", "new_api_path", ")", ":", "raise", "FileExists", "(", "new_api_...
Rename a file.
[ "Rename", "a", "file", "." ]
python
test
achimnol/aiotools
src/aiotools/server.py
https://github.com/achimnol/aiotools/blob/9efc66a01fbd287f70ee3a937203d466aac4a765/src/aiotools/server.py#L198-L220
def _main_ctxmgr(func): ''' A decorator wrapper for :class:`ServerMainContextManager` Usage example: .. code:: python @aiotools.main def mymain(): server_args = do_init() stop_sig = yield server_args if stop_sig == signal.SIGINT: do_graceful_shutdown() else: do_forced_shutdown() aiotools.start_server(..., main_ctxmgr=mymain, ...) ''' @functools.wraps(func) def helper(*args, **kwargs): return ServerMainContextManager(func, args, kwargs) return helper
[ "def", "_main_ctxmgr", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "helper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "ServerMainContextManager", "(", "func", ",", "args", ",", "kwargs", ")", ...
A decorator wrapper for :class:`ServerMainContextManager` Usage example: .. code:: python @aiotools.main def mymain(): server_args = do_init() stop_sig = yield server_args if stop_sig == signal.SIGINT: do_graceful_shutdown() else: do_forced_shutdown() aiotools.start_server(..., main_ctxmgr=mymain, ...)
[ "A", "decorator", "wrapper", "for", ":", "class", ":", "ServerMainContextManager" ]
python
train
has2k1/plotnine
plotnine/guides/guide_colorbar.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L134-L239
def draw(self): """ Draw guide Returns ------- out : matplotlib.offsetbox.Offsetbox A drawing of this legend """ obverse = slice(0, None) reverse = slice(None, None, -1) width = self.barwidth height = self.barheight nbars = len(self.bar) length = height direction = self.direction colors = self.bar['color'].tolist() labels = self.key['label'].tolist() themeable = self.theme.figure._themeable # When there is more than one guide, we keep # record of all of them using lists if 'legend_title' not in themeable: themeable['legend_title'] = [] if 'legend_text_colorbar' not in themeable: themeable['legend_text_colorbar'] = [] # .5 puts the ticks in the middle of the bars when # raster=False. So when raster=True the ticks are # in between interpolation points and the matching is # close though not exactly right. _from = self.bar['value'].min(), self.bar['value'].max() tick_locations = rescale(self.key['value'], (.5, nbars-.5), _from) * length/nbars if direction == 'horizontal': width, height = height, width length = width if self.reverse: colors = colors[::-1] labels = labels[::-1] tick_locations = length - tick_locations[::-1] # title # title_box = TextArea(self.title, textprops=dict(color='black')) themeable['legend_title'].append(title_box) # colorbar and ticks # da = ColoredDrawingArea(width, height, 0, 0) if self.raster: add_interpolated_colorbar(da, colors, direction) else: add_segmented_colorbar(da, colors, direction) if self.ticks: _locations = tick_locations if not self.draw_ulim: _locations = _locations[:-1] if not self.draw_llim: _locations = _locations[1:] add_ticks(da, _locations, direction) # labels # if self.label: labels_da, legend_text = create_labels(da, labels, tick_locations, direction) themeable['legend_text_colorbar'].extend(legend_text) else: labels_da = ColoredDrawingArea(0, 0) # colorbar + labels # if direction == 'vertical': packer, align = HPacker, 'bottom' align = 'center' else: packer, align = VPacker, 'right' align = 'center' slc = obverse if 
self.label_position == 'right' else reverse if self.label_position in ('right', 'bottom'): slc = obverse else: slc = reverse main_box = packer(children=[da, labels_da][slc], sep=self._label_margin, align=align, pad=0) # title + colorbar(with labels) # lookup = { 'right': (HPacker, reverse), 'left': (HPacker, obverse), 'bottom': (VPacker, reverse), 'top': (VPacker, obverse)} packer, slc = lookup[self.title_position] children = [title_box, main_box][slc] box = packer(children=children, sep=self._title_margin, align=self._title_align, pad=0) return box
[ "def", "draw", "(", "self", ")", ":", "obverse", "=", "slice", "(", "0", ",", "None", ")", "reverse", "=", "slice", "(", "None", ",", "None", ",", "-", "1", ")", "width", "=", "self", ".", "barwidth", "height", "=", "self", ".", "barheight", "nba...
Draw guide Returns ------- out : matplotlib.offsetbox.Offsetbox A drawing of this legend
[ "Draw", "guide" ]
python
train
dsoprea/PySecure
pysecure/adapters/sftpa.py
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L698-L709
def open(self): """This is the only way to open a file resource.""" self.__sf = _sftp_open(self.__sftp_session_int, self.__filepath, self.access_type_int, self.__create_mode) if self.access_type_is_append is True: self.seek(self.filesize) return SftpFileObject(self)
[ "def", "open", "(", "self", ")", ":", "self", ".", "__sf", "=", "_sftp_open", "(", "self", ".", "__sftp_session_int", ",", "self", ".", "__filepath", ",", "self", ".", "access_type_int", ",", "self", ".", "__create_mode", ")", "if", "self", ".", "access_...
This is the only way to open a file resource.
[ "This", "is", "the", "only", "way", "to", "open", "a", "file", "resource", "." ]
python
train
google/textfsm
textfsm/terminal.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L385-L404
def _Scroll(self, lines=None): """Set attributes to scroll the buffer correctly. Args: lines: An int, number of lines to scroll. If None, scrolls by the terminal length. """ if lines is None: lines = self._cli_lines if lines < 0: self._displayed -= self._cli_lines self._displayed += lines if self._displayed < 0: self._displayed = 0 self._lines_to_show = self._cli_lines else: self._lines_to_show = lines self._lastscroll = lines
[ "def", "_Scroll", "(", "self", ",", "lines", "=", "None", ")", ":", "if", "lines", "is", "None", ":", "lines", "=", "self", ".", "_cli_lines", "if", "lines", "<", "0", ":", "self", ".", "_displayed", "-=", "self", ".", "_cli_lines", "self", ".", "_...
Set attributes to scroll the buffer correctly. Args: lines: An int, number of lines to scroll. If None, scrolls by the terminal length.
[ "Set", "attributes", "to", "scroll", "the", "buffer", "correctly", "." ]
python
train
openstax/cnx-archive
cnxarchive/sitemap.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/sitemap.py#L153-L170
def generate(self, root_element=None): """Create <url> element under root_element.""" if root_element is not None: url = etree.SubElement(root_element, 'url') else: url = etree.Element('url') etree.SubElement(url, 'loc').text = self.loc if self.lastmod: if hasattr(self.lastmod, 'strftime'): etree.SubElement(url, 'lastmod').text = \ self.lastmod.strftime('%Y-%m-%d') elif isinstance(self.lastmod, str): etree.SubElement(url, 'lastmod').text = self.lastmod if self.changefreq and self.changefreq in self.freq_values: etree.SubElement(url, 'changefreq').text = self.changefreq if self.priority and 0.0 <= self.priority <= 1.0: etree.SubElement(url, 'priority').text = str(self.priority) return url
[ "def", "generate", "(", "self", ",", "root_element", "=", "None", ")", ":", "if", "root_element", "is", "not", "None", ":", "url", "=", "etree", ".", "SubElement", "(", "root_element", ",", "'url'", ")", "else", ":", "url", "=", "etree", ".", "Element"...
Create <url> element under root_element.
[ "Create", "<url", ">", "element", "under", "root_element", "." ]
python
train
ArchiveTeam/wpull
wpull/thirdparty/dammit.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/thirdparty/dammit.py#L383-L398
def _sub_ms_char(self, match): """Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.""" orig = match.group(1) if self.smart_quotes_to == 'ascii': sub = self.MS_CHARS_TO_ASCII.get(orig).encode() else: sub = self.MS_CHARS.get(orig) if type(sub) == tuple: if self.smart_quotes_to == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub
[ "def", "_sub_ms_char", "(", "self", ",", "match", ")", ":", "orig", "=", "match", ".", "group", "(", "1", ")", "if", "self", ".", "smart_quotes_to", "==", "'ascii'", ":", "sub", "=", "self", ".", "MS_CHARS_TO_ASCII", ".", "get", "(", "orig", ")", "."...
Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.
[ "Changes", "a", "MS", "smart", "quote", "character", "to", "an", "XML", "or", "HTML", "entity", "or", "an", "ASCII", "character", "." ]
python
train
atztogo/phonopy
phonopy/structure/cells.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/cells.py#L595-L741
def get_smallest_vectors(supercell_bases, supercell_pos, primitive_pos, symprec=1e-5): """Find shortest atomic pair vectors Note ---- Shortest vectors from an atom in primitive cell to an atom in supercell in the fractional coordinates of primitive cell. If an atom in supercell is on the border centered at an atom in primitive and there are multiple vectors that have the same distance (up to tolerance) and different directions, several shortest vectors are stored. In fact, this method is not limited to search shortest vectors between sueprcell atoms and primitive cell atoms, but can be used to measure shortest vectors between atoms in periodic supercell lattice frame. Parameters ---------- supercell_bases : ndarray Supercell basis vectors as row vectors, (a, b, c)^T. Must be order='C'. dtype='double' shape=(3, 3) supercell_pos : array_like Atomic positions in fractional coordinates of supercell. dtype='double' shape=(size_super, 3) primitive_pos : array_like Atomic positions in fractional coordinates of supercell. Note that not in fractional coodinates of primitive cell. dtype='double' shape=(size_prim, 3) symprec : float, optional, default=1e-5 Tolerance to find equal distances of vectors Returns ------- shortest_vectors : ndarray Shortest vectors in supercell coordinates. The 27 in shape is the possible maximum number of elements. 
dtype='double' shape=(size_super, size_prim, 27, 3) multiplicities : ndarray Number of equidistance shortest vectors dtype='intc' shape=(size_super, size_prim) """ reduced_bases = get_reduced_bases(supercell_bases, method='delaunay', tolerance=symprec) trans_mat_float = np.dot(supercell_bases, np.linalg.inv(reduced_bases)) trans_mat = np.rint(trans_mat_float).astype(int) assert (np.abs(trans_mat_float - trans_mat) < 1e-8).all() trans_mat_inv_float = np.linalg.inv(trans_mat) trans_mat_inv = np.rint(trans_mat_inv_float).astype(int) assert (np.abs(trans_mat_inv_float - trans_mat_inv) < 1e-8).all() # Reduce all positions into the cell formed by the reduced bases. supercell_fracs = np.dot(supercell_pos, trans_mat) supercell_fracs -= np.rint(supercell_fracs) supercell_fracs = np.array(supercell_fracs, dtype='double', order='C') primitive_fracs = np.dot(primitive_pos, trans_mat) primitive_fracs -= np.rint(primitive_fracs) primitive_fracs = np.array(primitive_fracs, dtype='double', order='C') # For each vector, we will need to consider all nearby images in the # reduced bases. lattice_points = np.array([[i, j, k] for i in (-1, 0, 1) for j in (-1, 0, 1) for k in (-1, 0, 1)], dtype='intc', order='C') # Here's where things get interesting. # We want to avoid manually iterating over all possible pairings of # supercell atoms and primitive atoms, because doing so creates a # tight loop in larger structures that is difficult to optimize. # # Furthermore, it seems wise to call numpy.dot on as large of an array # as possible, since numpy can shell out to BLAS to handle the # real heavy lifting. 
shortest_vectors = np.zeros( (len(supercell_fracs), len(primitive_fracs), 27, 3), dtype='double', order='C') multiplicity = np.zeros((len(supercell_fracs), len(primitive_fracs)), dtype='intc', order='C') import phonopy._phonopy as phonoc phonoc.gsv_set_smallest_vectors( shortest_vectors, multiplicity, supercell_fracs, primitive_fracs, lattice_points, np.array(reduced_bases.T, dtype='double', order='C'), np.array(trans_mat_inv.T, dtype='intc', order='C'), symprec) # # For every atom in the supercell and every atom in the primitive cell, # # we want 27 images of the vector between them. # # # # 'None' is used to insert trivial axes to make these arrays broadcast. # # # # shape: (size_super, size_prim, 27, 3) # candidate_fracs = ( # supercell_fracs[:, None, None, :] # shape: (size_super, 1, 1, 3) # - primitive_fracs[None, :, None, :] # shape: (1, size_prim, 1, 3) # + lattice_points[None, None, :, :] # shape: (1, 1, 27, 3) # ) # # To compute the lengths, we want cartesian positions. # # # # Conveniently, calling 'numpy.dot' between a 4D array and a 2D array # # does vector-matrix multiplication on each row vector in the last axis # # of the 4D array. # # # # shape: (size_super, size_prim, 27) # lengths = np.array(np.sqrt( # np.sum(np.dot(candidate_fracs, reduced_bases)**2, axis=-1)), # dtype='double', order='C') # # Create the output, initially consisting of all candidate vectors scaled # # by the primitive cell. # # # # shape: (size_super, size_prim, 27, 3) # candidate_vectors = np.array(np.dot(candidate_fracs, trans_mat_inv), # dtype='double', order='C') # # The last final bits are done in C. # # # # We will gather the shortest ones from each list of 27 vectors. 
# shortest_vectors = np.zeros_like(candidate_vectors, # dtype='double', order='C') # multiplicity = np.zeros(shortest_vectors.shape[:2], dtype='intc', # order='C') # import phonopy._phonopy as phonoc # phonoc.gsv_copy_smallest_vectors(shortest_vectors, # multiplicity, # candidate_vectors, # lengths, # symprec) return shortest_vectors, multiplicity
[ "def", "get_smallest_vectors", "(", "supercell_bases", ",", "supercell_pos", ",", "primitive_pos", ",", "symprec", "=", "1e-5", ")", ":", "reduced_bases", "=", "get_reduced_bases", "(", "supercell_bases", ",", "method", "=", "'delaunay'", ",", "tolerance", "=", "s...
Find shortest atomic pair vectors Note ---- Shortest vectors from an atom in primitive cell to an atom in supercell in the fractional coordinates of primitive cell. If an atom in supercell is on the border centered at an atom in primitive and there are multiple vectors that have the same distance (up to tolerance) and different directions, several shortest vectors are stored. In fact, this method is not limited to search shortest vectors between sueprcell atoms and primitive cell atoms, but can be used to measure shortest vectors between atoms in periodic supercell lattice frame. Parameters ---------- supercell_bases : ndarray Supercell basis vectors as row vectors, (a, b, c)^T. Must be order='C'. dtype='double' shape=(3, 3) supercell_pos : array_like Atomic positions in fractional coordinates of supercell. dtype='double' shape=(size_super, 3) primitive_pos : array_like Atomic positions in fractional coordinates of supercell. Note that not in fractional coodinates of primitive cell. dtype='double' shape=(size_prim, 3) symprec : float, optional, default=1e-5 Tolerance to find equal distances of vectors Returns ------- shortest_vectors : ndarray Shortest vectors in supercell coordinates. The 27 in shape is the possible maximum number of elements. dtype='double' shape=(size_super, size_prim, 27, 3) multiplicities : ndarray Number of equidistance shortest vectors dtype='intc' shape=(size_super, size_prim)
[ "Find", "shortest", "atomic", "pair", "vectors" ]
python
train