repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_java.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_java.py#L14-L37
def generate_enums(basename, xml):
    '''Generate one Java enum source file per enum in the MAVLink XML.

    basename -- output root directory; files are written to <basename>/enums
    xml      -- parsed MAVLink XML definition object providing the .enum list
    '''
    directory = os.path.join(basename, '''enums''')
    mavparse.mkdir_p(directory)
    for en in xml.enum:
        # One <EnumName>.java per enum, rendered from the inline template
        # below via the `t` template-writer module.
        f = open(os.path.join(directory, en.name+".java"), mode='w')
        # NOTE(review): the template text below lost its original line breaks
        # in this copy; the ${...} placeholders are the template variables.
        t.write(f, ''' /* AUTO-GENERATED FILE. DO NOT MODIFY. * * This class was automatically generated by the * java mavlink generator tool. It should not be modified by hand. */ package com.MAVLink.enums; /** * ${description} */ public class ${name} { ${{entry: public static final int ${name} = ${value}; /* ${description} |${{param:${description}| }} */ }} } ''', en)
        f.close()
[ "def", "generate_enums", "(", "basename", ",", "xml", ")", ":", "directory", "=", "os", ".", "path", ".", "join", "(", "basename", ",", "'''enums'''", ")", "mavparse", ".", "mkdir_p", "(", "directory", ")", "for", "en", "in", "xml", ".", "enum", ":", ...
generate main header per XML file
[ "generate", "main", "header", "per", "XML", "file" ]
python
train
twilio/twilio-python
twilio/rest/ip_messaging/v2/service/user/user_binding.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/user/user_binding.py#L62-L79
def list(self, binding_type=values.unset, limit=None, page_size=None):
    """
    Lists UserBindingInstance records from the API as a list.
    Unlike stream(), this operation is eager and will load `limit` records
    into memory before returning.

    :param UserBindingInstance.BindingType binding_type: The push technology
        used by the User Binding resources to read
    :param int limit: Upper limit for the number of records to return.
        list() guarantees never to return more than limit. Default is no limit
    :param int page_size: Number of records to fetch per request, when not
        set will use the default value of 50 records. If no page_size is
        defined but a limit is defined, list() will attempt to read the limit
        with the most efficient page size, i.e. min(limit, 1000)

    :returns: list of up to `limit` results
    :rtype: list[twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance]
    """
    # `list` here resolves to the builtin, not this method: inside the method
    # body the class namespace is not searched during name lookup.
    return list(self.stream(binding_type=binding_type, limit=limit, page_size=page_size, ))
[ "def", "list", "(", "self", ",", "binding_type", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "return", "list", "(", "self", ".", "stream", "(", "binding_type", "=", "binding_type", ",", "limit", "=",...
Lists UserBindingInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param UserBindingInstance.BindingType binding_type: The push technology used by the User Binding resources to read :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance]
[ "Lists", "UserBindingInstance", "records", "from", "the", "API", "as", "a", "list", ".", "Unlike", "stream", "()", "this", "operation", "is", "eager", "and", "will", "load", "limit", "records", "into", "memory", "before", "returning", "." ]
python
train
noobermin/lspreader
lspreader/dotlsp.py
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/dotlsp.py#L43-L92
def getpexts(lsp):
    '''
    Get information from pext planes. This might or might not work,
    use with caution!

    Parameters:
    -----------

    lsp : .lsp string

    Returns a dict keyed by plane number, each value a dict with
    'species', 'direction' and 'position' entries for that pext plane.
    Returns an empty list when no pext planes are present.
    '''
    lines = lsp.split('\n')
    # The regexes must run per-line; a single multi-line search doesn't work.
    extracts = [(i, int(re.search(r'^ *extract *([0-9]+)', line).group(1)))
                for i, line in enumerate(lines)
                if re.search(r'^ *extract *[0-9]+', line)]
    # Guard BEFORE unpacking: on Python 3, `lns, planens = zip(*[])` raises
    # ValueError, so the original `len(lns) == 0` check was unreachable.
    if not extracts:
        return []
    lns, planens = zip(*extracts)
    end = lns[-1]
    # Advance `end` past the plane-description block that follows the last
    # `extract` line, stopping at the next `[` section marker.
    for i, line in enumerate(lines[end + 1:]):
        if re.match(r' *\[', line):
            break
        end += i
    lineranges = zip(lns, (lns + (end,))[1:])
    planes = dict()
    # Parsing table: regex to recognize each field and how to convert it.
    labels = ['species', 'direction', 'position']
    datarx = [r'^ *species *([0-9]+)',
              r'^ *direction *([xXyYzZ])',
              r'^ *at *(.*)']
    convs = [int,
             lambda s: s,
             # On Python 3, np.array(map(float, ...)) would wrap the map
             # object in a 0-d object array; build a real float list instead.
             lambda s: np.array([float(tok) for tok in s.split(' ')])]
    for (start, stop), plane in zip(lineranges, planens):
        d = dict()
        for line in lines[start:stop]:
            for label, rx, conv in zip(labels, datarx, convs):
                m = re.match(rx, line)
                if m:
                    d[label] = conv(m.group(1))
        planes[plane] = d
    return planes
[ "def", "getpexts", "(", "lsp", ")", ":", "lines", "=", "lsp", ".", "split", "(", "'\\n'", ")", "#unfortunately regex doesn't work here", "lns", ",", "planens", "=", "zip", "(", "*", "[", "(", "i", ",", "int", "(", "re", ".", "search", "(", "'^ *extract...
Get information from pext planes. This might or might not work, use with caution! Parameters: ----------- lsp : .lsp string Returns a list of dicts with information for all pext planes
[ "Get", "information", "from", "pext", "planes", ".", "This", "might", "or", "might", "not", "work", "use", "with", "caution!" ]
python
train
uber/doubles
doubles/allowance.py
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/allowance.py#L165-L173
def with_args_validator(self, matching_function):
    """Register a custom function for testing stub arguments.

    :param func matching_function: The function used to test arguments
        passed to the stub.
    :return: self, to allow fluent chaining.
    """
    # A custom matcher supersedes any previously recorded args/kwargs.
    self.args, self.kwargs = None, None
    self._custom_matcher = matching_function
    return self
[ "def", "with_args_validator", "(", "self", ",", "matching_function", ")", ":", "self", ".", "args", "=", "None", "self", ".", "kwargs", "=", "None", "self", ".", "_custom_matcher", "=", "matching_function", "return", "self" ]
Define a custom function for testing arguments :param func matching_function: The function used to test arguments passed to the stub.
[ "Define", "a", "custom", "function", "for", "testing", "arguments" ]
python
train
n1analytics/python-paillier
phe/paillier.py
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/paillier.py#L349-L353
def h_function(self, x, xsquare):
    """Computes the h-function as defined in Paillier's paper page 12,
    'Decryption using Chinese-remaindering'.
    """
    # h(x) = L(g^(x-1) mod x^2, x)^-1 mod x, computed in two steps for
    # readability.
    g_to_x = powmod(self.public_key.g, x - 1, xsquare)
    return invert(self.l_function(g_to_x, x), x)
[ "def", "h_function", "(", "self", ",", "x", ",", "xsquare", ")", ":", "return", "invert", "(", "self", ".", "l_function", "(", "powmod", "(", "self", ".", "public_key", ".", "g", ",", "x", "-", "1", ",", "xsquare", ")", ",", "x", ")", ",", "x", ...
Computes the h-function as defined in Paillier's paper page 12, 'Decryption using Chinese-remaindering'.
[ "Computes", "the", "h", "-", "function", "as", "defined", "in", "Paillier", "s", "paper", "page", "12", "Decryption", "using", "Chinese", "-", "remaindering", "." ]
python
train
dw/mitogen
mitogen/select.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/select.py#L212-L231
def add(self, recv):
    """
    Add a :class:`mitogen.core.Receiver`, :class:`Select` or
    :class:`mitogen.core.Latch` to the select.

    :raises mitogen.select.Error:
        An attempt was made to add a :class:`Select` to which this select
        is indirectly a member of, or the receiver is already owned by
        another select.
    """
    if isinstance(recv, Select):
        recv._check_no_loop(self)

    # Validate ownership BEFORE mutating: the original appended first, so a
    # raised Error left the rejected receiver registered in self._receivers.
    if recv.notify is not None:
        raise Error(self.owned_msg)

    self._receivers.append(recv)
    recv.notify = self._put
    # Avoid race by polling once after installation.
    if not recv.empty():
        self._put(recv)
[ "def", "add", "(", "self", ",", "recv", ")", ":", "if", "isinstance", "(", "recv", ",", "Select", ")", ":", "recv", ".", "_check_no_loop", "(", "self", ")", "self", ".", "_receivers", ".", "append", "(", "recv", ")", "if", "recv", ".", "notify", "i...
Add a :class:`mitogen.core.Receiver`, :class:`Select` or :class:`mitogen.core.Latch` to the select. :raises mitogen.select.Error: An attempt was made to add a :class:`Select` to which this select is indirectly a member of.
[ "Add", "a", ":", "class", ":", "mitogen", ".", "core", ".", "Receiver", ":", "class", ":", "Select", "or", ":", "class", ":", "mitogen", ".", "core", ".", "Latch", "to", "the", "select", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/compiler_frontend.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/compiler_frontend.py#L583-L626
def _compile_fragment_ast(schema, current_schema_type, ast, location, context):
    """Return a list of basic blocks corresponding to the inline fragment at this AST node.

    Args:
        schema: GraphQL schema object, obtained from the graphql library
        current_schema_type: GraphQLType, the schema type at the current location
        ast: GraphQL AST node, obtained from the graphql library.
        location: Location object representing the current location in the query
        context: dict, various per-compilation data (e.g. declared tags, whether the
                 current block is optional, etc.). May be mutated in-place in this function!

    Returns:
        list of basic blocks, the compiled output of the vertex AST node
    """
    query_metadata_table = context['metadata']

    # step F-2. Emit a type coercion block if appropriate,
    # then recurse into the fragment's selection.
    coerces_to_type_name = ast.type_condition.name.value
    coerces_to_type_obj = schema.get_type(coerces_to_type_name)

    basic_blocks = []

    # Check if the coercion is necessary.
    # No coercion is necessary if coercing to the current type of the scope,
    # or if the scope is of union type, to the base type of the union as defined by
    # the type_equivalence_hints compilation parameter.
    is_same_type_as_scope = current_schema_type.is_same_type(coerces_to_type_obj)
    equivalent_union_type = context['type_equivalence_hints'].get(coerces_to_type_obj, None)
    is_base_type_of_union = (
        isinstance(current_schema_type, GraphQLUnionType) and
        current_schema_type.is_same_type(equivalent_union_type)
    )

    if not (is_same_type_as_scope or is_base_type_of_union):
        # Coercion is required.
        query_metadata_table.record_coercion_at_location(location, coerces_to_type_obj)
        # CoerceType takes a set of type names; here always a single-element set.
        basic_blocks.append(blocks.CoerceType({coerces_to_type_name}))

    # Recurse into the fragment's own selection set under the coerced type.
    inner_basic_blocks = _compile_ast_node_to_ir(
        schema, coerces_to_type_obj, ast, location, context)
    basic_blocks.extend(inner_basic_blocks)

    return basic_blocks
[ "def", "_compile_fragment_ast", "(", "schema", ",", "current_schema_type", ",", "ast", ",", "location", ",", "context", ")", ":", "query_metadata_table", "=", "context", "[", "'metadata'", "]", "# step F-2. Emit a type coercion block if appropriate,", "# then recu...
Return a list of basic blocks corresponding to the inline fragment at this AST node. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: GraphQL AST node, obtained from the graphql library. location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: list of basic blocks, the compiled output of the vertex AST node
[ "Return", "a", "list", "of", "basic", "blocks", "corresponding", "to", "the", "inline", "fragment", "at", "this", "AST", "node", "." ]
python
train
frnsys/broca
broca/vectorize/doc2vec.py
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L209-L241
def _add_new_labels(self, sentences):
    '''
    Adds new sentences to the internal indexing of the model.

    Args:
        sentences (list): LabeledSentences for each doc to be added

    Returns:
        int: number of sentences added to the model
    '''
    sentence_no = -1
    total_words = 0
    vocab = self.model.vocab
    # Number of document labels already present in the vocabulary.
    # NOTE(review): assumes doc labels are prefixed with "DOC_" — confirm
    # against how labels are generated elsewhere in this module.
    model_sentence_n = len([l for l in vocab if l.startswith("DOC_")])
    n_sentences = 0
    for sentence_no, sentence in enumerate(sentences):
        sentence_length = len(sentence.words)
        for label in sentence.labels:
            total_words += 1
            if label in vocab:
                # Label already indexed: just bump its count.
                vocab[label].count += sentence_length
            else:
                # New label: register it as a pseudo-word in the vocabulary
                # so doc2vec treats it like any other token.
                vocab[label] = gensim.models.word2vec.Vocab(
                    count=sentence_length)
                vocab[label].index = len(self.model.vocab) - 1
                vocab[label].code = [0]
                vocab[label].sample_probability = 1.
                self.model.index2word.append(label)
                n_sentences += 1
    return n_sentences
[ "def", "_add_new_labels", "(", "self", ",", "sentences", ")", ":", "sentence_no", "=", "-", "1", "total_words", "=", "0", "vocab", "=", "self", ".", "model", ".", "vocab", "model_sentence_n", "=", "len", "(", "[", "l", "for", "l", "in", "vocab", "if", ...
Adds new sentences to the internal indexing of the model. Args: sentences (list): LabeledSentences for each doc to be added Returns: int: number of sentences added to the model
[ "Adds", "new", "sentences", "to", "the", "internal", "indexing", "of", "the", "model", "." ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L6535-L6549
def get_instructions(self):
    """
    Yield each :class:`Instruction` of this method.

    The first call disassembles via :class:`LinearSweepAlgorithm` and
    memoizes the result in ``self.cached_instructions``; later calls
    replay the cached list.

    :rtype: generator of :class:`Instruction`
    """
    if self.cached_instructions is None:
        # No cache yet: disassemble once and keep the result around so the
        # linear sweep is not repeated on later calls.
        sweeper = LinearSweepAlgorithm()
        decoded = sweeper.get_instructions(self.CM, self.size, self.insn, self.idx)
        self.cached_instructions = list(decoded)
    for instruction in self.cached_instructions:
        yield instruction
[ "def", "get_instructions", "(", "self", ")", ":", "# it is possible to a cache for instructions (avoid a new disasm)", "if", "self", ".", "cached_instructions", "is", "None", ":", "lsa", "=", "LinearSweepAlgorithm", "(", ")", "ins", "=", "lsa", ".", "get_instructions", ...
Get the instructions :rtype: a generator of each :class:`Instruction` (or a cached list of instructions if you have setup instructions)
[ "Get", "the", "instructions" ]
python
train
barrust/mediawiki
mediawiki/mediawiki.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L903-L907
def _post_response(self, params):
    """ wrap a post call to the requests package """
    # POST the form data to the configured API endpoint and decode the
    # JSON body of the response.
    response = self._session.post(
        self._api_url, data=params, timeout=self._timeout
    )
    return response.json(encoding="utf8")
[ "def", "_post_response", "(", "self", ",", "params", ")", ":", "return", "self", ".", "_session", ".", "post", "(", "self", ".", "_api_url", ",", "data", "=", "params", ",", "timeout", "=", "self", ".", "_timeout", ")", ".", "json", "(", "encoding", ...
wrap a post call to the requests package
[ "wrap", "a", "post", "call", "to", "the", "requests", "package" ]
python
train
lepture/flask-oauthlib
flask_oauthlib/contrib/oauth2.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/contrib/oauth2.py#L43-L52
def delete(self):
    """Removes itself from the cache

    Note: This is required by the oauthlib
    """
    message = "Deleting grant %s for client %s" % (self.code, self.client_id)
    log.debug(message)
    self._cache.delete(self.key)
    return None
[ "def", "delete", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Deleting grant %s for client %s\"", "%", "(", "self", ".", "code", ",", "self", ".", "client_id", ")", ")", "self", ".", "_cache", ".", "delete", "(", "self", ".", "key", ")", "return...
Removes itself from the cache Note: This is required by the oauthlib
[ "Removes", "itself", "from", "the", "cache" ]
python
test
jobovy/galpy
galpy/potential/SteadyLogSpiralPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SteadyLogSpiralPotential.py#L95-L123
def _evaluate(self,R,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,phi,t INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: Phi(R,phi,t) HISTORY: 2011-03-27 - Started - Bovy (NYU) """ if not self._tform is None: if t < self._tform: smooth= 0. elif t < self._tsteady: deltat= t-self._tform xi= 2.*deltat/(self._tsteady-self._tform)-1. smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5) else: #spiral is fully on smooth= 1. else: smooth= 1. return smooth*self._A/self._alpha*math.cos(self._alpha*math.log(R) -self._m*(phi-self._omegas*t -self._gamma))
[ "def", "_evaluate", "(", "self", ",", "R", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "if", "not", "self", ".", "_tform", "is", "None", ":", "if", "t", "<", "self", ".", "_tform", ":", "smooth", "=", "0.", "elif", "t", "<", "self",...
NAME: _evaluate PURPOSE: evaluate the potential at R,phi,t INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: Phi(R,phi,t) HISTORY: 2011-03-27 - Started - Bovy (NYU)
[ "NAME", ":", "_evaluate", "PURPOSE", ":", "evaluate", "the", "potential", "at", "R", "phi", "t", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "Phi", "(", "R", "phi", "t", ")"...
python
train
IdentityPython/pysaml2
src/saml2/cryptography/symmetric.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/cryptography/symmetric.py#L68-L75
def _deprecation_notice(cls): """Warn about deprecation of this class.""" _deprecation_msg = ( '{name} {type} is deprecated. ' 'It will be removed in the next version. ' 'Use saml2.cryptography.symmetric instead.' ).format(name=cls.__name__, type=type(cls).__name__) _warnings.warn(_deprecation_msg, DeprecationWarning)
[ "def", "_deprecation_notice", "(", "cls", ")", ":", "_deprecation_msg", "=", "(", "'{name} {type} is deprecated. '", "'It will be removed in the next version. '", "'Use saml2.cryptography.symmetric instead.'", ")", ".", "format", "(", "name", "=", "cls", ".", "__name__", ",...
Warn about deprecation of this class.
[ "Warn", "about", "deprecation", "of", "this", "class", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/layers/recurrent.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L849-L889
def retrieve_seq_length_op(data):
    """An op to compute the length of a sequence from input shape of
    [batch_size, n_step(max), n_features], it can be used when the features
    of padding (on right hand side) are all zeros.

    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max), n_features] with zero padding on right hand side.

    Examples
    ---------
    >>> data = [[[1],[2],[0],[0],[0]],
    ...         [[1],[2],[3],[0],[0]],
    ...         [[1],[2],[6],[1],[0]]]
    >>> data = np.asarray(data)
    >>> print(data.shape)
    (3, 5, 1)
    >>> data = tf.constant(data)
    >>> sl = retrieve_seq_length_op(data)
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> y = sl.eval()
    [2 3 4]

    Multiple features
    >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
    ...         [[2,3],[2,4],[3,2],[0,0],[0,0]],
    ...         [[3,3],[2,2],[5,3],[1,2],[0,0]]]
    >>> print(sl)
    [4 3 4]

    References
    ------------
    Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.

    """
    with tf.name_scope('GetLength'):
        # A timestep is "used" iff any feature is non-zero: max(|x|) over the
        # feature axis (axis 2) is positive, so sign() yields 1.0, else 0.0.
        used = tf.sign(tf.reduce_max(tf.abs(data), 2))
        # Summing the 0/1 mask over the time axis (axis 1) gives the length.
        length = tf.reduce_sum(used, 1)
        return tf.cast(length, tf.int32)
[ "def", "retrieve_seq_length_op", "(", "data", ")", ":", "with", "tf", ".", "name_scope", "(", "'GetLength'", ")", ":", "used", "=", "tf", ".", "sign", "(", "tf", ".", "reduce_max", "(", "tf", ".", "abs", "(", "data", ")", ",", "2", ")", ")", "lengt...
An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], it can be used when the features of padding (on right hand side) are all zeros. Parameters ----------- data : tensor [batch_size, n_step(max), n_features] with zero padding on right hand side. Examples --------- >>> data = [[[1],[2],[0],[0],[0]], ... [[1],[2],[3],[0],[0]], ... [[1],[2],[6],[1],[0]]] >>> data = np.asarray(data) >>> print(data.shape) (3, 5, 1) >>> data = tf.constant(data) >>> sl = retrieve_seq_length_op(data) >>> sess = tf.InteractiveSession() >>> tl.layers.initialize_global_variables(sess) >>> y = sl.eval() [2 3 4] Multiple features >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], ... [[2,3],[2,4],[3,2],[0,0],[0,0]], ... [[3,3],[2,2],[5,3],[1,2],[0,0]]] >>> print(sl) [4 3 4] References ------------ Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.
[ "An", "op", "to", "compute", "the", "length", "of", "a", "sequence", "from", "input", "shape", "of", "[", "batch_size", "n_step", "(", "max", ")", "n_features", "]", "it", "can", "be", "used", "when", "the", "features", "of", "padding", "(", "on", "rig...
python
valid
saltstack/salt
salt/states/firewalld.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/firewalld.py#L118-L126
def todict(self):
    '''
    Returns a pretty dictionary meant for command line output.
    '''
    result = {}
    result['Source port'] = self.srcport
    result['Destination port'] = self.destport
    result['Protocol'] = self.protocol
    result['Destination address'] = self.destaddr
    return result
[ "def", "todict", "(", "self", ")", ":", "return", "{", "'Source port'", ":", "self", ".", "srcport", ",", "'Destination port'", ":", "self", ".", "destport", ",", "'Protocol'", ":", "self", ".", "protocol", ",", "'Destination address'", ":", "self", ".", "...
Returns a pretty dictionary meant for command line output.
[ "Returns", "a", "pretty", "dictionary", "meant", "for", "command", "line", "output", "." ]
python
train
mdeous/fatbotslim
fatbotslim/cli.py
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/cli.py#L37-L96
def make_parser():
    """
    Creates an argument parser configured with options to run a bot from
    the command line.

    :return: configured argument parser
    :rtype: :class:`argparse.ArgumentParser`
    """
    parser = ArgumentParser(
        description='Start an IRC bot instance from the command line.',
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    # Table of (flags, keyword arguments) registered in order below.
    options = [
        (('-v', '--version'),
         dict(action='version', version='{0} v{1}'.format(NAME, VERSION))),
        (('-s', '--server'),
         dict(metavar='HOST', required=True, help='the host to connect to')),
        (('-p', '--port'),
         dict(metavar='PORT', type=int, default=6667,
              help='the port the server is listening on')),
        (('-n', '--nick'),
         dict(metavar='NAME', required=True, help="the bot's nickname")),
        (('-N', '--name'),
         dict(metavar='NAME', default=NAME, help="the bot's real name")),
        (('-c', '--channels'),
         dict(metavar='CHAN', nargs='*',
              help='join this channel upon connection')),
        (('-l', '--log'),
         dict(metavar='LEVEL', default='INFO',
              help='minimal level for displayed logging messages')),
        (('-S', '--ssl'),
         dict(action='store_true', help='connect to the server using SSL')),
    ]
    for flags, params in options:
        parser.add_argument(*flags, **params)
    return parser
[ "def", "make_parser", "(", ")", ":", "parser", "=", "ArgumentParser", "(", "description", "=", "'Start an IRC bot instance from the command line.'", ",", "formatter_class", "=", "ArgumentDefaultsHelpFormatter", ",", ")", "parser", ".", "add_argument", "(", "'-v'", ",", ...
Creates an argument parser configured with options to run a bot from the command line. :return: configured argument parser :rtype: :class:`argparse.ArgumentParser`
[ "Creates", "an", "argument", "parser", "configured", "with", "options", "to", "run", "a", "bot", "from", "the", "command", "line", "." ]
python
train
pvlib/pvlib-python
pvlib/solarposition.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/solarposition.py#L123-L230
def spa_c(time, latitude, longitude, pressure=101325, altitude=0,
          temperature=12, delta_t=67.0,
          raw_spa_output=False):
    """
    Calculate the solar position using the C implementation of the
    NREL SPA code.

    The source files for this code are located in './spa_c_files/',
    along with a README file which describes how the C code is wrapped
    in Python. Due to license restrictions, the C code must be downloaded
    seperately and used in accordance with it's license.

    This function is slower and no more accurate than :py:func:`spa_python`.

    Parameters
    ----------
    time : pandas.DatetimeIndex
        Localized or UTC.
    latitude : float
    longitude : float
    pressure : float, default 101325
        Pressure in Pascals
    altitude : float, default 0
        Elevation above sea level.
    temperature : float, default 12
        Temperature in C
    delta_t : float, default 67.0
        Difference between terrestrial time and UT1.
        USNO has previous values and predictions.
    raw_spa_output : bool, default False
        If true, returns the raw SPA output.

    Returns
    -------
    DataFrame
        The DataFrame will have the following columns:
        elevation, azimuth, zenith, apparent_elevation, apparent_zenith.

    References
    ----------
    NREL SPA reference: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
    NREL SPA C files: https://midcdmz.nrel.gov/spa/

    Note: The ``timezone`` field in the SPA C files is replaced with
    ``time_zone`` to avoid a nameclash with the function ``__timezone`` that
    is redefined by Python>=3.5. This issue is
    `Python bug 24643 <https://bugs.python.org/issue24643>`_.

    USNO delta T:
    http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term

    See also
    --------
    pyephem, spa_python, ephemeris
    """
    # Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
    # Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
    # Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
    try:
        from pvlib.spa_c_files.spa_py import spa_calc
    except ImportError:
        raise ImportError('Could not import built-in SPA calculator. ' +
                          'You may need to recompile the SPA code.')

    # if localized, convert to UTC. otherwise, assume UTC.
    try:
        time_utc = time.tz_convert('UTC')
    except TypeError:
        time_utc = time

    spa_out = []

    # The C wrapper is scalar, so iterate the index one timestamp at a time.
    for date in time_utc:
        spa_out.append(spa_calc(year=date.year,
                                month=date.month,
                                day=date.day,
                                hour=date.hour,
                                minute=date.minute,
                                second=date.second,
                                time_zone=0,  # date uses utc time
                                latitude=latitude,
                                longitude=longitude,
                                elevation=altitude,
                                # SPA expects millibar, input is Pascals.
                                pressure=pressure / 100,
                                temperature=temperature,
                                delta_t=delta_t
                                ))

    spa_df = pd.DataFrame(spa_out, index=time)

    if raw_spa_output:
        # rename "time_zone" from raw output from spa_c_files.spa_py.spa_calc()
        # to "timezone" to match the API of pvlib.solarposition.spa_c()
        return spa_df.rename(columns={'time_zone': 'timezone'})
    else:
        dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
                              'apparent_zenith': spa_df['zenith'],
                              'apparent_elevation': spa_df['e'],
                              'elevation': spa_df['e0'],
                              'zenith': 90 - spa_df['e0']})

        return dfout
[ "def", "spa_c", "(", "time", ",", "latitude", ",", "longitude", ",", "pressure", "=", "101325", ",", "altitude", "=", "0", ",", "temperature", "=", "12", ",", "delta_t", "=", "67.0", ",", "raw_spa_output", "=", "False", ")", ":", "# Added by Rob Andrews (@...
Calculate the solar position using the C implementation of the NREL SPA code. The source files for this code are located in './spa_c_files/', along with a README file which describes how the C code is wrapped in Python. Due to license restrictions, the C code must be downloaded seperately and used in accordance with it's license. This function is slower and no more accurate than :py:func:`spa_python`. Parameters ---------- time : pandas.DatetimeIndex Localized or UTC. latitude : float longitude : float pressure : float, default 101325 Pressure in Pascals altitude : float, default 0 Elevation above sea level. temperature : float, default 12 Temperature in C delta_t : float, default 67.0 Difference between terrestrial time and UT1. USNO has previous values and predictions. raw_spa_output : bool, default False If true, returns the raw SPA output. Returns ------- DataFrame The DataFrame will have the following columns: elevation, azimuth, zenith, apparent_elevation, apparent_zenith. References ---------- NREL SPA reference: http://rredc.nrel.gov/solar/codesandalgorithms/spa/ NREL SPA C files: https://midcdmz.nrel.gov/spa/ Note: The ``timezone`` field in the SPA C files is replaced with ``time_zone`` to avoid a nameclash with the function ``__timezone`` that is redefined by Python>=3.5. This issue is `Python bug 24643 <https://bugs.python.org/issue24643>`_. USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term See also -------- pyephem, spa_python, ephemeris
[ "Calculate", "the", "solar", "position", "using", "the", "C", "implementation", "of", "the", "NREL", "SPA", "code", "." ]
python
train
smarie/python-parsyfiles
parsyfiles/converting_core.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/converting_core.py#L241-L275
def are_worth_chaining(left_converter, right_converter) -> bool: """ Utility method to check if it makes sense to chain these two converters. Returns True if it brings value to chain the first converter with the second converter. To bring value, * the second converter's input should not be a parent class of the first converter's input (in that case, it is always more interesting to use the second converter directly for any potential input) * the second converter's output should not be a parent class of the first converter's input or output. Otherwise the chain does not even make any progress :) * The first converter has to allow chaining (with converter.can_chain=True) :param left_converter: :param right_converter: :return: """ if not left_converter.can_chain: return False elif not is_any_type(left_converter.to_type) and is_any_type(right_converter.to_type): # we gain the capability to generate any type. So it is interesting. return True elif issubclass(left_converter.from_type, right_converter.to_type) \ or issubclass(left_converter.to_type, right_converter.to_type) \ or issubclass(left_converter.from_type, right_converter.from_type): # Not interesting : the outcome of the chain would be not better than one of the converters alone return False # Note: we dont say that chaining a generic converter with a converter is useless. Indeed it might unlock some # capabilities for the user (new file extensions, etc.) that would not be available with the generic parser # targetting to_type alone. For example parsing object A from its constructor then converting A to B might # sometimes be interesting, rather than parsing B from its constructor else: # interesting return True
[ "def", "are_worth_chaining", "(", "left_converter", ",", "right_converter", ")", "->", "bool", ":", "if", "not", "left_converter", ".", "can_chain", ":", "return", "False", "elif", "not", "is_any_type", "(", "left_converter", ".", "to_type", ")", "and", "is_any_...
Utility method to check if it makes sense to chain these two converters. Returns True if it brings value to chain the first converter with the second converter. To bring value, * the second converter's input should not be a parent class of the first converter's input (in that case, it is always more interesting to use the second converter directly for any potential input) * the second converter's output should not be a parent class of the first converter's input or output. Otherwise the chain does not even make any progress :) * The first converter has to allow chaining (with converter.can_chain=True) :param left_converter: :param right_converter: :return:
[ "Utility", "method", "to", "check", "if", "it", "makes", "sense", "to", "chain", "these", "two", "converters", ".", "Returns", "True", "if", "it", "brings", "value", "to", "chain", "the", "first", "converter", "with", "the", "second", "converter", ".", "To...
python
train
xhtml2pdf/xhtml2pdf
xhtml2pdf/util.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/util.py#L372-L407
def getFrameDimensions(data, page_width, page_height): """Calculate dimensions of a frame Returns left, top, width and height of the frame in points. """ box = data.get("-pdf-frame-box", []) if len(box) == 4: return [getSize(x) for x in box] top = getSize(data.get("top", 0)) left = getSize(data.get("left", 0)) bottom = getSize(data.get("bottom", 0)) right = getSize(data.get("right", 0)) if "height" in data: height = getSize(data["height"]) if "top" in data: top = getSize(data["top"]) bottom = page_height - (top + height) elif "bottom" in data: bottom = getSize(data["bottom"]) top = page_height - (bottom + height) if "width" in data: width = getSize(data["width"]) if "left" in data: left = getSize(data["left"]) right = page_width - (left + width) elif "right" in data: right = getSize(data["right"]) left = page_width - (right + width) top += getSize(data.get("margin-top", 0)) left += getSize(data.get("margin-left", 0)) bottom += getSize(data.get("margin-bottom", 0)) right += getSize(data.get("margin-right", 0)) width = page_width - (left + right) height = page_height - (top + bottom) return left, top, width, height
[ "def", "getFrameDimensions", "(", "data", ",", "page_width", ",", "page_height", ")", ":", "box", "=", "data", ".", "get", "(", "\"-pdf-frame-box\"", ",", "[", "]", ")", "if", "len", "(", "box", ")", "==", "4", ":", "return", "[", "getSize", "(", "x"...
Calculate dimensions of a frame Returns left, top, width and height of the frame in points.
[ "Calculate", "dimensions", "of", "a", "frame" ]
python
train
rigetti/pyquil
pyquil/operator_estimation.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/operator_estimation.py#L903-L919
def _ops_bool_to_prog(ops_bool: Tuple[bool], qubits: List[int]) -> Program: """ :param ops_bool: tuple of booleans specifying the operation to be carried out on `qubits` :param qubits: list specifying the qubits to be carried operations on :return: Program with the operations specified in `ops_bool` on the qubits specified in `qubits` """ assert len(ops_bool) == len(qubits), "Mismatch of qubits and operations" prog = Program() for i, op_bool in enumerate(ops_bool): if op_bool == 0: continue elif op_bool == 1: prog += Program(X(qubits[i])) else: raise ValueError("ops_bool should only consist of 0s and/or 1s") return prog
[ "def", "_ops_bool_to_prog", "(", "ops_bool", ":", "Tuple", "[", "bool", "]", ",", "qubits", ":", "List", "[", "int", "]", ")", "->", "Program", ":", "assert", "len", "(", "ops_bool", ")", "==", "len", "(", "qubits", ")", ",", "\"Mismatch of qubits and op...
:param ops_bool: tuple of booleans specifying the operation to be carried out on `qubits` :param qubits: list specifying the qubits to be carried operations on :return: Program with the operations specified in `ops_bool` on the qubits specified in `qubits`
[ ":", "param", "ops_bool", ":", "tuple", "of", "booleans", "specifying", "the", "operation", "to", "be", "carried", "out", "on", "qubits", ":", "param", "qubits", ":", "list", "specifying", "the", "qubits", "to", "be", "carried", "operations", "on", ":", "r...
python
train
fossasia/knittingpattern
knittingpattern/convert/InstructionSVGCache.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/InstructionSVGCache.py#L56-L73
def to_svg(self, instruction_or_id, i_promise_not_to_change_the_result=False): """Return the SVG for an instruction. :param instruction_or_id: either an :class:`~knittingpattern.Instruction.Instruction` or an id returned by :meth:`get_instruction_id` :param bool i_promise_not_to_change_the_result: - :obj:`False`: the result is copied, you can alter it. - :obj:`True`: the result is directly from the cache. If you change the result, other calls of this function get the changed result. :return: an SVGDumper :rtype: knittingpattern.Dumper.SVGDumper """ return self._new_svg_dumper(lambda: self.instruction_to_svg_dict( instruction_or_id, not i_promise_not_to_change_the_result))
[ "def", "to_svg", "(", "self", ",", "instruction_or_id", ",", "i_promise_not_to_change_the_result", "=", "False", ")", ":", "return", "self", ".", "_new_svg_dumper", "(", "lambda", ":", "self", ".", "instruction_to_svg_dict", "(", "instruction_or_id", ",", "not", "...
Return the SVG for an instruction. :param instruction_or_id: either an :class:`~knittingpattern.Instruction.Instruction` or an id returned by :meth:`get_instruction_id` :param bool i_promise_not_to_change_the_result: - :obj:`False`: the result is copied, you can alter it. - :obj:`True`: the result is directly from the cache. If you change the result, other calls of this function get the changed result. :return: an SVGDumper :rtype: knittingpattern.Dumper.SVGDumper
[ "Return", "the", "SVG", "for", "an", "instruction", "." ]
python
valid
log2timeline/plaso
plaso/engine/profilers.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/profilers.py#L32-L36
def SampleStart(self): """Starts measuring the CPU time.""" self._start_cpu_time = time.clock() self.start_sample_time = time.time() self.total_cpu_time = 0
[ "def", "SampleStart", "(", "self", ")", ":", "self", ".", "_start_cpu_time", "=", "time", ".", "clock", "(", ")", "self", ".", "start_sample_time", "=", "time", ".", "time", "(", ")", "self", ".", "total_cpu_time", "=", "0" ]
Starts measuring the CPU time.
[ "Starts", "measuring", "the", "CPU", "time", "." ]
python
train
neurodata/ndio
ndio/remote/metadata.py
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/metadata.py#L44-L55
def get_public_tokens(self): """ Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens """ r = self.remote_utils.get_url(self.url() + "public_tokens/") return r.json()
[ "def", "get_public_tokens", "(", "self", ")", ":", "r", "=", "self", ".", "remote_utils", ".", "get_url", "(", "self", ".", "url", "(", ")", "+", "\"public_tokens/\"", ")", "return", "r", ".", "json", "(", ")" ]
Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens
[ "Get", "a", "list", "of", "public", "tokens", "available", "on", "this", "server", "." ]
python
test
IAMconsortium/pyam
pyam/core.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1072-L1082
def export_metadata(self, path): """Export metadata to Excel Parameters ---------- path: string path/filename for xlsx file of metadata export """ writer = pd.ExcelWriter(path) write_sheet(writer, 'meta', self.meta, index=True) writer.save()
[ "def", "export_metadata", "(", "self", ",", "path", ")", ":", "writer", "=", "pd", ".", "ExcelWriter", "(", "path", ")", "write_sheet", "(", "writer", ",", "'meta'", ",", "self", ".", "meta", ",", "index", "=", "True", ")", "writer", ".", "save", "("...
Export metadata to Excel Parameters ---------- path: string path/filename for xlsx file of metadata export
[ "Export", "metadata", "to", "Excel" ]
python
train
kubernetes-client/python
kubernetes/client/apis/rbac_authorization_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/rbac_authorization_v1_api.py#L3208-L3231
def replace_cluster_role(self, name, body, **kwargs): """ replace the specified ClusterRole This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_cluster_role(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ClusterRole (required) :param V1ClusterRole body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1ClusterRole If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_cluster_role_with_http_info(name, body, **kwargs) else: (data) = self.replace_cluster_role_with_http_info(name, body, **kwargs) return data
[ "def", "replace_cluster_role", "(", "self", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "replac...
replace the specified ClusterRole This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_cluster_role(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ClusterRole (required) :param V1ClusterRole body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1ClusterRole If the method is called asynchronously, returns the request thread.
[ "replace", "the", "specified", "ClusterRole", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api"...
python
train
scanny/python-pptx
lab/parse_xsd/parse_xsd.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/lab/parse_xsd/parse_xsd.py#L91-L100
def getdef(self, defname, tag='*'): """Return definition element with name *defname*""" if defname.startswith('a:'): defname = defname[2:] for xsd in self.__xsd_trees: xpath = "./%s[@name='%s']" % (tag, defname) elements = xsd.xpath(xpath) if elements: return elements[0] raise KeyError("no definition named '%s' found" % defname)
[ "def", "getdef", "(", "self", ",", "defname", ",", "tag", "=", "'*'", ")", ":", "if", "defname", ".", "startswith", "(", "'a:'", ")", ":", "defname", "=", "defname", "[", "2", ":", "]", "for", "xsd", "in", "self", ".", "__xsd_trees", ":", "xpath", ...
Return definition element with name *defname*
[ "Return", "definition", "element", "with", "name", "*", "defname", "*" ]
python
train
nschloe/matplotlib2tikz
matplotlib2tikz/save.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L293-L309
def _print_pgfplot_libs_message(data): """Prints message to screen indicating the use of PGFPlots and its libraries.""" pgfplotslibs = ",".join(list(data["pgfplots libs"])) tikzlibs = ",".join(list(data["tikz libs"])) print(70 * "=") print("Please add the following lines to your LaTeX preamble:\n") print("\\usepackage[utf8]{inputenc}") print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX") print("\\usepackage{pgfplots}") if tikzlibs: print("\\usetikzlibrary{" + tikzlibs + "}") if pgfplotslibs: print("\\usepgfplotslibrary{" + pgfplotslibs + "}") print(70 * "=") return
[ "def", "_print_pgfplot_libs_message", "(", "data", ")", ":", "pgfplotslibs", "=", "\",\"", ".", "join", "(", "list", "(", "data", "[", "\"pgfplots libs\"", "]", ")", ")", "tikzlibs", "=", "\",\"", ".", "join", "(", "list", "(", "data", "[", "\"tikz libs\""...
Prints message to screen indicating the use of PGFPlots and its libraries.
[ "Prints", "message", "to", "screen", "indicating", "the", "use", "of", "PGFPlots", "and", "its", "libraries", "." ]
python
train
tuomas2/automate
src/automate/services/textui.py
https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/services/textui.py#L96-L106
def text_ui(self): """ Start Text UI main loop """ self.logger.info("Starting command line interface") self.help() try: self.ipython_ui() except ImportError: self.fallback_ui() self.system.cleanup()
[ "def", "text_ui", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Starting command line interface\"", ")", "self", ".", "help", "(", ")", "try", ":", "self", ".", "ipython_ui", "(", ")", "except", "ImportError", ":", "self", ".", "fall...
Start Text UI main loop
[ "Start", "Text", "UI", "main", "loop" ]
python
train
qwiglydee/drf-mongo-filters
drf_mongo_filters/filters.py
https://github.com/qwiglydee/drf-mongo-filters/blob/f7e397c329bac6d7b8cbb1df70d96eccdcfbc1ec/drf_mongo_filters/filters.py#L48-L52
def make_field(self, **kwargs): """ create serializer field """ kwargs['required'] = False kwargs['allow_null'] = True return self.field_class(**kwargs)
[ "def", "make_field", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'required'", "]", "=", "False", "kwargs", "[", "'allow_null'", "]", "=", "True", "return", "self", ".", "field_class", "(", "*", "*", "kwargs", ")" ]
create serializer field
[ "create", "serializer", "field" ]
python
train
KnowledgeLinks/rdfframework
rdfframework/framework.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/framework.py#L221-L267
def verify_server_core(timeout=120, start_delay=90): ''' checks to see if the server_core is running args: delay: will cycle till core is up. timeout: number of seconds to wait ''' timestamp = time.time() last_check = time.time() + start_delay - 10 last_delay_notification = time.time() - 10 server_down = True return_val = False timeout += 1 # loop until the server is up or the timeout is reached while((time.time()-timestamp) < timeout) and server_down: # if delaying, the start of the check, print waiting to start if start_delay > 0 and time.time() - timestamp < start_delay \ and (time.time()-last_delay_notification) > 5: print("Delaying server status check until %ss. Current time: %ss" \ % (start_delay, int(time.time() - timestamp))) last_delay_notification = time.time() # send a request check every 10s until the server is up while ((time.time()-last_check) > 10) and server_down: print("Checking status of servers at %ss" % \ int((time.time()-timestamp))) last_check = time.time() try: repo = requests.get(CFG.REPOSITORY_URL) repo_code = repo.status_code print ("\t", CFG.REPOSITORY_URL, " - ", repo_code) except: repo_code = 400 print ("\t", CFG.REPOSITORY_URL, " - DOWN") try: triple = requests.get(CFG.DATA_TRIPLESTORE.url) triple_code = triple.status_code print ("\t", CFG.DATA_TRIPLESTORE.url, " - ", triple_code) except: triple_code = 400 print ("\t", CFG.DATA_TRIPLESTORE.url, " - down") if repo_code == 200 and triple_code == 200: server_down = False return_val = True print("**** Servers up at %ss" % \ int((time.time()-timestamp))) break return return_val
[ "def", "verify_server_core", "(", "timeout", "=", "120", ",", "start_delay", "=", "90", ")", ":", "timestamp", "=", "time", ".", "time", "(", ")", "last_check", "=", "time", ".", "time", "(", ")", "+", "start_delay", "-", "10", "last_delay_notification", ...
checks to see if the server_core is running args: delay: will cycle till core is up. timeout: number of seconds to wait
[ "checks", "to", "see", "if", "the", "server_core", "is", "running" ]
python
train
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7607-L7642
def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin Notes ----- This method is the DataFrame version of ``ndarray.argmin``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis))
[ "def", "idxmin", "(", "self", ",", "axis", "=", "0", ",", "skipna", "=", "True", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "indices", "=", "nanops", ".", "nanargmin", "(", "self", ".", "values", ",", "axis", "=", "ax...
Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin Notes ----- This method is the DataFrame version of ``ndarray.argmin``.
[ "Return", "index", "of", "first", "occurrence", "of", "minimum", "over", "requested", "axis", ".", "NA", "/", "null", "values", "are", "excluded", "." ]
python
train
ramrod-project/database-brain
schema/brain/binary/decorators.py
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/binary/decorators.py#L56-L74
def _only_if_file_not_exist(func_, *args, **kwargs): """ horribly non-atomic :param func_: :param args: :param kwargs: :return: """ obj_dict = args[1] conn = args[-1] try: RBF.get(obj_dict[PRIMARY_FIELD]).pluck(PRIMARY_FIELD).run(conn) err_str = "Duplicate primary key `Name`: {}".format(obj_dict[PRIMARY_FIELD]) err_dict = {'errors': 1, 'first_error': err_str} return err_dict except r.errors.ReqlNonExistenceError: return func_(*args, **kwargs)
[ "def", "_only_if_file_not_exist", "(", "func_", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "obj_dict", "=", "args", "[", "1", "]", "conn", "=", "args", "[", "-", "1", "]", "try", ":", "RBF", ".", "get", "(", "obj_dict", "[", "PRIMARY_FIEL...
horribly non-atomic :param func_: :param args: :param kwargs: :return:
[ "horribly", "non", "-", "atomic" ]
python
train
ggaughan/pipe2py
pipe2py/modules/piperegex.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/piperegex.py#L100-L129
def pipe_regex(context=None, _INPUT=None, conf=None, **kwargs): """An operator that replaces text in items using regexes. Each has the general format: "In [field] replace [match] with [replace]". Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) conf : { 'RULE': [ { 'field': {'value': <'search field'>}, 'match': {'value': <'regex'>}, 'replace': {'value': <'replacement'>}, 'globalmatch': {'value': '1'}, 'singlelinematch': {'value': '2'}, 'multilinematch': {'value': '4'}, 'casematch': {'value': '8'} } ] } Returns ------- _OUTPUT : generator of items """ splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs)) parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', convert_func)) _OUTPUT = parse_results(parsed) return _OUTPUT
[ "def", "pipe_regex", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "splits", "=", "get_splits", "(", "_INPUT", ",", "conf", "[", "'RULE'", "]", ",", "*", "*", "cdicts", "(", ...
An operator that replaces text in items using regexes. Each has the general format: "In [field] replace [match] with [replace]". Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) conf : { 'RULE': [ { 'field': {'value': <'search field'>}, 'match': {'value': <'regex'>}, 'replace': {'value': <'replacement'>}, 'globalmatch': {'value': '1'}, 'singlelinematch': {'value': '2'}, 'multilinematch': {'value': '4'}, 'casematch': {'value': '8'} } ] } Returns ------- _OUTPUT : generator of items
[ "An", "operator", "that", "replaces", "text", "in", "items", "using", "regexes", ".", "Each", "has", "the", "general", "format", ":", "In", "[", "field", "]", "replace", "[", "match", "]", "with", "[", "replace", "]", ".", "Not", "loopable", "." ]
python
train
smnorris/bcdata
bcdata/wfs.py
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L94-L110
def get_count(dataset, query=None): """Ask DataBC WFS how many features there are in a table/query """ # https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query table = validate_name(dataset) payload = { "service": "WFS", "version": "2.0.0", "request": "GetFeature", "typeName": table, "resultType": "hits", "outputFormat": "json", } if query: payload["CQL_FILTER"] = query r = requests.get(bcdata.WFS_URL, params=payload) return int(ET.fromstring(r.text).attrib["numberMatched"])
[ "def", "get_count", "(", "dataset", ",", "query", "=", "None", ")", ":", "# https://gis.stackexchange.com/questions/45101/only-return-the-numberoffeatures-in-a-wfs-query", "table", "=", "validate_name", "(", "dataset", ")", "payload", "=", "{", "\"service\"", ":", "\"WFS\...
Ask DataBC WFS how many features there are in a table/query
[ "Ask", "DataBC", "WFS", "how", "many", "features", "there", "are", "in", "a", "table", "/", "query" ]
python
train
bspaans/python-mingus
mingus/midi/midi_track.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_track.py#L242-L246
def set_key(self, key='C'): """Add a key signature event to the track_data.""" if isinstance(key, Key): key = key.name[0] self.track_data += self.key_signature_event(key)
[ "def", "set_key", "(", "self", ",", "key", "=", "'C'", ")", ":", "if", "isinstance", "(", "key", ",", "Key", ")", ":", "key", "=", "key", ".", "name", "[", "0", "]", "self", ".", "track_data", "+=", "self", ".", "key_signature_event", "(", "key", ...
Add a key signature event to the track_data.
[ "Add", "a", "key", "signature", "event", "to", "the", "track_data", "." ]
python
train
linkedin/shiv
src/shiv/bootstrap/filelock.py
https://github.com/linkedin/shiv/blob/6bda78676170b35d0877f67b71095c39ce41a74a/src/shiv/bootstrap/filelock.py#L22-L34
def acquire_win(lock_file): # pragma: no cover """Acquire a lock file on windows.""" try: fd = os.open(lock_file, OPEN_MODE) except OSError: pass else: try: msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) except (IOError, OSError): os.close(fd) else: return fd
[ "def", "acquire_win", "(", "lock_file", ")", ":", "# pragma: no cover", "try", ":", "fd", "=", "os", ".", "open", "(", "lock_file", ",", "OPEN_MODE", ")", "except", "OSError", ":", "pass", "else", ":", "try", ":", "msvcrt", ".", "locking", "(", "fd", "...
Acquire a lock file on windows.
[ "Acquire", "a", "lock", "file", "on", "windows", "." ]
python
train
angr/claripy
claripy/vsa/discrete_strided_interval_set.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/discrete_strided_interval_set.py#L109-L118
def cardinality(self): """ This is an over-approximation of the cardinality of this DSIS. :return: """ cardinality = 0 for si in self._si_set: cardinality += si.cardinality return cardinality
[ "def", "cardinality", "(", "self", ")", ":", "cardinality", "=", "0", "for", "si", "in", "self", ".", "_si_set", ":", "cardinality", "+=", "si", ".", "cardinality", "return", "cardinality" ]
This is an over-approximation of the cardinality of this DSIS. :return:
[ "This", "is", "an", "over", "-", "approximation", "of", "the", "cardinality", "of", "this", "DSIS", "." ]
python
train
pytest-dev/pluggy
pluggy/manager.py
https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L70-L105
def register(self, plugin, name=None): """ Register a plugin and return its canonical name or None if the name is blocked from registering. Raise a ValueError if the plugin is already registered. """ plugin_name = name or self.get_canonical_name(plugin) if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers: if self._name2plugin.get(plugin_name, -1) is None: return # blocked plugin, return None to indicate no registration raise ValueError( "Plugin already registered: %s=%s\n%s" % (plugin_name, plugin, self._name2plugin) ) # XXX if an error happens we should make sure no state has been # changed at point of return self._name2plugin[plugin_name] = plugin # register matching hook implementations of the plugin self._plugin2hookcallers[plugin] = hookcallers = [] for name in dir(plugin): hookimpl_opts = self.parse_hookimpl_opts(plugin, name) if hookimpl_opts is not None: normalize_hookimpl_opts(hookimpl_opts) method = getattr(plugin, name) hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts) hook = getattr(self.hook, name, None) if hook is None: hook = _HookCaller(name, self._hookexec) setattr(self.hook, name, hook) elif hook.has_spec(): self._verify_hook(hook, hookimpl) hook._maybe_apply_history(hookimpl) hook._add_hookimpl(hookimpl) hookcallers.append(hook) return plugin_name
[ "def", "register", "(", "self", ",", "plugin", ",", "name", "=", "None", ")", ":", "plugin_name", "=", "name", "or", "self", ".", "get_canonical_name", "(", "plugin", ")", "if", "plugin_name", "in", "self", ".", "_name2plugin", "or", "plugin", "in", "sel...
Register a plugin and return its canonical name or None if the name is blocked from registering. Raise a ValueError if the plugin is already registered.
[ "Register", "a", "plugin", "and", "return", "its", "canonical", "name", "or", "None", "if", "the", "name", "is", "blocked", "from", "registering", ".", "Raise", "a", "ValueError", "if", "the", "plugin", "is", "already", "registered", "." ]
python
train
cirruscluster/cirruscluster
cirruscluster/ext/ansible/utils/__init__.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/__init__.py#L126-L134
def write_tree_file(tree, hostname, buf): ''' write something into treedir/hostname ''' # TODO: might be nice to append playbook runs per host in a similar way # in which case, we'd want append mode. path = os.path.join(tree, hostname) fd = open(path, "w+") fd.write(buf) fd.close()
[ "def", "write_tree_file", "(", "tree", ",", "hostname", ",", "buf", ")", ":", "# TODO: might be nice to append playbook runs per host in a similar way", "# in which case, we'd want append mode.", "path", "=", "os", ".", "path", ".", "join", "(", "tree", ",", "hostname", ...
write something into treedir/hostname
[ "write", "something", "into", "treedir", "/", "hostname" ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3579-L3584
def getApplicationSupportedMimeTypes(self, pchAppKey, pchMimeTypesBuffer, unMimeTypesBuffer): """Get the list of supported mime types for this application, comma-delimited""" fn = self.function_table.getApplicationSupportedMimeTypes result = fn(pchAppKey, pchMimeTypesBuffer, unMimeTypesBuffer) return result
[ "def", "getApplicationSupportedMimeTypes", "(", "self", ",", "pchAppKey", ",", "pchMimeTypesBuffer", ",", "unMimeTypesBuffer", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getApplicationSupportedMimeTypes", "result", "=", "fn", "(", "pchAppKey", ",", "p...
Get the list of supported mime types for this application, comma-delimited
[ "Get", "the", "list", "of", "supported", "mime", "types", "for", "this", "application", "comma", "-", "delimited" ]
python
train
letuananh/chirptext
chirptext/leutile.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/leutile.py#L606-L609
def add_potential(self, *patterns):
    ''' Add a potential config file pattern '''
    # Expand every pattern into its concrete filenames, then queue the
    # whole flattened batch at once.
    expanded = [fn for ptn in patterns for fn in self._ptn2fn(ptn)]
    self.__potential.extend(expanded)
[ "def", "add_potential", "(", "self", ",", "*", "patterns", ")", ":", "for", "ptn", "in", "patterns", ":", "self", ".", "__potential", ".", "extend", "(", "self", ".", "_ptn2fn", "(", "ptn", ")", ")" ]
Add a potential config file pattern
[ "Add", "a", "potential", "config", "file", "pattern" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/chemenv/coordination_environments/structure_environments.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/structure_environments.py#L966-L998
def from_dict(cls, d):
    """
    Reconstructs the StructureEnvironments object from a dict
    representation of the StructureEnvironments created using
    the as_dict method.

    :param d: dict representation of the StructureEnvironments object
    :return: StructureEnvironments object
    """
    # Rebuild the chemical-environments list.  Each entry is either the
    # string 'None'/None (kept as None) or a {cn: [ChemicalEnvironments]}
    # mapping; cn keys arrive as strings from JSON and are cast back to int.
    ce_list = [None if (ce_dict == 'None' or ce_dict is None) else {
        int(cn): [None if (ced is None or ced == 'None') else
                  ChemicalEnvironments.from_dict(ced)
                  for ced in ce_dict[cn]]
        for cn in ce_dict} for ce_dict in d['ce_list']]
    # The Voronoi container and structure are needed below to rebuild the
    # neighbors sets, so deserialize them first.
    voronoi = DetailedVoronoiContainer.from_dict(d['voronoi'])
    structure = Structure.from_dict(d['structure'])
    # Per-site neighbors sets: None stays None, otherwise a
    # {cn: [NeighborsSet]} mapping (cn keys cast back to int as above).
    neighbors_sets = [{int(cn): [cls.NeighborsSet.from_dict(dd=nb_set_dict,
                                                            structure=structure,
                                                            detailed_voronoi=voronoi)
                                 for nb_set_dict in nb_sets]
                       for cn, nb_sets in site_nbs_sets_dict.items()}
                      if site_nbs_sets_dict is not None else None
                      for site_nbs_sets_dict in d['neighbors_sets']]
    # Copy all info entries except 'sites_info', which needs its integer
    # keys (cn and neighbor-set indices) restored from JSON strings.
    info = {key: val for key, val in d['info'].items() if key not in ['sites_info']}
    if 'sites_info' in d['info']:
        info['sites_info'] = [{'nb_sets_info': {int(cn): {int(inb_set): nb_set_info
                                                          for inb_set, nb_set_info in cn_sets.items()}
                                                for cn, cn_sets in site_info['nb_sets_info'].items()},
                               'time': site_info['time']}
                              if 'nb_sets_info' in site_info else {}
                              for site_info in d['info']['sites_info']]
    return cls(voronoi=voronoi, valences=d['valences'],
               sites_map=d['sites_map'],
               equivalent_sites=[[PeriodicSite.from_dict(psd) for psd in psl] for psl in d['equivalent_sites']],
               ce_list=ce_list, structure=structure,
               neighbors_sets=neighbors_sets,
               info=info)
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "ce_list", "=", "[", "None", "if", "(", "ce_dict", "==", "'None'", "or", "ce_dict", "is", "None", ")", "else", "{", "int", "(", "cn", ")", ":", "[", "None", "if", "(", "ced", "is", "None", "or...
Reconstructs the StructureEnvironments object from a dict representation of the StructureEnvironments created using the as_dict method. :param d: dict representation of the StructureEnvironments object :return: StructureEnvironments object
[ "Reconstructs", "the", "StructureEnvironments", "object", "from", "a", "dict", "representation", "of", "the", "StructureEnvironments", "created", "using", "the", "as_dict", "method", ".", ":", "param", "d", ":", "dict", "representation", "of", "the", "StructureEnvir...
python
train
weijia/djangoautoconf
djangoautoconf/obs/auto_conf_admin_utils.py
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/obs/auto_conf_admin_utils.py#L61-L75
def register_to_sys_with_admin_list(class_inst, admin_list=None, is_normal_admin_needed=False):
    """
    :param class_inst: model class
    :param admin_list: admin class
    :param is_normal_admin_needed: is normal admin registration needed
    :return:
    """
    # Fall back to an empty admin list when none was supplied, then build
    # the admin class once instead of duplicating the call per branch.
    effective_list = [] if admin_list is None else admin_list
    admin_class = get_valid_admin_class_with_list(effective_list, class_inst)
    if is_normal_admin_needed:
        register_all_type_of_admin(admin_class, class_inst)
    else:
        register_admin(admin_class, class_inst)
[ "def", "register_to_sys_with_admin_list", "(", "class_inst", ",", "admin_list", "=", "None", ",", "is_normal_admin_needed", "=", "False", ")", ":", "if", "admin_list", "is", "None", ":", "admin_class", "=", "get_valid_admin_class_with_list", "(", "[", "]", ",", "c...
:param class_inst: model class :param admin_list: admin class :param is_normal_admin_needed: is normal admin registration needed :return:
[ ":", "param", "class_inst", ":", "model", "class", ":", "param", "admin_list", ":", "admin", "class", ":", "param", "is_normal_admin_needed", ":", "is", "normal", "admin", "registration", "needed", ":", "return", ":" ]
python
train
materialsproject/pymatgen
pymatgen/core/units.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/units.py#L712-L717
def conversions(self):
    """
    Returns a string showing the available conversions.
    Useful tool in interactive mode.
    """
    # One line per supported unit, each produced by converting this
    # quantity into that unit.
    lines = [str(self.to(unit)) for unit in self.supported_units]
    return "\n".join(lines)
[ "def", "conversions", "(", "self", ")", ":", "return", "\"\\n\"", ".", "join", "(", "str", "(", "self", ".", "to", "(", "unit", ")", ")", "for", "unit", "in", "self", ".", "supported_units", ")" ]
Returns a string showing the available conversions. Useful tool in interactive mode.
[ "Returns", "a", "string", "showing", "the", "available", "conversions", ".", "Useful", "tool", "in", "interactive", "mode", "." ]
python
train
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1629-L1633
def swap(self, a, b):
    """ Swaps mem positions a and b """
    # Exchange both the memory cells and their matching asm lines so the
    # two parallel views stay in sync.
    tmp_mem = self.mem[a]
    self.mem[a] = self.mem[b]
    self.mem[b] = tmp_mem
    tmp_asm = self.asm[a]
    self.asm[a] = self.asm[b]
    self.asm[b] = tmp_asm
[ "def", "swap", "(", "self", ",", "a", ",", "b", ")", ":", "self", ".", "mem", "[", "a", "]", ",", "self", ".", "mem", "[", "b", "]", "=", "self", ".", "mem", "[", "b", "]", ",", "self", ".", "mem", "[", "a", "]", "self", ".", "asm", "["...
Swaps mem positions a and b
[ "Swaps", "mem", "positions", "a", "and", "b" ]
python
train
lambdamusic/Ontospy
ontospy/core/ontospy.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L144-L177
def load_sparql(self,
                sparql_endpoint,
                verbose=False,
                hide_base_schemas=True,
                hide_implicit_types=True,
                hide_implicit_preds=True,
                credentials=None):
    """
    Set up a SPARQLStore backend as a virtual ontospy graph

    :param sparql_endpoint: URL of the SPARQL endpoint to open.
    :param credentials: optional (user, password) tuple for HTTP auth.

    Note: we're using a 'SPARQLUpdateStore' backend instead of 'SPARQLStore'
    cause otherwise authentication fails (https://github.com/RDFLib/rdflib/issues/755)
    @TODO this error seems to be fixed in upcoming rdflib versions
    https://github.com/RDFLib/rdflib/pull/744
    """
    try:
        graph = rdflib.ConjunctiveGraph('SPARQLUpdateStore')

        # isinstance (was ``type(...) == tuple``) also accepts tuple
        # subclasses such as namedtuples.
        if credentials and isinstance(credentials, tuple):
            # https://github.com/RDFLib/rdflib/issues/343
            graph.store.setCredentials(credentials[0], credentials[1])

        graph.open(sparql_endpoint)
        self.rdflib_graph = graph
        self.sparql_endpoint = sparql_endpoint
        self.sources = [sparql_endpoint]
        self.sparqlHelper = SparqlHelper(self.rdflib_graph, self.sparql_endpoint)
        self.namespaces = sorted(self.rdflib_graph.namespaces())
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not intercepted; the original error is still re-raised.
        printDebug("Error trying to connect to Endpoint.")
        raise
[ "def", "load_sparql", "(", "self", ",", "sparql_endpoint", ",", "verbose", "=", "False", ",", "hide_base_schemas", "=", "True", ",", "hide_implicit_types", "=", "True", ",", "hide_implicit_preds", "=", "True", ",", "credentials", "=", "None", ")", ":", "try", ...
Set up a SPARQLStore backend as a virtual ontospy graph Note: we're using a 'SPARQLUpdateStore' backend instead of 'SPARQLStore' cause otherwise authentication fails (https://github.com/RDFLib/rdflib/issues/755) @TODO this error seems to be fixed in upcoming rdflib versions https://github.com/RDFLib/rdflib/pull/744
[ "Set", "up", "a", "SPARQLStore", "backend", "as", "a", "virtual", "ontospy", "graph" ]
python
train
matthiask/django-cte-forest
cte_forest/models.py
https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L696-L724
def _default_node_children(self, node, visitor, children): """ Generates a key and list of children of the given :class:`CTENode` `node`, intended to be used as an update to the dictionary representation generated by the :meth:`node_as_tree` method. The key is ``children`` and the list consists of the children of the given node as determined by the `children` callback. Each child node is, in turn, visited through recursive calls to :meth:`node_as_child`, and the `visitor` and `children` parameters are passed along. :param node: the :class:`CTENode` for which to generate the children representation. :param visitor: optional function responsible for generating the dictionary representation of the node. :param children: optional function responsible for generating a children key and list for the node. :return: a key and list representation of the structure of the children of the given node. """ return { self.model._cte_node_children: [ self.node_as_tree(child, visitor=visitor, children=children) for child in node.children.all() ] }
[ "def", "_default_node_children", "(", "self", ",", "node", ",", "visitor", ",", "children", ")", ":", "return", "{", "self", ".", "model", ".", "_cte_node_children", ":", "[", "self", ".", "node_as_tree", "(", "child", ",", "visitor", "=", "visitor", ",", ...
Generates a key and list of children of the given :class:`CTENode` `node`, intended to be used as an update to the dictionary representation generated by the :meth:`node_as_tree` method. The key is ``children`` and the list consists of the children of the given node as determined by the `children` callback. Each child node is, in turn, visited through recursive calls to :meth:`node_as_child`, and the `visitor` and `children` parameters are passed along. :param node: the :class:`CTENode` for which to generate the children representation. :param visitor: optional function responsible for generating the dictionary representation of the node. :param children: optional function responsible for generating a children key and list for the node. :return: a key and list representation of the structure of the children of the given node.
[ "Generates", "a", "key", "and", "list", "of", "children", "of", "the", "given", ":", "class", ":", "CTENode", "node", "intended", "to", "be", "used", "as", "an", "update", "to", "the", "dictionary", "representation", "generated", "by", "the", ":", "meth", ...
python
train
pickleshare/pickleshare
pickleshare.py
https://github.com/pickleshare/pickleshare/blob/f7950a9a359774c0190abde8da729b1810bdf3f4/pickleshare.py#L217-L224
def keys(self, globpat=None):
    """ All keys in DB, or all keys matching a glob"""
    # Without a pattern, walk the whole tree; otherwise restrict to the
    # glob.  Only real files count as keys.
    candidates = self.root.rglob('*') if globpat is None else self.root.glob(globpat)
    return [self._normalized(path) for path in candidates if path.is_file()]
[ "def", "keys", "(", "self", ",", "globpat", "=", "None", ")", ":", "if", "globpat", "is", "None", ":", "files", "=", "self", ".", "root", ".", "rglob", "(", "'*'", ")", "else", ":", "files", "=", "self", ".", "root", ".", "glob", "(", "globpat", ...
All keys in DB, or all keys matching a glob
[ "All", "keys", "in", "DB", "or", "all", "keys", "matching", "a", "glob" ]
python
train
worstcase/blockade
blockade/chaos.py
https://github.com/worstcase/blockade/blob/3dc6ad803f0b0d56586dec9542a6a06aa06cf569/blockade/chaos.py#L286-L292
def _sm_start(self, *args, **kwargs): """ Start the timer waiting for pain """ millisec = random.randint(self._start_min_delay, self._start_max_delay) self._timer = threading.Timer(millisec / 1000.0, self.event_timeout) self._timer.start()
[ "def", "_sm_start", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "millisec", "=", "random", ".", "randint", "(", "self", ".", "_start_min_delay", ",", "self", ".", "_start_max_delay", ")", "self", ".", "_timer", "=", "threading", "...
Start the timer waiting for pain
[ "Start", "the", "timer", "waiting", "for", "pain" ]
python
valid
hotdoc/hotdoc
hotdoc/parsers/sitemap.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/sitemap.py#L144-L218
def parse(self, filename):
    """
    Parse a sitemap file.

    Args:
        filename: str, the path to the sitemap file.

    Returns:
        Sitemap: the generated sitemap.
    """
    # Read the whole file up front; parsing below is purely line-based.
    with io.open(filename, 'r', encoding='utf-8') as _:
        lines = _.readlines()

    # All filenames seen so far (for duplicate detection) and a map of
    # source file -> (line number, column) for error reporting later.
    all_source_files = set()
    source_map = {}

    lineno = 0
    root = None           # the single root page of the sitemap
    index = None          # source file of the root page
    cur_level = -1        # indentation level of the previous entry
    parent_queue = []     # stack of ancestor pages, one per level

    for line in lines:
        try:
            # dedent() returns the indentation level and the stripped line;
            # presumably one level corresponds to 8 columns (see the
            # ``level * 8 + 1`` column math below) — TODO confirm.
            level, line = dedent(line)
            if line.startswith('#'):
                # Comment line: skip it but keep line numbering accurate.
                lineno += 1
                continue
            elif line.startswith('\\#'):
                # Escaped '#': strip the backslash and treat as content.
                line = line[1:]
        except IndentError as exc:
            error('bad-indent', 'Invalid indentation', filename=filename,
                  lineno=lineno, column=exc.column)

        if not line:
            lineno += 1
            continue

        source_file = dequote(line)
        if not source_file:
            lineno += 1
            continue

        # Each source file may only appear once in a sitemap.
        if source_file in all_source_files:
            error('sitemap-duplicate', 'Filename listed twice', filename=filename,
                  lineno=lineno, column=level * 8 + 1)

        all_source_files.add(source_file)
        source_map[source_file] = (lineno, level * 8 + 1)

        page = OrderedDict()

        # A second level-0 entry would be a second root, which is invalid.
        if root is not None and level == 0:
            error('sitemap-error', 'Sitemaps only support one root',
                  filename=filename, lineno=lineno, column=0)

        if root is None:
            root = page
            index = source_file
        else:
            # Pop ancestors until the top of the stack is this entry's
            # parent (lvl_diff >= 0 means we are at the same or a
            # shallower level than the previous entry).
            lvl_diff = cur_level - level
            while lvl_diff >= 0:
                parent_queue.pop()
                lvl_diff -= 1

            parent_queue[-1][source_file] = page

        parent_queue.append(page)

        cur_level = level

        lineno += 1

    return Sitemap(root, filename, index, source_map)
Parse a sitemap file. Args: filename: str, the path to the sitemap file. Returns: Sitemap: the generated sitemap.
[ "Parse", "a", "sitemap", "file", "." ]
python
train
snbuback/django_services
django_services/admin/__init__.py
https://github.com/snbuback/django_services/blob/58cbdea878bb11197add0ed1008a9206e4d92671/django_services/admin/__init__.py#L99-L159
def delete_view(self, request, object_id, extra_context=None):
    """The 'delete' admin view for this model.

    Shows a confirmation page on GET; on POST performs the deletion and
    redirects.  Unlike stock Django admin, only the per-object delete
    permission is required (related-object permissions are bypassed).
    """
    opts = self.model._meta
    app_label = opts.app_label

    obj = self.get_object(request, unquote(object_id))

    # Permission is checked before existence, so unauthorized users get
    # 403 rather than learning whether the object exists.
    if not self.has_delete_permission(request, obj):
        raise PermissionDenied

    if obj is None:
        raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})

    using = router.db_for_write(self.model)

    # Populate deleted_objects, a data structure of all related objects that
    # will also be deleted.
    (deleted_objects, perms_needed, protected) = get_deleted_objects(
        [obj], opts, request.user, self.admin_site, using)
    # Deliberately ignore permissions on related objects that would also
    # be deleted.
    perms_needed = False  # cheat! Only object permission is required

    if request.POST:  # The user has already confirmed the deletion.
        if perms_needed:
            raise PermissionDenied
        obj_display = force_unicode(obj)
        self.log_deletion(request, obj, obj_display)
        self.delete_model(request, obj)

        self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})

        # Users without change permission cannot see the changelist, so
        # send them to the admin index instead.
        if not self.has_change_permission(request, None):
            return HttpResponseRedirect(reverse('admin:index',
                                                current_app=self.admin_site.name))
        return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.module_name),
                                            current_app=self.admin_site.name))

    object_name = force_unicode(opts.verbose_name)

    # GET request: render the confirmation page.
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": object_name}
    else:
        title = _("Are you sure?")

    context = {
        "title": title,
        "object_name": object_name,
        "object": obj,
        "deleted_objects": deleted_objects,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
    }
    context.update(extra_context or {})

    # Template lookup falls back from model-specific to app-specific to
    # the global delete confirmation template.
    return TemplateResponse(request, self.delete_confirmation_template or [
        "admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
        "admin/%s/delete_confirmation.html" % app_label,
        "admin/delete_confirmation.html"
    ], context, current_app=self.admin_site.name)
[ "def", "delete_view", "(", "self", ",", "request", ",", "object_id", ",", "extra_context", "=", "None", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "app_label", "=", "opts", ".", "app_label", "obj", "=", "self", ".", "get_object", "(", "...
The 'delete' admin view for this model.
[ "The", "delete", "admin", "view", "for", "this", "model", "." ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L2842-L2853
def applyTransform(self):
    """
    Convenience utility to apply the specified transform to the specified pose.
    This properly transforms all pose components, including velocity and angular velocity
    """
    # Allocate fresh ctypes structs, let the native call fill them in
    # place, and return all three records to the caller.
    native_call = self.function_table.applyTransform
    pOutputPose = TrackedDevicePose_t()
    pTrackedDevicePose = TrackedDevicePose_t()
    pTransform = HmdMatrix34_t()
    native_call(byref(pOutputPose), byref(pTrackedDevicePose), byref(pTransform))
    return pOutputPose, pTrackedDevicePose, pTransform
[ "def", "applyTransform", "(", "self", ")", ":", "fn", "=", "self", ".", "function_table", ".", "applyTransform", "pOutputPose", "=", "TrackedDevicePose_t", "(", ")", "pTrackedDevicePose", "=", "TrackedDevicePose_t", "(", ")", "pTransform", "=", "HmdMatrix34_t", "(...
Convenience utility to apply the specified transform to the specified pose. This properly transforms all pose components, including velocity and angular velocity
[ "Convenience", "utility", "to", "apply", "the", "specified", "transform", "to", "the", "specified", "pose", ".", "This", "properly", "transforms", "all", "pose", "components", "including", "velocity", "and", "angular", "velocity" ]
python
train
villebro/pyhtzee
pyhtzee/scoring.py
https://github.com/villebro/pyhtzee/blob/92a4296325fb1aac743c00c49b537b9f0d48f041/pyhtzee/scoring.py#L39-L46
def score_x_of_a_kind_yahtzee(dice: List[int], min_same_faces: int) -> int:
    """Return sum of dice if there are a minimum of equal min_same_faces
    dice, otherwise return zero. Only works for 3 or more min_same_faces.
    """
    face_counts = Counter(dice)
    # Score only when the most frequent face reaches the threshold; an
    # empty roll scores zero.
    if face_counts and max(face_counts.values()) >= min_same_faces:
        return sum(dice)
    return 0
[ "def", "score_x_of_a_kind_yahtzee", "(", "dice", ":", "List", "[", "int", "]", ",", "min_same_faces", ":", "int", ")", "->", "int", ":", "for", "die", ",", "count", "in", "Counter", "(", "dice", ")", ".", "most_common", "(", "1", ")", ":", "if", "cou...
Return sum of dice if there are a minimum of equal min_same_faces dice, otherwise return zero. Only works for 3 or more min_same_faces.
[ "Return", "sum", "of", "dice", "if", "there", "are", "a", "minimum", "of", "equal", "min_same_faces", "dice", "otherwise", "return", "zero", ".", "Only", "works", "for", "3", "or", "more", "min_same_faces", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/map_execute_on_keys_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/map_execute_on_keys_codec.py#L12-L20
def calculate_size(name, entry_processor, keys):
    """ Calculates the request payload size"""
    # Fixed-size pieces first (name, processor, key-count int), then one
    # serialized data entry per key.
    data_size = calculate_size_str(name)
    data_size += calculate_size_data(entry_processor)
    data_size += INT_SIZE_IN_BYTES
    data_size += sum(calculate_size_data(keys_item) for keys_item in keys)
    return data_size
[ "def", "calculate_size", "(", "name", ",", "entry_processor", ",", "keys", ")", ":", "data_size", "=", "0", "data_size", "+=", "calculate_size_str", "(", "name", ")", "data_size", "+=", "calculate_size_data", "(", "entry_processor", ")", "data_size", "+=", "INT_...
Calculates the request payload size
[ "Calculates", "the", "request", "payload", "size" ]
python
train
fhcrc/taxtastic
taxtastic/ncbi.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L283-L326
def read_names(rows, source_id=1):
    """Return an iterator of rows ready to insert into table "names". Adds
    columns "is_primary" (identifying the primary name for each tax_id with
    a value of 1) and "is_classified" (always None).

    * rows - iterator of lists (eg, output from read_archive or read_dmp)
    * source_id - value emitted in the added "source_id" column

    From the NCBI docs:

    Taxonomy names file (names.dmp):
    tax_id -- the id of node associated with this name
    name_txt -- name itself
    unique name -- the unique variant of this name if name not unique
    name class -- (synonym, common name, ...)
    """
    ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
    extra_keys = ['source_id', 'is_primary', 'is_classified']

    # is_classified applies to species only; we will set this value later
    is_classified = None

    col_tax_id = ncbi_keys.index('tax_id')
    col_tax_name = ncbi_keys.index('tax_name')
    col_unique = ncbi_keys.index('unique_name')
    col_class = ncbi_keys.index('name_class')

    # Header row first.
    yield ncbi_keys + extra_keys

    for tid, grp in itertools.groupby(rows, itemgetter(col_tax_id)):
        primary_count = 0
        for row in grp:
            is_primary = row[col_class] == 'scientific name'
            # fix primary key uniqueness violation
            if row[col_unique]:
                row[col_tax_name] = row[col_unique]
            primary_count += is_primary
            yield row + [source_id, is_primary, is_classified]
        # confirm that each tax_id has exactly one scientific name
        assert primary_count == 1
[ "def", "read_names", "(", "rows", ",", "source_id", "=", "1", ")", ":", "ncbi_keys", "=", "[", "'tax_id'", ",", "'tax_name'", ",", "'unique_name'", ",", "'name_class'", "]", "extra_keys", "=", "[", "'source_id'", ",", "'is_primary'", ",", "'is_classified'", ...
Return an iterator of rows ready to insert into table "names". Adds columns "is_primary" (identifying the primary name for each tax_id with a vaule of 1) and "is_classified" (always None). * rows - iterator of lists (eg, output from read_archive or read_dmp) * unclassified_regex - a compiled re matching "unclassified" names From the NCBI docs: Taxonomy names file (names.dmp): tax_id -- the id of node associated with this name name_txt -- name itself unique name -- the unique variant of this name if name not unique name class -- (synonym, common name, ...)
[ "Return", "an", "iterator", "of", "rows", "ready", "to", "insert", "into", "table", "names", ".", "Adds", "columns", "is_primary", "(", "identifying", "the", "primary", "name", "for", "each", "tax_id", "with", "a", "vaule", "of", "1", ")", "and", "is_class...
python
train
lycantropos/paradigm
paradigm/cached.py
https://github.com/lycantropos/paradigm/blob/70415f77964dbb1b6d444f890a5d988174194ff0/paradigm/cached.py#L15-L33
def map_(cache: Mapping[Domain, Range]) -> Operator[Map[Domain, Range]]:
    """
    Returns a decorator whose wrapped function is invoked only when the
    cache holds no value for its argument.

    Wrapped function arguments should be hashable.
    """

    def decorate(function: Map[Domain, Range]) -> Map[Domain, Range]:
        @wraps(function)
        def cached_call(argument: Domain) -> Range:
            # EAFP: a cache hit is the expected common case, so try it
            # first and compute on a miss.
            try:
                return cache[argument]
            except KeyError:
                return function(argument)

        return cached_call

    return decorate
[ "def", "map_", "(", "cache", ":", "Mapping", "[", "Domain", ",", "Range", "]", ")", "->", "Operator", "[", "Map", "[", "Domain", ",", "Range", "]", "]", ":", "def", "wrapper", "(", "function", ":", "Map", "[", "Domain", ",", "Range", "]", ")", "-...
Returns decorator that calls wrapped function if nothing was found in cache for its argument. Wrapped function arguments should be hashable.
[ "Returns", "decorator", "that", "calls", "wrapped", "function", "if", "nothing", "was", "found", "in", "cache", "for", "its", "argument", "." ]
python
train
project-rig/rig
rig/place_and_route/place/utils.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L232-L240
def finalise_same_chip_constraints(substitutions, placements):
    """Given a set of placements containing the supplied
    :py:class:`MergedVertex`, remove the merged vertices replacing them with
    their constituent vertices (changing the placements inplace).
    """
    # Unwind the merges in reverse order so nested substitutions resolve
    # correctly: each constituent vertex inherits the merged placement.
    for merged_vertex in reversed(substitutions):
        location = placements.pop(merged_vertex)
        placements.update((vertex, location) for vertex in merged_vertex.vertices)
[ "def", "finalise_same_chip_constraints", "(", "substitutions", ",", "placements", ")", ":", "for", "merged_vertex", "in", "reversed", "(", "substitutions", ")", ":", "placement", "=", "placements", ".", "pop", "(", "merged_vertex", ")", "for", "v", "in", "merged...
Given a set of placements containing the supplied :py:class:`MergedVertex`, remove the merged vertices replacing them with their constituent vertices (changing the placements inplace).
[ "Given", "a", "set", "of", "placements", "containing", "the", "supplied", ":", "py", ":", "class", ":", "MergedVertex", "remove", "the", "merged", "vertices", "replacing", "them", "with", "their", "constituent", "vertices", "(", "changing", "the", "placements", ...
python
train
bokeh/bokeh
bokeh/document/events.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/document/events.py#L434-L442
def dispatch(self, receiver):
    ''' Dispatch handling of this event to a receiver.

    This method will invoke ``receiver._columns_streamed`` if it exists.

    '''
    super(ColumnsStreamedEvent, self).dispatch(receiver)
    handler = getattr(receiver, '_columns_streamed', None)
    if handler is not None:
        handler(self)
[ "def", "dispatch", "(", "self", ",", "receiver", ")", ":", "super", "(", "ColumnsStreamedEvent", ",", "self", ")", ".", "dispatch", "(", "receiver", ")", "if", "hasattr", "(", "receiver", ",", "'_columns_streamed'", ")", ":", "receiver", ".", "_columns_strea...
Dispatch handling of this event to a receiver. This method will invoke ``receiver._columns_streamed`` if it exists.
[ "Dispatch", "handling", "of", "this", "event", "to", "a", "receiver", "." ]
python
train
ansible/tower-cli
tower_cli/resources/activity_stream.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/activity_stream.py#L55-L68
def read(self, *args, **kwargs):
    ''' Do extra processing so we can display the actor field
    as a top-level field.

    Delegates to the parent ``read`` and promotes the nested ``actor``
    data to a top-level field on every returned entry.
    '''
    # NOTE: the original ``kwargs['actor'] = kwargs.pop('actor')`` line was
    # a no-op (pop then reinsert of the same key/value) and is removed.
    r = super(Resource, self).read(*args, **kwargs)
    if 'results' in r:
        # List payload: promote the actor on each entry.
        for d in r['results']:
            self._promote_actor(d)
    else:
        # Detail payload: the response itself is the single entry.
        # BUG FIX: this branch previously referenced the loop variable
        # ``d``, which is undefined here and raised NameError.
        self._promote_actor(r)
    return r
[ "def", "read", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'actor'", "in", "kwargs", ":", "kwargs", "[", "'actor'", "]", "=", "kwargs", ".", "pop", "(", "'actor'", ")", "r", "=", "super", "(", "Resource", ",", "self",...
Do extra processing so we can display the actor field as a top-level field
[ "Do", "extra", "processing", "so", "we", "can", "display", "the", "actor", "field", "as", "a", "top", "-", "level", "field" ]
python
valid
hobson/aima
aima/search.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L316-L329
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Fig. 4.2]"""
    current = Node(problem.initial)
    climbing = True
    while climbing:
        successors = current.expand(problem)
        if not successors:
            # Dead end: nothing to move to.
            climbing = False
        else:
            best = argmax_random_tie(successors,
                                     lambda node: problem.value(node.state))
            if problem.value(best.state) <= problem.value(current.state):
                # Local maximum reached: no strictly better neighbor.
                climbing = False
            else:
                current = best
    return current.state
[ "def", "hill_climbing", "(", "problem", ")", ":", "current", "=", "Node", "(", "problem", ".", "initial", ")", "while", "True", ":", "neighbors", "=", "current", ".", "expand", "(", "problem", ")", "if", "not", "neighbors", ":", "break", "neighbor", "=",...
From the initial node, keep choosing the neighbor with highest value, stopping when no neighbor is better. [Fig. 4.2]
[ "From", "the", "initial", "node", "keep", "choosing", "the", "neighbor", "with", "highest", "value", "stopping", "when", "no", "neighbor", "is", "better", ".", "[", "Fig", ".", "4", ".", "2", "]" ]
python
valid
reanahub/reana-commons
reana_commons/serial.py
https://github.com/reanahub/reana-commons/blob/abf31d9f495e0d93171c43fc4a414cd292091b11/reana_commons/serial.py#L55-L75
def serial_load(workflow_file, specification, parameters=None, original=None):
    """Validate and return a expanded REANA Serial workflow specification.

    :param workflow_file: A specification file compliant with
        REANA Serial workflow specification.
    :returns: A dictionary which represents the valid Serial workflow with all
        parameters expanded.
    """
    parameters = parameters or {}
    # Fall back to reading the specification from disk when none was given.
    if not specification:
        with open(workflow_file, 'r') as spec_fd:
            specification = json.loads(spec_fd.read())
    expanded = _expand_parameters(specification, parameters, original)
    # Validation runs against the raw (unexpanded) specification.
    validate(specification, serial_workflow_schema)
    return expanded
[ "def", "serial_load", "(", "workflow_file", ",", "specification", ",", "parameters", "=", "None", ",", "original", "=", "None", ")", ":", "parameters", "=", "parameters", "or", "{", "}", "if", "not", "specification", ":", "with", "open", "(", "workflow_file"...
Validate and return a expanded REANA Serial workflow specification. :param workflow_file: A specification file compliant with REANA Serial workflow specification. :returns: A dictionary which represents the valid Serial workflow with all parameters expanded.
[ "Validate", "and", "return", "a", "expanded", "REANA", "Serial", "workflow", "specification", "." ]
python
train
samluescher/django-media-tree
media_tree/utils/__init__.py
https://github.com/samluescher/django-media-tree/blob/3eb6345faaf57e2fbe35ca431d4d133f950f2b5f/media_tree/utils/__init__.py#L90-L109
def join_formatted(text, new_text, glue_format_if_true=u'%s%s',
                   glue_format_if_false=u'%s%s', condition=None,
                   format=u'%s', escape=False):
    """
    Joins two strings, optionally escaping the second, and using one of two
    string formats for glueing them together, depending on whether a
    condition is True or False.

    A shorthand for the common "format B, then join it with A" pattern,
    e.g. wrap string B with tags only if it is non-empty and join it with
    A using a separator only if A is non-empty.  When *condition* is not
    given, it defaults to "both strings are non-empty".
    """
    if condition is None:
        condition = text and new_text
    piece = conditional_escape(new_text) if escape else new_text
    # Only apply the format to a non-empty second string.
    if piece:
        piece = format % piece
    chosen_glue = glue_format_if_true if condition else glue_format_if_false
    return chosen_glue % (text, piece)
[ "def", "join_formatted", "(", "text", ",", "new_text", ",", "glue_format_if_true", "=", "u'%s%s'", ",", "glue_format_if_false", "=", "u'%s%s'", ",", "condition", "=", "None", ",", "format", "=", "u'%s'", ",", "escape", "=", "False", ")", ":", "if", "conditio...
Joins two strings, optionally escaping the second, and using one of two string formats for glueing them together, depending on whether a condition is True or False. This function is a shorthand for complicated code blocks when you want to format some strings and link them together. A typical use case might be: Wrap string B with <strong> tags, but only if it is not empty, and join it with A with a comma in between, but only if A is not empty, etc.
[ "Joins", "two", "strings", "optionally", "escaping", "the", "second", "and", "using", "one", "of", "two", "string", "formats", "for", "glueing", "them", "together", "depending", "on", "whether", "a", "condition", "is", "True", "or", "False", "." ]
python
train
librosa/librosa
librosa/filters.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L860-L954
def _multirate_fb(center_freqs=None, sample_rates=None, Q=25.0, passband_ripple=1, stopband_attenuation=50, ftype='ellip', flayout='ba'):
    r"""Construct a multirate IIR band-pass filterbank.

    Each band-pass filter is designed (via ``scipy.signal.iirdesign``) at its
    own sample rate, so a filter's impulse-response length can stay constant
    while its center frequency shifts.

    Parameters
    ----------
    center_freqs : np.ndarray [shape=(n,), dtype=float]
        Center frequencies of the filter kernels; also fixes the filter count.
    sample_rates : np.ndarray [shape=(n,), dtype=float]
        Sample rate for each filter (multirate operation).
    Q : float
        Q factor; the bandwidth of each filter is ``center_freq / Q``.
    passband_ripple : float
        Maximum passband loss (dB); see ``scipy.signal.iirdesign``.
    stopband_attenuation : float
        Minimum stopband attenuation (dB); see ``scipy.signal.iirdesign``.
    ftype : str
        IIR filter design type; see ``scipy.signal.iirdesign``.
    flayout : string
        ``output`` argument for ``scipy.signal.iirdesign`` (``'ba'``,
        ``'sos'`` or ``'zpk'``).

    Returns
    -------
    filterbank : list [shape=(n,)]
        Filter coefficients, one entry per filter.
    sample_rates : np.ndarray [shape=(n,), dtype=float]
        Sample rate for each filter (returned unchanged).

    Raises
    ------
    ParameterError
        If ``center_freqs`` or ``sample_rates`` is ``None``, or their shapes
        differ.
    """
    if center_freqs is None:
        raise ParameterError('center_freqs must be provided.')
    if sample_rates is None:
        raise ParameterError('sample_rates must be provided.')
    if center_freqs.shape != sample_rates.shape:
        raise ParameterError('Number of provided center_freqs and sample_rates must be equal.')
    nyquist = 0.5 * sample_rates
    filter_bandwidths = center_freqs / float(Q)
    filterbank = []
    for f_center, f_nyq, bw in zip(center_freqs, nyquist, filter_bandwidths):
        # Band edges normalized to the per-filter Nyquist frequency
        # (list / numpy scalar broadcasts to an ndarray).
        wp = [f_center - 0.5 * bw, f_center + 0.5 * bw] / f_nyq
        ws = [f_center - bw, f_center + bw] / f_nyq
        filterbank.append(
            scipy.signal.iirdesign(wp, ws, passband_ripple,
                                   stopband_attenuation, analog=False,
                                   ftype=ftype, output=flayout))
    return filterbank, sample_rates
[ "def", "_multirate_fb", "(", "center_freqs", "=", "None", ",", "sample_rates", "=", "None", ",", "Q", "=", "25.0", ",", "passband_ripple", "=", "1", ",", "stopband_attenuation", "=", "50", ",", "ftype", "=", "'ellip'", ",", "flayout", "=", "'ba'", ")", "...
r'''Helper function to construct a multirate filterbank. A filter bank consists of multiple band-pass filters which divide the input signal into subbands. In the case of a multirate filter bank, the band-pass filters operate with resampled versions of the input signal, e.g. to keep the length of a filter constant while shifting its center frequency. This implementation uses `scipy.signal.iirdesign` to design the filters. Parameters ---------- center_freqs : np.ndarray [shape=(n,), dtype=float] Center frequencies of the filter kernels. Also defines the number of filters in the filterbank. sample_rates : np.ndarray [shape=(n,), dtype=float] Samplerate for each filter (used for multirate filterbank). Q : float Q factor (influences the filter bandwith). passband_ripple : float The maximum loss in the passband (dB) See `scipy.signal.iirdesign` for details. stopband_attenuation : float The minimum attenuation in the stopband (dB) See `scipy.signal.iirdesign` for details. ftype : str The type of IIR filter to design See `scipy.signal.iirdesign` for details. flayout : string Valid `output` argument for `scipy.signal.iirdesign`. - If `ba`, returns numerators/denominators of the transfer functions, used for filtering with `scipy.signal.filtfilt`. Can be unstable for high-order filters. - If `sos`, returns a series of second-order filters, used for filtering with `scipy.signal.sosfiltfilt`. Minimizes numerical precision errors for high-order filters, but is slower. - If `zpk`, returns zeros, poles, and system gains of the transfer functions. Returns ------- filterbank : list [shape=(n,), dtype=float] Each list entry comprises the filter coefficients for a single filter. sample_rates : np.ndarray [shape=(n,), dtype=float] Samplerate for each filter. Notes ----- This function caches at level 10. See Also -------- scipy.signal.iirdesign Raises ------ ParameterError If `center_freqs` is `None`. If `sample_rates` is `None`. 
If `center_freqs.shape` does not match `sample_rates.shape`.
[ "r", "Helper", "function", "to", "construct", "a", "multirate", "filterbank", "." ]
python
test
PyProphet/pyprophet
pyprophet/main.py
https://github.com/PyProphet/pyprophet/blob/f546ad171750cd7685afbde6785fe71f82cadb35/pyprophet/main.py#L195-L205
def subsample(infile, outfile, subsample_ratio, test):
    """
    Subsample OpenSWATH file to minimum for integrated scoring.

    :param infile: path to the input OSW file
    :param outfile: path for the subsampled output; defaults to ``infile``
        (in-place) when ``None``
    :param subsample_ratio: fraction of the data to keep
    :param test: test-mode flag forwarded to ``subsample_osw``
    """
    # Original code had a no-op ``else: outfile = outfile`` branch; only the
    # None default needs handling.
    if outfile is None:
        outfile = infile
    subsample_osw(infile, outfile, subsample_ratio, test)
[ "def", "subsample", "(", "infile", ",", "outfile", ",", "subsample_ratio", ",", "test", ")", ":", "if", "outfile", "is", "None", ":", "outfile", "=", "infile", "else", ":", "outfile", "=", "outfile", "subsample_osw", "(", "infile", ",", "outfile", ",", "...
Subsample OpenSWATH file to minimum for integrated scoring
[ "Subsample", "OpenSWATH", "file", "to", "minimum", "for", "integrated", "scoring" ]
python
test
qacafe/cdrouter.py
cdrouter/results.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/results.py#L819-L828
def progress_stats(self, id):  # pylint: disable=invalid-name,redefined-builtin
    """Compute progress stats for a result.

    :param id: Result ID as an int.
    :return: :class:`results.Progress <results.Progress>` object
    :rtype: results.Progress
    """
    url = self.base + str(id) + '/'
    schema = ProgressSchema()
    resp = self.service.get(url, params={'stats': 'progress'})
    return self.service.decode(schema, resp)
[ "def", "progress_stats", "(", "self", ",", "id", ")", ":", "# pylint: disable=invalid-name,redefined-builtin", "schema", "=", "ProgressSchema", "(", ")", "resp", "=", "self", ".", "service", ".", "get", "(", "self", ".", "base", "+", "str", "(", "id", ")", ...
Compute progress stats for a result. :param id: Result ID as an int. :return: :class:`results.Progress <results.Progress>` object :rtype: results.Progress
[ "Compute", "progress", "stats", "for", "a", "result", "." ]
python
train
gem/oq-engine
openquake/server/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L341-L372
def calc_abort(request, calc_id):
    """
    Abort the given calculation, if it is running.

    Returns a JSON ``HttpResponse``; a 403 is returned when the requesting
    user lacks permission over the job.
    """
    job = logs.dbcmd('get_job', calc_id)
    if job is None:
        return HttpResponse(content=json.dumps({'error': 'Unknown job %s' % calc_id}),
                            content_type=JSON)
    if job.status not in ('submitted', 'executing'):
        return HttpResponse(content=json.dumps({'error': 'Job %s is not running' % job.id}),
                            content_type=JSON)
    if not utils.user_has_permission(request, job.user_name):
        err = {'error': ('User %s has no permission to abort job %s' %
                         (job.user_name, job.id))}
        return HttpResponse(content=json.dumps(err), content_type=JSON,
                            status=403)
    if job.pid:  # spawned job with a known process id
        try:
            os.kill(job.pid, signal.SIGTERM)
        except Exception as exc:
            logging.error(exc)
        else:
            logging.warning('Aborting job %d, pid=%d', job.id, job.pid)
            logs.dbcmd('set_status', job.id, 'aborted')
        # report success even if the kill failed, matching prior behavior
        message = {'success': 'Killing job %d' % job.id}
        return HttpResponse(content=json.dumps(message), content_type=JSON)
    return HttpResponse(content=json.dumps({'error': 'PID for job %s not found' % job.id}),
                        content_type=JSON)
[ "def", "calc_abort", "(", "request", ",", "calc_id", ")", ":", "job", "=", "logs", ".", "dbcmd", "(", "'get_job'", ",", "calc_id", ")", "if", "job", "is", "None", ":", "message", "=", "{", "'error'", ":", "'Unknown job %s'", "%", "calc_id", "}", "retur...
Abort the given calculation, if it is running
[ "Abort", "the", "given", "calculation", "it", "is", "it", "running" ]
python
train
axialmarket/fsq
fsq/scan.py
https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/scan.py#L121-L141
def scan_forever(queue, *args, **kwargs):
    """Yield work from an fsq queue forever, blocking on the queue trigger.

    Wraps ``fsq.scan()``: work is yielded as FSQWorkItem objects (with the
    default FSQScanGenerator), and between scans this blocks on the queue's
    trigger fifo until more work arrives. Accepts the same parameters as
    ``scan()``, plus ``process_once_now`` (default ``True``): whether to run
    an initial ``scan()`` before waiting on the trigger.
    """
    if kwargs.get('process_once_now', True):
        for item in scan(queue, *args, **kwargs):
            yield item
    while True:
        # Block until the trigger fifo signals that new work arrived.
        with open(fsq_path.trigger(queue), 'rb') as trigger:
            trigger.read(1)
        for item in scan(queue, *args, **kwargs):
            yield item
[ "def", "scan_forever", "(", "queue", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "process_once_now", "=", "kwargs", ".", "get", "(", "'process_once_now'", ",", "True", ")", "if", "process_once_now", ":", "for", "work", "in", "scan", "(", "queue"...
Return an infinite iterator over an fsq queue that blocks waiting for the queue trigger. Work is yielded as FSQWorkItem objects when available, assuming the default generator (FSQScanGenerator) is in use. Essentially, this function wraps fsq.scan() and blocks for more work. It takes all the same parameters as scan(), plus process_once_now, which is a boolean to determine if an initial .scan() is run before listening to the trigger. This argument defaults to True.
[ "Return", "an", "infinite", "iterator", "over", "an", "fsq", "queue", "that", "blocks", "waiting", "for", "the", "queue", "trigger", ".", "Work", "is", "yielded", "as", "FSQWorkItem", "objects", "when", "available", "assuming", "the", "default", "generator", "...
python
train
TrafficSenseMSD/SumoTools
sumolib/miscutils.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/sumolib/miscutils.py#L160-L166
def mean(self):
    """Return the median of the collected values, or None when empty.

    XXX: misnamed -- this computes the median (upper middle element for
    even-length input), not the mean; the name is kept for backward
    compatibility with existing callers.
    """
    if not self.values:
        return None
    # Use integer division: under Python 3, ``len(...) / 2`` is a float
    # and raises TypeError when used as a list index.
    return sorted(self.values)[len(self.values) // 2]
[ "def", "mean", "(", "self", ")", ":", "# XXX rename this method", "if", "len", "(", "self", ".", "values", ")", ">", "0", ":", "return", "sorted", "(", "self", ".", "values", ")", "[", "len", "(", "self", ".", "values", ")", "/", "2", "]", "else", ...
return the median value
[ "return", "the", "median", "value" ]
python
train
d0c-s4vage/pfp
pfp/bitwrap.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L219-L241
def unconsumed_ranges(self):
    """Return an IntervalTree of unconsumed ranges, of the format
    (start, end] with the end value not being included.
    """
    gaps = IntervalTree()
    # Iteration order of the tree is not predictable; sort by interval start.
    ordered = sorted(self.range_set, key=lambda r: r.begin)
    prev = None
    for rng in ordered:
        if prev is not None:
            gaps.add(Interval(prev.end, rng.begin))
        prev = rng
    # means we've seeked past the end
    if len(self.range_set[self.tell()]) != 1:
        gaps.add(Interval(prev.end, self.tell()))
    return gaps
[ "def", "unconsumed_ranges", "(", "self", ")", ":", "res", "=", "IntervalTree", "(", ")", "prev", "=", "None", "# normal iteration is not in a predictable order", "ranges", "=", "sorted", "(", "[", "x", "for", "x", "in", "self", ".", "range_set", "]", ",", "k...
Return an IntervalTree of unconsumed ranges, of the format (start, end] with the end value not being included
[ "Return", "an", "IntervalTree", "of", "unconsumed", "ranges", "of", "the", "format", "(", "start", "end", "]", "with", "the", "end", "value", "not", "being", "included" ]
python
train
andy29485/embypy
embypy/emby.py
https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/emby.py#L137-L159
async def nextUp(self, userId=None):
    '''Return the list of items marked as `next up`.

    |coro|

    Parameters
    ----------
    userId : str
        if provided, the returned list is the one that that user will see.

    Returns
    -------
    list
        the items that will appear as next up (for user if id was given)
    '''
    payload = await self.connector.getJson(
        '/Shows/NextUp',
        pass_uid=True,
        remote=False,
        userId=userId,
    )
    return await self.process(payload)
[ "async", "def", "nextUp", "(", "self", ",", "userId", "=", "None", ")", ":", "json", "=", "await", "self", ".", "connector", ".", "getJson", "(", "'/Shows/NextUp'", ",", "pass_uid", "=", "True", ",", "remote", "=", "False", ",", "userId", "=", "userId"...
returns list of items marked as `next up` |coro| Parameters ---------- userId : str if provided, then the list returned is the one that that user will see. Returns ------- list the items that will appear as next up (for user if id was given)
[ "returns", "list", "of", "items", "marked", "as", "next", "up" ]
python
train
crossbario/txaio
txaio/aio.py
https://github.com/crossbario/txaio/blob/29c77ff1210cabd4cc03f16f34672612e7eef704/txaio/aio.py#L283-L322
def start_logging(out=_stdout, level='info'):
    """
    Begin logging.

    :param out: if provided, a file-like object to log to. By default, this
        is stdout.
    :param level: the maximum log-level to emit (a string)
    """
    global _log_level, _loggers, _started_logging
    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)
            )
        )
    # Idempotent: a second call is a no-op.
    if _started_logging:
        return
    _started_logging = True
    _log_level = level
    logging.getLogger().addHandler(_TxaioFileHandler(out))
    # No basicConfig() needed: the root logger now has at least one handler.
    logging.raiseExceptions = True  # FIXME
    stdlib_levels = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warn': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'trace': logging.DEBUG,
    }
    logging.getLogger().setLevel(stdlib_levels[level])
    # Propagate the level to any loggers created before logging started;
    # loggers created later pick it up from _log_level.
    for existing in _loggers:
        existing._set_log_level(level)
[ "def", "start_logging", "(", "out", "=", "_stdout", ",", "level", "=", "'info'", ")", ":", "global", "_log_level", ",", "_loggers", ",", "_started_logging", "if", "level", "not", "in", "log_levels", ":", "raise", "RuntimeError", "(", "\"Invalid log level '{0}'; ...
Begin logging. :param out: if provided, a file-like object to log to. By default, this is stdout. :param level: the maximum log-level to emit (a string)
[ "Begin", "logging", "." ]
python
train
gtaylor/petfinder-api
petfinder/client.py
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L231-L240
def pet_get(self, **kwargs):
    """
    pet.get wrapper. Returns a record dict for the requested pet.

    :rtype: dict
    :returns: The pet's record dict.
    """
    response_root = self._do_api_call("pet.get", kwargs)
    pet_node = response_root.find("pet")
    return self._parse_pet_record(pet_node)
[ "def", "pet_get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "root", "=", "self", ".", "_do_api_call", "(", "\"pet.get\"", ",", "kwargs", ")", "return", "self", ".", "_parse_pet_record", "(", "root", ".", "find", "(", "\"pet\"", ")", ")" ]
pet.get wrapper. Returns a record dict for the requested pet. :rtype: dict :returns: The pet's record dict.
[ "pet", ".", "get", "wrapper", ".", "Returns", "a", "record", "dict", "for", "the", "requested", "pet", "." ]
python
train
RRZE-HPC/kerncraft
kerncraft/models/ecm.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/ecm.py#L210-L228
def report(self, output_file=sys.stdout):
    """Print generated model data in human readable format."""
    verbose = self.verbose > 1
    if verbose:
        print(pprint.pformat(self.results['verbose infos']), file=output_file)
    for level, cycles in self.results['cycles']:
        converted = self.conv_cy(cycles)[self._args.unit]
        print('{} = {}'.format(level, converted), file=output_file)
    if verbose and 'memory bandwidth kernel' in self.results:
        print('memory cycles based on {} kernel with {}'.format(
            self.results['memory bandwidth kernel'],
            self.results['memory bandwidth']), file=output_file)
    if verbose:
        print(file=output_file)
        print(self.report_data_transfers(), file=output_file)
[ "def", "report", "(", "self", ",", "output_file", "=", "sys", ".", "stdout", ")", ":", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "'{}'", ".", "format", "(", "pprint", ".", "pformat", "(", "self", ".", "results", "[", "'verbose infos'...
Print generated model data in human readable format.
[ "Print", "generated", "model", "data", "in", "human", "readable", "format", "." ]
python
test
NickMonzillo/SmartCloud
SmartCloud/__init__.py
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L109-L119
def display(self):
    '''Display the word cloud in a pygame window until the user closes it.'''
    pygame.init()
    # BUG FIX: the original assigned the surface to ``self.display``, which
    # clobbered this method on the instance and made any second call fail;
    # a local name avoids the shadowing.
    screen = pygame.display.set_mode((self.width, self.height))
    screen.blit(self.cloud, (0, 0))
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return
[ "def", "display", "(", "self", ")", ":", "pygame", ".", "init", "(", ")", "self", ".", "display", "=", "pygame", ".", "display", ".", "set_mode", "(", "(", "self", ".", "width", ",", "self", ".", "height", ")", ")", "self", ".", "display", ".", "...
Displays the word cloud to the screen.
[ "Displays", "the", "word", "cloud", "to", "the", "screen", "." ]
python
train
saltstack/salt
salt/utils/openstack/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L543-L548
def root_password(self, server_id, password):
    '''
    Change the root password of the server identified by ``server_id``
    (uuid), delegating to the nova compute connection.
    '''
    self.compute_conn.servers.change_password(server_id, password)
[ "def", "root_password", "(", "self", ",", "server_id", ",", "password", ")", ":", "nt_ks", "=", "self", ".", "compute_conn", "nt_ks", ".", "servers", ".", "change_password", "(", "server_id", ",", "password", ")" ]
Change server(uuid's) root password
[ "Change", "server", "(", "uuid", "s", ")", "root", "password" ]
python
train
Parquery/sphinx-icontract
sphinx_icontract/__init__.py
https://github.com/Parquery/sphinx-icontract/blob/92918f23a8ea1873112e9b7446c64cd6f12ee04b/sphinx_icontract/__init__.py#L27-L84
def _negate_compare_text(atok: asttokens.ASTTokens, node: ast.Compare) -> str:
    """
    Generate the text representing the negation of the comparison node.

    :param atok:
        parsing obtained with ``asttokens`` so that we can access the last tokens of a node.
        The standard ``ast`` module provides only the first token of an AST node.
        In lack of concrete syntax tree, getting text from first to last token is currently
        the simplest approach.
    :param node: AST node representing the comparison in a condition
    :return: text representation of the node's negation
    """
    assert len(node.ops) == 1, "A single comparison expected, but got: {}".format(len(node.ops))
    assert len(node.comparators) == 1, "A single comparator expected, but got: {}".format(len(node.comparators))
    # Each comparison operator maps to the format string of its negation.
    negated_format_by_op = {
        ast.Eq: '{} != {}',
        ast.NotEq: '{} == {}',
        ast.Lt: '{} >= {}',
        ast.LtE: '{} > {}',
        ast.Gt: '{} <= {}',
        ast.GtE: '{} < {}',
        ast.Is: '{} is not {}',
        ast.IsNot: '{} is {}',
        ast.In: '{} not in {}',
        ast.NotIn: '{} in {}',
    }
    operator = node.ops[0]
    fmt = negated_format_by_op.get(type(operator))
    if fmt is None:
        raise NotImplementedError("Unhandled comparison operator: {}".format(operator))
    left_text = atok.get_text(node=node.left)
    right_text = atok.get_text(node=node.comparators[0])
    return fmt.format(left_text, right_text)
[ "def", "_negate_compare_text", "(", "atok", ":", "asttokens", ".", "ASTTokens", ",", "node", ":", "ast", ".", "Compare", ")", "->", "str", ":", "assert", "len", "(", "node", ".", "ops", ")", "==", "1", ",", "\"A single comparison expected, but got: {}\"", "....
Generate the text representing the negation of the comparison node. :param atok: parsing obtained with ``asttokens`` so that we can access the last tokens of a node. The standard ``ast`` module provides only the first token of an AST node. In lack of concrete syntax tree, getting text from first to last token is currently the simplest approach. :param node: AST node representing the comparison in a condition :return: text representation of the node's negation
[ "Generate", "the", "text", "representing", "the", "negation", "of", "the", "comparison", "node", "." ]
python
train
grycap/RADL
radl/radl.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L1291-L1297
def get_network_by_id(self, net_id):
    """Return the network whose id matches ``net_id``, or None."""
    return next((net for net in self.networks if net.id == net_id), None)
[ "def", "get_network_by_id", "(", "self", ",", "net_id", ")", ":", "for", "elem", "in", "self", ".", "networks", ":", "if", "elem", ".", "id", "==", "net_id", ":", "return", "elem", "return", "None" ]
Return a network with that id or None.
[ "Return", "a", "network", "with", "that", "id", "or", "None", "." ]
python
train
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/cloudstorage_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/cloudstorage_api.py#L651-L673
def _find_elements(self, result, elements):
    """Find interesting elements from XML.

    Only looks for the specified elements without parsing the entire XML;
    works best when the elements of interest appear near the beginning.
    Note: ``elements`` is consumed (tags are removed as they are found).

    Args:
      result: response XML.
      elements: a set of interesting element tags.

    Returns:
      A dict from element tag to element value.
    """
    found = {}
    stream = StringIO.StringIO(result)
    for _, elem in ET.iterparse(stream, events=('end',)):
        # Stop as soon as every requested tag has been seen.
        if not elements:
            break
        if elem.tag in elements:
            found[elem.tag] = elem.text
            elements.remove(elem.tag)
    return found
[ "def", "_find_elements", "(", "self", ",", "result", ",", "elements", ")", ":", "element_mapping", "=", "{", "}", "result", "=", "StringIO", ".", "StringIO", "(", "result", ")", "for", "_", ",", "e", "in", "ET", ".", "iterparse", "(", "result", ",", ...
Find interesting elements from XML. This function tries to only look for specified elements without parsing the entire XML. The specified elements is better located near the beginning. Args: result: response XML. elements: a set of interesting element tags. Returns: A dict from element tag to element value.
[ "Find", "interesting", "elements", "from", "XML", "." ]
python
train
CalebBell/thermo
thermo/utils.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/utils.py#L3485-L3561
def plot_isotherm(self, T, zs, ws, Pmin=None, Pmax=None, methods=[], pts=50, only_valid=True):  # pragma: no cover
    r'''Plot the property vs pressure at a specified temperature and
    composition, for a list of methods (or the user methods if set, or all
    methods).

    If `only_valid` is set, `test_method_validity` checks each condition and
    `test_property_validity` checks each answer; methods are allowed to fail
    and only valid points are plotted. Otherwise, results are plotted as-is
    and any method failure propagates.

    Parameters
    ----------
    T : float
        Temperature at which to create the plot, [K]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    Pmin : float
        Minimum pressure, to begin calculating the property, [Pa]
    Pmax : float
        Maximum pressure, to stop calculating the property, [Pa]
    methods : list, optional
        List of methods to consider
    pts : int, optional
        Number of points to calculate the property at
    only_valid : bool
        If True, only plot successful methods and valid properties; if False,
        attempt calculation without any checking
    '''
    if not has_matplotlib:
        raise Exception('Optional dependency matplotlib is required for plotting')
    if Pmin is None:
        if self.Pmin is not None:
            Pmin = self.Pmin
        else:
            raise Exception('Minimum pressure could not be auto-detected; please provide it')
    if Pmax is None:
        if self.Pmax is not None:
            Pmax = self.Pmax
        else:
            raise Exception('Maximum pressure could not be auto-detected; please provide it')
    if not methods:
        methods = self.user_methods if self.user_methods else self.all_methods
    Ps = np.linspace(Pmin, Pmax, pts)
    for method in methods:
        if only_valid:
            properties, Ps2 = [], []
            for P in Ps:
                if self.test_method_validity(T, P, zs, ws, method):
                    try:
                        p = self.calculate(T, P, zs, ws, method)
                        if self.test_property_validity(p):
                            properties.append(p)
                            Ps2.append(P)
                    except Exception:
                        # BUG FIX: was a bare ``except:`` which also swallowed
                        # KeyboardInterrupt/SystemExit. Individual methods may
                        # fail outside their domain; skip those points only.
                        pass
            plt.plot(Ps2, properties, label=method)
        else:
            properties = [self.calculate(T, P, zs, ws, method) for P in Ps]
            plt.plot(Ps, properties, label=method)
    plt.legend(loc='best')
    plt.ylabel(self.name + ', ' + self.units)
    plt.xlabel('Pressure, Pa')
    plt.title(self.name + ' of a mixture of ' + ', '.join(self.CASs)
              + ' at mole fractions of ' + ', '.join(str(round(i, 4)) for i in zs) + '.')
    plt.show()
[ "def", "plot_isotherm", "(", "self", ",", "T", ",", "zs", ",", "ws", ",", "Pmin", "=", "None", ",", "Pmax", "=", "None", ",", "methods", "=", "[", "]", ",", "pts", "=", "50", ",", "only_valid", "=", "True", ")", ":", "# pragma: no cover", "# This f...
r'''Method to create a plot of the property vs pressure at a specified temperature and composition according to either a specified list of methods, or the user methods (if set), or all methods. User-selectable number of points, and pressure range. If only_valid is set, `test_method_validity` will be used to check if each condition in the specified range is valid, and `test_property_validity` will be used to test the answer, and the method is allowed to fail; only the valid points will be plotted. Otherwise, the result will be calculated and displayed as-is. This will not suceed if the method fails. Parameters ---------- T : float Temperature at which to create the plot, [K] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] Pmin : float Minimum pressure, to begin calculating the property, [Pa] Pmax : float Maximum pressure, to stop calculating the property, [Pa] methods : list, optional List of methods to consider pts : int, optional A list of points to calculate the property at; if Pmin to Pmax covers a wide range of method validities, only a few points may end up calculated for a given method so this may need to be large only_valid : bool If True, only plot successful methods and calculated properties, and handle errors; if False, attempt calculation without any checking and use methods outside their bounds
[ "r", "Method", "to", "create", "a", "plot", "of", "the", "property", "vs", "pressure", "at", "a", "specified", "temperature", "and", "composition", "according", "to", "either", "a", "specified", "list", "of", "methods", "or", "the", "user", "methods", "(", ...
python
valid
tensorflow/mesh
mesh_tensorflow/transformer/transformer.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L263-L271
def nonpadding(self):
    """Tensor with zeros in padding positions and ones elsewhere."""
    seq_id = self.sequence_id
    if seq_id is None:
        return None
    if seq_id == 1:
        # sequence_id of exactly 1 means everything is non-padding
        return 1
    return mtf.cast(mtf.not_equal(seq_id, 0), self.activation_dtype)
[ "def", "nonpadding", "(", "self", ")", ":", "if", "self", ".", "sequence_id", "is", "None", ":", "return", "None", "if", "self", ".", "sequence_id", "==", "1", ":", "return", "1", "else", ":", "return", "mtf", ".", "cast", "(", "mtf", ".", "not_equal...
Tensor with zeros in padding positions and ones elsewhere.
[ "Tensor", "with", "zeros", "in", "padding", "positions", "and", "ones", "elsewhere", "." ]
python
train
matthieugouel/gibica
gibica/parser.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/parser.py#L149-L160
def assignment(self):
    """
    assignment: logical_or_expr ['=' logical_or_expr]
    """
    left = self.logical_or_expr()
    # No '=' follows: this is a plain expression, not an assignment.
    if self.token.nature != Nature.ASSIGN:
        return left
    op_token = self.token
    self._process(Nature.ASSIGN)
    right = self.logical_or_expr()
    return Assignment(left=left, op=op_token, right=right)
[ "def", "assignment", "(", "self", ")", ":", "node", "=", "self", ".", "logical_or_expr", "(", ")", "if", "self", ".", "token", ".", "nature", "==", "Nature", ".", "ASSIGN", ":", "token", "=", "self", ".", "token", "self", ".", "_process", "(", "Natur...
assignment: logical_or_expr ['=' logical_or_expr]
[ "assignment", ":", "logical_or_expr", "[", "=", "logical_or_expr", "]" ]
python
train
grst/geos
geos/mapsource.py
https://github.com/grst/geos/blob/ea15abcc5d8f86c9051df55e489b7d941b51a638/geos/mapsource.py#L161-L173
def min_zoom(self):
    """
    Get the minimal zoom level of all layers.

    Returns:
        int: the minimum of all zoom levels of all layers

    Raises:
        ValueError: if no layers exist
    """
    return min(layer.min_zoom for layer in self.layers)
[ "def", "min_zoom", "(", "self", ")", ":", "zoom_levels", "=", "[", "map_layer", ".", "min_zoom", "for", "map_layer", "in", "self", ".", "layers", "]", "return", "min", "(", "zoom_levels", ")" ]
Get the minimal zoom level of all layers. Returns: int: the minimum of all zoom levels of all layers Raises: ValueError: if no layers exist
[ "Get", "the", "minimal", "zoom", "level", "of", "all", "layers", "." ]
python
train
martinkosir/neverbounce-python
neverbounce/objects.py
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/objects.py#L19-L27
def from_text_code(cls, email, result_text_code): """ Alternative method to create an instance of VerifiedEmail object from a text code. :param str email: Email address. :param str result_text_code: A result of verification represented by text (e.g. valid, unknown). :return: An instance of object. """ result_code = cls.result_text_codes[result_text_code] return cls(email, result_code)
[ "def", "from_text_code", "(", "cls", ",", "email", ",", "result_text_code", ")", ":", "result_code", "=", "cls", ".", "result_text_codes", "[", "result_text_code", "]", "return", "cls", "(", "email", ",", "result_code", ")" ]
Alternative method to create an instance of VerifiedEmail object from a text code. :param str email: Email address. :param str result_text_code: A result of verification represented by text (e.g. valid, unknown). :return: An instance of object.
[ "Alternative", "method", "to", "create", "an", "instance", "of", "VerifiedEmail", "object", "from", "a", "text", "code", ".", ":", "param", "str", "email", ":", "Email", "address", ".", ":", "param", "str", "result_text_code", ":", "A", "result", "of", "ve...
python
train
jantman/awslimitchecker
awslimitchecker/checker.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/checker.py#L178-L214
def _boto_conn_kwargs(self): """ Generate keyword arguments for boto3 connection functions. If ``self.account_id`` is defined, this will call :py:meth:`~._get_sts_token` to get STS token credentials using `boto3.STS.Client.assume_role <https://boto3.readthedocs.org/en/ latest/reference/services/sts.html#STS.Client.assume_role>`_ and include those credentials in the return value. If ``self.profile_name`` is defined, this will call `boto3.Session() <http://boto3.readthedocs.io/en/latest/reference/core/session.html>` with that profile and include those credentials in the return value. :return: keyword arguments for boto3 connection functions :rtype: dict """ kwargs = {'region_name': self.region} if self.account_id is not None: logger.debug("Connecting for account %s role '%s' with STS " "(region: %s)", self.account_id, self.account_role, self.region) credentials = self._get_sts_token() kwargs['aws_access_key_id'] = credentials.access_key kwargs['aws_secret_access_key'] = credentials.secret_key kwargs['aws_session_token'] = credentials.session_token elif self.profile_name is not None: # use boto3.Session to get credentials from the named profile logger.debug("Using credentials profile: %s", self.profile_name) session = boto3.Session(profile_name=self.profile_name) credentials = session._session.get_credentials() kwargs['aws_access_key_id'] = credentials.access_key kwargs['aws_secret_access_key'] = credentials.secret_key kwargs['aws_session_token'] = credentials.token else: logger.debug("Connecting to region %s", self.region) return kwargs
[ "def", "_boto_conn_kwargs", "(", "self", ")", ":", "kwargs", "=", "{", "'region_name'", ":", "self", ".", "region", "}", "if", "self", ".", "account_id", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"Connecting for account %s role '%s' with STS \"", ...
Generate keyword arguments for boto3 connection functions. If ``self.account_id`` is defined, this will call :py:meth:`~._get_sts_token` to get STS token credentials using `boto3.STS.Client.assume_role <https://boto3.readthedocs.org/en/ latest/reference/services/sts.html#STS.Client.assume_role>`_ and include those credentials in the return value. If ``self.profile_name`` is defined, this will call `boto3.Session() <http://boto3.readthedocs.io/en/latest/reference/core/session.html>` with that profile and include those credentials in the return value. :return: keyword arguments for boto3 connection functions :rtype: dict
[ "Generate", "keyword", "arguments", "for", "boto3", "connection", "functions", "." ]
python
train
jhermann/rituals
src/rituals/acts/releasing.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/acts/releasing.py#L179-L201
def dist(ctx, devpi=False, egg=False, wheel=False, auto=True): """Distribute the project.""" config.load() cmd = ["python", "setup.py", "sdist"] # Automatically create wheels if possible if auto: egg = sys.version_info.major == 2 try: import wheel as _ wheel = True except ImportError: wheel = False if egg: cmd.append("bdist_egg") if wheel: cmd.append("bdist_wheel") ctx.run("invoke clean --all build --docs test check") ctx.run(' '.join(cmd)) if devpi: ctx.run("devpi upload dist/*")
[ "def", "dist", "(", "ctx", ",", "devpi", "=", "False", ",", "egg", "=", "False", ",", "wheel", "=", "False", ",", "auto", "=", "True", ")", ":", "config", ".", "load", "(", ")", "cmd", "=", "[", "\"python\"", ",", "\"setup.py\"", ",", "\"sdist\"", ...
Distribute the project.
[ "Distribute", "the", "project", "." ]
python
valid
roclark/sportsreference
sportsreference/nfl/roster.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nfl/roster.py#L271-L320
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict): """ Combine all stats for each season. Since all of the stats are spread across multiple tables, they should be combined into a single field which can be used to easily query stats at once. Parameters ---------- table_rows : generator A generator where each element is a row in a stats table. career_stats : generator A generator where each element is a row in the footer of a stats table. Career stats are kept in the footer, hence the usage. all_stats_dict : dictionary A dictionary of all stats separated by season where each key is the season ``string``, such as '2017', and the value is a ``dictionary`` with a ``string`` of 'data' and ``string`` containing all of the data. Returns ------- dictionary Returns an updated version of the passed all_stats_dict which includes more metrics from the provided table. """ most_recent_season = self._most_recent_season if not table_rows: table_rows = [] for row in table_rows: season = self._parse_season(row) try: all_stats_dict[season]['data'] += str(row) except KeyError: all_stats_dict[season] = {'data': str(row)} most_recent_season = season self._most_recent_season = most_recent_season if not career_stats: return all_stats_dict try: all_stats_dict['Career']['data'] += str(next(career_stats)) except KeyError: try: all_stats_dict['Career'] = {'data': str(next(career_stats))} # Occurs when the player doesn't have any career stats listed on # their page in error. except StopIteration: return all_stats_dict return all_stats_dict
[ "def", "_combine_season_stats", "(", "self", ",", "table_rows", ",", "career_stats", ",", "all_stats_dict", ")", ":", "most_recent_season", "=", "self", ".", "_most_recent_season", "if", "not", "table_rows", ":", "table_rows", "=", "[", "]", "for", "row", "in", ...
Combine all stats for each season. Since all of the stats are spread across multiple tables, they should be combined into a single field which can be used to easily query stats at once. Parameters ---------- table_rows : generator A generator where each element is a row in a stats table. career_stats : generator A generator where each element is a row in the footer of a stats table. Career stats are kept in the footer, hence the usage. all_stats_dict : dictionary A dictionary of all stats separated by season where each key is the season ``string``, such as '2017', and the value is a ``dictionary`` with a ``string`` of 'data' and ``string`` containing all of the data. Returns ------- dictionary Returns an updated version of the passed all_stats_dict which includes more metrics from the provided table.
[ "Combine", "all", "stats", "for", "each", "season", "." ]
python
train
jtwhite79/pyemu
pyemu/utils/helpers.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L2550-L2563
def setup_observations(self): """ main entry point for setting up observations """ obs_methods = [self.setup_water_budget_obs,self.setup_hyd, self.setup_smp,self.setup_hob,self.setup_hds, self.setup_sfr_obs] obs_types = ["mflist water budget obs","hyd file", "external obs-sim smp files","hob","hds","sfr"] self.obs_dfs = {} for obs_method, obs_type in zip(obs_methods,obs_types): self.log("processing obs type {0}".format(obs_type)) obs_method() self.log("processing obs type {0}".format(obs_type))
[ "def", "setup_observations", "(", "self", ")", ":", "obs_methods", "=", "[", "self", ".", "setup_water_budget_obs", ",", "self", ".", "setup_hyd", ",", "self", ".", "setup_smp", ",", "self", ".", "setup_hob", ",", "self", ".", "setup_hds", ",", "self", "."...
main entry point for setting up observations
[ "main", "entry", "point", "for", "setting", "up", "observations" ]
python
train
soldag/python-pwmled
pwmled/led/rgb.py
https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/led/rgb.py#L115-L137
def _transition_stage(self, step, total_steps, brightness=None, color=None): """ Get a transition stage at a specific step. :param step: The current step. :param total_steps: The total number of steps. :param brightness: The brightness to transition to (0.0-1.0). :param color: The color to transition to. :return: The stage at the specific step. """ if brightness is not None: self._assert_is_brightness(brightness) brightness = self._interpolate(self.brightness, brightness, step, total_steps) if color is not None: self._assert_is_color(color) color = Color(*[self._interpolate(self.color[i], color[i], step, total_steps) for i in range(3)]) return {'brightness': brightness, 'color': color}
[ "def", "_transition_stage", "(", "self", ",", "step", ",", "total_steps", ",", "brightness", "=", "None", ",", "color", "=", "None", ")", ":", "if", "brightness", "is", "not", "None", ":", "self", ".", "_assert_is_brightness", "(", "brightness", ")", "brig...
Get a transition stage at a specific step. :param step: The current step. :param total_steps: The total number of steps. :param brightness: The brightness to transition to (0.0-1.0). :param color: The color to transition to. :return: The stage at the specific step.
[ "Get", "a", "transition", "stage", "at", "a", "specific", "step", "." ]
python
train
gccxml/pygccxml
pygccxml/declarations/class_declaration.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/class_declaration.py#L363-L387
def get_members(self, access=None): """ returns list of members according to access type If access equals to None, then returned list will contain all members. You should not modify the list content, otherwise different optimization data will stop work and may to give you wrong results. :param access: describes desired members :type access: :class:ACCESS_TYPES :rtype: [ members ] """ if access == ACCESS_TYPES.PUBLIC: return self.public_members elif access == ACCESS_TYPES.PROTECTED: return self.protected_members elif access == ACCESS_TYPES.PRIVATE: return self.private_members all_members = [] all_members.extend(self.public_members) all_members.extend(self.protected_members) all_members.extend(self.private_members) return all_members
[ "def", "get_members", "(", "self", ",", "access", "=", "None", ")", ":", "if", "access", "==", "ACCESS_TYPES", ".", "PUBLIC", ":", "return", "self", ".", "public_members", "elif", "access", "==", "ACCESS_TYPES", ".", "PROTECTED", ":", "return", "self", "."...
returns list of members according to access type If access equals to None, then returned list will contain all members. You should not modify the list content, otherwise different optimization data will stop work and may to give you wrong results. :param access: describes desired members :type access: :class:ACCESS_TYPES :rtype: [ members ]
[ "returns", "list", "of", "members", "according", "to", "access", "type" ]
python
train
log2timeline/plaso
plaso/engine/profilers.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/profilers.py#L263-L276
def Sample(self, tasks_status): """Takes a sample of the status of queued tasks for profiling. Args: tasks_status (TasksStatus): status information about tasks. """ sample_time = time.time() sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format( sample_time, tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks) self._WritesString(sample)
[ "def", "Sample", "(", "self", ",", "tasks_status", ")", ":", "sample_time", "=", "time", ".", "time", "(", ")", "sample", "=", "'{0:f}\\t{1:d}\\t{2:d}\\t{3:d}\\t{4:d}\\t{5:d}\\n'", ".", "format", "(", "sample_time", ",", "tasks_status", ".", "number_of_queued_tasks"...
Takes a sample of the status of queued tasks for profiling. Args: tasks_status (TasksStatus): status information about tasks.
[ "Takes", "a", "sample", "of", "the", "status", "of", "queued", "tasks", "for", "profiling", "." ]
python
train
elliterate/capybara.py
capybara/node/matchers.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/node/matchers.py#L97-L121
def has_all_of_selectors(self, selector, *locators, **kwargs): """ Checks if allof the provided selectors are present on the given page or descendants of the current node. If options are provided, the assertion will check that each locator is present with those options as well (other than ``wait``). :: page.has_all_of_selectors("custom", "Tom", "Joe", visible="all") page.has_all_of_selectors("css", "#my_dif", "a.not_clicked") It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``. The ``wait`` option applies to all of the selectors as a group, so all of the locators must be present within ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds. If the given selector is not a valid selector, the first argument is assumed to be a locator and the default selector will be used. Args: selector (str, optional): The name of the selector to use. Defaults to :data:`capybara.default_selector`. *locators (str): Variable length list of locators. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. """ return self.assert_all_of_selectors(selector, *locators, **kwargs)
[ "def", "has_all_of_selectors", "(", "self", ",", "selector", ",", "*", "locators", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "assert_all_of_selectors", "(", "selector", ",", "*", "locators", ",", "*", "*", "kwargs", ")" ]
Checks if allof the provided selectors are present on the given page or descendants of the current node. If options are provided, the assertion will check that each locator is present with those options as well (other than ``wait``). :: page.has_all_of_selectors("custom", "Tom", "Joe", visible="all") page.has_all_of_selectors("css", "#my_dif", "a.not_clicked") It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``. The ``wait`` option applies to all of the selectors as a group, so all of the locators must be present within ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds. If the given selector is not a valid selector, the first argument is assumed to be a locator and the default selector will be used. Args: selector (str, optional): The name of the selector to use. Defaults to :data:`capybara.default_selector`. *locators (str): Variable length list of locators. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
[ "Checks", "if", "allof", "the", "provided", "selectors", "are", "present", "on", "the", "given", "page", "or", "descendants", "of", "the", "current", "node", ".", "If", "options", "are", "provided", "the", "assertion", "will", "check", "that", "each", "locat...
python
test
square/connect-python-sdk
squareconnect/models/catalog_modifier_override.py
https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/catalog_modifier_override.py#L64-L78
def modifier_id(self, modifier_id): """ Sets the modifier_id of this CatalogModifierOverride. The ID of the [CatalogModifier](#type-catalogmodifier) whose default behavior is being overridden. :param modifier_id: The modifier_id of this CatalogModifierOverride. :type: str """ if modifier_id is None: raise ValueError("Invalid value for `modifier_id`, must not be `None`") if len(modifier_id) < 1: raise ValueError("Invalid value for `modifier_id`, length must be greater than or equal to `1`") self._modifier_id = modifier_id
[ "def", "modifier_id", "(", "self", ",", "modifier_id", ")", ":", "if", "modifier_id", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `modifier_id`, must not be `None`\"", ")", "if", "len", "(", "modifier_id", ")", "<", "1", ":", "raise", "Va...
Sets the modifier_id of this CatalogModifierOverride. The ID of the [CatalogModifier](#type-catalogmodifier) whose default behavior is being overridden. :param modifier_id: The modifier_id of this CatalogModifierOverride. :type: str
[ "Sets", "the", "modifier_id", "of", "this", "CatalogModifierOverride", ".", "The", "ID", "of", "the", "[", "CatalogModifier", "]", "(", "#type", "-", "catalogmodifier", ")", "whose", "default", "behavior", "is", "being", "overridden", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4110-L4122
def user_tickets_assigned(self, user_id, external_id=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/tickets#allowed-for" api_path = "/api/v2/users/{user_id}/tickets/assigned.json" api_path = api_path.format(user_id=user_id) api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if external_id: api_query.update({ "external_id": external_id, }) return self.call(api_path, query=api_query, **kwargs)
[ "def", "user_tickets_assigned", "(", "self", ",", "user_id", ",", "external_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/{user_id}/tickets/assigned.json\"", "api_path", "=", "api_path", ".", "format", "(", "user_id", "=", ...
https://developer.zendesk.com/rest_api/docs/core/tickets#allowed-for
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "tickets#allowed", "-", "for" ]
python
train
Jayin/ETipsService
service/wyulibrary.py
https://github.com/Jayin/ETipsService/blob/1a42612a5e5d11bec0ec1a26c99dec6fe216fca4/service/wyulibrary.py#L15-L46
def __search_book_html(self, anywords, page): """ 检索图书列表页面 :param anywords: 关键字 :param page: 页码 :return: html code """ _params = { 'dt': 'ALL', 'cl': 'ALL', 'dp': '20', 'sf': 'M_PUB_YEAR', 'ob': 'DESC', 'sm': 'table', 'dept': 'ALL', 'ecx': '0', 'anywords': '', # not anywords.. 'page': 1 } _headers = { 'Host': 'lib.wyu.edu.cn', 'Referer': 'http://lib.wyu.edu.cn/opac/search.aspx', 'Accept-Language': ':zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4' } # url要对中文编码.. _params['anywords'] = anywords.decode('utf-8').encode('gbk') _params['page'] = page r = requests.get(url=WyuLibrary.url_search, headers=_headers, params=_params, timeout=self._timeout) # _.d(r.content.decode(_.get_charset(r.content))) return r.content.decode(_.get_charset(r.content))
[ "def", "__search_book_html", "(", "self", ",", "anywords", ",", "page", ")", ":", "_params", "=", "{", "'dt'", ":", "'ALL'", ",", "'cl'", ":", "'ALL'", ",", "'dp'", ":", "'20'", ",", "'sf'", ":", "'M_PUB_YEAR'", ",", "'ob'", ":", "'DESC'", ",", "'sm'...
检索图书列表页面 :param anywords: 关键字 :param page: 页码 :return: html code
[ "检索图书列表页面", ":", "param", "anywords", ":", "关键字", ":", "param", "page", ":", "页码", ":", "return", ":", "html", "code" ]
python
train
aio-libs/aiomonitor
aiomonitor/utils.py
https://github.com/aio-libs/aiomonitor/blob/fe5f9caa0b117861afef13b64bce5dce3a415b80/aiomonitor/utils.py#L115-L126
def alt_names(names: str) -> Callable[..., Any]: """Add alternative names to you custom commands. `names` is a single string with a space separated list of aliases for the decorated command. """ names_split = names.split() def decorator(func: Callable[..., Any]) -> Callable[..., Any]: func.alt_names = names_split # type: ignore return func return decorator
[ "def", "alt_names", "(", "names", ":", "str", ")", "->", "Callable", "[", "...", ",", "Any", "]", ":", "names_split", "=", "names", ".", "split", "(", ")", "def", "decorator", "(", "func", ":", "Callable", "[", "...", ",", "Any", "]", ")", "->", ...
Add alternative names to you custom commands. `names` is a single string with a space separated list of aliases for the decorated command.
[ "Add", "alternative", "names", "to", "you", "custom", "commands", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L583-L605
def connect_delete_node_proxy_with_path(self, name, path, **kwargs): # noqa: E501 """connect_delete_node_proxy_with_path # noqa: E501 connect DELETE requests to proxy of Node # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_delete_node_proxy_with_path(name, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the NodeProxyOptions (required) :param str path: path to the resource (required) :param str path2: Path is the URL path to use for the current proxy request to node. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_delete_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501 else: (data) = self.connect_delete_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501 return data
[ "def", "connect_delete_node_proxy_with_path", "(", "self", ",", "name", ",", "path", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "re...
connect_delete_node_proxy_with_path # noqa: E501 connect DELETE requests to proxy of Node # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_delete_node_proxy_with_path(name, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the NodeProxyOptions (required) :param str path: path to the resource (required) :param str path2: Path is the URL path to use for the current proxy request to node. :return: str If the method is called asynchronously, returns the request thread.
[ "connect_delete_node_proxy_with_path", "#", "noqa", ":", "E501" ]
python
train
kivy/python-for-android
pythonforandroid/python.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/python.py#L292-L361
def create_python_bundle(self, dirn, arch): """ Create a packaged python bundle in the target directory, by copying all the modules and standard library to the right place. """ # Todo: find a better way to find the build libs folder modules_build_dir = join( self.get_build_dir(arch.arch), 'android-build', 'build', 'lib.linux{}-{}-{}'.format( '2' if self.version[0] == '2' else '', arch.command_prefix.split('-')[0], self.major_minor_version_string )) # Compile to *.pyc/*.pyo the python modules self.compile_python_files(modules_build_dir) # Compile to *.pyc/*.pyo the standard python library self.compile_python_files(join(self.get_build_dir(arch.arch), 'Lib')) # Compile to *.pyc/*.pyo the other python packages (site-packages) self.compile_python_files(self.ctx.get_python_install_dir()) # Bundle compiled python modules to a folder modules_dir = join(dirn, 'modules') c_ext = self.compiled_extension ensure_dir(modules_dir) module_filens = (glob.glob(join(modules_build_dir, '*.so')) + glob.glob(join(modules_build_dir, '*' + c_ext))) info("Copy {} files into the bundle".format(len(module_filens))) for filen in module_filens: info(" - copy {}".format(filen)) copy2(filen, modules_dir) # zip up the standard library stdlib_zip = join(dirn, 'stdlib.zip') with current_directory(join(self.get_build_dir(arch.arch), 'Lib')): stdlib_filens = list(walk_valid_filens( '.', self.stdlib_dir_blacklist, self.stdlib_filen_blacklist)) info("Zip {} files into the bundle".format(len(stdlib_filens))) shprint(sh.zip, stdlib_zip, *stdlib_filens) # copy the site-packages into place ensure_dir(join(dirn, 'site-packages')) ensure_dir(self.ctx.get_python_install_dir()) # TODO: Improve the API around walking and copying the files with current_directory(self.ctx.get_python_install_dir()): filens = list(walk_valid_filens( '.', self.site_packages_dir_blacklist, self.site_packages_filen_blacklist)) info("Copy {} files into the site-packages".format(len(filens))) for filen in filens: info(" - copy 
{}".format(filen)) ensure_dir(join(dirn, 'site-packages', dirname(filen))) copy2(filen, join(dirn, 'site-packages', filen)) # copy the python .so files into place python_build_dir = join(self.get_build_dir(arch.arch), 'android-build') python_lib_name = 'libpython' + self.major_minor_version_string if self.major_minor_version_string[0] == '3': python_lib_name += 'm' shprint(sh.cp, join(python_build_dir, python_lib_name + '.so'), join(self.ctx.dist_dir, self.ctx.dist_name, 'libs', arch.arch)) info('Renaming .so files to reflect cross-compile') self.reduce_object_file_names(join(dirn, 'site-packages')) return join(dirn, 'site-packages')
[ "def", "create_python_bundle", "(", "self", ",", "dirn", ",", "arch", ")", ":", "# Todo: find a better way to find the build libs folder", "modules_build_dir", "=", "join", "(", "self", ".", "get_build_dir", "(", "arch", ".", "arch", ")", ",", "'android-build'", ","...
Create a packaged python bundle in the target directory, by copying all the modules and standard library to the right place.
[ "Create", "a", "packaged", "python", "bundle", "in", "the", "target", "directory", "by", "copying", "all", "the", "modules", "and", "standard", "library", "to", "the", "right", "place", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L255-L272
def lower_unsupported_metafield_expressions(ir_blocks): """Raise exception if an unsupported metafield is encountered in any LocalField expression.""" def visitor_fn(expression): """Visitor function raising exception for any unsupported metafield.""" if not isinstance(expression, expressions.LocalField): return expression if expression.field_name not in constants.UNSUPPORTED_META_FIELDS: return expression raise NotImplementedError( u'Encountered unsupported metafield {} in LocalField {} during construction of ' u'SQL query tree for IR blocks {}.'.format( constants.UNSUPPORTED_META_FIELDS[expression.field_name], expression, ir_blocks)) new_ir_blocks = [ block.visit_and_update_expressions(visitor_fn) for block in ir_blocks ] return new_ir_blocks
[ "def", "lower_unsupported_metafield_expressions", "(", "ir_blocks", ")", ":", "def", "visitor_fn", "(", "expression", ")", ":", "\"\"\"Visitor function raising exception for any unsupported metafield.\"\"\"", "if", "not", "isinstance", "(", "expression", ",", "expressions", "...
Raise exception if an unsupported metafield is encountered in any LocalField expression.
[ "Raise", "exception", "if", "an", "unsupported", "metafield", "is", "encountered", "in", "any", "LocalField", "expression", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/imageObject.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L222-L244
def putData(self,data=None,exten=None): """ Now that we are removing the data from the object to save memory, we need something that cleanly puts the data array back into the object so that we can write out everything together using something like fits.writeto....this method is an attempt to make sure that when you add an array back to the .data section of the hdu it still matches the header information for that section ( ie. update the bitpix to reflect the datatype of the array you are adding). The other header stuff is up to you to verify. Data should be the data array exten is where you want to stick it, either extension number or a string like 'sci,1' """ if data is None: log.warning("No data supplied") else: extnum = _interpretExten(exten) ext = self._image[extnum] # update the bitpix to the current datatype, this aint fancy and # ignores bscale ext.header['BITPIX'] = _NUMPY_TO_IRAF_DTYPES[data.dtype.name] ext.data = data
[ "def", "putData", "(", "self", ",", "data", "=", "None", ",", "exten", "=", "None", ")", ":", "if", "data", "is", "None", ":", "log", ".", "warning", "(", "\"No data supplied\"", ")", "else", ":", "extnum", "=", "_interpretExten", "(", "exten", ")", ...
Now that we are removing the data from the object to save memory, we need something that cleanly puts the data array back into the object so that we can write out everything together using something like fits.writeto....this method is an attempt to make sure that when you add an array back to the .data section of the hdu it still matches the header information for that section ( ie. update the bitpix to reflect the datatype of the array you are adding). The other header stuff is up to you to verify. Data should be the data array exten is where you want to stick it, either extension number or a string like 'sci,1'
[ "Now", "that", "we", "are", "removing", "the", "data", "from", "the", "object", "to", "save", "memory", "we", "need", "something", "that", "cleanly", "puts", "the", "data", "array", "back", "into", "the", "object", "so", "that", "we", "can", "write", "ou...
python
train
RJT1990/pyflux
pyflux/tsm.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/tsm.py#L518-L559
def shift_dates(self,h): """ Auxiliary function for creating dates for forecasts Parameters ---------- h : int How many steps to forecast Returns ---------- A transformed date_index object """ date_index = copy.deepcopy(self.index) date_index = date_index[self.max_lag:len(date_index)] if self.is_pandas is True: if isinstance(date_index, pd.core.indexes.datetimes.DatetimeIndex): if pd.infer_freq(date_index) in ['H', 'M', 'S']: for t in range(h): date_index += pd.DateOffset((date_index[len(date_index)-1] - date_index[len(date_index)-2]).seconds) else: # Assume higher frequency (configured for days) for t in range(h): date_index += pd.DateOffset((date_index[len(date_index)-1] - date_index[len(date_index)-2]).days) elif isinstance(date_index, pd.core.indexes.numeric.Int64Index): for i in range(h): new_value = date_index.values[len(date_index.values)-1] + (date_index.values[len(date_index.values)-1] - date_index.values[len(date_index.values)-2]) date_index = pd.Int64Index(np.append(date_index.values,new_value)) else: for t in range(h): date_index.append(date_index[len(date_index)-1]+1) return date_index
[ "def", "shift_dates", "(", "self", ",", "h", ")", ":", "date_index", "=", "copy", ".", "deepcopy", "(", "self", ".", "index", ")", "date_index", "=", "date_index", "[", "self", ".", "max_lag", ":", "len", "(", "date_index", ")", "]", "if", "self", "....
Auxiliary function for creating dates for forecasts Parameters ---------- h : int How many steps to forecast Returns ---------- A transformed date_index object
[ "Auxiliary", "function", "for", "creating", "dates", "for", "forecasts" ]
python
train